diff --git "a/6239.jsonl" "b/6239.jsonl" new file mode 100644--- /dev/null +++ "b/6239.jsonl" @@ -0,0 +1,2068 @@ +{"seq_id":"74210938248","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport __init__\nimport src.load_data.loader as data_loader\n\nif __name__ == \"__main__\":\n url = 'https://raw.githubusercontent.com/jvns/pandas-cookbook/master/data/weather_2012.csv'\n dest_folder = 'weather/'\n #data_loader.fetch_data(url, dest_folder)\n data_file = 'weather_2012.csv'\n local_path = data_loader.dataset_path(dest_folder, data_file)\n weather_2012_final = pd.read_csv(local_path, index_col='Date/Time', parse_dates=True)\n temperatures = weather_2012_final[[u'Temp (C)']].copy()\n print(temperatures.head())\n temperatures.loc[:,'Hour'] = weather_2012_final.index.hour\n print(temperatures.head())\n temperatures.groupby('Hour').aggregate(np.median).plot()\n plt.show()\n temperatures.groupby('Hour').aggregate(np.mean).plot()\n plt.show()\n\n\n","repo_name":"gaborchris/tutorials","sub_path":"pandas_tutorial/chap5.py","file_name":"chap5.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20340973188","text":"#!/usr/bin/env python3\nimport time\nfrom smbus import SMBus\nimport requests\nprint(\"Starting PH Probe\")\nbus = SMBus(1)\n\ndef readChannel(params):\n global bus\n bus.write_byte(0x48, params & 0x03)\n bus.write_byte(0x48, 0)\n return bus.read_byte(0x48)\n\ndef analogOut(out):\n global bus\n bus.write_byte(0x48, 0x40)\n bus.write_byte(0x48, out & 0xFF)\n bus.write_byte(0x48, 0x00)\n\ndef readAll():\n global bus\n bus.write_byte(0x48, 0x04)\n data = []\n for _ in range(4):\n data.append(bus.read_byte(0x48))\n return data\n\nurl = \"https://beer.tanger.dev/ph\"\nheaders = {'Content-type': 'application/json'}\nwhile(True):\n print('channel 1 is:')\n print(readChannel(1))\n print('check AOUT, should be about 2.5v')\n print(analogOut(127))\n val = readChannel(1)\n val = (val / 10.0)\n try:\n response = requests.post(url, json={\"ph\": (val / 10.0), \"data\": \"Test\"}, headers=headers)\n if response.status_code == 200:\n print(f\"PH request sent with data: {val}\")\n print(\n f\"PH request NOT sent successfully with data: {val} and response code: {response.status_code}: {response.content}\"\n )\n except requests.ConnectionError:\n print(\n f\"PH request NOT sent successfully with data: {val} and response code: {response.status_code}: {response.content}\"\n )\n time.sleep(3)\n","repo_name":"Tangdongle/InFermneter","sub_path":"ph_probe.py","file_name":"ph_probe.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1755340766","text":"# https://www.hackerrank.com/challenges/defaultdict-tutorial\n\nfrom collections import defaultdict\n\ndef readn(n):\n\tl = []\n\tfor i in range(0, n):\n\t\tl.append(input().strip())\n\treturn l\n\nn, m = map(int, list(input().split(' ')))\nA = readn(n)\nB = readn(m)\nd = defaultdict(list)\nfor i in B:\n\tif d[i] == []:\n\t\tfor j in range(0,len(A)):\n\t\t\tif i == A[j]:\n\t\t\t\td[i].append(j + 1)\n\tif d[i] == []:\n\t\td[i].append(-1)\n\tprint(' '.join(map(str, d[i])))\n\n\n\n\n\n","repo_name":"tamarit/hackerrank_phyton","sub_path":"defaultdict-tutorial/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"3608460339","text":"import datetime\nimport json\nimport os\nfrom time import sleep\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\n\nclass ShopDetails:\n def __init__(self, order_number, url):\n\n # initialized a dictionary to store the dara\n\n self.order_number = order_number\n self.DATA_DICT = {\"Shop Name\": [], \"ShopAddress\": [], \"Phone Number\": [], \"Shop URL\": [],\n \"Description\": []}\n\n self.URL = f\"{url}\"\n self.BaseURL = \"https://www.northpointcity.com.sg/\"\n\n def appendingDataInJSONfile(self):\n\n self.getShopDescription()\n names = self.DATA_DICT['Shop Name']\n locations = self.DATA_DICT['ShopAddress']\n urls = self.DATA_DICT['Shop URL']\n phone_numbers = self.DATA_DICT['Phone Number']\n descriptions = self.DATA_DICT['Description']\n\n data = {\n \"Shops\": []\n }\n\n for name, location, url, phone_number, description in zip(names, locations, urls, phone_numbers, descriptions):\n data['Shops'].append(\n {\n 'Shop Name': name,\n 'Shop Address': location,\n 'Shop URL': url,\n 'Phone Number': phone_number,\n 'Description': description\n\n }\n )\n order_id = self.order_number\n folder_path = 'Data'\n current_utc_datetime = datetime.datetime.utcnow()\n file_name = f\"{order_id} - {current_utc_datetime.strftime('%Y-%m-%d__%H-%M')}.json\"\n file_path = os.path.join(folder_path, file_name)\n if not os.path.exists('Data'):\n os.mkdir(folder_path)\n with open(file_path, 'w', encoding=\"utf-8\") as json_file:\n json.dump(data, json_file, indent=4, ensure_ascii=False)\n\n print(\"All Data Scrapped Successfully\")\n\n def getShopDescription(self):\n\n # in this function getting description of Shops\n shop_description_list = []\n # getting Shops URLs by calling the url extractor Description\n data = self.getShopData()\n res_urls = data[0]\n names = data[1]\n for url, name in zip(res_urls, names):\n print(f'Scrapping data of \"{name}\"')\n response = requests.get(url)\n # making soap of every url page getting from Description of Shops\n\n urlSoap = BeautifulSoup(response.text, 'html.parser')\n # print(urlSoap)\n if urlSoap is not None:\n\n # Finding Description of Restaurants\n\n description_div = urlSoap.find('div', class_=\"textbody\")\n\n if description_div is not None:\n\n description = description_div.text.replace(\"\\n\", \" \").replace(\" \", \"\").replace('\\u00a0',\n \" \").replace('\\r',\n \"\").replace(\n \"\\\"\", \"\")\n\n shop_description_list.append(description)\n else:\n shop_description_list.append(\"Not available\")\n else:\n\n shop_description_list.append(\"Not Available\")\n\n self.DATA_DICT['Description'] = shop_description_list\n\n def getShopData(self):\n\n soap = self.getShopSoap()\n\n shop_urls_list = []\n shop_name_list = []\n shop_address_list = []\n shop_number_list = []\n # Shop URLs\n if soap is not None:\n list_of_restaurants = soap.findAll('div', class_=\"storename\")\n for index, shopurl in enumerate(list_of_restaurants):\n url = shopurl.find('a')\n if url is not None:\n shop_urls_list.append(self.BaseURL + url['href'])\n # print(f\"{index} Scrapping Details FOR {self.BaseURL + url['href']}\")\n else:\n shop_urls_list.append(\"Not Available\")\n print(\"Info Not Available\")\n # Shop Names\n shop_name = shopurl.text.replace('\\n', \"\")\n if shop_name is not None:\n shop_name_list.append(shop_name)\n else:\n shop_name_list.append(\"Not Available\")\n # print(len(shop_name_list))\n # Shop Address\n 
list_of_shop_address = soap.findAll('div', class_=\"col findus\")\n for shopAddress in list_of_shop_address:\n if shopAddress is not None:\n shop_address_list.append(shopAddress.findNext('div', class_='info').text)\n else:\n shop_address_list.append(\"Not Available\")\n list_of_shop_number = soap.findAll('div', class_='col callus')\n for shopNumbar in list_of_shop_number:\n if shopNumbar is not None:\n shop_number_list.append(shopNumbar.findNext('div', class_=\"info\").text)\n else:\n shop_number_list.append(\"Not Available\")\n else:\n shop_address_list.append(\"Not Available\")\n shop_urls_list.append(\"Not Available\")\n shop_name_list.append(\"Not Available\")\n\n self.DATA_DICT['Shop Name'] = shop_name_list\n self.DATA_DICT['ShopAddress'] = shop_address_list\n self.DATA_DICT['Phone Number'] = shop_number_list\n self.DATA_DICT['Shop URL'] = shop_urls_list\n return shop_urls_list, shop_name_list\n\n def getShopSoap(self):\n print(\"Scrapping Process start\")\n chrome_options = Options()\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36\")\n chrome_options.add_argument(\"--headless\")\n driver = webdriver.Chrome(options=chrome_options)\n driver.get(self.URL)\n print(\"Loading....\")\n print(\"Loading website All data\")\n while True:\n driver.implicitly_wait(10)\n try:\n driver.find_element(By.XPATH, \"/html/body/div[2]/section/a\").click()\n except:\n break\n driver.implicitly_wait(10)\n sleep(5)\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n driver.close()\n return soup\n","repo_name":"Mask02/Data_Graber_For_AWS_Remote_Server","sub_path":"Data_Graber(Beautiful Soap)/NORTH_POINT_CITY/NORTH_POINT_CITY.py","file_name":"NORTH_POINT_CITY.py","file_ext":"py","file_size_in_byte":6671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17203036989","text":"import sys\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nfrom params import get_scale_params\n\n\n# python main.py Москва, ул. Ак. 
Королева, 12\ntoponym_to_find = \" \".join(sys.argv[1:])\n\ngeocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n\ngeocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": toponym_to_find,\n \"format\": \"json\"}\n\nresponse = requests.get(geocoder_api_server, params=geocoder_params)\n\nif not response:\n print('ERROR')\n pass\n\n# Преобразуем ответ в json-объект\njson_response = response.json()\n\n# Собираем параметры для запроса к StaticMapsAPI:\nmap_params = {\n \"ll\": ','.join([get_scale_params(json_response)['longitude'], get_scale_params(json_response)['latitude']]),\n \"spn\": get_scale_params(json_response)[\"spn\"],\n \"l\": \"map\",\n \"pt\": f\"{','.join([get_scale_params(json_response)['longitude'], get_scale_params(json_response)['latitude']])},pm2rdm1\"\n}\n\nmap_api_server = \"http://static-maps.yandex.ru/1.x/\"\nresponse = requests.get(map_api_server, params=map_params)\n\nImage.open(BytesIO(\n response.content)).show()\n","repo_name":"HotCucumber1/Full-search","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1756642731","text":"#-*- coding:utf-8 -*-\r\n\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import absolute_import\r\n\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nfrom torch.multiprocessing import Pool\r\nfrom utils.nms_wrapper import nms\r\nfrom utils.timer import Timer\r\nfrom configs.CC import Config\r\nimport argparse\r\nfrom layers.functions import Detect, PriorBox\r\nfrom peleenet import build_net\r\nfrom data import BaseTransform, VOC_CLASSES\r\nfrom utils.core import *\r\nfrom utils.pycocotools.coco import COCO\r\n\r\n\r\nparser = argparse.ArgumentParser(description='Pelee Testing')\r\nparser.add_argument('-c', '--config', default='configs/Pelee_VOC.py')\r\nparser.add_argument('-d', '--dataset', default='VOC',\r\n help='VOC or COCO dataset')\r\nparser.add_argument('-m', '--trained_model', default=None,\r\n type=str, help='Trained state_dict file path to open')\r\nparser.add_argument('-t', '--thresh', default=0.5, type=float,\r\n help='visidutation threshold')\r\nparser.add_argument('--show', action='store_true',\r\n help='Whether to display the images')\r\nargs = parser.parse_args()\r\n\r\nprint_info(' ----------------------------------------------------------------------\\n'\r\n '| Pelee Demo Program |\\n'\r\n ' ----------------------------------------------------------------------', ['yellow', 'bold'])\r\n\r\nglobal cfg\r\ncfg = Config.fromfile(args.config)\r\nanchor_config = anchors(cfg.model)\r\nprint_info('The Anchor info: \\n{}'.format(anchor_config))\r\npriorbox = PriorBox(anchor_config)\r\nnet = build_net('test', cfg.model.input_size, cfg.model)\r\ninit_net(net, cfg, args.trained_model)\r\nprint_info('===> Finished constructing and loading model', ['yellow', 'bold'])\r\nnet.eval()\r\n\r\nnum_classes = cfg.model.num_classes\r\n\r\nimgs_path_dict = {'VOC': 'imgs/VOC', 'COCO': 'imgs/COCO'}\r\nim_path = imgs_path_dict[args.dataset]\r\n\r\nimgs_result_path = os.path.join(im_path, 'im_res')\r\nif not os.path.exists(imgs_result_path):\r\n os.makedirs(imgs_result_path)\r\n\r\nwith torch.no_grad():\r\n priors = priorbox.forward()\r\n if cfg.test_cfg.cuda:\r\n net = net.cuda()\r\n priors = priors.cuda()\r\n cudnn.benchmark = True\r\n else:\r\n net = net.cpu()\r\n_preprocess = BaseTransform(\r\n 
cfg.model.input_size, cfg.model.rgb_means, (2, 0, 1))\r\ndetector = Detect(num_classes,\r\n cfg.loss.bkg_label, anchor_config)\r\n\r\n\r\ndef _to_color(indx, base):\r\n \"\"\" return (b, r, g) tuple\"\"\"\r\n base2 = base * base\r\n b = 2 - indx / base2\r\n r = 2 - (indx % base2) / base\r\n g = 2 - (indx % base2) % base\r\n return b * 127, r * 127, g * 127\r\nbase = int(np.ceil(pow(num_classes, 1. / 3)))\r\ncolors = [_to_color(x, base)\r\n for x in range(num_classes)]\r\ncats = [_.strip().split(',')[-1]\r\n for _ in open('data/coco_labels.txt', 'r').readlines()]\r\nlabel_config = {'VOC': VOC_CLASSES, 'COCO': tuple(['__background__'] + cats)}\r\nlabels = label_config[args.dataset]\r\n\r\n\r\ndef draw_detection(im, bboxes, scores, cls_inds, fps, thr=0.2):\r\n imgcv = np.copy(im)\r\n h, w, _ = imgcv.shape\r\n for i, box in enumerate(bboxes):\r\n if scores[i] < thr:\r\n continue\r\n cls_indx = int(cls_inds[i])\r\n box = [int(_) for _ in box]\r\n thick = int((h + w) / 300)\r\n cv2.rectangle(imgcv,\r\n (box[0], box[1]), (box[2], box[3]),\r\n colors[cls_indx], thick)\r\n mess = '%s: %.3f' % (labels[cls_indx], scores[i])\r\n cv2.putText(imgcv, mess, (box[0], box[1] - 7),\r\n 0, 1e-3 * h, colors[cls_indx], thick // 3)\r\n if fps >= 0:\r\n cv2.putText(imgcv, '%.2f' % fps + ' fps', (w - 160, h - 15),\r\n 0, 2e-3 * h, (255, 255, 255), thick // 2)\r\n\r\n return imgcv\r\n\r\nim_fnames = sorted((fname for fname in os.listdir(im_path)\r\n if os.path.splitext(fname)[-1] == '.jpg'))\r\nim_fnames = (os.path.join(im_path, fname) for fname in im_fnames)\r\nim_iter = iter(im_fnames)\r\n\r\nfor fname in im_fnames:\r\n image = cv2.imread(fname, cv2.IMREAD_COLOR)\r\n loop_start = time.time()\r\n w, h = image.shape[1], image.shape[0]\r\n img = _preprocess(image).unsqueeze(0)\r\n if cfg.test_cfg.cuda:\r\n img = img.cuda()\r\n scale = torch.Tensor([w, h, w, h])\r\n out = net(img)\r\n boxes, scores = detector.forward(out, priors)\r\n boxes = (boxes[0] * scale).cpu().numpy()\r\n scores = scores[0].cpu().numpy()\r\n allboxes = []\r\n for j in range(1, num_classes):\r\n inds = np.where(scores[:, j] > cfg.test_cfg.score_threshold)[0]\r\n if len(inds) == 0:\r\n continue\r\n c_bboxes = boxes[inds]\r\n c_scores = scores[inds, j]\r\n c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(\r\n np.float32, copy=False)\r\n soft_nms = cfg.test_cfg.soft_nms\r\n # min_thresh, device_id=0 if cfg.test_cfg.cuda else None)\r\n keep = nms(c_dets, cfg.test_cfg.iou, force_cpu=soft_nms)\r\n keep = keep[:cfg.test_cfg.keep_per_class]\r\n c_dets = c_dets[keep, :]\r\n allboxes.extend([_.tolist() + [j] for _ in c_dets])\r\n\r\n loop_time = time.time() - loop_start\r\n allboxes = np.array(allboxes)\r\n boxes = allboxes[:, :4]\r\n scores = allboxes[:, 4]\r\n cls_inds = allboxes[:, 5]\r\n im2show = draw_detection(image, boxes, scores, cls_inds, -1, args.thresh)\r\n if im2show.shape[0] > 1100:\r\n im2show = cv2.resize(im2show,\r\n (int(1000. 
* float(im2show.shape[1]) / im2show.shape[0]), 1000))\r\n if args.show:\r\n cv2.imshow('test', im2show)\r\n cv2.waitKey(2000)\r\n\r\n filename = os.path.join(imgs_result_path, '{}_stdn.jpg'.format(\r\n os.path.basename(fname).split('.')[0]))\r\n cv2.imwrite(filename, im2show)\r\n","repo_name":"Tessellate-Imaging/Monk_Object_Detection","sub_path":"15_pytorch_peleenet/lib/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":608,"dataset":"github-code","pt":"16"} +{"seq_id":"35220121586","text":"import os\nimport sys\n\n# dont do this in production code, this is bad practice it would seem, only for tests\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + \"/../../servicemanager\"))\n\nfrom servicemanager.actions import actions\nfrom servicemanager.serviceresolver import ServiceResolver\nfrom servicemanager.smcontext import SmApplication, SmContext\n\nimport time\nimport shutil\nimport unittest\nimport subprocess\n\n\nclass TestBase(unittest.TestCase):\n\n config_dir_override = os.path.join(os.path.dirname(__file__), \"../conf\")\n default_time_out = 10\n\n def setUp(self):\n self.set_up_and_clean_workspace()\n self.setup_local_git()\n self.bintrayContext = None\n self.artifactoryContext = None\n self.nexusContext = None\n\n def tearDown(self):\n self.stopFakeBintray()\n self.stopFakeArtifactory()\n self.stopFakeNexus()\n self.tear_down_local_git()\n\n def set_up_and_clean_workspace(self):\n workspace_dir = os.path.join(os.path.dirname(__file__), \"workspace\")\n if os.path.exists(workspace_dir):\n shutil.rmtree(workspace_dir)\n os.mkdir(workspace_dir)\n os.environ[\"WORKSPACE\"] = workspace_dir\n os.chdir(workspace_dir)\n\n def setup_local_git(self):\n workspace_dir = os.path.join(os.path.dirname(__file__), \"workspace\")\n testapp_dir = os.path.join(os.path.dirname(__file__), \"../testapps/basicplayapp\")\n os.makedirs(os.path.join(workspace_dir, \"git\"))\n shutil.copytree(testapp_dir, os.path.join(workspace_dir, \"git\", \"basicplayapp\"))\n os.chdir(os.path.join(workspace_dir, \"git\", \"basicplayapp\"))\n command = \"git init && git add . 
&& git commit -m 'test'\"\n ps_command = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, universal_newlines=True)\n ps_command.communicate()\n os.chdir(workspace_dir)\n\n def tear_down_local_git(self):\n workspace_dir = os.path.join(os.path.dirname(__file__), \"workspace\")\n git_dir = os.path.join(workspace_dir, \"git\", \"basicplayapp\")\n if os.path.exists(git_dir):\n shutil.rmtree(git_dir)\n\n def createContext(self):\n return SmContext(SmApplication(self.config_dir_override), None, False, False)\n\n def start_service_and_wait(self, context, servicetostart):\n sm_application = SmApplication(self.config_dir_override)\n service_resolver = ServiceResolver(sm_application)\n actions.start_and_wait(\n service_resolver,\n context,\n [servicetostart],\n source=False,\n fatjar=True,\n release=False,\n proxy=None,\n port=None,\n seconds_to_wait=5,\n append_args=None,\n )\n\n def startFakeBintray(self):\n self.bintrayContext = self.createContext()\n self.start_service_and_wait(self.bintrayContext, \"FAKE_BINTRAY\")\n self.assertIsNotNone(self.bintrayContext.get_service(\"FAKE_BINTRAY\").status())\n\n def startFakeArtifactory(self):\n self.artifactoryContext = self.createContext()\n self.start_service_and_wait(self.artifactoryContext, \"FAKE_ARTIFACTORY\")\n self.assertIsNotNone(self.artifactoryContext.get_service(\"FAKE_ARTIFACTORY\").status())\n\n def startFakeNexus(self):\n self.nexusContext = self.createContext()\n self.start_service_and_wait(self.nexusContext, \"FAKE_NEXUS\")\n self.assertIsNotNone(self.nexusContext.get_service(\"FAKE_NEXUS\").status())\n\n def stopFakeNexus(self):\n if self.nexusContext is not None:\n self.nexusContext.kill(\"FAKE_NEXUS\", True)\n self.assertEqual(self.nexusContext.get_service(\"FAKE_NEXUS\").status(), [])\n\n def stopFakeBintray(self):\n if self.bintrayContext is not None:\n self.bintrayContext.kill(\"FAKE_BINTRAY\", True)\n self.assertEqual(self.bintrayContext.get_service(\"FAKE_BINTRAY\").status(), [])\n\n def stopFakeArtifactory(self):\n if self.artifactoryContext is not None:\n self.artifactoryContext.kill(\"FAKE_ARTIFACTORY\", True)\n self.assertEqual(self.artifactoryContext.get_service(\"FAKE_ARTIFACTORY\").status(), [])\n\n def waitForCondition(self, f, expected, time_out_secs=default_time_out):\n dead_line = time.time() + time_out_secs\n value = None\n while time.time() < dead_line:\n value = f()\n if value == expected:\n return\n time.sleep(0.1)\n\n command = \"ps -eo ppid,pid,etime,rss,args\"\n ps_command = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, universal_newlines=True)\n stdout, _ = ps_command.communicate()\n print(stdout)\n\n self.assertEqual(value, expected)\n","repo_name":"hmrc/service-manager","sub_path":"test/it/testbase.py","file_name":"testbase.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"16"} +{"seq_id":"72353348167","text":"# Create a function that will take unlimited arguments and should add all the\n# arguments which are passed.\n\ndef sum(*args):\n Sum = 0\n lst1 = []\n for arg in args:\n lst1.append(arg)\n Sum += arg\n print(\"Result: \", Sum)\n\n\nlst = list(map(int, input(\"Please enter the arguments to be taken: \").split()))\nsum(*tuple(lst))\n","repo_name":"zeciljain8197/SBS_Python_Postgres_Training_Repo","sub_path":"python_ex1/Que18.py","file_name":"Que18.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"17660939545","text":"from logging import Logger\n\nfrom botocore.exceptions import ClientError, NoCredentialsError\n\n\ndef boto_client_error(logger: Logger, message: str = \"\"):\n def decorator(func):\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ClientError as error:\n if error.response['Error']['Code'] == 'InternalError': # Generic error\n # We grab the message, request ID, and HTTP code to give to customer support\n logger.error('Error Message: {}'.format(error.response['Error']['Message']))\n logger.error('Request ID: {}'.format(error.response['ResponseMetadata']['RequestId']))\n logger.error('Http code: {}'.format(error.response['ResponseMetadata']['HTTPStatusCode']))\n else:\n logger.error(f\"boto3 clientError raised in function {func.__name__}\" + repr(error) + message)\n raise\n except NoCredentialsError as error:\n logger.error(f\"boto3 NoCredentialsError raised in function {func.__name__}: {repr(error)}\"\n f\"Check if the IAM role has the right permission or if you need to increase IMDS retry.\")\n raise\n\n return wrapper\n return decorator\n","repo_name":"awslabs/aws-glue-libs","sub_path":"awsglue/scripts/connector_activation_util.py","file_name":"connector_activation_util.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":592,"dataset":"github-code","pt":"16"} +{"seq_id":"32805278993","text":"from aiogram.types import ReplyKeyboardRemove, \\\n ReplyKeyboardMarkup, KeyboardButton, \\\n InlineKeyboardMarkup, InlineKeyboardButton\n\nfrom models import Database\ndb = Database()\n\nmain_menu = ReplyKeyboardMarkup(resize_keyboard = True)\nmain_menu.add(\n KeyboardButton(\"🛍 Каталог\"),\n KeyboardButton(\"🛒 Корзина\")\n )\nmain_menu.add(\n KeyboardButton('👥Партнёрская программа'),\n KeyboardButton(\"💼 Личный кабинет\")\n )\nmain_menu.add(\n KeyboardButton('🏪 О магазине'),\n KeyboardButton(\"📨 Техподдержка\")\n )\nadmin_main_menu = ReplyKeyboardMarkup(resize_keyboard = True)\nadmin_main_menu.add(\n KeyboardButton(\"🛍 Каталог\"),\n KeyboardButton(\"🛒 Корзина\")\n )\nadmin_main_menu.add(\n KeyboardButton('👥Партнёрская программа'),\n KeyboardButton(\"💼 Личный кабинет\")\n )\nadmin_main_menu.add(\n KeyboardButton('🏪 О магазине'),\n KeyboardButton(\"📨 Техподдержка\")\n )\nadmin_main_menu.add(\n KeyboardButton('🔐Административная панель')\n )\nadmin_menu = ReplyKeyboardMarkup(resize_keyboard = True)\nadmin_menu.add(\n KeyboardButton('📩Рассылка'),\n KeyboardButton('📊Статистика')\n \n )\n \nadmin_menu.add(\n KeyboardButton('ℹ️Пробить по базе'),\n KeyboardButton('👤Доб. 
Админа')\n ) \n \nadmin_menu.add(\n KeyboardButton('🛒 История покупок'),\n KeyboardButton('🖋Написать юзеру')\n )\nadmin_menu.add(\n \"◀️Назад\"\n ) \nall_categories_menu = InlineKeyboardMarkup()\nall_categories_menu.add(\n InlineKeyboardButton(\n text = '👕Футболки',\n callback_data = \"category_t-shirts_1\"\n ),\n InlineKeyboardButton(\n text = '🧢 Кепки',\n callback_data = \"category_caps_1\"\n ), \n )\nall_categories_menu.add(\n InlineKeyboardButton(\n text = '👖Джинсы',\n callback_data = \"category_jeans_1\"\n ),\n InlineKeyboardButton(\n text = '👟Кросовки',\n callback_data = \"category_sneakers_1\"\n ), \n )\n \nall_categories_menu.add(\n InlineKeyboardButton(\n text = '🧦Носки',\n callback_data = \"category_socks_1\"\n )\n ) \n \nasync def get_item_navigation_menu(category_name,item_name, pos):\n menu = InlineKeyboardMarkup()\n back_pos = pos - 1\n if pos == 1:\n back_pos = 5\n next_pos = pos+1 \n if pos == 5:\n next_pos = 1\n menu.add(\n InlineKeyboardButton(\n text = \"◀️\" ,\n callback_data = f\"category_{category_name}_{back_pos}\"\n ),\n InlineKeyboardButton(\n text = \"▶️\" ,\n callback_data = f\"category_{category_name}_{next_pos}\"\n ) \n )\n menu.add(\n InlineKeyboardButton(\n text = '🛒 Добавить в корзину',\n callback_data = f'add_to_cart_{item_name}'\n )\n )\n return menu\n\nchoose_sex_menu = InlineKeyboardMarkup()\nchoose_sex_menu.add(\n InlineKeyboardButton(\n text = 'Мужской',\n callback_data = 'sex_Мужской'\n ),\n InlineKeyboardButton(\n text = 'Женский',\n callback_data = 'sex_Женский'\n ) \n )\n \nchoose_age_menu = InlineKeyboardMarkup()\nchoose_age_menu.add(\n InlineKeyboardButton(\n text = 'до 20',\n callback_data = 'age_до 20'\n ),\n InlineKeyboardButton(\n text = '20-30',\n callback_data = 'age_30-20'\n ),\n InlineKeyboardButton(\n text = 'более 30',\n callback_data = 'age_более 30'\n ) \n ) \nasync def choose_color_menu(choosen_color = []): \n menu = InlineKeyboardMarkup(row_width = 4)\n colors = [['⚫️', 'Черный'],['🔴', 'Красный'],['⚪️', 'Белый'],['🟢', 'Зеленый']]\n finaly_menu = []\n for color in colors:\n if color[1] in choosen_color:\n finaly_menu.append(\n InlineKeyboardButton(\n text = '☑️',\n callback_data = 'pass'\n ) \n )\n else:\n finaly_menu.append(\n InlineKeyboardButton(\n text = color[0],\n callback_data = f'color_{color[1]}'\n )\n )\n menu.add(\n *finaly_menu\n )\n menu.add(\n InlineKeyboardButton(\n text = '✅ Подтвердить',\n callback_data = 'set_colors'\n )\n )\n return menu\n\nasync def basket_navigation_menu(item_name, pos, id):\n last_pos = await db.get_last_position_bascket(id)\n sum = await db.get_sum_cost_bascket(id)\n next_pos = pos+1\n back_pos = pos-1\n print(last_pos)\n if pos == last_pos:\n next_pos = 1\n elif pos <=1:\n \n back_pos = last_pos\n menu = InlineKeyboardMarkup()\n menu.add(\n InlineKeyboardButton(\n text = '🔺',\n callback_data = f'add_qty_item_{item_name}_{pos}'\n ),\n InlineKeyboardButton(\n text = '❌',\n callback_data = f'delete_from_bascket_{item_name}'\n ),\n InlineKeyboardButton(\n text = '🔻',\n callback_data = f'take_away_qty_item_{item_name}_{pos}'\n ), \n \n )\n menu.add(\n InlineKeyboardButton(\n text = '◀️',\n callback_data = f'bascket_item_pos_{back_pos}'\n ),\n InlineKeyboardButton(\n text = '1/2',\n callback_data = 'test'\n ),\n InlineKeyboardButton(\n text = '▶️',\n callback_data = f'bascket_item_pos_{next_pos}'\n ), \n ) \n menu.add(\n InlineKeyboardButton(\n text =f'✅ Оформить заказ нa {sum}?',\n callback_data = 'order'\n )\n )\n menu.add(\n InlineKeyboardButton(\n text ='🛍 Проложить покупки',\n callback_data = 
'catalog'\n        )\n    )\n    return menu\n\n\ncalncel = ReplyKeyboardMarkup(resize_keyboard = True)\ncalncel.add(\n    KeyboardButton('🚫 Отмена')\n    \n    )\n    \nget_lacation_menu = ReplyKeyboardMarkup(resize_keyboard = True)\nget_lacation_menu.add(\n    KeyboardButton('📍 Отправить геолокация',request_location = True)\n    \n    )\nget_lacation_menu.add(\n    KeyboardButton('🚫 Отмена')\n    \n    )\n    \nadd_buttons = ReplyKeyboardMarkup(resize_keyboard = True)\nadd_buttons.add(\n    KeyboardButton('📤 Отправить сообщение'),\n)\nadd_buttons.add(\n    KeyboardButton('☑️ Добавить инлайн кнопки'),\n    )\n\nadd_buttons.add(\n    KeyboardButton('🚫 Отмена')\n    )\n    \n    \nasync def create_markup(markpus):\n    menu = InlineKeyboardMarkup()\n    for markpup in markpus:\n        \n        ii = [markpup.split(' | ')]\n        num = 0\n        for i in ii:\n            if len(i) > 1:\n                markup1 = i[0].split(' - ')\n                markup2 = i[1].split(' - ')\n                menu.add(\n                    InlineKeyboardButton(\n                        text = markup1[0],\n                        url = markup1[1]\n                    ),\n                    InlineKeyboardButton(\n                        text = markup2[0],\n                        url = markup2[1]\n                    )\n                )\n            else:\n                markup1 = i[0].split(' - ')\n                \n                menu.add(\n                    InlineKeyboardButton(\n                        text = markup1[0],\n                        url = markup1[1]\n                    ),\n                )\n            \n    \n        \n    return menu\ninline_back_button = InlineKeyboardMarkup()\ninline_back_button.add(\n    InlineKeyboardButton(\n        text = '🔙 Назад',\n        callback_data = 'back'\n    )\n    \n    )\n    \n\nget_actions = ReplyKeyboardMarkup(resize_keyboard = True)\n\n\nget_actions.add(\n    KeyboardButton('✅ Подтверждаю')\n    )\n\nget_actions.add(\n    KeyboardButton('🚫 Отмена')\n    )    \n    \ncancel = ReplyKeyboardMarkup(resize_keyboard = True)\ncancel.add(\n    KeyboardButton('🚫 Отмена')\n    )    \n    \n    \nconfim_order = ReplyKeyboardMarkup(resize_keyboard = True)\nconfim_order.add('✅ Подтверждаю')\n\n","repo_name":"ceoaleksandr/test-shop-bot","sub_path":"markup.py","file_name":"markup.py","file_ext":"py","file_size_in_byte":9239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"12165425470","text":"#!/usr/bin/env python3\n\nimport urllib.request\nimport urllib.error\n\n\ndef get_link(ip, port, path):\n    return \"http://{}:{}/{}\".format(ip, port, path)\n\n\ndef make_get_request(ip, port, path):\n    contents = ''\n    try:\n        contents = urllib.request.urlopen(get_link(ip, port, path)).read()\n    except urllib.error.HTTPError as e:\n        print(\"Error during GET request:\", e)\n    finally:\n        return contents\n\n\ndef make_post_request(ip, port, path, data):\n    contents = ''\n    try:\n        contents = urllib.request.urlopen(get_link(ip, port, path),\n                                          data=data).read()\n    except urllib.error.HTTPError as e:\n        print(\"Error during POST request:\", e)\n    finally:\n        return contents\n","repo_name":"sakshamsharma/distributed-control-panel","sub_path":"dcp/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"16982685730","text":"import json\nfrom abc import ABC, abstractmethod\nfrom datetime import timedelta\nfrom typing import Optional, Union, Dict, Tuple, List, cast\n\n\nclass BaseStorageClient(ABC):\n    \"\"\"\n    The base class for storage clients with operations to list, read, write files and generate signed urls.\n    \"\"\"\n\n    _GZIP_MAGIC_NUMBER = (\n        b\"\\x1f\\x8b\"  # Hex signature used to identify a gzip compressed file\n    )\n\n    class GenericError(Exception):\n        pass\n\n    class PermissionsError(GenericError):\n        pass\n\n    class NotFoundError(GenericError):\n        pass\n\n    def __init__(self, prefix: Optional[str] = None):\n        self._prefix = prefix or \"\"\n        if self._prefix
and not self._prefix.endswith(\"/\"):\n self._prefix = f\"{self._prefix}/\"\n\n @property\n @abstractmethod\n def bucket_name(self) -> str:\n \"\"\"\n Returns the bucket name referenced by this client\n \"\"\"\n pass\n\n @abstractmethod\n def write(self, key: str, obj_to_write: Union[bytes, str]) -> None:\n \"\"\"\n Writes a file in the given key, contents are included as bytes or string.\n :param key: path to the file, for example /dir/name.ext\n :param obj_to_write: contents for the file, specified as a bytes array or string\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def read(\n self,\n key: str,\n decompress: Optional[bool] = False,\n encoding: Optional[str] = None,\n ) -> Union[bytes, str]:\n \"\"\"\n Returns the contents of the specified file.\n :param key: path to the file, for example /dir/name.ext\n :param decompress: flag indicating if `gzip` contents should be decompressed automatically\n :param encoding: if set binary content will be decoded using this encoding and a string will be returned\n :return: a bytes object, unless encoding is set, in which case it returns a string.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def delete(self, key: str) -> None:\n \"\"\"\n Deletes the file at `key`\n :param key: path to the file, for example /dir/name.ext\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def download_file(self, key: str, download_path: str) -> None:\n \"\"\"\n Downloads the file at `key` to the local file indicated by `download_path`.\n :param key: path to the file, for example /dir/name.ext\n :param download_path: local path to the file where the contents of `key` will be stored.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def upload_file(self, key: str, local_file_path: str) -> None:\n \"\"\"\n Uploads the file at `local_file_path` to `key` in the associated bucket.\n :param key: path to the file, for example /dir/name.ext\n :param local_file_path: local path to the file to upload.\n \"\"\"\n raise NotImplementedError()\n\n def read_json(self, key: str) -> Dict:\n \"\"\"\n Returns the contents as a dictionary of the JSON file at `key`.\n :param key: path to the file, for example /dir/name.ext\n :return: a Dictionary loaded from the JSON document.\n \"\"\"\n data = self.read(key)\n return json.loads(data.decode(\"utf-8\") if isinstance(data, bytes) else data)\n\n @abstractmethod\n def read_many_json(self, prefix: str) -> Dict:\n \"\"\"\n Reads all JSON files under `prefix` and returns a dictionary where the key is the file path and the value\n is the dictionary loaded from the JSON file.\n :param prefix: Prefix for the files to load, for example: `/dir/`\n :return: a dictionary where the key is the file path and the value is the dictionary loaded from the JSON file.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def managed_download(self, key: str, download_path: str):\n \"\"\"\n Performs a managed transfer that might be multipart, downloads the file at `key` to the local file at\n `download_path`.\n :param key: path to the file, for example /dir/name.ext\n :param download_path: local path to the file where the contents of `key` will be stored.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def list_objects(\n self,\n prefix: Optional[str] = None,\n batch_size: Optional[int] = None,\n continuation_token: Optional[str] = None,\n delimiter: Optional[str] = None,\n *args, # type: ignore\n **kwargs, # type: ignore\n ) -> Tuple[Union[List, None], Union[str, None]]:\n \"\"\"\n List 
objects (files and folder) under the specified prefix.\n Delimiter is set to \"/\" to return sub-folders, this works for all storage providers as it works for S3.\n Documentation about delimiter in S3 requests available here:\n https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax\n Prefix can be used to return contents of folders.\n :param prefix: Prefix to use for listing, it can be used to list folders, for example: `prefix=/dir/`\n :param batch_size: Used to page the result\n :param continuation_token: Used to page the result, the second value in the resulting tuple is the continuation\n token for the next call.\n :param delimiter: Set to \"/\" to return sub-folders, when set the result will include the list of prefixes\n returned by the storage provider instead of metadata for the objects.\n :return: A tuple with the result list and the continuation token. The result list includes the following\n attributes (when no delimiter is set): ETag, Key, Size, LastModified, StorageClass. If delimiter is\n specified only Prefix is included in the result for each listed folder.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def generate_presigned_url(self, key: str, expiration: timedelta) -> str:\n \"\"\"\n Generates a pre-signed url for the given file with the specified expiration.\n :param key: path to the file, for example /dir/name.ext\n :param expiration: time for the generated link to expire, expressed as a timedelta object.\n :return: a pre-signed url to access the specified file.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def is_bucket_private(self) -> bool:\n \"\"\"\n Checks if the bucket is configured with public access disabled.\n\n :return: True if public access is disabled for the bucket and False if the bucket is publicly available.\n \"\"\"\n raise NotImplementedError()\n\n def _is_gzip(self, content: bytes) -> bool:\n return content[:2] == self._GZIP_MAGIC_NUMBER\n\n def _apply_prefix(self, key: Optional[str]) -> Optional[str]:\n if not key:\n return self._prefix\n if self._prefix:\n return f\"{self._prefix}{key}\"\n else:\n return key\n\n def _remove_prefix(self, key: str) -> str:\n if self._prefix and key.startswith(self._prefix):\n return key[len(self._prefix) :]\n else:\n return key\n\n def _remove_prefix_from_prefixes(\n self, entries: Optional[List[Dict]]\n ) -> Optional[List[Dict]]:\n if not entries or not self._prefix:\n return entries\n return [\n {\"Prefix\": self._remove_prefix(cast(str, entry.get(\"Prefix\")))}\n for entry in entries\n ]\n\n def _remove_prefix_from_entries(\n self, entries: Optional[List[Dict]]\n ) -> Optional[List[Dict]]:\n if not entries or not self._prefix:\n return entries\n return [\n {**entry, \"Key\": self._remove_prefix(cast(str, entry.get(\"Key\")))}\n for entry in entries\n ]\n","repo_name":"monte-carlo-data/apollo-agent","sub_path":"apollo/integrations/storage/base_storage_client.py","file_name":"base_storage_client.py","file_ext":"py","file_size_in_byte":7839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33778230569","text":"class Solution:\n def minOperations(self, boxes: str) -> List[int]:\n posB = list()\n res = list()\n for i, element in enumerate(boxes):\n if element=='1':\n posB.append(i)\n \n for i in range(len(boxes)):\n count = 0\n for x in posB:\n count += abs(x-i) \n res.append(count)\n return 
res","repo_name":"bzlee-bio/coding-study","sub_path":"modulabs/week4/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22352194640","text":"from selenium.common import StaleElementReferenceException\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass wait_for_value_to_start_with(object):\n def __init__(self, locator, text_):\n self.locator = locator\n self.text = text_\n\n def __call__(self, driver):\n try:\n element_text = driver.find_element(*self.locator).get_attribute('value')\n return element_text.startswith(self.text)\n except StaleElementReferenceException:\n return False\n\n\nplaylist = \"\"\nprint(\"Keep in mind that the playlist must be public.\")\nwhile True:\n print(\"Enter the link of the playlist you want to download: \")\n playlist = str(input())\n if not playlist.startswith(\"https://www.deezer.com/\") or \"/playlist/\" not in playlist:\n print(\"You entered the wrong link\")\n continue\n else:\n break\nopen('links_file.txt', 'w').close()\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\ndriver.get(playlist)\nlistoflinks = []\nprint(\"Make sure you expand your browser window!\")\nprint(\"Links to every track from playlist: \")\ntry:\n try:\n refuse_button = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"gdpr-btn-refuse-all\"))\n )\n refuse_button.click()\n except:\n print(\"Refuse button didn't pop up!\")\n numberoftracks = driver.find_element(By.CSS_SELECTOR, \"#page_naboo_playlist > div.catalog-content > div > div._5BJsj > div > div._2yyo6 > ul > li:nth-child(1)\").text\n numberoftracks = numberoftracks[0:len(numberoftracks)-7]\n numberoftracks = int(numberoftracks)\n for x in range(1, numberoftracks + 1):\n try:\n button = driver.find_element(By.CSS_SELECTOR,\n \"div._2OACy[aria-rowindex=\\'\" + str(x) + \"\\'] .popper-wrapper button\")\n button.click()\n div = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"_2ZkBf\"))\n )\n share_button = div.find_element(By.XPATH, \"//*[contains(text(), 'Share')]\")\n share_button.click()\n except Exception as e:\n print(e)\n driver.execute_script(\n \"document.querySelector(\\\"div._2OACy[aria-rowindex=\\'\" + str(x) + \"\\']\\\").scrollIntoView()\")\n continue\n WebDriverWait(driver, 10).until(wait_for_value_to_start_with((By.CSS_SELECTOR,\n \"#modal_sharebox > div.modal-body > div.share-content.share-infos > div.share-thumbnail-infos > div.share-action > div > div.control-input > input\"),\n \"https:\"))\n input = driver.find_element(By.CSS_SELECTOR,\n \"#modal_sharebox > div.modal-body > div.share-content.share-infos > div.share-thumbnail-infos > div.share-action > div > div.control-input > input\")\n link = input.get_attribute('value')\n listoflinks.append(link)\n print(str(x) + \". 
track: \" + link)\n close_button = driver.find_element(By.ID, \"modal-close\")\n close_button.click()\n driver.execute_script(\n \"document.querySelector(\\\"div._2OACy[aria-rowindex=\\'\" + str(x) + \"\\']\\\").scrollIntoView()\")\nexcept Exception as e:\n print(e)\n driver.quit()\ndriver.quit()\nwith open(\"links_file.txt\", \"w\") as f:\n for element in listoflinks:\n f.write(str(element) + \"\\n\")\nprint(\"Links to all \" + str(len(listoflinks)) + \" tracks are in the \\\"links_file.txt\\\"\")","repo_name":"djordjije11/DeezerPlaylistsDownload","sub_path":"deezer_main.py","file_name":"deezer_main.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15916140894","text":"def decor(fanc):\n def check(name):\n if not name.isalpha():\n print('Please enter valid data')\n else:\n fanc(name)\n return check\n\n\n@decor\ndef wish(name):\n print(f'Best of luck {name}')\n\n\nwish('John')\nwish('1234')\n\ndecorfanction = decor(wish)\ndecorfanction('Shubham')\ndecorfanction('1111')\n\n\n\n\n","repo_name":"shubhampaanchal/Python","sub_path":"Decorator/Decorator.py","file_name":"Decorator.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8181293379","text":"#!/usr/bin/env python3\n\"\"\"Load paths to sound files.\"\"\"\n\nimport os\nimport sys\n\n\ndef load_sound_paths():\n \"\"\"meh.\"\"\"\n sound_dir = os.path.join(os.path.dirname(__file__), 'sound_files')\n\n for f in os.listdir(sound_dir):\n name = os.path.splitext(os.path.basename(f))[0]\n path = os.path.join(sound_dir, f)\n setattr(sys.modules[__name__], name, path)\n\n\nload_sound_paths()\n","repo_name":"muppetjones/remgame","sub_path":"gamelib/sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7438361292","text":"#!/usr/bin/env python\n\nimport zmq\nfrom sys import argv\nfrom pprint import pprint\n\ndef main():\n context = zmq.Context()\n\n request = context.socket(zmq.REQ)\n request.connect('tcp://localhost:3333')\n\n # Statistics\n # cache_stats / config / all_services\n request.send_json({\n \t'version' : 1,\n 'action': 'all_services'\n })\n\n pprint(request.recv_json())\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sguzwf/lsd","sub_path":"misc/stats_watcher.py","file_name":"stats_watcher.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72053587209","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\n\ndef doormat(r, c):\n for i in range(r//2):\n print((\".|.\"*(i*2+1)).center(c, \"-\"))\n print(\"WELCOME\".center(c, \"-\"))\n for i in range(r//2-1, -1, -1):\n print((\".|.\"*(i*2+1)).center(c, \"-\"))\n\n\ndef main():\n rows, columns = [int(s) for s in input().strip().split()]\n doormat(rows, columns)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"hideyukikanazawa/Challenges","sub_path":"hackerrank/python/10. printDoormat.py","file_name":"10. 
printDoormat.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7126732772","text":"import ee\nimport geemap\n\n# Create a map centered at (lat, lon).\nMap = geemap.Map(center=[40, -100], zoom=4)\n\n# Add some data to the Map\ndem = ee.Image(\"JAXA/ALOS/AW3D30_V1_1\").select('MED')\nMap.addLayer(dem, {'min': 0, 'max': 5000, 'palette': ['000000', 'ffffff'] }, 'DEM', True)\n\n# TEST Map.setCenter\nMap.setCenter(0, 28, 2.5)\n\n# Display the map.\nMap\n","repo_name":"giswqs/earthengine-py-examples","sub_path":"Gena/map_set_center.py","file_name":"map_set_center.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"16"} +{"seq_id":"31818073393","text":"from __future__ import annotations\n\nimport os\nimport sys\nimport pickle\nimport csv\nimport datetime as dt\nfrom secrets import token_hex\nfrom typing import List, Dict, Tuple, IO, Optional\n\nimport discord\nfrom discord.ext import commands\nfrom bidict import bidict\nfrom bidict._exc import *\n\nfrom comp3000bot import config\nfrom comp3000bot.singleton import Singleton\nfrom comp3000bot.logger import get_logger\nfrom comp3000bot.utils import (\n generate_file,\n generate_csv_file,\n get_text_channel,\n get_guild,\n get_role,\n get_text_channel_or_curr,\n)\n\nlogger = get_logger()\n\n\nclass StudentInformation:\n def __init__(self, name: str, number: int, email: str):\n self.name = name\n self.number = number\n self.email = email\n self.discord_name = None # type: str\n self.discord_id = None # type: int\n self.is_registered = False\n self.generate_new_secret()\n\n def __repr__(self):\n return f'Student(name={self.name})'\n\n def __hash__(self):\n return hash(self.number)\n\n def __eq__(self, other):\n return self.number == other.number\n\n def generate_new_secret(self):\n self.secret = token_hex(32)\n\n def register(self, member: discord.Member):\n self.discord_name = member.name\n self.discord_id = member.id\n self.is_registered = True\n\n def reset(self):\n self.discord_name = None\n self.discord_id = None\n self.is_registered = False\n\n @classmethod\n def csv_header(cls) -> Tuple[str, str, str, str, str]:\n return ('name', 'number', 'email', 'discord_name', 'secret')\n\n def csv_row(self) -> Tuple[str, int, str, str, str]:\n return (self.name, self.number, self.email, self.discord_name, self.secret)\n\n def to_csv_file(self) -> discord.File:\n return generate_csv_file(\n f'{self.name}_{self.number}.csv', self.csv_header(), [self.csv_row()]\n )\n\n\nclass Students(metaclass=Singleton):\n __FILE_NAME = os.path.join(config.STUDENTS_DIR, f'students_{config.GUILD_ID}.dat')\n\n def __init__(self):\n self.students = bidict({}) # type: bidict[str, StudentInformation]\n self.registered_students = bidict({}) # type: bidict[int, StudentInformation]\n\n @staticmethod\n def factory():\n \"\"\"\n Load students from disk or create an empty Students class.\n \"\"\"\n try:\n return Singleton._instances[Students]\n except Exception:\n pass\n try:\n Students._from_disk()\n except FileNotFoundError as e:\n logger.warn(f\"Unable to load students from disk: {repr(e)}\")\n return Students()\n\n @staticmethod\n def _from_disk() -> Students:\n \"\"\"\n Load Students from a file saved on disk. Don't call this directly. 
Use Students.factory() instead.\n        \"\"\"\n        fname = Students.__FILE_NAME\n        logger.info(f'Loading {fname}...')\n        with open(fname, 'rb') as f:\n            obj = pickle.load(f)\n        if not isinstance(obj, Students):\n            raise TypeError(\"Unpickled object is not of type Students\")\n        Singleton._instances[Students] = obj\n        logger.info(f'Loaded {fname}')\n        return obj\n\n    def add_student(\n        self, name: str, number: int, email: str, overwrite: bool = False\n    ) -> StudentInformation:\n        \"\"\"\n        Add a new student to the collection of students.\n        \"\"\"\n        student = StudentInformation(name, number, email)\n        if overwrite:\n            self.students.forceput(student.secret, student)\n        else:\n            try:\n                self.students[student.secret] = student\n            except ValueDuplicationError:\n                raise Exception(\n                    f'Refusing to update existing {repr(student)}. You may wish to set overwrite to True.'\n                )\n        return student\n\n    def remove_student(self, number: int):\n        secret = self.students.inverse[StudentInformation('', number, '')]\n        try:\n            _id = self.registered_students.inverse[StudentInformation('', number, '')]\n            self.registered_students.pop(_id)\n        except Exception:\n            pass\n        return self.students.pop(secret)\n\n    def reset_student(self, number: int):\n        secret = self.students.inverse[StudentInformation('', number, '')]\n        student = self.students[secret]\n        student.reset()\n        return student\n\n    def register_student(self, student: StudentInformation, member: discord.Member):\n        student.register(member)\n        self.registered_students[member.id] = student\n        return student\n\n    def student_by_secret(self, secret: str) -> StudentInformation:\n        \"\"\"\n        Get a student by their secret.\n        \"\"\"\n        try:\n            return self.students[secret]\n        except KeyError:\n            raise KeyError(f'No student with secret {secret}') from None\n\n    def student_by_member(self, member: discord.Member) -> StudentInformation:\n        \"\"\"\n        Get a student by their discord member.\n        \"\"\"\n        try:\n            return self.registered_students[member.id]\n        except KeyError:\n            raise KeyError(f'No student associated with {member.name}') from None\n\n    def to_csv_file(self) -> discord.File:\n        return generate_csv_file(\n            f'student_information.csv',\n            StudentInformation.csv_header(),\n            [student.csv_row() for student in self.students.values()],\n        )\n\n    def to_disk(self):\n        \"\"\"\n        Write Students to disk.\n        \"\"\"\n        fname = Students.__FILE_NAME\n        logger.info(f'Saving {fname}...')\n        with open(fname, 'wb+') as f:\n            pickle.dump(self, f)\n\n    async def populate_from_csv_file(\n        self,\n        ctx: commands.Context,\n        fp: IO[str],\n        has_header: bool,\n        overwrite: bool = False,\n    ) -> 'StudentInformation':\n        \"\"\"\n        Populate this student manager from an open CSV file.\n        The format should be as follows:\n        fname, lname, email, number\n        \"\"\"\n        reader = csv.reader(fp)\n        failed_count = 0\n        success_count = 0\n        if has_header:\n            next(reader)  # csv.reader objects are not subscriptable; advance past the header row\n        for fname, lname, email, number in reader:\n            name = f'{fname} {lname}'\n            try:\n                self.add_student(name, int(number), email, overwrite)\n                success_count += 1\n            except Exception as e:\n                logger.error(f'Unable to add student ({name}, {number})', exc_info=e)\n                failed_count += 1\n        if failed_count:\n            await ctx.send(f'Failed to add {failed_count} students')\n        else:\n            await ctx.send(f'Added {success_count} students successfully')\n","repo_name":"willfindlay/comp3000bot","sub_path":"comp3000bot/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"48074749260","text":"import 
itertools\nfrom logging import getLogger\n\nimport matplotlib\nimport numpy as np\n\nmatplotlib.use('Agg')\nimport pylab as plt\n\nfrom omnium import Analyser\nfrom omnium.utils import get_cube\nfrom omnium.consts import L, cp, Lf\n\nlogger = getLogger('scaf.dump_entr')\n\nSCALARS = ['MSE', 'FMSE']\nCLD_OPTS = ['scaffold', 'scaffold_plus', 'scaffold_plus_strong', 'SS2012', 'swann_ud', 'swann_core']\nMETHODS = ['normal', 'acc']\n\nclass DumpEntr(Analyser):\n analysis_name = 'dump_entr'\n multi_expt = True\n\n input_dir = 'share/data/history/{expt}'\n input_filename_glob = '{input_dir}/atmosa_da480.nc'\n output_dir = 'omnium_output/{version_dir}/suite_{expts}'\n output_filenames = ['{output_dir}/atmos.dump_entr.dummy']\n\n def load(self):\n self.load_cubes()\n\n def run(self):\n pass\n\n def save(self, state, suite):\n with open(self.task.output_filenames[0], 'w') as f:\n f.write('done')\n\n def display_results(self):\n self._plot_entrainment()\n plt.close('all')\n\n def _plot_entrainment(self):\n opts = []\n for p in itertools.product(SCALARS, CLD_OPTS, METHODS):\n opt = {'SCALAR': p[0],\n 'CLD_DEF': p[1],\n 'METHOD': p[2]}\n opts.append(opt)\n\n for i, expt in enumerate(self.task.expts):\n da = self.expt_cubes[expt]\n\n theta_cube = get_cube(da, 0, 4)\n q_cube = get_cube(da, 0, 10)\n\n qcl_cube = get_cube(da, 0, 254)\n qcf_cube = get_cube(da, 0, 12)\n qcf2_cube = get_cube(da, 0, 271)\n qgr_cube = get_cube(da, 0, 273)\n w_cube = get_cube(da, 0, 150)\n\n z = theta_cube.coord('atmosphere_hybrid_height_coordinate').points\n\n # Calc MSE (h), FMSE (h2)\n theta = theta_cube.data\n q = q_cube.data\n qcf = qcf_cube.data\n qcf2 = qcf2_cube.data\n qgr = qgr_cube.data\n h = theta + L / cp * q\n h2 = theta + L / cp * q - Lf / cp * (qcf + qcf2 + qgr)\n\n w = w_cube.data\n qcl = qcl_cube.data\n qcf = qcf_cube.data\n qcf2 = qcf2_cube.data\n theta = theta_cube.data\n\n for opt in opts:\n logger.debug('entr for {}: {}', expt, opt)\n if opt['CLD_DEF'] == 'SS2012':\n # Using def from Stirling and Stratton 2012:\n # qcl or qcf (or qcf2) > 1e-5 kg/kg\n # w > 0\n # +ve buoyancy (here defined as theta > theta.mean())\n qcl_thresh = 1e-5\n w_thresh = 0\n env_mask = (((qcl > qcl_thresh) | (qcf > qcl_thresh) | (qcf2 > qcl_thresh))\n & (w > w_thresh)\n & (theta > theta.mean(axis=(1, 2))[:, None, None]))\n elif opt['CLD_DEF'] == 'swann_ud':\n qcl_thresh = 1e-6\n w_thresh = 0\n env_mask = (((qcl > qcl_thresh) | (qcf > qcl_thresh) | (qcf2 > qcl_thresh))\n & (w > w_thresh))\n elif opt['CLD_DEF'] == 'swann_core':\n qcl_thresh = 1e-6\n w_thresh = 0\n env_mask = (((qcl > qcl_thresh) | (qcf > qcl_thresh) | (qcf2 > qcl_thresh))\n & (w > w_thresh)\n & (theta > theta.mean(axis=(1, 2))[:, None, None]))\n elif opt['CLD_DEF'] == 'scaffold':\n qcl_thresh = 5e-6\n w_thresh = 1\n env_mask = (qcl > qcl_thresh) & (w > w_thresh)\n elif opt['CLD_DEF'] == 'scaffold_plus':\n qcl_thresh = 5e-6\n w_thresh = 1\n env_mask = (((qcl > qcl_thresh) | (qcf > qcl_thresh) | (qcf2 > qcl_thresh))\n & (w > w_thresh))\n elif opt['CLD_DEF'] == 'scaffold_plus_strong':\n qcl_thresh = 5e-6\n w_thresh = 5\n env_mask = (((qcl > qcl_thresh) | (qcf > qcl_thresh) | (qcf2 > qcl_thresh))\n & (w > w_thresh))\n cld_mask = ~env_mask\n\n # Calc entr.\n if opt['SCALAR'] == 'MSE':\n scalar = h\n elif opt['SCALAR'] == 'FMSE':\n scalar = h2\n # Can't do without knowing source terms of qt.\n # elif opt['SCALAR'] == 'qt':\n # scalar = (expt_res['q_cube'].data +\n # expt_res['qcl_cube'].data +\n # expt_res['qcf_cube'].data +\n # expt_res['qcf2_cube'].data)\n\n if 
opt['METHOD'] == 'acc':\n scalar_c = (np.ma.masked_array(scalar * w, cld_mask).mean(axis=(1, 2))\n / np.ma.masked_array(w, cld_mask).mean(axis=(1, 2)))\n scalar_e = np.ma.masked_array(scalar, env_mask).mean(axis=(1, 2))\n\n dscalar_c_dz = (scalar_c[2:] - scalar_c[:-2])/(z[2:] - z[:-2])\n entr = dscalar_c_dz / (scalar_e[1:-1] - scalar_c[1:-1])\n\n elif opt['METHOD'] == 'normal':\n scalar_c = np.ma.masked_array(scalar, cld_mask).mean(axis=(1, 2))\n scalar_e = np.ma.masked_array(scalar, env_mask).mean(axis=(1, 2))\n\n dscalar_c_dz = (scalar_c[2:] - scalar_c[:-2]) / (z[2:] - z[:-2])\n entr = dscalar_c_dz / (scalar_e[1:-1] - scalar_c[1:-1])\n\n # Plot results\n z_km = z / 1000\n\n figname = 'entr_{}_{}_{}_{}'.format(expt,\n opt['SCALAR'],\n opt['CLD_DEF'],\n opt['METHOD'])\n fig = plt.figure(figname)\n plt.clf()\n fig, axgrid = plt.subplots(1, 5, fig=fig, sharey=True, num=figname, figsize=(15, 12))\n ax0, ax1, ax2, ax3, ax4 = axgrid\n\n # ~10 km\n # ztop_index = 55\n ztop_index = 75\n # full.\n # ztop_index = 98\n ax0.plot(scalar_c[1:ztop_index], z_km[1:ztop_index], label='cld')\n ax0.plot(scalar_e[1:ztop_index], z_km[1:ztop_index], label='env')\n ax0.legend()\n\n ax1.plot(dscalar_c_dz[:ztop_index - 1] * 1000, z_km[1:ztop_index])\n\n ax2.plot(scalar_e[1:ztop_index] - scalar_c[1:ztop_index], z_km[1:ztop_index])\n\n ax3.plot(env_mask.sum(axis=(1, 2)) / (env_mask.shape[1] * env_mask.shape[2]), z_km)\n ax3.set_xlim((0, 0.1))\n\n ax4.plot(entr[:ztop_index - 1] * 1000, z_km[1:ztop_index])\n ax4.set_xlim((-1, 1))\n ax4.axvline(x=0, color='k', linestyle='--')\n\n ax0.set_ylabel('height (km)')\n ax0.set_ylim((0, 15))\n ax0.set_xlabel('{}'.format(opt['SCALAR']))\n ax1.set_xlabel('$\\\\frac{{d{}_c}}{{dz}}$'.format(opt['SCALAR']))\n ax2.set_xlabel('${0}_e - {0}_c$'.format(opt['SCALAR']))\n ax3.set_xlabel('$\\\\sigma$')\n ax4.set_xlabel('$\\\\epsilon$ (km$^{-1}$)')\n\n plt.savefig(self.file_path('entr_' + figname + '.png'))\n","repo_name":"markmuetz/scaffold_analysis","sub_path":"scaffold/suite/dump_entr.py","file_name":"dump_entr.py","file_ext":"py","file_size_in_byte":7484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35803219623","text":"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\n# The header block is where all import statements should live\nimport os\nimport uuid\nimport json\nimport shutil\nfrom KBaseReport.KBaseReportClient import KBaseReport\nfrom Workspace.WorkspaceClient import Workspace\nfrom taxaspec.filter import filter_file\nfrom taxaspec import acquire\nimport zipfile\n#END_HEADER\n\n\nclass MetabolomicsTools:\n '''\n Module Name:\n MetabolomicsTools\n\n Module Description:\n A KBase module: MetabolomicsTools\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. 
A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"1.0.0\"\n GIT_URL = \"git@github.com:JamesJeffryes/MetabolomicsTools.git\"\n GIT_COMMIT_HASH = \"c76fc0898314fad8a852c976f5d6d0ca5082fcf0\"\n\n #BEGIN_CLASS_HEADER\n # Class variables and functions can be defined in this block\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n \n # Any configuration parameters that are important should be parsed and\n # saved in the constructor.\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.workspaceURL = config['workspace-url']\n self.shared_folder = config['scratch']\n\n #END_CONSTRUCTOR\n pass\n\n\n def get_mona_spectra(self, ctx, params):\n \"\"\"\n :param params: instance of type \"GetSpectraParams\" -> structure:\n parameter \"workspace_name\" of String, parameter \"metabolic_model\"\n of type \"model_ref\" (A reference to a kbase metabolic model),\n parameter \"spectra_source\" of String, parameter \"spectra_query\" of\n String\n :returns: instance of type \"SpectraResults\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN get_mona_spectra\n # Parse/examine the parameters and catch any errors\n print('Validating parameters.')\n for val in ('workspace_name', 'metabolic_model', 'spectra_source',\n 'spectra_query'):\n if val not in params:\n raise ValueError('Parameter %s is not set in input arguments'\n % val)\n\n uuid_string = str(uuid.uuid4())\n scratch = self.shared_folder + \"/\" + uuid_string\n os.mkdir(scratch)\n token = ctx['token']\n ws_client = Workspace(self.workspaceURL, token=token)\n with open('/kb/module/data/Compound_Data.json') as infile:\n comp_data = json.load(infile)\n # acquire metabolic model from the workspace and get inchikeys & names\n try:\n kb_model = ws_client.get_objects(\n [{'name': params['metabolic_model'],\n 'workspace': params['workspace_name']}])[0]\n except Exception as e:\n raise ValueError(\n 'Unable to get metabolic model object from workspace: (' +\n params['workspace_name'] + '/' +\n params['metabolic_model'] + ')' + str(e))\n kb_ids = [x['id'].replace('_c0', '')\n for x in kb_model['data']['modelcompounds']]\n names, inchis = set(), set()\n for cid in kb_ids:\n if cid in comp_data:\n names.update(comp_data[cid].get('names', None))\n inchis.add(comp_data[cid].get('inchikey', None))\n\n # Acquire Spectral Library\n if params['spectra_source'] == 'MoNA-API':\n spec_file = acquire.from_mona(params['spectra_query'],\n '/kb/module/data/')\n else:\n spec_file = '/kb/module/data/%s' % params['spectra_source']\n try:\n z = zipfile.ZipFile(spec_file + \".zip\")\n z.extractall('/kb/module/data/')\n except ValueError:\n raise ValueError('%s is not a supported spectra source'\n % params['spectra_source'])\n\n # Filter Spectral Library\n n_in_spectra, n_out_spectra, output_file = filter_file(spec_file, None,\n inchis, names)\n print(n_in_spectra, n_out_spectra)\n if not n_out_spectra:\n raise RuntimeError(\"No matching spectra found\")\n\n new_path = \"%s/%s%s.msp\" % (scratch, os.path.basename(output_file)[:-8],\n params['metabolic_model'])\n shutil.move(output_file, new_path)\n\n # Package report\n report_files = [{'path': new_path,\n 'name': os.path.basename(new_path),\n 'label': os.path.basename(new_path),\n 'description': 
'Spectral Library filtered with '\n 'supplied metabolic model'}]\n report_params = {\n 'objects_created': [],\n 'message': 'Acquired %s matching spectra and filtered library to '\n '%s spectra which match the %s model' % (\n n_in_spectra, n_out_spectra, params['metabolic_model']),\n 'file_links': report_files,\n 'workspace_name': params['workspace_name'],\n 'report_object_name': 'mass_spectra_report_' + uuid_string\n }\n\n # Construct the output to send back\n report_client = KBaseReport(self.callback_url)\n report_info = report_client.create_extended_report(report_params)\n output = {'report_name': report_info['name'],\n 'report_ref': report_info['ref'],\n }\n #END get_mona_spectra\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method get_mona_spectra return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n","repo_name":"kbaseapps/MetabolomicsTools","sub_path":"lib/MetabolomicsTools/MetabolomicsToolsImpl.py","file_name":"MetabolomicsToolsImpl.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3350715117","text":"\"\"\"A collection of functions for doing my project.\"\"\"\n\nimport random\n\ndef greeting():\n \"\"\"Greets the user and asks for the inputs that will be used all throughout the project. \"\"\"\n \n # Get name, temperature, top and color from the user\n name = input('Hi, What is your name? ')\n temperature = input('What is the temperature in fahrenheit? ')\n top = input('What kind of top do you want to wear? ')\n color = input('What color is the top? ')\n \n return name, int(temperature), top, color\n\n#Function on weather\ndef weather(temperature):\n \"\"\"Given a temperature value it decides if the weather is going to feel chilly, hot or cold. \n \n Parameters\n ----------\n temperature : int\n The number to be compared. \n \n Returns\n -------\n output : str\n How the weather is going to feel. \n \"\"\"\n \n # Use if/elif/else statements to decide how the temperature is going to feel\n if temperature > 60 and temperature < 72:\n temp = 'chilly'\n elif temperature >= 72 and temperature < 100:\n temp = 'hot'\n elif temperature <= 60:\n temp = 'cold'\n else:\n temp = None\n \n return temp\n\n#Function on type of bottoms\n#I can use random function to choose any of the choices in either of the lists\n\ndef bottoms_shoes(temp):\n \"\"\"Given the temperature returns the bottoms and shoes for it. \n \n Parameters\n ----------\n temp : str\n The temperature to be compared to check what kind of bottoms. \n \n Returns\n -------\n choice_1 : str\n The bottoms that the student should use. 
\n choice_2 : str\n The shoes that the student should use.\n \"\"\"\n\n bottoms_chilly = ['jeans', 'long overalls']\n bottoms_hot = ['shorts', 'skirt', 'short overalls']\n bottoms_cold = ['leggings', 'leather pants', 'joggers']\n shoes_chilly = ['sneakers', 'loafers']\n shoes_cold = ['ankle boots', 'over the knee boots', 'knee high boots']\n shoes_hot = ['sandals', 'slides']\n \n # The if/elif/else loop, using random.choice from the random package, picks a bottom and shoes from the lists that temp belongs to.\n if temp == 'chilly':\n choice_1 = random.choice(bottoms_chilly)\n choice_2 = random.choice(shoes_chilly)\n elif temp == 'hot':\n choice_1 = random.choice(bottoms_hot)\n choice_2 = random.choice(shoes_hot)\n elif temp == 'cold':\n choice_1 = random.choice(bottoms_cold)\n choice_2 = random.choice(shoes_cold)\n else:\n choice_1 = None\n choice_2 = None\n \n return choice_1, choice_2\n\n#Function on mix of colors\n#How about two colors come back\n\ndef colors_mix(color):\n \"\"\"Given a primary color it randomly chooses a color that matches. \n \n Parameters\n ----------\n color : str\n The color of the top the student wants to wear. \n \n Returns\n -------\n choice : str\n The color that matches the given color of the top. \n \"\"\"\n \n lst_primary = ['black', 'white', 'red', 'yellow', 'blue']\n white_mix = ['black', 'red', 'yellow', 'blue', 'cyan', 'orange', 'pink','violet', 'indigo','chartreuse', 'orange']\n black_mix = ['white', 'red', 'yellow', 'blue', 'cyan', 'orange', 'pink','violet', 'indigo','chartreuse', 'orange']\n red_mix = ['cyan', 'orange', 'pink']\n blue_mix = ['yellow', 'violet', 'indigo']\n yellow_mix = ['blue', 'chartreuse', 'orange']\n \n # Makes the string all lowercase\n color = color.lower()\n \n #The first if/elif/else loop checks that the lowercase primary color is in our list to then assign a color that matches\n if color in lst_primary:\n # Then this loop uses random from the random package to choose a color from our list\n if color == 'black':\n choice = random.choice(black_mix)\n elif color == 'white':\n choice = random.choice(white_mix)\n elif color == 'red':\n choice = random.choice(red_mix)\n elif color == 'blue':\n choice = random.choice(blue_mix)\n elif color == 'yellow':\n choice = random.choice(yellow_mix)\n else:\n choice = None \n else:\n choice = 'color should be primary: black, white, red, blue or yellow'\n \n return choice\n\n#final function \n#should say goodbye \n\ndef farewell():\n \n name, temperature, top, color = greeting()\n feels_like_temp = weather(temperature)\n outfit = bottoms_shoes(feels_like_temp)\n final_color = colors_mix(color)\n \n print('Hello ' + name + ' your outfit is ' + outfit[0] + ' and ' + outfit[1])\n print('The color that matches with ' + color + ' is ' + final_color)\n \nfarewell()","repo_name":"avenerio/COGS18_Project","sub_path":"My_project/my_module/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6062146532","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 21 20:30:21 2019\r\n\r\n@author: x00423910\r\n\"\"\"\r\n\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport random\r\n\r\n\r\ndef preProcessDataForOffLineInfer(fileName):\r\n # Load the test data\r\n with open(fileName, \"r\") as rf:\r\n file_data = pd.read_csv(rf)\r\n file_data = np.array(file_data.values, dtype=np.float32)\r\n print(\"fileName:\", fileName, \" shape of file data:\", 
file_data.shape)\r\n \r\n # At this point, all the data from the fileName file has been read into file_data\r\n # file_data is now an array of shape [XXX, 18]\r\n # Start building the features for every data sample\r\n test_xs = []\r\n for i in range(len(file_data)):\r\n org_data = file_data[i]\r\n # Consistent with the preprocessing in the training code, we need to build the same 3 features as before\r\n feature_dis_2d = np.sqrt(np.power(org_data[12]-org_data[1],2)+np.power(org_data[13]-org_data[2],2)) \r\n feature_RS_Power = org_data[8]\r\n feature_dis_3d = np.sqrt(np.power(org_data[12]-org_data[1],2)\r\n +np.power(org_data[13]-org_data[2],2)\r\n +np.power(org_data[14]-org_data[9],2))\r\n tmp_test_xs = [feature_dis_2d/100, feature_RS_Power, feature_dis_3d/100]\r\n test_xs.append(tmp_test_xs)\r\n # Convert test_xs to a numpy array\r\n test_xs = np.array(test_xs) \r\n print(\"shape of test_xs:\", test_xs.shape) \r\n return test_xs\r\n\r\n\r\ndef infer(fileName):\r\n test_xs = preProcessDataForOffLineInfer(fileName)\r\n \r\n # Load the model back from the saved model file\r\n with tf.Session(graph=tf.Graph()) as sess:\r\n tf.saved_model.loader.load(sess, [\"serve\"], \"./model_0922\")\r\n graph = tf.get_default_graph()\r\n x = sess.graph.get_tensor_by_name('haha_input_x:0')\r\n y = sess.graph.get_tensor_by_name('haha_output_y:0')\r\n infer_y_value = sess.run(y, feed_dict={x: test_xs})\r\n print(\"shape of infer_y_value:\", infer_y_value.shape)\r\n # Save the results as a csv file \r\n np.savetxt(fileName+\"_infer_res.csv\", infer_y_value, delimiter=',')\r\n \r\n\r\n#test_xs = preProcessDataForOffLineInfer(os.path.join(\"test_set\",\"test_112501.csv\"))\r\ninfer(os.path.join(\"test_set\",\"test_112501.csv\"))\r\n","repo_name":"Aplicity/Huawei_shumo_LSTM","sub_path":"shumo_Demo/off_line_infer_code/off_line_infer_code.py","file_name":"off_line_infer_code.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"32444668448","text":"\nimport grid2op\nimport warnings\nimport unittest\nimport pdb\n\n\nclass Issue533Tester(unittest.TestCase):\n def setUp(self) -> None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n self.env = grid2op.make('l2rpn_neurips_2020_track1',\n test=True,\n _add_to_name=type(self).__name__,\n )\n self.env.seed(0)\n return super().setUp()\n \n def tearDown(self):\n self.env.close()\n \n def test_issue_as_serializable_dict(self):\n actions = self.env.action_space.get_all_unitary_topologies_set(self.env.action_space, sub_id=1) \n actions[1].as_serializable_dict()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rte-france/Grid2Op","sub_path":"grid2op/tests/test_issue_533.py","file_name":"test_issue_533.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"16"} +{"seq_id":"38189510462","text":"# AND function calculation between an odd number of XOR masked data shares\r\n# for every i this algorithm calculates:\r\n# Ci = [XOR(k!=i: Ak) AND XOR(j!=i: Bj)] XOR (Ai AND Bi)\r\n\r\n# The generic parameters enable arbitrarily wide words to be AND-ed, consisting of an arbitrary odd\r\n# number of masked shares. 
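(A tiny worked sanity check, with illustrative one-bit values: for r = 3,\r\n# shares am = [1, 0, 1] (so a = 0) and bm = [1, 1, 1] (so b = 1) give\r\n# cm = [1, 0, 1] before correction; every correction term am[i] & bm[j]\r\n# XORs away to 0, and the output shares XOR to 0 = a AND b, as required.)\r\n# 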
The algorithm does not work with an even number of shares due to its math\r\n# properties!\r\n\r\ndef masked_AND_rolled_out(am, bm):\r\n #any non-zero offset creates non-completeness, can even be random\r\n offset = 1\r\n #calculate AND between masked values\r\n cm = [am[i] & bm[i] for i in range(r)]\r\n #calculating correction terms\r\n for i in range(r):\r\n summa = 0\r\n #XOR together the cross terms am[i] & bm[j] for all j != i, without ever forming a full parity that could cause unmasking\r\n for j in range(r):\r\n if i==j:\r\n continue\r\n summa ^= am[i] & bm[j]\r\n #adding correction term to the particular output share\r\n #which is the \"AND\" between the parities\r\n cm[i-offset] ^= summa\r\n return cm\r\n\r\n# BEGIN TESTING\r\n\r\na = 0x76857f6f\r\nb = 0x5432f1f2\r\nprint(\"A: \", hex(a))\r\nprint(\"B: \", hex(b))\r\nprint(\"AND: \", hex(a & b))\r\n\r\n# preparing odd pieces of random numbers for masking\r\n# the algorithm doesn't work with an even number of shares!\r\n\r\nr = 7 #6 #uncomment to test even number of shares\r\n\r\nra =[0x67978544,\r\n 0xa6f328ab,\r\n 0x68979586,\r\n 0xa76ef4bc,\r\n 0xc9876d3e, # comment to test even number of shares\r\n 0xa7656b36] \r\n\r\nrb =[0x6abdc744,\r\n 0x895728ab,\r\n 0xfe45c8d6,\r\n 0xc9876d3e,\r\n 0xe346a867, # comment to test even number of shares\r\n 0x86f675ed] \r\n\r\n# the sum of the masks has to be zero at first...*1\r\nrasum = 0\r\nfor ma in ra:\r\n rasum ^= ma\r\nra.append(rasum)\r\nrbsum = 0\r\nfor mb in rb:\r\n rbsum ^= mb\r\nrb.append(rbsum)\r\n\r\n# *1...so that adding a and b to any arbitrary random number yields valid shares\r\nam = ra\r\nam[2] ^= a #index is arbitrary\r\nbm = rb\r\nbm[r-2] ^= b #index is arbitrary\r\n\r\n# test vectors for VHDL code\r\n#for s in am:\r\n# print(hex(s))# test vectors for VHDL code\r\n#for s in bm:\r\n# print(hex(s))\r\n\r\n# double checking the sum of shares\r\na = 0\r\nb = 0\r\nfor i in range(r):\r\n a ^= am[i]\r\n b ^= bm[i]\r\nprint(\"A sum: \", hex(a))\r\nprint(\"B sum: \", hex(b))\r\n\r\n# AND calculation between masked shares\r\ncm = masked_AND_rolled_out(am, bm)\r\n\r\n# test vectors for VHDL code\r\n#for s in cm:\r\n# print(hex(s))\r\n\r\n#summing up the result shares\r\nc = 0\r\nfor k in range(len(cm)):\r\n c ^= cm[k]\r\nprint(\"Result: \", hex(c))\r\nprint(\"Correct: \", c == a&b)","repo_name":"InfamousTechnician/Side_channel_attack_countermeasures","sub_path":"masked_AND_by_me/rolled_out_masked_and.py","file_name":"rolled_out_masked_and.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"6780018669","text":"# improvements:\n# use kellylist dataset to use more frequently used words as this shit comes up with some weird ass words\n\nimport json\n\nwith open(\"svenska-ord.json\",\"r\") as f:\n data = json.load(f)\n\ndef generate5WordFile(data):\n newData = [word.lower() for word in data if len(word) == 5 and (\" \" not in word) and (\"-\" not in word)]\n with open(\"sample.json\", \"w\") as outfile:\n json.dump(newData, outfile, ensure_ascii=False)\n\n","repo_name":"theojohnhenry/ordle","sub_path":"json/dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74152213449","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, 
LoadFactDimensionOperator,\n DataQualityOperator)\nfrom helpers import SqlQueries\nfrom airflow.models import Variable\n\n\n#/opt/airflow/start.sh\n\ndefault_args = {\n 'owner': 'udacity',\n 'start_date': datetime(2022, 5, 29),\n 'depends_on_past': False,\n 'retries': 3,\n 'retry_delay': timedelta(seconds=300), # Airflow expects a timedelta here, not a bare number of seconds\n 'email_on_retry': False\n}\n\ndag = DAG('election_data_dag',\n default_args=default_args,\n description='Load and transform Brazil election data in Redshift',\n schedule_interval='0 * * * *',\n max_active_runs=1,\n catchup=False # catchup is a DAG argument; it is ignored when placed inside default_args\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\nwait_operator = DummyOperator(task_id='Load_complete', dag=dag)\n\n\nstage_candidates_data_to_redshift = StageToRedshiftOperator(\n task_id='stage_candidates_data_task',\n dag=dag,\n redshift_conn_id='redshift',\n create_table_query=SqlQueries.create_staging_candidates,\n destination_schema='raw_data',\n destination_table='election_candidates',\n s3_path='s3://udacity-brazil-election-votes/consulta_cand/',\n file_format= 'csv',\n copy_parameters=\" delimiter as ',' truncatecolumns IGNOREHEADER 1 blanksasnull emptyasnull\",\n aws_credentials='aws_credentials'\n)\n\nstage_votes_data_to_redshift = StageToRedshiftOperator(\n task_id='stage_votes_data_task',\n dag=dag,\n redshift_conn_id='redshift',\n create_table_query=SqlQueries.create_staging_votes,\n destination_schema='raw_data',\n destination_table='election_votes',\n s3_path='s3://udacity-brazil-election-votes/votacao_secao/',\n file_format= 'csv',\n copy_parameters=\"delimiter as ',' truncatecolumns IGNOREHEADER 1 blanksasnull emptyasnull\",\n aws_credentials='aws_credentials'\n)\n\n\nload_dim_candidate_table = LoadFactDimensionOperator(\n task_id='load_dim_candidate_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"dim\",\n destination_table=\"candidate\",\n create_table_query=SqlQueries.create_dim_candidate,\n insert_table_query=SqlQueries.insert_dim_candidate\n)\n\nload_dim_date_table = LoadFactDimensionOperator(\n task_id='load_dim_date_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"dim\",\n destination_table=\"date\",\n create_table_query=SqlQueries.create_dim_date,\n insert_table_query=SqlQueries.insert_dim_date\n)\n\nload_dim_election_table = LoadFactDimensionOperator(\n task_id='load_dim_election_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"dim\",\n destination_table=\"election\",\n create_table_query=SqlQueries.create_dim_election,\n insert_table_query=SqlQueries.insert_dim_election\n)\n\nload_dim_party_table = LoadFactDimensionOperator(\n task_id='load_dim_party_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"dim\",\n destination_table=\"party\",\n create_table_query=SqlQueries.create_dim_party,\n insert_table_query=SqlQueries.insert_dim_party\n)\n\nload_dim_location_table = LoadFactDimensionOperator(\n task_id='load_dim_location_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"dim\",\n destination_table=\"location\",\n create_table_query=SqlQueries.create_dim_location,\n insert_table_query=SqlQueries.insert_dim_location\n)\n\nload_fact_votes_table = LoadFactDimensionOperator(\n task_id='load_fact_votes_table_task',\n dag=dag,\n redshift_conn_id=\"redshift\",\n destination_schema=\"fact\",\n destination_table=\"votes\",\n create_table_query=SqlQueries.create_fact_votes,\n insert_table_query=SqlQueries.insert_fact_votes\n)\n\nrun_quality_checks_dim_location = 
DataQualityOperator(\n task_id='run_quality_checks_dim_location_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='dim',\n table='location',\n id_column='id',\n checks=['unique_key',\n 'load_successful']\n)\n\nrun_quality_checks_dim_date = DataQualityOperator(\n task_id='run_quality_checks_dim_date_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='dim',\n table='date',\n id_column='date',\n checks=['unique_key',\n 'load_successful']\n)\n\nrun_quality_checks_dim_election = DataQualityOperator(\n task_id='run_quality_checks_dim_election_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='dim',\n table='election',\n id_column='id',\n checks=['unique_key',\n 'load_successful']\n)\n\nrun_quality_checks_dim_candidate = DataQualityOperator(\n task_id='run_quality_checks_dim_candidate_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='dim',\n table='candidate',\n id_column='cpf_number',\n checks=['unique_key',\n 'load_successful']\n)\n\nrun_quality_checks_dim_party = DataQualityOperator(\n task_id='run_quality_checks_dim_party_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='dim',\n table='party',\n id_column='id',\n checks=['unique_key',\n 'load_successful']\n)\n\nrun_quality_checks_fact_votes = DataQualityOperator(\n task_id='run_quality_checks_fact_votes_task',\n dag=dag,\n redshift_conn_id='redshift',\n table_schema='fact',\n table='votes',\n checks=['load_successful']\n)\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\nstart_operator >> stage_candidates_data_to_redshift >> wait_operator\nstart_operator >> stage_votes_data_to_redshift >> wait_operator\n\nwait_operator >> load_dim_candidate_table >> run_quality_checks_dim_candidate >> end_operator\nwait_operator >> load_dim_date_table >> run_quality_checks_dim_date >> end_operator\nwait_operator >> load_dim_party_table >> run_quality_checks_dim_party >> end_operator\nwait_operator >> load_dim_location_table >> run_quality_checks_dim_location >> end_operator\nwait_operator >> load_dim_election_table >> run_quality_checks_dim_election >> end_operator\nwait_operator >> load_fact_votes_table >> run_quality_checks_fact_votes >> end_operator\n\n\n\n","repo_name":"betinamehl/udacity_de_nanodegree","sub_path":"capstone_project/airflow/dags/election_data_dag.py","file_name":"election_data_dag.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18710593687","text":"import sys\n\nn, l = map(int, input().split())\n\ns = [input() for i in range(n)]\n\ns.sort()\n\nmin_str = \"\"\n\nfor item in s:\n min_str += item\n\nprint(\"{}\".format(min_str))","repo_name":"shio408/AtCoderBeginnerContest-practice","sub_path":"question32.py","file_name":"question32.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17549977051","text":"import requests\r\nimport yaml\r\n\r\nyaml_path = './config.yaml'\r\n\r\ndef map_get(center, markers=None):\r\n with open(yaml_path, 'r') as f:\r\n cfg = yaml.load(f.read(), Loader = yaml.FullLoader)\r\n ak = cfg['API']['ak']\r\n map_get_url = cfg['API']['map_get_url']\r\n zoom = cfg['API']['zoom']\r\n width = cfg['API']['width']\r\n height = cfg['API']['height']\r\n markerStyles = cfg['API']['markerStyles']\r\n scale = cfg['API']['scale']\r\n \r\n params = { # Reference docs: https://lbsyun.baidu.com/index.php?title=static\r\n 'ak': ak,\r\n 'center': 
center,\r\n 'zoom': zoom,\r\n 'width': width,\r\n 'height': height,\r\n 'markerStyles': markerStyles,\r\n 'scale': scale,\r\n }\r\n\r\n params['markers'] = markers if markers else center # Show the center point position, or the ship's position\r\n # print(params)\r\n\r\n response = requests.get(map_get_url, params=params)\r\n # with open('D:/project/original_map/map.html', 'wb') as f:\r\n # f.write(response.content)\r\n with open('./original_map/current_map.png', 'wb') as f:\r\n f.write(response.content)","repo_name":"NicerY/plant_seg_demo","sub_path":"map/map_get.py","file_name":"map_get.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18675389659","text":"from suds.client import Client\nfrom suds.xsd.doctor import ImportDoctor, Import\nimport requests\nimport json\nimport pytest\nimport datetime\n\nurl = \"http://www.webxml.com.cn/WebServices/IpAddressSearchWebService.asmx?wsdl\"\n\nimp = Import('http://www.w3.org/2001/XMLSchema', location='http://www.w3.org/2001/XMLSchema.xsd')\n\nimp.filter.add('http://WebXml.com.cn')\n\n# doctor = ImportDoctor(imp)\n# client = Client(url,doctor=doctor)\n# print(client.service.getCountryCityByIp(\"117.28.35.18\"))\n# rsp = requests.get(url)\n\n# print(rsp.text)\n\ndata = '''\n\n \n \n \n \n 117.75.179.13\n \n \n'''\n\n# rsp = requests.post(url=url,data=data.encode(\"utf-8\"))\n# print(rsp.text)\n\n\n'''\ndata = [\"13588620187\",\"135886236187\",\"13188620187\"]\n@pytest.fixture(params=data,scope=\"module\")\ndef getuse(request):\n data = request.param\n url1 =\"https://tcc.taobao.com/cc/json/mobile_tel_segment.htm?tel={}\".format(data)\n#data1 = json.dumps(data1)\n session = requests.Session()\n rsp = session.get(url = url1)\n\n yield rsp\n with open('1.txt','a') as f:\n f.write(rsp.text)\n\ndef test_case(getuse):\n assert \"mts\" in getuse.text ,\"mts not found\"\ndef test_case1(getuse):\n assert getuse.status_code ==200 ,\"request failed\"\ndef test_case2(getuse):\n assert \"province\" in getuse.text, \"province not found\"\ndef test_case3(getuse):\n assert \"catName\" in getuse.text, \"catName not found\"\n\n\nif __name__ == '__main__':\n pytest.main([\"C:/Users/Administrator/PycharmProjects/untitled/123test/lianxi02.py\"])\n'''\nimport itertools\n\n\ndef A(l):\n for i in l:\n if i % 2 == 0:\n yield i\n\n\nl = [1, 2, 3, 4, 5, 6, 7, 9]\nl1 = [1, 2, 3, 4, 5, 6, 7, 9]\n\n\ndef B(l):\n n = 0\n for i in A(l):\n n += i\n return n\n\n\n# print(B(l))\nz = sum(i for i in l if i % 2 == 0)\n# print(z)\n\nfor a, b in itertools.product(l, l1):\n if a + b == 10:\n pass\n #print(a, b)\n# print(a,b)\nfor i in itertools.islice(l,0,None,2):\n print(i)\n","repo_name":"shiqinghuana/Python","sub_path":"123test/lianxi02.py","file_name":"lianxi02.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10250734585","text":"import skimage\nimport skimage.io\nimport skimage.transform\n\nimport numpy as np\nimport scipy\nimport scipy.stats\n\nimport numba\nimport pandas as pd\n\nfrom glob import glob\nimport os\nfrom zipfile import ZipFile\n\nfrom ._tqdm import tqdm\n\ndef lowres_image_iterator(path, img_as_float=True):\n\t\"\"\"\n\tIterator over all of a scene's low-resolution images (LR*.png) and their\n\tcorresponding status maps (QM*.png).\n\t\n\tReturns at each iteration a `(l, c)` tuple, where:\n\t* `l`: matrix with the loaded low-resolution image (values as np.uint16 or\n\t np.float64 depending on `img_as_float`),\n\t* `c`: the image's corresponding \"clear pixel?\" boolean mask.\n\t\n\tScenes' image files are 
described at:\n\thttps://kelvins.esa.int/proba-v-super-resolution/data/\n\t\"\"\"\n\tpath = path if path[-1] in {'/', '\\\\'} else (path + '/')\n\tfor f in glob(path + 'LR*.png'):\n\t\tq = f.replace('LR', 'QM')\n\t\tl = skimage.io.imread(f)\n\t\tc = skimage.io.imread(q).astype(bool) # imread() takes no dtype argument; cast the mask to bool explicitly\n\t\tif img_as_float:\n\t\t\tl = skimage.img_as_float64(l)\n\t\tyield (l, c)\n\n\ndef bicubic_upscaling(img):\n \"\"\"\n Compute a bicubic upscaling by a factor of 3.\n \"\"\"\n r = skimage.transform.rescale(img, scale=3, order=3, mode='edge',\n anti_aliasing=False, multichannel=False)\n # NOTE: Don't change these options. They're required by `baseline_upscale`.\n # http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.rescale\n # http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.warp\n return r\n\n\ndef baseline_upscale(path):\n\t\"\"\"\n\tReimplementation of the image enhancement operation performed by the\n\tbaseline code (`generate_sample_submission.py`) provided in:\n\thttps://kelvins.esa.int/proba-v-super-resolution/submission-rules/\n\t\n\t\t\"takes all low resolution images that have the maximum amount of clear\n\t\tpixels, computes a bicubic upscaling by a factor of 3 and averages their\n\t\tpixel intensities.\"\n\t\n\tThis function takes as argument the `path` to a single scene, and returns\n\tthe matrix with the scene's enhanced image.\n\t\"\"\"\n\tclearance = {}\n\tfor (l, c) in lowres_image_iterator(path, img_as_float=True):\n\t\tclearance.setdefault(c.sum(), []).append(l)\n\t\n\t# take all the images that have the same maximum clearance\n\timgs = max(clearance.items(), key=lambda i: i[0])[1]\n\t\n\tsr = np.mean([\n\t\tbicubic_upscaling(i)\n\t\tfor i in imgs\n\t\t], axis=0)\n\t\n\treturn sr\n\n\n\ndef central_tendency(images, agg_with='median',\n\t only_clear=False, fill_obscured=False,\n\t img_as_float=True):\n\t\"\"\"\n\tAggregate the given `images` through a statistical central tendency measure,\n\tchosen by setting `agg_with` to either 'mean', 'median' or 'mode'.\n\t\n\tExpects `images` to be a list of `(image, status map)` tuples.\n\tShould `images` be a string, it's interpreted as the path to a scene's\n\tfiles. The code will then aggregate that scene's low resolution images\n\t(LR*.png), while taking also into account their status maps (QM*.png).\n\t\n\tWill optionally aggregate only images' clear pixels (if `only_clear=True`)\n\tby using the information in images' corresponding status maps.\n\t\n\tIn some scenes, some pixels are obscured in all of the low-resolution\n\timages. Aggregation with mean/median will return np.nan for those pixels,\n\tand aggregation with mode will return 0.0.\n\tIf called with `fill_obscured=True` those pixels will be filled with the\n\t`agg_with` aggregate of the values at all those obscured pixels. 
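(An illustrative call, sketched with an example scene path:\n\t`central_tendency('probav/train/RED/imgset0559',\n\t agg_with='median', only_clear=True, fill_obscured='mode')`\n\twould aggregate clear pixels by median and fill never-clear pixels\n\twith their mode.)\n\t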
Setting\n\t`fill_obscured` to one of 'mean', 'median' or 'mode' will indicate that is\n\tthe measure that should be used to aggregate obscured pixels.\n\t\"\"\"\n\tagg_opts = {\n\t\t'mean' : lambda i: np.nanmean(i, axis=0),\n\t\t'median' : lambda i: np.nanmedian(i, axis=0),\n\t\t'mode' : lambda i: scipy.stats.mode(i, axis=0, nan_policy='omit').mode[0],\n\t\t}\n\tagg = agg_opts[agg_with]\n\t\n\timgs = []\n\tobsc = []\n\t\n\tif isinstance(images, str):\n\t\timages = lowres_image_iterator(images, img_as_float or only_clear)\n\telif only_clear:\n\t\t# Images were given by the caller, rather than loaded here.\n\t\t# Because `only_clear=True`, we generate copies of all lr images, so the\n\t\t# function will have no unintended side-effects on the caller's side.\n\t\timages = [(l.copy(), c) for (l,c) in images]\n\t\n\tfor (l, c) in images:\n\t\t\n\t\tif only_clear:\n\t\t\t\n\t\t\t# keep track of the values at obscured pixels\n\t\t\tif fill_obscured != False:\n\t\t\t\to = l.copy()\n\t\t\t\to[c] = np.nan\n\t\t\t\tobsc.append(o)\n\t\t\t\n\t\t\t# replace values at obscured pixels with NaNs\n\t\t\tl[~c] = np.nan\n\t\t\n\t\timgs.append(l)\n\t\n\t# aggregate the images\n\twith np.warnings.catch_warnings(): ## https://stackoverflow.com/a/29348184\n\t\t# suppress the warnings that originate when `only_clear=True`\n\t\t# but some pixels are never clear in any of the images\n\t\tnp.warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')\n\t\tnp.warnings.filterwarnings('ignore', r'Mean of empty slice')\n\t\t\n\t\tagg_img = agg(imgs)\n\t\t\n\t\tif only_clear and fill_obscured != False:\n\t\t\tif isinstance(fill_obscured, str):\n\t\t\t\tagg = agg_opts[fill_obscured]\n\t\t\tsome_clear = np.isnan(obsc).any(axis=0)\n\t\t\tobsc = agg(obsc)\n\t\t\tobsc[some_clear] = 0.0\n\t\t\tnp.nan_to_num(agg_img, copy=False)\n\t\t\tagg_img += obsc\n\t\n\treturn agg_img\n\n\ndef highres_image(path, img_as_float=True):\n\t\"\"\"\n\tLoad a scene's high resolution image and its corresponding status map.\n\t\n\tReturns a `(hr, sm)` tuple, where:\n\t* `hr`: matrix with the loaded high-resolution image (values as np.uint16 or\n\t np.float64 depending on `img_as_float`),\n\t* `sm`: the image's corresponding \"clear pixel?\" boolean mask.\n\t\n\tScenes' image files are described at:\n\thttps://kelvins.esa.int/proba-v-super-resolution/data/\n\t\"\"\"\n\tpath = path if path[-1] in {'/', '\\\\'} else (path + '/')\n\thr = skimage.io.imread(path + 'HR.png')\n\tsm = skimage.io.imread(path + 'SM.png')\n\tif img_as_float:\n\t\thr = skimage.img_as_float64(hr)\n\treturn (hr, sm)\n\t\n\n\ndef lowres_image_iterator(path, img_as_float=True):\n\t\"\"\"\n\tIterator over all of a scene's low-resolution images (LR*.png) and their\n\tcorresponding status maps (QM*.png).\n\t\n\tReturns at each iteration a `(l, c)` tuple, where:\n\t* `l`: matrix with the loaded low-resolution image (values as np.uint16 or\n\t np.float64 depending on `img_as_float`),\n\t* `c`: the image's corresponding \"clear pixel?\" boolean mask.\n\t\n\tScenes' image files are described at:\n\thttps://kelvins.esa.int/proba-v-super-resolution/data/\n\t\"\"\"\n\tpath = path if path[-1] in {'/', '\\\\'} else (path + '/')\n\tfor f in glob(path + 'LR*.png'):\n\t\tq = f.replace('LR', 'QM')\n\t\tl = skimage.io.imread(f)\n\t\tc = skimage.io.imread(q)\n\t\tif img_as_float:\n\t\t\tl = skimage.img_as_float64(l)\n\t\tyield (l, c)\n\t\n\n\n# [============================================================================]\n\n\ndef check_img_as_float(img, validate=True):\n\t\"\"\"\n\tEnsure 
`img` is a matrix of values in floating point format in [0.0, 1.0].\n\tReturns `img` if it already obeys those requirements, otherwise converts it.\n\t\"\"\"\n\tif not issubclass(img.dtype.type, np.floating):\n\t\timg = skimage.img_as_float64(img)\n\t# https://scikit-image.org/docs/dev/api/skimage.html#img-as-float64\n\t\n\tif validate:\n\t\t# safeguard against unwanted conversions to values outside the\n\t\t# [0.0, 1.0] range (would happen if `img` had signed values).\n\t\tassert img.min() >= 0.0 and img.max() <= 1.0\n\t\n\treturn img\n\t\n\n\n# [============================================================================]\n\n\ndef all_scenes_paths(base_path):\n\t\"\"\"\n\tGenerate a list of the paths to all scenes available under `base_path`.\n\t\"\"\"\n\tbase_path = base_path if base_path[-1] in {'/', '\\\\'} else (base_path + '/')\n\treturn [\n\t\tbase_path + c + s\n\t\tfor c in ['RED/', 'NIR/']\n\t\tfor s in sorted(os.listdir(base_path + c))\n\t\t]\n\t\n\n\ndef scene_id(scene_path, incl_channel=False):\n\t\"\"\"\n\tExtract from a scene's path its unique identifier.\n\t\n\tExamples\n\t--------\n\t>>> scene_id('probav/train/RED/imgset0559/')\n\t'imgset0559'\n\t>>> scene_id('probav/train/RED/imgset0559', incl_channel=True)\n\t'RED/imgset0559'\n\t\"\"\"\n\tsep = os.path.normpath(scene_path).split(os.sep)\n\tif incl_channel:\n\t\treturn '/'.join(sep[-2:])\n\telse:\n\t\treturn sep[-1]\n\t\n\n# [============================================================================]\n\ndef prepare_submission(images, scenes, subm_fname='submission.zip'):\n\t\"\"\"\n\tPrepare a set of images for submission.\n\t\n\tGiven a list of `images` (as matrices of shape (384, 384)), and the paths\n\tto the `scenes` to which they correspond, write a zip file containing all\n\timages as .png files, named after their scene's identification\n\t(example: imgset1160.png).\n\t\"\"\"\n\tassert len(images) == 290, '%d images provided, 290 expected.' % len(images)\n\tassert len(images) == len(scenes), \"Mismatch in number of images and scenes.\"\n\tassert subm_fname[-4:] == '.zip'\n\t\n\t# specific warnings we wish to ignore\n\twarns = [\n\t\t'tmp.png is a low contrast image',\n\t\t'Possible precision loss when converting from float64 to uint16']\n\t\n\twith np.warnings.catch_warnings():\n\t\tfor w in warns:\n\t\t\tnp.warnings.filterwarnings('ignore', w)\n\t\t\n\t\tprint('Preparing submission. Writing to \"%s\".' % subm_fname)\n\t\t\n\t\twith ZipFile(subm_fname, mode='w') as zf:\n\t\t\t\n\t\t\tfor img, scene in zip(tqdm(images), scenes):\n\t\t\t\tassert img.shape == (384, 384), \\\n\t\t\t\t\t'Wrong dimensions in image for scene %s.' % scene\n\t\t\t\t\n\t\t\t\tskimage.io.imsave('tmp.png', img)\n\t\t\t\tzf.write('tmp.png', arcname=scene_id(scene) + '.png')\n\t\t\n\t\tos.remove('tmp.png')\n \n \n# [============================================================================]\n\n\n# Baseline cPSNR values for the dataset's images. 
Used for normalizing scores.\n# (provided by the competition's organizers)\nbaseline_cPSNR = pd.read_csv(\n os.path.dirname(os.path.abspath(__file__)) + '/norm.csv',\n names = ['scene', 'cPSNR'],\n index_col = 'scene',\n sep = ' ')\n\n\ndef score_images(imgs, scenes_paths, *args):\n\t\"\"\"\n\tMeasure the overall (mean) score across multiple super-resolved images.\n\t\n\tTakes as input a sequence of images (`imgs`), a sequence with the paths to\n\tthe corresponding scenes (`scenes_paths`), and optionally a sequence of\n\t(hr, sm) tuples with the pre-loaded high-resolution images of those scenes.\n\t\"\"\"\n\treturn np.mean([\n#\t\tscore_image(*i)\n\t\tscore_image_fast(*i)\n\t\tfor i in zip(tqdm(imgs), scenes_paths, *args)\n\t\t])\n\n\ndef score_image(sr, scene_path, hr_sm=None):\n\t\"\"\"\n\tCalculate the individual score (cPSNR, clear Peak Signal to Noise Ratio) for\n\t`sr`, a super-resolved image from the scene at `scene_path`.\n\t\n\tParameters\n\t----------\n\tsr : matrix of shape 384x384\n\t\tsuper-resolved image.\n\tscene_path : str\n\t\tpath where the scene's corresponding high-resolution image can be found.\n\thr_sm : tuple, optional\n\t\tthe scene's high resolution image and its status map. Loaded if `None`.\n\t\"\"\"\n\thr, sm = highres_image(scene_path) if hr_sm is None else hr_sm\n\t\n\t# \"We assume that the pixel-intensities are represented\n\t# as real numbers ∈ [0,1] for any given image.\"\n\t#sr = check_img_as_float(sr)\n\t#hr = check_img_as_float(hr, validate=False)\n\t\n\t# \"Let N(HR) be the baseline cPSNR of image HR as found in the file norm.csv.\"\n\tN = baseline_cPSNR.loc[scene_id(scene_path)][0]\n\t\n\t# \"To compensate for pixel-shifts, the submitted images are\n\t# cropped by a 3 pixel border, resulting in a 378x378 format.\"\n\tsr_crop = sr[3 : -3, 3 : -3]\n\t\n\tcrop_scores = []\n\t\n\tfor (hr_crop, sm_crop) in hr_crops(hr, sm):\n\t\t# values at the cropped versions of each image that\n\t\t# fall in clear pixels of the cropped `hr` image\n\t\t_hr = hr_crop[sm_crop]\n\t\t_sr = sr_crop[sm_crop]\n\t\t\n\t\t# \"we first compute the bias in brightness b\"\n\t\tpixel_diff = _hr - _sr\n\t\tb = np.mean(pixel_diff)\n\t\t\n\t\t# \"Next, we compute the corrected clear mean-square\n\t\t# error cMSE of SR w.r.t. HR_{u,v}\"\n\t\tpixel_diff -= b\n\t\tcMSE = np.mean(pixel_diff * pixel_diff)\n\t\t\n\t\t# \"which results in a clear Peak Signal to Noise Ratio of\"\n\t\tcPSNR = -10. * np.log10(cMSE)\n\t\t\n\t\t# normalized cPSNR\n\t\tcrop_scores.append(N / cPSNR)\n#\t\tcrop_scores.append(cMSE)\n\t\n\t# \"The individual score for image SR is\"\n\tsr_score = min(crop_scores)\n#\tsr_score = N / (-10. 
* np.log10(min(crop_scores)))\n\t\n\treturn sr_score\n\n\n# [===================================]\n\n\ndef hr_crops(hr, sm):\n\t\"\"\"\n\t\"We denote the cropped 378x378 images as follows: for all u,v ∈ {0,…,6},\n\tHR_{u,v} is the subimage of HR with its upper left corner at coordinates\n\t(u,v) and its lower right corner at (378+u, 378+v).\"\n\t-- https://kelvins.esa.int/proba-v-super-resolution/scoring/\n\t\"\"\"\n\tnum_cropped = 6\n\tmax_u, max_v = np.array(hr.shape) - num_cropped\n\t\n\tfor u in range(num_cropped + 1):\n\t\tfor v in range(num_cropped + 1):\n\t\t\tyield hr[u : max_u + u, v : max_v + v], \\\n\t\t\t\t sm[u : max_u + u, v : max_v + v]\n\t\n\n \n# [============================================================================]\n\n\ndef score_image_fast(sr, scene_path, hr_sm=None):\n \"\"\"\n Calculate the individual score (cPSNR, clear Peak Signal to Noise Ratio) for\n `sr`, a super-resolved image from the scene at `scene_path`.\n\n Parameters\n ----------\n sr : matrix of shape 384x384\n super-resolved image.\n scene_path : str\n path where the scene's corresponding high-resolution image can be found.\n hr_sm : tuple, optional\n the scene's high resolution image and its status map. Loaded if `None`.\n \"\"\"\n\n hr, sm = highres_image(scene_path) if hr_sm is None else hr_sm\n\n # \"We assume that the pixel-intensities are represented\n # as real numbers ∈ [0,1] for any given image.\"\n #sr = check_img_as_float(sr)\n #hr = check_img_as_float(hr, validate=False)\n\n # \"Let N(HR) be the baseline cPSNR of image HR as found in the file norm.csv.\"\n N = baseline_cPSNR.loc[scene_id(scene_path)][0]\n\n return score_against_hr(sr, hr, sm, N)\n\ndef score_image_fast_normalized(sr, hr, scene_path):\n \"\"\"\n Calculate the individual score (cPSNR, clear Peak Signal to Noise Ratio) for\n `sr`, a super-resolved image from the scene at `scene_path`.\n\n Parameters\n ----------\n sr : matrix of shape 384x384\n super-resolved image.\n scene_path : str\n path where the scene's corresponding high-resolution image can be found.\n hr_sm : tuple, optional\n the scene's high resolution image and its status map. 
Loaded if `None`.\n \"\"\"\n\n # \"We assume that the pixel-intensities are represented\n # as real numbers ∈ [0,1] for any given image.\"\n #sr = check_img_as_float(sr)\n #hr = check_img_as_float(hr, validate=False)\n\n # \"Let N(HR) be the baseline cPSNR of image HR as found in the file norm.csv.\"\n N = baseline_cPSNR.loc[scene_id(scene_path)][0]\n\n return score_against_hr_normalized(sr, hr, N)\n\n\n@numba.jit(nopython=True, parallel=True)\ndef score_against_hr_normalized(sr, hr, N):\n \"\"\"\n Numba-compiled version of the scoring function.\n \"\"\"\n num_cropped = 6\n max_u, max_v = np.array(hr.shape) - num_cropped\n\n # \"To compensate for pixel-shifts, the submitted images are\n # cropped by a 3 pixel border, resulting in a 378x378 format.\"\n c = num_cropped // 2\n sr_crop = sr[c : -c, c : -c].ravel()\n\n #crop_scores = []\n cMSEs = np.zeros((num_cropped + 1, num_cropped + 1), np.float64)\n\n for u in numba.prange(num_cropped + 1):\n for v in numba.prange(num_cropped + 1):\n\n # \"We denote the cropped 378x378 images as follows: for all u,v ∈\n # {0,…,6}, HR_{u,v} is the subimage of HR with its upper left corner\n # at coordinates (u,v) and its lower right corner at (378+u, 378+v)\"\n hr_crop = hr[u : max_u + u, v : max_v + v].ravel()\n\n # \"we first compute the bias in brightness b\"\n pixel_diff = hr_crop - sr_crop\n b = np.nanmean(pixel_diff)\n\n # \"Next, we compute the corrected clear mean-square\n # error cMSE of SR w.r.t. HR_{u,v}\"\n pixel_diff -= b\n pixel_diff *= pixel_diff\n cMSE = np.nanmean(pixel_diff)\n\n # \"which results in a clear Peak Signal to Noise Ratio of\"\n #cPSNR = -10. * np.log10(cMSE)\n\n # normalized cPSNR\n #crop_scores.append(N / cPSNR)\n\n cMSEs[u, v] = cMSE\n\n # \"The individual score for image SR is\"\n #sr_score = min(crop_scores)\n sr_score = N / (-10. * np.log10(cMSEs.min()))\n\n return sr_score\n\n\n\n#@numba.jit('f8(f8[:,:], f8[:,:], b1[:,:], f8)', nopython=True, parallel=True)\n@numba.jit(nopython=True, parallel=True)\ndef score_against_hr(sr, hr, sm, N):\n \"\"\"\n Numba-compiled version of the scoring function.\n \"\"\"\n num_cropped = 6\n max_u, max_v = np.array(hr.shape) - num_cropped\n\n # \"To compensate for pixel-shifts, the submitted images are\n # cropped by a 3 pixel border, resulting in a 378x378 format.\"\n c = num_cropped // 2\n sr_crop = sr[c : -c, c : -c].ravel()\n\n # create a copy of `hr` with NaNs at obscured pixels\n # (`flatten` used to bypass numba's indexing limitations)\n hr_ = hr.flatten()\n hr_[(~sm).ravel()] = np.nan\n hr = hr_.reshape(hr.shape)\n\n #crop_scores = []\n cMSEs = np.zeros((num_cropped + 1, num_cropped + 1), np.float64)\n\n for u in numba.prange(num_cropped + 1):\n for v in numba.prange(num_cropped + 1):\n\n # \"We denote the cropped 378x378 images as follows: for all u,v ∈\n # {0,…,6}, HR_{u,v} is the subimage of HR with its upper left corner\n # at coordinates (u,v) and its lower right corner at (378+u, 378+v)\"\n hr_crop = hr[u : max_u + u, v : max_v + v].ravel()\n\n # \"we first compute the bias in brightness b\"\n pixel_diff = hr_crop - sr_crop\n b = np.nanmean(pixel_diff)\n\n # \"Next, we compute the corrected clear mean-square\n # error cMSE of SR w.r.t. HR_{u,v}\"\n pixel_diff -= b\n pixel_diff *= pixel_diff\n cMSE = np.nanmean(pixel_diff)\n\n # \"which results in a clear Peak Signal to Noise Ratio of\"\n #cPSNR = -10. 
* np.log10(cMSE)\n\n # normalized cPSNR\n #crop_scores.append(N / cPSNR)\n\n cMSEs[u, v] = cMSE\n\n # \"The individual score for image SR is\"\n #sr_score = min(crop_scores)\n sr_score = N / (-10. * np.log10(cMSEs.min()))\n\n return sr_score\n\n\n# [============================================================================]\n\n\nclass scorer(object):\n\t\n\tdef __init__(self, scene_paths, preload_hr=True):\n\t\t\"\"\"\n\t\tWrapper to `score_image()` that simplifies the scoring of multiple\n\t\tsuper-resolved images.\n\t\t\n\t\tThe scenes over which the scorer will operate should be given in\n\t\t`scene_paths`. This is either a sequence of paths to a subset of scenes\n\t\tor a string with a single path. In this case, it is interpreted as the\n\t\tbase path to the full dataset, and `all_scenes_paths()` will be used to\n\t\tlocate all the scenes it contains.\n\t\t\n\t\tScene paths are stored in the object's `.paths` variable.\n\t\tWhen scoring, only the super-resolved images need to be provided.\n\t\tThey are assumed to be in the same order as the scenes in `.paths`.\n\t\t\n\t\tIf the object is instantiated with `preload_hr=True` (the default),\n\t\tall scene's high-resolution images and their status maps will be\n\t\tpreloaded. When scoring they will be sent to `score_image()`, thus\n\t\tsaving computation time in repeated scoring, at the expense of memory.\n\t\t\"\"\"\n\t\tif isinstance(scene_paths, str):\n\t\t\tself.paths = all_scenes_paths(scene_paths)\n\t\telse:\n\t\t\tself.paths = scene_paths\n\t\t\n\t\tself.hr_sm = [] if not preload_hr else [\n\t\t\thighres_image(scn_path, img_as_float=True)\n\t\t\tfor scn_path in tqdm(self.paths, desc='Preloading hi-res images')]\n\t\t\n\t\tself.scores = []\n\t\t\n\t\n\tdef __call__(self, sr_imgs, per_image=False, progbar=True, desc=''):\n\t\t\"\"\"\n\t\tScore all the given super-resolved images (`sr_imgs`), which correspond\n\t\tto the scenes at the matching positions of the object's `.paths`.\n\t\t\n\t\tReturns the overall score (mean normalized cPSNR).\n\t\t\n\t\tAn additional value is returned if `per_image=True`: a list with each\n\t\timage's individual cPSNR score. 
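(Illustrative usage, with a hypothetical dataset path:\n\t\t`sc = scorer('probav/train/')` followed by\n\t\t`score, per_img = sc(sr_imgs, per_image=True)`.)\n\t\t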
In either case, this list remains\n\t\tavailable in the object's `.scores` variable until the next call.\n\t\t\"\"\"\n\t\tscenes_paths = tqdm(self.paths, desc=desc) if progbar else self.paths\n\t\thr_sm = [] if self.hr_sm == [] else [self.hr_sm]\n\t\t\n\t\tself.scores = [\n#\t\t\tscore_image(*i)\n\t\t\tscore_image_fast(*i)\n\t\t\tfor i in zip(sr_imgs, scenes_paths, *hr_sm)]\n\t\t\n\t\tassert len(self.scores) == len(self.paths)\n\t\t\n\t\tscore = np.mean(self.scores)\n\t\t\n\t\tif per_image:\n\t\t\treturn score, self.scores\n\t\telse:\n\t\t\treturn score\n \n","repo_name":"Ahaeflig/probaVSuperRes","sub_path":"supreshelper/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":20240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23330953824","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"cpick\",\n version=\"0.1.3\",\n author=\"Toby Slight\",\n author_email=\"tslight@pm.me\",\n description=\"Curses List Picker\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/tslight/cpick\",\n install_requires=[\"columns\"],\n packages=setuptools.find_packages(),\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: ISC License (ISCL)\",\n \"Operating System :: OS Independent\",\n ),\n entry_points={\"console_scripts\": [\"cpick = cpick.__main__:main\"]},\n)\n","repo_name":"tslight/cpick","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"32441529313","text":"import requests\nfrom bs4 import BeautifulSoup\nimport boto3\nimport json\n\ndef scrape_aruodas(event, context):\n page = requests.get('https://en.aruodas.lt/butai/vilniuje/puslapis/2/')\n soup = BeautifulSoup(page.content, 'html.parser')\n s3 = boto3.resource('s3', region_name='eu-west-2')\n\n for row in soup.select('tr.list-row td.list-adress h3 a'):\n place = {'website': 'aruodas'}\n headers = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'\n }\n\n print(row.attrs['href'])\n\n single_page = requests.get(row.attrs['href'], headers=headers)\n single_soup = BeautifulSoup(single_page.content, 'html.parser')\n name = single_soup.select('h1.obj-header-text')[0]\n place['name'] = name.text.strip()\n\n stats = single_soup.select('div.obj-stats dl dd')\n place['href'] = stats[0].text.strip()\n\n path = place['href'].split('lt/')[1].split('-')\n place['id'] = path[1]\n\n if path[0] == '1':\n place['house'] = False\n else:\n place['house'] = True\n\n place['created_at'] = stats[1].text.strip()\n place['updated_at'] = stats[2].text.strip()\n\n price = single_soup.select('.price-block .price-left .price-eur')[0].text.strip()\n place['price'] = int(price.replace(' ', '').replace('€', ''))\n\n for r in single_soup.select('.obj-details dt'):\n rt = r.text.strip()\n if rt == 'Plotas:':\n area = r.find_next().text.strip()\n place['area'] = float(area.replace(' m²', '').replace(',', '.'))\n if rt == 'Kambarių sk.:':\n place['rooms'] = int(r.find_next().text.strip())\n if rt == 'Metai:':\n place['year'] = int(r.find_next().text.strip())\n if rt == 'Įrengimas:':\n place['equipment'] = r.find_next().contents[0].strip()\n\n map_url = 'https://www.aruodas.lt/map/?id=' + 
place['href'].split('lt/')[1] + '&position=popup'\n map_page = requests.get(map_url, headers=headers)\n map_soup = BeautifulSoup(map_page.content, 'html.parser')\n\n for line in map_soup.prettify().splitlines():\n if 'var locationCoordinate =' in line:\n location = line.split('var locationCoordinate = ')[1].replace(\"'\", '').replace(';', '').split(',')\n place['lat'] = float(location[0])\n place['lng'] = float(location[1])\n\n s3.Object('chum-buket', place['id'] + \".json\").put(Body=json.dumps(place))\n\n return {\n 'message': 'Scrape those houses!',\n 'event': event\n }\n\n\nscrape_aruodas(1, 1)\n","repo_name":"Rhymond/home-scrape","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28079452212","text":"import random\nfrom tkinter import *\n\n\n# ================= Roll the dice ==============================\ndef roll(d, n, m):\n if n != 0:\n str1 = f\"{n} d{d}'s.\\n\"\n total = 0\n log = \"\"\n for i in range(n):\n roll = random.randint(1, d)\n total += roll + m\n if m > 0:\n log += f\"{roll}(+{m}) \"\n elif m < 0:\n log += f\"{roll}({m}) \"\n else:\n log += f\"{roll} \"\n str1 += \"Rolls: \" + log + \"\\n\"\n return total, str1\n return 0, \"\"\n\n# four - twenty (type of dice)\n# modDice - modifier per dice // modTotal - modifier at the end of the result\ndef dice_roller(four=0, six=0, eight=0, ten=0, twelve=0, hundred=0, twenty=0, modDice=0, modTotal=0):\n if four < 0 or six < 0 or eight < 0 or ten < 0 or twelve < 0 or hundred < 0 or twenty < 0:\n strEnd = \"Invalid number of dice!\"\n return strEnd\n else:\n result = 0\n stringResult = \"\\nRolling...\\n\"\n dice = [4, 6, 8, 10, 12, 100, 20]\n nDice = [four, six, eight, ten, twelve, hundred, twenty]\n for i in range(7):\n rolled = roll(dice[i], nDice[i], modDice)\n result += rolled[0]\n stringResult += rolled[1]\n if modTotal > 0:\n result += modTotal\n stringResult += f\"Modifier: +{modTotal}\\n\"\n elif modTotal < 0:\n result += modTotal\n stringResult += f\"Modifier: {modTotal}\"\n stringResult += f\"Total: {result}.\"\n return stringResult\n\n\n# ================= Interface ===================================\n# main\nwindow = Tk()\nwindow.title(\"Dice Roller v1.0\")\n\n# functions\ndef clickRoll():\n four = int(nFour.get()) # returns the value in the text box\n six = int(nSix.get())\n eight = int(nEight.get())\n ten = int(nTen.get())\n twelve = int(nTwelve.get())\n hundred = int(nHundred.get())\n twenty = int(nTwenty.get())\n dMod = int(modDie.get())\n tMod = int(modTotal.get())\n output.configure(state=\"normal\")\n output.insert(END, dice_roller(four, six, eight, ten,\n twelve, hundred, twenty, dMod, tMod))\n output.configure(state=\"disabled\")\n\n\ndef clickClear():\n nFour.delete(0, END)\n nFour.insert(0, 0)\n nSix.delete(0, END)\n nSix.insert(0, 0)\n nEight.delete(0, END)\n nEight.insert(0, 0)\n nTen.delete(0, END)\n nTen.insert(0, 0)\n nTwelve.delete(0, END)\n nTwelve.insert(0, 0)\n nHundred.delete(0, END)\n nHundred.insert(0, 0)\n nTwenty.delete(0, END)\n nTwenty.insert(0, 0)\n modDie.delete(0, END)\n modDie.insert(0, 0)\n modTotal.delete(0, END)\n modTotal.insert(0, 0)\n output.configure(state=\"normal\")\n output.delete(0.0, END)\n output.configure(state=\"disabled\")\n\n\n# text labels\nLabel(window, text=\"D4\", width=5).grid(row=1, column=0, sticky=W)\nLabel(window, text=\"D6\", width=5).grid(row=2, column=0, sticky=W)\nLabel(window, text=\"D8\", width=5).grid(row=3, 
column=0, sticky=W)\nLabel(window, text=\"D10\", width=5).grid(row=4, column=0, sticky=W)\nLabel(window, text=\"D12\", width=5).grid(row=5, column=0, sticky=W)\nLabel(window, text=\"D100\", width=5).grid(row=6, column=0, sticky=W)\nLabel(window, text=\"D20\", width=5).grid(row=7, column=0, sticky=W)\n\nLabel(window, text=\"Number\").grid(row=0, column=1)\nLabel(window, text=\"Dice Mod\").grid(row=0, column=2)\nLabel(window, text=\"Result\").grid(row=0, column=3, columnspan=2)\n\n# input\n# d4\nnFour = Entry(window, width=10)\nnFour.grid(row=1, column=1, sticky=W)\nnFour.insert(0, 0)\n\n# d6\nnSix = Entry(window, width=10)\nnSix.grid(row=2, column=1, sticky=W)\nnSix.insert(0, 0)\n\n# d8\nnEight = Entry(window, width=10)\nnEight.grid(row=3, column=1, sticky=W)\nnEight.insert(0, 0)\n\n# d10\nnTen = Entry(window, width=10)\nnTen.grid(row=4, column=1, sticky=W)\nnTen.insert(0, 0)\n\n# d12\nnTwelve = Entry(window, width=10)\nnTwelve.grid(row=5, column=1, sticky=W)\nnTwelve.insert(0, 0)\n\n# d100\nnHundred = Entry(window, width=10)\nnHundred.grid(row=6, column=1, sticky=W)\nnHundred.insert(0, 0)\n\n# d20\nnTwenty = Entry(window, width=10)\nnTwenty.grid(row=7, column=1, sticky=W)\nnTwenty.insert(0, 0)\n\n# mod\nLabel(window, text=\"Per Die\").grid(row=2, column=2, padx=10)\nmodDie = Entry(window, width=10)\nmodDie.insert(0, 0)\nmodDie.grid(row=3, column=2, sticky=W, padx=10)\n\nLabel(window, text=\"Total\").grid(row=5, column=2, padx=10)\nmodTotal = Entry(window, width=10)\nmodTotal.insert(0, 0)\nmodTotal.grid(row=6, column=2, sticky=W, padx=10)\n\n# roll button\nButton(window, text=\"Roll\", command=clickRoll).grid(row=7, column=3)\nButton(window, text=\"Clear\", command=clickClear).grid(row=7, column=4)\n\n# output\noutput = Text(window, wrap=WORD, width=20)\noutput.grid(row=1, column=3, rowspan=6, columnspan=2, sticky=W)\noutput.insert(END, \"Select your Dice!\\n\")\noutput.configure(state=\"disabled\")\n\n# run the main loop\nwindow.mainloop()\n\n\n# = TEST ========================================================\n# print(dice_roller(six=1,eight=2))\n","repo_name":"tomasteicol19452/DnD-Py","sub_path":"DiceRoller/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17113277165","text":"import math \nimport numpy as np\nimport random\nimport sys\n\n#Helper Methods ------------------------------------------------------------------------\n# Finds the mean/centroid of a cluster.\ndef centerOfCluster(cluster):\n cluster = np.asarray(cluster)\n\n numberOfPoints = len(cluster)\n if numberOfPoints > 0:\n # the centroid is simply the mean of the points; np.mean handles points of any dimension\n return np.mean(cluster, axis=0)\n else: \n print (\"A cluster is empty\")\n sys.exit()\n\n\n# Calculates the Euclidean distance between two vectors. 
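For example\n# (illustrative values): distVector([0, 0], [3, 4]) == 5.0.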
\ndef distVector(vectorA, vectorB):\n    vectorA = np.asarray(vectorA)\n    vectorB = np.asarray(vectorB)\n\n    tem1 = np.subtract(vectorA[0], vectorB[0])\n    tem2 = np.subtract(vectorA[1], vectorB[1])\n    tem1 = np.power(tem1,2)\n    tem2 = np.power(tem2,2)\n    \n    result = np.add(tem1,tem2)\n    result = math.sqrt(result)\n    \n    return result\n\n\n# Calculates the sum of distance for all points in a cluster to its center.\ndef distClustSum(cluster):\n    cluster = np.asarray(cluster)\n    center = centerOfCluster(cluster)\n\n    dist = 0\n    for i in range(len(cluster)):\n        temp = pow(distVector(center,cluster[i]),2)\n        dist = temp + dist\n    return dist\n\n#End helper methods\n#---------------------------------------------------------------------------------------\n\n#Calculates the TD2 measure for the given clustering.\ndef TD2(data):\n    result = 0\n    data = np.asarray(data)\n\n\n    for i in range(len(data)):\n        result = distClustSum(data[i]) + result\n    return result\n\n# Is this forgy / Lloyd or not?\ndef kMeans(data):\n    points = [item for sublist in data for item in sublist]\n    k = len(data)\n    clusters=[[]for i in range(k)]\n    \n    centroids = []\n    for i in range(len(data)):\n        centroids.append(centerOfCluster(data[i]))\n\n    for x in points:\n        lengths = []\n\n        for i in centroids:\n            lengths.append(distVector(x,i))\n        # Find the centroid that is closest to the point\n        minimum = min(lengths)\n\n        # Assign that point to the cluster that centroid belongs to. \n        cluster =lengths.index(minimum)\n        \n        for j in range(k):\n            if cluster==j:\n                clusters[j].append(x)\n    \n    return clusters\n\n#kMeansAlgo takes data and a k\n#data: A list of lists of 2d points.\n#k: A list of the cluster counts we want to try to find. \ndef kMeansAlgo(data,k):\n    # points takes the list from data, and uses list comprehension on it. \n    points = [item for sublist in data for item in sublist]\n    \n    TD=[] # TD2 squared solutions for clusters\n    for i in k:\n        # create k empty clusters\n        clusters=[[]for j in range(i)]\n        for x in points:\n            # assign all the points in our data randomly to the k lists. \n            random.choice(clusters).append(x)\n        # As long as there are changes for kMeans update, otherwise calculate TD2 and end.\n        while kMeans(clusters)!=clusters:\n            clusters= kMeans(clusters)\n        \n        temptTD = TD2(clusters)\n        #TD.append(TD2(clusters))\n        print(\"Clusters with k =\", i,\"is:\", clusters, \"and TD2 =\", temptTD)\n        TD.append(temptTD)\n    #return TD # Commented out because I do not use it. It is just a list of TD2 for \n    # the different clusters. 
\n\n\nsquares = [[1],[2]]\ntriangle = [[4],[6]]\ncircle = [[8],[9],[10]]\n\ndata = [squares,triangle,circle] \n\n#Here we run with the values as they are in the graph\nprint (\"k-means with initial values set = \",kMeans(data), \"TD2 = \", TD2(kMeans(data)))\n\nkMeansAlgo(data, [1,2,3])","repo_name":"RuneSemey/course-help","sub_path":"DM566 - Machine Learning/Clustering/kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"14803909470","text":"\"\"\"\nProcesses data for the button analysis\n\"\"\"\nimport logging as log\nimport os\nimport csv\n\nfrom dirs import DIR_PROCESSED, DIR_DATA_SIMU\nfrom utils import read_csv_line\nfrom split_raw_simu_data import str2time\n\n\n# ADAS in the name\n\n\ndef main():\n    # log.basicConfig(format='%(asctime)-15s [%(levelname)-10s][%(subject)-10s][%(scenario)-50s]: %(message)s',\n    #                 level=log.INFO)\n    log.basicConfig(format='%(subject)s,%(scenario)s,%(message)s', level=log.INFO)\n    results = []\n    buttons_map = {'down': 'brake',\n                   'space': 'left',\n                   'b': 'right'}\n    for root, dirs, files in os.walk(DIR_DATA_SIMU):\n        # Only ADAS scenarios\n        if not "adas" in root.lower():\n            continue\n        # No stop and go scenarios\n        if "LCB_B1_Long_Alt" in root:\n            continue\n\n        # Getting scenario and subject\n        _, scenario = os.path.split(root)\n        _, subject = os.path.split(_)\n\n        d = {'subject': subject, 'scenario': scenario}\n        button_file_path = os.path.join(root, "keyboardvalues.csv")\n        if not os.path.exists(button_file_path):\n            log.warning("No button data".format(subject=subject, scenario=scenario), extra=d)\n            continue\n\n        button = None\n        t = None\n        start_time = None\n        time_to_answer = None\n        for l in read_csv_line(button_file_path):\n            action = l[1].lower()\n            if action == 'start':\n                start_time = str2time(l[0])\n            if action not in ['start', 'stop']:\n                if action not in buttons_map:\n                    log.warning("Pressed the unhandled button '{b}'".format(b=action), extra=d)\n                    continue\n                if button is None:\n                    button = buttons_map[action]\n                    t = str2time(l[0])\n                    time_to_answer = (t - start_time).total_seconds()\n                elif button != buttons_map[action]:\n                    log.fatal(\n                        "Pressed too many handled buttons ({b1}, {b2})".format(b1=button, b2=buttons_map[action]),\n                        extra=d)\n                    return\n\n        if button is None:\n            log.warning("Pressed no button", extra=d)\n            button = "---"\n            t = "---"\n            time_to_answer = "---"\n\n        results.append([scenario, subject, t, button, time_to_answer])\n\n    header = ["scenario",\n              "subject",\n              "timecode",\n              "answer",\n              "time_to_answer",\n              ]\n\n    assert (len(header) == len(results[0]))\n\n    res_fn = os.path.join(DIR_PROCESSED, "button_answers.csv")\n    with open(res_fn, 'w+') as subject_res:\n        subject_res_writer = csv.writer(subject_res, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL,\n                                        lineterminator='\\n')\n        subject_res_writer.writerow(header)\n        for r in results:\n            subject_res_writer.writerow(r)\n\n\nif __name__ == "__main__":\n    main()\n","repo_name":"Anais-Hoarau/BING_GUI_Plugins","sub_path":"pynd/scripts/Matt/button_process.py","file_name":"button_process.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8969130005","text":"import csv\nimport datetime\nimport operator\n\nCSV_FILE = './overtime.csv'\nCSV_SEP = \",\"\nDATETIME_FORMAT = \"%m/%d/%Y %H:%M\"\nWORKING_HOURS = 9\nDATE_ROW = 0\nENTRY_ROW = 1\nEXIT_ROW = 2\n\n\nclass 
CalculateOverTime(object):\n def __init__(self):\n self.csv_file = CSV_FILE\n self.dt_format = DATETIME_FORMAT\n self.working_hours = WORKING_HOURS\n self.date_row = DATE_ROW\n self.entry_row = ENTRY_ROW\n self.exit_row = EXIT_ROW\n self.csv_sep = CSV_SEP\n self.weekly_overtime_dict = {}\n self.weekly_overtime = datetime.timedelta(0)\n self.current_week = -1\n self.total_overtime = datetime.timedelta(0)\n\n def get_minus_time(self, td):\n td = datetime.timedelta(days=1) - td\n sign = \"-\"\n return \"{}{}:{}\".format(sign, (td.seconds) // 3600, td.seconds // 60 % 60)\n\n def get_plus_time(self, td):\n sign = \"+\"\n return \"{}{}:{}\".format(sign, (td.days*86400 + td.seconds) // 3600, td.seconds // 60 % 60)\n\n def get_hour_minutes(self, td, return_type=\"str\"):\n if return_type == \"str\":\n sign = \"+\"\n if td.days <= -1:\n return self.get_minus_time(td)\n else:\n return self.get_plus_time(td)\n else:\n return (td.days*86400 + td.seconds)//3600, (td.seconds//60) % 60\n\n def get_overtime(self, start_time, end_time):\n current_time = end_time - start_time\n overtime = current_time - datetime.timedelta(hours=WORKING_HOURS)\n return overtime\n\n def generate_daily_report(self, date, start, end, overtime):\n overtime = self.get_hour_minutes(overtime)\n print(\"Date: {date: <10}, Start: {start: <5}, End: {end: <5},\"\n \" Overtime: {overtime: <6}\".format(\n date=date,\n start=start,\n end=end,\n overtime=overtime\n ))\n\n def get_datetime(self, date, time):\n time = time.replace(\" \", \"\")\n return datetime.datetime.strptime('{} {}'.format(\n date,\n time\n ), DATETIME_FORMAT\n )\n\n def get_week(self, datetime_obj):\n return datetime_obj.strftime(\"%V\")\n\n def update_weekly_overtime(self, overtime):\n self.weekly_overtime += overtime\n\n def get_weekly_report(self, week_no):\n return self.weekly_overtime_dict.get(week_no, datetime.timedelta(0))\n\n def create_weekly_report(self, overtime):\n return {'overtime': overtime}\n\n def get_updated_overtime(self, week_overtime, overtime):\n return week_overtime + overtime\n\n def update_weekly_report(self, week_no, overtime):\n weekly_overtime = self.get_weekly_report(week_no)\n updated_overtime = self.get_updated_overtime(weekly_overtime, overtime)\n self.weekly_overtime_dict[week_no] = updated_overtime\n\n def update_total_overtime(self, curr_overtime):\n self.total_overtime += curr_overtime\n\n def process_csv(self):\n try:\n self.show_header(\"Daily Overtime\", 61)\n with open(self.csv_file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=self.csv_sep)\n line = 0\n for row in csv_reader:\n line += 1\n if line == 1:\n continue\n date = row[self.date_row]\n entry = row[self.entry_row]\n end = row[self.exit_row]\n entry_time = self.get_datetime(date, entry)\n exit_time = self.get_datetime(date, end)\n current_week = self.get_week(entry_time)\n if entry_time == exit_time:\n continue\n current_overtime = self.get_overtime(entry_time, exit_time)\n self.update_total_overtime(current_overtime)\n self.update_weekly_report(current_week, current_overtime)\n self.generate_daily_report(\n date, entry, end, current_overtime)\n print('-'*61)\n except IOError:\n print(\"File not found at {}\".format(self.csv_file))\n\n except IndexError:\n print(\"Delimiter '{}' not found\".format(self.csv_sep))\n\n except ValueError as e:\n print(str(e))\n\n except Exception as e:\n print(\"Generic Exception\")\n print(str(e))\n\n def display_weekly_entry(self, week_no, overtime):\n overtime = self.get_hour_minutes(overtime)\n print(\n \"Week {week_no: <3} 
\"\n \"Overtime: {overtime: <6}\".format(\n week_no=week_no,\n overtime=overtime)\n )\n\n def show_header(self, text=\"\", max_line=24):\n print('-'*max_line)\n print(text.center(max_line))\n print('-'*max_line)\n\n def generate_weekly_report(self):\n max_line = 24\n self.show_header(\"Weekly Report\", max_line)\n total_overtime = datetime.timedelta(0)\n for key, value in sorted(self.weekly_overtime_dict.items(), key=operator.itemgetter(0)):\n self.display_weekly_entry(key, value)\n total_overtime += value\n print('-' * max_line)\n\n def show_total_overtime(self):\n hours, minutes = self.get_hour_minutes(\n self.total_overtime, return_type=\"int\")\n self.show_header(\"Total Overtime\", 22)\n print(\n \"Hours: {hours: <3} Minutes: {minutes: <3}\".format(\n hours=hours,\n minutes=minutes\n )\n )\n print('-'*22)\n\n\nif __name__ == \"__main__\":\n overtime = CalculateOverTime()\n overtime.process_csv()\n overtime.generate_weekly_report()\n overtime.show_total_overtime()\n","repo_name":"ruddra/cefalo-overtime-calculator","sub_path":"overtime.py","file_name":"overtime.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"24136651871","text":"import pandas as pd\nimport datetime as dt\nimport oandapyV20.endpoints.accounts as accounts\nimport oandapyV20.endpoints.instruments as instruments\nimport pytz\nfrom oandapyV20 import API\nfrom oandapyV20.contrib.factories import InstrumentsCandlesFactory\nfrom oandapyV20.contrib.requests import MarketOrderRequest\nfrom oandapyV20.contrib.requests import TakeProfitDetails, StopLossDetails\nimport oandapyV20.endpoints.orders as orders\nimport oandapyV20.endpoints.positions as positions\nfrom config.keys import oanda_keys\n\nPRACTICE_API_HOST = 'api-fxpractice.forex.com'\nPRACTICE_STREAM_HOST = 'stream-fxpractice.forex.com'\nLIVE_API_HOST = 'api-fxtrade.forex.com'\nLIVE_STREAM_HOST = 'stream-fxtrade.forex.com'\nPORT = '443'\n\n\nclass OandaBroker2:\n # tz = pytz.timezone('America/New_York')\n\n def __init__(self, account_id, access_token, is_live=False):\n if is_live:\n host = LIVE_API_HOST\n stream_host = LIVE_STREAM_HOST\n else:\n host = PRACTICE_API_HOST\n stream_host = PRACTICE_STREAM_HOST\n\n self.account_id = account_id\n self.access_token = access_token\n self.client = API(access_token=self.access_token)\n self.support = None\n self.resistance = None\n\n def get_positions(self):\n r = positions.OpenPositions(accountID=self.account_id)\n self.client.request(r)\n\n all_positions = r.response.get(\"positions\", [])\n for position in all_positions:\n instrument = position['instrument']\n unrealized_pnl = position['unrealizedPL']\n pnl = position['pl']\n long = position['long']\n short = position['short']\n\n if short['units']:\n self.on_position_event(\n instrument, False, short['units'], unrealized_pnl, pnl)\n elif long['units']:\n self.on_position_event(\n instrument, True, long['units'], unrealized_pnl, pnl)\n else:\n self.on_position_event(\n instrument, None, 0, unrealized_pnl, pnl)\n return all_positions\n\n def send_market_order(self, instrument, quantity, is_buy, take_profit=None, stop_loss=None):\n\n tp = None if take_profit is None else TakeProfitDetails(price=take_profit).data\n\n sl = None if stop_loss is None else StopLossDetails(price=stop_loss).data\n\n if is_buy:\n mkt_order = MarketOrderRequest(instrument=instrument,\n units=quantity,\n takeProfitOnFill=tp,\n stopLossOnFill=sl)\n else:\n mkt_order = 
MarketOrderRequest(instrument=instrument,\n units=(quantity * -1),\n takeProfitOnFill=tp,\n stopLossOnFill=sl)\n\n r = orders.OrderCreate(self.account_id, data=mkt_order.data)\n self.client.request(r)\n\n if r.status_code != 201:\n self.on_order_event(instrument, quantity, is_buy, None, 'NOT_FILLED')\n return False\n\n if 'orderCancelTransaction' in r.response:\n self.on_order_event(instrument, quantity, is_buy, None, 'NOT_FILLED')\n return False\n\n transaction_id = r.response.get('lastTransactionID', None)\n self.on_order_event(instrument, quantity, is_buy, transaction_id, 'FILLED')\n return r\n\n def get_prices(self, instruments_, params):\n if isinstance(instruments_, list):\n df_list = []\n for instrument in instruments_:\n df_list.append(self.get_prices(instrument, params))\n return pd.concat(df_list)\n\n return self.get_prices_instrument(instruments_, params)\n\n def get_prices_instrument(self, instrument, params):\n \"\"\"\n @param instrument:\n @param params:\n @return: dataframe of live candles data\n \"\"\"\n client = self.client\n r = instruments.InstrumentsCandles(instrument=instrument,\n params=params)\n client.request(r)\n candles = r.response.get(\"candles\")\n instrument = r.response.get(\"instrument\")\n granularity = r.response.get(\"granularity\")\n df1 = pd.DataFrame(candles)[['complete', 'volume', 'time']]\n df2 = pd.DataFrame(list(pd.DataFrame(candles)['mid']))\n df = pd.concat([df1, df2], axis=1)\n df.rename(mapper={'o': 'open', 'h': 'high', 'l': 'low', 'c': 'close'}, inplace=True, axis=1)\n df['time'] = pd.to_datetime(df['time'])\n # df['time'] = df['time'].dt.tz_convert('America/New_York')\n\n df[['open', 'high', 'low', 'close']] = df[['open', 'high', 'low', 'close']].apply(pd.to_numeric,\n errors='coerce')\n df['instrument'] = instrument\n df['granularity'] = granularity\n return df\n\n def on_order_event(self, instrument, quantity, is_buy, transaction_id, status):\n print(\n dt.datetime.now(), '[ORDER]',\n 'account_id:', self.account_id,\n 'transaction_id:', transaction_id,\n 'status:', status,\n 'instrument:', instrument,\n 'quantity:', quantity,\n 'is_buy:', is_buy,\n )\n\n def on_position_event(self, instrument, is_long, units, unrealized_pnl, pnl):\n print(\n dt.datetime.now(), '[POSITION]',\n 'account_id:', self.account_id,\n 'instrument:', instrument,\n 'is_long:', is_long,\n 'units:', units,\n 'upnl:', unrealized_pnl,\n 'pnl:', pnl\n )\n\n\nif __name__ == '__main__':\n broker = OandaBroker2(account_id=oanda_keys['account_id'], access_token=oanda_keys['access_token'])\n # pos = broker.get_positions()\n # print(pos)\n # for p in pos:\n # print(p['instrument'], p['long'])\n # print(p['instrument'], p['short'])\n # order = broker.send_market_order('EUR_USD', 1, True)\n # print(order)\n\n print(broker.get_positions())\n print('end')\n","repo_name":"vhphan/algotrading101","sub_path":"traders/oanda/broker_oanda.py","file_name":"broker_oanda.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"32625161971","text":"from collections import defaultdict\n\nclass UnionFind():\n def __init__(self, n):\n self.n = n\n self.parents = [-1] * n\n\n def find(self, x):\n if self.parents[x] < 0:\n return x\n else:\n self.parents[x] = self.find(self.parents[x])\n return self.parents[x]\n\n def union(self, x, y):\n x = self.find(x)\n y = self.find(y)\n\n if x == y:\n return\n\n if self.parents[x] > self.parents[y]:\n x, y = y, x\n\n self.parents[x] += self.parents[y]\n 
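# union by size: x's root has just absorbed y's (negative) size; the next line re-points y's root at x.\n        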
self.parents[y] = x\n\n    def size(self, x):\n        return -self.parents[self.find(x)]\n\n    def same(self, x, y):\n        return self.find(x) == self.find(y)\n\n    def members(self, x):\n        root = self.find(x)\n        return [i for i in range(self.n) if self.find(i) == root]\n\n    def roots(self):\n        return [i for i, x in enumerate(self.parents) if x < 0]\n\n    def group_count(self):\n        return len(self.roots())\n\n    def all_group_members(self):\n        group_members = defaultdict(list)\n        for member in range(self.n):\n            group_members[self.find(member)].append(member)\n        return group_members\n\n    def __str__(self):\n        return '\\n'.join(f'{r}: {m}' for r, m in self.all_group_members().items())\n\nN, M = map(int, input().split())\nC, L, R = [], [], []\nfor _ in range(M):\n    c, l, r = map(int, input().split())\n    C.append(c)\n    L.append(l)\n    R.append(r)\n\nuf = UnionFind(N+1)\n\nC, L, R = zip(*sorted(zip(C, L, R)))\n\nans = 0\nmembers = 0\n\nfor i in range(M):\n    c, l, r = C[i], L[i], R[i]\n\n    if uf.same(l-1, r):\n        continue\n\n    members += 1\n    ans += c\n    uf.union(l-1, r)\n    if members == N:\n        break\n\nif members == N:\n    print(ans)\nelse:\n    print(-1)\n","repo_name":"tamlog06/Atcoder-Beginner-Contest","sub_path":"problems/Typical/typical90/aw/typical90_aw.py","file_name":"typical90_aw.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2172850383","text":"class Solution:\n    def isGood(self, nums: List[int]) -> bool:\n        nums.sort()\n        m=max(nums)\n        l=len(nums)\n        if(nums[l-1]==m and nums.count(m)==2 and l-1==m):\n            flag=1\n            # every value from 1 to n-1 must appear exactly once\n            for i in range(1, l-1):\n                if(nums.count(i)!=1):\n                    flag=0\n                    break\n            if(flag==1):\n                return True\n        return False\n\n\n","repo_name":"Harsha-vardhan1/Product-Based-Company-Training-By-SRU","sub_path":"day 13/check if array is good.py","file_name":"check if array is good.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11806393867","text":"from utils import table as table_utils\n\n\"\"\"\n    Author: Jaelson Carvalho - 11427671\n    Python 3.4.3\n    Usage: python binomial_coefficient.py\n\n    Resources:\n        https://github.com/jcarva/algorithms/blob/dynamic-programming/binomial-coefficient.js\n        http://www.geeksforgeeks.org/dynamic-programming-set-9-binomial-coefficient/\n        https://en.wikipedia.org/wiki/Binomial_coefficient\n\n    Description:\n        A binomial coefficient C(n, k) also gives the number of ways, disregarding order, that\n        k objects can be chosen from among n objects; more formally, the number of k-element subsets\n        (or k-combinations) of an n-element set.\n\n    Complexity: O(n*k)\n\n    Applications:\n        Some approaches to modeling chemical systems and solving them numerically use the binomial\n        coefficient to generate central difference equations for odd-ordered partial differentials in a\n        single-step operation. 
All finite difference equations to partial differentials shown herein display finite\n        series of palindromic coefficients with alternating signs.\n\"\"\"\n\ndef solve(input):\n    \"\"\"\n    :param input: array that contains two non-negative integers, where the second value should be\n    less than or equal to the first.\n    ex: [50, 3]\n\n    :return: integer that represents the calculated binomial coefficient.\n    ex: 19600\n    \"\"\"\n    return _binomial_coefficient(input[0], input[1])\n\n\ndef _binomial_coefficient(n, k):\n    if (n >= k) and (k >= 0):\n\n        # Create a table to store values that are used to solve smaller subproblems\n        c = table_utils.initialize(1, k + 1, 0)[0]\n\n        # Set the first position with 1\n        c[0] = 1\n\n        for i in range(1, n+1):\n\n            # Calculate the current row of Pascal's triangle using the values of the previous row\n            j = min(i, k)\n            while j > 0:\n                c[j] = c[j] + c[j-1]\n                j -= 1\n\n        return c[k]\n\n    else:\n        return -1\n","repo_name":"fernandobrito/algorithms_design_assignments","sub_path":"dynamic_greedy_lib/dynamic/binomial_coefficient.py","file_name":"binomial_coefficient.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"25487846846","text":"# A simple interactive program that alerts consumers of their tax and the minimum tip they can give\r\ncost = int(input('meal cost: '))\r\ntip = float(cost) * 0.18\r\ntax = float(cost) * 0.07\r\n\r\ndef addnumbers (cost, tip, tax):\r\n    return (cost + tip + tax)\r\n\r\nsum = float(cost) + float(tip) + float(tax)\r\nresult = (\"The sum of {0}, {1} and {2} is {3}\".format(cost, tip, tax, sum))\r\n\r\nprint('Your tip is ', tip)\r\nprint('Your tax is ', tax)\r\nprint(result)\r\nprint(\"Thank you. See you again!\")","repo_name":"kwakuduah12/Tax-Tip","sub_path":"Tax_Tip.py","file_name":"Tax_Tip.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34614546279","text":"# $HeadURL: $\n''' TokenAgent\n\n  This agent inspects all elements, and resets their tokens if necessary.\n\n'''\n\nimport datetime\n\nfrom DIRAC import S_OK, S_ERROR\nfrom DIRAC.Core.Base.AgentModule import AgentModule\nfrom DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient\nfrom DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient\nfrom DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient\nfrom DIRAC.ResourceStatusSystem.PolicySystem.PDP import PDP\nfrom DIRAC.ResourceStatusSystem.Utilities import RssConfiguration \n\n__RCSID__ = '$Id: $'\nAGENT_NAME = 'ResourceStatus/TokenAgent'\n\nclass TokenAgent( AgentModule ):\n  '''\n  TokenAgent is in charge of checking tokens assigned on resources.\n  Notifications are sent to those users owning expiring tokens.\n  '''\n\n  # Too many public methods\n  # pylint: disable-msg=R0904\n\n  def initialize( self ):\n    '''\n    TokenAgent initialization\n    '''\n    \n    # Attribute defined outside __init__\n    # pylint: disable-msg=W0201\n\n    self.notifyHours = self.am_getOption( 'notifyHours', 10 )\n\n    try:\n      self.rsClient = ResourceStatusClient()\n      self.rmClient = ResourceManagementClient()\n      self.noClient = NotificationClient()\n\n      return S_OK()\n    except Exception:\n      errorStr = \"TokenAgent initialization\"\n      self.log.exception( errorStr )\n      return S_ERROR( errorStr )\n\n  def execute( self ):\n    '''\n    The main TokenAgent execution method.\n    Checks for tokens owned by users that are expiring, and 
notifies those users.\n Calls rsClient.setToken() to set 'RS_SVC' as owner for those tokens that expired.\n '''\n\n adminMail = ''\n\n try:\n\n reason = 'Out of date token'\n\n #reAssign the token to RS_SVC\n #for g in self.ELEMENTS:\n\n validElements = RssConfiguration.getValidElements()\n\n for granularity in validElements:\n tokensExpired = self.rsClient.getTokens( granularity, \n tokenExpiration = datetime.datetime.utcnow() )\n\n if tokensExpired[ 'Value' ]:\n adminMail += '\\nLIST OF EXPIRED %s TOKENS\\n' % granularity\n adminMail += '%s|%s|%s\\n' % ( 'user'.ljust(20), 'name'.ljust(15), 'status type')\n\n for token in tokensExpired[ 'Value' ]:\n\n name = token[ 1 ]\n stype = token[ 2 ]\n user = token[ 9 ]\n\n self.rsClient.setToken( granularity, name, stype, reason, 'RS_SVC', \n datetime.datetime( 9999, 12, 31, 23, 59, 59 ) )\n adminMail += ' %s %s %s\\n' %( user.ljust(20), name.ljust(15), stype )\n\n #notify token owners\n inNHours = datetime.datetime.utcnow() + datetime.timedelta( hours = self.notifyHours )\n #for g in self.ELEMENTS:\n for granularity in validElements:\n\n tokensExpiring = self.rsClient.getTokens( granularity, tokenExpiration = inNHours )\n\n if tokensExpiring[ 'Value' ]:\n adminMail += '\\nLIST OF EXPIRING %s TOKENS\\n' % granularity\n adminMail += '%s|%s|%s\\n' % ( 'user'.ljust(20),'name'.ljust(15),'status type')\n\n for token in tokensExpiring[ 'Value' ]:\n\n name = token[ 1 ]\n stype = token[ 2 ]\n user = token[ 9 ]\n\n adminMail += '\\n %s %s %s\\n' %( user.ljust(20), name.ljust(15), stype )\n\n #If user is RS_SVC, we ignore this, whenever the token is out, this\n #agent will set again the token to RS_SVC\n if user == 'RS_SVC':\n continue\n\n pdp = PDP( granularity = granularity, name = name, statusType = stype )\n\n decision = pdp.takeDecision()\n pcresult = decision[ 'PolicyCombinedResult' ]\n spresult = decision[ 'SinglePolicyResults' ]\n\n expiration = token[ 10 ]\n\n mailMessage = \"The token for %s %s ( %s )\" % ( granularity, name, stype )\n mailMessage = mailMessage + \" will expire on %s\\n\\n\" % expiration\n mailMessage = mailMessage + \"You can renew it with command 'dirac-rss-renew-token'.\\n\"\n mailMessage = mailMessage + \"If you don't take any action, RSS will take control of the resource.\\n\\n\"\n\n policyMessage = ''\n\n if pcresult[ 'Action' ]:\n\n policyMessage += \" Policies applied will set status to %s.\\n\" % pcresult[ 'Status' ]\n\n for spr in spresult:\n policyMessage += \" %s Status->%s\\n\" % ( spr[ 'PolicyName' ].ljust(25), spr[ 'Status' ] )\n\n mailMessage += policyMessage\n adminMail += policyMessage\n\n self.noClient.sendMail( self.rmClient.getUserRegistryCache( user )[ 'Value' ][ 0 ][ 2 ],\n 'Token for %s is expiring' % name, mailMessage )\n if adminMail != '':\n #FIXME: 'ubeda' is not generic ;p\n self.noClient.sendMail( self.rmClient.getUserRegistryCache( 'ubeda' )[ 'Value' ][ 0 ][ 2 ],\n \"Token's summary\", adminMail )\n\n return S_OK()\n\n except Exception:\n errorStr = \"TokenAgent execution\"\n self.log.exception( errorStr )\n return S_ERROR( errorStr )\n\n################################################################################\n#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF","repo_name":"dlaur/DIRAC","sub_path":"ResourceStatusSystem/Agent/TokenAgent.py","file_name":"TokenAgent.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"32623311251","text":"#!/usr/bin/env python3\n# from typing 
import *\n\n\n# def solve(K: int) -> int:\ndef solve(K):\n see = [False] * K\n\n a = 0\n for i in range(K):\n a = a*10 + 7\n a %= K\n\n if see[a]:\n return -1\n\n see[a] = True\n if a == 0:\n return i+1\n\n return -1\n\n\n# generated by oj-template v4.8.1 (https://github.com/online-judge-tools/template-generator)\ndef main():\n K = int(input())\n a = solve(K)\n print(a)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tamlog06/Atcoder-Beginner-Contest","sub_path":"problems/ABC/174/c/abc174_c.py","file_name":"abc174_c.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33398884171","text":"import string\nfrom abc import ABC, abstractmethod\n\nfrom .symboltable import SymbolTable\nfrom .exceptions import (\n NoAddressError, BadVariableError, AddressOutOfBoundsError,\n DestinationError, ComputationError, JumpError, RAMError)\n\n\nclass Instruction(ABC):\n \"\"\"\n An abstract instruction class.\n\n Includes an abstract _check_valid method to ensure created\n instructions are valid on initialisation.\n\n Methods\n -------\n get_line()\n Return number in the file of the line from which the instruction\n was obtained\n get_inst()\n Return full instruction (excluding comments and surrounding\n whitespace) as written in the provided file\n encoded()\n Return the instruction in machine language\n \"\"\"\n\n _symbol_table = SymbolTable()\n _ram_address = 16\n\n def __init__(self, line, inst):\n self._line = line\n self._inst = inst\n self._check_valid()\n self._encoded = self._encode()\n\n def get_line(self):\n return self._line\n\n def get_inst(self):\n return self._inst\n\n def encoded(self):\n return self._encoded\n\n @abstractmethod\n def _check_valid(self):\n pass\n\n @abstractmethod\n def _encode(self):\n pass\n\n @classmethod\n def load_table(cls, symbol_table):\n cls._symbol_table = symbol_table\n\n\nclass AInstruction(Instruction):\n \"\"\"\n An A-Instruction class.\n\n Extends the Instruction class by also containing the value\n provided for the given A-Instruction.\n\n Methods\n -------\n get_line()\n Return number in the file of the line from which the instruction\n was obtained\n get_inst()\n Return full instruction (excluding comments and surrounding\n whitespace) as written in the provided file\n get_value()\n Return value after the '@' in the given A-Instruction\n is_numeric()\n Return true if the given address is numeric, false if symbolic\n encoded()\n Return the instruction in machine language\n \"\"\"\n\n _VALID_CHARS = frozenset(string.ascii_letters + string.digits + \"_.$:\")\n\n def __init__(self, line, inst, value):\n self._value = value\n self._numeric = value.isdigit()\n super().__init__(line, inst)\n\n def get_value(self):\n return self._value\n\n def is_numeric(self):\n return self._numeric\n\n def _check_valid(self):\n if self._value == \"\":\n raise NoAddressError(self)\n if len(self._value.split()) > 1:\n raise BadVariableError(self)\n if self._numeric:\n address = int(self._value)\n if address < 0 or address > 32767:\n raise AddressOutOfBoundsError(self)\n elif self._value[0].isdigit():\n raise BadVariableError(self)\n else:\n for c in self._value:\n if c not in AInstruction._VALID_CHARS:\n raise BadVariableError(self)\n\n def _encode(self):\n if self._numeric:\n address = int(self._value)\n elif Instruction._symbol_table.contains(self._value):\n address = Instruction._symbol_table.get_address(self._value)\n else:\n if Instruction._ram_address == 16383:\n 
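# no RAM slot left for a new variable (this assembler stops allocating at address 16383)\n                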
raise RAMError(self)\n            address = Instruction._ram_address\n            Instruction._symbol_table.add_entry(self._value, address)\n            Instruction._ram_address += 1\n        bits = bin(address).replace(\"0b\", \"\")\n        return \"0\" * (16 - len(bits)) + bits\n\n\nclass CInstruction(Instruction):\n    \"\"\"\n    A C-Instruction class.\n\n    Extends the Instruction class by also containing relevant\n    C-Instruction information.\n\n    Methods\n    -------\n    get_line()\n        Return number in the file of the line from which the instruction\n        was obtained\n    get_inst()\n        Return full instruction (excluding comments and surrounding\n        whitespace) as written in the provided file\n    get_dest()\n        Return the destination registers for the computation\n    get_comp()\n        Return the desired computation\n    get_jump()\n        Return the jump operation\n    encoded()\n        Return the instruction in machine language\n    \"\"\"\n\n    DEST = {None: \"000\", \"M\": \"001\", \"D\": \"010\", \"MD\": \"011\", \"A\": \"100\",\n            \"AM\": \"101\", \"AD\": \"110\", \"AMD\": \"111\"}\n    COMP = {\"0\": \"0101010\", \"1\": \"0111111\", \"-1\": \"0111010\",\n            \"D\": \"0001100\", \"A\": \"0110000\", \"M\": \"1110000\",\n            \"!D\": \"0001101\", \"!A\": \"0110001\", \"!M\": \"1110001\",\n            \"-D\": \"0001111\", \"-A\": \"0110011\", \"-M\": \"1110011\",\n            \"D+1\": \"0011111\", \"A+1\": \"0110111\", \"M+1\": \"1110111\",\n            \"D-1\": \"0001110\", \"A-1\": \"0110010\", \"M-1\": \"1110010\",\n            \"D+A\": \"0000010\", \"D+M\": \"1000010\",\n            \"D-A\": \"0010011\", \"D-M\": \"1010011\",\n            \"A-D\": \"0000111\", \"M-D\": \"1000111\",\n            \"D&A\": \"0000000\", \"D&M\": \"1000000\",\n            \"D|A\": \"0010101\", \"D|M\": \"1010101\"}\n    JUMP = {None: \"000\", \"JGT\": \"001\", \"JEQ\": \"010\", \"JGE\": \"011\", \"JLT\": \"100\",\n            \"JNE\": \"101\", \"JLE\": \"110\", \"JMP\": \"111\"}\n\n    def __init__(self, line, inst, dest, comp, jump):\n        self._dest = dest\n        self._comp = comp\n        self._jump = jump\n        super().__init__(line, inst)\n    \n    def get_dest(self):\n        return self._dest\n\n    def get_comp(self):\n        return self._comp\n\n    def get_jump(self):\n        return self._jump\n\n    def _check_valid(self):\n        if self._dest not in CInstruction.DEST:\n            raise DestinationError(self)\n        if self._comp not in CInstruction.COMP:\n            raise ComputationError(self)\n        if self._jump not in CInstruction.JUMP:\n            raise JumpError(self)\n\n    def _encode(self):\n        return (\"111\" + CInstruction.COMP[self._comp] +\n                CInstruction.DEST[self._dest] + CInstruction.JUMP[self._jump])\n\n\ndef prepare_symbol_table(file_path):\n    Instruction.load_table(SymbolTable(file_path))\n","repo_name":"AntJamGeo/hack-assembler","sub_path":"asmtools/instruction.py","file_name":"instruction.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6012854973","text":"from django.shortcuts import render\nimport csv\n\n\ndef inflation_view(request):\n    template_name = 'inflation.html'\n    # read the csv file and populate the context\n    data = []\n    year = []\n    total = []\n    with open('inflation_russia.csv', newline='', encoding=\"utf-8\") as csv_file:\n        reader = csv.reader(csv_file, delimiter=';')\n        for row in reader:\n            data.append(row)\n    head = data.pop(0)\n    # for i in range(len(data)):\n    #     print(data[i][0])\n    for line in data:\n        year.append(line.pop(0))\n        total.append(line.pop(len(line)-1))\n\n    context = {'data': data, 'head': head, 'year': year, 'total': total }\n\n    return render(request, template_name,\n                  
context)\n","repo_name":"maxtv1982/Python","sub_path":"dynamic-templates/task1/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37631582842","text":"from typing import Tuple, Optional, List, Union\n\nimport torch\nimport logging\nimport torch.nn.functional as F\n\nfrom wenet.transformer.positionwise_feed_forward import PositionwiseFeedForward\nfrom wenet.transformer.embedding import PositionalEncoding\nfrom wenet.transformer.embedding import RelPositionalEncoding\nfrom wenet.transformer.embedding import NoPositionalEncoding\nfrom wenet.transformer.subsampling import Conv2dSubsampling4\nfrom wenet.transformer.subsampling import Conv2dSubsampling6\nfrom wenet.transformer.subsampling import Conv2dSubsampling8\nfrom wenet.transformer.subsampling import LinearNoSubsampling\nfrom wenet.transformer.attention import MultiHeadedAttention\nfrom wenet.transformer.attention import RelPositionMultiHeadedAttention\nfrom wenet.transformer.encoder_layer import ConformerEncoderLayer\n\nfrom wenet.efficient_conformer.subsampling import Conv2dSubsampling2\nfrom wenet.efficient_conformer.convolution import ConvolutionModule\nfrom wenet.efficient_conformer.attention import GroupedRelPositionMultiHeadedAttention\nfrom wenet.efficient_conformer.encoder_layer import StrideConformerEncoderLayer\n\nfrom wenet.utils.common import get_activation\nfrom wenet.utils.mask import make_pad_mask\nfrom wenet.utils.mask import add_optional_chunk_mask\n\n\nclass EfficientConformerEncoder(torch.nn.Module):\n \"\"\"Conformer encoder module.\"\"\"\n def __init__(\n self,\n input_size: int,\n output_size: int = 256,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n attention_dropout_rate: float = 0.0,\n input_layer: str = \"conv2d\",\n pos_enc_layer_type: str = \"rel_pos\",\n normalize_before: bool = True,\n static_chunk_size: int = 0,\n use_dynamic_chunk: bool = False,\n global_cmvn: torch.nn.Module = None,\n use_dynamic_left_chunk: bool = False,\n macaron_style: bool = True,\n activation_type: str = \"swish\",\n use_cnn_module: bool = True,\n cnn_module_kernel: int = 15,\n causal: bool = False,\n cnn_module_norm: str = \"batch_norm\",\n stride_layer_idx: Optional[Union[int, List[int]]] = 3,\n stride: Optional[Union[int, List[int]]] = 2,\n group_layer_idx: Optional[Union[int, List[int], tuple]] = (0, 1, 2, 3),\n group_size: int = 3,\n stride_kernel: bool = True,\n **kwargs\n ):\n \"\"\"Construct Efficient Conformer Encoder\n\n Args:\n input_size to use_dynamic_chunk, see in BaseEncoder\n macaron_style (bool): Whether to use macaron style for\n positionwise layer.\n activation_type (str): Encoder activation function type.\n use_cnn_module (bool): Whether to use convolution module.\n cnn_module_kernel (int): Kernel size of convolution module.\n causal (bool): whether to use causal convolution or not.\n stride_layer_idx (list): layer id with StrideConv, start from 0\n stride (list): stride size of each StrideConv in efficient conformer\n group_layer_idx (list): layer id with GroupedAttention, start from 0\n group_size (int): group size of every GroupedAttention layer\n stride_kernel (bool): default True. 
True: recompute cnn kernels with stride.\n \"\"\"\n super().__init__()\n self._output_size = output_size\n\n if pos_enc_layer_type == \"abs_pos\":\n pos_enc_class = PositionalEncoding\n elif pos_enc_layer_type == \"rel_pos\":\n pos_enc_class = RelPositionalEncoding\n elif pos_enc_layer_type == \"no_pos\":\n pos_enc_class = NoPositionalEncoding\n else:\n raise ValueError(\"unknown pos_enc_layer: \" + pos_enc_layer_type)\n\n if input_layer == \"linear\":\n subsampling_class = LinearNoSubsampling\n elif input_layer == \"conv2d2\":\n subsampling_class = Conv2dSubsampling2\n elif input_layer == \"conv2d\":\n subsampling_class = Conv2dSubsampling4\n elif input_layer == \"conv2d6\":\n subsampling_class = Conv2dSubsampling6\n elif input_layer == \"conv2d8\":\n subsampling_class = Conv2dSubsampling8\n else:\n raise ValueError(\"unknown input_layer: \" + input_layer)\n\n logging.info(f\"input_layer = {input_layer}, \"\n f\"subsampling_class = {subsampling_class}\")\n\n self.global_cmvn = global_cmvn\n self.embed = subsampling_class(\n input_size,\n output_size,\n dropout_rate,\n pos_enc_class(output_size, positional_dropout_rate),\n )\n self.input_layer = input_layer\n self.normalize_before = normalize_before\n self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)\n self.static_chunk_size = static_chunk_size\n self.use_dynamic_chunk = use_dynamic_chunk\n self.use_dynamic_left_chunk = use_dynamic_left_chunk\n\n activation = get_activation(activation_type)\n self.num_blocks = num_blocks\n self.attention_heads = attention_heads\n self.cnn_module_kernel = cnn_module_kernel\n self.global_chunk_size = 0\n self.chunk_feature_map = 0\n\n # efficient conformer configs\n self.stride_layer_idx = [stride_layer_idx] \\\n if type(stride_layer_idx) == int else stride_layer_idx\n self.stride = [stride] \\\n if type(stride) == int else stride\n self.group_layer_idx = [group_layer_idx] \\\n if type(group_layer_idx) == int else group_layer_idx\n self.grouped_size = group_size # group size of every GroupedAttention layer\n\n assert len(self.stride) == len(self.stride_layer_idx)\n self.cnn_module_kernels = [cnn_module_kernel] # kernel size of each StridedConv\n for i in self.stride:\n if stride_kernel:\n self.cnn_module_kernels.append(self.cnn_module_kernels[-1] // i)\n else:\n self.cnn_module_kernels.append(self.cnn_module_kernels[-1])\n\n logging.info(f\"stride_layer_idx= {self.stride_layer_idx}, \"\n f\"stride = {self.stride}, \"\n f\"cnn_module_kernel = {self.cnn_module_kernels}, \"\n f\"group_layer_idx = {self.group_layer_idx}, \"\n f\"grouped_size = {self.grouped_size}\")\n\n # feed-forward module definition\n positionwise_layer = PositionwiseFeedForward\n positionwise_layer_args = (\n output_size,\n linear_units,\n dropout_rate,\n activation,\n )\n # convolution module definition\n convolution_layer = ConvolutionModule\n\n # encoder definition\n index = 0\n layers = []\n for i in range(num_blocks):\n # self-attention module definition\n if i in self.group_layer_idx:\n encoder_selfattn_layer = GroupedRelPositionMultiHeadedAttention\n encoder_selfattn_layer_args = (\n attention_heads,\n output_size,\n attention_dropout_rate,\n self.grouped_size)\n else:\n if pos_enc_layer_type == \"no_pos\":\n encoder_selfattn_layer = MultiHeadedAttention\n else:\n encoder_selfattn_layer = RelPositionMultiHeadedAttention\n encoder_selfattn_layer_args = (\n attention_heads,\n output_size,\n attention_dropout_rate)\n\n # conformer module definition\n if i in self.stride_layer_idx:\n # conformer block with downsampling\n 
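# kernel size for this stage comes from self.cnn_module_kernels[index], which was shrunk per stride above when stride_kernel is True\n                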
convolution_layer_args_stride = (\n output_size, self.cnn_module_kernels[index], activation,\n cnn_module_norm, causal, True, self.stride[index])\n layers.append(StrideConformerEncoderLayer(\n output_size,\n encoder_selfattn_layer(*encoder_selfattn_layer_args),\n positionwise_layer(*positionwise_layer_args),\n positionwise_layer(\n *positionwise_layer_args) if macaron_style else None,\n convolution_layer(\n *convolution_layer_args_stride) if use_cnn_module else None,\n torch.nn.AvgPool1d(\n kernel_size=self.stride[index], stride=self.stride[index],\n padding=0, ceil_mode=True,\n count_include_pad=False), # pointwise_conv_layer\n dropout_rate,\n normalize_before,\n ))\n index = index + 1\n else:\n # conformer block\n convolution_layer_args_normal = (\n output_size, self.cnn_module_kernels[index], activation,\n cnn_module_norm, causal)\n layers.append(ConformerEncoderLayer(\n output_size,\n encoder_selfattn_layer(*encoder_selfattn_layer_args),\n positionwise_layer(*positionwise_layer_args),\n positionwise_layer(\n *positionwise_layer_args) if macaron_style else None,\n convolution_layer(\n *convolution_layer_args_normal) if use_cnn_module else None,\n dropout_rate,\n normalize_before,\n ))\n\n self.encoders = torch.nn.ModuleList(layers)\n\n def set_global_chunk_size(self, chunk_size):\n \"\"\"Used in ONNX export.\n \"\"\"\n logging.info(f\"set global chunk size: {chunk_size}, default is 0.\")\n self.global_chunk_size = chunk_size\n if self.embed.subsampling_rate == 2:\n self.chunk_feature_map = 2 * self.global_chunk_size + 1\n elif self.embed.subsampling_rate == 6:\n self.chunk_feature_map = 6 * self.global_chunk_size + 5\n elif self.embed.subsampling_rate == 8:\n self.chunk_feature_map = 8 * self.global_chunk_size + 7\n else:\n self.chunk_feature_map = 4 * self.global_chunk_size + 3\n\n def output_size(self) -> int:\n return self._output_size\n\n def calculate_downsampling_factor(self, i: int) -> int:\n factor = 1\n for idx, stride_idx in enumerate(self.stride_layer_idx):\n if i > stride_idx:\n factor *= self.stride[idx]\n return factor\n\n def forward(self,\n xs: torch.Tensor,\n xs_lens: torch.Tensor,\n decoding_chunk_size: int = 0,\n num_decoding_left_chunks: int = -1,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Embed positions in tensor.\n Args:\n xs: padded input tensor (B, T, D)\n xs_lens: input length (B)\n decoding_chunk_size: decoding chunk size for dynamic chunk\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n Returns:\n encoder output tensor xs, and subsampled masks\n xs: padded output tensor (B, T' ~= T/subsample_rate, D)\n masks: torch.Tensor batch padding mask after subsample\n (B, 1, T' ~= T/subsample_rate)\n \"\"\"\n T = xs.size(1)\n masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)\n if self.global_cmvn is not None:\n xs = self.global_cmvn(xs)\n xs, pos_emb, masks = self.embed(xs, masks)\n mask_pad = masks # (B, 1, T/subsample_rate)\n chunk_masks = add_optional_chunk_mask(xs, masks,\n self.use_dynamic_chunk,\n self.use_dynamic_left_chunk,\n decoding_chunk_size,\n self.static_chunk_size,\n num_decoding_left_chunks)\n index = 0 # traverse stride\n for i, layer in enumerate(self.encoders):\n # layer return : x, mask, new_att_cache, new_cnn_cache\n xs, chunk_masks, _, _ = layer(xs, chunk_masks, 
pos_emb, mask_pad)\n if i in self.stride_layer_idx:\n masks = masks[:, :, ::self.stride[index]]\n chunk_masks = chunk_masks[:, ::self.stride[index],\n ::self.stride[index]]\n mask_pad = masks\n pos_emb = pos_emb[:, ::self.stride[index], :]\n index = index + 1\n\n if self.normalize_before:\n xs = self.after_norm(xs)\n # Here we assume the mask is not changed in encoder layers, so just\n # return the masks before encoder layers, and the masks will be used\n # for cross attention with decoder later\n return xs, masks\n\n def forward_chunk(\n self,\n xs: torch.Tensor,\n offset: int,\n required_cache_size: int,\n att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),\n cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),\n att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\" Forward just one chunk\n\n Args:\n xs (torch.Tensor): chunk input\n offset (int): current offset in encoder output time stamp\n required_cache_size (int): cache size required for next chunk\n compuation\n >=0: actual cache size\n <0: means all history cache is required\n att_cache (torch.Tensor): cache tensor for KEY & VALUE in\n transformer/conformer attention, with shape\n (elayers, head, cache_t1, d_k * 2), where\n `head * d_k == hidden-dim` and\n `cache_t1 == chunk_size * num_decoding_left_chunks`.\n cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,\n (elayers, b=1, hidden-dim, cache_t2), where\n `cache_t2 == cnn.lorder - 1`\n att_mask : mask matrix of self attention\n\n Returns:\n torch.Tensor: output of current input xs\n torch.Tensor: subsampling cache required for next chunk computation\n List[torch.Tensor]: encoder layers output cache required for next\n chunk computation\n List[torch.Tensor]: conformer cnn cache\n\n \"\"\"\n assert xs.size(0) == 1\n\n # using downsampling factor to recover offset\n offset *= self.calculate_downsampling_factor(self.num_blocks + 1)\n\n chunk_masks = torch.ones(1,\n xs.size(1),\n device=xs.device,\n dtype=torch.bool)\n chunk_masks = chunk_masks.unsqueeze(1) # (1, 1, xs-time)\n\n real_len = 0\n if self.global_chunk_size > 0:\n # for ONNX decode simulation, padding xs to chunk_size\n real_len = xs.size(1)\n pad_len = self.chunk_feature_map - real_len\n xs = F.pad(xs, (0, 0, 0, pad_len), value=0.0)\n chunk_masks = F.pad(chunk_masks, (0, pad_len), value=0.0)\n\n if self.global_cmvn is not None:\n xs = self.global_cmvn(xs)\n\n # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)\n xs, pos_emb, chunk_masks = self.embed(xs, chunk_masks, offset)\n elayers, cache_t1 = att_cache.size(0), att_cache.size(2)\n chunk_size = xs.size(1)\n attention_key_size = cache_t1 + chunk_size\n # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)\n # shape(pos_emb) = (b=1, chunk_size, emb_size=output_size=hidden-dim)\n\n if required_cache_size < 0:\n next_cache_start = 0\n elif required_cache_size == 0:\n next_cache_start = attention_key_size\n else:\n next_cache_start = max(attention_key_size - required_cache_size, 0)\n\n r_att_cache = []\n r_cnn_cache = []\n mask_pad = torch.ones(1,\n xs.size(1),\n device=xs.device,\n dtype=torch.bool)\n mask_pad = mask_pad.unsqueeze(1) # batchPad (b=1, 1, time=chunk_size)\n\n if self.global_chunk_size > 0:\n # for ONNX decode simulation\n pos_emb = self.embed.position_encoding(\n offset=max(offset - cache_t1, 0),\n size=cache_t1 + self.global_chunk_size)\n att_mask[:, :, -self.global_chunk_size:] = chunk_masks\n mask_pad = chunk_masks.to(torch.bool)\n 
else:\n pos_emb = self.embed.position_encoding(\n offset=offset - cache_t1, size=attention_key_size)\n\n max_att_len, max_cnn_len = 0, 0 # for repeat_interleave of new_att_cache\n for i, layer in enumerate(self.encoders):\n factor = self.calculate_downsampling_factor(i)\n # NOTE(xcsong): Before layer.forward\n # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2),\n # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2)\n # shape(new_att_cache) = [ batch, head, time2, outdim//head * 2 ]\n att_cache_trunc = 0\n if xs.size(1) + att_cache.size(2) / factor > pos_emb.size(1):\n # The time step is not divisible by the downsampling multiple\n att_cache_trunc = xs.size(1) + \\\n att_cache.size(2) // factor - pos_emb.size(1) + 1\n xs, _, new_att_cache, new_cnn_cache = layer(\n xs, att_mask, pos_emb,\n mask_pad=mask_pad,\n att_cache=att_cache[i:i + 1, :, ::factor, :][:, :, att_cache_trunc:, :],\n cnn_cache=cnn_cache[i, :, :, :]\n if cnn_cache.size(0) > 0 else cnn_cache\n )\n\n if i in self.stride_layer_idx:\n # compute time dimension for next block\n efficient_index = self.stride_layer_idx.index(i)\n att_mask = att_mask[:, ::self.stride[efficient_index],\n ::self.stride[efficient_index]]\n mask_pad = mask_pad[:, ::self.stride[efficient_index],\n ::self.stride[efficient_index]]\n pos_emb = pos_emb[:, ::self.stride[efficient_index], :]\n\n # shape(new_att_cache) = [batch, head, time2, outdim]\n new_att_cache = new_att_cache[:, :, next_cache_start // factor:, :]\n # shape(new_cnn_cache) = [1, batch, outdim, cache_t2]\n new_cnn_cache = new_cnn_cache.unsqueeze(0)\n\n # use repeat_interleave to new_att_cache\n new_att_cache = new_att_cache.repeat_interleave(repeats=factor, dim=2)\n # padding new_cnn_cache to cnn.lorder for casual convolution\n new_cnn_cache = F.pad(\n new_cnn_cache,\n (self.cnn_module_kernel - 1 - new_cnn_cache.size(3), 0))\n\n if i == 0:\n # record length for the first block as max length\n max_att_len = new_att_cache.size(2)\n max_cnn_len = new_cnn_cache.size(3)\n\n # update real shape of att_cache and cnn_cache\n r_att_cache.append(new_att_cache[:, :, -max_att_len:, :])\n r_cnn_cache.append(new_cnn_cache[:, :, :, -max_cnn_len:])\n\n if self.normalize_before:\n xs = self.after_norm(xs)\n\n # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),\n # ? may be larger than cache_t1, it depends on required_cache_size\n r_att_cache = torch.cat(r_att_cache, dim=0)\n # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)\n r_cnn_cache = torch.cat(r_cnn_cache, dim=0)\n\n if self.global_chunk_size > 0 and real_len:\n chunk_real_len = real_len // self.embed.subsampling_rate // \\\n self.calculate_downsampling_factor(self.num_blocks + 1)\n # Keeping 1 more timestep can mitigate information leakage\n # from the encoder caused by the padding\n xs = xs[:, :chunk_real_len + 1, :]\n\n return xs, r_att_cache, r_cnn_cache\n\n def forward_chunk_by_chunk(\n self,\n xs: torch.Tensor,\n decoding_chunk_size: int,\n num_decoding_left_chunks: int = -1,\n use_onnx=False\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" Forward input chunk by chunk with chunk_size like a streaming\n fashion\n\n Here we should pay special attention to computation cache in the\n streaming style forward chunk by chunk. Three things should be taken\n into account for computation in the current network:\n 1. transformer/conformer encoder layers output cache\n 2. convolution in conformer\n 3. convolution in subsampling\n\n However, we don't implement subsampling cache for:\n 1. 
We can control subsampling module to output the right result by\n overlapping input instead of cache left context, even though it\n wastes some computation, but subsampling only takes a very\n small fraction of computation in the whole model.\n 2. Typically, there are several covolution layers with subsampling\n in subsampling module, it is tricky and complicated to do cache\n with different convolution layers with different subsampling\n rate.\n 3. Currently, nn.Sequential is used to stack all the convolution\n layers in subsampling, we need to rewrite it to make it work\n with cache, which is not prefered.\n Args:\n xs (torch.Tensor): (1, max_len, dim)\n decoding_chunk_size (int): decoding chunk size\n num_decoding_left_chunks (int):\n use_onnx (bool): True for simulating ONNX model inference.\n \"\"\"\n assert decoding_chunk_size > 0\n # The model is trained by static or dynamic chunk\n assert self.static_chunk_size > 0 or self.use_dynamic_chunk\n subsampling = self.embed.subsampling_rate\n context = self.embed.right_context + 1 # Add current frame\n stride = subsampling * decoding_chunk_size\n decoding_window = (decoding_chunk_size - 1) * subsampling + context\n num_frames = xs.size(1)\n\n outputs = []\n offset = 0\n required_cache_size = decoding_chunk_size * num_decoding_left_chunks\n if use_onnx:\n logging.info(\"Simulating for ONNX runtime ...\")\n att_cache: torch.Tensor = torch.zeros(\n (self.num_blocks, self.attention_heads, required_cache_size,\n self.output_size() // self.attention_heads * 2),\n device=xs.device)\n cnn_cache: torch.Tensor = torch.zeros(\n (self.num_blocks, 1, self.output_size(), self.cnn_module_kernel - 1),\n device=xs.device)\n self.set_global_chunk_size(chunk_size=decoding_chunk_size)\n else:\n logging.info(\"Simulating for JIT runtime ...\")\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)\n\n # Feed forward overlap input step by step\n for cur in range(0, num_frames - context + 1, stride):\n end = min(cur + decoding_window, num_frames)\n logging.info(f\"-->> frame chunk msg: cur={cur}, \"\n f\"end={end}, num_frames={end-cur}, \"\n f\"decoding_window={decoding_window}\")\n if use_onnx:\n att_mask: torch.Tensor = torch.ones(\n (1, 1, required_cache_size + decoding_chunk_size),\n dtype=torch.bool, device=xs.device)\n if cur == 0:\n att_mask[:, :, :required_cache_size] = 0\n else:\n att_mask: torch.Tensor = torch.ones(\n (0, 0, 0), dtype=torch.bool, device=xs.device)\n\n chunk_xs = xs[:, cur:end, :]\n (y, att_cache, cnn_cache) = \\\n self.forward_chunk(\n chunk_xs, offset, required_cache_size,\n att_cache, cnn_cache, att_mask)\n outputs.append(y)\n offset += y.size(1)\n\n ys = torch.cat(outputs, 1)\n masks = torch.ones(1, 1, ys.size(1), device=ys.device, dtype=torch.bool)\n return ys, masks\n","repo_name":"wenet-e2e/wenet","sub_path":"wenet/efficient_conformer/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":25005,"program_lang":"python","lang":"en","doc_type":"code","stars":3402,"dataset":"github-code","pt":"16"} +{"seq_id":"26386083671","text":"import argparse\nimport traceback\nfrom loader import read_queries, read_constraints\nfrom config import FileType, get_path, CONNECT_MAP\nfrom extract_rule import ExtractQueryRule\nfrom utils import GlobalExpRecorder, get_valid_queries\nfrom constraint import InclusionConstraint, LengthConstraint, FormatConstraint\n\nname_to_type = {\n 'inclusion': InclusionConstraint,\n 'length': LengthConstraint,\n 
'format': FormatConstraint,\n}\n# =====================================================================\n\n\n# ========================== main function ============================\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument('--app', default='redmine')\n parser.add_argument('--cnt', type=int, default=100000, help='number of queries to rewrite')\n parser.add_argument(\"--data_dir\", type=str, help=\"data root dir\", default='data')\n args = parser.parse_args()\n \n recorder = GlobalExpRecorder()\n recorder.record(\"app_name\", args.app)\n \n # load query once for each app\n offset = 0\n queries = read_queries(get_path(FileType.RAW_QUERY, args.app, args.data_dir), offset, args.cnt)\n # queries = get_valid_queries(queries, CONNECT_MAP[args.app])\n \n # count the number of queries with constraints on it\n all_cs = load_cs(args.app, args.data_dir, 'all')\n all_cnt, warning_cnt = count_queries_with_cs(all_cs, queries, verbal=True)\n recorder.record(\"queries_with_cs\", all_cnt)\n recorder.record(\"warning cnt\", warning_cnt)\n\n # count inclusion, length, format constraint query\n for type_name in name_to_type.keys():\n cs_type = name_to_type[type_name]\n filtered_cs = load_cs(args.app, args.data_dir, cs_type)\n cnt, _ = count_queries_with_cs(filtered_cs, queries, verbal=True)\n recorder.record(type_name, cnt)\n recorder.dump(get_path(FileType.BENCH_STR2INT_NUM, args.app, args.data_dir))\n\n#################################\n# helper functions #\n#################################\n# return filtered constraints\ndef load_cs(appname, datadir, cs_type) -> list:\n constraints = read_constraints(get_path(FileType.CONSTRAINT, appname, datadir), include_all=True)\n if cs_type == 'all':\n return constraints\n filtered_cs = [c for c in constraints if isinstance(c, cs_type)]\n return filtered_cs\n\n# return number of queries contains filtered constraints\ndef count_queries_with_cs(filtered_cs, queries, verbal) -> tuple:\n cnt = 0\n rule = ExtractQueryRule(filtered_cs)\n for q in queries:\n try:\n rewrite_q = rule.apply(q.q_obj)\n if extracted(rewrite_q):\n # print(format(rewrite_q[0]))\n cnt += 1\n except (KeyError, TypeError, AttributeError, ValueError):\n print_error(q, verbal)\n return (cnt, rule.warning_cnt)\n\n# return True if query contains inclusion constrains\ndef extracted(q) -> bool:\n # rewrite_q is a list of queries from the rewrite rules\n return len(q) >= 1\n\n# print info about errored query\ndef print_error(q, verbal) -> None:\n if verbal:\n print(q.q_raw)\n print(\"----------------\")\n print(traceback.format_exc())\n print(\"================\")\n else:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"LiuXiaoxuanPKU/Coco","sub_path":"rewriter/src/bench_str2int_num.py","file_name":"bench_str2int_num.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"27630117115","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\nimport os\n\ndef get_page_html(url):\n headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36\"}\n page = requests.get(url, headers=headers)\n return page.content\n\n\ndef check_item_in_stock(page_html, phone, text_to_send, value_sku_item):\n soup = BeautifulSoup(page_html, 'html.parser')\n\n stock_L = soup.find(value=value_sku_item)\n\n if stock_L['data-inventory-status'] == 'Available':\n print(\"In stock!\")\n 
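# notify via the AppleScript helper (macOS-only; assumes imessage.scpt lives next to this script)\n        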
os.system(\"osascript imessage.scpt %s '%s' \" % (phone, text_to_send))\n elif stock_L['data-inventory-status'] == 'Unavailable':\n print(\"OOS\")\n else:\n print(\"Issue in program\")\n\n\ndef main ():\n phone = XXXX\n #Find sku in the source page\n value_sku_item = \"XXXX\"\n text_to_send = 'message to send'\n url = \"https://www.abercrombie.com/shop/us/XXX\"\n page_html = get_page_html(url)\n\n while True:\n check_item_in_stock(page_html, phone, text_to_send, value_sku_item)\n time.sleep(600)\n\nif __name__ == '__main__':\n main()","repo_name":"egiacomin/inventory_check_stock","sub_path":"check_stock.py","file_name":"check_stock.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22923520950","text":"from aiogram import types\r\nfrom aiogram.dispatcher import FSMContext\r\nfrom aiogram.dispatcher.filters import Command\r\nfrom keyboards.default.keyboards import markup,back\r\nfrom loader import dp\r\n\r\nfrom states.search import search\r\n\r\nimport requests\r\n\r\n@dp.message_handler(text=\"ℹ️ About\")\r\nasync def answer(message: types.Message, state: FSMContext):\r\n await message.answer(\"Ushbu bot butun dunyodagi eng so'ngi ob havo ma'lumotlarini ko'rsatib beradi\")\r\n@dp.message_handler(text=\"🔍Izlash\")\r\nasync def answer(message: types.Message, state: FSMContext):\r\n await search.search.set()\r\n await message.answer(\"\"\"Marhamat qilib Davlat yoki shahar nomini kiriting\r\nMisol uchun: \"O'zbekiston\" yoki \"Toshkent\"\r\nEslatib o'tamiz biz bilan butun\r\ndunyo ob-havo ma'lumotlarini topishingiz mumkin\"\"\",reply_markup=back)\r\n\r\n@dp.message_handler(state=search.search)\r\nasync def qidiruv(message: types.Message, state: FSMContext):\r\n if message.text== \"Qidiruv bo'limidan chiqish\":\r\n await state.finish()\r\n await message.answer('Siz asosiy menudasiz',reply_markup=markup) \r\n\r\n else:\r\n try:\r\n if message.text== \"Qidiruv bo'limidan chiqish\":\r\n await state.finish()\r\n await message.answer('Siz asosiy menudasiz',reply_markup=markup)\r\n API = 'f441cb7b77702bb2d9648180922cae59'\r\n CITY = message.text\r\n URL = f'https://api.openweathermap.org/data/2.5/weather?q={CITY}&units=metric&appid={API}'\r\n response = requests.get(url=URL).json()\r\n city_info = {\r\n 'city': CITY,\r\n 'temp': response['main']['temp'],\r\n 'humidity': response['main']['humidity'],\r\n 'weather': response['weather'][0]['main'],\r\n 'wind': response['wind']['speed'],\r\n 'pressure': response['main']['pressure'],\r\n }\r\n h={'Clear':'ochiq',\r\n \"Clouds\":\"bulutli\",\r\n \"Snow\":\"qorli\",\r\n \"Mist\":\"tumanli\",\r\n \"Smoke\":'tutunli',\r\n \"Haze\":\"tumanli\",\r\n \"Dust\":\"havo changli\",\r\n \"Fog\":\"quyuq tuman mavjud\",\r\n \"Sand\":\"yuqori darajada changlangan\",\r\n \"Ash\":\"havo tarkibida kul miqdori ko'p\",\r\n \"Squall\":\"yomg'irli\",\r\n \"Tornado\":\"Bo'ron bo'lmoqda\",\r\n \"Rain\":\"yomg'irli\",\r\n \"Drizzle\":\"yomg'irli\",\r\n \"Thunderstorm\":\"chaqmoq va momaqaldiroq mavjud\"\r\n }\r\n msg = f\"\"\"{CITY.upper()}\r\n\r\nOb-havo:{h[city_info['weather']]} \r\n------------------------------------\r\n🌡 Harorat: {city_info['temp']} C\r\n💨 Shamol: {city_info['wind']} m/s\r\n💦 Namlik: {city_info['humidity']} %\r\n🧬 Bosim: {city_info['pressure']} hPa\"\"\"\r\n await message.answer(msg, parse_mode='html',reply_markup=back)\r\n\r\n except Exception as e:\r\n msg1 = f\"Ushbu davlat haqida ma'lumotlar mavjud emas\"\r\n await message.answer(msg1, 
parse_mode='html',reply_markup=back)","repo_name":"Akaikumogo/Ob_havo_bot","sub_path":"handlers/users/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9699131351","text":"# library imports\nimport matplotlib.pyplot as plt\n\n# variables\nx1 = [1, 2, 3, 4, 5]\ny1 = [3, 4, 7, 2, 4]\n\nx2 = [2, 4, 6, 8, 10]\ny2 = [5, 6, 8, 4, 4]\n\ntitulo = "Graficos em barras"\neixox = "eixo X"\neixoy = "eixo Y"\n\n# labels\nplt.title(titulo)\nplt.xlabel(eixox)\nplt.ylabel(eixoy)\n\n# using bar() to draw bar charts\nplt.bar(x1, y1, label = "Grupo 1") # setting the legend label for this color\nplt.bar(x2, y2, label = "Grupo 2")\nplt.legend() # showing the legend on the chart\n\n# save, then show the chart (savefig must run before show(), otherwise the saved image is blank)\nplt.savefig(r"graficos\grafico_barras.png", dpi = 300) # high-resolution image\nplt.show()","repo_name":"rafaelrossim/DataScience","sub_path":"2_graficosbarras.py","file_name":"2_graficosbarras.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19949836428","text":"import cv2\nimport os\nimport numpy as np\n\npreview_enabled = True\n\ndef init(set_enabled=True):\n    global preview_enabled\n    preview_enabled = set_enabled\n    if set_enabled:\n        if not os.path.exists('data_features_matches'):\n            os.makedirs('data_features_matches')\n        if not os.path.exists('data_features_preview'):\n            os.makedirs('data_features_preview')\n\n\ndef saveImageFeaturePreview(imgFile, kp):\n    if not preview_enabled:\n        return\n    img = cv2.imread("data/" + imgFile)\n    for kpit in kp:\n        cv2.circle(img, tuple(map(int, kpit.pt)), 1, (0, 0, 255), 4)\n    cv2.imwrite("data_features_preview/" + imgFile, img)\n\n\ndef saveFullImageFeaturePreview(kp):\n    if not preview_enabled:\n        return\n    img = cv2.imread("input/" + os.environ.get("IFS_PHOTO_FILE", "ifs.jpg"))\n    for kpit in kp:\n        cv2.circle(img, tuple(map(int, kpit.pt)), 1, (0, 0, 255), 4)\n    cv2.imwrite("data_features_preview/ifs.jpg", img)\n\n\ndef saveMatchedCenterMultiple(matchedList):\n    if not preview_enabled:\n        return\n    imgFull = cv2.imread(\n        "input/" + os.environ.get("IFS_PHOTO_FILE", "ifs.jpg"))\n    for matched in matchedList:\n        for center in matched["centers"]:\n            imgFull = cv2.circle(\n                imgFull, (center["x"], center["y"]), 5, (0, 255, 255), 8)\n            imgFull = cv2.putText(\n                imgFull, str(matched["portalID"]), (center["x"], center["y"]), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 0, 255), 8)\n    cv2.imwrite("data_features_matches/ifs.jpg", imgFull)\n\n\ndef saveGridInfo(matchedGridList):\n    if not preview_enabled:\n        return\n    imgFull = cv2.imread("data_features_matches/ifs.jpg")\n    for idx, colList in enumerate(matchedGridList):\n        for center in colList:\n            imgFull = cv2.putText(\n                imgFull, str(idx), (center["x"], center["y"]), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 0, 255), 15)\n    cv2.imwrite("data_features_matches/ifs-grid.jpg", imgFull)\n","repo_name":"UESTC-Ingress/IFSolver","sub_path":"modules/utils/PreviewUtil.py","file_name":"PreviewUtil.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"16"} +{"seq_id":"14655908266","text":"from Messages import Message as Ms\n\n\nclass ConfigMessage(Ms.Message):\n    def __init__(self, public_key: str, message: dict = None):\n        try:\n            if message is None:\n                self.message_info = {\n                    "Type": "Config",\n
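                    # annotation (added; not in the original source): a Config\n                    # message simply advertises the sender's public key under a\n                    # fixed "Type" tag; the elif branch below re-wraps an\n                    # already-built dict of the same shape.\n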
\"PublicKey\": public_key\n }\n elif message[\"Type\"] == \"Config\":\n super(ConfigMessage, self).__init__(message=message)\n except Exception as e:\n print(e)\n","repo_name":"aymanKH9991/PasswordManagerServer","sub_path":"Server/Messages/Configration.py","file_name":"Configration.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3724124962","text":"from django.db.models import Q\nfrom rest_framework.serializers import ModelSerializer\nfrom modules.account.user.helpers.model_utils import UserModelUtils\nfrom ..models import Member, MemberShip, MemberShipType, BookingService, Device\nfrom modules.account.user.helpers.srs import UserSr\nfrom modules.club_service.service.helpers.srs import ServiceSr\nfrom modules.noti.notification.models import Notification\nfrom django.http.request import QueryDict\nfrom rest_framework.serializers import ValidationError\n\n\nModel = Member\n\n\nclass BookingServiceSr(ModelSerializer):\n class Meta:\n model = BookingService\n exclude = ()\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n member = Member.objects.filter(id=obj.member.id).first()\n membership = MemberShip.objects.filter(member=member).first()\n\n rep[\"member_real_name\"] = MemberSr(member).data[\"full_name\"]\n rep[\"member_real_phone_number\"] = UserSr(\n member.user).data[\"phone_number\"]\n rep[\"member_real_email\"] = MemberSr(member).data[\"email\"]\n rep[\"dob\"] = MemberSr(member).data[\"dob\"]\n rep[\"occupation\"] = MemberSr(member).data[\"occupation\"]\n rep[\"address\"] = MemberSr(member).data[\"address\"]\n rep[\"gender\"] = MemberSr(member).data[\"gender\"]\n rep[\"avatar\"] = MemberSr(member).data[\"avatar\"]\n rep[\"membership_type\"] = MemberShipSr(\n membership).data[\"membership_type\"]\n rep[\"register_date\"] = MemberShipSr(membership).data[\"register_date\"]\n rep[\"expire_date\"] = MemberShipSr(membership).data[\"expire_date\"]\n\n return rep\n\n\nclass MemberSr(ModelSerializer):\n class Meta:\n model = Model\n exclude = []\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n rep[\"phone_number\"] = UserSr(obj.user).data[\"phone_number\"]\n rep[\"email\"] = UserSr(obj.user).data[\"email\"]\n rep[\"membership_type\"] = MemberShipTypeSr(\n obj.membership.membership_type).data\n rep[\"register_date\"] = MemberShipSr(\n obj.membership).data[\"register_date\"]\n rep[\"expire_date\"] = MemberShipSr(obj.membership).data[\"expire_date\"]\n return rep\n\n\nclass MemberShipSr(ModelSerializer):\n class Meta:\n model = MemberShip\n exclude = []\n\n\nclass MemberShipTypeSr(ModelSerializer):\n class Meta:\n model = MemberShipType\n exclude = []\n\n\nclass DeviceSr(ModelSerializer):\n class Meta:\n model = Device\n exclude = []\n\n\nclass MemberRetrieveSr(MemberSr):\n class Meta(MemberSr.Meta):\n exclude = [\n \"created_at\",\n \"updated_at\",\n \"user\",\n ]\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n rep[\"membership_type\"] = MemberShipSr(\n obj.membership).data[\"membership_type\"]\n return rep\n\n\nclass MemberPermissionSr(MemberRetrieveSr):\n class Meta(MemberRetrieveSr.Meta):\n pass\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n is_not_read = Notification.objects.filter(\n Q(member=obj) & Q(is_read=False))\n rep[\"unread_notification\"] = bool(is_not_read)\n\n rep[\"phone_number\"] = UserSr(obj.user).data[\"phone_number\"]\n rep[\"email\"] = 
UserSr(obj.user).data[\"email\"]\n rep[\"membership_type\"] = MemberShipTypeSr(\n obj.membership.membership_type).data\n rep[\"list_services\"] = []\n rep[\"register_date\"] = MemberShipSr(\n obj.membership).data[\"register_date\"]\n rep[\"expire_date\"] = MemberShipSr(obj.membership).data[\"expire_date\"]\n rep[\"permissions\"] = UserModelUtils.get_permissions(obj.user)\n # for service in obj.membership.membership_type.services.all():\n # rep[\"list_services\"].append(ServiceSr(service).data)\n return rep\n\n\nclass MemberOptionSr(MemberSr):\n class Meta(MemberSr.Meta):\n exclude = []\n\n def to_representation(self, obj):\n groups = obj.user.groups.all()\n return {\n \"value\": obj.id,\n \"label\": obj.full_name,\n \"email\": obj.user.email,\n \"phone_number\": str(obj.user.phone_number),\n \"groups\": groups.values_list(\"id\", flat=True),\n \"group_labels\": groups.values_list(\"name\", flat=True),\n }\n","repo_name":"datlt198640/apartment-management","sub_path":"api/modules/account/member/helpers/srs.py","file_name":"srs.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17739310007","text":"'''\r\n\r\nNetwork Security Group operations for test.\r\n\r\n@author: Youyk\r\n'''\r\n\r\nimport apibinding.api_actions as api_actions\r\nimport zstackwoodpecker.test_util as test_util\r\nimport apibinding.inventory as inventory\r\nimport zstackwoodpecker.operations.account_operations as acc_ops\r\nimport zstackwoodpecker.operations.deploy_operations as dep_ops\r\nimport zstackwoodpecker.operations.resource_operations as res_ops\r\nimport zstacklib.utils.xmlobject as xmlobject\r\n\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\ndef create_security_group(sg_creation_option):\r\n action = api_actions.CreateSecurityGroupAction()\r\n if not sg_creation_option.get_name():\r\n action.name = 'test_sg'\r\n else:\r\n action.name = sg_creation_option.get_name()\r\n \r\n if not sg_creation_option.get_description():\r\n action.description = 'Test Security Group'\r\n else:\r\n action.description = sg_creation_option.get_description()\r\n\r\n if not sg_creation_option.get_timeout():\r\n action.timeout = 120000\r\n else:\r\n action.timeout = sg_creation_option.get_timeout()\r\n \r\n test_util.action_logger('Create [Security Group]: %s' % action.name)\r\n evt = acc_ops.execute_action_with_session(action, sg_creation_option.get_session_uuid())\r\n test_util.test_logger('[sg:] %s is created.' 
% evt.inventory.uuid)\r\n    return evt.inventory\r\n\r\ndef delete_security_group(sg_uuid, session_uuid=None):\r\n    action = api_actions.DeleteSecurityGroupAction()\r\n    action.uuid = sg_uuid\r\n    action.timeout = 12000\r\n    test_util.action_logger('Delete [Security Group:] %s' % sg_uuid)\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt\r\n\r\n#rules = [inventory.SecurityGroupRuleAO()]\r\ndef add_rules_to_security_group(sg_uuid, rules, session_uuid=None):\r\n    action = api_actions.AddSecurityGroupRuleAction()\r\n    action.securityGroupUuid = sg_uuid\r\n    action.timeout = 120000\r\n    action.rules = rules\r\n    for rule in rules:\r\n        test_util.action_logger('Add Security Group [Rule:] type: %s, protocol: %s, startPort: %s, endPort: %s, address: %s in [Security Group:] %s' % (rule.type, rule.protocol, rule.startPort, rule.endPort, rule.allowedCidr, sg_uuid))\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt.inventory\r\n\r\n#rules = [rules_uuid]\r\ndef remove_rules_from_security_group(rules, session_uuid=None):\r\n    '''\r\n    params: rules is a list of rule uuids.\r\n    '''\r\n    action = api_actions.DeleteSecurityGroupRuleAction()\r\n    action.timeout = 12000\r\n    action.ruleUuids = rules\r\n    test_util.action_logger('Delete Security Group [Rules:] %s' % rules)\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt.inventory\r\n\r\ndef add_nic_to_security_group(sg_uuid, vm_nic_list, session_uuid=None):\r\n    action = api_actions.AddVmNicToSecurityGroupAction()\r\n    action.securityGroupUuid = sg_uuid\r\n    action.vmNicUuids = vm_nic_list\r\n    action.timeout = 120000\r\n    test_util.action_logger('Add [Nics:] %s to [Security Group:] %s' \\\r\n            % (vm_nic_list, sg_uuid))\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt\r\n\r\ndef remove_nic_from_security_group(sg_uuid, nic_uuid_list, session_uuid=None):\r\n    action = api_actions.DeleteVmNicFromSecurityGroupAction()\r\n    action.securityGroupUuid = sg_uuid\r\n    action.vmNicUuids = nic_uuid_list\r\n    action.timeout = 12000\r\n    test_util.action_logger('Delete [Nics:] %s From [Security Group:] %s' \\\r\n            % (nic_uuid_list, sg_uuid))\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt\r\n\r\ndef attach_security_group_to_l3(sg_uuid, l3_uuid, session_uuid=None):\r\n    action = api_actions.AttachSecurityGroupToL3NetworkAction()\r\n    action.securityGroupUuid = sg_uuid\r\n    action.l3NetworkUuid = l3_uuid\r\n    action.timeout = 12000\r\n    test_util.action_logger('Attach [Security Group:] %s to [l3:] %s' % (sg_uuid, l3_uuid))\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt\r\n\r\ndef detach_security_group_from_l3(sg_uuid, l3_uuid, session_uuid=None):\r\n    action = api_actions.DetachSecurityGroupFromL3NetworkAction()\r\n    action.l3NetworkUuid = l3_uuid\r\n    action.securityGroupUuid = sg_uuid\r\n    action.timeout = 12000\r\n    test_util.action_logger('Detach [Security Group:] %s from [l3:] %s' % (sg_uuid, l3_uuid))\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    return evt\r\n\r\ndef create_vip(vip_creation_option):\r\n    action = api_actions.CreateVipAction()\r\n    action.l3NetworkUuid = vip_creation_option.get_l3_uuid()\r\n    action.allocateStrategy = vip_creation_option.get_allocateStrategy()\r\n    action.timeout = vip_creation_option.get_timeout()\r\n    #mandatory vip name:\r\n    name = vip_creation_option.get_name()\r\n    if not name:\r\n        action.name = 'vip_test'\r\n    else:\r\n
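        # annotation (added; not in the original source): a caller-supplied\r\n        # name takes precedence; 'vip_test' above is only a fallback default.\r\n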
action.name = name\r\n\r\n session_uuid = vip_creation_option.get_session_uuid()\r\n action.description = vip_creation_option.get_description()\r\n evt = acc_ops.execute_action_with_session(action, session_uuid).inventory\r\n test_util.action_logger('Create [VIP:] %s [IP:] %s in [l3:] %s' % (evt.uuid, evt.ip, action.l3NetworkUuid))\r\n return evt\r\n\r\ndef delete_vip(vip_uuid, session_uuid=None):\r\n action = api_actions.DeleteVipAction()\r\n action.uuid = vip_uuid\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n test_util.action_logger(\"[VIP]: %s is deleted\" % vip_uuid)\r\n return evt\r\n\r\ndef create_port_forwarding(pf_rule_creation_option):\r\n action = api_actions.CreatePortForwardingRuleAction()\r\n action.name = pf_rule_creation_option.get_name()\r\n if not action.name:\r\n action.name = 'test_port_forwarding_rule'\r\n\r\n action.timeout = pf_rule_creation_option.get_timeout()\r\n if not action.timeout:\r\n action.timeout = 12000\r\n\r\n action.description = pf_rule_creation_option.get_description()\r\n session_uuid = pf_rule_creation_option.get_session_uuid()\r\n\r\n action.vipPortStart, action.vipPortEnd = pf_rule_creation_option.get_vip_ports()\r\n action.privatePortStart, action.privatePortEnd = pf_rule_creation_option.get_private_ports()\r\n if not action.privatePortStart:\r\n action.privatePortStart = action.vipPortStart\r\n action.privatePortEnd = action.vipPortEnd\r\n\r\n action.vipUuid = pf_rule_creation_option.get_vip_uuid()\r\n action.vmNicUuid = pf_rule_creation_option.get_vm_nic_uuid()\r\n action.allowedCidr = pf_rule_creation_option.get_allowedCidr()\r\n action.protocolType = pf_rule_creation_option.get_protocol()\r\n test_util.action_logger(\"Create Port Forwarding Rule: [vipUuid:] %s [vm nic:] %s [vip start:] %s [vip end:] %s [pri start:] %s [pri end:] %s [allowedCidr:] %s\" % (action.vipUuid, action.vmNicUuid, action.vipPortStart, action.vipPortEnd, action.privatePortStart, action.privatePortEnd, action.allowedCidr))\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n return evt.inventory\r\n\r\ndef delete_port_forwarding(pf_rule_uuid, session_uuid=None):\r\n action = api_actions.DeletePortForwardingRuleAction()\r\n action.uuid = pf_rule_uuid\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n test_util.action_logger(\"Port Forwarding Rule [uuid:] %s is deleted\" % pf_rule_uuid)\r\n return evt\r\n\r\ndef attach_port_forwarding(pf_rule_uuid, vm_nic_uuid, session_uuid=None):\r\n action = api_actions.AttachPortForwardingRuleAction()\r\n action.ruleUuid = pf_rule_uuid\r\n action.vmNicUuid = vm_nic_uuid\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n test_util.action_logger(\"Port Forwarding Rule [uuid:] %s is attached to %s\" % (pf_rule_uuid, vm_nic_uuid))\r\n return evt.inventory\r\n\r\ndef detach_port_forwarding(pf_rule_uuid, session_uuid=None):\r\n action = api_actions.DetachPortForwardingRuleAction()\r\n action.uuid = pf_rule_uuid\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n test_util.action_logger(\"Port Forwarding Rule [uuid:] %s is detached\" % pf_rule_uuid)\r\n return evt.inventory\r\n\r\ndef create_eip(eip_creation_option):\r\n action = api_actions.CreateEipAction()\r\n action.vipUuid = eip_creation_option.get_vip_uuid()\r\n action.vmNicUuid = eip_creation_option.get_vm_nic_uuid()\r\n action.name = eip_creation_option.get_name()\r\n if not action.name:\r\n action.name = 'eip test'\r\n action.description = eip_creation_option.get_description()\r\n 
action.timeout = eip_creation_option.get_timeout()\r\n    if not action.timeout:\r\n        action.timeout = 12000\r\n\r\n    session_uuid = eip_creation_option.get_session_uuid()\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[EIP:] %s is created, with [vip:] %s and [nic:] %s" % (evt.inventory.uuid, action.vipUuid, action.vmNicUuid))\r\n    return evt.inventory\r\n\r\ndef attach_eip(eip_uuid, nic_uuid, session_uuid=None):\r\n    action = api_actions.AttachEipAction()\r\n    action.eipUuid = eip_uuid\r\n    action.vmNicUuid = nic_uuid\r\n    action.timeout = 12000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[EIP:] %s is attached to [nic:] %s" % (eip_uuid, nic_uuid))\r\n    return evt.inventory\r\n\r\ndef detach_eip(eip_uuid, session_uuid=None):\r\n    action = api_actions.DetachEipAction()\r\n    action.uuid = eip_uuid\r\n    action.timeout = 12000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[EIP:] %s is detached" % eip_uuid)\r\n    return evt.inventory\r\n\r\ndef delete_eip(eip_uuid, session_uuid=None):\r\n    action = api_actions.DeleteEipAction()\r\n    action.uuid = eip_uuid\r\n    action.timeout = 12000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[EIP:] %s is deleted" % eip_uuid)\r\n    return evt\r\n    \r\n\r\ndef delete_l2(l2_uuid, session_uuid = None):\r\n    '''\r\n    Deleting an L2 will stop all VMs that are using this L2. When a VM is\r\n    started again, the related L2 NIC will be removed.\r\n    '''\r\n    action = api_actions.DeleteL2NetworkAction()\r\n    action.uuid = l2_uuid\r\n    action.timeout = 300000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[L2:] %s is deleted" % l2_uuid)\r\n    return evt\r\n\r\ndef delete_l3(l3_uuid, session_uuid = None):\r\n    '''\r\n    Deleting an L3 will stop all VMs that are using this L3. When a VM is\r\n    started again, the related L3 NIC will be removed.
\r\n    '''\r\n    action = api_actions.DeleteL3NetworkAction()\r\n    action.uuid = l3_uuid\r\n    action.timeout = 300000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[L3:] %s is deleted" % l3_uuid)\r\n    return evt\r\n\r\ndef attach_l2(l2_uuid, cluster_uuid, session_uuid = None):\r\n    action = api_actions.AttachL2NetworkToClusterAction()\r\n    action.clusterUuid = cluster_uuid\r\n    action.l2NetworkUuid = l2_uuid\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("Attach [L2:] %s to [Cluster:] %s " \\\r\n            % (l2_uuid, cluster_uuid))\r\n    return evt\r\n\r\ndef add_l2_resource(deploy_config, l2_name, zone_name = None, \\\r\n        session_uuid = None):\r\n    session_uuid_flag = True\r\n    if not session_uuid:\r\n        session_uuid = acc_ops.login_as_admin()\r\n        session_uuid_flag = False\r\n    try:\r\n        dep_ops.add_l2_network(deploy_config, session_uuid, l2_name, \\\r\n                zone_name = zone_name)\r\n        l2_uuid = res_ops.get_resource(res_ops.L2_NETWORK, session_uuid, \\\r\n                name = l2_name)[0].uuid\r\n        \r\n        for zone in xmlobject.safe_list(deploy_config.zones.zone):\r\n            if zone_name and zone_name != zone.name_:\r\n                continue\r\n            for cluster in xmlobject.safe_list(zone.clusters.cluster):\r\n                if xmlobject.has_element(cluster, 'l2NetworkRef'):\r\n                    for l2ref in xmlobject.safe_list(cluster.l2NetworkRef):\r\n                        if l2_name != l2ref.text_:\r\n                            continue\r\n\r\n                        cluster_uuid = res_ops.get_resource(res_ops.CLUSTER, \\\r\n                                session_uuid, name=cluster.name_)[0].uuid\r\n                        attach_l2(l2_uuid, cluster_uuid, session_uuid)\r\n\r\n        dep_ops.add_l3_network(deploy_config, session_uuid, l2_name = l2_name, \\\r\n                zone_name = zone_name)\r\n        cond = res_ops.gen_query_conditions('l2NetworkUuid', '=', l2_uuid)\r\n        l3_name = res_ops.query_resource(res_ops.L3_NETWORK, cond, \\\r\n                session_uuid)[0].name\r\n        dep_ops.add_virtual_router(deploy_config, session_uuid, \\\r\n                l3_name = l3_name, zone_name = zone_name)\r\n    except Exception as e:\r\n        test_util.test_logger('[Error] zstack deployment hit an exception when adding an l2 resource.')\r\n        traceback.print_exc(file=sys.stdout)\r\n        raise e\r\n    finally:\r\n        if not session_uuid_flag:\r\n            acc_ops.logout(session_uuid)\r\n\r\n    test_util.action_logger('Completed adding l2 resources for [uuid:] %s' \\\r\n            % l2_uuid)\r\n\r\ndef add_l3_resource(deploy_config, l3_name, l2_name = None, zone_name = None, \\\r\n        session_uuid = None):\r\n    session_uuid_flag = True\r\n    if not session_uuid:\r\n        session_uuid = acc_ops.login_as_admin()\r\n        session_uuid_flag = False\r\n    try:\r\n        dep_ops.add_l3_network(deploy_config, session_uuid, l3_name = l3_name, \\\r\n                l2_name = l2_name, zone_name = zone_name)\r\n        dep_ops.add_virtual_router(deploy_config, session_uuid, \\\r\n                l3_name = l3_name, zone_name = zone_name)\r\n        l3_uuid = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, \\\r\n                name = l3_name)[0].uuid\r\n    except Exception as e:\r\n        test_util.test_logger('[Error] zstack deployment hit an exception when adding an l3 resource.')\r\n        traceback.print_exc(file=sys.stdout)\r\n        raise e\r\n    finally:\r\n        if not session_uuid_flag:\r\n            acc_ops.logout(session_uuid)\r\n\r\n    test_util.action_logger('Completed adding l3 resources for [uuid:] %s' \\\r\n            % l3_uuid)\r\n\r\ndef delete_ip_range(ip_range_uuid, session_uuid = None):\r\n    action = api_actions.DeleteIpRangeAction()\r\n    action.sessionUuid = session_uuid\r\n    action.uuid = ip_range_uuid\r\n    action.timeout = 300000\r\n    evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n    test_util.action_logger("[IP Range:] 
%s is deleted\" % ip_range_uuid)\r\n return evt\r\n\r\ndef add_ip_range(ip_range_option, session_uuid = None):\r\n action = api_actions.AddIpRangeAction()\r\n action.sessionUuid = session_uuid\r\n action.timeout = 30000\r\n action.name = ip_range_option.get_name()\r\n action.startIp = ip_range_option.get_startIp()\r\n action.endIp = ip_range_option.get_endIp()\r\n action.netmask = ip_range_option.get_netmask()\r\n action.gateway = ip_range_option.get_gateway()\r\n action.l3NetworkUuid = ip_range_option.get_l3_uuid()\r\n action.description = ip_range_option.get_description()\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n test_util.action_logger(\"[IP Range:] %s is add\" % evt.inventory.uuid)\r\n return evt.inventory\r\n\r\ndef detach_l2(l2_uuid, cluster_uuid, session_uuid = None):\r\n action = api_actions.DetachL2NetworkFromClusterAction()\r\n action.sessionUuid = session_uuid\r\n action.timeout = 90000\r\n action.l2NetworkUuid = l2_uuid\r\n action.clusterUuid = cluster_uuid\r\n test_util.action_logger('Detach [l2:] %s from [cluster:] %s' % \\\r\n (l2_uuid, cluster_uuid))\r\n evt = acc_ops.execute_action_with_session(action, session_uuid)\r\n return evt\r\n\r\ndef get_ip_capacity_by_l3s(l3_network_list):\r\n action = api_actions.GetIpAddressCapacityAction()\r\n action.l3NetworkUuids = l3_network_list\r\n evt = acc_ops.execute_action_with_session(action, None)\r\n return evt\r\n \r\n","repo_name":"hyhhui/zstack-woodpecker","sub_path":"zstackwoodpecker/zstackwoodpecker/operations/net_operations.py","file_name":"net_operations.py","file_ext":"py","file_size_in_byte":15953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10510225439","text":"\"\"\"AXL addUser/addLine/addPhone sample script, using the Zeep SOAP library\r\nCopyright (c) 2018 Cisco and/or its affiliates.\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\n# This was tested and used on CUCM 11.5\r\n# Note you must have the schema folder/files and CUCM .pem certs in the same location as the script\r\n# AXLAPI.wsdl\r\n# AXLEnums.xsd\r\n# AXLSoap.xsd\r\n\r\nfrom lxml import etree\r\nfrom requests import Session\r\nfrom requests.auth import HTTPBasicAuth\r\nfrom getpass import getpass\r\n\r\nimport keyring\r\n\r\nfrom zeep import Client, Settings, Plugin, xsd\r\nfrom zeep.transports import Transport\r\nfrom zeep.exceptions import Fault\r\nimport sys\r\nimport time\r\nimport logging\r\nfrom progress.spinner import Spinner\r\nimport re\r\n\r\n# Use keyring to store username/passwords in Windows credential manager\r\nresetCredentials = False\r\ncucmusername = ''\r\n\r\n\r\ndef _setup_cucm_username():\r\n    global cucmusername\r\n    # Change username & password variables as you see fit\r\n    cucmusername = keyring.get_password("username", "username")\r\n    if cucmusername is None or cucmusername == "" or resetCredentials is True:\r\n        print()\r\n        print("No CUCM username found in local credential manager. Let's add it.")\r\n        print()\r\n        cucmusername = getpass(prompt="Please enter your CUCM username: Note: you will not see what's being typed: ")\r\n        if cucmusername is None or cucmusername == "":\r\n            print("No username entered. Goodbye.")\r\n            sys.exit(1)\r\n        else:\r\n            keyring.set_password("username", "username", cucmusername)\r\n            print('Added AM CUCM username to the local credential manager under "username."')\r\n\r\n\r\n_setup_cucm_username()\r\n\r\n\r\ncucmpassword = ''\r\n\r\n\r\ndef _setup_cucm_pw():\r\n    global cucmpassword\r\n    global resetCredentials\r\n    cucmpassword = keyring.get_password("cucmpassword", "cucmpassword")\r\n    if cucmpassword is None or cucmpassword == "" or resetCredentials is True:\r\n        print()\r\n        print("No CUCM password found in local credential manager. Let's add it.")\r\n        print()\r\n        cucmpassword = getpass(prompt="Please enter your CUCM password: ")\r\n        if cucmpassword is None or cucmpassword == "":\r\n            print("No password entered. 
Goodbye.\")\r\n sys.exit(1)\r\n else:\r\n keyring.set_password(\"cucmpassword\", \"cucmpassword\", cucmpassword)\r\n resetCredentials = False\r\n print('Added AM CUCM password to the local credential manager under \"cucmpassword.\"')\r\n\r\n\r\n_setup_cucm_pw()\r\n\r\n# Set up logging\r\nlog = \"standard.log\"\r\nlogging.basicConfig(filename='standard.log', level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S')\r\nlogging.info(cucmusername + ' started running the script')\r\n\r\n# CUCM Menu\r\nsessionCert = ''\r\nserverUrl = ''\r\nuserEnteredRegion = ''\r\nlocationMenu = {}\r\nlocationMenu[\"\\n1\"] = 'US'\r\nlocationMenu[\"2\"] = 'Europe'\r\nlocationMenu[\"3\"] = 'APAC'\r\nwhile True:\r\n options = locationMenu.keys()\r\n for entry in options:\r\n print(entry, locationMenu[entry])\r\n\r\n selection = input(\"Please select regional CUCM you'd like to work with: \")\r\n if selection == \"1\":\r\n userEnteredRegion = 'US'\r\n sessionCert = 'us-cert-chain.pem'\r\n serverUrl = 'https://insertUSCUCMURL:8443/axl/'\r\n break\r\n elif selection == \"2\":\r\n userEnteredRegion = 'Europe'\r\n sessionCert = 'europe-cert-chain.pem'\r\n serverUrl = 'https://insertEuropeCUCMURL:8443/axl/'\r\n break\r\n elif selection == \"3\":\r\n userEnteredRegion = 'APAC'\r\n sessionCert = 'apac-cert-chain.pem'\r\n serverUrl = 'https://insertAPACCUCMURL.net:8443/axl/'\r\n break\r\n\r\n# Location Menu\r\ndevicePoolName = None\r\nlocationName = None\r\ncallingSearchSpaceName = None\r\ncallForwardAll = None\r\nexternalMask = None\r\nif userEnteredRegion == 'US':\r\n # US Location Menu\r\n # Set cluster specific variables\r\n commonDeviceConfigName = 'US-PHONES'\r\n softkeyTemplateName = 'Standard User'\r\n userLocale = 'English United States'\r\n routePartitionName = 'ALL_IPPhones'\r\n locationMenu = {}\r\n locationMenu[\"\\n1\"] = 'Location 1'\r\n locationMenu[\"2\"] = 'Location 2'\r\n # you get the idea\r\n\r\n while True:\r\n options = locationMenu.keys()\r\n for entry in options:\r\n print(entry, locationMenu[entry])\r\n\r\n selection = input(\"Please select the location for this phone setup: \")\r\n if selection == \"1\":\r\n devicePoolName = 'LOCATION1_PHONES'\r\n locationName = 'LOCATION1'\r\n callingSearchSpaceName = 'LOCATION1_INTERNATIONAL'\r\n callForwardAll = {'callingSearchSpaceName': 'LOCATION1_CFA_CSS'}\r\n break\r\n elif selection == \"2\":\r\n devicePoolName = 'LOCATION2_PHONES'\r\n locationName = 'LOCATION2'\r\n callingSearchSpaceName = 'LOCATION2_LONG_DISTANCE'\r\n callForwardAll = {'callingSearchSpaceName': 'LOCATION2_CFA_CSS'}\r\n break\r\n # you get the idea\r\n\r\nelif userEnteredRegion == 'Europe':\r\n # Europe location menu\r\n userLocale = None\r\n softkeyTemplateName = 'Standard User'\r\n routePartitionName = 'CLUSTER-DN'\r\n locationMenu = {}\r\n locationMenu[\"\\n1\"] = 'Denmark'\r\n locationMenu[\"2\"] = 'Germany'\r\n # you get the idea\r\n\r\n while True:\r\n options = locationMenu.keys()\r\n for entry in options:\r\n print(entry, locationMenu[entry])\r\n\r\n selection = input(\"Please select the location for this phone setup: \")\r\n if selection == \"1\":\r\n devicePoolName = 'DENMARK-PHONES'\r\n commonDeviceConfigName = 'DENMARK-PHONES'\r\n locationName = 'DENMARK'\r\n callingSearchSpaceName = 'DEVICE-DENMARK-UNRESTRICTED'\r\n userLocale = 'Danish Denmark'\r\n callForwardAll = {'callingSearchSpaceName': 'CW-INTERNAL'}\r\n print('\\nDenmark set to allow internal forwarding only (Forward All CSS = CW-INTERNAL)')\r\n # networkLocale = 'Denmark' # shouldn't need 
this as it's set on device pool\r\n            break\r\n        elif selection == "2":\r\n            devicePoolName = 'GERMANY-PHONES'\r\n            commonDeviceConfigName = 'GERMANY-PHONES'\r\n            locationName = 'GERMANY'\r\n            callingSearchSpaceName = 'DEVICE-GERMANY-UNRESTRICTED'\r\n            userLocale = 'German Germany'\r\n            callForwardAll = {'callingSearchSpaceName': 'CW-INTERNAL'}\r\n            print('\nGermany set to allow internal forwarding only (Forward All CSS = CW-INTERNAL)')\r\n            break\r\n        # you get the idea\r\n\r\nelif userEnteredRegion == 'APAC':\r\n    # APAC menu for agent location\r\n    userLocale = None\r\n    softkeyTemplateName = 'CUSTOM User'\r\n    routePartitionName = 'SYSTEM-CLUSTER-DN'\r\n    locationMenu = {}\r\n    locationMenu["\n1"] = 'Australia'\r\n    locationMenu["2"] = 'Japan'\r\n    # you get the idea\r\n\r\n    while True:\r\n        options = locationMenu.keys()\r\n        for entry in options:\r\n            print(entry, locationMenu[entry])\r\n\r\n        selection = input("Please select the location for this phone setup: ")\r\n        if selection == "1":\r\n            devicePoolName = 'AUSTRALIA-PHONES'\r\n            commonDeviceConfigName = 'AUSTRALIA-PHONES'\r\n            locationName = 'AUSTRALIA'\r\n            callingSearchSpaceName = 'AUSTRALIA-UNRESTRICTED'\r\n            userLocale = 'English United States'\r\n            softkeyTemplateName = 'CUSTOM AUSTRALIA User'\r\n            callForwardAll = {'callingSearchSpaceName': 'SYSTEM-CW-INTERNAL'}\r\n            print('\nAustralia set to allow internal forwarding only (Forward All CSS = SYSTEM-CW-INTERNAL)')\r\n            break\r\n        elif selection == "2":\r\n            devicePoolName = 'JAPAN-PHONES'\r\n            commonDeviceConfigName = 'JAPAN-PHONES'\r\n            locationName = 'JAPAN'\r\n            callingSearchSpaceName = 'JAPAN-UNRESTRICTED'\r\n            userLocale = 'Japanese Japan'\r\n            callForwardAll = {'callingSearchSpaceName': 'SYSTEM-CW-INTERNAL'}\r\n            print('\nJapan set to allow internal forwarding only (Forward All CSS = SYSTEM-CW-INTERNAL)')\r\n            break\r\n\r\n\r\ndef _get_Phone_Mac_Address():\r\n    global phoneMac\r\n    global deskPhoneDeviceName\r\n    phoneMac = input("\nPlease enter the MAC address of the desk phone (Quit if there's no desk phone): ")\r\n    if re.match("[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", phoneMac.lower()):\r\n        # regex replacement of - & :\r\n        phoneMac = re.sub(r'[:-]', '', phoneMac)\r\n        deskPhoneDeviceName = 'SEP' + phoneMac.upper()\r\n    else:\r\n        print('Invalid MAC address, please try again.')\r\n        _get_Phone_Mac_Address()\r\n\r\n\r\n# Get Prereqs / input data\r\nphoneUsername = input("\nPlease enter the username of the person for the phone setup: ")\r\nphoneExt = input("\nPlease enter the extension for the phone setup: ")\r\n# Menu for type of phone build Jabber/Deskphone/CIPC etc.\r\nuserEnteredPhoneModel = ''\r\njabberOnly = False\r\ndeskPhoneOnly = False\r\nphoneMac = None\r\ndeskPhoneDeviceName = None\r\nphoneBuildMenu = {}\r\nphoneBuildMenu["\n1"] = 'Desk Phone Only'\r\nphoneBuildMenu["2"] = 'Jabber Only'\r\nphoneBuildMenu["3"] = 'Desk Phone & Jabber'\r\nwhile True:\r\n    options = phoneBuildMenu.keys()\r\n    for entry in options:\r\n        print(entry, phoneBuildMenu[entry])\r\n    selection = input("Please select the type of phone build: ")\r\n    if selection == "1":\r\n        deskPhoneOnly = True\r\n        userEnteredPhoneModel = input("\nPlease enter the model of the desk phone (e.g. 7942): ")\r\n        # replaced simple input with function to validate mac address\r\n        _get_Phone_Mac_Address()\r\n        break\r\n    elif selection == "2":\r\n        jabberOnly = True\r\n        break\r\n    elif selection == "3":\r\n        userEnteredPhoneModel = input("\nPlease enter the model of the desk phone (e.g. 
7942): \")\r\n phoneMac = input(\"\\nPlease enter the MAC address of the desk phone (Quit if there's no desk phone): \")\r\n deskPhoneDeviceName = 'SEP' + phoneMac\r\n break\r\n\r\n\r\ndef _setup_connection():\r\n # Setup SOAP/AXL/HTTPS Connection\r\n global service\r\n # Change to true to enable output of request/response headers and XML\r\n DEBUG = False\r\n\r\n # The WSDL is a local file in the working directory, see README\r\n WSDL_FILE = 'schema/AXLAPI.wsdl'\r\n\r\n # This class lets you view the incoming and outgoing http headers and XML\r\n\r\n class MyLoggingPlugin(Plugin):\r\n\r\n def egress(self, envelope, http_headers, operation, binding_options):\r\n\r\n # Format the request body as pretty printed XML\r\n xml = etree.tostring(envelope, pretty_print=True, encoding='unicode')\r\n\r\n print(f'\\nRequest\\n-------\\nHeaders:\\n{http_headers}\\n\\nBody:\\n{xml}')\r\n\r\n def ingress(self, envelope, http_headers, operation):\r\n\r\n # Format the response body as pretty printed XML\r\n xml = etree.tostring(envelope, pretty_print=True, encoding='unicode')\r\n\r\n print(f'\\nResponse\\n-------\\nHeaders:\\n{http_headers}\\n\\nBody:\\n{xml}')\r\n\r\n\r\n # The first step is to create a SOAP client session\r\n session = Session()\r\n\r\n # We avoid certificate verification by default\r\n # session.verify = False\r\n\r\n # To enabled SSL cert checking (recommended for production)\r\n # place the CUCM Tomcat cert .pem file in the root of the project\r\n # and uncomment the line below\r\n\r\n session.verify = sessionCert\r\n\r\n # Add Basic Auth credentials\r\n session.auth = HTTPBasicAuth(cucmusername, cucmpassword)\r\n\r\n # Create a Zeep transport and set a reasonable timeout value\r\n transport = Transport(session=session, timeout=10)\r\n\r\n # strict=False is not always necessary, but it allows zeep to parse imperfect XML\r\n settings = Settings(strict=False, xml_huge_tree=True)\r\n\r\n # If debug output is requested, add the MyLoggingPlugin callback\r\n plugin = [MyLoggingPlugin()] if DEBUG else [ ]\r\n\r\n # Create the Zeep client with the specified settings\r\n client = Client(WSDL_FILE, settings=settings, transport=transport,\r\n plugins=plugin)\r\n\r\n # FUTURE create CUCM chooser menu\r\n\r\n # Create the Zeep service binding to AXL at the specified CUCM\r\n service = client.create_service('{http://www.cisco.com/AXLAPIService/}AXLAPIBinding', serverUrl)\r\n\r\n\r\n_setup_connection()\r\n\r\n\r\ndef _check_for_existing_setup():\r\n global resetCredentials\r\n global associatedDevices\r\n # Find out if there's an existing phone/extension and if it's associated with the line\r\n try:\r\n lineResp = service.getLine(pattern=phoneExt, routePartitionName=routePartitionName)\r\n # Unpack line dict and get associatedDevices. Format: userDetails = rawresp['return'].user\r\n associatedDevices = lineResp['return'].line.associatedDevices\r\n except Fault as err:\r\n if str(err) == 'Item not valid: The specified Line was not found':\r\n # Extension doesn't exist either error out or ask to create\r\n # print(f'Zeep error: getLine: { err }')\r\n logging.warning(cucmusername + ' Extension does not exist')\r\n userResp = input('\\nThis extension does not exist, would you like to create? 
(y/n) ')\r\n            if userResp == 'y' or userResp == 'Y':\r\n                # FUTURE create ext.\r\n                input('\nCode to create new extension has not been implemented, exiting.')\r\n                sys.exit(1)\r\n            else:\r\n                input('\nPress Enter to quit.')\r\n                sys.exit(1)\r\n        elif str(err) == 'Unknown fault occured':  # (sic) left as-is: compared against the fault text this code expects from AXL\r\n            # Wrong credentials?\r\n            logging.error(cucmusername + ' ' + str(err))\r\n            print(f'Zeep error: getLine: { err }')\r\n            resetCredentialMenu = {}\r\n            resetCredentialMenu["\n1"] = 'Try username & password again'\r\n            resetCredentialMenu["2"] = 'Quit'\r\n            while True:\r\n                options = resetCredentialMenu.keys()\r\n                for entry in options:\r\n                    print(entry, resetCredentialMenu[entry])\r\n                selection = input("Error authenticating or unknown error. Choose an option above: ")\r\n                if selection == "1":\r\n                    resetCredentials = True\r\n                    _setup_cucm_username()\r\n                    _setup_cucm_pw()\r\n                    _setup_connection()\r\n                    _check_for_existing_setup()\r\n                    break\r\n                elif selection == "2":\r\n                    sys.exit(1)\r\n                    break\r\n        else:\r\n            logging.error(cucmusername + ' ' + str(err))\r\n            print(f'Zeep error: getLine: { err }')\r\n            input('\nCheck the error above and consult admin if needed, exiting.')\r\n            sys.exit(1)\r\n\r\n\r\n_check_for_existing_setup()\r\n\r\n# Assume the extension exists and continue to create the phone/device profile\r\n# Ask if you want to blow it away or quit\r\nif associatedDevices is None or associatedDevices == "":\r\n    print('\nNo devices associated with this line, continuing setup...')\r\nelse:\r\n\r\n    print(associatedDevices)\r\n    existingDeviceMenu = {}\r\n    existingDeviceMenu["\n1"] = 'CONTINUE: I am adding a device to an existing user that already has a phone.'\r\n    existingDeviceMenu["2"] = 'DELETE: Remove or re-use this phone (This will delete all phones above. WARNING: All previous configuration will be lost.)'\r\n    existingDeviceMenu["3"] = 'QUIT (check the device name/mac address or clean things up manually in CUCM.)'\r\n    while True:\r\n        options = existingDeviceMenu.keys()\r\n        for entry in options:\r\n            print(entry, existingDeviceMenu[entry])\r\n        selection = input('A device already exists with this extension. What would you like to do? ')\r\n        if selection == "1":\r\n            break\r\n        # need to test this code\r\n        elif selection == "2":\r\n            service.removePhone(name=associatedDevices)\r\n            print('Deleted these devices:')\r\n            print(associatedDevices)\r\n            break\r\n        elif selection == "3":\r\n            sys.exit(1)\r\n            break\r\n\r\n# Force LDAP Sync to pull names instead of prompting for them\r\ntry:\r\n    resp = service.doLdapSync(name='LDAP', sync='true')\r\nexcept Fault as err:\r\n    logging.error(cucmusername + ' ' + str(err))\r\n    print(f'Zeep error: doLdapSync: { err }')\r\n    input('\n Press Enter to quit.')\r\n    sys.exit(1)\r\n# Poll the LDAP sync status before proceeding. Wait 1 second first, otherwise the check can run before the sync has registered as complete\r\nspinner = Spinner('LDAP Syncing. This may take 1-2 minutes... 
')\r\ntime.sleep(1)\r\nldapSyncStatus = ''\r\nwhile ldapSyncStatus != 'Sync is performed successfully':\r\n try:\r\n resp = service.getLdapSyncStatus(name='LDAP')\r\n ldapSyncStatus = resp['return']\r\n except Fault as err:\r\n logging.error(cucmusername + ' ' + str(err))\r\n print(f'Zeep error: doLdapSync: { err }')\r\n spinner.next()\r\n time.sleep(1)\r\nprint(' LDAP Sync Successful...\\n')\r\n\r\n# Execute the getUser request\r\ntry:\r\n getUserResponse = service.getUser(userid=phoneUsername)\r\n userDetails = getUserResponse['return'].user\r\n phoneLname = userDetails.lastName\r\n phoneFname = userDetails.firstName\r\nexcept Fault as err:\r\n logging.error(cucmusername + ' ' + str(err))\r\n print(f'Zeep error: getUser: { err }')\r\n input('\\n Press Enter to quit.')\r\n sys.exit(1)\r\n\r\n# Normalize some data\r\njabberDeviceName = 'csf' + phoneUsername\r\nphoneDescription = phoneFname + ' ' + phoneLname + ' - ' + phoneExt\r\nphoneDisplay = phoneLname + ', ' + phoneFname[0]\r\nphoneModel = 'Cisco ' + userEnteredPhoneModel\r\n\r\nlogging.info(cucmusername + ' setting up ' + phoneUsername + ' ' + phoneDescription + ' in ' + locationName)\r\ninput('Continue phone build for ' + phoneDescription + ' in ' + locationName + ' ? (Press Ctrl + C to quit. Press Enter to continue...)')\r\n\r\n\r\ndef _setup_Jabber():\r\n # Create the data for adding Jabber, associating the Line\r\n phone = {\r\n 'name': jabberDeviceName,\r\n 'description': phoneDescription,\r\n 'product': 'Cisco Unified Client Services Framework',\r\n 'model': 'Cisco Unified Client Services Framework',\r\n 'class': 'Phone',\r\n 'protocol': 'SIP',\r\n 'protocolSide': 'User',\r\n 'devicePoolName': devicePoolName,\r\n 'commonDeviceConfigName': commonDeviceConfigName,\r\n 'phoneTemplateName': 'Standard Client Services Framework',\r\n 'commonPhoneConfigName': 'Standard Common Phone Profile',\r\n 'locationName': locationName,\r\n 'useTrustedRelayPoint': 'Default',\r\n 'builtInBridgeStatus': 'Default',\r\n 'deviceMobilityMode': 'Default',\r\n 'callingSearchSpaceName': callingSearchSpaceName,\r\n 'retryVideoCallAsAudio': 'true',\r\n 'allowCtiControlFlag': 'true',\r\n 'hlogStatus': 'On',\r\n 'packetCaptureMode': 'None',\r\n 'certificateOperation': 'No Pending Operation',\r\n 'enableExtensionMobility': 'true',\r\n # Add line\r\n 'lines': {\r\n 'line': [\r\n {\r\n 'index': 1,\r\n 'label': phoneDescription, # Line text label\r\n 'dirn': {\r\n 'pattern': phoneExt,\r\n 'routePartitionName': routePartitionName,\r\n },\r\n 'display': phoneDisplay,\r\n 'displayAscii': phoneDisplay,\r\n 'e164Mask': externalMask, # Used in specific locations\r\n 'associatedEndusers': {\r\n 'enduser': [\r\n {\r\n 'userId': phoneUsername\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n },\r\n 'securityProfileName': 'Cisco Unified Client Services Framework - Standard SIP Non-Secure Profile'\r\n }\r\n # Execute the addPhone request\r\n try:\r\n resp = service.addPhone(phone)\r\n logging.info(cucmusername + ' created Jabber device ' + jabberDeviceName)\r\n\r\n except Fault as err:\r\n logging.error(cucmusername + ' ' + str(err))\r\n print(f'Zeep error: addPhone: { err }')\r\n input('\\n Press Enter to quit.')\r\n sys.exit(1)\r\n # print('\\naddPhone response:\\n')\r\n # print(resp, '\\n')\r\n input('Jabber creation complete. 
Press Enter to continue...')\r\n\r\n    # Execute the updateLine request\r\n    try:\r\n        resp = service.updateLine(\r\n            pattern=phoneExt,\r\n            routePartitionName=routePartitionName,\r\n            alertingName=phoneDisplay,\r\n            asciiAlertingName=phoneDisplay,\r\n            description=phoneDescription,\r\n            callForwardAll=callForwardAll\r\n        )\r\n\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: updateLine: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    # print('\nupdateLine response:\n')\r\n    # print(resp, '\n')\r\n    input('Line Updated. Press Enter to continue...')\r\n\r\n\r\ndef _setup_desk_phone():\r\n    # may need to add more models to differentiate SIP/SCCP\r\n    if phoneModel.startswith('Cisco 78') or phoneModel.startswith('Cisco 88'):\r\n        protocol = 'SIP'\r\n        securityProfileName = phoneModel + ' - Standard SIP Non-Secure Profile'\r\n    else:\r\n        protocol = 'SCCP'\r\n        securityProfileName = None\r\n    if userEnteredPhoneModel == '7942' or userEnteredPhoneModel == '7962':\r\n        phoneTemplateName = 'Standard ' + userEnteredPhoneModel + 'G ' + protocol\r\n    else:\r\n        phoneTemplateName = 'Standard ' + userEnteredPhoneModel + ' ' + protocol\r\n    phone = {\r\n        'name': deskPhoneDeviceName,\r\n        'description': phoneDescription,\r\n        'product': phoneModel,\r\n        'model': phoneModel,\r\n        'class': 'Phone',\r\n        'protocol': protocol,\r\n        'protocolSide': 'User',\r\n        'devicePoolName': devicePoolName,\r\n        'commonDeviceConfigName': commonDeviceConfigName,\r\n        'phoneTemplateName': phoneTemplateName,\r\n        'softkeyTemplateName': softkeyTemplateName,\r\n        'commonPhoneConfigName': 'Standard Common Phone Profile',\r\n        'locationName': locationName,\r\n        'useTrustedRelayPoint': 'Default',\r\n        'builtInBridgeStatus': 'Default',\r\n        'deviceMobilityMode': 'Default',\r\n        'callingSearchSpaceName': callingSearchSpaceName,\r\n        'retryVideoCallAsAudio': 'true',\r\n        'allowCtiControlFlag': 'true',\r\n        'hlogStatus': 'On',\r\n        'packetCaptureMode': 'None',\r\n        'certificateOperation': 'No Pending Operation',\r\n        'enableExtensionMobility': 'true',\r\n        'securityProfileName': securityProfileName,\r\n        # Add line\r\n        'lines': {\r\n            'line': [\r\n                {\r\n                    'index': 1,\r\n                    'label': phoneDescription,  # Line text label\r\n                    'dirn': {\r\n                        'pattern': phoneExt,\r\n                        'routePartitionName': routePartitionName,\r\n                    },\r\n                    'display': phoneDisplay,\r\n                    'displayAscii': phoneDisplay,\r\n                    'e164Mask': externalMask,  # Used in specific locations\r\n                    'associatedEndusers': {\r\n                        'enduser': [\r\n                            {\r\n                                'userId': phoneUsername\r\n                            }\r\n                        ]\r\n                    }\r\n                }\r\n            ]\r\n        }\r\n    }\r\n\r\n    # Execute the addPhone request\r\n    try:\r\n        resp = service.addPhone(phone)\r\n        logging.info(cucmusername + ' created desk phone ' + deskPhoneDeviceName + ' ' + userEnteredPhoneModel)\r\n        # TODO Future add handling for a device that already exists\r\n        # Could not insert new row - duplicate value in a UNIQUE INDEX column (Unique Index:)\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: addPhone: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    # print('\naddPhone response:\n')\r\n    # print(resp, '\n')\r\n    input('Desk phone creation complete. 
Press Enter to continue...')\r\n\r\n    # Execute the updateLine request\r\n    try:\r\n        resp = service.updateLine(\r\n            pattern=phoneExt,\r\n            routePartitionName=routePartitionName,\r\n            alertingName=phoneDisplay,\r\n            asciiAlertingName=phoneDisplay,\r\n            description=phoneDescription,\r\n            callForwardAll=callForwardAll\r\n        )\r\n\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: updateLine: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    # print('\nupdateLine response:\n')\r\n    # print(resp, '\n')\r\n    input('Line Updated. Press Enter to continue...')\r\n\r\n\r\ndef _update_End_User():\r\n    # LDAP sync should have occurred above\r\n    # Set data for UpdateUser Request (only variables with multiple values)\r\n\r\n    primaryExtension = {\r\n        'pattern': phoneExt,\r\n        'routePartitionName': routePartitionName\r\n    }\r\n    associatedGroups = {\r\n        'userGroup': [\r\n            {'name': 'Standard CCM End Users'},\r\n            {'name': 'Standard CTI Enabled'}\r\n        ]\r\n    }\r\n    if deskPhoneOnly:\r\n        # Get existing data and append or it will wipe out existing associations\r\n        try:\r\n            resp = service.getUser(userid=phoneUsername)\r\n            userDetails = resp['return'].user\r\n            currentAssociatedDeviceList = userDetails.associatedDevices\r\n        except Fault as err:\r\n            print(f'Zeep error: getUser: { err }')\r\n            input('\n Press Enter to quit.')\r\n            sys.exit(1)\r\n        # Append desk phone to the list\r\n        if currentAssociatedDeviceList is None:\r\n            updatedAssociatedDevices = {\r\n                'device': deskPhoneDeviceName\r\n            }\r\n        else:\r\n            unpackedAssociatedDevices = currentAssociatedDeviceList.device\r\n            unpackedAssociatedDevices.append(deskPhoneDeviceName)\r\n            updatedAssociatedDevices = {\r\n                'device': unpackedAssociatedDevices\r\n            }\r\n    elif jabberOnly:\r\n        # Get existing data and append or it will wipe out existing associations\r\n        try:\r\n            resp = service.getUser(userid=phoneUsername)\r\n            userDetails = resp['return'].user\r\n            currentAssociatedDeviceList = userDetails.associatedDevices\r\n        except Fault as err:\r\n            print(f'Zeep error: getUser: { err }')\r\n            input('\n Press Enter to quit.')\r\n            sys.exit(1)\r\n        # Append Jabber to the list\r\n        if currentAssociatedDeviceList is None:\r\n            updatedAssociatedDevices = {\r\n                'device': jabberDeviceName\r\n            }\r\n        else:\r\n            unpackedAssociatedDevices = currentAssociatedDeviceList.device\r\n            unpackedAssociatedDevices.append(jabberDeviceName)\r\n            updatedAssociatedDevices = {\r\n                'device': unpackedAssociatedDevices\r\n            }\r\n    else:\r\n        # Get existing data and append or it will wipe out existing associations\r\n        try:\r\n            resp = service.getUser(userid=phoneUsername)\r\n            userDetails = resp['return'].user\r\n            currentAssociatedDeviceList = userDetails.associatedDevices\r\n        except Fault as err:\r\n            print(f'Zeep error: getUser: { err }')\r\n            input('\n Press Enter to quit.')\r\n            sys.exit(1)\r\n        # Append desk phone and Jabber to the list\r\n        if currentAssociatedDeviceList is None:\r\n            updatedAssociatedDevices = {\r\n                'device': [\r\n                    jabberDeviceName,\r\n                    deskPhoneDeviceName\r\n                ]\r\n            }\r\n        else:\r\n            unpackedAssociatedDevices = currentAssociatedDeviceList.device\r\n            unpackedAssociatedDevices.append(deskPhoneDeviceName)\r\n            unpackedAssociatedDevices.append(jabberDeviceName)\r\n            updatedAssociatedDevices = {\r\n                'device': unpackedAssociatedDevices\r\n            }\r\n    # Execute update end user (1st time)\r\n    if deskPhoneOnly:\r\n        try:\r\n            resp = service.updateUser(\r\n                userid=phoneUsername,\r\n                userLocale=userLocale,\r\n                homeCluster=True,\r\n                associatedDevices=updatedAssociatedDevices,\r\n                enableCti=True,\r\n
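                # annotation (added; not in the original source): associatedGroups\r\n                # was built above to grant 'Standard CCM End Users' and 'Standard\r\n                # CTI Enabled'; passing it on each update keeps the end user in\r\n                # those groups.\r\n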
                associatedGroups=associatedGroups\r\n            )\r\n            logging.info(cucmusername + ' first pass updated end user ' + phoneUsername)\r\n        except Fault as err:\r\n            logging.error(cucmusername + ' ' + str(err))\r\n            print(f'Zeep error: updateUser: { err }')\r\n            input('\n Press Enter to quit.')\r\n            sys.exit(1)\r\n    else:\r\n        try:\r\n            resp = service.updateUser(\r\n                userid=phoneUsername,\r\n                userLocale=userLocale,\r\n                homeCluster=True,\r\n                imAndPresenceEnable=True,\r\n                associatedDevices=updatedAssociatedDevices,\r\n                enableCti=True,\r\n                associatedGroups=associatedGroups\r\n            )\r\n            logging.info(cucmusername + ' first pass updated end user ' + phoneUsername)\r\n        except Fault as err:\r\n            logging.error(cucmusername + ' ' + str(err))\r\n            print(f'Zeep error: updateUser: { err }')\r\n            input('\n Press Enter to quit.')\r\n            sys.exit(1)\r\n    # print('\nupdateUser response:\n')\r\n    # print(resp, '\n')\r\n\r\n    # Execute update end user (2nd time)\r\n    try:\r\n        resp = service.updateUser(\r\n            userid=phoneUsername,\r\n            # Need to update after phone/Jabber is associated\r\n            primaryExtension=primaryExtension,\r\n        )\r\n        logging.info(cucmusername + ' second pass updated end user ' + phoneUsername)\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: updateUser: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    print('End User Updated...\n')\r\n\r\n\r\ndef _update_Desk_Phone_Owner():\r\n    try:\r\n        resp = service.updatePhone(\r\n            name=deskPhoneDeviceName,\r\n            ownerUserName=phoneUsername\r\n        )\r\n        logging.info(cucmusername + ' updated desk phone ' + deskPhoneDeviceName + ' owner to ' + phoneUsername)\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: updatePhone: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    print('Desk Phone owner updated...\n')\r\n\r\n\r\ndef _update_Jabber_Owner():\r\n    try:\r\n        resp = service.updatePhone(\r\n            name=jabberDeviceName,\r\n            ownerUserName=phoneUsername\r\n        )\r\n        logging.info(cucmusername + ' updated jabber ' + jabberDeviceName + ' owner to ' + phoneUsername)\r\n    except Fault as err:\r\n        logging.error(cucmusername + ' ' + str(err))\r\n        print(f'Zeep error: updatePhone: { err }')\r\n        input('\n Press Enter to quit.')\r\n        sys.exit(1)\r\n    print('Jabber owner updated...\n')\r\n\r\n\r\n# EXECUTE!\r\nif deskPhoneOnly:\r\n    _setup_desk_phone()\r\n    _update_End_User()\r\n    _update_Desk_Phone_Owner()\r\nelif jabberOnly:\r\n    _setup_Jabber()\r\n    _update_End_User()\r\n    _update_Jabber_Owner()\r\nelse:\r\n    _setup_desk_phone()\r\n    _setup_Jabber()\r\n    _update_End_User()\r\n    _update_Desk_Phone_Owner()\r\n    _update_Jabber_Owner()\r\n\r\nlogging.info(cucmusername + ' successfully reached end of script')\r\ninput(\r\n    'Complete. 
Do the following manual tasks where applicable:'\r\n    '\n1:Setup voicemail (import from LDAP using UCXN GUI)'\r\n    '\nPress Enter to quit.')\r\n","repo_name":"dillonator/cucm-provisoning","sub_path":"StandardPhoneSetup.py","file_name":"StandardPhoneSetup.py","file_ext":"py","file_size_in_byte":32819,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"11760082920","text":"import turtle\n\nimport pandas as pd\n\nfrom scoreboard import Scoreboard\nfrom get_coor import Get_coor\n\n\nscoreboard=Scoreboard()\nscreen=turtle.Screen()\ncanvas = screen.getcanvas()\nwindow = canvas.create_window((100, 100), width=200, height=100)\nscreen.title("US States Game")\nimage="blank_states_img.gif"\nscreen.addshape(image)\nturtle.shape(image)\n\nscreen.tracer(0)\n\n\n# def get_mouce_click_coor(x,y):\n#     print(x,y)\n#\n# turtle.onscreenclick(get_mouce_click_coor)\n\nturtle.penup()\n\nget_coor=Get_coor()\ngameIsOn= True\nwhile gameIsOn:\n    correct=scoreboard.score\n    userInput=screen.textinput(title=f"{correct}/50 States Correct",prompt="What's another State?",)\n    coor=(0,0)\n    # textinput() returns None if the dialog is cancelled, so guard before calling .title()\n    if userInput is None or userInput.title()=='Exit':\n        break\n    if get_coor.have_available(userInput):\n        scoreboard.addScore()\n        coor=get_coor.get_location(userInput)\n        turtle.goto(coor)\n        turtle.write(userInput)\n        turtle.goto((0, 0))\n        screen.update()\n\n    if scoreboard.score==50:\n        gameIsOn=False\n\n\n#states to learn.csv\ndic={\n    "Unvisted City": get_coor.all_city_list\n}\ndf=pd.DataFrame(dic)\ndf.to_csv("learn.csv")\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mamun464/US_States_Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70970409608","text":"def add_time(start, duration, wday=False):\n\n    from math import floor\n\n    weekday=["saturday","sunday","monday","tuesday","wednesday","thursday","friday"]\n    wd_c=["Saturday","Sunday","Monday","Tuesday","Wednesday","Thursday","Friday"]\n\n    st=start.split()\n    stt=st[0].split(":")\n    dtt=duration.split(":")\n    ampm=st[1]\n    if ampm=="PM":\n        stt_f=[int(stt[0])+12,int(stt[1])]\n    else:\n        stt_f=[int(stt[0]),int(stt[1])]\n\n    dtt_f=[int(dtt[0]),int(dtt[1])]\n    \n    day=floor(dtt_f[0]/24)\n    dtt_f[0]=dtt_f[0]%24\n\n    ad=[dtt_f[0]+stt_f[0],dtt_f[1]+stt_f[1]]\n    if ad[1]>=60:\n        ad[0]=ad[0]+floor(ad[1]/60)\n        ad[1]=ad[1]%60\n    if ad[0]>=24:\n        day=day+floor(ad[0]/24)\n        ad[0]=ad[0]%24\n    \n    if ad[0]>=12:\n        ampm="PM"\n        ad[0]=ad[0]-12\n    else:\n        ampm="AM"\n    if ad[0]==0:\n        ad[0]=12\n\n    day_str=''\n    if day==0:\n        day_str=""\n    elif (day==1):\n        day_str=" (next day)"\n    else:\n        day_str=" ("+str(day)+" days later)"\n    \n    new_time=str(ad[0])+":"+str(ad[1]).zfill(2)+" "+ampm+day_str \n    \n    if wday!=False:\n        wday=wday.lower()\n        wday_n=weekday.index(wday)\n        wday_n=((wday_n+day+1)%7)-1\n        new_time=str(ad[0])+":"+str(ad[1]).zfill(2)+" "+ampm+", "+wd_c[wday_n]+day_str \n\n    return new_time","repo_name":"azminewasi/FreeCodeCamp-Certification-Projects","sub_path":"Scientific Computing with Python Projects/p2-time-calculator/time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"41881193738","text":"# -*- coding: utf-8 -*-\n\n"""\nPyMLGame - Event\n"""\n\nfrom pymlgame.locals import E_KEYDOWN, E_KEYUP\n\n\nclass Event(object):\n    def 
__init__(self, uid, type, data=None):\n self.uid = uid\n self.type = type\n if type == E_KEYDOWN or type == E_KEYUP:\n self.button = data\n else:\n self.data = data\n","repo_name":"PyMLGame/pymlgame","sub_path":"pymlgame/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"26603274358","text":"while True:\n try:\n h = input()\n h = h.split(':')\n\n hora = int(h[0]) + 1\n minuto = int(h[1])\n\n aux = hora - 8\n\n if aux < 0:\n print('Atraso maximo: 0')\n else:\n minuto += 60 * aux\n print(f'Atraso maximo: {minuto}')\n\n except EOFError:\n break\n","repo_name":"EdilsonJr/Uri-Judge-Python","sub_path":"beginner/2003.py","file_name":"2003.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9258269846","text":"import logging\n\nfrom celery import shared_task\n\nfrom sme_ptrf_apps.core.models import (\n AcaoAssociacao,\n Arquivo,\n Associacao,\n ContaAssociacao,\n FechamentoPeriodo,\n Periodo,\n PrestacaoConta,\n)\n\nfrom sme_ptrf_apps.core.services.enviar_email import enviar_email_html\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task(\n retry_backoff=2,\n retry_kwargs={'max_retries': 8},\n time_limit=600,\n soft_time_limit=300\n)\ndef concluir_prestacao_de_contas_async(periodo_uuid, associacao_uuid):\n from sme_ptrf_apps.core.services.prestacao_contas_services import _criar_documentos, _criar_fechamentos\n\n periodo = Periodo.by_uuid(periodo_uuid)\n associacao = Associacao.by_uuid(associacao_uuid)\n prestacao = PrestacaoConta.abrir(periodo=periodo, associacao=associacao)\n\n acoes = associacao.acoes.filter(status=AcaoAssociacao.STATUS_ATIVA)\n contas = associacao.contas.filter(status=ContaAssociacao.STATUS_ATIVA)\n\n _criar_fechamentos(acoes, contas, periodo, prestacao)\n logger.info('Fechamentos criados para a prestação de contas %s.', prestacao)\n\n _criar_documentos(acoes, contas, periodo, prestacao)\n logger.info('Documentos gerados para a prestação de contas %s.', prestacao)\n\n prestacao = prestacao.concluir()\n logger.info('Concluída a prestação de contas %s.', prestacao)\n\n\n@shared_task(\n retry_backoff=2,\n retry_kwargs={'max_retries': 8},)\ndef processa_carga_async(arquivo_uuid):\n from sme_ptrf_apps.core.services import processa_carga\n logger.info(\"Processando arquivo %s\", arquivo_uuid)\n arquivo = Arquivo.objects.filter(uuid=arquivo_uuid).first()\n if not arquivo:\n logger.info(\"Arquivo não encontrado %s\", arquivo_uuid)\n else:\n logger.info(\"Arquivo encontrado %s\", arquivo_uuid)\n processa_carga(arquivo)\n\n","repo_name":"ollyvergithub/SME-PTRF-BackEnd","sub_path":"sme_ptrf_apps/core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71881250887","text":"file = open(\"trans.test\",\"r\",encoding='utf-8')\r\ncontents = file.readlines()\r\n# print(contents[1:10][:])\r\nadmin = []\r\nfor msg in contents:\r\n msg = msg.strip('\\n')\r\n adm = msg.split(';')\r\n if '\"src_text\":\"\"' in adm[1]:\r\n adm[1] = ''\r\n admin.append(adm)\r\n# print(admin[0:5])\r\nfile.close()\r\n# for i in admin:\r\n # if \r\nwith open('desc_trans.txt','w',encoding='utf-8') as f:\r\n# with open('otomevn_full.txt','w') as f:\r\n for i in admin:\r\n for j in i:\r\n f.write(j)\r\n f.write(';')\r\n f.write('\\n')\r\n 
f.close()","repo_name":"yunpingwang27/otomegame","sub_path":"deal_trans.py","file_name":"deal_trans.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73784160009","text":"from flask import Blueprint, request\nfrom task.models import Task\nfrom user.models import User\nfrom db import db\n\ntask_blueprint = Blueprint('task', __name__)\n\n@task_blueprint.route(\"\", methods=[\"POST\"])\ndef create_tasks():\n # check json or not\n if not request.is_json:\n return {\"error\": \"Bukan json!!\"}, 400\n\n data = request.get_json()\n \n # task available in data or not\n if \"title\" not in data:\n return {\"error\": \"Title not available\"}, 400\n \n title = data.get(\"title\")\n user_id = data.get(\"user_id\")\n\n user = User.query.get(user_id)\n if not user:\n return {\"error\": \"User not found!\"}, 404\n \n task = Task(user=user, title=title)\n db.session.add(task)\n db.session.commit()\n\n return {\"message\": \"success\"}\n\n# tasks = [{'task': 'Coding with Flask', 'status': 'in progress'}]\n\n# @task_blueprint.route(\"\", methods=[\"GET\"])\n# def get_tasks_list():\n# return tasks\n\n# allowed_status = [\"in progress\", \"to do\", \"done\"]\n\n# @task_blueprint.route(\"/\", methods=[\"PUT\"])\n# def update_tasks(index):\n# if index > len(tasks):\n# return {\"error\": \"task not found!\"}, 404\n \n# data = request.get_json()\n# task = data.get(\"task\")\n# status = data.get(\"status\") # None\n \n# if status not in allowed_status:\n# return {\"error\": \"status tidak valid!\"}, 400 \n\n# updated_task = tasks[index - 1]\n\n# if task:\n# updated_task[\"task\"] = task\n \n# if status:\n# updated_task[\"status\"] = status\n\n# tasks[index - 1] = updated_task\n\n# return tasks\n\n\n# @task_blueprint.route(\"/\", methods=[\"DELETE\"])\n# def delete_task(index):\n# if index > len(tasks):\n# return {\"error\": \"news not found!\"}, 404\n \n# del tasks[index - 1]\n\n# return tasks","repo_name":"luthfihariz/revou-flask-api","sub_path":"day-2/task/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5193830482","text":"from basic_python.basictools import pause\n\n\ndef get_y(a, b):\n return lambda x: a * x + b\n\n\ny1 = get_y(14, 3)\nprint(y1(2)) # 结果为2\n\nprint((lambda x, y: x * 3 + y * 78)(90, 5))\n\n\ndef get_y_normal(a, b):\n def func(x):\n return a * x + b\n\n return func\n\n\ny2 = get_y_normal(12, 3)\nprint(y2)\nprint(y2(4))\npause()\n'''\nCreate a function.\n'''\n\n\ndef quicksort(arr):\n if len(arr) <= 1:\n return arr\n pivot = arr[len(arr) // 2]\n left = [x for x in arr if x < pivot]\n middle = [x for x in arr if x == pivot]\n right = [x for x in arr if x > pivot]\n return quicksort(left) + middle + quicksort(right)\n\n\nprint(quicksort([3, 6, 8, 10, 1, 2, 1]))\n\nanimals = ['cat', 'dog', 'monkey']\nfor idx, animal in enumerate(animals):\n print('#%d: %s' % (idx + 1, animal))\n\n'''\nCreate a class.\n'''\n\n\nclass Greeter(object):\n # Constructor\n def __init__(self, name):\n self.name = name # Create an instance variable\n\n # Instance method\n def greet(self, loud=False):\n if loud:\n print('HELLO, %s!' 
% self.name.upper())\n else:\n print('Hello, %s' % self.name)\n\n\ng = Greeter('Fred') # Construct an instance of the Greeter class\ng.greet() # Call an instance method; prints \"Hello, Fred\"\ng.greet(loud=True) # Call an instance method; prints \"HELLO, FRED!\"\n","repo_name":"likewind1234/nlp-basic-study","sub_path":"basic_python/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17292479395","text":"import pendulum\nfrom airflow.providers.google.cloud.operators.bigquery import (\n BigQueryCreateExternalTableOperator,\n BigQueryDeleteTableOperator,\n)\n\nfrom airflow import DAG\n\nwith DAG(\n dag_id=\"running-buses-gcs-files-dag\",\n description=\"GCP MANAGER DAG\",\n start_date=pendulum.now(),\n tags=[\"live-buses\"],\n) as dag:\n date = pendulum.now(tz=\"America/Sao_Paulo\")\n day = date.format(\"DD\")\n month = date.format(\"MM\")\n year = date.format(\"YYYY\")\n hour = date.format(\"HH\")\n\n project_id = \"bus-data-389717\"\n bq_dataset_name = \"dbt_busdata_stag\"\n\n delete_table = BigQueryDeleteTableOperator(\n task_id=\"delete_table_running_buses\",\n deletion_dataset_table=f\"{project_id}.{bq_dataset_name}.external_table_running_buses\",\n gcp_conn_id=\"google_cloud_default\",\n ignore_if_missing=True,\n )\n\n gcs_bucket_name = \"data_lake_bus_data_bus-data-389717\"\n gcs_path_name = (\n f\"running_buses/year={year}/month={month}/day={day}/hour={hour}/*.csv\"\n )\n # gcs_path_name = f\"running_buses/year={year}/month={month}/day={day}/hour=14/*.csv\"\n # gcs_path_name = f\"running_buses/year={year}/month={month}/day=22/hour=13/part-00000-29e33fc5-b95e-4f0e-b351-1f131fa0eeac-c000.csv\"\n create_external_table = BigQueryCreateExternalTableOperator(\n task_id=\"create_external_table_running_buses\",\n destination_project_dataset_table=f\"{bq_dataset_name}.external_table_running_buses\",\n bucket=gcs_bucket_name,\n source_objects=[gcs_path_name],\n schema_fields=[\n {\"name\": \"line_name\", \"type\": \"STRING\", \"mode\": \"NULLABLE\"},\n {\"name\": \"line_number\", \"type\": \"INTEGER\", \"mode\": \"NULLABLE\"},\n {\"name\": \"qtd_running_buses\", \"type\": \"INTEGER\", \"mode\": \"NULLABLE\"},\n {\"name\": \"timestamp\", \"type\": \"TIMESTAMP\", \"mode\": \"NULLABLE\"},\n ],\n gcp_conn_id=\"google_cloud_default\",\n )\n\n delete_table >> create_external_table\n","repo_name":"warzinnn/bus-data","sub_path":"airflow/dags/running_buses_gcp_dag.py","file_name":"running_buses_gcp_dag.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8398788129","text":"import pandas as pd\nimport numpy as np\nimport json\nimport os\nfrom sklearn.model_selection import KFold\n\nclass Davis:\n def __init__(self, train=True, sim_type='sis', d_threshold=0.6, p_threshold=0.6):\n self.train = train\n self.sim_type = sim_type\n self.sim_neighbor_num = 5\n self.d_threshold = d_threshold\n self.p_threshold = p_threshold\n\n self.setting2_path = './data/davis/folds/fold_setting2.json'\n self.setting3_path = './data/davis/folds/fold_setting3.json'\n\n self.ligands_path = './data/davis/ligands_can.json'\n self.d_ecfps_path = './data/davis/drug_ecfps.csv'\n self.d_vecs_path = './data/davis/drug_vec.csv'\n self.d_sim_path = './data/davis/drug-drug_similarities_2D.txt'\n self.p_gos_path = './data/davis/protein_go_vector.csv'\n self.p_sim_path = 
'./data/davis/target-target_similarities_WS.txt'\n\n def _load_data(self, setting, fold):\n self.d_vecs = np.loadtxt(self.d_vecs_path, delimiter=',', dtype=float, comments=None)\n self.d_ecfps = np.loadtxt(self.d_ecfps_path, delimiter=',', dtype=int, comments=None)\n\n d_sim_path = self.d_sim_path\n delimiter = ' '\n if self.sim_type != 'default':\n d_sim_path = './data/davis/drug_{}.csv'.format(self.sim_type)\n delimiter = ','\n self.d_sim = np.loadtxt(d_sim_path, delimiter=delimiter, dtype=float, comments=None)\n\n self.p_gos = pd.read_csv(self.p_gos_path, delimiter=',', header=0, index_col=0).to_numpy(float)\n p_sim = np.loadtxt(self.p_sim_path, delimiter=' ', dtype=float, comments=None)\n p_max, p_min = p_sim.max(axis=0), p_sim.min(axis=0)\n self.p_sim = (p_sim - p_min) / (p_max - p_min)\n\n self.p_embeddings = pd.read_csv('./data/davis/protein_embedding.csv', delimiter=',', header=None,\n index_col=0).to_numpy(float)\n\n self.y = np.loadtxt('./data/davis/Y.txt', delimiter=',', dtype=float, comments=None)\n","repo_name":"HuangStomach/SISDTA","sub_path":"data/davis/davis.py","file_name":"davis.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40688270379","text":"#!/usr/bin/python3\n\"\"\"defines an integer addition function\"\"\"\n\ndef add_integer(a, b=98):\n \"\"\"\n add_integer: adds two integers\n\n Floats are type casted into ints\n\n Raises:\n TypeError: if either a or b is not an int or a float\n \"\"\"\n\n if ((not isinstance(a, int) and not isinstance(a, float))):\n raise TypeError(\"a must be an integer\")\n\n if ((not isinstance(b, int) and not isinstance(b, float))):\n raise TypeError(\"b must be an integer\")\n\n return (int(a) + int(b))\n","repo_name":"balagrivine/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5361861433","text":"from django.urls import path, include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom .views import RenderCertificate, searchCertificate, FindView, UpdateUserView, IndexView, DashboardView, AddCoupleView, PaymentView, AddWedView, AddDivorseView, SearchDocumentView, CertificateView, updateStatus\n\napp_name = 'certification'\nurlpatterns=[\n path('', IndexView.as_view(), name='home'),\n path('search', searchCertificate, name='search'),\n path('certificate/', CertificateView.as_view(), name='certificates'),\n path('dash/certificate/', RenderCertificate.as_view(), name='render-certificates'),\n path('certificate//payment', PaymentView.as_view(), name='payment'),\n path('dash', DashboardView.as_view(), name='dashboard'),\n path('add-couple', AddCoupleView.as_view(), name='add-couple'),\n path('add-wed', AddWedView.as_view(), name='add-wed'),\n path('add-divorse', AddDivorseView.as_view(), name='add-divorse'),\n path('profile/', UpdateUserView.as_view(), name='profile'),\n path('find', FindView.as_view(), name='find'),\n path('certificate/success', updateStatus, name='updatestatus')\n]+ static(settings.MEDIA_URL, document_root = 
settings.MEDIA_ROOT)","repo_name":"Bateyjosue/divorse-certification","sub_path":"certification/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"38351628596","text":"import torch\nimport torchvision\nimport numpy as np\nfrom PIL import Image\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom torchvision.transforms import Compose, Resize, ToTensor, transforms, functional as TF\n\nMEAN = np.array([0.48145466, 0.4578275, 0.40821073]).reshape(-1, 1, 1)\nSTD = np.array([0.26862954, 0.26130258, 0.27577711]).reshape(-1, 1, 1)\n\n\ndef get_image_grid(images):\n # preprocess images\n image_size = (224, 224)\n image_preprocess = Compose([\n Resize(image_size, interpolation=Image.BICUBIC),\n ToTensor()\n ])\n images = [image_preprocess(img) for img in images]\n\n # stack into a grid and return\n image_stack = torch.tensor(np.stack(images))\n image_grid = torchvision.utils.make_grid(image_stack, nrow=5)\n transform = transforms.ToPILImage()\n image_grid = transform(image_grid)\n\n return image_grid\n\n\ndef get_similarity_heatmap(scores, images, text, transpose_flag):\n count_images = len(images)\n count_text = len(text)\n scores = np.round(scores, 2)\n scores = scores.T if transpose_flag else scores\n\n # create the figure\n fig = plt.figure()\n for i, image in enumerate(images):\n plt.imshow(np.asarray(image), extent=(i, i + 1.0, -1.0, -0.2), origin=\"lower\")\n sns.heatmap(scores, annot=scores, cbar_kws={'label': 'Probability'}, cmap='viridis')\n plt.xticks([])\n plt.yticks(np.arange(count_text) + 0.5, text, rotation=0, fontsize=10)\n plt.xlabel('Images')\n plt.ylabel('Text')\n plt.xlim([0.0, count_images + 0.5])\n plt.ylim([count_text + 0.5, -1.0])\n plt.title('Predictions', fontweight='bold')\n\n return fig\n\n\ndef prepare_images(images, out_res, device):\n all_image = []\n for img in images:\n # PNGs are RGBA and JPGs are RGB, fix at RGB\n img = img.convert('RGB')\n res = min(img.size)\n out = TF.center_crop(img, (res, res))\n out = TF.resize(out, (out_res, out_res))\n out = TF.to_tensor(out).unsqueeze(0)\n out = (out - MEAN) / STD\n all_image.append(out)\n return torch.cat(all_image, dim = 0).to(device)\n","repo_name":"NimbleBoxAI/NL-Images","sub_path":"clip/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15347711710","text":"from ast import arg\nimport os\nimport re\nimport string\n\n\nclass CppGenerator():\n CMAKE_FILENAME = \"CMakeLists.txt\"\n SRC_FILE = \"main.cpp\"\n DUMMY_DESCRIPTION = '// DUMMY_DESCRIPTION'\n DUMMY_SNIPPET = '// DUMMY_SNIPPET'\n DUMMY_TESTS = '// DUMMY_TESTS'\n ASSERT_TEMPLATE = \"assert((VAR1) == (VAR2));\"\n CLASS_INSTANCE = 'solution'\n parameterTypes = dict()\n\n def __init__(self, workDir, problemDescription, codeSnippet):\n self.workDir = workDir\n self.code = codeSnippet\n self.description = problemDescription\n self.getRetTypesAndParams()\n\n def generate(self):\n try:\n self.setCmakeProjectName()\n self.configureMain()\n return True\n except:\n return False\n\n def setCmakeProjectName(self):\n cmakeFile = self.workDir + '/' + self.CMAKE_FILENAME\n name = \"project (\" + self.workDir.split(\"/\")[-1] + \")\"\n with open(cmakeFile, 'r') as file:\n filedata = file.read()\n\n filedata = re.sub(r'project \\(.*\\)', name, filedata, 1)\n with open(cmakeFile, 'w') as file:\n 
file.write(filedata)\n\n def configureMain(self):\n mainFile = self.workDir + '/' + self.SRC_FILE\n with open(mainFile, 'r') as file:\n filedata = file.read()\n # problem description\n filedata = filedata.replace(self.DUMMY_DESCRIPTION, self.description)\n # codeSnippet\n filedata = filedata.replace(self.DUMMY_SNIPPET, self.code)\n # examples & asserts\n assertionBlock = \"Solution \" + self.CLASS_INSTANCE+\";\\n\"\n assertionBlock += self.asserts\n filedata = filedata.replace(self.DUMMY_TESTS, assertionBlock)\n with open(mainFile, 'w') as file:\n file.write(filedata)\n\n def getRetTypesAndParams(self):\n code = self.code\n opBrck = code.find('(')\n clBrck = code.find(')')\n brackets = code[opBrck+1:clBrck]\n for argPair in brackets.split(','):\n argPair = argPair.strip().replace('&', '').split(' ')\n self.parameterTypes[argPair[1]] = argPair[0]\n\n typeFin = code.rfind(' ', 0, opBrck)\n typeStrt = code.rfind(' ', 0, typeFin)\n self.fooName = code[typeFin:opBrck].strip()\n self.returnType = code[typeStrt:typeFin].strip()\n if 'void' in self.returnType:\n self.returnType = 'auto'\n\n def parseExamples(self, examples):\n result = ''\n for example in examples:\n result += '\\n{\\n'\n example.replace(' ', '')\n splited = example.split('\\n')\n for row in splited:\n if 'Input:' in row:\n row = row.replace('Input:', '')\n for arg in row.split(', '):\n argType = self.parameterTypes.get(\n arg.split('=')[0].strip(), 'auto')\n if argType == 'auto':\n print(\"ERROOOORROOO\")\n result += argType + ' ' +\\\n arg.replace('[', '{').replace(']', '}')\n result += ';\\n'\n elif 'Output:' in row:\n row = row.replace('Output:', '')\n result += self.returnType + ' result = ' + \\\n row.replace('[', '{').replace(']', '}')\n result += ';\\n'\n else:\n continue\n fooBrackets = ''\n for k, v, in self.parameterTypes.items():\n fooBrackets += k + ','\n fooBrackets = fooBrackets[0:-1]\n result += self.ASSERT_TEMPLATE.replace('VAR1', 'result').replace(\n 'VAR2', self.CLASS_INSTANCE + '.' 
+ self.fooName + '('+fooBrackets+')')\n result += '\\n}'\n self.asserts = result\n","repo_name":"Stasne/leetcode_helper","sub_path":"cpp_gen.py","file_name":"cpp_gen.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5363618089","text":"import pygame\nimport random\nimport math\nimport os\nimport sys\nfrom config import *\n# initializing the pygame\npygame.init()\n\n# font classes for score, player name, prommpt\nfont = pygame.font.Font(MESSAGE_FONT, 40) # taking font face from config.py\nfont_ = pygame.font.Font(MESSAGE_FONT, 18)\n\n# Configuration variables\nWIDTH = 1000\nHEIGHT = 1000\nSCREEN = pygame.display.set_mode(\n (WIDTH, HEIGHT))\nJUMP_VEL = 12\ngameOver = False\nSTAGE_WIDTH = WIDTH/STAGES\n\n# loading all the sprites\nexplosion_sound = pygame.mixer.Sound(\"music/explosion.ogg\")\n# background music to be played entire game\npygame.mixer.music.load(\"music/background_music.ogg\")\npygame.mixer.music.play(-1) # loopoijg indefinitely\n# loading the ground where player stands\ngnd = [pygame.image.load(\"images/gnd.png\"),\n pygame.image.load(\"images/gnd-1.png\")]\ngnd_c = 0\n# FIREBALL =[pygame.image.load(\"fireball32x32.png\"),pygame.image.load(\"fireball132x32.png\"),pygame.image.load(\"fireball232x32.png\")]\nEXPLOSION = [\n pygame.image.load(\"images/explosion-0.png\"), pygame.image.load(\n \"images/explosion-2.png\"), pygame.image.load(\"images/explosion-3.png\"),\n pygame.image.load(\"images/explosion-4.png\"), pygame.image.load(\n \"images/explosion-5.png\"), pygame.image.load(\"images/explosion-6.png\"),\n]\n\n# creating the lists/ Groups\ndragon_list = []\nexplosions = pygame.sprite.Group() #this will store explosions\nfireball = pygame.sprite.Group() #this will store fireballs\nall_sprites = pygame.sprite.Group()\ngems = pygame.sprite.Group() #for storing gems\nenemies = pygame.sprite.Group() #for storing bombs\n\n# generic function to check collisions\n\n\ndef check_collision(\n rect1,\n rect2):\n if (rect2.right > rect1.left\n and rect1.right > rect2.left\n and rect1.bottom > rect2.top\n and rect1.top < rect2.bottom):\n return True\n return False\n\n# function to make a new window to start the game\n\n\ndef newGame():\n global dragon_list\n global explosions\n global fireball\n global all_sprites\n global gems\n global enemies\n dragon_list = []\n explosions = pygame.sprite.Group()\n fireball = pygame.sprite.Group()\n all_sprites = pygame.sprite.Group()\n gems = pygame.sprite.Group()\n enemies = pygame.sprite.Group()\n for i in range(STAGES):\n for j in range(5):\n gems.add(Gem(random.randint(40, WIDTH-200), STAGE_WIDTH*i-16, #creating random gems\n random.choice([\"score\", \"score\", \"heart\", \"score\"])))\n for i in range(STAGES):\n for j in range(BOMB_COUNT): #creating random enemies\n enemies.add(Enemy(random.randint(40, WIDTH-200), STAGE_WIDTH*i-32))\n # all_sprites.add(player)\n for i in range(STAGES):\n dragon_list += [Dragon(i)] #creating dragons\n\n# class for creating health and scoring gems\n\n\nclass Gem(pygame.sprite.Sprite):\n def __init__(\n self, x, y,\n name):\n super(Gem, self).__init__()\n self.x = x\n self.y = y\n self.name = name\n if name == \"score\":\n self.surf = pygame.image.load(\"images/gem-black.png\")\n if name == \"heart\":\n self.surf = pygame.image.load(\"images/heart.png\")\n self.rect = self.surf.get_rect(\n center=(\n x,\n y,\n )\n )\n\n def update(self, player):\n if check_collision(player.rect, self.rect):\n self.kill()\n if 
self.name == \"heart\":\n player.health += 10 #updating health after collectiong health gems\n if player.health > 64:\n player.health = 64\n if self.name == \"score\":\n player.score += 1 #updating player scoring after collecting score gems\n\n# class for creating enemiy bombs\n\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self, x, y):\n super(Enemy, self).__init__()\n self.x = x\n self.y = y\n self.surf = pygame.image.load(\"images/death.png\")\n self.rect = self.surf.get_rect(\n center=(\n x,\n y,\n )\n )\n\n def update(self, player):\n if check_collision(self.rect, player.rect):\n # pygame.mixer.Sound.stop(explosion_sound)\n self.kill()\n player.health -= 5 #reducing health after enemy collision\n explosions.add(Explosion(self.rect.centerx, self.rect.centery)) #explosion after collison with enemy\n # pygame.mixer.Sound.play(explosion_sound)\n # explosion_sound.play()/\n\n # player.score +=1\n\n# class for creating players\n\n\nclass Player(pygame.sprite.Sprite):\n rounds = 0\n direc = 1\n alive = True\n score = 0\n time = 0\n # global gameOver\n health = 64\n jumping = False\n stage = 0\n speed = 5\n jump_vel = -JUMP_VEL\n jump_acc = 0.4\n\n def __init__(self, direc, name):\n super(Player, self).__init__()\n self.direc = direc\n self.name = name\n if STAGES > 8:\n self.surf = pygame.image.load(\"images/pikachu.png\")\n else:\n self.surf = pygame.image.load(\"images/pikachu-2.png\")\n if direc == 1:\n self.rect = self.surf.get_rect(\n center=(\n 0,\n HEIGHT-32,\n )\n )\n else:\n self.rect = self.surf.get_rect(\n center=(\n 0,\n STAGE_WIDTH-32\n )\n )\n\n def update(self, pressed_keys):\n if self.direc == 1 and self.rect.bottom < 0 and self.stage != 0:\n self.rounds += 1\n self.stage = 0\n self.rect.bottom = WIDTH\n newGame()\n return\n if self.direc == -1 and self.rect.top > HEIGHT and self.stage != 0:\n self.rounds += 1\n self.stage = 0\n newGame()\n return\n if pressed_keys[pygame.K_LEFT]:\n self.rect.move_ip(-self.speed, 0)\n if pressed_keys[pygame.K_RIGHT]:\n self.rect.move_ip(self.speed, 0)\n if not self.jumping and pressed_keys[pygame.K_SPACE]:\n self.stage += 1\n self.jumping = True\n if self.jumping:\n self.jump_vel += self.jump_acc\n self.rect.move_ip(0, self.jump_vel)\n if self.rect.bottom > HEIGHT-STAGE_WIDTH*self.stage and self.jump_vel > 0 and self.direc == 1:\n self.rect.bottom = HEIGHT-STAGE_WIDTH*self.stage\n self.jump_vel = -JUMP_VEL\n self.jumping = False\n elif self.rect.bottom > HEIGHT-STAGE_WIDTH*(STAGES-1-self.stage) and self.jump_vel > 0 and self.direc == -1:\n self.rect.bottom = HEIGHT-STAGE_WIDTH*(STAGES-1-self.stage)\n self.jump_vel = -JUMP_VEL\n self.jumping = False\n\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.health < 0:\n explosions.add(Explosion(self.rect.centerx, self.rect.centery))\n self.alive = False\n # self.kill()\n pygame.draw.rect(SCREEN, (0, 0, 0),\n (self.rect.left, self.rect.top-10, 64, 10))\n pygame.draw.rect(SCREEN, (0, 200, 20), (self.rect.left,\n self.rect.top-10, self.health, 10))\n text = font.render(\n f\":{self.score} Round:{self.rounds+1}\", True, (2, 25, 0))\n SCREEN.blit(pygame.image.load(\"images/gem-black.png\"),(0,0))\n SCREEN.blit(text, (32, 0))\n string = font_.render(f\"{self.name}\", True, (0, 200, 100))\n SCREEN.blit(string, (self.rect.left, self.rect.top-25))\n\n# class for creating the fire breathing dragons\n\n\nclass Dragon(pygame.sprite.Sprite):\n id = 0\n state = 0\n dir = 1\n rand = random.randint(10, 20)\n frame_rate = 20\n frame_count = 0\n # 
fire = False\n fire_count = 0\n\n def __init__(self, id):\n super(Dragon, self).__init__()\n self.surf = pygame.transform.flip(\n pygame.image.load(\"images/dragon.png\"), 1, 0)\n\n self.rect = self.surf.get_rect(\n center=(\n WIDTH - 32,\n HEIGHT - STAGE_WIDTH*id - STAGE_WIDTH/2,\n )\n )\n self.id = id\n\n def update(self, player):\n # if self.id <= player.stage+2 and self.id> player.stage-1:\n if self.frame_count > self.frame_rate:\n self.frame_count -= self.frame_rate\n self.rect.move_ip(0, 5*self.dir)\n self.dir = -1 * self.dir\n if self.fire_count > self.rand:\n self.rand = random.randint(50, 100)\n self.fire_count -= self.rand\n fireball.add(FireBall(self.id))\n self.fire_count += 1\n if check_collision(self.rect, player.rect):\n player.health -= 0.2\n self.frame_count += 1\n\n# class for creating fireballs\n\n\nclass FireBall(pygame.sprite.Sprite):\n id = 0\n turn = 0\n\n def __init__(self, id):\n super(FireBall, self).__init__()\n # self.surf = pygame.image.load(\"fireball32x32.png\")\n self.surf = pygame.transform.flip(\n pygame.image.load(\"images/bird.png\"), 1, 0)\n if FIREBALL_SPEED == None:\n self.speed = random.random()*5+1\n else:\n self.speed = FIREBALL_SPEED\n\n self.rect = self.surf.get_rect(\n center=(\n WIDTH-150,\n HEIGHT - STAGE_WIDTH*id-STAGE_WIDTH/2-30,\n )\n )\n self.id = id\n\n def update(self, player):\n if self.rect.left < 0:\n self.kill()\n self.rect.move_ip(-self.speed-3*player.rounds, 0)\n self.turn = (self.turn+1) % 3\n # self.surf= FIREBALL[self.turn]\n # self.surf= FIREBALL[self.turn]\n if check_collision(self.rect, player.rect):\n self.kill()\n explosions.add(Explosion(self.rect.centerx, self.rect.centery))\n player.health -= 10\n\n# class for creating explosions\n\n\nclass Explosion(pygame.sprite.Sprite):\n frames = 6\n frame_id = 0\n frame_count = 0\n frame_rate = 15\n\n def __init__(self, x, y):\n super(Explosion, self).__init__()\n # explosion_sound.play()\n pygame.mixer.music.load(\"music/explosion.ogg\")\n pygame.mixer.music.play()\n # pygame.mixer.music.queue(\"background_music.ogg\")\n pygame.mixer.music.load(\"music/background_music.ogg\")\n pygame.mixer.music.play(-1)\n self.x = x\n self.y = y\n self.surf = EXPLOSION[0]\n self.rect = self.surf.get_rect(\n center=(\n x,\n y,\n )\n )\n\n def update(self, player1):\n self.frame_count += 1\n if self.frame_count < 20:\n return\n self.frame_count -= self.frame_rate\n self.frame_id += 1\n if self.frame_id == 6:\n self.kill()\n return\n self.surf = EXPLOSION[self.frame_id]\n\n\n# creating 2 players\nplayer1 = Player(1, \"player1\")\nplayer2 = Player(-1, \"player2\")\nplayer = None\n\n# the main loop\nwhile not gameOver:\n #checking for the quit presses\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n gameOver = True\n elif event.type == pygame.QUIT:\n gameOver = True\n\n #checking for the player\n if(player == None):\n player = player1\n newGame()\n elif not player1.alive and player == player1:\n player = player2\n newGame()\n elif not player2.alive and player == player2:\n #both the players have played\n #displaying the necessary message\n text = \"\"\n if(player2.score > player1.score):\n text = f\"Player2 won by {player2.score}-{player1.score}\"\n elif(player1.score > player2.score):\n text = f\"Player1 won by {player1.score}-{player2.score}\"\n else:\n text = f\"IT's a Draw\"\n t = font.render(f\"{text}\", True, MESSAGE_COLOR)\n SCREEN.blit(t, (WIDTH/2-100, HEIGHT/2-20))\n pygame.display.update()\n pygame.time.delay(4000)\n \n #restarting 
the game automatically\n player = None\n player1 = Player(1, \"player1\")\n player2 = Player(-1, \"player2\")\n continue\n \n #drawing the stages,fireballs, explosions,enemies and the player\n for i in range(STAGES):\n SCREEN.blit(gnd[gnd_c], (0, HEIGHT-STAGE_WIDTH*i-80))\n dragon_list[i].update(player)\n SCREEN.blit(dragon_list[i].surf, dragon_list[i].rect)\n for fireballs in fireball:\n SCREEN.blit(fireballs.surf, fireballs.rect)\n fireballs.update(player)\n for explosion in explosions:\n SCREEN.blit(explosion.surf, explosion.rect)\n explosion.update(player)\n for gem in gems:\n SCREEN.blit(gem.surf, gem.rect)\n gem.update(player)\n for enemy in enemies:\n SCREEN.blit(enemy.surf, enemy.rect)\n enemy.update(player)\n SCREEN.blit(player.surf, player.rect)\n\n # checking key presses\n pressed_keys = pygame.key.get_pressed()\n player.update(pressed_keys)\n\n pygame.display.flip()\n SCREEN.fill((255, 255, 255))\n\n\n# pygame quitting\npygame.quit()\n","repo_name":"ace-spadez/pygame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69977496968","text":"import datetime\n\ndef solution(book_time):\n \n Room = []\n time = {}\n\n for time in book_time:\n s_h = int(time[0].split(':')[0])\n s_m = int(time[0].split(':')[1])\n e_h = int(time[1].split(':')[0])\n e_m = int(time[1].split(':')[1])\n \n c_start = s_h*60 + s_m\n c_end = e_h*60 + e_m + 10\n \n Room.append([c_start,1])\n Room.append([c_end,-1])\n\n Room.sort()\n number=0\n max_num = 0\n print(Room)\n \n for i in range(len(Room)-1):\n number += Room[i][1]\n \n if(number>max_num):\n if(Room[i][0] != Room[i+1][0]):\n max_num = number\n \n if(number+Room[-1][1] > max_num):\n max_num = number+Room[-1][1]\n print(max_num)\n \n answer = max_num\n \n \n \n \n \n \n \n \n# Room = []\n# for time in book_time:\n\n# s_h = int(time[0].split(':')[0])\n# s_m = int(time[0].split(':')[1])\n# e_h = int(time[1].split(':')[0])\n# e_m = int(time[1].split(':')[1])\n \n# c_start = datetime.time(s_h, s_m)\n \n# if(e_h==23 and e_m>=50):\n# c_end = datetime.time(23, 59)\n# elif(e_m>=50):\n# c_end = datetime.time(e_h+1, e_m-50)\n# else:\n# c_end = datetime.time(e_h, e_m+10)\n \n# flag = 1\n \n# for room in Room:\n# flag = 1\n# length = len(room)\n\n# #start 찾기\n# start_index = -1\n# end_index = -1\n\n# if(c_start>=room[-1]): # 맨 뒤에 추가\n# room.append(c_start)\n# room.append(c_end)\n# flag = 0\n# break\n# elif(c_end <= room[0]): # 맨 앞에 추가\n# room.insert(0, c_start)\n# room.insert(1, c_end)\n# flag = 0\n# break\n# else:\n# # start 찾기\n# for i in range(1, length):\n# if(c_start>=room[i-1] and c_startroom[i-1] and c_end<=room[i]):\n# end_index = i\n# break\n# # start end index 비교\n# if(start_index == end_index and start_index%2==0):\n# room.insert(start_index, c_start)\n# room.insert(start_index+1, c_end)\n# flag = 0\n# if(flag == 0):\n# break\n# # flag = 0 이면 이미 추가된거고 1이면 아직 추가 안된것\n \n# if(flag == 1): # 새로운 룸에 추가 (아무것도 없을때도 추가)\n# Room.append([c_start, c_end]) \n# print(Room)\n\n# answer = len(Room)\n return answer","repo_name":"dainshon/CODING","sub_path":"프로그래머스/unrated/155651. 
호텔 대실/호텔 대실.py","file_name":"호텔 대실.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35145713495","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport json\nimport numpy as np\nimport pandas as pd\n\nclass Graber: \n \n url = 'https://apteki.su/'\n city = ''\n\n def __init__(self, city = ''):\n if city != '':\n self.city = city\n self.url = f\"https://{city}.apteki.su//catalog//Оземпик\"\n\n def openDriver(self):\n self.driver = webdriver.Chrome()\n self.driver.get(self.url)\n\n def waitLoadElement(self): \n pass\n \n def getCity(self):\n places = None\n city_list = []\n try:\n element = WebDriverWait(self.driver, 30).until(EC.presence_of_element_located(\n (By.XPATH, \"//*[@class='location-window__list location-window__list_city']\")))\n finally:\n while places is None:\n places = self.driver.execute_script(\"return sessionStorage.getItem('places');\")\n\n places_json = json.loads(places)\n \n for place in places_json:\n city_list.append(place['alias'])\n city_list.sort()\n return city_list\n\n def getPrice(self):\n table = self.driver.find_element(By.CLASS_NAME, 'search-select-form__item-list')\n elements = table.find_elements(By.TAG_NAME, 'dl')\n element_array = []\n for element in elements:\n name = element.find_elements(By.TAG_NAME, 'dt')[0].text\n price = element.find_elements(By.TAG_NAME, 'dd')[0].text\n element_array.append([self.city, name, price])\n return element_array\n \n def closeDriver(self):\n self.driver.quit()\n\n#MAIN\n#get all city`s\ngraber = Graber()\ngraber.openDriver()\ncity_list = graber.getCity()\ngraber.closeDriver()\n\n\n#get price in some city\nprices = []\nfor city in city_list:\n try:\n graber = Graber(city)\n graber.openDriver()\n pricesInCity = graber.getPrice()\n for priceInCity in pricesInCity:\n prices.append(priceInCity)\n print (f'{city} --- grabed')\n except:\n prices.append([city, 'nil', 'nil'])\n print (f'{city} --- error')\n \n graber.closeDriver()\n\n\n#save to csv\ndf = pd.DataFrame(prices, columns = ['city', 'name', 'price'])\ndf.to_csv('test.csv', index=False)\n","repo_name":"Zork777/GruberApteki","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20967129777","text":"total_problemas = int(input())\n\nqtd_certezas = 0\nQTD_AMIGOS = 3\nQTD_MIN_CERTEZAS = 2\n\nfor n in range(total_problemas):\n temp = 0\n certezas_n = input().split(' ')\n for i in range(QTD_AMIGOS):\n temp += int(certezas_n[i])\n if temp >= QTD_MIN_CERTEZAS:\n qtd_certezas += 1\n\nprint(qtd_certezas)","repo_name":"nszchagas/APC-codes","sub_path":"5_iteracoes/questao8.py","file_name":"questao8.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27952981766","text":"from django.urls import path\r\nfrom . import views\r\nfrom . 
import apiview\r\n\r\nurlpatterns=[\r\n path('', views.index,name=\"index\"),\r\n path('/', views.task_detail, name='task_detail'),\r\n path('form',views.task_form,name=\"task_form\"),\r\n path('api', apiview.api,name =\"Api_View\"),\r\n path ('api/',apiview.api_task_detail,name= \"Api Task Details\")\r\n]","repo_name":"PizzaMGx/DjangoTodo","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22875135923","text":"#! /usr/bin/python\n#-*-coding: utf-8 -*-\n\n\"\"\"\nPython code to use the MPR121 capacitive touch sensor from Adafruit with a Raspberry Pi to count/log touches, while ignoring 'un-touch' events.\nCounting/Logging happens in a threaded callback (using RPi.GPIO.add_event_callback) triggered from the IRQ pin on the MPR121\n\nAdafruit_MPR121 requires the Adafuit MPR121 library which, in turn, requires the Adafuit GPIO library for I2C-bus access:\ngit clone https://github.com/adafruit/Adafruit_Python_MPR121 \ngit clone https://github.com/adafruit/Adafruit_Python_GPIO\n\nNOTE :\nAdafruit_Python_MPR121 has been deprecated. Adafruit has a new module for mpr121 using CircuitPython at github.com/adafruit/Adafruit_CircuitPython_MPR121\nbut this requires the whole CircuitPython install, which is rather large. It may be worth switching to this in the future.\n\"\"\"\n\nfrom Adafruit_MPR121 import MPR121\nimport RPi.GPIO as GPIO\n\nfrom array import array\nfrom time import time, sleep\n\ngTouchDetector = None # global reference to touchDetector for use in touchDetector callback\n\n\nclass TouchDetector (MPR121.MPR121):\n \"\"\"\n ******************* TouchDetector inherits from Adafruit's MPR121 capacitive touch sensor code *************************\n\n mnemonic defines for use in controlling touchDetector callback\n \"\"\"\n callbackCountMode = 1 # callback counts touches on set of pin in touchPins\n callbackTimeMode = 2 # callback records time of each touch for each pin in touchPins \n callbackCustomMode = 4 # callback calls user-supplied custom function with touched pin\n\n @staticmethod\n def touchDetectorCallback (channel):\n \"\"\"\n Touch Detector callback, triggered by IRQ pin. The MPR121 sets the IRQ pin high whenever the touched/untouched state of any of the\n antenna pins changes. Calling MPR121.touched () sets the IRQ pin low again. MPR121.touched() returns a 12-but value\n where each bit represents a pin, with bits set for pins being touched, and un-set for pins not being touched. The callback tracks only touches, \n not un-touches, by keeping track of last touches. 
The callback counts touches on a set of pins, and/or logs timestamps of touches\n on a set of pinss, and/or calls a user-supplied custom function with the touched pin as the only parameter.\n \"\"\"\n global gTouchDetector\n touches = gTouchDetector.touched ()\n # compare current touches to previous touches to find new touches\n for pin in gTouchDetector.touchPins:\n pin = int(pin)\n pinBits = 2**pin\n if (touches & pinBits) and not (gTouchDetector.prevTouches & pinBits):\n if gTouchDetector.callbackMode & TouchDetector.callbackCountMode:\n gTouchDetector.touchCounts [pin] +=1\n if gTouchDetector.callbackMode & TouchDetector.callbackTimeMode:\n gTouchDetector.touchTimes.get(pin).append (time())\n if gTouchDetector.callbackMode & TouchDetector.callbackCustomMode:\n gTouchDetector.customCallback (pin)\n gTouchDetector.prevTouches = touches\n \n \n def __init__(self, I2Caddr, touchThresh, unTouchThresh, pinTuple, IRQpin):\n \"\"\"\n inits the MPR121 superclass, does MPR121 stuff, then does touchDetector stuff\n \"\"\"\n # MPR121 stuff\n super().__init__()\n self.begin(address =I2Caddr)\n self.set_thresholds (touchThresh, unTouchThresh)\n #touchDetector specific stuff, making data arrays, and installing callback\n # the tuple of pin numbers to monitor, passed in\n self.touchPins = pinTuple\n # an array of ints to count touches for each pin, for callbackCountMode\n # we make an array for all 12 pins, even though we may not be monitoring all of them\n self.touchCounts = array ('i', [0]*12)\n # a dictionary of lists to capture times of each touch on each pin, for callbackTimeMode\n self.touchTimes = {}\n for pin in self.touchPins:\n self.touchTimes.update({pin : []})\n # customCallback will contain reference to custom callback function, when installed\n self.customCallback = None\n # make global gTouchDetector reference this TouchDetector\n global gTouchDetector\n gTouchDetector = self\n # set up IRQ interrupt pin for input with pull-up resistor. 
Save IRQpin so we can remove event detect when object is deleted\n self.IRQpin = IRQpin\n GPIO.setmode (GPIO.BCM) # GPIO.setmode may already have been called, but call it again anyway\n GPIO.setup(IRQpin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # install callback\n GPIO.add_event_detect (self.IRQpin, GPIO.FALLING)\n GPIO.add_event_callback (self.IRQpin, TouchDetector.touchDetectorCallback)\n # callback mode, variable that tracks if we are counting touches, logging touch times, or running custom callback\n # callback is always running, even when callbackMode is 0, but we don't always log the touches\n self.callbackMode = 0\n # initial state of touches, saved from one callback call to next, used in callback to separate touches from untouches\n self.prevTouches = self.touched()\n\n\n def __del__ (self):\n \"\"\"\n Removes the event detect callback and cleans up the GPIO pin used for it\n \"\"\"\n GPIO.remove_event_detect (self.IRQpin)\n GPIO.cleanup (self.IRQpin)\n \n def addCustomCallback (self, customCallBack):\n \"\"\"\n sets the custom callback that will be called on a per-pin basis from main callback\n \"\"\"\n self.customCallback = customCallBack\n\n def startCustomCallback(self):\n \"\"\"\n sets callback mode field so main callback calls custom callback\n \"\"\"\n if self.customCallback is not None:\n self.callbackMode |= TouchDetector.callbackCustomMode\n\n def stopCustomCallback(self):\n \"\"\"\n sets callback mode field so main callback stops calling custom callback\n \"\"\"\n self.callbackMode &= ~TouchDetector.callbackCustomMode\n\n def startCount (self):\n \"\"\"\n Zeros the array that stores counts for each pin, and makes sure callback is filling the array for requested pins\n \"\"\"\n for i in range (0,12):\n self.touchCounts [i] = 0\n self.callbackMode |= TouchDetector.callbackCountMode\n \n def resumeCount(self):\n self.callbackMode |= TouchDetector.callbackCountMode\n \n def getCount (self):\n results = []\n for pin in self.touchPins:\n pin = int(pin)\n results.append ((pin, self.touchCounts [pin]))\n return results\n\n def stopCount (self):\n \"\"\"\n returns a list of tuples where each member is a pin number and the number of touches for that pin\n call startCount, wait a while for some touches, then call stopCount\n \"\"\"\n self.callbackMode &= ~TouchDetector.callbackCountMode\n results = []\n for pin in self.touchPins:\n pin = int(pin)\n results.append ((pin, self.touchCounts [pin]))\n return results\n\n\n def startTimeLog (self):\n \"\"\"\n clears the dictionary of lists used to capture times of each touch on each pin\n \"\"\"\n for pin in self.touchPins:\n pin = int(pin)\n self.touchTimes.update({pin : []})\n self.callbackMode = self.callbackMode | TouchDetector.callbackTimeMode\n\n def stopTimeLog (self):\n \"\"\"\n returns a shallow copy (the lists in the original and copy are the same)\n of the dictionary of lists of touch times for each pin\n \"\"\"\n self.callbackMode &= ~TouchDetector.callbackTimeMode\n return self.touchTimes.copy()\n\n def waitForTouch (self, timeOut_secs, startFromZero=False):\n \"\"\"\n Waits for a touch on any pin. 
Returns pin that was touched, or 0 if timeout expires with no touch,\n or -1 if startFromZero was True and the detector was touched for entire time\n \"\"\"\n endTime = time() + timeOut_secs\n if self.prevTouches == 0: # no touches now, wait for first touch, or timeout expiry\n while self.prevTouches ==0 and time() < endTime:\n sleep (0.05)\n return self.prevTouches\n else: #touches already registered\n if not startFromZero: # we are done already\n return self.prevTouches\n else: # we first wait till there are no touches, or time has expired\n while self.prevTouches > 0 and time() < endTime:\n sleep (0.05)\n if time() > endTime: # touched till timeout expired\n return -1\n else: # now wait for touch or til timeout expires\n while self.prevTouches == 0 and time() < endTime:\n sleep (0.05)\n return self.prevTouches # will be the pin touched, or 0 if no touches till timeout expires\n\n\n","repo_name":"jamieboyd/TouchDetector","sub_path":"TouchDetectorMPR121.py","file_name":"TouchDetectorMPR121.py","file_ext":"py","file_size_in_byte":9052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72320554568","text":"from imageai.Detection.Custom import CustomObjectDetection\n\ndetector = CustomObjectDetection()\ndetector.setModelTypeAsYOLOv3()\ndetector.setModelPath(\"../models/modelo_moedas_igor.h5\") #copied from models/ to make the name easier\ndetector.setJsonPath(\"../models/json/detection_config.json\") #as this one doesn't change the name, reference it directly\ndetector.loadModel()\npath_in = \"../images_in/\"\npath_out = \"../images_out/\"\nfilename = \"real-2.jpg\"\ndetections = detector.detectObjectsFromImage(input_image=path_in+filename, output_image_path=path_out+filename)\nfor detection in detections:\n print(detection[\"name\"], \" : \", detection[\"percentage_probability\"], \" : \", detection[\"box_points\"])","repo_name":"gdinn/real-imageai","sub_path":"scripts/img_analysis.py","file_name":"img_analysis.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30687038588","text":"import time\nlist_words = ['hello', ' day', 'love', 'good', 'hi']\n\nresponse = {\n 'hello': 'hi dear, how are you today',\n 'day': 'going well thank you',\n 'love': 'thanks',\n 'doing good': 'am also doing good, and nice to know'\n}\nuser_input = input('chat with our chat bot :\\n ')\nwhile len(user_input) > 0:\n if list_words[0] in user_input or list_words[4] in user_input:\n print('response processing')\n time.sleep(2)\n print(response['hello'])\n elif list_words[1] in user_input:\n print('response processing')\n time.sleep(2)\n print(response['day'])\n elif list_words[2] in user_input:\n print('response processing')\n time.sleep(2)\n print(response['love'])\n elif list_words[3] in user_input:\n print('response processing')\n time.sleep(2)\n print(response['doing good'])\n elif user_input == 'exit':\n print('pleasure talking to you, hope to talk to you next time')\n time.sleep(2)\n exit()\n elif 'can' in user_input and 'question' in user_input:\n print('response processing')\n time.sleep(2)\n print('you can ask your question')\n elif 'question' in user_input and 'can' not in user_input:\n print('response processing')\n time.sleep(2)\n print('sorry am just a bolt but our customer care can assist you better, contact them on 09090240674')\n else:\n print(\"sorry i don't have a response for that question\")\n user_input = 
input()\n","repo_name":"Esi-meci/chatbot","sub_path":"structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11810385916","text":"import os\r\n# import requests\r\n# from bs4 import BeautifulSoup as soup\r\nimport pandas as pd\r\n\r\nimport utils as ut\r\nfrom config import Settings\r\n\r\nconfig = Settings()\r\n\r\n\r\ndef Crawl_data(base_url=config.base_url, save_path=config.data_folder, load_time=3):\r\n\r\n return_reviews, return_sentiments = ut.crawl_data_from_url(\r\n base_url,\r\n no_load=load_time,\r\n reviews=[],\r\n sentiments=[]\r\n )\r\n\r\n df = pd.DataFrame({\"Reviews\": return_reviews,\r\n \"sentiment\": return_sentiments})\r\n file_name = f'{len(df)}_crawling_data.csv'\r\n df.to_csv(os.path.join(save_path, file_name))\r\n\r\n\r\ndef main():\r\n\r\n base_url = config.base_url\r\n save_path = config.data_folder\r\n load_time = 1\r\n\r\n Crawl_data(base_url, save_path, load_time)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"gaster08nan/Sentiment-Classification","sub_path":"crawl_data.py","file_name":"crawl_data.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39275186296","text":"class DoublyLinkedListNode:\n def __init__(self, value):\n self.value = value\n self.next_node = None\n self.prev_node = None\n\n def __repr__(self):\n return f\"DoublyLinkedListNode(value={self.value})\"\n\n\nif __name__ == '__main__':\n a = DoublyLinkedListNode(\"a\")\n b = DoublyLinkedListNode(\"b\")\n c = DoublyLinkedListNode(\"c\")\n d = DoublyLinkedListNode(\"d\")\n\n a.next_node = b\n b.prev_node = a\n b.next_node = c\n c.prev_node = b\n c.next_node = d\n d.prev_node = c\n\n print(c.prev_node, c.next_node)\n","repo_name":"ArtyomKozyrev8/algorithms_training","sub_path":"algorithms/linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70707961927","text":"#!/usr/bin/env Python\r\n#-*-coding:utf-8-*-\r\n\r\n# author :Guido.shu\r\n# datetime :2020/4/20 9:47\r\n# software : PyCharm\r\nimport requests\r\nfrom pyquery import PyQuery as pq\r\nfrom proxyTool import AbuyunSpider\r\nimport time\r\nimport random\r\nimport re\r\nfrom urllib import parse\r\nfrom conf.useragent import random_useragent\r\n\r\n\r\n\"\"\"\r\n土巴兔爬虫\r\n需求:爬取 城市、类型(家装or工装),家庭公司名称、地址、电话、设计案例、装修公司数\r\n\"\"\"\r\n\r\n\r\nclass TubatuSpider(object):\r\n\r\n def __init__(self):\r\n self.requestsProxies = self.reutnRequestsProxies()\r\n self.builtUrl = 'https://sz.to8to.com/'\r\n\r\n def reutnRequestsProxies(self):\r\n \"\"\"\r\n :return: 返回requests的proxies\r\n \"\"\"\r\n return AbuyunSpider.returnRequestProxies()\r\n\r\n def returnBuiltHeaders(self, path, item, RefererUrl=None, page=None):\r\n \"\"\"\r\n 构造headers\r\n\r\n :return:\r\n \"\"\"\r\n city = parse.quote(item['city'])\r\n city_num = item['city_num']\r\n city_type = item['city_type']\r\n\r\n sourceUrl_built_url = 'https%3A%2F%2F{}.to8to.com%2Fcompany%2F'.format(city_num)\r\n firstUrl_built_url = 'https://{city_num}.to8to.com/company/{city_type}/'.format(city_num=city_num,city_type=city_type)\r\n sourceUrl = item['sourceUrl'].replace('://', '%3A%2F%2F').replace('/', \"%2F\") if item.get('sourceUrl') else sourceUrl_built_url\r\n firstUrl = 
item.get('firstUrl').replace('://', '%3A%2F%2F').replace('/', \"%2F\") if item.get('firstUrl') else firstUrl_built_url.replace(':/', '%3A%2F%2F').replace('/', \"%2F\")\r\n nowpage = item.get('firstUrl').replace('://', '%253A%252F%252F').replace('/','%252F') if item.get('firstUrl') else firstUrl_built_url.replace(':/', '%253A%252F%252F').replace('/', \"%252F\")\r\n if not page:\r\n landpage = 'https%3A//sz.to8to.com/'\r\n else:\r\n landpage = firstUrl\r\n headers = {\r\n # \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n # \"accept-encoding\": \"gzip, deflate, br\",\r\n # \"accept-language\": \"zh-CN,zh;q=0.9\",\r\n # \"cache-control\": \"no-cache\",\r\n # \"pragma\": \"no-cache\",\r\n # \"sec-fetch-dest\": \"document\",\r\n # \"sec-fetch-mode\": \"navigate\",\r\n # \"sec-fetch-site\": \"cross-site\",\r\n # \"sec-fetch-user\": \"?1\",\r\n # \"upgrade-insecure-requests\": '1',\r\n \"user-agent\": random_useragent()\r\n ,\"cookie\": \"uid=CgoKUF61XAWCtJc0A7vvAg==; \"\r\n \"to8tocookieid=f982c677f999237b9fe9e5ee3947f2cc806225; \"\r\n \"tracker2019session=%7B%22session%22%3A%2217201522ce2109-0c32fb225c0495-14291003-2073600-17201522ce3201%22%7D; \"\r\n \"tracker2019jssdkcross=%7B%22distinct_id%22%3A%2217201522ce612f-03e21b7f111ee1-14291003-2073600-17201522ce71e%22%7D; \"\r\n \"to8to_sourcepage=; to8to_landtime=1589160062; \"\r\n \"to8to_cook=OkOcClPzRWV8ZFJlCIF4Ag==; \"\r\n \"to8to_townid=1103; to8to_tcode=sh; \"\r\n \"to8to_tname=%E4%B8%8A%E6%B5%B7; \"\r\n \"Hm_lvt_dbdd94468cf0ef471455c47f380f58d2=1589160063; \"\r\n \"tender_popup_flag=true;\"\r\n \" ONEAPM_BI_sessionid=9238.924|1589197648127; \"\r\n \"Hm_lpvt_dbdd94468cf0ef471455c47f380f58d2={times}; act=freshen;\"\r\n \"to8to_landpage={landpage}; \"\r\n \"to8to_tcode={city_num}; to8to_tname={city}; \"\r\n \"to8to_cmp_sourceUrl={sourceUrl}; \"\r\n \"to8to_cmp_firstUrl={firstUrl}; \"\r\n \"to8to_nowpage={nowpage}; \".format(city=city,\r\n city_num=item['city_num'],\r\n sourceUrl=sourceUrl,\r\n firstUrl=firstUrl,\r\n nowpage=nowpage,\r\n landpage=landpage,times=now_to_timestamp())\r\n }\r\n if RefererUrl:\r\n headers['Referer'] = RefererUrl\r\n headers['sourceUrl'] = sourceUrl\r\n return headers\r\n\r\n def process_response(self, response, meta, item):\r\n \"\"\"\r\n 处理response\r\n :param response:\r\n :return:\r\n \"\"\"\r\n # print(response)\r\n with open('土巴兔.html','w',encoding='utf-8') as f:\r\n f.write(response.text)\r\n documents = pq(response.text)\r\n li_params = documents('.company__list--content > ul > li')\r\n for li in li_params.items():\r\n bulitDivs = li('.company__info')\r\n href_params = li('a').attr('href')\r\n #todo:公司名称\r\n companyName = bulitDivs('.company__info--top').text()\r\n #todo:评论数量\r\n commentNum = bulitDivs('.company__info--all > .comment-count').text()\r\n #todo:日志数量\r\n logNum = bulitDivs('.company__info--all > .owner-diary').text()\r\n #todo:最近签约\r\n clientStatus = bulitDivs('.company__info--all')('.info-num > .recent-signing').text()\r\n #todo:价格\r\n priceNum = bulitDivs('.company__info--all')('.info-num > .average-price').text()\r\n\r\n print(companyName,commentNum,logNum,clientStatus,priceNum,href_params)\r\n\r\n #下一页\r\n print('ssssss', response.url)\r\n nextPage = documents('#nextpageid').attr('href')\r\n if nextPage:\r\n page = re.search(re.compile('(\\d+)'), nextPage)\r\n item['sourceUrl'] = meta['firstUrl']\r\n item['firstUrl'] = response.url\r\n 
print('################正在爬取{}页##############'.format(int(page.group(1))-1))\r\n nextUrl = self.builtUrl + nextPage.split('/')[1] + '/' + meta['key_con'] + '-' + nextPage.split('/')[-1] + '/'\r\n\r\n response,meta = self.process_request(nextUrl, meta=meta, Referer=response.url, item=item)\r\n meta['firstUrl'] = response.url\r\n self.process_response(response=response, meta=meta,item=item)\r\n else:\r\n page = re.search(re.compile('page(\\d+)'), response.url)\r\n\r\n print('################正在爬取{}页##############'.format(page.group(1)))\r\n\r\n #todo:处理requests 请求URL\r\n def process_request(self, nextPage, meta, item,Referer=None):\r\n path_params = '/' + '/'.join(nextPage.split('/')[-3:])\r\n \"\"\"\r\n header\r\n \"\"\"\r\n\r\n\r\n\r\n headers = self.returnBuiltHeaders(path=path_params, RefererUrl=Referer, item=item)\r\n meta['firstUrl'] = headers['sourceUrl']\r\n del headers['sourceUrl']\r\n while 1:\r\n try:\r\n first_url = nextPage[:-1]\r\n first_res = requests.get(first_url, headers={'user-agent': random_useragent()})\r\n with open('ss1.html', 'w') as f:\r\n # print(first_res.text)\r\n f.write(first_res.text)\r\n second_res = nextPage.replace('https', 'http')\r\n second_res = requests.get(second_res, headers={'user-agent': random_useragent()})\r\n with open('ss2.html', 'w') as f:\r\n # print(second_res.text)\r\n f.write(second_res.text)\r\n three_yrl = nextPage\r\n response = requests.get(url=three_yrl,\r\n headers=headers,\r\n timeout=3, allow_redirects=False, proxies=self.reutnRequestsProxies())\r\n if response.status_code == 200:\r\n return response,meta\r\n else:\r\n print(response)\r\n except Exception as e:\r\n print(e)\r\n time.sleep(random.randint(2, 5))\r\n\r\n\r\n\r\n #todo:处理url\r\n def process_start(self):\r\n \"\"\"\r\n 处理url\r\n :return:\r\n \"\"\"\r\n url_item = {\r\n 'ht1': \"小户型\",\r\n # 'ht4': \"普通住宅\",\r\n }\r\n city_params = [\r\n {'city': '上海', \"city_num\": \"sh\"},\r\n # {'city': '深圳', \"city_num\": \"sz\"},\r\n ]\r\n for key, value in url_item.items():\r\n for x in city_params:\r\n url = 'https://{city_num}.to8to.com/company/{key}/'.format(city_num=x['city_num'], key=key)\r\n x['city_type'] = key\r\n response = self.process_request(url, meta={'key_con': key, \"value_con\": value}, item=x)\r\n print(response)\r\n self.process_response(response[0], meta=response[1], item=x)\r\n\r\n def start(self):\r\n print('###########################土巴兔爬虫开启##############')\r\n\r\n self.process_start()\r\n\r\ndef now_to_timestamp(digits = 10):\r\n \"\"\"获取13位时间\"\"\"\r\n time_stamp = time.time()\r\n digits = 10 ** (digits -10)\r\n time_stamp = str(round(time_stamp*digits))\r\n return time_stamp\r\n\r\nif __name__ == '__main__':\r\n tubatu = TubatuSpider()\r\n tubatu.start()","repo_name":"917868607/spider_project","sub_path":"spider/Tubatu_spider.py","file_name":"Tubatu_spider.py","file_ext":"py","file_size_in_byte":9273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15181176588","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom WarpUtils import *\nfrom ProcessEXR import *\nfrom CropPano import *\nfrom Consts import *\n\ndef createDataset():\n exr_files = [f for f in listdir(hdr_dataset_dir) if isfile(join(hdr_dataset_dir,f)) and f.endswith(\".exr\")]\n random.shuffle(exr_files)\n for f in exr_files[:4]:\n name = f[:-4]\n processHDR(name)\n\n\ndef processHDR(name):\n # load HDR & segmap\n count = 16\n hdr_data = exr2array(hdr_dataset_dir+name+\".exr\")\n # segmap = 
plt.imread(light_masks_dir+name+\"_light_mask.jpg\")\n    gray_segmap = plt.imread(light_masks_dir+name+\"_light_semantic_map.jpg\")\n    segmap = np.expand_dims(gray_segmap, axis=-1)\n    print(np.unique(segmap))\n    print(segmap.shape)\n\n    # cropped_hdr = []\n    converted_ldr = []\n    cropped_segmap = []\n    for i in range(count):\n        c1 = 0.125*(i-1) # (-0.125, 0, 0.125, ..., 1.75), total: 16\n        # c2 = 0.2\n        c2 = np.clip(np.random.normal(loc=CROP_DISTRIB_MU, scale=CROP_DISTRIB_SIGMA), a_min=0.2, a_max=0.55)\n        # c2 = np.random.uniform(low=0.2, high=0.5)\n        # c2 = np.random.normal(loc=CROP_DISTRIB_MU, scale=CROP_DISTRIB_SIGMA)\n        center_point = np.array([c1, c2]) # camera center point (valid range [0,2])\n        center_row, center_col = crop_center2row_col(center_point)\n        crop_theta, crop_phi = row_col2theta_phi(center_row, center_col, WIDTH, HEIGHT)\n        partial_hdr = nfov.toNFOV(hdr_data, center_point, True).astype('float32')\n        partial_segmap = nfov.toNFOV(segmap, center_point, False)\n        # cropped_hdr.append(partial_hdr)\n        cropped_segmap.append(partial_segmap)\n        print(np.min(cropped_segmap, axis=(0,1)))\n        # print(cropped_segmap[-1].shape)\n        ldrDurand = tonemap_drago.process(partial_hdr)\n        partial_ldr = np.clip(ldrDurand*255, 0, 255).astype('uint8')\n        converted_ldr.append(partial_ldr)\n    for i in range(count):\n        ldr_img_name = ldr_imgs + name + \"_partial_{}.jpg\".format(i)\n        seg_label_name = seg_labels + name + \"_partial_{}_segmap.jpg\".format(i)\n        plt.imsave(ldr_img_name, converted_ldr[i])\n        plt.imsave(seg_label_name, cropped_segmap[i])\n\n\nif __name__ == '__main__':\n    # createDataset()\n    processHDR(name=\"9C4A1707-0f4b3a9a59\")","repo_name":"WinterCyan/Gardner2019","sub_path":"DataPreprocess/CreateSegDataset.py","file_name":"CreateSegDataset.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"16"} +{"seq_id":"22575704143","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport sys\n\ninputFileName = ''\ndelimiter = ','\nhasHeader = False\ntokenizerType = ''\ntokenizedFileName = ''\nremoveDuplicateTokens = False\nrunReplacement = False\nmatrixNumTokenRule = False\nmatrixInitialRule = False\nmu = 0.50\nmuIterate = 0.10\nepsilon = 0.50\nepsilonIterate = 0.00\ncomparator = ''\nbeta = 0\nminBlkTokenLen = 0\nexcludeNumericBlocks = False\nremoveExcludedBlkTokens = False\nsigma = 0\nfatalError = False\ntruthFileName = ''\n\n\ndef convertToBoolean(lineNbr, value):\n    if value == 'True':\n        return True\n    if value == 'False':\n        return False\n    print('**Error: Invalid Boolean value in Parameter File, line:', lineNbr, '->', value)\n    global fatalError\n    fatalError = True\n\n\ndef convertToFloat(lineNbr, value):\n    try:\n        floatValue = float(value)\n    except ValueError:\n        print('**Error: Invalid floating point value in Parameter File, line:', lineNbr, '->', value)\n        global fatalError\n        fatalError = True\n    else:\n        return floatValue\n\n\ndef convertToInteger(lineNbr, value):\n    if value.isdigit():\n        return int(value)\n    else:\n        print('**Error: Invalid integer value in Parameter File, line:', lineNbr, '->', value)\n        global fatalError\n        fatalError = True\n\n\ndef getParms(parmFileName):\n    global fatalError\n    validParmNames = ['inputFileName', 'delimiter', 'hasHeader', 'tokenizerType', 'removeDuplicateTokens',\n                      'runReplacement', 'minFreqStdToken', 'minLenStdToken', 'maxFreqErrToken',\n                      'mu', 'muIterate', 'beta', 'minBlkTokenLen', 'sigma', 'epsilon', 'epsilonIterate',\n                      'excludeNumericBlocks', 'removeExcludedBlkTokens', 
'runClusterMetrics',\n                      'createFinalJoin', 'comparator', 'truthFileName', 'matrixNumTokenRule', 'matrixInitialRule']\n    parmFile = open(parmFileName, 'r')\n    parms = {}\n    lineNbr = 0\n    while True:\n        line = (parmFile.readline()).strip()\n        lineNbr += 1\n        if line == '':\n            break\n        # Skip comment lines in parameter file\n        if line.startswith('#'):\n            continue\n        part = line.split('=')\n        parmName = part[0].strip()\n        if parmName not in validParmNames:\n            print('**Error: Invalid Parameter Name in Parameter File, line:', lineNbr, '->', parmName)\n            fatalError = True\n        parmValue = part[1].strip()\n        if parmName == 'inputFileName':\n            global inputFileName\n            inputFileName = parmValue\n            continue\n        if parmName == 'delimiter':\n            global delimiter\n            if ',;:|\\t'.find(parmValue) >= 0:\n                delimiter = parmValue\n                continue\n            else:\n                print('**Error: Invalid delimiter in Parameter File, line:', lineNbr, '->', parmValue)\n                sys.exit()\n        if parmName == 'hasHeader':\n            global hasHeader\n            hasHeader = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'tokenizerType':\n            global tokenizerType\n            tokenizerType = parmValue\n            continue\n        if parmName == 'removeDuplicateTokens':\n            global removeDuplicateTokens\n            removeDuplicateTokens = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'runReplacement':\n            global runReplacement\n            runReplacement = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'minFreqStdToken':\n            global minFreqStdToken\n            minFreqStdToken = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'minLenStdToken':\n            global minLenStdToken\n            minLenStdToken = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'maxFreqErrToken':\n            global maxFreqErrToken\n            maxFreqErrToken = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'matrixNumTokenRule':\n            global matrixNumTokenRule\n            matrixNumTokenRule = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'matrixInitialRule':\n            global matrixInitialRule\n            matrixInitialRule = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'mu':\n            global mu\n            mu = convertToFloat(lineNbr, parmValue)\n            continue\n        if parmName == 'muIterate':\n            global muIterate\n            muIterate = convertToFloat(lineNbr, parmValue)\n            continue\n        if parmName == 'epsilon':\n            global epsilon\n            epsilon = convertToFloat(lineNbr, parmValue)\n            continue\n        if parmName == 'epsilonIterate':\n            global epsilonIterate\n            epsilonIterate = convertToFloat(lineNbr, parmValue)\n            continue\n        if parmName == 'comparator':\n            global comparator\n            comparator = parmValue\n            continue\n        if parmName == 'beta':\n            global beta\n            beta = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'minBlkTokenLen':\n            global minBlkTokenLen\n            minBlkTokenLen = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'excludeNumericBlocks':\n            global excludeNumericBlocks\n            excludeNumericBlocks = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'removeExcludedBlkTokens':\n            global removeExcludedBlkTokens\n            removeExcludedBlkTokens = convertToBoolean(lineNbr, parmValue)\n            continue\n        if parmName == 'sigma':\n            global sigma\n            sigma = convertToInteger(lineNbr, parmValue)\n            continue\n        if parmName == 'truthFileName':\n            global truthFileName\n            truthFileName = parmValue\n            continue\n    # End of loop, cross checks\n    if beta < 2:\n        print('**Error: beta value ', beta, ' must be at least 2')\n        fatalError = True\n    if sigma <= beta:\n        print('**Error: sigma value ', sigma, ' must be larger than beta value ', beta)\n        fatalError = True\n    if mu <= 0.0 or mu > 
1.00:\n print('**Error: mu value ', mu, ' must be in interval (0.00,1.00]')\n fatalError = True\n if fatalError:\n sys.exit()\n","repo_name":"Adeeba23/CensusBureauNameAddress","sub_path":"Household Graph/oysterer-dwm-graph-8711cd315cf3/GDWM18/DWM10_Parms.py","file_name":"DWM10_Parms.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"3981953300","text":"def ATMAndStudents(students, money):\n length = len(students)\n maxx = 0\n res = [-1]\n count = money\n i = 0\n j = 0\n while j < length and i < length:\n count += students[j]\n if count >= 0 and maxx < (j - i + 1):\n maxx = j - i + 1\n res = [i + 1, j + 1]\n if count < 0:\n count -= students[i]\n i += 1\n if count >= 0:\n j += 1\n else:\n count -= students[j]\n return res\n\ntests = int(input())\nans = []\nfor _ in range(tests):\n a, b = list(map(int, input().split()))\n students = list(map(int, input().split()))\n ans.append(ATMAndStudents(students, b))\nfor a in ans:\n print(*a)","repo_name":"nattigy/competitive_programming","sub_path":"div3 contest-1/F. ATM and Students.py","file_name":"F. ATM and Students.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11521011860","text":"###################################################################################################\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 23 22:05:51 2020\r\n\r\n@author: Srikanth Thirumalasetti (Roll #2019900090)\r\n\"\"\"\r\n\r\n\"\"\" This file is part of Main project on 'Learning representations in Financial domain' \"\"\"\r\n\r\n\"\"\" It evaluates a finetuned BERT model (base-uncased) on binary classification task. \"\"\"\r\n\r\n\"\"\" The script uses SimpleTransformers's ClassificationModel for finetuning. \"\"\"\r\n\r\n\"\"\" We use wandb to optimize our hyper params (learning rate and # of epochs) using \"accuracy\" as the key metric\r\n to evaluate the performance and store the best model.\"\"\"\r\n\r\n\"\"\" The model is:\r\n 1. first trained on the train set (self.trainDataFrame), \r\n 2. 
next, the wandb sweep is evaluated on the validation set (self.evalDataFrame)\r\n \r\n The best model as measured by the maximum accuracy corresponding to the hyper parameter values is saved to folder: self.bestModelOutputDir.\r\n\"\"\"\r\n###################################################################################################\r\nimport glob\r\nimport json\r\nimport logging as log\r\nimport math\r\nimport multiprocessing\r\nimport os\r\nimport pandas\r\nimport re\r\nimport sklearn\r\nimport subprocess\r\nimport sys\r\nimport threading\r\nimport time\r\nimport torch\r\nimport traceback\r\nimport wandb\r\nfrom collections import defaultdict, OrderedDict\r\nfrom filterSourceFilesForTraining import filterAndCopyCorpusFilesUsedToTrainBertOnMLM as filterCorpus\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom preprocess import preprocess_seq # make sure that the pre-process.py file is in the parent folder, else, the script errors out\r\nfrom simpletransformers.classification import (ClassificationModel, ClassificationArgs)\r\n\r\n# This variable is used as header for the train and validation dataframes\r\nglobal HEADER_COLS\r\nHEADER_COLS = [\"tweet\", \"relation\"]\r\n\r\n# This variable is the WandB API key that is used to log training and eval params in real-time to WandB server\r\nglobal WAND_API_KEY, WAND_PROJECT_NAME\r\nWAND_API_KEY = \"01b06361bbf14e2d29e535b7ae84a9f3716365a4\"\r\nWAND_PROJECT_NAME = \"bert-base-finetune-mlm-sec-data-binary-cls\"\r\n\r\n\r\n############################################################################\r\n# This class evaluates a finetuned BERT model on binary classification task.\r\n############################################################################\r\nclass EvalLanguageModelOnBinCls:\r\n def __init__(self, modelNameOrPath, trainFile, evalFile, maxSeqLen, wandb_sweep_config, wandb_sweep_defaults, logLevel):\r\n log.debug(\"Initializing 'EvalLanguageModelOnBinCls' class instance..\")\r\n self.modelType = \"bert\"\r\n self.modelNameOrPath = modelNameOrPath\r\n self.trainFile = trainFile\r\n self.evalFile = evalFile\r\n self.trainDataFrame = None\r\n self.evalDataFrame = None\r\n self.maxSeqLength = maxSeqLen\r\n self.wandbConfig = wandb_sweep_config\r\n self.wandbDefaults = wandb_sweep_defaults\r\n self.modelOutputDir = os.path.join(os.path.split(trainFile)[0], \"finetuned_model_on_bin_cls\")\r\n self.bestModelOutputDir = os.path.join(self.modelOutputDir, \"best_model\")\r\n self.modelCacheDir = os.path.join(self.modelOutputDir, \"cache\")\r\n self.modelFinalEvalResultsFile = os.path.join(self.modelOutputDir, \"model.eval.results\")\r\n self.modelFinalEvalOutputs = os.path.join(self.modelOutputDir, \"model.eval.outputs\")\r\n self.modelFinalWrongPreds = os.path.join(self.modelOutputDir, \"model.predictions.wrong.results\")\r\n self.lock = threading.Lock()\r\n setLogLevel(logLevel)\r\n\r\n def __preprocessSequenceWithoutBreakingSentence(self, sequence):\r\n ##############################################################################################\r\n # This method ensures that if multiple sentences are passed for pre-processing, the sequence\r\n # is pre-processed as individual sentence.\r\n #############################################################################################\r\n try:\r\n seqsPP = []\r\n sequences = sent_tokenize(sequence)\r\n if sequences:\r\n for seq in sequences:\r\n seqsPP.append(preprocess_seq(seq))\r\n if seqsPP:\r\n return \".\".join(seqsPP).strip()\r\n else:\r\n return sequence\r\n except:\r\n exc_type, 
exc_value, exc_traceback = sys.exc_info()\r\n err = f\"Error occurred while pre-processing the sequence '{sequence}'. Error is: {str(exc_type)}; {str(exc_value)}.\"\r\n log.error(err)\r\n return sequence\r\n\r\n def __buildTrainingAndEvalDataFrames(self):\r\n ##############################################################################################\r\n # This method builds training and eval dataframes from the given input training and dev files.\r\n #############################################################################################\r\n try:\r\n tweetsDictTrain = {}\r\n tweetsDictEval = {}\r\n with open(self.trainFile, \"r\", encoding=\"utf-8\") as fR:\r\n tweetsDictTrain = json.load(fR)\r\n log.debug(f\"Finished building training file.\")\r\n with open(self.evalFile, \"r\", encoding=\"utf-8\") as fD:\r\n tweetsDictEval = json.load(fD)\r\n log.debug(f\"Finished building eval file.\")\r\n\r\n # Columns are: 'tweet', 'target_num', 'offset', 'target_cashtag' and 'relation'\r\n # Our classification task is: Given a tweet, tell whether the relation is 0 or 1\r\n if tweetsDictTrain:\r\n log.debug(f\"Started generating pandas dataframe for training..\")\r\n df = pandas.DataFrame.from_dict(tweetsDictTrain)\r\n self.trainDataFrame = df.iloc[0:len(df.index), [0,4]]\r\n self.trainDataFrame.columns = HEADER_COLS\r\n self.trainDataFrame[HEADER_COLS[0]] = self.trainDataFrame[HEADER_COLS[0]].map(lambda sent: self.__preprocessSequenceWithoutBreakingSentence(sent))\r\n\r\n if tweetsDictEval:\r\n log.debug(f\"Started generating pandas dataframe for evaluation..\")\r\n df = pandas.DataFrame.from_dict(tweetsDictEval)\r\n self.evalDataFrame = df.iloc[0:len(df.index), [0,4]]\r\n self.evalDataFrame.columns = HEADER_COLS\r\n self.evalDataFrame[HEADER_COLS[0]] = self.evalDataFrame[HEADER_COLS[0]].map(lambda sent: self.__preprocessSequenceWithoutBreakingSentence(sent))\r\n except:\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n err = f\"Error occurred while building training and eval dataframes. Error is: {str(exc_type)}; {str(exc_value)}.\"\r\n raise Exception(err)\r\n\r\n def finetuneBertOnBinClsTask(self):\r\n ###############################################################################\r\n # This method evaluates the finetuned BERT model on binary classification task.\r\n ###############################################################################\r\n try:\r\n # Build training and eval dataframes\r\n self.__buildTrainingAndEvalDataFrames()\r\n\r\n # Check to make sure that training and eval data frames are built\r\n if self.trainDataFrame is None or self.evalDataFrame is None:\r\n log.error(f\"Error building training and eval dataframes. Cannot evaluate the finetuned model on binary classification task.\")\r\n return\r\n\r\n # Check if CUDA is available for doing training on a GPU system\r\n if torch.cuda.is_available() is False:\r\n log.warning(f\"CUDA libs not found. 
It is preferred to do finetuning on a GPU system with CUDA libs!\")\r\n\r\n            # Build WandB sweep params that are used to automatically pick up the hyper-params during training\r\n            subprocess.run([\"wandb\", \"login\", WAND_API_KEY])\r\n            time.sleep(1)\r\n            sweep_defaults = self.wandbDefaults\r\n            sweep_id = wandb.sweep(self.wandbConfig, project=WAND_PROJECT_NAME)\r\n\r\n            # Start training\r\n            startTime = time.time()\r\n            def train():\r\n                wandb.init(WAND_PROJECT_NAME)\r\n                modelArgs = { \"max_seq_length\": self.maxSeqLength, \"output_dir\": self.modelOutputDir, \"overwrite_output_dir\": True, \"best_model_dir\": self.bestModelOutputDir,\r\n                              \"wandb_project\": WAND_PROJECT_NAME, \"num_train_epochs\": wandb.config.epochs, \"learning_rate\": wandb.config.learning_rate,\r\n                              \"do_lower_case\": True, \"cache_dir\": self.modelCacheDir, \"encoding\": \"utf-8\", \"train_batch_size\": 5, \"eval_batch_size\": 5,\r\n                              \"evaluate_during_training_steps\": 50, \"evaluate_during_training_verbose\": True, \"logging_steps\": 5, \"sliding_window\": True,\r\n                              \"reprocess_input_data\": True, \"evaluate_during_training\": True, \"use_multiprocessing\": True }\r\n\r\n                model = ClassificationModel(self.modelType, self.modelNameOrPath, args=modelArgs, sweep_config=wandb.config, use_cuda=torch.cuda.is_available(),)\r\n\r\n                # Training and evaluation\r\n                try:\r\n                    log.info(f\"Started training/finetuning BERT on binary classification task..\")\r\n                    model.train_model(train_df=self.trainDataFrame, eval_df=self.evalDataFrame, show_running_loss=True,\r\n                                      output_dir=self.modelOutputDir,\r\n                                      mcc=sklearn.metrics.matthews_corrcoef,\r\n                                      f1=sklearn.metrics.f1_score,\r\n                                      acc=sklearn.metrics.accuracy_score,\r\n                                      recall_score=sklearn.metrics.recall_score, )\r\n                    log.info(f\"Finished finetuning and evaluating our fine-tuned model on binary classification task. Check the folder '{self.modelOutputDir}' for finetuned weights.\")\r\n                    log.info(f\"It took {round((time.time() - startTime) / 3600, 1)} hours to finetune and evaluate our fine-tuned model on binary classification task.\")\r\n                except:\r\n                    exc_type, exc_value, exc_traceback = sys.exc_info()\r\n                    err = f\"Error occurred while training and evaluating the finetuned model on binary classification task. Error is: {exc_type}; {exc_value}.\"\r\n                    log.error(err)\r\n\r\n                wandb.join()\r\n\r\n            wandb.agent(sweep_id, function=train)\r\n        except:\r\n            exc_type, exc_value, exc_traceback = sys.exc_info()\r\n            err = f\"** ERROR ** occurred while finetuning a pre-trained BERT model and evaluating it on binary classification task. 
Error is: {exc_type}; {exc_value}.\"\r\n            log.error(err)\r\n\r\n\r\ndef setLogLevel(level):\r\n    ##########################################################\r\n    # This method sets the log level for the default logger.\r\n    ##########################################################\r\n    # Set log level, if set by the user\r\n    # E for Error, D for Debug and I for Info\r\n    if level == \"I\":\r\n        log.basicConfig(level=log.INFO)\r\n    elif level == \"D\":\r\n        log.basicConfig(level=log.DEBUG)\r\n    else:\r\n        level = \"E\"\r\n        log.basicConfig(level=log.ERROR) # default to Error\r\n    print(f\"Setting log level to {level}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    print(\"Total # of arguments passed to main() is {0}\".format(len(sys.argv)))\r\n    if len(sys.argv) < 4:\r\n        print(\"** ERROR ** 1) Finetuned model directory, 2) Training file, and 3) Dev file are required!\")\r\n        print(\"Usage:\\n\\t \")\r\n    else:\r\n        # Get finetuned model folder as given by the user in the command line\r\n        _finetunedModelFolder = sys.argv[1] # set this variable to value: \"bert-base-uncased\" to evaluate using Huggingface's pre-trained BERT embeddings\r\n        if os.path.exists(_finetunedModelFolder) is False:\r\n            print(f\"** ERROR ** Finetuned model folder '{_finetunedModelFolder}' DOES NOT exist!\")\r\n            print(\"Usage:\\n\\t \")\r\n        else:\r\n            # Convert the path to absolute path\r\n            _finetunedModelFolder = os.path.abspath(_finetunedModelFolder)\r\n\r\n            # Get the training and dev files\r\n            _trainFile = sys.argv[2]\r\n            _trainFile = os.path.abspath(_trainFile)\r\n            _devFile = sys.argv[3]\r\n            _devFile = os.path.abspath(_devFile)\r\n\r\n            # Get log level set by the user in the command line\r\n            _logLevel = \"E\" # default to log.ERROR\r\n            if len(sys.argv) == 5:\r\n                _logLevel = sys.argv[4] # override the default log level as set by the user\r\n            setLogLevel(_logLevel)\r\n\r\n            # Evaluate the finetuned model on binary classification task\r\n            try:\r\n                # Start finetuning with different hyper-parameters\r\n                _maxSeqLen = 192 # setting to same sequence value that was used for finetuning the model on MLM objective\r\n                _learningRates = [5e-5, 2e-5, 1e-5] # set three diff LRs\r\n                _epochs = [3, 5, 10] # set total epochs that we'd like to run\r\n                _wandb_sweep_defaults = {'learning_rate': _learningRates[0], 'epochs': _epochs[0]} # set some default values\r\n                _wandb_sweep_config = {'method': 'grid', \"metric\": {\"name\": \"mcc\", \"goal\": \"maximize\"},\r\n                                       'parameters': {'learning_rate': {'values': _learningRates}, 'epochs': {'values': _epochs}},\r\n                                       \"early_terminate\": {\"type\": \"hyperband\", \"min_iter\": 5, },}\r\n\r\n                # Initialize training class and start training\r\n                cls = EvalLanguageModelOnBinCls(_finetunedModelFolder, _trainFile, _devFile, _maxSeqLen, _wandb_sweep_config, _wandb_sweep_defaults, _logLevel)\r\n                cls.finetuneBertOnBinClsTask()\r\n                cls = None\r\n            except:\r\n                exc_type, exc_value, exc_traceback = sys.exc_info()\r\n                err = f\"\\n\\t {exc_type}; {exc_value}\"\r\n                log.error(err)\r\n","repo_name":"Madhvi19/Representations-in-Financial-Domain","sub_path":"BERT/evalOnBinaryClassificationTask.py","file_name":"evalOnBinaryClassificationTask.py","file_ext":"py","file_size_in_byte":14474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9260115186","text":"import json\nfrom unittest.mock import patch\n\nimport pytest\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom model_bakery import baker\nfrom rest_framework import status\n\nfrom 
sme_ptrf_apps.core.choices import RepresentacaoCargo\nfrom sme_ptrf_apps.users.models import Grupo\n\npytestmark = pytest.mark.django_db\n\n\n@pytest.fixture\ndef unidade_diferente(dre):\n return baker.make(\n 'Unidade',\n nome='Escola Unidade Diferente',\n tipo_unidade='EMEI',\n codigo_eol='123459',\n dre=dre,\n sigla='ET2',\n cep='5868120',\n tipo_logradouro='Travessa',\n logradouro='dos Testes',\n bairro='COHAB INSTITUTO ADVENTISTA',\n numero='100',\n complemento='fundos',\n telefone='99212627',\n email='emeijopfilho@sme.prefeitura.sp.gov.br',\n diretor_nome='Amaro Pedro',\n dre_cnpj='63.058.286/0001-86',\n dre_diretor_regional_rf='1234567',\n dre_diretor_regional_nome='Anthony Edward Stark',\n dre_designacao_portaria='Portaria nº 0.000',\n dre_designacao_ano='2017',\n )\n\n\n@pytest.fixture\ndef visao_ue():\n return baker.make('Visao', nome='UE')\n\n\n@pytest.fixture\ndef visao_dre():\n return baker.make('Visao', nome='DRE')\n\n\n@pytest.fixture\ndef visao_sme():\n return baker.make('Visao', nome='SME')\n\n\n@pytest.fixture\ndef permissao1():\n return Permission.objects.filter(codename='view_tipodevolucaoaotesouro').first()\n\n\n@pytest.fixture\ndef permissao2():\n return Permission.objects.filter(codename='view_unidade').first()\n\n\n@pytest.fixture\ndef grupo_1(permissao1, visao_ue):\n g = Grupo.objects.create(name=\"grupo1\")\n g.permissions.add(permissao1)\n g.visoes.add(visao_ue)\n g.descricao = \"Descrição grupo 1\"\n g.save()\n return g\n\n\n@pytest.fixture\ndef grupo_2(permissao2, visao_dre):\n g = Grupo.objects.create(name=\"grupo2\")\n g.permissions.add(permissao2)\n g.visoes.add(visao_dre)\n g.descricao = \"Descrição grupo 2\"\n g.save()\n return g\n\n\n@pytest.fixture\ndef grupo_3(permissao1, permissao2, visao_dre, visao_sme):\n g = Grupo.objects.create(name=\"grupo3\")\n g.permissions.add(permissao1, permissao2)\n g.visoes.add(visao_dre, visao_sme)\n g.descricao = \"Descrição grupo 3\"\n g.save()\n return g\n\n\n@pytest.fixture\ndef usuario_para_teste(\n unidade,\n grupo_1,\n visao_ue):\n\n senha = 'Sgp0418'\n login = '7210418'\n email = 'sme@amcom.com.br'\n User = get_user_model()\n user = User.objects.create_user(username=login, password=senha, email=email)\n user.unidades.add(unidade)\n user.groups.add(grupo_1)\n user.visoes.add(visao_ue)\n user.save()\n return user\n\n\n@pytest.fixture\ndef jwt_authenticated_client_u(client, usuario_para_teste):\n from unittest.mock import patch\n\n from rest_framework.test import APIClient\n api_client = APIClient()\n with patch('sme_ptrf_apps.users.api.views.login.AutenticacaoService.autentica') as mock_post:\n data = {\n \"nome\": \"LUCIA HELENA\",\n \"cpf\": \"62085077072\",\n \"email\": \"luh@gmail.com\",\n \"login\": \"7210418\"\n }\n mock_post.return_value.ok = True\n mock_post.return_value.status_code = 200\n mock_post.return_value.json.return_value = data\n resp = api_client.post('/api/login', {'login': usuario_para_teste.username,\n 'senha': usuario_para_teste.password}, format='json')\n resp_data = resp.json()\n api_client.credentials(HTTP_AUTHORIZATION='JWT {0}'.format(resp_data['token']))\n return api_client\n\n\n@pytest.fixture\ndef usuario_2(\n unidade_diferente,\n grupo_2,\n grupo_3,\n visao_dre,\n visao_sme):\n\n senha = 'Sgp1981'\n login = '7211981'\n email = 'sme1981@amcom.com.br'\n User = get_user_model()\n user = User.objects.create_user(username=login, password=senha, email=email)\n user.unidades.add(unidade_diferente)\n user.groups.add(grupo_2, grupo_3)\n user.visoes.add(visao_dre, visao_sme)\n user.save()\n 
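# note: usuario_2 is created with the DRE and SME views but deliberately without UE;\n    # test_atualizar_usuario_servidor below relies on that to verify the update adds UE.\n    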
return user\n\n\n@pytest.fixture\ndef usuario_3(\n unidade,\n grupo_2,\n visao_dre,\n visao_ue):\n\n senha = 'Sgp8198'\n login = '7218198'\n email = 'sme8198@amcom.com.br'\n User = get_user_model()\n user = User.objects.create_user(username=login, password=senha, email=email, name=\"Arthur Marques\")\n user.unidades.add(unidade)\n user.groups.add(grupo_2)\n user.visoes.add(visao_dre, visao_ue)\n user.save()\n return user\n\n\n@pytest.fixture\ndef jwt_authenticated_client_u2(client, usuario_2):\n from unittest.mock import patch\n\n from rest_framework.test import APIClient\n api_client = APIClient()\n with patch('sme_ptrf_apps.users.api.views.login.AutenticacaoService.autentica') as mock_post:\n data = {\n \"nome\": \"LUCIA HELENA\",\n \"cpf\": \"62085077072\",\n \"email\": \"luh@gmail.com\",\n \"login\": usuario_2.username\n }\n mock_post.return_value.ok = True\n mock_post.return_value.status_code = 200\n mock_post.return_value.json.return_value = data\n resp = api_client.post('/api/login', {'login': usuario_2.username,\n 'senha': usuario_2.password}, format='json')\n resp_data = resp.json()\n api_client.credentials(HTTP_AUTHORIZATION='JWT {0}'.format(resp_data['token']))\n return api_client\n\n\ndef test_consulta_grupos(\n jwt_authenticated_client_u2,\n usuario_2,\n visao_ue,\n visao_dre,\n visao_sme,\n permissao1,\n permissao2,\n grupo_1,\n grupo_2,\n grupo_3):\n\n response = jwt_authenticated_client_u2.get(\"/api/usuarios/grupos/?visao=DRE\", content_type='application/json')\n result = response.json()\n esperado = [\n {\n \"id\": str(grupo_2.id),\n \"nome\": grupo_2.name,\n \"descricao\": grupo_2.descricao\n },\n {\n \"id\": str(grupo_3.id),\n \"nome\": grupo_3.name,\n \"descricao\": grupo_3.descricao\n }]\n\n assert result == esperado\n\n\ndef test_lista_usuarios(\n jwt_authenticated_client_u,\n usuario_para_teste,\n usuario_3,\n visao_ue,\n visao_dre,\n visao_sme,\n permissao1,\n permissao2,\n grupo_1,\n grupo_2):\n\n response = jwt_authenticated_client_u.get(\"/api/usuarios/?visao=DRE\", content_type='application/json')\n result = response.json()\n esperado = [\n {\n 'id': usuario_3.id,\n 'username': usuario_3.username,\n 'email': usuario_3.email,\n 'name': usuario_3.name,\n 'url': f'http://testserver/api/esqueci-minha-senha/{usuario_3.username}/',\n 'tipo_usuario': usuario_3.tipo_usuario,\n 'groups': [{'id': grupo_2.id, 'name': grupo_2.name, 'descricao': grupo_2.descricao}]\n }\n ]\n assert result == esperado\n\n\ndef test_filtro_por_grupo_lista_usuarios(\n jwt_authenticated_client_u2,\n usuario_2,\n usuario_3,\n visao_ue,\n visao_dre,\n visao_sme,\n permissao1,\n permissao2,\n grupo_1,\n grupo_2):\n\n response = jwt_authenticated_client_u2.get(\n f\"/api/usuarios/?visao=DRE&groups__id={grupo_2.id}\", content_type='application/json')\n result = response.json()\n esperado = [\n {\n 'id': usuario_3.id,\n 'username': '7218198',\n 'email': 'sme8198@amcom.com.br',\n 'name': 'Arthur Marques',\n 'url': 'http://testserver/api/esqueci-minha-senha/7218198/',\n 'tipo_usuario': usuario_3.tipo_usuario,\n 'groups': [\n {\n 'id': grupo_2.id,\n 'name': 'grupo2',\n 'descricao': 'Descrição grupo 2'\n }\n ]\n }\n ]\n assert result == esperado\n\n\ndef test_filtro_por_nome_lista_usuarios(\n jwt_authenticated_client_u2,\n usuario_2,\n usuario_3,\n visao_ue,\n visao_dre,\n visao_sme,\n permissao1,\n permissao2,\n grupo_1,\n grupo_2):\n\n response = jwt_authenticated_client_u2.get(f\"/api/usuarios/?visao=DRE&search=Arth\", content_type='application/json')\n result = response.json()\n esperado = [\n {'id': 
usuario_3.id,\n 'username': '7218198',\n 'email': 'sme8198@amcom.com.br',\n 'name': 'Arthur Marques',\n 'url': 'http://testserver/api/esqueci-minha-senha/7218198/',\n 'tipo_usuario': usuario_3.tipo_usuario,\n 'groups': [\n {\n 'id': grupo_2.id,\n 'name': 'grupo2',\n 'descricao': 'Descrição grupo 2'}]\n }\n ]\n assert result == esperado\n\n\ndef test_criar_usuario_servidor(\n jwt_authenticated_client_u,\n grupo_1,\n grupo_2,\n visao_dre):\n\n payload = {\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'username': \"9876543\",\n 'name': \"Lukaku Silva\",\n 'email': 'lukaku@gmail.com',\n 'visao': \"DRE\",\n 'groups': [\n grupo_1.id,\n grupo_2.id\n ]\n }\n response = jwt_authenticated_client_u.post(\n \"/api/usuarios/\", data=json.dumps(payload), content_type='application/json')\n result = response.json()\n\n esperado = {\n 'username': '9876543',\n 'email': 'lukaku@gmail.com',\n 'name': 'Lukaku Silva',\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'groups': [grupo_1.id, grupo_2.id]\n }\n User = get_user_model()\n u = User.objects.filter(username='9876543').first()\n\n assert len(u.visoes.all()) > 0\n assert response.status_code == status.HTTP_201_CREATED\n assert result == esperado\n\n\ndef test_criar_usuario_servidor_sem_email_e_sem_nome(\n jwt_authenticated_client_u,\n grupo_1,\n grupo_2,\n visao_dre):\n\n payload = {\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'username': \"9876543\",\n 'name': \"\",\n 'email': \"\",\n 'visao': \"DRE\",\n 'groups': [\n grupo_1.id,\n grupo_2.id\n ]\n }\n response = jwt_authenticated_client_u.post(\n \"/api/usuarios/\", data=json.dumps(payload), content_type='application/json')\n result = response.json()\n esperado = {\n 'username': '9876543',\n 'email': '',\n 'name': '',\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'groups': [grupo_1.id, grupo_2.id]\n }\n User = get_user_model()\n u = User.objects.filter(username='9876543').first()\n\n assert len(u.visoes.all()) > 0\n assert response.status_code == status.HTTP_201_CREATED\n assert result == esperado\n\n\ndef test_atualizar_usuario_servidor(\n jwt_authenticated_client_u,\n usuario_3,\n usuario_2,\n visao_ue,\n visao_dre,\n visao_sme,\n grupo_1,\n grupo_2):\n\n assert not usuario_2.visoes.filter(nome='UE').first()\n\n payload = {\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'username': usuario_2.username,\n 'name': usuario_2.name,\n 'email': 'novoEmail@gmail.com',\n 'visao': \"UE\",\n 'groups': [\n grupo_1.id\n ]\n }\n\n response = jwt_authenticated_client_u.put(\n f\"/api/usuarios/{usuario_2.id}/\", data=json.dumps(payload), content_type='application/json')\n result = response.json()\n\n esperado = {\n 'username': usuario_2.username,\n 'email': 'novoEmail@gmail.com',\n 'name': usuario_2.name,\n 'tipo_usuario': RepresentacaoCargo.SERVIDOR.name,\n 'groups': [grupo_1.id]\n }\n\n assert usuario_2.visoes.filter(nome='UE').first()\n assert result == esperado\n\n\ndef test_deletar_usuario_servidor(\n jwt_authenticated_client_u,\n usuario_3\n):\n\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n assert User.objects.filter(id=usuario_3.id).exists()\n\n response = jwt_authenticated_client_u.delete(\n f\"/api/usuarios/{usuario_3.id}/\", content_type='application/json')\n assert not User.objects.filter(id=usuario_3.id).exists()\n\n\ndef test_consulta_informacao_usuario(jwt_authenticated_client_u):\n path = 'sme_ptrf_apps.users.api.views.user.SmeIntegracaoService.informacao_usuario_sgp'\n with patch(path) as mock_get:\n data = {\n 'cpf': 
'12808888813',\n            'nome': 'LUCIMARA CARDOSO RODRIGUES',\n            'codigoRf': '7210418',\n            'email': 'tutu@gmail.com',\n            'emailValido': True\n        }\n\n        mock_get.return_value = data\n\n        username = '7210418'\n        response = jwt_authenticated_client_u.get(f'/api/usuarios/consultar/?username={username}')\n        result = json.loads(response.content)\n        assert response.status_code == status.HTTP_200_OK\n        assert result == data\n\n\ndef test_consulta_informacao_usuario_sem_username(jwt_authenticated_client_u):\n    response = jwt_authenticated_client_u.get(f'/api/usuarios/consultar/?username=')\n    result = json.loads(response.content)\n    assert response.status_code == status.HTTP_400_BAD_REQUEST\n    assert result == \"Parâmetro username obrigatório.\"\n\n\ndef test_lista_usuarios_por_unidade(\n        jwt_authenticated_client_u,\n        usuario_para_teste,\n        usuario_2,\n        usuario_3,\n        associacao,\n        grupo_1,\n        grupo_2):\n\n    response = jwt_authenticated_client_u.get(f\"/api/usuarios/?associacao_uuid={associacao.uuid}\", content_type='application/json')\n    result = response.json()\n    esperado = [\n        {\n            'id': usuario_3.id,\n            'name': 'Arthur Marques',\n            'tipo_usuario': 'Servidor',\n            'url': 'http://testserver/api/esqueci-minha-senha/7218198/',\n            'username': '7218198',\n            'email': 'sme8198@amcom.com.br',\n            'groups': [\n                {\n                    'descricao': 'Descrição grupo 2',\n                    'id': grupo_2.id,\n                    'name': 'grupo2'\n                }],\n        },\n        {\n            'id': usuario_para_teste.id,\n            'name': 'LUCIA HELENA',\n            'tipo_usuario': 'Servidor',\n            'url': 'http://testserver/api/esqueci-minha-senha/7210418/',\n            'username': '7210418',\n            'email': 'luh@gmail.com',\n            'groups': [\n                {\n                    'descricao': 'Descrição grupo 1',\n                    'id': grupo_1.id,\n                    'name': 'grupo1'\n                }],\n        }\n\n    ]\n    print(result)\n    assert result == esperado\n","repo_name":"ollyvergithub/SME-PTRF-BackEnd","sub_path":"sme_ptrf_apps/users/tests/test_api_user/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":14682,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11644009512","text":"import os\nimport sys\nimport json\n\nos_folder = os\ndefault_name = ''\n\n\ndef rename_files(folder_path, extension, new_name):\n    os_folder.chdir(folder_path)\n    count = 1\n    success = False\n    for file in os_folder.listdir():\n        file_name, file_extension = os_folder.path.splitext(file)\n        if file_extension == extension:\n            name_to_give = new_name + str(count) + file_extension\n            os_folder.rename(file, name_to_give)\n            count = int(count) + 1\n            success = True\n    return success\n\ndef delete_files(folder_path, extension):\n    os_folder.chdir(folder_path)\n    success = False\n    for file in os_folder.listdir():\n        file_name, file_extension = os_folder.path.splitext(file)\n        if file_extension == extension:\n            os_folder.remove(file)\n            success = True\n    return success\n\nif __name__ == \"__main__\":\n    raw_data = sys.argv[1]\n    data = eval(raw_data)\n\n    # initialize variables\n    folder = data['folder']\n    action = data['action']\n    extension = data['extension']\n    name = data['name']\n    # folder = folder[2:]\n\n    # rename files\n    if action == 'rename':\n        rename = rename_files(folder, extension, name)\n        if rename:\n            message = {'message': 'success'}\n            print(json.dumps(message))\n\n    # delete files\n    elif action == 'delete':\n        delete = delete_files(folder, extension)\n        message = {'message': 'success'}\n        
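# note: unlike the rename branch above, the boolean returned by delete_files()\n        # is not checked here, so 'success' is reported even if no files matched.\n        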
print(json.dumps(message))\n\n\n","repo_name":"arhinfulemmanuel/file-renamer","sub_path":"Backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22606612058","text":"# -*- coding: utf-8 -*-\n\nimport cv2\n\nfrom os.path import basename\n\nfrom ..common.configuration import (\n PCT_FIT_BUFFER,\n PCT_HACK_WINDOW_DELAY,\n)\nfrom ..datamanagement.files import (\n get_edited_image_filepath,\n)\nfrom .imageprocessing import (\n crop_image,\n resize_image_width,\n rotate_image,\n compose_images,\n find_dominant_contours,\n collected_extrema,\n)\n\nclass PctComposerResponse:\n\n def get_response(self):\n return self._response\n \n def get_messages(self):\n return self._messages\n \n def get_warnings(self):\n return self._warnings\n \n #\n # Private\n #\n \n def __init__(self, response, messages=[], warnings=[]):\n self._response = response\n self._messages = messages\n self._warnings = warnings\n\nclass BaseComposerError(Exception):\n pass\n\nclass BaseComposer:\n def __init__(self, message_writer=None, debug_writer=None):\n self._debug = False\n self._message_writer = message_writer\n self._debug_writer = debug_writer\n \n def _log(self, msg):\n if self._message_writer:\n self._message_writer.write(msg)\n \n def _log_debug(self, msg):\n if self._debug_writer and self._debug:\n self._debug_writer.write(msg)\n\nclass PctComposerError(BaseComposerError):\n pass\n\nclass PctComposer(BaseComposer):\n \n def prepare(self, debug=False):\n self._debug = debug\n self._log_debug('Preparing images...')\n for image_composer in self._get_composers():\n image_composer.prepare(debug)\n self._log_debug('Image preparation complete.')\n \n def compose(self, debug=False):\n self._debug = debug\n self._log_debug('Composing image...')\n if not self._create_composition():\n self._log('No composition could be created.')\n return False\n return True\n \n def refresh_previews(self, width, startx=0, starty=0, margin=0,\n debug=False):\n self._debug = debug\n self._log_debug('Refreshing previews...')\n x = startx\n y = starty\n for composer in self._get_composers():\n composer.refresh_preview(x, y, width, debug)\n x += width + margin\n\n def refresh_composition(self, width, startx=0, starty=0, debug=False):\n self._debug = debug\n self._log_debug('Refreshing composition...')\n return self._refresh_composition(width, startx, starty)\n\n def reindex_image(self, index_in, index_out, debug=False):\n self._debug = debug\n self._log_debug('Swapping images {} and {}'.format(index_in, index_out))\n if self._check_index(index_in) and self._check_index(index_out):\n return self._reindex_image(index_in, index_out)\n return False\n \n def check_index(self, index, debug=False):\n self._debug = debug\n self._log_debug('Checking index {}'.format(index))\n return self._check_index(index)\n \n def fit(self, index, strength, debug=False):\n self._debug = debug\n self._log_debug('Fitting image {} at {}'.format(index, strength))\n if not self._check_index(index):\n self._log('Invalid index.')\n return False\n return self._get_composer(index).fit(strength, debug)\n \n def fit_all(self, strength, debug=False):\n self._debug = debug\n self._log_debug('Fitting images at {}'.format(strength))\n return self._fit_all(strength)\n \n def rotate(self, index, angle, debug=False):\n self._debug = debug\n self._log_debug('Rotating image {} by {}'.format(index, angle))\n if not self._check_index(index):\n 
self._log('Invalid index.')\n return False\n return self._get_composer(index).rotate(angle, debug)\n \n def save(self, filepath, metafile=None, debug=False):\n self._debug = debug\n self._log_debug('Saving {}'.format(filepath))\n \n # Composed\n if not self._save_composed(filepath):\n self._log('Unable to save {}'.format(filepath))\n return False\n \n # Edited\n image_files = self._save_changed_images()\n if not image_files:\n self._log('Unable to save edited image files')\n return False\n \n # Metadata\n if metafile is not None:\n if not self._save_metadata(metafile, image_files, filepath):\n self._log('Unable to save {}'.format(metafile))\n return False\n \n return True\n \n def undo(self, index, debug=False):\n self._debug = debug\n self._log_debug('Undoing action on image {}.'.format(index))\n if not self._check_index(index):\n self._log('Invalid index.')\n return False\n return self._get_composer(index).undo(debug)\n \n def redo(self, index, debug=False):\n self._debug = debug\n self._log_debug('Redoing action on image {}.'.format(index))\n if not self._check_index(index):\n self._log('Invalid index.')\n return False\n return self._get_composer(index).redo(debug)\n \n def cleanup(self, debug=False):\n self._debug = debug\n self._log_debug('Cleaning up composer...')\n return self._cleanup()\n \n #\n # Private\n #\n \n def __init__(self, image_files, message_writer=None, debug_writer=None):\n super(PctComposer, self).__init__(message_writer, debug_writer)\n self._init_image_composers(image_files)\n \n self._composition = None\n self._window = None\n\n def _init_image_composers(self, image_files):\n self._indexed_images = {}\n self._image_composers = {}\n for index, image in enumerate(image_files):\n self._indexed_images[index] = image\n self._image_composers[image] = ImgComposer(\n image,\n self._message_writer,\n self._debug_writer,\n )\n return self._image_composers\n\n def _init_window(self, name=None):\n self._destroy_window()\n if name is not None:\n self._window = name\n else:\n self._window = ' + '.join(\n [c.get_window() for c in self._get_composers()]\n )\n cv2.namedWindow(self._window)\n \n def _destroy_window(self):\n if self._window is not None:\n cv2.destroyWindow(self._window)\n self._window = None\n\n def _check_index(self, index):\n if index in self._indexed_images:\n return True\n return False\n\n def _get_composer(self, index):\n return self._image_composers[self._indexed_images[index]]\n \n def _get_composers(self, ordered=True):\n indices = list(self._indexed_images.keys())\n if ordered:\n indices.sort()\n composers = []\n for index in indices:\n composers.append(\n self._image_composers[self._indexed_images[index]]\n )\n return composers\n \n def _reindex_image(self, index_in, index_out):\n temp = self._indexed_images[index_in]\n self._indexed_images[index_in] = self._indexed_images[index_out]\n self._indexed_images[index_out] = temp\n return True\n \n def _create_composition(self):\n images = [c.get_image() for c in self._get_composers()]\n min_height = min([img.shape[0] for img in images])\n self._composition = compose_images(images, min_height)\n return True\n \n def _refresh_composition(self, width, x=0, y=0):\n self._init_window()\n preview = resize_image_width(self._composition, width)\n self._show_image(preview, x, y)\n \n def _show_image(self, image, x=None, y=None):\n cv2.imshow(self._window, image)\n if x is not None and y is not None:\n cv2.moveWindow(self._window, x, y)\n cv2.waitKey(PCT_HACK_WINDOW_DELAY)\n \n def _save_changed_images(self):\n filepaths 
= []\n for composer in self._get_composers():\n filepaths.append(composer.save(self._debug))\n return filepaths\n \n def _save_composed(self, filepath):\n cv2.imwrite(filepath, self._composition)\n return True\n \n def _save_metadata(self, filepath, image_files, composed_file):\n fp = open(filepath, 'w')\n fp.write(composed_file + '\\n')\n for f in image_files:\n fp.write(f + '\\n')\n fp.close()\n return True\n \n def _fit_all(self, strength):\n for c in self._get_composers():\n if not c.fit(strength, self._debug):\n return False\n return True\n \n def _cleanup(self):\n # Cleans up all of the preview images\n for c in self._get_composers():\n if not c.cleanup(self._debug):\n return False\n \n # Cleans up any composed image\n self._destroy_window()\n \n return True\n \nclass ImgComposerError(BaseComposerError):\n pass\n\nclass ImgComposer(BaseComposer):\n \n def prepare(self, debug=False):\n self._debug = debug\n self._log('Preparing {}'.format(self._image_file))\n self._prepare_image()\n self._prepare_window()\n \n def refresh_preview(self, x, y, width, debug=False):\n self._debug = debug\n preview = resize_image_width(self._current_image(), width)\n self._show_image(preview, x, y)\n \n def get_image(self):\n return self._current_image().copy()\n \n def get_window(self):\n return self._window\n \n def fit(self, strength, debug=False):\n self._debug = debug\n self._log_debug('Fitting {}'.format(self._image_file))\n if self._fit(strength):\n self._init_redo_images()\n return True\n return False\n \n def rotate(self, angle, debug=False):\n self._debug = debug\n self._log_debug('Rotating {}'.format(self._image_file))\n if self._rotate(angle):\n self._init_redo_images()\n return True\n return False\n \n def save(self, debug=False):\n self._debug = debug\n self._log_debug('Saving edited {}'.format(self._image_file))\n filepath = self._get_save_filepath()\n if self._save(filepath):\n return filepath\n return None\n \n def undo(self, debug=False):\n self._debug = debug\n self._log_debug('Undoing action on {}'.format(self._image_file))\n return self._undo()\n \n def redo(self, debug=False):\n self._debug = debug\n self._log_debug('Redoing action on {}'.format(self._image_file))\n return self._redo()\n \n def cleanup(self, debug=False):\n self._debug = debug\n self._log_debug('Cleaning up image composer')\n return self._cleanup()\n \n #\n # Private\n #\n \n def __init__(self, image_file, message_writer=None, debug_writer=None):\n super(ImgComposer, self).__init__(message_writer, debug_writer)\n self._init_images()\n self._init_redo_images()\n \n self._image_file = image_file\n self._window = None\n \n def _init_images(self):\n self._images = []\n \n def _init_redo_images(self):\n self._redo_images = []\n \n def _add_image(self, image):\n self._images.append(image)\n \n def _add_redo_image(self, image):\n self._redo_images.append(image)\n \n def _current_image(self):\n if len(self._images) < 1:\n return None\n return self._images[-1]\n \n def _pop_image(self):\n if len(self._images) < 2:\n return None\n return self._images.pop()\n \n def _pop_redo_image(self):\n if len(self._redo_images) < 1:\n return None\n return self._redo_images.pop()\n \n def _prepare_image(self):\n self._log_debug('Preparing image {}'.format(self._image_file))\n self._add_image(cv2.imread(self._image_file))\n \n def _prepare_window(self):\n self._log_debug('Preparing window for {}'.format(self._image_file))\n self._window = basename(self._image_file)\n cv2.namedWindow(self._window)\n \n def _destroy_window(self):\n if 
self._window is not None:\n cv2.destroyWindow(self._window)\n self._window = None\n \n def _show_image(self, image=None, x=None, y=None):\n self._log_debug('Showing image {}'.format(self._image_file))\n if image is None:\n cv2.imshow(self._window, self._current_image())\n else:\n cv2.imshow(self._window, image)\n if x is not None and y is not None:\n cv2.moveWindow(self._window, x, y)\n cv2.waitKey(PCT_HACK_WINDOW_DELAY)\n\n def _get_save_filepath(self):\n return get_edited_image_filepath(self._image_file)\n \n def _fit(self, strength):\n contours = find_dominant_contours(self._current_image(), strength)\n if contours is None:\n return False\n left, right, top, bottom = collected_extrema(contours)\n fitted = crop_image(\n self._current_image(),\n left,\n right,\n top,\n bottom,\n PCT_FIT_BUFFER,\n )\n if fitted is None:\n return False\n self._add_image(fitted)\n return True\n \n def _rotate(self, angle):\n rotated = rotate_image(self._current_image(), angle)\n self._add_image(rotated)\n return True\n\n def _save(self, filepath=None):\n cv2.imwrite(filepath, self._current_image())\n return True\n \n def _undo(self):\n image = self._pop_image()\n if image is not None:\n self._add_redo_image(image)\n return True\n return False\n \n def _redo(self):\n image = self._pop_redo_image()\n if image is not None:\n self._add_image(image)\n return True\n return False\n\n def _cleanup(self):\n self._destroy_window()\n return True","repo_name":"jasonbriceno/pct","sub_path":"pct/composer/composer.py","file_name":"composer.py","file_ext":"py","file_size_in_byte":14077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18727172157","text":"a = False\nb = True\nif(a):\n print(\"A is True\") # indentation is important in python\n if(b):\n print(\"B is also true\")\nelif(9 > 8):\n user = input(\"Enter the message\")\n print(user)\nif(True and True):\n print(\"True\")\n","repo_name":"Ramlala-Yadav-Git/Python","sub_path":"ch9/ifStatement.py","file_name":"ifStatement.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"34822432081","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 28 09:57:35 2019\n\n@author: Kenton\n\"\"\"\n\nfrom gurobipy import *\n\nfrom .Diet import *\n\ndef farmer_jones():\n # model\n m = Model('Farmer Jones')\n\n # variables\n x_ch = m.addVar()\n x_pl = m.addVar()\n\n # objective\n m.setObjective(4*x_ch + 2*x_pl, GRB.MAXIMIZE)\n\n # constraints\n m.addConstr(20*x_ch + 50*x_pl <= 480) # time\n m.addConstr(4*x_ch + x_pl <= 30) # eggs\n m.addConstr(0.26*x_ch + 0.2*x_pl <= 5) # milk\n\n m.optimize()\n\n print(x_ch.x, x_pl.x)\n\ndef farmer_jones_2():\n C = ['chocolate', 'plain']\n I = ['time', 'eggs', 'milk']\n\n revenue = [4, 2]\n usage = [[20, 50],\n [4, 1],\n [0.25, 0.2]]\n available = [480, 30, 5]\n\n m = Model('Farmer Jones')\n\n X = [m.addVar() for _ in C]\n\n m.setObjective(quicksum(revenue[c]*X[c] for c in range(len(C))),\n GRB.MAXIMIZE)\n\n for i in range(len(I)):\n m.addConstr(quicksum(usage[i][c]*X[c] for c in range(len(C))) <= available[i])\n\n m.optimize()\n\n for x in X:\n print(x.x)\n\ndef stigler():\n m = Model('Stigler Diet')\n\n X = [m.addVar() for f in F]\n\n for n in N:\n m.addConstr(DMIN[n] <= quicksum(NV[f][n]*X[f] for f in F))\n m.addConstr(quicksum(NV[f][n]*X[f] for f in F) <= DMAX[n])\n\n m.setObjective(quicksum(C[f]*X[f] for f in F), GRB.MINIMIZE)\n\n m.optimize()\n\n print('\\n'.join([Food[i] + ': ' 
+ str(x.x) for i, x in enumerate(X)]))\n\nif __name__ == \"__main__\":\n    farmer_jones()\n\n","repo_name":"katrinafyi/math3202","sub_path":"tutorials/tutorial_1.py","file_name":"tutorial_1.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"73963701449","text":"\"\"\"\nPROBLEMA #3:\nThe prime factors of 13195 are 5, 7, 13 and 29.\nWhat is the largest prime factor of the number 600851475143 ?\n\"\"\"\n\n\ndef own_dividers(number):\n    dividers = []\n    divider = 1\n    while divider < number:\n        if (number % divider) == 0:\n            dividers.append(divider)\n        divider += 1\n    return dividers\n\n\ndef prime_factor(number):\n    sum_result = 0\n    for dividers in own_dividers(number):\n        sum_result += dividers\n    if sum_result == 1:\n        return True\n    else:\n        return False\n\n\nn = 600851475143\nlist_of_prime_factors = []\nfor num in own_dividers(n):\n    if prime_factor(num):\n        list_of_prime_factors.append(num)\n\nprint(f'The prime factors of {n} are: \\n {list_of_prime_factors}')\n","repo_name":"Edesalher/PyCharm","sub_path":"ChallengeProblems/ChallengeProblem3.py","file_name":"ChallengeProblem3.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71047072967","text":"#importing modules\r\nimport pygame\r\nimport sys\r\nimport random\r\nimport time\r\nfrom pygame import mixer\r\nfrom pygame.locals import *\r\n\r\n#initializing pygame\r\npygame.init()\r\n\r\n#resolution of the screen\r\nWIDTH=800\r\nHEIGHT=600\r\nscreen=pygame.display.set_mode((WIDTH,HEIGHT))\r\n\r\n#caption and icon\r\npygame.display.set_caption(\"Space Rocket\")\r\nicon=pygame.image.load(\"Images\\\\icon.png\")\r\npygame.display.set_icon(icon)\r\n\r\n#player\r\nplayer_image=pygame.image.load(\"Images\\\\rocket.png\")\r\nplayer_size=30\r\nplayer_pos=[int(WIDTH/2),int(HEIGHT-3*player_size)]\r\n\r\n#enemy\r\nenemy_image=pygame.image.load(\"Images\\\\asteroid.png\")\r\nenemy_size = 50\r\nenemy_pos = [random.randint(0,WIDTH-enemy_size),random.randint(0,HEIGHT)]\r\nenemy_list=[enemy_pos]\r\nenemy_speed=0\r\n\r\n#background image\r\nbackground = pygame.image.load(\"Images\\\\background.png\")\r\n\r\n#background sound\r\nmixer.music.load(\"Sounds\\\\background.ogg\")\r\nmixer.music.play(-1)\r\n\r\n#basic\r\nscore=0\r\ngame_over=False\r\nclock=pygame.time.Clock()\r\n\r\n#fonts\r\nmyFont=pygame.font.SysFont(\"calibri\",35, bold=True, italic=True)\r\ngame_over_font=pygame.font.Font(\"Fonts\\\\font.otf\",60,italic=True)\r\n\r\ndef speed(score, enemy_speed):\r\n    if score<10:\r\n        enemy_speed=5\r\n    elif score<20:\r\n        enemy_speed=6\r\n    elif score<30:\r\n        enemy_speed=9\r\n    elif score<50:\r\n        enemy_speed=13\r\n    elif score<65:\r\n        enemy_speed=16\r\n    else:\r\n        enemy_speed=20\r\n    return enemy_speed\r\ndef drop_enemies(enemy_list):\r\n    delay = random.random()\r\n    if len(enemy_list)<8 and delay<0.06:\r\n        x_pos=random.randint(0,WIDTH-enemy_size)\r\n        y_pos=0\r\n        enemy_list.append([x_pos,y_pos])\r\ndef draw_enemies(enemy_list):\r\n    for enemy_pos in enemy_list:\r\n        screen.blit(enemy_image,(enemy_pos[0],enemy_pos[1]))\r\ndef new_enemy_pos(enemy_list, score):\r\n    for idx, enemy_pos in enumerate(enemy_list):\r\n        if enemy_pos[1]>=0 and enemy_pos[1]<HEIGHT:\r\n            enemy_pos[1]+=enemy_speed\r\n        else:\r\n            enemy_list.pop(idx)\r\n            score+=1\r\n    return score\r\ndef collision(player_pos, enemy_pos):\r\n    p_x=player_pos[0]\r\n    p_y=player_pos[1]\r\n    e_x=enemy_pos[0]\r\n    e_y=enemy_pos[1]\r\n    if(e_x>=p_x and e_x<(p_x+player_size)) or (p_x>=e_x and p_x<(e_x+enemy_size)):\r\n        if(e_y>=p_y and e_y<(p_y+player_size)) or (p_y>=e_y and p_y<(e_y+enemy_size)):\r\n            return True\r\n    return False\r\ndef multiple_collision(enemy_list, player_pos):\r\n    for enemy_pos in enemy_list:\r\n        p_x=player_pos[0]\r\n        p_y=player_pos[1]\r\n        e_x=enemy_pos[0]\r\n        e_y=enemy_pos[1]\r\n        if(e_x>=p_x and e_x<(p_x+player_size)) or (p_x>=e_x and p_x<(e_x+enemy_size)):\r\n            if(e_y>=p_y and e_y<(p_y+player_size)) or (p_y>=e_y and p_y<(e_y+enemy_size)):\r\n                explosion=mixer.Sound(\"Sounds\\\\explosion.wav\")\r\n                mixer.music.stop()\r\n                
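# stop the looping background track before the one-shot explosion;\r\n                # pygame's mixer plays Sound objects on any free channel.\r\n                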
explosion.play()\r\n                return True\r\n    return False\r\n\r\ndef gameover(game_over, score):\r\n    screen.blit(background,(0,0))\r\n    text=\" Game Over !\"\r\n    label=game_over_font.render(text, 1, (255, 153, 102))\r\n    screen.blit(label,(int(WIDTH/2)-150,int(HEIGHT/2)-30))\r\n    text=\"Your Score: \"+str(score)\r\n    label=game_over_font.render(text, 1, (255, 153, 102))\r\n    screen.blit(label,(int(WIDTH/2)-150,int(HEIGHT/2)+20))\r\n\r\nrunning = True\r\nwhile(running == True):\r\n    if game_over==False:\r\n        for event in pygame.event.get():\r\n            if event.type==pygame.QUIT:\r\n                sys.exit()\r\n            if event.type==pygame.KEYDOWN:\r\n                x=player_pos[0]\r\n                y=player_pos[1]\r\n                if event.key==pygame.K_LEFT:\r\n                    if (x>0):\r\n                        x-=player_size+10\r\n                elif event.key==pygame.K_RIGHT:\r\n                    if(x<700):\r\n                        x+=player_size+10\r\n                elif event.key==pygame.K_UP:\r\n                    if(y>30):\r\n                        y-=player_size+10\r\n                elif event.key==pygame.K_DOWN:\r\n                    if(y<500):\r\n                        y+=player_size+10\r\n                player_pos=[x,y]\r\n        screen.blit(background,(0,0))\r\n        game_over=collision(player_pos,enemy_pos)\r\n        drop_enemies(enemy_list)\r\n        score=new_enemy_pos(enemy_list, score)\r\n        enemy_speed=speed(score, enemy_speed)\r\n        text=\"Your Score: \"+str(score)\r\n        label=myFont.render(text, 1, (204, 255, 51))\r\n        screen.blit(label,(int(WIDTH-250),0))\r\n        game_over=multiple_collision(enemy_list,player_pos)\r\n        draw_enemies(enemy_list)\r\n        screen.blit(player_image,(player_pos[0],player_pos[1]))\r\n        clock.tick(30)\r\n        pygame.display.update()\r\n    elif game_over==True:\r\n        gameover(game_over, score)\r\n        pygame.display.update()\r\n        for event in pygame.event.get():\r\n            if event.type==pygame.QUIT:\r\n                running = False\r\n                pygame.display.quit()\r\n                pygame.quit()\r\n                sys.exit()\r\n\r\n    pygame.display.update()\r\n","repo_name":"DebRC/Space-Rocket","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"40094065428","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 15:50:27 2019\n@author: matthew\n\nThese functions create the model and all of the custom model metrics we use.\nHeavy use of tensorflow and keras.\n\"\"\"\n\nimport data\n#import max_subarray_tf\nimport tensorflow.keras.models as models\nimport tensorflow.keras.layers as layers\nimport tensorflow.keras.optimizers as kOpt\nfrom tensorflow.keras import backend as keras\nimport tensorflow as tf\n#import tensorflow_probability as tfp\n\n#define a few parameters\nbase_n=5\np=16\n#this function is just shorthand for the base-2 exponential function\ndef f(x):\n    return 2**x\n\ndef unet(pretrained_weights = None,input_shape = (224,224,1)):\n    #see Ronneberger et al. for the architecture description\n    \n    inputs = layers.Input(input_shape,name='image_input')\n    conv1 = layers.Conv2D(f(base_n), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n    conv1 = layers.Conv2D(f(base_n), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n    #pool1= BatchNormalization(axis=3)(pool1)\n    \n    conv2 = layers.Conv2D(f(base_n+1), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n    #conv2= BatchNormalization(axis=3)(conv2)\n    conv2 = layers.Conv2D(f(base_n+1), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n    #pool2= BatchNormalization(axis=3)(pool2)\n    \n    conv3 = 
layers.Conv2D(f(base_n+2), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n #conv3= BatchNormalization(axis=3)(conv3)\n conv3 = layers.Conv2D(f(base_n+2), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n #pool3= BatchNormalization(axis=3)(pool3)\n \n conv4 = layers.Conv2D(f(base_n+3), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n #conv4= BatchNormalization(axis=3)(conv4)\n conv4 = layers.Conv2D(f(base_n+3), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n drop4 = layers.Dropout(0.5)(conv4)\n pool4 = layers.MaxPooling2D(pool_size=(2, 2))(drop4)\n #pool4= BatchNormalization(axis=3)(pool4)\n \n conv5 = layers.Conv2D(f(base_n+4), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n conv5 = layers.Conv2D(f(base_n+4), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n drop5 = layers.Dropout(0.5)(conv5)\n \n up6 = layers.Conv2D(f(base_n+3), 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(layers.UpSampling2D(size = (2,2))(drop5))\n merge6 = layers.concatenate([drop4,up6], axis = 3)\n conv6 = layers.Conv2D(f(base_n+3), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n conv6 = layers.Conv2D(f(base_n+3), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n \n up7 = layers.Conv2D(f(base_n+2), 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(layers.UpSampling2D(size = (2,2))(conv6))\n merge7 = layers.concatenate([conv3,up7], axis = 3)\n conv7 = layers.Conv2D(f(base_n+2), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = layers.Conv2D(f(base_n+2), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n \n up8 = layers.Conv2D(f(base_n+1), 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(layers.UpSampling2D(size = (2,2))(conv7))\n merge8 = layers.concatenate([conv2,up8], axis = 3)\n conv8 = layers.Conv2D(f(base_n+1), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = layers.Conv2D(f(base_n+1), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n \n up9 = layers.Conv2D(f(base_n), 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(layers.UpSampling2D(size = (2,2))(conv8))\n merge9 = layers.concatenate([conv1,up9], axis = 3)\n conv9 = layers.Conv2D(f(base_n), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\n #conv9=Dropout(rate=.2)(conv9)\n conv9 = layers.Conv2D(f(base_n), 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = layers.Conv2D(1, 1 ,activation = 'relu',)(conv9)\n \n model = models.Model(inputs = inputs, outputs = conv10) \n #model.compile(optimizer = kOpt.Adam(lr = 1E-4), loss = mean_squared_error_weighted, metrics = ['mean_absolute_error','mean_squared_error',countErr,countErr_signed,countErr_relative]) \n model.compile(optimizer = kOpt.Adam(lr = 1E-4), loss = mean_squared_error_weighted) \n #decay = 1E-4/100\n #load existing model if provided\n if(pretrained_weights):\n \tmodel.load_weights(pretrained_weights)\n return model\n\n#unused shorthand for batchnorm/conv combo\ndef conv_relu_bn(nFilt,layer):\n conv = layers.Conv2D(nFilt, 3, 
activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(layer)\n conv= layers.BatchNormalization(axis=3)(conv)\n conv = layers.Conv2D(nFilt, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n conv= layers.BatchNormalization(axis=3)(conv)\n return conv\n\n#%% define custom loss functions and metrics for tensorflow. \n \n#structural similarity index. can be useful to look at overall density map quality.\n#see https://scikit-image.org/docs/dev/auto_examples/transform/plot_ssim.html for details \ndef ssim(target,prediction):\n out=tf.image.ssim(target,prediction,2)\n out=-1*out\n return out\n#correlation of gt and predicted count density values, pixelwise\n#def corr2(target,prediction):\n# out=tfp.stats.correlation(tf.reshape(target[0,p:-p,p:-p,0],[192**2,1]),tf.reshape(prediction[0,p:-p,p:-p,0],[192**2,1]))\n# return out\n#not used. implements Lempitsky et al.'s loss function\ndef mesa_dist(target, prediction): \n target=target[0,:,:,0]\n prediction=prediction[0,:,:,0]\n diff1=target-prediction\n diff2=-target+prediction\n dMesa= tf.math.reduce_max(max_subarray_tf.maxSubArray_2D(diff1)[0],max_subarray_tf.maxSubArray_2D(diff2)[0])\n return dMesa\n#weighted mse. not weighted if weight=1\ndef mean_squared_error_weighted(y_true, y_pred):\n #essentially works as MSE with the cropping to remove mirrored regions\n weight=1\n dens = tf.not_equal(y_true, 0)\n sqdiff=keras.square(y_pred - y_true)\n sqdiff=tf.where(dens, weight*sqdiff, sqdiff) #condition, iftrue, iffalse\n return keras.mean(sqdiff[:,p:-p,p:-p,:]) \n\ndef mean_squared_error_bias(y_true, y_pred):\n #combo loss function of MSE and whole image error metric\n weight=1\n dens = tf.not_equal(y_true, 0)\n sqdiff=keras.square(y_pred - y_true)\n sqdiff=tf.where(dens, weight*sqdiff, sqdiff) #condition, iftrue, iffalse\n err=countErr(y_true, y_pred)\n return keras.mean(sqdiff[0,p:-p,p:-p,0])+(.1*tf.square(err))\n\n\ndef mean_squared_error_worst(y_true, y_pred):\n n=.25 #evaluate worst 1/4 of pixels- attempted substitute for max subarray\n \n sqdiff=keras.square(y_pred - y_true)[0,p:-p,p:-p,0]\n sqdiff=tf.reshape(sqdiff,[-1])\n sqdiff=tf.sort(sqdiff) #sorts in ascending order\n sqdiff=sqdiff[int(n*192*192):]\n \n return keras.mean(sqdiff)\n\n\n#evaluate loss only at worst pixel\ndef max_squared_error_weighted(y_true, y_pred):\n dens = tf.not_equal(y_true, 0)\n sqdiff=keras.square(y_pred - y_true)\n sqdiff=tf.where(dens, sqdiff, sqdiff) #condition, iftrue, iffalse\n return keras.max(sqdiff)\n#square of summed error\ndef tot_err(y_true, y_pred):\n #returns total image error squared\n return keras.square(keras.sum(y_true- y_pred)/data.mult)\n#absolute value of total error in image\ndef countErr(target, prediction):\n #target=tf.math.exp(target)-1\n #prediction=tf.math.exp(prediction)-1\n \n a=keras.sum(target)\n b=keras.sum(prediction)\n error = (b-a)\n return tf.math.abs(error/data.mult)\n#relative error over full image\ndef countErr_relative(target, prediction):\n #target=tf.math.exp(target)-1\n #prediction=tf.math.exp(prediction)-1\n #calculates percent error\n \n a=keras.sum(target)+1\n b=keras.sum(prediction)+1\n error = (b-a)/a\n return tf.math.abs(error)\n#signed total error in image\ndef countErr_signed(target, prediction):\n #target=tf.math.exp(target)-1\n #prediction=tf.math.exp(prediction)-1\n \n \n a=keras.sum(target)\n b=keras.sum(prediction)\n error = (b-a)\n return (error/data.mult)\n#get averages for prediciton and target values\ndef targSum(target, prediction):\n a=keras.sum(target)\n 
return a\ndef predSum(target, prediction):\n a=keras.sum(prediction)\n return a\n\n","repo_name":"ethier-lab/AxoNet","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40964721682","text":"\"\"\" The Hand class instantiates a list of Card objects.\n We add a card to a hand by adding a Card instance to the cards list.\n To calculate the value, we determine if card is numeric, Ace, or other (a face card).\n Once determined the value is added.\n If the hand is a bust (>21), the value of the Ace is reduced to 1.\n\"\"\"\n\n\nclass Hand:\n # This function, when called from the game class, creates an empty hand for the player and then the dealer.\n def __init__(self, dealer=False):\n self.dealer = dealer\n self.cards = []\n self.rank = 0\n\n # A Card object is added to the cards list.\n def add_card(self, card):\n self.cards.append(card)\n # This function initializes the rank at 0 and assumes no ace yet. \n def calculate_rank(self):\n self.rank = 0\n has_ace = False\n # Here we loop through the Card instances, calculate and add the value.\n for card in self.cards:\n if card.rank.isnumeric():\n self.rank += int(card.rank)\n else:\n if card.rank == \"A\":\n has_ace = True\n self.rank += 11\n else:\n self.rank += 10\n # If over 21, the Ace rank drops from 11 to 1\n if has_ace and self.rank > 21: \n self.rank -= 10\n # When called from game.py, this function calls the calculate_rank function and returns the rank.\n def get_rank(self):\n self.calculate_rank()\n return self.rank\n\n # This function prints the player's cards; for the dealer it shows only the 2nd card (the first stays hidden).\n def display(self):\n if self.dealer:\n print(\"Hidden\")\n print(self.cards[1])\n else:\n for card in self.cards: \n print(card)\n print(\"rank:\", self.get_rank())","repo_name":"JEmbry2019/Poker_Python_Project_Tuesday","sub_path":"hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6343141054","text":"import libreria\n\n\n# Application to ask for products and quantities and save them\n\ndef agregarProductos():\n # 1. Ask for the product\n # 2. Ask for the quantity\n # 3. Save the data to the file 5_app.txt\n producto= libreria.pedir_producto(\"Ingrese producto : \")\n cantidad= libreria.pedir_kilogramo(\"Ingrese cantidad : \")\n contenido= producto +\" - \"+str(cantidad)+\" kg\\n\"\n libreria.guardar_datos(\"5_app.txt\", contenido, \"a\")\n print(\"Datos guardados\")\n\ndef leerProductos():\n print(\"Producto Precio\")\n datos= libreria.obtener_datos(\"5_app.txt\")\n if(datos != \"\"):\n print(datos)\n else:\n print(\"Datos no guardados\")\n\nopc=0\nmax=3\nwhile ( opc != max ):\n print(\"##################################################\")\n print(\"############ MENU ###############################\")\n print(\"##################################################\")\n print(\"# 1. Pedir producto y cantidad #\")\n print(\"# 2. Leer producto y cantidad #\")\n print(\"# 3. 
Salir #\")\n print(\"##################################################\")\n\n opc = libreria.pedir_numero(\"Ingrese opcion : \", 1, 3)\n\n if ( opc == 1 ):\n agregarProductos()\n\n if ( opc == 2):\n leerProductos()\n# end of menu\n\nprint(\"Fin del programa\")\n","repo_name":"florespadillaunprg/t10_flores.padilla","sub_path":"flores/1_menus/app5.py","file_name":"app5.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36467762601","text":"# -*- coding:utf-8 -*-\n\nfrom pymongo import MongoClient\n\nconn = MongoClient(\"192.168.200.120\", 27017)\n\ndb = conn[\"products\"]\n\ncol = db[\"jay_loco_amazons\"]\n\nrs = col.find({\"crawlid\": \"5116\"}, {\"images\": 1})\n\nfor i in rs:\n\n print(i)","repo_name":"ShichaoMa/old-spider","sub_path":"test/test_image_exist.py","file_name":"test_image_exist.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11615449783","text":"import requests\r\nimport random\r\nimport time\r\nimport os\r\nimport pyfiglet\r\nfrom colorama import Fore\r\n\r\nascii_banner = pyfiglet.figlet_format(\"samidbangkit\")\r\nprint(ascii_banner)\r\n\r\ntime.sleep(1)\r\n\r\nchannel_id = input(\"Masukkan ID channel: \")\r\nwaktu1 = int(input(\"Set Waktu Hapus Pesan: \"))\r\nwaktu2 = int(input(\"Set Waktu Kirim Pesan: \"))\r\n\r\ntime.sleep(1)\r\nprint(\"3\")\r\ntime.sleep(1)\r\nprint(\"2\")\r\ntime.sleep(1)\r\nprint(\"1\")\r\ntime.sleep(1)\r\n\r\ndef countdown(t):\r\n \r\n while t:\r\n mins, secs = divmod(t, 60)\r\n timer = '{:02d}:{:02d}'.format(mins, secs)\r\n print(timer, end=\"\\r\")\r\n time.sleep(1)\r\n t -= 1\r\n \r\n print('Fire in the hole!!')\r\n \r\n \r\n# input time in seconds\r\nt = waktu1\r\n\r\n\r\n\r\nos.system('cls' if os.name == 'nt' else 'clear')\r\n\r\nwith open(\"pesan.txt\", \"r\") as f:\r\n words = f.readlines()\r\n\r\nwith open(\"token.txt\", \"r\") as f:\r\n authorization = f.readline().strip()\r\n\r\nwhile True:\r\n channel_id = channel_id.strip()\r\n\r\n payload = {\r\n 'content': random.choice(words).strip()\r\n }\r\n\r\n headers = {\r\n 'Authorization': authorization\r\n }\r\n\r\n r = requests.post(f\"https://discord.com/api/v9/channels/{channel_id}/messages\", data=payload, headers=headers)\r\n print(Fore.WHITE + \"Sent message: \")\r\n print(Fore.YELLOW + payload['content'])\r\n \r\n \r\n# function call (countdown prints its own timer and returns None)\r\n\r\n countdown(int(t))\r\n\r\n\r\n response = requests.get(f'https://discord.com/api/v9/channels/{channel_id}/messages', headers=headers)\r\n\r\n if response.status_code == 200:\r\n messages = response.json()\r\n if len(messages) == 0:\r\n is_running = False\r\n break\r\n else:\r\n time.sleep(waktu1)\r\n\r\n else:\r\n print(f'Gagal mendapatkan pesan di channel: {response.status_code}')\r\n\r\n time.sleep(waktu2)\r\n","repo_name":"smdbngkt/pushbang","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32622905061","text":"s = int(input())\n\ndef f(a):\n if a % 2 == 0:\n return a / 2\n else:\n return 3*a + 1\n\na = [s]\ni = 1\nwhile True:\n i += 1\n a_next = f(a[-1])\n if a_next in a:\n print(i)\n break\n else:\n 
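# (Hedged note, not original code: the membership test a_next in a scans\n # the whole history each step, making the loop quadratic overall; a\n # parallel set gives O(1) lookups, roughly:\n # seen = set(a)\n # ... if a_next in seen: ... else: seen.add(a_next); a.append(a_next)\n # set() is a Python builtin; seen is a hypothetical name.)\n 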
a.append(a_next)","repo_name":"tamlog06/Atcoder-Beginner-Contest","sub_path":"problems/ABC/116/b/abc116_b.py","file_name":"abc116_b.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14227337376","text":"#This file is used to analyze data that exists locally. Data is downloaded separately. \nfrom numpy import array\nimport csv\nimport os\nimport pickle\n\npath = \"/home/human/WattTime/data/CA-2012\" \npath2= \"/home/human/WattTime/data/CA-2012-Price\"\nplants = {}\nprices = {}\n\n#functions for preservation of abstraction\ndef get_name(row):\n try: return float(row[1])\n except: return 0\n\ndef get_ORISPL_code(row):\n try: return float(row[2])\n except: return 0\n\ndef get_unit_id(row):\n try: return float(row[3])\n except: return 0\n\ndef get_date(row):\n try: return row[4]\n except: return 0\n\ndef get_hour(row):\n try: return float(row[5])\n except: return 0\n\ndef get_op_time(row):\n try: return float(row[6])\n except: return 0 \n\ndef get_GLOAD(row):\n try: return float(row[7])\n except: return 0\n\ndef get_SLOAD(row):\n return row[8]\n\ndef get_SO2_MASS(row):\n return row[9]\n\ndef get_SO2_MASS_MEASURE(row):\n return row[10]\n\ndef get_SO2_RATE(row):\n return row[11]\n\ndef get_SO2_RATE_MEASURE(row):\n return row[12]\n\ndef get_CO2_MASS(row):\n try: return row[17]\n except: return 0\n\n#for price data begin\ndef get_LMP_TYPE(row):\n try: return row[6]\n except: return 0\n\ndef get_OPR_DT(row):\n try: return row[0]\n except: return 0\n\ndef get_HE01(row):\n try: return row[11]\n except: return 0\n\ndef get_AVPR(row):\n#returns the average price in a day\n total_price = 0\n for i in range(11,35):\n try: total_price += float(row[i])\n except: total_price += 0\n return total_price/24\n#for price data end\n\ndef get_first(row):\n return row[0] #gets first item in row\n# end of abstraction functions for getting info from csv list object\n\n\"\"\"\n#This is for opening new .csv files. This should now be done by opener.py\n#here for reference only.\n\nfor filename in os.listdir(path):\n with open(\"{0}/{1}\".format(path,filename)) as csv_file:\n data_object = csv.reader(csv_file)\n for row in data_object:\n if \"{0} ID:{1}\".format(get_name(row),get_unit_id(row)) not in plants:\n plants[\"{0} ID:{1}\".format(get_name(row),get_unit_id(row))] = [row]\n else: \n if get_unit_id(row) == get_unit_id(get_first(plants[\"{0} ID:{1}\".format(get_name(row),get_unit_id(row))])):\n plants[\"{0} ID:{1}\".format(get_name(row),get_unit_id(row))].append(row)\n else:\n plants[\"{0} ID:{1}\".format(get_name(row),get_unit_id(row))] = [row]\n\"\"\"\nwith open(\"/home/human/WattTime/data/plants.pickle\",\"rb\") as pickler:\n plants= pickle.load(pickler) #gets info for plants dictionary from file\n\n\"\"\"\n#This is for generating an \"aggregate plant\", that measures total amount over all plants\n#This should now be done by opener.py. Here for reference only. 
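#(Hedged aside, inside this retired reference block: the same per-date\n#totals could be computed with pandas, assuming the rows were loaded into a\n#DataFrame df with columns 'date', 'hour', 'mw', 'co2' -- hypothetical names:\n# import pandas as pd\n# agg = df.groupby('date')[['hour', 'mw', 'co2']].sum()\n#groupby/sum are standard pandas; this is a sketch, not this project's code.)\n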
\n\ndef tryfloat(string):\n try: return float(string)\n except: return 0\n\naggregate_plant ={}\nworklist = []\nfor plant in plants:\n for row in plants[plant]:\n if get_date(row) not in aggregate_plant:\n totalhour = get_op_time(row)\n totalmw = get_GLOAD(row)\n totalCO2 = get_CO2_MASS(row)\n aggregate_plant[get_date(row)] = [tryfloat(totalhour), tryfloat(totalmw), tryfloat(totalCO2)]\n else:\n hour = get_op_time(row)\n mw = get_GLOAD(row)\n CO2 = get_CO2_MASS(row)\n aggregate_plant[get_date(row)][0] += tryfloat(hour)\n aggregate_plant[get_date(row)][1] += tryfloat(mw)\n aggregate_plant[get_date(row)][2] += tryfloat(CO2)\n\"\"\"\nwith open(\"/home/human/WattTime/data/aggregate_plant.pickle\",\"rb\") as pickler:\n aggregate_plant = pickle.load(pickler) #gets info for aggregate_plant dictionary from file\n\n\"\"\"\n#this is a test that displays the contents of the aggregate_plant dictionary\n#uncomment to run\nfor value in sorted(aggregate_plant):\n print(value, aggregate_plant[value])\n\"\"\" \n#this converts the aggregate plant dictionary into a data list.\ndata_list = []\nfor date in sorted(aggregate_plant):\n data_list.append([date] + aggregate_plant[date])\ndata_list.pop()\ndata_list.pop()\n#thisconverts the data_list into a 2-dimmensional numpy array \ndata_array = array([row for row in data_list]) \n\nwith open(\"/home/human/WattTime/data/data_array.pickle\",\"wb\") as pickler:\n pickle.dump(data_array, pickler) #dumps the array for later use by engine\n\n\n\"\"\"\n#this is a test that displays the contents of the data_list list\n#uncomment to run\nfor i in data_list:\n print(i)\n\"\"\"\n\n#these 3 dictionaries exist to make working with their respective variables more convenient\n\"\"\"\naggregate_HOUR = {} \nfor i in aggregate_plant:\n aggregate_HOUR[aggregate_plant[i][0]] = i\n \naggregate_MW = {}\nfor i in aggregate_plant:\n aggregate_MW[aggregate_plant[i][1]] = i\n\naggregate_CO2 = {}\nfor i in aggregate_plant:\n aggregate_CO2[aggregate_plant[i][2]] = i\n\"\"\"\n\"\"\"\n#this is a test that displays the contents of the dictionaries in increasing order\n#uncomment to run\nfor dic in [aggregate_HOUR,aggregate_MW,aggregate_CO2]:\n for value in sorted(dic):\n print(value, dic[value])\n\"\"\"\n\n\"\"\"\n #This is for opening new .csv files for price. This should now be done by opener.py\n #Here for reference only. 
\n\nfor filename in os.listdir(path2):\n with open(\"{0}/{1}\".format(path2,filename)) as csv_file:\n price_object = csv.reader(csv_file)\n for row in price_object:\n if get_LMP_TYPE(row) == \"LMP\":\n prices[get_OPR_DT(row)] = row\n\"\"\"\n\nwith open(\"/home/human/WattTime/data/prices.pickle\",\"rb\") as pickler:\n prices = pickle.load(pickler) #gets info for prices dictionary from file\n\nav_daily_prices = {}\nfor date in prices:\n av_daily_prices[get_AVPR(prices[date])] = date\n\"\"\"\n#This is a test that prints the average daily prices in increasing order.\n#uncomment to run\nfor price in sorted(av_daily_prices):\n print(price, av_daily_prices[price])\n\"\"\"\nav_daily_prices_bydate = {}\nfor date in prices:\n av_daily_prices_bydate[date] = get_AVPR(prices[date])\n \n\"\"\"\n#Another formatting test\nfor plant in sorted(plants):\n for row in plants[plant]:\n print(\"Plant: {0}; Date: {1} Hour: {2}; Operating Percent: {3}\".format(get_name(row), get_date(row), get_hour(row), get_op_time(row)))\n\nprint(\"Success!\")\n\n\"\"\"\n\ndef count_rows(plants, key):\n count = 0 \n for row in plants[key]:\n count +=1\n return count\n\ndef mean(dictionary):\n total = 0\n count = 0\n for value in dictionary.values():\n count +=1\n try: total += value\n except: total += 0\n return total/count\n\ndef avpr_when_on(plants, prices):\n outlist = []\n for plant in sorted(plants):\n for row in plants[plant]:\n outlist.append(\"{0}: {1}\".format(get_date(row), get_hour(row)))\n return outlist\n\ndef operating_time_average(plants, period=24):\n #returns average operating time over all plants in given period\n returndict = {}\n for plant in plants:\n count = 0\n ontime = 0\n for row in plants[plant]:\n count += 1\n try: ontime += float(get_op_time(row))\n except: ontime += 0\n returndict[plant] = (ontime * period)/count\n return mean(returndict)\n\n\ndef average_X(plants, plant, get_X, period=24):\n total = 0 \n count = 0\n for row in plants[plant]:\n if float(get_X(row)) >= 0:\n count += 1\n total += float(get_X(row))\n if count == 0: \n return 0\n return (total * period)/count\n\ndef average_X_dict(plants, get_X, period=24):\n AV_DICT = {}\n for plant in plants:\n AV_DICT[plant] = average_X(plants, plant, get_X, period) \n return AV_DICT\n\ndef op_time_av_plant(plant, period=24):\n ontime = 0\n for row in plant:\n try: ontime += float(get_op_time(row))\n except: ontime += 0\n return (ontime * period)/8784\n\ndef CO2_per_MW(plant, period=1):\n CO2 = 0\n MW = 0\n for row in plant:\n try: CO2 += float(get_CO2_MASS(row))\n except: CO2 += 0 \n for row in plant:\n try: MW += float(get_GLOAD(row))\n except: MW += 0\n if MW == 0:\n return 0\n return (period * (CO2/MW))\n\"\"\"\n#FOR GRAPHING WITH MATPLOTLIB/PYLAB\n\nCO2_DICT = {}\nfor plant in sorted(plants):\n CO2_DICT[CO2_per_MW(plants[plant])] = plant\nfor plant in sorted(CO2_DICT):\n print(plant, CO2_DICT[plant])\nxvals =[]\nyvals =[]\nfor plant in sorted(CO2_DICT):\n xvals.append(CO2_DICT[plant])\n yvals.append(plant)\nimport matplotlib.pyplot as plt\nimport pylab\n\nfig = plt.figure()\ngraph = fig.add_subplot(111)\nfig.subplots_adjust(top=0.85)\ngraph.set_ylabel(\"CO2 per MW/hr\")\ngraph.set_xlabel(\"Plants\")\nfig.suptitle(\"Average CO2 per MW\", fontsize=25, fontweight=\"bold\")\nx = range(len(xvals))\npylab.plot(x, yvals, \"g\")\npylab.show()\n\"\"\"\n \ndef at_least(a, b):\n if a >= b:\n return True\n else:\n return False\n\ndef at_most(a, b):\n if a <= b:\n return True\n else:\n return False\n\ndef on_percent(plants, percent, operator, period=24): \n#returns a list of plants that 
were operating (on average) at least X percent of a period\n returnlist = []\n for plant in plants:\n if operator(op_time_av_plant(plants[plant], period), percent*period):\n returnlist.append(plant)\n return returnlist\n\"\"\"\n#This is a test that prints all plants on at least half of an average day\n#Uncomment to run\nfor plant in on_percent(plants, .5 , at_least, 24):\n print (plant)\n\"\"\"\ndef similar(plants, operator, threshold, comparer, value, period=24):\n #returns a dictionary of plants greater than or less than(depending on operator) a certain threshold ratio of the output of a value function over a period, compared by some comparer function\n returndict= {}\n finaldict = {}\n for plant in plants:\n returndict[plant] = comparer(plants[plant], period)\n for item in returndict:\n if operator(returndict[item]/value, threshold * value):\n finaldict[item] = returndict[item] \n return finaldict\n\n\"\"\"\"\n#This is a test which displays an example use of \"similar\" function from above.\n#Uncomment to run\n\nsortdict = similar(plants, at_least, 1 , op_time_av_plant, operating_time_average(plants))\nreverse = {}\nfor plant in sorted(sortdict):\n reverse[sortdict[plant]] = plant\nfor i in reverse:\n print(i, reverse[i])\n\"\"\"\ndef similar_days(dict_list, date, radius):\n \"\"\" takes in a list of pairs of dictionaries of format:\n [({average of var1: corresponding-date }, {corresponding-date: average of var1}), ({average of var2: corresponding-date... }...)...]\n as well as a date, and a radius. Returns the most similar days to the given day by minimizing\n the distance between the values recorded in that day and the average values in\n the {radius} amount of days requested. A radius of 5 would return the 10 most\n similar days: 5 days with values greater than the day, and 5 days with lower values\n \"\"\"\n return(\"failure\")\n\ndef similar_days(dict_list, date, amount):\n my_difference =\"nope\" \n print(\"Success!\")\n\n","repo_name":"egeriicw/watttime-grid","sub_path":"regression/2011_REGS/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":10999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36577024933","text":"import json\nfrom queue import Empty\n\nfrom sqlalchemy import and_\n\nfrom flask_restful import Resource\nfrom flask import request\n\nfrom tools import api_tools, auth\nfrom ...models.tests import Test\nfrom ...models.pd.test_parameters import PerformanceTestParam\nfrom ...utils.utils import run_test, parse_test_data, handle_artifact_source\n\n\nclass API(Resource):\n url_params = [\n '',\n ]\n\n def __init__(self, module):\n self.module = module\n\n @auth.decorators.check_api({\n \"permissions\": [\"performance.backend.tests.view\"],\n })\n def get(self, project_id: int):\n total, res = api_tools.get(project_id, request.args, Test)\n rows = []\n for i in res:\n test = i.api_json()\n schedules = test.pop('schedules', [])\n if schedules:\n try:\n test['scheduling'] = self.module.context.rpc_manager.timeout(\n 2).scheduling_backend_performance_load_from_db_by_ids(schedules)\n except Empty:\n test['scheduling'] = []\n rows.append(test)\n return {'total': total, 'rows': rows}, 200\n\n @staticmethod\n def get_schedules_ids(filter_) -> set:\n r = set()\n for i in Test.query.with_entities(Test.schedules).filter(\n filter_\n ).all():\n r.update(set(*i))\n return r\n\n @auth.decorators.check_api({\n \"permissions\": [\"performance.backend.tests.delete\"],\n \"recommended_roles\": {\n 
\"default\": {\"admin\": True, \"editor\": False, \"viewer\": False},\n \"administration\": {\"admin\": True, \"editor\": False, \"viewer\": False},\n }\n })\n def delete(self, project_id: int):\n project = self.module.context.rpc_manager.call.project_get_or_404(\n project_id=project_id)\n try:\n delete_ids = list(map(int, request.args[\"id[]\"].split(',')))\n except TypeError:\n return 'IDs must be integers', 400\n\n filter_ = and_(\n Test.project_id == project.id,\n Test.id.in_(delete_ids)\n )\n\n try:\n self.module.context.rpc_manager.timeout(3).scheduling_delete_schedules(\n self.get_schedules_ids(filter_)\n )\n except Empty:\n ...\n\n Test.query.filter(\n filter_\n ).delete()\n Test.commit()\n\n return {'ids': delete_ids}, 200\n\n @auth.decorators.check_api({\n \"permissions\": [\"performance.backend.tests.create\"],\n \"recommended_roles\": {\n \"default\": {\"admin\": True, \"editor\": True, \"viewer\": False},\n \"administration\": {\"admin\": True, \"editor\": True, \"viewer\": False},\n }\n })\n def post(self, project_id: int):\n \"\"\"\n Create test and run on demand\n \"\"\"\n data = json.loads(request.form.get('data'))\n run_test_ = data.pop('run_test', False)\n compile_tests_flag = data.pop('compile_tests', False)\n engagement_id = data.get('integrations', {}).get('reporters', {}) \\\n .get('reporter_engagement', {}).get('id')\n\n test_data, errors = parse_test_data(\n project_id=project_id,\n request_data=data,\n rpc=self.module.context.rpc_manager,\n )\n\n if errors:\n return errors, 400\n\n schedules = test_data.pop('scheduling', [])\n\n test_data['test_parameters'].append(\n PerformanceTestParam(\n name=\"test_type\",\n default=test_data.pop('test_type'),\n description='auto-generated from test type'\n ).dict()\n )\n test_data['test_parameters'].append(\n PerformanceTestParam(\n name=\"env_type\",\n default=test_data.pop('env_type'),\n description='auto-generated from environment'\n ).dict()\n )\n\n if test_data['source']['name'] == 'artifact':\n project = self.module.context.rpc_manager.call.project_get_or_404(\n project_id=project_id)\n handle_artifact_source(project, request.files['file'],\n compile_tests_flag=compile_tests_flag,\n runner=test_data[\"runner\"])\n\n test = Test(**test_data)\n test.insert()\n\n test.handle_change_schedules(schedules)\n\n if run_test_:\n resp = run_test(test, engagement_id=engagement_id)\n return resp, resp.get('code', 200)\n return test.api_json(), 200\n","repo_name":"carrier-io/backend_performance","sub_path":"api/v1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10087448988","text":"from tkinter import Canvas\n\n\nclass CaseTaquin:\n largeur = 80\n hauteur = 80\n fill = \"grey\"\n activefill = \"green\"\n font = ('Helvetica', '16')\n\n def __init__(self, canevas: Canvas, numero: int, x: int, y: int):\n self.x = x\n self.y = y\n self.numero = numero\n self.canevas = canevas\n self.id = None\n\n def draw(self):\n self.id = self.canevas.create_rectangle(self.x, self.y, self.x + CaseTaquin.largeur,\n self.y + CaseTaquin.hauteur, fill=CaseTaquin.fill,\n activefill=CaseTaquin.activefill)\n self.canevas.create_text(self.x + CaseTaquin.largeur // 2, self.y + CaseTaquin.hauteur // 2,\n text=str(self.numero), font=CaseTaquin.font)\n\n def deplace_droite(self):\n self.x = self.x + CaseTaquin.largeur\n self.canevas.move(self.id, CaseTaquin.largeur, 0)\n self.canevas.move(self.id + 1, CaseTaquin.largeur, 0)\n\n 
def deplace_gauche(self):\n self.x = self.x - CaseTaquin.largeur\n self.canevas.move(self.id, -CaseTaquin.largeur, 0)\n self.canevas.move(self.id + 1, -CaseTaquin.largeur, 0)\n\n def deplace_haut(self):\n self.y = self.y - CaseTaquin.hauteur\n self.canevas.move(self.id, 0, -CaseTaquin.hauteur)\n self.canevas.move(self.id + 1, 0, -CaseTaquin.hauteur)\n\n def deplace_bas(self):\n self.y = self.y + CaseTaquin.hauteur\n self.canevas.move(self.id, 0, CaseTaquin.hauteur)\n self.canevas.move(self.id + 1, 0, CaseTaquin.hauteur)\n\n def __str__(self):\n return f\"({self.x},{self.y})\"\n","repo_name":"oultetman/taquin","sub_path":"case_taquin.py","file_name":"case_taquin.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72626219847","text":"from PyGRB.main.fitpulse import PulseFitter\nfrom PyGRB.backend.makemodels import create_model_from_key\n\nGRB = PulseFitter(7475, times = (-2, 60),\n datatype = 'discsc', nSamples = 200, sampler = 'nestle',\n priors_pulse_start = -10, priors_pulse_end = 30, p_type ='docs')\n\nkey = 'F'\nmodel = create_model_from_key(key)\nGRB.main_multi_channel(channels = [0, 1, 2, 3], model = model)\n","repo_name":"JamesPaynter/PyGRB","sub_path":"examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"} +{"seq_id":"39639202658","text":"#!/usr/bin/python3\n\"\"\"\nFetches a URL and prints the X-Request-Id header value.\n\"\"\"\n\nimport requests\nimport sys\n\n\ndef main():\n r = sys.argv[1]\n \"\"\"sends a request to the url\"\"\"\n response = requests.get(r)\n if 'X-Request-Id' in response.headers:\n \"\"\"prints the id id variable in the header\"\"\"\n print(response.headers['X-Request-Id'])\n else:\n return None\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kamerelinda/alx_python","sub_path":"python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40069427809","text":"# -*- coding: utf-8 -*-\n# cls_mssqlserver.py\t written by Duncan Murray 28/4/2014\n# Simple wrapper for MS SQL Server functionality\n\n# install https://pypi.python.org/pypi/pypyodbc/ \n# extract folder to D:\\install\\python\\pypyodbc-1.3.1\n# shell to folder, run setup.py\n# in your main program:\n# import lib_data_SQLServer as sql\n# sql.CreateAccessDatabase('test.mdb')\n\n\n\ntry:\n import pypyodbc \nexcept ImportError:\n print('you need to install https://pypi.python.org/pypi/pypyodbc/ ')\n exit(1)\n\nfrom if_database import Database\n\n\ndef TEST():\n #testFile = 'D:\\\\database.mdb'\n print('wrapper for MS SQL Server and Access databases')\n \n d = MSSQL_server(['server', 'database', 'username', 'password'])\n d.connect()\n print(d.server)\n \nclass MSSQL_server(Database):\n\n def CreateAccessDatabase(self, fname):\n pypyodbc.win_create_mdb(fname)\n connection = pypyodbc.win_connect_mdb(fname)\n connection.cursor().execute('CREATE TABLE t1 (id COUNTER PRIMARY KEY, name CHAR(25));').commit()\n connection.close()\n\n def CompactAccessDatabase(self, fname):\n pypyodbc.win_compact_mdb(fname,'D:\\\\compacted.mdb')\n\n def SQLServer_to_CSV(self, cred, schema, table, fldr):\n opFile = fldr + table + '.CSV'\n print ('Saving ' + table + ' to ' + opFile)\n #cred = [server, database, username, password]\n 
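# (Hedged note, not original code: the hand-rolled quoting below breaks\n # when a value contains a double quote; the stdlib csv module handles\n # escaping, roughly:\n # import csv\n # with open(opFile, 'w', newline='') as f:\n # w = csv.writer(f, quoting=csv.QUOTE_ALL)\n # w.writerow([col[0] for col in cur.description])\n # w.writerows(cur)\n # csv.writer and QUOTE_ALL are standard library; opFile and cur are the\n # names already used in this method.)\n 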
connection_string ='Driver={SQL Server Native Client 11.0};Server=' + cred[0] + ';Database=' + cred[1] + ';Uid=' + cred[2] + ';Pwd=' + cred[3] + ';'\n #print(connection_string)\n conn = pypyodbc.connect(connection_string)\n cur = conn.cursor()\t\n sqlToExec = 'SELECT * FROM ' + schema + '.' + table + ';'\n cur.execute(sqlToExec)\n op = open(opFile,'w') # text mode, since rows are written as str\n # add column headers\n txt = ''\n for col in cur.description:\n txt += '\"' + self.force_string(col[0]) + '\",'\n op.write(txt + '\\n')\n for row_data in cur: # add table rows\n txt = ''\n for col in row_data:\n txt += '\"' + self.force_string(col) + '\",'\n op.write(txt + '\\n')\n op.close()\t\n cur.close()\n conn.close()\n\n def force_string(self, obj):\n if type(obj) is str:\n return obj\n else:\n return str(obj)\n \nif __name__ == '__main__':\n TEST()\t\n ","repo_name":"acutesoftware/AIKIF","sub_path":"aikif/dataTools/if_mssqlserver.py","file_name":"if_mssqlserver.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"16"} +{"seq_id":"3705122799","text":"import scrapy\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\n\ncount = 0\n\n\nclass ConcordiaSpider(scrapy.Spider):\n name = \"concordia\"\n start_urls = ['https://www.concordia.ca/ginacody.html']\n\n def parse(self, response):\n soup = BeautifulSoup(response.text, 'lxml')\n page = urlparse(response.url)\n filename = \"test_html/\" + page.path[10:].replace(\"/\", \"_\")\n with open(filename, 'wb') as f:\n f.write(response.body)\n\n # Obeying robots apart from robots.txt: look at the robots meta tag\n robot_tag = soup.find(\"meta\", attrs={'name': 'robots'})\n\n links = soup.findAll('a', href=True)\n for link in links:\n if link is not None:\n url = link['href']\n if url.startswith(\"/ginacody\") and robot_tag['content'] == \"index,follow\":\n next_link = response.urljoin(url)\n yield {\n 'Next link': next_link,\n 'url': url,\n 'robots_tag': robot_tag['content']\n }\n yield scrapy.Request(url=next_link, callback=self.parse)\n","repo_name":"Gmartinica/COMP-479-projects","sub_path":"p4/concordia/concordia/spiders/concordia_crawler.py","file_name":"concordia_crawler.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3577331478","text":"'''\nCreated on Oct 25, 2011\n\n@author: Meredith\n'''\n\n'''for loop for counting\nrange (startpoint, end point, update)\\\nstartpoint = includes\nendpoint = does not include\nupdate = number by which to count\n\nrange(0, 10, 1)\noutput- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n\nrange(0,10,2)\noutput- 0, 2, 4, 6, 8\n\nrange(10, 0, -1)\n10, 9, 8, 7, 6, 5, 4, 3, 2, 1\n\nrange(0, 10)\noutput 0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n\nrange(10)\n0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n\ni,j,k are counting variables'''\n\ns = 'Meredith Hoo'\nfor c in s:\n if c.isalnum():\n print(c)\n elif c.isalpha():\n print(c)","repo_name":"zesameri/Beginner-Python-Projects","sub_path":"Basic Concepts/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69970209929","text":"from logging import debug\r\nfrom flask import Flask,render_template\r\nfrom flask.globals import request\r\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, send\r\n\r\napp=Flask(__name__)\r\napp.config[\"SECRET_KEY\"]=\"SECRET\"\r\nsocketio = SocketIO(app,async_mode 
= 'eventlet')\r\n\r\nuser_room_dict={}\r\nsid_user_dict={}\r\n\r\n@socketio.on('message')\r\ndef handle_message(msg):\r\n print(request.sid)\r\n emit(\"message\",msg,broadcast=True)\r\n\r\n@socketio.on('join')\r\ndef join_a_room(data):\r\n print(data)\r\n roomid=data[\"room\"]\r\n join_room(roomid)\r\n\r\n user_room_dict[request.sid]=roomid\r\n\r\n emit(\"Roommessage\",sid_user_dict[request.sid]+\" enter the room\",to=roomid)\r\n\r\n@socketio.on('leave')\r\ndef leave_a_room(data):\r\n print(data)\r\n roomid=data[\"room\"]\r\n leave_room(roomid)\r\n\r\n emit(\"Roommessage\",sid_user_dict[request.sid]+\" leave the room\",to=roomid)\r\n\r\n del user_room_dict[request.sid]\r\n\r\n\r\n\r\n@socketio.on('toSomeRoom')\r\ndef toSomeRoom(Msg):\r\n roomid=user_room_dict[request.sid]\r\n emit('Roommessage',sid_user_dict[request.sid]+\":\"+Msg,to=roomid)\r\n\r\n@socketio.on('changeName')\r\ndef changeName(name):\r\n sid_user_dict[request.sid]=name\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('broadcast_page.html')\r\n\r\nif __name__==\"__main__\":\r\n socketio.run(app,debug=1)","repo_name":"oFeasl/Flask_SocketIO","sub_path":"test_FlaskSocketIO/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16064058618","text":"\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport time\n\nclass edge_rec:\n def __init__(self, image): \n self.image = image\n self.PILimg = Image.open(self.image)\n\n self.width = self.PILimg.size[0]\n self.height = self.PILimg.size[1]\n\n self.x_kernel = [[1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]]\n \n self.y_kernel = [[1, 0, -1],\n [2, 0, -2],\n [1, 0, -1]]\n\n self.kernel_size = len(self.x_kernel[0])\n\n # convert image to gray scale image\n def gray_scale(self):\n lst = []\n for x in range(0, self.height):\n for y in range(0, self.width):\n pixel = (y, x)\n rgb = self.PILimg.getpixel(pixel)\n\n gray_scale = rgb[0] / 255\n lst.append(gray_scale)\n\n gray_im = []\n for i in range(0, len(lst), self.width):\n gray_im.append(lst[i:i + self.width])\n\n return gray_im\n\n # apply convolution on image with a sobel filter\n def convolution(self, kernel):\n matrice = self.gray_scale()\n\n lst = []\n\n for k_j in range(len(matrice) - self.kernel_size + 1):\n for k_i in range(len(matrice[1]) - self.kernel_size + 1):\n scalar = []\n for i in range(self.kernel_size):\n for j in range(self.kernel_size):\n scalar.append(matrice[i + k_j][j + k_i] * kernel[i][j])\n \n lst.append(sum(scalar))\n\n return lst\n\n def main(self):\n # plot original image\n plt.matshow(self.gray_scale(), cmap=\"gray\")\n plt.title(\"original\")\n plt.show()\n \n start = time.time()\n\n # create edges in x-axis\n x_edges = self.convolution(self.x_kernel)\n # create edges in y-axis\n y_edges = self.convolution(self.y_kernel)\n \n # combine x- and y-edges\n for i in range(len(x_edges)):\n x_edges[i] = np.sqrt(pow(x_edges[i], 2) + pow(y_edges[i], 2))\n x_edges[i] = x_edges[i]\n\n edges = x_edges\n \n # reshape to original height - 2 and original width - 2 (the amount of kernels that fit into height and width)\n convolutioned = np.array(edges).reshape(self.height - 2, self.width - 2)\n\n duration = time.time() - start\n print(\"took: \", duration, \" sec\")\n\n plt.matshow(convolutioned)\n plt.title(\"edges\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
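# (Hedged sketch, not part of the original script: the pure-Python\n # convolution above runs Python-level loops; SciPy computes the same Sobel\n # responses in C, e.g.\n # from scipy.signal import convolve2d\n # img = np.asarray(self.gray_scale()) # inside the class\n # gx = convolve2d(img, self.x_kernel, mode='valid')\n # gy = convolve2d(img, self.y_kernel, mode='valid')\n # edges = np.sqrt(gx**2 + gy**2)\n # mode='valid' matches the (height-2, width-2) output shape, and the\n # squared magnitudes are unaffected by convolve2d's kernel flip.)\n 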
edge_rec(\"images/house.png\").main()\n\n","repo_name":"theopfr/edge-detection-from-scratch","sub_path":"python-version/edge_recognition.py","file_name":"edge_recognition.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11617289647","text":"# hex_map.py\n#\n#\n#\n#\n\nimport tkinter as tk\nimport math\nfrom hex import Hex\n\nCOS_30 = math.cos(math.radians(30))\nSIN_30 = math.sin(math.radians(30))\n\nclass HexMap(tk.Frame):\n\n def __init__(self, master, parent, size, hex_menu_callback):\n super().__init__(master)\n self._master = master\n self._parent = parent\n self.hex_grid = tk.Canvas(self, height=780, width=1275, bg=\"black\")\n self.hex_grid.grid(column=0, row=1)\n self._hex_menu_callback = hex_menu_callback\n\n self._hex_ids = []\n self._hexes = []\n\n self._create_grid(size)\n self._label_grid()\n\n\n def _create_grid(self, size):\n start_x = 50\n curr_x = start_x\n curr_y = 50\n margin = 5\n x_offset = start_x + (COS_30 * start_x) + 3\n for row in range(9):\n print(\"Generating row \" + str(row))\n if row % 2 == 0:\n for hex in range(13):\n next_hex = self._create_hex(curr_x, curr_y, size)\n curr_x = next_hex[0] + margin\n else:\n for hex in range(12):\n next_hex = self._create_hex(curr_x, curr_y, size)\n curr_x = next_hex[0] + margin\n if row % 2 == 0:\n curr_x = x_offset\n else:\n curr_x = start_x\n curr_y = curr_y + (COS_30 * start_x * 2) - 7\n\n\n def _create_hex(self, start_x, start_y, size):\n hex_points = self._calculate_hex_points(start_x, start_y, size)\n hex_id = self.hex_grid.create_polygon(hex_points[0][0], hex_points[0][1], hex_points[1][0], hex_points[1][1], hex_points[2][0], hex_points[2][1], hex_points[3][0], hex_points[3][1], hex_points[4][0], hex_points[4][1], hex_points[5][0], hex_points[5][1], outline=\"white\", activedash=True)\n self._hex_ids.append(hex_id)\n self._hexes.append(Hex(self.hex_grid, self._master, self, hex_id, self._hex_menu_callback))\n return hex_points[2]\n\n\n def _label_grid(self):\n print(\"Laebeling grid...\")\n for hex in self._hex_ids:\n label = str(hex)\n # print(\"Adding label \" + label)\n coords = self.hex_grid.coords(hex)\n self.hex_grid.create_text((coords[0] + coords[4]) / 2, coords[1] - 10, fill=\"white\", text=label, justify=\"center\")\n\n\n def _calculate_hex_points(self, start_x, start_y, size):\n # print(\"Creating hex: \" + str(start_x), str(start_y), str(size))\n p1 = (start_x, start_y)\n p2 = (start_x + (COS_30 * size), (start_y + (SIN_30 * size * -1)))\n p3 = ((p2[0] + (COS_30 * size)), start_y)\n p4 = (p3[0], start_y + size)\n p5 = (p2[0], p4[1] + (SIN_30 * size))\n p6 = (start_x, start_y + size)\n return (p1, p2, p3, p4, p5, p6)\n\n\n def initialize_start_positions(self, player_data):\n start_positions = [1, 7, 13, 101, 107, 113]\n current_player = 1\n for hex in start_positions:\n self._parent._players[current_player-1].set_hq(hex)\n current_player_str = \"p\" + str(current_player)\n current_player_data = player_data[current_player_str]\n self._hexes[hex - 1].change_owner(current_player_data)\n current_player += 1\n","repo_name":"HarrisonMH/smashmap","sub_path":"hex_map_old.py","file_name":"hex_map_old.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14902107831","text":"# Definir una función superposicion() que tome dos listas y\r\n# devuelva True si tienen al menos 1 miembro en común o devuelva False de lo 
contrario.\r\n# Escribir la función usando el bucle for anidado.\r\n\r\ndef superposicion(array1, array2):\r\n\r\n iguales = True\r\n contador = 0\r\n \r\n for i in array1:\r\n for j in array2:\r\n if i == j:\r\n contador += 1\r\n\r\n if contador > 0:\r\n print(iguales)\r\n else:\r\n iguales = False\r\n print(iguales)\r\n\r\n\r\nlista1 = [1,2]\r\nlista2 = [2,3]\r\n\r\nsuperposicion(lista1, lista2)\r\n","repo_name":"alan199912/Pyhton","sub_path":"Ejercicios 1/07-superposicion.py","file_name":"07-superposicion.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38620359684","text":"from __future__ import print_function\nimport re\nimport numpy\n\nshifts = []\nguards = dict()\n\nwith open('input.txt', 'rb') as f:\n for line in f:\n shifts.append(line)\n\nshifts.sort()\nprint(shifts)\n\nguard_id = 0\ntimecard = numpy.zeros(60)\ntime_asleep = 0\n\nfor shift in shifts:\n if 'Guard' in shift:\n digits = re.search(r'#\\d+', shift)\n guard_id = int(digits.group()[1:])\n if guard_id in guards:\n guard = guards[guard_id]\n else:\n guards[guard_id] = {\n 'timecard': numpy.zeros(60),\n }\n\n elif 'falls asleep' in shift:\n minutes = re.search(r':\\d+', shift)\n time_asleep = int(minutes.group()[1:])\n elif 'wakes up' in shift:\n minutes = re.search(r':\\d+', shift)\n time_awake = int(minutes.group()[1:])\n for i in range(time_asleep, time_awake):\n guards[guard_id]['timecard'][i] += 1\n\n# part a\ndef most_minutes_asleep():\n max_guard_id = 0\n max_minutes = 0\n max_time = 0\n for i in guards:\n minutes = sum(guards[i]['timecard'])\n if max_minutes < minutes:\n max_guard_id = i\n max_minutes = minutes\n max_time = numpy.argmax(guards[i]['timecard'])\n return max_guard_id * max_time\n\n# part b\ndef minute_most_often_asleep():\n max_guard_id = 0\n max_minutes = 0\n max_time = 0\n for i in guards:\n minutes = max(guards[i]['timecard'])\n if max_minutes < minutes:\n max_guard_id = i\n max_minutes = minutes\n max_time = numpy.argmax(guards[i]['timecard'])\n return max_guard_id * max_time\n\nprint(most_minutes_asleep())\nprint(minute_most_often_asleep())","repo_name":"drewtaylors/Advent_of_Code","sub_path":"day_4/star_4.py","file_name":"star_4.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38994495741","text":"#Desenvolva um programa que receba 8 números inteiros positivos. 
Apresente a soma e média aritmética deles.\n\nsoma = 0\nmedia = 0\n\nfor cont in range (1,4):\n num = float(input(\"Digite um número inteiro\"))\n soma = soma + num\n media = soma / 3\n\nprint(soma)\nprint(media)\n\n\n\n","repo_name":"diogomoreirax/computational-thinking-using-python-1-semestre","sub_path":"atividade 7 FOR ESTRUTURAS DE REPETIÇÃO/8soma_arit.py","file_name":"8soma_arit.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2189197423","text":"import dotenv\nimport hydra\nimport glob\nfrom pathlib import Path\nimport torch\nimport os\nfrom omegaconf import DictConfig\n\n# load environment variables from `.env` file if it exists\n# recursively searches for `.env` in all folders starting from work dir\ndotenv.load_dotenv(override=True)\nfrom src import utils\n\nlog = utils.get_logger(__name__)\n\n@hydra.main(config_path=\"configs/\", config_name=\"test.yaml\")\ndef main(config: DictConfig):\n\n # Imports can be nested inside @hydra.main to optimize tab completion\n # https://github.com/facebookresearch/hydra/issues/934\n from src import utils\n from src.utils import metrics\n from src.testing_pipeline import test\n\n # Applies optional utilities\n utils.extras(config)\n all_preds = []\n os.chdir(\"/Weather-Prediction-NN\")\n # Evaluate model\n chkpts = []\n path = config.ckpt_folder\n for ck in Path(path).rglob(\"*.ckpt\"):\n if not \"last\" in str(ck):\n chkpts.append(ck)\n for c in chkpts:\n config.ckpt_path = c\n preds, all_targets = test(config)\n all_preds.append(preds)\n\n all_preds = torch.stack((all_preds))\n all_preds = torch.mean(all_preds, dim=0)\n rocauc_table, ap_table, f1_table = metrics.metrics_celled(all_targets, all_preds)\n res_rocauc = torch.median(rocauc_table)\n res_ap = torch.median(ap_table)\n res_f1 = torch.median(f1_table)\n log.info(f\"test_ensemble_median_rocauc: {res_rocauc}\")\n log.info(f\"test_ensemble_median_ap: {res_ap}\")\n log.info(f\"test_ensemble_median_f1: {res_f1}\")\n with open(\"ens.txt\", \"a\") as f:\n f.write(config.ckpt_folder + \"\\n\")\n f.write(\"median_rocauc: \" + str(res_rocauc) + \"\\n\")\n f.write(\"\\n\")\n f.write(\"median_ap: \" + str(res_ap) + \"\\n\")\n f.write(\"\\n\")\n f.write(\"median_f1: \" + str(res_f1) + \"\\n\")\n f.write(\"\\n\")\n return\n\n\nif __name__ == \"__main__\":\n \n \n main()\n","repo_name":"VGrabar/Weather-Prediction-NN","sub_path":"test_ensemble.py","file_name":"test_ensemble.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"13990146622","text":"from django.http import JsonResponse\nfrom django.views import View\nfrom .models import BookInfo, HeroInfo\nfrom .serializers import BookSerializer, HeroSerializer\nfrom .serializers import BookModelSerializer\nimport json\n\n\nclass BooksView(View):\n def post(self, request):\n param_dict = json.loads(request.body.decode())\n\n # 1.将接收的数据赋给data参数\n serializer = BookSerializer(data=param_dict)\n # 2.验证\n if serializer.is_valid():\n # 验证成功===》创建对象\n book = serializer.save()\n\n serializer = BookSerializer(book)\n book_dict = serializer.data\n return JsonResponse(book_dict, status=201)\n else:\n # 验证失败\n return JsonResponse(serializer.errors)\n\n\nclass BookView(View):\n def get(self, request, pk):\n book = BookInfo.objects.get(pk=pk)\n\n # serializer = BookSerializer(book)\n # book_dict = serializer.data\n\n serializer = BookModelSerializer(book)\n 
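# (Hedged aside, not original code: BookModelSerializer lives in\n # serializers.py, which is not shown; a typical DRF definition would be\n # roughly:\n # class BookModelSerializer(serializers.ModelSerializer):\n # class Meta:\n # model = BookInfo\n # fields = '__all__'\n # ModelSerializer and Meta are standard Django REST framework; the exact\n # fields declaration is an assumption.)\n 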
book_dict = serializer.data\n\n return JsonResponse(book_dict)\n\n def put(self, request, pk):\n param_dict = json.loads(request.body.decode())\n\n book = BookInfo.objects.get(pk=pk)\n\n serializer = BookSerializer(book, data=param_dict)\n if serializer.is_valid(): ### is_valid() is a method call; note the parentheses\n book = serializer.save()\n\n serializer = BookSerializer(book)\n book_dict = serializer.data\n return JsonResponse(book_dict, status=201)\n else:\n return JsonResponse(serializer.errors)\n\n\nclass HeroView(View):\n def get(self, request, pk):\n hero = HeroInfo.objects.get(pk=pk)\n\n serializer = HeroSerializer(hero)\n hero_dict = serializer.data\n\n return JsonResponse(hero_dict)\n","repo_name":"yongfang117/pro_useful_code","sub_path":"pro_drf/demo2/booktest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12494283540","text":"from data_structure.linked_list import LinkedList\nfrom utils.emojis import chicken_emoji\nfrom food.food import Food\n\n\nchicken_menu_emoji = \"\"\n\nchicken_menu = LinkedList()\n\nchicken_parmesan = Food(\"Chicken Parmesan\", \"Breaded chicken with tomato sauce.\", chicken_emoji)\ngrilled_chicken_salad = Food(\"Grilled Chicken Salad\", \"Greens with grilled chicken slices.\", chicken_emoji)\nchicken_alfredo_pasta = Food(\"Chicken Alfredo Pasta\", \"Pasta with chicken and creamy sauce.\", chicken_emoji)\nbbq_chicken_sandwhich = Food(\"BBQ Chicken Sandwich\", \"Sandwich with BBQ sauce and chicken.\", chicken_emoji)\ncrispy_chicken_strips = Food(\"Crispy Chicken Strips\", \"Fried chicken strips with dipping sauce.\", chicken_emoji)\nchicken_fajitas = Food(\"Chicken Fajitas\", \"Chicken and vegetables in a tortilla.\", chicken_emoji)\nroasted_chicken = Food(\"Roasted Chicken\", \"Whole roasted chicken with vegetables.\", chicken_emoji)\nchicken_noodle_soup = Food(\"Chicken Noodle Soup\", \"Chicken and noodles in broth.\", chicken_emoji)\nlemon_garlic_chicken = Food(\"Lemon Garlic Chicken\", \"Chicken with lemon and garlic flavors.\", chicken_emoji)\nhoney_mustard_chicken = Food(\"Honey Mustard Chicken\", \"Chicken with honey mustard sauce.\", chicken_emoji)\n\nchicken_menu.insert_beginning(chicken_parmesan)\nchicken_menu.insert_end(grilled_chicken_salad)\nchicken_menu.insert_end(chicken_alfredo_pasta)\nchicken_menu.insert_end(bbq_chicken_sandwhich)\nchicken_menu.insert_end(crispy_chicken_strips)\nchicken_menu.insert_end(chicken_fajitas)\nchicken_menu.insert_end(roasted_chicken)\nchicken_menu.insert_end(chicken_noodle_soup)\nchicken_menu.insert_end(lemon_garlic_chicken)\nchicken_menu.insert_end(honey_mustard_chicken)\n\nchicken_menu_emoji = chicken_menu.show_food_emoji()\n","repo_name":"JohnMachado11/Linked-List-Restaurant","sub_path":"src/food/chicken_menu.py","file_name":"chicken_menu.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24381595194","text":"import json\nimport os\nimport subprocess\nimport uuid\n\nimport pytest\n\nimport quetz\nfrom quetz.db_models import Package, Profile, User\nfrom quetz.rest_models import Channel\nfrom quetz.tasks import indexing\n\n\n@pytest.fixture\ndef user(db):\n user = User(id=uuid.uuid4().bytes, username=\"bartosz\")\n profile = Profile(name=\"Bartosz\", avatar_url=\"http:///avatar\", user=user)\n db.add(user)\n db.add(profile)\n db.commit()\n yield user\n\n\n@pytest.fixture\ndef channel_name():\n return 
\"my-channel\"\n\n\n@pytest.fixture\ndef package_name():\n return \"mytestpackage\"\n\n\n@pytest.fixture\ndef package_format():\n return 'tarbz2'\n\n\n@pytest.fixture\ndef package_file_name(package_name, package_format):\n if package_format == 'tarbz2':\n return f\"{package_name}-0.1-0.tar.bz2\"\n elif package_format == \"conda\":\n return f\"{package_name}-0.1-0.conda\"\n\n\n@pytest.fixture\ndef channel(dao: \"quetz.dao.Dao\", channel_name, user):\n channel_data = Channel(name=channel_name, private=False)\n channel = dao.create_channel(channel_data, user.id, \"owner\")\n return channel\n\n\n@pytest.fixture\ndef package_subdir():\n return \"noarch\"\n\n\n@pytest.fixture\ndef package_version(\n dao: \"quetz.dao.Dao\",\n user,\n channel,\n package_name,\n db,\n package_file_name,\n package_format,\n package_subdir,\n):\n channel_data = json.dumps({\"subdirs\": [package_subdir]})\n package_data = Package(name=package_name)\n\n package = dao.create_package(channel.name, package_data, user.id, \"owner\")\n package.channeldata = channel_data\n db.commit()\n\n package_info = (\n '{\"run_exports\": {\"weak\": [\"otherpackage > 0.1\"]}, \"size\": 100, \"depends\": []}'\n )\n version = dao.create_version(\n channel.name,\n package_name,\n package_format,\n package_subdir,\n \"0.1\",\n \"0\",\n \"0\",\n package_file_name,\n package_info,\n user.id,\n size=0,\n )\n\n yield version\n\n\n@pytest.fixture\ndef archive_format():\n return \"tarbz2\"\n\n\n@pytest.fixture\ndef pkgstore(config):\n pkgstore = config.get_package_store()\n return pkgstore\n\n\ndef test_repodata_zchunk(\n pkgstore,\n package_version,\n channel_name,\n package_file_name,\n dao,\n db,\n):\n indexing.update_indexes(dao, pkgstore, channel_name)\n\n index_path = os.path.join(\n pkgstore.channels_dir,\n channel_name,\n \"noarch\",\n \"index.html\",\n )\n\n assert os.path.isfile(index_path)\n with open(index_path, 'r') as fid:\n content = fid.read()\n\n assert \"repodata.json\" in content\n assert \"repodata.json.bz2\" in content\n assert \"repodata.json.zck\" in content\n\n for fname in (\"repodata.json\", \"repodata.json.zck\"):\n repodata_path = os.path.join(\n pkgstore.channels_dir, channel_name, \"noarch\", fname\n )\n\n assert os.path.isfile(repodata_path)\n\n if fname.endswith('.zck'):\n subprocess.check_call(['unzck', repodata_path])\n with open('repodata.json') as f:\n repodata_unzck = f.read()\n\n assert repodata == repodata_unzck # NOQA # type: ignore\n else:\n with open(repodata_path) as f:\n repodata = f.read() # NOQA\n","repo_name":"mamba-org/quetz","sub_path":"plugins/quetz_repodata_zchunk/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"16"} +{"seq_id":"70082936968","text":"#!/usr/bin/env python\nimport numpy as np\nimport cv2\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nbridge = CvBridge()\nhog = cv2.HOGDescriptor()\n\ndef inside(r, q):\n rx, ry, rw, rh = r\n qx, qy, qw, qh = q\n return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh\n\n\ndef draw_detections(img, rects, thickness = 1):\n for x, y, w, h in rects:\n # the HOG detector returns slightly larger rectangles than the real objects.\n # so we slightly shrink the rectangles to get a nicer output.\n pad_w, pad_h = int(0.15*w), int(0.05*h)\n cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)\n\ndef callback(data):\n frame = 
bridge.imgmsg_to_cv2(data, \"bgr8\")\n found,w=hog.detectMultiScale(frame, winStride=(8,8), padding=(32,32), scale=1.05)\n draw_detections(frame,found)\n cv2.imshow('feed',frame)\n cv2.waitKey(3)\n #ch = 0xFF & cv2.waitKey(1)\n #if ch == 27:\n #break\n #cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )\n #cap=cv2.VideoCapture('video3.mp4')\n try:\n rospy.init_node('peopledetector_node', anonymous=False)\n rospy.Subscriber('/image_raw', Image, callback)\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"jginesclavero/proc_imagen_mj","sub_path":"scripts/peopledetect.py","file_name":"peopledetect.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34320903189","text":"def add_thousands_separator(fn):\n def wrapper(a, b):\n result = fn(a, b)\n revers_result = str(result)[::-1]\n count = 0\n new_text_num = \"\"\n for i in revers_result:\n if count == 3:\n new_text_num += \"'\" + i\n count = 1\n else:\n new_text_num += i\n count += 1\n return new_text_num[::-1]\n\n return wrapper\n\n\n@add_thousands_separator\ndef multiply(a, b):\n return a * b\n\n\nprint(multiply(9336, 1223))\n","repo_name":"AlitaVitalii/lesson_python","sub_path":"python_basic_28.09.2022/homework/dz30_v1.py","file_name":"dz30_v1.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4141846806","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File : argument.py\n# Author : Jiayuan Mao\n# Email : maojiayuan@gmail.com\n# Date : 03/14/2017\n#\n# This file is part of Jacinle.\n# Distributed under terms of the MIT license.\n\nimport collections\nfrom typing import Any, Optional, Union, Sequence, Tuple, Callable\n\n__all__ = [\n 'get_2dshape', 'get_3dshape', 'get_4dshape',\n 'astuple', 'asshape',\n 'canonize_args_list',\n 'UniqueValueGetter'\n]\n\n\ndef get_2dshape(x: Optional[Union[int, Sequence[int]]], default: Tuple[int, int] = None, type: type = int) -> Tuple[int, int]:\n \"\"\"Convert a value or a tuple to a tuple of length 2.\n\n Args:\n x: a value of type `type`, or a tuple of length 2. If the input is a single value, it will be duplicated to a tuple of length 2.\n default: default value.\n type: expected type of the element.\n\n Returns:\n a tuple of length 2.\n \"\"\"\n if x is None:\n return default\n if isinstance(x, collections.Sequence):\n x = tuple(x)\n if len(x) == 1:\n return x[0], x[0]\n else:\n assert len(x) == 2, '2dshape must be of length 1 or 2'\n return x\n else:\n x = type(x)\n return x, x\n\n\ndef get_3dshape(x: Optional[Union[int, Sequence[int]]], default: Tuple[int, int, int] = None, type: type = int) -> Tuple[int, int, int]:\n \"\"\"Convert a value or a tuple to a tuple of length 3.\n\n Args:\n x: a value of type `type`, or a tuple of length 3. 
If the input is a single value, it will be duplicated to a tuple of length 3.\n default: default value.\n type: expected type of the element.\n\n Returns:\n a tuple of length 3.\n \"\"\"\n\n if x is None:\n return default\n if isinstance(x, collections.Sequence):\n x = tuple(x)\n if len(x) == 1:\n return x[0], x[0], x[0]\n else:\n assert len(x) == 3, '3dshape must be of length 1 or 3'\n return x\n else:\n x = type(x)\n return x, x, x\n\n\ndef get_4dshape(x: Optional[Union[int, Sequence[int]]], default: Tuple[int, int, int, int] = None, type: type = int) -> Tuple[int, int, int, int]:\n \"\"\"Convert a value or a tuple to a tuple of length 4.\n\n Args:\n x: a value of type `type`, or a tuple of length 4. If there is only one value, it will return (1, x, x, 1).\n If there are two values, it will return (1, x[0], x[1], 1).\n default: default value.\n type: expected type of the element.\n\n Returns:\n a tuple of length 4.\n \"\"\"\n if x is None:\n return default\n if isinstance(x, collections.Sequence):\n x = tuple(x)\n if len(x) == 1:\n return 1, x[0], x[0], 1\n elif len(x) == 2:\n return 1, x[0], x[1], 1\n else:\n assert len(x) == 4, '4dshape must be of length 1, 2, or 4'\n return x\n else:\n x = type(x)\n return 1, x, x, 1\n\n\ndef astuple(arr_like: Any) -> Tuple:\n \"\"\"Convert a sequence or a single value to a tuple. This method differ from the system method `tuple` in that\n a single value (incl. int, string, bytes) will be converted to a tuple of length 1.\n\n Args:\n arr_like: a sequence or a single value.\n\n Returns:\n a tuple.\n \"\"\"\n if type(arr_like) is tuple:\n return arr_like\n elif isinstance(arr_like, collections.Sequence) and not isinstance(arr_like, (str, bytes)):\n return tuple(arr_like)\n else:\n return tuple((arr_like,))\n\n\ndef asshape(arr_like: Optional[Union[int, Sequence[int]]]) -> Optional[Tuple[int, ...]]:\n \"\"\"Convert a sequence or a single value to a tuple of integers. It will return None if the input is None.\n\n Args:\n arr_like: a sequence or a single value.\n\n Returns:\n a tuple of integers.\n \"\"\"\n if type(arr_like) is tuple:\n return arr_like\n elif type(arr_like) is int:\n if arr_like == 0:\n return tuple()\n else:\n return tuple((arr_like,))\n elif arr_like is None:\n return None,\n else:\n return tuple(arr_like)\n\n\ndef canonize_args_list(args: Tuple[Any], *, allow_empty: bool = False, cvt: Optional[Callable[[Any], Any]] = None) -> Tuple[Any]:\n \"\"\"Convert the argument list to a tuple of values. This is useful to make unified interface for shape-related operations.\n\n Example:\n .. code-block:: python\n\n def foo(*args):\n args = canonize_args_list(args, allow_empty=True)\n print(args)\n\n foo(1, 2, 3) # (1, 2, 3)\n foo((1, 2, 3)) # (1, 2, 3)\n foo(1) # (1,)\n foo() # ()\n\n Args:\n args: the argument list.\n allow_empty: whether to allow empty argument list.\n cvt: a function to be applied to each element.\n \"\"\"\n\n if not allow_empty and not args:\n raise TypeError('at least one argument must be provided')\n\n if len(args) == 1 and isinstance(args[0], (list, tuple)):\n args = args[0]\n if cvt is not None:\n args = tuple(map(cvt, args))\n return args\n\n\nclass UniqueValueGetter(object):\n \"\"\"A helper class to ensure that a value is unique.\n\n Example:\n .. 
code-block:: python\n\n            uvg = UniqueValueGetter()\n            uvg.set(1)\n            uvg.set(2) # will raise ValueError\n            uvg.set(1)\n\n            print(uvg.get()) # 1\n    \"\"\"\n\n    def __init__(self, msg: str = 'Unique value checking failed', default: Any = None):\n        \"\"\"Initialize the UniqueValueGetter.\n\n        Args:\n            msg: the error message.\n            default: the default value.\n        \"\"\"\n        self._msg = msg\n        self._val = None\n        self._default = default\n\n    def set(self, v):\n        assert self._val is None or self._val == v, self._msg + ': expect={} got={}'.format(self._val, v)\n        self._val = v\n\n    def get(self):\n        return self._val or self._default\n\n","repo_name":"vacancy/Jacinle","sub_path":"jacinle/utils/argument.py","file_name":"argument.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"} +{"seq_id":"27340261546","text":"\nimport numpy as np\nimport tensorflow as tf\nimport horovod.tensorflow as hvd\n\nhvd.init()\nsize = hvd.size()\nrank = hvd.rank()\n\ntensor = tf.Variable(np.array([1.0,1.0,1.0], dtype=np.float32) * rank, name = 'tensor')\nassign_op = tf.assign(tensor, np.array([99.0, 99.0, 99.0]))\n\n# An op which broadcasts all tensors on root rank to the same tensors\n# on all other Horovod processes.\nbroadcast_op = hvd.broadcast_global_variables(0)\n\n# The operation will not start until all processes are ready \n# to receive the tensor.\n\nwith tf.Session() as sess:\n\n    sess.run(tf.global_variables_initializer())\n    print(\"rank %d before broadcast: %s\" % (rank, sess.run(tensor)))\n\n    if rank == 0:\n        sess.run(assign_op)\n\n    sess.run(broadcast_op)\n    print(\"rank %d after broadcast: %s\" % (rank, sess.run(tensor)))\n    # => [99. 99. 99.]\n","repo_name":"asprenger/distributed-training-patterns","sub_path":"horovod/hvd_broadcast.py","file_name":"hvd_broadcast.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"36541616551","text":"import logging\nimport threading\nimport time\nimport keyboard\n\nimport activity_log\nimport notification\nimport sound\nimport text_generator\n\nlogging.basicConfig(level=logging.DEBUG, format='%(threadName)s: %(message)s')\n\nstop = False # flag used to stop the worker threads\ninterval = 2 # logging interval in seconds\nworking_state_list = [\"start\",\n                      \"terminate\",\n                      \"cheer\",\n                      \"praise\",\n                      \"idle\"]\nworking_state = \"idle\"\n\n\ndef log_activity_to_file(previous_program_list, current_program_list,\n                         skip_duplicate=True):\n    global working_state\n\n    # Detect program launches and terminations\n    current_program_list = activity_log.get_all_windows()\n    started = activity_log.get_title_of_started_program(\n        previous_program_list=previous_program_list,\n        current_program_list=current_program_list)\n    terminated = activity_log.get_title_of_terminated_program(\n        previous_program_list=previous_program_list,\n        current_program_list=current_program_list)\n\n    if terminated:\n        working_state = \"terminate\"\n        title = terminated\n        state = \"T\"\n        activity_log.print_to_file(title, state)\n        event_notification.set()\n        event_voice.set()\n    \n    if started:\n        working_state = \"start\"\n        title = started\n        state = \"S\"\n        activity_log.print_to_file(title, state)\n        activity_log.set_start_word_num(title.split()[0])\n        event_voice.set()\n        event_notification.set()\n\n    working_time = activity_log.get_working_time_on_current_window(interval=2)\n    logging.debug(\"working_time: \" + str(working_time))\n    thresholds = [3600 for _ in range(8)] # 1 to 8 hours\n    for threshold in thresholds:\n        if threshold <= working_time < threshold + interval:\n            working_state = \"cheer\"\n            event_voice.set()\n            event_notification.set()\n\n    \n    # Log the currently active window\n    title = activity_log.get_title_of_active_window(skip_duplicate)\n    state = \"A\"\n    if title:\n        activity_log.print_to_file(title, state)\n\n    title = activity_log.get_active_window_title() \n    if len(title.split()) < 2:\n        return \n    elif \".docx\" in title.split()[0] and title.split()[-1] == \"Word\":\n        start_word_num = activity_log.get_start_word_num()\n        finish_word_num = activity_log.get_finish_word_num(title.split()[0])\n        need_to_notify = notification.need_to_notify(start_word_num,finish_word_num)\n        if need_to_notify:\n            working_state = \"praise\"\n            event_notification.set()\n            event_voice.set()\n    \n\ndef worker_log(event_log):\n    \"\"\"Worker that records the activity log.\n    \"\"\"\n    previous_program_list = activity_log.get_all_windows()\n    while not stop:\n        # Wait until event.set is called\n        event_log.wait()\n        event_log.clear()\n        logging.debug('logging start')\n\n        current_program_list = activity_log.get_all_windows()\n        log_activity_to_file(previous_program_list,\n                             current_program_list)\n        previous_program_list = current_program_list\n\n        logging.debug('logging end')\n\n\ndef worker_voice(event_voice):\n    \"\"\"Worker that plays the voice audio.\n    \"\"\"\n    while not stop:\n        # Wait until event.set is called\n        event_voice.wait()\n        event_voice.clear()\n        logging.debug('voice start')\n        text = text_generator.generate_text(working_state)\n        # time.sleep(3)\n        sound.play_sound(text)\n        logging.debug('voice end')\n\n\ndef worker_notification(event_notification):\n    \"\"\"Worker that shows notifications.\n    \"\"\"\n    global working_state\n    while not stop:\n        # Wait until event.set is called\n        event_notification.wait()\n        event_notification.clear()\n        logging.debug('notify start')\n        text = text_generator.generate_text(working_state)\n        img_path = notification.generate_path(working_state)\n        notification.notify(text, img_path)\n        logging.debug('notify end')\n        if working_state == \"praise\": \n            title = activity_log.get_active_window_title() \n            activity_log.set_start_word_num(title.split()[0])\n        working_state = \"idle\"\n\ndef worker_main(event_log, event_voice, event_notification):\n    \"\"\"Worker at the center of the processing.\n    \"\"\"\n    global stop\n    global working_state\n\n    logging.debug('start')\n    print(stop)\n    while not stop:\n        if keyboard.is_pressed(\"q\"):\n            logging.debug(\"q is pressed, program end\")\n            stop = True # set the shared stop flag before exiting\n            exit()\n\n        time.sleep(interval)\n        event_log.set()\n        logging.debug(\"event_log.set()\")\n        logging.debug(\"working_state: \" + working_state)\n\n        need_to_play_voice = sound.need_to_sound()\n        if need_to_play_voice:\n            event_voice.set()\n            logging.debug(\"event_voice.set()\")\n\n        need_to_notify = notification.need_to_notify(-1,-1)\n        if need_to_notify:\n            event_notification.set()\n            logging.debug(\"event_notification.set()\")\n\n\nif __name__ == '__main__':\n    event_log = threading.Event()\n    event_voice = threading.Event()\n    event_notification = threading.Event()\n\n    thread_main = threading.Thread(\n        name=\"thread_main\",\n        target=worker_main,\n        args=(event_log, event_voice, event_notification, ))\n\n    # Daemonize every thread except the main one\n    thread_log = threading.Thread(\n        name=\"thread_log\",\n        target=worker_log,\n        args=(event_log,))\n    thread_log.setDaemon(True)\n\n    thread_voice = threading.Thread(\n        name=\"thread_voice\",\n        target=worker_voice,\n        args=(event_voice,))\n    thread_voice.setDaemon(True)\n\n    thread_notification = threading.Thread(\n        name=\"thread_notification\",\n        target=worker_notification,\n        args=(event_notification,))\n    thread_notification.setDaemon(True)\n\n    thread_main.start()\n    
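# the worker threads were daemonized above, so they stop together with the main thread\n    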
thread_voice.start()\n thread_notification.start()\n thread_log.start()\n","repo_name":"jphacks/D_2016","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"38294186334","text":"import numbers\nimport numpy as np\n\n# import math\n\ndef pcc(x, y):\n\t\"\"\"\nComputes an estimate of the PCC between random variables X and Y\n\nFirst form:\n Args:\n x (list of N floats, N>1): N-points sample of random variable X\n y (list of N floats, N>1): N-points sample of random variable Y\n \n Returns:\n float: PCC estimate of random variables X and Y\n\n Example:\n import tr_pcc\n ...\n x = [0]*N # X sample\n y = [0]*N # Y sample\n p = 0 # PCC estimate\n for n in range (N): # For N draws\n x[n] = draw_X # Draw random variable X\n y[n] = draw_Y # Draw random variable Y\n p = tr_pcc.pcc (x,y) # Compute PCC(X,Y) estimate\n print (\"PCC(X,Y) = %lf\" % p) # Print PCC estimate\n ...\n\nSecond form:\n Args:\n x (list of N floats, N>1): N-points sample of random variable X\n y (list of K lists of N floats, N>1): K different N-points\n samples of random variables Y0,Y1,...,YK-1\n \n Returns:\n list of K floats: PCC estimates of random variables X and Y0,\n X and Y1,..., X and YK-1\n\n Example:\n import tr_pcc\n ...\n x = [0]*N # X sample\n y = [[0]*N for k in range(K)] # Yk samples, 0<=k1): N-points sample of vector random variable X\n y (list of N floats, N>1): N-points sample of random variable Y\n \n Returns:\n list of L floats: PCC estimate of random variables X and Y\n\n Example:\n import tr_pcc\n ...\n x = [[0]*L for n in range(N)] # X sample\n y = [0]*N # Y sample\n p = [0]*L # PCC estimate\n for n in range (N): # For N draws\n for l in range (L): # For L components\n x[n][l] = draw_X (l) # Draw component of random variable X\n y[n] = draw_Y # Draw random variable Y\n p = tr_pcc.pcc (x,y) # Compute PCC(X,Y) estimate\n for l in range (L): # For L components\n\t\t\tprint (\"PCC(X,Y)[%d] = %lf\" % (l,p[l])) # Print component of PCC estimate\n ...\n\nFourth form:\n Args:\n x (list of N lists of L floats, N>1): N-points sample of vector random variable X\n y (list of K lists of N floats, N>1): K different N-points\n samples of random variables Y0,Y1,...,YK-1\n \n Returns:\n list of K lists of L floats: PCC estimates of random variables X and Y0,\n X and Y1,..., X and YK-1\n\n Example:\n import tr_pcc\n ...\n x = [[0]*L for n in range(N)] # X sample\n y = [[0]*N for k in range(K)] # Yk samples, 0<=k[\\-\\w]+)/newsfeeddescription/',newsfeeddescription,name='newsfeeddescription'),\n url(r'^newsapi/',NewsapiViewSet.as_view({'get':'list'}),name='newsapi'),\n url(r'^(?P[\\-\\w]+)/newsfeedsdetail/',newsfeedsdetail,name='newsfeedsdetail'),\n ]","repo_name":"feederfox/FeederFox-Media","sub_path":"NewsFeeds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30691662601","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nn = int(input())\nlst = []\ncheck = [[0]*n for i in range(n)]\ncheckRG =[[0]*n for i in range(n)]\nanswer = 0\nanswerRG = 0\nfor i in range(n) :\n arr = []\n s = input()\n for j in range(n) :\n if s[j] == 'B' :\n arr.append(0)\n elif s[j] == 'R' :\n arr.append(1)\n else :\n arr.append(2)\n lst.append(arr)\n\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\nqueue = deque([])\nqueueRG = deque([])\n\nfor i in 
range(n) :\n for j in range(n) :\n if check[i][j] == 0 :\n answer += 1\n check[i][j] = answer\n queue.append([i, j])\n\n while queue :\n x, y = queue.popleft()\n # check[x][y] = answer\n for l in range(4) :\n r = x + dx[l]\n c = y + dy[l]\n if r < 0 or r >= n or c < 0 or c >= n :\n continue\n if lst[x][y] == lst[r][c] and check[r][c] == 0:\n check[r][c] = answer\n queue.append([r,c])\n\n if checkRG[i][j] == 0 :\n answerRG += 1\n checkRG[i][j] = answerRG\n queueRG.append([i, j])\n\n while queueRG :\n x, y = queueRG.popleft()\n # checkRG[x][y] = answerRG\n for l in range(4) :\n r = x + dx[l]\n c = y + dy[l]\n if r < 0 or r >= n or c < 0 or c >= n :\n continue\n if checkRG[r][c] == 0:\n if ((lst[x][y] and lst[r][c]) or (not lst[x][y] and not lst[r][c])) :\n checkRG[r][c] = answerRG\n queueRG.append([r,c])\n\nprint(answer, answerRG)","repo_name":"SuperH0ng/algorithm","sub_path":"따로 푼 것/백준/백준 10026(적록색약).py","file_name":"백준 10026(적록색약).py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34216751681","text":"numeros = []\ncont = 0\nesc = 's'\nwhile True:\n n = int(input('Digite um valor: '))\n if n not in numeros:\n numeros.append(n)\n print('Numero adicionado com sucesso..')\n else:\n print('Valor duplicado! Não vou adicionar.')\n esc = input('Quer o continuar [s]SIM [n]NÃO: ').upper()\n while esc not in 'sn':\n print('Opção inválida!')\n esc = input('Quer o continuar? [s]SIM [n]NÃO: ').upper()\n if esc == 'n':\n break\nprint('-='*20)\nprint(f'Os números validados digitados foram: {sorted(numeros)}')\n","repo_name":"GustaHenriPe/Exercicios-Python","sub_path":"PythonExercicios/ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"818873853","text":"from fms_core.template_importer.row_handlers._generic import GenericRowHandler\nfrom fms_core.template_importer._constants import LOAD_ALL\nfrom fms_core.services.sample import get_sample_from_container, prepare_library\nfrom fms_core.services.container import get_container, get_or_create_container\nfrom fms_core.services.index import get_index\nfrom fms_core.services.library import create_library\n\n\n\nclass LibraryRowHandler(GenericRowHandler):\n def __init__(self):\n super().__init__()\n\n def process_row_inner(self, library_batch_info, source_sample, volume_used,\n comment, container, volume, index, strandedness, workflow):\n\n if not library_batch_info:\n self.errors['library_preparation'] = 'No batch is associated with this library.'\n\n # Calling the service creator for Samples in LibraryPreparation\n source_sample_obj, self.errors['container'], self.warnings['container'] = \\\n get_sample_from_container(barcode=source_sample['barcode'], coordinates=source_sample['coordinates'])\n\n if not volume_used:\n self.errors['volume_used'] = f\"Volume used must be entered\"\n\n if source_sample_obj:\n # Set the actual volumed_used in case the load all option was used\n volume_used = source_sample_obj.volume if volume_used == LOAD_ALL else volume_used\n\n if volume_used > source_sample_obj.volume:\n self.errors['volume_used'].append(f\"Volume used ({volume_used}) exceeds the current volume of the sample ({source_sample_obj.volume})\")\n\n # Check if sample is not a library or a pool of libraries\n if source_sample_obj.is_library:\n self.errors['source_sample'] = f\"Source sample can't be a library or a pool of 
libraries.\"\n\n # Add a warning if the sample has failed qc\n if any([source_sample_obj.quality_flag is False, source_sample_obj.quantity_flag is False]):\n self.warnings[\"qc_flags\"] = (\"Source sample {0} has failed QC.\", [source_sample_obj.name])\n\n # Populate the libraries with the batch and individual information\n protocol = library_batch_info['protocol']\n process_by_protocol = library_batch_info['process_by_protocol']\n\n # Retrieve process\n process_obj = process_by_protocol[protocol.id]\n\n container_coordinates = container['coordinates']\n\n container_parent_obj = None\n if container['parent_barcode']:\n container_parent_obj, self.errors['parent_container'], self.warnings['parent_container'] = \\\n get_container(barcode=container['parent_barcode'])\n\n container_obj, created, self.errors['library_container'], self.warnings['library_container'] = get_or_create_container(\n name=container['name'],\n barcode=container['barcode'],\n kind=container['kind'],\n container_parent=container_parent_obj if container_parent_obj else None,\n coordinates=container['parent_coordinates'] if container_parent_obj else None,\n creation_comment=comment)\n\n if container_obj and not created:\n self.warnings['library_container'] = ('Using existing container {0}', [container_obj.name])\n\n index_obj, self.errors['index'], self.warnings['index'] = get_index(index)\n\n library_info = dict(\n library_type=library_batch_info['library_type'],\n library_date=library_batch_info['library_date'],\n platform=library_batch_info['platform'],\n index=index_obj,\n strandedness=strandedness,\n )\n\n libraries_by_derived_sample = {}\n for derived_sample_source in source_sample_obj.derived_samples.all():\n library_obj, self.errors['library'], self.warnings['library'] = create_library(library_type=library_info['library_type'],\n index=index_obj,\n platform=library_info['platform'],\n strandedness=strandedness)\n libraries_by_derived_sample[derived_sample_source.id] = library_obj\n\n sample_destination, self.errors['library_preparation'], self.warnings['library_preparation'] = \\\n prepare_library(process=process_obj,\n sample_source=source_sample_obj,\n container_destination=container_obj,\n libraries_by_derived_sample=libraries_by_derived_sample,\n volume_used=volume_used,\n execution_date=library_info['library_date'],\n coordinates_destination=container_coordinates,\n volume_destination=volume,\n comment=comment,\n workflow=workflow)\n else:\n self.errors['sample_source'] = 'Sample source is needed to prepare a library.'\n","repo_name":"c3g/freezeman","sub_path":"backend/fms_core/template_importer/row_handlers/library_preparation/library_preparation.py","file_name":"library_preparation.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"42599886504","text":"from collections import defaultdict\nfrom typing import Optional, Tuple, Set, Iterable\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data._utils.collate import default_collate\nfrom torch.utils.data._utils.pin_memory import pin_memory\n\nfrom .config import DataConfig\nfrom .dataset import DataTable, Chart\nfrom .sequence import Sequence, State\nfrom .special_tokens import SpecialTokens\nfrom .token import AnaType, Token\n\n\nclass ChartUserActions(Dataset):\n __slots__ = \"chart\", \"seq_len\"\n\n def __init__(self, cUID: str, ana_type: AnaType, idx_to_field: dict,\n config: DataConfig, search_sampling: bool):\n 
# TODO: remove this try-catch. (Work item #63)\n try:\n self.chart = Chart(cUID, ana_type, idx_to_field, config, search_sampling)\n self.seq_len = self.chart.seq_len()\n except:\n self.seq_len = 0\n\n def __len__(self):\n return len(self.chart.complete_states()) * (self.seq_len - 1)\n\n def __getitem__(self, index) -> Tuple[State, Token]:\n state_idx = index // (self.seq_len - 1)\n state_len = index % (self.seq_len - 1) + 1\n state = self.chart.states[state_idx].prefix(state_len)\n action = self.chart.states[state_idx][state_len]\n return state, action\n\n\nclass QValue:\n __slots__ = 'state', 'actions', 'valid_mask', 'has_valid_action', 'values', 'n_fields'\n\n def __init__(self, state: State, action_space: Sequence, valid_actions: Iterable[bool], action_values: Iterable):\n \"\"\"\n :param state: a token sequence describing the state\n :param action_space: the whole action space (context), including invalid ones for the state\n :param valid_actions: a bool list indicating if an action in the action_space is valid\n :param action_values: 1 if the action leads to the final reward\n \"\"\"\n self.state = state\n self.actions = action_space # The field tokens should always be the first n_fields ones.\n self.valid_mask = np.array(valid_actions)\n self.has_valid_action = any(valid_actions)\n self.values = np.array(action_values)\n self.n_fields = action_space.num_fields()\n\n def __hash__(self) -> int:\n return hash(self.state)\n\n def __len__(self):\n return len(self.state)\n\n def __copy__(self):\n return QValue(self.state, self.actions, self.valid_mask, self.values)\n\n def to_dict(self, state_len: int, action_len: int, field_permutation: bool, config: DataConfig):\n # Necessary preparations for \"values\" tensor\n values = self.values.copy()\n values[np.logical_not(self.valid_mask)] = -1 # Only 0, 1 for valid actions\n if field_permutation: # randomly permute field order in a table\n permutation = np.random.permutation(self.n_fields)\n values[:self.n_fields] = values[permutation] # we only need to focus on the field tokens\n else:\n permutation = None\n\n return {\n \"state\": self.state.to_dict(state_len, permutation, config.need_field_indices, False, config),\n \"actions\": self.actions.to_dict(action_len, permutation, False, True, config),\n \"values\": torch.tensor(np.pad(values, (0, action_len - len(values)), mode='constant', constant_values=-1),\n dtype=torch.long)\n }\n\n @staticmethod\n def collate(batch, config: DataConfig, field_permutation: bool, pin: bool = False):\n state_len = max(map(lambda x: len(x.state), batch))\n action_len = max(map(lambda x: len(x.actions), batch))\n\n batch = default_collate([x.to_dict(state_len, action_len, field_permutation, config) for x in batch])\n return pin_memory(batch) if pin else batch\n\n\ndef determine_action_values(action_space: Sequence, positive_actions: Optional[Set[Token]]):\n if positive_actions:\n return [1 if action in positive_actions else 0 for action in action_space]\n else:\n return [0] * len(action_space)\n\n\nclass TableQValues(Dataset):\n def __init__(self, tUID: str, special_tokens: SpecialTokens, config: DataConfig, search_sampling: bool = False):\n self.table = DataTable(tUID, special_tokens, config)\n\n self.complete_states = set()\n # Merge the samples from the Charts with specified types in config\n self.state_actions = defaultdict(set)\n\n self.valid_c = 0\n for cUID, cType in zip(self.table.cUIDs, self.table.cTypes):\n if cType not in config.input_types:\n continue\n chart = ChartUserActions(cUID, cType, 
self.table.idx2field, config, search_sampling)\n if chart.seq_len == 0:\n continue\n\n for state, action in chart:\n self.state_actions[state].add(action)\n chart_complete_states = chart.chart.complete_states()\n if search_sampling:\n self.complete_states.update(chart_complete_states)\n if len(chart_complete_states) > 0:\n self.valid_c += 1\n\n self.samples = list(self.state_actions.items())\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, index) -> QValue:\n state, positive_actions = self.samples[index]\n valid_actions = state.valid_actions(self.table.action_space, top_freq_func=self.table.config.top_freq_func)\n action_values = determine_action_values(self.table.action_space, positive_actions)\n\n return QValue(state, self.table.action_space, valid_actions, action_values)\n\n def get_state_actions(self):\n return self.state_actions\n\n def get_positive_prefixes(self):\n return self.complete_states | self.get_state_actions().keys()\n","repo_name":"Table2Charts/Table2Charts","sub_path":"Table2Charts/data/qvalues.py","file_name":"qvalues.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"1136527697","text":"import math\ninput = open(\"3/input.txt\").read().split(\"\\n\")\nexample = open(\"3/example.txt\").read().split(\"\\n\")\nhax = open(\"3/hax.txt\").read().split(\"\\n\")\n\n#input = [x.split(\" \") for x in input]\n\n\n\ndef part1(list):\n gamma = []\n epsilon = []\n \n for i in range(len(list[0])):\n ones = 0\n zeros = 0\n for row in list:\n if int(row[i]) == 1:\n ones +=1\n else:\n zeros +=1\n most = 1 if ones > zeros else 0\n least = 0 if ones > zeros else 1\n gamma.append(most)\n epsilon.append(least)\n \n g = int(\"\".join(map(str,gamma)),2)\n e = int(\"\".join(map(str,epsilon)),2)\n # print(gamma,epsilon)\n # g = 0\n # e = 0\n # gamma.reverse()\n # epsilon.reverse()\n # for i in range(len(gamma)):\n # g += gamma[i] * (2**i)\n # e += epsilon[i]*(2**i)\n # print(int(t,2) * int(y,2))\n\n return g*e\n\n\n\ndef part2(list):\n oxygen = 0\n co2 = 0\n\n oxygen_list = [x for x in list]\n co2_list = [x for x in list]\n \n for i in range(len(list[0])): \n ox_ones = 0\n ox_zeros = 0\n co2_ones = 0\n co2_zeros = 0\n\n for row in oxygen_list:\n if int(row[i]) == 1:\n ox_ones +=1\n else:\n ox_zeros +=1\n for row in co2_list:\n if int(row[i]) == 1:\n co2_ones +=1\n else:\n co2_zeros +=1\n most = 1 if ox_ones >= ox_zeros else 0\n least = 0 if co2_ones >= co2_zeros else 1\n #filter \n if(len(oxygen_list) != 1):\n oxygen_list = [x for x in oxygen_list if int(x[i])==most]\n if(len(co2_list) != 1):\n co2_list = [x for x in co2_list if int(x[i])==least]\n if (len(co2_list) == 1 and len(oxygen_list) == 1):\n break\n\n #reverse string\n\n oxygen = int(\"\".join(oxygen_list),2)\n co2 = int(\"\".join(co2_list),2)\n\n \n return(oxygen * co2)\n\nprint(f\"mine part 1: {part1(input)}\")\nprint(f\"hax part 1: {part1(hax)}\")\nprint(f\"mine part 2: {part2(input)}\")\nprint(f\"example part 2: {part2(example)}\")","repo_name":"Olivolja/Aoc_2021","sub_path":"3/aoc_3.py","file_name":"aoc_3.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5646974619","text":"# Find the sum of all numbers under one million that are palindromic in both\n# bases 10 and 2 (binary)\n\nimport MyModule as mm\n\nsum = 0\n\nfor baseTen in range(0,1000000):\n if mm.is_it_palindromic(baseTen):\n binaryString = 
bin(baseTen)[2:]\n if binaryString == binaryString[::-1]:\n sum += baseTen\n\nprint(sum)\n","repo_name":"bencouser/project_euler","sub_path":"36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33799438533","text":"import datetime as dt\nimport numpy as np\n\n\ndef load_data(file_name):\n with open(file_name, 'r') as input_file:\n lines_read = input_file.readlines()\n # Read all the data from the 5th index until before the last index\n # Parse the line into index numbers and coordinates\n # Convert coordinates to float\n name = ''\n data = []\n for text in lines_read:\n if len(text) > 0 and text[0].isdigit():\n data.append(tuple(map(float, text.strip().split(' ')[1:3])))\n elif 'NAME: ' in text:\n name = text[6:-1]\n return name, data\n\n\ndef early_stop_checker(seconds=float('inf'), target_cost=0):\n start_time = dt.datetime.now()\n def _lambda(q): \n return ((dt.datetime.now() - start_time).total_seconds() < seconds) and q > target_cost\n return _lambda\n\n\ndef distance(node_a: list, node_b: list) -> float:\n return np.sqrt((node_a[0] - node_b[0]) ** 2 + (node_a[1] - node_b[1]) ** 2)\n\n\ndef tour_cost(path: list) -> float:\n cost = 0\n for previous_index, current_node in enumerate(path[1:]):\n previous_node = path[previous_index]\n cost += distance(previous_node, current_node)\n cost += distance(path[0], path[-1])\n return cost","repo_name":"moalani/CSE6140-TSP-Project","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70548338568","text":"while True:\n try:\n N = int(input())\n\n somas = []\n while len(somas) < N:\n numeros = [int(x) for x in input().strip().split(' ')]\n somas.extend(numeros)\n\n for i in range(1, N):\n somas[i] += somas[i - 1]\n\n inicio, fim, resposta = 0, N, somas[N - 1]\n while(inicio < fim):\n meio = (inicio + fim)//2\n\n rangel = somas[meio]\n gugu = somas[N - 1] - rangel\n resposta = min(resposta, abs(rangel - gugu))\n\n if(rangel == gugu):\n break\n elif(rangel < gugu):\n meio = inicio + 1\n else:\n meio = fim\n\n print(resposta)\n except EOFError:\n break\n","repo_name":"xTecna/solucoes-da-beecrowd","sub_path":"problemas/iniciante/2715/2715.py","file_name":"2715.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"pt","doc_type":"code","stars":42,"dataset":"github-code","pt":"16"} +{"seq_id":"1053536003","text":"import torch\nimport re\nfrom operator import itemgetter\nfrom nsvqa.nn.interpreter import util\nfrom nsvqa.nn.interpreter.batch_base_types import TokenType\n\nclass OracleBase(torch.nn.Module):\n \n def __init__(self, ontology, feature_dim=1):\n super(OracleBase, self).__init__()\n self._feature_dim = feature_dim\n self._ontology = ontology\n\n def forward(self, token_type, token_list, token_image_map, world, default_log_likelihood=-30, normalized_probability=True):\n if not isinstance(token_list, list):\n token_list = [token_list]\n t_list = [a.strip() for a in token_list]\n \n if token_type == TokenType.ATTRIBUTE:\n res = self._compute_attribute_log_likelihood(world._device, t_list, world._attribute_features, world._meta_data, world._object_num, token_image_map, \\\n world._object_image_map, default_log_likelihood=default_log_likelihood, normalized_probability=normalized_probability)\n\n if isinstance(res, torch.Tensor):\n res = 
res.view(len(token_list), world._object_num, self._feature_dim)\n \n elif token_type == TokenType.RELATION:\n res = self._compute_relation_log_likelihood(world._device, t_list, world._relation_features, world._meta_data, world._object_num, token_image_map, \\\n world._object_image_map, default_log_likelihood=default_log_likelihood, normalized_probability=normalized_probability)\n \n if isinstance(res, torch.Tensor):\n res = res.view(len(token_list), world._object_num, world._object_num, self._feature_dim)\n\n return res\n\n def _compute_attribute_log_likelihood(self, device, attribute_list, object_features, meta_data, object_num, attribute_image_map, object_image_map, default_log_likelihood=-30, normalized_probability=True):\n pass\n\n def _compute_relation_log_likelihood(self, device, relation_list, pair_object_features, meta_data, object_num, relation_image_map, object_image_map, default_log_likelihood=-30, normalized_probability=True):\n pass\n\n def get_embedding(self, tokens, meta_data, device):\n if meta_data is None:\n embedding = torch.from_numpy(self._ontology.get_embedding(tokens)).float().to(device)\n else:\n try:\n ind = itemgetter(*tokens)(meta_data['index'])\n embedding = meta_data['embedding'][ind, :]\n except KeyError as e:\n embedding = torch.from_numpy(self._ontology.get_embeddings(tokens)).float().to(device)\n\n return embedding\n\n######################################################################################################################################\n\nclass RandomOracle(OracleBase):\n \n def __init__(self, ontology, device):\n super(RandomOracle, self).__init__(ontology)\n self._device = device\n\n def _compute_attribute_log_likelihood(self, device, attribute_list, object_features, meta_data, object_num, attribute_image_map, object_image_map, default_log_likelihood=-30):\n res = torch.rand(len(attribute_list) * self._object_num, device=self._device)\n # print(\"\\nAttribute likelihood:\")\n # print(res.view(len(attribute_list), -1))\n return util.safe_log(res)\n\n def _compute_relation_log_likelihood(self, device, relation_list, pair_object_features, meta_data, object_num, relation_image_map, object_image_map, default_log_likelihood=-30):\n res = torch.rand(len(relation_list) * (self._object_num**2), device=self._device)\n # print(\"\\nRelation likelihood:\")\n # print(res.view(len(relation_list), self._object_num, self._object_num))\n return util.safe_log(res)\n\n######################################################################################################################################\n\nclass StaticOracle(OracleBase):\n \n def __init__(self, ontology, feature_dim=1):\n super(StaticOracle, self).__init__(ontology, feature_dim=feature_dim)\n\n def _extract_entries(self, a_list, features):\n i = [features['index'][c] if c in features['index'] else 0 for c in a_list]\n ind = torch.tensor(i, dtype=torch.int64, device=features['log_likelihood'].device)\n return features['log_likelihood'][ind]\n\n def _compute_attribute_log_likelihood(self, device, attribute_list, object_features, meta_data, object_num, attribute_image_map, object_image_map, default_log_likelihood=-30):\n return self._extract_entries(attribute_list, object_features)\n\n def _compute_relation_log_likelihood(self, device, relation_list, pair_object_features, meta_data, object_num, relation_image_map, object_image_map, default_log_likelihood=-30):\n return self._extract_entries(relation_list, 
pair_object_features)\n","repo_name":"microsoft/DFOL-VQA","sub_path":"src/nsvqa/nn/vision/base_oracle.py","file_name":"base_oracle.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"16"} +{"seq_id":"9276448861","text":"from .database import connect\nfrom .exists import check_existsquery\nfrom .handlers import get_handler\nfrom .request import Request\nfrom .responses import (\n ErrorResponse,\n MethodNotAllowedResponse,\n NotFoundResponse,\n PermanentRedirectResponse,\n)\nfrom .routing import route\n\nimport logging\n\nlogger = logging.getLogger(\"sqlsite\")\n\n\ndef should_append_slash(request):\n return request.route.pattern.endswith(\"/\") and not request.path.endswith(\"/\")\n\n\ndef method_allowed(request):\n return request.method in {\"GET\", \"HEAD\"}\n\n\ndef get_response(request):\n if not method_allowed(request):\n return MethodNotAllowedResponse()\n matched_route = route(request.db, request.path)\n if not matched_route:\n return NotFoundResponse()\n request.route = matched_route\n if should_append_slash(request):\n return PermanentRedirectResponse(f\"/{request.path}/\")\n if not check_existsquery(request):\n return NotFoundResponse()\n handler = get_handler(matched_route.handler)\n response = handler(request)\n return response\n\n\ndef make_app(test_db=None):\n def app(environ, start_response):\n db = test_db or connect()\n request = Request(environ, db)\n try:\n response = get_response(request)\n except Exception as exception:\n logger.exception(exception)\n response = ErrorResponse()\n start_response(response.get_status_line(), response.get_headers())\n return response.get_content()\n\n return app\n","repo_name":"j4mie/sqlsite","sub_path":"sqlsite/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":177,"dataset":"github-code","pt":"16"} +{"seq_id":"16513138068","text":"def coordinateLister(jsonlist):\n \"\"\"\n -------------------\n VERSION: 2021-11-15\n -------------------\n \n Give a list of jsons as a parameter. For example a list created with readJson()-function.\n This function gives all x- and y-coordinates of density blocks, moisture blocks and all defects.\n \n Six lists will be returned, which all contains lists of all sheets as a parameter.\n \n Example\n -------\n \n xDensity, yDensity, xMoisture, yMoisture, xDefects, yDefects = coordinateLister(jsonlist)\n \n So if your jsonlist contains all 146 peeling sheets, then for example\n list xDensity contains 146 lists. First list inside xDensity contains all density x-coordinates of the first sheet and so on. 
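Defect coordinates are taken from each defect's m_mmpntGravity point and divided by the sheet's ObjectResX and ObjectResY, which presumably converts them into the same units as the block corner coordinates.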
\n \n \"\"\"\n xD_ALL = []; yD_ALL = []; xM_ALL = []; yM_ALL = []; xDEF_ALL = []; yDEF_ALL = []\n\n # Looping all sheets and appending sheets coordinates lists to lists containing lists of all sheets coordinate-lists\n for sheet in jsonlist:\n # Density-blocks\n xD = []; yD = []\n for dblock in sheet['DensityBlocks']:\n for key in dblock:\n if key == 'm_pntTL' or key == 'm_pntTR' or key == 'm_pntBL' or key == 'm_pntBR':\n xD.append(dblock[key]['x'])\n yD.append(dblock[key]['y'])\n xD_ALL.append(xD)\n yD_ALL.append(yD)\n\n # Moisture blocks\n xM = []; yM = []\n for dblock in sheet['MoistureBlocks']:\n for key in dblock:\n if key == 'm_pntTL' or key == 'm_pntTR' or key == 'm_pntBL' or key == 'm_pntBR':\n xM.append(dblock[key]['x'])\n yM.append(dblock[key]['y'])\n xM_ALL.append(xM)\n yM_ALL.append(yM)\n\n # Defects\n xDEF = []; yDEF = []\n for dblock in sheet['Defects']:\n for key in dblock:\n if key == 'm_mmpntGravity':\n xDEF.append(dblock[key]['x']/sheet['ObjectData'][0]['ObjectResX'])\n yDEF.append(dblock[key]['y']/sheet['ObjectData'][0]['ObjectResY'])\n xDEF_ALL.append(xDEF)\n yDEF_ALL.append(yDEF)\n return xD_ALL, yD_ALL, xM_ALL, yM_ALL, xDEF_ALL, yDEF_ALL","repo_name":"TuomasKarjalainen/project-RAUTE","sub_path":"Modules/coordinatelister.py","file_name":"coordinatelister.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24983332155","text":"import numpy as np\nimport threading\nimport time\nimport math\n\n'''\nnaive implementation of vector-matrix multiplication\n\nweight matrix shape: (n,n)\nvector shape: (n,1)\n'''\nglobal y\n\ndef naive_vector_matrix_mult(w, x):\n n = w.shape[0]\n # y = np.zeros((n,1))\n for i in range(n):\n for j in range(n):\n y[i]+=w[i,j]*x[j]\n # y[i] = np.dot(w[i,:], x)\n return\n\n\ndef vector_vector_thread(row, x, ind):\n y[ind] = np.dot(row, x)\n return\n \ndef single_partition(w, x, num_threads=16):\n n = w.shape[0]\n\n \n threads = []\n #8 cores, hyperthreading doubles to 16\n # we will broadcast to make 16 copies of x\n xb = np.broadcast_to(x.T, shape=(num_threads,n))\n \n vec_ind = 0\n thread_ind = 0\n \n while(vec_ind%s[^'\"]*)['\"].*\\/>\"\"\" % settings.STATIC_URL\n\n image_matches = re.findall(image_pattern, message.alternatives[0][0])\n\n added_images = {}\n\n for image_match in image_matches:\n\n if image_match not in added_images:\n img_content_cid = id_generator()\n on_disk_path = os.path.join(settings.MEDIA_ROOT, image_match.replace(settings.STATIC_URL, ''))\n img_data = open(on_disk_path, 'rb').read()\n img = MIMEImage(img_data)\n img.add_header('Content-ID', '<%s>' % img_content_cid)\n img.add_header('Content-Disposition', 'inline')\n message.attach(img)\n\n added_images[image_match] = img_content_cid\n\n def repl(matchobj):\n x = matchobj.group('img_src')\n y = 'cid:%s' % str(added_images[matchobj.group('img_src')])\n return matchobj.group(0).replace(matchobj.group('img_src'), 'cid:%s' % added_images[matchobj.group('img_src')])\n\n if added_images:\n message.alternatives = [(re.sub(image_pattern, repl, message.alternatives[0][0]), 'text/html')]\n message.body = re.sub(image_pattern, repl, message.body)\n \n ","repo_name":"su-danny/famdates","sub_path":"notification/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34863728865","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: ai ts=4 sts=4 et 
sw=4 nu\n\nimport os\nimport time\nimport json\nfrom io import BytesIO\n\nimport pytest\nimport requests\n\nfrom warcio import ArchiveIterator\nfrom jinja2 import Environment, PackageLoader\nfrom zimscraperlib.zim import Archive\n\nfrom warc2zim.main import (\n warc2zim,\n canonicalize,\n iter_warc_records,\n get_record_url,\n)\n\n\nTEST_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n\n\n# ============================================================================\nCMDLINES = [\n [\"example-response.warc\"],\n [\"example-response.warc\", \"--progress-file\", \"progress.json\"],\n [\"example-resource.warc.gz\", \"--favicon\", \"https://example.com/some/favicon.ico\"],\n [\"example-resource.warc.gz\", \"--favicon\", \"https://www.google.com/favicon.ico\"],\n [\"example-revisit.warc.gz\"],\n [\n \"example-revisit.warc.gz\",\n \"-u\",\n \"http://example.iana.org/\",\n \"--lang\",\n \"eng\",\n ],\n [\n \"example-utf8.warc\",\n \"-u\",\n \"https://httpbin.org/anything/utf8=%E2%9C%93?query=test&a=b&1=%E2%9C%93\",\n ],\n [\"single-page-test.warc\"],\n]\n\n\n@pytest.fixture(params=CMDLINES, ids=[\" \".join(cmds) for cmds in CMDLINES])\ndef cmdline(request):\n return request.param\n\n\n# ============================================================================\nFUZZYCHECKS = [\n {\n \"filename\": \"video-yt.warc.gz\",\n \"entries\": [\n \"H/youtube.fuzzy.replayweb.page/get_video_info?video_id=aT-Up5Y4uRI\",\n \"H/youtube.fuzzy.replayweb.page/videoplayback?id=o-AE3bg3qVNY-gAWwYgL52vgpHKJe9ijdbu2eciNi5Uo_w\",\n ],\n },\n {\n \"filename\": \"video-yt-2.warc.gz\",\n \"entries\": [\n \"H/youtube.fuzzy.replayweb.page/youtubei/v1/player?videoId=aT-Up5Y4uRI\",\n \"H/youtube.fuzzy.replayweb.page/videoplayback?id=o-AGDtIqpFRmvgVVZk96wgGyFxL_SFSdpBxs0iBHatQpRD\",\n ],\n },\n {\n \"filename\": \"video-vimeo.warc.gz\",\n \"entries\": [\n \"H/vimeo.fuzzy.replayweb.page/video/347119375\",\n \"H/vimeo-cdn.fuzzy.replayweb.page/01/4423/13/347119375/1398505169.mp4\",\n ],\n },\n]\n\n\n@pytest.fixture(params=FUZZYCHECKS, ids=[fuzzy[\"filename\"] for fuzzy in FUZZYCHECKS])\ndef fuzzycheck(request):\n return request.param\n\n\n# ============================================================================\nclass TestWarc2Zim(object):\n def list_articles(self, zimfile):\n zim_fh = Archive(zimfile)\n for x in range(zim_fh.entry_count):\n yield zim_fh.get_entry_by_id(x)\n\n def get_metadata(self, zimfile, name):\n zim_fh = Archive(zimfile)\n return zim_fh.get_metadata(name)\n\n def get_article(self, zimfile, path):\n zim_fh = Archive(zimfile)\n return zim_fh.get_content(path)\n\n def get_article_raw(self, zimfile, path):\n zim_fh = Archive(zimfile)\n return zim_fh.get_item(path)\n\n def verify_warc_and_zim(self, warcfile, zimfile):\n assert os.path.isfile(warcfile)\n assert os.path.isfile(zimfile)\n\n # autoescape=False to allow injecting html entities from translated text\n env = Environment(\n loader=PackageLoader(\"warc2zim\", \"templates\"),\n extensions=[\"jinja2.ext.i18n\"],\n autoescape=False,\n )\n\n head_insert = env.get_template(\"sw_check.html\").render().encode(\"utf-8\")\n\n # track to avoid checking duplicates, which are not written to ZIM\n warc_urls = set()\n\n zim_fh = Archive(zimfile)\n for record in iter_warc_records([warcfile]):\n url = get_record_url(record)\n if not url:\n continue\n\n if url in warc_urls:\n continue\n\n if record.rec_type not in ((\"response\", \"resource\", \"revisit\")):\n continue\n\n # ignore revisit records that are to the same url\n if (\n 
record.rec_type == \"revisit\"\n and record.rec_headers[\"WARC-Refers-To-Target-URI\"] == url\n ):\n continue\n\n # parse headers as record, ensure headers match\n url_no_scheme = url.split(\"//\", 2)[1]\n print(url_no_scheme)\n parsed_record = next(\n ArchiveIterator(BytesIO(zim_fh.get_content(\"H/\" + url_no_scheme)))\n )\n\n assert record.rec_headers == parsed_record.rec_headers\n assert record.http_headers == parsed_record.http_headers\n\n # ensure payloads match\n try:\n payload = zim_fh.get_item(\"A/\" + url_no_scheme)\n except KeyError:\n payload = None\n\n if record.rec_type == \"revisit\" or (\n record.http_headers and record.http_headers.get(\"Content-Length\") == \"0\"\n ):\n assert not payload\n else:\n payload_content = payload.content.tobytes()\n\n # if HTML, still need to account for the head insert, otherwise should have exact match\n if payload.mimetype.startswith(\"text/html\"):\n assert head_insert in payload_content\n assert (\n payload_content.replace(head_insert, b\"\")\n == record.buffered_stream.read()\n )\n else:\n assert payload_content == record.buffered_stream.read()\n\n warc_urls.add(url)\n\n def test_canonicalize(self):\n assert canonicalize(\"http://example.com/?foo=bar\") == \"example.com/?foo=bar\"\n\n assert canonicalize(\"https://example.com/?foo=bar\") == \"example.com/?foo=bar\"\n\n assert (\n canonicalize(\"https://example.com/some/path/http://example.com/?foo=bar\")\n == \"example.com/some/path/http://example.com/?foo=bar\"\n )\n\n assert (\n canonicalize(\"example.com/some/path/http://example.com/?foo=bar\")\n == \"example.com/some/path/http://example.com/?foo=bar\"\n )\n\n def test_warc_to_zim_specify_params_and_metadata(self, tmp_path):\n zim_output = \"zim-out-filename.zim\"\n warc2zim(\n [\n \"-v\",\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"--name\",\n \"example-response\",\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"-r\",\n \"https://cdn.jsdelivr.net/npm/@webrecorder/wabac@2.16.11/dist/\",\n \"--tags\",\n \"some\",\n \"--tags\",\n \"foo\",\n \"--desc\",\n \"test zim\",\n \"--tags\",\n \"bar\",\n \"--title\",\n \"Some Title\",\n ]\n )\n\n zim_output = tmp_path / zim_output\n\n assert os.path.isfile(zim_output)\n\n all_articles = {\n article.path: article.title for article in self.list_articles(zim_output)\n }\n\n assert all_articles == {\n # entries from WARC\n \"A/example.com/\": \"Example Domain\",\n \"H/example.com/\": \"H/example.com/\",\n # replay system files\n \"A/index.html\": \"A/index.html\",\n \"A/load.js\": \"A/load.js\",\n \"A/404.html\": \"A/404.html\",\n \"A/sw.js\": \"A/sw.js\",\n \"A/topFrame.html\": \"A/topFrame.html\",\n }\n\n zim_fh = Archive(zim_output)\n\n # ZIM metadata\n assert list(zim_fh.metadata.keys()) == [\n \"Counter\",\n \"Creator\",\n \"Date\",\n \"Description\",\n \"Language\",\n \"Name\",\n \"Publisher\",\n \"Scraper\",\n \"Tags\",\n \"Title\",\n ]\n\n assert zim_fh.has_fulltext_index\n assert zim_fh.has_title_index\n\n assert self.get_metadata(zim_output, \"Description\") == b\"test zim\"\n assert (\n self.get_metadata(zim_output, \"Tags\")\n == b\"_ftindex:yes;_category:other;_sw:yes;some;foo;bar\"\n )\n assert self.get_metadata(zim_output, \"Title\") == b\"Some Title\"\n\n def test_warc_to_zim(self, cmdline, tmp_path):\n # intput filename\n filename = cmdline[0]\n\n # set intput filename (first arg) to absolute path from test dir\n warcfile = os.path.join(TEST_DATA_DIR, filename)\n cmdline[0] = warcfile\n\n cmdline.extend([\"--output\", str(tmp_path), \"--name\", 
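# the test expects warc2zim to name the ZIM file {name}_{YYYY-MM}.zim; see zimfile below\n            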
filename])\n\n warc2zim(cmdline)\n\n zimfile = filename + \"_\" + time.strftime(\"%Y-%m\") + \".zim\"\n\n if \"--progress-file\" in cmdline:\n with open(tmp_path / \"progress.json\", \"r\") as fh:\n progress = json.load(fh)\n assert (\n progress[\"written\"] > 0\n and progress[\"total\"] > 0\n and progress[\"written\"] <= progress[\"total\"]\n )\n\n self.verify_warc_and_zim(warcfile, tmp_path / zimfile)\n\n def test_same_domain_only(self, tmp_path):\n zim_output = \"same-domain.zim\"\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, \"example-revisit.warc.gz\"),\n \"--favicon\",\n \"http://example.com/favicon.ico\",\n \"--include-domains\",\n \"example.com/\",\n \"--lang\",\n \"eng\",\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"same-domain\",\n \"--output\",\n str(tmp_path),\n ]\n )\n\n zim_output = tmp_path / zim_output\n\n for article in self.list_articles(zim_output):\n url = article.path\n # ignore the replay files, which have only one path segment\n if url.startswith(\"A/\") and len(url.split(\"/\")) > 2:\n assert url.startswith(\"A/example.com/\")\n\n def test_skip_self_redirect(self, tmp_path):\n zim_output = \"self-redir.zim\"\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, \"self-redirect.warc\"),\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"self-redir\",\n ]\n )\n\n zim_output = tmp_path / zim_output\n\n for article in self.list_articles(zim_output):\n url = article.path\n if url.startswith(\"H/\"):\n # ensure there is only one H/ record, and its a 200 (not 301)\n assert url == \"H/kiwix.org/\"\n assert b\"HTTP/1.1 200 OK\" in self.get_article(\n zim_output, \"H/kiwix.org/\"\n )\n\n def test_include_domains_favicon_and_language(self, tmp_path):\n zim_output = \"spt.zim\"\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, \"single-page-test.warc\"),\n \"-i\",\n \"reseau-canope.fr\",\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"spt\",\n ]\n )\n\n zim_output = tmp_path / zim_output\n\n for article in self.list_articles(zim_output):\n url = article.path\n # ignore the replay files, which have only one path segment\n if url.startswith(\"A/\") and len(url.split(\"/\")) > 2:\n assert \"reseau-canope.fr/\" in url\n\n # test detected language\n assert self.get_metadata(zim_output, \"Language\") == b\"fra\"\n\n # test detected favicon\n assert self.get_article(\n zim_output,\n \"A/lesfondamentaux.reseau-canope.fr/fileadmin/template/img/favicon.ico\",\n )\n assert self.get_metadata(zim_output, \"Illustration_48x48@1\")\n\n # test default tags added\n assert (\n self.get_metadata(zim_output, \"Tags\")\n == b\"_ftindex:yes;_category:other;_sw:yes\"\n )\n\n def test_all_warcs_root_dir(self, tmp_path):\n zim_output = \"test-all.zim\"\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR),\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"test-all\",\n \"--url\",\n \"http://example.com\",\n ]\n )\n zim_output = tmp_path / zim_output\n\n # check articles from different warc records in tests/data dir\n\n # ensure trailing slash added\n assert b'window.mainUrl = \"http://example.com/\"' in self.get_article(\n zim_output, \"A/index.html\"\n )\n\n # from example.warc.gz\n assert self.get_article(zim_output, \"A/example.com/\") != b\"\"\n\n # from single-page-test.warc\n assert (\n self.get_article(\n zim_output, \"A/lesfondamentaux.reseau-canope.fr/accueil.html\"\n )\n != b\"\"\n )\n\n # timestamp fuzzy match from example-with-timestamp.warc\n assert self.get_article(zim_output, \"H/example.com/path.txt?\") 
!= b\"\"\n\n def test_fuzzy_urls(self, tmp_path, fuzzycheck):\n zim_output = fuzzycheck[\"filename\"] + \".zim\"\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, fuzzycheck[\"filename\"]),\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"test-fuzzy\",\n ]\n )\n zim_output = tmp_path / zim_output\n\n for entry in fuzzycheck[\"entries\"]:\n res = self.get_article(zim_output, entry)\n assert b\"Location: \" in res\n\n def test_local_replay_viewer_url(self, tmp_path):\n zim_local_sw = \"zim-local-sw.zim\"\n\n res = requests.get(\n \"https://cdn.jsdelivr.net/npm/@webrecorder/wabac@2.16.11/dist/sw.js\"\n )\n\n with open(tmp_path / \"sw.js\", \"wt\") as fh:\n fh.write(res.text)\n\n warc2zim(\n [\n \"-v\",\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"-r\",\n str(tmp_path) + \"/\",\n \"--output\",\n str(tmp_path),\n \"--name\",\n \"local-sw\",\n \"--zim-file\",\n zim_local_sw,\n ]\n )\n\n assert os.path.isfile(tmp_path / zim_local_sw)\n\n def test_error_bad_replay_viewer_url(self, tmp_path):\n zim_output_not_created = \"zim-out-not-created.zim\"\n with pytest.raises(Exception) as e:\n warc2zim(\n [\n \"-v\",\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"-r\",\n \"x-invalid-x\",\n \"--output\",\n str(tmp_path),\n \"--name\",\n \"bad\",\n \"--zim-file\",\n zim_output_not_created,\n ]\n )\n\n # zim file should not have been created since replay viewer could not be loaded\n assert not os.path.isfile(tmp_path / zim_output_not_created)\n\n def test_error_bad_main_page(self, tmp_path):\n zim_output_not_created = \"zim-out-not-created.zim\"\n with pytest.raises(Exception) as e:\n warc2zim(\n [\n \"-v\",\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"-u\",\n \"https://no-such-url.example.com\",\n \"--output\",\n str(tmp_path),\n \"--name\",\n \"bad\",\n \"--zim-file\",\n zim_output_not_created,\n ]\n )\n\n def test_args_only(self):\n # error, name required\n with pytest.raises(SystemExit) as e:\n warc2zim([])\n assert e.code == 2\n\n # error, no such output directory\n with pytest.raises(Exception) as e:\n warc2zim([\"--name\", \"test\", \"--output\", \"/no-such-dir\"])\n\n # success, special error code for no output files\n assert warc2zim([\"--name\", \"test\", \"--output\", \"./\"]) == 100\n\n def test_custom_css(self, tmp_path):\n custom_css = b\"* { background-color: red; }\"\n custom_css_path = tmp_path / \"custom.css\"\n with open(custom_css_path, \"wb\") as fh:\n fh.write(custom_css)\n\n zim_output = \"test-css.zim\"\n\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"test-css\",\n \"--custom-css\",\n str(custom_css_path),\n ]\n )\n zim_output = tmp_path / zim_output\n\n res = self.get_article(zim_output, \"A/example.com/\")\n assert \"https://warc2zim.kiwix.app/custom.css\".encode(\"utf-8\") in res\n\n res = self.get_article(zim_output, \"A/warc2zim.kiwix.app/custom.css\")\n assert custom_css == res\n\n def test_custom_css_remote(self, tmp_path):\n zim_output = \"test-css.zim\"\n url = (\n \"https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap-reboot.css\"\n )\n\n warc2zim(\n [\n os.path.join(TEST_DATA_DIR, \"example-response.warc\"),\n \"--output\",\n str(tmp_path),\n \"--zim-file\",\n zim_output,\n \"--name\",\n \"test-css\",\n \"--custom-css\",\n url,\n ]\n )\n zim_output = tmp_path / zim_output\n\n res = self.get_article(zim_output, \"A/example.com/\")\n assert 
\"https://warc2zim.kiwix.app/custom.css\".encode(\"utf-8\") in res\n\n res = self.get_article(zim_output, \"A/warc2zim.kiwix.app/custom.css\")\n assert res == requests.get(url).content\n","repo_name":"openzim/warc2zim","sub_path":"tests/test_warc_to_zim.py","file_name":"test_warc_to_zim.py","file_ext":"py","file_size_in_byte":18112,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"16"} +{"seq_id":"12904417084","text":"\"\"\"\nTODO: Add docstring.\n\nrequires:\npip install chromadb\npip install chromadb-client\n\"\"\n\n\"\"\"\nfrom typing import Dict, Optional, List, Union\nimport uuid\n\n\n# pylint: disable=import-error\nimport chromadb as chroma\nfrom chromadb.types import Collection\n\n# pylint: disable=no-name-in-module\nfrom chromadb import PersistentClient\n\nfrom .vector_store import VectorStore, QueryResult\n\n\nclass ChromaVectorStore(VectorStore):\n \"\"\"\n Implementation of the VectorStore for the Chroma vector database.\n\n Provides methods to interact with Chroma, including creating, querying,\n and managing collections. Supports ephemeral, persistent, and HTTP Chroma clients.\n\n Ensure `chromadb` and `chromadb-client` packages are installed.\n\n Attributes:\n client: Chroma client instance.\n current_collection: Current active collection in Chroma.\n\n Args:\n api_key (Optional[str]): API key or token.\n client_type (str): Chroma client type (\"ephemeral\", \"persistent\", or \"http\").\n path (Optional[str]): Path for persistent storage.\n host (Optional[str]): Host for HTTP client.\n port (Optional[int]): Port for HTTP client.\n config (Optional[Dict[str, str]]): Additional configurations.\n \"\"\"\n\n # pylint: disable=R0913\n def __init__(\n self,\n api_key: Optional[str] = None,\n client_type: str = \"ephemeral\",\n path: Optional[str] = None,\n host: Optional[str] = None,\n port: Optional[int] = None,\n config: Optional[Dict[str, str]] = None,\n ):\n \"\"\"\n Initializes the ChromaVectorStore with optional parameters.\n\n Args:\n api_key (str, optional): API key or authentication token.\n client_type (str): Type of Chroma client (\"ephemeral\", \"persistent\", or \"http\").\n path (str, optional): Path for persistent client storage.\n host (str, optional): Host for HTTP client.\n port (int, optional): Port for HTTP client.\n config (Dict[str, str], optional): Additional configuration settings.\n \"\"\"\n super().__init__(api_key, None, config)\n\n if client_type == \"ephemeral\":\n self.client = chroma.Client()\n elif client_type == \"persistent\":\n if not path:\n raise ValueError(\"Path is required for persistent client.\")\n # pylint: disable=no-member\n self.client = chroma.PersistentClient(path=path)\n elif client_type == \"http\":\n if not host or not port:\n raise ValueError(\"Host and port are required for HTTP client.\")\n # pylint: disable=no-member\n self.client = chroma.HttpClient(host=host, port=port)\n else:\n raise ValueError(f\"Invalid client type: {client_type}\")\n\n self.current_collection = None\n\n def create_collection(\n self, name: str, metadata: Optional[Dict[str, str]] = None\n ) -> Collection:\n \"\"\"\n Creates a new collection with the given name and metadata in Chroma.\n\n Args:\n name (str): The name of the collection to create.\n metadata (Optional[Dict[str, str]]): Optional metadata to associate with the collection.\n\n Returns:\n Collection: The newly created collection.\n\n Raises:\n ValueError: If the collection already exists.\n ValueError: If the collection name is invalid.\n \"\"\"\n # 
Using the Chroma client to create a collection\n self.current_collection = self.client.create_collection(\n name=name, metadata=metadata\n )\n\n def use_collection(self, name):\n \"\"\"\n Sets the current collection to an existing one or creates it if it doesn't exist.\n\n Args:\n name (str): Name of the collection to use or create.\n \"\"\"\n # Set the current collection to an existing one\n self.current_collection = self.client.get_or_create_collection(name)\n\n def delete_collection(self, name: str) -> None:\n \"\"\"\n Deletes a collection with the given name from Chroma.\n\n Args:\n name (str): The name of the collection to delete.\n\n Raises:\n ValueError: If the collection does not exist.\n \"\"\"\n # Using the Chroma client to delete a collection\n self.client.delete_collection(name=name)\n\n def add_documents(\n self,\n texts: List[str],\n embeddings: Optional[List[List[float]]] = None,\n metadata_list: Optional[List[Dict[str, str]]] = None,\n ids: Optional[List[str]] = None,\n ) -> None:\n \"\"\"\n Adds documents and their associated embeddings and metadata\n to the current collection in Chroma.\n\n Args:\n texts (List[str]): List of documents to be added.\n embeddings (Optional[List[List[float]]]): List of embeddings.\n If not provided, embeddings will be generated by Chroma.\n metadata_list (Optional[List[Dict[str, str]]]): Metadata associated with each document.\n ids (Optional[List[str]]): IDs for each document.\n If not provided, unique IDs will be auto-generated.\n\n Raises:\n ValueError: If no collection is set.\n ValueError: Handled by the underlying Chroma library for various conditions.\n \"\"\"\n # Ensure a collection is set\n if not self.current_collection:\n # pylint: disable=line-too-long\n raise ValueError(\n \"No collection is set. Use 'create_collection' or 'use_collection' first.\"\n )\n\n # If ids are not provided, auto-generate unique IDs for each document\n if not ids:\n ids = [str(uuid.uuid4()) for _ in texts]\n\n # Add documents to the current collection\n self.current_collection.add(\n ids=ids, embeddings=embeddings, metadatas=metadata_list, documents=texts\n )\n\n def remove_documents(self, ids: List[str]) -> None:\n \"\"\"\n Removes documents with the specified IDs from the current collection in Chroma.\n\n Args:\n ids (List[str]): List of document IDs to be removed.\n\n Raises:\n ValueError: If no collection is set.\n \"\"\"\n # Ensure a collection is set\n if not self.current_collection:\n # pylint: disable=line-too-long\n raise ValueError(\n \"No collection is set. 
Use 'create_collection' or 'use_collection' first.\"\n )\n\n # Remove documents from the current collection\n self.current_collection.delete(ids=ids)\n\n def save_index(self, file_path: str) -> None:\n \"\"\"\n Saves the current Chroma index to the specified file path.\n\n Args:\n file_path (str): The path where the Chroma index will be saved.\n \"\"\"\n self.client = PersistentClient(path=file_path)\n # pylint: disable=line-too-long\n # Since Chroma automatically persists data with PersistentClient, no further action is needed.\n\n def load_index(self, file_path: str) -> None:\n \"\"\"\n Loads the Chroma index from the specified file path.\n\n Args:\n file_path (str): The path from where the Chroma index will be loaded.\n \"\"\"\n # pylint: disable=no-member\n self.client = PersistentClient(path=file_path)\n # Chroma will automatically load the data from the provided path.\n\n def query_collection(\n self,\n query_texts: Optional[List[str]] = None,\n n_results: int = 10,\n query_embeddings: Optional[List[List[float]]] = None,\n where: Optional[Dict[str, Union[str, float]]] = None,\n where_document: Optional[Dict[str, Dict[str, str]]] = None,\n include: Optional[List[str]] = None,\n ) -> QueryResult:\n \"\"\"\n Queries the current collection in Chroma and returns the nearest neighbors.\n\n Args:\n # pylint: disable=line-too-long\n query_texts (Optional[List[str]]): Document texts for querying.\n n_results (int): Number of nearest neighbors to return.\n query_embeddings (Optional[List[List[float]]]): Embeddings for querying.\n where (Optional[Dict[str, Union[str, float]]]): A filter to narrow down results based on metadata criteria. For example, {\"color\": \"red\", \"price\": 4.20} would return vectors with metadata matching these criteria.\n where_document (Optional[Dict[str, Dict[str, str]]]): A filter to narrow down results based on document content. For instance, {$contains: {\"text\": \"hello\"}} would return vectors whose documents contain the word \"hello\".\n include (Optional[List[str]]): Data to include in the results. Defaults to [\"metadatas\", \"documents\", \"distances\"].\n\n\n Returns:\n QueryResult: Contains the results.\n\n Raises:\n ValueError: If no collection is set or incorrect query parameters are provided.\n \"\"\"\n if include is None:\n include = [\"metadatas\", \"documents\", \"distances\"]\n\n if not self.current_collection:\n # pylint: disable=line-too-long\n raise ValueError(\n \"No collection set. 
Use 'create_collection' or 'use_collection' first.\"\n )\n\n if not query_embeddings and not query_texts:\n raise ValueError(\"Provide either 'query_embeddings' or 'query_texts'.\")\n if query_embeddings and query_texts:\n raise ValueError(\"Provide only one of 'query_embeddings' or 'query_texts'.\")\n\n return self.current_collection.query(\n query_embeddings=query_embeddings,\n query_texts=query_texts,\n n_results=n_results,\n where=where,\n where_document=where_document,\n include=include,\n )\n\n def list_collections(self) -> List[str]:\n \"\"\"\n Lists all collections in Chroma.\n\n Returns:\n List[str]: A list of collection names.\n \"\"\"\n return self.client.list_collections()\n","repo_name":"dirkjbreeuwer/ai_podcast","sub_path":"src/search_and_retrieval/chroma_vector_store.py","file_name":"chroma_vector_store.py","file_ext":"py","file_size_in_byte":10048,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"26676490687","text":"\"\"\" Game backbone, shared instances used in other modules \"\"\"\n\n# global constants\nWIDTH = 640 # framebuffer width\nHEIGHT = 360 # framebuffer height\nMAX_LAYERS = 3 # backgroudn layers\nMAX_SPRITES = 32 # max sprites\nASSETS_PATH = \"../assets\"\n\n# global game objects, delayed creation\n\nengine = ()\t # tilengine main instance\nwindow = ()\t # tilengine window instance\nactors = ()\t # list that contains every active game entity\nui = ()\t\t # UI items\nworld = ()\t # world/level instance\nplayer = () # player instance\nsounds = ()\t # sound effects handler\n","repo_name":"megamarc/TilenginePythonPlatformer","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"} +{"seq_id":"70388010568","text":"import numpy as np\nfrom astropy import units as u\nfrom falafel.utils import config\nfrom orphics import cosmology, maps as omaps \nfrom classy_sz import Class\n\ndef ell2ang(ell, angle_unit=None):\n \"\"\"Convert from harmonic mode ell to an angle with units.\n\n Parameters\n ----------\n ell : float\n Harmonic mode ell\n angle_unit : str, optional\n Desired unit for the angle, by default None\n\n Returns\n -------\n astropy Quantity with astropy Units\n Angle with units\n\n Raises\n ------\n ValueError\n You need to give an appropriate value for the angle's units\n\n \"\"\"\n\n #Possible Units\n units_dict = {}\n units_dict['deg'] = u.deg\n units_dict['rad'] = u.rad\n units_dict['arcmin'] = u.arcmin\n units_dict['arcsec'] = u.arcsec\n\n if angle_unit == None:\n raise ValueError(f\"Valid angle units are {', '.join(units_dict.keys())}\")\n\n #Calculate Angle\n theta = 2 * np.pi / ell * u.rad\n\n #Convert Unit\n return theta.to(units_dict[angle_unit], equivalencies=u.dimensionless_angles())\n\n\ndef ang2ell(angle, angle_unit=None):\n \"\"\"Convert from an angle to harmonic mode ell. 
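(Worked example, editor's addition: 1 arcmin is pi/10800 rad, so ang2ell(1.0, 'arcmin') gives 2*pi / (pi/10800), about 21600.) 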
This function adds the astropy units so you only need numbers and strings to use it.\n\n Parameters\n ----------\n angle : float\n Value of angle\n angle_unit : str, optional\n String of the unit for the angle, by default None\n\n Returns\n -------\n int\n Harmonic mode ell\n\n Raises\n ------\n ValueError\n You need to give an appropriate value for the angle's units\n \"\"\"\n\n #Possible Units\n units_dict = {}\n units_dict['deg'] = u.deg\n units_dict['rad'] = u.rad\n units_dict['arcmin'] = u.arcmin\n units_dict['arcsec'] = u.arcsec\n\n if angle_unit is None:\n raise ValueError(f\"Valid angle units are {', '.join(units_dict.keys())}\")\n\n #Convert to Radians\n angle = angle * units_dict[angle_unit]\n angle = angle.to(u.rad, equivalencies=u.dimensionless_angles())\n\n #Calculate Angle\n return int(2 * np.pi / angle.value)\n\n\ndef nanToZeros(array):\n # NaN never compares equal to anything, so `array == np.nan` is always False;\n # np.isnan is the correct elementwise test\n return np.where(np.isnan(array), 0., array)\n\n\ndef str2bool(string):\n valid_true = ['true', 't', 'yes', 'y']\n valid_false = ['false', 'f', 'no', 'n']\n choice = string.lower()\n\n if choice in valid_true:\n boolean_val = True\n elif choice in valid_false:\n boolean_val = False\n else:\n raise ValueError(f'Valid boolean values are {valid_true + valid_false}')\n\n return boolean_val\n\n\ndef sort_str_list(l):\n \"\"\"\n Sorts list of strings of integers.\n \"\"\"\n int_list = np.fromiter(l, dtype=int)\n string_list = np.sort(int_list).astype(str)\n\n return string_list\n\n\n\ndef percentDiscrepancy(exp, ref):\n return (exp - ref) / ref * 100\n\n\ndef SN(signal, noise):\n return np.sqrt(np.sum( signal**2 / noise**2 ))\n\n\ndef get_theory_dicts(nells=None,lmax=9000,grad=True):\n\n #Initialize\n if nells is None: nells = {'TT':0,'EE':0,'BB':0} # noise (dimensionless)\n ls = np.arange(lmax+1)\n ucls = {}\n tcls = {}\n unlensedcls = {}\n\n #Load Theory\n thloc = config['data_path'] + config['theory_root']\n theory = cosmology.loadTheorySpectraFromCAMB(thloc,get_dimensionless=False)\n ells,gt,ge,gb,gte = np.loadtxt(f\"{thloc}_camb_1.0.12_grads.dat\",unpack=True,usecols=[0,1,2,3,4])\n\n #Repackage Theory into Dictionaries\n ucls['TT'] = omaps.interp(ells,gt)(ls) if grad else theory.lCl('TT',ls)\n ucls['TE'] = omaps.interp(ells,gte)(ls) if grad else theory.lCl('TE',ls)\n ucls['EE'] = omaps.interp(ells,ge)(ls) if grad else theory.lCl('EE',ls)\n ucls['BB'] = omaps.interp(ells,gb)(ls) if grad else theory.lCl('BB',ls)\n unlensedcls['TT'] = theory.uCl('TT',ls)\n unlensedcls['TE'] = theory.uCl('TE',ls)\n unlensedcls['EE'] = theory.uCl('EE',ls)\n unlensedcls['BB'] = theory.uCl('BB',ls)\n ucls['kk'] = theory.gCl('kk',ls) # this doesn't exist\n tcls['TT'] = theory.lCl('TT',ls) + nells['TT']\n tcls['TE'] = theory.lCl('TE',ls)\n tcls['EE'] = theory.lCl('EE',ls) + nells['EE']\n tcls['BB'] = theory.lCl('BB',ls) + nells['BB']\n\n return ls, unlensedcls, ucls, tcls\n\n\n\n#DEPRECATED!\n#Mat's version in orphics is better documented, can take pre-calculated pixel area maps, and works for FFTs and equal area maps\n# def wn(mask1, n1, mask2=None, n2=None):\n# \"\"\"TODO: check pixel area average\"\"\"\n# pmap = orphics.maps.psizemap(mask1.shape, mask1.wcs)\n# if mask2 is None:\n# output = np.sum(mask1**n1 * pmap) /np.pi / 4.\n# else:\n# output = np.sum(mask1**n1 * mask2**n2 * pmap) /np.pi / 4.\n# return output\n\ndef getClassyCIB(spectra, nu_list, params={}, emulFlag=False, kappaFlag=False):\n \"\"\"Wrapper for classy_sz calculations of CIB auto and CIB x lensing theory spectra.\n\n Parameters\n ----------\n spectra : str\n Which CIB spectra do you want? 
Options: 'auto', 'cross', 'both'\n nu_list : float\n List of observing frequencies as numbers in GHz.\n params : dict, optional\n Dictionary of classy_sz parameters, by default empty\n emulFlag : bool, optional\n Use the cosmopower emulators?, by default False\n kappaFlag : bool, optional\n return kappa autospectrum?, by defualt False\n\n Returns\n -------\n ells, Cls_dict\n Array of ells and a dictionary of Cls. The keys are 'auto' and 'cross', and each of those entries is itself a dictionary indexed by observing frequency as a string.\n \"\"\"\n\n if spectra.lower() not in ['both', 'cross', 'auto']:\n raise ValueError(\"Your 'spectra' variable is incorrect\")\n\n #Parameters for Cosmology Planck 14, https://arxiv.org/pdf/1303.5076.pdf, best-fit\n p14_dict={}\n p14_dict['h'] = 0.6711 \n p14_dict['omega_b'] = 0.022068\n p14_dict['Omega_cdm'] = 0.3175 - 0.022068/p14_dict['h']/p14_dict['h']\n p14_dict['A_s'] = 2.2e-9\n p14_dict['n_s'] = .9624\n p14_dict['k_pivot'] = 0.05\n p14_dict['tau_reio'] = 0.0925\n p14_dict['N_ncdm'] = 1\n p14_dict['N_ur'] = 0.00641\n p14_dict['deg_ncdm'] = 3\n p14_dict['m_ncdm'] = 0.02\n p14_dict['T_ncdm'] = 0.71611\n\n p_hm_dict = {}\n\n p_hm_dict['mass function'] = 'T10'\n p_hm_dict['concentration parameter'] = 'D08'\n p_hm_dict['delta for cib'] = '200m'\n p_hm_dict['hm_consistency'] = 1\n p_hm_dict['damping_1h_term'] = 0\n # Precision\n p_hm_dict['pressure_profile_epsabs'] = 1.e-8\n p_hm_dict['pressure_profile_epsrel'] = 1.e-3\n # HOD parameters for CIB\n p_hm_dict['M_min_HOD'] = pow(10.,10) # was M_min_HOD_cib\n\n #Grid Parameters\n # Mass bounds\n p_hm_dict['M_min'] = 1e8 * p14_dict['h'] # was M_min_cib\n p_hm_dict['M_max'] = 1e16 * p14_dict['h'] # was M_max_cib\n # Redshift bounds\n p_hm_dict['z_min'] = 0.07\n p_hm_dict['z_max'] = 6. 
# fiducial for MM20 : 6\n p_hm_dict['freq_min'] = 10.\n p_hm_dict['freq_max'] = 5e4 # fiducial for MM20 : 6\n p_hm_dict['z_max_pk'] = p_hm_dict['z_max']\n\n #Precision Parameters\n # Precision for redshift integal\n p_hm_dict['redshift_epsabs'] = 1e-40#1.e-40\n p_hm_dict['redshift_epsrel'] = 1e-4#1.e-10 # fiducial value 1e-8\n # Precision for mass integal\n p_hm_dict['mass_epsabs'] = 1e-40 #1.e-40\n p_hm_dict['mass_epsrel'] = 1e-4#1e-10\n # Precision for Luminosity integral (sub-halo mass function)\n p_hm_dict['L_sat_epsabs'] = 1e-40 #1.e-40\n p_hm_dict['L_sat_epsrel'] = 1e-3#1e-10\n # Multipole array\n p_hm_dict['dlogell'] = 1\n p_hm_dict['ell_max'] = 3968.0\n p_hm_dict['ell_min'] = 2.0\n\n #CIB Parameters\n p_CIB_dict = {}\n p_CIB_dict['alpha'] = 0.36\n p_CIB_dict['T_o'] = 24.4\n p_CIB_dict['beta'] = 1.75\n p_CIB_dict['gamma'] = 1.7\n p_CIB_dict['delta'] = 3.6\n p_CIB_dict['M_eff'] = 10**12.6\n p_CIB_dict['L_o'] = 6.4e-8\n p_CIB_dict['sigma_sq'] = 0.5\n\n # nu_list = [353,545,857]\n nu_list_str = str(nu_list)[1:-1] # Note: this must be a single string, not a list of strings!\n\n #Frequency Parameters\n p_freq_dict = {}\n p_freq_dict['cib_frequency_list_num'] = len(nu_list)\n p_freq_dict['cib_frequency_list_in_GHz'] = nu_list_str\n\n #Flux Cuts\n cib_fcut_dict = {}\n\n #Planck flux cut, Table 1 in https://arxiv.org/pdf/1309.0382.pdf\n cib_fcut_dict['100'] = 400\n cib_fcut_dict['143'] = 350\n cib_fcut_dict['217'] = 225\n cib_fcut_dict['353'] = 315\n cib_fcut_dict['545'] = 350\n cib_fcut_dict['857'] = 710\n cib_fcut_dict['3000'] = 1000\n\n def _make_flux_cut_list(cib_flux, nu_list):\n \"\"\"\n Make a string of flux cut values for given frequency list to pass into class_sz\n Beware: if frequency not in the flux_cut dictionary, it assigns 0\n \"\"\"\n cib_flux_list = []\n keys = list(cib_flux.keys())\n for i,nu in enumerate(nu_list):\n if str(nu) in keys:\n cib_flux_list.append(cib_flux[str(nu)])\n else:\n cib_flux_list.append(0)\n return cib_flux_list\n\n #Format Flux Cuts\n cib_flux_list = _make_flux_cut_list(cib_fcut_dict, nu_list)\n\n #Add Flux Cuts\n p_freq_dict['cib_Snu_cutoff_list [mJy]'] = str(list(cib_flux_list))[1:-1]\n p_freq_dict['has_cib_flux_cut'] = 1\n\n # M.set({# class_sz parameters:\n # 'output':'lens_cib_1h,lens_cib_2h', \n \n # #CIB Parameters\n # 'Redshift evolution of dust temperature' : 0.36,\n # 'Dust temperature today in Kelvins' : 24.4,\n # 'Emissivity index of sed' : 1.75,\n # 'Power law index of SED at high frequency' : 1.7,\n # 'Redshift evolution of L - M normalisation' : 3.6,\n # 'Most efficient halo mass in Msun' : 10.**12.6,\n # 'Normalisation of L - M relation in [Jy MPc2/Msun]' : 6.4e-8,\n # 'Size of of halo masses sourcing CIB emission' : 0.5,\n\n # #M_min_HOD is the threshold above which nc = 1:\n # 'M_min_HOD' : 10.**10,\n\n # 'M_min' : 1e10*common_settings['h'],\n # 'M_max' : 1e16*common_settings['h'],\n # 'z_min' : 0.06,\n # 'z_max' : 15,\n\n # ### Precision\n # #redshift_epsabs : 1.0e-40\n # #redshift_epsrel : 0.0005\n # #mass_epsabs : 1.0e-40\n # #mass_epsrel : 0.0005\n # 'dell' : 64,\n # #multipoles_sz : 'ell_mock'\n # 'ell_max' : 3968.0,\n # 'ell_min' : 2.0,\n # 'ndim_masses' : 100,\n # 'ndim_redshifts' : 100,\n \n # 'cib_frequency_list_num' : Nfreq,\n # #'cib_frequency_list_in_GHz' : '217,353,545,857,3000',\n # 'cib_frequency_list_in_GHz' : '353, 545, 857'\n # })\n\n #Create Class Object\n M = Class()\n \n #Add Spectra\n outspec = []\n if spectra.lower() == 'both' or spectra == 'auto':\n outspec.append('cib_cib_1h,cib_cib_2h')\n if spectra.lower() 
== 'both' or spectra.lower() == 'cross':\n outspec.append('lens_cib_1h,lens_cib_2h')\n if kappaFlag:\n outspec.append('lens_lens_1h,lens_lens_2h')\n M.set({'output': ','.join(outspec)})\n\n #Add Parameters\n M.set(p14_dict)\n M.set(p_hm_dict)\n M.set(p_CIB_dict)\n M.set(p_freq_dict)\n if params:\n M.set(params)\n \n #Compute Spectra\n if emulFlag:\n M.compute_class_szfast()\n else:\n M.compute()\n \n #Extract Spectra\n Dl_spectra = {}\n if spectra.lower() == 'both' or spectra.lower() == 'auto':\n Dl_spectra['auto'] = M.cl_cib_cib()\n if spectra.lower() == 'both' or spectra.lower() == 'cross':\n Dl_spectra['cross'] = M.cl_lens_cib()\n M.struct_cleanup()\n M.empty()\n\n ells = []\n Cls_dict = {}\n #Cycle Through CIB Spectra\n for spec_key, Dl_dict in Dl_spectra.items():\n if spec_key.lower() in ['cross', 'both']:\n freq_list = sort_str_list( list(Dl_dict.keys()) )\n else:\n freq_list = Dl_dict.keys()\n Cls_dict[spec_key] = {}\n\n #Cycle through Frequencies\n for nu in freq_list:\n #Get ells\n if not len(ells):\n ells = np.array(Dl_dict[nu]['ell'])\n\n #Get Spectra\n Dl_total = np.array(Dl_dict[nu]['1h']) + np.array(Dl_dict[nu]['2h'])\n Cl_total = dl2cl(Dl_total, ells= ells)\n\n #Save Spectra\n Cls_dict[spec_key][nu] = Cl_total\n \n #Get Kappa Autospectrum\n if kappaFlag:\n Dl_phi = M.cl_lens_lens()\n Cl_phi = dl2cl(Dl_phi, ells= ells)\n Cls_dict['lens'] = Cl_phi\n\n return ells, Cls_dict\n \n \n\ndef phi2kappa(phi, type= 'spectrum', ells= None):\n\n if ells is None:\n ells = np.arange(len(phi))\n\n factor = ells * (ells + 1.) / 2.\n\n if type == 'spectrum':\n kappa = factor**2 * phi # spectra carry two powers of L(L+1)/2\n elif type == 'map':\n kappa = factor * phi # maps/alms carry a single power of L(L+1)/2\n else:\n raise ValueError('Invalid \"type\" argument')\n\n return kappa\n\n\ndef dl2cl(Dl, ells= None):\n \n if ells is None:\n ells = np.arange(len(Dl))\n \n factor = ells * (ells+1) / (2*np.pi)\n \n return Dl / factor\n\n\ndef cl2dl(Cl, ells= None):\n \n if ells is None:\n ells = np.arange(len(Cl))\n \n factor = ells * (ells+1) / (2*np.pi)\n \n return Cl * factor\n\n\n\n\ndef bin_cen2edg(centers, dbins= None):\n \"\"\"\n Shifts from midpoints of bins to the edges of the bins (inclusive of the lower and upper endpoints).\n\n Parameters\n ----------\n centers : 1darray\n Midpoints of bins\n dbins : 1darray, optional\n Size of each bin if you have unevenly spaced bins. By default None\n\n Returns\n -------\n 1darray\n Edges of the bins (including both the lowest and highest edges). Length is 1+len(centers)\n \"\"\"\n if dbins is None:\n delta = centers[1] - centers[0]\n dbins = np.ones(centers.shape) * delta\n\n left_edges = centers - dbins/2\n edges = np.append(left_edges, left_edges[-1] + dbins[-1]) # dbins[-1] also covers the case where dbins is passed in, and appending avoids issues with odd deltas\n\n return edges\n\n\ndef bin_edg2cen(edges):\n \"\"\"\n Shifts from the edges of bins to their midpoints. Works for unevenly sized bins.\n\n Parameters\n ----------\n edges : 1darray\n Edges of the bins, including the left edge of the first bin and right edge of the last bin.\n\n Returns\n -------\n 1darray\n Midpoints of the bins. Length is len(edges) - 1.\n \"\"\"\n return (edges[1:] + edges[:-1]) / 2\n\n\ndef binning(binsize, xdata, ydata, start='midpoint'):\n \"\"\"\n Bins x and y data. Handles NaNs just fine. Only works for bins of equal length. For arbitrary or uneven spacing (e.g. 
log), use orphics.\n \"\"\"\n\n #Bin xdata\n if start.lower() == 'midpoint':\n midpoint = binsize//2 - 1 # midpoint if binsize = odd and to the left of midpoint if binsize = even\n xbins = xdata[midpoint : (xdata.size//binsize) * binsize : binsize]\n elif start.lower() == 'left':\n xbins = xdata[: (xdata.size//binsize) * binsize : binsize] \n else:\n raise ValueError('Need a valid start option: \"midpoint\" or \"left\"')\n \n #Bin ydata\n ybins = ydata[:(ydata.size//binsize) * binsize] # drop the last bin if it's too small\n ybins = np.nanmean(ybins.reshape(-1, binsize), axis=-1)\n\n return xbins, ybins\n","repo_name":"Yogesh3/ymfuncs","sub_path":"myfuncs/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5226091652","text":"from typing import Union, Literal\nfrom fbi.connection import __get_api_response\n\n\ndef wanted(person_classification: Union[Literal['Main'], Literal['Accomplice'], Literal['Victim']] = None,\n page_size: int = 20, page: int = 1, sort_order: Union[Literal['asc'], Literal['desc']] = None) -> dict:\n \"\"\"\n Get listing of wanted people\n :param person_classification: person classification\n :param page_size: number of items to return\n :param page: page of result listing\n :param sort_order: result sort order\n :return: a list of result dictionaries\n \"\"\"\n if person_classification is None:\n person_classification = \"\"\n else:\n person_classification = f\"&person_classification={person_classification}\"\n\n if sort_order is None:\n sort_order = \"\"\n else:\n sort_order = f\"&sort_order={sort_order}\"\n\n response = __get_api_response(f\"https://api.fbi.gov/@wanted?pageSize={page_size}&page={page}{person_classification}{sort_order}\")\n return response['items']\n\n\ndef wanted_person(person_id: str) -> dict:\n \"\"\"\n Retrieve information on wanted person\n :param person_id: id of wanted person\n :return: a dictionary of person's information\n \"\"\"\n return __get_api_response(f\"https://api.fbi.gov/@wanted-person/{person_id}\")\n\n\ndef art_crimes(page_size: int = 20, page: int = 1, sort_order: Union[Literal['asc'], Literal['desc'], None] = None,\n reference_number: Union[int, str] = None) -> dict:\n \"\"\"\n Get listing of national art theft\n :param page_size: number of items to return\n :param page: page of result listing\n :param sort_order: result sort order\n :param reference_number: art crime reference number\n :return: a list of result dictionaries\n \"\"\"\n if reference_number is None:\n reference_number = \"\"\n else:\n reference_number = f\"&referenceNumber={reference_number}\"\n\n if sort_order is None:\n sort_order = \"\"\n else:\n sort_order = 
f\"&sort_order={sort_order}\"\n\n response = __get_api_response(f\"https://api.fbi.gov/@artcrimes?pageSize={page_size}&page={page}\"\n f\"{sort_order}{reference_number}\")\n return response['items']\n\n\ndef art_crime(crime_id: str) -> dict:\n \"\"\"\n Retrieve information on an art crime\n :param crime_id: id of an art crime\n :return: dictionary of an art crime's information\n \"\"\"\n return __get_api_response(f\"https://api.fbi.gov/@artcrimes/{crime_id}\")\n","repo_name":"rly0nheart/fbi-api","sub_path":"fbi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"36425844143","text":"from decimal import Decimal\nfrom domain import Rates\nimport json\nimport urllib.request\n\nAPI_KEY = 'c15dab3a23b14a1d82ef0ab60b3b417d'\nURL = \"https://openexchangerates.org/api/latest.json?app_id={}\".format(API_KEY)\n\n\ndef build_rates_db() -> Rates:\n req = urllib.request.Request(URL)\n with urllib.request.urlopen(req) as response:\n result = json.loads(response.read().decode('utf-8'))\n\n rates = result['rates']\n base = result['base']\n timestamp = result['timestamp']\n\n rt = {k: Decimal(rates[k]) for k in rates}\n openexc_rates = Rates(base, rt, timestamp)\n #print(openexc_rates)\n return openexc_rates\n\n","repo_name":"evgenii-malov/interviews","sub_path":"CURR_XCHG_REST_14_03_18/datasources/openexc_datasource.py","file_name":"openexc_datasource.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23479140545","text":"import tensorflow as tf\nimport numpy as np\n\nfrom layers import Mish, ScaledRandomUniform\nimport utils\n\n\n\nanchor_sizes = [\n [(12, 16), (19, 36), (40, 28)],\n [(36, 75), (76, 55), (72, 146)],\n [(142, 110), (192, 243), (459, 401)],\n]\nscales = [1.2, 1.1, 1.05]\n\n\ndef calc_loss(layer_id, gt, preds, debug=False):\n gt_boxes = gt[..., : 4]\n gt_labels = tf.cast(gt[..., 4], tf.int32)\n gt_count = tf.shape(gt_labels)[-1]\n gt_mask = tf.where(gt_labels == -1, 0.0, 1.0)\n layer_xywh, layer_obj, layer_cls = utils.decode_layer(preds, layer_id)\n cls_count = layer_cls.shape[-1]\n\n s = tf.shape(preds)\n batch_size = s[0]\n gw = s[1]\n gh = s[2]\n stride_x = 1 / gw\n stride_y = 1 / gh\n d = s[3]\n truth_mask = tf.zeros((batch_size, gw, gh, 3))\n\n box_loss = 0.0\n cls_loss = 0.0\n\n ix = tf.cast(tf.math.floor(tf.cast(gw, tf.float32) * gt_boxes[..., 0]), tf.int32)\n iy = tf.cast(tf.math.floor(tf.cast(gh, tf.float32) * gt_boxes[..., 1]), tf.int32)\n ix = tf.clip_by_value(ix, 0, gw - 1)\n iy = tf.clip_by_value(iy, 0, gh - 1)\n\n box_shape = tf.shape(gt_labels)\n zeros = tf.zeros_like(gt_labels, dtype=tf.float32)\n gt_shift = tf.stack([zeros, zeros, gt_boxes[..., 2], gt_boxes[..., 3]], axis=-1)\n gt_shift = tf.stack([gt_shift, gt_shift, gt_shift], axis=1)\n\n anchors_ws = [tf.cast(tf.fill(box_shape, anchor_sizes[layer_id][ir][0]), dtype=tf.float32) / 608.0 for ir in range(3)]\n anchors_hs = [tf.cast(tf.fill(box_shape, anchor_sizes[layer_id][ir][1]), dtype=tf.float32) / 608.0 for ir in range(3)]\n anchors = tf.stack([tf.stack([zeros, zeros, anchors_ws[ir], anchors_hs[ir]], axis=-1) for ir in range(3)], axis=1)\n\n ious = utils.calc_ious(gt_shift, anchors)\n ious_argmax = tf.cast(tf.argmax(ious, axis=1), dtype=tf.int32)\n batch_idx = tf.tile(tf.range(batch_size)[ : , tf.newaxis], [1, box_shape[-1]])\n\n indices = tf.stack([batch_idx, iy, ix, ious_argmax], 
axis=-1)\n pred_boxes = tf.gather_nd(layer_xywh, indices)\n box_loss = tf.math.reduce_sum(gt_mask * (1.0 - utils.calc_gious(pred_boxes, gt_boxes)))\n\n cls_one_hot = tf.one_hot(gt_labels, cls_count)\n pred_cls = tf.gather_nd(layer_cls, indices)\n cls_diffs = tf.math.reduce_sum(tf.math.square(pred_cls - cls_one_hot), axis=-1)\n cls_loss = tf.math.reduce_sum(gt_mask * cls_diffs)\n\n indices_not_null = tf.gather_nd(indices, tf.where(gt_labels != -1))\n truth_mask = tf.tensor_scatter_nd_update(truth_mask, indices_not_null, tf.ones_like(indices_not_null, dtype=tf.float32)[:,0])\n inv_truth_mask = 1.0 - truth_mask\n\n obj_loss = tf.math.reduce_sum(tf.math.square(1 - layer_obj) * truth_mask)\n gt_boxes_exp = tf.tile(tf.reshape(gt_boxes, (batch_size, 1, 1, 1, gt_count, 4)), [1, gw, gh, 3, 1, 1])\n pred_boxes_exp = tf.tile(tf.reshape(layer_xywh, (batch_size, gw, gh, 3, 1, 4)), [1, 1, 1, 1, gt_count, 1])\n iou_mask = tf.cast(tf.math.reduce_max(utils.calc_ious(gt_boxes_exp, pred_boxes_exp), axis=-1) < 0.7, tf.float32)\n obj_loss += tf.math.reduce_sum(tf.math.square(layer_obj) * inv_truth_mask * iou_mask)\n\n return (0.05 * box_loss + 1.0 * obj_loss + 0.5 * cls_loss) / tf.cast(batch_size, dtype=tf.float32)\n\n\n\n\nclass YOLOv4Model(tf.keras.Model):\n def __init__(self, classes_num=80, image_size=(608, 608)):\n\n self.classes_num = classes_num\n self.image_size = (image_size[0], image_size[1], 3)\n\n input = tf.keras.Input(shape=self.image_size)\n output = self.CSPDarknet53WithSPP()(input)\n output = self.YOLOHead()(output)\n super().__init__(input, output)\n\n self.loss_tracker = tf.keras.metrics.Mean(name=\"loss\")\n self.lr_tracker = tf.keras.metrics.Mean(name=\"lr\")\n self.mAP_tracker = tf.keras.metrics.Mean(name=\"mAP\")\n\n\n def fit(self, dataset, **kwargs):\n\n start_step = 1 + kwargs['steps_per_epoch'] * kwargs['initial_epoch']\n self.current_step = tf.Variable(start_step, trainable=False, dtype=tf.int32)\n self.total_steps = kwargs['epochs'] * kwargs['steps_per_epoch']\n super().fit(dataset, **kwargs)\n\n\n def train_step(self, data):\n\n input, gt_boxes = data\n with tf.GradientTape() as tape:\n output = self(input, training=True)\n loss0 = calc_loss(0, gt_boxes, output[0])\n loss1 = calc_loss(1, gt_boxes, output[1])\n loss2 = calc_loss(2, gt_boxes, output[2])\n total_loss = loss0 + loss1 + loss2\n gradients = tape.gradient(total_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n self.loss_tracker.update_state(total_loss)\n self.lr_tracker.update_state(self.optimizer.lr(self.current_step))\n self.current_step.assign_add(1)\n\n return {\"loss\" : self.loss_tracker.result(), \"lr\": self.lr_tracker.result()}\n\n def test_step(self, data):\n\n input, gt_boxes = data\n prediction = self(input, training=False)\n ap = tf.py_function(\n func=lambda *args: utils.calc_mAP(args[:-2], args[-2], args[-1]),\n inp=[*prediction, gt_boxes, self.classes_num],\n Tout=tf.float64,\n )\n self.mAP_tracker.update_state(ap)\n\n return {\"mAP\" : self.mAP_tracker.result()}\n\n\n @property\n def metrics(self):\n return [self.loss_tracker, self.mAP_tracker, self.lr_tracker]\n\n\n\n def load_weights(self, weights_file):\n if weights_file.endswith(\".h5\"):\n super().load_weights(weights_file)\n else:\n self._load_weights_yolo(weights_file)\n\n # load weights from darknet weight file\n def _load_weights_yolo(self, weights_file):\n with open(weights_file, \"rb\") as f:\n major, minor, revision = np.fromfile(f, dtype=np.int32, count=3)\n if (major * 10 + minor) 
>= 2:\n seen = np.fromfile(f, dtype=np.int64, count=1)\n else:\n seen = np.fromfile(f, dtype=np.int32, count=1)\n j = 0\n for i in range(110):\n conv_layer_name = \"conv2d_%d\" % i if i > 0 else \"conv2d\"\n bn_layer_name = \"batch_normalization_%d\" % j if j > 0 else \"batch_normalization\"\n\n conv_layer = self.get_layer(conv_layer_name)\n in_dim = conv_layer.input_shape[-1]\n filters = conv_layer.filters\n size = conv_layer.kernel_size[0]\n\n if i not in [93, 101, 109]:\n # darknet weights: [beta, gamma, mean, variance]\n bn_weights = np.fromfile(f, dtype=np.float32, count=4 * filters)\n # tf weights: [gamma, beta, mean, variance]\n bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]\n bn_layer = self.get_layer(bn_layer_name)\n j += 1\n else:\n conv_bias = np.fromfile(f, dtype=np.float32, count=filters)\n\n # darknet shape (out_dim, in_dim, height, width)\n conv_shape = (filters, in_dim, size, size)\n conv_weights = np.fromfile(f, dtype=np.float32, count=np.product(conv_shape))\n # tf shape (height, width, in_dim, out_dim)\n conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])\n\n if i not in [93, 101, 109]:\n conv_layer.set_weights([conv_weights])\n bn_layer.set_weights(bn_weights)\n else:\n conv_layer.set_weights([conv_weights, conv_bias])\n\n assert len(f.read()) == 0, \"failed to read all data\"\n\n\n\n\n def darknetConv(\n self, filters, size, strides=1, batch_norm=True, activate=True, activation=\"leaky\"\n ):\n def feed(x):\n if strides == 1:\n padding = \"same\"\n else:\n x = tf.keras.layers.ZeroPadding2D(((1, 0), (1, 0)))(x)\n padding = \"valid\"\n\n x = tf.keras.layers.Conv2D(\n filters=filters,\n kernel_size=size,\n strides=strides,\n padding=padding,\n use_bias=not batch_norm,\n kernel_initializer=ScaledRandomUniform(\n scale=tf.sqrt(2 / (size * size * self.image_size[2])), minval=-0.01, maxval=0.01\n ),\n kernel_regularizer=tf.keras.regularizers.l2(0.0005),\n )(x)\n\n if batch_norm:\n x = tf.keras.layers.BatchNormalization(moving_variance_initializer=\"zeros\", momentum = 0.9)(x)\n\n if activate:\n if activation == \"mish\":\n x = Mish()(x)\n elif activation == \"leaky\":\n x = tf.keras.layers.LeakyReLU(alpha=0.1)(x)\n\n return x\n\n return feed\n\n def darknetResidualBlock(self, filters, repeats=1, initial=False):\n def feed(x):\n filters2 = 2 * filters if initial else filters\n x = self.darknetConv(2 * filters, 3, strides=2, activation=\"mish\")(x)\n route = self.darknetConv(filters2, 1, activation=\"mish\")(x)\n x = self.darknetConv(filters2, 1, activation=\"mish\")(x)\n for i in range(repeats):\n skip = x\n x = self.darknetConv(filters, 1, activation=\"mish\")(x)\n x = self.darknetConv(filters2, 3, activation=\"mish\")(x)\n x = tf.keras.layers.Add()([skip, x])\n x = self.darknetConv(filters2, 1, activation=\"mish\")(x)\n x = tf.keras.layers.Concatenate()([x, route])\n x = self.darknetConv(2 * filters, 1, activation=\"mish\")(x)\n return x\n\n return feed\n\n def CSPDarknet53WithSPP(self):\n def feed(x):\n x = self.darknetConv(32, 3, activation=\"mish\")(x)\n x = self.darknetResidualBlock(32, initial=True)(x)\n x = self.darknetResidualBlock(64, repeats=2)(x)\n x = route_1 = self.darknetResidualBlock(128, repeats=8)(x)\n x = route_2 = self.darknetResidualBlock(256, repeats=8)(x)\n x = self.darknetResidualBlock(512, repeats=4)(x)\n x = self.darknetConv(512, 1)(x)\n x = self.darknetConv(1024, 3)(x)\n x = self.darknetConv(512, 1)(x)\n\n # SPP\n spp1 = tf.keras.layers.MaxPooling2D(pool_size=13, strides=1, padding=\"same\")(x)\n spp2 = 
tf.keras.layers.MaxPooling2D(pool_size=9, strides=1, padding=\"same\")(x)\n spp3 = tf.keras.layers.MaxPooling2D(pool_size=5, strides=1, padding=\"same\")(x)\n\n x = tf.keras.layers.Concatenate()([spp1, spp2, spp3, x])\n\n x = self.darknetConv(512, 1)(x)\n x = self.darknetConv(1024, 3)(x)\n x = self.darknetConv(512, 1)(x)\n return route_1, route_2, x\n\n return feed\n\n def yoloUpsampleConvBlock(self, filters):\n def feed(x, y):\n x = self.darknetConv(filters, 1)(x)\n x = tf.keras.layers.UpSampling2D()(x)\n y = self.darknetConv(filters, 1)(y)\n x = tf.keras.layers.Concatenate()([y, x])\n\n x = self.darknetConv(filters, 1)(x)\n x = self.darknetConv(2 * filters, 3)(x)\n x = self.darknetConv(filters, 1)(x)\n x = self.darknetConv(2 * filters, 3)(x)\n x = self.darknetConv(filters, 1)(x)\n\n return x\n\n return feed\n\n def yoloDownsampleConvBlock(self, filters):\n def feed(x, y):\n x = self.darknetConv(filters, 3, strides=2)(x)\n x = tf.keras.layers.Concatenate()([x, y])\n\n x = self.darknetConv(filters, 1)(x)\n x = self.darknetConv(2 * filters, 3)(x)\n x = self.darknetConv(filters, 1)(x)\n x = self.darknetConv(2 * filters, 3)(x)\n x = self.darknetConv(filters, 1)(x)\n\n return x\n\n return feed\n\n def yoloBboxConvBlock(self, filters):\n def feed(x):\n x = self.darknetConv(filters, 3)(x)\n x = self.darknetConv(3 * (self.classes_num + 5), 1, activate=False, batch_norm=False)(x)\n\n return x\n\n return feed\n\n def YOLOHead(self):\n def feed(x):\n route_1, route_2, route = x\n x = route_2 = self.yoloUpsampleConvBlock(256)(route, route_2)\n x = route_1 = self.yoloUpsampleConvBlock(128)(x, route_1)\n small_bbox = self.yoloBboxConvBlock(256)(x)\n x = self.yoloDownsampleConvBlock(256)(route_1, route_2)\n medium_bbox = self.yoloBboxConvBlock(512)(x)\n x = self.yoloDownsampleConvBlock(512)(x, route)\n large_bbox = self.yoloBboxConvBlock(1024)(x)\n\n return small_bbox, medium_bbox, large_bbox\n\n return feed\n","repo_name":"NVIDIA/DALI","sub_path":"docs/examples/use_cases/tensorflow/yolov4/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":4689,"dataset":"github-code","pt":"16"} +{"seq_id":"42425623023","text":"#!/usr/bin/python3\n\nimport sys\n\n\ndef safe_function(fct, *args):\n try:\n result_of = fct(*args)\n return result_of\n except Exception as er:\n error_messages = \"Exception: {}\".format(er)\n print(error_messages, file=sys.stderr)\n return None\n","repo_name":"MMahmudd/alx-higher_level_programming","sub_path":"0x05-python-exceptions/101-safe_function.py","file_name":"101-safe_function.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31563297925","text":"import cv2\nimport os\nimport pandas as pd\nimport numpy as np\n\ndef load_images_from_folder(folder):\n images = [] \n labels = []\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder,filename))\n if img is not None:\n images.append(img)\n labels.append(ord(filename[0]) -97)\n return images, labels\n\nfolder=\"testImages\"\nimgs, labels= load_images_from_folder(folder)\ncol = pd.read_csv('/home/alecsoc/Desktop/mygit/EECS504_Project_F20/sign_mnist_test.csv').columns\ndata1 = pd.read_csv('/home/alecsoc/Desktop/mygit/EECS504_Project_F20/sign_mnist_test.csv')\ndata = np.zeros((len(imgs), 28*28+1))\nfor i in range(len(imgs)):\n gray = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2GRAY)\n grayF = gray.reshape((1,28*28))\n data[i,:] = 
np.insert(grayF,0,labels[i])\nnewData = pd.DataFrame(data,columns = col)\ndata2 = pd.concat([data1, newData]) # DataFrame.append was removed in pandas 2.0; concat is the supported equivalent\ndata2.to_csv('AlecData.csv',index =False)\n\n\n","repo_name":"AlecS19/EECS504_Project_F20","sub_path":"dataCreation.py","file_name":"dataCreation.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75046008007","text":"#https://www.acmicpc.net/problem/1213\n#Implementation, Greedy\n\nfrom collections import defaultdict \n\ns = list(input())\ns.sort()\n\nd = defaultdict(int)\nfor i in s:\n d[i] += 1\n\nc = []\ncnt = 0\nfor key, val in d.items():\n if val%2 != 0:\n cnt += 1\n c.append(key)\n s.remove(key)\n\n if cnt > 1:\n print(\"I'm Sorry Hansoo\")\n break\nelse:\n l = []\n for i in range(0, len(s), 2):\n l.append(s[i])\n\n result = l+c+l[::-1]\n print(''.join(result))\n\n# Using Counter #\nfrom collections import Counter \n\ns = list(input())\ns.sort()\n\ncounter = Counter(s)\n\nc = []\ncnt = 0\nfor i in counter:\n if counter[i]%2!=0:\n cnt += 1\n c.append(i)\n s.remove(i)\n\n if cnt > 1:\n print(\"I'm Sorry Hansoo\")\n break\nelse:\n l = []\n for i in range(0, len(s), 2):\n l.append(s[i])\n\n result = l+c+l[::-1]\n print(''.join(result))\n ","repo_name":"JeongHo16/Coding_Test","sub_path":"Problems/python/boj_팰린드롬만들기.py","file_name":"boj_팰린드롬만들기.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44239756330","text":"from dateutil.parser import parse as parse_date\n\nfrom kp_scrapers.lib.parser import may_strip\nfrom kp_scrapers.models.normalize import DataTypes\nfrom kp_scrapers.spiders.bases.mail import MailSpider\nfrom kp_scrapers.spiders.charters import CharterSpider\nfrom kp_scrapers.spiders.charters.banchero_vlcc import normalize\n\n\nclass BancheroCostaVLCCSpider(CharterSpider, MailSpider):\n name = 'BCR_Fixtures_VLCC'\n provider = 'Banchero'\n version = '1.0.1'\n produces = [DataTypes.SpotCharter, DataTypes.Vessel]\n\n spider_settings = {\n # push items on GDrive spreadsheet\n 'KP_DRIVE_ENABLED': True,\n # notify in Slack the document is ready\n 'NOTIFY_ENABLED': True,\n }\n\n def parse_mail(self, mail):\n \"\"\"\n\n Args:\n mail (Mail):\n\n Returns:\n SpotCharter:\n\n \"\"\"\n reported_date = parse_date(mail.envelope['date']).strftime('%d %b %Y')\n start_processing = 'wake up' in mail.envelope['subject'].lower()\n\n for tr_sel in self.select_body_html(mail).xpath('//tr'):\n row = [\n may_strip(''.join(td_sel.xpath('.//text()').extract()))\n for td_sel in tr_sel.xpath('.//td')\n ]\n\n if 'med/black sea' in row:\n start_processing = True\n continue\n\n # wake up attachment has unnecessary tables\n if len(row) < 8:\n continue\n\n if start_processing:\n raw_item = {str(idx): row[idx] for idx, r in enumerate(row)}\n\n raw_item.update(provider_name=self.provider, reported_date=reported_date)\n yield normalize.process_item(raw_item)\n","repo_name":"theHausdorffMetric/test","sub_path":"kp_scrapers/spiders/charters/banchero_vlcc/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34273413740","text":"import os\nimport json\nfrom flask import Flask\nimport pytest\nfrom api.routes import config_routes\nfrom api.config import Config\nfrom flask_sqlalchemy import SQLAlchemy\n\n# app = Flask(__name__)\n# @pytest.fixture\n# def client():\n 
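\n# NOTE (editor's addition): a hedged sketch of what the commented-out fixture\n# above could look like under plain pytest; this module builds a global client\n# below instead, so the sketch stays commented out:\n# @pytest.fixture\n# def client():\n#     app.config['TESTING'] = True\n#     with app.test_client() as c:\n#         yield c\n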
\nroot_url = \"/todo/api/v1.0/\"\n\napp = Flask(__name__)\napp.config.from_object(Config)\ndb = SQLAlchemy(app)\nconfig_routes(app, db)\nclient = app.test_client()\n\ndef test_base_route(): \n url = \"/\"\n response = client.get(url)\n assert b'Hello' in response.get_data()\n assert response.status_code == 200\n\ndef test_tasks_list():\n url = root_url + \"tasks\"\n response = client.get(url)\n assert response.status_code == 200\n # assert b'[]' in response.get_data()\n\n\ndef test_retrieve_task():\n url = root_url + \"tasks/1\"\n response = client.get(url)\n assert response.status_code == 200\n assert b'\"id\": 1' in response.get_data()\n\ndef test_create_task():\n url = root_url + \"tasks\"\n request_data = {\n \"description\": \"hmmm\",\n \"id\": \"4\",\n \"title\": \"WOW AMZING\"\n }\n response = client.post(url,\n data = json.dumps(request_data),\n content_type = 'application/json')\n assert response.status_code == 200\n assert \"Success\" or \"Error\" in response.get_data()\n\ndef test_update_task():\n url = root_url + \"tasks/1\"\n request_data = {\n \"description\": \"hmmm\"\n }\n response = client.put(url,\n data = json.dumps(request_data),\n content_type = 'application/json')\n\n assert response.status_code == 200\n\n# def test_delete_task():\n# url = root_url + \"tasks/1\"\n# response = client.delete(url)\n# # assert response.status_code == 204","repo_name":"usamasubhani/todo-rest","sub_path":"tests/test_routes.py","file_name":"test_routes.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26279126674","text":"# This is the Driver module of the Amazon Textract program.\n# Author: Guangqi Li \n# Import the Utility module\nimport Amazon_Textract_Utility as Utility\n# First check whether the tracking csv exists. 
If not, create one.\nUtility.tracking_csv()\n# Get the progress of the extraction.\ntracking_list = Utility.track_progress()\n# Call the extraction functions.\nfor j in range(tracking_list[1],tracking_list[0]):\n # If you want to extract forms instead of tables, replace the \"Utility.Amazon_tesseract_tables(j)\" by \"Utility.Amazon_tesseract_forms(j)\"\n Utility.Amazon_tesseract_tables(j)\n","repo_name":"GLiUMN/MergentProcessing","sub_path":"Step_3/Amazon_Textract_Driver.py","file_name":"Amazon_Textract_Driver.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32889901105","text":"def detectLoop(head):\n #code here\n start = head\n forward = head\n while(start and forward and forward.next):\n start = start.next\n forward = forward.next.next\n if start == forward:\n return True\n return False","repo_name":"AgrimNautiyal/Problem-solving","sub_path":"linked_list/loop_present/detect_loop_floyds_circling_algorithm_2_pointer.py","file_name":"detect_loop_floyds_circling_algorithm_2_pointer.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71023930888","text":"import requests\nfrom bs4 import BeautifulSoup\nurl = \"https://stackoverflow.com/questions/tagged/python\"\nr = requests.get(url)\nques_lst = []\nsoup = BeautifulSoup(r.text, 'html.parser')\nques_summary = soup.find_all('div', class_='s-post-summary--content')\nfor summary in ques_summary:\n question = summary.find(class_='s-link').text\n ques_lst.append(question)\nprint(*ques_lst, sep = \"\\n\")\n \n\n\n","repo_name":"vamshisurya/WebScrapping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1017613647","text":"\n\ndef intersection(a, b):\n result = []\n for i in a:\n for j in b:\n if i == j:\n result.append(i)\n return sorted(result)\n\n\ndef reunion(a, b):\n result = []\n for i in a:\n result.append(i)\n for j in b:\n if j not in result:\n result.append(j)\n return sorted(result)\n\n\ndef difference(a, b):\n result = []\n for i in a:\n result.append(i)\n for j in b:\n if j in result:\n result.remove(j)\n return sorted(result)\n\n# def operation(a, b):\n# return (a intersectat cu b, a reunit cu b, a - b, b - a)\n\n\na = [1, 3, 5, 7]\nb = [2, 8, 12, 5, 9]\nprint(difference(b, a))\n","repo_name":"emilianraduu/an3semestru1","sub_path":"Python/Laborator 2/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19678770322","text":"from classes.helpers import LoopHelper\nimport sys\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='''Usage python PATHTO/bundle_check.py ROOTDIR ''')\n parser.add_argument('path', nargs='*', default=[1], help='root dir to check for bundles')\n args = parser.parse_args()\n\n print(\"Bundle check in dir: \" + sys.argv[1])\n LoopHelper.LoopHelper().loop_through(sys.argv[1])\n","repo_name":"yirez/BundleChecker","sub_path":"bundle_check.py","file_name":"bundle_check.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74125382088","text":"import tkinter as tk\n\n# Create a new window and configure 
it\nwindow = tk.Tk()\nwindow.title(\"Miles to Km Converter\")\nwindow.minsize(width=300, height=200)\nwindow.config(padx=50, pady=50)\n\n# Entry widget for user input\nmiles_entry = tk.Entry(width=10, justify=\"center\")\nmiles_entry.grid(column=1, row=0, padx=10, pady=10)\n\n# Label for miles\nmiles_label = tk.Label(text=\"Miles\")\nmiles_label.grid(column=2, row=0, padx=10, pady=10)\n\n# Label for \"is equal to\" text\nequal_label = tk.Label(text=\"is equal to\")\nequal_label.grid(column=0, row=1, padx=10, pady=10)\n\n# Label for the converted value\nconversion_value = 0.00\nconverted_label = tk.Label(text=f\"{conversion_value}\")\nconverted_label.grid(column=1, row=1, padx=10, pady=10)\n\n# Label for km\nkm_label = tk.Label(text=\"Km\")\nkm_label.grid(column=2, row=1, padx=10, pady=10)\n\n# Function to perform the conversion\ndef calculate():\n conversion_value = round(float(miles_entry.get()) * 1.60934, 2)\n converted_label.config(text=f\"{conversion_value}\")\n\n# Button to trigger the conversion\ncalculate_button = tk.Button(text=\"Calculate\", command=calculate)\ncalculate_button.grid(column=1, row=2, padx=10, pady=10)\n\n# Start the main event loop to handle user interactions with the GUI\nwindow.mainloop()\n","repo_name":"dannychan0510/100-days-of-code","sub_path":"day-27-miles-to-km-converter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39607858781","text":"import cv2\nimport numpy as np\nimport time\n\n\ndef multi_template(image, temp):\n img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n temp_gray = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)\n\n res_ = cv2.matchTemplate(img_gray, temp_gray, cv2.TM_CCOEFF_NORMED)\n\n w, h = temp_gray.shape[:2]\n\n threshold = 0.75\n loc = np.where(res_ >= threshold)\n print(\"# Check ~\")\n\n pixel_ = 50\n\n T_count = 4\n\n th1, th2, cnt = 0, 0, 0\n pt_data = []\n F_i = len(loc[0])-2\n\n for i in range(len(loc[0])-1):\n check1 = abs(loc[0][i] - loc[0][i+1])\n check2 = abs(loc[1][i] - loc[1][i+1])\n\n if check1 > pixel_ and check2 > pixel_:\n th1 = i + 1\n cnt += 1\n # print(\"# \", check1, \" < = > \", th2, \" - \", th1, \" | Current Counts : \", cnt)\n new_pt = tuple([int(np.mean(loc[1][th2:th1])), int(np.mean(loc[0][th2:th1]))])\n pt_data.append(new_pt)\n th2 = th1\n\n if cnt + 1 == T_count or i == F_i:\n new_pt = tuple([int(np.mean(loc[1][th2:])), int(np.mean(loc[0][th2:]))])\n pt_data.append(new_pt)\n # print(\"# \", check1, \" < = > \", th2, \" - End\", \" | Current Counts : \", cnt+1)\n print(\"# Done ~\")\n break\n\n for i in range(len(pt_data)):\n cv2.rectangle(image, pt_data[i], (pt_data[i][0] + w, pt_data[i][1] + h), (0, 0, 255), 2)\n print(\"# Mark Point Info : (\", pt_data[i], \")\")\n\n return image\n\n\ndef resize_img(image_, ratio=1.0):\n h, w = image_.shape[:2]\n image_ = cv2.resize(image_, (int(ratio * w), int(ratio * h)))\n return image_\n\n\nif __name__ == \"__main__\":\n # obj_image1 = cv2.imread(\"D:\\\\temp5.png\")\n obj_image2 = cv2.imread(\"D:\\\\FF_.png\")\n temp_image = cv2.imread(\"D:\\\\AOI_STI\\\\images\\\\MarkPoint\\\\MarkPointTemplate1.png\")\n\n tic = time.time()\n\n # obj_image1 = resize_img(obj_image1, 0.4)\n obj_image2 = resize_img(obj_image2, 0.4)\n temp_image = resize_img(temp_image, 0.4)\n\n # out_image1 = multi_template(obj_image1, temp_image)\n out_image2 = multi_template(obj_image2, temp_image)\n print(\"# Total Spent Times : %.3f ms\" % ((time.time() - tic)*1000))\n\n # 
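(Editor's note) if only the single best match were needed, cv2.minMaxLoc on the raw score map inside multi_template would do, e.g.:\n #   _, max_val, _, max_loc = cv2.minMaxLoc(res_)\n # 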
out_image1 = resize_img(out_image1)\n out_image2 = resize_img(out_image2)\n # cv2.imshow(\"Out1\", out_image1)\n cv2.imshow(\"Out2\", out_image2)\n # cv2.imwrite(\"D:\\\\out1.png\", out_image1)\n cv2.imwrite(\"D:\\\\out2.png\", out_image2)\n cv2.waitKey()\n","repo_name":"gogo12235LYH/Image-Processing-Base","sub_path":"Muti_Template.py","file_name":"Muti_Template.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37358773994","text":"\nfrom asyncio import FastChildWatcher\nfrom django.shortcuts import HttpResponse\nfrom django.http import JsonResponse\n\nimport json\nimport time\nimport os\n\nfrom HModules import HMySQL, HConfig, HActuator\n\nsql = HMySQL.HSQL('HHOME')\ncam = HActuator.CAM()\n\nMDIR = 'HModules'\nFACES = MDIR + '/baseFaces'\nif not os.path.isdir(FACES): os.mkdir(FACES)\nCONFS = MDIR + '/conf'\nif not os.path.isdir(CONFS): os.mkdir(CONFS)\n\nlightConf = HConfig.CONFIG(CONFS + '/light_conf', CONFS + '/light_conf_reset')\ndhtConf = {\n 'temperature': 24,\n 'humidity': 60,\n 'water_auto': True,\n 'water_state': False,\n 'heat_auto': True,\n 'heat_state': False,\n}\ndhtConf = HConfig.CONFIG(CONFS + '/dht_conf', CONFS + '/dht_conf_reset', dhtConf)\n\ndef index(request):\n return HttpResponse('这不是你该来的地方')\n\n############################## 设置系列 ##############################\n\ndef set_hconfig(change_info: dict) -> bool:\n with open('HModules/thresholds', encoding='utf8') as f: hconfig = json.loads(f.readline())\n for k, v in change_info.items():\n if k in hconfig: hconfig[k] = v\n with open('HModules/thresholds_reset', 'w', encoding='utf8') as f: f.write(json.dumps(hconfig))\n return True\n\ndef set_temperature(request):\n query_data = json.loads(request.body)\n rdata = dict()\n rdata['temperature'] = float(query_data['num'])\n set_hconfig(rdata)\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef set_humidity(request):\n query_data = json.loads(request.body)\n rdata = {'pid': query_data['houseNum']}\n rdata['humidity'] = int(query_data['num'])\n set_hconfig(rdata)\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef set_light(request):\n r\"\"\"\n POST request\n 设置灯光配置,请求数据\n 必须字段:\n houseNum -- 节点 id\n lightId -- 灯光配置 id\n 非必须字段:\n name -- 灯光名\n local -- 灯光安置地点\n light -- 灯光亮度\n color -- 灯光颜色\n state -- 灯光状态 1 - 开,0 - 关\n \"\"\"\n query_data = json.loads(request.body)\n rdata = {'pid': query_data['houseNum']}\n for newConf in query_data['setLights']:\n lightId = str(newConf['lightId'])\n conf = lightConf.get_data([lightId])\n for k in conf:\n if k in newConf:\n conf[k] = newConf[k]\n lightConf.save()\n rdata['config'] = lightConf.get_data()\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef set_water(request):\n query_data = json.loads(request.body)\n rdata = dict()\n rdata['water_auto'] = False\n rdata['water_state'] = bool(query_data['status'])\n set_hconfig(rdata)\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef set_curtain(request):\n query_data = json.loads(request.body)\n rdata = dict()\n rdata['curtain_auto'] = False\n rdata['curtain_state'] = bool(query_data['status'])\n set_hconfig(rdata)\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\n############################## 获取系列 ##############################\n\ndef get_ports(request):\n rdata = {'ports': sql.get_ports()}\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef get_data(request):\n # print(f\"get_data = {request.GET}\")\n r\"\"\"\n GET request\n \"\"\"\n rdata = 
dict()\n rdata['pid'] = request.GET['houseNum'][0]\n if 'startTime' in request.GET and 'endTime' in request.GET:\n rdata['start_date'] = request.GET['startTime']\n rdata['end_date'] = request.GET['endTime']\n else:\n rdata['start_date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 24*3600))\n rdata['end_date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n rdata['data_type'] = 'dht'\n dht_data = sql.get_data(rdata)\n # rdata['data_type'] = 'light'\n # light_data = sql.get_data(rdata)\n # return JsonResponse({'state': 'ok', 'dht_data': dht_data, 'light_data': light_data})\n return JsonResponse({'state': 'ok', 'dht_data': dht_data})\n\ndef get_light_config(request):\n r\"\"\"\n 此函数用于返回灯光的配置数据\n\n Return value / exceptions raised:\n - 返回一个字典\n \"\"\"\n rdata = {'pid': request.GET['houseNum'][0]}\n if not lightConf.data:\n lightFields = ['id', 'name', 'local', 'light', 'color', 'state']\n query_sql = f\"\"\"\n SELECT {', '.join(['`' + f + '`' for f in lightFields])}\n FROM `light_config` WHERE `pid` = {rdata['pid']}\n \"\"\"\n lights = sql.sql_select(lightFields, query_sql)\n for light in lights:\n lid = light['id']\n del light['id']\n lightConf.updata(lid, light)\n # lightConf.save()\n rdata['lights'] = lightConf.get_data()\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef get_dht_config(request):\n r\"\"\"\n GET request\n 此函数用于返回DHT传感器的配置数据\n\n Return value / exceptions raised:\n - 返回一个字典,内容是\n {\n \"temperature\": 27,\n \"humidity\": 44,\n \"curtain_auto\": true,\n \"curtain_state\": true,\n \"water_auto\": false,\n \"water_state\": false,\n }\n \"\"\"\n rdata = dhtConf.get_data()\n rdata['pid'] = request.GET['houseNum'][0]\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef get_masters(request):\n r\"\"\"\n GET request\n 返回已经登陆的人脸\n\n Return value / exceptions raised:\n - 返回一个列表 [{}, {},]\n \"\"\"\n rdata = {'state': 'ok'}\n masters = [cam.get_user_info(userHeadPic[:-4]) for userHeadPic in os.listdir(FACES)]\n rdata['masters'] = masters\n return JsonResponse(rdata)\n\n############################## 添加系列 ##############################\n\ndef add_port(request):\n r\"\"\"\n POST 请求:\n 首先获取请求中的 name 和 local 字段插入添加一个节点\n 之后查询最新的节点信息\n \"\"\"\n qdata = json.loads(request.body)\n query_data = [qdata['portName'], qdata['portLocal']]\n # query_data = ['新', '奥秘客人']\n query_sql = \"INSERT INTO `ports`(`name`, `local`) VALUES(%s, %s)\"\n sql.sql_insert(query_sql, query_data)\n\n query_sql = \"SELECT * FROM `ports` WHERE `id` = (SELECT MAX(`id`) FROM `ports`)\"\n rdata = sql.sql_select(['id', 'name', 'local'], query_sql)[0]\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef add_light(request):\n r\"\"\"\n POST 请求:\n 需要获取的字段:\n pid -- 节点 id【必须\n name -- 灯光名称备注【非必须\n local -- 灯光布置地点备注【非必须\n \"\"\"\n qdata = json.loads(request.body)\n # qdata = {'pid':1 ,'name': '卧室主灯', 'local': '卧室'}\n insert_data = dict()\n for f in ['pid', 'name', 'local']:\n if qdata.get(f, None):\n insert_data[f] = qdata[f]\n query_sql = f\"\"\"\n INSERT INTO `light_config`({', '.join(['`' + f + '`' for f in insert_data.keys()])})\n VALUES({', '.join(['%s']*len(insert_data.keys()))})\n \"\"\"\n sql.sql_insert(query_sql, list(insert_data.values()))\n\n query_sql = \"SELECT * FROM `light_config` WHERE `id` = (SELECT MAX(`id`) FROM `light_config`)\"\n rdata = sql.sql_select(['id', 'pid', 'name', 'local', 'light', 'color', 'state'], query_sql)[0]\n rdata['state'] = 'ok'\n return JsonResponse(rdata)\n\ndef add_master(request):\n r\"\"\"\n POST 注册新的房屋主人\n 需要的字段:\n facePic 
-- 人脸的照片的 base64 编码字符串\n name -- 需要注册的照片的 id / 名字\n \"\"\"\n qdata = json.loads(request.body)\n with open(f\"{FACES}/{qdata['name']}.jpg\", 'wb') as f:\n f.write(base64.b64decode(qdata['facePic']))\n cam.add_user(qdata['name'])\n rdata = cam.user_info(qdata['name'])\n return JsonResponse(rdata)\n\n","repo_name":"Hybrogen/HHOME","sub_path":"HHOME/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"46109803790","text":"# 1 ~ 10 사이의 어떤 수로도 나누어 떨어지는 가장 작은 수는 2520입니다.\n# 그러면 1 ~ 20 사이의 어떤 수로도 나누어 떨어지는 가장 작은 수는 얼마입니까?\n\n#방법 1 : 오래 걸림\n# value = 1\n# check = True\n# while check:\n# for i in range(1,21):\n# print()\n# if value % i == 0 :\n# continue\n# else :\n# value = value+1\n# break\n\n# print(\"kk\",value)\n# else:\n# print(value)\n# break\n\n\n# 방법2: 수학이론을 적용한 더 빠르고 좋은 방법 #김웅규 t\n\ndef gcd(a,b):\n while (b!=0):\n r=a%b\n a,b=b,r\n return a\ndef lcm(a,b):\n return a*b/gcd(a,b)\n\nn=20\nc=lcm(1,2)\nfor i in range(3,n+1):\n c=lcm(c,i)\nprint(c)\n\n\n#232792560.0","repo_name":"xzeromath/euler","sub_path":"ep5.py","file_name":"ep5.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37011572814","text":"from django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n path('', inicio, name=\"inicio\"),\n\n path('jugadorFormulario/', jugadorFormulario, name=\"jugadorFormulario\"),\n path('equipoFormulario/', equipoFormulario, name=\"equipoFormulario\"),\n path('estadioFormulario/', estadioFormulario, name=\"estadioFormulario\"),\n\n path('busquedaJugador/', busquedaJugador, name=\"busquedaJugador\"),\n path('buscar/', buscar, name=\"buscar\"),\n\n path('leerJugadores/', leerJugadores, name=\"leerJugadores\"),\n path('eliminarJugador/', eliminarJugador, name=\"eliminarJugador\"),\n path('editarJugador/', editarJugador, name=\"editarJugador\"),\n\n path('leerEquipos/', leerEquipos, name=\"leerEquipos\"),\n path('eliminarEquipo/', eliminarEquipo, name=\"eliminarEquipo\"),\n path('editarEquipo/', editarEquipo, name=\"editarEquipo\"),\n\n path('leerEstadios/', leerEstadios, name=\"leerEstadios\"),\n path('eliminarEstadio/', eliminarEstadio, name=\"eliminarEstadio\"),\n path('editarEstadio/', editarEstadio, name=\"editarEstadio\"),\n \n\n]","repo_name":"fedebaldasso/Entrega1-Baldasso_Federico","sub_path":"AppEI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11188336868","text":"import math as mt\n\nfrom Scene.class_scene_Gl import *\n\n\nclass TestGlScene(SceneThird):\n debug = False\n\n ambient = (1.0, 1.0, 1.0, 1) # Первые три числа цвет в формате RGB, а последнее - яркость\n lightpos = (1.0, 1.0, 1.0) # Положение источника освещения по осям xyz\n\n rquad = 2.0\n speed = 0.1\n wireframe = False\n\n cur_x = 0.\n cur_y = 0.\n\n def gl_mouse_motion(self, x, y):\n self.cur_x = x\n self.cur_y = y\n self.print('callback gl_mouse_motion in point (%d, %d)' % (x, y))\n if not self.left_button_down:\n return\n\n pass\n\n def gl_mouse_motion_passive(self, x, y):\n self.cur_x = x\n self.cur_y = y\n\n return super().gl_mouse_motion_passive(x, y)\n\n def init(self):\n if self.wireframe:\n glPolygonMode(GL_FRONT, GL_LINE)\n glPolygonMode(GL_BACK, GL_LINE)\n elif not self.wireframe:\n glPolygonMode(GL_FRONT, GL_FILL)\n glPolygonMode(GL_BACK, 
GL_FILL)\n\n # glLoadIdentity()\n # glTranslatef(0.0, 0.0, -5.0)\n\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, self.ambient) # Определяем текущую модель освещения\n glEnable(GL_LIGHTING) # Включаем освещение\n glEnable(GL_LIGHT0) # Включаем один источник света\n glLightfv(GL_LIGHT0, GL_POSITION, self.lightpos) # Определяем положение источника света\n\n glutSetCursor(GLUT_CURSOR_NONE)\n\n self.gen_draw()\n\n def gl_key_pressed(self, *args):\n # todo super().gl_key_pressed(args)\n if args[0] == b\"c\":\n self.is_projection_ortho = not self.is_projection_ortho\n if args[0] == b\"\\x1b\":\n glutLeaveMainLoop()\n exit()\n if args[0] == b\"x\":\n if not self.wireframe:\n glPolygonMode(GL_FRONT, GL_LINE)\n glPolygonMode(GL_BACK, GL_LINE)\n else:\n glPolygonMode(GL_FRONT, GL_FILL)\n glPolygonMode(GL_BACK, GL_FILL)\n self.wireframe = not self.wireframe\n elif args[0] == b\"v\":\n self.rquad = 2\n\n if args[0] == b\"w\":\n self.rotate_up()\n if args[0] == b\"s\":\n self.rotate_down()\n if args[0] == b\"a\":\n self.rotate_left()\n if args[0] == b\"d\":\n self.rotate_right()\n\n def point_pos(self, cnt, r):\n # type: (int, int) -> TestGlScene\n # points = [(self.width / 2 + r * mt.cos(a), self.height / 2 + r * mt.sin(a)) for a in [2 * mt.pi * i / cnt for i in range(cnt)]]\n points = [(r * mt.cos(a), r * mt.sin(a)) for a in [2 * mt.pi * i / cnt for i in range(cnt)]]\n\n glPointSize(7.0)\n self.set_pixels(points, self.get_color(150, 0, 0))\n '''\n for pt in points:\n self.setpixel(pt[0], pt[1], self.getcolor(1.0, 0.0, 0.0))\n '''\n\n glLineWidth(0.1)\n for pt1 in points:\n for pt2 in points:\n '''\n c_x1, c_y1 = self.get_xy_scene(pt1[0], pt1[1])\n c_x2, c_y2 = self.get_xy_scene(pt2[0], pt2[1])\n self.line(c_x1, c_y1, c_x2, c_y2, self.getcolor(0.0, 1.0, 0.0))\n '''\n self.line(pt1[0], pt1[1], pt2[0], pt2[1], self.get_color(0, 150, 0))\n\n return self\n\n the_img = None\n\n def gen_draw(self):\n self.the_img = glGenLists(1)\n glNewList(self.the_img, GL_COMPILE)\n self.draw_obj()\n glEndList()\n\n def redraw(self):\n # type: () -> TestGlScene\n\n glCallList(self.the_img)\n\n glPushMatrix()\n koef = 1\n c_x, c_y = self.get_xy_scene(self.cur_x, self.cur_y)\n glTranslatef(c_x * koef / self.nSca, c_y * koef / self.nSca, 0.)\n # glutSolidCube(0.05)\n glutSolidSphere(0.01 / self.nSca, 20, 20)\n\n color = [1, 0.2, 0.5, 1.]\n glMaterialfv(GL_FRONT, GL_DIFFUSE, color)\n glDisable(GL_LIGHTING)\n glPopMatrix()\n\n glPushMatrix()\n self.line(0, 0, self.cur_x - self.width / 2, self.cur_y - self.height / 2, self.get_color(0, 0, 255))\n glPopMatrix()\n\n return super().redraw()\n\n def draw_obj(self):\n # type: () -> TestGlScene\n\n lol = 45\n\n '''\n glPushMatrix()\n glTranslatef(*self.lightpos)\n glutSolidSphere(0.05, 20, 20)\n glPopMatrix()\n \n glPushMatrix()\n glPointSize(17.0)\n glBegin(GL_POINTS)\n glColor3d(*self.ambient)\n glVertex3f(*self.lightpos)\n glEnd()\n glPopMatrix()\n '''\n glPushMatrix()\n\n self.lines()\n\n for x, y in [[-self.width / 2, -self.height / 2], [-self.width / 2, self.height / 2],\n [self.width / 2, self.height / 2], [self.width / 2, -self.height / 2]]:\n '''\n c_x, c_y = self.get_xy_scene(x + self.width / 2, y + self.height / 2)\n self.line(0, 0, c_x, c_y, self.getcolor(255, 0, 0))\n '''\n self.line(0, 0, x, y, self.get_color(255, 128, 0))\n\n self.point_pos(lol, self.height / 2)\n glEnable(GL_LIGHTING)\n glPopMatrix()\n\n return self\n\n\nt = TestGlScene(640, 
480)\nt.draw()\n","repo_name":"thenzen34/Scene","sub_path":"examples/test_gl_scene3.py","file_name":"test_gl_scene3.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9577107479","text":"\"\"\"my_controller controller.\"\"\"\n\n# You may need to import some classes of the controller module. Ex:\n# from controller import Robot, Motor, DistanceSensor\nfrom turtle import left\nfrom controller import Robot\nfrom PyCTRNN import CTRNN\nimport numpy\n# create the Robot instance.\nrobot = Robot()\n\n# get the time step of the current world.\ntimestep = int(robot.getBasicTimeStep())\n\n\nbrain = CTRNN(4)\n\nds1 = robot.getDevice(\"ds1\")\nds2 = robot.getDevice(\"ds2\")\nds1.enable(timestep)\nds2.enable(timestep)\nleftMotor = robot.getDevice(\"leftMotor\")\nrightMotor = robot.getDevice(\"rightMotor\")\nleftMotor.setPosition(float(\"inf\"))\nrightMotor.setPosition(float(\"inf\"))\n\n# brain.weights = numpy.zeros((brain.size,brain.size))\n# brain.bias = numpy.zeros(brain.size)\n\n# brain.weights[2,0] = 4\n# brain.weights[3,1] = 4\n# brain.weights[2,1] = -4\n# brain.weights[3,0] = -4\n\nprint(brain.weights)\n\nmutateTimer=0\n\nwhile robot.step(timestep) != -1:\n\n if(mutateTimer > 100000):\n brain.mutate(0.5)\n mutateTimer = 0\n print(brain.weights)\n else:\n mutateTimer += timestep\n sensorInput = numpy.array([ds1.getValue(),ds2.getValue(),0,0])\n\n brainOutput = brain.step(sensorInput)\n \n \n leftMotor.setVelocity(5*brainOutput[2])\n rightMotor.setVelocity(5*brainOutput[3])\n #print(brainOutput)\n \n\n# Enter here exit cleanup code.\n","repo_name":"BhargavaGowda/WebotsCTRNNVisual","sub_path":"trial/controllers/my_controller/my_controller.py","file_name":"my_controller.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71430310407","text":"import socket\nimport time\n\ndns_ip = \"127.0.0.1\"\ndns_port = 53\n# Dictionary to store IP address mappings\ndns_cache = {\"www.doryevhttp.com\": \"127.0.0.1\"}\n\n\ndef check_internet():\n try:\n socket.getaddrinfo(\"www.google.com\", None)\n return True\n except socket.gaierror:\n return False\n\n\ndef dns_reply():\n domain, client_ip = sock.recvfrom(1024)\n domain = domain.decode('utf-8')\n if domain in dns_cache:\n ip_address = dns_cache[domain]\n print(f\"{domain} : {dns_cache[domain]}\")\n if check_internet():\n sock.sendto(ip_address.encode('utf-8'), client_ip)\n print(\"IP SENT\")\n print(f\"Domain: {domain} , IP : {ip_address}\")\n\n else:\n if check_internet():\n try:\n ip_address = socket.gethostbyname(domain)\n dns_cache[domain] = ip_address\n sock.sendto(ip_address.encode('utf-8'), client_ip)\n print(\"IP SENT\")\n print(f\"Domain: {domain} , IP : {ip_address}\")\n\n except socket.gaierror:\n print(\"Could not resolve domain / no internet connection\")\n ip_address = \"Couldn't Resolve\"\n sock.sendto(ip_address.encode('utf-8'), client_ip)\n\n\nif __name__ == '__main__':\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((dns_ip, 53))\n print(\"DNS SERVER IS ON...\")\n # Start sniffing DNS queries on port 53 on the specified interface\n while True:\n dns_reply()\n time.sleep(1)\n","repo_name":"yevgenyivanov/Networking_Proj","sub_path":"dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"1212044163","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 23 11:13:14 2020\r\n\r\n@author: 766810\r\n\"\"\"\r\nelements = \"H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn Fr Ra Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No Lr Rf Db Sg Bh Hs Mt Ds Rg Cn Uut Fl Uup Lv Uus Uuo\".split()\r\n\r\nimport PyPDF2\r\ni=elements.index\r\ndef m(w,p=(\"\",[])):\r\n if not w:return p\r\n x,y,z=w[0],w[:2],w[:3]\r\n if x!=y and y in elements:\r\n a=m(w[2:],(p[0]+y,p[1]+[i(y)]))\r\n if a:return a\r\n if x in elements:\r\n b=m(w[1:],(p[0]+x,p[1]+[i(x)]))\r\n if b:return b\r\n if z in elements:\r\n c=m(w[3:],(p[0]+z,p[1]+[i(z)]))\r\n if c:return c\r\n\r\nf=open('/Users/766810/python/largedictionary.pdf','rb')\r\n# creating a pdf reader object \r\npdfReader = PyPDF2.PdfFileReader(f) \r\nfor l in f:\r\n x=m(l[:-1])\r\n if x:print(x[0],x[1])\r\nf.close()","repo_name":"mcvenkat/Python-Programs","sub_path":"Longest Word from Periodic Table 2.py","file_name":"Longest Word from Periodic Table 2.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7986260397","text":"import sys\nimport json\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\n\ndef get_s3_object_source_path():\n args = getResolvedOptions(sys.argv, ['JOB_NAME', 's3_trigger_event'])\n s3_event = json.loads(args['s3_trigger_event'])\n bucket = s3_event['s3']['bucket']['name']\n key = s3_event['s3']['object']['key']\n s3_file_location = f's3://{bucket}/{key}'\n\n return s3_file_location\n\ndef get_s3_object_destination_path():\n output_filename = \"custom_output_file.json\"\n return get_s3_object_source_path() + \"/\" + output_filename\n\ndef main():\n sc = SparkContext()\n glueContext = GlueContext(sc)\n\n dynamicFrame = glueContext.create_dynamic_frame.from_options(\n format_options={\"rowTag\": \"Categorias\"},\n connection_type=\"s3\",\n format=\"xml\",\n connection_options={\"paths\": [get_s3_object_source_path()]},\n transformation_ctx=\"dynamicFrame\",\n )\n\n dynamicFrame = dynamicFrame.repartition(1)\n\n glueContext.write_dynamic_frame.from_options(\n frame=dynamicFrame,\n connection_type=\"s3\",\n connection_options={\"path\": get_s3_object_destination_path(), \"partitionKeys\": []},\n format=\"json\"\n )\n\nmain()\n","repo_name":"bryannbarbosa/aws-glue-s3-python","sub_path":"glue.py","file_name":"glue.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33166032394","text":"import json\nfrom machine_settings import _MachineConfig\nimport os.path as os_path\n\n\nENCODING = 'utf8'\n\n\ndef get_config():\n config = Config()\n return config\n\n\nclass _ConfigBase:\n def __init__(self, parent):\n self._parent = parent\n machine_config = _MachineConfig()\n self._initialize(machine_config)\n\n def _initialize(self, machine_config):\n pass\n\n def __str__(self):\n dict = {}\n self._toJson(dict, self)\n return json.dumps(dict, indent=4)\n \n @classmethod\n def _toJson(cls, parent, obj):\n for attribute_name in dir(obj): \n if not attribute_name.startswith('_'):\n attribute = getattr(obj, attribute_name)\n if 
isinstance(attribute, _ConfigBase):\n child = {}\n parent[attribute_name] = child\n cls._toJson(child, attribute)\n else:\n parent[attribute_name] = attribute \n \n\nclass _CheckpointConfig(_ConfigBase):\n def _initialize(self, _):\n \n self.enabled = True\n self.weights_only = True\n self.dir = 'checkpoints'\n self.filename = 'best_model.hdf5'\n\n\nclass _CrossValidationConfig(_ConfigBase):\n def _initialize(self, machine_config):\n self.train_set_ids_path = os_path.join(machine_config.data_dir, 'preprocessed/cross-validation/group_1_train_set_1809-2018.txt')\n self.dev_set_ids_path = os_path.join(machine_config.data_dir, 'preprocessed/cross-validation/group_1_target_dev_set_2018-2018.txt')\n self.test_set_ids_path = os_path.join(machine_config.data_dir, 'preprocessed/cross-validation/group_1_reporting_test_set_2018-2018.txt')\n self.encoding = ENCODING\n self.train_limit = machine_config.train_limit\n self.dev_limit = machine_config.dev_limit\n self.test_limit = machine_config.test_limit\n\n\nclass _CsvLoggerConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.dir = 'logs'\n self.filename = 'logs.csv'\n self.best_epoch_filename = 'best_epoch_logs.txt'\n self.encoding = ENCODING\n\n\nclass _DatabaseConfig(_ConfigBase):\n def _initialize(self, machine_config):\n\n self.config = { 'user': '****',\n 'database': '****',\n 'password': '****', \n 'host': machine_config.database_host,\n 'charset': 'utf8mb4', \n 'collation': 'utf8mb4_unicode_ci', \n 'use_unicode': True }\n\n\nclass _EarlyStoppingConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.min_delta = 0.001\n self.patience = 2\n\n\nclass _ModelConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.checkpoint = _CheckpointConfig(self)\n\n self.word_embedding_size = 300\n self.word_embedding_dropout_rate = 0.25\n \n self.conv_act = 'relu'\n self.num_conv_filter_sizes = 3\n self.min_conv_filter_size = 2\n self.conv_filter_size_step = 3\n self.total_conv_filters = 350\n self.num_pool_regions = 5\n\n self.num_journals = 30347\n self.journal_embedding_size = 50\n\n self.num_hidden_layers = 1\n self.hidden_layer_size = 3365\n self.hidden_layer_act = 'relu'\n self.inputs_dropout_rate = 0.0\n self.dropout_rate = 0.5\n\n self.output_layer_act = 'sigmoid'\n self.output_layer_size = self._pp_config.num_labels \n \n self.init_threshold = 0.5\n self.init_learning_rate = 0.001\n\n @property\n def hidden_layer_sizes(self):\n return [self.hidden_layer_size]*self.num_hidden_layers\n\n @property\n def conv_filter_sizes(self):\n sizes = [self.min_conv_filter_size + self.conv_filter_size_step*idx for idx in range(self.num_conv_filter_sizes)]\n return sizes\n\n @property\n def conv_num_filters(self):\n num_filters = round(self.total_conv_filters / len(self.conv_filter_sizes))\n return num_filters\n\n @property\n def _pp_config(self):\n return self._parent.inputs.preprocessing\n\n @property\n def vocab_size(self):\n return self._pp_config.vocab_size\n\n @property\n def title_max_words(self):\n return self._pp_config.title_max_words\n\n @property\n def abstract_max_words(self):\n return self._pp_config.abstract_max_words\n\n @property\n def num_year_completed_time_periods(self):\n return self._pp_config.num_year_completed_time_periods\n\n @property\n def num_pub_year_time_periods(self):\n return self._pp_config.num_pub_year_time_periods\n\n\nclass _PreprocessingConfig(_ConfigBase):\n def _initialize(self, machine_config):\n\n self.word_index_lookup_path = os_path.join(machine_config.data_dir, 
'preprocessed/vocab/cross_val_group_1_word_index_lookup.pkl') # indices start from 2\n self.unknown_index = 1\n self.padding_index = 0\n self.title_max_words = 64\n self.abstract_max_words = 448\n self.num_labels = 1 \n self.vocab_size = 400000\n self.min_year_completed= 1965 \n self.max_year_completed = 2018 \n self.num_year_completed_time_periods = 1 + self.max_year_completed - self.min_year_completed\n self.min_pub_year = 1809 \n self.max_pub_year = 2018 \n self.num_pub_year_time_periods = 1 + self.max_pub_year - self.min_pub_year\n\n \nclass _ProcessingConfig(_ConfigBase):\n def _initialize(self, machine_config):\n \n self.run_on_cpu = machine_config.run_on_cpu \n self.use_multiprocessing = machine_config.use_multiprocessing \n self.workers = machine_config.workers \n self.max_queue_size = machine_config.max_queue_size\n\n\nclass _ReduceLearningRateConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.factor = 0.33\n self.patience = 1\n self.min_delta = 0.001\n\n\nclass _RestoreConfig(_ConfigBase):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n \n self.sub_dir = '****'\n self.model_json_filename = 'model.json'\n self.encoding = ENCODING\n self.model_checkpoint_dir = 'checkpoints'\n self.model_checkpoint_filename = 'best_model.hdf5'\n self.weights_only_checkpoint = True\n self.threshold = 0.5\n self.learning_rate = 0.001\n \n\nclass _ResumeConfig(_RestoreConfig):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n\n self.enabled = False\n self.resume_checkpoint_filename = 'best_model_resume.hdf5'\n self.resume_logger_filename = 'logs_resume.csv'\n\n\nclass _SaveConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.settings_filename = 'settings.json'\n self.model_json_filename = 'model.json'\n self.encoding = ENCODING\n self.model_img_filename = 'model.png'\n\n\nclass _TensorboardConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.enabled = False\n self.dir = 'logs'\n self.write_graph = True\n\n\nclass _EvaluateConfig(_RestoreConfig, _ProcessingConfig):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n\n self.results_filename = 'eval-result.txt'\n self.encoding = ENCODING\n self.batch_size = 128\n self.limit = 1000000000\n\n\nclass _PredictConfig(_RestoreConfig, _ProcessingConfig):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n\n self.pmids_filepath = '../datasets/pipeline_validation_set.json'\n self.results_dir = 'predictions_val'\n self.results_filename = 'predictions.csv'\n self.dereferenced_filename = 'dereferenced_predictions.csv'\n self.metrics_filename_template = 'metrics{}.csv'\n self.journal_groups_filepath = os_path.join(machine_config.data_dir, 'preprocessed/selective-indexing/selectively_indexed_journal_groups.csv') \n self.encoding = ENCODING\n self.delimiter = ','\n self.batch_size = 128\n self.limit = 1000000000\n \n \nclass _InputsConfig(_ConfigBase):\n def _initialize(self, _):\n\n self.preprocessing = _PreprocessingConfig(self)\n\n\nclass _OptimizeFscoreThresholdConfig(_ProcessingConfig):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n \n self.enabled = True\n self.batch_size = 128\n self.limit = 1000000000\n self.metric_name = 'fscore'\n self.alpha = 0.005\n self.k = 3\n\n\nclass _TrainingConfig(_ProcessingConfig):\n def _initialize(self, machine_config):\n super()._initialize(machine_config)\n\n self.batch_size = 128\n self.initial_epoch = 0\n self.max_epochs = 500\n self.train_limit = 
1000000000\n self.dev_limit = 1000000000\n self.monitor_metric = 'val_fscore'\n self.monitor_mode = 'max'\n self.save_config = _SaveConfig(self)\n self.optimize_fscore_threshold = _OptimizeFscoreThresholdConfig(self)\n self.reduce_learning_rate = _ReduceLearningRateConfig(self)\n self.early_stopping = _EarlyStoppingConfig(self)\n self.tensorboard = _TensorboardConfig(self)\n self.csv_logger = _CsvLoggerConfig(self)\n self.resume = _ResumeConfig(self)\n\n\nclass Config(_ConfigBase):\n def __init__(self):\n super().__init__(self)\n\n def _initialize(self, machine_config):\n\n self.root_dir = machine_config.runs_dir\n self.data_dir = machine_config.data_dir\n self.inputs = _InputsConfig(self)\n self.model = _ModelConfig(self)\n self.cross_val = _CrossValidationConfig(self)\n self.train = _TrainingConfig(self)\n self.eval = _EvaluateConfig(self)\n self.pred = _PredictConfig(self)\n self.database = _DatabaseConfig(self)","repo_name":"indexing-initiative/selective_indexing","sub_path":"cnn/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"512262085","text":"\"\"\"\nUse these functions to seed(insert) fake data to database\nRunning this script is similar to executing the same code\nas in the script in the shell console.\n\"\"\"\nfrom datetime import date\nfrom faker import Faker\nimport random\n\nfrom typing import Optional\nfrom seed_data_to_data_base.django_connection import create_con\nfrom django.db.models import Q\n\nfrom app.models import Employees, Positions\n\n# create connection from this script to Django project\ncreate_con()\n\n# Let`s localise Faker lib\nfake = Faker('ru_RU')\n\n# You can use this if everything is broken\n# Employees.fix_tree()\n\n# This method delete all data from model\n# Employees.objects.all().delete()\n\n\ndef get_first_name() -> str:\n \"\"\"\n :return: String with random first name.\n \"\"\"\n first_name_ = fake.first_name()\n return first_name_\n\n\ndef get_last_name() -> str:\n \"\"\"\n :return: String with random last name.\n \"\"\"\n last_name_ = fake.last_name()\n return last_name_\n\n\ndef get_random_day() -> date:\n \"\"\"\n :return: random day.\n \"\"\"\n random_day_ = fake.date_this_year()\n return random_day_\n\n\ndef get_random_salary() -> float:\n \"\"\"\n :return: Returns a random float number up to 2 decimal places == random salary\n \"\"\"\n random_salary_ = round(random.uniform(80000, 600000), 2)\n\n return random_salary_\n\n\ndef get_position(name: str) -> Positions:\n \"\"\"\n Function return an instance of a class Positions\n :param name: Name of position\n :return: Function return an instance of a class Positions\n \"\"\"\n position_ = Positions.objects.get(position_name=name)\n return position_\n\n\ndef get_employee(employee_id: Optional[int] = None, position_id: Optional[int] = None) -> Employees:\n \"\"\"\n Function return an instance of a class Employees\n :param employee_id: id of employee\n :param position_id: position_id of employee\n :return: Function return an instance of a class Employees\n \"\"\"\n if employee_id:\n employee_ = Employees.objects.get(id=employee_id)\n return employee_\n\n employee_ = Employees.objects.get(position_id=position_id)\n return employee_\n\n\ndef created_seo() -> None:\n \"\"\"\n This function creates SEO company == The initial vertex of the graph\n :return: None\n \"\"\"\n # The position for which the employee is being created\n position = get_position('Генеральный 
директор')\n # Created random field\n day = get_random_day()\n salary = get_random_salary()\n first_name = get_first_name()\n last_name = get_last_name()\n\n seo = Employees.add_root(first_name=first_name,\n last_name=last_name,\n position=position,\n date_employment=day,\n salary=salary)\n seo.refresh_from_db()\n print('User SEO creation completed successfully')\n\n\ndef created_subordinate(chief_id: int, position_name: str) -> None:\n \"\"\"\n This function create subordinate of the specified chief (chief_position)\n :param position_name: Name of the position for which the employee is being created\n :param chief_id: Еhe id of the chief of the employee(subordinate) being created\n :return: None\n \"\"\"\n # Get object chief\n chief = Employees.objects.get(id=chief_id)\n # Get position for which the employee is being created\n position = get_position(position_name)\n # Created random field\n day = get_random_day()\n salary = get_random_salary()\n first_name = get_first_name()\n last_name = get_last_name()\n # Let`s create employee\n created_employee = chief.add_child(first_name=first_name,\n last_name=last_name,\n position=position,\n date_employment=day,\n salary=salary)\n\n created_employee.refresh_from_db()\n print(f'Subordinate of the chief_id =={chief_id} creation completed successfully, {created_employee.id}')","repo_name":"cheremyha/org_str","sub_path":"seed_data_to_data_base/functions_to_seed_data.py","file_name":"functions_to_seed_data.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8523114366","text":"import csv\nimport io\nimport tempfile\nimport unittest\nfrom src.advisor.reports.csv_issue_type_count_by_file_report import CsvIssueTypeCountByFileReport\nfrom src.advisor.reports.issues.issue_type_config import IssueTypeConfig\nfrom src.advisor.scanners.config_guess_scanner import ConfigGuessScanner\nfrom src.advisor.scanners.source_scanner import SourceScanner\n\n\nclass TestCsvIssueTypeCountByFileReport(unittest.TestCase):\n def test_output(self):\n config_guess_scanner = ConfigGuessScanner()\n source_scanner = SourceScanner()\n\n issue_type_config = IssueTypeConfig()\n report = CsvIssueTypeCountByFileReport('/root', issue_type_config=issue_type_config)\n report.add_source_file('test_negative.c')\n io_object = io.StringIO('__asm__(\"mov r0, r1\")')\n source_scanner.scan_file_object(\n 'test_negative.c', io_object, report)\n report.add_source_file('test_neutral.c')\n io_object = io.StringIO('#pragma simd foo')\n source_scanner.scan_file_object(\n 'test_neutral.c', io_object, report)\n report.add_source_file('config.guess')\n io_object = io.StringIO('aarch64:Linux')\n config_guess_scanner.scan_file_object(\n 'config.guess', io_object, report)\n report.add_source_file('test_nothing.c')\n io_object = io.StringIO('foobar')\n source_scanner.scan_file_object(\n 'test_nothing.c', io_object, report)\n self.assertEqual(len(report.issues), 2)\n self.assertEqual(len(report.remarks), 1)\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as ofp:\n report.write(ofp)\n fname = ofp.name\n ofp.close()\n\n with open(fname) as ifp:\n csv_reader = csv.DictReader(ifp)\n seen_negative = False\n seen_neutral = False\n seen_config_guess = False\n seen_nothing = False\n for row in csv_reader:\n if 'test_negative.c' in row['filename']:\n seen_negative = True\n for (field, actual) in row.items():\n if field == 'filename':\n continue\n expected = '1' if field == 'InlineAsm' else '0'\n 
self.assertEqual(expected, actual)\n elif 'test_neutral.c' in row['filename']:\n seen_neutral = True\n for (field, actual) in row.items():\n if field == 'filename':\n continue\n expected = '1' if field == 'PragmaSimd' else '0'\n self.assertEqual(expected, actual)\n elif 'config.guess' in row['filename']:\n seen_config_guess = True\n for (field, actual) in row.items():\n if field == 'filename':\n continue\n self.assertEqual('0', actual)\n elif 'test_nothing.c' in row['filename']:\n seen_nothing = True\n for (field, actual) in row.items():\n if field == 'filename':\n continue\n self.assertEqual('0', actual)\n else:\n print(row)\n self.fail('Unexpected row in CSV output')\n self.assertTrue(seen_negative)\n self.assertTrue(seen_neutral)\n self.assertTrue(seen_config_guess)\n self.assertTrue(seen_nothing)\n\n","repo_name":"aws/porting-advisor-for-graviton","sub_path":"unittest/test_csv_issue_type_count_by_file_report.py","file_name":"test_csv_issue_type_count_by_file_report.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"} +{"seq_id":"12618445780","text":"\"\"\"\nThe prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143 ?\n\"\"\"\nfrom math import sqrt\n\n\ndef compute_factors(num):\n # limit = int(sqrt(num))\n limit = num // 2\n factors = []\n for d in range(2, limit):\n if num % d == 0:\n factors.append(d)\n print(len(factors))\n \n return factors\n\n\ndef solve_1(num):\n # Get the list of factors\n factors = compute_factors(num)\n # print(factors)\n\n # Get the last prime from list\n i = len(factors) - 1\n while i >= 0:\n prime = True\n for j in range(i):\n if factors[i] % factors[j] == 0:\n # print(factors[i], factors[j])\n prime = False\n break\n if prime:\n return factors[i]\n i = i - 1\n\n\ndef prime_factors(n):\n factors = []\n \n # Print the number of two's that divide n\n flag = False\n while n % 2 == 0:\n flag = True\n n = n / 2\n if flag:\n factors.append(2)\n\n for i in range(3, int(sqrt(n))+1, 2):\n flag = False\n # while i divides n , print i ad divide n\n while n % i == 0:\n flag = True\n n = n / i\n if flag:\n factors.append(i)\n\n if n > 2:\n factors.append(int(n))\n\n return factors\n\n\ndef solve_2(num):\n factors = prime_factors(num)\n print(factors)\n\n factors.sort()\n\n return factors[-1]\n\n\ndef main():\n num = int(input())\n print(solve_2(num))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"romitheguru/ProjectEuler","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28328056534","text":"import pygame\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((500,500))\r\npygame.display.set_caption('Designs')\r\n\r\nx,y = 100,100\r\nwidth, height = 10,10\r\nspeed = 10\r\n\r\nwhile True:\r\n pygame.time.delay(10)\r\n for events in pygame.event.get():\r\n if events.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n \r\n key = pygame.key.get_pressed()\r\n\r\n if key[pygame.K_UP] and y>0:\r\n y -= speed\r\n if key[pygame.K_DOWN] and y<500-height:\r\n y += speed\r\n if key[pygame.K_LEFT] and x>0:\r\n x -= speed\r\n if key[pygame.K_RIGHT] and x<500 -width:\r\n x += speed\r\n \r\n pygame.draw.rect(screen, 'white', (x,y, width, height))\r\n pygame.display.update()\r\n 
\r\n","repo_name":"pickry/programmingknowledge","sub_path":"design.py","file_name":"design.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"32667268729","text":"\nf = open('a.php','w')\nfrom urllib.request import urlopen\nimport urllib3.request\nfrom dataclasses import replace\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nkeyword =sys.argv[1]\nurlA = 'https://www.amazon.in/s?k='+keyword+''\nurlF = 'https://www.flipkart.com/search?q='+keyword+''\nurlR = 'https://www.reliancedigital.in/search?q='+keyword+''\nurlH = 'https://www.happimobiles.com/mobiles/all?serach=&q='+keyword+''\nurlL = 'https://www.lotmobiles.com/catalogsearch/result/?q='+keyword+''\nurlP = 'https://www.paiinternational.in/SearchResults.aspx?search='+keyword+''\nurlB = 'https://www.bajajelectronics.com/product/search?q='+keyword+''\n\nprices = {}\ndef scrape(url):\n if url == urlF:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemF = soup.find_all('div', class_='_4rR01T')\n costF = soup.find_all('div', class_='_30jeq3 _1_WHN1')\n #print(itemF[0].text + \" \" + costF[0].text)\n costF = costF[0].text[1:]\n prices[\"Flipkart\"] = costF\n fp = \"\\nData is Retrieved Successfully!!\\n\"\n fp1 = fp.replace('\\n','
')\n print(fp1)\n fp2 = ''\n print (fp2)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n except Exception as e:\n fp3=\"\\ndata from Flipkart is not found\\n\"\n fp4=fp3.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n elif url == urlA:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemA = soup.find_all('span', class_='a-size-medium a-color-base a-text-normal')\n costA = soup.find_all('span', class_='a-offscreen')\n #print(itemA[0].text + \" \" + costA[0].text)\n costA = costA[0].text[1:]\n prices[\"Amazon\"] = costA\n fp=\"\\nData is Retrieved Successfully!!\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp2 = ''\n print (fp2)\n\n \n fp=\"\\n========================================================\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n\n except Exception as e:\n fp=\"\\ndata from amazon is not found\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n elif url == urlR:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemR = soup.find_all('p', class_='sp__name')\n costR = soup.find_all('span', class_='sc-bxivhb cHwYJ')\n #print(itemR[0].text + \" \" + costR[0].text)\n costR = costR[0].text[1:]\n prices[\"Reliance\"] = costR\n fp=\"\\nData is Retrieved Successfully!!\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp2 = ''\n print (fp2)\n fp=\"\\n========================================================\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n except Exception as e:\n fp=\"\\ndata from reliance is not found\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n elif url == urlH:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemH = soup.find_all('a', class_='name')\n costH = soup.find_all('div', class_='p-c')\n #print(itemH[0].text + \" \" + costH[0].text)\n costH = costH[0].text[1:]\n prices[\"Happi Mobiles\"] = costH\n fp=\"\\nData is Retrieved Successfully!!\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp2 = ''\n print (fp2)\n \n fp=\"\\n========================================================\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n except Exception as e:\n fp=\"\\ndata from Happi mobiles is not found\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n elif url == urlL:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemL = soup.find_all('a', class_='product-item-link')\n costL = soup.find_all('span', class_='price')\n #print(itemL[0].text+ \" \" + costL[0].text)\n costL = costL[0].text[1:]\n prices[\"Lot Mobiles\"] = costL\n fp=\"\\nData is Retrieved Successfully!!\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp2 = ''\n print (fp2)\n fp=\"\\n========================================================\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n except Exception as e:\n fp=\"\\ndata from Lot mobiles is not found\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\n\n elif url == urlB:\n try:\n res = requests.get(url).content\n soup = BeautifulSoup(res, 'html.parser')\n itemB = soup.find_all('h3',class_='prodHeaderDesc mb10')\n costB = soup.find_all('h3', class_='prodPrice d-inline')\n #print(itemB[0].text+ \" \" + costB[0].text)\n costB = costB[0].text[1:]\n prices[\"Bajaj\"] = costB\n fp=\"\\nData is Retrieved Successfully!!\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp2 = ''\n print (fp2)\n fp=\"\\n========================================================\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n except Exception as e:\n fp=\"\\ndata from Bajaj is not found\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n fp=\"\\n========================================================\\n\"\n fp1=fp.replace('\\n','
')\n print(fp1)\n\ndef priceComparision():\n a=f'Showing results for : {keyword} in different sites'\n b=\"\\n\"+a+\"\\n\"\n fp4=b.replace('\\n','
')\n print(fp4)\n \n for item in prices.items():\n a=item[0],\":\",item[1]\n fp=\"\\n\"+item[0]+\":\"+item[1]+\"\\n\"\n fp4=fp.replace('\\n','
')\n print(fp4)\n \nif __name__ == '__main__':\n print('connecting to Flipkart.com\\n')\n flip=scrape(urlF)\n print('connecting to Amazon.in\\n')\n ama=scrape(urlA)\n print('connecting to Reliance\\n')\n rel=scrape(urlR)\n print('connecting to happi mobiles\\n')\n hap=scrape(urlH)\n print('connecting to Lot Mobiles\\n')\n lot=scrape(urlL)\n print('connecting to Bajaj Electronics\\n')\n baj=scrape(urlB)\n b=priceComparision()\nmessage = f\"\"\"\n\n


Flipkart : {prices[\"Flipkart\"]}

Link\n\"\"\"\nf.write(message)\nf.close()\n\n\n","repo_name":"ManvithDevadiga/Price-Comparison-website-using-Python-Beautifulsoup","sub_path":"qwe.py","file_name":"qwe.py","file_ext":"py","file_size_in_byte":7964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43601034663","text":"#from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport http.server\nimport socketserver\nimport simplejson\nimport json\nimport random\nimport myFSM\nimport diaLogic\nimport mqttDriver\nimport os\n\n\n# mqttIp = 'ACA PONGO LA IP'\n\n# # Callback para mensajes de mqtt, todavia no se bien como armar este callback jaja xd\n# topic = ''\n# msg = ''\n\n\n# def messageCallback(client, userdata, message):\n# userdata.topic = message.topic\n# userdata.msg = message.payload\n# return userdata\n\n# Callbacks para la FSM\n\n\n# def idle2EspObj():\n\n\n# def idle2EspAcc():\n\n\n# def espAcc2EspAcc():\n\n\n# def espAcc2EspObj():\n\n\n# def espObj2EspObj():\n\n\n# def espObj2EspAcc():\n\n\n# # Una vez definidos los callbacks y el IP creo el cliente\n# mqttClient = mqttDriver.mqttClient(mqttIp, messageCallback)\n\n# # Una vez definidos los callbacks genero la FSM\n# myLogic = jsonParser.myDiaLogic(\n# checkCallback=checkCallback, setCallback=setCallback)\n# FSM = myFSM.myFSM()\n\n# # Creo estados\n# FSM.addState('Idle')\n# FSM.addState('EsperoAccion')\n# FSM.addState('EsperoObjeto')\n\n# # Creo eventos para Idle\n# FSM.addPath('Idle', myFSM.myOptions('Pregunta', callBack1, 'EsperoAccion'))\n# FSM.addPath('Idle', myFSM.myOptions('Accion', callBack2, 'EsperoObjeto'))\n\n# # Creo eventos para EsperoAccion\n\n# FSM.addPath('EsperoAccion', myFSM.myOptions(\n# 'Accion', callBack2, 'EsperoObjeto'))\n# FSM.addPath('EsperoAccion', myFSM.myOptions(\n# 'Pregunta', callBack1, 'EsperoAccion'))\n\n# # Creo eventos para EsperoObjeto\n\n# FSM.addPath('EsperoObjeto', myFSM.myOptions(\n# 'Accion', callBack2, 'EsperoObjeto'))\n# FSM.addPath('EsperoObjeto', myFSM.myOptions(\n# 'Pregunta', callBack1, 'EsperoAccion'))\n\n\n# # Testeo commit desde PC\n# FSM.printFSM()\n\n\ndef jsonPPrint(filename):\n f = open(filename, 'r')\n data = json.loads(f.read())\n print(json.dumps(data, indent=4, sort_keys=True))\n\n\nclass S(http.server.BaseHTTPRequestHandler):\n '''Servidor HTTP'''\n\n def get_File(self, mime):\n self.send_response(200)\n self.send_header('Content-type', mime)\n self.end_headers()\n f = open(os.path.dirname(\n os.path.abspath(__file__)) + self.path, 'rb')\n self.wfile.write(f.read())\n f.close()\n\n def do_GET(self):\n '''Callback para GETs'''\n print(self.path)\n try:\n if(self.path.endswith('/')):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n f = open(\"myPag.htm\", \"r\", encoding=\"utf8\")\n self.wfile.write(f.read().encode())\n f.close()\n\n elif(self.path.endswith('.png')):\n self.get_File('image/png')\n\n elif(self.path.endswith('.htm')):\n self.get_File('text/html')\n\n elif(self.path.endswith('.jpg')):\n self.get_File('image/jpg')\n\n elif(self.path.endswith('.js')):\n self.get_File('text/javascript')\n\n elif(self.path.endswith('.css')):\n self.get_File('text/css')\n\n elif(self.path.endswith('.woff')):\n self.get_File('application/x-font-woff')\n\n elif(self.path.endswith('.ico')):\n self.get_File('image/ico')\n\n elif(self.path.endswith('.pdf')):\n self.get_File('application/pdf')\n else:\n self.send_error(\n 403, \"Forbidden File, Format Not Supported {}\".format(self.path))\n\n except 
FileNotFoundError:\n self.send_error(404, \"File Not Found {}\".format(self.path))\n\n def do_HEAD(self):\n self._set_headers()\n\n def do_POST(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.data_string = self.rfile.read(int(self.headers['Content-Length']))\n # self.send_response(200)\n data = simplejson.loads(self.data_string)\n with open(\"myFulfillment.json\", \"w\") as outfile:\n simplejson.dump(data, outfile)\n # jsonPPrint(\"myFulfillment.json\")\n f = open(\"response.json\")\n self.wfile.write(f.read().encode())\n return\n\n\ndef run(server_class=http.server.HTTPServer, handler_class=S, server='localhost', port=80):\n server_address = (server, port)\n httpd = server_class(server_address, handler_class)\n print('Server starting at ' + str(server_address[0]) + ':' + str(port))\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n from sys import argv\n\nif len(argv) == 3:\n run(server=argv[1], port=int(argv[2]))\n print(argv[1])\nelse:\n run()\n","repo_name":"sfalcona/myIOT","sub_path":"myServer.py","file_name":"myServer.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19213330481","text":"import cv2\r\nimport numpy as np\r\nfrom skimage.morphology import convex_hull_image\r\nimport glob\r\nfrom natsort import natsorted\r\nimport csv\r\nimport os\r\n\r\ndef dens(folder):\r\n\r\n # 画像ファイルの読み込み\r\n imgs = folder + \"/*png\"\r\n imgs_list = glob.glob(imgs)\r\n imgs_list = natsorted(imgs_list)\r\n\r\n # データを出力するcsv作成\r\n filename = \"result.csv\"\r\n with open(filename, \"w\", newline=\"\") as f:\r\n header = ['img_name', 'evaluation value']\r\n writer = csv.writer(f)\r\n writer.writerow(header) \r\n\r\n # 画像ごとに処理を実施\r\n for image in imgs_list:\r\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\r\n\r\n # 凸包計算\r\n img_hull = convex_hull_image(img)\r\n img_t = np.where(img_hull == False, 0, 255).astype(np.uint8)\r\n\r\n # 凸包の重心計算\r\n mu = cv2.moments(img_t, False)\r\n x,y= int(mu[\"m10\"]/mu[\"m00\"]) , int(mu[\"m01\"]/mu[\"m00\"])\r\n\r\n # 画像内での重心からのばらつき計算\r\n eva = 0\r\n h, w = img.shape[: 2]\r\n for i in range(h):\r\n for j in range(w):\r\n eva =eva + (img[i][j])/255 * ((i-y)**2 + (j-x)**2)\r\n \r\n eva = int(eva)\r\n i = os.path.basename(image)\r\n writer.writerow([i, eva])\r\n\r\nif __name__ == \"__main__\":\r\n dens(\"source\")","repo_name":"yamamura-san/test3","sub_path":"density.py","file_name":"density.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"491659562","text":"import json\nimport os\nimport re\nfrom typing import Optional\n\nfrom django.http import HttpResponse, HttpResponseServerError\n\nimport tools.ffmpeg\nfrom core.interface import Service\nfrom core.model import Result, ErrorResult, Info, Extra\nfrom tools import http_utils, store\nfrom core import config\nfrom core.type import Video\nfrom tools.store import make_path\n\nheaders = {\n \"accept\": \"*/*\",\n \"content-type\": \"json\",\n \"user-agent\": config.user_agent\n}\n\nweb_headers = {\n \"accept\": \"*/*\",\n \"sec-ch-ua\": \"\\\"Not?A_Brand\\\";v=\\\"8\\\", \\\"Chromium\\\";v=\\\"108\\\", \\\"Google Chrome\\\";v=\\\"108\\\"\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": \"\\\"macOS\\\"\",\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": 
\"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"cookie\": config.bilibili_cookie,\n \"user-agent\": config.web_user_agent,\n}\n\nuser_headers = {\n \"Accept\": \"json\",\n \"Sec-Ch-Ua\": \"\\\"Not?A_Brand\\\";v=\\\"8\\\", \\\"Chromium\\\";v=\\\"108\\\", \\\"Google Chrome\\\";v=\\\"108\\\"\",\n \"Sec-Ch-Ua-mobile\": \"?0\",\n \"Sec-Ch-Ua-Platform\": \"\\\"macOS\\\"\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-site\",\n \"Origin\": \"https://www.bilibili.com\",\n \"Cookie\": config.bilibili_cookie,\n \"User-Agent\": config.web_user_agent\n}\n\ndownload_headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"identity;q=1, *;q=0\",\n \"range\": \"bytes=0-\",\n \"sec-fetch-dest\": \"video\",\n \"sec-fetch-mode\": \"no-cors\",\n \"sec-fetch-site\": \"cross-sit\",\n \"referer\": \"https://www.bilibili.com\",\n \"user-agent\": config.user_agent\n}\n\n\nvtype = Video.BILIBILI\n\n\nclass BiliBiliService(Service):\n\n @classmethod\n def get_url(cls, text: str) -> Optional[str]:\n print(text)\n if \"bilibili\" in text:\n urls = re.findall(r'(?<=www\\.bilibili\\.com\\/video\\/).+', text, re.I | re.M)\n if urls:\n return \"https://www.bilibili.com/video/\" + urls[0]\n return None\n\n urls = re.findall(r'(?<=b23\\.tv\\/)\\w+', text, re.I | re.M)\n print(urls)\n if len(urls) == 0:\n return None\n url = \"https://b23.tv/\" + urls[0]\n res = http_utils.get(url, header=headers, redirect=False)\n url = res.headers['location']\n print(url)\n return url\n\n # @classmethod\n # def get_prefix_pattern(cls) -> str:\n # # https://b23.tv/lizymu4\n # return 'www\\.bilibili\\.com\\/video\\/'\n\n @classmethod\n def make_url(cls, index) -> str:\n return index\n\n @classmethod\n def index(cls, url) -> Optional[str]:\n if \"b23.tv\" in url:\n return re.findall(r'(?<=b23\\.tv\\/)\\w+', url, re.I | re.M)[0]\n\n try:\n bvid = re.findall(r'(?<=video\\/)\\w+', url)[0]\n except IndexError:\n return None\n\n p = re.findall(r\"(?<=p=)(\\d)\", url)\n if len(p) == 0:\n return bvid\n else:\n return bvid + '-' + p[0]\n\n @classmethod\n def get_bvid(cls, url) -> Optional[str]:\n try:\n return re.findall(r'(?<=video\\/)\\w+', url)[0]\n except IndexError:\n return None\n\n @classmethod\n def get_info(cls, url: str) -> Result:\n burl = cls.get_url(url)\n if burl is None:\n print('error')\n return ErrorResult.URL_NOT_INCORRECT\n\n video_data = BiliBiliService.get_data(burl)\n\n bvid = video_data['bvid']\n\n res = http_utils.get('https://api.bilibili.com/x/player/pagelist',\n param={'bvid': bvid, 'jsonp': 'jsonp'}, header=headers)\n if http_utils.is_error(res):\n return Result.error(res)\n\n data = json.loads(res.content)\n\n p = re.findall(r\"(?<=p=)(\\d)\", burl)\n if len(p) == 0:\n index = 0\n else:\n index = int(p[0]) - 1\n\n try:\n cid = data['data'][index]['cid']\n except (KeyError, IndexError):\n return ErrorResult.VIDEO_ADDRESS_NOT_FOUNT\n\n res = http_utils.get(url, header=user_headers)\n result = re.findall(r'(?<=', response.content.decode(\"utf-8\"), re.M)\n json = json_parser.loads(data[0])\n\n items = json[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"]\n time_limit = datetime.now() - timedelta(hours=12)\n\n media = []\n\n for i, item in enumerate(items):\n node = item[\"node\"]\n if (datetime.fromtimestamp(int(node[\"taken_at_timestamp\"])) > time_limit\n and node[\"is_video\"] != True):\n media.append({\n \"text\": \"\",\n \"photo\": node[\"display_url\"],\n \"url\": \"http://instagram.com/p/%s\" % 
node[\"shortcode\"]\n });\n\n if (len(media) != 0):\n return media;\n else:\n return None\n\nGROUP_ID = 37862023\nvk = VK()\n\ncelebrities = {\n \"Селена Гомес\": \"selenagomez\",\n \"Зак Эфрон\": \"zacefron\",\n \"Блейк Лавли\": \"blakelively\",\n \"Нина Добрев\": \"nina\",\n \"Крис Пратт\": \"prattprattpratt\",\n \"Эмма Робертс\": \"emmaroberts\",\n \"Лили Колинз\": \"lilyjcollins\",\n \"Бейонсе\": \"beyonce\",\n \"Криштиану Роналду\": \"cristiano\",\n \"Джей Ло\": \"jlo\",\n \"Victoria's Secret\": \"victoriassecret\",\n \"Джастин Тимберлейк\": \"justintimberlake\",\n \"Дэвид Бекхэм\": \"davidbeckham\",\n \"Рианна\": \"badgalriri\",\n \"Марго Робби\": \"margotrobbie\",\n \"Уилл Смит\": \"willsmith\",\n \"Настя Ивлеева\": \"_agentgirl_\",\n \"Меган Фокс\": \"the_native_tiger\",\n \"Дрейк\": \"champagnepapi\",\n \"Дженнифер Лоуренс\": \"jenniferlawrencepx\",\n \"Кайли Дженнер\": \"kyliejenner\",\n \"Джиджи Хадид\": \"gigihadid\",\n \"Белла Хадид\": \"bellahadid\",\n \"Эмили Ратаковски\": \"emrata\",\n \"Роми Стрейд\": \"romeestrijd\",\n \"Кендалл Дженнер\": \"kendalljenner\",\n \"Жозефин Скривер\": \"josephineskriver\",\n \"Сара Сампайо\": \"sarasampaio\",\n \"Ирина Шейк\": \"irinashayk\",\n \"Грейс Элизабет\": \"lovegrace_e\",\n \"Адриана Лима\": \"adrianalima\",\n \"Эльза Хоск\": \"hoskelsa\",\n \"Кара Делевинь\": \"caradelevingne\",\n \"Вика Одинцова\": \"viki_odintcova\",\n \"Алексис Рэн\": \"alexisren\",\n \"Ким Кардашян\": \"kimkardashian\",\n \"Тейлор Хилл\": \"taylor_hill\",\n \"Роузи Хантингтон-Уайтли\": \"rosiehw\",\n \"Джессика Ли Бьюкенен\": \"jessleebuchanan\",\n \"Жасмин Тукс\": \"jastookes\",\n \"Алессандра Амбросио\": \"alessandraambrosio\",\n \"Стелла Максвелл\": \"stellamaxwell\",\n \"Кайя ��ербер\": \"kaiagerber\",\n \"Барбара Палвин\": \"realbarbarapalvin\",\n \"Марта Хант\": \"marthahunt\",\n \"Синди Мелло\": \"cindymello\"\n}\n\n'''\nmessage = \"Лучшие публикации из инстаграмм-аккаунтов селебрити за последние 24 часа 🔥\\n\\n\"\nphotos = []\n'''\n\nfor name, username in celebrities.items():\n\n media = getMediaFromInstagram(username)\n print(media)\n\n if (media != None):\n \n photos = []\n\n for item in media:\n response = requests.get(item[\"photo\"])\n open(\"photo.jpg\", \"wb\").write(response.content)\n photos.append(vk.uploadPhoto(GROUP_ID, \"photo.jpg\", name))\n \n vk.post(-GROUP_ID, \"💣 %s\\n📷 Instagram: %s\" % (name, username), photos)\n\n'''\nif (len(photos) >= 3):\n message += \"#Селебрити@faces\"\n vk.post(-GROUP_ID, message, photos)\n'''\n","repo_name":"alexmustdie/faces","sub_path":"getMediaFromInstagram.py","file_name":"getMediaFromInstagram.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29444406542","text":"#!/usr/bin/env python3\nimport requests, json\ns = requests.Session()\ns.headers['Wanikani-Revision'] = '20170710'\ns.headers['Authorization'] = 'Bearer a2344c7b-6d09-4aca-bd14-1945105c35b4'\n\ndef fetch_paginated(url):\n data = []\n while url:\n print(url)\n stuff = s.get(url).json()\n data += stuff['data']\n url = stuff['pages']['next_url']\n return data\n\ndef dump(stuff, path):\n with open(path, 'w') as fp:\n json.dump(stuff, fp, indent=' ')\ndef do_study_materials():\n data = fetch_paginated('https://api.wanikani.com/v2/study_materials')\n dump(data, 'study_materials.json')\ndef do_subjects():\n data = fetch_paginated('https://api.wanikani.com/v2/subjects')\n by_kind = {}\n for item in data:\n by_kind.setdefault(item['object'], 
[]).append(item)\n\n #open('tmp.json', 'w').write(json.dumps(by_kind))\n\n assert set(by_kind.keys()) == {'radical', 'kanji', 'vocabulary', 'kana_vocabulary'}\n for kind, items in by_kind.items():\n dump(items, f'{kind}.json')\ndo_study_materials()\n","repo_name":"comex/wk","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40252608743","text":"#!/usr/bin/python\n\nfrom double import double\nfrom io import StringIO\nimport pytest\n\nnumber_inputs = StringIO(u'1234\\n')\n\ndef test_double(monkeypatch):\n monkeypatch.setattr('sys.stdin', number_inputs)\n assert double() == 2468\n\nstr_inputs = StringIO(u'abcd\\n')\ndef test_double_str(monkeypatch):\n with pytest.raises(NameError) as e:\n monkeypatch.setattr('sys.stdin', str_inputs)\n result = double()\n assert str(e.value) == \"name 'abcd' is not defined\"\n","repo_name":"srkiNZ84/pytest_intro","sub_path":"test_double.py","file_name":"test_double.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41994800601","text":"import os\nimport numpy as np\nimport pandas as pd\nimport argparse\nfrom train_utils import prepare_datamodule\nfrom data.chexpert_data_module import CheXpert\nfrom data.vindrcxr_data_module import Vindr_CXR\nfrom solvers.resnet_solver import resnet_solver\nfrom solvers.attrinet_solver import task_switch_solver\nimport torchvision.transforms as tfs\nfrom torch.utils.data import DataLoader\nfrom experiment_utils import update_key_value_pairs\nfrom train_utils import to_numpy\nimport matplotlib.pyplot as plt\nimport torch\nfrom train_utils import to_numpy\nfrom tqdm import tqdm\nfrom PIL import Image\n\n\ndef ncc(a,v, zero_norm=True):\n a = a.flatten()\n v = v.flatten()\n if zero_norm:\n a = (a - np.mean(a)) / (np.std(a) * len(a))\n v = (v - np.mean(v)) / np.std(v)\n else:\n a = (a) / (np.std(a) * len(a))\n v = (v) / np.std(v)\n\n return np.correlate(a, v)\n\ndef save_img(img, prefix, path):\n path = os.path.join(path, prefix)\n if \"input\" in prefix:\n plt.imsave(path, img, cmap='gray')\n if \"mask\" in prefix:\n vmax = np.abs(img).flatten().max()\n plt.imsave(path, img, cmap='bwr', vmax=vmax, vmin=-vmax)\n\n\n\n\n\n\n\n\nclass spurious_detection():\n\n def __init__(self, solver, spu_dataloader, norm_dataloader, confounder, threshold, sample_indices_dict, out_dir, attr_method, config):\n self.ratio = 0.1\n self.solver = solver\n self.spu_dataloader = spu_dataloader\n self.norm_dataloader = norm_dataloader\n self.confounder = confounder\n self.threshold = threshold\n self.flip_idx = sample_indices_dict[\"flip_idx\"]\n self.neg_idx = sample_indices_dict[\"all_neg_idx\"]\n self.all_pos_idx = sample_indices_dict[\"all_pos_idx\"]\n self.attr_method = attr_method\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n os.makedirs(out_dir, exist_ok=True)\n self.out_dir = out_dir\n os.makedirs(self.out_dir, exist_ok=True)\n self.label_idx = solver.TRAIN_DISEASES.index(config.contaminated_class)\n self.all_results_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), config.all_results_file)\n self.model_name = config.model + \"_\" + config.dataset # \"resnet_chexpert_Pneumothorax_stripe_degree0.0\"\n\n def get_ncc(self, positive_only, num_samples):\n\n sample_idces = self.flip_idx\n samples_out_dir = os.path.join(self.out_dir, 
\"flip_idx\")\n os.makedirs(samples_out_dir, exist_ok=True)\n\n ncc_values = []\n count = 0\n\n for i in tqdm(range(len(sample_idces))):\n idx = sample_idces[i]\n spu_data = self.spu_dataloader.dataset[int(idx)]\n spu_img = spu_data['img']\n lbl = spu_data['label'].squeeze()\n norm_data = self.norm_dataloader.dataset[int(idx)]\n norm_img = norm_data['img']\n spu_img = torch.from_numpy(spu_img[None])\n norm_img = torch.from_numpy(norm_img[None])\n\n if self.attr_method == \"attrinet\":\n spu_y_pred = self.solver.get_probs(spu_img.to(self.device), self.label_idx)\n spu_p = to_numpy(spu_y_pred)\n spu_attr = self.solver.get_attributes(spu_img, self.label_idx)\n spu_attr = -to_numpy(spu_attr).squeeze()\n norm_y_pred = self.solver.get_probs(norm_img.to(self.device),self.label_idx)\n norm_p = to_numpy(norm_y_pred)\n norm_attr = self.solver.get_attributes(norm_img, self.label_idx)\n norm_attr = -to_numpy(norm_attr).squeeze()\n\n else:\n spu_y_pred_logits = self.solver.model(spu_img.to(self.device))\n spu_y_pred = torch.sigmoid(spu_y_pred_logits).squeeze()\n spu_p = to_numpy(spu_y_pred)\n spu_attr = self.solver.get_attributes(spu_img, self.label_idx, positive_only=positive_only)\n spu_attr = to_numpy(spu_attr).squeeze()\n norm_y_pred_logits = self.solver.model(norm_img.to(self.device))\n norm_y_pred = torch.sigmoid(norm_y_pred_logits).squeeze()\n norm_p = to_numpy(norm_y_pred)\n norm_attr = self.solver.get_attributes(norm_img, self.label_idx, positive_only=positive_only)\n norm_attr = to_numpy(norm_attr).squeeze()\n\n ncc_measurement = ncc(spu_attr, norm_attr)\n count += 1\n ncc_values.append(ncc_measurement)\n\n if count < 20:\n # save some samples for qualitative evaluation\n save_img(to_numpy(spu_img).squeeze(), prefix=str(idx) + '_spu_GT_' + str(lbl.squeeze()) + '_T_' + str(self.threshold.squeeze()) + '_input.png',\n path=samples_out_dir)\n save_img(to_numpy(norm_img).squeeze(), prefix=str(idx) + '_norm_GT_' + str(lbl.squeeze()) + '_T_' + str(self.threshold.squeeze()) + '_input.png',\n path=samples_out_dir)\n save_img(spu_attr.squeeze(), prefix=str(idx) + '_spu_pred: ' + str(spu_p) + '_mask.png', path=samples_out_dir)\n save_img(norm_attr.squeeze(), prefix=str(idx) + '_norm_pred: ' + str(norm_p) + '_mask.png', path=samples_out_dir)\n\n if count >= num_samples:\n break\n ncc_values = np.asarray(ncc_values)\n mean_ncc = np.mean(ncc_values)\n if self.attr_method == \"attrinet\":\n update_key_value_pairs(self.all_results_file, self.model_name, \"explanation_ncc\", float(mean_ncc))\n else:\n update_key_value_pairs(self.all_results_file, self.model_name, self.attr_method +\"_explanation_ncc\", float(mean_ncc))\n print(\"mean_ncc\", mean_ncc)\n\n def get_sensitivity(self, num_samples, positive_only=True):\n sample_idces = self.flip_idx\n count = 0\n sensitivity_values = []\n\n for i in tqdm(range(len(sample_idces))):\n idx = sample_idces[i]\n spu_data = self.spu_dataloader.dataset[int(idx)]\n spu_img = spu_data['img']\n lbl = spu_data['label'].squeeze()\n spu_img = torch.from_numpy(spu_img[None])\n\n if self.attr_method == \"attrinet\":\n spu_y_pred = self.solver.get_probs(spu_img.to(self.device), self.label_idx)\n spu_attr = self.solver.get_attributes(spu_img, self.label_idx)\n spu_attr = -to_numpy(spu_attr).squeeze()\n\n else:\n spu_y_pred_logits = self.solver.model(spu_img.to(self.device))\n spu_y_pred = torch.sigmoid(spu_y_pred_logits).squeeze()\n spu_attr = self.solver.get_attributes(spu_img, self.label_idx, positive_only=positive_only)\n spu_attr = to_numpy(spu_attr).squeeze()\n\n if i < 20:\n 
hitts = self.sstt(spu_attr, confounder=self.confounder, attr_method=self.attr_method, ratio=self.ratio, plot_itsect=True, out_dir=self.out_dir, prefix=str(idx))\n            else:\n                hitts = self.sstt(spu_attr, confounder=self.confounder, attr_method=self.attr_method, ratio=self.ratio, plot_itsect=False, out_dir=None, prefix=None)\n            sensitivity_values.append(hitts)\n            count += 1\n            if count >= num_samples:\n                break\n\n        sensitivity_values = np.asarray(sensitivity_values)\n        mean_sensitivity = np.mean(sensitivity_values)\n        if self.attr_method == \"attrinet\":\n            update_key_value_pairs(self.all_results_file, self.model_name, \"confounder_sensitivity\", float(mean_sensitivity))\n        else:\n            update_key_value_pairs(self.all_results_file, self.model_name, self.attr_method + \"_confounder_sensitivity\", float(mean_sensitivity))\n\n        print(\"mean_sensitivity\", mean_sensitivity)\n\n    def sstt(self, spu_attr, confounder, attr_method, ratio, plot_itsect, out_dir, prefix):\n        # attr_method is one of 'lime', 'GCam', 'GB', 'shap', 'gifsplanation', 'attrinet'\n        # get confounder pixel positions\n        confounder_pos = np.where(confounder == 1)\n        confounder_pos_x = confounder_pos[0]\n        confounder_pos_y = confounder_pos[1]\n        num_founder_pixels = len(confounder_pos_y)\n        confounder_pixels = list(zip(confounder_pos_x, confounder_pos_y))\n        confounder_set = set(confounder_pixels)\n\n        # select the top 10% of pixels with the highest attribution values\n        all_attr_pixel = 320*320\n        num_pixels = int(ratio * all_attr_pixel)\n\n        if attr_method == 'attrinet' or attr_method == 'gifsplanation':\n            pixel_importance = np.absolute(to_numpy(spu_attr.squeeze()))\n        else:\n            pixel_importance = spu_attr\n\n        idcs = np.argsort(pixel_importance.flatten()) # from smallest to biggest\n        idcs = idcs[::-1] # reverse the indices to get the order biggest to smallest\n        idcs = idcs[:num_pixels]\n        # Compute the corresponding masks for deleting pixels in the given order\n        positions = np.array(np.unravel_index(idcs, pixel_importance.shape)).T # first column: h index, second column: w index\n        attri_pos_x = positions[:, 0]\n        attri_pos_y = positions[:, 1]\n        top_attri_pixels = list(zip(attri_pos_x, attri_pos_y))\n        top_attri_set = set(top_attri_pixels)\n        inter_set = confounder_set.intersection(top_attri_set)\n        hitts = len(inter_set)/num_founder_pixels\n        if hitts != 0 and plot_itsect:\n            pixels = [list(item) for item in inter_set]\n            pixels = np.asarray(pixels)\n            background = np.zeros((320, 320))\n            background[pixels[:, 0], pixels[:, 1]] = 255\n            img = Image.fromarray(background)\n            img = img.convert(\"L\")\n            out_path = os.path.join(out_dir, prefix + \"_\" + str(hitts) + \"_hitts.png\")\n            img.save(out_path)\n        return hitts\n\n\ndef str2bool(v):\n    return v.lower() in ('true',)\n\ndef argument_parser():\n    \"\"\"\n    Create a parser with run_experiments arguments.\n    Returns:\n        argparse.ArgumentParser:\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"spurious model/sample analyser.\")\n\n    parser.add_argument('--model', type=str, default='resnet', choices=['resnet', 'attrinet'])\n    parser.add_argument('--dataset', type=str, default='chexpert', choices=['chexpert', 'vindrcxr'])\n    parser.add_argument('--contaminated_class', type=str, default=\"Cardiomegaly\",\n                        choices=[\"Cardiomegaly\", \"Aortic enlargement\"])\n    parser.add_argument('--attr_method', type=str, default='GCam',\n                        help=\"choose the explanation method, can be 'lime', 'GCam', 'GB', 'shap', 'gifsplanation', 'attrinet'\")\n    parser.add_argument('--positive_only', type=str2bool, default=False,\n                        help=\"if True, only select positive attributions, if 
False, keep all attribution\")\n parser.add_argument('--mode', type=str, default='test', choices=['train', 'test'])\n\n parser.add_argument('--contaim_scale', type=int, default=2, choices=[0, 1, 2, 3, 4])\n parser.add_argument('--contaim_type', type=str, default=\"tag\", choices=[\"tag\", \"hyperintensities\", \"obstruction\"])\n\n parser.add_argument('--num_samples', type=int, default=100, choices=[100],\n help=\"number of flipped sample to evaluate on, 100 for short evaluate, 2500 for larger evaluation\")\n\n parser.add_argument(\"--img_size\", default=320,\n type=int, help=\"image size for the data loader.\")\n parser.add_argument(\"--batch_size\", default=1,\n type=int, help=\"Batch size for the data loader.\")\n parser.add_argument('--manual_seed', type=int, default=42, help='set seed')\n parser.add_argument('--use_gpu', type=str2bool, default=True, help='whether to run on the GPU')\n parser.add_argument('--all_results_file', type=str, default=\"all_results.json\", help='dictionary to save all results')\n\n return parser\n\n\ndef get_arguments():\n parser = argument_parser()\n exp_configs = parser.parse_args()\n if exp_configs.attr_method == \"attrinet\":\n # configurations of generator\n exp_configs.image_size = 320\n exp_configs.generator_type = 'stargan'\n exp_configs.deep_supervise = False\n\n # configurations of latent code generator\n exp_configs.n_fc = 8\n exp_configs.n_ones = 20\n exp_configs.num_out_channels = 1\n # configurations of classifiers\n exp_configs.lgs_downsample_ratio = 32\n\n return exp_configs\n\n\ndef prep_solver(exp_configs):\n from models_dict import resnet_model_path_dict, attrinet_model_path_dict\n from data.dataset_params import dataset_dict_chexpert_Cardiomegaly, dataset_dict_vindrcxr_Aortic_enlargement\n exp_configs.dataset += '_' + exp_configs.contaminated_class + '_' + exp_configs.contaim_type + '_' + 'degree' + str(\n exp_configs.contaim_scale) # \"chexpert_Cardiomegaly_tag_degree2\"\n\n if \"chexpert\" in exp_configs.dataset:\n if \"Cardiomegaly\" in exp_configs.dataset:\n dataset_dict = dataset_dict_chexpert_Cardiomegaly[exp_configs.dataset]\n if \"vindrcxr\" in exp_configs.dataset:\n dataset_dict = dataset_dict_vindrcxr_Aortic_enlargement[exp_configs.dataset]\n datamodule = prepare_datamodule(exp_configs, dataset_dict)\n exp_configs.train_diseases = dataset_dict[\"train_diseases\"]\n\n if exp_configs.attr_method == \"attrinet\":\n exp_configs.model_path = attrinet_model_path_dict[exp_configs.dataset]\n data_loader = {}\n data_loader['train_pos'] = None\n data_loader['train_neg'] = None\n data_loader['vis_pos'] = None\n data_loader['vis_neg'] = None\n data_loader['valid'] = None\n data_loader[\"test\"] = None\n solver = task_switch_solver(exp_configs, data_loader=data_loader)\n\n else:\n exp_configs.model_path = resnet_model_path_dict[exp_configs.dataset]\n data_loader = {}\n data_loader[\"train\"] = None\n data_loader[\"valid\"] = None\n data_loader[\"test\"] = None\n solver = resnet_solver(exp_configs, data_loader=data_loader)\n solver.set_explainer(which_explainer=exp_configs.attr_method)\n\n exp_configs.result_dir = os.path.join(exp_configs.model_path, \"miccai23\", \"selected_flip_prediction_samples\", \"new_eval_1615\", exp_configs.attr_method)\n os.makedirs(exp_configs.result_dir, exist_ok=True)\n\n print(\"exp_configs.result_dir: \",exp_configs.result_dir)\n print(\"exp_configs.model_path: \",exp_configs.model_path)\n\n return solver\n\n\n\ndef prep_dataloaders(exp_configs):\n\n from data.contaminate_data_settings import TGT_DATA_ROOT\n 
confounder_dict = {\n \"tag\": os.path.join(os.path.dirname(os.path.abspath(__file__)), \"confounder_masks\", \"tag.txt\"),\n \"hyperintensities\": os.path.join(os.path.dirname(os.path.abspath(__file__)), \"confounder_masks\", \"hyperintensities.txt\"),\n \"obstruction\": os.path.join(os.path.dirname(os.path.abspath(__file__)), \"confounder_masks\", \"obstruction.txt\")\n }\n\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"confounder_masks\", \"tag.txt\")\n\n\n normal_chexpert_Cardiomegaly_root_folders = {\n \"tag\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"tag\", \"degree0\"),\n \"hyperintensities\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"hyperintensities\", \"degree0\"),\n \"obstruction\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"obstruction\", \"degree0\")\n }\n\n normal_vindr_Aortic_enlargement_root_folders = {\n \"tag\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"tag\", \"degree0\"),\n \"hyperintensities\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"hyperintensities\", \"degree0\"),\n \"obstruction\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"obstruction\", \"degree0\")\n\n }\n spu_chexpert_Cardiomegaly_root_folders = {\n \"tag\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"tag\", \"degree4\"),\n \"hyperintensities\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"hyperintensities\", \"degree4\"),\n \"obstruction\": os.path.join(TGT_DATA_ROOT, \"chexpert\", \"Cardiomegaly\", \"obstruction\", \"degree4\")\n }\n\n spu_vindr_Aortic_enlargement_root_folders = {\n \"tag\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"tag\", \"degree4\"),\n \"hyperintensities\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"hyperintensities\", \"degree4\"),\n \"obstruction\": os.path.join(TGT_DATA_ROOT, \"vindr\", \"Aortic enlargement\", \"obstruction\", \"degree4\")\n }\n\n transforms = tfs.Compose([tfs.Resize((exp_configs.img_size, exp_configs.img_size)), tfs.ToTensor()])\n if \"chexpert\" in exp_configs.dataset:\n normal_data_root = normal_chexpert_Cardiomegaly_root_folders[exp_configs.contaim_type]\n spu_data_root = spu_chexpert_Cardiomegaly_root_folders[exp_configs.contaim_type]\n\n if \"vindrcxr\" in exp_configs.dataset:\n normal_data_root = normal_vindr_Aortic_enlargement_root_folders[exp_configs.contaim_type]\n spu_data_root = spu_vindr_Aortic_enlargement_root_folders[exp_configs.contaim_type]\n\n spu_img_dir = os.path.join(spu_data_root, \"test\")\n normal_img_dir = os.path.join(normal_data_root, \"test\")\n spu_csv_path = os.path.join(spu_data_root, \"test_df.csv\")\n normal_csv_path = os.path.join(normal_data_root, \"test_df.csv\")\n spu_df = pd.read_csv(spu_csv_path)\n normal_df = pd.read_csv(normal_csv_path)\n if \"chexpert\" in exp_configs.dataset:\n spu_testset = CheXpert(spu_img_dir, spu_df, exp_configs.train_diseases, transforms=transforms)\n normal_testset = CheXpert(normal_img_dir, normal_df, exp_configs.train_diseases, transforms=transforms)\n if \"vindrcxr\" in exp_configs.dataset:\n spu_testset = Vindr_CXR(image_dir=spu_img_dir, df=spu_df, train_diseases=exp_configs.train_diseases,\n transforms=transforms)\n normal_testset = Vindr_CXR(image_dir=normal_img_dir, df=normal_df,\n train_diseases=exp_configs.train_diseases,\n transforms=transforms)\n\n spu_test_loader = DataLoader(spu_testset, batch_size=exp_configs.batch_size, shuffle=False)\n normal_test_loader = 
DataLoader(normal_testset, batch_size=exp_configs.batch_size, shuffle=False)\n    confounder = np.loadtxt(confounder_dict[exp_configs.contaim_type])\n    return normal_test_loader, spu_test_loader, confounder\n\n\n\n\n\n\ndef main(config):\n\n    solver = prep_solver(config)\n    normal_test_loader, spu_test_loader, confounder = prep_dataloaders(config)\n    threshold_path = os.path.join(config.model_path, \"miccai23\", \"valid\", \"auc_result_dir\", \"best_threshold.txt\")\n    threshold = np.loadtxt(threshold_path)\n    eval_root_dir = os.path.join(config.model_path, \"miccai23\", \"selected_flip_prediction_samples\")\n    flip_idx = np.loadtxt(os.path.join(eval_root_dir, \"flip_idx.txt\")).tolist()\n    all_neg_idx = np.loadtxt(os.path.join(eval_root_dir, \"neg_idx.txt\")).tolist()\n    all_pos_idx = np.loadtxt(os.path.join(eval_root_dir, \"all_pos_idx.txt\")).tolist()\n\n    idx_dict = {\n        \"flip_idx\": flip_idx,\n        \"all_neg_idx\": all_neg_idx,\n        \"all_pos_idx\": all_pos_idx\n    }\n\n    detector = spurious_detection(solver, spu_test_loader, normal_test_loader, confounder, threshold=threshold,\n                                  sample_indices_dict=idx_dict, out_dir=config.result_dir,\n                                  attr_method=config.attr_method, config=config)\n\n    detector.get_ncc(positive_only=False, num_samples=config.num_samples)\n    detector.get_sensitivity(num_samples=config.num_samples, positive_only=True)\n\n\nif __name__ == \"__main__\":\n    params = get_arguments()\n    main(params)\n\n","repo_name":"ss-sun/right-for-the-wrong-reason","sub_path":"spurious_detection_eval.py","file_name":"spurious_detection_eval.py","file_ext":"py","file_size_in_byte":19696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36820520947","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef conv_shift(arr, dx):\n    \"\"\"\n    Shift an array arr by dx \n    like a worse version of np.roll!\n    \"\"\"\n    dx = len(arr)//2 + int(dx) # need to set the impulse position to index from the middle of the array!\n    arr = np.array(arr)\n    shift = np.zeros(arr.shape)\n    shift[dx] = 1\n    longarr = np.concatenate((arr, arr))\n    return np.convolve(longarr, shift, 'same')[len(arr):]\n\n\nif __name__ == \"__main__\":\n\n    x = np.linspace(-5, 5, 201)\n    g = np.exp(- x**2/2)/(np.sqrt(2*np.pi))\n\n\n    plt.figure()\n    plt.title('Convolution shift example')\n    plt.plot(g, label='Input array')\n    plt.plot(conv_shift(g, len(g)//2), label='Shifted array')\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.legend()\n    plt.show()","repo_name":"teolemay/PHYS_512","sub_path":"ProblemSet6/Q1_convolution_shift.py","file_name":"Q1_convolution_shift.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2711326326","text":"# -*- coding: future_fstrings -*-\n\n\"\"\"Interface for the control api d-bus service.\"\"\"\n\nfrom typing import Callable, Any\nimport sys, os\nfrom time import perf_counter\nfrom difflib import get_close_matches\n\nfrom PyQt5.QtCore import pyqtSlot, QObject\nfrom PyQt5.QtDBus import QDBusConnection, QDBusInterface, QDBusReply, QDBusPendingCallWatcher, QDBusPendingReply\n\nfrom debugger import *; dbg\nfrom animate import delay\nimport logging; log = logging.getLogger('Chronos.api')\n\n#Mock out the old API; use production for this one so we can switch over piecemeal.\nUSE_MOCK = False #os.environ.get('USE_CHRONOS_API_MOCK') in ('always', 'web')\nAPI_INTERCALL_DELAY = 0\nAPI_SLOW_WARN_MS = 100\nAPI_TIMEOUT_MS = 5000\n\n\n# Set up d-bus interface. 
Connect to the system buses (mock or production, depending on USE_MOCK). Check everything's working.\nif not QDBusConnection.systemBus().isConnected():\n\tprint(\"Error: Cannot connect to D-Bus. Is D-Bus itself running?\", file=sys.stderr)\n\traise Exception(\"D-Bus Setup Error\")\n\ncameraControlAPI = QDBusInterface(\n\tf\"ca.krontech.chronos.{'control_mock' if USE_MOCK else 'control'}\", #Service\n\tf\"/ca/krontech/chronos/{'control_mock' if USE_MOCK else 'control'}\", #Path\n\tf\"\", #Interface\n\tQDBusConnection.systemBus() )\ncameraVideoAPI = QDBusInterface(\n\tf\"ca.krontech.chronos.{'video_mock' if USE_MOCK else 'video'}\", #Service\n\tf\"/ca/krontech/chronos/{'video_mock' if USE_MOCK else 'video'}\", #Path\n\tf\"\", #Interface\n\tQDBusConnection.systemBus() )\n\ncameraControlAPI.setTimeout(API_TIMEOUT_MS) #Default is -1, which means 25000ms. 25 seconds is too long to go without some sort of feedback, and the only real long-running operation we have - saving - can take upwards of 5 minutes. Instead of setting the timeout to half an hour, we use events which are emitted as the task progresses. One frame (at 15fps) should be plenty of time for the API to respond, and also quick enough that we'll notice any slowness.\ncameraVideoAPI.setTimeout(API_TIMEOUT_MS)\n\nif not cameraControlAPI.isValid():\n\tprint(\"Error: Cannot connect to control D-Bus API at %s. (%s: %s)\" % (\n\t\tcameraControlAPI.service(), \n\t\tcameraControlAPI.lastError().name(), \n\t\tcameraControlAPI.lastError().message(),\n\t), file=sys.stderr)\n\traise Exception(\"D-Bus Setup Error\")\n\nif not cameraVideoAPI.isValid():\n\tprint(\"Error: Cannot connect to video D-Bus API at %s. (%s: %s)\" % (\n\t\tcameraVideoAPI.service(), \n\t\tcameraVideoAPI.lastError().name(), \n\t\tcameraVideoAPI.lastError().message(),\n\t), file=sys.stderr)\n\traise Exception(\"D-Bus Setup Error\")\n\n\n\nclass DBusException(Exception):\n\t\"\"\"Raised when something goes wrong with dbus. Message comes from dbus' msg.error().message().\"\"\"\n\tpass\n\nclass APIException(Exception):\n\t\"\"\"Raised when something goes wrong with dbus. Message comes from dbus' msg.error().message().\"\"\"\n\tpass\n\nclass ControlReply():\n\tdef __init__(self, value=None, errorName=None, message=None):\n\t\tself.value = value\n\t\tself.message = message\n\t\tself.errorName = errorName\n\t\n\tdef unwrap(self):\n\t\tif self.errorName:\n\t\t\traise APIException(self.errorName + ': ' + self.message)\n\t\telse:\n\t\t\treturn self.value\n\n\nclass video():\n\t\"\"\"Call the D-Bus video API, asynchronously.\n\t\t\n\t\tMethods:\n\t\t\t- call(function[, arg1[, arg2[, ...]]])\n\t\t\t\tCall the remote function.\n\t\t\t- get([value[, ...]])\n\t\t\t\tGet the named values from the API.\n\t\t\t- set({key: value[, ...]})\n\t\t\t\tSet the named values in the API.\n\t\t\n\t\tAll methods return an A+ promise-like, in that you use\n\t\t`.then(cb(value))` and `.catch(cb(error))` to get the results\n\t\tof calling the function.\n\t\"\"\"\n\t\n\t_videoEnqueuedCalls = []\n\t_videoCallInProgress = False\n\t_activeCall = None\n\t\n\t@staticmethod\n\tdef _enqueueCallback(pendingCall, coalesce: bool=True): #pendingCall is video.call\n\t\t\"\"\"Enqueue callback. Squash and elide calls to set for efficiency.\"\"\"\n\t\t\n\t\t#Step 1: Will this call actually do anything? Elide it if not.\n\t\tanticipitoryUpdates = False #Emit update signals before sending the update to the API. 
Results in faster UI updates but poorer framerate.\n\t\tif coalesce and pendingCall._args[0] == 'set':\n\t\t\t#Elide this call if it would not change known state.\n\t\t\thasNewInformation = False\n\t\t\tnewItems = pendingCall._args[1].items()\n\t\t\tfor key, value in newItems:\n\t\t\t\tif _camState[key] != value:\n\t\t\t\t\thasNewInformation = True\n\t\t\t\t\tif not anticipitoryUpdates:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t#Update known cam state in advance of state transition.\n\t\t\t\t\tlog.info(f'Anticipating {key} → {value}.')\n\t\t\t\t\t_camState[key] = value\n\t\t\t\t\tfor callback in apiValues._callbacks[key]:\n\t\t\t\t\t\tcallback(value)\n\t\t\tif not hasNewInformation:\n\t\t\t\treturn\n\t\t\n\t\tif coalesce and pendingCall._args[0] == 'playback':\n\t\t\t#Always merge playback states.\n\t\t\t#Take the playback state already enqueued, {}, and overlay the current playback state. (so, {a:1, b:1} + {b:2} = {a:1, b:2})\n\t\t\tassert type(pendingCall._args[1]) is dict, f\"playback() takes a {{key:value}} dict, got {pendingCall._args[1]} of type {type(pendingCall._args[1])}.\"\n\t\t\texistingParams = [call._args[1] for call in video._videoEnqueuedCalls if call._args[0] == 'playback']\n\t\t\tif not existingParams:\n\t\t\t\tvideo._videoEnqueuedCalls += [pendingCall]\n\t\t\telse:\n\t\t\t\t#Update the parameters of the next playback call instead of enqueueing a new call.\n\t\t\t\tfor k, v in pendingCall._args[1].items():\n\t\t\t\t\texistingParams[-1][k] = v\n\t\t\t\t\n\t\t\treturn\n\t\t\n\t\t#Step 2: Is there already a set call pending? (Note that non-set calls act as set barriers; two sets won't get coalesced if a non-set call is between them.)\n\t\tif coalesce and [pendingCall] == video._videoEnqueuedCalls[:1]:\n\t\t\tvideo._videoEnqueuedCalls[-1] = pendingCall\n\t\telse:\n\t\t\tvideo._videoEnqueuedCalls += [pendingCall]\n\t\n\t@staticmethod\n\tdef _startNextCallback():\n\t\t\"\"\"Check for pending callbacks.\n\t\t\t\n\t\t\tIf none are found, simply stop.\n\t\t\t\n\t\t\tNote: Needs to be manually pumped.\n\t\t\"\"\"\n\t\t\n\t\tif video._videoEnqueuedCalls:\n\t\t\tvideo._videoCallInProgress = True\n\t\t\tvideo._videoEnqueuedCalls.pop(0)._startAsyncCall()\n\t\telse:\n\t\t\tvideo._videoCallInProgress = False\n\t\n\t\n\tclass call(QObject):\n\t\t\"\"\"Call the camera video DBus API. First arg is the function name. 
Returns a promise.\n\t\t\n\t\t\tSee http://doc.qt.io/qt-5/qdbusabstractinterface.html#call for details about calling.\n\t\t\tSee https://github.com/krontech/chronos-cli/tree/master/src/api for implementation details about the API being called.\n\t\t\tSee README.md at https://github.com/krontech/chronos-cli/tree/master/src/daemon for API documentation.\n\t\t\"\"\"\n\t\t\n\t\tdef __init__(self, *args, immediate=True):\n\t\t\tassert args, \"Missing call name.\"\n\t\t\t\n\t\t\tsuper().__init__()\n\t\t\t\n\t\t\tself._args = args\n\t\t\tself._thens = []\n\t\t\tself._catches = []\n\t\t\tself._done = False\n\t\t\tself._watcherHolder = None\n\t\t\tself.performance = {\n\t\t\t\t'enqueued': perf_counter(),\n\t\t\t\t'started': 0.,\n\t\t\t\t'finished': 0.,\n\t\t\t\t'handled': 0.,\n\t\t\t}\n\t\t\t\n\t\t\tlog.debug(f'enquing {self}')\n\t\t\tvideo._enqueueCallback(self)\n\t\t\t#log.print(f'current video queue: {video._videoEnqueuedCalls}')\n\t\t\tif not video._videoCallInProgress:\n\t\t\t\t#Don't start multiple callbacks at once, the most recent one will block.\n\t\t\t\tvideo._startNextCallback()\n\t\t\n\t\tdef __eq__(self, other):\n\t\t\t# If a video call sets the same keys as another\n\t\t\t# video call, then it is equal to itself and can\n\t\t\t# be deduplicated as all sets of the same values\n\t\t\t# have the same side effects. (ie, Slider no go\n\t\t\t# fast if me no drop redundant call.)\n\t\t\t# –DDR 2019-05-14\n\t\t\treturn (\n\t\t\t\t'set' == self._args[0] == other._args[0]\n\t\t\t\tand self._args[1].keys() == other._args[1].keys()\n\t\t\t)\n\t\t\n\t\tdef __repr__(self):\n\t\t\treturn f'''video.call({', '.join([repr(x) for x in self._args])})'''\n\t\t\t\n\t\t\n\t\tdef _startAsyncCall(self):\n\t\t\tlog.debug(f'starting async call: {self._args[0]}({self._args[1:]})')\n\t\t\tself.performance['started'] = perf_counter()\n\t\t\tself._watcherHolder = QDBusPendingCallWatcher(\n\t\t\t\tcameraVideoAPI.asyncCallWithArgumentList(self._args[0], self._args[1:])\n\t\t\t)\n\t\t\tself._watcherHolder.finished.connect(self._asyncCallFinished)\n\t\t\tvideo._activeCall = self\n\t\t\t\n\t\t\n\t\tdef _asyncCallFinished(self, watcher):\n\t\t\tlog.debug(f'finished async call: {self}')\n\t\t\tself.performance['finished'] = perf_counter()\n\t\t\tself._done = True\n\t\t\t\n\t\t\treply = QDBusPendingReply(watcher)\n\t\t\ttry:\n\t\t\t\tif reply.isError():\n\t\t\t\t\tif self._catches:\n\t\t\t\t\t\tfor catch in self._catches:\n\t\t\t\t\t\t\tcatch(reply.error())\n\t\t\t\t\telse:\n\t\t\t\t\t\t#This won't do much, but (I'm assuming) most calls simply won't ever fail.\n\t\t\t\t\t\tif reply.error().name() == 'org.freedesktop.DBus.Error.NoReply':\n\t\t\t\t\t\t\traise DBusException(f\"{self} timed out ({API_TIMEOUT_MS}ms)\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise DBusException(\"%s: %s\" % (reply.error().name(), reply.error().message()))\n\t\t\t\telse:\n\t\t\t\t\tvalue = reply.value()\n\t\t\t\t\tfor then in self._thens:\n\t\t\t\t\t\tvalue = then(value)\n\t\t\texcept Exception as e:\n\t\t\t\traise e\n\t\t\tfinally:\n\t\t\t\t#Wait a little while before starting on the next callback.\n\t\t\t\t#This makes the UI run much smoother, and usually the lag\n\t\t\t\t#is covered by the UI updating another few times anyway.\n\t\t\t\t#Note that because each call still lags a little, this\n\t\t\t\t#causes a few dropped frames every time the API is called.\n\t\t\t\t#video._startNextCallback()\n\t\t\t\tdelay(self, API_INTERCALL_DELAY, video._startNextCallback)\n\t\t\t\t\n\t\t\t\tself.performance['handled'] = perf_counter()\n\t\t\t\tif 
self.performance['finished'] - self.performance['started'] > API_SLOW_WARN_MS / 1000:\n\t\t\t\t\tlog.warn(\n\t\t\t\t\t\tf'''slow call: {self} took {\n\t\t\t\t\t\t\t(self.performance['finished'] - self.performance['started'])*1000\n\t\t\t\t\t\t:0.0f}ms/{API_SLOW_WARN_MS}ms. (Total call time was {\n\t\t\t\t\t\t\t(self.performance['handled'] - self.performance['enqueued'])*1000\n\t\t\t\t\t\t:0.0f}ms.)'''\n\t\t\t\t\t)\n\t\t\n\t\tdef then(self, callback):\n\t\t\tassert callable(callback), \"video().then() only accepts a single, callable function.\"\n\t\t\tassert not self._done, \"Can't register new then() callback, call has already been resolved.\"\n\t\t\tself._thens += [callback]\n\t\t\treturn self\n\t\t\n\t\tdef catch(self, callback):\n\t\t\tassert callable(callback), \"video().catch() only accepts a single, callable function.\"\n\t\t\tassert not self._done, \"Can't register new catch() callback, call has already been resolved.\"\n\t\t\tself._catches += [callback]\n\t\t\treturn self\n\t\n\tdef callSync(*args, warnWhenCallIsSlow=True, **kwargs):\n\t\t\"\"\"Call the camera video DBus API. First arg is the function name.\n\t\t\t\n\t\t\tThis is the synchronous version of the call() method. It\n\t\t\tis much slower to call synchronously than asynchronously!\n\t\t\n\t\t\tSee http://doc.qt.io/qt-5/qdbusabstractinterface.html#call for details about calling.\n\t\t\tSee https://github.com/krontech/chronos-cli/tree/master/src/api for implementation details about the API being called.\n\t\t\tSee README.md at https://github.com/krontech/chronos-cli/tree/master/src/daemon for API documentation.\n\t\t\"\"\"\n\t\t\n\t\t#Unwrap D-Bus errors from message.\n\t\tlog.debug(f'video.callSync{tuple(args)}')\n\t\t\n\t\tstart = perf_counter()\n\t\tmsg = QDBusReply(cameraVideoAPI.call(*args, **kwargs))\n\t\tend = perf_counter()\n\t\tif warnWhenCallIsSlow and (end - start > API_SLOW_WARN_MS / 1000):\n\t\t\tlog.warn(f'slow call: video.callSync{tuple(args)} took {(end-start)*1000:.0f}ms/{API_SLOW_WARN_MS}ms.')\n\t\t\n\t\tif msg.isValid():\n\t\t\treturn msg.value()\n\t\telse:\n\t\t\tif msg.error().name() == 'org.freedesktop.DBus.Error.NoReply':\n\t\t\t\traise DBusException(f\"video.callSync{tuple(args)} timed out ({API_TIMEOUT_MS}ms)\")\n\t\t\telse:\n\t\t\t\traise DBusException(\"%s: %s\" % (msg.error().name(), msg.error().message()))\n\t\n\t\n\tdef restart(*_):\n\t\t\"\"\"Helper method to reboot the video pipeline.\n\t\t\t\n\t\t\tSometimes calls do not apply until you restart the daemon, although they should.\n\t\t\tLiterally every use of this function is a bug.\n\t\t\"\"\"\n\t\t\n\t\tos.system('killall -HUP cam-pipeline')\n\n\nclass control():\n\t\"\"\"Call the D-Bus control API, asynchronously.\n\t\t\n\t\tMethods:\n\t\t\t- call(function[, arg1[, arg2[, ...]]])\n\t\t\t\tCall the remote function.\n\t\t\t- get([value[, ...]])\n\t\t\t\tGet the named values from the API.\n\t\t\t- set({key: value[, ...]})\n\t\t\t\tSet the named values in the API.\n\t\t\n\t\tAll methods return an A+ promise-like, in that you use\n\t\t`.then(cb(value))` and `.catch(cb(error))` to get the results\n\t\tof calling the function.\n\t\"\"\"\n\t\n\t_controlEnqueuedCalls = []\n\t_controlCallInProgress = False\n\t_activeCall = None\n\t\n\t@staticmethod\n\tdef _enqueueCallback(pendingCall, coalesce: bool=True): #pendingCall is control.call\n\t\t\"\"\"Enqueue callback. Squash and elide calls to set for efficiency.\"\"\"\n\t\t\n\t\t#Step 1: Will this call actually do anything? 
Elide it if not.\n\t\tanticipitoryUpdates = False #Emit update signals before sending the update to the API. Results in faster UI updates but poorer framerate.\n\t\tif coalesce and pendingCall._args[0] == 'set':\n\t\t\t#Elide this call if it would not change known state.\n\t\t\thasNewInformation = False\n\t\t\tnewItems = pendingCall._args[1].items()\n\t\t\tfor key, value in newItems:\n\t\t\t\tif _camState[key] != value:\n\t\t\t\t\thasNewInformation = True\n\t\t\t\t\tif not anticipitoryUpdates:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t#Update known cam state in advance of state transition.\n\t\t\t\t\tlog.info(f'Anticipating {key} → {value}.')\n\t\t\t\t\t_camState[key] = value\n\t\t\t\t\tfor callback in apiValues._callbacks[key]:\n\t\t\t\t\t\tcallback(value)\n\t\t\tif not hasNewInformation:\n\t\t\t\treturn\n\t\t\n\t\t#Step 2: Is there already a set call pending? (Note that non-set calls act as set barriers; two sets won't get coalesced if a non-set call is between them.)\n\t\tif coalesce and [pendingCall] == control._controlEnqueuedCalls[:1]:\n\t\t\tcontrol._controlEnqueuedCalls[-1] = pendingCall\n\t\telse:\n\t\t\tcontrol._controlEnqueuedCalls += [pendingCall]\n\t\n\t@staticmethod\n\tdef _startNextCallback():\n\t\t\"\"\"Check for pending callbacks.\n\t\t\t\n\t\t\tIf none are found, simply stop.\n\t\t\t\n\t\t\tNote: Needs to be manually pumped.\n\t\t\"\"\"\n\t\t\n\t\tif control._controlEnqueuedCalls:\n\t\t\tcontrol._controlCallInProgress = True\n\t\t\tcontrol._controlEnqueuedCalls.pop(0)._startAsyncCall()\n\t\telse:\n\t\t\tcontrol._controlCallInProgress = False\n\t\n\t\n\tclass call(QObject):\n\t\t\"\"\"Call the camera control DBus API. First arg is the function name. Returns a promise.\n\t\t\n\t\t\tSee http://doc.qt.io/qt-5/qdbusabstractinterface.html#call for details about calling.\n\t\t\tSee https://github.com/krontech/chronos-cli/tree/master/src/api for implementation details about the API being called.\n\t\t\tSee README.md at https://github.com/krontech/chronos-cli/tree/master/src/daemon for API documentation.\n\t\t\"\"\"\n\t\t\n\t\tdef __init__(self, *args, immediate=True):\n\t\t\tassert args, \"Missing call name.\"\n\t\t\t\n\t\t\tsuper().__init__()\n\t\t\t\n\t\t\tself._args = args\n\t\t\tself._thens = []\n\t\t\tself._catches = []\n\t\t\tself._done = False\n\t\t\tself._watcherHolder = None\n\t\t\tself.performance = {\n\t\t\t\t'enqueued': perf_counter(),\n\t\t\t\t'started': 0.,\n\t\t\t\t'finished': 0.,\n\t\t\t\t'handled': 0.,\n\t\t\t}\n\t\t\t\n\t\t\tlog.debug(f'enquing {self}')\n\t\t\tcontrol._enqueueCallback(self)\n\t\t\t#log.print(f'current control queue: {control._controlEnqueuedCalls}')\n\t\t\tif not control._controlCallInProgress:\n\t\t\t\t#Don't start multiple callbacks at once, the most recent one will block.\n\t\t\t\tcontrol._startNextCallback()\n\t\t\n\t\tdef __eq__(self, other):\n\t\t\t# If a control call sets the same keys as another\n\t\t\t# control call, then it is equal to itself and can\n\t\t\t# be deduplicated as all sets of the same values\n\t\t\t# have the same side effects. 
(ie, Slider no go\n\t\t\t# fast if me no drop redundant call.)\n\t\t\t# –DDR 2019-05-14\n\t\t\treturn (\n\t\t\t\t'set' == self._args[0] == other._args[0]\n\t\t\t\tand self._args[1].keys() == other._args[1].keys()\n\t\t\t)\n\t\t\n\t\tdef __repr__(self):\n\t\t\treturn f'''control.call({', '.join([repr(x) for x in self._args])})'''\n\t\t\t\n\t\t\n\t\tdef _startAsyncCall(self):\n\t\t\tlog.debug(f'starting async call: {self._args[0]}({self._args[1:]})')\n\t\t\tself.performance['started'] = perf_counter()\n\t\t\tself._watcherHolder = QDBusPendingCallWatcher(\n\t\t\t\tcameraControlAPI.asyncCallWithArgumentList(self._args[0], self._args[1:])\n\t\t\t)\n\t\t\tself._watcherHolder.finished.connect(self._asyncCallFinished)\n\t\t\tcontrol._activeCall = self\n\t\t\t\n\t\t\n\t\tdef _asyncCallFinished(self, watcher):\n\t\t\tlog.debug(f'finished async call: {self}')\n\t\t\tself.performance['finished'] = perf_counter()\n\t\t\tself._done = True\n\t\t\t\n\t\t\treply = QDBusPendingReply(watcher)\n\t\t\ttry:\n\t\t\t\tif reply.isError():\n\t\t\t\t\tif self._catches:\n\t\t\t\t\t\terror = reply.error()\n\t\t\t\t\t\tfor catch in self._catches:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\terror = catch(error)\n\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\terror = e\n\t\t\t\t\telse:\n\t\t\t\t\t\t#This won't do much, but (I'm assuming) most calls simply won't ever fail.\n\t\t\t\t\t\tif reply.error().name() == 'org.freedesktop.DBus.Error.NoReply':\n\t\t\t\t\t\t\traise DBusException(f\"{self} timed out ({API_TIMEOUT_MS}ms)\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise DBusException(\"%s: %s\" % (reply.error().name(), reply.error().message()))\n\t\t\t\telse:\n\t\t\t\t\tvalue = reply.value()\n\t\t\t\t\tfor then in self._thens:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tvalue = then(value)\n\t\t\t\t\t\texcept Exception as error:\n\t\t\t\t\t\t\tif self._catches:\n\t\t\t\t\t\t\t\tfor catch in self._catches:\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\terror = catch(error)\n\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\terror = e\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\traise e\n\t\t\texcept Exception as e:\n\t\t\t\traise e\n\t\t\tfinally:\n\t\t\t\t#Wait a little while before starting on the next callback.\n\t\t\t\t#This makes the UI run much smoother, and usually the lag\n\t\t\t\t#is covered by the UI updating another few times anyway.\n\t\t\t\t#Note that because each call still lags a little, this\n\t\t\t\t#causes a few dropped frames every time the API is called.\n\t\t\t\tdelay(self, API_INTERCALL_DELAY, control._startNextCallback)\n\t\t\t\t\n\t\t\t\tself.performance['handled'] = perf_counter()\n\t\t\t\tif self.performance['finished'] - self.performance['started'] > API_SLOW_WARN_MS / 1000:\n\t\t\t\t\tlog.warn(\n\t\t\t\t\t\tf'''slow call: {self} took {\n\t\t\t\t\t\t\t(self.performance['finished'] - self.performance['started'])*1000\n\t\t\t\t\t\t:0.0f}ms/{API_SLOW_WARN_MS}ms. 
(Total call time was {\n\t\t\t\t\t\t\t(self.performance['handled'] - self.performance['enqueued'])*1000\n\t\t\t\t\t\t:0.0f}ms.)'''\n\t\t\t\t\t)\n\t\t\n\t\tdef then(self, callback):\n\t\t\tassert callable(callback), \"control().then() only accepts a single, callable function.\"\n\t\t\tassert not self._done, \"Can't register new then() callback, call has already been resolved.\"\n\t\t\tself._thens += [callback]\n\t\t\treturn self\n\t\t\n\t\tdef catch(self, callback):\n\t\t\tassert callable(callback), \"control().catch() only accepts a single, callable function.\"\n\t\t\tassert not self._done, \"Can't register new catch() callback, call has already been resolved.\"\n\t\t\tself._catches += [callback]\n\t\t\treturn self\n\t\n\tdef callSync(*args, warnWhenCallIsSlow=True, **kwargs):\n\t\t\"\"\"Call the camera control DBus API. First arg is the function name.\n\t\t\t\n\t\t\tThis is the synchronous version of the call() method. It\n\t\t\tis much slower to call synchronously than asynchronously!\n\t\t\n\t\t\tSee http://doc.qt.io/qt-5/qdbusabstractinterface.html#call for details about calling.\n\t\t\tSee https://github.com/krontech/chronos-cli/tree/master/src/api for implementation details about the API being called.\n\t\t\tSee README.md at https://github.com/krontech/chronos-cli/tree/master/src/daemon for API documentation.\n\t\t\"\"\"\n\t\t\n\t\t#Unwrap D-Bus errors from message.\n\t\tlog.debug(f'control.callSync{tuple(args)}')\n\t\t\n\t\tstart = perf_counter()\n\t\tmsg = QDBusReply(cameraControlAPI.call(*args, **kwargs))\n\t\tend = perf_counter()\n\t\tif warnWhenCallIsSlow and (end - start > API_SLOW_WARN_MS / 1000):\n\t\t\tlog.warn(f'slow call: control.callSync{tuple(args)} took {(end-start)*1000:.0f}ms/{API_SLOW_WARN_MS}ms.')\n\t\t\t\n\t\tif msg.isValid():\n\t\t\treturn msg.value()\n\t\telse:\n\t\t\tif msg.error().name() == 'org.freedesktop.DBus.Error.NoReply':\n\t\t\t\traise DBusException(f\"control.callSync{tuple(args)} timed out ({API_TIMEOUT_MS}ms)\")\n\t\t\telse:\n\t\t\t\traise DBusException(\"%s: %s\" % (msg.error().name(), msg.error().message()))\n\n\t\n\n\ndef getSync(keyOrKeys):\n\t\"\"\"Call the camera control DBus get method.\n\t\n\t\tConvenience method for `control('get', [value])[0]`.\n\t\t\n\t\tAccepts key or [key, …], where keys are strings.\n\t\t\n\t\tReturns value or {key:value, …}, respectively.\n\t\t\n\t\tSee control's `availableKeys` for a list of valid inputs.\n\t\"\"\"\n\t\n\tvalueList = control.callSync('get',\n\t\t[keyOrKeys] if isinstance(keyOrKeys, str) else keyOrKeys )\n\treturn valueList[keyOrKeys] if isinstance(keyOrKeys, str) else valueList\n\ndef get(keyOrKeys):\n\t\"\"\"Call the camera control DBus get method.\n\t\n\t\tConvenience method for `control('get', [value])[0]`.\n\t\t\n\t\tAccepts key or [key, …], where keys are strings.\n\t\t\n\t\tReturns value or {key:value, …}, respectively.\n\t\t\n\t\tSee control's `availableKeys` for a list of valid inputs.\n\t\"\"\"\n\t\n\treturn control.call(\n\t\t'get', [keyOrKeys] if isinstance(keyOrKeys, str) else keyOrKeys\n\t).then(lambda valueList:\n\t\tvalueList[keyOrKeys] if isinstance(keyOrKeys, str) else valueList\n\t)\n\ndef setSync(*args):\n\t\"\"\"Call the camera control DBus set method.\n\t\t\n\t\tAccepts {str: value, ...} or a key and a value.\n\t\tReturns either a map of set values or the set\n\t\t\tvalue, if the second form was used.\n\t\"\"\"\n\t\n\tif len(args) == 1:\n\t\treturn control.callSync('set', *args)\n\telif len(args) == 2:\n\t\treturn control.callSync('set', 
{args[0]:args[1]})[args[0]]\n\telse:\n\t\traise ValueError('bad args')\n\n\n\ndef set(*args):\n\t\"\"\"Call the camera control DBus set method.\n\t\t\n\t\tAccepts {str: value, ...} or a key and a value.\n\t\tReturns either a map of set values or the set\n\t\t\tvalue, if the second form was used.\n\t\"\"\"\n\t\n\tlog.debug(f'simple set call: {args}')\n\tif len(args) == 1:\n\t\treturn control.call('set', *args)\n\telif len(args) == 2:\n\t\treturn control.call(\n\t\t\t'set', {args[0]:args[1]}\n\t\t).then(lambda valueDict: \n\t\t\tvalueDict[args[0]]\n\t\t)\n\telse:\n\t\traise ValueError('bad args')\n\n\n\n\n\n# State cache for observe(), so it doesn't have to query the status of a variable on each subscription.\n# Since this often crashes during development, the following line can be run to try getting each variable independently.\n# for key in [k for k in control.callSync('availableKeys') if k not in {'dateTime', 'externalStorage'}]: print('getting', key); control.callSync('get', [key])\n__badKeys = {} #set of blacklisted keys - useful for when one is unretrievable during development.\n_camState = control.callSync('get', [\n\tkey\n\tfor key in control.callSync('availableKeys')\n\tif key not in __badKeys\n], warnWhenCallIsSlow=False)\nif not _camState:\n\traise Exception(\"Cache failed to populate. This indicates the get call is not working.\")\n_camState['error'] = '' #Last error is reported inline sometimes.\nif 'videoSegments' not in _camState:\n\tlog.warn('videoSegments not found in availableKeys (pychronos/issues/31)')\n\t_camState['videoSegments'] = []\nif 'videoZoom' not in _camState:\n\tlog.warn('videoZoom not found in availableKeys (pychronos/issues/52)')\n\t_camState['videoZoom'] = 1\n_camStateAge = {k:0 for k,v in _camState.items()}\n\nclass APIValues(QObject):\n\t\"\"\"Wrapper class for subscribing to API values in the chronos API.\"\"\"\n\t\n\tdef __init__(self):\n\t\tsuper(APIValues, self).__init__()\n\t\t\n\t\t#The .connect call freezes if we don't do this, or if we do this twice.\n\t\tQDBusConnection.systemBus().registerObject(\n\t\t\tf\"/ca/krontech/chronos/{'control_mock_hack' if USE_MOCK else 'control_hack'}\", \n\t\t\tself,\n\t\t)\n\t\t\n\t\tself._callbacks = {value: [] for value in _camState}\n\t\tself._callbacks['all'] = [] #meta, watch everything\n\t\t\n\t\tQDBusConnection.systemBus().connect(\n\t\t\tf\"ca.krontech.chronos.{'control_mock' if USE_MOCK else 'control'}\", \n\t\t\tf\"/ca/krontech/chronos/{'control_mock' if USE_MOCK else 'control'}\",\n\t\t\tf\"\",\n\t\t\t'notify', \n\t\t\tself.__newKeyValue,\n\t\t)\n\t\n\tdef observe(self, key, callback):\n\t\t\"\"\"Add a function to get called when a value is updated.\"\"\"\n\t\tassert callable(callback), f\"Callback is not callable. (Expected function, got {callback}.)\"\n\t\tassert key in self._callbacks, f\"Unknown value, '{key}', to observe.\\n\\nAvailable keys are: \\n{chr(10).join(self._callbacks.keys())}\\n\\nDid you mean to observe '{(get_close_matches(key, self._callbacks.keys(), n=1) or ['???'])[0]}' instead of '{key}'?\\n\"\n\t\tself._callbacks[key].append(callback)\n\t\n\tdef unobserve(self, key, callback):\n\t\t\"\"\"Stop a function from getting called when a value is updated.\"\"\"\n\t\tassert callable(callback), f\"Callback is not callable. 
(Expected function, got {callback}.)\"\n\t\tself._callbacks[key].remove(callback)\n\t\n\tdef __newValueIsEnqueued(self, key):\n\t\treturn True in [\n\t\t\tkey in call._args[1]\n\t\t\tfor call in control._controlEnqueuedCalls\n\t\t\tif call._args[0] == 'set'\n\t\t]\n\t\n\t@pyqtSlot('QDBusMessage')\n\tdef __newKeyValue(self, msg):\n\t\t\"\"\"Update _camState and invoke any registered observers.\"\"\"\n\t\tnewItems = msg.arguments()[0].items()\n\t\tlog.info(f'Received new information. {msg.arguments()[0] if len(str(msg.arguments()[0])) <= 45 else chr(10)+prettyFormat(msg.arguments()[0])}')\n\t\tfor key, value in newItems:\n\t\t\tif _camState[key] != value and not self.__newValueIsEnqueued(key):\n\t\t\t\t_camState[key] = value\n\t\t\t\t_camStateAge[key] += 1\n\t\t\t\tfor callback in self._callbacks[key]:\n\t\t\t\t\tcallback(value)\n\t\t\t\tfor callback in self._callbacks['all']:\n\t\t\t\t\tcallback(key, value)\n\t\t\telse:\n\t\t\t\tlog.info(f'Ignoring {key} → {value}, stale.')\n\t\n\tdef get(self, key):\n\t\treturn _camState[key]\n\napiValues = APIValues()\ndel APIValues\n\n\ndef observe(name: str, callback: Callable[[Any], None]) -> None:\n\t\"\"\"Observe changes in a state value.\n\t\n\t\tArgs:\n\t\t\tname: ID of the state variable. \"exposure\", \"focusPeakingColor\", etc.\n\t\t\tcallback: Function called when the state updates and upon subscription.\n\t\t\t\tCalled with one parameter, the new value. Called when registered\n\t\t\t\tand when the value updates.\n\t\t\n\t\tNote: Some frequently updated values (~> 10/sec) are only available via\n\t\t\tpolling due to flooding concerns. They can not be observed, as they're\n\t\t\tassumed to *always* be changed. See the API docs for more details.\n\t\t\n\t\t\n\t\tRationale:\n\t\tIt is convenient and less error-prone if we only have one callback that\n\t\thandles the initialization and update of values. The API provides separate\n\t\tinitialization and update methods, so we'll store the initialization and\n\t\tuse it to perform the initial call to the observe() callback.\n\t\t\n\t\tIn addition, this means we only have to query the initial state once,\n\t\tretrieving a blob of all the data available, rather than retrieving each\n\t\tkey one syscall at a time as we instantiate each Qt control.\n\t\"\"\"\n\t\n\tassert callable(callback), f\"Callback is not callable. (Expected function, got {callback}.)\"\n\tapiValues.observe(name, callback)\n\tcallback(apiValues.get(name))\n\n\ndef observe_future_only(name: str, callback: Callable[[Any], None]) -> None:\n\t\"\"\"Like `observe`, but without the initial callback when observing.\n\t\n\t\tUseful when `observe`ing a derived value, which observe can't deal with yet.\n\t\"\"\"\n\t\n\tassert callable(callback), f\"Callback is not callable. 
(Expected function, got {callback}.)\"\n\tapiValues.observe(name, callback)\n\n\nunobserve = apiValues.unobserve\n\n\n\nclass Signal(QObject):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\tself._signalObservers = {\n\t\t\t'sof': [], #Use lists here to preserve order of callbacks.\n\t\t\t'eof': [],\n\t\t\t'segment': [],\n\t\t}\n\t\t\n\t\t\n\t\t#The .connect call freezes if we don't do this, or if we do this twice.\n\t\tQDBusConnection.systemBus().registerObject(\n\t\t\tf\"/ca/krontech/chronos/{'video_mock_hack' if USE_MOCK else 'video_hack'}\", \n\t\t\tself,\n\t\t)\n\t\t\n\t\tfor signal_ in self._signalObservers:\n\t\t\tQDBusConnection.systemBus().connect(\n\t\t\t\tf\"ca.krontech.chronos.{'video_mock' if USE_MOCK else 'video'}\", \n\t\t\t\tf\"/ca/krontech/chronos/{'video_mock' if USE_MOCK else 'video'}\",\n\t\t\t\tf\"\",\n\t\t\t\tsignal_, \n\t\t\t\tgetattr(self, f'_{type(self).__name__}__{signal_}')\n\t\t\t)\n\t\n\t\n\t#Sort of a reverse trampoline, needed because callbacks must be decorated.\n\t@pyqtSlot('QDBusMessage')\n\tdef __sof(self, msg):\n\t\tlog.debug(f'''video signal: sof ({len(self._signalObservers['sof'])} handlers)''')\n\t\tself.__invokeCallbacks('sof', *msg.arguments())\n\t@pyqtSlot('QDBusMessage')\n\tdef __eof(self, msg):\n\t\tlog.debug(f'''video signal: eof ({len(self._signalObservers['eof'])} handlers)''')\n\t\tself.__invokeCallbacks('eof', *msg.arguments())\n\t@pyqtSlot('QDBusMessage')\n\tdef __segment(self, msg):\n\t\tlog.debug(f'''video signal: segment ({len(self._signalObservers['segment'])} handlers)''')\n\t\tself.__invokeCallbacks('segment', *msg.arguments())\n\t\n\tdef __invokeCallbacks(self, signal, data):\n\t\tfor callback in self._signalObservers[signal]:\n\t\t\tcallback(data)\n\t\n\t\n\tdef observe(self, signal: str, handler: Callable[[Any], None]) -> None:\n\t\t\"\"\"Add a function to get called when a D-BUS signal is emitted.\"\"\"\n\t\tassert callable(handler), f\"Handler is not callable. (Expected function, got {handler}.)\"\n\t\tself._signalObservers[signal].append(handler)\n\t\n\tdef unobserve(self, signal: str, handler: Callable[[Any], None]) -> None:\n\t\t\"\"\"Stop a function from getting called when a D-BUS signal is emitted.\"\"\"\n\t\tassert callable(handler), f\"Handler is not callable. (Expected function, got {handler}.)\"\n\t\tself._signalObservers[signal].remove(handler)\nsignal = Signal()\ndel Signal\n\n\n\n#Perform self-test if launched as a standalone.\nif __name__ == '__main__':\n\tfrom PyQt5.QtCore import QCoreApplication\n\timport signal as sysSignal\n\t\n\tapp = QCoreApplication(sys.argv)\n\t\n\t#Quit on ctrl-c.\n\tsysSignal.signal(sysSignal.SIGINT, sysSignal.SIG_DFL)\n\t\n\tprint(\"Self-test: Retrieve exposure period.\")\n\tprint(f\"Exposure is {get('exposurePeriod')}ns.\")\n\tprint(\"Control API self-test passed. 
Goodbye!\")\n\t\n\tsys.exit(0)","repo_name":"krontech/chronos-web-interface","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":28921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70341120330","text":"def fn(a,i,n,key):\n if i==n:\n return -1\n if a[i]==key:\n return i\n return fn(a,i+1,n,key)\nlist1=list(map(int,input().split()))\nn=len(list1)\nkey=int(input())\nc=fn(list1,0,n,key)\nif c==-1:\n print(\"No is not present\")\nelse:\n print(c)\n","repo_name":"tinkalkumar007/Competitive_Coding","sub_path":"linearSearch_rcrsn.py","file_name":"linearSearch_rcrsn.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21952014190","text":"from re import template\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views import View\nfrom .models import Job, field_choices, field_choices_dict\nfrom django.urls import reverse, reverse_lazy\nfrom django.http import Http404, HttpResponseRedirect\nfrom .forms import JobCreationForm\n# Create your views here.\n\nclass index(View):\n def get(self, request):\n return render(request, \"jobs/index.html\", {'field_choices' : field_choices})\n \nclass listJobs(View):\n \n def get(self, request, *args, **kwargs):\n \n if kwargs['field'] not in field_choices_dict.keys():\n raise Http404('
404: The page cannot be found.
')\n\n filtered_jobs = Job.objects.filter(field = kwargs['field']) \n\n return render(request, \"jobs/category_wise_list.html\", {\n 'field_choices' : field_choices,\n 'field_chosen' : field_choices_dict[kwargs['field']],\n 'joblist' : filtered_jobs\n })\n\nclass jobDetail(View):\n def get(self, request, *args, **kwargs):\n job = get_object_or_404(Job, pk = kwargs['id'])\n user = request.user\n type_user = None\n if user == job.recruiter:\n type_user = 'recruiter'\n elif job.applicants.filter(pk = user.pk).exists():\n type_user = 'applicant'\n else: \n type_user = 'neither'\n return render(request, 'jobs/job_detail.html', {\n 'job' : job,\n 'type_user' : type_user,\n 'field_choices' : field_choices\n })\nclass jobApply(View):\n def get(self, request, *args, **kwargs):\n user = request.user\n job = get_object_or_404(Job, pk = kwargs['id'])\n if user.is_authenticated:\n if user != job.recruiter:\n job.applicants.add(user)\n type_user = None\n if user == job.recruiter:\n type_user = 'recruiter'\n elif job.applicants.filter(pk = user.pk).exists():\n type_user = 'applicant'\n else: \n type_user = 'neither'\n \n return render(request, \"jobs/apply.html\", {\n 'type_user' : type_user\n })\n else: \n raise Http404\n\n \nclass createJob(View):\n def get(self, request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse('users:login'))\n form = JobCreationForm()\n return render(request, 'jobs/create_job.html', {\n 'form' : form\n })\n def post(self, request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse('users:login'))\n form = JobCreationForm(request.POST)\n if not form.is_valid():\n return render(request, 'jobs/create_job.html', {\n 'form' : form\n })\n new_job = form.save(commit = False)\n new_job.recruiter = request.user\n new_job.save()\n return HttpResponseRedirect(reverse_lazy('users:recruitment'))\n\nclass companyUpdate(View):\n def get(self, request):\n form = JobCreationForm()\n return render(request, 'jobs/company_update.html', {\n 'form' : form\n })","repo_name":"amayank7/django-job-portal","sub_path":"jobs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21897942412","text":"#!/usr/bin/env python3\n\"\"\"Script to set up the Volkszaehler API wrapper.\"\"\"\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"volkszaehler\",\n version=\"0.4.0\",\n description=\"Python Wrapper for interacting with the Volkszahler API.\",\n long_description=long_description,\n url=\"https://github.com/home-assistant-ecosystem/python-volkszaehler\",\n download_url=\"https://github.com/home-assistant-ecosystem/python-volkszaehler/releases\",\n author=\"Fabian Affolter\",\n author_email=\"fabian@affolter-engineering.ch\",\n license=\"MIT\",\n install_requires=[\n \"aiohttp>=3.8.4,<4\",\n \"async_timeout>4,<5\",\n ],\n packages=[\"volkszaehler\"],\n python_requires=\">=3.9\",\n zip_safe=True,\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: 
POSIX\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Utilities\",\n ],\n)\n","repo_name":"home-assistant-ecosystem/python-volkszaehler","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4950503844","text":"from datetime import datetime\nfrom parser_india import India_get_data\nimport json\nimport pandas as pd\n\n# Ссылки на данные импорта и экспорта\nURL_IMPORT = 'https://tradestat.commerce.gov.in/meidb/cntcomq.asp?ie=i'\nURL_EXPORT = 'https://tradestat.commerce.gov.in/meidb/cntcomq.asp?ie=e'\n\n# Словарь с порядковым номером месяца\nwith open('month_number.json', 'r') as fl:\n d = json.load(fl)\n\n# Словарь с конфигурацией запроса\n# 'export', 'import' 'radioqty' 'radiousd'\ndict_config = {\n 'type_flow': 'export',\n 'url_flow': URL_EXPORT,\n 'year': 2023,\n 'month_number': d,\n 'key_unit_value': 'radiousd',\n 'key_unit_qty': 'radioqty'\n}\n\n# Внести только те названия, данные по которым нужно получить\n# Пример со страной: 'ALGERIA'\n# Пример с месяцем: 'FEB'\nneeded_country_list = []\nneed_month_list = []\n\n\n# Валидация пропущенных значений\ndef bol(x, y):\n if x == 0 and y == 0:\n return 'full_none'\n else:\n return 'not_none'\n\n\n# Создаем экземпляр класса\nneed_entries = India_get_data(dict_config['type_flow'], dict_config['url_flow'], dict_config['year'], d)\n\n# Получаем нужные страны и месяцы\nlst_country = {k: v for k, v in need_entries.get_country_option().items() if v in needed_country_list} \\\n if needed_country_list else need_entries.get_country_option()\nlst_month = {k: v for k, v in need_entries.get_month_option().items() if v in need_month_list} \\\n if need_month_list else need_entries.get_month_option()\n\n# Заготовка под итоговый датасет\ndf_void = pd.DataFrame()\n\n# Обходим все страны и месяцы\nfor country_option, country_value in lst_country.items():\n start = datetime.now()\n print(f'Загружаем {country_value}')\n for month_option, month_value in lst_month.items():\n print(f'Месяц {month_value}')\n # Формируем датафрейм для trade_value\n df_value = India_get_data(dict_config['type_flow'], dict_config['url_flow'], dict_config['year'],\n dict_config['month_number'], key_unit=dict_config['key_unit_value']).build_df_value(\n month_option, month_value, country_option, country_value)\n # Формируем датафрейм для qty\n df_qty = India_get_data(dict_config['type_flow'], dict_config['url_flow'], dict_config['year'],\n dict_config['month_number'], key_unit=dict_config['key_unit_qty']).build_df_qty(\n month_option, country_option, country_value)\n\n # Собираем полученные датафреймы в один\n try:\n merge_ff = df_value.merge(df_qty, on=['commodity_code', 'year', 'name_country_source'])\n merge_ff.fillna(0, inplace=True)\n merge_ff['bool_border'] = merge_ff.apply(lambda x: bol(x.trade_value, x.qty), axis=1)\n merge_ff = merge_ff.query('bool_border != \"full_none\"')\n\n df_void = pd.concat((df_void, merge_ff))\n except KeyError:\n print(f'Нет данный в {country_value} месяца {month_value}')\n print(f'Выгрузка данных по {country_value} составила {round((datetime.now() - start).total_seconds(), 2)} секунд')\n\n# Сохраняем данные в файл\ndf_void.to_csv(f'india_{dict_config[\"type_flow\"]}.csv', 
index=False)\n","repo_name":"osulel12/India_parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8953821028","text":"from formalchemy import config\nfrom sqlalchemy.orm import Query, class_mapper\nfrom sqlalchemy.exceptions import InvalidRequestError # 0.4 support\nimport compiler\n\n__all__ = ['stringify', 'normalized_options', '_pk', '_pk_one_column',\n 'simple_eval']\n\n# see http://code.activestate.com/recipes/364469/ for explanation.\n# 2.6 provides ast.literal_eval, but requiring 2.6 is a bit of a stretch for now\nclass _SafeEval(object):\n def visit(self, node,**kw):\n cls = node.__class__\n meth = getattr(self, 'visit' + cls.__name__, self.default)\n return meth(node, **kw)\n \n def default(self, node, **kw):\n for child in node.getChildNodes():\n return self.visit(child, **kw)\n \n visitExpression = default\n \n def visitName(self, node, **kw):\n if node.name in ['True', 'False', 'None']:\n return eval(node.name)\n\n def visitConst(self, node, **kw):\n return node.value\n\n def visitTuple(self,node, **kw):\n return tuple(self.visit(i) for i in node.nodes)\n \n def visitList(self,node, **kw):\n return [self.visit(i) for i in node.nodes]\n\ndef simple_eval(source):\n \"\"\"like 2.6's ast.literal_eval, but only does constants, lists, and tuples, for serialized pk eval\"\"\"\n if source == '':\n return None\n walker = _SafeEval()\n ast = compiler.parse(source, 'eval')\n return walker.visit(ast)\n\n\ndef stringify(k, null_value=u''):\n if k is None:\n return null_value\n if isinstance(k, str):\n return unicode(k, config.encoding)\n elif isinstance(k, unicode):\n return k\n elif hasattr(k, '__unicode__'):\n return unicode(k)\n else:\n return unicode(str(k), config.encoding)\n\ndef _pk_one_column(instance, column):\n try:\n attr = getattr(instance, column.key)\n except AttributeError:\n # FIXME: this is not clean but the only way i've found to retrieve the\n # real attribute name of the primary key.\n # This is needed when you use something like:\n # id = Column('UGLY_NAMED_ID', primary_key=True)\n # It's a *really* needed feature\n cls = instance.__class__\n for k in instance._sa_class_manager.keys():\n props = getattr(cls, k).property\n if hasattr(props, 'columns'):\n if props.columns[0] is column:\n attr = getattr(instance, k)\n break\n return attr\n\ndef _pk(instance):\n # Return the value of this instance's primary key, suitable for passing to Query.get(). \n # Will be a tuple if PK is multicolumn.\n try:\n columns = class_mapper(type(instance)).primary_key\n except InvalidRequestError:\n return None\n if len(columns) == 1:\n return _pk_one_column(instance, columns[0])\n return tuple([_pk_one_column(instance, column) for column in columns])\n\n\n\ndef query_options(L):\n \"\"\"\n Return a list of tuples of `(item description, item pk)`\n for each item in the iterable L, where `item description`\n is the result of str(item) and `item pk` is the item's primary key.\n \"\"\"\n return [(stringify(item), _pk(item)) for item in L]\n\n\ndef normalized_options(options):\n \"\"\"\n If `options` is an SA query or an iterable of SA instances, it will be\n turned into a list of `(item description, item value)` pairs. 
Otherwise, a\n copy of the original options will be returned with no further validation.\n \"\"\"\n if isinstance(options, Query):\n options = options.all()\n if callable(options):\n return options\n i = iter(options)\n try:\n first = i.next()\n except StopIteration:\n return []\n try:\n class_mapper(type(first))\n except:\n return list(options)\n return query_options(options)\n\n","repo_name":"abourget/formalchemy-abourget","sub_path":"formalchemy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"73278577609","text":"import sys\nimport os.path\nimport arrow\nfrom dateutil import tz\nimport numpy as np\nfrom bark.io.rhd.load_intan_rhd_format import read_data\nfrom bark import create_entry, write_metadata\n\n\ndef bark_rhd_to_entry():\n import argparse\n default_max_gaps = 10\n p = argparse.ArgumentParser(\n description=\"\"\"Create a Bark entry from RHD files\n RHD files should be contiguous in time.\n\n An error is raised if the RHD files do not all have the same\n channels recorded.\n \"\"\")\n p.add_argument(\"rhdfiles\", help=\"RHD file(s) to convert\", nargs=\"+\")\n p.add_argument(\"-o\", \"--out\", help=\"name of bark entry\")\n p.add_argument(\"-a\",\n \"--attributes\",\n action='append',\n type=lambda kv: kv.split(\"=\"),\n dest='keyvalues',\n help=\"extra metadata in the form of KEY=VALUE\")\n p.add_argument(\n \"-t\",\n \"--timestamp\",\n help=\"\"\"format: YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS.S, if left unspecified\n the timestamp will be inferred from the filename of the\n first RHD file.\"\"\")\n p.add_argument('--timezone',\n help=\"timezone of timestamp, \\\n default: America/Chicago\",\n default='America/Chicago')\n p.add_argument(\n \"-p\",\n \"--parents\",\n help=\"No error if already exists, new meta-data written, \\\n and datasets will be overwritten.\",\n action=\"store_true\")\n p.add_argument(\n \"-g\",\n \"--maxgaps\",\n help=\"Maximum allowable gaps in continuous data, default: {}.\"\n .format(default_max_gaps),\n type=int,\n default=default_max_gaps)\n args = p.parse_args()\n attrs = dict(args.keyvalues) if args.keyvalues else {}\n check_exists(args.rhdfiles)\n rhds_to_entry(args.rhdfiles, args.out, args.timestamp, args.parents,\n args.maxgaps, args.timestamp, **attrs)\n\n\ndef rhd_filename_to_timestamp(fname, timezone):\n return arrow.get(fname, 'YYMMDD_HHmmss').replace(\n tzinfo=tz.gettz(timezone)).datetime\n\n\ndef input_string_to_timestamp(string, timezone):\n return arrow.get(string).replace(tzinfo=tz.gettz(timezone)).datetime\n\n\ndef chan_names(result, key):\n if key in result:\n return [chan['native_channel_name'] for chan in result[key]]\n else:\n return []\n\n\ndef amp_chan_names(result):\n return chan_names(result, 'amplifier_channels')\n\n\ndef adc_chan_names(result):\n return chan_names(result, 'board_adc_channels')\n\n\ndef board_adc_metadata(result, dsetname):\n attrs = dict(dtype=result['board_adc_data'].dtype.str,\n sampling_rate=result['frequency_parameters'][\n 'board_adc_sample_rate'], )\n columns = {i: chan_attrs\n for i, chan_attrs in enumerate(result['board_adc_channels'])}\n for k in columns:\n columns[k]['units'] = 'V'\n columns[k]['unit_scale'] = result['ADC_input_bit_volts']\n write_metadata(dsetname, columns=columns, **attrs)\n\n\ndef amplifier_metadata(result, dsetname):\n attrs = dict(dtype=result['amplifier_data'].dtype.str,\n sampling_rate=result['frequency_parameters'][\n 'amplifier_sample_rate'], )\n 
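The `_SafeEval` walker in the formalchemy record above targets the Python 2 `compiler` module, which no longer exists. A rough Python 3 analogue using `ast`, restricted to the same node types the original handles (constants, the names True/False/None, tuples, lists), might look like this:

```python
# Rough Python 3 analogue of _SafeEval/simple_eval above, assuming only
# constants, tuples, and lists need to be supported.
import ast

def simple_eval_py3(source):
    if source == '':
        return None

    def walk(node):
        if isinstance(node, ast.Constant):  # numbers, strings, True/False/None
            return node.value
        if isinstance(node, ast.Tuple):
            return tuple(walk(elt) for elt in node.elts)
        if isinstance(node, ast.List):
            return [walk(elt) for elt in node.elts]
        raise ValueError('unsupported node: %r' % node)

    return walk(ast.parse(source, mode='eval').body)

assert simple_eval_py3("(1, 'a', [True, None])") == (1, 'a', [True, None])
```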
attrs.update(result['frequency_parameters'])\n columns = {i: chan_attrs\n for i, chan_attrs in enumerate(result['amplifier_channels'])}\n for k in columns:\n columns[k]['units'] = 'uV'\n columns[k]['unit_scale'] = result['amplifier_bit_microvolts']\n write_metadata(dsetname, columns=columns, **attrs)\n\n\ndef not_implemented_warnings(result):\n if 'aux_input_channels' in result:\n print(\"AUX INPUT DATA CONVERSION NOT YET IMPLEMENTED\")\n if 'supply_voltage_data' in result:\n print(\"SUPPLY VOLTAGE DATA CONVERSION NOT YET IMPLEMENTED\")\n if 'board_dig_in_data' in result:\n print(\"DIGITAL INPUT DATA CONVERSION NOT YET IMPLEMENTED\")\n if 'board_dig_out_data' in result:\n print(\"DIGITAL OUTPUT DATA CONVERSION NOT YET IMPLEMENTED\")\n if 'temp_sensor_data' in result:\n print(\"TEMP SENSOR DATA CONVERSION NOT YET IMPLEMENTED\")\n\n\ndef check_timestamp_gaps(data, max_gaps):\n num_gaps = np.sum(~np.isclose(\n np.diff(data['t_amplifier']), 1. / data['frequency_parameters'][\n 'amplifier_sample_rate']))\n if num_gaps > max_gaps:\n raise Exception(\"{} data gaps exceeds maximum limit {}\".format(\n num_gaps, max_gaps))\n\n\ndef check_exists(rhd_paths):\n for filepath in rhd_paths:\n if not os.path.exists(filepath):\n print(\"file {} does not exist\".format(filepath))\n sys.exit(0)\n\n\ndef rhds_to_entry(rhd_paths,\n entry_name,\n timestamp=None,\n parents=False,\n max_gaps=10,\n timezone='America/Chicago',\n **attrs):\n \"\"\"\n Converts a temporally contiguous list of .rhd files to a bark entry.\n \"\"\"\n if not timestamp:\n timestamp = rhd_filename_to_timestamp(rhd_paths[0], timezone)\n else:\n timestamp = input_string_to_timestamp(timestamp, timezone)\n # extract data and metadata from first file\n print(rhd_paths[0])\n result = read_data(rhd_paths[0], no_floats=True)\n not_implemented_warnings(result)\n check_timestamp_gaps(result, max_gaps)\n # make entry\n entry_attrs = result['notes']\n attrs.update(entry_attrs)\n create_entry(entry_name, timestamp, parents, **attrs)\n # make datasets\n board_channels = adc_chan_names(result)\n if board_channels:\n dsetname = os.path.join(entry_name, 'board_adc.dat')\n board_adc_metadata(result, dsetname)\n with open(dsetname, 'wb') as fp:\n fp.write(result['board_adc_data'].T.tobytes())\n\n amplifier_channels = amp_chan_names(result)\n if amplifier_channels:\n dsetname = os.path.join(entry_name, 'amplifier.dat')\n amplifier_metadata(result, dsetname)\n with open(dsetname, 'wb') as fp:\n fp.write(result['amplifier_data'].T.tobytes())\n\n # now that the metadata has been written (and data from the first file)\n # write data for the remainder of the files\n for rhdfile in rhd_paths[1:]:\n print(rhdfile)\n result = read_data(rhdfile, no_floats=True)\n not_implemented_warnings(result)\n check_timestamp_gaps(result, max_gaps)\n cur_board_channels = adc_chan_names(result)\n cur_amplifier_channels = amp_chan_names(result)\n\n # check that the same channels are being recorded\n if board_channels != cur_board_channels:\n raise ValueError(\"\"\"{} has channels {}\n {} has channels {} \"\"\".format(\n rhdfile, cur_board_channels, rhd_paths[0], board_channels))\n if amplifier_channels != cur_amplifier_channels:\n raise ValueError(\"\"\"{} has channels {}\n {} has channels {}\"\"\"\n .format(rhdfile, cur_amplifier_channels,\n rhd_paths[0], amplifier_channels))\n # write data\n if cur_board_channels:\n dsetname = os.path.join(entry_name, 'board_adc.dat')\n with open(dsetname, 'ab') as fp:\n fp.write(result['board_adc_data'].T.tobytes())\n if cur_amplifier_channels:\n 
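The `check_timestamp_gaps` helper above flags every position where consecutive amplifier timestamps do not differ by one sample period. The same test in isolation, on a synthetic timestamp vector with one injected gap:

```python
# Standalone illustration of the gap check above, using synthetic timestamps.
import numpy as np

fs = 30000.0                         # assumed amplifier sample rate
t = np.arange(0, 1, 1 / fs)          # one second of ideal timestamps
t[10000:] += 0.5                     # inject a single half-second gap
num_gaps = np.sum(~np.isclose(np.diff(t), 1.0 / fs))
print(num_gaps)                      # -> 1
```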
dsetname = os.path.join(entry_name, 'amplifier.dat')\n with open(dsetname, 'ab') as fp:\n fp.write(result['amplifier_data'].T.tobytes())\n","repo_name":"kylerbrown/bark","sub_path":"bark/io/rhd/rhd2bark.py","file_name":"rhd2bark.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"10504955912","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n'''\n@File : main.py\n@Author : zhangheng\n@version : 1.0\n@Description: Weather forecast demo - use GPS positioning to fetch the coming days' weather for the current location\n board.json - hardware resource configuration file, for details see: https://haas.iot.aliyun.com/haasapi/index.html#/Python/docs/zh-CN/haas_extended_api/driver/driver\n'''\n\nfrom aliyunIoT import Device\nfrom driver import UART\nimport display # display library\nimport network # network library\nimport utime # sleep/delay helpers live in utime\nimport sntp # network time synchronization library\nimport _thread # thread library\nimport gnss # gps\nimport ujson as json\n\n\n# Wi-Fi SSID and password settings\nSSID='***'\nPWD='***'\n\n# HaaS device triple (product key / device name / device secret)\nproductKey = \"***\"\ndeviceName = \"***\"\ndeviceSecret = \"***\"\n\ng_lk_connect = False\ng_lk_service = False\n\nkey_info = {\n 'region' : 'cn-shanghai' ,\n 'productKey': productKey ,\n 'deviceName': deviceName ,\n 'deviceSecret': deviceSecret ,\n 'keepaliveSec': 60\n}\n\ndef connect_wifi(ssid, pwd):\n # reference the global display object\n global disp\n # initialize the network\n wlan = network.WLAN(network.STA_IF)\n wlan.active(True)\n wlan.connect(ssid, pwd)\n while True:\n print('Wi-Fi is connecting...')\n # show the connecting status on the display\n disp.text(20, 30, 'Wi-Fi is connecting...', disp.RED)\n # once connected, update the displayed text\n if (wlan.isconnected() == True):\n print('Wi-Fi is connected')\n disp.textClear(20, 30, 'Wi-Fi is connecting...')\n disp.text(20, 30, 'Wi-Fi is connected', disp.RED)\n #print(wlan.ifconfig())\n ip = wlan.ifconfig()[0]\n print('IP: %s' %ip)\n disp.text(20, 50, ip, disp.RED)\n # NTP time sync; if it fails, the cloud requests below will not work\n print('NTP start')\n disp.text(20, 70, 'NTP start...', disp.RED)\n sntp.setTime()\n print('NTP done')\n disp.textClear(20, 70, 'NTP start...')\n disp.text(20, 70, 'NTP done', disp.RED)\n break\n utime.sleep_ms(500)\n utime.sleep(2)\n\ndef cb_lk_connect(data):\n global g_lk_connect\n print('link platform connected')\n g_lk_connect = True\n\ndef weather_forecast(latitude, longitude):\n start = utime.ticks_ms()\n global dev\n ext = {'latitude':latitude, 'longitude': longitude}\n ext_str = json.dumps(ext)\n all_params = {'id': 1, 'version': '1.0', 'params': { 'eventType': 'haas.faas', 'eventName': 'weatherForecast', 'argInt': 1, 'ext': ext_str}}\n all_params_str = json.dumps(all_params)\n forecast_param = {\n 'topic': '/sys/' + productKey + '/' + deviceName + '/thing/event/hli_event/post',\n 'qos': 1,\n 'payload': all_params_str\n }\n dev.publish(forecast_param)\n while g_lk_service == False:\n continue\n time_diff = utime.ticks_diff(utime.ticks_ms(), start)\n print('get response time : %d' % time_diff)\n\ndef cb_lk_service(data):\n global g_lk_service, callFlag, weatherResult\n if data != None:\n params = data['params']\n #print(params)\n params_dict = json.loads(params)\n ext = params_dict['ext']\n ext_dict = json.loads(ext)\n result = ext_dict['data']\n resultJson = json.loads(result)\n infocode = resultJson['infocode']\n if infocode == '10000':\n casts = resultJson['forecasts'][0]['casts']\n try:\n print(\"casts:\" + json.dumps(casts))\n weatherResult = casts\n for index in range(len(casts)):\n cast = casts[index]\n date = cast['date']\n daytemp = cast['daytemp']\n nighttemp = cast['nighttemp']\n except:\n print(\"Error: casts\")\n callFlag = True\n else:\n callFlag = False\n else:\n 
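The `.T.tobytes()` convention used when the RHD converter above writes its `.dat` files stores samples row-major with channels interleaved, so later blocks can simply be appended in `'ab'` mode. A toy round-trip (hypothetical file name and tiny arrays):

```python
# Toy round-trip for the raw .dat convention above: channel-major blocks are
# transposed to sample-major before writing, so appended blocks stay contiguous.
import numpy as np

block1 = np.arange(6, dtype=np.int16).reshape(2, 3)   # 2 channels x 3 samples
block2 = block1 + 6
with open('amplifier_demo.dat', 'wb') as fp:
    fp.write(block1.T.tobytes())
with open('amplifier_demo.dat', 'ab') as fp:          # later files append
    fp.write(block2.T.tobytes())

data = np.fromfile('amplifier_demo.dat', dtype=np.int16).reshape(-1, 2)
print(data.shape)                                     # -> (6, 2), samples x channels
```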
callFlag = False\n g_lk_service = True\n\n# thread that loops forever: get a GPS fix, then fetch the weather for it\ndef weather_forecast_thread():\n global gnssDev\n while True:\n location = gnssDev.getLocation()\n if location:\n print(\"GNSS info: latitude=%s longitude=%s altitude=%s\" % (location.latitude, location.longitude, location.altitude))\n latitude = str(location.latitude[0])\n longitude = str(location.longitude[0])\n print('latitude: ' + latitude + \", longitude: \" + longitude)\n if(latitude != '0.0' and longitude != '0.0'):\n weather_forecast(latitude, longitude)\n # wait 5 seconds before taking the next fix\n utime.sleep_ms(5000)\n\ndef display_thread():\n # reference global variables\n global disp, callFlag, weatherResult\n while True:\n if callFlag == True:\n # clear the screen\n disp.clear()\n # set the text font\n #disp.font(disp.FONT_DejaVu40)\n # display the forecast results\n height = 30\n for index in range(len(weatherResult)):\n cast = weatherResult[index]\n date = cast['date']\n daytemp = cast['daytemp']\n nighttemp = cast['nighttemp']\n result = \"date=\" + date + \",daytemp=\" + daytemp + \",nighttemp=\" + nighttemp\n print(result)\n disp.text(10, height, result, disp.RED)\n height = height + 30\n utime.sleep_ms(1000)\n else:\n # clear the screen\n disp.clear()\n # set the text font\n #disp.font(disp.FONT_DejaVu40)\n # show that no forecast is available yet\n disp.text(40, 20, 'no weather!!!', disp.RED)\n print('no weather ...!!!')\n utime.sleep_ms(1000)\n\ndef init_gps():\n global gnssDev\n print(\"gnss init...\")\n uartDev = UART()\n uartDev.open(\"gnss\")\n gnssDev = gnss.GNSS(uartDev)\n\ndef init_device():\n # device initialization\n global dev\n dev = Device()\n dev.on(Device.ON_CONNECT, cb_lk_connect)\n dev.on(Device.ON_SERVICE, cb_lk_service)\n dev.connect(key_info)\n while True:\n if g_lk_connect:\n break\n\ndef init():\n # global variables\n global disp, frame, gnssDev, callFlag, weatherResult\n callFlag = False\n # create the LCD display object\n disp = display.TFT()\n # connect to the network\n connect_wifi(SSID, PWD)\n # initialize the GPS\n init_gps()\n # initialize the device\n init_device()\n\ndef execute():\n try:\n # start the display thread\n _thread.start_new_thread(display_thread, ())\n # set the thread stack size\n _thread.stack_size(10 * 1024)\n # start the positioning thread\n _thread.start_new_thread(weather_forecast_thread, ())\n except:\n print(\"Error: unable to start thread\")\n while True:\n utime.sleep_ms(1000)\n\ndef main():\n init()\n execute()\n\nif __name__ == '__main__':\n main()\n","repo_name":"alibaba/AliOS-Things","sub_path":"haas_lib_bundles/python/docs/examples/weather_forecast/esp32/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","stars":4479,"dataset":"github-code","pt":"16"} +{"seq_id":"9367120882","text":"import logging\nfrom typing import Dict, List, Optional, Tuple, Union\n\n# Detectron imports\nimport fvcore.nn.weight_init as weight_init\nimport numpy as np\nimport torch\nfrom detectron2.config import configurable\nfrom detectron2.data.detection_utils import convert_image_to_rgb\nfrom detectron2.layers import Conv2d, Linear, ShapeSpec, cat, get_norm\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY\nfrom detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN\nfrom detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads\nfrom detectron2.modeling.roi_heads.box_head import ROI_BOX_HEAD_REGISTRY\nfrom detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference\nfrom detectron2.structures import Boxes, ImageList, Instances\nfrom detectron2.utils.events import get_event_storage\nfrom detectron2.utils.logger import log_first_n\nfrom fvcore.nn import smooth_l1_loss\n\n# Project imports\nfrom 
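The busy-wait `while g_lk_service == False: continue` in `weather_forecast` above spins the CPU until the service callback flips a global flag. On a platform with a full `threading` module the same publish-then-wait handshake is usually expressed with an `Event`; the sketch below is illustrative only, since MicroPython's `_thread` has no `Event`:

```python
# Hedged sketch of the publish-then-wait handshake above, using threading.Event
# in place of the global g_lk_service flag (illustrative; not MicroPython code).
import threading
import time

response_ready = threading.Event()

def fake_service_callback():
    time.sleep(0.1)            # pretend the cloud answered after 100 ms
    response_ready.set()       # plays the role of g_lk_service = True

threading.Thread(target=fake_service_callback).start()
start = time.time()
response_ready.wait()          # replaces the busy-wait loop
print('response after %.2f s' % (time.time() - start))
```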
probabilistic_inference.inference_utils import get_dir_alphas\nfrom torch import distributions, nn\nfrom torch.nn import functional as F\n\nfrom probabilistic_modeling.losses import negative_log_likelihood, reshape_box_preds\nfrom probabilistic_modeling.modeling_utils import (\n PoissonPointProcessIntensityFunction,\n clamp_log_variance,\n covariance_output_to_cholesky,\n get_probabilistic_loss_weight,\n unscented_transform,\n PoissonPointUnion,\n)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n@META_ARCH_REGISTRY.register()\nclass ProbabilisticGeneralizedRCNN(GeneralizedRCNN):\n \"\"\"\n Probabilistic GeneralizedRCNN class.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n # Parse configs\n self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME\n self.compute_cls_var = self.cls_var_loss != \"none\"\n self.cls_var_num_samples = (\n cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES\n )\n\n self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME\n self.compute_bbox_cov = self.bbox_cov_loss != \"none\"\n self.bbox_cov_num_samples = (\n cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES\n )\n self.bbox_cov_dist_type = (\n cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE\n )\n self.bbox_cov_type = (\n cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE\n )\n if self.bbox_cov_type == \"diagonal\":\n # Diagonal covariance matrix has N elements\n self.bbox_cov_dims = 4\n else:\n # Number of elements required to describe an NxN covariance matrix is\n # computed as: (N * (N + 1)) / 2\n self.bbox_cov_dims = 10\n\n self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE\n self.use_dropout = self.dropout_rate != 0.0\n self.num_mc_dropout_runs = -1\n if (\n self.compute_bbox_cov\n and self.bbox_cov_loss == \"pmb_negative_log_likelihood\"\n ):\n ppp_constructor = lambda x: PoissonPointProcessIntensityFunction(\n cfg, **x\n )\n else:\n # Without this branch ppp_constructor is undefined for every other\n # regression loss and the head construction below raises a NameError.\n ppp_constructor = None\n self.nll_max_num_solutions = (\n cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS\n )\n\n self.current_step = 0\n\n # Define custom probabilistic head\n self.roi_heads.box_predictor = ProbabilisticFastRCNNOutputLayers(\n cfg,\n input_shape=self.roi_heads.box_head.output_shape,\n compute_cls_var=self.compute_cls_var,\n cls_var_loss=self.cls_var_loss,\n cls_var_num_samples=self.cls_var_num_samples,\n compute_bbox_cov=self.compute_bbox_cov,\n bbox_cov_loss=self.bbox_cov_loss,\n bbox_cov_type=self.bbox_cov_type,\n bbox_cov_dims=self.bbox_cov_dims,\n bbox_cov_num_samples=self.bbox_cov_num_samples,\n ppp_constructor=ppp_constructor,\n nll_max_num_solutions=self.nll_max_num_solutions,\n bbox_cov_dist_type=self.bbox_cov_dist_type,\n matching_distance=cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE,\n use_prediction_mixture=cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE,\n )\n\n # Send to device\n self.to(self.device)\n\n def get_ppp_intensity_function(self):\n return self.roi_heads.box_predictor.ppp_intensity_function\n\n def forward(\n self, batched_inputs, return_anchorwise_output=False, num_mc_dropout_runs=-1\n ):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances (optional): groundtruth :class:`Instances`\n * proposals (optional): :class:`Instances`, precomputed proposals.\n\n Other information that's included in the 
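The `(N * (N + 1)) / 2` count in the constructor above is the number of free parameters in a lower-triangular Cholesky factor of an NxN covariance, 10 values for N = 4. One common way to pack such parameters into a valid covariance matrix is sketched below; the exact layout of this repo's `covariance_output_to_cholesky` may differ:

```python
# One common packing of (N*(N+1))/2 predicted values into a PSD covariance;
# the repo's covariance_output_to_cholesky may use a different layout.
import torch

N = 4
n_params = N * (N + 1) // 2             # -> 10, matching bbox_cov_dims above
params = torch.randn(n_params)

L = torch.zeros(N, N)
rows, cols = torch.tril_indices(N, N)
L[rows, cols] = params
L.diagonal().exp_()                      # force a positive diagonal
cov = L @ L.T                            # PSD by construction
print(torch.linalg.eigvalsh(cov).min())  # non-negative up to float error
```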
original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n return_anchorwise_output (bool): returns raw output for probabilistic inference\n\n num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and\n not full neural network.\n\n Returns:\n dict[str: Tensor]:\n mapping from a named loss to a tensor storing the loss. Used during training only.\n \"\"\"\n try:\n self.current_step += get_event_storage().iter\n except:\n self.current_step += 1\n\n if not self.training and num_mc_dropout_runs == -1:\n if return_anchorwise_output:\n return self.produce_raw_output(batched_inputs)\n else:\n return self.inference(batched_inputs)\n elif self.training and num_mc_dropout_runs > 1:\n self.num_mc_dropout_runs = num_mc_dropout_runs\n output_list = []\n for i in range(num_mc_dropout_runs):\n output_list.append(self.produce_raw_output(batched_inputs))\n return output_list\n\n images = self.preprocess_image(batched_inputs)\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN,\n \"'targets' in the model inputs is now renamed to 'instances'!\",\n n=10,\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n\n features = self.backbone(images.tensor)\n\n if self.proposal_generator:\n proposals, proposal_losses = self.proposal_generator(\n images, features, gt_instances\n )\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n proposal_losses = {}\n\n _, detector_losses = self.roi_heads(\n images, features, proposals, gt_instances, current_step=self.current_step\n )\n if self.vis_period > 0:\n storage = get_event_storage()\n if storage.iter % self.vis_period == 0:\n # TODO: implement to visualize probabilistic outputs\n self.visualize_training(batched_inputs, proposals)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n def produce_raw_output(self, batched_inputs, detected_instances=None):\n \"\"\"\n Run inference on the given inputs and return proposal-wise output for later postprocessing.\n\n Args:\n batched_inputs (list[dict]): same as in :meth:`forward`\n detected_instances (None or list[Instances]): if not None, it\n contains an `Instances` object per image. 
The `Instances`\n object contains \"pred_boxes\" and \"pred_classes\" which are\n known boxes in the image.\n The inference will then skip the detection of bounding boxes,\n and only predict other per-ROI outputs.\n Returns:\n same as in :meth:`forward`.\n \"\"\"\n raw_output = dict()\n\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n\n if detected_instances is None:\n if self.proposal_generator:\n proposals, _ = self.proposal_generator(images, features, None)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n # Create raw output dictionary\n raw_output.update({\"proposals\": proposals[0]})\n\n results, _ = self.roi_heads(\n images,\n features,\n proposals,\n None,\n produce_raw_output=True,\n num_mc_dropout_runs=self.num_mc_dropout_runs,\n )\n else:\n detected_instances = [x.to(self.device) for x in detected_instances]\n results = self.roi_heads.forward_with_given_boxes(\n features, detected_instances\n )\n\n box_cls, box_delta, box_cls_var, box_reg_var = results\n\n raw_output.update(\n {\n \"box_cls\": box_cls,\n \"box_delta\": box_delta,\n \"box_cls_var\": box_cls_var,\n \"box_reg_var\": box_reg_var,\n }\n )\n if (\n self.compute_bbox_cov\n and self.bbox_cov_loss == \"pmb_negative_log_likelihood\"\n ):\n ppp_output = (\n self.roi_heads.box_predictor.ppp_intensity_function.get_weights()\n )\n raw_output.update({\"ppp\": ppp_output})\n\n return raw_output\n\n def visualize_training(self, batched_inputs, proposals):\n \"\"\"\n A function used to visualize images and proposals. It shows ground truth\n bounding boxes on the original image and up to 20 top-scoring predicted\n object proposals on the original image. Users can implement different\n visualization functions for different models.\n\n Args:\n batched_inputs (list): a list that contains input to the model.\n proposals (list): a list that contains predicted proposals. 
Both\n batched_inputs and proposals should have the same length.\n \"\"\"\n from core.visualization_tools.probabilistic_visualizer import (\n ProbabilisticVisualizer as Visualizer,\n )\n\n storage = get_event_storage()\n max_vis_prop = 20\n\n with torch.no_grad():\n self.eval()\n predictions = self.produce_raw_output(batched_inputs)\n self.train()\n predictions = (\n predictions[\"box_cls\"],\n predictions[\"box_delta\"],\n predictions[\"box_cls_var\"],\n predictions[\"box_reg_var\"],\n )\n _, _, _, pred_covs = predictions\n boxes = self.roi_heads.box_predictor.predict_boxes(predictions, proposals)\n scores = self.roi_heads.box_predictor.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n\n # Apply NMS without score threshold\n instances, kept_idx = fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n 0.0,\n self.roi_heads.box_predictor.test_nms_thresh,\n self.roi_heads.box_predictor.test_topk_per_image,\n )\n\n num_prop_per_image = [len(p) for p in proposals]\n pred_covs = pred_covs.split(num_prop_per_image)\n\n pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]\n pred_scores = [score[kept] for score, kept in zip(scores, kept_idx)]\n pred_boxes = [box[kept] for box, kept in zip(boxes, kept_idx)]\n\n for i, (input, prop) in enumerate(zip(batched_inputs, proposals)):\n img = input[\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\n v_gt = Visualizer(img, None)\n v_gt = v_gt.overlay_instances(boxes=input[\"instances\"].gt_boxes)\n anno_img = v_gt.get_image()\n box_size = min(len(prop.proposal_boxes), max_vis_prop)\n\n v_pred = Visualizer(img, None)\n boxes = pred_boxes[i][0:box_size, :4].cpu().numpy()\n pred_cov_matrix = pred_covs[i][0:box_size, :4]\n pred_cov_matrix = clamp_log_variance(pred_cov_matrix)\n chol = covariance_output_to_cholesky(pred_cov_matrix)\n cov = (\n torch.matmul(chol, torch.transpose(chol, -1, -2)).cpu().detach().numpy()\n )\n\n v_pred = v_pred.overlay_covariance_instances(\n boxes=boxes, covariance_matrices=cov\n )\n prop_img = v_pred.get_image()\n vis_img = np.concatenate((anno_img, prop_img), axis=1)\n vis_img = vis_img.transpose(2, 0, 1)\n vis_name = \"Left: GT bounding boxes; Right: Predicted proposals\"\n storage.put_image(vis_name, vis_img)\n break # only visualize one image in a batch\n\n\n@ROI_HEADS_REGISTRY.register()\nclass ProbabilisticROIHeads(StandardROIHeads):\n \"\"\"\n Probabilistic ROI heads, inherit from standard ROI heads so can be used with mask RCNN in theory.\n \"\"\"\n\n def __init__(self, cfg, input_shape):\n super(ProbabilisticROIHeads, self).__init__(cfg, input_shape)\n\n self.is_mc_dropout_inference = False\n self.produce_raw_output = False\n self.current_step = 0\n\n def forward(\n self,\n images: ImageList,\n features: Dict[str, torch.Tensor],\n proposals: List[Instances],\n targets: Optional[List[Instances]] = None,\n num_mc_dropout_runs=-1,\n produce_raw_output=False,\n current_step=0.0,\n ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:\n \"\"\"\n See :class:`ROIHeads.forward`.\n \"\"\"\n\n self.is_mc_dropout_inference = num_mc_dropout_runs > 1\n self.produce_raw_output = produce_raw_output\n self.current_step = current_step\n\n del images\n if self.training and not self.is_mc_dropout_inference:\n assert targets\n proposals = self.label_and_sample_proposals(proposals, targets)\n # del targets\n\n if self.training and not self.is_mc_dropout_inference:\n losses = self._forward_box(features, proposals, targets)\n # Usually the original 
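`visualize_training` above has to re-align per-proposal covariances with the detections NMS kept: the flat batched tensor is first split per image, then each split is indexed with that image's kept indices. The bookkeeping in isolation, on toy shapes:

```python
# Toy version of the split-then-filter bookkeeping above.
import torch

pred = torch.arange(10).reshape(5, 2)          # 5 proposals, 2 values each
num_prop_per_image = [3, 2]                    # proposals per image in the batch
kept_idx = [torch.tensor([0, 2]), torch.tensor([1])]   # indices NMS kept

per_image = pred.split(num_prop_per_image)
kept = [p[k] for p, k in zip(per_image, kept_idx)]
print([tuple(k.shape) for k in kept])          # -> [(2, 2), (1, 2)]
```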
proposals used by the box head are used by the mask, keypoint\n # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes\n # predicted by the box head.\n losses.update(self._forward_mask(features, proposals))\n losses.update(self._forward_keypoint(features, proposals))\n return proposals, losses\n else:\n pred_instances = self._forward_box(features, proposals, targets)\n if self.produce_raw_output:\n return pred_instances, {}\n # During inference cascaded prediction is used: the mask and keypoints heads are only\n # applied to the top scoring box detections.\n pred_instances = self.forward_with_given_boxes(features, pred_instances)\n return pred_instances, {}\n\n def _forward_box(\n self,\n features: Dict[str, torch.Tensor],\n proposals: List[Instances],\n gt_instances: List[Instances],\n ) -> Union[Dict[str, torch.Tensor], List[Instances]]:\n \"\"\"\n Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,\n the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.\n\n Args:\n features (dict[str, Tensor]): mapping from feature map names to tensor.\n Same as in :meth:`ROIHeads.forward`.\n proposals (list[Instances]): the per-image object proposals with\n their matching ground truth.\n Each has fields \"proposal_boxes\", and \"objectness_logits\",\n \"gt_classes\", \"gt_boxes\".\n\n Returns:\n In training, a dict of losses.\n In inference, a list of `Instances`, the predicted instances.\n \"\"\"\n features = [features[f] for f in self.in_features]\n box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])\n box_features = self.box_head(box_features)\n predictions = self.box_predictor(box_features)\n del box_features\n\n if self.produce_raw_output:\n return predictions\n\n if self.training:\n losses = self.box_predictor.losses(\n predictions, proposals, self.current_step, gt_instances\n )\n # proposals is modified in-place below, so losses must be computed first.\n if self.train_on_pred_boxes:\n with torch.no_grad():\n pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(\n predictions, proposals\n )\n for proposals_per_image, pred_boxes_per_image in zip(\n proposals, pred_boxes\n ):\n proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)\n return losses\n else:\n pred_instances, _ = self.box_predictor.inference(predictions, proposals)\n return pred_instances\n\n\nclass ProbabilisticFastRCNNOutputLayers(nn.Module):\n \"\"\"\n Four linear layers for predicting Fast R-CNN outputs:\n (1) proposal-to-detection box regression deltas\n (2) classification scores\n (3) box regression deltas covariance parameters (if needed)\n (4) classification logits variance (if needed)\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape,\n *,\n box2box_transform,\n num_classes,\n cls_agnostic_bbox_reg=False,\n smooth_l1_beta=0.0,\n test_score_thresh=0.0,\n test_nms_thresh=0.5,\n test_topk_per_image=100,\n compute_cls_var=False,\n compute_bbox_cov=False,\n bbox_cov_dims=4,\n cls_var_loss=\"none\",\n cls_var_num_samples=10,\n bbox_cov_loss=\"none\",\n bbox_cov_type=\"diagonal\",\n dropout_rate=0.0,\n annealing_step=0,\n bbox_cov_num_samples=1000,\n ppp_constructor=None,\n nll_max_num_solutions=5,\n bbox_cov_dist_type=None,\n matching_distance=\"log_prob\",\n use_prediction_mixture=False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature to this module\n box2box_transform (Box2BoxTransform or 
Box2BoxTransformRotated):\n num_classes (int): number of foreground classes\n cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox regression\n smooth_l1_beta (float): transition point from L1 to L2 loss.\n test_score_thresh (float): threshold to filter predictions results.\n test_nms_thresh (float): NMS threshold for prediction results.\n test_topk_per_image (int): number of top predictions to produce per image.\n compute_cls_var (bool): compute classification variance\n compute_bbox_cov (bool): compute box covariance regression parameters.\n bbox_cov_dims (int): 4 for diagonal covariance, 10 for full covariance.\n cls_var_loss (str): name of classification variance loss.\n cls_var_num_samples (int): number of samples to be used for loss computation. Usually between 10-100.\n bbox_cov_loss (str): name of box covariance loss.\n bbox_cov_type (str): 'diagonal' or 'full'. This is used to train with loss functions that accept both types.\n dropout_rate (float): 0-1, probability of drop.\n annealing_step (int): step at which the KL-divergence term of the evidential loss becomes fully active.\n ppp_constructor (func): constructor returning the PPP intensity function for a given set of kwargs\n nll_max_num_solutions (int): Maximum NLL solutions to consider when computing NLL-PMB loss\n \"\"\"\n super().__init__()\n if isinstance(input_shape, int): # some backward compatibility\n input_shape = ShapeSpec(channels=input_shape)\n self.num_classes = num_classes\n input_size = (\n input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\n )\n\n self.compute_cls_var = compute_cls_var\n self.compute_bbox_cov = compute_bbox_cov\n\n self.bbox_cov_dims = bbox_cov_dims\n self.bbox_cov_num_samples = bbox_cov_num_samples\n\n self.dropout_rate = dropout_rate\n self.use_dropout = self.dropout_rate != 0.0\n\n self.cls_var_loss = cls_var_loss\n self.cls_var_num_samples = cls_var_num_samples\n\n self.annealing_step = annealing_step\n\n self.bbox_cov_loss = bbox_cov_loss\n self.bbox_cov_type = bbox_cov_type\n self.bbox_cov_dist_type = bbox_cov_dist_type\n\n # The prediction layer for num_classes foreground classes and one background class\n # (hence + 1)\n self.cls_score = Linear(input_size, num_classes + 1)\n # Must be an int (1, not 1.0): it is used below as a Linear layer dimension.\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\n box_dim = len(box2box_transform.weights)\n self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n if self.compute_cls_var:\n self.cls_var = Linear(input_size, num_classes + 1)\n nn.init.normal_(self.cls_var.weight, std=0.0001)\n nn.init.constant_(self.cls_var.bias, 0)\n\n if self.compute_bbox_cov:\n self.bbox_cov = Linear(input_size, num_bbox_reg_classes * bbox_cov_dims)\n nn.init.normal_(self.bbox_cov.weight, std=0.0001)\n nn.init.constant_(self.bbox_cov.bias, 0.0)\n\n self.box2box_transform = box2box_transform\n self.smooth_l1_beta = smooth_l1_beta\n self.test_score_thresh = test_score_thresh\n self.test_nms_thresh = test_nms_thresh\n self.test_topk_per_image = test_topk_per_image\n\n self.ppp_intensity_function = ppp_constructor({\"device\": device}) if ppp_constructor is not None else None\n self.ppp_constructor = ppp_constructor\n self.nll_max_num_solutions = nll_max_num_solutions\n self.matching_distance = matching_distance\n self.use_prediction_mixture = use_prediction_mixture\n\n @classmethod\n def from_config(\n cls,\n cfg,\n 
input_shape,\n compute_cls_var,\n cls_var_loss,\n cls_var_num_samples,\n compute_bbox_cov,\n bbox_cov_loss,\n bbox_cov_type,\n bbox_cov_dims,\n bbox_cov_num_samples,\n ppp_constructor,\n nll_max_num_solutions,\n ):\n return {\n \"input_shape\": input_shape,\n \"box2box_transform\": Box2BoxTransform(\n weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS\n ),\n # fmt: off\n \"num_classes\": cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"cls_agnostic_bbox_reg\": cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\n \"smooth_l1_beta\": cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\n \"test_score_thresh\": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\n \"test_nms_thresh\": cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n \"test_topk_per_image\": cfg.TEST.DETECTIONS_PER_IMAGE,\n \"compute_cls_var\": compute_cls_var,\n \"cls_var_loss\": cls_var_loss,\n \"cls_var_num_samples\": cls_var_num_samples,\n \"compute_bbox_cov\": compute_bbox_cov,\n \"bbox_cov_dims\": bbox_cov_dims,\n \"bbox_cov_loss\": bbox_cov_loss,\n \"bbox_cov_type\": bbox_cov_type,\n \"dropout_rate\": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,\n \"annealing_step\": cfg.SOLVER.STEPS[1] if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0 else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP,\n \"bbox_cov_num_samples\": bbox_cov_num_samples,\n \"ppp_constructor\": ppp_constructor,\n \"nll_max_num_solutions\" : nll_max_num_solutions,\n 'bbox_cov_dist_type': cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE,\n \"use_prediction_mixture\": cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE\n # fmt: on\n }\n\n def forward(self, x):\n \"\"\"\n Args:\n x: per-region features of shape (N, ...) for N bounding boxes to predict.\n\n Returns:\n Tensor: Nx(K+1) logits for each box\n Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.\n Tensor: Nx(K+1) logits variance for each box.\n Tensor: Nx4(10) or Nx(Kx4(10)) covariance matrix parameters. 4 if diagonal, 10 if full.\n \"\"\"\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n scores = self.cls_score(x)\n proposal_deltas = self.bbox_pred(x)\n\n # Compute logits variance if needed\n if self.compute_cls_var:\n score_vars = self.cls_var(x)\n else:\n score_vars = None\n\n # Compute box covariance if needed\n if self.compute_bbox_cov:\n proposal_covs = self.bbox_cov(x)\n else:\n proposal_covs = None\n\n return scores, proposal_deltas, score_vars, proposal_covs\n\n def losses(self, predictions, proposals, current_step=0, gt_instances=None):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features\n that were used to compute predictions.\n current_step: current optimizer step. 
Used for losses with an annealing component.\n gt_instances: list of ground truth instances\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n global device\n\n # Overwrite later\n use_nll_loss = False\n\n (\n pred_class_logits,\n pred_proposal_deltas,\n pred_class_logits_var,\n pred_proposal_covs,\n ) = predictions\n\n if len(proposals):\n box_type = type(proposals[0].proposal_boxes)\n # cat(..., dim=0) concatenates over all images in the batch\n proposals_boxes = box_type.cat([p.proposal_boxes for p in proposals])\n assert (\n not proposals_boxes.tensor.requires_grad\n ), \"Proposals should not require gradients!\"\n\n # The following fields should exist only when training.\n if proposals[0].has(\"gt_boxes\"):\n gt_boxes = box_type.cat([p.gt_boxes for p in proposals])\n assert proposals[0].has(\"gt_classes\")\n gt_classes = cat([p.gt_classes for p in proposals], dim=0)\n else:\n proposals_boxes = Boxes(\n torch.zeros(0, 4, device=pred_proposal_deltas.device)\n )\n\n no_instances = len(proposals) == 0 # no instances found\n\n # Compute Classification Loss\n if no_instances:\n # TODO 0.0 * pred.sum() is enough since PT1.6\n loss_cls = 0.0 * F.cross_entropy(\n pred_class_logits,\n torch.zeros(0, dtype=torch.long, device=pred_class_logits.device),\n reduction=\"sum\",\n )\n else:\n if self.compute_cls_var:\n # Compute classification variance according to:\n # \"What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?\", NIPS 2017\n if self.cls_var_loss == \"loss_attenuation\":\n num_samples = self.cls_var_num_samples\n\n # Compute standard deviation\n pred_class_logits_var = torch.sqrt(torch.exp(pred_class_logits_var))\n\n # Produce normal samples using logits as the mean and the standard deviation computed above\n # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for\n # COCO dataset.\n univariate_normal_dists = distributions.normal.Normal(\n pred_class_logits, scale=pred_class_logits_var\n )\n\n pred_class_stochastic_logits = univariate_normal_dists.rsample(\n (num_samples,)\n )\n pred_class_stochastic_logits = pred_class_stochastic_logits.view(\n (\n pred_class_stochastic_logits.shape[1] * num_samples,\n pred_class_stochastic_logits.shape[2],\n -1,\n )\n )\n pred_class_logits = pred_class_stochastic_logits.squeeze(2)\n\n # Produce copies of the target classes to match the number of\n # stochastic samples.\n gt_classes_target = torch.unsqueeze(gt_classes, 0)\n gt_classes_target = torch.repeat_interleave(\n gt_classes_target, num_samples, dim=0\n ).view((gt_classes_target.shape[1] * num_samples, -1))\n gt_classes_target = gt_classes_target.squeeze(1)\n\n loss_cls = F.cross_entropy(\n pred_class_logits, gt_classes_target, reduction=\"mean\"\n )\n elif self.cls_var_loss == \"evidential\":\n # ToDo: Currently does not provide any reasonable mAP Results\n # (15% mAP)\n\n # Assume dirichlet parameters are output.\n alphas = get_dir_alphas(pred_class_logits)\n\n # Get sum of all alphas\n dirichlet_s = alphas.sum(1).unsqueeze(1)\n\n # Generate one hot vectors for ground truth\n one_hot_vectors = torch.nn.functional.one_hot(\n gt_classes, alphas.shape[1]\n )\n\n # Compute loss. 
This loss attempts to put all evidence on the\n # correct location.\n per_instance_loss = one_hot_vectors * (\n torch.digamma(dirichlet_s) - torch.digamma(alphas)\n )\n\n # Compute KL divergence regularizer loss\n estimated_dirichlet = torch.distributions.dirichlet.Dirichlet(\n (alphas - 1.0) * (1.0 - one_hot_vectors) + 1.0\n )\n uniform_dirichlet = torch.distributions.dirichlet.Dirichlet(\n torch.ones_like(one_hot_vectors).type(torch.FloatTensor).to(device)\n )\n kl_regularization_loss = torch.distributions.kl.kl_divergence(\n estimated_dirichlet, uniform_dirichlet\n )\n\n # Compute final loss\n annealing_multiplier = torch.min(\n torch.as_tensor(current_step / self.annealing_step).to(device),\n torch.as_tensor(1.0).to(device),\n )\n\n per_proposal_loss = (\n per_instance_loss.sum(1)\n + annealing_multiplier * kl_regularization_loss\n )\n\n # Compute evidence auxiliary loss\n evidence_maximization_loss = smooth_l1_loss(\n dirichlet_s,\n 100.0 * torch.ones_like(dirichlet_s).to(device),\n beta=self.smooth_l1_beta,\n reduction=\"mean\",\n )\n\n evidence_maximization_loss *= annealing_multiplier\n\n # Compute final loss\n foreground_loss = per_proposal_loss[\n (gt_classes >= 0) & (gt_classes < pred_class_logits.shape[1] - 1)\n ]\n background_loss = per_proposal_loss[\n gt_classes == pred_class_logits.shape[1] - 1\n ]\n\n loss_cls = (\n torch.mean(foreground_loss) + torch.mean(background_loss)\n ) / 2 + 0.01 * evidence_maximization_loss\n else:\n loss_cls = F.cross_entropy(\n pred_class_logits, gt_classes, reduction=\"mean\"\n )\n\n # Compute regression loss:\n if no_instances:\n # TODO 0.0 * pred.sum() is enough since PT1.6\n loss_box_reg = 0.0 * smooth_l1_loss(\n pred_proposal_deltas,\n torch.zeros_like(pred_proposal_deltas),\n 0.0,\n reduction=\"sum\",\n )\n else:\n gt_proposal_deltas = self.box2box_transform.get_deltas(\n proposals_boxes.tensor, gt_boxes.tensor\n )\n box_dim = gt_proposal_deltas.size(1) # 4 or 5\n cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim\n device = pred_proposal_deltas.device\n\n bg_class_ind = pred_class_logits.shape[1] - 1\n\n # Box delta loss is only computed between the prediction for the gt class k\n # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions\n # for non-gt classes and background.\n # Empty fg_inds produces a valid loss of zero as long as the size_average\n # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally\n # and would produce a nan loss).\n fg_inds = torch.nonzero(\n (gt_classes >= 0) & (gt_classes < bg_class_ind), as_tuple=True\n )[0]\n if cls_agnostic_bbox_reg:\n # pred_proposal_deltas only corresponds to foreground class for\n # agnostic\n gt_class_cols = torch.arange(box_dim, device=device)\n else:\n fg_gt_classes = gt_classes[fg_inds]\n # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],\n # where b is the dimension of box representation (4 or 5)\n # Note that compared to Detectron1,\n # we do not perform bounding box regression for background\n # classes.\n gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(\n box_dim, device=device\n )\n gt_covar_class_cols = self.bbox_cov_dims * fg_gt_classes[\n :, None\n ] + torch.arange(self.bbox_cov_dims, device=device)\n\n loss_reg_normalizer = gt_classes.numel()\n\n pred_proposal_deltas = pred_proposal_deltas[fg_inds[:, None], gt_class_cols]\n gt_proposals_delta = gt_proposal_deltas[fg_inds]\n\n if self.compute_bbox_cov:\n pred_proposal_covs = pred_proposal_covs[\n fg_inds[:, None], 
gt_covar_class_cols\n ]\n pred_proposal_covs = clamp_log_variance(pred_proposal_covs)\n\n if self.bbox_cov_loss == \"negative_log_likelihood\":\n if self.bbox_cov_type == \"diagonal\":\n # Ger foreground proposals.\n _proposals_boxes = proposals_boxes.tensor[fg_inds]\n\n # Compute regression negative log likelihood loss according to:\n # \"What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?\", NIPS 2017\n loss_box_reg = (\n 0.5\n * torch.exp(-pred_proposal_covs)\n * smooth_l1_loss(\n pred_proposal_deltas,\n gt_proposals_delta,\n beta=self.smooth_l1_beta,\n )\n )\n loss_covariance_regularize = 0.5 * pred_proposal_covs\n loss_box_reg += loss_covariance_regularize\n\n loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer\n else:\n # Multivariate Gaussian Negative Log Likelihood loss using pytorch\n # distributions.multivariate_normal.log_prob()\n forecaster_cholesky = covariance_output_to_cholesky(\n pred_proposal_covs\n )\n\n multivariate_normal_dists = (\n distributions.multivariate_normal.MultivariateNormal(\n pred_proposal_deltas, scale_tril=forecaster_cholesky\n )\n )\n\n loss_box_reg = -multivariate_normal_dists.log_prob(\n gt_proposals_delta\n )\n loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer\n\n elif self.bbox_cov_loss == \"second_moment_matching\":\n # Compute regression covariance using second moment\n # matching.\n loss_box_reg = smooth_l1_loss(\n pred_proposal_deltas, gt_proposals_delta, self.smooth_l1_beta\n )\n errors = pred_proposal_deltas - gt_proposals_delta\n if self.bbox_cov_type == \"diagonal\":\n # Handel diagonal case\n second_moment_matching_term = smooth_l1_loss(\n torch.exp(pred_proposal_covs),\n errors ** 2,\n beta=self.smooth_l1_beta,\n )\n loss_box_reg += second_moment_matching_term\n loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer\n else:\n # Handel full covariance case\n errors = torch.unsqueeze(errors, 2)\n gt_error_covar = torch.matmul(\n errors, torch.transpose(errors, 2, 1)\n )\n\n # This is the cholesky decomposition of the covariance matrix.\n # We reconstruct it from 10 estimated parameters as a\n # lower triangular matrix.\n forecaster_cholesky = covariance_output_to_cholesky(\n pred_proposal_covs\n )\n\n predicted_covar = torch.matmul(\n forecaster_cholesky,\n torch.transpose(forecaster_cholesky, 2, 1),\n )\n\n second_moment_matching_term = smooth_l1_loss(\n predicted_covar,\n gt_error_covar,\n beta=self.smooth_l1_beta,\n reduction=\"sum\",\n )\n loss_box_reg = (\n torch.sum(loss_box_reg) + second_moment_matching_term\n ) / loss_reg_normalizer\n\n elif self.bbox_cov_loss == \"energy_loss\":\n forecaster_cholesky = covariance_output_to_cholesky(\n pred_proposal_covs\n )\n\n # Define per-anchor Distributions\n multivariate_normal_dists = (\n distributions.multivariate_normal.MultivariateNormal(\n pred_proposal_deltas, scale_tril=forecaster_cholesky\n )\n )\n # Define Monte-Carlo Samples\n distributions_samples = multivariate_normal_dists.rsample(\n (self.bbox_cov_num_samples + 1,)\n )\n\n distributions_samples_1 = distributions_samples[\n 0 : self.bbox_cov_num_samples, :, :\n ]\n distributions_samples_2 = distributions_samples[\n 1 : self.bbox_cov_num_samples + 1, :, :\n ]\n\n # Compute energy score\n loss_covariance_regularize = (\n -smooth_l1_loss(\n distributions_samples_1,\n distributions_samples_2,\n beta=self.smooth_l1_beta,\n reduction=\"sum\",\n )\n / self.bbox_cov_num_samples\n ) # Second term\n\n gt_proposals_delta_samples = torch.repeat_interleave(\n gt_proposals_delta.unsqueeze(0),\n 
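The diagonal `negative_log_likelihood` branch above is, up to the smooth-L1 substitution, the classic attenuated regression loss 0.5 * exp(-s) * err + 0.5 * s with s = log(sigma^2). In plain PyTorch, with squared error standing in for smooth L1 for brevity:

```python
# Minimal form of the diagonal NLL regression loss above, with s = log(sigma^2)
# and plain squared error replacing smooth L1.
import torch

pred = torch.randn(16, 4)       # predicted box deltas
target = torch.randn(16, 4)     # ground-truth deltas
log_var = torch.randn(16, 4)    # predicted per-coordinate log-variance

loss = 0.5 * torch.exp(-log_var) * (pred - target) ** 2 + 0.5 * log_var
print(loss.sum() / pred.numel())
```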
self.bbox_cov_num_samples,\n dim=0,\n )\n\n loss_first_moment_match = (\n 2.0\n * smooth_l1_loss(\n distributions_samples_1,\n gt_proposals_delta_samples,\n beta=self.smooth_l1_beta,\n reduction=\"sum\",\n )\n / self.bbox_cov_num_samples\n ) # First term\n\n # Final Loss\n loss_box_reg = (\n loss_first_moment_match + loss_covariance_regularize\n ) / loss_reg_normalizer\n\n elif self.bbox_cov_loss == \"pmb_negative_log_likelihood\":\n losses = self.nll_od_loss_with_nms(\n predictions, proposals, gt_instances\n )\n\n loss_box_reg = losses[\"loss_box_reg\"]\n use_nll_loss = True\n\n else:\n raise ValueError(\n \"Invalid regression loss name {}.\".format(self.bbox_cov_loss)\n )\n\n # Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more\n # elaborate regression variance losses.\n standard_regression_loss = smooth_l1_loss(\n pred_proposal_deltas,\n gt_proposals_delta,\n self.smooth_l1_beta,\n reduction=\"sum\",\n )\n standard_regression_loss = (\n standard_regression_loss / loss_reg_normalizer\n )\n\n probabilistic_loss_weight = get_probabilistic_loss_weight(\n current_step, self.annealing_step\n )\n\n loss_box_reg = (\n (1.0 - probabilistic_loss_weight) * standard_regression_loss\n + probabilistic_loss_weight * loss_box_reg\n )\n\n if use_nll_loss:\n loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls\n else:\n loss_box_reg = smooth_l1_loss(\n pred_proposal_deltas,\n gt_proposals_delta,\n self.smooth_l1_beta,\n reduction=\"sum\",\n )\n loss_box_reg = loss_box_reg / loss_reg_normalizer\n\n if use_nll_loss:\n losses[\"loss_cls\"] = loss_cls\n losses[\"loss_box_reg\"] = loss_box_reg\n else:\n losses = {\"loss_cls\": loss_cls, \"loss_box_reg\": loss_box_reg}\n\n return losses\n\n def nll_od_loss_with_nms(\n self,\n predictions: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],\n proposals: List[Instances],\n gt_instances,\n ):\n if \"log_prob\" in self.matching_distance and self.matching_distance != \"log_prob\":\n covar_scaling = float(self.matching_distance.split(\"_\")[-1])\n matching_distance = \"log_prob\"\n else:\n covar_scaling = 1\n matching_distance = self.matching_distance\n\n self.ppp_intensity_function.update_distribution()\n _, pred_deltas, _, pred_covs = predictions\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n scores = [score.clamp(1e-6, 1 - 1e-6) for score in scores]\n _, num_classes = scores[0].shape\n num_classes -= 1 # do not count background class\n image_shapes = [x.image_size for x in proposals]\n num_prop_per_image = [len(p) for p in proposals]\n\n # Apply NMS without score threshold\n instances, kept_idx = fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n 0.0,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n kept_idx = [k.unique() for k in kept_idx]\n pred_covs = pred_covs.split(num_prop_per_image)\n pred_deltas = pred_deltas.split(num_prop_per_image)\n\n kept_proposals = [\n prop.proposal_boxes.tensor[idx] for prop, idx in zip(proposals, kept_idx)\n ]\n\n pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]\n nll_pred_cov = [\n covariance_output_to_cholesky(clamp_log_variance(reshape_box_preds(cov, num_classes)))\n for cov in pred_covs\n ]\n nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]\n nll_pred_deltas = [\n reshape_box_preds(delta[kept], num_classes)\n for delta, kept in zip(pred_deltas, kept_idx)\n ]\n trans_func = lambda x,y: self.box2box_transform.apply_deltas(x,y)\n box_means = 
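The `energy_loss` branch above estimates the energy score with Monte-Carlo sample pairs: a first-moment term pulling samples toward the target, minus half the expected distance between pairs of samples, which rewards sharpness. A toy estimate with an isotropic Gaussian and L2 distance instead of smooth L1:

```python
# Toy Monte-Carlo energy score, mirroring the paired-sample trick above:
# E||X - y|| - 0.5 * E||X - X'||, with X, X' drawn from the prediction.
import torch

dist = torch.distributions.Normal(torch.zeros(4), torch.ones(4))
y = torch.zeros(4)              # hypothetical ground-truth box deltas
S = 1000

x = dist.rsample((S + 1,))      # S+1 samples; shifted views pair them up,
x1, x2 = x[:S], x[1:S + 1]      # exactly as the loss above does
score = (x1 - y).norm(dim=-1).mean() - 0.5 * (x1 - x2).norm(dim=-1).mean()
print(score)                    # lower = sharper and better-centered prediction
```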
[]\n box_chols = []\n bs = len(nll_pred_deltas)\n for i in range(bs):\n box_mean, box_chol = unscented_transform(nll_pred_deltas[i], nll_pred_cov[i], kept_proposals[i], trans_func)\n box_means.append(box_mean)\n box_chols.append(box_chol)\n \n nll_gt_classes = [instances.gt_classes for instances in gt_instances]\n gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]\n\n if self.bbox_cov_dist_type == \"gaussian\":\n regression_dist = (\n lambda x, y: distributions.multivariate_normal.MultivariateNormal(\n loc=x, scale_tril=y\n )\n )\n elif self.bbox_cov_dist_type == \"laplacian\":\n regression_dist = lambda x, y: distributions.laplace.Laplace(\n loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)\n )\n else:\n raise Exception(\n f\"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available.\"\n )\n\n if self.use_prediction_mixture:\n ppps = []\n src_boxes_tot = []\n src_box_chol_tot = []\n src_boxes_deltas_tot = []\n src_boxes_deltas_chol_tot = []\n src_scores_tot = []\n gt_box_deltas = []\n for i in range(bs):\n image_shape = image_shapes[i]\n h,w = image_shape\n scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)\n pred_box_means = box_means[i]*scaling\n pred_box_chols = torch.diag_embed(scaling)@box_chols[i]\n pred_box_deltas = nll_pred_deltas[i]\n pred_box_delta_chols = nll_pred_cov[i]\n pred_cls_probs = nll_scores[i]\n\n #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]\n max_conf = 1 - pred_cls_probs[..., -1]\n ppp_preds_idx = (\n max_conf <= self.ppp_intensity_function.ppp_confidence_thres\n )\n\n props = kept_proposals[i][ppp_preds_idx.logical_not()]\n\n # Get delta between each GT and proposal, batch-wise\n tmp = torch.stack(\n [\n self.box2box_transform.get_deltas(\n props,\n gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),\n )\n for j in range(len(gt_boxes[i]))\n ]\n )\n\n gt_box_deltas.append(\n tmp.permute(1, 0, 2)\n ) # [gt,pred,boxdim] -> [pred, gt, boxdim]\n\n gt_boxes[i] = gt_boxes[i]*scaling\n\n mixture_dict = {}\n mixture_dict[\"weights\"] = max_conf[ppp_preds_idx]\n mixture_dict[\"means\"] = pred_box_means[ppp_preds_idx, 0]\n selected_chols = pred_box_chols[ppp_preds_idx, 0]\n mixture_dict[\"covs\"] = selected_chols@(selected_chols.transpose(-1,-2))\n mixture_dict[\"cls_probs\"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]\n mixture_dict[\"reg_dist_type\"] = self.bbox_cov_dist_type\n\n if self.bbox_cov_dist_type == \"gaussian\":\n mixture_dict[\n \"reg_dist\"\n ] = distributions.multivariate_normal.MultivariateNormal\n mixture_dict[\"reg_kwargs\"] = {\n \"scale_tril\": selected_chols\n }\n elif self.bbox_cov_dist_type == \"laplacian\":\n mixture_dict[\"reg_dist\"] = distributions.laplace.Laplace\n mixture_dict[\"reg_kwargs\"] = {\n \"scale\": (\n selected_chols.diagonal(dim1=-2, dim2=-1)\n / np.sqrt(2)\n )\n }\n loss_ppp = PoissonPointUnion()\n loss_ppp.add_ppp(self.ppp_constructor({\"predictions\": mixture_dict}))\n loss_ppp.add_ppp(self.ppp_intensity_function)\n\n mixture_dict = {}\n mixture_dict[\"weights\"] = max_conf[ppp_preds_idx]\n mixture_dict[\"means\"] = pred_box_means[ppp_preds_idx, 0]\n \n scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling\n scaled_chol = scale_mat@pred_box_chols[ppp_preds_idx, 0]\n mixture_dict[\"covs\"] = (scaled_chol)@(scaled_chol.transpose(-1,-2))\n mixture_dict[\"cls_probs\"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]\n mixture_dict[\"reg_dist_type\"] = self.bbox_cov_dist_type\n\n if self.bbox_cov_dist_type == 
\"gaussian\":\n mixture_dict[\n \"reg_dist\"\n ] = distributions.multivariate_normal.MultivariateNormal\n mixture_dict[\"reg_kwargs\"] = {\n \"scale_tril\": scaled_chol\n }\n elif self.bbox_cov_dist_type == \"laplacian\":\n mixture_dict[\"reg_dist\"] = distributions.laplace.Laplace\n mixture_dict[\"reg_kwargs\"] = {\n \"scale\": (\n (scaled_chol).diagonal(dim1=-2, dim2=-1)\n / np.sqrt(2)\n )\n }\n \n match_ppp = PoissonPointUnion()\n match_ppp.add_ppp(self.ppp_constructor({\"predictions\": mixture_dict}))\n match_ppp.add_ppp(self.ppp_intensity_function)\n ppps.append({\"matching\": match_ppp, \"loss\": loss_ppp})\n\n src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])\n src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])\n src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])\n src_boxes_deltas_tot.append(pred_box_deltas[ppp_preds_idx.logical_not()])\n src_boxes_deltas_chol_tot.append(pred_box_delta_chols[ppp_preds_idx.logical_not()])\n\n nll_pred_deltas = src_boxes_deltas_tot\n nll_pred_delta_chols = src_boxes_deltas_chol_tot\n nll_pred_boxes = src_boxes_tot\n nll_pred_cov = src_box_chol_tot\n nll_scores = src_scores_tot\n use_target_delta_matching = False\n elif self.ppp_intensity_function.ppp_intensity_type == \"gaussian_mixture\":\n ppps = []\n src_boxes_tot = []\n src_box_chol_tot = []\n src_boxes_deltas_tot = []\n src_boxes_deltas_chol_tot = []\n src_scores_tot = []\n gt_box_deltas = []\n for i in range(bs):\n image_shape = image_shapes[i]\n h,w = image_shape\n scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)\n pred_box_means = box_means[i]*scaling\n pred_box_chols = torch.diag_embed(scaling)@box_chols[i]\n pred_box_deltas = nll_pred_deltas[i]\n pred_box_delta_chols = nll_pred_cov[i]\n pred_cls_probs = nll_scores[i]\n props = kept_proposals[i]\n\n # Get delta between each GT and proposal, batch-wise\n tmp = torch.stack(\n [\n self.box2box_transform.get_deltas(\n props,\n gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),\n )\n for j in range(len(gt_boxes[i]))\n ]\n )\n\n gt_box_deltas.append(\n tmp.permute(1, 0, 2)\n ) # [gt,pred,boxdim] -> [pred, gt, boxdim]\n\n gt_boxes[i] = gt_boxes[i]*scaling\n\n src_boxes_tot.append(pred_box_means)\n src_box_chol_tot.append(pred_box_chols)\n src_scores_tot.append(pred_cls_probs)\n src_boxes_deltas_tot.append(pred_box_deltas)\n src_boxes_deltas_chol_tot.append(pred_box_delta_chols)\n\n nll_pred_deltas = src_boxes_deltas_tot\n nll_pred_delta_chols = src_boxes_deltas_chol_tot\n nll_pred_boxes = src_boxes_tot\n nll_pred_cov = src_box_chol_tot\n nll_scores = src_scores_tot\n use_target_delta_matching = False\n ppps = [{\"loss\": self.ppp_intensity_function, \"matching\": self.ppp_intensity_function}]*bs\n else:\n gt_box_deltas = []\n for i in range(len(gt_boxes)):\n # Get delta between each GT and proposal, batch-wise\n tmp = torch.stack(\n [\n self.box2box_transform.get_deltas(\n kept_proposals[i],\n gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),\n )\n for j in range(len(gt_boxes[i]))\n ]\n )\n\n gt_box_deltas.append(\n tmp.permute(1, 0, 2)\n ) # [gt,pred,boxdim] -> [pred, gt, boxdim]\n \n use_target_delta_matching = True\n ppps = [{\"loss\": self.ppp_intensity_function, \"matching\": self.ppp_intensity_function}]*bs\n nll_pred_delta_chols = nll_pred_cov\n nll_pred_deltas = nll_pred_deltas\n nll_pred_boxes = nll_pred_deltas\n nll_pred_cov = nll_pred_cov\n \n nll, associations, decompositions = negative_log_likelihood(\n nll_scores,\n nll_pred_boxes,\n nll_pred_cov,\n 
gt_boxes,\n nll_gt_classes,\n image_shapes,\n regression_dist,\n ppps,\n self.nll_max_num_solutions,\n scores_have_bg_cls=True,\n target_deltas=gt_box_deltas,\n matching_distance=matching_distance,\n use_target_delta_matching=use_target_delta_matching,\n pred_deltas=nll_pred_deltas,\n pred_delta_chols=nll_pred_delta_chols,\n )\n\n # Save some stats\n storage = get_event_storage()\n num_classes = self.num_classes\n mean_variance = np.mean(\n [\n cov.diagonal(dim1=-2,dim2=-1)\n .pow(2)\n .mean()\n .item()\n for cov in nll_pred_cov\n if cov.shape[0] > 0\n ]\n )\n storage.put_scalar(\"nll/mean_covariance\", mean_variance)\n ppp_intens = np.sum([ppp[\"loss\"].integrate(\n torch.as_tensor(image_shapes).to(device), num_classes\n )\n .mean()\n .item()\n for ppp in ppps\n ])\n storage.put_scalar(\"nll/ppp_intensity\", ppp_intens)\n\n reg_loss = np.mean(\n [\n np.clip(\n decomp[\"matched_bernoulli_reg\"][0]\n / (decomp[\"num_matched_bernoulli\"][0] + 1e-6),\n -1e25,\n 1e25,\n )\n for decomp in decompositions\n ]\n )\n cls_loss_match = np.mean(\n [\n np.clip(\n decomp[\"matched_bernoulli_cls\"][0]\n / (decomp[\"num_matched_bernoulli\"][0] + 1e-6),\n -1e25,\n 1e25,\n )\n for decomp in decompositions\n ]\n )\n cls_loss_no_match = np.mean(\n [\n np.clip(\n decomp[\"unmatched_bernoulli\"][0]\n / (decomp[\"num_unmatched_bernoulli\"][0] + 1e-6),\n -1e25,\n 1e25,\n )\n for decomp in decompositions\n ]\n )\n\n # Collect all losses\n losses = dict()\n losses[\"loss_box_reg\"] = nll\n # Add losses for logging, these do not propagate gradients\n losses[\"loss_regression\"] = torch.tensor(reg_loss).to(nll.device)\n losses[\"loss_cls_matched\"] = torch.tensor(cls_loss_match).to(nll.device)\n losses[\"loss_cls_unmatched\"] = torch.tensor(cls_loss_no_match).to(nll.device)\n\n return losses\n\n def inference(self, predictions, proposals):\n \"\"\"\n Returns:\n list[Instances]: same as `fast_rcnn_inference`.\n list[Tensor]: same as `fast_rcnn_inference`.\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n return fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n def predict_boxes_for_gt_classes(self, predictions, proposals):\n \"\"\"\n Returns:\n list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\n the number of predicted objects for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n scores, proposal_deltas = predictions\n proposal_boxes = [p.proposal_boxes for p in proposals]\n proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor\n N, B = proposal_boxes.shape\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas, proposal_boxes\n ) # Nx(KxB)\n\n K = predict_boxes.shape[1] // B\n if K > 1:\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\n # Some proposals are ignored or have a background class. 
Their gt_classes\n # cannot be used as index.\n gt_classes = gt_classes.clamp_(0, K - 1)\n\n predict_boxes = predict_boxes.view(N, K, B)[\n torch.arange(N, dtype=torch.long, device=predict_boxes.device),\n gt_classes,\n ]\n num_prop_per_image = [len(p) for p in proposals]\n return predict_boxes.split(num_prop_per_image)\n\n def predict_boxes(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes\n for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of predicted objects for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n _, proposal_deltas, _, _ = predictions\n num_prop_per_image = [len(p) for p in proposals]\n proposal_boxes = [p.proposal_boxes for p in proposals]\n proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas, proposal_boxes\n ) # Nx(KxB)\n return predict_boxes.split(num_prop_per_image)\n\n def predict_probs(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions.\n\n Returns:\n list[Tensor]: A list of Tensors of predicted class probabilities for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of predicted objects\n for image i.\n \"\"\"\n scores, _, _, _ = predictions\n num_inst_per_image = [len(p) for p in proposals]\n if self.cls_var_loss == \"evidential\":\n alphas = get_dir_alphas(scores)\n dirichlet_s = alphas.sum(1).unsqueeze(1)\n # Compute probabilities\n probs = alphas / dirichlet_s\n else:\n probs = F.softmax(scores, dim=-1)\n return probs.split(num_inst_per_image, dim=0)\n\n\n# Todo: new detectron interface required copying code. 
Check for better\n# way to inherit from FastRCNNConvFCHead.\n@ROI_BOX_HEAD_REGISTRY.register()\nclass DropoutFastRCNNConvFCHead(nn.Module):\n \"\"\"\n A head with several 3x3 conv layers (each followed by norm & relu) and then\n several fc layers (each followed by relu) and dropout.\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape: ShapeSpec,\n *,\n conv_dims: List[int],\n fc_dims: List[int],\n conv_norm=\"\",\n dropout_rate,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature.\n conv_dims (list[int]): the output dimensions of the conv layers\n fc_dims (list[int]): the output dimensions of the fc layers\n conv_norm (str or callable): normalization for the conv layers.\n See :func:`detectron2.layers.get_norm` for supported types.\n dropout_rate (float): p for dropout layer\n \"\"\"\n super().__init__()\n assert len(conv_dims) + len(fc_dims) > 0\n\n self.dropout_rate = dropout_rate\n self.use_dropout = self.dropout_rate != 0.0\n\n self._output_size = (\n input_shape.channels,\n input_shape.height,\n input_shape.width,\n )\n\n self.conv_norm_relus = []\n for k, conv_dim in enumerate(conv_dims):\n conv = Conv2d(\n self._output_size[0],\n conv_dim,\n kernel_size=3,\n padding=1,\n bias=not conv_norm,\n norm=get_norm(conv_norm, conv_dim),\n activation=F.relu,\n )\n self.add_module(\"conv{}\".format(k + 1), conv)\n self.conv_norm_relus.append(conv)\n self._output_size = (conv_dim, self._output_size[1], self._output_size[2])\n\n self.fcs = []\n self.fcs_dropout = []\n for k, fc_dim in enumerate(fc_dims):\n fc = Linear(np.prod(self._output_size), fc_dim)\n fc_dropout = nn.Dropout(p=self.dropout_rate)\n self.add_module(\"fc{}\".format(k + 1), fc)\n self.add_module(\"fc_dropout{}\".format(k + 1), fc_dropout)\n self.fcs.append(fc)\n self.fcs_dropout.append(fc_dropout)\n self._output_size = fc_dim\n\n for layer in self.conv_norm_relus:\n weight_init.c2_msra_fill(layer)\n for layer in self.fcs:\n weight_init.c2_xavier_fill(layer)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV\n conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM\n num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC\n fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM\n return {\n \"input_shape\": input_shape,\n \"conv_dims\": [conv_dim] * num_conv,\n \"fc_dims\": [fc_dim] * num_fc,\n \"conv_norm\": cfg.MODEL.ROI_BOX_HEAD.NORM,\n \"dropout_rate\": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,\n }\n\n def forward(self, x):\n for layer in self.conv_norm_relus:\n x = layer(x)\n if len(self.fcs):\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n for layer, dropout in zip(self.fcs, self.fcs_dropout):\n x = F.relu(dropout(layer(x)))\n return x\n\n @property\n def output_shape(self):\n \"\"\"\n Returns:\n ShapeSpec: the output feature shape\n \"\"\"\n o = self._output_size\n if isinstance(o, int):\n return ShapeSpec(channels=o)\n else:\n return ShapeSpec(channels=o[0], height=o[1], width=o[2])\n","repo_name":"georghess/pmb-nll","sub_path":"src/probabilistic_modeling/probabilistic_generalized_rcnn.py","file_name":"probabilistic_generalized_rcnn.py","file_ext":"py","file_size_in_byte":66644,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"73765280649","text":"from typing import List, Tuple, Union\n\nimport numpy as np\nfrom numpy import ndarray\n\nfrom maximum_matching.algorithms.algorithm_base import AlgorithmBase\nfrom maximum_matching.graphs.graph_base import BipartiteSet, 
GraphBase\n\n\nclass MaxFlow(AlgorithmBase):\n \"\"\"\n Edmonds Karp algorithm implementation of the Maximum Flow problem\n \"\"\"\n\n INF = 1000000009\n\n def __init__(self) -> None:\n super().__init__()\n # self.vec = np.empty(1) # graph on which the max flow will run\n # self.visited = np.empty(1)\n # self.capacity = np.empty(1) # 2D array for capacity\n # self.bfs_path = np.empty(1)\n\n @staticmethod\n def bfs(start: int, endd: int, n: int, vec, capacity) -> Tuple[bool, np.ndarray]:\n bfs_path = np.full(shape=n, fill_value=-1)\n visited = np.full(shape=n, fill_value=False, dtype=bool)\n visited[start] = True\n\n que = [start]\n found = False\n\n while len(que) > 0 and not found:\n u = que.pop(0)\n\n if found:\n break\n\n vec_u = np.where(vec[u] > -1)[0]\n vec_u_2 = vec_u[np.logical_and(np.logical_not(visited[vec_u]), capacity[u][vec_u] > 0)]\n\n visited[vec_u_2] = True\n bfs_path[vec_u_2] = u\n que += list(vec_u_2)\n\n if endd in vec_u_2:\n found = True\n break\n\n return found, bfs_path\n\n @staticmethod\n def run_max_flow(src: int, sink: int, n: int, vec, capacity) -> int:\n max_flow = 0\n min_capacity = MaxFlow.INF\n\n bfs_res, bfs_path = MaxFlow.bfs(src, sink, n, vec, capacity)\n while bfs_res is True:\n\n x = sink\n while x != src:\n min_capacity = min(min_capacity, capacity[bfs_path[x]][x])\n x = bfs_path[x]\n\n x = sink\n while x != src and min_capacity != MaxFlow.INF:\n capacity[bfs_path[x]][x] -= min_capacity\n capacity[x][bfs_path[x]] += min_capacity\n x = bfs_path[x]\n\n max_flow += min_capacity\n min_capacity = MaxFlow.INF\n bfs_res, bfs_path = MaxFlow.bfs(src, sink, n, vec, capacity)\n\n return max_flow\n\n @staticmethod\n def find_max_bipartite(graph: GraphBase, sourceSinkCap: int = 1) -> \\\n Tuple[int, Union[List, None]]:\n\n left_side = graph.get_independent_set(BipartiteSet.left)\n right_side = graph.get_independent_set(BipartiteSet.right)\n\n source_node = graph.size\n sink_node = graph.size + 1\n total_nodes = graph.size + 2\n\n vec = np.full(shape=(total_nodes, total_nodes), fill_value=-1, dtype=int)\n capacity = np.zeros((total_nodes, total_nodes))\n\n # connecting source_node with left side\n vec[source_node, left_side] = 1\n vec[left_side, source_node] = 1\n capacity[left_side, source_node] = sourceSinkCap\n capacity[source_node, left_side] = sourceSinkCap\n\n # connecting right_side with sink_node\n vec[sink_node, right_side] = 1\n vec[right_side, sink_node] = 1\n capacity[right_side, sink_node] = sourceSinkCap\n capacity[sink_node, right_side] = sourceSinkCap\n\n for v in right_side:\n neighbours = graph.list(v)\n vec[neighbours, v] = 1\n vec[v, neighbours] = 1\n capacity[v, neighbours] = 1\n capacity[neighbours, v] = 1\n\n max_matches = MaxFlow.run_max_flow(source_node, sink_node, total_nodes, vec, capacity)\n\n trends = [[graph.size, max_matches]] * (graph.size_right + 1)\n\n return max_matches, trends\n\n @staticmethod\n def find_max_bipartite_with_cap(graph: GraphBase, sourceSinkCap: int = 1) -> \\\n Tuple[int, List, ndarray]:\n\n left_side = graph.get_independent_set(BipartiteSet.left)\n right_side = graph.get_independent_set(BipartiteSet.right)\n\n source_node = graph.size\n sink_node = graph.size + 1\n total_nodes = graph.size + 2\n\n vec = np.full(shape=(total_nodes, total_nodes), fill_value=-1, dtype=int)\n capacity = np.zeros((total_nodes, total_nodes))\n\n # connecting source_node with left side\n vec[source_node, left_side] = 1\n vec[left_side, source_node] = 1\n capacity[left_side, source_node] = sourceSinkCap\n capacity[source_node, left_side] = 
sourceSinkCap\n\n        # connecting right_side with sink_node\n        vec[sink_node, right_side] = 1\n        vec[right_side, sink_node] = 1\n        capacity[right_side, sink_node] = sourceSinkCap\n        capacity[sink_node, right_side] = sourceSinkCap\n\n        for v in right_side:\n            neighbours = graph.list(v)\n            vec[neighbours, v] = 1\n            vec[v, neighbours] = 1\n            capacity[v, neighbours] = 1\n            capacity[neighbours, v] = 1\n\n        max_matches = MaxFlow.run_max_flow(source_node, sink_node, total_nodes, vec, capacity)\n\n        trends = [[graph.size, max_matches]] * (graph.size_right + 1)\n\n        return max_matches, trends, capacity\n\n    def run(self, graph: GraphBase, **kwargs) -> Tuple[int, Union[List, None]]:\n        max_matches, trends = self.find_max_bipartite(graph)\n\n        return max_matches, trends\n","repo_name":"habedi77/maximum-matching","sub_path":"maximum_matching/algorithms/max_flow.py","file_name":"max_flow.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20816387652","text":"import unittest\n\nimport six\n\nfrom traits.api import Any, Event, HasTraits, NO_COMPARE, \\\n    pop_exception_handler, push_exception_handler, Instance\n\nfrom ..binder import Binder\nfrom ..binding import Binding, Factory, PulledFrom, PushedTo, SetOnceTo, \\\n    SyncedWith, find_ext_attrs\n\n\nclass DummyBinder(Binder):\n    x = Any(comparison_mode=NO_COMPARE)\n    child = Instance(HasTraits)\n\n\nclass DummyModel(HasTraits):\n    y = Any(10, comparison_mode=NO_COMPARE)\n    event = Event()\n\n\nclass TestBinding(unittest.TestCase):\n\n    def setUp(self):\n        self.binder = DummyBinder()\n        self.model = DummyModel()\n        self.blah = DummyBinder()\n        self.context = dict(\n            object=self.model,\n            blah=self.blah,\n        )\n        push_exception_handler(lambda *args, **kwds: None,\n                               reraise_exceptions=True)\n\n    def tearDown(self):\n        pop_exception_handler()\n\n    def test_find_ext_attr(self):\n        pairs = [\n            ('object', []),  # no dots\n            ('object.foo', ['object.foo']),\n            ('object.foo.bar', ['object.foo.bar']),\n            ('object.foo + 10', ['object.foo']),\n            ('object.foo + str(int)', ['object.foo']),\n            ('object.foo + str(int)', ['object.foo']),\n            ('object.foo + handler.bar', ['object.foo', 'handler.bar']),\n            ('object.foo + handler.func(10)', ['object.foo', 'handler.func']),\n            ('object.foo + \"ohm.m\"', ['object.foo']),\n            ('object.foo + \"ohm.m\".format.__name__', ['object.foo']),\n            ('(object.foo).text()', ['object.foo']),\n        ]\n        for expr, ext_attrs in pairs:\n            found = find_ext_attrs(expr)\n            six.assertCountEqual(self, found, ext_attrs)\n\n    def test_parse_binding(self):\n        pairs = [\n            ('text = object.text',\n             SetOnceTo('text', 'object.text')),\n            ('text=object.text',\n             SetOnceTo('text', 'object.text')),\n            ('checked = object.text == \"blah\"',\n             SetOnceTo('checked', 'object.text == \"blah\"')),\n            ('text = object.func(value=\"blah\")',\n             SetOnceTo('text', 'object.func(value=\"blah\")')),\n            ('text << object.text',\n             PulledFrom('text', 'object.text')),\n            ('text<<object.text',\n             PulledFrom('text', 'object.text')),\n            ('value << object.value >> 2',\n             PulledFrom('value', 'object.value >> 2')),\n            ('text >> object.text',\n             PushedTo('text', 'object.text')),\n            ('text>>object.text',\n             PushedTo('text', 'object.text')),\n            ('value >> object.value >> 2',\n             PushedTo('value', 'object.value >> 2')),\n            ('value >> object.value << 2',\n             PushedTo('value', 'object.value << 2')),\n            ('value := object.value',\n             SyncedWith('value', 'object.value')),\n            ('value:=object.value',\n             SyncedWith('value', 'object.value')),\n            ('lineEdit.text << object.text',\n             PulledFrom('lineEdit.text', 'object.text')),\n        ]\n        for text, binding in pairs:\n            
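# each textual form should parse to an equal Binding (and hash equally)\n            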
parsed = Binding.parse(text)\n self.assertEqual(parsed, binding)\n self.assertEqual(hash(parsed), hash(binding))\n self.assertFalse(parsed != binding)\n self.assertFalse(parsed == 10)\n self.assertIs(Binding.parse(binding), binding)\n factory = Factory('text', lambda: 'foo')\n self.assertIs(Binding.parse(factory), factory)\n for bad in [None, ('value', 'object.value')]:\n with self.assertRaises(TypeError):\n Binding.parse(bad)\n\n def test_set_once_to(self):\n binding = SetOnceTo('x', 'object.y')\n binding.bind(self.binder, self.context)\n self.assertEqual(self.binder.x, 10)\n self.model.y = 20\n self.assertEqual(self.binder.x, 10)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.x, 10)\n\n def test_factory(self):\n binding = Factory('x', lambda: self.model.y)\n binding.bind(self.binder, self.context)\n self.assertEqual(self.binder.x, 10)\n self.model.y = 20\n self.assertEqual(self.binder.x, 10)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.x, 10)\n\n def test_pulled_from_trait(self):\n binding = PulledFrom('x', 'object.y')\n binding.bind(self.binder, self.context)\n self.assertEqual(self.binder.x, 10)\n self.model.y = 20\n self.assertEqual(self.binder.x, 20)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.x, 20)\n\n def test_pulled_from_expression(self):\n binding = PulledFrom('x', 'object.y + 5')\n binding.bind(self.binder, self.context)\n self.assertEqual(self.binder.x, 15)\n self.model.y = 20\n self.assertEqual(self.binder.x, 25)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.x, 25)\n\n def test_pulled_from_event(self):\n binding = PulledFrom('x', 'object.event')\n binding.bind(self.binder, self.context)\n self.assertIsNone(self.binder.x)\n self.model.event = 10\n self.assertEqual(self.binder.x, 10)\n binding.unbind()\n self.model.event = 20\n self.assertEqual(self.binder.x, 10)\n\n def test_pushed_to(self):\n binding = PushedTo('x', 'object.y')\n binding.bind(self.binder, self.context)\n # PushedTo does not initialize the model.\n self.assertEqual(self.model.y, 10)\n self.binder.x = 20\n self.assertEqual(self.model.y, 20)\n binding.unbind()\n self.binder.x = 30\n self.assertEqual(self.model.y, 20)\n\n def test_synced_with(self):\n binding = SyncedWith('x', 'object.y')\n binding.bind(self.binder, self.context)\n self.assertEqual(self.binder.x, 10)\n self.model.y = 20\n self.assertEqual(self.binder.x, 20)\n self.binder.x = 25\n self.assertEqual(self.model.y, 25)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.x, 25)\n self.binder.x = 35\n self.assertEqual(self.model.y, 30)\n\n def test_child(self):\n self.binder.child = DummyBinder()\n binding = SyncedWith('child.x', 'object.y')\n binding.bind(self.binder, self.context)\n self.assertIsNone(self.binder.x)\n self.assertEqual(self.binder.child.x, 10)\n self.model.y = 20\n self.assertEqual(self.binder.child.x, 20)\n self.binder.child.x = 25\n self.assertEqual(self.model.y, 25)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.binder.child.x, 25)\n self.binder.child.x = 35\n self.assertEqual(self.model.y, 30)\n\n def test_binder_in_context(self):\n binding = PulledFrom('blah.x', 'object.y')\n binding.bind(self.binder, self.context)\n self.assertEqual(self.blah.x, 10)\n self.model.y = 20\n self.assertEqual(self.blah.x, 20)\n binding.unbind()\n self.model.y = 30\n self.assertEqual(self.blah.x, 
20)\n","repo_name":"enthought/qt_binder","sub_path":"qt_binder/tests/test_binding.py","file_name":"test_binding.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"} +{"seq_id":"128173121","text":"# Solved on 2022. 1. 29.\n# 2583: Finding Areas\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef bfs(x, y):\n    queue = deque()\n    queue.append((x, y))\n    dx, dy = [1, -1, 0, 0], [0, 0, 1, -1]\n    count = 2\n    graph[x][y] = 1\n    while queue:\n        x, y = queue.popleft()\n        for i in range(4):\n            nx, ny = x + dx[i], y + dy[i]\n            if nx < 0 or ny < 0 or nx >= M or ny >= N:\n                continue\n            if graph[nx][ny] == 0:\n                graph[nx][ny] = count\n                count += 1\n                queue.append((nx, ny))\n    return count - 1\n\n\nM, N, K = map(int, input().split())\ngraph = [[0] * N for _ in range(M)]\nfor _ in range(K):\n    ax, ay, bx, by = map(int, input().split())\n    for i in range(ay, by):\n        for j in range(ax, bx):\n            graph[i][j] = -1\n\nres = []\nfor i in range(M):\n    for j in range(N):\n        if graph[i][j] == 0:\n            res.append(bfs(i, j))\n\nres.sort()\nprint(len(res))\nprint(*res)\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/DFS_and_BFS/2583.py","file_name":"2583.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42149042602","text":"# mypy: ignore-errors\nimport math\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom typing_extensions import Self\n\nfrom domi.utils import find_multiple, domi_config\n\n\"\"\"\nThis cache stores the mask used in the attention computation; the mask does\nnot change throughout the sequence generation process.\n\nIt is a 4D tensor of shape (1, 1, block_size, block_size), built with\ntorch.tril, so mask_cache[0][0][c][d] represents whether token c may attend\nto token d. A value of 1 (True) means attention is allowed (d <= c, a past\nor current position); a value of 0 (False) means it is masked out (d > c, a\nfuture position).\n\nIn the context of the sentence \"India is my country\",\nwith a block size of 5, the tensor would be of shape [1, 1, 5, 5].\nFor instance, mask_cache[0][0][1][4] tells us whether the second token can\nattend to the fifth token. Since the fifth token lies in the future relative\nto the second, the mask value is 0, i.e. attention is disallowed under the\ncausal mask.\n\"\"\"\nMaskCache = torch.Tensor\n\n\"\"\"\nThis cache stores the rotary position embeddings (RoPE), avoiding computing them\nagain and again, as they do not change between forward passes. 
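For a hypothetical position p and frequency theta_i, the cache simply stores the pair (cos(p*theta_i), sin(p*theta_i)), so applying RoPE later is just a rotation by precomputed angles.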
\n\nThe cache has shape [block_size, head_size // 2, 2], where\nblock_size is the maximum sequence length, head_size is n_embd // n_head,\nand the last dimension holds the cosine and sine components.\n\"\"\"\nRoPECache = torch.Tensor\n\n\"\"\"\nThis cache stores the key and value tensors used in the self-attention\nmechanism of the transformer; the key and value of each position up to the\ncurrent position remain the same across decoding steps.\n\nEach tensor is a 4D tensor of size [B, n_head, max_seq_length, \nhead_size], where B is the batch size, n_head is the number of attention \nheads, max_seq_length is the maximum sequence length, and head_size \nis the size of each head (which is n_embd divided by n_head).\n\"\"\"\nKVCache = Tuple[torch.Tensor, torch.Tensor]\n\n\n@dataclass\nclass DomiConfig:\n    \"\"\"\n    block_size: the maximum length of the input sequence\n    \n    vocab_size: the size of the vocab the model is using. This\n    directly affects the size of the embedding layer, as each token in\n    the vocab needs an embedding\n    \n    padded_vocab_size: this param is used for tensor operations that\n    may need the vocab size to be a multiple of a certain number. If it is\n    not provided, it is computed using the 'find_multiple' function to be \n    the smallest multiple of 64 that is greater than or equal to vocab_size\n    \n    n_layer: the number of layers in the transformer model, where each\n    layer is a transformer block that includes self-attention and a\n    feed-forward NN\n    \n    n_head: the number of attention heads in the self-attention mechanism.\n    Each head allows the model to focus on different parts of the input when\n    computing the attention output\n    \n    n_embd: dimensionality of the embeddings in the model. This is also the \n    size of the hidden state in the transformer layers\n    \n    __post_init__: a special Python method that is called after the dataclass is\n    initialized. 
Here it is used to compute 'padded_vocab_size'.\n    \n    from_name -> Self: allows creating an instance of DomiConfig by\n    providing the name of a preset configuration, in our case from domi_config\n    (imported from domi.utils), where keys are config names and values are\n    dicts of params. The method fetches the params for the specified config\n    and uses them to create an instance of DomiConfig.\n    \n    \"\"\"\n    block_size: int = 2048\n    vocab_size: int = 32000\n    padded_vocab_size: Optional[int] = None\n    n_layer: int = 32\n    n_head: int = 32\n    n_embd: int = 4096\n\n    def __post_init__(self):\n        if self.padded_vocab_size is None:\n            self.padded_vocab_size = find_multiple(self.vocab_size, 64)\n\n    @classmethod\n    def from_name(cls, name: str) -> Self:\n        return cls(**domi_config[name])\n\n\nclass DOMI(nn.Module):\n    def __init__(self, config: DomiConfig) -> None:\n        \n        super().__init__()\n        assert config.padded_vocab_size is not None\n        self.config = config\n        \"\"\"\n        lm_head maps from the dimension of the model's internal representation \n        (config.n_embd) to the size of the vocabulary (config.padded_vocab_size), \n        allowing the model to produce a probability distribution \n        over all possible words in the vocabulary for its output.\n        \"\"\"\n        self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=False)\n        self.transformer = nn.ModuleDict(\n            dict(\n                wte=nn.Embedding(config.padded_vocab_size, config.n_embd),\n                h=nn.ModuleList(Block(config) for _ in range(config.n_layer)),\n                ln_f=RMSNorm(config.n_embd),\n            )\n        )\n\n        self.rope_cache: Optional[RoPECache] = None\n        self.mask_cache: Optional[MaskCache] = None\n        self.kv_caches: List[KVCache] = []\n\n    def _init_weights(self, module: nn.Module) -> None:\n        \"\"\"\n        This method initializes the weights of the model's layers. \n        It uses the normal distribution to initialize weights, with the same\n        scaling for both linear and embedding layers.\n        \"\"\"\n\n        if isinstance(module, nn.Linear):\n            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))\n        elif isinstance(module, nn.Embedding):\n            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))\n    \n\n    def forward(\n        self, idx: torch.Tensor, max_seq_length: Optional[int] = None, input_pos: Optional[torch.Tensor] = None\n    ) -> Union[torch.Tensor, Tuple[torch.Tensor, List[KVCache]]]:\n        \"\"\"\n        Let's assume we're dealing with a batch of size B, each consisting of a sequence of length T, \n        and we're using the configuration \"7B\" as the DomiConfig for this model. So, according to the \"7B\" \n        configuration, n_embd=4096, n_layer=32, n_head=32, vocab_size=32000, padded_vocab_size will be the \n        smallest multiple of 64 greater than or equal to vocab_size, and block_size=2048.\n        \n        Input: The forward pass receives an input tensor idx with shape (B, T), \n        where B is the batch size and T is the sequence length. For instance, let's say our batch size \n        is 10 and sequence length is 2000, so the input tensor size is (10, 2000).\n        \n        Initial checks: The forward function first verifies the lengths of the sequences, block size, \n        and maximum sequence length. 
If these values are not set, they default to the block_size \n        from the DomiConfig, which is 2048.\n        \n        The forward pass then builds the RoPE and mask caches if they do not exist yet.\n        \n        If input_pos is not None, it uses the PyTorch function 'index_select' to select positional encodings \n        and masks corresponding to the input positions. 'index_select' takes an input tensor, a dimension along\n        which to index (0 for rope and 2 for masks) and the indices to select. In this case, input_pos is a\n        tensor of indices, so it selects the position encodings and masks corresponding to these positions.\n        \"\"\"\n        B, T = idx.size()\n\n        block_size = self.config.block_size\n        if max_seq_length is None:\n            max_seq_length = block_size\n        assert T <= max_seq_length, f\"Cannot forward sequence of length {T}, max seq length is only {max_seq_length}\"\n        assert max_seq_length <= block_size, f\"Cannot attend to {max_seq_length}, block size is only {block_size}\"\n        assert T <= block_size, f\"Cannot forward sequence of length {T}, block size is only {block_size}\"\n\n        if self.rope_cache is None:\n            self.rope_cache = self.build_rope_cache(idx)\n        if self.mask_cache is None:\n            self.mask_cache = self.build_mask_cache(idx)\n\n        if input_pos is not None:\n            # Select the positional encodings that correspond to the positions in 'input_pos'\n            rope = self.rope_cache.index_select(0, input_pos)\n            # Select the attention masks corresponding to the positions in 'input_pos'\n            mask = self.mask_cache.index_select(2, input_pos)\n            # Trim the mask to the maximum sequence length.\n            mask = mask[:, :, :, :max_seq_length]\n        else:\n            rope = self.rope_cache[:T]\n            mask = self.mask_cache[:, :, :T, :T]\n\n        # forward the model itself\n        \"\"\"\n        The input tensor 'idx' (batch_size, sequence_length) is passed to the embedding layer;\n        this results in a 3-dim tensor 'x' of shape (b, t, n_embd).\n        \"\"\"\n        x = self.transformer.wte(idx)  # token embeddings of shape (b, t, n_embd)\n\n        if input_pos is None:  # proxy for use_cache=False\n            \"\"\"\n            processes the sequence without caching\n            \"\"\"\n            for block in self.transformer.h:\n                x, _ = block(x, rope, mask, max_seq_length)\n        else:\n            if not self.kv_caches:\n                \"\"\"\n                If the cache is not initialized, it is created with the shape below.\n                The cache is a list of tuples (key and value) of zero tensors, with each tuple\n                corresponding to a layer in the transformer model.\n                \n                The input tensor is then passed through each transformer block, but this time\n                each block also receives the input positions and the corresponding cache. The key-value\n                caches returned by the blocks are saved back into kv_caches.\n                \"\"\"\n                head_size = self.config.n_embd // self.config.n_head\n                cache_shape = (B, self.config.n_head, max_seq_length, head_size)\n                self.kv_caches = [\n                    (torch.zeros(cache_shape, device=x.device, dtype=x.dtype), torch.zeros(cache_shape, device=x.device, dtype=x.dtype))\n                    for _ in range(self.config.n_layer)\n                ]\n            for i, block in enumerate(self.transformer.h):\n                x, self.kv_caches[i] = block(x, rope, mask, max_seq_length, input_pos, self.kv_caches[i])\n\n\n        # The output from the transformer blocks is then normalized by the final RMSNorm layer. \n        x = self.transformer.ln_f(x)\n\n        logits = self.lm_head(x)  # (b, t, vocab_size)\n\n        return logits\n    \n    @classmethod\n    def from_name(cls, name: str) -> Self:\n        \"\"\"\n        Gets the configuration dict to use from the name, e.g. 
7B.\n        \"\"\"\n        return cls(DomiConfig.from_name(name))\n\n    def build_rope_cache(self, idx: torch.Tensor) -> RoPECache:\n        \"\"\"\n        Calls the module-level build_rope_cache function below to\n        build the cache for RoPE.\n        \"\"\"\n        return build_rope_cache(\n            seq_len=self.config.block_size,\n            n_elem=self.config.n_embd // self.config.n_head,\n            dtype=idx.dtype,\n            device=idx.device,\n        )\n\n    def build_mask_cache(self, idx: torch.Tensor) -> MaskCache:\n        \"\"\"\n        This function builds a cache for the attention masks used in the transformer\n        model. It is used to prevent attention to later positions in the\n        sequence. \n        \n        A matrix of ones with dimension (block_size, block_size) is created.\n        The upper triangular part of the matrix is set to zero using the\n        'torch.tril()' function, which means only the lower triangle and the\n        diagonal are filled with ones.\n        The resulting mask is unsqueezed twice to add two dimensions at the front, \n        resulting in a 4D tensor of shape (1, 1, block_size, block_size).\n        \"\"\"\n        ones = torch.ones((self.config.block_size, self.config.block_size), device=idx.device, dtype=torch.bool)\n        return torch.tril(ones).unsqueeze(0).unsqueeze(0)\n\n    def reset_cache(self) -> None:\n        \"\"\"\n        Used to reset the caches.\n        It also has a specific condition for the 'xla' device type,\n        in which case the rope and mask caches are set to None. XLA refers to\n        Google's Accelerated Linear Algebra, a domain-specific compiler for linear\n        algebra that can be used to accelerate tensor computations.\n        \"\"\"\n        self.kv_caches.clear()\n        if self.mask_cache.device.type == \"xla\":\n            # https://github.com/Lightning-AI/lit-parrot/pull/83#issuecomment-1558150179\n            self.rope_cache = None\n            self.mask_cache = None\n    \n    \nclass CausalSelfAttention(nn.Module):\n    \"\"\"\n    In the constructor, two linear layers c_attn and c_proj are initialized.\n    The c_attn layer transforms the input to create queries, keys and values\n    for each attention head, while the 'c_proj' layer transforms the output\n    of the attention mechanism back to its original dimension.\n    \n    In the forward method, the input x is first transformed by c_attn into\n    queries, keys and values. Then the apply_rope function applies the rotary\n    position embedding to both queries and keys. \n    \n    Then the dimensions of Q, K, V are permuted so that the head dimension comes\n    before the sequence length. This is done to facilitate the subsequent matrix \n    operations for calculating the attention scores.\n    \n    If the cache is not 'None' we are using cached results to improve computational\n    efficiency, typically when generating sequences token by token (as in autoregressive\n    decoding). In this case, the cache is updated to store the current keys and values.\n    \n    The scaled dot-product attention computes the dot product of the queries and the keys,\n    scales it, applies a mask, and then applies a softmax function to obtain the final\n    attention scores. These scores are used to create a weighted sum of the values 'v',\n    yielding the output of the attention mechanism 
'y'.\n    \n    The output tensor y is reshaped to combine the results from different attention heads\n    side by side, and then it is transformed back to the original embedding dimension by\n    the 'c_proj' layer.\n    \"\"\"\n    def __init__(self, config: DomiConfig) -> None:\n        super().__init__()\n        assert config.n_embd % config.n_head == 0\n\n        # key, query, value projections for all heads, but in a batch\n        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)\n        # output projection\n        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)\n\n        self.n_head = config.n_head\n        self.n_embd = config.n_embd\n        self.block_size = config.block_size\n\n    def forward(\n        self,\n        x: torch.Tensor,\n        rope: RoPECache,\n        mask: MaskCache,\n        max_seq_length: int,\n        input_pos: Optional[torch.Tensor] = None,\n        kv_cache: Optional[KVCache] = None,\n    ) -> Tuple[torch.Tensor, Optional[KVCache]]:\n        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)\n\n        # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)\n\n        head_size = C // self.n_head\n        k = k.view(B, T, self.n_head, head_size)\n        q = q.view(B, T, self.n_head, head_size)\n        v = v.view(B, T, self.n_head, head_size)\n\n        q = apply_rope(q, rope)\n        k = apply_rope(k, rope)\n\n        k = k.transpose(1, 2)  # (B, nh, T, hs)\n        q = q.transpose(1, 2)  # (B, nh, T, hs)\n        v = v.transpose(1, 2)  # (B, nh, T, hs)\n\n        if kv_cache is not None:\n            # Unpack the cached keys and values from the previous steps;\n            # each has size (batch_size, num_heads, max_seq_length, head_size)\n            cache_k, cache_v = kv_cache\n            # Check whether the token limit is reached; if so, the cache is full\n            # and we need to make room for new tokens\n            if input_pos[-1] >= max_seq_length:\n                input_pos = torch.tensor(max_seq_length - 1, device=input_pos.device)\n                # Shift all entries in the cache one position to the left:\n                # the oldest key-value pair is discarded and a slot is opened up at the end\n                cache_k = torch.roll(cache_k, -1, dims=2)\n                cache_v = torch.roll(cache_v, -1, dims=2)\n            # Update the cache at the position given by input_pos with the newly computed \n            # keys 'k' and values 'v'\n            k = cache_k.index_copy(2, input_pos, k)\n            v = cache_v.index_copy(2, input_pos, v)\n            kv_cache = k, v\n\n        # efficient attention using Flash Attention CUDA kernels\n        y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)\n\n        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side\n\n        # output projection\n        y = self.c_proj(y)\n\n        return y, kv_cache\n    \n\nclass Block(nn.Module):\n    \"\"\"\n    In the forward method, RMS normalization is first applied to the\n    input 'x', and the normalized data is passed through the self-attention\n    mechanism. 'rope' and 'mask' are the positional encodings and attention mask,\n    respectively, and kv_cache is a cache for the key-value pairs in the\n    self-attention mechanism. The output 'h' is the result of applying the\n    self-attention, and new_kv_cache is the updated cache of key-value pairs.\n    \n    x = x + h. This line adds the output of the self-attention layer to the\n    original input. This is the so-called residual connection, which helps\n    mitigate the vanishing gradient problem in deep networks.\n    \n    x = x + self.mlp(self.rms_2(x)). The input is again normalized (rms_2),\n    then passed through the MLP. 
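(Schematically, the whole block computes h = attn(rms_1(x)); x = x + h; x = x + mlp(rms_2(x)).) 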
The result is added to the\n    input for another residual connection.\n    \n    The forward method returns the final 'x' and the updated key-value\n    cache 'new_kv_cache'.\n    \n    This class contains a single transformer block. There will be\n    multiple such blocks stacked one on top of the other.\n    \"\"\"\n    def __init__(self, config: DomiConfig) -> None:\n        super().__init__()\n        self.rms_1 = RMSNorm(config.n_embd)\n        self.attn = CausalSelfAttention(config)\n        self.rms_2 = RMSNorm(config.n_embd)\n        self.mlp = MLP(config)\n\n    def forward(\n        self,\n        x: torch.Tensor,\n        rope: RoPECache,\n        mask: MaskCache,\n        max_seq_length: int,\n        input_pos: Optional[torch.Tensor] = None,\n        kv_cache: Optional[KVCache] = None,\n    ) -> Tuple[torch.Tensor, Optional[KVCache]]:\n        h, new_kv_cache = self.attn(self.rms_1(x), rope, mask, max_seq_length, input_pos, kv_cache)\n        x = x + h\n        x = x + self.mlp(self.rms_2(x))\n        return x, new_kv_cache\n\nclass MLP(nn.Module):\n    \"\"\"\n    In the constructor, hidden_dim is set to 4 times the embedding dim,\n    and n_hidden to 2/3 of hidden_dim. find_multiple adjusts n_hidden\n    so that it is a multiple of 256, which can make computations more\n    efficient.\n    \n    The first layer c_fc1 is a linear transformation with input dim config.n_embd \n    and output dim n_hidden. The second layer c_fc2 is also a linear transformation\n    with the same dimensions. c_proj projects from the n_hidden state back to the\n    embedding dimension.\n    \n    In the forward method, input x is first transformed by c_fc1 and the\n    sigmoid linear unit (SiLU) activation is applied. This is element-wise\n    multiplied by the output of the c_fc2 layer: this is the SwiGLU-style\n    gating used in LLaMA-family models (a gate, not a residual connection).\n    \n    The result is then transformed by c_proj, and this final transformed version of\n    x is returned.\n    \"\"\"\n    \n    def __init__(self, config: DomiConfig) -> None:\n        super().__init__()\n        hidden_dim = 4 * config.n_embd\n        n_hidden = int(2 * hidden_dim / 3)\n        n_hidden = find_multiple(n_hidden, 256)\n\n        self.c_fc1 = nn.Linear(config.n_embd, n_hidden, bias=False)\n        self.c_fc2 = nn.Linear(config.n_embd, n_hidden, bias=False)\n        self.c_proj = nn.Linear(n_hidden, config.n_embd, bias=False)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = F.silu(self.c_fc1(x)) * self.c_fc2(x)\n        x = self.c_proj(x)\n        return x\n\nclass RMSNorm(nn.Module):\n    \"\"\"\n    RMS normalization operates on the given tensor x. It is similar to\n    standard layer normalization, but instead of using the mean and variance,\n    it uses the square root of the mean of the squared elements. 
This provides\n    a scaling factor for the normalization which can be beneficial.\n    \n    The constructor initializes 'scale' as a learnable PyTorch\n    parameter and sets the epsilon (used for numerical stability) and the \n    dimension along which the normalization should be performed.\n    \n    The forward method calculates the mean of the squared elements of x.\n    Then it calculates x_normed by scaling x with the reciprocal square \n    root of this norm, i.e. dividing x by its root mean square.\n    The scaled tensor is then multiplied by the learnable 'scale' parameter.\n    \"\"\"\n\n    def __init__(self, size: int, dim: int = -1, eps: float = 1e-5) -> None:\n        super().__init__()\n        self.scale = nn.Parameter(torch.ones(size))\n        self.eps = eps\n        self.dim = dim\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        norm_x = torch.mean(x * x, dim=self.dim, keepdim=True)\n        x_normed = x * torch.rsqrt(norm_x + self.eps)\n        return self.scale * x_normed\n\n\ndef build_rope_cache(\n    seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000\n) -> RoPECache:\n    \"\"\"\n    This function generates the rotational embedding matrix, sized by the input sequence \n    length seq_len and dimensionality n_elem. base is the constant used in calculating the rotation angles.\n    \n    The purpose of this function is to prepare a cache of rotation angles, theta, that are multiplied \n    with the position index, seq_idx, to generate idx_theta. Finally, the function returns a cache that \n    contains the cosine and sine values of idx_theta. The dtype and device adjustments are made to \n    ensure the calculations are consistent with the original implementation.\n    \n    \"\"\"\n    # $\\Theta = {\\theta_i = 10000^{-\\frac{2(i-1)}{d}}, i \\in [1, 2, ..., \\frac{d}{2}]}$\n    theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))\n\n    # Create position indexes `[0, 1, ..., seq_len - 1]`\n    seq_idx = torch.arange(seq_len, dtype=dtype, device=device)\n\n    # Calculate the product of position index and $\\theta_i$\n    idx_theta = torch.outer(seq_idx, theta).float()\n\n    cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)\n\n    # this is to mimic the behaviour of complex32, else we will get different results\n    if dtype in (torch.float16, torch.bfloat16, torch.int8):\n        cache = cache.half()\n    return cache\n\n\ndef apply_rope(x: torch.Tensor, rope_cache: RoPECache) -> torch.Tensor:\n    \"\"\"\n    This function applies the rotational positional embedding to the input tensor x. \n    It first truncates the rope_cache to match the input sequence length. \n    Then, the input tensor x and rope_cache are reshaped to facilitate element-wise multiplication and \n    addition operations. These operations are then applied, essentially rotating the original embeddings \n    in the complex plane. The final tensor x_out2 is reshaped and returned with the same dtype as the \n    input tensor x.\n    \n    Now let's create an example:\n\n    Suppose we have an input tensor x of size (batch_size=2, seq_len=5, n_elem=6). \n    Here, we have 2 sequences, each of length 5, and each element in the sequence is represented \n    by an embedding of size 6.\n    \n    After running this code, the output will be a tensor of the same size as x, but with positional information \n    encoded through rotations.\n\n    Regarding the size of rope_cache, it should be a tensor of shape (seq_len, n_elem // 2, 2), \n    which comes from stacking cosine and sine values of the product of the sequence index and theta. 
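In effect, each embedding pair (x[..., 2i], x[..., 2i+1]) at position p is treated as a complex number and multiplied by exp(1j * p * theta_i), which is exactly what the two real multiply-add lines in the code below compute. 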
\n In our example, rope_cache would have a shape of (5, 3, 2).\n \"\"\"\n # truncate to support variable sizes\n T = x.size(1)\n rope_cache = rope_cache[:T]\n\n # cast because the reference does\n xshaped = x.float().reshape(*x.shape[:-1], -1, 2)\n rope_cache = rope_cache.view(1, xshaped.size(1), 1, xshaped.size(3), 2)\n x_out2 = torch.stack(\n [\n xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],\n xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],\n ],\n -1,\n )\n\n x_out2 = x_out2.flatten(3)\n return x_out2.type_as(x)\n\n","repo_name":"Jaijith/MyLLama","sub_path":"domi/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":24812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70103076488","text":"import re\n\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom tests.browser.selenium_helpers import SeleniumTestCase\n\nfrom orm.models import BitbakeVersion, Release, Project, Build, Target\n\nclass TestProjectBuildsPage(SeleniumTestCase):\n \"\"\" Test data at /project/X/builds is displayed correctly \"\"\"\n\n PROJECT_NAME = 'test project'\n CLI_BUILDS_PROJECT_NAME = 'command line builds'\n\n def setUp(self):\n bbv = BitbakeVersion.objects.create(name='bbv1', giturl='/tmp/',\n branch='master', dirpath='')\n release = Release.objects.create(name='release1',\n bitbake_version=bbv)\n self.project1 = Project.objects.create_project(name=self.PROJECT_NAME,\n release=release)\n self.project1.save()\n\n self.project2 = Project.objects.create_project(name=self.PROJECT_NAME,\n release=release)\n self.project2.save()\n\n self.default_project = Project.objects.create_project(\n name=self.CLI_BUILDS_PROJECT_NAME,\n release=release\n )\n self.default_project.is_default = True\n self.default_project.save()\n\n # parameters for builds to associate with the projects\n now = timezone.now()\n\n self.project1_build_success = {\n 'project': self.project1,\n 'started_on': now,\n 'completed_on': now,\n 'outcome': Build.SUCCEEDED\n }\n\n self.project1_build_in_progress = {\n 'project': self.project1,\n 'started_on': now,\n 'completed_on': now,\n 'outcome': Build.IN_PROGRESS\n }\n\n self.project2_build_success = {\n 'project': self.project2,\n 'started_on': now,\n 'completed_on': now,\n 'outcome': Build.SUCCEEDED\n }\n\n self.project2_build_in_progress = {\n 'project': self.project2,\n 'started_on': now,\n 'completed_on': now,\n 'outcome': Build.IN_PROGRESS\n }\n\n def _get_rows_for_project(self, project_id):\n \"\"\"\n Helper to retrieve HTML rows for a project's builds,\n as shown in the main table of the page\n \"\"\"\n url = reverse('projectbuilds', args=(project_id,))\n self.get(url)\n self.wait_until_present('#projectbuildstable tbody tr')\n return self.find_all('#projectbuildstable tbody tr')\n\n def test_show_builds_for_project(self):\n \"\"\" Builds for a project should be displayed in the main table \"\"\"\n Build.objects.create(**self.project1_build_success)\n Build.objects.create(**self.project1_build_success)\n build_rows = self._get_rows_for_project(self.project1.id)\n self.assertEqual(len(build_rows), 2)\n\n def test_show_builds_project_only(self):\n \"\"\" Builds for other projects should be excluded \"\"\"\n Build.objects.create(**self.project1_build_success)\n Build.objects.create(**self.project1_build_success)\n Build.objects.create(**self.project1_build_success)\n\n # shouldn't see these two\n 
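# (they belong to project2, not the project whose builds page is under test)\n        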
Build.objects.create(**self.project2_build_success)\n Build.objects.create(**self.project2_build_in_progress)\n\n build_rows = self._get_rows_for_project(self.project1.id)\n self.assertEqual(len(build_rows), 3)\n\n def test_builds_exclude_in_progress(self):\n \"\"\" \"in progress\" builds should not be shown in main table \"\"\"\n Build.objects.create(**self.project1_build_success)\n Build.objects.create(**self.project1_build_success)\n\n # shouldn't see this one\n Build.objects.create(**self.project1_build_in_progress)\n\n # shouldn't see these two either, as they belong to a different project\n Build.objects.create(**self.project2_build_success)\n Build.objects.create(**self.project2_build_in_progress)\n\n build_rows = self._get_rows_for_project(self.project1.id)\n self.assertEqual(len(build_rows), 2)\n\n def test_show_tasks_with_suffix(self):\n \"\"\" Task should be shown as suffixes on build names \"\"\"\n build = Build.objects.create(**self.project1_build_success)\n target = 'bash'\n task = 'clean'\n Target.objects.create(build=build, target=target, task=task)\n\n url = reverse('projectbuilds', args=(self.project1.id,))\n self.get(url)\n self.wait_until_present('td[class=\"target\"]')\n\n cell = self.find('td[class=\"target\"]')\n content = cell.get_attribute('innerHTML')\n expected_text = '%s:%s' % (target, task)\n\n self.assertTrue(re.search(expected_text, content),\n '\"target\" cell should contain text %s' % expected_text)\n\n def test_cli_builds_hides_tabs(self):\n \"\"\"\n Display for command line builds should hide tabs\n \"\"\"\n url = reverse('projectbuilds', args=(self.default_project.id,))\n self.get(url)\n tabs = self.find_all('#project-topbar')\n self.assertEqual(len(tabs), 0,\n 'should be no top bar shown for command line builds')\n\n def test_non_cli_builds_has_tabs(self):\n \"\"\"\n Non-command-line builds projects should show the tabs\n \"\"\"\n url = reverse('projectbuilds', args=(self.project1.id,))\n self.get(url)\n tabs = self.find_all('#project-topbar')\n self.assertEqual(len(tabs), 1,\n 'should be a top bar shown for non-command-line builds')\n","repo_name":"openbmc/openbmc","sub_path":"poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py","file_name":"test_project_builds_page.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"16"} +{"seq_id":"19597392634","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport json, datetime, csv, tqdm\n\nGOODREADS_URL = \"https://www.goodreads.com/book/show/\"\n\ndef convert_book(book):\n return {\n \"Title\": book['Title'],\n \"Author\": book['Author'],\n \"Additional Authors\": book['Additional Authors'],\n \"ISBN\": book['ISBN'],\n \"ISBN13\": book['ISBN13'],\n \"My Rating\": book['My Rating'],\n \"Average Rating\": book['Average Rating'],\n \"Publisher\": book['Publisher'],\n \"Binding\": book['Binding'],\n \"Number of Pages\": book['Number of Pages'],\n \"Year Published\": book['Year Published'],\n \"Original Publication Year\": book['Original Publication Year'],\n \"Date Read\": book['Date Read'],\n \"Date Added\": book['Date Added'],\n \"Bookshelves\": book['Bookshelves'],\n \"Status\": book['Exclusive Shelf'],\n \"Cover\": None,\n \"Book Id\": book['Book Id'],\n }\n\nwith open('goodreads.json') as f:\n books_ = json.load(f)\n\nbooks = []\n\n\nfor book_ in books_:\n book = convert_book(book_)\n if book[\"Status\"] != 'to-read':\n books.append(book)\n\n# for book in tqdm.tqdm(books):\n# url = 
GOODREADS_URL + str(book['Book Id'])\n# url_open = urlopen(url)\n# soup = BeautifulSoup(url_open, 'html.parser')\n# tag = soup.find(\"img\", {\"id\": \"coverImage\"})\n# try:\n# book['Cover'] = tag['src']\n# except:\n# print(\"Book:\", url, \"found with no cover, try changing edition.\")\n\n# csv_columns = [\"Title\"\n# , \"Author\"\n# , \"Additional Authors\"\n# , \"ISBN\"\n# , \"ISBN13\"\n# , \"My Rating\"\n# , \"Average Rating\"\n# , \"Publisher\"\n# , \"Binding\"\n# , \"Number of Pages\"\n# , \"Year Published\"\n# , \"Original Publication Year\"\n# , \"Date Read\"\n# , \"Date Added\"\n# , \"Bookshelves\"\n# , \"Status\"\n# , \"Cover\",\n# \"Book Id\"]\n\ntime_stamp = datetime.datetime.now().strftime(\"%b-%d-%y-%H:%M:%S\")\n\nwith open('with_covers_{}.json'.format(time_stamp), mode='w') as jsonfile:\n json.dump(books, jsonfile)\n # writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n # writer.writeheader()\n # for data in books:\n # writer.writerow(data)","repo_name":"thiagomgd/exports","sub_path":"goodreads/load_covers.py","file_name":"load_covers.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"10085490672","text":"# Silver 3 - 크리스마스 선물\n\nfrom heapq import heappush, heappop\nimport sys\ninput = sys.stdin.readline\n\nhq = []\nfor _ in range(int(input())):\n a = list(map(int, input().split()))\n\n if a[0] == 0:\n print(heappop(hq) * -1 if hq else -1)\n else:\n for i in a[1:]:\n heappush(hq, -i)\n","repo_name":"vhzkclq0705/Algorithm_Problem_Solving","sub_path":"BackJoon/Study/Week7/14235.py","file_name":"14235.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34308970260","text":"\"\"\"Constants for the EnOcean integration.\"\"\"\n\nDOMAIN = \"my_enocean\"\n\nCONF_ENTRY_ID = \"entry_id\"\nCONF_SERIAL_PORT = \"serial_port\"\nCONF_IDENTIFIERS = \"identifiers\"\nCONF_MANUFACTURER = \"manufacturer\"\nCONF_MODEL = \"model\"\nCONF_VIA_DEVICE = \"via_device\"\nCONF_NO_OF_CHANNELS = \"no_of_channels\"\nCONF_OPTION = \"option\"\nCONF_LOADED = \"loaded\"\n\nDATA_BASE = \"data_base\"\nDATA_CONFIG = \"data_config\"\nDATA_DEVICES = \"data_devices\"\nDATA_DISPATCHERS = \"data_dispatchers\"\nDATA_DONGLE = \"data_dongle\"\nDATA_PLATFORM = \"data_platform\"\n\nDEFAULT_CONF_HUB_SERIAL_PORT = \"/dev/ttyS3\"\nDEFAULT_CONF_HUB_MANUFACTURER = \"element14\"\nDEFAULT_CONF_HUB_MODEL = \"TCM 310\"\nDEFAULT_DATABASE_NAME = \"enocean.sqlite\"\n\n#DEVICE_MANUFACTURER_NODON = \"NodOn\"\n#DEVICE_MANUFACTURER_ELTAKO = \"Eltako\"\n#DEVICE_MANUFACTURERS = [DEVICE_MANUFACTURER_ELTAKO, DEVICE_MANUFACTURER_NODON]\n#\n#DEVICE_MODEL_NODON_SIN_2_1_01 = \"SIN-2-1-01\"\n#DEVICE_MODEL_ELTAKO_FSR61NP = \"FSR61NP\"\n#DEVICE_MODELS = [DEVICE_MODEL_ELTAKO_FSR61NP, DEVICE_MODEL_NODON_SIN_2_1_01]\n\nDELAY_INIT = 3\n\nENOCEAN_TRANSCEIVER = \"EnOcean Transceiver\"\nENOCEAN_SWITCH = \"EnOcean Switch\"\n\n#OPTION_NONE = \"\"\n#OPTION_ADD_SWITCH = \"add_switch\"\n#OPTION_TEACH_IN = \"Teach-In\"\n#OPTION_LIST = [OPTION_NONE,OPTION_ADD_SWITCH,OPTION_TEACH_IN]\n\nRETURN_DB_NEW = \"return_db_new\"\nRETURN_DB_EXISTING = \"return_db_existing\"\n\nSERVICE_TEACH_IN = \"service_teach_in\"\nSERVICE_ADD_SWITCH = \"service_add_switch\"\nSERVICE_REMOVE_DEVICE = \"service_remove_device\"\n\nSIGNAL_RECEIVE_PACKET = \"enocean.receive_packet\"\nSIGNAL_SEND_PACKET = \"enocean.send_packet\"\nSIGNAL_TEACH_IN = 
\"enocean.teach_in\"\nSIGNAL_ADD_ENTITIES = \"enocean.add_entities\"\n\n\nDEVICES_EEP = {\n \"NodOn_SIN-2-1-01\": [\"TeachIn_UTE\", \"D2-01-00\"],\n \"NodOn_SIN-2-2-01\": [\"TeachIn_UTE\", \"D2-01-00\"],\n \"Eltako_FSR61NP\": [\"TeachIn_4BS\", \"F6-02-01\", \"A5-38-08\"],\n \"FLEXtron_300610\": [\"TeachIn_4BS\", \"A5-11-04\", \"A5-38-08\"],\n }\n","repo_name":"refohl/ha_my_enocean","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40469436189","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import with_statement\nfrom __future__ import division\nfrom __future__ import nested_scopes\nfrom __future__ import generators\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom neurotools.system import *\n\n'''\nExplore log-polar gaussian distributions for representing analytic\nsignal across the population.\n'''\n\n# TODO: wildcard imports considered hazardous\n#from matplotlib.pyplot import *\n#from neurotools.graphics.plot import *\n#from neurotools.graphics.color import *\n\nimport matplotlib.pyplot as mp\nimport scipy.stats\nimport numpy as np\nimport matplotlib as plt\n\ndef logpolar_gaussian(frame,doplot=False):\n '''\n Generates major and minor axis and one-sigma ellipse contours for a\n log-polar complex Gaussian. Contours are returned as lists of complex\n numbers.\n \n Parameters\n ----------\n frame : np.array\n Complex-valued polar data to model\n doplot : bool\n Whether to generate a plot of the resulting distribution\n \n Returns\n -------\n axis 1 (np.array)\n axis 2 (np.array)\n 1-sigma ellipse (np.array)\n '''\n # set to zero mean phase\n theta = np.angle(np.mean(frame))\n rephased = frame*np.exp(1j*-theta)\n weights = np.abs(rephased)\n weights = weights/np.sum(weights)\n x = np.log(np.abs(rephased))\n y = np.angle(rephased)/4\n # use 2D gaussian approximation\n mx = np.dot(weights,x)\n my = np.dot(weights,y)\n cx = x - mx\n cy = y - my\n correction = np.sum(weights)/(np.sum(weights)**2-np.sum(weights**2))\n cxx = np.dot(weights,cx*cx)*correction\n cxy = np.dot(weights,cx*cy)*correction\n cyy = np.dot(weights,cy*cy)*correction\n cm = np.array([[cxx,cxy],[cxy,cyy]])\n sm = scipy.linalg.cholesky(cm)\n w,v = scipy.linalg.eig(cm)\n v = v[0,:]+1j*v[1,:]\n origin = mx + 1j*my\n w = np.sqrt(w)\n axis1 = origin + v[0]*w[0]*np.linspace(-1,1,100)\n axis2 = origin + v[1]*w[1]*np.linspace(-1,1,100)\n circle = np.exp(1j*linspace(0,2*pi,100))\n circle = p2c(np.dot(sm,[circle.real,circle.imag]))+origin\n phase = np.exp(1j*theta)\n if doplot:\n plot(*c2p(np.exp(axis1)*phase),color='r',lw=2,zorder=Inf)\n plot(*c2p(np.exp(axis2)*phase),color='r',lw=2,zorder=Inf)\n plot(*c2p(np.exp(circle)*phase),color='r',lw=2,zorder=Inf)\n return np.exp(axis1)*phase,np.exp(axis2)*phase,np.exp(circle)*phase\n\n\ndef complex_gaussian(frame,doplot=False):\n '''\n Generate axis and 1-sigma contour for a complex gaussian distribution\n \n Parameters\n ----------\n frame : np.array\n Complex-valued polar data to model\n doplot : bool\n Whether to generate a plot of the resulting distribution\n \n Returns\n -------\n (axis 1, axis 2, 1-sigma ellipse)\n '''\n # set to zero mean phase\n rephased = frame#*np.exp(1j*-theta)\n weights = np.ones(np.shape(rephased))\n weights = weights/np.sum(weights)\n # convert to log-polar\n x = real(rephased)\n y = imag(rephased)\n # use 2D gaussian 
approximation\n mx = np.dot(weights,x)\n my = np.dot(weights,y)\n cx = x - mx\n cy = y - my\n #cm = cov(cx,cy)\n correction = np.sum(weights)/(np.sum(weights)**2-np.sum(weights**2))\n cxx = np.dot(weights,cx*cx)*correction\n cxy = np.dot(weights,cx*cy)*correction\n cyy = np.dot(weights,cy*cy)*correction\n cm = np.array([[cxx,cxy],[cxy,cyy]])\n sm = cholesky(cm)\n w,v = eig(cm)\n v = v[0,:]+1j*v[1,:]\n origin = mx + 1j*my\n w = np.sqrt(w)\n axis1 = origin + v[0]*w[0]*linspace(-1,1,100)\n axis2 = origin + v[1]*w[1]*linspace(-1,1,100)\n circle = np.exp(1j*linspace(0,2*pi,100))\n circle = p2c(np.dot(sm,[real(circle),imag(circle)]))+origin\n if doplot:\n plt.plot(*c2p(axis1) ,color='r',lw=2,zorder=Inf)\n plt.plot(*c2p(axis2) ,color='r',lw=2,zorder=Inf)\n plt.plot(*c2p(circle),color='r',lw=2,zorder=Inf)\n return axis1,axis2,circle\n\ndef logpolar_stats(frame,doplot=False):\n '''\n Generate summary statistics for a log-polar Gaussian distribution\n \n Parameters\n ----------\n frame : np.array\n Complex-valued polar data to model\n doplot : bool\n Whether to generate a plot of the resulting distribution\n \n Returns\n -------\n '''\n z = np.mean(frame)\n r = np.mean(np.abs(frame))\n rl = np.mean(np.log(np.abs(frame)))\n rs = np.std(np.abs(frame))\n rsl = np.std(np.log(np.abs(frame)))\n w = frame / np.abs(frame)\n x = np.mean(w)\n theta = angle(x)\n #R = np.abs(x)\n R = np.abs(z) / r\n sd = np.sqrt(-2*np.log(R))\n print('R,sd',R,sd)\n cv = 1-R\n s = np.exp(rl)*np.exp(1j*theta)\n arc = np.exp(rl+theta*1j)*np.exp(1j*linspace(-sd,sd,100))\n circle = np.exp(1j*linspace(0,2*pi,100))\n circle = real(circle)*rsl + 1j*imag(circle)*sd\n circle = circle+rl+1j*theta\n circle = np.exp(circle)\n radial = np.array([s*np.exp(-rsl),s*np.exp(rsl)])\n if doplot:\n plot(*c2p(circle),color='m',lw=2)\n plot(*c2p(arc),color='m',lw=2)\n plot(*c2p(radial),color='m',lw=2)\n return circle,arc,radial\n\ndef abspolar_stats(frame,doplot=False):\n '''\n Generate summary statistics for a polar Gaussian distribution\n \n Parameters\n ----------\n frame : np.array\n Complex-valued polar data to model\n doplot : bool\n Whether to generate a plot of the resulting distribution\n \n Returns\n -------\n '''\n z = frame\n phi = angle(np.mean(z**2))/2\n flip = sign(np.cos(np.angle(z)-phi))\n r = np.abs(z)*flip\n h = np.angle(z) + pi*np.int32(flip==-1)\n mr = np.mean(r)\n sr = np.std(r)\n mt = phi\n st = np.sqrt(-2*np.log(np.abs(np.mean(np.exp(1j*h)))))\n arc = mr*np.exp(1j*(phi+linspace(-st,st,100)))\n circle = np.exp(1j*linspace(0,2*pi,100))\n circle = (real(circle)*sr+mr)*np.exp(1j*(imag(circle)*st+phi))\n radial = np.array([(mr-sr)*np.exp(1j*phi),(mr+sr)*np.exp(1j*phi)])\n if doplot:\n plt.clf()\n plt.plot(*c2p(circle), color='m',lw=2)\n plt.plot(*c2p(arc ), color='m',lw=2)\n plt.plot(*c2p(radial), color='m',lw=2)\n plt.scatter(*c2p([mr*np.exp(1j*phi)]),color='k',s=5**2)\n return circle,arc,radial\n\ndef squared_first_circular_moment(samples, axis=-1, unbiased=True, dof=None):\n '''\n Compute squared first circular moment\n \n Parameters\n ----------\n samples : np.array\n Complex-valued polar data to model\n axis : int, default=-1\n Axis over which to compute moment\n unbiased : bool, default=True\n Whether to apply a bias correction (small samples can have smaller\n circular variance than expected)\n dof : int, defualts to None\n Optional degrees of freedome correction. 
\ndef squared_first_circular_moment(samples, axis=-1, unbiased=True, dof=None):\n    '''\n    Compute squared first circular moment\n    \n    Parameters\n    ----------\n    samples : np.array\n        Complex-valued polar data to model\n    axis : int, default=-1\n        Axis over which to compute moment\n    unbiased : bool, default=True\n        Whether to apply a bias correction (small samples can have smaller\n        circular variance than expected)\n    dof : int, defaults to None\n        Optional degrees of freedom correction. If None, the number of\n        samples along the given axis is used as the degrees of freedom\n    \n    Returns\n    -------\n    squared_average : float\n        Squared first circular moment\n    '''\n    squared_average = np.abs(np.mean(samples,axis=axis))**2\n    if unbiased:\n        if dof is None:\n            if not type(axis) == int:\n                dof = np.prod(np.array(np.shape(samples))[list(axis)])\n            else:\n                dof = np.shape(samples)[axis]\n        squared_average = (dof*squared_average-1)/(dof-1)\n    return squared_average\n\ndef fit_vonmises(z):\n    '''\n    Fit a von Mises distribution using circular moments.\n    \n    Parameters\n    ----------\n    z : np.array\n        Complex-valued polar data to model\n    \n    Returns\n    -------\n    (location, theta, scale) of the fitted distribution\n    '''\n    scipy.stats.distributions.vonmises.a = -np.pi\n    scipy.stats.distributions.vonmises.b = np.pi\n    theta = np.angle(np.mean(z))\n    dephased = z*np.exp(-1j*theta)\n    location,_,scale = scipy.stats.distributions.vonmises.fit(np.angle(dephased))\n    return location,theta,scale\n
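\n\nif __name__ == '__main__':\n    # Minimal smoke test (added for illustration); the sample size and the\n    # von Mises concentration (5.0) are arbitrary assumptions, not part of\n    # the original module.\n    _z = np.exp(1j*np.random.vonmises(0.0, 5.0, 1000))\n    print('squared first circular moment:', squared_first_circular_moment(_z))\n    print('von Mises fit:', fit_vonmises(_z))\n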
","repo_name":"michaelerule/stable-task-information","sub_path":"Version 2/Rule/neurotools/stats/circular.py","file_name":"circular.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34409078608","text":"from launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration, PathJoinSubstitution\nfrom launch_ros.substitutions import FindPackageShare\n\n\ndef generate_launch_description():\n    dof = LaunchConfiguration('dof', default=6)\n    prefix = LaunchConfiguration('prefix', default='')\n    hw_ns = LaunchConfiguration('hw_ns', default='xarm')\n    limited = LaunchConfiguration('limited', default=True)\n    effort_control = LaunchConfiguration('effort_control', default=False)\n    velocity_control = LaunchConfiguration('velocity_control', default=False)\n    add_gripper = LaunchConfiguration('add_gripper', default=False)\n    add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)\n    robot_type = LaunchConfiguration('robot_type', default='xarm')\n\n    # robot moveit servo launch\n    # xarm_moveit_servo/launch/_robot_moveit_servo.launch.py\n    robot_moveit_servo_launch = IncludeLaunchDescription(\n        PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_moveit_servo'), 'launch', '_robot_moveit_servo.launch.py'])),\n        launch_arguments={\n            'dof': dof,\n            'prefix': prefix,\n            'hw_ns': hw_ns,\n            'limited': limited,\n            'effort_control': effort_control,\n            'velocity_control': velocity_control,\n            'add_gripper': add_gripper,\n            'add_vacuum_gripper': add_vacuum_gripper,\n            'robot_type': robot_type,\n        }.items(),\n    )\n    \n    return LaunchDescription([\n        robot_moveit_servo_launch\n    ])","repo_name":"xArm-Developer/xarm_ros2","sub_path":"xarm_moveit_servo/launch/xarm_moveit_servo_fake.launch.py","file_name":"xarm_moveit_servo_fake.launch.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"16"}
+{"seq_id":"21013421984","text":"#! /usr/bin/env python3\n\n\"\"\"\nSpits out a posfile for a given pdb. Literally prints all resnums from an input pdb to a text file, using this for biasing\nGurobi solutions for preselected residues.\n\nUsage:\n    posfile_from_input_pdb <input_pdb>\n\nArguments:\n    <input_pdb>\n        Path to input pdb\n\"\"\"\n\nimport os\nimport docopt\nimport prody\n\nif __name__ == '__main__':\n    args = docopt.docopt(__doc__)\n\n    prody_pdb = prody.parsePDB(args['<input_pdb>']).getHierView()\n    posfile_name = os.path.basename(os.path.normpath(args['<input_pdb>'])).split('.')[0] + '.pos'\n\n    with open(posfile_name, 'w') as posfile:\n        for residue in prody_pdb.iterResidues():\n            posfile.write('{}\\n'.format(residue.getResnum()))","repo_name":"jaaamessszzz/BindingSitesFromFragments-Utilities","sub_path":"Temp/posfile_from_input_pdb.py","file_name":"posfile_from_input_pdb.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"23824923810","text":"from math import cos, sin, sqrt\r\nimport random\r\n\r\n\r\nclass Atom:\r\n    def __init__(self, x, y, z):\r\n        self.x = x\r\n        self.y = y\r\n        self.z = z\r\n        self.type = 1\r\n        v0 = 2.0\r\n        z = random.random()*2.0-1\r\n        s = random.random()*3.14*2.0\r\n        self.vx = v0*sqrt(1.0-z**2)*cos(s)\r\n        self.vy = v0*sqrt(1.0-z**2)*sin(s)\r\n        self.vz = v0*z\r\n\r\n\r\ndef add_ball():\r\n    atoms = []\r\n    r = 4\r\n    s = 1.7\r\n    h = 0.5 * s\r\n    for ix in range(-r, r):\r\n        for iy in range(-r, r):\r\n            for iz in range(-r, r):\r\n                x = ix * s\r\n                y = iy * s\r\n                z = iz * s\r\n                atoms.append(Atom(x, y, z))\r\n                atoms.append(Atom(x, y+h, z+h))\r\n                atoms.append(Atom(x+h, y, z+h))\r\n                atoms.append(Atom(x+h, y+h, z))\r\n    print(f\"{len(atoms)} atoms\")\r\n    return atoms\r\n\r\n\r\ndef save_file(filename, atoms):\r\n    with open(filename, \"w\") as f:\r\n        f.write(\"Position Data\\n\\n\")\r\n        f.write(\"{} atoms\\n\".format(len(atoms)))\r\n        f.write(\"1 atom types\\n\\n\")\r\n        f.write(\"-10.00 10.00 xlo xhi\\n\")\r\n        f.write(\"-10.00 10.00 ylo yhi\\n\")\r\n        f.write(\"-10.00 10.00 zlo zhi\\n\")\r\n        f.write(\"\\n\")\r\n        f.write(\"Atoms\\n\\n\")\r\n        for i, a in enumerate(atoms):\r\n            f.write(\"{} {} {} {} {}\\n\".format(i+1, a.type, a.x, a.y, a.z))\r\n        f.write(\"\\n\")\r\n        f.write(\"Velocities\\n\\n\")\r\n        for i, a in enumerate(atoms):\r\n            f.write(\"{} {} {} {}\\n\".format(i+1, a.vx, a.vy, a.vz))\r\n    print(\"Generated {}\".format(filename))\r\n\r\n\r\nsave_file(\"config.atoms\", add_ball())\r\n","repo_name":"kaityo256/lj_and_wca","sub_path":"generate_config.py","file_name":"generate_config.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"17195448593","text":"import matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport sys, re, pdb\nimport logging\nimport argparse\n\nimport pandas as pd\n\nimport datetime\n\ndef testi():\n    return 3\n\ndef read_data(file_name):\n    #logger.info(\"Reading data from file \" + file_name)\n    df = pd.read_csv(file_name, parse_dates=['datetime'], sep = ';')\n    df = df[df[\"municipalityId\"].notnull()]\n    df[\"municipalityId\"] = df[\"municipalityId\"].astype(int)\n    df[\"userId\"] = df[\"userId\"].astype(int)\n    #logger.info(\"N of rows: {:.0f}\".format(len(df)))\n    df = df.sort_values(['applicationId', 'datetime'])\n    return df\ndef print_week1a_result(df):\n    print('Amount of municipalities that have used the service is {}'.format(df.municipalityId.unique().size))\ndef print_week1b_results(df):\n    print(\"Amount of application role users is {}\".format(df[df['role'] == 'applicant'].userId.unique().size))\n    print(\"Amount of authority role users is {}\".format(df[df['role'] == 'authority'].userId.unique().size))\ndef print_week2h1a_results(df):\n    print('Amount of comments on each application is:')\n    print(df[(df['action'] == 'add-comment')].groupby('applicationId').count().userId)\n#Create month column from createDate\ndef print_week2a1b_results(df_operative):\n    df_operative['createdMonth'] = df_operative['createdDate'].map(lambda x: x.month)\n    createdMonthPlot = df_operative[df_operative['state'] == 'submitted'].groupby('createdMonth').size().plot(kind='bar', title='Amount of application submitted each month')\n    createdMonthPlot.set_xlabel('Month')\n\nif __name__ == \"__main__\":\n    usage_file = \"/Users/toniok/Python/Python-data-science-exercises/data/all-lupapiste-usage-pub-20161031.csv\"\n    operative_file = \"/Users/toniok/Python/Python-data-science-exercises/data/all-applications-operative-pub-20161031.csv\"\n    df = read_data(usage_file)\n    df_operative = pd.read_csv(operative_file, parse_dates=['createdDate', 'submittedDate'], sep = ';')\n    print_week1a_result(df)\n    print_week1b_results(df)\n    print_week2h1a_results(df)\n    print_week2a1b_results(df_operative)\n","repo_name":"neurogoo/Python-data-science-exercises","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"75032761928","text":"# Recurrent Neural Network\n\n# Part 1 - Data Preprocessing\n\n# Importing the libraries\n\nimport matplotlib.pyplot as plt\n#Using Quandl\nimport numpy as np\nimport pandas as pd\nimport quandl\nimport datetime\n
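\n# NOTE (added): the quandl.get() calls below assume a configured Quandl API\n# key; the free WIKI price feed stopped updating in 2018 and the NS1\n# news-sentiment tables are a premium dataset, so these downloads may fail\n# without a valid subscription.\n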
\n\n#importing the training set of AAPL\n\nstart = datetime.datetime(2013,1,1)\n#\nend=datetime.datetime(2017,11,5)\n\n#start_test=datetime.datetime(2017,10,21)\n#end_test=datetime.datetime(2017,10,3)\n\n#Getting HH HL LL LH of a stock\nmystock_training=quandl.get('WIKI/AAPL',start_date=start,end_date=end)\n\n\n\n#keeping only the high and low with volume\nmystock_training=mystock_training[['Adj. Open','Adj. Close','Adj. Volume','Adj. Low','Adj. High']]\n\n#mystock_training['Date']=mystock_training.index\n\n#getting the sentiment data\nmysent_training=quandl.get('NS1/AAPL_CI',api_key='No8oze7d5V48kqY_rSuy',start_date=start,end_date=end)\nmysent_training=mysent_training[['Sentiment','News Volume','News Buzz']]\n#mysent_training['Date']=mysent_training.index\n\ntraining_set=mysent_training.join(mystock_training)\ndataset=training_set.dropna()\n############################################\n#get the LL HL LH HH\n\ndataset['Diff_H']=dataset['Adj. High'].diff()\ndataset['Diff_L']=dataset['Adj. Low'].diff()\ndataset=dataset.dropna()\n\n#dataset['Diff_L']=np.where(dataset['Diff_L']>0,'HL','LL')\n#dataset['Diff_H']=np.where(dataset['Diff_H']>0,'HH','LH')\n\n\n\n#dataset['status']=dataset['Diff_H']+dataset['Diff_L']\n#del dataset['Diff_H']\n#del dataset['Diff_L']\n\n#original=dataset['status']\n\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n#labelencoder = LabelEncoder()\n##onehotencoder=OneHotEncoder(categorical_features=dataset['Diff_L'])\n#dataset['Diff_L']=labelencoder.fit_transform(dataset['Diff_L'])\n#\n##onehotencoder.fit_transform(dataset['Diff_L']).toarray()\n##Add the hot encoder\n\n\n\n#convert DataFrame to Array\ndataset=dataset.values\n\n\n## Feature Scaling\n## Feature Scaling so we apply Normalization but not Standardisation\n#from sklearn.preprocessing import StandardScaler\n#sc = StandardScaler()\n#training_set_scaled = sc.fit_transform(dataset)\n#\n#\n##try Robustscaler\n#from sklearn.preprocessing import RobustScaler\n#rc=RobustScaler()\n#training_set_scaled=rc.fit_transform(dataset)\n\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range=(0,1))\ntraining_set_scaled = sc.fit_transform(dataset)\n\n\n##try normalization\n#from sklearn.preprocessing import Normalizer\n#\n#norm=Normalizer()\n#training_set_normalized=norm.fit_transform(dataset)\n\n\n#Creating a data structure with 60 timesteps and one output\n#n is the number of days back\nn=60\nX_train = []\ny_train = []\nfor i in range(n,len(dataset)):\n    X_train.append(training_set_scaled[i-n:i, :])\n    y_train.append(training_set_scaled[i, 8:10])\n\n\n\nX_train, y_train=np.array(X_train), np.array(y_train)\n\n\n\n\n# Reshaping\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 10))\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\n# Initialising the RNN\nregressor = Sequential()\n\n# Adding the input layer and first LSTM layer\nregressor.add(LSTM(units = 50, return_sequences=True, input_shape = (X_train.shape[1], 10)))\nregressor.add(Dropout(0.2))\n\n# Adding the stacked hidden LSTM layers (the original wrote these twenty\n# identical blocks out by hand; the loop below is equivalent)\nfor _ in range(20):\n    regressor.add(LSTM(units = 50, return_sequences=True))\n    regressor.add(Dropout(0.2))\n\n# Adding last LSTM Layer\nregressor.add(LSTM(units = 50))\nregressor.add(Dropout(0.2))\n\n#adding the output layer\nregressor.add(Dense(units=2))\n\n# Compiling the RNN\nregressor.compile(optimizer = 'adam', loss = 'mean_squared_error')\n\n# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs =50, batch_size = 32)\n\n# Part 3 - Making the predictions and visualising the results\n\n\ninputs=dataset[len(dataset)-60:]\ninputs = sc.transform(inputs)\n\n#X_test=[]\n#\n#for i in range(1,2):\n#    X_test.append(inputs[i-1:i,0])\n\n#inputs=np.array(inputs)\nX_test=np.array(inputs)\nX_test = np.reshape(X_test, (1, X_test.shape[0], 10))\n\n\n#X_test=np.reshape(inputs, (1,inputs.shape[0],8))\n\n\npredicted_stock_price=regressor.predict(X_test)\n\n\nnew=np.zeros(shape=(len(X_test), 10) )\n\nnew[0,8:10]=predicted_stock_price\n\npredicted_stock_price=sc.inverse_transform(new)\n\n\nH_today=predicted_stock_price[0,8]+dataset[len(dataset)-1,7]\nL_today=predicted_stock_price[0,9]+dataset[len(dataset)-1,6]\n\n\n########\n# Getting the predicted stock price of 2017\ninputs = training_set[-60,:]\n##inputs = sc.transform(inputs)\n#inputs=np.reshape(inputs, (1, 1, 1))\n#predicted_stock_price = regressor.predict(inputs)\n#predicted_stock_price = sc.inverse_transform(predicted_stock_price)\n######\n#\n#\n#\n#\n## Getting the predicted stock price\n##get all\n#dataset_total=pd.concat((mystock_training['Adj. High'],mystock_test['Adj. 
High']),axis=0)\n#inputs = dataset_total[len(dataset_total)-len(mystock_test)-60:].values\n#inputs=inputs.reshape(-1,1)\n#inputs=sc.fit_transform(inputs)\n\n#X_test = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 6))\n\nX_test = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 6))\n\npredicted_stock_price = regressor.predict(inputs)\nprice=sc1.inverse_transform(predicted_stock_price)\n\n# Visualising the results\nplt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')\nplt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Google Stock Price')\nplt.legend()\nplt.show()","repo_name":"youngphero/MachineLearning-Finance","sub_path":"rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27680490242","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom numpy import linalg as LA\nfrom .utils import prune_rate, arg_nonzero_min\n\n__all__ = ['weight_prune', 'filter_prune']\n\ndef weight_prune(model, pruning_perc, prev_masks=None, norm=False, device='cpu'):\n '''\n Prune pruning_perc% weights globally (not layer-wise)\n arXiv: 1606.09274\n ''' \n idx = 0\n all_weights = []\n for p in model.parameters():\n if len(p.data.size()) != 1 and p.requires_grad:\n if not prev_masks:\n if not norm:\n all_weights += list(abs(p.cpu().data.numpy()).flatten())\n else:\n all_weights += list(abs(p.cpu().data.numpy()).flatten() / LA.norm(abs(p.cpu().data.numpy()).flatten()))\n else:\n if not norm:\n all_weights += list(abs(p.cpu().data.numpy() * prev_masks[idx].cpu().numpy()).flatten())\n else:\n all_weights += list(abs(p.cpu().data.numpy() * prev_masks[idx].cpu().numpy()).flatten() / LA.norm(abs(p.cpu().data.numpy() * prev_masks[idx].cpu().numpy()).flatten()))\n idx += 1\n threshold = np.percentile(np.array(all_weights), pruning_perc)\n \n # generate mask\n idx = 0\n masks = []\n for p in model.parameters():\n if len(p.data.size()) != 1 and p.requires_grad:\n if not prev_masks:\n if not norm:\n pruned_inds = p.data.abs() > threshold\n else:\n norm_value = torch.norm(p.data.abs(), p=2)\n norm_value = torch.sum(norm_value)\n pruned_inds = (p.data.abs() / norm_value) > threshold\n else:\n if not norm:\n pruned_inds = p.data.abs() * prev_masks[idx] > threshold\n idx += 1\n else:\n norm_value = torch.norm(p.data.abs() * prev_masks[idx], p=2)\n norm_value = torch.sum(norm_value)\n pruned_inds = (p.data.abs() * prev_masks[idx] / norm_value) > threshold\n idx += 1\n \n masks.append(pruned_inds.type(p.dtype).to(device))\n return masks\n\n\ndef prune_one_filter(model, masks, norm, device='cpu'):\n '''\n Pruning one least ``important'' feature map by the scaled l2norm of \n kernel weights\n arXiv:1611.06440\n '''\n NO_MASKS = False\n # construct masks if there is not yet\n if not masks:\n masks = []\n NO_MASKS = True\n\n values = []\n for p in model.parameters():\n if len(p.data.size()) != 1 and p.requires_grad: # nasty way of selecting conv layer\n p_np = p.data.cpu().numpy()\n\n # construct masks if there is not\n if NO_MASKS:\n #masks.append(np.ones(p_np.shape).astype('float16'))\n masks.append(torch.from_numpy(np.ones(p_np.shape)).type(p.dtype).to(device))\n \n if len(p.data.size()) == 4:\n # find the scaled l2 norm for each filter this layer\n value_this_layer = 
np.square(p_np).sum(axis=1).sum(axis=1)\\\n                    .sum(axis=1)/(p_np.shape[1]*p_np.shape[2]*p_np.shape[3])\n            # it means fully_connected layer\n            else:\n                value_this_layer = np.square(p_np.sum(axis=1)/p_np.shape[1])\n            \n            # normalization (important)\n            if norm:\n                value_this_layer = value_this_layer / np.sqrt(np.square(value_this_layer).sum())\n            min_value, min_ind = arg_nonzero_min(list(value_this_layer))\n            values.append([min_value, min_ind])\n\n    assert len(masks) == len(values), \"something wrong here\"\n\n    values = np.array(values)\n\n    # set mask corresponding to the filter to prune\n    to_prune_layer_ind = np.argmin(values[:, 0])\n    to_prune_filter_ind = int(values[to_prune_layer_ind, 1])\n    masks[to_prune_layer_ind][to_prune_filter_ind] = torch.tensor(0).type(p.dtype).to(device)\n\n    '''\n    print('Prune filter #{} in layer #{}'.format(\n        to_prune_filter_ind, \n        to_prune_layer_ind))\n    '''\n    return masks\n\n\ndef filter_prune(model, pruning_perc, prev_masks=None, norm=True, device='cpu'):\n    '''\n    Prune filters one by one until reach pruning_perc\n    (not iterative pruning)\n    '''\n    if not prev_masks:\n        masks = []\n    else:\n        masks = prev_masks\n    current_pruning_perc = 0.\n\n    while current_pruning_perc < pruning_perc:\n        masks = prune_one_filter(model, masks, norm, device=device)\n        model.set_masks(masks)\n        current_pruning_perc = prune_rate(model, verbose=False)\n        #print('{:.2f} pruned'.format(current_pruning_perc))\n\n    return masks","repo_name":"Lee-Gihun/MicroNet_OSI-AI","sub_path":"models/pruning/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"}
+{"seq_id":"26206305592","text":"# coding=utf-8\r\nimport os\r\nfrom xml.etree.ElementTree import ElementTree, Element\r\n'''checkxml.py'''\r\ndef read_xml(in_path):\r\n    '''Read and parse an xml file\r\n    in_path: path to the xml file\r\n    return: ElementTree'''\r\n    tree = ElementTree()\r\n    tree.parse(in_path)\r\n    return tree\r\n\r\ndef check():\r\n    url = \"E:/annotations/\"  # change this to your annotations directory\r\n    i = 0\r\n    list_error = []\r\n    for item in os.listdir(url):\r\n        tree = read_xml(url + \"/\" + item)\r\n        root = tree.getroot()\r\n        object = root.findall(\"object\")\r\n        size = root.find(\"size\")\r\n        width = int(size.find(\"width\").text)\r\n        height = int(size.find(\"height\").text)\r\n        if object == None:\r\n            print(item)\r\n            continue\r\n        for it in object:\r\n            bndbox = it.find(\"bndbox\")\r\n            if bndbox == None:\r\n                print(\"bndbox == None\")\r\n                print(item)\r\n            xmin = int(bndbox.find(\"xmin\").text)\r\n            xmax = int(bndbox.find(\"xmax\").text)\r\n            ymin = int(bndbox.find(\"ymin\").text)\r\n            ymax = int(bndbox.find(\"ymax\").text)\r\n            if xmin <= 0 or xmin >= xmax or ymin <= 0 or ymin >= ymax:\r\n                # The commented-out code below swaps xmin/xmax and ymin/ymax and\r\n                # bumps a zero xmin/ymin to 1; normally it is not needed\r\n                # if xmin >= xmax:\r\n                #     temp = xmin\r\n                #     bndbox.find(\"xmin\").text = str(xmax)\r\n                #     bndbox.find(\"xmax\").text = str(temp)\r\n                # if ymin >= ymax:\r\n                #     temp = ymin\r\n                #     bndbox.find(\"ymin\").text = str(ymax)\r\n                #     bndbox.find(\"ymax\").text = str(temp)\r\n                # if xmin == 0:\r\n                #     bndbox.find(\"xmin\").text = str(1)\r\n                # if ymin == 0:\r\n                #     bndbox.find(\"ymin\").text = str(1)\r\n                # tree.write(\"E:/annotations_update/\"+item)\r\n                print(\"xmin <= 0 or xmin >= xmax or ymin <=0 or ymin >= ymax\", xmin, ymin)  # locate the bad box by searching the xml for these xmin/ymin values\r\n                print(item)\r\n                list_error.append(item)\r\n                i += 1\r\n            if xmax > width or ymax > height:\r\n                print(\"xmax > width or ymax> height\",xmin,ymax)\r\n                print(item)\r\n                list_error.append(item)\r\n                i += 1\r\n    print(list(set(list_error)))\r\n    print(len(list(set(list_error))))\r\n\r\nif __name__ == '__main__':\r\n    check()\r\n","repo_name":"little-spoon/voc2coco","sub_path":"voc2coco/checkxml.py","file_name":"checkxml.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"371155540","text":"# -- coding: utf-8 --\n# @Time : 2020/3/20 5:38 PM\n# @Author : Gao Shang\n# @File : recognition_words.py\n# @Software : PyCharm\n\n\nimport os\nimport cv2\nimport torch\nimport numpy as np\nfrom recognition_words_module import model\nfrom recognition_words_module import decode\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom recognition_words_module.alphabet import alphabet\n\nabsolute_path = os.path.dirname(__file__)\n\n# with open('../data/alphabet.txt', 'r') as f:\n#     alphabet = f.read().replace('\\n', '')\n#     alphabet = [alphabet, '0123456789X-.长期']\n\n# model = [model.chsNet(1, len(alphabet[0]) + 1), model.digitsNet(1, len(alphabet[1]) + 1)]\n# if torch.cuda.is_available():\n#     # Chinese-character model\n#     model[0] = model[0].cuda()\n#     # digits model\n#     model[1] = model[1].cuda()\n# model[0].load_state_dict({k.replace('module.', ''): v for k, v in torch.load(absolute_path + '/data/chs.pth').items()})\n# model[1].load_state_dict({k.replace('module.', ''): v for k, v in torch.load(absolute_path + '/data/number.pth').items()})\n#\n
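\n# NOTE (added): this module pairs two CRNNs -- one over the Chinese-character\n# alphabet, one over the digit alphabet -- and constrains their outputs with\n# the lexicons built below (sex, nationality, dates, regions, codes), so most\n# fields are decoded with lexicon-aware beam search rather than raw argmax.\n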
\n# Define the lexicons and the mapping between region codes and address strings\nlex_sex = ['男', '女']\nlex_nation = ['仡佬', '高山', '藏', '珞巴', '景颇', '门巴', '仫佬', '柯尔克孜',\n              '畲', '维吾尔', '阿昌', '瑶', '裕固', '撒拉', '土', '塔塔尔',\n              '侗', '傈僳', '傣', '崩龙', '苗', '达斡尔', '羌', '怒',\n              '水', '哈尼', '乌孜别克', '鄂温克', '回', '汉', '赫哲', '壮',\n              '黎', '布依', '保安', '土家', '鄂伦春', '佤', '哈萨克', '塔吉克',\n              '毛难', '俄罗斯', '蒙古', '纳西', '独龙', '东乡', '布朗', '拉祜',\n              '普米', '京', '彝', '朝鲜', '满', '白', '基诺', '锡伯']\nlex_year = [str(i) for i in range(1958, 2009)]\nlex_month = [str(i) for i in range(1, 13)]\nlex_day = [str(i) for i in range(1, 32)]\nlex_month_02d = ['%02d' % i for i in range(1, 13)]\nlex_day_02d = ['%02d' % i for i in range(1, 32)]\nlex_year_start = [str(i) for i in range(2009, 2020)]\nlex_year_end = [str(i) for i in range(2014, 2040)]\n\nlex_region = []  # stores city-level region names\nlex_code = []  # stores city-level region codes\nregion_code = {}\ncode_region = {}  # maps a code to its administrative region\nfor line in open(absolute_path + '/data/code_region.txt', encoding='gbk'):\n    seg = line.strip().split(' ')\n    if len(seg[0]) != 6:\n        continue\n    code_region[seg[0]] = seg[1]\n    # city-level administrative code numbers\n    if seg[0][2:] == '0000':\n        r1 = seg[1]\n        lex_region.append(r1)\n    elif seg[0][4:] == '00':\n        r2 = seg[1]\n        lex_region.append(r1 + r2)\n    else:\n        if lex_code[-1][4:] == '00':\n            lex_code.pop()\n            lex_region.pop()\n        if lex_code[-1][2:] == '0000':\n            lex_code.pop()\n            lex_region.pop()\n        lex_region.append(r1 + r2 + seg[1])\n    lex_code.append(seg[0])\nfor i in range(len(lex_code)):\n    region_code[lex_region[i]] = lex_code[i]\n\nlexicon = [decode.Lexicon(lex_sex, alphabet[0]), decode.Lexicon(lex_nation, alphabet[0]),\n           decode.Lexicon(lex_year, alphabet[1]), decode.Lexicon(lex_month, alphabet[1]),\n           decode.Lexicon(lex_day, alphabet[1]),\n           decode.Lexicon(lex_month_02d, alphabet[1]), decode.Lexicon(lex_day_02d, alphabet[1]),\n           decode.Lexicon(lex_region, alphabet[0]), decode.Lexicon(lex_code, alphabet[1]),\n           decode.Lexicon(lex_year_start, alphabet[1]), decode.Lexicon(lex_year_end, alphabet[1])]\n
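\n\n# NOTE (added): Chinese administrative division codes are six digits -- two\n# for the province, two for the city, two for the county -- which is why the\n# helpers below peel off the 'XX0000' and 'XXXX00' prefixes to rebuild the\n# full region string for a county-level code.\n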
\ndef getRegionByCode(code):\n    if code not in code_region:\n        return None\n    cs = code[:2] + '0000'\n    rs = code_region[cs]\n    if cs == code:\n        return rs\n    cs = code[:4] + '00'\n    if len(code_region[cs]) > 1:\n        rs += code_region[cs]\n    if cs == code:\n        return rs\n    return rs + code_region[code]\n\n\ndef getBureauByCode(code):\n    if code not in code_region:\n        return None\n    cs = code[:4] + '00'\n    if len(code_region[cs]) > 1:\n        rs = code_region[cs]\n    else:\n        rs = ''\n    return rs + code_region[code] + '公安局'\n\n\ndef networkOutput(image, model):\n    imgH = 22\n    if image.shape[0] != imgH:\n        image = cv2.resize(image, (max(int(imgH * image.shape[1] / image.shape[0]), imgH), imgH),\n                           cv2.INTER_LINEAR)\n\n    image = torch.from_numpy(image.astype(np.float32))\n    if torch.cuda.is_available():\n        image = image.cuda()\n    image = Variable(image.view(1, 1, *image.size()))\n    model.eval()\n    preds = model(image)\n    preds = preds.view(preds.size(0), -1)\n    preds = F.softmax(preds, dim=1)\n\n    return preds.data\n\n\ndef getWordsResult(images, model):\n\n    name = decode.index_str(networkOutput(images[0], model[0]), alphabet[0])\n\n    sex, _ = decode.wordBeamSearch(networkOutput(images[1], model[0]), lexicon[0])\n\n    nation, _ = decode.wordBeamSearch(networkOutput(images[2], model[0]), lexicon[1])\n\n    year = decode.index_str(networkOutput(images[3], model[1]), alphabet[1])\n    month = decode.index_str(networkOutput(images[4], model[1]), alphabet[1])\n    day = decode.index_str(networkOutput(images[5], model[1]), alphabet[1])\n\n    addr_output = networkOutput(images[6], model[0])\n    region, score, t = decode.prefixBeamSearch(addr_output, lexicon[7])\n    code_output = networkOutput(images[7], model[1])\n    id_code, id_conf = decode.wordBeamSearch(code_output, lexicon[8])\n    id_tail = decode.index_str(networkOutput(images[11], model[1]), alphabet[1])\n    if region not in region_code:\n        region = getRegionByCode(id_code)\n        t, _ = decode.prefixMatch(addr_output, alphabet[0], region)\n    elif region_code[region] != id_code:\n        _, id_conf1 = decode.prefixMatch(code_output, alphabet[1], region_code[region])\n        t1, conf1 = decode.prefixMatch(addr_output, alphabet[0], getRegionByCode(id_code))\n        if score * id_conf1 < conf1 * id_conf:  # code is more confident\n            t = t1\n            region = getRegionByCode(id_code)\n        else:  # region is more confident\n            id_code = region_code[region]\n    else:  # just wanna align the region\n        t, _ = decode.prefixMatch(addr_output, alphabet[0], region)\n\n    address = region + decode.index_str(addr_output[t + 1:], alphabet[0])\n    idnumber = id_code + year + month + day + id_tail\n    agency = getBureauByCode(id_code)\n\n    if len(images) == 16:\n        # '长期' (long-term) validity\n        valid_year, _ = decode.wordBeamSearch(networkOutput(images[13], model[1]), lexicon[9])\n        valid_month, _ = decode.wordBeamSearch(networkOutput(images[14], model[1]), lexicon[5])\n        valid_day, _ = decode.wordBeamSearch(networkOutput(images[15], model[1]), lexicon[6])\n        valid_date = valid_year + '.' + valid_month + '.' + valid_day + '-长期'\n    else:\n        valid_year_start = networkOutput(images[13], model[1])\n        valid_year, score = decode.wordBeamSearch(valid_year_start, lexicon[9])\n\n        valid_year_end = networkOutput(images[16], model[1])\n        valid_year1, score1 = decode.wordBeamSearch(valid_year_end, lexicon[10])\n        if int(valid_year1) - int(valid_year) not in [5, 10, 20]:\n            if score1 > score:\n                score = 0\n                for i in [5, 10, 20]:\n                    year = str(int(valid_year1) - i)\n                    _, year_score = decode.prefixMatch(valid_year_start, alphabet[1], year)\n                    if year_score > score:\n                        valid_year = year\n                        score = year_score\n            else:\n                score1 = 0\n                for i in [5, 10, 20]:\n                    year = str(int(valid_year) + i)\n                    _, year_score = decode.prefixMatch(valid_year_end, alphabet[1], year)\n                    if year_score > score1:\n                        valid_year1 = year\n                        score1 = year_score\n        valid_month, score = decode.wordBeamSearch(networkOutput(images[14], model[1]), lexicon[5])\n        valid_month1, score1 = decode.wordBeamSearch(networkOutput(images[17], model[1]), lexicon[5])\n        if valid_month != valid_month1 and score1 > score:\n            valid_month = valid_month1\n\n        valid_day, score = decode.wordBeamSearch(networkOutput(images[15], model[1]), lexicon[6])\n        valid_day1, score1 = decode.wordBeamSearch(networkOutput(images[18], model[1]), lexicon[6])\n        if valid_day != valid_day1 and score1 > score:\n            valid_day = valid_day1\n\n        valid_date = '.' + valid_month + '.' + valid_day\n        valid_date = valid_year + valid_date + '-' + valid_year1 + valid_date\n\n    return [name, sex, nation, year, month, day, address, idnumber, agency, valid_date]\n","repo_name":"Alex-1997-Wzx/OCR_IDCARD","sub_path":"recognition_words_module/recognition_words.py","file_name":"recognition_words.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"3192266047","text":"from kafka import KafkaConsumer, KafkaProducer\nfrom elasticsearch import Elasticsearch\nimport json\nimport time\nfrom models import *\n\ntime.sleep(15)\n\nproducer = KafkaProducer(bootstrap_servers='kafka:9092')\nes = Elasticsearch(['es'])\n\nfor blog_post in BlogPost.objects.all():\n\tsome_new_listing = {'title': blog_post.title, 'body': blog_post.body, 'id': blog_post.pk}\n\tproducer.send('new-listings-topic', json.dumps(some_new_listing).encode('utf-8'))\n\nconsumer = KafkaConsumer('new-listings-topic', group_id='listing-indexer', bootstrap_servers=['kafka:9092'])\n\nwhile True:\n\tfor message in consumer:\n\t\tnew_listing = json.loads((message.value).decode('utf-8'))\n\t\tes.index(index='listing_index', doc_type='listing', id=new_listing['id'], body=new_listing)\n\t\tes.indices.refresh(index='listing_index')","repo_name":"msukkar/marketplace_of_ideas","sub_path":"model_api/model_api/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1841713887","text":"# Memory: 31256, Time: 44\nfrom itertools import combinations\nimport sys\ninput=sys.stdin.readline\nliist=[]\nwhile True:\n    string=list(map(int,input().split()))\n    if string[0]==0:\n        exit()\n    else:\n        for i in combinations(string[1:],6):\n            print(*i)\n        print()\n\n","repo_name":"meeeeju/Python-Algorithm-Study","sub_path":"hyunji/BOJ/BFS,DFS/6603_로또.py","file_name":"6603_로또.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21308921174","text":"class Solution:\n    def expressiveWords(self, s: str, words: 
List[str]) -> int:\n \n def group(word):\n groups = []\n temp = []\n\n for i in word:\n if not temp or temp[-1] == i:\n temp.append(i)\n elif temp:\n groups.append(''.join(temp))\n temp = [i]\n\n if temp:\n groups.append(''.join(temp))\n\n return groups\n \n \n s = group(s) \n countSubtrings = 0\n\n for word in words:\n temp = group(word)\n if len(s) != len(temp):\n continue\n\n isStrechy = True\n for i in range(len(s)):\n if len(temp[i]) == len(s[i]) and temp[i][0] == s[i][0]:\n continue\n\n if len(s[i]) < 3 or len(s[i]) < len(temp[i]) or temp[i][0] != s[i][0]:\n isStrechy = False\n\n if isStrechy:\n countSubtrings += 1\n\n return countSubtrings\n\n \n ","repo_name":"YeabAM/A2SV","sub_path":"0809-expressive-words/0809-expressive-words.py","file_name":"0809-expressive-words.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"7896288063","text":"#!/usr/bin/python\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom six.moves import input\n__author__ = \"ktown\"\n__copyright__ = \"Copyright Adafruit Industries 2014 (adafruit.com)\"\n__license__ = \"MIT\"\n__version__ = \"0.1.0\"\n\nimport os\nimport sys\nimport time\nimport argparse\nimport csv\nimport requests\nimport mysql.connector as mariadb \nfrom datetime import datetime\nfrom SnifferAPI import Logger\nfrom SnifferAPI import Sniffer\nfrom SnifferAPI import CaptureFiles\nfrom SnifferAPI.Devices import Device\nfrom SnifferAPI.Devices import DeviceList\n\n\nmySniffer = None\n\"\"\"@type: SnifferAPI.Sniffer.Sniffer\"\"\"\n\n\ndef setup(serport, delay=6):\n \"\"\"\n Tries to connect to and initialize the sniffer using the specific serial port\n @param serport: The name of the serial port to connect to (\"COM14\", \"/dev/tty.usbmodem1412311\", etc.)\n @type serport: str\n @param delay: Time to wait for the UART connection to be established (in seconds)\n @param delay: int\n \"\"\"\n global mySniffer\n\n # Initialize the device on the specified serial port\n print(\"Connecting to sniffer on \" + serport)\n mySniffer = Sniffer.Sniffer(serport)\n # Start the sniffer\n mySniffer.start()\n # Wait a bit for the connection to initialise\n time.sleep(delay)\n\n\ndef scanForDevices(scantime=5):\n print(\"Starting BLE device scan ({0} seconds)\".format(str(scantime)))\n\n mySniffer.scan()\n time.sleep(scantime)\n devs = mySniffer.getDevices()\n devstr = str(devs)\n devlist=devstr.split(\"BLE\")\n del devlist[0] #Delete Description\n for i in devlist:\n name=str(i).split('\"\"',1)[1].split('\"\"',1)[0]\n values=str(i).split(\"[\",1)[1].split(\"]\",1)[0]\n valuelist=values.split(\",\")\n newlist=[]\n for i in valuelist:\n i=i.replace(' ','')\n try:\n x=hex(int(i)).replace('0x','')\n except:\n x=i\n if len(x)==1: # Wenn ein Teil der MAC einstellig ist, fuehrende '0' anstellen\n x='0'+ x\n newlist.append(x)\n mac = newlist[0] + ':' +newlist[1] + ':' + newlist[2] + ':' + newlist[3] + ':' + newlist[4] + ':' + newlist[5]\n connectable = newlist[6]\n print(newlist[6])\n time.sleep(2)\n if newlist[6] == 'False':\n r = requests.get('https://api.macvendors.com/' + mac);\n vendor = r.text\n else:\n vendor = 'Not found'\n if vendor.find('errors') == -1:\n vendor = vendor\n else:\n vendor ='Not found'\n device=[name,mac,connectable,vendor]\n \n try:\n conn = mariadb.connect(\n user=\"writer\",\n password=\"PW4Writer!\",\n host=\"127.0.0.1\",\n port=3306,\n database=\"radio_mon\"\n )\n except mariadb.Error as e:\n print(\"Error 
connecting to MariaDB: \" + e)\n \n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO ble(name, mac, connect, vendor, last_seen) VALUES ('\"+ name + \"', '\" + mac + \"', '\" + connectable + \"', '\" + vendor + \"', '\" +str(datetime.now())+ \"') ON DUPLICATE KEY UPDATE last_seen='\" + str(datetime.now()) + \"';\");\n conn.commit()\n conn.close()\n return devs\n\nsetup('/dev/ttyUSB0')\nscanForDevices()\n\nexit()\n","repo_name":"FX0H4CK/RadioSecurity","sub_path":"BLE/Sniffer/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15644827683","text":"import hashlib\nfrom decouple import config\nimport os\nimport pandas as pd\n\n\ndef md5(fname):\n hash_md5 = hashlib.md5()\n hash_md5.update(open(fname, 'rb').read())\n return hash_md5.hexdigest()\n\n\nif __name__ == '__main__':\n out_dir = config('OUT_PATH')\n m5 = config('M5')\n df = pd.read_csv(m5, sep=' ', engine='python')\n for file in os.listdir(out_dir):\n hash_file = md5(out_dir + '/' + file)\n if df.loc[df['filename'] == file]['hash'].values[0] == hash_file:\n print(f'{file} True')\n else:\n print(f'----- for {file} hash {hash_file} does not match:')\n print(df.loc[df['filename'] == file]['filename'].values[0])\n print(df.loc[df['filename'] == file]['hash'].values[0])\n","repo_name":"dmitriytiras/metrics","sub_path":"saudibell_wget/check_m5.py","file_name":"check_m5.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25558679927","text":"import pygame\n\n\nclass Interface:\n def __init__(self, access):\n self.access = access\n self.player = access.player\n self.stats = access.stats\n self.setting = access.setting\n self.screen = access.screen\n self.screen_rect = self.screen.get_rect()\n\n # Scores Font\n self.score_font = pygame.font.SysFont('ROG Fonts', 24, True)\n self.hs_font = pygame.font.SysFont('ROG Fonts', 40, True)\n self.lv_font = pygame.font.SysFont('ROG Fonts', 24, True)\n\n # Player_Health\n self.start_width = 200\n self.h_width = 200\n self.h_height = 20\n self.pos_x = 30\n self.pos_y = 40\n\n def update_scores(self, score):\n scores_image = self.score_font.render(str(score), True, self.setting.white)\n rect_scores = scores_image.get_rect()\n\n rect_scores.right = self.screen_rect.right - 20\n rect_scores.top = self.screen_rect.top + 60\n\n self.screen.blit(scores_image, rect_scores)\n\n def update_high_scores(self, score):\n highscore = 0\n if score > self.stats.highscore:\n self.stats.highscore = score\n else:\n highscore = self.stats.highscore\n\n hs_image = self.hs_font.render(str(highscore), True, self.setting.white)\n rect_hs_image = hs_image.get_rect()\n rect_hs_image.centerx = self.screen_rect.centerx\n rect_hs_image.top = self.screen_rect.top + 20\n\n self.screen.blit(hs_image, rect_hs_image)\n\n def update_health(self, hp):\n\n # Calculate decrement of health bar width\n width_decrease = self.start_width - (self.start_width / self.setting.player_health) * \\\n (self.setting.player_health - hp)\n\n outer_hp = pygame.Surface((self.h_width + 10, self.h_height + 10))\n outer_hp.fill(self.setting.gray)\n rect_outer = outer_hp.get_rect(midleft=(self.pos_x - 5, self.pos_y))\n\n # Carry out when player HP > 0\n try:\n inner_hp = pygame.Surface((width_decrease, self.h_height))\n inner_hp.fill(self.setting.blue)\n rect_inner = inner_hp.get_rect(midleft=(self.pos_x, self.pos_y))\n\n 
self.screen.blit(outer_hp, rect_outer)\n self.screen.blit(inner_hp, rect_inner)\n\n except pygame.error: # When player HP == 0\n self.screen.blit(outer_hp, rect_outer)\n\n def update_life(self, lives):\n for life_number in range(lives):\n life = pygame.image.load('./data/images/tank_up(1).png').convert_alpha()\n size = life.get_size()\n image_smaller = pygame.transform.smoothscale(life, (int(size[0] * 0.5), int(size[1] * 0.5)))\n rect = image_smaller.get_rect()\n rect.x = 30 + life_number * (rect.width + 20)\n rect.y = rect.width + self.pos_y\n\n self.screen.blit(image_smaller, (rect.x, rect.y))\n\n def update_level(self, level):\n level_image = self.lv_font.render('Level ' + str(level), True, self.setting.blue)\n rect_lv_image = level_image.get_rect()\n rect_lv_image.right = self.screen_rect.right - 20\n rect_lv_image.top = self.screen_rect.top + 20\n\n self.screen.blit(level_image, rect_lv_image)\n\n def run_updates(self, hp, lives, level, score):\n self.update_scores(score)\n self.update_high_scores(score)\n self.update_health(hp)\n self.update_life(lives)\n self.update_level(level)\n","repo_name":"minhngo3818/tank-destroyer","sub_path":"dist/data/src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11173706052","text":"from ardis import *\nimport ardis.d_geometry as dg\nimport ardis.geometry as geo\n\nimport numpy as np\n\nimport os\n\nmatrixFolder = \"data\"\noutputFolder = \"output\"\n\nreaction, drain, epsilon = 1, 1e-8, 1e-3\n\ndt, max_time = 0.1, 10\n\nplot_dt = 1\n\nname = \"maze\"\n\nprint(\"Starting exploration on experiment :\", name)\n\ndampingPath = matrixFolder+\"/\"+name+\"_damping.mtx\"\nstiffnessPath = matrixFolder+\"/\"+name+\"_stiffness.mtx\"\nmeshPath = matrixFolder + \"/\" + name + \"_mesh.dat\"\n\nMesh = dg.read_mesh(meshPath)\n\nd_S = to_d_spmatrix(read_spmatrix(\n stiffnessPath, read_type.Symetric), matrix_type.CSR)\nprint(\"Stiffness matrix loaded ...\")\nd_D = to_d_spmatrix(read_spmatrix(\n dampingPath, read_type.Symetric), matrix_type.CSR)\nprint(\"Dampness matrix loaded ...\")\n\nst = state(d_D.shape[0])\nn = len(Mesh.x)\n\nU = d_vector(n)\nU.fill_value(0)\n\nd_Mesh = dg.d_mesh(Mesh.x, Mesh.y)\n\nstartZone = dg.rect_zone(dg.point2d(0, 0), dg.point2d(500, 10))\ndg.fill_zone(U, d_Mesh, startZone, 1)\n\nst.add_species(\"N\")\nst.set_species(\"N\", U)\nst.add_species(\"P\")\nst.set_species(\"P\", U)\nst.add_species(\"NP\")\nst.set_species(\"NP\", np.zeros(len(U)))\n\nsimu = simulation(st)\nsimu.drain = drain\nsimu.epsilon = epsilon\n\nsimu.load_stiffness_matrix(d_S)\nsimu.load_dampness_matrix(d_D)\n\nsimu.add_mm_reaction(\" N -> 2 N\", reaction, 1)\nsimu.add_reaction(\" N+P -> NP\", reaction)\nsimu.add_mm_reaction(\" NP -> 2P\", reaction, 1)\n# simu.add_reaction(\"N+P-> 2P\", reaction)\n\nNit = int(max_time / dt)\nverboseCount = 0\nplotcount = 0\n\nos.system(\"mkdir \" + outputFolder + \"/\" + name)\n\nfor i in range(0, Nit):\n simu.iterate_diffusion(dt)\n simu.prune()\n simu.iterate_reaction(dt, True)\n\n os.system(\"rm -f test\")\n write_file(simu.state, \"test\")\n print(simu.state == read_state(\"test\"))\n\n if (i * dt > plot_dt * plotcount):\n plotcount += 1\n fig = plot_state(simu.state, Mesh, excludeSpecies=[\"NP\"])\n fig.savefig(\n outputFolder + \"/\"+name+\"/\" + str(i) + \".png\")\n plt.close(fig)\n\n if Nit >= 100 and i >= verboseCount * Nit / 10 and i < verboseCount * Nit / 10 + 1:\n print(str(verboseCount * 
10) + \"% completed\")\n verboseCount += 1\n\nos.system(\"convert -delay 10 -loop 0 $(ls -1 \"+outputFolder +\n \"/\" + name + \"/*png | sort -V) \" + outputFolder + \"/\" + name + \".gif\" + \" && \" +\n \"rm -rf \" + outputFolder + \"/\" + name)\n\nprint(\"Results plot have been saved here: \" +\n outputFolder + \"/\" + name + \".gif\")\n","repo_name":"hedi-sel/ARDiS","sub_path":"example/MazeExplorer.py","file_name":"MazeExplorer.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"27691457184","text":"from django.shortcuts import render\nfrom django.template import RequestContext, loader\nimport json\n\nfrom dcload_control import glo_dcload\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\n\ndef index(request):\n template = loader.get_template('dcload_ui/index.html')\n context = RequestContext(request, {});\n return HttpResponse(template.render(context))\n\n\ndef setDesiredMa(request):\n value = request.GET.get(\"value\", \"0\")\n value = float(value)\n glo_dcload.set_new_desired_ma(value)\n\n return HttpResponse(\"okey dokey\")\n\n\ndef setPower(request):\n # not implemented\n\n return HttpResponse(\"okey dokey\")\n\n\ndef getStatus(request):\n result = {\"actual_ma\": glo_dcload.actual_ma,\n \"actual_volts\": glo_dcload.actual_volts,\n \"actual_watts\": glo_dcload.actual_watts,\n \"actual_temp\": glo_dcload.temperature,\n \"desired_ma\": glo_dcload.desired_ma,\n \"new_desired_ma\": glo_dcload.new_desired_ma,\n \"power\": True}\n\n return HttpResponse(json.dumps(result), content_type='application/javascript')\n","repo_name":"sbelectronics/pi-dcload","sub_path":"dcload_ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"25969276240","text":"'''\nCreated on 19 feb 2018\n\n@author: Serena Sensini; Enzo Cocca \n'''\n\n\nclass STRUTTURA(object):\n def __init__(self,\n id_struttura,\n sito,\n sigla_struttura,\n numero_struttura,\n categoria_struttura,\n tipologia_struttura,\n definizione_struttura,\n descrizione,\n interpretazione,\n periodo_iniziale,\n fase_iniziale,\n periodo_finale,\n fase_finale,\n datazione_estesa,\n materiali_impiegati,\n elementi_strutturali,\n rapporti_struttura,\n misure_struttura\n ):\n self.id_struttura = id_struttura # 0\n self.sito = sito # 1\n self.sigla_struttura = sigla_struttura # 2\n self.numero_struttura = numero_struttura # 3\n self.categoria_struttura = categoria_struttura # 4\n self.tipologia_struttura = tipologia_struttura # 5\n self.definizione_struttura = definizione_struttura # 6\n self.descrizione = descrizione # 7\n self.interpretazione = interpretazione # 8\n self.periodo_iniziale = periodo_iniziale # 9\n self.fase_iniziale = fase_iniziale # 10\n self.periodo_finale = periodo_finale # 11\n self.fase_finale = fase_finale # 12\n self.datazione_estesa = datazione_estesa # 13\n self.materiali_impiegati = materiali_impiegati # 14\n self.elementi_strutturali = elementi_strutturali # 15\n self.rapporti_struttura = rapporti_struttura # 16\n self.misure_struttura = misure_struttura # 17\n\n def __repr__(self):\n return \"\" % (\n self.id_struttura,\n self.sito,\n self.sigla_struttura,\n self.numero_struttura,\n self.categoria_struttura,\n self.tipologia_struttura,\n self.definizione_struttura,\n self.descrizione,\n self.interpretazione,\n self.periodo_iniziale,\n self.fase_iniziale,\n 
self.periodo_finale,\n            self.fase_finale,\n            self.datazione_estesa,\n            self.materiali_impiegati,\n            self.elementi_strutturali,\n            self.rapporti_struttura,\n            self.misure_struttura\n        )\n","repo_name":"pyarchinit/pyarchinit","sub_path":"modules/db/entities/STRUTTURA.py","file_name":"STRUTTURA.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"it","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"50692915596","text":"from celery import Celery\nfrom gevent import Timeout\nfrom app.redis_utils import Redis\n\n\ndef make_celery(app):\n    celery = Celery(\n        app.import_name,\n        backend=app.config['CELERY_RESULT_BACKEND'],\n        broker=app.config['CELERY_BROKER_URL']\n    )\n    celery.conf.update(app.config)\n    celery.conf['BROKER_TRANSPORT_OPTIONS'] = {'visibility_timeout': 3600, 'socket_timeout': 3600}  # 1 hour\n    celery.conf['RESULT_BACKEND_TRANSPORT_OPTIONS'] = {'socket_timeout': 3600}\n\n    class ContextTask(celery.Task):\n        timeout = 3600\n\n        def __call__(self, *args, **kwargs):\n            with app.app_context():\n                with Timeout(self.timeout, TimeoutError(f\"Task {self.name} timed out\")):\n                    return self.run(*args, **kwargs)\n\n    celery.Task = ContextTask\n    return celery\n","repo_name":"JaskaranSinghKawatra/FairnessProject","sub_path":"flask_project/app/celery_utils.py","file_name":"celery_utils.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12449532518","text":"# Gold 4 - Alphabet (알파벳)\n\nimport sys\ninput = sys.stdin.readline\n\n# BFS\ndef bfs():\n    global ans\n    q = set([(0, 0, board[0][0])])\n\n    while q:\n        x, y, a = q.pop()\n        ans = max(ans, len(a))\n\n        for dx, dy in dxy:\n            nx, ny = x + dx, y + dy\n\n            if 0 <= nx < r and 0 <= ny < c and board[nx][ny] not in a:\n                q.add((nx, ny, a + board[nx][ny]))\n\nr, c = map(int, input().split())\nboard = [input().rstrip() for _ in range(r)]\ndxy = [(-1, 0), (1, 0), (0, 1), (0, -1)]\nans = 0\n\nbfs()\nprint(ans)\n\n# Backtracking -> times out on Python3, passes on PyPy3\n# def dfs(x, y, cnt):\n#     global ans\n#     ans = max(ans, cnt)\n\n#     for dx, dy in dxy:\n#         nx, ny = x + dx, y + dy\n\n#         if 0 <= nx < r and 0 <= ny < c and not visited[board[nx][ny]]:\n#             visited[board[nx][ny]] = True\n#             dfs(nx, ny, cnt + 1)\n#             visited[board[nx][ny]] = False\n\n# r, c = map(int, input().split())\n# board = [list(map(lambda s: ord(s) - 65, input().rstrip())) for _ in range(r)]\n# visited = [False] * 26\n# visited[board[0][0]] = 1\n# dxy = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n# ans = 0\n\n# dfs(0, 0, 1)\n# print(ans)","repo_name":"vhzkclq0705/Algorithm_Problem_Solving","sub_path":"BackJoon/기출문제모음/1987.py","file_name":"1987.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38072054389","text":"from enum import Enum, auto\nfrom typing import Optional, List\n\nfrom ._AbstractMultiResponsePLS import AbstractMultiResponsePLS\nfrom ...core import ZERO, real, ONE\nfrom ...core.matrix import Matrix, factory\nfrom ...transformation import Standardize\n\n\nclass NIPALS(AbstractMultiResponsePLS):\n    \"\"\"\n    Nonlinear Iterative Partial Least Squares\n\n    Implementation modelled on scikit-learn's NIPALS implementation:\n    \n    Github scikit-learn NIPALS\n    \n\n    Parameters:\n    - tol: Iterative convergence tolerance\n    - maxIter: Maximum number of iterations\n    - normYWeights: Flag to normalize Y weights\n    - deflationMode: Mode for Y matrix deflation. Can be either CANONICAL or REGRESSION\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.X_scores: Optional[Matrix] = None  # Scores on X\n        self.Y_scores: Optional[Matrix] = None  # Scores on Y\n        self.X_loadings: Optional[Matrix] = None  # Loadings on X\n        self.Y_loadings: Optional[Matrix] = None  # Loadings on Y\n        self.X_weights: Optional[Matrix] = None  # Weights on X\n        self.Y_weights: Optional[Matrix] = None  # Weights on Y\n        self.X_rotations: Optional[Matrix] = None  # Projection of X into latent space\n        self.Y_rotations: Optional[Matrix] = None  # Projection of Y into latent space\n        self.X: Optional[Matrix] = None  # Training points\n        self.coef: Optional[Matrix] = None  # Regression coefficients\n        self.tol: real = real(1e-6)  # Inner NIPALS loop improvement tolerance\n        self.max_iter: int = 500  # Inner NIPALS loop maximum number of iterations\n        self.norm_Y_weights: bool = False  # Flag to normalize Y weights\n        self.standardize_X: Optional[Standardize] = None  # Standardize X transformation\n        self.standardize_Y: Optional[Standardize] = None  # Standardize Y transformation\n        self.deflation_mode: DeflationMode = DeflationMode.REGRESSION\n\n    def initialize(self, predictors: Optional[Matrix] = None, response: Optional[Matrix] = None) -> Optional[str]:\n        if predictors is None and response is None:\n            super().initialize()\n            self.tol = real(1e-6)\n            self.max_iter = 500\n            self.norm_Y_weights = False\n            self.standardize_X = Standardize()\n            self.standardize_Y = Standardize()\n            self.deflation_mode = DeflationMode.REGRESSION\n        else:\n            return super().initialize(predictors, response)\n\n    @staticmethod\n    def validate_max_iter(value: int) -> bool:\n        return value >= 0\n\n    @staticmethod\n    def validate_tol(value: real) -> bool:\n        return value >= ZERO\n\n    def get_min_columns_response(self) -> int:\n        return 1\n\n    def get_max_columns_response(self) -> int:\n        return -1\n\n    def do_perform_initialization(self, predictors: Matrix, response: Matrix) -> Optional[str]:\n        # Init\n        X: Matrix = predictors\n        X = self.standardize_X.transform(X)\n        Y: Matrix = response\n        Y = self.standardize_Y.transform(Y)\n\n        # Dimensions\n        num_rows: int = X.num_rows()\n        num_features: int = X.num_columns()\n        num_classes: int = Y.num_columns()\n        num_components: int = self.num_components\n\n        # Init matrices\n        self.X_scores = factory.zeros(num_rows, num_components)  # T\n        self.Y_scores = factory.zeros(num_rows, num_components)  # U\n\n        self.X_weights = factory.zeros(num_features, num_components)  # W\n        self.Y_weights = factory.zeros(num_classes, num_components)  # C\n\n        self.X_loadings = factory.zeros(num_features, num_components)  # P\n        self.Y_loadings = factory.zeros(num_classes, num_components)  # Q\n\n        yk_loading: Matrix = factory.zeros(num_classes, 1)\n\n        eps: real = real(1e-10)\n        for k in range(num_components):\n            if Y.transpose().mul(Y).all(lambda e: e < eps):\n                self.logger.warning('Y residual constant at iteration ' + str(k))\n                break\n\n            res: NipalsLoopResult = self.nipals_loop(X, Y)\n            xk_weight: Matrix = res.X_weights\n            yk_weight: Matrix = res.Y_weights\n\n            # Calculate latent X and Y scores\n            xk_score: Matrix = X.mul(xk_weight)\n            yk_score: Matrix = Y.mul(yk_weight).div(yk_weight.norm2_squared())\n\n            if xk_score.norm2_squared() < eps:\n                self.logger.warning('X scores are null at component ' + str(k))\n                break\n\n            # Deflate X\n            xk_loading: Matrix = X.transpose().mul(xk_score).div(xk_score.norm2_squared())\n            X = X.sub(xk_score.mul(xk_loading.transpose()))\n\n            # Deflate Y\n            if self.deflation_mode is DeflationMode.CANONICAL:\n                yk_loading: Matrix = 
Y.transpose().mul(yk_score).div(yk_score.norm2_squared())\n Y = Y.sub(yk_score.mul(yk_loading.transpose()))\n elif self.deflation_mode is DeflationMode.REGRESSION:\n yk_loading: Matrix = Y.transpose().mul(xk_score).div(xk_score.norm2_squared())\n Y = Y.sub(xk_score.mul(yk_loading.transpose()))\n\n # Store results\n self.X_scores.set_column(k, xk_score)\n self.Y_scores.set_column(k, yk_score)\n self.X_weights.set_column(k, xk_weight)\n self.Y_weights.set_column(k, yk_weight)\n self.X_loadings.set_column(k, xk_loading)\n self.Y_loadings.set_column(k, yk_loading)\n\n self.X = X\n self.X_rotations = self.X_weights.mul((self.X_loadings.transpose().mul(self.X_weights)).pseudo_inverse())\n if Y.num_columns() > 1:\n self.Y_rotations = self.Y_weights.mul((self.Y_loadings.transpose().mul(self.Y_weights)).pseudo_inverse())\n else:\n self.Y_rotations = factory.filled(1, 1, ONE)\n\n # Calculate regression coefficients\n y_stds: Matrix = self.standardize_Y.get_std_devs()\n self.coef = self.X_rotations.mul(self.Y_loadings.transpose()).scale_by_row_vector(y_stds)\n return None\n\n def nipals_loop(self, X: Matrix, Y: Matrix) -> 'NipalsLoopResult':\n \"\"\"\n Perform the inner NIPALS loop.\n\n :param X: Predictors matrix.\n :param Y: Response matrix.\n :return: NipalsLoopResult.\n \"\"\"\n iterations: int = 0\n\n y_score: Matrix = Y.get_column(0) # (y scores)\n X_weight_old: Matrix = factory.zeros(X.num_columns(), 1)\n X_p_inv: Optional[Matrix] = None\n Y_p_inv: Optional[Matrix] = None\n\n eps: real = real(1e-16)\n\n # Repeat 1) - 3) until convergence: either change of u is lower than m_Tol or maximum\n # number of iterations has been reached (m_MaxIter)\n while True:\n # 1) Update X weights\n if self.get_weight_calculation_mode() is WeightCalculationMode.CCA:\n if X_p_inv is None:\n # sklearn uses pinv here which ojAlgo implicitly does\n X_p_inv = X.inverse()\n X_weight: Matrix = X_p_inv.mul(y_score)\n else: # PLS\n X_weight: Matrix = X.transpose().mul(y_score).div(y_score.norm2_squared())\n\n # Add eps if necessary to converge to a more acceptable solution\n if X_weight.norm2_squared() < eps:\n X_weight = X_weight.add(eps)\n\n # Normalize\n X_weight = X_weight.div(X_weight.norm2() + eps)\n\n # 2) Calculate latent X scores\n X_score: Matrix = X.mul(X_weight)\n\n # 3) Update Y weights\n if self.get_weight_calculation_mode() is WeightCalculationMode.CCA:\n if Y_p_inv is None:\n # sklearn uses pinv here which ojAlgo implicitly does\n Y_p_inv = Y.inverse()\n Y_weight: Matrix = Y_p_inv.mul(X_score)\n else: # PLS\n # WeightCalculationMode A: Regress each Y column on xscore\n Y_weight: Matrix = Y.transpose().mul(X_score).div(X_score.norm2_squared())\n\n # Normalise Y weights\n if self.norm_Y_weights:\n Y_weight = Y_weight.div(Y_weight.norm2() + eps)\n\n # 4) Calculate ykScores\n Y_score: Matrix = Y.mul(Y_weight).div(Y_weight.norm2_squared() + eps)\n\n X_weight_diff: Matrix = X_weight.sub(X_weight_old)\n\n if X_weight_diff.norm2_squared() < self.tol or Y.num_columns() == 1:\n break\n\n if iterations >= self.max_iter:\n break\n\n # Update stopping conditions\n X_weight_old = X_weight\n iterations += 1\n\n return NipalsLoopResult(X_weight, Y_weight, iterations)\n\n def do_perform_predictions(self, predictors: Matrix) -> Matrix:\n X: Matrix = self.standardize_X.transform(predictors)\n\n Y_means: Matrix = self.standardize_Y.get_means()\n Y_hat: Matrix = X.mul(self.coef).add_by_vector(Y_means)\n return Y_hat\n\n def do_transform(self, predictors: Matrix) -> Matrix:\n X: Matrix = 
self.standardize_X.transform(predictors)\n\n        # Apply rotations\n        X_scores: Matrix = X.mul(self.X_rotations)\n        return X_scores\n\n    def do_transform_response(self, response: Matrix) -> Matrix:\n        Y: Matrix = self.standardize_Y.transform(response)\n\n        # Apply rotations\n        Y_scores: Matrix = Y.mul(self.Y_rotations)\n        return Y_scores\n\n    def get_matrix_names(self) -> List[str]:\n        return ['T', 'U', 'P', 'Q']\n\n    def get_matrix(self, name: str) -> Optional[Matrix]:\n        if name == 'T':\n            return self.X_scores\n        elif name == 'U':\n            return self.Y_scores\n        elif name == 'P':\n            return self.X_loadings\n        elif name == 'Q':\n            return self.Y_loadings\n        return None\n\n    def has_loadings(self) -> bool:\n        return True\n\n    def reset(self):\n        super().reset()\n        self.X_scores = None\n        self.Y_scores = None\n        self.X_loadings = None\n        self.Y_loadings = None\n        self.X_weights = None\n        self.Y_weights = None\n        self.coef = None\n        self.X = None\n        self.X_rotations = None\n        self.Y_rotations = None\n        self.standardize_X = Standardize()\n        self.standardize_Y = Standardize()\n\n    def get_loadings(self) -> Optional[Matrix]:\n        return self.X_loadings\n\n    def can_predict(self) -> bool:\n        return True\n\n    def get_weight_calculation_mode(self) -> 'WeightCalculationMode':\n        return WeightCalculationMode.PLS  # Mode A in sklearn\n\n\nclass NipalsLoopResult:\n    \"\"\"\n    NIPALS loop result: x and y weight matrices and number of iterations.\n    \"\"\"\n    def __init__(self, X_weights: Matrix, Y_weights: Matrix, iterations: int):\n        self.X_weights: Matrix = X_weights\n        self.Y_weights: Matrix = Y_weights\n        self.iterations: int = iterations\n\n\nclass DeflationMode(Enum):\n    \"\"\"\n    Deflation mode enum.\n    \"\"\"\n    CANONICAL = auto()\n    REGRESSION = auto()\n\n\nclass WeightCalculationMode(Enum):\n    \"\"\"\n    Mode for x/y-weight calculation\n    \"\"\"\n    PLS = auto()\n    CCA = auto()\n","repo_name":"waikato-datamining/py-matrix-algorithms","sub_path":"src/wai/ma/algorithm/pls/_NIPALS.py","file_name":"_NIPALS.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"24705315272","text":"#-*-coding: utf-8 -*-\n\nfrom pytz import timezone\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.jobstores.redis import RedisJobStore\nfrom apscheduler.executors.pool import ProcessPoolExecutor\nfrom apscheduler.events import EVENT_JOB_ADDED\n\n\nscheduler = BackgroundScheduler()\n\n\ndef start_scheduler(settings):\n    assert settings['scheduler.store'] in ('redis', 'sqlalchemy'),\\\n        'Unknown job store, must be one of redis or sqlalchemy'\n\n    if settings['scheduler.store'] == 'redis':\n        jobstores = {\n            'default': RedisJobStore(db=settings['scheduler.db'])\n        }\n    else:\n        jobstores = {\n            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])\n        }\n    \n    executors = {\n        'default': {\n            'type': settings['scheduler.executors.type'],\n            'max_workers': settings['scheduler.executors.max_workers']\n        },\n        'processpool': ProcessPoolExecutor(\n            max_workers=settings['scheduler.executors.processpool.max_workers']\n        )\n    }\n    job_defaults = {\n        'coalesce': False,\n        'max_instances': settings['scheduler.job_defaults.max_instances']\n    }\n    scheduler.configure(\n        jobstores=jobstores,\n        executors=executors,\n        job_defaults=job_defaults,\n        timezone=timezone('UTC')\n    )\n    if settings['scheduler.autostart'] == 'true':\n        scheduler.start()\n\n\ndef job_added_event(event):\n    job = scheduler.get_job(event.job_id)\n    if 
hasattr(job.func, 'scopped'):\n kwargs = job.kwargs\n kwargs['_job_id'] = job.id\n scheduler.modify_job(\n event.job_id, None, **{'kwargs': kwargs}\n )\n\n\nscheduler.add_listener(job_added_event, EVENT_JOB_ADDED)\n","repo_name":"mazvv/travelcrm","sub_path":"travelcrm/lib/scheduler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"16"} +{"seq_id":"10085560612","text":"# Gold 4 - 알파벳\n\nimport sys\ninput = sys.stdin.readline\n\n# BFS\ndef bfs():\n global ans\n q = set([(0, 0, board[0][0])])\n\n while q:\n x, y, a = q.pop()\n ans = max(ans, len(a))\n\n for dx, dy in dxy:\n nx, ny = x + dx, y + dy\n\n if 0 <= nx < r and 0 <= ny < c and board[nx][ny] not in a:\n q.add((nx, ny, a + board[nx][ny]))\n\nr, c = map(int, input().split())\nboard = [input().rstrip() for _ in range(r)]\ndxy = [(-1, 0), (1, 0), (0, 1), (0, -1)]\nans = 0\n\nbfs()\nprint(ans)\n\n# Backtracking -> Python3 시간 초과, Pypy3 통과\n# def dfs(x, y, cnt):\n# global ans\n# ans = max(ans, cnt)\n\n# for dx, dy in dxy:\n# nx, ny = x + dx, y + dy\n\n# if 0 <= nx < r and 0 <= ny < c and not visited[board[nx][ny]]:\n# visited[board[nx][ny]] = True\n# dfs(nx, ny, cnt + 1)\n# visited[board[nx][ny]] = False\n\n# r, c = map(int, input().split())\n# board = [list(map(lambda s: ord(s) - 65, input().rstrip())) for _ in range(r)]\n# visited = [False] * 26\n# visited[board[0][0]] = 1\n# dxy = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n# ans = 0\n\n# dfs(0, 0, 1)\n# print(ans)","repo_name":"vhzkclq0705/Algorithm_Problem_Solving","sub_path":"BackJoon/기출문제모음/1987.py","file_name":"1987.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38072054389","text":"\"\"\" Manages actual screen display w all sprites and elements, hardcoded values included \"\"\"\n\nimport sprites_management.sprites_manager\nfrom sprites_management.sprites_functions import extract_doors_sides\nfrom management_and_config.configurations import *\nfrom resources import image_manager\n\npygame.init()\n\n\nclass Room:\n def __init__(self, room_size, doors_config, room_enemies, doors, shop=None):\n # specification of the room\n self._size = room_size\n\n self._enemies_on_start = []\n for enemy in room_enemies:\n self._enemies_on_start.append(enemy.id)\n\n # minimap\n self._visited = False\n\n # walls group\n self._walls = pygame.sprite.Group()\n sprites_management.sprites_manager.add_walls(self._walls, room_size, extract_doors_sides(doors_config))\n\n # doors group\n self._doors = pygame.sprite.Group()\n sprites_management.sprites_manager.add_doors_room(self._doors, doors)\n\n # enemies group\n self._enemies = pygame.sprite.Group()\n sprites_management.sprites_manager.add_enemies(self._enemies, room_enemies)\n\n # dropped items group\n self._drop = pygame.sprite.Group()\n\n # self._shop = shop # TODO : shop is a dict of items with prices (or None if its not a shop)\n self._shop = {\"sword\": 20, \"health_potion\": 2} # TODO get from a file? configurations? (ex. 
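Aside (illustrative, not from the original solution): the BFS frontier above is a set of (x, y, letters-on-path) tuples, so two different routes that reach the same cell having used the same letters collapse into a single state:

q = set([(0, 0, 'C')])
q.add((0, 1, 'CA'))
q.add((0, 1, 'CA'))  # duplicate state, silently ignored by the set
print(len(q))        # 2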
shop1 = {...} shop2 =\n\n def draw_room(self):\n screen.fill(WHITE)\n self.terrain_display()\n self.wall_display()\n self.door_display()\n self.enemy_display()\n self.drop_display()\n\n def terrain_display(self):\n terrain_image = image_manager.get_terrain_image()\n screen.blit(terrain_image, terrain_image_start_point)\n\n def wall_display(self):\n self._walls.draw(screen)\n\n def door_display(self):\n self._doors.draw(screen)\n\n def enemy_display(self):\n self._enemies.draw(screen)\n\n # displaying enemies' health\n for enemy in self._enemies.sprites():\n health = enemy.health\n max_health = enemy.max_health\n if health != max_health:\n health_bar_start_point = [enemy.get_position()[0] + enemy_health_bar_display_difference[0],\n enemy.get_position()[1] + enemy_health_bar_display_difference[1]]\n\n health_text_center = [health_bar_start_point[0] + enemy_health_text_center_difference[0],\n health_bar_start_point[1] + enemy_health_text_center_difference[1]]\n\n # health bar\n pygame.draw.rect(screen, PINK, (health_bar_start_point[0], health_bar_start_point[1],\n enemy_health_bar_length, enemy_health_bar_width))\n\n pygame.draw.rect(screen, RED, (health_bar_start_point[0], health_bar_start_point[1],\n enemy_health_bar_length * health / max_health, enemy_health_bar_width))\n\n # text: current health / max health in the center of health bar\n health_text = enemy_health_font.render(str(health) + \"/\" + str(max_health), True, BLACK)\n health_text_rect = health_text.get_rect()\n health_text_rect.center = health_text_center\n screen.blit(health_text, health_text_rect)\n\n def drop_display(self):\n self._drop.draw(screen)\n\n def visit(self):\n self._visited = True\n\n def kill_enemy(self, enemy, time):\n sprites_management.sprites_manager.add_drop(self._drop, enemy, time)\n self._enemies.remove(enemy)\n\n def remove_drop(self, drop):\n self._drop.remove(drop)\n\n def update_active_enemies(self, active_enemies):\n for enemy_id in self._enemies_on_start:\n active_enemies[enemy_id] = 0\n\n for enemy in self._enemies:\n active_enemies[enemy.id] = 1\n\n return active_enemies\n\n def update_doors(self, open_doors):\n for door in self._doors:\n open_doors[door.id] = 0 if door.closed else 1\n return open_doors\n\n def open_door(self, character):\n for door in self._doors.sprites():\n door_pos = door.get_position_center()\n character_pos = character.get_position_center()\n if door.closed and ((character_pos[0] - door_pos[0]) ** 2 + (\n character_pos[1] - door_pos[1]) ** 2) < distance_to_open_door ** 2 and \\\n character.keys[door.color] > 0:\n character.use_key(door.color)\n door.open()\n return True\n return False\n\n def enter_shop(self, character): # TODO edit\n if self._shop is not None:\n # check if is near the building\n return True\n return False\n\n\n @property\n def size(self):\n \"\"\" size of the room \"\"\"\n return self._size\n\n @property\n def visited(self):\n \"\"\" if the room has been visited \"\"\"\n return self._visited\n\n @property\n def walls(self):\n \"\"\" room's walls (sprite group of walls) \"\"\"\n return self._walls\n\n @property\n def doors(self):\n \"\"\" room's doors (sprite group of doors) \"\"\"\n return self._doors\n\n @property\n def enemies(self):\n \"\"\" room's enemies (sprite group of enemies) \"\"\"\n return self._enemies\n\n @property\n def dropped_items(self):\n \"\"\" items dropped in the room (sprite group of drops) \"\"\"\n return self._drop\n\n @property\n def shop(self):\n \"\"\" dictionary of shop inventory: \"item_name\": price \"\"\"\n return 
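Aside (hypothetical numbers): open_door above compares squared distances, which avoids a square root per door per frame; distance_to_open_door comes from configurations:

door_pos, character_pos = (100, 100), (103, 104)
dist_sq = (character_pos[0] - door_pos[0]) ** 2 + (character_pos[1] - door_pos[1]) ** 2
print(dist_sq < 5 ** 2)  # 25 < 25 -> False: just out of reach for a radius of 5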
self._shop\n","repo_name":"agga1/DanJyncesDungeons","sub_path":"worlds_management/Room.py","file_name":"Room.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24405912701","text":"import torch\n\nfrom .load_data import DATAPATH\nfrom .data_utils import rand_train_test_idx, even_quantile_labels\nfrom ogb.nodeproppred import NodePropPredDataset\n\n\nclass NCDataset(object):\n def __init__(self, name, root=f'{DATAPATH}'):\n \"\"\"\n based off of ogb NodePropPredDataset\n https://github.com/snap-stanford/ogb/blob/master/ogb/nodeproppred/dataset.py\n Gives torch tensors instead of numpy arrays\n - name (str): name of the dataset\n - root (str): root directory to store the dataset folder\n - meta_dict: dictionary that stores all the meta-information about data. Default is None, \n but when something is passed, it uses its information. Useful for debugging for external contributers.\n \n Usage after construction: \n \n split_idx = dataset.get_idx_split()\n train_idx, valid_idx, test_idx = split_idx[\"train\"], split_idx[\"valid\"], split_idx[\"test\"]\n graph, label = dataset[0]\n \n Where the graph is a dictionary of the following form: \n dataset.graph = {'edge_index': edge_index,\n 'edge_feat': None,\n 'node_feat': node_feat,\n 'num_nodes': num_nodes}\n For additional documentation, see OGB Library-Agnostic Loader https://ogb.stanford.edu/docs/nodeprop/\n \"\"\"\n\n self.name = name # original name, e.g., ogbn-proteins\n self.graph = {}\n self.label = None\n\n def get_idx_split(self, split_type='random', train_prop=.5, valid_prop=.25):\n \"\"\"\n train_prop: The proportion of dataset for train split. Between 0 and 1.\n valid_prop: The proportion of dataset for validation split. Between 0 and 1.\n \"\"\"\n\n if split_type == 'random':\n ignore_negative = False if self.name == 'ogbn-proteins' else True\n train_idx, valid_idx, test_idx = rand_train_test_idx(\n self.label, train_prop=train_prop, valid_prop=valid_prop, ignore_negative=ignore_negative)\n split_idx = {'train': train_idx,\n 'valid': valid_idx,\n 'test': test_idx}\n return split_idx\n\n def __getitem__(self, idx):\n assert idx == 0, 'This dataset has only one graph'\n return self.graph, self.label\n\n def __len__(self):\n return 1\n\n def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, len(self))\n\n\ndef load_nc_dataset(dataname, sub_dataname=''):\n \"\"\" Loader for NCDataset, returns NCDataset. 
\"\"\"\n if dataname == 'arxiv-year':\n dataset = load_arxiv_year_dataset()\n else:\n raise ValueError('Invalid dataname')\n return dataset\n\n\ndef load_arxiv_year_dataset(nclass=5):\n filename = 'arxiv-year'\n dataset = NCDataset(filename)\n ogb_dataset = NodePropPredDataset(root=DATAPATH, name='ogbn-arxiv')\n dataset.graph = ogb_dataset.graph\n dataset.graph['edge_index'] = torch.as_tensor(dataset.graph['edge_index'])\n dataset.graph['node_feat'] = torch.as_tensor(dataset.graph['node_feat'])\n\n label = even_quantile_labels(\n dataset.graph['node_year'].flatten(), nclass, verbose=False)\n dataset.label = torch.as_tensor(label).reshape(-1, 1)\n return dataset","repo_name":"draym28/LSGNN","sub_path":"load_large_graph/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"41382153001","text":"import utils \nimport charts\nimport read_csv\nimport pandas as pd\n\n\n\n\n\ndf = pd.read_csv('world_population.csv')\n\noptions = set(df['Continent'].values)\nprint(options)\ncontinent = str(input('de que continente quieres que se muestre la estadistica mundial ==> '))\n\n\ndf = df[df['Continent'] == continent]\n\ncountries = list(set(df['Country/Territory'].values))\nporcentage = df['World Population Percentage']\n\ncharts.generate_pie_chart(continent, countries,porcentage)\n\nprint(countries)\nprint(type(countries))\nprint(porcentage)\n\n\n'''\ndata = list(filter(lambda n : n['Continent'].lower() == continent.lower(), data))\nvalues = list(map(lambda i : i['World Population Percentage'],data))\nlabel = list(map(lambda i : i['Country/Territory'],data))\n'''\n","repo_name":"Sebas0529/Learning-python","sub_path":"app/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15983520203","text":"import json\nimport numpy as np\nfrom tqdm import tqdm\n\n\nclass Graph:\n def _add_edge(self, x, y):\n if x not in self.edges:\n self.edges[x] = {}\n self.in_degree[y] = self.in_degree.get(y, 0) + \\\n (1 if self.repeat or y not in self.edges[x] else 0)\n self.out_degree[x] = self.out_degree.get(x, 0) + \\\n (1 if self.repeat or y not in self.edges[x] else 0)\n\n self.edges[x][y] = 1 + \\\n (self.edges[x].get(y, 0) if self.repeat else 0)\n\n def __init__(self, edges, nodes=None, repeat=True, directed=False):\n _nodes = set()\n self.edges = {}\n self.repeat = repeat\n self.directed = directed\n self.in_degree = {}\n self.out_degree = {}\n _nodes = set()\n for x, y in edges:\n self._add_edge(x, y)\n if not directed:\n self._add_edge(y, x)\n _nodes.add(x)\n _nodes.add(y)\n\n self.nodes = _nodes if nodes is None else set(nodes)\n\n def get_nodes(self):\n return self.nodes.copy()\n\n def get_edges(self):\n return self.edges.copy()\n\n def get_edges_list(self):\n ans = []\n for x, y in self.edges.items():\n for k, v in y.items():\n for p in range(v):\n ans.append((x, k))\n return ans\n\n def get_in_degree(self):\n return self.in_degree.copy()\n\n def get_out_degree(self):\n return self.out_degree.copy()\n\n def get_degree(self):\n ans = {}\n for k, v in self.in_degree.items():\n ans[k] = ans.get(k, 0) + v\n for k, v in self.out_degree.items():\n ans[k] = ans.get(k, 0) + v\n if not self.directed:\n for k in ans:\n ans[k] //= 2\n return ans\n\n def get_node_list(self):\n return list(self.nodes)\n\n def get_degree_point(self, point):\n ans = 
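Aside: arxiv-year turns publication years into classes with even_quantile_labels. A sketch of equal-frequency labelling (the exact tie handling in data_utils may differ):

import numpy as np

def quantile_labels(vals, nclass=5):
    # assign label k to values falling in the k-th equal-frequency bin
    vals = np.asarray(vals, dtype=float)
    inner_edges = np.quantile(vals, np.linspace(0, 1, nclass + 1)[1:-1])
    return np.searchsorted(inner_edges, vals, side='right')

print(quantile_labels([2010, 2012, 2015, 2018, 2020], nclass=2))  # [0 0 1 1 1]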
self.in_degree.get(point, 0) + self.out_degree.get(point, 0)\n return ans if self.directed else ans // 2\n\n\nclass WalkGraph(Graph):\n def __init__(\n self, edges, p=1, q=1,\n nodes=None, repeat=True, directed=False\n ):\n super(WalkGraph, self).__init__(\n edges=edges, nodes=nodes,\n repeat=repeat, directed=directed\n )\n self.q, self.invq = q, 1 / q\n self.p, self.invp = p, 1 / p\n\n def get_val(self, v, u, w):\n if v == w:\n return self.invp\n elif v in self.edges.get(w, {}):\n return 1\n else:\n return self.invq\n\n def dfs(self, now, last, res, ans):\n if res == 0:\n return\n Neighbors = self.edges[now]\n Keys = list(Neighbors.keys())\n Distributions = np.array(\n [Neighbors[v] * self.get_val(v, now, last) for v in Keys],\n dtype=np.float64\n )\n Distributions = Distributions / Distributions.sum()\n Test = np.random.multinomial(1, Distributions)\n ans.append(Keys[Test.argmax()])\n self.dfs(ans[-1], now, res - 1, ans)\n\n def Walk_Single(self, point, length):\n if length < 0:\n raise ValueError('walk length should be positive')\n elif self.directed:\n Msg = 'Walk shouldn\\'t be performed on directed graph'\n raise AttributeError(Msg)\n else:\n if self.get_degree_point(point) == 0:\n return [point for x in range(length + 1)]\n else:\n ans = [point]\n self.dfs(point, None, length, ans)\n return ans\n\n def Walks(self, length, verbose=False):\n if verbose:\n print('[INFO] Random Walking..')\n\n ans = []\n Iters = tqdm(self.nodes) if verbose else self.nodes\n for i in Iters:\n ans.append(self.Walk_Single(i, length))\n\n if verbose:\n print('[INFO] Random Walking Done')\n\n return ans\n\n\nif __name__ == '__main__':\n V = WalkGraph(\n [\n [1, 2], [1, 2], [2, 3], [4, 5],\n [3, 4], [5, 7], [6, 8], [9, 1],\n [9, 2], [5, 9], [8, 3], [4, 9],\n [6, 2], [7, 1]\n ],\n repeat=True, p=0.5, q=2,\n nodes=[1, 2, 3, 4, 5, 6, 7, 9]\n )\n print(V.get_edges())\n print(V.get_edges_list())\n print(V.get_degree())\n print(V.get_in_degree())\n print(V.get_out_degree())\n\n print('\\n\\n')\n print(V.Walks(20, True))\n","repo_name":"zengkaipeng/EE359-Data-Mining","sub_path":"Course Project3 Link Prediction/src/Graph_Base.py","file_name":"Graph_Base.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11498687452","text":"import json\nfrom os import getenv\nfrom pyairtable.api import Api, Table\nfrom eagle_cool_client import EagleCoolClient\nfrom tqdm import tqdm\n\n\nairtable_token = getenv(\"AIRTABLE_TOKEN\", \"none\")\napi = Api(airtable_token)\nairtable_base = api.get_base(getenv(\"AIRTABLE_BASE_ID\", \"none\"))\ngenerations: Table = airtable_base.get_table(\"generations\")\neagle = EagleCoolClient()\nall_gens = generations.all()\n\nindexes = {\n \"gen_id\": {item[\"fields\"][\"gid\"]: item[\"fields\"] for item in all_gens},\n \"gen_id+seed\": {\n f\"{item['fields']['gid']}-{item['fields']['seed']}\": item[\"fields\"]\n for item in all_gens\n },\n \"seed\": {f\"{item['fields']['seed']}\": item[\"fields\"] for item in all_gens},\n}\n\nfolders = eagle.list_folders()\n\nname = \"stash\"\n\n\ndef find_folder(folders, name):\n stash_folders = []\n for folder in folders:\n if name in folder[\"name\"]:\n stash_folders.append(folder)\n if \"children\" in folder:\n stash_folders.extend(find_folder(folder[\"children\"], name))\n return stash_folders\n\n\nstash_folders = find_folder(folders[\"data\"], name)\n\nimages = eagle.list_items(folders=\",\".join([x[\"id\"] for x in stash_folders]))[\"data\"]\nfor image in 
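Aside: get_val above implements the node2vec-style bias - 1/p to step back to the previous node, 1 to a mutual neighbour, 1/q to move further away. A toy check with hypothetical values:

g = WalkGraph([[1, 2], [2, 3], [1, 3], [3, 4]], p=0.5, q=2)
# Mid-walk 2 -> 3, each candidate v is weighted by get_val(v, now=3, last=2):
print(g.get_val(2, 3, 2))  # 2.0  (v == last: stepping back, weight 1/p)
print(g.get_val(1, 3, 2))  # 1    (v adjacent to last: weight 1)
print(g.get_val(4, 3, 2))  # 0.5  (v moves away: weight 1/q)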
tqdm(images):\n name = image[\"name\"]\n tags = image[\"tags\"]\n if \"has_prompt\" not in tags:\n for _, index in indexes.items():\n if name in index:\n gen_data = index[name]\n tags.append(\"has_prompt\")\n result = eagle.update_item(\n id=image[\"id\"],\n annotation=json.dumps(gen_data, ensure_ascii=False),\n tags=tags,\n )\nprint(stash_folders)\n","repo_name":"vpuhoff/eagle_cool_client","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17500217336","text":"\"\"\"empty message\n\nRevision ID: 329c9e995bc9\nRevises: w0009\nCreate Date: 2017-01-05 20:58:03.048614\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '329c9e995bc9'\ndown_revision = 'w0009'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('card', sa.Column('silver_points', sa.Integer(), server_default='0', nullable=False))\n op.drop_column('card', 'prize_pool')\n op.add_column('transfer', sa.Column('silver_points', sa.Integer(), server_default='0', nullable=False))\n op.drop_column('transfer', 'amount')\n op.add_column('user', sa.Column('silver_points', sa.Integer(), server_default='0', nullable=False))\n op.drop_column('user', 'points')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('points', sa.INTEGER(), server_default=sa.text(u'0'), autoincrement=False, nullable=False))\n op.drop_column('user', 'silver_points')\n op.add_column('transfer', sa.Column('amount', sa.INTEGER(), server_default=sa.text(u'0'), autoincrement=False, nullable=False))\n op.drop_column('transfer', 'silver_points')\n op.add_column('card', sa.Column('prize_pool', sa.INTEGER(), server_default=sa.text(u'0'), autoincrement=False, nullable=False))\n op.drop_column('card', 'silver_points')\n # ### end Alembic commands ###\n","repo_name":"itdream-dev/python","sub_path":"guidehero-backend/migrations/versions/329c9e995bc9_.py","file_name":"329c9e995bc9_.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41988696991","text":"import os, sys, json, time\nfrom unittest import mock\nsys.path.append('../')\n\nfrom main import eval_sheet, grade_sheet\nfrom modules.parse import parse_wrapper\nfrom modules.oai_api import get_completion\nfrom modules.local_llm_api import get_model_fn\nfrom openai.types.chat import ChatCompletion\n\ndef load_chat_completion(fn: str) -> ChatCompletion:\n '''need this since oai_api.get_completion takes a ChatCompletion object'''\n with open(fn, 'r') as f:\n data = json.load(f)\n return ChatCompletion(**data)\n \nRESPONSE_STUB_FN = './data/stubs/completion.json'\nMODEL_RESPONSE_STUB = load_chat_completion(RESPONSE_STUB_FN)\n\nVALID_MODEL_PATH = '../../../data/llama-2-7b.Q4_K_M.gguf'\n\n\ndef test_stub_loaded():\n '''test to make sure subsequent tests are valid'''\n msg = get_completion(MODEL_RESPONSE_STUB)\n assert len(msg) > 0\n\n\ndef test_get_model_fn():\n # valid model_name should return a path\n model_fn = get_model_fn('llama_7b')\n assert model_fn == '../../data/llama-2-7b.Q4_K_M.gguf'\n # valid path to a file should return the path\n model_fn = get_model_fn(VALID_MODEL_PATH)\n assert model_fn == VALID_MODEL_PATH\n # invalid model_name should raise ValueError\n try:\n 
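Aside (toy records): the indexes dict above is three lookup tables over the same Airtable rows, so a filename can be resolved whether it encodes the generation id, the id-seed pair, or the seed alone:

all_gens = [{'fields': {'gid': 'a1', 'seed': 42}}]
indexes = {
    'gen_id': {g['fields']['gid']: g['fields'] for g in all_gens},
    'seed': {str(g['fields']['seed']): g['fields'] for g in all_gens},
}
assert indexes['gen_id']['a1'] is indexes['seed']['42']  # same underlying record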
model_fn = get_model_fn('llama_7b_fake')\n assert False\n except ValueError:\n assert True\n except:\n assert False\n\n\ndef test_eval_basic_1():\n '''\n demonstrate mocking:\n - output file(s): .md + .json\n - submit_prompt\n '''\n \n with mock.patch('modules.output.open', mock.mock_open()) as mock_output_file:\n with mock.patch('main.submit_prompt') as mock_submit_prompt:\n \n mock_submit_prompt.return_value = MODEL_RESPONSE_STUB\n \n output = eval_sheet(\n './data/input-one.md',\n '../data/md-schema.yaml',\n 'gpt-3.5-turbo',\n 'should-not-be-used.txt',\n verbose_level=0,\n )\n\n # two questions thus it should be called twice\n assert mock_submit_prompt.call_count == 2\n \n # verify the output file is written to, but not much else\n written_data = [e[0][0] for e in mock_output_file().write.call_args_list]\n \n assert len(written_data) > 0\n\n\ndef test_eval_basic_2():\n '''\n test with local model: llama_7b\n '''\n \n with mock.patch('modules.output.open', mock.mock_open()) as mock_output_file:\n with mock.patch('main.prompt_model') as mock_prompt_model:\n \n mock_prompt_model.return_value = (\"stubbed answer\", None)\n \n output = eval_sheet(\n './data/input-one.md',\n '../data/md-schema.yaml',\n 'llama_7b',\n 'should-not-be-used.txt',\n verbose_level=0,\n )\n\n # two questions thus it should be called twice\n assert mock_prompt_model.call_count == 2\n \n # verify the output file is written to, but not much else\n written_data = [e[0][0] for e in mock_output_file().write.call_args_list]\n \n assert len(written_data) > 0\n \n\ndef test_eval_grading():\n '''\n test the grading functionality:\n we'll capture if grading output file has the correct\n array of booleans by saying true answer to Question-1 \n in the mock response of the llama completion method\n '''\n \n TEST_FN = './data/dir-two/input-one.md'\n TEST_SCHEMA = '../data/md-schema.yaml'\n\n # A) capture the output_obj of a run\n # setting grading output to None to prevent output_obj from containing \n # a graded section\n with mock.patch('modules.output.open', mock.mock_open()) as mock_output_file:\n with mock.patch('main.prompt_model') as mock_prompt_model:\n \n mock_prompt_model.return_value = ('C) The worm', None) \n \n output_obj = eval_sheet(\n input_md_fn=TEST_FN,\n input_schema_fn=TEST_SCHEMA,\n model_name='llama_7b',\n output_md_fn='output-stub-xx.md',\n output_json_fn=None, # force only output to be output-xx.md\n output_grade_fn=None, # prevent output-md from having\n )\n\n # The test sheet has two questions, verify those are being being hit\n assert mock_prompt_model.call_count == 2\n\n # B) now do grading call here, instead of referencing the output json\n \n # first grab the doc_obj which would exist inside eval_sheet call \n # and be passed to it normally\n input_doc_obj = parse_wrapper(\n TEST_FN,\n TEST_SCHEMA,\n )\n\n # Now run the output objects from (A) and (B) through the grading function\n # analyze the boolean outputs\n list_grades = grade_sheet(\n json_doc=input_doc_obj,\n output_obj=output_obj,\n )\n\n print(f\"list_grades: {list_grades}\")\n\n assert len(list_grades) == 2\n\n # This one should be true because we mocked its response to be correct\n assert list_grades[0] == True\n\n # This one should be false because we mocked its response to be incorrect\n assert list_grades[1] == 
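Aside: the test pattern above in miniature - patch a collaborator, stub its return value, and assert on call counts (json.load is just a stand-in target here):

import json
from unittest import mock

with mock.patch('json.load') as fake_load:
    fake_load.return_value = {'ok': True}
    assert json.load(None) == {'ok': True}  # the stub ignores its argument
    assert fake_load.call_count == 1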
False\n","repo_name":"sutt/py-llama-scripts","sub_path":"eval-pipeline-1/tests/test_eval.py","file_name":"test_eval.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24012239274","text":"class Solution:\n def maxChunksToSorted(self, arr):\n \"\"\"\n :type arr: List[int]\n :rtype: int\n \"\"\"\n cnt = [0 for i in range(len(arr))]\n for i, x in enumerate(arr):\n for t in range(max(x, i), len(arr)):\n cnt[t] += 1\n #print(cnt)\n \n res = 0\n for i, x in enumerate(cnt):\n if i + 1 == x:\n res += 1\n return res\n\narr = [4,3,2,1,0]\nprint (Solution().maxChunksToSorted(arr))\n \n","repo_name":"songzy12/LeetCode","sub_path":"python/769.max-chunks-to-make-sorted.py","file_name":"769.max-chunks-to-make-sorted.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"16072685631","text":"import json\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView, DetailView, ListView, DeleteView\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Post\nfrom follower.models import Follower\n\n\n# ? Worked without importing date from datetime\ndef formatDate(date):\n # if post was created today\n if date.date() == date.today().date():\n return f\"Today · {date.strftime('%I:%M %p')}\"\n # if post was created yesterday\n elif date.strftime(\"%d%m%Y\") == date.today().strftime(\n f\"{date.today().day-1:02d}%m%Y\"\n ):\n return f\"Yesterday · {date.strftime('%I:%M %p')}\"\n # if post was created within the current year\n elif date.year == date.today().year:\n return date.strftime(\"%B %d · %I:%M %p\")\n else:\n # show full format\n return date.strftime(\"%B %d, %Y · %I:%M %p\")\n\n\ndef formatPostDates(posts):\n formattedPost = []\n for post in posts:\n # check if post is modified\n if post.dateCreated == post.dateModified:\n post.dateModified = False\n else:\n post.dateModified = formatDate(post.dateModified)\n post.dateCreated = formatDate(post.dateCreated)\n formattedPost.append(post)\n return posts\n\n\nclass IndexView(TemplateView):\n http_method_names = [\"get\"]\n template_name = \"index.html\"\n\n def dispatch(self, request, *args, **kwargs):\n self.request = request\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n posts = []\n if self.request.user.is_authenticated:\n # List of users followed by current user\n following = list(\n Follower.objects.filter(followedBy=self.request.user).values_list(\n \"following\", flat=True\n )\n )\n # Filter out posts that are from the following list\n posts = formatPostDates(\n Post.objects.filter(author__in=following).order_by(\"-dateCreated\")\n )\n else:\n posts = formatPostDates(list(Post.objects.all().order_by(\"-dateCreated\")))\n context[\"posts\"] = posts\n context[\"home_active\"] = True\n return context\n\n\nclass PostExploreView(ListView):\n http_method_names = [\"get\"]\n template_name = \"explore.html\"\n model = Post\n context_object_name = \"posts\"\n\n def get_queryset(self):\n self.queryset = formatPostDates(\n list(Post.objects.all().order_by(\"-dateCreated\"))\n )\n return super().get_queryset()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"explore_active\"] 
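Aside: why the counting works - each element contributes to cnt[t] for every t >= max(value, index), so cnt[i] == i + 1 exactly when the prefix arr[0..i] is a permutation of 0..i, i.e. a legal place to cut a chunk. Worked trace:

# arr = [1, 0, 2, 3, 4] -> cnt = [0, 2, 3, 4, 5]
# cnt[i] == i + 1 at i = 1, 2, 3, 4 -> 4 chunks: [1,0] [2] [3] [4]
print(Solution().maxChunksToSorted([1, 0, 2, 3, 4]))  # 4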
= True\n        return context\n\n\nclass PostDetailView(DetailView):\n    http_method_names = [\"get\"]\n    model = Post\n    context_object_name = \"post\"\n    template_name = \"view.html\"\n\n    # Format date of requested post\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        post = self.get_object()\n        if post.dateCreated == post.dateModified:\n            post.dateModified = False\n        else:\n            post.dateModified = formatDate(post.dateModified)\n        post.dateCreated = formatDate(post.dateCreated)\n        context[\"post\"] = post\n        return context\n\n\nclass PostCreateView(LoginRequiredMixin, CreateView):\n    model = Post\n    template_name = \"post.html\"\n    fields = [\"content\"]\n    success_url = \"/\"\n\n    def dispatch(self, request, *args, **kwargs):\n        # Get current user\n        self.request = request\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context[\"post_active\"] = True\n        return context\n    \n\n    # def form_valid(self, form):\n    #     print(\"!!!!!!!!!!!!!!gets here\")\n    #     post = form.save(commit=False)\n    #     # Fill author field with current user\n    #     post.author = self.request.user\n    #     post.save()\n    #     return super().form_valid(form)\n\n    def post(self, request, *args, **kwargs):\n        newPost = Post.objects.create(\n            content=request.POST.get(\"content\"), author=request.user\n        )\n        # if incoming request is ajax\n        if request.headers.get(\"X-Requested-With\") == \"XMLHttpRequest\":\n            newPost.dateCreated = formatDate(newPost.dateCreated)\n            return render(\n                request,\n                \"./components/card.html\",\n                {\"post\": newPost},\n                content_type=\"application/html\",\n            )\n        else:\n            return HttpResponseRedirect(self.success_url)\n\n\nclass PostDeleteView(DeleteView):\n    model = Post\n    success_url = \"/\"\n","repo_name":"deepak-parmar/til-django","sub_path":"feed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73022749769","text":"from django.shortcuts import render\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom .models import Task\nfrom .serializers import TaskSerializer\n\n\n# Create your views here.\n@api_view(['GET'])\ndef apiOverview(request):\n    api_urls = {\n        'List':'/all_task/',\n        'Detail View':'/task-detail//',\n        'Create':'/create_task/',\n        'Update':'/edit_task//',\n        'Delete':'/delete-task/'\n    }\n    return Response(api_urls)\n\n\n@api_view(['GET'])\ndef taskList(request):\n    task = Task.objects.all()\n    serizializer =TaskSerializer(task, many=True)\n    data = serizializer.data\n    return Response(data)\n\n@api_view(['GET'])\ndef taskDetail(request, pk):\n    task = Task.objects.get(id=pk)\n    serizializer =TaskSerializer(task, many=False)\n    data = serizializer.data\n    return Response(data)\n\n@api_view(['POST'])\ndef createTask(request):\n    serizializer =TaskSerializer(data=request.data)\n    if serizializer.is_valid():\n        serizializer.save()\n    data = serizializer.data\n    return Response(data)\n\n@api_view(['POST'])\ndef taskUpdate(request, pk):\n    task = Task.objects.get(id=pk)\n    serizializer =TaskSerializer(instance=task, data=request.data)\n    if serizializer.is_valid():\n        serizializer.save()\n    data = serizializer.data\n    return Response(data)\n\n@api_view(['DELETE'])\ndef taskDelete(request, pk):\n    task = Task.objects.get(id=pk)\n    task.delete()\n
    return Response(\"Task Was deleted Successfully\")\n\n\n\n\n\n\n","repo_name":"fyung36/todoapp","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23755664385","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Menu, MenuItem\nfrom django.http import JsonResponse\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView, CreateView\nfrom .forms import MenuItemForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\n@login_required\ndef menu_item_list(req, menu_id):\n    menuitem_list = MenuItem.objects.filter(menu_id=menu_id)\n    return render(req, 'menu/menuitem_list.html', { \n        'menuitem_list': menuitem_list,\n        'menu_id': menu_id\n    })\n\n\n@login_required\ndef menu_item_create(req, menu_id):\n    form = MenuItemForm(req.POST or None)\n\n    if form.is_valid():\n        MenuItem.objects.create(menu_id=menu_id, name=form.cleaned_data['item'])\n        return redirect(f'/menu/{menu_id}/items')\n\n    return render(req, 'menu/menuitem_create.html', {\n        'menu_id': menu_id,\n        'form': form\n    })\n\n\n@login_required\ndef menu_item_delete(req, menu_id, item_id):\n    item = get_object_or_404(MenuItem, pk=item_id)\n    item.delete()\n    return redirect(f'/menu/{menu_id}/items')\n\n\n@login_required\ndef menu_item_update(req, menu_id, item_id):\n    item = get_object_or_404(MenuItem, pk=item_id)\n    form = MenuItemForm({'item': item.name})\n\n    if req.method == 'POST':\n        form = MenuItemForm(req.POST)\n        if form.is_valid():\n            MenuItem.objects.filter(id=item_id).update(name=form.cleaned_data['item'])\n            return redirect(f'/menu/{menu_id}/items')\n\n    return render(req, 'menu/menuitem_update.html', {\n        'form': form\n    })\n\n\nclass MenuList(LoginRequiredMixin, ListView):\n    model = Menu\n\n\nclass MenuCreate(LoginRequiredMixin, CreateView):\n    model = Menu\n    fields = '__all__'\n    template_name = 'menu/menu_create.html'\n    success_url = '/menu/all'\n\n\nclass MenuUpdate(LoginRequiredMixin, UpdateView):\n    model = Menu\n    fields = '__all__'\n    template_name = 'menu/menu_update.html'\n    success_url = '/menu/all'\n\n\nclass MenuDelete(LoginRequiredMixin, DeleteView):\n    model = Menu\n    template_name = 'menu/menu_delete.html'\n    success_url = '/menu/all'\n\n\n@login_required\ndef get_all_menu(req):\n    menu_list = Menu.objects.all()\n    menu_json = []\n\n    for menu in menu_list:\n        menu_json.append({\n            'id': menu.id,\n            'name': menu.name,\n            'price': menu.price,\n            'image': menu.image.url,\n            'category': {\n                'id': menu.category.id,\n                'name': menu.category.name,\n            },\n            'items': [{ 'id': item.id , 'name': item.name } for item in menu.menuitem_set.all()]\n        })\n\n    return JsonResponse(menu_json, safe=False)","repo_name":"vongthaya/sonnabod","sub_path":"menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1871384676","text":"import copy\n\ndef basicAttack(self,enemy):\n    taunt = False\n    #Check for taunt on enemy board\n    for i in self.game.oppBoard:\n        if 'Taunt' in i.effects:\n            taunt = True\n    #Cannot target attack if enemy is immune or stealthed, if a taunt is in the way, if frozen, or if regular and windfury attacks are both used up\n    if ('Immune' in enemy.effects or 'Stealth' in enemy.effects) or (taunt and 'Taunt' not in enemy.effects) or 'Frozen' in self.effects:\n        return False\n    elif 
self.canAttack:\n self.canAttack = False\n elif 'Windfury' in self.effects and self.effects['Windfury']:\n self.effects['Windfury'] = False\n else:\n return False\n #If this does not have a special attack, run a regular attack\n if 'Attack' in self.effects:\n return self.game.runEffect('Attack',self,enemy)\n else:\n self.game.damage(enemy,self.atk)\n self.game.damage(self,enemy.atk)\n self.game.resolveDeath()\n return True\ndef weaponAttack(self,enemy):\n self.game.damage(enemy,self.atk)\n self.game.damage(self,1)\n self.game.resolveDeath()\n \n#----------Game element classes----------\n\nclass Minion:\n def __init__(self,mName,mCost,mAtk,mHealth,mType=[],mEffect=[],mGame=None,mClass='Neutral'):\n self.name = mName\n self.cost = mCost\n self.baseCost = mCost\n self.atk = mAtk\n self.baseAtk = mAtk\n self.health = mHealth\n self.baseHealth = mHealth\n self.maxHealth = mHealth\n self.hearthClass = mClass\n while len(mEffect) < len(mType):\n mEffect.append(None)\n self.effects = {etype:effect for etype,effect in zip(mType,mEffect)}\n self.game = mGame\n\n buffAtk = 0\n buffHealth = 0\n auraAtk = 0\n auraHealth = 0\n damage = 0\n buffCost = 0\n auraCost = 0\n \n canAttack = False\n\n def attack(self,enemy):\n basicAttack(self,enemy)\n\nclass Spell:\n def __init__(self,sName,sCost,sClass,sType=[None],sEffect=[None],sGame=None):\n self.name = sName\n self.cost = sCost\n self.baseCost = sCost\n self.hearthClass = sClass\n self.effectType = sType\n while len(sEffect) < len(sType):\n sEffect.append(None)\n self.effects = {etype:effect for etype,effect in zip(sType,sEffect)}\n self.game = sGame\n \n buffCost = 0\n auraCost = 0\n\nclass Weapon:\n def __init__(self,wName,wCost,wAtk,wHealth,wType=['Attack'],wEffects=[lambda m,t:weaponAttack(m,t)],wHero=None,wClass='Neutral'):\n self.name = wName\n self.cost = wCost\n self.baseCost = wCost\n self.atk = wAtk\n self.baseAtk = wAtk\n self.health = wHealth\n self.baseHealth = wHealth\n self.hearthClass = wClass\n self.hero = wHero\n while len(wEffects) < len(wType):\n wEffects.append(None)\n self.effects = {etype:effect for etype,effect in zip(wType,wEffects)}\n \n buffAtk = 0\n buffHealth = 0\n auraAtk = 0\n auraHealth = 0\n damage = 0\n buffCost = 0\n auraCost = 0\n \n def attack(self,enemy):\n self.atk += self.hero.atk\n basicAttack(self,enemy)\n\n\nclass Hero:\n def __init__(self,hClass='Neutral',hGame=None):\n self.hearthClass = hClass\n self.effects = {}\n self.game = hGame\n mana = 0\n maxMana = 0\n atk = 0\n health = 30\n maxHealth = 30\n armor = 0\n weapon = None\n \n buffAtk = 0\n buffHealth = 0\n auraAtk = 0\n auraHealth = 0\n damage = 0\n\n def attack(self,enemy):\n if self.game.checkWeapon(self):\n self.weapon.attack(self,enemy)\n else:\n basicAttack(self,enemy)\n\n#----------Game class with useful functions----------\n\nclass Game:\n def __init__(self,gMe,gOpponent):\n self.hero1 = gMe\n self.hero1.game = self\n self.hero2 = gOpponent\n self.hero2.game = self\n self.me = self.hero1\n self.opponent = self.hero2\n\n self.board1 = []\n self.board2 = []\n self.myBoard = self.board1\n self.oppBoard = self.board2\n\n self.hand1 = []\n self.hand2 = []\n self.myHand = self.hand1\n self.oppHand = self.hand2\n\n self.summonOrder = []\n\n #Run a specific effect of a card, if it has that effect\n def runEffect(self,effect,card,arg1=None,arg2=None,arg3=None,default=False,resolve=True):\n succ = default\n if effect in card.effects:\n if arg3 != None and arg2 != None and arg1 != None:\n succ = card.effects[effect](card,arg1,arg2,arg3)\n elif arg2 != None and 
arg1 != None:\n            succ = card.effects[effect](card,arg1,arg2)\n        elif arg1 != None:\n            succ = card.effects[effect](card,arg1)\n        else:\n            succ = card.effects[effect](card)\n        if succ and resolve: self.resolveDeath()\n        return succ\n\n    #Use the runEffect function on all cards in a list\n    def runEffectAll(self,effect,origin=None,order=None):\n        if order == None:\n            order = self.summonOrder\n        if origin == None:\n            for card in order:\n                self.runEffect(effect,card)\n        else:\n            for card in order:\n                self.runEffect(effect,card,origin)\n\n    def checkWeapon(self, hero):\n        return hero.weapon != None\n    \n    #Add a card to the current hand\n    def addToHand(self,card,hand=None):\n        if hand == None:\n            hand = self.myHand\n        if self.runEffect('Casts When Drawn',card):\n            return True\n        elif(len(hand) < 10):\n            card = copy.deepcopy(card)\n            if not isinstance(card,Weapon):\n                card.game = self\n            hand.append(card)\n            self.checkAuras()\n            return True\n        return False\n\n    #Summon a minion in a specific position, or just at the end if no position is specified\n    def summon(self,minion,position=-1,board=None):\n        if board == None:\n            board = self.myBoard\n        if len(board) < 7:\n            minion = copy.deepcopy(minion)\n            minion.game = self\n            if position < 0:\n                board.append(minion)\n            else:\n                board.insert(position,minion)\n            self.summonOrder.append(minion)\n            if 'Charge' in minion.effects:\n                minion.canAttack = True\n                if 'Windfury' in minion.effects:\n                    minion.effects['Windfury'] = True\n            elif 'Windfury' in minion.effects:\n                minion.effects['Windfury'] = False\n            self.runEffectAll('Summon',minion)\n            return True\n        return False\n\n    #Equip a weapon to the current hero\n    def equip(self,weapon,hero=None):\n        if hero == None:\n            hero = self.me\n        if self.checkWeapon(hero):\n            self.kill(hero.weapon)\n        hero.weapon = weapon\n        weapon.hero = hero\n        return True\n\n    #Play a card; run battlecry or spell effect, targeting or not, then summon if it is a minion\n    def playCard(self,card,arg1=None,position=-1,arg2=None,arg3=None):\n        if self.me.mana < card.cost: \n            return False\n        succ = True\n        succ = self.runEffect('Spell',card,arg1,arg2,arg3,succ)\n        if isinstance(card,Minion):\n            succ = self.summon(card,position)\n        elif isinstance(card,Weapon):\n            succ = self.equip(card)\n        if succ:\n            succ = self.runEffect('Battlecry',card,arg1,arg2,arg3,succ)\n        if succ:\n            self.runEffectAll('Play Card',card)\n            self.myHand.remove(card)\n            self.me.mana -= card.cost\n        self.checkAuras()\n        return succ\n    \n    #Deal damage to a minion\n    def damage(self,minion,dmg,spell=False):\n        if spell and dmg > 0:\n            for i in self.myBoard:\n                if 'Spell Damage' in i.effects:\n                    dmg += i.effects['Spell Damage']\n        if 'Divine Shield' in minion.effects and dmg > 0:\n            del minion.effects['Divine Shield']\n            dmg = 0\n        minion.damage = min(minion.damage-dmg,0)\n        minion.health = minion.maxHealth + minion.damage\n        if dmg > 0:\n            self.runEffectAll('Damage',minion)\n        elif dmg < 0:\n            self.runEffectAll('Heal',minion)\n        return True\n    \n    #Kill all minions with 0 health or less\n    def resolveDeath(self):\n        dummy = copy.copy(self.summonOrder)\n        for minion in dummy:\n            if minion.health <= 0:\n                self.kill(minion)\n        for weapon in [self.me.weapon,self.opponent.weapon]:\n            if weapon != None and weapon.health <= 0:\n                self.kill(weapon)\n        self.checkAuras()\n\n    #Kill a minion after running its deathrattle (unless specified otherwise)\n    def kill(self,minion,dr=True):\n        #Do not kill heroes\n        if isinstance(minion,Hero):\n            return\n        #Otherwise, remove from its board and the summon order\n        if isinstance(minion,Weapon):\n            if self.me.weapon == minion:\n
                self.me.weapon = None\n        elif self.opponent.weapon == minion:\n            self.opponent.weapon = None\n        elif minion in self.myBoard:\n            self.runEffectAll('Death',minion)\n            self.myBoard.remove(minion)\n            self.summonOrder.remove(minion)\n        elif minion in self.oppBoard:\n            self.runEffectAll('Death',minion)\n            self.oppBoard.remove(minion)\n            self.summonOrder.remove(minion)\n        #Execute deathrattle, unless otherwise specified\n        if dr:\n            self.runEffect('Deathrattle',minion)\n\n    #Add or remove stats from a minion\n    def buff(self,target,atk,health,cost=0):\n        if isinstance(target,(Minion,Hero)):\n            target.buffAtk += atk\n            target.buffHealth += health\n            target.atk = target.baseAtk + target.buffAtk + target.auraAtk\n            target.atk = max(target.atk,0)\n            target.maxHealth = target.baseHealth + target.buffHealth + target.auraHealth\n            if target.maxHealth <= 0:\n                self.kill(target,self.myBoard.count(target)>0)\n            else:\n                target.health = target.maxHealth + target.damage\n        if not isinstance(target,Hero):\n            target.buffCost += cost\n            target.cost = max(target.baseCost + target.buffCost + target.auraCost, 0)\n        return True\n\n    def checkAuras(self):\n        #Reset aura buffs, including card cost\n        for i in self.summonOrder:\n            i.auraAtk = 0\n            i.auraHealth = 0\n        for i in self.myHand:\n            i.auraCost = 0\n        for i in self.oppHand:\n            i.auraCost = 0\n        #Run all aura effects and in-hand cost reduction effects\n        self.runEffectAll('Aura')\n        self.runEffectAll('Hand Aura',order=self.myHand)\n        self.runEffectAll('Hand Aura',order=self.oppHand)\n        #Use the buff method to re-calculate all minion stats\n        for i in self.summonOrder:\n            self.buff(i,0,0)\n        for i in self.myHand:\n            self.buff(i,0,0)\n        for i in self.oppHand:\n            self.buff(i,0,0)\n        \n    def addEffect(self,card,eType,effect=None):\n        #Register a new effect; never overwrite an effect type the card already has\n        if eType not in card.effects:\n            card.effects[eType] = effect\n\n    #Switch the turn to the other player by switching the heroes and boards\n    def switchTurn(self):\n        self.runEffectAll('End of Turn')\n\n        #Remove frozen status if minions could have attacked this turn\n        for i in self.myBoard:\n            if 'Frozen' in i.effects and i.canAttack:\n                del i.effects['Frozen']\n\n        #Switch boards\n        if self.myBoard == self.board1:\n            self.myBoard = self.board2\n            self.oppBoard = self.board1\n        elif self.myBoard == self.board2:\n            self.myBoard = self.board1\n            self.oppBoard = self.board2\n\n        #Switch heroes\n        if self.me == self.hero1:\n            self.me = self.hero2\n            self.opponent = self.hero1\n        elif self.me == self.hero2:\n            self.me = self.hero1\n            self.opponent = self.hero2\n\n        #Switch hands\n        if self.myHand == self.hand1:\n            self.myHand = self.hand2\n            self.oppHand = self.hand1\n        elif self.myHand == self.hand2:\n            self.myHand = self.hand1\n            self.oppHand = self.hand2\n\n        #Reset all minion's attacks\n        for i in self.myBoard:\n            i.canAttack = True\n            if 'Windfury' in i.effects:\n                i.effects['Windfury'] = True\n\n        #Gain 1 mana\n        self.me.maxMana = min(10,self.me.maxMana+1)\n        self.me.mana = self.me.maxMana\n\n        self.runEffectAll('Start of Turn')\n\n    #Discard a card from the current hand\n    def discard(self,card):\n        self.myHand.remove(card)\n        self.runEffect('Discard',card)\n\n    #Find the minions adjacent to a specific minion\n    def getAdjacent(self,minion):\n        if minion in self.myBoard:\n            board = self.myBoard\n        elif minion in self.oppBoard:\n            board = self.oppBoard\n        else:\n            return []\n        pos = board.index(minion)\n        targets = []\n        if pos > 0:\n            targets.append(board[pos-1])\n        try:\n            targets.append(board[pos+1])\n        except IndexError:\n            pass\n        return 
targets\n\n #Silence a minion (this will currently cause problems with copying / adding to hand)\n def silence(self,target):\n target.buffAtk = 0\n target.buffHealth = 0\n target.effects = {}\n\n #Get the position of a minion anywhere on the board\n def getMinion(self,minion):\n if minion in self.myBoard:\n return self.myBoard.index(minion)\n elif minion in self.oppBoard:\n return self.oppBoard.index(minion)\n return -1\n\n #Return which board a minion is on\n def getBoard(self,minion):\n if minion in self.myBoard:\n return self.myBoard\n elif minion in self.oppBoard:\n return self.oppBoard\n return -1\n\n#Take card definitions from other files\nimport Classic\nimport Rastakhan\ncardDefs = [i for i in Classic.cardDefs]\nfor i in Rastakhan.cardDefs:\n cardDefs.append(i)\n","repo_name":"Updownbanana/HearthAI","sub_path":"HearthCards.py","file_name":"HearthCards.py","file_ext":"py","file_size_in_byte":12742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39737325699","text":"#!/usr/bin/python\n# encoding: utf-8\n\n\"\"\"\n@author: Ian\n@file: main.py\n@time: 2019-06-19 15:42\n\"\"\"\nif __name__ == '__main__':\n while True:\n print('please select: 1) data explore; 2) train; 3) test')\n a = input()\n if a == 'q':\n print(f'bye! {a}')\n break\n print(f'hi {a}')","repo_name":"mayi140611/mayiutils","sub_path":"apps/quant/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8853368541","text":"import os\nimport lmdb # install lmdb by \"pip install lmdb\"\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport six\nfrom PIL import Image\nimport scipy.io as sio\nfrom tqdm import tqdm\nimport re\n\ndef checkImageIsValid(imageBin):\n if imageBin is None:\n return False\n imageBuf = np.fromstring(imageBin, dtype=np.uint8)\n # imageBuf = np.frombuffer(imageBin, dtype=np.uint8)\n if imageBuf.size == 0:\n return False\n img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)\n imgH, imgW = img.shape[0], img.shape[1]\n if imgH * imgW == 0:\n return False\n return True\n\n\ndef writeCache(env, cache):\n with env.begin(write=True) as txn:\n for k, v in cache.items():\n txn.put(k.encode(), v)\n\n\ndef _is_difficult(word):\n assert isinstance(word, str)\n return not re.match('^[\\w]+$', word)\n\n\ndef createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):\n \"\"\"\n Create LMDB dataset for CRNN training.\n ARGS:\n outputPath : LMDB output path\n imagePathList : list of image path\n labelList : list of corresponding groundtruth texts\n lexiconList : (optional) list of lexicon lists\n checkValid : if true, check the validity of every image\n \"\"\"\n assert(len(imagePathList) == len(labelList))\n nSamples = len(imagePathList)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in tqdm(range(nSamples)):\n imagePath = imagePathList[i]\n label = labelList[i]\n if len(label) == 0:\n continue\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n if checkValid:\n if not checkImageIsValid(imageBin):\n print('%s is not a valid image' % imagePath)\n continue\n\n imageKey = 'image-%09d' % cnt\n labelKey = 'label-%09d' % cnt\n cache[imageKey] = imageBin\n cache[labelKey] = label.encode()\n if lexiconList:\n lexiconKey = 'lexicon-%09d' % cnt\n cache[lexiconKey] = ' 
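Aside (hypothetical card): every card above carries a dict from effect-type name to callable, and Game.runEffect dispatches on it - which is why Charge is detected with a plain membership test while Battlecry is invoked with the card and its target:

cheer = lambda card, target: print(f'{card.name} inspires {target}')
scout = Minion('Scout', 1, 1, 1, mType=['Charge', 'Battlecry'], mEffect=[None, cheer])
print('Charge' in scout.effects)           # True -> may attack the turn it is summoned
scout.effects['Battlecry'](scout, 'Hero')  # what runEffect('Battlecry', ...) ends up calling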
'.join(lexiconList[i])\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt-1\n cache['num-samples'] = str(nSamples).encode()\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)\n\nif __name__ == \"__main__\":\n image_root_dir = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/words'\n annot_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/ascii/words.txt'\n lmdb_output_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten_lmdbs/IAM_train'\n split_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/splits/trainset.txt'\n\n lmdb_output_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten_lmdbs/IAM_test'\n split_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/splits/testset.txt'\n\n lmdb_output_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten_lmdbs/IAM_val1'\n split_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/splits/validationset1.txt'\n\n lmdb_output_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten_lmdbs/IAM_val2'\n split_path = '/home/ymk-wh/workspace/datasets/text_recognition/handwritten/IAM/splits/validationset2.txt'\n\n # load annotation\n with open(annot_path, 'r') as f:\n lines = f.readlines()\n lines = [line.strip() for line in lines]\n # load data split\n with open(split_path, 'r') as f:\n split_ids = f.readlines()\n split_ids = [split_id.strip() for split_id in split_ids]\n \n image_path_list, label_list = [], []\n for line in lines:\n if line[0] == '#':\n continue\n splits = line.split(' ', 8)\n image_name, seg_flag, _, x, y, w, h, tag, label = splits\n if seg_flag == 'ok':\n ids = image_name.split('-')\n paper_id = ids[0]\n line_id = '-'.join(ids[:2])\n split_id = '-'.join(ids[:3])\n if split_id in split_ids:\n image_path = os.path.join(image_root_dir, paper_id, line_id, image_name + '.png')\n image_path_list.append(image_path)\n label_list.append(label)\n \n createDataset(lmdb_output_path, image_path_list, label_list)","repo_name":"ayumiymk/DiG","sub_path":"tools/create_iam_lmdb.py","file_name":"create_iam_lmdb.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"16"} +{"seq_id":"44790522979","text":"\"\"\"\n给定一棵二叉树,设计一个算法,创建含有某一深度上所有节点的链表(比如,若一棵树的深度为 D,则会创建出 D 个链表)。返回一个包含所有深度的链表的数组。\n\n示例:\n\n输入:[1,2,3,4,5,null,7,8]\n\n 1\n / \\ \n 2 3\n / \\ \\ \n 4 5 7\n /\n 8\n\n输出:[[1],[2,3],[4,5,7],[8]]\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/list-of-depth-lcci\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n# 2020.9.16 在一番挣扎后写了出来,需活用树和链表\nfrom typing import List\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def listOfDepth(self, tree: TreeNode) -> List[ListNode]:\n res = []\n queue = [tree]\n while queue:\n head = ListNode(None)\n start = head\n for _ in range(len(queue)):\n cur = queue.pop(0)\n head.next = ListNode(cur.val)\n head = head.next\n if cur.left:\n queue.append(cur.left)\n if cur.right:\n queue.append(cur.right)\n res.append(start.next) \n return res\n\n\n# 网上的其他解法,不太容易理解\nclass Solution1:\n def listOfDepth(self, root: TreeNode) -> List[ListNode]:\n ans = []\n def 
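Aside: reading one sample back out of the LMDB written above (layout as in createDataset: 'image-%09d' holds the encoded image bytes, 'label-%09d' the utf-8 text, 'num-samples' the count):

import lmdb

env = lmdb.open(lmdb_output_path, readonly=True, lock=False)
with env.begin() as txn:
    n = int(txn.get('num-samples'.encode()))
    image_bin = txn.get(b'image-000000001')
    label = txn.get(b'label-000000001').decode()
print(n, label)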
dfs(node, level):\n if not node: return None\n if len(ans) == level:\n ans.append(ListNode(node.val))\n else:\n head = ListNode(node.val)\n head.next = ans[level]\n ans[level] = head\n dfs(node.right, level + 1)\n dfs(node.left, level + 1)\n dfs(root, 0)\n return ans\n","repo_name":"ZhiyuSun/leetcode-practice","sub_path":"程序员面试金典/0403_特定深度节点链表.py","file_name":"0403_特定深度节点链表.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"72377922888","text":"import numpy as np\n#import matplotlib.pyplot as plt\n\n### SOLUTIONS\n# Composite trapezoid rule approximated the integral as 2.754548\n# Composite Simpson's rule approximated the integral as 2.738414\n\ndef f(s):\n return 1/(1 + (s**2))\n\ndef trapezoid():\n part = np.linspace(-5, 5, 50)\n #print(part)\n h = part[1] - part[0]\n n = len(part)\n sum_total = 0\n #print(n)\n \n for i in range(1, n):\n sum_total += f(part[0] + i*h)\n \n return (h/2)*(f(part[0]) + 2*sum_total + f(part[-1]))\n\ndef simpsons():\n part = np.linspace(-5, 5, 50)\n #print(part)\n h = part[1] - part[0]\n n = len(part)\n sum_total_1 = 0\n sum_total_2 = 0\n x_j_1 = []\n x_j_2 = []\n \n #for i in range(1, n):\n #x_j.append(f(part[0] + i*h))\n \n for j in range(1, int((n/2) - 1)):\n x_2j = part[0] + 2*j*h\n sum_total_1 += f(x_2j)\n \n for k in range(1, int(n/2)):\n x_2j_1 = part[0] + 2*k*h - h\n sum_total_2 += f(x_2j_1)\n \n return (h/3)*(f(part[0]) + 2*sum_total_1 + 4*sum_total_2 + f(part[-1]))\n \nprint(f'Result from composite trapezoid rule: {trapezoid()}')\nprint(f\"Result from composite Simpson's rule: {simpsons()}\")","repo_name":"LukeStuckenbruck/APPM_4600_repository","sub_path":"Homework/Homework_10/HW10_Q2_code.py","file_name":"HW10_Q2_code.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12489185808","text":"import numpy as np\n\n\ndef circular_indices(lb, ub, thresh):\n indices = []\n while True:\n stop = min(ub, thresh)\n ix = np.arange(lb, stop)\n indices.append(ix)\n if stop != ub:\n diff = ub - stop\n lb = 0\n ub = diff\n else:\n break\n\n return np.concatenate(indices)\n\n\ndef npz_to_array(npzfile):\n \"\"\"\"Get a list of numpy arrays from a npz file\"\"\"\n nitems = len(npzfile.keys())\n return [npzfile['arr_%s' % i] for i in range(nitems)]\n","repo_name":"wacabanga/wacacore","sub_path":"wacacore/util/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"404183358","text":"import argparse\nimport json\nimport subprocess\n\nfrom core_data_modules.cleaners import Codes, URNCleaner\nfrom core_data_modules.logging import Logger\nfrom engagement_database import EngagementDatabase\nfrom engagement_database.data_models import CommandLogEntry, CommandStatuses, HistoryEntryOrigin\nfrom google.cloud import firestore\nfrom id_infrastructure.firestore_uuid_table import FirestoreUuidTable\nfrom storage.google_cloud import google_cloud_utils\n\nlog = Logger(__name__)\n\nBATCH_SIZE = 500\n\n\n@firestore.transactional\ndef update_next_message_with_operator_nc(transaction, engagement_db, uuid_table, previous_message=None):\n if previous_message is None:\n query_filter = lambda q: q.order_by(\"last_updated\").order_by(\"message_id\") \\\n .where(\"channel_operator\", \"==\", Codes.NOT_CODED) \\\n .limit(1)\n else:\n query_filter = lambda q: 
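Aside: a quick sanity check for the quadrature homework above - the integrand has antiderivative arctan(s), so the exact value is 2*atan(5), which both printed results bracket closely:

import math
print(2 * math.atan(5))  # 2.7468..., between the trapezoid and Simpson estimates above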
q.order_by(\"last_updated\").order_by(\"message_id\") \\\n .start_after(previous_message.to_dict()) \\\n .where(\"channel_operator\", \"==\", Codes.NOT_CODED) \\\n .limit(1)\n\n messages = engagement_db.get_messages(query_filter, transaction)\n if len(messages) == 0:\n return None\n message = messages[0]\n\n urn = uuid_table.uuid_to_data(message.participant_uuid)\n operator = URNCleaner.clean_operator(urn)\n\n if operator == Codes.NOT_CODED:\n log.warning(f\"Message {message.message_id} still has operator {Codes.NOT_CODED}\")\n return message\n\n log.info(f\"Updating message {message.message_id} to have operator {operator}{dry_run_text}...\")\n message.channel_operator = operator\n\n if not dry_run:\n engagement_db.set_message(\n message,\n HistoryEntryOrigin(\"Update operator\", {}),\n transaction\n )\n\n return message\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Updates the channel_operator of all messages in an engagement \"\n \"database that are labelled with channel_operator 'NC', by \"\n \"redetermining the channel operator from the message urn\")\n\n parser.add_argument(\"--dry-run\", const=True, default=False, action=\"store_const\")\n parser.add_argument(\"user\", help=\"Identifier of the user launching this program\")\n parser.add_argument(\"google_cloud_credentials_file_path\", metavar=\"google-cloud-credentials-file-path\",\n help=\"Path to a Google Cloud service account credentials file to use to access the \"\n \"credentials bucket\")\n parser.add_argument(\"engagement_database_credentials_file_url\", metavar=\"engagement-database-credentials-file-url\",\n help=\"GS URL of the credentials for the Firestore project to export\")\n parser.add_argument(\"database_path\", metavar=\"database-path\",\n help=\"Path to the engagement database to export e.g. 
engagement_databases/test\")\n parser.add_argument(\"uuid_table_credentials_url\", metavar=\"uuid-table-credentials-url\",\n help=\"GS URL to the Firebase credentials file to use for the uuid table\")\n parser.add_argument(\"uuid_table_name\", metavar=\"uuid-table-name\",\n help=\"Name of the uuid table to use to re-identify a participant\")\n parser.add_argument(\"uuid_prefix\", metavar=\"uuid-prefix\",\n help=\"UUID prefix for the uuid_table\")\n\n args = parser.parse_args()\n\n dry_run = args.dry_run\n user = args.user\n google_cloud_credentials_file_path = args.google_cloud_credentials_file_path\n engagement_database_credentials_file_url = args.engagement_database_credentials_file_url\n database_path = args.database_path\n uuid_table_credentials_url = args.uuid_table_credentials_url\n uuid_table_name = args.uuid_table_name\n uuid_prefix = args.uuid_prefix\n\n dry_run_text = \" (dry run)\" if dry_run else \"\"\n\n commit = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).decode().strip()\n project = subprocess.check_output([\"git\", \"config\", \"--get\", \"remote.origin.url\"]).decode().strip()\n HistoryEntryOrigin.set_defaults(user, project, \"NA\", commit)\n\n log.info(\"Downloading Firestore engagement database credentials...\")\n engagement_database_credentials = json.loads(google_cloud_utils.download_blob_to_string(\n google_cloud_credentials_file_path,\n engagement_database_credentials_file_url\n ))\n engagement_db = EngagementDatabase.init_from_credentials(engagement_database_credentials, database_path)\n log.info(f\"Initialised the Engagement Database client\")\n\n log.info(\"Downloading Firestore UUID Table credentials...\")\n firestore_uuid_table_credentials = json.loads(google_cloud_utils.download_blob_to_string(\n google_cloud_credentials_file_path,\n uuid_table_credentials_url\n ))\n uuid_table = FirestoreUuidTable.init_from_credentials(\n firestore_uuid_table_credentials,\n uuid_table_name,\n uuid_prefix\n )\n log.info(\"Initialised the Firestore UUID table\")\n\n if not dry_run:\n engagement_db.set_command_log_entry(CommandLogEntry(status=CommandStatuses.STARTED))\n\n log.info(f\"Updating messages labelled with channel_operator {Codes.NOT_CODED}{dry_run_text}...\")\n total_messages = 0\n msg = update_next_message_with_operator_nc(engagement_db.transaction(), engagement_db, uuid_table)\n while msg is not None:\n total_messages += 1\n log.info(f\"Processed {total_messages} messages so far\")\n msg = update_next_message_with_operator_nc(engagement_db.transaction(), engagement_db, uuid_table, msg)\n\n if not dry_run:\n engagement_db.set_command_log_entry(CommandLogEntry(status=CommandStatuses.COMPLETED_SUCCESSFULLY))\n\n log.info(f\"Done. Processed {total_messages} messages{dry_run_text}\")\n","repo_name":"AfricasVoices/tools","sub_path":"engagement_database/update_channel_operator_nc_messages.py","file_name":"update_channel_operator_nc_messages.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74902561609","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n Load data from the csv. 
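Both files are expected to share an 'id' column, which is used as the merge key. 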
\n Args: \n messages_filepath: the path to the messages.csv \n categories_filepath: the path to the categories.csv \n Returns: \n df (DataFrame): merged dataframe with messages and categories, joined on 'id'\n \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = pd.merge(messages, categories, on = 'id', how ='outer')\n return df\n\n\ndef clean_data(df):\n \"\"\"\n Clean and transform the merged dataframe. \n Args: \n df: The merged dataframe\n Returns: \n df (DataFrame): messages and categories (numerical) merged dataframe\n \"\"\"\n \n # create a dataframe of the 36 individual category columns\n categories = df['categories'].str.split(';', expand=True)\n \n # select the first row of the categories dataframe\n row = categories.iloc[0]\n \n # extract a list of new column names for categories.\n category_colnames = row.apply(lambda x: x.split('-')[0])\n \n # rename the columns of `categories`\n categories.columns = category_colnames\n \n # keep only the trailing digit of each value and convert it to int (0/1)\n for column in categories:\n categories[column] = categories[column].astype(str).str[-1] \n categories[column] = categories[column].astype(int)\n \n # map 2 to 1 so the 'related' column is binary\n categories[\"related\"] = categories.related.replace({2:1})\n \n # drop the old categories column and append the expanded one\n df.drop('categories', axis = 1, inplace = True)\n df = pd.concat([df, categories], axis=1)\n \n # drop duplicates\n df = df.drop_duplicates()\n \n return df\n\n\ndef save_data(df, database_filename):\n \"\"\"\n Save processed dataframe into sqlite database\n Args: \n df: Dataframe to be saved\n database_filename: File path of SQL Database to be saved\n Returns: \n None\n \"\"\"\n # save data into a sqlite database\n engine = create_engine('sqlite:///' + database_filename)\n df.to_sql('DisasterResponse', engine, index=False, if_exists='replace')\n\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"jdjaramillou/Data-Scientist-Nanodegree-Program","sub_path":"Project2/data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12696141052","text":"#!/usr/bin/python3\n\"\"\"\n prints my github id\n\"\"\"\n\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n site = 'https://api.github.com/user'\n uid = sys.argv[1]\n pw = sys.argv[2]\n resp = requests.get(site, auth=(uid, pw))\n acc = resp.json()\n print(acc.get('id'))\n","repo_name":"gavazcal/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/10-my_github.py","file_name":"10-my_github.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14868980387","text":"from eth_account import account\nfrom eth_utils import address\nfrom web3 import Web3\nimport json\n\n\n# deploy contract, return reciept and abi\ndef deploy(path, account, w3, *args):\n with open(f\"bin/{path}.abi\", \"r\") as abi_p:\n abi = json.loads(abi_p.read())\n with open(f\"bin/{path}.bin\", \"r\") as bytecode_p:\n bytecode = bytecode_p.read()\n \n contract = w3.eth.contract(abi=abi, bytecode=bytecode)\n # print(f\"Account {account} balance before deployment: {w3.eth.getBalance(account)}\")\n\n tx_hash = contract.constructor(*args).transact({\"from\": account})\n tx_reciept = w3.eth.get_transaction_receipt(tx_hash)\n\n # print(f\"Account {account} balance after deployment: {w3.eth.getBalance(account)}\")\n return tx_reciept, abi\n\n\n# deploy and get contract object\ndef build_contract(path, account, w3, *args):\n tx_reciept, abi = deploy(path, account, w3, *args)\n return tx_reciept, w3.eth.contract(address=tx_reciept[\"contractAddress\"], abi=abi)\n\n\n# example use of deploy.py\ndef _hello_world():\n w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))\n account = w3.eth.accounts[0]\n _, contract = build_contract(\"contracts/HelloWorld\", account, w3, \"Hi!\")\n \n print(contract.functions.getMessage().call())\n contract.functions.changeMessage(\"Hello!\").transact({\"from\": account})\n print(contract.functions.getMessage().call())","repo_name":"ramgos/RPSsolidity","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22416079351","text":"# labels: test_group::mlagility name::yolos_tiny_for_object_detection author::huggingface_pytorch task::Computer_Vision\n\"\"\"https://huggingface.co/hustvl/yolos-tiny\"\"\"\nfrom mlagility.parser import parse\nimport transformers\nimport torch\n\n# Parsing command-line arguments\nbatch_size, height, num_channels, width = parse(\n [\"batch_size\", \"height\", \"num_channels\", \"width\"]\n)\n\n\n# Model and input configurations\nmodel = transformers.YolosForObjectDetection.from_pretrained(\"hustvl/yolos-tiny\")\n\ninputs = {\n \"pixel_values\": torch.ones(\n [batch_size, num_channels, height, width], dtype=torch.float\n )\n}\n\n\n# Call 
model\nmodel(**inputs)\n","repo_name":"groq/mlagility","sub_path":"models/transformers/yolos_tiny_for_object_detection.py","file_name":"yolos_tiny_for_object_detection.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"} +{"seq_id":"29706399178","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom PyQt4 import QtGui, QtCore\n\nmargin = 4\nspacing = 4\n\n\nclass Box(QtGui.QBoxLayout):\n _horizontal = QtGui.QBoxLayout.LeftToRight\n _vertical = QtGui.QBoxLayout.TopToBottom\n\n def __init__(self, direction, QWidget_parent=None,\n margin=margin, spacing=spacing):\n \"\"\"\n\n :param direction: Box._horizontal \\ Box._vertical\n :param QWidget_parent: QWidget\n :param margin: margin around the layout\n :param spacing: interval (step) between the widgets\n \"\"\"\n super().__init__(direction, QWidget_parent)\n self.setDirection(direction)\n self.setMargin(margin)\n self.setSpacing(spacing)\n\n\nclass ToolButton(QtGui.QToolButton):\n def __init__(self, name, parent=None):\n super().__init__()\n self.parent = parent\n self._name = name\n self.setObjectName(self.name)\n\n @property\n def name(self):\n return str(self._name)\n\n @name.setter\n def name(self, name):\n print(name)\n self._name = str(name)\n\n\nclass GameButton(ToolButton):\n def __init__(self, object_name, index):\n super().__init__(object_name)\n self.index = index\n self.setObjectName(self._name)\n\n def __repr__(self):\n return \"\"\"\n object - {};\n object_name - {};\n index - {}\n \"\"\".format(self.__class__.__name__, self._name, self.index)\n\n\nclass SettingButton(QtGui.QPushButton):\n def __init__(self, name, size):\n \"\"\"\n\n :type size_button: int\n :type size_icon: int\n :type name: str\n \"\"\"\n super().__init__()\n self.setObjectName(name)\n self.setIconSize(QtCore.QSize(size, size))\n self.setFixedSize(size + 2, size + 2)\n\n\nclass Frame(QtGui.QFrame):\n def __init__(self, name, parent):\n super().__init__(parent)\n self.setObjectName(name)\n self.setParent(parent)\n\n\nclass MenegerFrame(Frame):\n def __init__(self, name, parent=None):\n # pass the supplied parent through instead of discarding it\n super().__init__(name, parent)\n\n\nclass ToolGame(Frame):\n def __init__(self, name, parent=None):\n super().__init__(name, parent)\n\n\nclass FaderWidget(QtGui.QWidget):\n def __init__(self, old_widget, new_widget):\n QtGui.QWidget.__init__(self, new_widget)\n\n self.old_pixmap = QtGui.QPixmap(new_widget.size())\n old_widget.render(self.old_pixmap)\n self.pixmap_opacity = 1.0\n\n self.timeline = QtCore.QTimeLine()\n self.timeline.valueChanged.connect(self.animate)\n self.timeline.finished.connect(self.close)\n self.timeline.setDuration(999)\n self.timeline.start()\n\n self.resize(new_widget.size())\n self.show()\n\n def paintEvent(self, event):\n painter = QtGui.QPainter()\n painter.begin(self)\n painter.setOpacity(self.pixmap_opacity)\n painter.drawPixmap(0, 0, self.old_pixmap)\n painter.end()\n\n def animate(self, value):\n self.pixmap_opacity = 1.0 - value\n self.repaint()\n\n\nclass StackedLayout(QtGui.QStackedLayout):\n def __init__(self, parent=None, *__args):\n super().__init__(*__args)\n\n\n def add_widget(self, QWidget):\n self.addWidget(QWidget)\n\n","repo_name":"zaswed76/cube110_v5","sub_path":"gui/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2420036988","text":"from django.conf.urls import url\nfrom . 
import views\nfrom shop.views import IndexView\n\n\napp_name = 'shop'\n\nurlpatterns = [\n\turl(r'^$', IndexView.as_view(), name='index'),\n\turl(r'^register/', views.RegisterCreate.as_view(), name='register'),\n\turl(r'^coffee/', views.coffee_view, name='coffee_view'),\n\turl(r'^baked/', views.baked_view, name='baked'),\n\turl(r'^contacts/', views.contacts_view, name='contacts'),\n\n]\n","repo_name":"Drew81/CoffeeShopReg","sub_path":"coffee/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8455296966","text":"# Write a program for a personal contact book.\n# Menu based interface for creating, updating, searching and removing records.\n# The records are persisted with pickle in contacts.p (rather than the comma-separated text file the exercise describes).\n\nimport pickle\ncontactDict = {}\nloadedContactDict = {}\ncreateNewContactDict = {}\n\n\ndef menu():\n print(\"1. Create a new contact\")\n print(\"2. Update a contact\")\n print(\"3. Search a contact\")\n print(\"4. Remove a contact\")\n number = int(input(\"Enter a number to make a choice: \"))\n\n if number == 1:\n createContact()\n elif number == 2:\n updateContact()\n elif number == 3:\n searchContact()\n elif number == 4:\n deleteContact()\n else:\n print(\"Wrong number!\")\n\n\ndef saveFile():\n with open(\"contacts.p\", \"wb\") as f:\n pickle.dump(contactDict, f)\n\n\ndef loadFile():\n with open(\"contacts.p\", \"rb\") as f:\n loadedContactDict = pickle.load(f)\n return loadedContactDict\n\n\n# It took me an enormous amount of free time to understand and write a working dictionary. But I'm happy at the end. Not perfect, but it works.\ndef createContact():\n with open(\"contacts.p\", \"rb\") as f:\n loadedContactDict = pickle.load(f)\n contactDict = loadedContactDict\n newName = input(\"Enter a name: \")\n print(\"Enter data for\", newName)\n\n createNewContactDict = {\n newName: {\"address\": \"\", \"birthDay\": \"\", \"phoneNumber\": \"\", \"email\": \"\", \"profession\": \"\", \"interests\": \"\"}}\n createNewContactDict[newName][\"address\"] = input(\"Enter an address: \")\n createNewContactDict[newName][\"birthDay\"] = int(input(\"Enter a birth day: \"))\n createNewContactDict[newName][\"phoneNumber\"] = int(input(\"Enter a phone number: \"))\n createNewContactDict[newName][\"email\"] = input(\"Enter an email: \")\n createNewContactDict[newName][\"profession\"] = input(\"Enter a profession: \")\n createNewContactDict[newName][\"interests\"] = input(\"Enter interests: \")\n contactDict.update(createNewContactDict)\n\n with open(\"contacts.p\", \"wb\") as f:\n pickle.dump(contactDict, f)\n\n\ndef searchContact():\n contactDict = loadFile()\n print(\"Look up name.\")\n name = input(\"Enter name: \")\n if name in contactDict:\n print(\"Information for\", name, \" is: \", contactDict[name])\n else:\n print(name, \"was not found.\")\n\n\ndef updateContact():\n with open(\"contacts.p\", \"rb\") as f:\n loadedContactDict = pickle.load(f)\n contactDict = loadedContactDict\n name = input(\"Enter a name: \")\n print(\"Update all information about\", name)\n\n createNewContactDict = {\n name: {\"address\": \"\", \"birthDay\": \"\", \"phoneNumber\": \"\", \"email\": \"\", \"profession\": \"\", \"interests\": \"\"}}\n createNewContactDict[name][\"address\"] = input(\"Enter an address: \")\n createNewContactDict[name][\"birthDay\"] = int(input(\"Enter a birth day: \"))\n 
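# NOTE: int() raises ValueError on non-numeric input; wrapping these int(input(...)) prompts in try/except would make the menu more forgiving.\n 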
createNewContactDict[name][\"phoneNumber\"] = int(input(\"Enter a phone number: \"))\n createNewContactDict[name][\"email\"] = input(\"Enter an email: \")\n createNewContactDict[name][\"profession\"] = input(\"Enter a profession: \")\n createNewContactDict[name][\"interests\"] = input(\"Enter interests: \")\n contactDict.update(createNewContactDict)\n\n with open(\"contacts.p\", \"wb\") as f:\n pickle.dump(contactDict, f)\n\n\ndef deleteContact():\n with open(\"contacts.p\", \"rb\") as f:\n loadedContactDict = pickle.load(f)\n contactDict = loadedContactDict\n name = input(\"Enter name to remove the contact: \")\n if name in contactDict:\n del contactDict[name]\n contactDict.update(createNewContactDict)\n with open(\"contacts.p\", \"wb\") as f:\n pickle.dump(contactDict, f)\n print(name, \"was removed\")\n else:\n print(name, \"was not found!\")\n\n\nmenu()","repo_name":"Tsaribrodsky/University","sub_path":"ProgrammingBasicsPython/homework Exercise5/ContactBook.py","file_name":"ContactBook.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14345439136","text":"# import Image module\nimport re\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport os, sys\n\n# import_dir = '../Exports/Images/wb_roads_2020-05-01_2020-07-31_2021-05-01_2021-07-31/png'\n# export_dir = '../Exports/Images/wb_roads_2020-05-01_2020-07-31_2021-05-01_2021-07-31/png_web'\n\nimport_dir = 'D:/road_upgrades/Exports/Images/wb_roads_timeline_2016-05-01_2021-07-31/png'\nexport_dir = 'D:/road_upgrades/Exports/Images/wb_roads_timeline_2016-05-01_2021-07-31/gifs'\nspecific_road_subroad_ids = []\noverwrite = True\n#specific_road_subroad_ids = [(30, 0)]\n\n\nif not os.path.isdir(export_dir):\n os.mkdir(export_dir)\n\nimages_alphabetic_order = os.listdir(import_dir)\n\nimage_dics = []\nfor f in images_alphabetic_order:\n m = re.search(r\"^(\\d+)_(\\d+)_(\\d+)_(\\d+)\", f)\n road_id = int(m.group(1))\n subroad_id = int(m.group(2))\n year = int(m.group(3))\n month = int(m.group(4))\n insert = {'road_id': road_id, 'subroad_id': subroad_id, 'year': year, 'month': month, 'filename': f}\n image_dics.append(insert)\n\nimage_dics = sorted(image_dics, key=lambda k: (k['road_id'], k['subroad_id'], k['year'], k['month']))\nimages = [x['filename'] for x in image_dics]\n\n\nimage_collections = {}\n\nfor file in images:\n m = re.search(r\"^(\\d+)_(\\d+)_\", file)\n road_id = int(m.group(1))\n subroad_id = int(m.group(2))\n tup = (road_id, subroad_id)\n if tup not in list(image_collections.keys()):\n image_collections[tup] = [file]\n else:\n image_collections[tup].append(file)\n\n\nfor tup in list(image_collections.keys()):\n if isinstance(specific_road_subroad_ids, list):\n if len(specific_road_subroad_ids) > 0 and (tup not in specific_road_subroad_ids):\n continue\n\n gif_filename = f\"{tup[0]}_{tup[1]}\"\n if os.path.isfile(f'{export_dir}/{gif_filename}.gif') and overwrite == False:\n continue\n\n # Now import the images\n img_list = []\n for i in image_collections[tup]:\n m = re.search(r\"^(\\d+)_(\\d+)_(\\d+)_(\\d+)\", i)\n road_id = int(m.group(1))\n subroad_id = int(m.group(2))\n year = int(m.group(3))\n month = int(m.group(4))\n\n fp = f'{import_dir}/{i}'[:-4]\n img = Image.open(f\"{fp}.png\")\n img_cropped = img.crop((52, 25, 1852, 845))\n\n\n # add some text\n I1 = ImageDraw.Draw(img_cropped)\n\n # Custom font style and font size\n\n font_large = 
ImageFont.truetype('D:/road_upgrades/Fonts/BebasNeue-Regular.ttf', 70)\n font_small = ImageFont.truetype('D:/road_upgrades/Fonts/BebasNeue-Regular.ttf', 40)\n\n # Add Text to an image\n I1.text((20, 20), f\"{year}\", font=font_large, fill=(255, 255, 255))\n I1.text((20, 760), f\"Road: {road_id} Subroad: {subroad_id}\", font=font_small, fill=(255, 255, 255))\n\n # Display edited image\n #img_cropped.show()\n # Save the edited image\n #img.save(\"car2.png\")\n\n # I downsize the image with an ANTIALIAS filter (gives the highest quality)\n #img_web = img_cropped.resize((160, 73), Image.ANTIALIAS) # 220, 100\n\n img_list.append(img_cropped)\n\n\n gif_filename = f\"{tup[0]}_{tup[1]}\"\n img_list[0].save(f'{export_dir}/{gif_filename}.gif',\n save_all=True,\n append_images=img_list[1:],\n duration=1000,\n loop=0)\n\n print(f\"Published: {gif_filename}\")","repo_name":"jeffrey-clark/road_upgrades","sub_path":"python_road_upgrades/Scripts/makeGIFs.py","file_name":"makeGIFs.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40196484487","text":"\"\"\"\nDefines the X maps for Tetrahedrons.\n\"\"\"\n\nfrom newton_generation import *\n\n\nclass LinearTetrahedron(LinearBase):\n def __init__(self):\n\n num_vertices = 4\n ndim = 3\n name = \"linear_3d\"\n namespace = \"Tetrahedron\"\n x_description = \"\"\"\nX(xi) = (1/2)[v1-v0, v2-v0, v3-v0] (xi - [-1,-1,-1]^T) + v0\n\nwhere v*-v0 form the columns of the matrix.\n\"\"\"\n LinearBase.__init__(self, num_vertices, ndim, name, namespace, x_description)\n\n def get_x(self, xi):\n v = self.vertices\n A = 0.5 * Matrix(\n [\n [v[1][0] - v[0][0], v[2][0] - v[0][0], v[3][0] - v[0][0]],\n [v[1][1] - v[0][1], v[2][1] - v[0][1], v[3][1] - v[0][1]],\n [v[1][2] - v[0][2], v[2][2] - v[0][2], v[3][2] - v[0][2]],\n ]\n )\n\n s = Matrix(\n [-1.0, -1.0, -1.0],\n )\n x = A @ (xi - s) + v[0]\n\n return x\n\n\ndef self_test():\n\n geom_x = LinearTetrahedron()\n\n vertices_ref = (\n (-1.0, -1.0, -1.0),\n (1.0, -1.0, -1.0),\n (-1.0, 1.0, -1.0),\n (-1.0, -1.0, 1.0),\n )\n geom_ref = LinearGeomEvaluate(geom_x, vertices_ref)\n\n for vx in vertices_ref:\n to_test = geom_ref.x(vx)\n correct = vx\n assert (\n np.linalg.norm(\n np.array(correct).ravel() - np.array(to_test).ravel(), np.inf\n )\n < 1.0e-15\n )\n to_test = geom_ref.f(vx, vx)\n assert np.linalg.norm(np.array(to_test).ravel(), np.inf) < 1.0e-15\n\n vertices_test = (\n (-3.0, -2.0, 2.0),\n (1.0, -1.0, 2.0),\n (2.0, 2.0, 2.5),\n (-1.0, 4.0, 4.5),\n )\n geom_test = LinearGeomEvaluate(geom_x, vertices_test)\n geom_newton = Newton(geom_x)\n\n geom_newton_evaluate = NewtonEvaluate(geom_newton, geom_test)\n\n xi_correct0 = -0.9\n xi_correct1 = 0.8\n xi_correct2 = 0.2\n xi_correct = (xi_correct0, xi_correct1, xi_correct2)\n phys = geom_test.x(xi_correct)\n residual, fv = geom_newton_evaluate.residual(xi_correct, phys)\n assert residual < 1.0e-15\n\n xi = [0.0, 0.0, 0.0]\n for stepx in range(5):\n residual, fv = geom_newton_evaluate.residual(xi, phys)\n xin = geom_newton_evaluate.step(xi, phys, fv)\n xi[0] = xin[0]\n xi[1] = xin[1]\n xi[2] = xin[2]\n\n assert abs(xi[0] - xi_correct[0]) < 1.0e-14\n assert abs(xi[1] - xi_correct[1]) < 1.0e-14\n\n vertices_test = (\n (-0.2, -0.8, -0.6),\n (-2.20179e-12, -0.8, -0.6),\n (0.00913424, -0.730923, -0.420212),\n (-0.1, -0.9, -0.54),\n )\n geom_test = LinearGeomEvaluate(geom_x, vertices_test)\n geom_newton = Newton(geom_x)\n\n geom_newton_evaluate = NewtonEvaluate(geom_newton, 
geom_test)\n\n for vi, vx in enumerate(vertices_ref):\n to_test = geom_test.x(vx)\n correct = vertices_test[vi]\n assert (\n np.linalg.norm(\n np.array(correct).ravel() - np.array(to_test).ravel(), np.inf\n )\n < 1.0e-15\n )\n to_test = geom_test.f(vx, correct)\n assert np.linalg.norm(np.array(to_test).ravel(), np.inf) < 1.0e-15\n\n xi_correct0 = -0.6\n xi_correct1 = -0.5\n xi_correct2 = -0.2\n\n xi_correct = (xi_correct0, xi_correct1, xi_correct2)\n phys = geom_test.x(xi_correct)\n residual, fv = geom_newton_evaluate.residual(xi_correct, phys)\n assert residual < 1.0e-15\n\n phys_nektar = (-0.0677164398704403, -0.8227307060000001, -0.5310530700000000)\n\n assert abs(phys_nektar[0] - phys[0]) < 1.0e-7\n assert abs(phys_nektar[1] - phys[1]) < 1.0e-7\n assert abs(phys_nektar[2] - phys[2]) < 1.0e-7\n\n xi = [0.0, 0.0, 0.0]\n for stepx in range(5):\n residual, fv = geom_newton_evaluate.residual(xi, phys)\n xin = geom_newton_evaluate.step(xi, phys, fv)\n xi[0] = xin[0]\n xi[1] = xin[1]\n xi[2] = xin[2]\n\n assert abs(xi[0] - xi_correct[0]) < 1.0e-14\n assert abs(xi[1] - xi_correct[1]) < 1.0e-14\n assert abs(xi[2] - xi_correct[2]) < 1.0e-14\n\n\ndef get_geom_type():\n self_test()\n return LinearTetrahedron\n\n\nif __name__ == \"__main__\":\n self_test()\n","repo_name":"ExCALIBUR-NEPTUNE/NESO","sub_path":"python/deformed_mappings/tetrahedron.py","file_name":"tetrahedron.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"41428084755","text":"import base64\nfrom io import BytesIO\nfrom einops import rearrange\nimport json\nfrom PIL import Image\nfrom pytorch_lightning import seed_everything\nimport numpy as np\nfrom sagemaker_inference.errors import BaseInferenceToolkitError\nimport sgm\nfrom sgm.inference.api import (\n ModelArchitecture,\n SamplingParams,\n SamplingPipeline,\n Sampler,\n)\nfrom sgm.inference.helpers import get_input_image_tensor, embed_watermark\nimport os\n\n\ndef model_fn(model_dir, context=None):\n # Enable the refiner by default\n disable_refiner = os.environ.get(\"SDXL_DISABLE_REFINER\", \"false\").lower() == \"true\"\n\n sgm_path = os.path.dirname(sgm.__file__)\n config_path = os.path.join(sgm_path, \"configs/inference\")\n base_pipeline = SamplingPipeline(\n ModelArchitecture.SDXL_V1_BASE, model_path=model_dir, config_path=config_path\n )\n if disable_refiner:\n print(\"Refiner model disabled by SDXL_DISABLE_REFINER environment variable\")\n refiner_pipeline = None\n else:\n refiner_pipeline = SamplingPipeline(\n ModelArchitecture.SDXL_V1_REFINER,\n model_path=model_dir,\n config_path=config_path,\n )\n\n return {\"base\": base_pipeline, \"refiner\": refiner_pipeline}\n\n\ndef input_fn(request_body, request_content_type):\n if request_content_type == \"application/json\":\n model_input = json.loads(request_body)\n if not \"text_prompts\" in model_input:\n raise BaseInferenceToolkitError(400, \"Invalid Request\", \"text_prompts missing\")\n return model_input\n else:\n raise BaseInferenceToolkitError(\n 400, \"Invalid Request\", \"Content-type must be application/json\"\n )\n\n\ndef predict_fn(data, model, context=None):\n # Only a single positive and optionally a single negative prompt are supported by this example.\n prompts = []\n negative_prompts = []\n if \"text_prompts\" in data:\n for text_prompt in data[\"text_prompts\"]:\n if \"text\" not in text_prompt:\n raise BaseInferenceToolkitError(\n 400, \"Invalid Request\", \"text missing from text_prompt\"\n )\n if 
\"weight\" not in text_prompt:\n text_prompt[\"weight\"] = 1.0\n if text_prompt[\"weight\"] < 0:\n negative_prompts.append(text_prompt[\"text\"])\n else:\n prompts.append(text_prompt[\"text\"])\n\n if len(prompts) != 1:\n raise BaseInferenceToolkitError(\n 400,\n \"Invalid Request\",\n \"One prompt with positive or default weight must be supplied\",\n )\n if len(negative_prompts) > 1:\n raise BaseInferenceToolkitError(\n 400, \"Invalid Request\", \"Only one negative weighted prompt can be supplied\"\n )\n\n seed = 0\n height = 1024\n width = 1024\n sampler_name = \"DPMPP2MSampler\"\n cfg_scale = 7.0\n steps = 50\n use_pipeline = model[\"refiner\"] is not None\n init_image = None\n image_strength = 0.35\n\n if \"height\" in data:\n height = data[\"height\"]\n if \"width\" in data:\n width = data[\"width\"]\n if \"sampler\" in data:\n sampler_name = data[\"sampler\"]\n if \"cfg_scale\" in data:\n cfg_scale = data[\"cfg_scale\"]\n if \"steps\" in data:\n steps = data[\"steps\"]\n if \"seed\" in data:\n seed = data[\"seed\"]\n seed_everything(seed)\n if \"use_pipeline\" in data:\n use_pipeline = data[\"use_pipeline\"]\n if \"init_image\" in data:\n if \"image_strength\" in data:\n image_strength = data[\"image_strength\"]\n try:\n init_image_bytes = BytesIO(base64.b64decode(data[\"init_image\"]))\n init_image_bytes.seek(0)\n if init_image_bytes is not None:\n init_image = get_input_image_tensor(Image.open(init_image_bytes))\n except Exception as e:\n raise BaseInferenceToolkitError(400, \"Invalid Request\", \"Unable to decode init_image\")\n\n if model[\"refiner\"] is None and use_pipeline:\n raise BaseInferenceToolkitError(400, \"Invalid Request\", \"Pipeline is not available\")\n\n try:\n if init_image is not None:\n img_height, img_width = init_image.shape[2], init_image.shape[3]\n output = model[\"base\"].image_to_image(\n params=SamplingParams(\n width=img_width,\n height=img_height,\n steps=steps,\n sampler=Sampler(sampler_name),\n scale=cfg_scale,\n img2img_strength=image_strength,\n ),\n image=init_image,\n prompt=prompts[0],\n negative_prompt=negative_prompts[0] if len(negative_prompts) > 0 else \"\",\n return_latents=use_pipeline,\n )\n else:\n output = model[\"base\"].text_to_image(\n params=SamplingParams(\n width=width,\n height=height,\n steps=steps,\n sampler=Sampler(sampler_name),\n scale=cfg_scale,\n ),\n prompt=prompts[0],\n negative_prompt=negative_prompts[0] if len(negative_prompts) > 0 else \"\",\n return_latents=use_pipeline,\n )\n\n if isinstance(output, (tuple, list)):\n samples, samples_z = output\n else:\n samples = output\n samples_z = None\n\n if use_pipeline and samples_z is not None:\n print(\"Running Refinement Stage\")\n samples = model[\"refiner\"].refiner(\n params=SamplingParams(\n steps=50, sampler=Sampler.EULER_EDM, scale=5.0, img2img_strength=0.3\n ),\n image=samples_z,\n prompt=prompts[0],\n negative_prompt=negative_prompts[0] if len(negative_prompts) > 0 else \"\",\n )\n\n samples = embed_watermark(samples)\n images = []\n for sample in samples:\n sample = 255.0 * rearrange(sample.cpu().numpy(), \"c h w -> h w c\")\n image_bytes = BytesIO()\n Image.fromarray(sample.astype(np.uint8)).save(image_bytes, format=\"PNG\")\n image_bytes.seek(0)\n images.append(image_bytes.read())\n\n return images\n\n except ValueError as e:\n raise BaseInferenceToolkitError(400, \"Invalid Request\", str(e))\n\n\ndef output_fn(prediction, accept):\n # This only returns a single image since that's all the example code supports\n if accept != \"image/png\":\n raise 
BaseInferenceToolkitError(400, \"Invalid Request\", \"Accept header must be image/png\")\n return prediction[0], accept\n","repo_name":"TheCodeofMonteCristo/deep-learning-containers","sub_path":"test/sagemaker_tests/pytorch/inference/resources/stabilityai/sdxl-v1/model_gpu/code/sdxl_inference.py","file_name":"sdxl_inference.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"5559880304","text":"# A class is a blueprint or template for creating objects\r\n\r\nclass Person:\r\n name = \"Rocky\"\r\n age = 22\r\n occupation = \"Software Engineer\"\r\n marital_status = 'Single'\r\n\r\n # *Methods are functions that are defined inside a class. \r\n\r\n # *name, age, occupation - those are attributes, and Rocky, 22, Software Engineer are their values\r\n\r\n\r\n def basic_info(self):\r\n print(f'{self.name} is a {self.occupation}\\n')\r\n\r\n # * The self parameter is a reference to the current instance of the class, and is used to access variables that belong to the class.\r\n\r\n\r\n\r\n def family_info(self):\r\n print(f'{self.name} is a {self.marital_status} person')\r\n\r\n# *newPerson object has the name attribute and the basic_info method.\r\n\r\nnewPerson = Person()\r\nnewPerson2 = Person()\r\n\r\n\r\n# Attribute values can be changed per object\r\nnewPerson.name = 'Mr. Rocky'\r\nnewPerson.age = 22.5\r\n# print(newPerson.name, newPerson.age)\r\n\r\nnewPerson2.name = 'Abdul Ali'\r\nnewPerson2.age = 33\r\n\r\nnewPerson.basic_info()\r\nnewPerson.family_info()\r\n\r\nnewPerson2.basic_info()\r\nnewPerson2.family_info()\r\n\r\n\r\nprint('-------New Program-------')\r\n\r\nclass Person:\r\n name = \"Harry\"\r\n occupation = \"Software Developer\"\r\n networth = 10\r\n def info(self):\r\n print(f\"{self.name} is a {self.occupation}\")\r\n\r\n\r\na = Person()\r\nb = Person()\r\nc = Person()\r\n\r\na.name = \"Shubham\"\r\na.occupation = \"Accountant\"\r\n\r\nb.name = \"Nitika\"\r\nb.occupation = \"HR\"\r\n\r\n# print(a.name, a.occupation)\r\na.info()\r\nb.info()\r\nc.info()\r\n\r\n\r\nprint('-------New Program From Programiz-------')\r\n\r\n#define a class\r\nclass Bike:\r\n name =''\r\n gear = 0\r\n\r\n#create object of class\r\nbike1 = Bike()\r\n\r\n#access attributes and assign new value\r\nbike1.gear = 21\r\nbike1.name = \"R15\"\r\n\r\nprint(f'Name: {bike1.name}\\nGear: {bike1.gear}')\r\n\r\n# * Create Multiple Objects of Python Class\r\n\r\n\r\n# define a class\r\nclass Employee:\r\n # define an attribute\r\n employee_id = 0\r\n\r\n# create two objects of the Employee class\r\nemployee1 = Employee()\r\nemployee2 = Employee()\r\n\r\n# access attribute using employee1\r\nemployee1.employee_id = 2001\r\nprint(f'Employee ID: {employee1.employee_id}')\r\n\r\n# access attributes using employee2\r\nemployee2.employee_id = 1002\r\nprint(f\"Employee ID: {employee2.employee_id}\")\r\n\r\n","repo_name":"rockyhaque/python-oop","sub_path":"class_object.py","file_name":"class_object.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30538171302","text":"import os\nimport pdb\nimport numpy as np\nimport torchvision\nimport torch\nimport torchvision.transforms as transforms\nfrom sklearn.model_selection import train_test_split\nfrom dl4d.images import ImageDataset\n\n\nclass Cifar10(ImageDataset):\n base_folder = 'cifar10'\n seed = 1337\n val_size = 1000\n\n def __init__(self, root, part='train', task='classification',\n features=False,\n 
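# NOTE: several options below (features, normalize, standardize, scale_*) are accepted but, as written, never used in this __init__\n 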
val_size=None,\n test_size=None,\n transform=None, target_transform=None, download=True,\n normalize=False, standardize=False,\n scale_overall=True, scale_channelwise=True):\n\n self.root = root\n if download:\n self.download()\n\n super(Cifar10, self).__init__(root, transform=transform,\n target_transform=target_transform)\n\n self.x, self.y = self.load_dataset(part=part)\n\n def __len__(self):\n return len(self.x)\n\n def download(self):\n final_path = os.path.join(self.root, self.base_folder)\n if not os.path.exists(final_path):\n os.mkdir(final_path)\n else:\n return\n\n np.random.seed(self.seed)\n\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR10(root=final_path,\n train=True,\n download=True,\n transform=transform)\n testset = torchvision.datasets.CIFAR10(root=final_path,\n train=False,\n download=True,\n transform=transform)\n\n X_train = trainset.data.swapaxes(2, 3).swapaxes(1, 2)\n Y_train = trainset.targets\n X_test = testset.data.swapaxes(2, 3).swapaxes(1, 2)\n Y_test = testset.targets\n\n X_test, X_val, Y_test, Y_val = train_test_split(X_test, Y_test,\n test_size=self.val_size,\n random_state=self.seed,\n stratify=Y_test)\n\n np.save(file=os.path.join(final_path, 'X_train.npy'), arr=X_train)\n np.save(file=os.path.join(final_path, 'X_test.npy'), arr=X_test)\n np.save(file=os.path.join(final_path, 'X_val.npy'), arr=X_val)\n np.save(file=os.path.join(final_path, 'Y_train.npy'), arr=Y_train)\n np.save(file=os.path.join(final_path, 'Y_test.npy'), arr=Y_test)\n np.save(file=os.path.join(final_path, 'Y_val.npy'), arr=Y_val)\n\n os.system('rm {}/cifar-10-python.tar.gz; rm -rf {}/cifar-10-batches-py'.format(final_path, final_path))","repo_name":"Goschjann/ssltsc","sub_path":"dl4d/datasets/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"72623416648","text":"from __future__ import annotations\n\nfrom datetime import datetime\nfrom logging import getLogger\nfrom urllib.parse import urlparse\n\nimport tldextract\nfrom aiohttp.web import RouteTableDef\nfrom aiohttp.web_exceptions import (\n HTTPBadRequest,\n HTTPNotFound,\n HTTPFound,\n HTTPInternalServerError,\n HTTPException,\n)\nfrom aiohttp.web_request import Request\nfrom aiohttp.web_response import Response\nfrom aiohttp_session import get_session\nfrom authcaptureproxy import AuthCaptureProxy\nfrom bson import ObjectId\nfrom bson.errors import InvalidId\nfrom httpx import Cookies\nfrom motor.motor_asyncio import AsyncIOMotorCollection\nfrom yarl import URL\n\nfrom .proxy import create_proxy\nfrom ..helpers import truthy_string, LimitedSizeDict, IS_DEBUG\nfrom ..types import Flow, Session\n\nroutes = RouteTableDef()\n\nlogger = getLogger(__name__)\n\n\n@routes.get(\"/flows/{flow_id}\")\nasync def initialize_flow(request: Request) -> Response:\n aiohttp_session = await get_session(request)\n\n flow_id: str = request.match_info[\"flow_id\"] # hex\n\n try:\n object_id = ObjectId(flow_id) # raises if invalid\n except InvalidId as e:\n raise HTTPBadRequest(reason=\"Unparseable Flow ID\") from e\n\n flow: Flow = await request.app[\"db\"][\"flows\"].find_one({\"_id\": object_id})\n\n if flow is None:\n raise HTTPNotFound(reason=\"Flow ID not found\")\n\n redirect_uri: str | None = truthy_string(request.query.get(\"redirect_uri\", \"\"))\n\n final_redirect_uri: str | None = None\n if redirect_uri is not None:\n try:\n parsed = 
urlparse(redirect_uri.lower().strip())\n except ValueError as e:\n raise HTTPBadRequest(reason=\"Unparseable redirect URI\") from e\n\n if not URL(redirect_uri).is_absolute():\n raise HTTPBadRequest(reason=\"Redirect URI must be absolute\")\n elif parsed.scheme not in (\"http\", \"https\"):\n raise HTTPBadRequest(reason=\"Redirect URI must be HTTP or HTTPS\")\n elif parsed.netloc not in flow[\"redirect_uri_domains\"]:\n # ok, so our redirect URI is not directly on the list\n # we need to check if any wildcard is matched\n\n redirect_uri_result = tldextract.extract(parsed.netloc)\n allowed: bool = False\n\n if \"*\" in flow[\"redirect_uri_domains\"]:\n # allow global wildcard\n allowed = True\n else:\n for domain in flow[\"redirect_uri_domains\"]:\n try:\n domain_result = tldextract.extract(domain)\n if (\n redirect_uri_result.registered_domain\n == domain_result.registered_domain\n and (\n redirect_uri_result.subdomain.endswith(\n domain_result.subdomain\n )\n or domain_result.subdomain\n == \"*\" # allow wildcard like *.twitter.com\n )\n ):\n allowed = True\n except ValueError:\n continue\n\n if not allowed:\n raise HTTPBadRequest(reason=\"Redirect URI not allowed per flow rules\")\n else:\n final_redirect_uri = parsed.geturl()\n else:\n # redirect URI is good as is.\n final_redirect_uri = parsed.geturl()\n # redirect_uri is not declared\n elif \"redirect_code\" in flow and flow[\"redirect_code\"]:\n # redirect code is set, so we do not need to follow the redirect URI.\n pass # lets the final_redirect_uri be none\n elif not IS_DEBUG:\n raise HTTPBadRequest(reason=\"Missing redirect URI\")\n\n new_session = await request.app[\"db\"][\"sessions\"].insert_one(\n {\n \"flow_id\": flow[\"_id\"],\n \"state\": \"pending\",\n \"auth_data\": None,\n \"redirect_uri\": final_redirect_uri,\n \"ip_address\": request.remote,\n \"created_at\": datetime.utcnow(),\n }\n )\n\n new_session_id = new_session.inserted_id\n\n path = f\"/flows/{flow_id}/session/{new_session_id}/auth\"\n\n return HTTPFound(location=path) # auth time\n\n\nasync def handle_auth(request: Request) -> Response:\n # this code handles out of domain redirects\n cdn_scheme: str | None = request.match_info.get(\"scheme\")\n cdn_domain: str | None = request.match_info.get(\"domain\")\n cdn_index_url: URL | None = (\n URL.build(scheme=cdn_scheme, host=cdn_domain)\n if cdn_scheme is not None and cdn_domain is not None\n else None\n )\n\n aiohttp_session = await get_session(request)\n\n flow_id: str | None = request.match_info.get(\"flow_id\") # hex\n\n if flow_id is None:\n if \"flow_id\" in aiohttp_session:\n flow_id = aiohttp_session[\"flow_id\"]\n else:\n raise HTTPBadRequest(reason=\"Missing flow ID\")\n\n try:\n object_id = ObjectId(flow_id) # raises if invalid\n except InvalidId as e:\n raise HTTPBadRequest(reason=\"Unparseable Flow ID\") from e\n\n flow: Flow | None = await request.app[\"db\"][\"flows\"].find_one({\"_id\": object_id})\n\n if flow is None:\n raise HTTPNotFound(reason=\"Flow ID not found\")\n else:\n if aiohttp_session.get(\"flow_id\") != flow_id:\n aiohttp_session[\"flow_id\"] = flow_id\n\n session_id: str | None = request.match_info.get(\"session_id\") # hex\n\n if session_id is None:\n if \"session_id\" in aiohttp_session:\n session_id = aiohttp_session[\"session_id\"]\n else:\n raise HTTPBadRequest(reason=\"Missing session ID\")\n\n try:\n object_id = ObjectId(session_id) # raises if invalid\n except InvalidId as e:\n raise HTTPBadRequest(reason=\"Unparseable Session ID\") from e\n\n session: Session | None = await 
request.app[\"db\"][\"sessions\"].find_one(\n {\"_id\": object_id}\n )\n\n if session is None:\n raise HTTPNotFound(reason=\"Session ID not found\")\n else:\n if aiohttp_session.get(\"session_id\") != session_id:\n aiohttp_session[\"session_id\"] = session_id\n\n if session[\"flow_id\"] != flow[\"_id\"]:\n raise HTTPBadRequest(reason=\"Session ID does not match Flow ID\")\n elif session[\"state\"] != \"pending\":\n raise HTTPBadRequest(reason=\"Session already completed\")\n\n # get AuthCaptureProxy\n proxies: LimitedSizeDict = request.app[\"auth_capture_proxies\"]\n\n cookies: Cookies | None = None\n\n if session_id not in proxies:\n cookies = Cookies()\n # this is here because we may need to reuse the session of the proxy to preserve cookies\n session_collection: AsyncIOMotorCollection = request.app[\"db\"][\"sessions\"]\n # this is done to prevent the request from being a closure variable in the proxy_factory function\n # which may take a lot of memory\n initial_base_url: URL = request.url\n\n new_proxy = create_proxy(\n flow,\n session,\n initial_base_url,\n session_collection,\n cookie_jar=cookies,\n )\n\n new_proxy.__initial_base_url = initial_base_url\n\n proxies[session_id] = new_proxy\n\n proxy: AuthCaptureProxy = proxies[session_id]\n\n proxy_url: URL | None = None\n target_url: URL | None = None\n\n if cdn_index_url is not None:\n initial_proxy_url = proxy._proxy_url\n\n proxy_url = initial_proxy_url.with_path(\n f\"{initial_proxy_url.path.removesuffix('auth')}cdn/{cdn_scheme}/{cdn_domain}\"\n )\n target_url = cdn_index_url\n\n try:\n handler_kwargs = {}\n\n if proxy_url is not None:\n handler_kwargs[\"access_url\"] = proxy_url\n\n if target_url is not None:\n handler_kwargs[\"host_url\"] = target_url\n\n return await proxy.all_handler(request, **handler_kwargs)\n except Exception as e:\n logger.exception(\"Error in proxy handler!\")\n raise HTTPInternalServerError(reason=\"Failed to pass login data back!\") from e\n\n\nroutes.view(\"/flows/{flow_id}/session/{session_id}/auth\")(handle_auth)\nroutes.view(\"/flows/{flow_id}/session/{session_id}/cdn/{scheme}/{domain}/{tail:.*}\")(\n handle_auth\n) # dealing with redirects out\nroutes.view(\"/flows/{flow_id}/session/{session_id}/auth/{tail:.*}\")(\n handle_auth\n) # other routes go through proxy\n\n\n@routes.view(\"/{tail:.*}\")\nasync def handle_root(request: Request) -> Response:\n \"\"\"\n handle bad routes\n :param request:\n :return:\n \"\"\"\n aiohttp_session = await get_session(request)\n\n if \"flow_id\" in aiohttp_session and \"session_id\" in aiohttp_session:\n try:\n return await handle_auth(request)\n except HTTPException as e:\n raise HTTPInternalServerError(reason=\"Failed to handle catch-all!\") from e\n else:\n raise HTTPNotFound()\n\n\n__all__ = (\"routes\",)\n","repo_name":"nefarium/nefarium-classic","sub_path":"src/nefarium/server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8936,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"73971874888","text":"from os import path\n\nimport pandas as pd\nimport numpy as np\n\nfrom Sources.Preprocessing import remove_edges_from_graph_for_link_prediction\nfrom Sources.Preprocessing import select_emb_save_path\n\n\ndef get_k_fold_data(run_node2vec_emb,data, graph_with_no_added_qualified_edges,\n use_saved_emb_file=None,\n add_qualified_edges=None,\n dataset=None, use_weighted_edges=None,\n normalized_weighted_edges=None,\n edges_percent=None,\n edges_number=None,\n 
added_edges_percent_of=None,\n use_shared_gene_edges=None,\n use_shared_phenotype_edges=None,\n use_shared_gene_and_phenotype_edges=None,\n use_shared_gene_but_not_phenotype_edges=None,\n use_shared_phenotype_but_not_gene_edges=None,\n use_shared_gene_or_phenotype_edges=None,\n use_gene_disease_graph=None,\n use_phenotype_gene_disease_graph=None,\n graph_edges_type=None,\n task=None,\n enforce_end2end=None,\n cross_validation=None,\n k_fold=None,\n split=None,\n get_data_with_emb_as_feat=None,\n split_by_node = None):\n raise NotImplementedError()\n\n reset_graph = graph_with_no_added_qualified_edges\n edges_with_features_dict = {}\n\n splitted_edges_dir = f'C:\\\\Users\\\\Anak\\\\PycharmProjects\\\\recreate_gene_disease\\\\Data\\\\processed\\\\LinkPrediction\\\\GeneDiseaseProject\\\\copd\\\\PhenotypeGeneDisease\\\\PGDP\\\\Node2Vec\\\\UnweightedEdges\\\\NoAddedEdges\\\\'\n\n if split_by_node:\n splitted_edges_dir += f'SplitByNode\\\\KFold={k_fold}\\\\'\n else:\n splitted_edges_dir += f'SplitByEdge\\\\KFold={k_fold}\\\\'\n\n # for i, (train_set_np, test_set_np) in enumerate(\n # data.split_cross_validation(data, k_fold, stratify=True, task=task,\n # # reset_cross_validation_split=True,\n # splitted_edges_dir=splitted_edges_dir\n # )):\n for i, (train_test_dict) in enumerate(\n data.split_cross_validation(data, k_fold, stratify=True, task=task,\n split_by_node=split_by_node,\n # reset_cross_validation_split=True,\n splitted_edges_dir=splitted_edges_dir\n )):\n if i == 10:\n print('run node2vec without disease nodes of the highest degree')\n exit()\n train_set_np = train_test_dict['train_set'] # is it np or df?\n test_set_np = train_test_dict['test_set']\n embedding_model_file_path = select_emb_save_path(\n save_path_base='data',\n emb_type='node2vec',\n add_qualified_edges=add_qualified_edges,\n dataset=dataset,\n use_weighted_edges=use_weighted_edges,\n edges_percent=edges_percent,\n edges_number=edges_number,\n added_edges_percent_of=added_edges_percent_of,\n use_shared_phenotype_edges=use_shared_phenotype_edges,\n use_shared_gene_edges=use_shared_gene_edges,\n use_shared_gene_and_phenotype_edges=use_shared_gene_and_phenotype_edges,\n use_shared_gene_but_not_phenotype_edges=use_shared_gene_but_not_phenotype_edges,\n use_shared_phenotype_but_not_gene_edges=use_shared_phenotype_but_not_gene_edges,\n use_shared_gene_or_phenotype_edges=use_shared_gene_or_phenotype_edges,\n use_gene_disease_graph=use_gene_disease_graph,\n use_phenotype_gene_disease_graph=use_phenotype_gene_disease_graph,\n graph_edges_type=graph_edges_type,\n task=task,\n split=split,\n k_fold=k_fold,\n k_fold_ind=i,\n split_by_node=split_by_node\n )\n\n if not path.exists(\n embedding_model_file_path):\n graph_with_no_added_qualified_edges = reset_graph.copy()\n # print(len( reset_graph.edges ))\n\n graph_with_no_added_qualified_edges_with_removed_test_edges = remove_edges_from_graph_for_link_prediction(\n data, graph_with_no_added_qualified_edges, train_set_np,\n test_set_np)\n\n run_node2vec_emb(data=data,\n G=graph_with_no_added_qualified_edges_with_removed_test_edges,\n embedding_model_file_path=embedding_model_file_path,\n # enforce_end2end=enforce_end2end,\n edges_percent=edges_percent,\n edges_number=edges_number,\n use_weighted_edges=use_weighted_edges,\n add_qualified_edges=add_qualified_edges,\n added_edges_percent_of=added_edges_percent_of,\n enforce_end2end=enforce_end2end\n )\n\n node_with_features = get_data_with_emb_as_feat(data,\n # use_saved_emb_file,\n add_qualified_edges,\n dataset,\n use_weighted_edges,\n 
normalized_weighted_edges,\n edges_percent,\n edges_number,\n added_edges_percent_of,\n use_shared_gene_edges,\n use_shared_phenotype_edges,\n use_shared_gene_and_phenotype_edges,\n use_shared_gene_but_not_phenotype_edges,\n use_shared_phenotype_but_not_gene_edges,\n use_gene_disease_graph,\n use_phenotype_gene_disease_graph,\n graph_edges_type,\n task,\n split,\n k_fold,\n split_by_node,\n k_fold_ind=i,\n # need it to choose file name\n )\n\n edges_with_features = []\n edges_label = []\n # BUG: there are duplicate value in edes_label? How come?\n ## even if there are duplicate index name, embedding value is unique. What is the reason\n for node1, node2, _ in np.concatenate([train_set_np, test_set_np],\n axis=0).tolist():\n edges_instance_with_features = np.concatenate(\n [node_with_features.loc[node1],\n node_with_features.loc[node2]]).tolist()\n edges_instance_label = f'{node1}_{node2}'\n edges_with_features.append(edges_instance_with_features)\n edges_label.append(edges_instance_label)\n\n edges_with_features_df = pd.DataFrame(edges_with_features,\n index=edges_label)\n edges_with_features_dict[i] = edges_with_features_df\n\n print()\n return edges_with_features_dict\n\n","repo_name":"Anak2016/recreate_gene_disease","sub_path":"Sources/Preparation/Features/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9050760551","text":"import os\r\nimport sys\r\n\r\n\r\ntrain_filename1 = sys.argv[1:][0]\r\ntrain_filename2 = sys.argv[1:][1]\r\n\r\n\r\nif train_filename1 ==\"./LOF_GOF/goflof_HGMD2019_vepout.csv\":\r\n out_filename = \"./LOF_GOF/goflof_HGMD2019_with_gnomAD.csv\"\r\n\r\nout_fp = open(out_filename, \"w\")\r\n\r\n\r\nfp = open(train_filename2, \"r\")\r\nhead = next(fp)\r\nhead__arr = head.strip().split(\",\")\r\nhead__mapping = {el: i for i, el in enumerate(head__arr)}\r\nnew_head =[]\r\nfor i, el in enumerate(head__arr):\r\n if i == head__mapping[\"phyloP100way_vertebrate\"]:\r\n continue\r\n new_head.append(el)\r\nl = \",\".join(new_head)\r\nout_fp.write(l)\r\nout_fp.write(\"\\n\")\r\nfor line in fp:\r\n line__arr = line.strip().split(\",\")\r\n new_line = []\r\n for i, el in enumerate(line__arr):\r\n if i == head__mapping[\"phyloP100way_vertebrate\"]:\r\n continue\r\n new_line.append(el)\r\n l = \",\".join(new_line)\r\n out_fp.write(l)\r\n out_fp.write(\"\\n\")\r\n\r\n\r\nfp = open(train_filename1, \"r\")\r\nhead = next(fp)\r\nhead__arr = head.strip().split(\",\")\r\nhead__mapping = {el: i for i, el in enumerate(head__arr)}\r\nfor line in fp:\r\n line__arr = line.strip().split(\",\")\r\n new_line = []\r\n for i, el in enumerate(line__arr):\r\n if i == head__mapping[\"id\"] or i == head__mapping[\"LOFGOF\"]:\r\n continue\r\n new_line.append(el)\r\n l = \",\".join(new_line)\r\n out_fp.write(l)\r\n out_fp.write(\"\\n\")\r\n","repo_name":"huangwenkui/UPPER","sub_path":"preprocess_snpdata/preprocess_LOFGOF_data/Build_LOFGOF_data.py","file_name":"Build_LOFGOF_data.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10176068347","text":"\"\"\"A set of examples with which to test the sweepy functionality. 
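Each example sweeps one or two parameters of a small model and writes the results to an output directory. 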
These tests serve a) as partial documentation, so that\nuse of the module should be clear from examining the examples, and b) as a test suite for future updates.\n\n\"\"\"\n\nimport sweepy\nimport math\nfrom numpy import random\nimport numpy as np\n\ndef almost_square( x, sig ):\n\t\"\"\"\n\tA function which takes inputs x and sig and returns x^(2+E) where E is a small\n\tGaussian error term generated randomly from random.normal(0,sig)\n\n\t\"\"\"\n\t\n\treturn x**( 2 + random.normal( 0, sig ) )\n\nclass survival_of_the_largest():\n\t\"\"\"\n\tClass defining a model which evolves integers. An integer's fitness is defined\n\tas how large it is.\n\t\n\t\"\"\"\n\n\tdef __init__( self, pop_size, mu, gens ):\n\t\t\"\"\"\n\t\tinputs:\n\t\t-------\n\t\tpop_size : int\n\t\t\tnumber of integer individuals to have in the population.\n\t\tmu : 0 < float < 1\n\t\t\tmutation rate, with probability mu an individual turns into another random number\n\t\tgens : int\n\t\t\tnumber of generations to select for\n\n\t\t\"\"\"\n\t\tself.pop_size = int( pop_size )\n\t\tself.mu = mu\n\t\tself.gens = gens\n\t\tself.population = random.choice( list( range( 100 ) ) , self.pop_size )\n\n\tdef mutate( self, individual ):\n\t\t\"\"\"\n\t\tRandomly mutates an individual with probability mu\n\n\t\t\"\"\"\n\t\tif random.random() < self.mu :\n\t\t\treturn random.randint(100)\n\t\telse:\n\t\t\treturn individual\n\n\tdef go(self):\n\t\t\"\"\"\n\t\tSet the simulation running\n\n\t\t\"\"\"\n\n\t\t##Define fitness as the value of the individual, normalised\n\t\t##to sum to one.\n\t\tfor gen in range(self.gens):\n\t\t\t\n\t\t\ttotal = float( sum( self.population ) )\n\t\t\tfitness = [ i/total for i in self.population ]\n\n\t\t\t#choose the individuals who will reproduce\n\t\t\tnew_pop = random.choice( self.population, p = fitness, replace = True, size = self.pop_size )\n\n\t\t\tself.population = list( map( self.mutate, new_pop ) )\n\n\t\t##The fittest individual\n\t\tself.max = max( self.population )\n\n\t\t##The least fit individual\n\t\tself.min = min( self.population )\n\n\t\t##The mean fitness\n\t\tself.mean_fitness = np.mean( self.population )\n\n\t\t##The standard deviation of fitness\n\t\tself.std_fitness = np.std( self.population )\n\n\nif __name__ == \"__main__\":\n\n\t#Run all the examples\n\n\t##Let's run almost_square with a fixed value of sigma = 0.2 and sweep over x from 0 to 2 in steps of 100, taking the mean of 5 runs\n\tsweepy.sweep_func( almost_square, [ ['x', 0, 2, 100] ], fixed_params = {'sig': 0.2}, reps = 5, output_directory = 'example_almost_square', ensure_dir = True )\n\n\t##Let's run almost square, but this time sweeping over sig as well. 
Have x run from 0.2 to 1.2 in steps of 1000 and sigma\n\t#from 0.05 to .5 in steps of 100, and this time don't repeat and give the output parameter a more descriptive name, so that\n\t##sweepy can name the subdirectories.\n\tsweepy.sweep_func( almost_square, [ [ 'x', 0.2, 1.2, 1000 ], [ 'sig', 0.05, 0.5, 100 ] ],\\\n\t output_directory = 'example_almost_square2', ensure_dir = True, output_names = [ 'x_to_two_ish' ] )\n\n\t##Run the survival of the largest module, and sweep over mutation rate and population size, and observe both mean fitness and std of fitness\n\tsweepy.sweep_class( survival_of_the_largest, [ [ 'mu', 0, 0.5, 10 ], [ 'pop_size', 10 , 50, 5 ] ], reps = 5,\\\n\t output_variables = [ 'mean_fitness', 'std_fitness' ], fixed_params = {'gens':100}, output_directory = 'example_SOTL', ensure_dir = True )\n","repo_name":"simontudge/sweepy","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"9915302340","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt # creates an instance of Pyplot\r\nimport numpy as np # creates an instance of Numpy\n\n# Definition of a potential-wall landscape - depth V_0 (eV), position x_0 (nm) and width L (nm) - Delta_X is the region over which we compute (nm), N = number of steps in x\r\n\r\ndef Potential_Landschaft(V0,X0,L,Delta_X,N):\r\n\tWerte_Potential = []\r\n\tX_Achse = []\r\n\th = Delta_X / N # resolution in nm / step\r\n\tprint(\"Resolution in nm\")\r\n\tprint(h)\r\n\tSchritte_bis_X0 = round(X0/h)# integer for the loop\r\n\tprint(\"Steps up to X0\")\r\n\tprint(Schritte_bis_X0)\r\n\tSchritte_ohne_Potential = round(L/h)# integer for the loop\r\n\tprint(\"Steps between the walls\")\r\n\tprint(Schritte_ohne_Potential)\r\n\tSchritte_nach_Potential = N-Schritte_bis_X0 - Schritte_ohne_Potential # not needed\r\n\t\r\n\tfor i in range(N):\r\n\t\tX_Achse.append(i*h)# build the x-axis in nm\r\n\t\t\r\n\tfor i in range(Schritte_bis_X0):# up to X0 there is a potential wall\r\n\t\tWerte_Potential.append(V0)\r\n\tfor i in range(Schritte_bis_X0,(Schritte_bis_X0 + Schritte_ohne_Potential)):# the region of width L after X0 is potential-free\r\n\t\tWerte_Potential.append(0)\r\n\tfor i in range((Schritte_bis_X0 + Schritte_ohne_Potential),N):# after that comes the right potential wall\r\n\t\tWerte_Potential.append(V0)\r\n\t\r\n\treturn Werte_Potential, X_Achse# returns the potential (eV) and the x-axis (nm)\r\n\r\n# now we still have to compute the correct K_List - that is the omega^2 prefactor, and it depends on the particle's energy\r\n\r\ndef K_list_gen(Potential,Energie,N):\r\n\tK_List = []\r\n\tFaktor = 26.27# conversion factor for eV and nm\r\n\tfor i in range(N):\r\n\t\tK_List.append(Faktor * (Energie-Potential[i]))# the particle's energy is fixed, but the potential landscape changes with the position in x\r\n\treturn K_List # returns the position-dependent K\n\ndef Wavefunction(K,U0,U1,Delta_X,N):# computes the wavefunction with position-dependent K\r\n\tWerte_U = []\r\n\tWerte_U.append(U0) # puts the starting value of U into list slot \"zero\"\r\n\tWerte_U.append(U1) # we need the values at x-h and x to estimate x+h\r\n\th = Delta_X / N\r\n\t# print(\"Step size\")\r\n\t# print(h)\r\n\tfor i in range(2, N):\r\n\t\tWerte_U.append(((2 * Werte_U[i - 1] * (1 - 5 / 12 * h * h * K[i-1])) - Werte_U[i - 2] * (\r\n\t\t\t\t1 + h * h * K[i-2] / 12)) / (1 + 1 / 12 * h 
\tfor i in range(2, N):\r\n\t\tWerte_U.append(((2 * Werte_U[i - 1] * (1 - 5 / 12 * h * h * K[i-1])) - Werte_U[i - 2] * (\r\n\t\t\t\t1 + h * h * K[i-2] / 12)) / (1 + 1 / 12 * h * h * K[i])) # Numerov with K(x), compare with the Numerov version above\r\n\t\t\r\n\treturn Werte_U\n\ndef Berechnung_der_Normierten_Wellenfunktion(Eigenwert,U0,U1,Potential,Delta_X,N):\r\n\tNorm_Wave =[]\r\n\tfrequency = K_list_gen(Potential,Eigenwert,N)\r\n\tEigen_Wave = Wavefunction(frequency,U0,U1,Delta_X,N)\r\n\t# We have now computed a non-normalized wavefunction for this energy eigenvalue\r\n\t# Next we have to work out the normalization\r\n\tSumme = 0\r\n\t\r\n\tfor i in range(N):# sum = integral\r\n\t\tSumme = Eigen_Wave[i]**2+Summe\r\n\tprint(\"Normalization\",Summe)\r\n\tfor i in range(N):\r\n\t\tNorm_Wave.append((Eigen_Wave[i]/np.sqrt(Summe))) # normalize the values\r\n\t\t\r\n\treturn Norm_Wave\n\n\nNum_0 = 0\r\nNum_1 = 0.000001\r\nRange = 15 # in x from 0 to 15 nm\r\nPosition = 2.5 # position of the left wall\r\nLength = 10 # width without potential\r\nSteps = 1000 # number of steps\r\nPotential_Wert = 1 # potential in eV\r\n\r\n# 1 eV (compare to 0.00375637 (inf) / 0.0034764667 (n=1) / 0.0139058668 n=2 (nom.) 0.013904041\n\nPot, X = Potential_Landschaft(Potential_Wert,Position,Length,Range,Steps)\n\nWave_1 = Berechnung_der_Normierten_Wellenfunktion(0.0034764667,Num_0,Num_1,Pot,Range,Steps)\nWave_2 = Berechnung_der_Normierten_Wellenfunktion(0.013904041005392142,Num_0,Num_1,Pot,Range,Steps)\r\nWave_3 = Berechnung_der_Normierten_Wellenfunktion(0.03127716244972,Num_0,Num_1,Pot,Range,Steps)\n\nfig, ax1 = plt.subplots()\r\n\r\ncolor = \"tab:red\"\r\nax1.set_xlabel(\"X-Axis (nm)\")\r\nax1.set_ylabel(\"Potential (eV)\")\r\nax1.plot(X, Pot, color = color)\r\nax1.tick_params(axis='y', labelcolor = color)\r\n\r\n\r\nax2 = ax1.twinx()\r\n\r\ncolor = \"tab:blue\"\r\nax2.set_ylabel(\"Psi\", color = color)\r\n#ax2.plot(X, Wave, color = color)\r\nax2.plot(X, Wave_1, color = color)\r\n#ax2.plot(X, Wave_2, color=color)\r\n#ax2.plot(X, Wave_3, color = color)\r\nax2.tick_params(axis='y', labelcolor =color)\r\n\r\nfig.tight_layout()\r\n\r\n#plt.xlim(1,14) # restrict the axes\r\n#plt.ylim(-50,50)\r\n\r\nplt.show()","repo_name":"RasmusRaschke/Informatik_Nanos","sub_path":"code_vorlesung/Energie_Values_K_list.py","file_name":"Energie_Values_K_list.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"1515718352","text":"from django.urls import path\n# from django.contrib.auth import views as auth_views\nfrom . import views\n
\napp_name = \"adoptionManagement\"\nurlpatterns = [\n    path(\"\", views.displayDog, name=\"home\"),\n    path(\"upload/\", views.uploadDog, name=\"uploadDog\"),\n    path(\"updateDog/<int:pk>/\", views.updateDog, name=\"updateDog\"),\n    path(\"deleteDog/<int:pk>/\", views.deleteDog, name=\"deleteDog\"),\n    path(\"queue/\", views.registerDog, name=\"registerDog\"),\n    path(\"registrations/\", views.displayRegistration, name=\"displayRegistration\"),\n    path(\"acceptRegistration/<int:pk>/\", views.acceptRegistration, name=\"acceptRegistration\"),\n    path(\"requestDog/<int:pk>/\", views.requestDog, name=\"requestDog\"),\n    path(\"cancelRequest/<int:pk>/\", views.cancelRequest, name=\"cancelRequest\"),\n    path(\"displayRequest/\", views.displayRequest, name=\"displayRequest\"),\n    path(\"clientRequest/\", views.clientRequest, name=\"clientRequest\"),\n    path(\"acceptRequest/<int:pk>/\", views.acceptRequest, name=\"acceptRequest\"),\n    path(\"declineRequest/<int:pk>/\", views.declineRequest, name=\"declineRequest\"),\n    path(\"search/\", views.searchResult, name = \"searchResult\"),\n]","repo_name":"S-Sahanii0/Petmedia","sub_path":"adoptionManagement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"28796751471","text":"from __future__ import print_function\nfrom datetime import datetime\nfrom dateutil import tz\nfrom datetime import timedelta\nimport dateutil.parser\nest = tz.gettz('America/Detroit')\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nimport time as t\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\n\ndef sayEvents(speechEngine):\n    \"\"\"Reads today's events (up to 10) from the user's primary Google Calendar\n    and speaks them through the given speech engine.\n    \"\"\"\n    creds = None\n    # The file token.pickle stores the user's access and refresh tokens, and is\n    # created automatically when the authorization flow completes for the first\n    # time.\n    if os.path.exists('token.pickle'):\n        with open('token.pickle', 'rb') as token:\n            creds = pickle.load(token)\n    # If there are no (valid) credentials available, let the user log in.\n    if not creds or not creds.valid:\n        if creds and creds.expired and creds.refresh_token:\n            creds.refresh(Request())\n        else:\n            flow = InstalledAppFlow.from_client_secrets_file(\n                'credentials.json', SCOPES)\n            creds = flow.run_local_server(port=0)\n        # Save the credentials for the next run\n        with open('token.pickle', 'wb') as token:\n            pickle.dump(creds, token)\n\n    service = build('calendar', 'v3', credentials=creds)\n\n    # Call the Calendar API\n    #today = datetime.utcnow().date()\n    #start = datetime(today.year, today.month, today.day, tzinfo=tz.tzutc()).astimezone(est)\n    #end = start + timedelta(1)\n    #print(start.isoformat())\n    #print(end.isoformat())\n    today = datetime.now().date()\n    dateString = today.strftime(\"%Y-%m-%d\")\n    events_result = service.events().list(calendarId='primary', timeMin=dateString+'T00:00:00-00:00',\n                                        maxResults=10, timeMax= dateString+'T23:59:00-00:00',singleEvents=True,\n                                        orderBy='startTime').execute()\n    events = events_result.get('items', [])\n    sayString = 'Today you have the following events: '\n    sayList = []\n    if not events:\n        print('No upcoming events found.')\n        return  # nothing to announce; also avoids the sayList[0] IndexError below\n
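    # Timed events expose a 'dateTime' key; all-day events only expose 'date'.\n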
    for event in events:\n        start = event['start'].get('dateTime', event['start'].get('date'))\n        #speechEngine.say(str(start)+str(event['summary']))\n        sayList.append(str(event['summary']))\n        print(start, event['summary'])\n    speechEngine.say(sayString)\n    t.sleep(0.5)\n    speechEngine.say(sayList[0])\n    t.sleep(1.0)\n    for event in sayList[1:]:\n        speechEngine.say('and ' + event)\n        t.sleep(1.0)\n","repo_name":"ankithu/HomeworkManagementServer","sub_path":"RaspberryPiScripts/googleCalenderEngine.py","file_name":"googleCalenderEngine.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"37891646854","text":"import cv2\nimport os\nimport sys\nimport time\n\nif __name__ == \"__main__\" :\n\n    cap = cv2.VideoCapture(\"introduce.mp4\")\n    if not cap.isOpened() :\n        sys.exit()\n\n    cap_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n    cap_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n    fps = cap.get(cv2.CAP_PROP_FPS)\n\n    fourcc = cv2.VideoWriter_fourcc('m','p','4','v')\n    writer = cv2.VideoWriter('detect_face.mp4',fourcc, fps, (cap_width, cap_height))\n\n    # cascade_base_path = \"haarcascades/\"\n    # # setup: load the cascades\n    # face_cascade = cv2.CascadeClassifier(os.path.join(cascade_base_path, 'haarcascade_frontalface_alt_tree.xml'))\n    # right_eye_cascade = cv2.CascadeClassifier(os.path.join(cascade_base_path, 'haarcascade_righteye_2splits.xml'))\n    # left_eye_cascade = cv2.CascadeClassifier(os.path.join(cascade_base_path, 'haarcascade_lefteye_2splits.xml'))\n\n    face_cascade = cv2.CascadeClassifier(\"cascade/cascade.xml\")\n    start = time.time()\n\n    while True :\n        # read one frame\n        ret, frame = cap.read()\n\n        if ret :\n            # report the current frame number\n            print(\"Processing frame: {}\".format(cap.get(cv2.CAP_PROP_POS_FRAMES)))\n\n
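            # CLAHE (Contrast Limited Adaptive Histogram Equalization) is applied to the\n            # V (brightness) channel only, evening out lighting before detection.\n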
            # adaptive histogram equalization of the image\n            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n            h, s, v = cv2.split(hsv)\n            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3, 3))\n            result = clahe.apply(v)\n            hsv = cv2.merge((h,s,result))\n            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n            # convert to grayscale\n            img_gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)\n\n            # 1. is there a face in the frame?\n            face_points = face_cascade.detectMultiScale(img_gray)\n\n            for (fx,fy,fw,fh) in face_points:\n\n                # # 2. crop out the ROI (Region of Interest)\n                # # split into right and left halves (two-faced, Baron Ashura style)\n                # width_center = fx + int(fw * 0.5)\n                # face_right_gray = img_gray[fy:fy+fh, fx:width_center]\n                # face_left_gray = img_gray[fy:fy+fh, width_center:fx+fw]\n\n                # # 3. check whether both the right and the left eye are visible, and draw the result\n                # right_eye_points = right_eye_cascade.detectMultiScale(face_right_gray)\n                # left_eye_points = left_eye_cascade.detectMultiScale(face_left_gray)\n\n                # if 0 < len(right_eye_points) :\n                #     # right eye in orange\n                #     (rx,ry,rw,rh) = right_eye_points[0]\n                #     cv2.rectangle(frame,(fx+rx,fy+ry),(fx+rx+rw,fy+ry+rh),(0,255,255),2)\n\n                # if 0 < len(left_eye_points) :\n                #     # left eye in red\n                #     (lx,ly,lw,lh) = left_eye_points[0]\n                #     cv2.rectangle(frame,(width_center+lx,fy+ly),(width_center+lx+lw,fy+ly+lh),(0,0,255),2)\n\n                # whole face in green\n                cv2.rectangle(frame,(fx,fy),(fx+fw,fy+fh),(0,255,0),2)\n\n            writer.write(frame) \n        else : break\n\n    print(\"Processing time: {} seconds\".format(time.time() - start))\n    writer.release()\n    cap.release()","repo_name":"moosan6363/opencv","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"1327784007","text":"#############################################################################\n# Code for Calculating Mean Annual Temperature, Annual Temperature Variation \n# and sediment thickness for Pliocene\n#############################################################################\n\nfrom math import *\nfrom numpy import *\nfrom netCDF4 import MFDataset, Dataset\n\n#################################################################\n# Importing the netCDF files for all the years in Pliocene\n#################################################################\n\npath1 = \"/esd/esd/data/climate_models/echam/echam_output/ESD/\"\nmodel = \"e010_hpc-bw_e5w2.3_PLIO_t159l31.1d\"\npath2 = \"/output_raw/\"\n\n# Open the raw output for model years 1004-1018; MFDataset concatenates all\n# files matching each pattern along the record (time) dimension\nncs = []\nfor year in range(1004, 1019):\n\tdata = path1 + model + path2 + \"e010_{0}*.nc\".format(year)\n\tncs.append(MFDataset(data, 'r'))\n\n# Surface temperature (tslm1) converted from Kelvin to degrees Celsius for\n# each year, with the dimension sizes taken from the first year\nT = [nc.variables['tslm1'][:,:,:] - 273.15 for nc in ncs]\ntime, lat, lon = shape(T[0])\n
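\n# For each model year we store MAT, the time mean of the monthly fields, and\n# Ta, half the annual range, i.e. 0.5*(warmest month - coldest month).\n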
\n#######################################################\n# Write out data to a new netCDF file for whole year\n#######################################################\n\nnco = Dataset(\"/esd/esd/docs/hsharma/data_small/data/1003_1017_Plio.nc\",'w', format=\"NETCDF4_CLASSIC\")\n\n# Creating Dimensions\nnco.createDimension('time', None) # It will store only the year\nnco.createDimension('lat', lat)\nnco.createDimension('lon', lon)\n\n# Creating Variables\ntimeo = nco.createVariable('time', 'i4', ('time',), fill_value=False) # Creating variable time with integer data type\nlato = nco.createVariable('lat', 'f4', ('lat',), fill_value=False)\nlono = nco.createVariable('lon', 'f4', ('lon',), fill_value=False)\nMAT_o= nco.createVariable('MAT', 'f4', ('time','lat', 'lon',)) # Creating a variable for Mean Annual Temperature\nTa_o = nco.createVariable('Ta', 'f4', ('time','lat', 'lon',)) # Creating a variable for Annual Temperature Variation\n\n# Attributes (the fields were converted to degrees Celsius above)\ntimeo.units = 'year'\nlato.units = 'degrees north'\nlono.units = 'degrees east'\nMAT_o.units = 'degrees Celsius'\nTa_o.units = 'degrees Celsius'\n\n# Populate the variables with data\ntimeo[:] = arange(1003,1018) # All the years\nlato[:] = ncs[0].variables['lat'][:]\nlono[:] = ncs[0].variables['lon'][:]\n\nTa = zeros((15,lat, lon), dtype=float64)\nMAT = zeros((15,lat,lon), dtype=float64)\n\nfor i in range(15):\n\tMAT[i,:,:] = mean(T[i], axis=0)\n\tTa[i,:,:] = 0.5*(amax(T[i], axis=0)-amin(T[i], axis=0))\n\n\tMAT_o[i,:,:] = MAT[i,:,:]\n\tTa_o[i,:,:] = Ta[i,:,:]\n","repo_name":"toddehlers/Frost_cracking_GCM_calculation","sub_path":"scripts/Temperature/1003_1017_Plio.py","file_name":"1003_1017_Plio.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"21245612120","text":"#----------------------------------------------------ROAD FIGHTER-----------------------------------------------------------#\n#----------------------------------DEVELOPED BY JUAN JOSÉ HOYOS URCUE---------------------------------#\n#----------------------------------PONTIFICIA UNIVERSIDAD JAVERIANA CALI---------------------------------#\n#----------------------------------SYSTEMS AND COMPUTER ENGINEERING---------------------------------#\n#---------------------------------------------------------2017-1---------------------------------------------------------------------#\n\n\nimport tkinter\nimport time\nimport random\nimport math\nimport pygame\nfrom pygame.locals import * # only for the music\nimport webbrowser # to open the pdf file containing the user manual\n\n\npygame.mixer.init()\n\n\nmusicaMenu = pygame.mixer.Sound(\"menu.wav\")\n\nsonidofail=pygame.mixer.Sound(\"loser.wav\")\ncrash=pygame.mixer.Sound(\"choque.wav\")\n\nrecarga=pygame.mixer.Sound(\"gasolina.wav\")\n\n\nmusicaMenu.play(20)\n\n# Create the window that will hold the menu, bound to the variable ventana\nventana = tkinter.Tk()\nventana.title(\"Road Fighter\")\nventana.geometry(\"1500x900\")\nv = tkinter.Toplevel()\n
v.geometry(\"1500x900\")\nimagen1 = tkinter.PhotoImage(file=\"fondoprincipal.png\")\nfondomenu = tkinter.Label(ventana, image=imagen1).place(x=0 ,y=0)\n\n\n# game instructions button\ndef instrucciones():\n    \"\"\"\n    This function opens the pdf document containing the instructions\n\n    \"\"\"\n\n    webbrowser.open_new(r'C:\Users\JuanJoséHoyosUrcu\Documents\GitHub\xCarGameJuanx\juegofirstry\manual.pdf')\ninstructions=tkinter.Button(ventana,text=\"How to play?\", command= instrucciones).place(x=20, y= 500 )\n\n\n#x=tkinter.StringVar()\nfondojuego =tkinter.Canvas(v,height=600,width=900,bg=\"black\")\n\nho= []\n\nvar=tkinter.StringVar()# charge textvariable\n#fondotextos= tkinter.Label(ventana,textvariable =var,bg=\"black\",fg=\"white\")\n# load the images to be used\n\n\n# level 1 backgrounds\nfondomarder=tkinter.PhotoImage(file=\"marderecha.png\")\nfondomarizq=tkinter.PhotoImage(file=\"marizq.png\")\ncentro1=tkinter.PhotoImage(file=\"centermar.png\")\n\n\n# level 2 backgrounds\nfondotierraizq=tkinter.PhotoImage(file=\"tierraizq.png\")\nfondotierrader=tkinter.PhotoImage(file=\"tierrader.png\")\ncentro2=tkinter.PhotoImage(file=\"centertierra.png\")\n\n# level 3 backgrounds\n\nmoradoizq=tkinter.PhotoImage(file=\"fondo3izq.png\")\nmoradoder=tkinter.PhotoImage(file=\"fondo3der.png\")\ncentro3=tkinter.PhotoImage(file=\"centermora.png\")\n\n# level 4 backgrounds\n\nrojoizq=tkinter.PhotoImage(file=\"rojoizq.png\")\nrojoder=tkinter.PhotoImage(file=\"rojoder.png\")\ncentro4=tkinter.PhotoImage(file=\"centerrojo.png\")\n\n# level 5 backgrounds\n\nspaceizq=tkinter.PhotoImage(file=\"spaceizq.png\")\nspaceder=tkinter.PhotoImage(file=\"spaceder.png\")\ncentro5=tkinter.PhotoImage(file=\"spacecenter.png\")\n\n\n\n# entry widgets to input and display the player names\n#1\nnombre=tkinter.StringVar()\nnombre.set(\"\")\ninstruccion_nombre1= tkinter.Label(ventana,text=\"Player 1 name\", font=(\"Tempus Sans ITC\",12),fg=\"black\").place(x=20, y=395)\nnombrecaja = tkinter.Entry(ventana,font=(\"Tempus Sans ITC\",12), textvariable=nombre).place(x=180, y = 395)\nmuestraplayer1= tkinter.Label(v,textvariable=nombre, font=(\"Tempus Sans ITC\",12),fg=\"black\")\n\n#2\nnombre2=tkinter.StringVar()\nnombre2.set(\"\")\ninstruccion_nombre2= tkinter.Label(ventana,text=\"Player 2 name\", font=(\"Tempus Sans ITC\",12)).place(x=20, y=445)\nnombrecaja2 = tkinter.Entry(ventana,font=(\"Tempus Sans ITC\",12) ,textvariable=nombre2).place(x=180, y = 445)\nmuestraplayer2= tkinter.Label(v,textvariable=nombre2, font=(\"Tempus Sans ITC\",12),fg=\"black\")\n\n\n#photopo=tkinter.PhotoImage(file=\"Tryfont3m.png\")\ncarrov1 = tkinter.PhotoImage(file=\"UserCare.png\")# green car (player 1)\ncarrov2 = tkinter.PhotoImage(file=\"UserCare.png\")# green car\nexplosion = tkinter.
PhotoImage(file=\"explosion1.png\")# crash explosion\nphoto=tkinter.PhotoImage(file=\"BlueCare.png\")# blue Runner car\nvancar=tkinter.PhotoImage(file=\"RedCare.png\")# red Minivan car\nfighter=tkinter.PhotoImage(file=\"YellowCare.png\")\n\n\nuser2=tkinter.PhotoImage(file=\"usercar2e.png\")\n\n\nminivan2=tkinter.PhotoImage(file=\"RedCare.png\")\n\nrun2=tkinter.PhotoImage(file=\"BlueCare.png\")\n\n\nfi2=tkinter.PhotoImage(file=\"YellowCare.png\")\n\nchargegas=tkinter.PhotoImage(file=\"Recarga.png\")\nchargegas2=tkinter.PhotoImage(file=\"Recarga.png\")\n\nmancha1=tkinter.PhotoImage(file=\"aceite.png\")\nmancha2=tkinter.PhotoImage(file=\"aceite.png\")\n\n# load the button images\nimagen_2boton=tkinter.PhotoImage(file=\"Level1.png\")\nimagen_3boton=tkinter.PhotoImage(file=\"Level2.png\")\nimagen_4boton=tkinter.PhotoImage(file=\"Level3.png\")\nimagen_5boton=tkinter.PhotoImage(file=\"Level4.png\")\nimagen_6boton=tkinter.PhotoImage(file=\"Level5.png\")\nimagen_1boton = tkinter.PhotoImage(file=\"BotonSalir.png\")\n\n# background movement\n#photopo=tkinter.PhotoImage(file=\"Tryfont3m.png\")\n\n# give the game its title\n\nImagen = tkinter.Label(ventana,text=\"Road Fighter\", font=(\"Tempus Sans ITC\",72)).place(x=450, y=20)\n\n# module-level variables (not defined inside functions) that the functions below use\npresiono = False\nx = None\ni = 0\nj = 0\nm= 0\ni=0\nz=0\nq=0\ng = 0 \nc = 0\nd = 5\n\n\n# main canvas\nfondojuego=tkinter.Canvas(v, width=1350, height=700,bd=0,highlightthickness=0)\n\n\n# widgets created from the images on the canvas\n#mapajuego= fondojuego.create_image(680,200, image=u)# loading\n\n\n# level 1 background widgets\nmard=fondojuego.create_image(1145,55, image=fondomarder)\nmari=fondojuego.create_image(230,55, image=fondomarizq)\nc1=fondojuego.create_image(695,380, image=centro1)# static center\n\n\n# level 2 background widgets\ntierrizq=fondojuego.create_image(230,55, image=fondotierraizq)\ntierrder=fondojuego.create_image(1145,55,image=fondotierrader)\nc2=fondojuego.create_image(690,380, image=centro2)# static center\n\n# level 3 background widgets\n\nmoraizq=fondojuego.create_image(230,55, image=moradoizq)\nmorader=fondojuego.create_image(1145,55, image=moradoder)\nc3=fondojuego.create_image(690,380,image=centro3)\n\n# level 4 background widgets\n\nroizq=fondojuego.create_image(230,55, image=rojoizq)\nroder=fondojuego.create_image(1140,55, image=rojoder)\nc4=fondojuego.create_image(690, 380, image= centro4)\n\n# level 5 background widgets\n\nsizq=fondojuego.create_image(230,55, image= spaceizq)\nsder=fondojuego.create_image(1145,55, image=spaceder)\nc5=fondojuego.create_image(690,380,image=centro5)\n\n\n# car, obstacle and enemy widgets for player 1\nx = fondojuego.create_image(100,600,image=carrov1)\nk = fondojuego.create_image(97,50,image=vancar)\nh=fondojuego.create_image(150,55, image=photo)\nf=fondojuego.create_image(250, 55, image=fighter)\n# car, obstacle and enemy widgets for player 2\nvan2=fondojuego.create_image(1220,50,image=minivan2)\nu2=fondojuego.create_image(1250,600, image= user2)\nf2=fondojuego.create_image(1250, 55, image=fi2)\nr2=fondojuego.create_image(1100,55, image=run2)\nga=fondojuego.create_image(200,55, image=chargegas)\nga2=fondojuego.create_image(1300,55, image=chargegas2)\nma1=fondojuego.create_image(120,50,image=mancha1)\nma2=fondojuego.create_image(1220,50,image=mancha2)\n\n\n# background movement\n#po=fondojuego.create_image(680,200,
image=photopo)\n\n\n# the functions that drive the game\n\n\ndef MiniVan(s):\n    \"\"\"\n    This function moves the MiniVan vertically. The MiniVan is a red car whose job is to move straight down the screen as an obstacle\n    for the player; it is the least dangerous enemy, since it never changes lanes while moving - its motion is constant.\n    \"\"\"\n    x=random.randint(0,50)\n    global fondojuego, m\n    \n    fondojuego.move(k, 0, s)\n    \n    if(fondojuego.coords(k)[1]>700):\n        fondojuego.move(k,x,-700)\n\n    if(fondojuego.coords(k)[0]>=310):\n        fondojuego.move(k,-203,0)\n\ndef MiniVan2(s):\n    \"\"\"\n    This function moves the MiniVan vertically for player 2's side; see MiniVan above.\n    \"\"\"\n    global fondojuego, m\n\n    x=random.randint(0,50)\n    \n    \n    fondojuego.move(van2, 0, s)\n    \n    if(fondojuego.coords(van2)[1]>700):\n        fondojuego.move(van2,x,-700)\n\n    if(fondojuego.coords(van2)[0]>=1220):\n        fondojuego.move(van2,-203,0)\n\n\ndef mancha(s):\n    \n    \"\"\"\n    This function moves the black oil slick vertically down the screen as an obstacle the player\n    can slip on - although the code that makes the car slip lives in the collision functions.\n    \"\"\"\n    global fondojuego\n    x=random.randint(0,50)\n    mm=random.randint(-5000,-3000)\n    \n    fondojuego.move(ma1, 0, s)\n    \n    if(fondojuego.coords(ma1)[1]>700):\n        fondojuego.move(ma1,x,mm)\n\n    if(fondojuego.coords(ma1)[0]>=310):\n        fondojuego.move(ma1,-203,0)\n\ndef mancha2(s):\n    \"\"\"\n    Same as mancha, but for player 2's oil slick.\n    \"\"\"\n    global fondojuego, m\n    mm=random.randint(-5000,-3000)\n    x=random.randint(0,100)\n    \n    \n    fondojuego.move(ma2, 0, s)\n    \n    if(fondojuego.coords(ma2)[1]>700):\n        fondojuego.move(ma2,x,mm)\n\n    if(fondojuego.coords(ma2)[0]>=1220):\n        fondojuego.move(ma2,-203,0)\n\n    \ndef Fighter2(X,Y):\n    \"\"\"\n    This function moves the yellow car, whose goal is to chase the player's car and crash into it; it is the most\n    dangerous of the three enemies, since it attacks the player's car directly.\n    \"\"\"\n    \n    if(fondojuego.coords(u2)[0]<fondojuego.coords(f2)[0]):\n        fondojuego.move(f2,-X,Y)\n    if(fondojuego.coords(u2)[0]>fondojuego.coords(f2)[0]):\n        fondojuego.move(f2,X,Y)\n    \n    if(fondojuego.coords(u2)[0]==fondojuego.coords(f2)[0]):\n        fondojuego.move(f2,0,Y)\n    if(fondojuego.coords(f2)[1]>700):\n        fondojuego.move(f2,0,-700)\n\n\ndef Fighter(X,Y):\n    \"\"\"\n    This function moves the yellow car, whose goal is to chase the player's car and crash into it; it is the most\n    dangerous of the three enemies, since it attacks the player's car directly.\n    \"\"\"\n    \n    if(fondojuego.coords(x)[0]<fondojuego.coords(f)[0]):\n        fondojuego.move(f,-X,Y)\n    if(fondojuego.coords(x)[0]>fondojuego.coords(f)[0]):\n        fondojuego.move(f,X,Y)\n    \n    if(fondojuego.coords(x)[0]==fondojuego.coords(f)[0]):\n        fondojuego.move(f,0,Y)\n    if(fondojuego.coords(f)[1]>700):\n        fondojuego.move(f,0,-700)\n\n
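# The refuel ship below scrolls like the other sprites, but after leaving the\n# screen it respawns 3000 px above the canvas, so refuelling chances stay rare.\n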
def charge(s):\n    \"\"\"\n    This function moves the refuel ship vertically; a player who drives over it refuels some fuel -\n    although the code that actually refills the tank lives in the collision functions.\n    \"\"\"\n    global fondojuego, m\n\n    x=random.randint(0,50)\n    \n    \n    fondojuego.move(ga, 0, s)\n    \n    if(fondojuego.coords(ga)[1]>700):\n        fondojuego.move(ga,x,-3000)\n\n    if(fondojuego.coords(ga)[0]>=300):\n        fondojuego.move(ga,-203,0)\n\ndef charge2(s):\n    \"\"\"\n    Same as charge, but for player 2's refuel ship.\n    \"\"\"\n    global fondojuego, m\n\n    x=random.randint(0,50)\n    \n    \n    fondojuego.move(ga2, 0, s)\n    \n    if(fondojuego.coords(ga2)[1]>700):\n        fondojuego.move(ga2,x,-700)\n\n    if(fondojuego.coords(ga2)[0]>=1220):\n        fondojuego.move(ga2,-203,0)\n\n\n#usercar\n\ndef keyup(e):\n    global x,ho\n\n    if(e.keycode in ho):\n        ho.pop(ho.index(e.keycode))\n    \n\ndef keydown(e):\n    global x,ho\n    if not e.keycode in ho:\n        \n        ho.append(e.keycode)\n    \n    \ndef key():\n    global ho\n    if(65 in ho): # keycode for the letter A\n        fondojuego.move(x,-5,0)\n    \n    if(68 in ho): # keycode for the letter D\n        fondojuego.move(x,5,0)\n    \ndef key2():\n    \n    if(37 in ho): # keycode for the left arrow\n        fondojuego.move(u2,-5,0)\n    if(39 in ho): # keycode for the right arrow\n        fondojuego.move(u2,5,0)\n\n    \n
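# Runner weaves between lanes: its sideways step each frame is a sine of its\n# current vertical position (period 300 px, amplitude 7 px) while it scrolls down.\n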
def Runner():\n    \"\"\"\n    This function moves the blue car, whose goal is to try to crash into the player's car, changing lanes\n    constantly while it moves vertically downward and horizontally from side to side.\n    \"\"\"\n    vv=3\n    z =fondojuego.coords(h)[0]\n    b= 7\n    \n    y= math.sin(2*fondojuego.coords(h)[1]*math.pi/(300))*b\n    if(fondojuego.coords(h)[1]>600):\n        fondojuego.move(h,100-z,-600)\n    q=0\n    fondojuego.move(h,y,vv)\n\n\ndef Runner2():\n    \"\"\"\n    This function moves the blue car for player 2's side; see Runner above.\n    \"\"\"\n    vv=3\n    z =fondojuego.coords(r2)[0]\n    b= 7\n    \n    y= math.sin(2*fondojuego.coords(r2)[1]*math.pi/(300))*b\n    if(fondojuego.coords(r2)[1]>=600):\n        fondojuego.move(r2,1015-z,-600)\n    q=0\n    fondojuego.move(r2,y,vv)\ndef colisionesbor():\n    \"\"\"\n    This function draws the explosion effect when player one's car touches either edge of the road;\n    if that happens, the player has lost the game.\n    \"\"\"\n    \n    x1=fondojuego.coords(x)[0]\n    y1=fondojuego.coords(x)[1]\n    \n\n    if(x1<=85):\n        coli=fondojuego.create_image(x1,y1,image=explosion)\n        return True\n    if(x1>=350):\n        coli=fondojuego.create_image(x1,y1,image=explosion)\n        return True\n\n\ndef colisionesbor2():\n    \"\"\"\n    This function draws the explosion effect when player two's car touches either edge of the road;\n    if that happens, the player has lost the game.\n    \"\"\"\n    \n    \n    x2= fondojuego.coords(u2)[0]\n    y2=fondojuego.coords(u2)[1]\n    \n\n    \n    if(x2<=1005):\n        coli=fondojuego.create_image(x2,y2,image=explosion)\n        return True\n    if(x2>=1275):\n        coli=fondojuego.create_image(x2,y2,image=explosion)\n        return True\n\ndef destruirall():\n\n    \"\"\"\n    This function tears down everything the game is currently running,\n    since it destroys both of its windows\n    \"\"\"\n    v.destroy()\n    ventana.destroy()\n\n\n\nii=0\n\ndef colisionescarros():\n    \n    '''\n    This function handles the crash effects between player one's car and the enemies; it also refuels\n    the car at the refuel ship and makes it slip on the oil slick.\n    '''\n    global x,contadorg1, contadorv1\n    \n\n\n\n    x1=fondojuego.coords(x)[0]\n    x2=fondojuego.coords(f)[0]\n    x3=fondojuego.coords(k)[0]\n    x4=fondojuego.coords(h)[0]\n    y1=fondojuego.coords(x)[1]\n    y2=fondojuego.coords(f)[1]\n    y3=fondojuego.coords(k)[1]\n    y4=fondojuego.coords(h)[1]\n    xr=fondojuego.coords(ga)[0]\n    yr=fondojuego.coords(ga)[1]\n    xm=fondojuego.coords(ma1)[0]\n    ym=fondojuego.coords(ma1)[1]\n    mm=random.randint(-7000,-5000)\n    \n\n    # with the fighter\n    \n    if(x1>=x2 and x1<=x2+26 and y1>=y2 and y1<=y2+53):\n        #nivel1sound.stop()\n        crash.play()\n        sonidofail.play()\n\n        coli=fondojuego.create_image(x1,y1,image=explosion)\n        boton1 = tkinter.Button(v, text=\"Exit: if you still have the guts to play, open me again\",command=destruirall).place(x=550, y=655)\n        contadorg1=0\n        contadorv1= contadorv1-5\n    \n    \n\n\n    elif(x1+26>=x2 and x1<=x2+26 and y1+26>=y2 and y1<=y2+53):\n        #nivel1sound.stop()\n        crash.play()\n        sonidofail.play()\n        coli=fondojuego.create_image(x1,y1,image=explosion)\n        boton1 = tkinter.Button(v, text=\"Exit: if you still have the guts to play, open me again\",command=destruirall).place(x=550, y=655)\n        contadorg1=0\n        contadorv1= contadorv1-5\n    \n\n    # with the van\n\n    \n    if(x1>=x3 and x1<=x3+26 and y1>=y3 and y1<=y3+53):\n        crash.play()\n        fondojuego.move(x,5,0)\n        contadorg1=contadorg1-250\n        contadorv1= contadorv1-5\n        if (fondojuego.move(x,5,0)):\n            fondojuego.move(x, 0,-5)\n\n\n    elif(x1+26>=x3 and x1<=x3+26 and y1+26>=y3 and y1<=y3+53):\n        crash.play()\n        if (fondojuego.move(x,-5,0)):\n            fondojuego.move(x, 0,-5)\n\n    # with the runner\n\n    \n    if(x1>=x4 and x1<=x4+26 and y1>=y4 and y1<=y4+52):\n        crash.play()\n        fondojuego.move(x,5,0)\n        contadorg1=contadorg1-250\n        contadorv1= contadorv1-5\n        if (fondojuego.move(x,5,0)):\n            fondojuego.move(x, 0,-5)\n\n\n    elif(x1+26>=x4 and x1<=x4+26 and y1+26>=y4 and y1<=y4+52):\n        crash.play()\n        if (fondojuego.move(x,-5,0)):\n            fondojuego.move(x, 0,-5)\n\n\n    # fuel recharger\n\n    \n\n    if(x1>=xr and x1<=xr+30 and y1>=yr and y1<=yr+35):\n        fondojuego.move(ga,0, mm)\n        recarga.play()\n        contadorg1=contadorg1+1000\n\n    \n\n    elif(x1+26>=xr and x1<=xr+30 and y1+26>=yr and y1<=yr+35):\n\n        fondojuego.move(ga,0, mm)\n        recarga.play()\n        contadorg1=contadorg1+1000\n\n    # oil slick\n\n    if(x1>=xm and x1<=xm+40 and y1>=ym and y1<=ym+40):\n        fondojuego.move(x,7, 0)\n        contadorg1=contadorg1-200\n
contadorg1=contadorg1-200\n contadorv1= contadorv1-5\n \n\n elif(x1+26>=xm and x1<=xm+40 and y1+26>=ym and y1<=ym+40):\n\n fondojuego.move(x,7, 0)\n contadorg1=contadorg1-200\n contadorv1= contadorv1-5\n \n\n\ndef colisionescarros2():\n global x,contadorg2, contadorv2\n '''\n Esta función se encarga de hacer el efecto de choque entre el carro del jugador dos y los enemigos, además permite recargar de gasolina con la\n nave de recarga, y a su vez resbalar en la mancha de aceite.\n\n '''\n \n \n \n\n x1=fondojuego.coords(u2)[0]\n x2=fondojuego.coords(f2)[0]\n x3=fondojuego.coords(van2)[0]\n x4=fondojuego.coords(r2)[0]\n y1=fondojuego.coords(u2)[1]\n y2=fondojuego.coords(f2)[1]\n y3=fondojuego.coords(van2)[1]\n y4=fondojuego.coords(r2)[1]\n xr=fondojuego.coords(ga2)[0]\n yr=fondojuego.coords(ga2)[1]\n xm=fondojuego.coords(ma2)[0]\n ym=fondojuego.coords(ma2)[1]\n mm=random.randint(-7000,-5000)\n \n\n # con el fighter\n \n if(x1>=x2 and x1<=x2+26 and y1>=y2 and y1<=y2+52):\n\n crash.play()\n sonidofail.play()\n coli=fondojuego.create_image(x1,y1,image=explosion)\n boton1 = tkinter.Button(v, text=\"Salir: si aun tienes las hagllas para jugar, ábreme de nuevo\",command=destruirall).place(x=550, y=655)\n contadorg2=0\n contadorv2= contadorv2-5\n\n elif(x1+26>=x2 and x1<=x2+26 and y1+26>=y2 and y1<=y2+52):\n\n crash.play()\n sonidofail.play()\n coli=fondojuego.create_image(x1,y1,image=explosion)\n boton1 = tkinter.Button(v, text=\"Salir: si aun tienes las hagllas para jugar, ábreme de nuevo\",command=destruirall).place(x=550, y=655)\n contadorg2=0\n contadorv2= contadorv2-5\n \n #con la van\n\n\n if(x1>=x3 and x1<=x3+26 and y1>=y3 and y1<=y3+53):\n crash.play()\n fondojuego.move(u2,5,0)\n contadorg2=contadorg2-250\n contadorv2= contadorv2-5 \n if (fondojuego.move(u2,5,0)):\n fondojuego.move(u2, 0,-5)\n\n elif(x1+26>=x3 and x1<=x3+26 and y1+26>=y3 and y1<=y3+53):\n crash.play()\n if (fondojuego.move(u2,-5,0)):\n fondojuego.move(u2, 0,-5)\n\n # con el runner\n\n if(x1>=x4 and x1<=x4+26 and y1>=y4 and y1<=y4+53):\n crash.play()\n fondojuego.move(u2,5,0)\n contadorg2=contadorg2-250\n contadorv2= contadorv2-5\n if (fondojuego.move(u2,5,0)):\n fondojuego.move(u2, 0,-5)\n\n elif(x1+26>=x4 and x1<=x4+26 and y1+26>=y4 and y1<=y4+53):\n crash.play()\n if (fondojuego.move(u2,-5,0)):\n fondojuego.move(u2, 0,-5)\n\n\n #recargador de gasolina\n\n\n if(x1>=xr and x1<=xr+30 and y1>=yr and y1<=yr+35):\n fondojuego.move(ga2,0, mm)\n recarga.play()\n contadorg2=contadorg2+1000\n\n elif(x1+26>=xr and x1<=xr+30 and y1+26>=yr and y1<=yr+35):\n fondojuego.move(ga2,0, mm)\n recarga.play()\n contadorg2=contadorg2+1000\n #mancha de aceite\n\n if(x1>=xm and x1<=xm+40 and y1>=ym and y1<=ym+40):\n fondojuego.move(u2,7, 0)\n contadorg2=contadorg2-200\n contadorv2= contadorv2-5\n \n\n elif(x1+26>=xm and x1<=xm+40 and y1+26>=ym and y1<=ym+40):\n\n fondojuego.move(u2,7, 0)\n contadorg2=contadorg2-200\n contadorv2= contadorv2-5\n\n\n\n\n \n\n \n\n \n \n\n\n \n\nvelocidadfondo=15\ndef fondomoving(fondoizquierda,velocidad):\n\n \"\"\"\n Esta función se encarga de mover la parte izquierda de los fondos, con el fin de crear el efecto de aceleración\n\n \"\"\"\n\n global fondojuego, v, velocidadfondo\n \n\n fondojuego.move(fondoizquierda, 0, velocidadfondo)\n if(fondojuego.coords(fondoizquierda)[1]>2500):\n fondojuego.move(fondoizquierda,0,-fondojuego.coords(fondoizquierda)[1])\n\ndef fondomoving2(fondoderecha,velocidad):\n \"\"\"\n Esta función se encarga de mover la parte derecha de los fondos, con el fin de crear el efecto de 
def fondomoving(fondoizquierda,velocidad):\n\n    \"\"\"\n    This function moves the left half of the backgrounds in order to create the acceleration effect\n    \"\"\"\n\n    global fondojuego, v, velocidadfondo\n    \n\n    fondojuego.move(fondoizquierda, 0, velocidadfondo)\n    if(fondojuego.coords(fondoizquierda)[1]>2500):\n        fondojuego.move(fondoizquierda,0,-fondojuego.coords(fondoizquierda)[1])\n\ndef fondomoving2(fondoderecha,velocidad):\n    \"\"\"\n    This function moves the right half of the backgrounds in order to create the acceleration effect\n    \"\"\"\n\n    global fondojuego, v, velocidadfondo\n    \n\n    \n    fondojuego.move(fondoderecha, 0, velocidadfondo)\n    if(fondojuego.coords(fondoderecha)[1]>2500):\n        fondojuego.move(fondoderecha,0,-fondojuego.coords(fondoderecha)[1])\n    \n#################\nv1=0 #\nv2=0 #\nv3=0 #\nF=0  #\n     # initialization parameters\n\nimagenizquierda=mari # \nimagenderecha=mard #\nvelocity=0 #\n#################\n\n# time label for player 1\ncontador1=60\ntiempojugador1=tkinter.StringVar()\n\n# fuel label for player 1\n\ncontadorg1=7000\ngasolinajugador1=tkinter.StringVar()\n\n# speed label for player 1\n\ncontadorv1=0\nvelocidadjugador1=tkinter.StringVar()\n\n# distance for player 1\n\ncontadord1=0\ndistanciajugador1=tkinter.StringVar()\n\n\n\n
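# principal() is the per-frame tick for player 1: it advances every sprite,\n# checks collisions, persists the run to archivo.txt, updates the HUD labels,\n# and re-schedules itself with v.after(15, ...) (roughly 66 fps) until the\n# player wins, crashes, or runs out of fuel.\n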
text=\"Felicitaciones: vuelve a abrirme y tendrás acceso al siguiente nivel\",command=pasar).place(x=540, y=655)\n return 0\n \n \n\n else:\n MiniVan2(v1)\n Fighter2(F,v2)#F es el movimiento en x, v2 el movimiento en y\n Runner2()\n fondomoving2(imagenderecha,velocity)\n mancha2(1)\n charge2(4)\n\n colisionescarros2()\n partida=open(\"archivo2.txt\",\"w\")\n if contador2>0:\n\n contador2=contador2-0.015\n partida.write(str(round(contador2)))\n partida.write(\"\\n\"+str(round(contadorg2)))\n partida.write(\"\\n\"+str(round(contadorv2)))\n partida.write(\"\\n\"+str(round(contadord2)))\n else:\n \n \n partida.write(str(round(contador2)))\n partida.write(\"\\n\"+str(round(contadorg2)))\n partida.write(\"\\n\"+str(round(contadorv2)))\n partida.write(\"\\n\"+str(round(contadord2)))\n contador2=contador2-0.015\n\n \n\n\n contador2=contador2-0.015 \n\n tiempojugador2.set(round(contador2))\n\n if contadorg2>0:\n\n contadorg2=contadorg2-1\n\n gasolinajugador2.set(round(contadorg2))\n key2()\n v.after(15, principal2)\n if contadorv2<200:\n\n contadorv2=contadorv2+0.09\n\n velocidadjugador2.set(round(contadorv2))\n\n contadord2=contadord1+0.09\n distanciajugador2.set(round(contadord1))\n\n \n\ndef lvl1():\n \"\"\"\n En esta función se hará el llamado de la función principal, se darán valores para las funciones que tienen parámetros( los cuales se han creado con el fin de\n controlar totalmente, y especialmente para poder darle dificultad a cada nivel , en este caso, al nivel 1), además se agregan condicionales que paran el juego\n en caso de que el jugador pierda la partida.\n\n \"\"\"\n global v1,v2,F,po, imagenizquierda, velocity, nombre, nombrecaja,tiempo1, tiempojugador1, contador1\n musicaMenu.stop()\n\n pygame.mixer.music.load(\"nivel1.wav\")\n pygame.mixer.music.play()\n nivel=1\n\n\n fondojuego.delete(moraizq)\n fondojuego.delete(morader)\n fondojuego.delete(tierrizq)\n fondojuego.delete(tierrder)\n fondojuego.delete(roizq)\n fondojuego.delete(roder)\n fondojuego.delete(c2)\n fondojuego.delete(c3)\n fondojuego.delete(c4)\n fondojuego.delete(sizq)\n fondojuego.delete(sder)\n fondojuego.delete(c5)\n\n\n\n #fondojuego.lower(c1)\n\n labeljugador1=tkinter.Label(v,text=nombre.get(), font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=325)\n labeljugador2=tkinter.Label(v,text=nombre2.get(), font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=325)\n labeltiempo1=tkinter.Label(v,textvariable=tiempojugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=550)\n labeltiempo2=tkinter.Label(v,textvariable=tiempojugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=550)\n labelgasolina1=tkinter.Label(v,textvariable=gasolinajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=450)\n labelgasolina2=tkinter.Label(v,textvariable=gasolinajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=770, y=450)\n labelvelocidad1=tkinter.Label(v,textvariable=velocidadjugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=390)\n labelvelocidad2=tkinter.Label(v,textvariable=velocidadjugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=390)\n labeldistancia1=tkinter.Label(v,textvariable=distanciajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=607)\n labeldistancia2=tkinter.Label(v,textvariable=distanciajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=607) \n 
    fondojuego.focus_set()\n    \n    \n    v.deiconify()\n    ventana.iconify()\n    v1=2\n    v2=2# F is the movement in x, v2 the movement in y, v1 the vertical movement of the van\n    F=1\n    velocity=10\n\n\n    principal()\n    principal2()\n\n\n    \n\n\nv.iconify()\nboton2=tkinter.Button(ventana, image=imagen_2boton,command=lvl1).place(x=1200, y=300)\n\n\n\ndef lvl2():\n    \"\"\"\n    Same as lvl1, but with the parameter values that set the difficulty of level 2.\n    \"\"\"\n    global v1,v2,F, imagenizquierda, velocity, imagenderecha, tiempojugador1, contador1, velocidadfondo\n    musicaMenu.stop()\n\n    pygame.mixer.music.load(\"nivel2.wav\")# level 2 song\n    pygame.mixer.music.play()# play the level 2 song\n\n\n    velocidadfondo=20\n    fondojuego.delete(moraizq)\n    fondojuego.delete(morader)\n    fondojuego.delete(roizq)\n    fondojuego.delete(roder)\n    fondojuego.delete(c1)\n    fondojuego.delete(c3)\n    fondojuego.delete(c4)\n    fondojuego.delete(sizq)\n    fondojuego.delete(sder)\n    fondojuego.delete(c5)\n    labeljugador1=tkinter.Label(v,text=nombre.get(), font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=540, y=339)\n    labeljugador2=tkinter.Label(v,text=nombre2.get(), font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=740, y=339)\n    labeltiempo1=tkinter.Label(v,textvariable=tiempojugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=560)\n    labeltiempo2=tkinter.Label(v,textvariable=tiempojugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=560)\n    labelgasolina1=tkinter.Label(v,textvariable=gasolinajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=480)\n    labelgasolina2=tkinter.Label(v,textvariable=gasolinajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=480)\n    labelvelocidad1=tkinter.Label(v,textvariable=velocidadjugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=400) \n    labelvelocidad2=tkinter.Label(v,textvariable=velocidadjugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=400)\n    labeldistancia1=tkinter.Label(v,textvariable=distanciajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=620)\n    labeldistancia2=tkinter.Label(v,textvariable=distanciajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=620) \n\n    fondojuego.focus_set()\n    v.deiconify()\n    ventana.iconify()\n    v1=4# F is the movement in x, v2 the movement in y, v1 the vertical movement of the van\n    v2=3\n    F=1.2\n    imagenizquierda=tierrizq\n    imagenderecha=tierrder\n    velocity=13\n    principal()\n    principal2()\n\nv.iconify()\nboton3=tkinter.Button(ventana, image=imagen_3boton,command=lvl2).place(x=1200, y=350)\n\ndef lvl3():\n\n    \"\"\"\n    Same as lvl1, but with the parameter values that set the difficulty of level 3.\n    \"\"\"\n    global v1,v2,F,imagenizquierda, velocity, imagenderecha, tiempojugador1, contador1, velocidadfondo\n\n    musicaMenu.stop()\n
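    # Each level swaps in its own soundtrack, background art, scroll speed, and\n    # enemy speeds (v1, v2, F) before handing control to principal()/principal2().\n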
pygame.mixer.music.load(\"nivel3.wav\")\n pygame.mixer.music.play()\n\n\n velocidadfondo=30\n\n fondojuego.delete(tierrizq)\n fondojuego.delete(tierrder)\n fondojuego.delete(roizq)\n fondojuego.delete(roder)\n fondojuego.delete(c2)\n fondojuego.delete(c1)\n fondojuego.delete(c4)\n fondojuego.delete(sizq)\n fondojuego.delete(sder)\n fondojuego.delete(c5)\n labeljugador1=tkinter.Label(v,text=nombre.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=545, y=329)\n labeljugador2=tkinter.Label(v,text=nombre2.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=745, y=329)\n labeltiempo1=tkinter.Label(v,textvariable=tiempojugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=550)\n labeltiempo2=tkinter.Label(v,textvariable=tiempojugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=550)\n labelgasolina1=tkinter.Label(v,textvariable=gasolinajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=480)\n labelgasolina2=tkinter.Label(v,textvariable=gasolinajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=480)\n labelvelocidad1=tkinter.Label(v,textvariable=velocidadjugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=565, y=390) \n labelvelocidad2=tkinter.Label(v,textvariable=velocidadjugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=390)\n labeldistancia1=tkinter.Label(v,textvariable=distanciajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=612)\n labeldistancia2=tkinter.Label(v,textvariable=distanciajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=612) \n fondojuego.focus_set()\n v.deiconify()\n ventana.iconify()\n v1=5#F es el movimiento en x, v2 el movimiento en y, v1 el movimiento vertical de la van\n v2=4\n F=1.5\n imagenizquierda=moraizq\n imagenderecha=morader\n velocity=15\n principal()\n principal2()\nv.iconify()\nboton4=tkinter.Button(ventana, image=imagen_4boton,command=lvl3).place(x=1200, y=400)\n\ndef lvl4():\n\n \"\"\"\n En esta función se hará el llamado de la función principal, se darán valores para las funciones que tienen parámetros( los cuales se han creado con el fin de\n controlar totalmente, y especialmente para poder darle dificultad a cada nivel , en este caso, al nivel 2), además se agregan condicionales que paran el juego\n en caso de que el jugador pierda la partida.\n\n \"\"\"\n global v1,v2,F,imagenizquierda, velocity, imagenderecha, tiempojugador1, contador1, velocidadfondo\n\n musicaMenu.stop()\n pygame.mixer.music.load(\"nivel4.wav\")\n pygame.mixer.music.play()\n\n velocidadfondo=40\n\n\n fondojuego.delete(sizq)\n fondojuego.delete(sder)\n fondojuego.delete(c5)\n labeljugador1=tkinter.Label(v,text=nombre.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=540, y=339)\n labeljugador2=tkinter.Label(v,text=nombre2.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=740, y=339)\n labeltiempo1=tkinter.Label(v,textvariable=tiempojugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=550)\n labeltiempo2=tkinter.Label(v,textvariable=tiempojugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=550)\n labelgasolina1=tkinter.Label(v,textvariable=gasolinajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=460)\n labelgasolina2=tkinter.Label(v,textvariable=gasolinajugador2, font=(\"Tempus Sans 
ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=460)\n labelvelocidad1=tkinter.Label(v,textvariable=velocidadjugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=555, y=387) \n labelvelocidad2=tkinter.Label(v,textvariable=velocidadjugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=755, y=387)\n labeldistancia1=tkinter.Label(v,textvariable=distanciajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=607)\n labeldistancia2=tkinter.Label(v,textvariable=distanciajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=607) \n fondojuego.focus_set()\n v.deiconify()\n ventana.iconify()\n v1=6\n v2=5#F es el movimiento en x, v2 el movimiento en y, v1 el movimiento vertical de la van\n F=1.6\n velocity=15\n imagenizquierda=roizq\n imagenderecha=roder\n principal()\n principal2()\nv.iconify()\nboton5=tkinter.Button(ventana, image=imagen_5boton,command=lvl4).place(x=1200, y=450)\n\ndef lvl5():\n\n \"\"\"\n En esta función se hará el llamado de la función principal, se darán valores para las funciones que tienen parámetros( los cuales se han creado con el fin de\n controlar totalmente, y especialmente para poder darle dificultad a cada nivel , en este caso, al nivel 2), además se agregan condicionales que paran el juego\n en caso de que el jugador pierda la partida.\n\n \"\"\"\n global v1,v2,F,imagenizquierda, velocity, imagenderecha, tiempojugador1, contador1,velocidadfondo\n musicaMenu.stop()\n pygame.mixer.music.load(\"nivel5.wav\")\n pygame.mixer.music.play()\n\n velocidadfondo=50\n\n\n\n labeljugador1=tkinter.Label(v,text=nombre.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=540, y=339)\n labeljugador2=tkinter.Label(v,text=nombre2.get(), font=(\"Tempus Sans ITC\",20),fg=\"black\",bg=\"white\").place(x=740, y=339)\n labeltiempo1=tkinter.Label(v,textvariable=tiempojugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=570, y=550)\n labeltiempo2=tkinter.Label(v,textvariable=tiempojugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=765, y=550)\n labelgasolina1=tkinter.Label(v,textvariable=gasolinajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=460)\n labelgasolina2=tkinter.Label(v,textvariable=gasolinajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=460)\n labelvelocidad1=tkinter.Label(v,textvariable=velocidadjugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=387) \n labelvelocidad2=tkinter.Label(v,textvariable=velocidadjugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=387) \n labeldistancia1=tkinter.Label(v,textvariable=distanciajugador1, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=560, y=607)\n labeldistancia2=tkinter.Label(v,textvariable=distanciajugador2, font=(\"Tempus Sans ITC\",20),fg=\"blue\",bg=\"white\").place(x=760, y=607) \n fondojuego.focus_set()\n v.deiconify()\n ventana.iconify()\n v1=7\n v2=8\n F=2#F es el movimiento en x, v2 el movimiento en y, v1 el movimiento vertical de la van\n velocity=16\n imagenizquierda=sizq\n imagenderecha=sder\n principal()\n principal2()\nv.iconify()\nboton6=tkinter.Button(ventana, image=imagen_6boton,command=lvl5).place(x=1200, y=500)\n\n\n\ndef pasar():\n \"\"\"\n Esta función permite al usuario pasar al siguiente nivel, sabiendo que debe cerrar el juego y volverlo a abrir para tener aaceso al siguiente nivel\n\n \"\"\"\n v.destroy()\n 
def pasar():\n    \"\"\"\n    This function lets the user move on to the next level, knowing that the game must be closed and\n    reopened to gain access to the next level\n    \"\"\"\n    v.destroy()\n    ventana.destroy()\n\ndef cargardatos():\n    \"\"\"\n    This function loads previously stored data for both players and, where applicable,\n    lets them resume the game they were playing\n    \"\"\"\n    global contador1, contadorg1, contadorv1, contadord1, contador2, contadorg2, contadorv2, contadord2\n\n    \n\n    partida= open(\"archivo.txt\", \"r\")\n    contador1=eval(partida.readline())\n    print(contador1)\n    contadorg1=eval(partida.readline())\n    print(contadorg1)\n    contadorv1=eval(partida.readline())\n    print(contadorv1)\n    contadord1=eval(partida.readline())\n    print(contadord1)\n\n    partida2= open(\"archivo2.txt\", \"r\")\n    contador2=eval(partida2.readline())\n    print(contador2)\n    contadorg2=eval(partida2.readline())\n    print(contadorg2)\n    contadorv2=eval(partida2.readline())\n    print(contadorv2)\n    contadord2=eval(partida2.readline())\n    print(contadord2)\n    lvl1()\n\n    partida.close()\n    partida2.close()\n\ndef cargardatos2():\n    \"\"\"\n    This function loads previously stored data for both players and, where applicable,\n    lets them resume the game they were playing\n    \"\"\"\n    global contador1, contadorg1, contadorv1, contadord1, contador2, contadorg2, contadorv2, contadord2\n\n    \n\n    partida= open(\"archivo.txt\", \"r\")\n    contador1=eval(partida.readline())\n    print(contador1)\n    contadorg1=eval(partida.readline())\n    print(contadorg1)\n    contadorv1=eval(partida.readline())\n    print(contadorv1)\n    contadord1=eval(partida.readline())\n    print(contadord1)\n\n    partida2= open(\"archivo2.txt\", \"r\")\n    contador2=eval(partida2.readline())\n    print(contador2)\n    contadorg2=eval(partida2.readline())\n    print(contadorg2)\n    contadorv2=eval(partida2.readline())\n    print(contadorv2)\n    contadord2=eval(partida2.readline())\n    print(contadord2)\n    lvl2()\n\n\n    partida.close()\n    partida2.close()\ndef cargardatos3():\n    \"\"\"\n    This function loads previously stored data for both players and, where applicable,\n    lets them resume the game they were playing\n    \"\"\"\n    global contador1, contadorg1, contadorv1, contadord1, contador2, contadorg2, contadorv2, contadord2\n    \n\n    partida= open(\"archivo.txt\", \"r\")\n    contador1=eval(partida.readline())\n    print(contador1)\n    contadorg1=eval(partida.readline())\n    print(contadorg1)\n    contadorv1=eval(partida.readline())\n    print(contadorv1)\n    contadord1=eval(partida.readline())\n    print(contadord1)\n\n    partida2= open(\"archivo2.txt\", \"r\")\n    contador2=eval(partida2.readline())\n    print(contador2)\n    contadorg2=eval(partida2.readline())\n    print(contadorg2)\n    contadorv2=eval(partida2.readline())\n    print(contadorv2)\n    contadord2=eval(partida2.readline())\n    print(contadord2)\n    lvl3()\n\n    partida.close()\n    partida2.close()\n\ndef cargardatos4():\n    \"\"\"\n    This function loads previously stored data for both players and, where applicable,\n    lets them resume the game they were playing\n    \"\"\"\n    global contador1, contadorg1, contadorv1, contadord1, contador2, contadorg2, contadorv2, contadord2\n\n    \n\n    partida= open(\"archivo.txt\", \"r\")\n    contador1=eval(partida.readline())\n    print(contador1)\n    contadorg1=eval(partida.readline())\n    print(contadorg1)\n    contadorv1=eval(partida.readline())\n    print(contadorv1)\n    contadord1=eval(partida.readline())\n    print(contadord1)\n\n    partida2= open(\"archivo2.txt\", \"r\")\n    contador2=eval(partida2.readline())\n    print(contador2)\n    contadorg2=eval(partida2.readline())\n    print(contadorg2)\n    contadorv2=eval(partida2.readline())\n    print(contadorv2)\n    contadord2=eval(partida2.readline())\n    print(contadord2)\n    lvl4()\n\n    partida.close()\n    partida2.close()\n\n
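# The save files hold four lines per player - time, fuel, speed, and distance -\n# in exactly the order principal()/principal2() write them each frame.\n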
def cargardatos5():\n    \"\"\"\n    This function loads previously stored data for both players and, where applicable,\n    lets them resume the game they were playing\n    \"\"\"\n    global contador1, contadorg1, contadorv1, contadord1, contador2, contadorg2, contadorv2, contadord2, tiempojugador2\n\n    \n    partida= open(\"archivo.txt\", \"r\")\n    contador1=eval(partida.readline())\n    print(contador1)\n    contadorg1=eval(partida.readline())\n    print(contadorg1)\n    contadorv1=eval(partida.readline())\n    print(contadorv1)\n    contadord1=eval(partida.readline())\n    print(contadord1)\n\n    partida2= open(\"archivo2.txt\", \"r\")\n    contador2=eval(partida2.readline())\n    print(contador2)\n    contadorg2=eval(partida2.readline())\n    print(contadorg2)\n    contadorv2=eval(partida2.readline())\n    print(contadorv2)\n    contadord2=eval(partida2.readline())\n    print(contadord2)\n    lvl5()\n\n    partida.close()\n    partida2.close()\n    \n\n\n## Label and buttons that let the player load a saved game and choose where to resume it\n\nlabelcontinuar=tkinter.Label(ventana, text=\"Load game at: \",font=(\"Tempus Sans ITC\",15),bg=\"white\").place(x=50, y=615) \nContinuar=tkinter.Button(ventana,text=\"Level 1\" ,font=(\"Tempus Sans ITC\",14),command=cargardatos).place(x=230, y=615)\nContinuar2=tkinter.Button(ventana,text=\"Level 2\" ,font=(\"Tempus Sans ITC\",14),command=cargardatos2).place(x=330, y=615)\nContinuar3=tkinter.Button(ventana,text=\"Level 3\" ,font=(\"Tempus Sans ITC\",14),command=cargardatos3).place(x=430, y=615)\nContinuar4=tkinter.Button(ventana,text=\"Level 4\" ,font=(\"Tempus Sans ITC\",14),command=cargardatos4).place(x=530, y=615)\nContinuar5=tkinter.Button(ventana,text=\"Level 5\" ,font=(\"Tempus Sans ITC\",14),command=cargardatos5).place(x=630, y=615)\n\n\n\n# Bind the key events to the canvas\nfondojuego.bind(\"<KeyPress>\",keydown)\nfondojuego.bind(\"<KeyRelease>\",keyup)\n\n\n# pack (show what was drawn on the canvas)\n\nfondojuego.pack()\n\n\n# main loop that listens for events\nv.mainloop()\n","repo_name":"2203juan/Car-Game","sub_path":"juegofirstry/BodyGame.py","file_name":"BodyGame.py","file_ext":"py","file_size_in_byte":46442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"4154201974","text":"from unicorn_binance_rest_api.manager import BinanceRestApiManager\nimport logging\nimport os\n\n# https://docs.python.org/3/library/logging.html#logging-levels\nlogging.getLogger(\"unicorn_binance_rest_api\")\nlogging.basicConfig(level=logging.DEBUG,\n                    filename=os.path.basename(__file__) + '.log',\n                    format=\"{asctime} [{levelname:8}] {process} {thread} {module}: {message}\",\n                    style=\"{\")\n\n# Define API Key and Secret\nAPI_KEY = \"\"\nAPI_SECRET = \"\"\n\n# Define quantity in USDT to buy\nBUY_QUANTITY_USDT = 15\n\n# Define number of decimal places to round prices\nROUND_DECIMAL_PLACES = 2\n\n# Define the time in force for the stop limit order\nSTOP_LIMIT_TIME_IN_FORCE = \"GTC\" # ENUM: GTC, FOK, IOC - https://www.delta.exchange/blog/support/time-in-force-flags-on-delta-exchange-fok-ioc-gtc\n\n# Define the percentage gap between the buy price and the stop loss price\nSTOP_LOSS_GAP_TO_BUY_PRICE_IN_PERCENT = 2\n\n# Define the gap in USDT between the stop loss trigger and the stop loss price\nSTOP_LOSS_TRIGGER_GAP_USDT = 0.01\n\n# Define the percentage gap between the buy price and the take profit price\nTAKE_PROFIT_GAP_TO_BUY_PRICE_IN_PERCENT = 2\n\n# Create a BinanceRestApiManager instance with the exchange and API credentials\nubra = BinanceRestApiManager(exchange=\"binance.com-isolated_margin\", api_key=API_KEY, api_secret=API_SECRET)\n\n
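# An OCO (one-cancels-the-other) order pairs a take-profit limit order with a\n# stop-loss; as soon as one leg executes, the exchange cancels the other.\n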
specified USDT quantity\nbuy_order = ubra.create_margin_order(symbol=\"BTCUSDT\",\n isIsolated=\"TRUE\",\n side=\"BUY\",\n type=\"MARKET\",\n quoteOrderQty=BUY_QUANTITY_USDT)\nprint(f\"Buy Order Result: {buy_order}\")\n\n# If the buy order was filled\nif buy_order['status'] == \"FILLED\":\n # Calculate prices\n buy_price = float(buy_order['fills'][0]['price'])\n take_profit_price = buy_price * (100+TAKE_PROFIT_GAP_TO_BUY_PRICE_IN_PERCENT) / 100\n stop_loss_price = buy_price * (100-STOP_LOSS_GAP_TO_BUY_PRICE_IN_PERCENT) / 100\n stop_loss_price_trigger = stop_loss_price + STOP_LOSS_TRIGGER_GAP_USDT\n\n # Sell BTC with TakeProfit or StopLoss (oco order)\n oco_sell_order = ubra.create_margin_oco_order(symbol=\"BTCUSDT\",\n isIsolated=\"TRUE\",\n price=round(take_profit_price, ROUND_DECIMAL_PLACES),\n quantity=buy_order['executedQty'],\n side=\"SELL\",\n stopPrice=round(stop_loss_price_trigger, ROUND_DECIMAL_PLACES),\n stopLimitPrice=round(stop_loss_price, ROUND_DECIMAL_PLACES),\n stopLimitTimeInForce=STOP_LIMIT_TIME_IN_FORCE)\n print(f\"OCO Order Result: {oco_sell_order}\")","repo_name":"LUCIT-Systems-and-Development/unicorn-binance-rest-api","sub_path":"example_buy_and_sell_oco_order_isolated_margin.py","file_name":"example_buy_and_sell_oco_order_isolated_margin.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"16"} +{"seq_id":"74247255049","text":"from sklearn.externals import joblib\nimport os\n\ndef check_scaler():\n SCALER_PATH = 'zernike_scaler-latest'\n assert os.path.exists(SCALER_PATH)\n\n scaler = joblib.load(SCALER_PATH)\n \n assert hasattr(scaler, 'scale_') # Make sure not using a deprecated version of sklearn","repo_name":"zmr/namsel","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"} +{"seq_id":"70710845768","text":"from datetime import datetime\nimport sys\nimport numpy as np\n\ndef save_npz( save_dir, file_name, output_data, suffix_time:bool=True ):\n save_path = f\"{save_dir}\\{file_name}\"\n save_time = \"\"\n if suffix_time:\n save_time = str(datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n save_time = \"_\"+save_time\n save_path = save_path+save_time+\".npz\" \n np.savez(save_path, **output_data)","repo_name":"shiau109/QM_opt","sub_path":"exp/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72161148489","text":"class QueryBuilder(object):\n def __init__(self, cube, **kwargs):\n super(QueryBuilder, self).__init__()\n self.cube = cube\n self.path = [[] for _ in range(cube.get_dimension())]\n self.precision = [0 for _ in range(cube.get_dimension())]\n self.timeframe = [None, None]\n\n if kwargs.has_key('debug'):\n self._init_debug()\n else:\n self.debug = False\n\n def find(self, dimension_name, dive):\n if dimension_name not in self.cube.dimensions and dimension_name != \"Location\":\n raise Exception(\"Dimension '{0}' not found.\".format(dimension_name))\n\n if dimension_name == \"Location\":\n path_index = 0\n else:\n path_index = self.cube.dimensions.index(dimension_name) + 1\n\n if len(self.path[path_index]) != 0:\n raise Exception(\"Try to override dimension: {0}\".format(dimension_name))\n\n if isinstance(dive, tuple):\n new_path = dive[0]\n new_precision = dive[1]\n else:\n new_path = [dive]\n 
new_precision = 0\n\n self.path[path_index] = new_path\n self.precision[path_index] = new_precision\n return self\n\n def after(self, date):\n self.timeframe[0] = date\n return self\n\n def before(self, date):\n self.timeframe[1] = date\n return self\n\n def between(self, begin, end):\n self.after(begin)\n self.before(end)\n return self\n\n def _retrieve_timeserietable(self, path):\n node = self.cube.world\n\n remaining_levels = self.cube.get_dimension()\n for level_path in path:\n if self.debug:\n visited_links = []\n\n for key in level_path:\n if node.has_shared_content:\n break\n node = node.get_child(key)\n if node is None:\n return node\n elif self.debug:\n visited_links.append(key)\n\n remaining_levels -= 1\n node = node.content\n if self.debug:\n self.trace.append(visited_links)\n\n for i in range(remaining_levels):\n node = node.content\n if self.debug:\n self.trace.append([])\n\n return node\n\n def execute(self, **kwargs):\n if kwargs.has_key('debug'):\n self._init_debug()\n\n table = self._retrieve_timeserietable(self.path)\n if table is None:\n return 0\n else:\n return table.query(self.timeframe[0], self.timeframe[1])\n\n def _init_debug(self):\n self.debug = True\n self.trace = []","repo_name":"nano-db/NanoCube","sub_path":"server/querybuilder.py","file_name":"querybuilder.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70574049927","text":"from __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport six\n\nfrom pwd import getpwnam\n\nimport pytest\n\nfrom vdsm.common import supervdsm\nfrom vdsm.constants import VDSM_USER\n\n\n@pytest.fixture\ndef dropped_privileges():\n vdsm_uid, vdsm_gid = getpwnam(VDSM_USER)[2:4:]\n os.setgroups([])\n os.setgid(vdsm_gid)\n os.setuid(vdsm_uid)\n\n\n@pytest.mark.skipif(os.geteuid() != 0, reason=\"Requires root\")\ndef test_ping_call(dropped_privileges):\n proxy = supervdsm.getProxy()\n assert bool(proxy.ping())\n\n\n# This requires environment with tmpfs mounted to /sys/kernel/mm/ksm\n@pytest.mark.skipif(os.geteuid() != 0, reason=\"Requires root\")\ndef test_ksm_action(dropped_privileges):\n proxy = supervdsm.getProxy()\n ksmParams = {\"run\": 0,\n \"merge_across_nodes\": 1,\n \"sleep_millisecs\": 0xffff,\n \"pages_to_scan\": 0xffff}\n proxy.ksmTune(ksmParams)\n\n for k, v in six.iteritems(ksmParams):\n with open(\"/sys/kernel/mm/ksm/%s\" % k, \"r\") as f:\n assert str(v) == f.read().rstrip()\n","repo_name":"oVirt/vdsm","sub_path":"tests/functional/supervdsmFuncTests.py","file_name":"supervdsmFuncTests.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"} +{"seq_id":"35606814654","text":"#!/usr/bin/env python\nimport pyowm\nimport time\n\nowm = pyowm.OWM('3cc9239f71004c1fa171a50d24e460e6')\nfilePath = \"/home/gbk/data/weatherTracker/\"\n\"\"\"\nfile structure\ntimeStamp,tempInFahrenheit['temp'],humidity,detailedStatus,rainVolume,wind-deg,wind-spe$\nGPS Coords for my house = 39.857979, -89.544616\n\"\"\"\ndef buildRow(owm):\n weatherRow = []\n observation = owm.weather_at_coords(39.857979, -89.544616)\n w = observation.get_weather()\n tempInFahrenheit = w.get_temperature('fahrenheit')\n humidity = w.get_humidity()\n timeStamp = time.time()\n detailedStatus = w.get_detailed_status()\n rain = w.get_rain()\n try:\n rainVolume = rain['3h']\n except KeyError:\n rainVolume = 0\n wind = w.get_wind()\n clouds = 
w.get_clouds()\n weatherRow.extend([str(timeStamp), str(tempInFahrenheit['temp']), str(humidity), detailedStatus])\n weatherRow.extend([str(rainVolume), str(wind['deg']), str(wind['speed']), str(clouds)])\n return weatherRow\n\ntheDate = time.strftime(\"%Y_%m_%d\")\nweatherFile = filePath + theDate + \"_weather_obs.txt\"\ndelimiter = ','\nrow = buildRow(owm)\nwith open(weatherFile, 'a') as outFile:\n outFile.write(delimiter.join(row) + '\\n')\n\n","repo_name":"locwyn/weatherTracker","sub_path":"pullWeatherBeta.py","file_name":"pullWeatherBeta.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70942001929","text":"from datetime import timedelta, datetime\nfrom django.shortcuts import render\nimport math\nimport configparser\nimport psycopg2\nfrom sys import exit\nfrom functools import lru_cache\n\nconfig = configparser.ConfigParser()\nconfig.read('../conf.ini')\n\npghost = config['Postgres']['host']\npguser = config['Postgres']['user']\npgpass = config['Postgres']['password']\npgdb = config['Postgres']['db']\ntweetaccounts = config['Twitter']['accounts']\ntzoffset = int(config['Timezone']['tzoffset'])\ninterval = config['Postgres']['interval']\n\n# Create your views here.\ndef index(request):\n context = dict()\n return render(request, \"twitter/index.html\", context=context)\n\ndef scroller(request):\n #if request.method == \"GET\":\n # pull tweets for the last 5 minutes and pass them to template\n data = list()\n refresh = int(int(interval) / 2 * 60)\n try:\n conn = psycopg2.connect(f\"dbname={pgdb} user={pguser} password={pgpass} host={pghost}\")\n except Exception as e:\n print(e)\n exit(\"Unable to connect to postgres database!\")\n cur = conn.cursor()\n cur.execute(\n f\"SELECT accname, hashtags, symbols, ttext, created from twitter where created >= CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - INTERVAL '{interval} minutes' ORDER BY created DESC;\")\n tweets = cur.fetchall()\n for tweet in tweets:\n local = tweet[4] + timedelta(hours=tzoffset)\n twDict = {\"account\": tweet[0], \"hashtags\": tweet[1], \"symbols\": tweet[2], \"ttext\": tweet[3], \"created\": local}\n data.append(twDict)\n # 12 tweets displayed / minute\n # expand the dictionary to accomodate this\n runMins = round(int(interval) / 2)\n tweetsNeeded = runMins * 12\n\n # 4 tweets fit on a page, if less than this, don't duplicate tweets, if not duplicate\n #print(\"Initial length: \", len(data))\n if len(data) > 4:\n dataLength = len(data)\n multiplier = round(tweetsNeeded / dataLength)\n data = data * multiplier\n #print(\"With multplier: \", len(data))\n return render(request, \"twitter/scroller.html\", {'refresh': refresh, 'interval': interval, \"data\": data})\n\n#@lru_cache(maxsize=2)\ndef feeds(request):\n \"\"\"Display a list of feeds from the config file with additional information\"\"\"\n # also pull additional information on them from db and display it\n # get info from config file\n data = dict()\n # {barrons: {name: barrons, id: barrons, description: , url: , followers: }}\n tweetList0 = tweetaccounts.splitlines()\n tweetList = [item for item in tweetList0 if item]\n try:\n conn = psycopg2.connect(f\"dbname={pgdb} user={pguser} password={pgpass} host={pghost}\")\n except Exception as e:\n print(e)\n exit(\"Unable to connect to postgres database!\")\n cur = conn.cursor()\n # select distinct on (acctid), tjson from twitter where acctid in {list from conf.ini}\n cur.execute(f\"SELECT DISTINCT ON (acctid) acctid, tjson 
from twitter where acctid in {tuple(tweetList)};\")\n    accounts = cur.fetchall()\n    for item in accounts:\n        acctName = item[0]\n        acctDict = {\"username\": item[1][\"user\"][\"name\"], \"screenname\": item[1][\"user\"][\"screen_name\"],\n                    \"description\": item[1][\"user\"][\"description\"], \"url\": item[1][\"user\"][\"url\"],\n                    \"followers\": item[1][\"user\"][\"followers_count\"], \"totaltweets\": item[1][\"user\"][\"statuses_count\"]}\n        data.update({acctName: acctDict})\n\n    return render(request, \"twitter/feeds.html\", {'data': data})\n\n\n","repo_name":"stlewandowski/twitterscroller","sub_path":"companalysis/twitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1285023859","text":"import time\r\nfrom urllib.request import urlopen\r\nfrom urllib.request import urlretrieve\r\nfrom bs4 import BeautifulSoup\r\nbaseurl = 'https://read.douban.com'\r\npath = '/ebooks/'\r\nabsoluteurl = baseurl + path\r\nhtml = urlopen(absoluteurl)\r\nbsObj = BeautifulSoup(html,'lxml')\r\ntags = bsObj.find('ul',class_='list categories-list')\r\nfor i in tags.children:\r\n    path = i.a['href']\r\n    absoluteurl = baseurl+path+'?cat=book&sort=top&start='\r\n    k = 0\r\n    while k<999:\r\n        # build the URL for this page instead of appending to absoluteurl on every pass\r\n        pageurl = absoluteurl + str(k)\r\n        html1 = urlopen(pageurl)\r\n        bsObj1 = BeautifulSoup(html1,'lxml')\r\n        a = bsObj1.findAll('div', class_='cover shadow-cover')\r\n        b = bsObj1.findAll('div', class_='title')\r\n        time.sleep(5)\r\n        c = [urlretrieve(img.img['src'],'E:\\\\python project\\\\spider\\\\downloaded\\\\%s.jpg'%title.get_text()) for (img,title) in zip(a,b)]\r\n        # advance to the next page of results (douban lists appear to page in steps of 20)\r\n        k += 20","repo_name":"shuangluhuifeng/xinjy","sub_path":"paqudoubanpictures.py","file_name":"paqudoubanpictures.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34174378888","text":"import math\nfrom collections import deque\n\nn, k = map(int, input().split())\narr = [[0, 0]] + [list(map(int, input().split())) for _ in range(n)]\n\ndef dist1(x1, y1, x2, y2):\n    return math.ceil((((x1-x2)**2 + (y1-y2)**2) ** 0.5) / 10)\n\ndef dist2(x1, y1):\n    return math.ceil((((10000-x1)**2 + (10000-y1)**2) ** 0.5) / 10)\n\ndef bfs(mid):\n    visited = [0]*(n+1)\n    q = deque()\n    q.append((0, 0))\n    while q:\n        t, cnt = q.popleft()\n        if cnt > k:\n            continue\n        for i in range(1, n+1):\n            if visited[i] == 0:\n                if dist1(arr[t][0], arr[t][1], arr[i][0], arr[i][1]) <= mid:\n                    if dist2(arr[i][0], arr[i][1]) <= mid:\n                        return True\n                    q.append((i, cnt+1))\n                    visited[i] = 1\n    return False\n\nl, r = 0, 100000001\nans = 0\nwhile l <= r:\n    mid = (l+r)//2\n    if bfs(mid):\n        ans = mid\n        r = mid-1\n    else:\n        l = mid+1\n\nprint(ans)","repo_name":"seho27060/sep-algo-study","sub_path":"0913/2585_yuzu.py","file_name":"2585_yuzu.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"15497620026","text":"from .client import DollarClient, RofexClient, CmeClient, BloombergClient\nfrom .constants import CME_BANKS_URLS, BLOOMBERG_BANKS_URLS\nfrom datetime import datetime\nfrom pytz import timezone\n\n\nclass DatetimeService:\n    @staticmethod\n    def get_time() -> str:\n        \"\"\"\n        It returns a string with the current date and time in the timezone\n        \"America/Argentina/Buenos_Aires\"\n        :return: A string with the current date and time in the example format: YYYY-MM-DD HH:MM:SS\n        \"\"\"\n        TIMEZONE: str = 
\"America/Argentina/Buenos_Aires\"\n now_bs_as_arg: datetime = datetime.now().astimezone(timezone(TIMEZONE))\n return now_bs_as_arg.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\nclass DollarService:\n @staticmethod\n def get_nacion_bank_scraping_data() -> float:\n \"\"\"\n It gets the dollar value from the website of the National Bank of Argentina\n :return: The value of the dollar in the Nacion Bank\n \"\"\"\n try:\n nacion_bank = DollarClient.get_nacion_bank_data()\n\n if nacion_bank is None:\n return 0\n\n dollar_value: str = (\n nacion_bank.find(\"div\", id=\"divisas\")\n .find(\"tbody\")\n .find_all(\"td\")[2]\n .text\n )\n dollar: float = round(float(dollar_value), 2)\n return dollar\n\n except:\n return -1\n\n\nclass RofexService:\n @staticmethod\n def get_rofex_scraping_data() -> dict:\n \"\"\"\n It gets the data from the Rofex website, and returns a dictionary with the values\n :return: A dictionary with the values of the rofex bank.\n \"\"\"\n try:\n ROFEX_DICT: dict = dict()\n rofex_bank = RofexClient.get_rofex_bank_data()\n\n if rofex_bank is None:\n return dict()\n\n rofex_values = (\n rofex_bank.find(\"div\", {\"class\": \"table-responsive\"})\n .find(\"tbody\")\n .find_all(\"tr\")\n )\n\n for rof in rofex_values:\n rofex_title: str = rof.find_all(\"td\")[0].text\n rofex_value: float = round(float(rof.find_all(\"td\")[1].text), 2)\n ROFEX_DICT[rofex_title] = rofex_value\n\n return ROFEX_DICT\n\n except:\n return dict()\n\n\nclass CmeService:\n\n def __get_cme_values(self, name_page: str, response: dict) -> dict:\n try:\n CME_VALUES: list = list()\n\n for i in range(6):\n if (i == 0) and (response[\"quotes\"][i][\"last\"] != \"-\"):\n cme_value = response[\"quotes\"][i][\"last\"]\n cme_value = response[\"quotes\"][i][\"priorSettle\"]\n\n cme_value: float = round(float(cme_value), 2)\n CME_VALUES.append(cme_value)\n\n return {\n f\"{name_page}_M0\": CME_VALUES[0],\n f\"{name_page}_M1\": CME_VALUES[1],\n f\"{name_page}_M2\": CME_VALUES[2],\n f\"{name_page}_M3\": CME_VALUES[3],\n f\"{name_page}_M4\": CME_VALUES[4],\n f\"{name_page}_M5\": CME_VALUES[5],\n }\n\n except:\n return dict()\n\n def get_cme_scraping_data(self) -> dict:\n \"\"\"\n It takes the CME_BANKS_URLS dictionary, iterates through it, and for each key-value pair, it\n makes a request to the url, and then parses the response to get the data we want\n :return: A dictionary with the name of the bank and a dictionary with the values of the bank.\n \"\"\"\n CME_SCRAP: dict = dict()\n\n for name_page, url in CME_BANKS_URLS.items():\n\n response = CmeClient.get_cme_bank_data(url=url)\n\n if response is None:\n return dict()\n\n CME_SCRAP[name_page] = self.__get_cme_values(name_page, response)\n\n return CME_SCRAP\n\n\nclass BloombergService:\n\n def __get_bloomberg_values(self, response: dict) -> list:\n try:\n BLOOMBERG_VALUES: list = list()\n\n for i in range(2):\n bloomberg_price: float = round(\n response[\"fieldDataCollection\"][i][\"price\"], 2)\n\n bloomberg_priceChange: float = round(\n response[\"fieldDataCollection\"][i][\"priceChange1Day\"], 2)\n\n BLOOMBERG_VALUES.append(bloomberg_price)\n BLOOMBERG_VALUES.append(bloomberg_priceChange)\n\n return BLOOMBERG_VALUES\n\n except:\n return list()\n\n def get_bloomberg_scraping_data(self) -> dict:\n \"\"\"\n It takes the Bloomberg URLs and returns a dictionary with the bank names as keys and a list of\n the price and price change as values\n :return: A dictionary with the name of the bank as the key and a list of values as the value.\n \"\"\"\n BLOOMBRG_SCRAP: dict = 
dict()\n\n for name_page, url in BLOOMBERG_BANKS_URLS.items():\n\n response = BloombergClient.get_bloomberg_bank_data(url=url)\n\n if response is None:\n return dict()\n\n BLOOMBRG_SCRAP[name_page] = self.__get_bloomberg_values(response)\n\n return BLOOMBRG_SCRAP\n","repo_name":"RRodriQZ/Dollar-Web-Scraping","sub_path":"app/api/v1/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"78308780","text":"import collections\n\nhand = input().split()\nfirstC = []\ni = 0\n#print(len(hand))\nfor i in range(len(hand)):\n temp = list(hand[i])\n firstC.append(temp[0])\n\n#print(hand)\n#print(firstC)\ncounter = collections.Counter(firstC)\nbla = counter.most_common(1)\n#print(counter)\n#print(str(bla))\nmCommon = list(str(bla))\nprint(mCommon[7])\n#print(mCommon)","repo_name":"HendrikM25/kattisSolutions","sub_path":"python/kattisPractice/pokerHand.py","file_name":"pokerHand.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29963318201","text":"from typing import List\n\nfrom tqdm.auto import tqdm\n\n\ndef count_lines_in_file(file_path: str, buffer_size: int = 1024 * 1024) -> int:\n \"\"\"Count the number of lines in the given file.\n\n :param file_path: path to the file\n :param buffer_size: size of temporary buffer during reading the file\n :return: number of lines\n \"\"\"\n n_lines = 0\n with open(file_path, \"rb\") as file:\n file_reader = file.read\n buffer = file_reader(buffer_size)\n while buffer:\n n_lines += buffer.count(b\"\\n\")\n buffer = file_reader(buffer_size)\n return n_lines\n\n\ndef get_lines_offsets(file_path: str, show_progress_bar: bool = True) -> List[int]:\n \"\"\"Calculate cumulative offsets for all lines in the given file.\n\n :param file_path: path to the file\n :param show_progress_bar: if True then tqdm progress bar will be display\n :return: list of ints with cumulative offsets\n \"\"\"\n line_offsets: List[int] = []\n cumulative_offset = 0\n with open(file_path, \"r\") as file:\n file_iter = tqdm(file, total=count_lines_in_file(file_path)) if show_progress_bar else file\n for line in file_iter:\n line_offsets.append(cumulative_offset)\n cumulative_offset += len(line.encode(file.encoding))\n return line_offsets\n\n\ndef get_line_by_offset(file_path: str, offset: int) -> str:\n \"\"\"Get line by byte offset from the given file.\n\n :param file_path: path to the file\n :param offset: byte offset\n :return: read line\n \"\"\"\n with open(file_path, \"r\") as data_file:\n data_file.seek(offset)\n line = data_file.readline().strip()\n return line\n","repo_name":"SpirinEgor/commode-utils","sub_path":"commode_utils/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41739247912","text":"#Rainfall Prediction using Random Forest\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import resample\nimport seaborn as sns\n\nfull_data = pd.read_csv('weatherAUS.csv')\nfull_data.head()\n\n\nfull_data['RainToday'].replace({'No': 0, 'Yes': 1},inplace = True)\nfull_data['RainTomorrow'].replace({'No': 0, 'Yes': 1},inplace = True)\n\nno = full_data[full_data.RainTomorrow == 0]\nyes = full_data[full_data.RainTomorrow == 1]\nyes_oversampled = resample(yes, replace=True, n_samples=len(no), 
random_state=123)\noversampled = pd.concat([no, yes_oversampled])\n\n\n\ntotal = oversampled.isnull().sum().sort_values(ascending=False)\npercent = (oversampled.isnull().sum()/oversampled.isnull().count()).sort_values(ascending=False)\nmissing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\nmissing.head(4)\n\n\n# Imputing missing data using MICE (Multiple Imputation by Chained Equations)\n\noversampled.select_dtypes(include=['object']).columns\n\n# Impute categorical var with Mode\noversampled['Date'] = oversampled['Date'].fillna(oversampled['Date'].mode()[0])\noversampled['Location'] = oversampled['Location'].fillna(oversampled['Location'].mode()[0])\noversampled['WindGustDir'] = oversampled['WindGustDir'].fillna(oversampled['WindGustDir'].mode()[0])\noversampled['WindDir9am'] = oversampled['WindDir9am'].fillna(oversampled['WindDir9am'].mode()[0])\noversampled['WindDir3pm'] = oversampled['WindDir3pm'].fillna(oversampled['WindDir3pm'].mode()[0])\n\n# Convert categorical features to continuous features with Label Encoding\nfrom sklearn.preprocessing import LabelEncoder\nlencoders = {}\nfor col in oversampled.select_dtypes(include=['object']).columns:\n    lencoders[col] = LabelEncoder()\n    oversampled[col] = lencoders[col].fit_transform(oversampled[col])\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Multiple Imputation by Chained Equations\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nMiceImputed = oversampled.copy(deep=True) \nmice_imputer = IterativeImputer()\nMiceImputed.iloc[:, :] = mice_imputer.fit_transform(oversampled)\n\n# Detecting outliers with IQR\nQ1 = MiceImputed.quantile(0.25)\nQ3 = MiceImputed.quantile(0.75)\nIQR = Q3 - Q1\n\n# Removing outliers from the dataset\nMiceImputed = MiceImputed[~((MiceImputed < (Q1 - 1.5 * IQR)) |(MiceImputed > (Q3 + 1.5 * IQR))).any(axis=1)]\nMiceImputed.shape\n\n\n# Standardizing data\nfrom sklearn import preprocessing\nr_scaler = preprocessing.MinMaxScaler()\nr_scaler.fit(MiceImputed)\nmodified_data = pd.DataFrame(r_scaler.transform(MiceImputed), index=MiceImputed.index, columns=MiceImputed.columns)\n\n# Feature Importance using Filter Method (Chi-Square)\nfrom sklearn.feature_selection import SelectKBest, chi2\nX = modified_data.loc[:,modified_data.columns!='RainTomorrow']\ny = modified_data[['RainTomorrow']]\nselector = SelectKBest(chi2, k=10)\nselector.fit(X, y)\nX_new = selector.transform(X)\n\n\n#Training Rainfall Prediction Model with Random Forest\n\n\nfeatures = MiceImputed[['Location', 'MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', 'WindGustDir', \n                        'WindGustSpeed', 'WindDir9am', 'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am', \n                        'Humidity3pm', 'Pressure9am', 'Pressure3pm', 'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm', \n                        'RainToday']]\ntarget = MiceImputed['RainTomorrow']\n\n# Split into test and train (85% for training 15% for testing)\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.15, random_state=12345)\n\n# Normalize Features (fit the scaler on the training set only, then apply it to the test set)\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\nimport time\nfrom sklearn.metrics import accuracy_score, roc_auc_score, cohen_kappa_score, roc_curve, classification_report\ndef run_model(model, X_train, y_train, X_test, y_test, verbose=True):\n    t0=time.time()\n    if verbose == False:\n        model.fit(X_train,y_train, verbose=0)\n    else:\n        model.fit(X_train,y_train)\n    y_pred = model.predict(X_test)\n    accuracy = accuracy_score(y_test, y_pred)\n    roc_auc = roc_auc_score(y_test, y_pred) \n    coh_kap = cohen_kappa_score(y_test, y_pred)\n    time_taken = time.time()-t0\n    print(\"Accuracy = {}\".format(accuracy))\n    print(\"ROC Area under Curve = {}\".format(roc_auc))\n    print(\"Cohen's Kappa = {}\".format(coh_kap))\n    print(\"Time taken = {}\".format(time_taken))\n    \n    probs = model.predict_proba(X_test) \n    probs = probs[:, 1] \n    fper, tper, thresholds = roc_curve(y_test, probs) \n    \n    return model, accuracy, roc_auc, coh_kap, time_taken\n\n#Training using Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\n\nparams_rf = {'max_depth': 16,\n             'min_samples_leaf': 1,\n             'min_samples_split': 2,\n             'n_estimators': 100,\n             'random_state': 12345}\n\nmodel_rf = RandomForestClassifier(**params_rf)\nmodel_rf, accuracy_rf, roc_auc_rf, coh_kap_rf, tt_rf = run_model(model_rf, X_train, y_train, X_test, y_test)\n\n#Accuracy= 0.9555649597968684\n#95.5%\n\n\n#Testing Random Data using the existing Data\ndata = {\n    'MinTemp': [7.7], \n    'MaxTemp': [26.7], \n    'Rainfall':[0],\n    'Evaporation': [5.85812957879808], \n    'Sunshine': [11.9605549245276], \n    'WindGustDir': [13],\n    'WindGustSpeed': [35], \n    'WindDir9am': [10], \n    'WindDir3pm': [13], \n    'WindSpeed9am': [6], \n    'WindSpeed3pm': [17], \n    'Humidity9am': [48], \n    'Humidity3pm': [19], \n    'Pressure9am': [1010.8], \n    'Pressure3pm': [1008.6], \n    'Cloud9am': [1.55340991681667], \n    'Cloud3pm': [2.26956647477166], \n    'Temp9am': [16.3], \n    'Temp3pm': [25.5],\n    'RainToday':[0]}\nsample_df = pd.DataFrame(data)\n\n\n# Make the prediction\nprediction = model_rf.predict_proba(sample_df)[0,1]\n\nprint(\"prediction= {}\".format(prediction))\n\n#gives probability => 0.39894736842105263 \n\nif prediction<(0.25):\n    print(\"No rain is expected tomorrow\")\nelse:\n    print(\"Rain is expected tomorrow\") \n\n\n\n\n#Taking User Input\n\nmin_temp = float(input(\"Enter the minimum temperature: \"))\nmax_temp = float(input(\"Enter the maximum temperature: \"))\nRainfall = float(input(\"Enter the Rainfall: \"))\nevaporation = float(input(\"Enter the evaporation: \"))\nsunshine = float(input(\"Enter the sunshine: \"))\nwind_gust_dir = input(\"Enter the wind gust direction: \")\nwind_gust_speed = float(input(\"Enter the wind gust speed: \"))\nwind_dir_9am = input(\"Enter the wind direction at 9am: \")\nwind_dir_3pm = input(\"Enter the wind direction at 3pm: \")\nwind_speed_9am = float(input(\"Enter the wind speed at 9am: \"))\nwind_speed_3pm = float(input(\"Enter the wind speed at 3pm: \"))\nhumidity_9am = float(input(\"Enter the humidity at 9am: \"))\nhumidity_3pm = float(input(\"Enter the humidity at 3pm: \"))\npressure_9am = float(input(\"Enter the pressure at 9am: \"))\npressure_3pm = float(input(\"Enter the pressure at 3pm: \"))\ncloud_9am = float(input(\"Enter the cloudiness at 9am: \"))\ncloud_3pm = float(input(\"Enter the cloudiness at 3pm: \"))\ntemp_9am = float(input(\"Enter the temperature at 9am: \"))\ntemp_3pm = float(input(\"Enter the temperature at 3pm: \"))\nRainToday = float(input(\"Enter the Rain today: \"))\n\n# Convert the user input into a dataframe\ndata = {'MinTemp': [min_temp], \n        'MaxTemp': [max_temp], \n        'Rainfall': [Rainfall],\n        'Evaporation': [evaporation], \n        'Sunshine': 
[sunshine], \n 'WindGustDir': [wind_gust_dir],\n 'WindGustSpeed': [wind_gust_speed], \n 'WindDir9am': [wind_dir_9am], \n 'WindDir3pm': [wind_dir_3pm], \n 'WindSpeed9am': [wind_speed_9am], \n 'WindSpeed3pm': [wind_speed_3pm], \n 'Humidity9am': [humidity_9am], \n 'Humidity3pm': [humidity_3pm], \n 'Pressure9am': [pressure_9am], \n 'Pressure3pm': [pressure_3pm], \n 'Cloud9am': [cloud_9am], \n 'Cloud3pm': [cloud_3pm], \n 'Temp9am': [temp_9am], \n 'Temp3pm': [temp_3pm],\n 'RainToday': [RainToday]}\n\nuser_input = pd.DataFrame(data)\n\n\n# Make the prediction\nprediction = model_rf.predict_proba(user_input)[0,1]\n\nprint(\"prediction= {}\".format(prediction))\n\nif prediction<(0.25):\n print(\"No rain is expected tomorrow\")\nelse:\n print(\"Rain is expected tomorrow\") ","repo_name":"Kakarrxt/Rainfall_Prediction_ML","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8393047579","text":"# pyname.py\n\nimport asyncio\nimport glob\nimport os\nimport re\n\nfrom venom import venom, MyMessage, Config, plugin_name\n\nhelp_ = Config.HELP[plugin_name(__name__)] = {'type': 'devs', 'commands': []}\nDOT_ = Config.BULLET_DOT\n\n\nasync def _init() -> None:\n \"\"\" load non-py file list \"\"\"\n Config.NON_PY_FILES.clear()\n sep = \"\\\\\" if \"\\\\\" in dir(__name__) else \"/\"\n total_list_ = glob.glob(f\"venom{sep}plugins{sep}*{sep}*\")\n for one in total_list_:\n search_ = re.search(r\"(\\w+)[\\\\/](\\w+)$\", one)\n plugin = search_.group(2) if search_ else \"\"\n if not plugin or plugin.startswith(\"__\"):\n continue\n Config.NON_PY_FILES.update({plugin: one})\n\n########################################################################################################################\n\nhelp_['commands'].append(\n {\n 'command': 'pyname',\n 'flags': {\n '-l': 'list non-py files',\n '-r': 'reload list'\n },\n 'usage': 'Rename python files to enable them',\n 'syntax': '{tr}pyname [cmd|plugin name]',\n 'sudo': False\n }\n)\n\n\n@venom.trigger('pyname')\nasync def python_rename(_, message: MyMessage):\n \"\"\" Rename python files to enable them \"\"\"\n flags_ = message.flags\n await message.edit(\"`Searching...`\")\n if \"-l\" in flags_:\n if Config.NON_PY_FILES:\n list_ = \"\"\n for one in Config.NON_PY_FILES.keys():\n list_ += f\"{DOT_} {one}\"\n out_ = f\"Unloaded plugins: [{len(Config.NON_PY_FILES.keys())}]\\n\\n{list_}\"\n else:\n out_ = \"`No plugins are unloaded...`\"\n return await message.edit(out_)\n elif \"-r\" in flags_:\n await _init()\n return await message.edit(\"`Non-loaded list reloaded...`\")\n plug_ = message.filtered_input\n if plug_ not in Config.NON_PY_FILES.keys():\n return await message.edit(f\"`No plugin named {plug_} is disabled...`\", del_in=5)\n await message.edit(f\"`Plugin {plug_} found.\\nReloading...`\")\n path_ = Config.NON_PY_FILES[plug_]\n os.rename(path_, f\"{path_}.py\")\n await message.edit(f\"Plugin {plug_} reloaded.\\n`Now restarting...`\")\n asyncio.get_event_loop().create_task(venom.restart())\n","repo_name":"ashwinstr/VenomX","sub_path":"venom/plugins/devs/pyname.py","file_name":"pyname.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"10519126741","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('spam', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='spammyposting',\n name='comment',\n field=models.TextField(blank=True, null=True),\n ),\n migrations.AddField(\n model_name='spammyposting',\n name='reviewer',\n field=models.ForeignKey(related_name='reviewer', blank=True, null=True, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='spammyposting',\n name='status',\n field=models.IntegerField(default=10, choices=[(10, 'Flagged'), (20, 'Under review'), (30, 'Rejected'), (40, 'Approved')]),\n ),\n migrations.AlterField(\n model_name='spammyposting',\n name='reporter',\n field=models.ForeignKey(related_name='reporter', blank=True, null=True, to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"pydanny/dj-spam","sub_path":"spam/migrations/0002_auto_20150728_2050.py","file_name":"0002_auto_20150728_2050.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"16"} +{"seq_id":"13642641662","text":"from django.shortcuts import render\nfrom rest_framework import response\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import Category,Product\nfrom .serializers import *\n\n# Create your views here.\n\nclass CategoryView(APIView):\n\n def get(self,request):\n query_set = Category.objects.all()\n serializer = CategorySerializer(query_set,many=True)\n return Response(serializer.data)\n \n\n def post(self,request):\n serializer = CategorySerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n def put(self,request):# put means update\n id = request.POST.get('id')\n category = request.POST.get('category')\n try:\n query_set =Category.objects.get(id=id)\n if query_set:\n query_set.category = category\n query_set.save()\n res = {\n 'success' : 'true',\n 'message' : 'Category has been successfully updated'\n }\n return Response(res, status=status.HTTP_201_CREATED)\n except:\n res = {\n 'success' : 'false',\n 'message' : 'Something went wrong'\n\n }\n return Response(res,status=status.HTTP_304_NOT_MODIFIED)\n\n\n def delete(self,request):\n id = request.POST.get(\"id\")\n try:\n query_set = Category.objects.get(id=id).delete()\n if query_set:\n res = {\n 'success' : 'true',\n 'message' : 'Category has been successfully deleted'\n\n }\n return Response(res, status=status.HTTP_200_OK)\n except:\n res = {\n 'success' : 'false',\n 'message' : 'Record does not exist'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n\n \nclass ProductView(APIView):\n #Retrieve Product \n def get(self, request):\n qs = Product.objects.all()\n ser = ProductSerializer(qs, many=True)\n return Response(ser.data)\n\n #Create Product\n def post(self, request):\n serializer = ProductSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n #Update Product\n def put(self, request):\n id = request.POST.get(\"id\") \n product_name = request.POST.get(\"product_name\")\n product_model_name = request.POST.get(\"product_model_name\")\n price = 
request.POST.get(\"price\") \n try: \n qs = Product.objects.get(id=id)\n if qs:\n qs.product_name = product_name\n qs.product_model_name = product_model_name\n qs.price = price\n qs.save()\n resp = {\n 'success' : 'true',\n 'message' : \"Product Has Been Successfully Updated\",\n }\n return Response(resp, status=status.HTTP_201_CREATED)\n except:\n resp = {\n 'success' : 'false',\n 'message' : \"Something went wrong try again\", \n } \n return Response(resp, status=status.HTTP_304_NOT_MODIFIED) \n\n #Delete Product\n def delete(self, request):\n id = request.POST.get(\"id\") \n try: \n qs = Product.objects.get(id=id).delete()\n if qs:\n resp = {\n 'success' : 'true',\n 'message' : \"Product Deleted\",\n }\n return Response(resp, status=status.HTTP_200_OK)\n except:\n resp = {\n 'success' : 'false',\n 'message' : \"Record does not exist\", } \n return Response(resp, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"pankaj-stack/Assessment_3_Revisied","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38913652984","text":"import argparse\nimport json\nimport os\nimport re\nimport requests\nimport subprocess\nimport shutil\nimport glob\nimport versioneer\nimport euphonic_version\nfrom update_dependencies import update_submodules\n\n__version__ = versioneer.get_version()\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n print(args)\n\n update_submodules()\n if args.github:\n args.create_toolbox = True\n\n if args.create_toolbox:\n create_mltbx()\n\n test = not args.notest\n if args.github:\n release_github(test)\n\ndef check_submodule_version(submodule):\n \"\"\"\n Check release version of Horace-Euphonic-Interface depends\n on release versions of submodules\n \"\"\"\n ret = subprocess.run('git tag --points-at HEAD',\n cwd=submodule,\n capture_output=True)\n ver = ret.stdout.decode('utf-8').strip()\n if ver == '':\n raise Exception(f'Submodule {submodule} is not a tagged (release) '\n f'version. 
A release version of Horace-Euphonic-Interface '\n f'should depend on release versions of its submodules')\n\n\nHELPDOCSTR = '\\n' \\\n ' % Overloaded help command to display Python help in Matlab\\n' \\\n ' % To use it, please type\\n' \\\n ' %\\n' \\\n ' % >> import euphonic.help\\n' \\\n ' % >> help \\n' \\\n ' %\\n' \\\n ' % where is a Python class or method which has been wrapped for use in Matlab.\\n' \\\n ' % If the topic is not wrapped, the normal Matlab help is displayed.\\n' \\\n\ndef replace_matlab_docstring(filename, replacement_str):\n with open(filename) as f:\n txt = f.read()\n cm = [m.start() for m in re.finditer(r'\\n\\s*%', txt)]\n nl = [m.start() for m in re.finditer(r'\\n', txt)]\n idx = [cm[idx] for idx in range(len(cm)) if cm[idx] == nl[idx]]\n newtxt = txt[:idx[0]] + replacement_str + txt[idx[-1]:]\n with open(filename, 'w') as f:\n f.write(newtxt)\n\n\ndef create_mltbx():\n import fileinput\n # replace version string\n version = __version__.split('+')[0] if '+' in __version__ else __version__ # Matlab only accepts numbers\n with fileinput.FileInput('mltbx/horace_euphonic_interface.prj', inplace=True) as prj:\n for line in prj:\n # FileInput redirect stdout to the file, for inplace replacement; end='' means don't add extra newlines\n print(line.replace('1.0', f'{version}'), end='')\n euphonic_version.update_euphonic_version()\n # shutil.copytree expects destination to not exist\n for dest_folder in ['+light_python_wrapper', 'euphonic_sqw_models', '+euphonic']:\n if os.path.isdir('mltbx/' + dest_folder): shutil.rmtree('mltbx/' + dest_folder)\n shutil.copytree('light_python_wrapper/+light_python_wrapper', 'mltbx/+light_python_wrapper')\n shutil.copytree('euphonic_sqw_models/euphonic_sqw_models', 'mltbx/euphonic_sqw_models/euphonic_sqw_models')\n shutil.copytree('+euphonic', 'mltbx/+euphonic')\n for fil in glob.glob('light_python_wrapper/helputils/*.m'): shutil.copy(fil, 'mltbx/+euphonic')\n for fil in glob.glob('light_python_wrapper/helputils/private/*.m'): shutil.copy(fil, 'mltbx/+euphonic/private')\n replace_matlab_docstring('mltbx/+euphonic/help.m', HELPDOCSTR)\n replace_matlab_docstring('mltbx/+euphonic/doc.m', HELPDOCSTR.replace('help', 'doc'))\n subprocess.run(['matlab', '-batch', 'create_mltbx'], cwd='mltbx')\n print('.mltbx created')\n\n\ndef release_github(test=True):\n submodules = ['light_python_wrapper', 'euphonic_sqw_models']\n for submodule in submodules:\n check_submodule_version(submodule)\n\n with open('CHANGELOG.rst') as f:\n changelog = f.read()\n # Remove working changes caused by .mltbx creation because this would\n # bump the versioneer version to .dirty\n subprocess.run('git restore +euphonic/private/required_modules.m')\n subprocess.run('git restore mltbx/horace_euphonic_interface.prj')\n hor_eu_interface_ver = 'v' + __version__\n changelog_ver = re.findall('\\n`(v\\d+\\.\\d+\\.\\S+)\\s', changelog)[0]\n if hor_eu_interface_ver != changelog_ver:\n raise Exception((\n f'VERSION and CHANGELOG.rst version mismatch!\\n'\n f'VERSION: {hor_eu_interface_ver}\\nCHANGELOG.rst: '\n f'{changelog_ver}'))\n desc = re.search('`v\\d+\\.\\d+\\.\\S+.*?^-+\\n(.*?)^`v', changelog,\n re.DOTALL | re.MULTILINE).groups()[0].strip()\n\n payload = {\n \"tag_name\": changelog_ver,\n \"target_commitish\": \"master\",\n \"name\": changelog_ver,\n \"body\": desc,\n \"draft\": False,\n \"prerelease\": False\n }\n if test:\n print(payload)\n else:\n response = requests.post(\n 'https://api.github.com/repos/pace-neutrons/horace-euphonic-interface/releases',\n 
data=json.dumps(payload),\n            headers={\"Authorization\": \"token \" + os.environ[\"GITHUB_TOKEN\"]})\n        print(response.text)\n\n    # Upload Matlab toolbox\n    if not test:\n        upload_url = response.json().get('upload_url').split('{')[0]\n        fname = 'horace_euphonic_interface.mltbx'\n        response = requests.post(\n            upload_url,\n            data=open(os.path.join('mltbx', fname), 'rb'),\n            params=(('name', fname),),\n            headers={\"Content-Type\": 'application/octet-stream',\n                     \"Authorization\": \"token \" + os.environ[\"GITHUB_TOKEN\"]},\n        )\n        print(response.text)\n\n\ndef get_parser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '--create-toolbox',\n        action='store_true',\n        help=('Create .mltbx file. This is automatically set to True if '\n              '--github is used'))\n    parser.add_argument(\n        '--github',\n        action='store_true',\n        help='Release on Github')\n    parser.add_argument(\n        '--notest',\n        action='store_true',\n        help='Actually send/upload')\n    return parser\n\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"mducle/horace-euphonic-interface","sub_path":"release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"1065202347","text":"import mnist\nimport numpy as np\nfrom conv import Conv3x3\nfrom maxpool import MaxPool2\nfrom softmax import Softmax\n\n# the mnist package handles the MNIST dataset for us\n# learn more https://github.com/datapythonista/mnist\ntrain_images = mnist.train_images()[:1000]\ntrain_labels = mnist.train_labels()[:1000]\n# the evaluation loop below runs on the test split\ntest_images = mnist.test_images()[:1000]\ntest_labels = mnist.test_labels()[:1000]\n\nconv = Conv3x3(8)\npool = MaxPool2()\nsoftmax = Softmax(13 * 13 * 8, 10)\n\n\ndef forward(image, label):\n    '''\n    Completes a forward pass of the CNN and calculates the accuracy and\n    cross-entropy loss.\n    - image is a 2d numpy array\n    - label is a digit\n    '''\n    # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier\n    # to work with. This is standard practice.\n    out = conv.forward((image / 255) - 0.5)\n    out = pool.forward(out)\n    out = softmax.forward(out)\n\n    # Calculate cross-entropy loss and accuracy. 
np.log() is the natural log.\n loss = -np.log(out[label])\n acc = 1 if np.argmax(out) == label else 0\n\n return out, loss, acc\n\n\nprint('MNIST CNN initialized!')\n\n\nloss = 0\nnum_correct = 0\nfor i, (im, label) in enumerate(zip(test_images, test_labels)):\n # Do a forward pass.\n _, l, acc = forward(im, label)\n loss += l\n num_correct += acc\n\n # Print stats every 100 steps.\n if i % 100 == 99:\n print(\n '[Step %d] Past 100 steps: Average Loss %.3f | Accuracy: %d%%' %\n (i + 1, loss / 100, num_correct)\n )\n loss = 0\n num_correct = 0\n\n\n# output = conv.forward(train_images[0])\n# output = conv.forward(output)\n\n# print(output.shape)\n","repo_name":"olivrg/CNN","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24791481163","text":"from queue import Queue\nfrom threading import Thread\n\n\nclass Robot:\n def __init__(self, in_queue: Queue, out_queue: Queue):\n self.in_queue = in_queue\n self.out_queue = out_queue\n self.painted = {(0, 0): True}\n self.position = (0, 0)\n self.dir = (-1, 0)\n self.min_x = 0\n self.max_x = 0\n self.min_y = 0\n self.max_y = 0\n self.task = Thread(target=self.run)\n self.task.setDaemon(True)\n\n def turn_left(self):\n if self.dir == (-1, 0):\n self.dir = (0, -1)\n elif self.dir == (0, -1):\n self.dir = (1, 0)\n elif self.dir == (1, 0):\n self.dir = (0, 1)\n elif self.dir == (0, 1):\n self.dir = (-1, 0)\n\n def turn_right(self):\n if self.dir == (-1, 0):\n self.dir = (0, 1)\n elif self.dir == (0, 1):\n self.dir = (1, 0)\n elif self.dir == (1, 0):\n self.dir = (0, -1)\n elif self.dir == (0, -1):\n self.dir = (-1, 0)\n\n def print_result(self):\n print('num painted')\n print(len(self.painted))\n print('')\n print(self.min_x, self.max_x, self.min_y, self.max_y)\n for x in range(self.min_x, self.max_x+1):\n for y in range(self.min_y, self.max_y+1):\n if (x, y) in self.painted and self.painted[(x, y)]:\n print('@', end='')\n else:\n print(' ', end='')\n print('')\n\n def update_limits(self):\n if self.position[0] < self.min_x:\n self.min_x = self.position[0]\n if self.position[0] > self.max_x:\n self.max_x = self.position[0]\n if self.position[1] < self.min_y:\n self.min_y = self.position[1]\n if self.position[1] > self.max_y:\n self.max_y = self.position[1]\n\n \n def move_forward(self):\n self.position = (self.position[0] + self.dir[0], self.position[1] + self.dir[1])\n \n def run(self):\n while True:\n code = self.in_queue.get()\n if code == 99:\n break\n elif code == 98:\n if self.position in self.painted and self.painted[self.position]:\n self.out_queue.put(1)\n else:\n self.out_queue.put(0)\n else:\n if code == 0:\n self.painted[self.position] = False\n elif code == 1:\n self.painted[self.position] = True\n self.update_limits()\n code = self.in_queue.get()\n if code == 0:\n self.turn_left()\n elif code == 1:\n self.turn_right()\n self.move_forward()\n\n def start(self):\n self.task.start()\n\n def join(self):\n self.task.join()","repo_name":"christnil/adventofcode-2019","sub_path":"day11/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34982357954","text":"import pandas as pd\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ntrain_df = pd.read_csv('train.csv')\nimage_ids = train_df['image_id'].values\nlabels = train_df['label'].unique()\n\ndef 
displayLabel(train_df):\n    label_counts = train_df['label'].value_counts()\n    plt.bar(label_counts.index, label_counts.values)\n    plt.xlabel('Label ID')\n    plt.ylabel('Count')\n    plt.title('Distribution of Label IDs')\n    plt.show()\n\ndef plot_selected_images(train_df, labels):\n    selected_images = []\n    for label in labels:\n        image_id = train_df[train_df['label'] == label]['image_id'].iloc[0]\n        selected_images.append(image_id)\n    \n    fig, axes = plt.subplots(nrows=len(selected_images), figsize=(8, 8))\n    \n    for i, image_id in enumerate(selected_images):\n        img_path = f'train_tfimages\\\\{image_id}' # Assuming the images are stored in a directory named 'train_tfimages'\n        img = plt.imread(img_path)\n        axes[i].imshow(img)\n        axes[i].set_title(f'Label: {labels[i]}')\n    \n    plt.tight_layout()\n    plt.show()\n\nplot_selected_images(train_df, labels)\n\nimport os\nimport cv2\n\ndata_directory = 'train_tfimages'\nimg = cv2.imread(os.path.join(data_directory, '6103.jpg'))\nimg.shape\n","repo_name":"nttrung2406/Cassava-Leaf-Disease-Classification.","sub_path":"overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"50703576066","text":"\ndef user_serialize(user_db):\n    user = {\n        'id': user_db.id,\n        'first_name': user_db.first_name,\n        'last_name': user_db.last_name,\n        'password': '',\n        'username': user_db.username,\n        'role': user_db.role\n    }\n    return user\n\n\ndef users_serialize(users):\n    result = []\n    for user in users:\n        user_add = user_serialize(user)\n        result.append(user_add)\n    return result\n\n\ndef apartment_serialize(apartment_db):\n    urls = [image.url for image in apartment_db.images]\n    apartment = {\n        'id': apartment_db.id,\n        'lamella': apartment_db.lamella,\n        'address': apartment_db.address,\n        'quadrature': apartment_db.quadrature,\n        'floor': apartment_db.floor,\n        'num_rooms': apartment_db.num_rooms,\n        'orientation': apartment_db.orientation,\n        'num_terrace': apartment_db.num_terrace,\n        'price': apartment_db.price,\n        'status': apartment_db.status,\n        'new_construction': apartment_db.new_construction,\n        'in_construction': apartment_db.in_construction,\n        'available_from': str(apartment_db.available_from),\n        'images': urls\n    }\n    return apartment\n\n\ndef apartments_serialize(apartments):\n    result = []\n    for apartment in apartments:\n        apartment_add = apartment_serialize(apartment)\n        result.append(apartment_add)\n    return result\n\n\ndef customer_serialize(customer_db):\n    customer = {\n        'id': customer_db.id,\n        'legal_entity': customer_db.legal_entity,\n        'name': customer_db.name,\n        'email': customer_db.email,\n        'telephone_number': customer_db.telephone_number,\n        'pib_jmbg': customer_db.pib_jmbg,\n        'place': customer_db.place,\n        'street': customer_db.street,\n        'num': customer_db.num,\n        'date_of_first_visit': str(customer_db.date_of_first_visit)\n    }\n    return customer\n\n\ndef customers_serialize(customers):\n    result = []\n    for customer in customers:\n        customer_add = customer_serialize(customer)\n        result.append(customer_add)\n\n    return result\n\n\ndef offer_serialize(offer):\n    serialized_offer = {\n        'id': offer.id,\n        'apartment_id': offer.apartment_id,\n        'customer_id': offer.customer_id,\n        'customer_status': offer.customer_status,\n        'customer_price': offer.customer_price,\n        'price_approved': offer.price_approved,\n        'note': offer.note,\n        'payment_method': offer.payment_method,\n        'deposit_amount': offer.deposit_amount,\n        'contract_deadline': offer.contract_deadline,\n        'bank': offer.bank,\n        'loan_amount': 
offer.loan_amount,\n 'cash_amount': offer.cash_amount,\n 'contract_number': offer.contract_number,\n 'contract_date': offer.contract_date\n }\n\n return serialized_offer\n\n\ndef customer_apartment_serialize(customers_apartments):\n result = []\n for customer, offer in customers_apartments:\n dict_to_append = {}\n cust = customer_serialize(customer)\n off = offer_serialize(offer)\n dict_to_append.update(cust)\n dict_to_append.update(off)\n result.append(dict_to_append)\n\n return result\n\n\ndef apartment_customer_serialize(apartments_customer):\n result = []\n for apartment, offer in apartments_customer:\n dict_to_append = {}\n apart = apartment_serialize(apartment)\n off = offer_serialize(offer)\n dict_to_append.update(apart)\n dict_to_append.update(off)\n result.append(dict_to_append)\n\n return result\n\n\ndef price_for_approval_serialize(customers_apartments_price):\n result = []\n for offer in customers_apartments_price:\n dict_to_append = {\n 'apartment_customer_id': offer.offer_id,\n 'apartment_id': offer.apartment_id,\n 'apartment_address': offer.apartment_address,\n 'apartment_quadrature': offer.apartment_quadrature,\n 'apartment_price': offer.apartment_price,\n 'apartment_lowest_price': offer.apartment_lowest_price,\n 'apartment_customer_price': offer.apartment_customer_price,\n 'price_approved': offer.price_approved,\n 'customer_id': offer.customer_id,\n 'customer_name': offer.customer_name\n }\n result.append(dict_to_append)\n\n return result\n\n\n\n","repo_name":"markodmutavdzic/prodaja_stanova","sub_path":"app/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25821350350","text":"from uplogic.nodes import ULActionNode\nfrom uplogic.nodes import ULOutSocket\nfrom uplogic.utils import clamp\n\n\nclass ULClampedModifyProperty(ULActionNode):\n def __init__(self):\n ULActionNode.__init__(self)\n self.condition = None\n self.game_object = None\n self.property_name = None\n self.property_value = None\n self.mode = 0\n self.operator = None\n self.range = None\n self.done = False\n self.OUT = ULOutSocket(self, self._get_done)\n\n def _get_done(self):\n return self.done\n\n def evaluate(self):\n self.done = False\n if not self.get_input(self.condition):\n return\n game_object = self.get_input(self.game_object)\n property_name = self.get_input(self.property_name)\n property_value = self.get_input(self.property_value)\n val_range = self.get_input(self.range)\n obj = game_object.blenderObject if self.mode else game_object\n value = obj.get(property_name, 0)\n new_val = self.operator(value, property_value)\n obj[property_name] = (\n clamp(new_val, val_range.x, val_range.y)\n )\n self.done = True\n","repo_name":"UPBGE/uplogic","sub_path":"uplogic/nodes/actions/clampedmodifyproperty.py","file_name":"clampedmodifyproperty.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"1043526420","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Intakes a list and sorts the list\"\"\"\n\nimport time\n\n\ndef insertion_sort(a_list):\n \"\"\"insertion_sort Function - intakes a list of objects and sorts the list.\n Args:\n a_list (list): A list of objects to be sorted\n Output: a_list sorted and the time it took to sort \n Example:\n test_list = [1, 2, 32, 8, 17, 19, 42, 13, 0]\n insertion_sort(test_list)\n >>>([0, 1, 2, 8, 13, 17, 19, 32, 42], 
.000015)\n    \"\"\"\n    start_time = time.time()\n    for index in range(1, len(a_list)):\n        current_value = a_list[index]\n        position = index\n        while position > 0 and a_list[position - 1] > current_value:\n            a_list[position] = a_list[position - 1]\n            position = position - 1\n        a_list[position] = current_value\n    end_time = time.time()\n    total_time = end_time - start_time\n    return (a_list, total_time)\n\ndef shell_sort(a_list):\n    \"\"\"Shell Sort Function - intakes a list of objects and sorts the list.\n    Args:\n        a_list (list): A list of objects to be sorted\n    Output: a_list sorted and the time it took to sort\n    Example:\n        test_list = [1, 2, 32, 8, 17, 19, 42, 13, 0]\n        shell_sort(test_list)\n        >>>([0, 1, 2, 8, 13, 17, 19, 32, 42], .000015)\n    \"\"\"\n    start_time = time.time()\n    sublist_count = len(a_list) // 2\n    while sublist_count > 0:\n        for start_position in range(sublist_count):\n            gap_insertion_sort(a_list, start_position, sublist_count)\n        sublist_count = sublist_count // 2\n    end_time = time.time()\n    total_time = end_time - start_time\n    return (a_list, total_time)\n\ndef gap_insertion_sort(a_list, start, gap):\n    \"\"\"gap_insertion_sort Function - insertion sorts every gap-th element of the list, beginning at start\n    Args:\n        a_list (list): A list of objects to be sorted\n        start (int): starting position\n        gap (int): the gap between compared elements\n    Output: a_list with the chosen gapped sublist sorted\n    Example:\n        test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42,]\n        gap_insertion_sort(test_list, 3, 2)\n        >>>[0, 1, 2, 8, 13, 17, 19, 32, 42,]\n    \"\"\"\n    for i in range(start + gap, len(a_list), gap):\n        current_value = a_list[i]\n        position = i\n        while position >= gap and a_list[position - gap] > current_value:\n            a_list[position] = a_list[position - gap]\n            position = position - gap\n        a_list[position] = current_value\n    return (a_list)\n\ndef python_sort(a_list):\n    \"\"\"Python Sort Function - sorts a list with the built-in sorted()\n    Args:\n        a_list (list): A list of objects to be sorted\n    Output: a_list sorted and the time it took to sort\n    Example:\n        test_list = [1, 2, 32, 8, 17, 19, 42, 13, 0]\n        python_sort(test_list)\n        >>>([0, 1, 2, 8, 13, 17, 19, 32, 42], .000015)\n    \"\"\"\n    start_time = time.time()\n    a_list = sorted(a_list)\n    end_time = time.time()\n    total_time = end_time - start_time\n    return (a_list, total_time)\n\ndef binary_search_recursive(a_list, item):\n    \"\"\"binary_search_recursive Function - searches a sorted list for an item\n    Args:\n        a_list (list): A sorted list of objects to be searched\n        item (int): An int that will be searched for in a_list\n    Output: True or False for whether item was found, and the search time\n    Example:\n        test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42,]\n        binary_search_recursive(test_list, 3)\n        >>>(False, .000015)\n    \"\"\"\n    start_time = time.time()\n    if len(a_list) == 0:\n        end_time = time.time()\n        total_time = end_time - start_time\n        return (False, total_time)\n    else:\n        midpoint = len(a_list) // 2\n        if a_list[midpoint] == item:\n            end_time = time.time()\n            total_time = end_time - start_time\n            return (True, total_time)\n        else:\n            if item < a_list[midpoint]:\n                found, _ = binary_search_recursive(a_list[:midpoint], item)\n            else:\n                found, _ = binary_search_recursive(a_list[midpoint + 1:], item)\n            end_time = time.time()\n            total_time = end_time - start_time\n            return (found, total_time)\n    \ndef main():\n    \"\"\"Main Function - Tests search 
and sort functions to identify Big-O behavior.\n    Args:\n    Output: Facts about the parsed Big-O testing data. \n    Example:\n        $ python sort_compare.py\n        >>>'Insertion Sort on average took 0.0000675 seconds to run 500 records,\n        0.0001135 to run 1000 records, and 0.0002669 to run 2500 records...\n    \"\"\"\n    test_list500 = ([[number+1 for number in range(500)]\n                     for group in range(100)])\n    insertion_sort_500 = []\n    shell_sort_500 = []\n    python_sort_500 = []\n    \n    test_list1000 = ([[number+1 for number in range(1000)]\n                      for group in range(100)])\n    insertion_sort_1000 = []\n    shell_sort_1000 = []\n    python_sort_1000 = []\n    \n    test_list2500 = ([[number+1 for number in range(2500)]\n                      for group in range(100)])\n    insertion_sort_2500 = []\n    shell_sort_2500 = []\n    python_sort_2500 = []\n    \n    for i in test_list500:\n        insertion_sort_500.append(insertion_sort(i))\n        shell_sort_500.append(shell_sort(i))\n        python_sort_500.append(python_sort(i))\n    insertion_sort_avg_500 = ((sum([i[1] for i in insertion_sort_500])) /\n                              (len(insertion_sort_500)))\n    shell_sort_avg_500 = ((sum([i[1] for i in shell_sort_500]))\n                          / (len(shell_sort_500)))\n    python_sort_avg_500 = ((sum([i[1] for i in python_sort_500]))\n                           / (len(python_sort_500)))\n\n    for i in test_list1000:\n        insertion_sort_1000.append(insertion_sort(i))\n        shell_sort_1000.append(shell_sort(i))\n        python_sort_1000.append(python_sort(i))\n    insertion_sort_avg_1000 = ((sum([i[1] for i in insertion_sort_1000])) /\n                               (len(insertion_sort_1000)))\n    shell_sort_avg_1000 = ((sum([i[1] for i in shell_sort_1000])) /\n                           (len(shell_sort_1000)))\n    python_sort_avg_1000 = ((sum([i[1] for i in python_sort_1000])\n                             ) / (len(python_sort_1000)))\n\n    for i in test_list2500:\n        insertion_sort_2500.append(insertion_sort(i))\n        shell_sort_2500.append(shell_sort(i))\n        python_sort_2500.append(python_sort(i))\n    insertion_sort_avg_2500 = ((sum([i[1] for i in insertion_sort_2500])) /\n                               (len(insertion_sort_2500)))\n    shell_sort_avg_2500 = ((sum([i[1] for i in shell_sort_2500])) /\n                           (len(shell_sort_2500)))\n    python_sort_avg_2500 = ((sum([i[1] for i in python_sort_2500])\n                             ) / (len(python_sort_2500)))\n    print(\"Insertion Sort on average took %10.7f seconds to run 500 records, \"\n          \"%10.7f to run 1000 records, and %10.7f to run 2500 records.\"\n          %(insertion_sort_avg_500, insertion_sort_avg_1000\n            , insertion_sort_avg_2500))\n    print(\"Shell Sort on average took %10.7f seconds to run \"\n          \"500 records, %10.7f to run 1000 records\"\n          \", and %10.7f to run 2500 records.\"\n          %(shell_sort_avg_500, shell_sort_avg_1000,\n            shell_sort_avg_2500))\n    print(\"Python Sort on average took %10.7f seconds to run \"\n          \"500 records, %10.7f to run 1000 records\"\n          \", and %10.7f to run 2500 records.\"\n          %(python_sort_avg_500, python_sort_avg_1000,\n            python_sort_avg_2500))\n    \nmain()\n","repo_name":"detroitsteel/IS211_Assignment4","sub_path":"sort_compare.py","file_name":"sort_compare.py","file_ext":"py","file_size_in_byte":7827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43661559568","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\n#Defines system parameters.\nD=5000\nd=100\nwavelen=0.5\nL=5000\nx=np.linspace(-L/2,L/2,int(L))\n\n#Define the slit aperture function.\ndef A(x,d):\n\treturn (np.s(x) int:\n        num_rows,num_cols,num_islands = len(grid), len(grid[0]),0\n        visited = set()\n        \n        def findIsland(row,col):\n            # add this cell to the visited set\n            visited.add((row,col))\n            \n            
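# A note on the sort_compare benchmark above: its test lists are generated already in
# ascending order, which exercises insertion sort's O(n) best case instead of its
# O(n^2) average case, and time.time() has coarse resolution. A minimal, illustrative
# timing harness using only the standard library (the helper name below is not part
# of the original script):
import random
import time

def time_sort(sort_fn, size, trials=10):
    """Average wall-clock time of sort_fn over freshly shuffled inputs."""
    total = 0.0
    for _ in range(trials):
        data = list(range(size))
        random.shuffle(data)          # randomized input avoids best-case bias
        start = time.perf_counter()   # finer resolution than time.time()
        sort_fn(data)
        total += time.perf_counter() - start
    return total / trials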
# add the left, right, top and down neighbours of this cell respectively. Here the first element is the row offset and the second is the column offset\n            neighbours = [[0,-1], [0,1],[-1,0],[1,0]]\n            \n            for row_id,col_id in neighbours:\n                # Visit a particular neighbour if the index values are within bounds, the string value stored at the neighbour == \"1\", and that cell is not already visited\n                if 0 <= (col+col_id) < num_cols and 0 <= (row+row_id ) < num_rows and grid[row+row_id][col+col_id] == \"1\" and (row+row_id,col+col_id) not in visited:\n                    findIsland(row+row_id,col+col_id)\n        \n        # driver code\n        for row in range(num_rows):\n            for col in range(num_cols):\n                # call findIsland on each cell that holds \"1\" and is not already visited\n                if grid[row][col] == \"1\" and (row,col) not in visited:\n                    findIsland(row,col)\n                    \n                    # increment the count after findIsland() returns, as one connected group of cells has then been fully traversed\n                    num_islands += 1\n        return num_islands\n        \n    ","repo_name":"amarjitdhillon/CP_2021","sub_path":"200-number-of-islands/200-number-of-islands.py","file_name":"200-number-of-islands.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"69977455368","text":"import sys\r\nT = int(sys.stdin.readline())\r\n\r\nzero = 0\r\none = 0\r\n\r\nzero_dict = {}\r\none_dict = {}\r\n\r\nfib_list = [0,1]\r\nzero_dict[0] = 1\r\nzero_dict[1] = 0\r\none_dict[0] = 0\r\none_dict[1] = 1\r\n\r\n\r\n\r\n\r\ndef fibonacci(num):\r\n    global zero\r\n    global one\r\n    if(num in fib_list):\r\n        zero += zero_dict[num]\r\n        one += one_dict[num]\r\n        return 0\r\n    elif(num-1 in fib_list and num-2 in fib_list):\r\n        fib_list.append(num)\r\n        zero_dict[num] = zero_dict[num-1] + zero_dict[num-2]\r\n        one_dict[num] = one_dict[num-1] + one_dict[num-2]\r\n        zero += zero_dict[num]\r\n        one += one_dict[num]\r\n        return 0\r\n    else:\r\n        return fibonacci(num-1)+fibonacci(num-2)\r\n\r\nfor i in range(T):\r\n    zero = 0\r\n    one = 0\r\n    N = int(sys.stdin.readline())\r\n    fibonacci(N)\r\n    print(zero, one)\r\n","repo_name":"dainshon/CODING","sub_path":"백준/Silver/1003. 피보나치 함수/피보나치 함수.py","file_name":"피보나치 함수.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9870586566","text":"import itertools\r\nimport random\r\nimport threading\r\nimport concurrent.futures\r\nimport os\r\n\r\nimport constants\r\nfrom coup_matchup_environment import CoupMatchupEnvironment\r\n\r\n\r\ndef run_matchup(player1_cards, player2_cards, verbose=False):\r\n    matchup_env = CoupMatchupEnvironment(player1_cards, player2_cards)\r\n    matchup_env.solve(verbose=verbose)\r\n    initial_state = matchup_env.get_start_game_state()\r\n\r\n    winner = None\r\n    if initial_state in matchup_env.get_win_region(1):\r\n        winner = 1\r\n    elif initial_state in matchup_env.get_win_region(2):\r\n        winner = 2\r\n\r\n    assert winner is not None, f\"Error with {matchup_env}, no winner found.\"\r\n    return winner, matchup_env.get_policy(1), matchup_env.get_policy(2), matchup_env\r\n\r\n\r\ndef get_game_run(matchup: CoupMatchupEnvironment, policy_1: dict, policy_2: dict):\r\n    \"\"\"\r\n    Returns a list in the form (state, action, state, action...) 
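# The recursive findIsland() above makes one call frame per connected cell, so a
# large all-land grid can exceed Python's default recursion limit (~1000 frames).
# An equivalent iterative flood-fill sketch using an explicit stack (the function
# name below is illustrative, not from the original solution):
def num_islands_iterative(grid):
    if not grid:
        return 0
    num_rows, num_cols = len(grid), len(grid[0])
    visited = set()
    count = 0
    for row in range(num_rows):
        for col in range(num_cols):
            if grid[row][col] == "1" and (row, col) not in visited:
                count += 1
                stack = [(row, col)]  # the explicit stack replaces the call stack
                while stack:
                    r, c = stack.pop()
                    if (r, c) in visited:
                        continue
                    visited.add((r, c))
                    for dr, dc in ((0, -1), (0, 1), (-1, 0), (1, 0)):
                        nr, nc = r + dr, c + dc
                        if 0 <= nr < num_rows and 0 <= nc < num_cols \
                                and grid[nr][nc] == "1" and (nr, nc) not in visited:
                            stack.append((nr, nc))
    return count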
representing a run of game matchup with the\r\n given policies.\r\n :param policy_1: Policy for player 1\r\n :param policy_2: Policy for player 2\r\n :param matchup: CoupMatchupEnvironment environment of the game\r\n :return:\r\n \"\"\"\r\n run = list()\r\n state = matchup.get_start_game_state()\r\n run.append(state)\r\n\r\n while state not in matchup.get_goal_states(1) and state not in matchup.get_goal_states(2):\r\n turn = state[2]\r\n action = policy_1[state] if turn == 1 else policy_2[state]\r\n # if action is None that means there is no action for player to win so a random action is selected\r\n if action is None:\r\n action = random.choice(CoupMatchupEnvironment.get_enabled_actions(state))\r\n new_state = CoupMatchupEnvironment.transition(state, action)\r\n run.extend([action, new_state])\r\n print(state, action)\r\n state = new_state\r\n return run\r\n\r\n\r\ndef get_run_graph(matchup: CoupMatchupEnvironment, pi1: dict, pi2: dict):\r\n \"\"\"\r\n Returns a graph in the form graph[state] = list(successor_states) that shows a game run where the player who wins\r\n takes their optimal action and the player who loses has all possible successor states listed\r\n \"\"\"\r\n initial_state = matchup.get_start_game_state()\r\n graph = dict()\r\n _get_run_graph(initial_state, graph, matchup, pi1, pi2)\r\n return graph\r\n\r\n\r\ndef save_run_graph(path: str, graph):\r\n with open(path, 'w') as file:\r\n file.write(f\"source,target\\n\")\r\n for source in graph.keys():\r\n for target in graph[source]:\r\n file.write(f\"{str(source).replace(',', '.')},{str(target).replace(',', '.')}\\n\")\r\n\r\n\r\ndef _get_run_graph(initial_state, graph: dict, matchup: CoupMatchupEnvironment, pi1: dict, pi2: dict):\r\n \"\"\"\r\n Utility function -- see get_run_graph\r\n \"\"\"\r\n state = initial_state\r\n graph[state] = list()\r\n if state in matchup.get_goal_states(1) or state in matchup.get_goal_states(2):\r\n return\r\n else:\r\n turn = state[2]\r\n action = pi1[state] if turn == 1 else pi2[state]\r\n # if action is None that means there is no action for player to win, so we look at all their possible actions\r\n if action is None:\r\n for action in CoupMatchupEnvironment.get_enabled_actions(state):\r\n new_state = CoupMatchupEnvironment.transition(state, action)\r\n graph[state].append(new_state)\r\n if new_state not in graph.keys():\r\n _get_run_graph(new_state, graph, matchup, pi1, pi2)\r\n else:\r\n new_state = CoupMatchupEnvironment.transition(state, action)\r\n graph[state].append(new_state)\r\n _get_run_graph(new_state, graph, matchup, pi1, pi2)\r\n return\r\n\r\n\r\ndef get_next_states(matchup: CoupMatchupEnvironment, state, pi):\r\n \"\"\"\r\n If there is a winning action for the player with strategy pi return the resulting state from taking that action\r\n otherwise returns a list of all states that can result from any enabled action.\r\n \"\"\"\r\n if pi[state] is not None:\r\n return [matchup.transition(state, pi[state])]\r\n else:\r\n return [matchup.transition(state, action) for action in matchup.get_enabled_actions(state)]\r\n\r\n\r\ndef write_to_file(result, file, lock):\r\n \"\"\"\r\n Write to a file in a thread safe way\r\n \"\"\"\r\n lock.acquire()\r\n try:\r\n file.write(f\"{result}\\n\")\r\n finally:\r\n lock.release()\r\n\r\n\r\ndef run_experiment(path=\"../data/results.txt\", verbose=False, num_cores=1, overwrite=False):\r\n \"\"\"\r\n Evaluate all possible matchups and write the results to the file specified by path\r\n :param path: Path of file to write results to\r\n :param 
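# write_to_file() above pairs lock.acquire() with try/finally by hand. Since
# threading.Lock objects are context managers, the same release guarantee can be
# written more compactly with a with-block (an equivalent sketch, not a change to
# the original helper):
def write_to_file_ctx(result, file, lock):
    # 'with' acquires the lock on entry and releases it even if write() raises
    with lock:
        file.write(f"{result}\n")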
verbose: Boolean value indicating whether to print matchup debug info\r\n :param num_cores: Number of cores to use for parallel processing\r\n :param overwrite: Whether to overwrite the file at path if it already exists\r\n \"\"\"\r\n if os.path.isfile(path) and overwrite is False:\r\n raise Exception(\r\n f\"File {path} already exists, if you wish to overwrite it call this function with overwrite=True\")\r\n if num_cores < 1:\r\n raise Exception(f\"Error, value of {num_cores} for num_cores not allowed\")\r\n # Compute combinations of cards since (DUKE,CAPTAIN) is the same as (CAPTAIN,DUKE)\r\n card_pairs = list(itertools.combinations(constants.CARDS, 2))\r\n # Add duplicate pairs (CAPTAIN, CAPTAIN), (DUKE,DUKE), etc.\r\n card_pairs.extend([(card, card) for card in constants.CARDS])\r\n matchups = list(itertools.product(card_pairs, card_pairs))\r\n i = 0\r\n if num_cores == 1:\r\n with open(path, \"w\") as file:\r\n for matchup in matchups:\r\n winner, _, _, _ = run_matchup(matchup[0], matchup[1], verbose=verbose)\r\n file.write(f\"{matchup[0]},{matchup[1]},{winner}\\n\")\r\n print(f\"Solved {i + 1}/{len(matchups)} matchups\")\r\n i += 1\r\n else:\r\n max_workers = os.cpu_count() if num_cores > os.cpu_count() else num_cores\r\n with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:\r\n with open(path, 'w') as file:\r\n lock = threading.Lock()\r\n future_results = [executor.submit(run_matchup, matchup[0], matchup[1], verbose=verbose)\r\n for matchup in matchups]\r\n\r\n i = 0\r\n for future in concurrent.futures.as_completed(future_results):\r\n winner, _, _, matchup = future.result()\r\n result = f\"{matchup.player1_cards}, {matchup.player2_cards}, {winner}\"\r\n write_to_file(result, file, lock)\r\n print(f\"Solved {i + 1}/{len(matchups)} matchups\")\r\n i += 1\r\n\r\n\r\ndef main():\r\n # run_experiment(verbose=False, num_cores=1)\r\n matchup = CoupMatchupEnvironment((constants.DUKE, constants.ASSASSIN), (constants.AMBASSADOR, constants.AMBASSADOR))\r\n matchup.solve(verbose=True)\r\n matchup.save_game_graph_edge_list(path=\"../data/DAvAMAM_full_graph.csv\")\r\n graph = get_run_graph(matchup, pi1=matchup.get_policy(1), pi2=matchup.get_policy(2))\r\n save_run_graph(path=\"../data/DAvAMAM_all_runs.csv\", graph=graph)\r\n matchup.play_game(save_run=True, path=\"../data/DAvAMAM_run.txt\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"matthewScohen/coup_endgame_solving","sub_path":"src/run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40184777702","text":"from http import HTTPStatus\n\nfrom modular_sdk.commons.constants import ApplicationType\nfrom modular_sdk.services.impl.maestro_credentials_service import \\\n RabbitMQApplicationMeta, RabbitMQApplicationSecret\n\nfrom handlers.abstracts.abstract_handler import AbstractHandler\nfrom helpers import build_response\nfrom helpers.constants import HTTPMethod, \\\n CUSTOMER_ATTR, MAESTRO_USER_ATTR, RABBIT_EXCHANGE_ATTR, \\\n REQUEST_QUEUE_ATTR, RESPONSE_QUEUE_ATTR, SDK_ACCESS_KEY_ATTR, \\\n CONNECTION_URL_ATTR, SDK_SECRET_KEY_ATTR\nfrom helpers.log_helper import get_logger\nfrom models.modular.application import Application\nfrom services import SERVICE_PROVIDER\nfrom services.modular_service import ModularService\nfrom services.ssm_service import SSMService\n\n_LOG = get_logger(__name__)\n\n\nclass RabbitMQHandler(AbstractHandler):\n def 
__init__(self, modular_service: ModularService,\n ssm_service: SSMService):\n self._modular_service = modular_service\n self._ssm_service = ssm_service\n\n @classmethod\n def build(cls) -> 'RabbitMQHandler':\n return cls(\n modular_service=SERVICE_PROVIDER.modular_service(),\n ssm_service=SERVICE_PROVIDER.ssm_service()\n )\n\n def define_action_mapping(self) -> dict:\n return {\n '/customers/rabbitmq': {\n HTTPMethod.POST: self.post,\n HTTPMethod.GET: self.get,\n HTTPMethod.DELETE: self.delete\n },\n }\n\n @staticmethod\n def get_dto(application: Application) -> dict:\n \"\"\"\n Very specific case,\n :param application:\n :return:\n \"\"\"\n return {\n CUSTOMER_ATTR: application.customer_id,\n **application.meta.as_dict()\n }\n\n def post(self, event: dict) -> dict:\n customer = event[CUSTOMER_ATTR]\n item = next(self._modular_service.get_applications(\n customer=customer,\n _type=ApplicationType.RABBITMQ,\n limit=1,\n deleted=False\n ), None)\n if item:\n return build_response(\n code=HTTPStatus.CONFLICT,\n content='RabbitMQ configuration already exists'\n )\n meta = RabbitMQApplicationMeta(\n maestro_user=event[MAESTRO_USER_ATTR],\n rabbit_exchange=event.get(RABBIT_EXCHANGE_ATTR),\n request_queue=event[REQUEST_QUEUE_ATTR],\n response_queue=event[RESPONSE_QUEUE_ATTR],\n sdk_access_key=event[SDK_ACCESS_KEY_ATTR]\n )\n name = self._ssm_service.save_data(\n name=f'{customer}-rabbitmq-configuration',\n value=RabbitMQApplicationSecret(\n connection_url=event[CONNECTION_URL_ATTR],\n sdk_secret_key=event[SDK_SECRET_KEY_ATTR]\n ).dict(),\n prefix='caas'\n )\n application = self._modular_service.create_application(\n customer=customer,\n _type=ApplicationType.RABBITMQ,\n description='RabbitMQ configuration for Custodian',\n meta=meta.dict(),\n secret=name\n )\n _LOG.info('Saving application item')\n self._modular_service.save(application)\n return build_response(content=self.get_dto(application))\n\n def get(self, event) -> dict:\n customer = event[CUSTOMER_ATTR]\n application = next(self._modular_service.get_applications(\n customer=customer,\n _type=ApplicationType.RABBITMQ,\n limit=1,\n deleted=False\n ), None)\n if not application:\n return build_response(\n code=HTTPStatus.NOT_FOUND,\n content=f'RabbitMQ configuration not found'\n )\n return build_response(content=self.get_dto(application))\n\n def delete(self, event) -> dict:\n customer = event[CUSTOMER_ATTR]\n application = next(self._modular_service.get_applications(\n customer=customer,\n _type=ApplicationType.RABBITMQ,\n limit=1,\n deleted=False\n ), None)\n if not application:\n return build_response(code=HTTPStatus.NO_CONTENT)\n erased = self._modular_service.delete(application)\n if not erased:\n return build_response(\n code=HTTPStatus.BAD_REQUEST,\n content='Could not remove the application. 
'\n                        'Probably it\\'s used by some parents.'\n            )\n        # erased\n        if application.secret:\n            _LOG.info(f'Removing application secret: {application.secret}')\n            if not self._ssm_service.delete_secret(application.secret):\n                _LOG.warning(f'Could not remove secret: {application.secret}')\n        # Modular sdk does not remove the app, just sets is_deleted\n        self._modular_service.save(application)\n        return build_response(code=HTTPStatus.NO_CONTENT)\n","repo_name":"epam/ecc","sub_path":"src/handlers/rabbitmq_handler.py","file_name":"rabbitmq_handler.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14525588619","text":"# coding: utf-8\n# author: Fengzhijin\n# time: 2017.12.5\n# ==================================\n'''\nConverts the source files of the license plate character recognition dataset into tfrecords files\n1._int64_feature() - int64 data conversion function\n2._bytes_feature() - binary string conversion function\n3.convert_to() - tfrecords file generation function\n'''\n\nfrom PIL import Image\nimport os\nimport tensorflow as tf\n\n\nclasses_zimu = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H',\n                8: 'J', 9: 'K', 10: 'L', 11: 'M', 12: 'N', 13: 'P', 14: 'Q',\n                15: 'R', 16: 'S', 17: 'T', 18: 'U', 19: 'V', 20: 'W', 21: 'X',\n                22: 'Y', 23: 'Z'}\nclasses_hanzi = {24: '藏', 25: '川', 26: '鄂', 27: '甘', 28: '赣', 29: '广', 30: '桂',\n                 31: '贵', 32: '黑', 33: '沪', 34: '吉', 35: '冀', 36: '津', 37: '晋',\n                 38: '京', 39: '辽', 40: '鲁', 41: '蒙', 42: '闽', 43: '宁', 44: '青',\n                 45: '琼', 46: '陕', 47: '苏', 48: '皖', 49: '湘', 50: '新', 51: '渝',\n                 52: '豫', 53: '粤', 54: '云', 55: '浙'}\nclasses_shuzi = {56: '0', 57: '1', 58: '2', 59: '3', 60: '4', 61: '5', 62: '6',\n                 63: '7', 64: '8', 65: '9'}\nclasses = ['字母', '汉字', '数字']\nvalidation_size = 630\ntrain_size = 17819\nsize = 18449\n\n\ndef _int64_feature(value):\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _float_feature(value):\n    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef convert_to():\n    cwd = '../测试图像集/'\n    # writer_train = tf.python_io.TFRecordWriter('../data/tfrecords/train.tfrecords')\n    # writer_validation = tf.python_io.TFRecordWriter('../data/tfrecords/validation.tfrecords')\n    writer_test = tf.python_io.TFRecordWriter('../data/tfrecords/test.tfrecords')\n    sum = 0\n    for i in classes:\n        if i == '字母':\n            classes_1 = classes_zimu\n        elif i == '汉字':\n            classes_1 = classes_hanzi\n        elif i == '数字':\n            classes_1 = classes_shuzi\n        for index in classes_1:\n            class_path = cwd + i + '/' + classes_1[index] + '/'\n            print(\"Writing \"+classes_1[index]+\" data\")\n            # sum_validation = 0\n            # sum_train = 0\n            for img_name in os.listdir(class_path):\n                img_path = class_path + img_name\n                image = Image.open(img_path)\n                image = image.resize((24, 48))\n                image_raw = image.tobytes()\n                example = tf.train.Example(features=tf.train.Features(feature={\n                    'label': _int64_feature(int(index)),\n                    'image_raw': _bytes_feature(image_raw)}))\n                # if (sum < 10):\n                #     sum_validation += 1\n                #     writer_validation.write(example.SerializeToString())\n                # elif (sum >= 10):\n                #     sum_train += 1\n                #     writer_train.write(example.SerializeToString())\n                writer_test.write(example.SerializeToString())\n                sum += 1\n            # print(\"sum_validation = %d\" % sum_validation)\n            # print(\"sum_train = %d\" % sum_train)\n    # writer_train.close()\n    # writer_validation.close()\n    print(\"sum = %d\" % sum)\n    writer_test.close()\n\n\nif __name__ == \"__main__\":\n    
convert_to()\n","repo_name":"m-L-0/17b-FengZhijin-2015","sub_path":"Vehicle_License_Plate_Recognition/code/to_tfrecord.py","file_name":"to_tfrecord.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36647747719","text":"import os\nimport random\nimport time\nfrom datetime import timedelta\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as utils\nimport torch_geometric.transforms as T\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.nn import GATConv, GCNConv, GINConv, MessagePassing, SAGEConv\nfrom torch_geometric.nn.inits import glorot, zeros\nfrom torch_geometric.utils import add_remaining_self_loops\nfrom torch_scatter import scatter_add\n\nfrom cargonet.dataset.activeroutesv1 import ActiveRoutesV1\nfrom cargonet.dataset.simulator import Simulation\nfrom cargonet.models.eval.losses import LossCollector, MAELoss\nfrom cargonet.models.gtcn import ActiveRoutesModelTCN\nfrom cargonet.models.model import MLModel\nfrom cargonet.models.normalization import MinMaxScaler, Scaler, ZScoreScaler\nfrom cargonet.models.sociallstm import ActiveRoutesModelLSTM, ActiveRoutesModelLSTMGAT\nfrom cargonet.models.stgcnv1 import ActiveRoutesModelSTGCNV1\nfrom cargonet.visualization.delays import DelayProgressPlot\n\nclass ActiveRoutesModelV1(MLModel):\n \n @property\n def model_state_path(self):\n # # \"_stateful\" if self.use_rnn else \"\"\n return os.path.join(self.trained_model_dir, self.name + (\n \"_sim\" if self.simulation else \"\"\n ) + \".pt\")\n\n def __init__(\n self,\n dataset,\n node_input_dim,\n edge_input_dim,\n output_size=1,\n seq_len=3,\n pred_seq_len=3,\n # rnn_size=64 + 16,\n rnn_size=64 + 16,\n use_rnn=False,\n dropout=0.1, # 4, # .3,\n # dropout=0.6 , # 0.3\n # embedding_size=32,\n # embedding_size=64,\n embedding_size=64,\n lr=0.001,\n l1_reg=0.00, # 0001, #0001, #.01, # 0001,\n weight_decay=0.001,\n max_transports=1000,\n grad_clip=False,\n shuffle_after_split=None,\n **kwargs\n ):\n shuffle_after_split = (not use_rnn) if (shuffle_after_split is None) else shuffle_after_split\n super().__init__(dataset, \n l1_reg=l1_reg, shuffle_after_split=shuffle_after_split, **kwargs) # chunks=2,\n \n self.use_rnn = use_rnn\n self.name = self.name + (\"_stateful\" if self.use_rnn else \"\")\n self.node_input_dim = node_input_dim\n self.edge_input_dim = edge_input_dim\n self.rnn_size = rnn_size\n self.dropout = dropout\n self.grad_clip = grad_clip\n self.embedding_size = embedding_size\n self.max_transports = max_transports\n \n\n self.output_size = output_size\n self.seq_len = seq_len\n self.pred_seq_len = pred_seq_len\n\n net = self.dataset.net.to(self.device)\n self.model = ActiveRoutesModelTCN(\n device=self.device,\n input_dim=self.node_input_dim,\n output_size=self.output_size,\n seq_len=self.seq_len,\n pred_seq_len=self.pred_seq_len,\n rnn_size=self.rnn_size,\n use_rnn=self.use_rnn,\n dropout=self.dropout,\n embedding_size=self.embedding_size,\n max_transports=self.max_transports,\n ).to(self.device)\n\n self.net = self.dataset.net.to(self.device)\n self.loss = torch.nn.MSELoss()\n # self.loss = MAELoss()\n\n \"\"\"\n self.optimizer = torch.optim.AdamW([\n dict(params=self.model.encoder.parameters(), lr=lr, weight_decay=weight_decay),\n 
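# To read back the records written by convert_to() above, the parse schema must
# mirror what was written: an int64 'label' and raw 'image_raw' bytes from a
# 24x48 resize. A minimal reading sketch against the same TF 1.x API the writer
# uses (the file path and the 3-channel image assumption are illustrative):
import tensorflow as tf

def parse_example(serialized):
    features = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    # resize((24, 48)) produces width 24 and height 48, i.e. 48 rows x 24 columns
    image = tf.reshape(image, [48, 24, 3])
    return image, features['label']

dataset = tf.data.TFRecordDataset('../data/tfrecords/test.tfrecords').map(parse_example)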
dict(params=self.model.stgblock.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.start_conv.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.final_conv.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.ground_encoder.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.ground_sync.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.ground_sync2.parameters(), lr=lr, weight_decay=weight_decay),\n dict(params=self.model.cell.parameters(), lr=0.00001, weight_decay=0.1),\n # self.model.parameters(), lr=lr, weight_decay=weight_decay\n ]) # lr=0.0001\n \"\"\"\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.001) # , weight_decay=0)\n # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.000000001, weight_decay=1.0)\n # self.optimizer = torch.optim.RMSprop(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n # optimizer = torch.optim.Adagrad(net.parameters(), weight_decay=args.lambda_param)\n # optimizer = torch.optim.Adam(net.parameters(), weight_decay=args.lambda_param)\n\n self.lr_scheduler = None\n # self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min', verbose=True)\n \n def init_rnn_state(self):\n net_size = self.dataset.net.x.size(0)\n net_state_hidden = torch.autograd.Variable(torch.zeros(net_size, self.rnn_size), requires_grad=False).to(\n self.device\n )\n net_cell_states = torch.autograd.Variable(torch.zeros(net_size, self.rnn_size), requires_grad=False).to(self.device)\n return net_state_hidden, net_cell_states\n\n def feed(self, data, net_state_hidden=None, net_cell_states=None):\n x = data.x\n y = data.y\n \n net_size = self.net.x.size(0)\n if net_state_hidden is None:\n net_state_hidden = Variable(torch.zeros(net_size, self.rnn_size))\n if net_cell_states is None:\n net_cell_states = Variable(torch.zeros(net_size, self.rnn_size))\n\n net_state_hidden = net_state_hidden.to(self.device)\n net_cell_states = net_cell_states.to(self.device)\n\n outputs, net_state_hidden, net_cell_states = self.model(\n data,\n self.net,\n net_state_hidden,\n net_cell_states,\n data.num_transports,\n data.transport_mask,\n data.current_transports,\n )\n outputs = outputs\n expected = y.view(-1, self.pred_seq_len)\n if self.denormalize:\n delay_index = -1\n outputs = self.tf.inverse_zscore(\n outputs,\n mean=self.tf.means[\"x\"][delay_index],\n std=self.tf.stds[\"x\"][delay_index]\n )\n expected = self.tf.inverse_zscore(\n expected,\n mean=self.tf.means[\"x\"][delay_index],\n std=self.tf.stds[\"x\"][delay_index]\n )\n \n # print(expected.shape, outputs.shape)\n assert expected.shape == outputs.shape\n \n return outputs, expected, net_state_hidden, net_cell_states\n\n\ndef train_model(\n plot,\n limit=1,\n epochs=100,\n reprocess=False,\n redownload=False,\n device=None,\n train=False,\n bptt=False,\n evaluate=True,\n):\n\n torch.cuda.empty_cache()\n if device:\n print(\"Using\", device)\n print(\"bptt\", bptt)\n\n base_path = os.path.dirname(os.path.realpath(__file__))\n dataset_base_path = os.path.join(base_path, \"../../datasets\")\n models_base_path = os.path.join(base_path, \"../../trained\")\n assert os.path.exists(dataset_base_path)\n assert os.path.exists(models_base_path)\n\n dataset_name = \"active-routes-v1\"\n dataset_path = os.path.join(dataset_base_path, dataset_name)\n\n simulation_dataset_name = \"simulation-v1\"\n 
simulation_dataset_path = os.path.join(dataset_base_path, simulation_dataset_name)\n\n ds_options = dict(seq_len=10, pred_seq_len=10,)\n batch_hours = 7 * 24 # 1 Week\n\n use_simulation = False\n\n if use_simulation:\n dataset = Simulation(\n root=simulation_dataset_path,\n name=simulation_dataset_name,\n limit=32 * 10 * 2,\n force_reprocess=reprocess,\n **ds_options\n )\n else:\n dataset = ActiveRoutesV1(\n root=dataset_path,\n name=dataset_name,\n limit=limit,\n batch=timedelta(hours=batch_hours),\n force_reprocess=reprocess,\n force_redownload=redownload,\n **ds_options\n )\n\n denormalize = False\n model_options = dict(\n node_input_dim=len(dataset.encoder.seq_route_node_fts),\n edge_input_dim=len(dataset.encoder.route_edge_fts),\n simulation=use_simulation,\n denormalize=denormalize,\n )\n\n def normalize_func(data, means, stds, **kwargs):\n data.x = Scaler.zscore(data.x, mean=means[\"x\"], std=stds[\"x\"])\n if denormalize:\n delay_index = -1\n data.y = Scaler.zscore(data.y, mean=means[\"x\"][delay_index], std=stds[\"x\"][delay_index])\n data.temporal_edge_attr = Scaler.zscore(\n data.temporal_edge_attr,\n mean=means[\"temporal_edge_attr\"],\n std=stds[\"temporal_edge_attr\"],\n )\n assert not torch.isnan(data.temporal_edge_attr).any()\n assert not torch.isnan(data.x).any()\n return data\n\n # Initialize model\n model = ActiveRoutesModelV1(\n dataset, device=device, shuffle=False, loader_batch_size=1, use_rnn=bptt,\n shuffle_after_split=None if not use_simulation else (not bptt),\n **ds_options, **model_options\n )\n\n print(\"fitting normalization\")\n cache = \"%s_norm_%d_%d\" % (dataset.name, batch_hours, limit)\n z_score_norm = Scaler.fit(\n model.train_data,\n normalize=normalize_func,\n attrs=dict(temporal_edge_attr=1, x=1, y=1,),\n cache=cache\n )\n model.dataset.transform = z_score_norm\n model.init_loaders()\n print(\"done fitting normalization\")\n\n # Train\n if train:\n if bptt:\n train_losses = model.bptt_train(epochs=epochs)\n else:\n train_losses = model.train(epochs=epochs)\n model.save()\n if train_losses:\n # Plot loss curve\n plt.plot(train_losses)\n plt.savefig(\n os.path.join(models_base_path, model.name + \"_loss.pdf\"),\n format=\"pdf\",\n dpi=600,\n )\n else:\n # Load the model\n try:\n model.load()\n except FileNotFoundError:\n print(\"No trained model to load. 
Train one first using --train\")\n\n    if evaluate:\n        print(\"Evaluating\")\n        val_accs, val_losses = model.test()\n        print(LossCollector.format(val_losses))\n        plot_len = 400\n        model.plot_primitive_prediction(\n            \"val\", val_losses[\"ys\"][-plot_len:], val_losses[\"xs\"][-plot_len:]\n        )\n\n        return\n    if evaluate:\n        print(\"Evaluating model...\")\n        val_acc, val_loss = model.test(plot=plot)\n        print(\"Validation acc:\", val_acc.view(-1))\n        print(\"Validation MSE loss: {:.4f}\".format(val_loss))\n        print(\"Mean validation acc: {:.4f}\".format(val_acc.mean().item()))\n        return\n\n    from cargonet.models.predictor import AvgDelayV1Predictor\n    from cargonet.visualization.delays import plot_station_delay_progress\n\n    p = AvgDelayV1Predictor(model=model, dataset=dataset)\n\n    for d, sample in enumerate(dataset[:1]):\n        pred = model.predict(sample)\n        for s in range(0, 3):\n            # continue\n            plt.plot(\n                range(0, 10),\n                sample.x[:, s, 0].cpu().detach().numpy(),\n                color=\"black\",\n            )\n            plt.plot(\n                range(10, 12),\n                sample.y[:, s, 0].repeat(2, 1).cpu().detach().numpy(),\n                color=\"blue\",\n                linestyle=\"solid\",\n            )\n            plt.plot(\n                range(10, 12),\n                pred.T[s, 0].repeat(2, 1).cpu().detach().numpy(),\n                color=\"red\",\n                linestyle=\"dashed\",\n            )\n        plt.show()\n\n    return\n\n    node_max = 1000\n    node_batch = 1000\n    time_batch = 1000\n    for b in range(0, dataset.number_of_nodes, node_batch):\n        for t in range(0, len(dataset), time_batch):\n            if node_max <= b:\n                return\n\n            ds = dataset[t : t + time_batch]\n            # Ground truth\n            station_delays = torch.zeros(time_batch, node_batch, dtype=torch.float)\n            print(station_delays.shape)\n            for d, sample in enumerate(ds):\n                station_delays[d] = sample.x.view(-1)[b : b + node_batch].detach()\n\n            plt.plot(station_delays[:, i].cpu().detach().numpy(), color=\"red\")\n            plt.show()\n\n            # Predict\n            preds = p.compare_predictions(\n                dataset=ds, b=b, time_batch=time_batch, node_batch=node_batch\n            )\n\n            for edge, i in dataset.mapping.items():\n                u, v = edge\n\n                i -= node_batch\n                if not i in range(node_batch):\n                    continue\n\n                test = station_delays[:, i]\n                print(test.mean(), test.min(), test.max())\n                if station_delays[:, i].max() <= 0:\n                    continue\n\n                plot_station_delay_progress(\n                    u,\n                    v,\n                    dataset,\n                    timeseries=[\n                        dict(\n                            times=dataset.timerange[t : t + time_batch],\n                            values=station_delays[:, i].cpu().detach().numpy(),\n                            label=\"Ground truth\",\n                            style=\"solid\",\n                            color=\"black\",\n                        ),\n                        dict(\n                            times=dataset.prediction_timerange[t : t + time_batch],\n                            values=preds[:, i].cpu().detach().numpy(),\n                            label=\"Prediction [1]\",\n                            style=\"dashed\",\n                            color=\"blue\",\n                        ),\n                    ],\n                )\n","repo_name":"romnn/rail-stgcnn","sub_path":"cargonet/models/activeroutesv1.py","file_name":"activeroutesv1.py","file_ext":"py","file_size_in_byte":14425,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"16951769562","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom bcc import BPF\nfrom ctypes import *\n\ndef encode_dns(name):\n    if len(name) + 1 > 255:\n        raise Exception(\"DNS Name too long.\")\n    b = bytearray()\n    for element in name.split('.'):\n        sublen = len(element)\n        if sublen > 63:\n            raise ValueError('DNS label %s is too long' % element)\n        b.append(sublen)\n        b.extend(element.encode('ascii'))\n    b.append(0) # Add 0-len octet label for the root server\n    return b\n\n\ndef add_entry(table, value):\n    key = table.Key()\n    key_len = len(key.p)\n    name_buffer = encode_dns(value)\n    # Pad the buffer with null bytes if it is too short\n    name_buffer.extend((0,) * 
(key_len - len(name_buffer)))\n    key.p = (c_ubyte * key_len).from_buffer(name_buffer)\n    leaf = table.Leaf()\n    leaf.p = (c_ubyte * 4).from_buffer(bytearray(4))\n    table[key] = leaf\n\ntext = \"\"\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nstruct Key {\n    unsigned char p[255];\n};\n\nstruct Leaf {\n    // Not really needed in this example\n    unsigned char p[4];\n};\n\nstruct dns_char_t\n{\n    char c;\n} BPF_PACKET_HEADER;\n\nBPF_HASH(test, struct Key, struct Leaf, 128);\n\nint kprobe__sys_clone(void *ctx) {\n    struct Key key = {};\n    u16 i = 0;\n    u8 *cursor = 0;\n    struct dns_char_t *c;\n    \n    struct Leaf * lookup_leaf = test.lookup(&key);\n    \n    if(lookup_leaf) {\n        bpf_trace_printk(\"%s\\\\n\", &lookup_leaf);\n        return -1;\n    }\n    // fall through when the key is not present in the map\n    return 0;\n}\n\"\"\"\n\ndns_list = [\"foo.bar\",\"abcd.com\"]\n\nbpf = BPF(text = text, debug=0)\n\ntest = bpf.get_table(\"test\")\n\nfor e in dns_list:\n    print(\">>>> Adding map entry: \", e)\n    add_entry(test, e)\n    print(\"entry added\")\n    bpf.trace_print()","repo_name":"notmorpheus/DDoS_Processor","sub_path":"usertokerntests/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"10661355113","text":"## @package unittests.testRadHydroMMS\n#  Contains unittest class to test an MMS problem with the full\n#  radiation-hydrodynamics scheme\n\n# add source directory to module search path\nimport sys\nsys.path.append('../src')\n\n# symbolic math packages\nfrom sympy import symbols, exp, sin, pi, sympify\nfrom sympy.utilities.lambdify import lambdify\n\n# numpy\nimport numpy as np\n\n# unit test package\nimport unittest\n\n# local packages\nfrom createMMSSourceFunctions import createMMSSourceFunctionsRadHydro\nfrom mesh import Mesh\nfrom hydroState import HydroState\nfrom radiation import Radiation\nfrom plotUtilities import plotHydroSolutions, plotTemperatures, plotRadErg\nfrom utilityFunctions import computeRadiationVector, computeAnalyticHydroSolution\nfrom crossXInterface import ConstantCrossSection\nfrom transient import runNonlinearTransient\nfrom hydroBC import HydroBC\nfrom radBC import RadBC\nimport globalConstants as GC\n\n## Derived unittest class to test the MMS source creator functions\n#\nclass TestRadHydroMMS(unittest.TestCase):\n    def setUp(self):\n        pass\n    def tearDown(self):\n        pass\n    def test_RadHydroMMS(self):\n        \n        # slope limiter: choices are:\n        # none  step  minmod  double-minmod  superbee  minbee  vanleer\n        slope_limiter = 'none' \n        \n        # number of elements\n        n_elems = 50\n\n        # end time\n        t_end = 0.1\n\n        # choice of solutions for hydro\n        hydro_case = \"linear\" # constant linear exponential\n        # choice of solutions for radiation\n        rad_case = \"zero\" # zero constant sin\n\n        # declare symbolic variables\n        x, t, alpha, c = symbols('x t alpha c')\n        \n        # create solution for thermodynamic state and flow field\n        if hydro_case == \"constant\":\n            rho = sympify('4.0')\n            u = sympify('1.2')\n            E = sympify('10.0')\n        elif hydro_case == \"linear\":\n            rho = 1 + x - t\n            u = sympify('1')\n            E = 5 + 5*(x - 0.5)**2\n        elif hydro_case == \"exponential\":\n            rho = exp(x+t)+5\n            u = exp(-x)*sin(t) - 1\n            E = 10*exp(x+t)\n        else:\n            raise NotImplementedError(\"Invalid hydro test case\")\n        \n        # create solution for radiation field\n        if rad_case == \"zero\":\n            psim = sympify('0')\n            psip = sympify('0')\n        elif rad_case == \"constant\":\n            psim = 50*c\n            psip = 50*c\n        elif rad_case == \"sin\":\n            rad_scale = 50*c\n            psim = rad_scale*2*t*sin(pi*(1-x))+10*c\n            psip = 
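# A quick, self-contained check of the wire format produced by encode_dns() above:
# each label is length-prefixed and the full name ends with a zero octet
# (an illustrative assertion, not part of the original loader):
assert encode_dns("foo.bar") == bytearray(b"\x03foo\x03bar\x00")
# The 255-byte check at the top matches the DNS limit on full names, and the
# 63-byte label check matches the per-label limit.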
rad_scale*t*sin(pi*x)+10*c\n else:\n raise NotImplementedError(\"Invalid radiation test case\")\n \n # numeric values\n alpha_value = 0.01\n cv_value = 1.0\n gamma_value = 1.4\n sig_s = 1.0\n sig_a = 1.0\n #sig_a = 0.0\n \n # create MMS source functions\n rho_src, mom_src, E_src, psim_src, psip_src = createMMSSourceFunctionsRadHydro(\n rho = rho,\n u = u,\n E = E,\n psim = psim,\n psip = psip,\n sigma_s_value = sig_s,\n sigma_a_value = sig_a,\n gamma_value = gamma_value,\n cv_value = cv_value,\n alpha_value = alpha_value,\n display_equations = False)\n\n # create functions for exact solutions\n substitutions = dict()\n substitutions['alpha'] = alpha_value\n substitutions['c'] = GC.SPD_OF_LGT\n rho = rho.subs(substitutions)\n u = u.subs(substitutions)\n mom = rho*u\n E = E.subs(substitutions)\n psim = psim.subs(substitutions)\n psip = psip.subs(substitutions)\n rho_f = lambdify((symbols('x'),symbols('t')), rho, \"numpy\")\n u_f = lambdify((symbols('x'),symbols('t')), u, \"numpy\")\n mom_f = lambdify((symbols('x'),symbols('t')), mom, \"numpy\")\n E_f = lambdify((symbols('x'),symbols('t')), E, \"numpy\")\n psim_f = lambdify((symbols('x'),symbols('t')), psim, \"numpy\")\n psip_f = lambdify((symbols('x'),symbols('t')), psip, \"numpy\")\n \n # create uniform mesh\n width = 1.0\n mesh = Mesh(n_elems, width)\n\n # compute radiation IC\n psi_IC = computeRadiationVector(psim_f, psip_f, mesh, t=0.0)\n rad_IC = Radiation(psi_IC)\n\n # compute radiation BC; assumes BC is independent of time\n psi_left = psip_f(x=0.0, t=0.0)\n psi_right = psim_f(x=width, t=0.0)\n\n #Create Radiation BC object\n rad_BC = RadBC(mesh, \"dirichlet\", psi_left=psi_left, psi_right=psi_right)\n\n # compute hydro IC\n hydro_IC = computeAnalyticHydroSolution(mesh,t=0.0,\n rho=rho_f, u=u_f, E=E_f, cv=cv_value, gamma=gamma_value)\n\n # create hydro BC\n hydro_BC = HydroBC(bc_type='dirichlet', mesh=mesh, rho_BC=rho_f,\n mom_BC=mom_f, erg_BC=E_f)\n \n # create cross sections\n cross_sects = [(ConstantCrossSection(sig_s, sig_s+sig_a),\n ConstantCrossSection(sig_s, sig_s+sig_a))\n for i in xrange(mesh.n_elems)]\n\n # if run standalone, then be verbose\n if __name__ == '__main__':\n verbosity = 2\n else:\n verbosity = 0\n\n # run the rad-hydro transient\n rad_new, hydro_new = runNonlinearTransient(\n mesh = mesh,\n problem_type = 'rad_hydro',\n dt_option = 'CFL',\n #dt_option = 'constant',\n CFL = 0.5,\n #dt_constant = 0.002,\n slope_limiter = slope_limiter,\n time_stepper = 'BDF2',\n use_2_cycles = True,\n t_start = 0.0,\n t_end = t_end,\n rad_BC = rad_BC,\n cross_sects = cross_sects,\n rad_IC = rad_IC,\n hydro_IC = hydro_IC,\n hydro_BC = hydro_BC,\n mom_src = mom_src,\n E_src = E_src,\n rho_src = rho_src,\n psim_src = psim_src,\n psip_src = psip_src,\n verbosity = verbosity,\n check_balance = False)\n\n # plot\n if __name__ == '__main__':\n\n # compute exact hydro solution\n hydro_exact = computeAnalyticHydroSolution(mesh, t=t_end,\n rho=rho_f, u=u_f, E=E_f, cv=cv_value, gamma=gamma_value)\n\n # plot hydro solution\n plotHydroSolutions(mesh, hydro_new, x_exact=mesh.getCellCenters(),\n exact=hydro_exact)\n\n # compute exact radiation energy\n Er_exact_fn = 1./GC.SPD_OF_LGT*(psim + psip)\n Er_exact = []\n x = mesh.getCellCenters()\n for xi in x:\n substitutions = {'x':xi, 't':t_end}\n Er_exact.append(Er_exact_fn.subs(substitutions))\n\n # plot radiation energy\n plotRadErg(mesh, rad_new.E, exact_Er=Er_exact)\n\n# run main function from unittest module\nif __name__ == '__main__':\n 
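# The test above turns each sympy expression into a fast numeric callable with
# lambdify before handing it to the solver. The pattern in isolation, shown as an
# illustrative doctest-style session using the "linear" case's rho = 1 + x - t:
# >>> from sympy import symbols
# >>> from sympy.utilities.lambdify import lambdify
# >>> x, t = symbols('x t')
# >>> rho_f = lambdify((x, t), 1 + x - t, "numpy")
# >>> rho_f(0.5, 0.1)
# 1.4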
unittest.main()\n\n","repo_name":"joshuahansel/radhydro","sub_path":"trunk/unittests/testRadHydroMMS.py","file_name":"testRadHydroMMS.py","file_ext":"py","file_size_in_byte":6774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42322157461","text":"\n__all__ = (\n    'Stp',\n    )\n\n# import python\nimport operator\n\n# import genie\nfrom genie.decorator import managedattribute\nfrom genie.conf.base.config import CliConfig\nfrom genie.conf.base.base import DeviceFeature, InterfaceFeature\nfrom genie.conf.base.attributes import DeviceSubAttributes,\\\n                                       SubAttributesDict,\\\n                                       AttributesHelper, \\\n                                       KeyedSubAttributes\n# import genie.libs\nfrom genie.conf.base.attributes import InterfaceSubAttributes\n\n\n# Structure\n# Stp\n# +- Device\n#     +- Mode\n#         +- Pvst\n#         |   +- Vlan\n#         |   |   +- Interface\n#         |   +- Interface\n#         +- Pvrstag\n#         |   +- Interface\n#         |       +- Vlan\n#         +- Pvstag\n#         |   +- Interface\n#         |       +- Vlan\n#         +- Mst\n#         |   +- Instance\n#         |       +- Interface\n#         |   +- Interface\n#         +- Mstag\n#             +- Interface\n#                 +- Instance\n\nclass Stp(DeviceFeature, InterfaceFeature):\n\n    # callable to check regexp\n    @staticmethod\n    def test_isregexp(reg):\n        '''Create a transformation function that allows only strings matching\n        the specified regular expression pattern.\n\n        Use with the managedattribute 'type' argument to accept only values\n        that match the pattern (where re.search(reg, value) succeeds).\n\n        Upon success, the resulting transformation function returns the value\n        unchanged.\n\n        Args:\n            reg: Any regexp pattern, such as '\d+'\n\n        Example:\n\n            attr = managedattribute(\n                name='attr',\n                type=managedattribute.test_isregexp('\d+'))\n        '''\n\n        msg = 'Does not match pattern %r.' % (reg,)\n        import re\n\n        def f(value):\n            if not re.search(reg, value):\n                raise ValueError(msg)\n            return value\n\n        return f\n\n    # callable to check increments within a range\n    @staticmethod\n    def test_isincrements_in_range(base, container):\n        '''Create a transformation function that allows only an object\n        that is a multiple of the base number and within the given range\n        of numbers.\n\n        Args:\n            base: Any integer, such as 16, 4096\n            container: Any container of allowed values, such as range(0, 61441)\n\n        Example:\n\n            attr = managedattribute(\n                name='attr',\n                type=managedattribute.test_isincrements_in_range(\n                    base=16, container=range(0, 241)))\n        '''\n\n        msg = 'Not in increments of %r.' % (base,)\n\n        def f(value):\n            if value not in container:\n                raise ValueError('Not in %r.' 
% (container,))\n\n if value%base:\n raise ValueError(msg)\n return value\n\n return f\n\n # add method to managedattribute\n managedattribute.test_isregexp = test_isregexp\n managedattribute.test_isincrements_in_range = test_isincrements_in_range\n\n # device attributes\n bridge_assurance = managedattribute(\n name='bridge_assurance',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n etherchannel_misconfig_guard = managedattribute(\n name='etherchannel_misconfig_guard',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n bpduguard_timeout_recovery = managedattribute(\n name='bpduguard_timeout_recovery',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n loop_guard = managedattribute(\n name='loop_guard',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n bpdu_guard = managedattribute(\n name='bpdu_guard',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n bpdu_filter = managedattribute(\n name='bpdu_filter',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n hold_count = managedattribute(\n name='hold_count',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n # mode mst attributes\n mst_domain = managedattribute(\n name='mst_domain',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n m_max_hop = managedattribute(\n name='m_max_hop',\n default=None,\n type=(None, managedattribute.test_in(range(1, 256))))\n\n m_hello_time = managedattribute(\n name='m_hello_time',\n default=None,\n type=(None, managedattribute.test_in(range(1, 11))))\n\n m_max_age = managedattribute(\n name='m_max_age',\n default=None,\n type=(None, managedattribute.test_in(range(6, 41))))\n\n m_forwarding_delay = managedattribute(\n name='m_forwarding_delay',\n default=None,\n type=(None, managedattribute.test_in(range(4, 31))))\n\n mst_id = managedattribute(\n name='mst_id',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n \n m_vlans = managedattribute(\n name='m_vlans',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n \n m_name = managedattribute(\n name='m_name',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n m_revision = managedattribute(\n name='m_revision',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n m_bridge_priority = managedattribute(\n name='m_bridge_priority',\n default=None,\n type=(None, managedattribute.test_isincrements_in_range(\n base=4096, container=range(0, 61441))))\n\n m_inst_if_cost = managedattribute(\n name='m_inst_if_cost',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n m_inst_if_port_priority = managedattribute(\n name='m_inst_if_port_priority',\n default=None,\n type=(None, managedattribute.test_isincrements_in_range(\n base=16, container=range(0, 241))))\n \n m_if_edge_port = managedattribute(\n name='m_if_edge_port',\n default=None,\n type=(None, managedattribute.test_in(['edge_enable','edge_disable','edge_auto'])))\n \n m_if_link_type = managedattribute(\n name='m_if_link_type',\n default=None,\n type=(None, managedattribute.test_in(['p2p','shared','auto'])))\n \n m_if_guard = managedattribute(\n name='m_if_guard',\n default=None,\n type=(None, managedattribute.test_in(['root','loop', 'none'])))\n\n m_if_bpdu_guard = managedattribute(\n name='m_if_bpdu_guard',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n m_if_bpdu_filter = managedattribute(\n name='m_if_bpdu_filter',\n default=None,\n type=(None, 
managedattribute.test_istype(bool)))\n\n m_if_hello_time = managedattribute(\n name='m_if_hello_time',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n # mode mstag attributes\n mag_domain = managedattribute(\n name='mag_domain',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n mag_if_name = managedattribute(\n name='mag_if_name',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n mag_if_revision = managedattribute(\n name='mag_if_revision',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n mag_if_bridge_id = managedattribute(\n name='mag_if_bridge_id',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n \n mag_id = managedattribute(\n name='mag_id',\n default=None,\n type=(None, managedattribute.test_in(range(0, 4095))))\n\n mag_if_root_id = managedattribute(\n name='mag_if_root_id',\n default=None,\n type=(None, managedattribute.test_isregexp('\\w+\\.\\w+\\.\\w+')))\n \n mag_if_vlans = managedattribute(\n name='mag_if_vlans',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n \n mag_if_priority = managedattribute(\n name='mag_if_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 61441))))\n\n mag_if_root_priority = managedattribute(\n name='mag_if_root_priority',\n default=None,\n type=(None, managedattribute.test_istype(int)))\n\n # attribtues for pvst mode\n pvst_id = managedattribute(\n name='pvst_id',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n p_max_age = managedattribute(\n name='p_max_age',\n default=None,\n type=(None, managedattribute.test_in(range(6, 41))))\n\n p_hold_count = managedattribute(\n name='p_hold_count',\n default=None,\n type=(None, managedattribute.test_in(range(1, 11))))\n\n p_forwarding_delay = managedattribute(\n name='p_forwarding_delay',\n default=None,\n type=(None, managedattribute.test_in(range(4, 31))))\n\n vlan_id = managedattribute(\n name='vlan_id',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n v_hello_time = managedattribute(\n name='v_hello_time',\n default=None,\n type=(None, managedattribute.test_in(range(1, 11))))\n\n v_max_age = managedattribute(\n name='v_max_age',\n default=None,\n type=(None, managedattribute.test_in(range(6, 41))))\n\n v_forwarding_delay = managedattribute(\n name='v_forwarding_delay',\n default=None,\n type=(None, managedattribute.test_in(range(4, 31))))\n\n v_bridge_priority = managedattribute(\n name='v_bridge_priority',\n default=None,\n type=(None, managedattribute.test_isincrements_in_range(\n base=4096, container=range(0, 61441))))\n\n v_interface = managedattribute(\n name='v_interface',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n \n v_if_cost = managedattribute(\n name='v_if_cost',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n v_if_port_priority = managedattribute(\n name='v_if_port_priority',\n default=None,\n type=(None, managedattribute.test_isincrements_in_range(\n base=16, container=range(0, 241))))\n\n p_if_edge_port = managedattribute(\n name='p_if_edge_port',\n default=None,\n type=(None, managedattribute.test_in(['edge_enable','edge_disable','edge_auto'])))\n\n p_if_link_type = managedattribute(\n name='p_if_link_type',\n default=None,\n type=(None, managedattribute.test_in(['p2p','shared','auto'])))\n\n p_if_guard = managedattribute(\n name='p_if_guard',\n default=None,\n type=(None, managedattribute.test_in(['root','loop','none'])))\n\n p_if_bpdu_guard = managedattribute(\n 
name='p_if_bpdu_guard',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n p_if_bpdu_filter = managedattribute(\n name='p_if_bpdu_filter',\n default=None,\n type=(None, managedattribute.test_istype(bool)))\n\n p_if_hello_time = managedattribute(\n name='p_if_hello_time',\n default=None,\n type=(None, managedattribute.test_in([1, 2])))\n\n # attributes for mode pvrstag\n prag_domain = managedattribute(\n name='prag_domain',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n prag_if_v_root_priority = managedattribute(\n name='prag_if_v_root_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 61441))))\n\n prag_if_v_root_id = managedattribute(\n name='prag_if_v_root_id',\n default=None,\n type=(None, managedattribute.test_isregexp('\\w+\\.\\w+\\.\\w+')))\n \n prag_if_v_root_cost = managedattribute(\n name='prag_if_v_root_cost',\n default=None,\n type=(None, managedattribute.test_in(range(0, 4294967296))))\n \n prag_if_v_priority = managedattribute(\n name='prag_if_v_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 61441))))\n\n prag_if_v_bridge_id = managedattribute(\n name='prag_if_v_bridge_id',\n default=None,\n type=(None, managedattribute.test_isregexp('\\w+\\.\\w+\\.\\w+')))\n\n prag_if_v_port_priority = managedattribute(\n name='prag_if_v_port_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 241))))\n\n prag_if_v_max_age = managedattribute(\n name='prag_if_v_max_age',\n default=None,\n type=(None, managedattribute.test_in(range(6, 41))))\n\n prag_if_v_hello_time = managedattribute(\n name='prag_if_v_hello_time',\n default=None,\n type=(None, managedattribute.test_in([1,2])))\n\n # attributes for mode pvstag\n pag_domain = managedattribute(\n name='pag_domain',\n default=None,\n type=(None, managedattribute.test_istype(str)))\n\n pag_if_v_root_priority = managedattribute(\n name='pag_if_v_root_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 65536))))\n\n pag_if_v_root_id = managedattribute(\n name='pag_if_v_root_id',\n default=None,\n type=(None, managedattribute.test_isregexp('\\w+\\.\\w+\\.\\w+')))\n \n pag_if_v_root_cost = managedattribute(\n name='pag_if_v_root_cost',\n default=None,\n type=(None, managedattribute.test_in(range(0, 4294967296))))\n \n pag_if_v_priority = managedattribute(\n name='pag_if_v_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 65536))))\n\n pag_if_v_bridge_id = managedattribute(\n name='pag_if_v_bridge_id',\n default=None,\n type=(None, managedattribute.test_isregexp('\\w+\\.\\w+\\.\\w+')))\n\n pag_if_v_port_priority = managedattribute(\n name='pag_if_v_port_priority',\n default=None,\n type=(None, managedattribute.test_in(range(0, 256))))\n\n pag_if_v_max_age = managedattribute(\n name='pag_if_v_max_age',\n default=None,\n type=(None, managedattribute.test_in(range(6, 41))))\n\n pag_if_v_hello_time = managedattribute(\n name='pag_if_v_hello_time',\n default=None,\n type=(None, managedattribute.test_in([1,2])))\n\n\n class DeviceAttributes(DeviceSubAttributes):\n\n class ModeAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.mode = key\n super().__init__(parent)\n\n mode = property(operator.attrgetter('_mode'))\n\n @mode.setter\n def mode(self, d):\n assert d in ['mstp', 'mstag', 'pvst', 'rapid-pvst', 'pvrstag', 'pvstag'], \\\n \"should be 'mstp', 'mstag', 'pvst', 'rapid-pvst', 'pvrstag', 'pvstag' \"\n self._mode = d\n\n # ---------------\n # mode MST\n # 
---------------\n class MstAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.mst_domain = key\n super().__init__(parent)\n\n # +- Mst\n # | +- Interface\n class InterfaceAttributes(InterfaceSubAttributes):\n pass\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n # +- Mst\n # | +- Instance\n # | +- Interface\n class InstanceAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.mst_id = key\n super().__init__(parent)\n\n class InterfaceAttributes(InterfaceSubAttributes):\n pass\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n instance_attr = managedattribute(\n name='instance_attr',\n read_only=True,\n doc=InstanceAttributes.__doc__)\n\n @instance_attr.initter\n def instance_attr(self):\n return SubAttributesDict(self.InstanceAttributes, parent=self)\n\n mst_attr = managedattribute(\n name='mst_attr',\n read_only=True,\n doc=MstAttributes.__doc__)\n\n @mst_attr.initter\n def mst_attr(self):\n return SubAttributesDict(self.MstAttributes, parent=self)\n\n # ---------------\n # mode Mstag\n # ---------------\n class MstagAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.mag_domain = key\n super().__init__(parent)\n\n # +- Mstag\n # | +- Interface\n # | +- Instance\n class InterfaceAttributes(InterfaceSubAttributes):\n\n class InstanceAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.mag_id = key\n super().__init__(parent)\n\n instance_attr = managedattribute(\n name='instance_attr',\n read_only=True,\n doc=InstanceAttributes.__doc__)\n\n @instance_attr.initter\n def instance_attr(self):\n return SubAttributesDict(self.InstanceAttributes, parent=self)\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n mstag_attr = managedattribute(\n name='mstag_attr',\n read_only=True,\n doc=MstagAttributes.__doc__)\n\n @mstag_attr.initter\n def mstag_attr(self):\n return SubAttributesDict(self.MstagAttributes, parent=self)\n\n\n # ---------------\n # mode Pvst\n # ---------------\n class PvstAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.pvst_id = key\n super().__init__(parent)\n\n # +- Pvst\n # | +- Interface\n class InterfaceAttributes(InterfaceSubAttributes):\n pass\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n # +- Pvst\n # | +- Vlan\n # | +- Interface\n class VlanAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.vlan = key\n super().__init__(parent)\n\n class InterfaceAttributes(InterfaceSubAttributes):\n pass\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n vlan_attr = managedattribute(\n name='vlan_attr',\n 
read_only=True,\n doc=VlanAttributes.__doc__)\n\n @vlan_attr.initter\n def vlan_attr(self):\n return SubAttributesDict(self.VlanAttributes, parent=self)\n\n pvst_attr = managedattribute(\n name='pvst_attr',\n read_only=True,\n doc=PvstAttributes.__doc__)\n\n @pvst_attr.initter\n def pvst_attr(self):\n return SubAttributesDict(self.PvstAttributes, parent=self)\n\n\n # ---------------\n # mode Pvrstag\n # ---------------\n class PvrstagAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.prag_domain = key\n super().__init__(parent)\n\n # +- Pvrstag\n # | +- Interface\n # | +- Vlan\n class InterfaceAttributes(InterfaceSubAttributes):\n \n class VlanAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.prag_vlan = key\n super().__init__(parent)\n\n vlan_attr = managedattribute(\n name='vlan_attr',\n read_only=True,\n doc=VlanAttributes.__doc__)\n\n @vlan_attr.initter\n def vlan_attr(self):\n return SubAttributesDict(self.VlanAttributes, parent=self)\n\n interface_attr = managedattribute(\n name='interface_attr',\n read_only=True,\n doc=InterfaceAttributes.__doc__)\n\n @interface_attr.initter\n def interface_attr(self):\n return SubAttributesDict(self.InterfaceAttributes, parent=self)\n\n pvrstag_attr = managedattribute(\n name='pvrstag_attr',\n read_only=True,\n doc=PvrstagAttributes.__doc__)\n\n @pvrstag_attr.initter\n def pvrstag_attr(self):\n return SubAttributesDict(self.PvrstagAttributes, parent=self)\n\n # ---------------\n # mode Pvstag\n # ---------------\n class PvstagAttributes(PvrstagAttributes):\n def __init__(self, parent, key):\n self.pag_domain = key\n super().__init__(parent)\n\n # +- Pvstag\n # | +- Interface\n # | +- Vlan\n\n pvstag_attr = managedattribute(\n name='pvstag_attr',\n read_only=True,\n doc=PvrstagAttributes.__doc__)\n\n @pvstag_attr.initter\n def pvstag_attr(self):\n return SubAttributesDict(self.PvstagAttributes, parent=self)\n\n\n mode_attr = managedattribute(\n name='mode_attr',\n read_only=True,\n doc=ModeAttributes.__doc__)\n\n @mode_attr.initter\n def mode_attr(self):\n return SubAttributesDict(self.ModeAttributes, parent=self)\n\n device_attr = managedattribute(\n name='device_attr',\n read_only=True,\n doc=DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def build_config(self, devices=None, apply=True, attributes=None,\n **kwargs):\n cfgs = {}\n assert not kwargs, kwargs\n attributes = AttributesHelper(self, attributes)\n\n if devices is None:\n devices = self.devices\n devices = set(devices)\n\n for key, sub, attributes2 in attributes.mapping_items(\n 'device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n\n if apply:\n self.testbed.config_on_devices(cfgs, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, apply=True, attributes=None,\n **kwargs):\n cfgs = {}\n assert not kwargs, kwargs\n attributes = AttributesHelper(self, attributes)\n\n if devices is None:\n devices = self.devices\n devices = set(devices)\n\n for key, sub, attributes2 in attributes.mapping_items(\n 'device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n\n if apply:\n self.testbed.config_on_devices(cfgs, fail_invalid=True)\n else:\n return 
cfgs\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/conf-pkg/src/genie/libs/conf/stp/stp.py","file_name":"stp.py","file_ext":"py","file_size_in_byte":25254,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"2035324136","text":"#!/usr/bin/python2\n\n# -*- coding: utf-8 -*-\n\n# watchdog\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.events import FileSystemEvent\n\n# general\nimport pprint\nimport sys\n\n# mine\nfrom archie import *\nfrom extract import *\nfrom settings import *\n\npp = pprint.PrettyPrinter(indent=4)\n\n\n\nclass MyHandler(FileSystemEventHandler):\n\n def on_modified(self, event):\n if event.src_path.endswith('.pdf'):\n output_filename = hl2md(event.src_path, md_folder)\n print(\"changes, output filename\")\n print(output_filename)\n got_json = process(output_filename)\n pp.pprint(got_json)\n bibkey = got_json['text'][0]['Highlight'][0]['bib']\n del_by_bibkey(es_index, bibkey)\n post2es(got_json)\n else:\n pass\n\n def on_created(self, event):\n if event.src_path.endswith('.pdf'):\n output_filename = hl2md(event.src_path, md_folder)\n print(\"changes, output filename\")\n print(output_filename)\n got_json = process(output_filename)\n pp.pprint(got_json)\n post2es(got_json)\n else:\n pass\n\nif __name__ == '__main__':\n print(\"---init main changes_2018---\")\n args = text_folder\n\n observer = Observer()\n observer.schedule(MyHandler(), path=args if args else '.')\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n print(\"Watchdog watching ...\")\n","repo_name":"uree/colourcat4000","sub_path":"changes.py","file_name":"changes.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10392707591","text":"#!/usr/bin/python3\n\"\"\"\nThis is the test suite for the FileStorage class of the model\n\"\"\"\nimport unittest\nimport os\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\n\n\nclass TestFileStorage(unittest.TestCase):\n def setUp(self):\n self.file_path = \"file.json\"\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n self.storage = FileStorage()\n\n def tearDown(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n\n def test_all(self):\n all_objects = self.storage.all()\n self.assertIsInstance(all_objects, dict)\n self.assertEqual(len(all_objects), 0)\n\n def test_new(self):\n obj = BaseModel()\n self.storage.new(obj)\n all_objects = self.storage.all()\n self.assertEqual(len(all_objects), 1)\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.assertIn(key, all_objects)\n\n def test_save(self):\n all_objects = self.storage.all()\n initial_length = len(all_objects)\n\n obj = BaseModel()\n self.storage.new(obj)\n self.storage.save()\n\n all_objects = self.storage.all()\n self.assertEqual(len(all_objects), initial_length + 1)\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.assertIn(key, all_objects)\n\n def test_reload(self):\n obj = BaseModel()\n self.storage.new(obj)\n self.storage.save()\n self.storage.reload()\n all_objects = self.storage.all()\n self.assertEqual(len(all_objects), 1)\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.assertIn(key, all_objects)\n\n def 
test_reload_file_not_found(self):\n self.storage.reload()\n all_objects = self.storage.all()\n self.assertIsInstance(all_objects, dict)\n self.assertEqual(len(all_objects), 0)\n self.assertEqual(FileStorage._FileStorage__objects, {})\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"T-sosthenese/AirBnB_clone","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4751258231","text":"# Heapsort implementation\r\n\r\nimport math\r\n\r\n# Returns index of right child\r\ndef right(arr, index):\r\n return math.floor((2 * index) + 2)\r\n\r\n# Returns index of left child\r\ndef left(arr, index):\r\n return math.floor((2 * index) + 1)\r\n\r\n# Returns index of parent node\r\ndef parent(arr, index):\r\n return math.floor((index - 1) / 2)\r\n\r\ndef heapify(arr, index):\r\n parent_node = arr[parent(arr, index)]\r\n child_node = arr[index]\r\n\r\n print(\"\\nHeapifying at index {}\".format(index))\r\n print(\"-> Comparing parent ({}) vs child ({})\".format(parent_node, child_node))\r\n\r\n # Swap with the parent node if it is less than the child node\r\n if parent_node < child_node:\r\n arr[parent(arr, index)], arr[index] = arr[index], arr[parent(arr, index)] # swap\r\n print(\"-> Swapped parent and child node\")\r\n print(\"-> Parent is now {} and child is now {}\".format(arr[parent(arr, index)], arr[index]))\r\n\r\n# Builds a max heap from given array\r\ndef build_maxheap(arr):\r\n # Traverse array backwards to heapify from bottom up\r\n for i in range(len(arr)-1, 0, -1):\r\n heapify(arr, i)\r\n\r\n# Takes a max heap and fully sorts\r\ndef heapsort(arr):\r\n sorted = []\r\n # Sort from bottom up\r\n for i in range(len(arr)-1, 0, -1):\r\n # Currently, the biggest value in array is the root, so we append that to our sorted list\r\n sorted.append(arr[0])\r\n # Replace root with current index, and re-perform max heap construction in order for root\r\n # to once again be the largest element\r\n arr[0] = arr[i]\r\n del arr[i]\r\n build_maxheap(arr)\r\n return sorted[::-1] # returns ascending sorted list\r\n\r\n# Driver code to demonstrate use\r\ndata = [4, 13, 65, 47, 1, 23, 12, 32, 75, 20, -99, -2012030, 40, 0, 999999999999999, -69, -33, -1]\r\nprint(\"Data before building max heap: {}\".format(data))\r\nbuild_maxheap(data)\r\nprint(\"\\nMax Heap: {}\".format(data))\r\nsorted_data = heapsort(data)\r\nprint(\"\\nSorted Data: {}\".format(sorted_data))\r\n","repo_name":"kylemaestro/heapsort","sub_path":"heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2324273080","text":"import os.path as op\n\nfrom AFQ.utils.path import drop_extension\n\n__all__ = ["Definition", "find_file", "name_from_path"]\n\n\nclass Definition(object):\n '''\n All Definitions should inherit this.\n For a given subject and session within the API, the definition is used\n to create a given image or map.\n Definitions have an init function which the user uses to specify\n how they want the definition to behave.\n The find_path function is called by the AFQ API.\n The API calls find_path to let the definition find relevant files\n for the given subject and session.\n '''\n\n def __init__(self):\n raise NotImplementedError("Please implement an __init__ method")\n\n def 
find_path(self, bids_layout, from_path, subject, session):\n raise NotImplementedError(\"Please implement a find_path method\")\n\n def str_for_toml(self):\n \"\"\"\n Uses __init__ in str_for_toml to make string that will instantiate\n itself. Assumes object will have attributes of same name as\n __init__ args. This is important for reading/writing definitions\n as arguments to config files.\n \"\"\"\n return type(self).__name__\\\n + \"(\"\\\n + _arglist_to_string(\n self.__init__.__code__.co_varnames,\n get_attr=self)\\\n + ')'\n\n\ndef _arglist_to_string(args, get_attr=None):\n '''\n Helper function\n Takes a list of arguments and unfolds them into a string.\n If get_attr is not None, it will be used to get the attribute\n corresponding to each argument instead.\n '''\n to_string = \"\"\n for arg in args:\n if arg == \"self\":\n continue\n if get_attr is not None:\n arg = getattr(get_attr, arg)\n if isinstance(arg, Definition):\n arg = arg.str_for_toml()\n elif isinstance(arg, str):\n arg = f\"\\\"{arg}\\\"\"\n elif isinstance(arg, list):\n arg = f\"[{_arglist_to_string(arg)}]\"\n to_string = to_string + str(arg) + ', '\n if to_string[-2:] == ', ':\n to_string = to_string[:-2]\n return to_string\n\n\ndef name_from_path(path):\n file_name = op.basename(path) # get file name\n file_name = drop_extension(file_name) # remove extension\n if \"-\" in file_name:\n file_name = file_name.split(\"-\")[-1] # get suffix if exists\n return file_name\n\n\ndef find_file(bids_layout, path, filters, suffix, session, subject,\n extension=\".nii.gz\"):\n \"\"\"\n Helper function\n Generic calls to get_nearest to find a file\n \"\"\"\n if \"extension\" not in filters:\n filters[\"extension\"] = extension\n if \"suffix\" not in filters:\n filters[\"suffix\"] = suffix\n\n # First, try to match the session.\n nearest = bids_layout.get_nearest(\n path,\n **filters,\n session=session,\n subject=subject,\n full_search=True,\n strict=False,\n )\n\n if nearest is None:\n # If that fails, loosen session restriction\n nearest = bids_layout.get_nearest(\n path,\n **filters,\n subject=subject,\n full_search=True,\n strict=False,\n )\n\n if nearest is None:\n # If nothing is found still, raise an error\n raise ValueError((\n \"No file found with these parameters:\\n\"\n f\"suffix: {suffix},\\n\"\n f\"session (searched with and without): {session},\\n\"\n f\"subject: {subject},\\n\"\n f\"filters: {filters},\\n\"\n f\"near path: {path},\\n\"))\n\n path_subject = bids_layout.parse_file_entities(path).get(\n \"subject\", None\n )\n file_subject = bids_layout.parse_file_entities(nearest).get(\n \"subject\", None\n )\n if path_subject != file_subject:\n raise ValueError(\n f\"Expected subject IDs to match for the retrieved image file \"\n f\"and the supplied `from_path` file. 
Got sub-{file_subject} \"\n f\"from image file {nearest} and sub-{path_subject} \"\n f\"from `from_path` file {path}.\"\n )\n\n return nearest\n","repo_name":"yeatmanlab/pyAFQ","sub_path":"AFQ/definitions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"16"} +{"seq_id":"26809196457","text":"# https://leetcode.com/problems/best-time-to-buy-and-sell-stock\n# O(N) T | O(1) S\n\nfrom best_time_to_buy_and_sell_stock_iv import best_time_to_buy_and_sell_stock_iv\n\n\ndef best_time_to_buy_and_sell_stock_sliding_window(prices: list[int]) -> int:\n max_profit = 0\n buy_day = 0\n for sell_day in range(1, len(prices)):\n profit = prices[sell_day] - prices[buy_day]\n if profit < 0:\n buy_day = sell_day\n else:\n max_profit = max(max_profit, profit)\n return max_profit\n\n\ndef best_time_to_buy_and_sell_stock_kadane_algorithm(prices: list[int]) -> int:\n max_profit = current_profit = 0\n for index in range(1, len(prices)):\n current_profit = max(0, current_profit + prices[index] - prices[index - 1])\n max_profit = max(max_profit, current_profit)\n return max_profit\n\n\ndef best_time_to_buy_and_sell_stock_bottom_up(prices: list[int]) -> int:\n return best_time_to_buy_and_sell_stock_iv(prices, 1)\n","repo_name":"duongleh/data-structures-and-algorithms","sub_path":"Dynamic Programming/best_time_to_buy_and_sell_stock.py","file_name":"best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1729454898","text":"import numpy as np\nimport cv2 \n\ncam = cv2.VideoCapture(0)\nface = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nknown_distance = 40 # distance to the camera\nknown_width = 12 # average width of your face\n\ndef focal_length(measured_distance, real_width, width_in_rf_image):\n focal_length = (width_in_rf_image * measured_distance)/ real_width\n return focal_length\n\ndef distance_finder(Focal_length,real_face_width,face_width_in_frame):\n distance = (real_face_width* Focal_length)/ face_width_in_frame \n return distance\n\ndef face_data(img):\n fotogri = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = face.detectMultiScale(fotogri,1.3,2)\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),1)\n global face_width \n face_width = w\n return face_width\n\nref_image = cv2.imread('fotom.jpg')\nref_image_face_width = face_data(ref_image)\nfocal_length_found = focal_length(known_distance,known_width,ref_image_face_width)\nfonts = cv2.FONT_HERSHEY_COMPLEX\n\n\n\nwhile True: \n _,goruntu = cam.read()\n goruntu = cv2.flip(goruntu,1)\n\n face_width_in_frame = face_data(goruntu)\n if face_width_in_frame !=0:\n Distance = distance_finder(focal_length_found,known_width,face_width_in_frame)\n cv2.putText(goruntu,f\"Distance = {Distance}\",(50,50),fonts,1,(0,0,255),2)\n\n cv2.imshow(\"video\",goruntu)\n if cv2.waitKey(30) & 0xFF == ord('q'):\n break\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"mustafakendiguzel/OpenCV-Python-Face-Detection-Distance-Estimation-","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6875631930","text":"from email.mime import image\nfrom unicodedata import category\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom 
django.http.response import Http404\nimport datetime as dt\n\nfrom phoros.models import Location, Category, Image\n\n# Create your views here.\ndef index(request):\n location = Location.objects.all() \n category = Category.objects.all()\n image = Image.objects.all()\n \n return render(request, 'index.html', {"location":location, "category":category, "image":image})\n\ndef location(request,location):\n image=Image.objects.filter(location=location)\n\n return render(request,'location.html',{"image":image})\n\ndef category(request,category):\n\n image=Image.objects.filter(category=category)\n category = Category.objects.all()\n\n return render(request,'category.html',{"category":category, "image":image})\n\ndef search_results(request):\n \n if 'category' in request.GET and request.GET["category"]:\n search_term = request.GET.get("category")\n searched_category = Category.search_by_category(search_term)\n message = f"{search_term}"\n \n return render(request,"search.html", {'message':message,'category':searched_category})\n else:\n message = "Please input a valid category"\n return render(request,'search.html', {'message':message})\n\n\ndef get_image_by_id(request,image_id):\n try:\n image= Image.objects.get(id=image_id)\n except Image.DoesNotExist:\n raise Http404() \n return render(request,'display.html',{'image':image})\n\ndef filter_by_location(request,location):\n image= Image.filter_by_location(location)\n \n return render(request,'location.html',{'image':image})","repo_name":"Andrewowalla/per-gallery","sub_path":"phoros/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7292062769","text":"import timeit\n\nrepetition = 1000\n\ndef time_elapsed(label, setup, stmt):\n _elapsed = timeit.timeit(stmt, setup, number=repetition) * 10**6 / repetition\n print(label, \"{:.3f} μs\".format(_elapsed))\n return _elapsed\n\nsetup = \"\"\"import pure_python\"\"\"\nstmt = \"pure_python.get_neighborhood((10, 10), True, True, 10, False, 30, 30)\"\nelapsed_default = time_elapsed(\"default\", setup, stmt)\n\nsetup = \"\"\"\ndef empty():\n return\n\"\"\"\nstmt = \"empty()\"\ntime_elapsed(\"python empty\", setup, stmt)\n\nsetup = \"\"\"\nimport cython_grid\ngrid2 = cython_grid.Grid(30, 30)\n\"\"\"\nstmt = \"grid2.get_neighborhood((10, 10), True, 10)\"\nelapsed_cython_ndarray = time_elapsed(\"cython np.array\", setup, stmt)\nprint(\" faster\", round(elapsed_default / elapsed_cython_ndarray, 2))\n\nsetup = \"import tortar\"\nstmt = \"tortar.compute_neighborhood((10, 10), True, True, 10, False, 30, 30)\"\ntime_elapsed(\"cython list\", setup, stmt)\n\nsetup = \"from numba_version import get_neighborhood; get_neighborhood(30, 30, (10, 10), True, 10)\"\nstmt = \"get_neighborhood(30, 25, (10, 10), True, 10)\"\ntime_elapsed(\"numba np.array\", setup, stmt)\n\nsetup = \"from numba_version import get_neighborhood_typed_list\"\nstmt = \"get_neighborhood_typed_list(30, 30, (10, 10), True, 10)\"\ntime_elapsed(\"numba typed_list\", setup, stmt)\n\nsetup = \"from cython_array import compute_neighborhood_array\"\nstmt = \"compute_neighborhood_array((10, 10), True, 10, 30, 30)\"\ntime_elapsed(\"cython array\", setup, stmt)\n","repo_name":"rht/mesa-perf","sub_path":"experiments/get_neighborhood/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
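# --- editor's example (illustrative sketch, not one of the dataset records) ---
# The benchmark record above reports mean microseconds per call via timeit.timeit.
# This is a minimal sketch of the same harness using timeit.repeat, which takes the
# minimum over several runs to reduce scheduler noise; the statement below is a
# placeholder, not an API from the records above.
import timeit

def best_us_per_call(stmt, setup="pass", number=1000, repeat=5):
    # timeit.repeat returns one total elapsed time per run; taking min() is the
    # conventional low-noise estimate, converted here to microseconds per call.
    return min(timeit.repeat(stmt, setup, number=number, repeat=repeat)) * 1e6 / number

# usage sketch:
# print("{:.3f} us".format(best_us_per_call("sum(range(100))")))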
+{"seq_id":"11614427487","text":"from datetime import datetime\n\n\n# Create new question with its elements in the data table\n# SQL table title: ID;Submisson Time;View Number;Vote Number;Title;Message;Image\n#\n# @req_form: dictionary from html form\ndef init_question_values(req_form, user_id):\n local_time = datetime.now()\n view_number = \"0\"\n vote_number = \"0\"\n title = req_form[\"title\"]\n data_form_story = req_form[\"story\"]\n image = \"\"\n new_question = [str(local_time)[:-7], view_number, vote_number,\n title, data_form_story, image, user_id]\n return new_question\n\n\n# Create new answer with its elements in the data table\n# SQL table title: ID;Submisson Time;Vote Number;Question ID;Message;Image\n#\n# @req_form: dictionary from html form\n# @question_id: int - index of the question\ndef init_answer_values(message, user_id):\n local_time = datetime.now()\n vote_number = \"0\"\n new_answer = [str(local_time)[:-7], vote_number, message, user_id]\n return new_answer\n\n\n# Create new comment with its elements in the data table\n# SQL table title: ID,Message;Foreign key;Foreign key value;Submission time\n#\n# @req_form: dictionary from html form\n# @path: list\n# @question_id: int - index of the question\ndef init_comment_values(req_form, path, id):\n comment = {'message': '',\n 'foreign_key': '',\n 'foreign_key_value': '',\n 'submission_time': ''}\n comment['message'] = \"'\" + str(req_form['comment']).replace(\"'\", \"''\") + \"'\"\n print(path)\n if \"answer\" in path:\n comment['foreign_key'] = 'answer_id'\n if \"question\" in path:\n comment['foreign_key'] = 'question_id'\n comment['foreign_key_value'] = id\n comment['submission_time'] = \"'\" + str(datetime.now())[:-7] + \"'\"\n\n return comment\n\n\n# Create new user with its elements in the data table\n# SQL table title: Id, User mates name; Reputation; Submission time\n#\n# @req_form: dictionary from html form\ndef init_user_values(req_form):\n user_mates_name = req_form[\"new_user_name\"]\n reputation = 0\n local_time = datetime.now()\n new_user_mates = [user_mates_name, reputation, str(local_time)[:-7]]\n return new_user_mates\n","repo_name":"AndrasKovacs84/mittuDOMAIN-AskMate_project_sql","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"395656150","text":"\nfrom flask_app.config.mysqlconnection import MySQLConnection, connectToMySQL\nfrom flask_app import app\n\nclass Friendship:\n db = 'ijam_schema'\n def __init__(self, data):\n self.friend = data['friend'],\n self.user = data['user'],\n self.user_id = data['user_id'],\n self.friend_id = data['friend_id']\n\n\n# create Friendship\n @classmethod\n def create_friendship(cls, data):\n #use hidden inputs from form to get ids\n query= '''\n Insert INTO friendships (user_id, friend_id)\n VALUES (%(user_id)s, %(friend_id)s)\n ;'''\n\n return connectToMySQL(cls.db).query_db(query, data)\n\n @classmethod\n def display_friendships(cls):\n query= '''\n SELECT users.first_name as user, users2.first_name as friend\n FROM users\n LEFT JOIN friendships ON users.id = friendships.user_id\n LEFT JOIN users as users2 ON users2.id = friendships.friend_id\n ;'''\n# ____either query would work--------\n\n # query = ''' SELECT users.first_name as user, users2.first_name as friend, friendships.* FROM friendships\n # LEFT JOIN users ON users.id = friendships.user_id\n # Left Join users as users2 ON users2.id = friendships.friend_id;'''\n\n 
return connectToMySQL(cls.db).query_db(query)\n\n\n\n\n","repo_name":"Ryannally91/jam-sesh-final-project","sub_path":"flask_app/models/friendship.py","file_name":"friendship.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40573619708","text":"import os\nimport logging\nimport boto3\nfrom dlow._dlow_core import ResourceDownloader\n\n\nclass S3FolderDownloader(ResourceDownloader):\n \"\"\"Downloads a folder from an S3 bucket, reproducing its structure within a local filesystem folder.\n Performs parallel post-download processing when post_download_processes are provided.\"\"\"\n def __init__(self, s3_bucket_name, s3_top_level_source_folder):\n self._s3_bucket_name = s3_bucket_name\n self._s3_top_level_source_folder = s3_top_level_source_folder\n self._s3_client = boto3.client('s3')\n self._s3_resource = boto3.resource('s3')\n\n # Make sure the folder name ends with a slash.\n if not self._s3_top_level_source_folder.endswith('/'):\n self._s3_top_level_source_folder += '/'\n # Make sure the folder path doesn't start with a slash.\n self._s3_top_level_source_folder = self._s3_top_level_source_folder.lstrip('/')\n\n def _download_s3_dir(self, dest_dir, logger, recursive=True, s3_current_source_folder=None):\n \"\"\"Downloads all objects in the given S3 bucket and source folder into the given local destination folder.\n If recursive=True, the folder structure of s3_top_level_source_folder will be recreated inside local_dest_folder with copying performed recursively.\"\"\"\n\n s3_current_source_folder = self._s3_top_level_source_folder if s3_current_source_folder is None else s3_current_source_folder\n paginator = self._s3_client.get_paginator('list_objects')\n for s3_objects_list in paginator.paginate(Bucket=self._s3_bucket_name, Delimiter='/', Prefix=s3_current_source_folder):\n if recursive:\n s3_folders = s3_objects_list.get('CommonPrefixes')\n if s3_folders is not None:\n for subfolder in s3_folders:\n # Recurse. 
Use yield to pop yielded results back up the recursion stack.\n for downloaded_file_path in self._download_s3_dir(dest_dir, logger, recursive=True, s3_current_source_folder=subfolder.get('Prefix')):\n yield downloaded_file_path\n\n # Folders have a key ending with a slash so we filter them out.\n s3_file_objects = [obj for obj in s3_objects_list.get('Contents') or [] if not obj['Key'].endswith('/')]\n for s3_file_object in s3_file_objects:\n s3_object_key = s3_file_object['Key']\n # Here we chop off the path segments of the top level source directory for the destination path.\n # This has the effect of copying only the folder structure from that point inward, rather than the whole bucket folder structure from the top.\n dest_path_of_file = os.path.realpath(os.path.join(dest_dir, s3_object_key[len(self._s3_top_level_source_folder):]))\n dest_folder_of_file = os.path.dirname(dest_path_of_file)\n if not os.path.exists(dest_folder_of_file):\n logger.info('Creating local directory %s' % (dest_folder_of_file,))\n os.makedirs(dest_folder_of_file)\n logger.info('Starting download of object %s from bucket %s to local directory %s' % (s3_object_key, self._s3_bucket_name, dest_path_of_file))\n with open(dest_path_of_file, 'wb') as local_file_handle:\n self._s3_client.download_fileobj(self._s3_bucket_name, s3_object_key, local_file_handle)\n logger.info('Finished download of object %s from bucket %s to local directory %s' % (s3_object_key, self._s3_bucket_name, dest_path_of_file))\n\n yield dest_path_of_file\n\n def iter_downloaded_files(self, dest_dir, logger=logging.getLogger(), recursive=True):\n for downloaded_file_path in self._download_s3_dir(dest_dir, logger, recursive):\n yield downloaded_file_path","repo_name":"samjgalbraith/dlow","sub_path":"dlow/s3/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"44858453450","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().system('pip install bs4')\nget_ipython().system('pip install requests')\n\n\n# In[2]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# # a python program to display all the header tags from wikipedia.org and make data frame.\n# \n\n# In[3]:\n\n\npage=requests.get(\"https://en.wikipedia.org/wiki/Main_Page\")\npage\n\n\n# In[4]:\n\n\nsoup=BeautifulSoup(page.content)\n\nprint(soup.prettify())\n\n\n# In[5]:\n\n\nheadings=soup.find_all('div',class_=\"vector-header-container\")\nheadings\n\n\n# In[6]:\n\n\ntitles=[]\n\nfor title in headings:\n title=title.get_text().replace('\\n', \"\")\n title=title.strip(\" \")\n titles.append(title)\n \ntitles\n\n\n# In[7]:\n\n\nimport pandas as pd\n\n\n# In[8]:\n\n\ndata=pd.DataFrame()\ndata['Heading name']=titles\ndata\n\n\n# # top 50 IMDB movies \n\n# In[9]:\n\n\npage=requests.get(\"https://www.imdb.com/chart/top/?ref_=nv_mv_50\")\npage\n\n\n# In[10]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[11]:\n\n\nmovie=soup.find_all('td',class_=\"titleColumn\")\nmovie\n\n\n# In[12]:\n\n\nnames = []\nfor name in movie:\n name=name.get_text().replace('\\n', \"\")\n name=name.strip(\" \")\n names.append(name)\nnames\n\n\n# In[13]:\n\n\nrate=soup.find_all('td',class_=\"ratingColumn imdbRating\")\nrate\n\n\n# In[14]:\n\n\nratings = []\nfor rating in rate:\n rating=rating.get_text().replace('\\n', \"\")\n rating=rating.strip(\" \")\n ratings.append(rating)\nratings\n\n\n# In[15]:\n\n\nimport pandas as pd\n\n\n# 
In[16]:\n\n\ndata=pd.DataFrame()\ndata['movies name']=names\ndata['ratings']=ratings\ndata.head(50)\n\n\n# # 50 indian movies \n\n# In[17]:\n\n\npage=requests.get(\"https://www.imdb.com/india/top-rated-indian-movies/\")\npage\n\n\n# In[18]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[19]:\n\n\nindmovie=soup.find_all('td',class_=\"titleColumn\")\nindmovie\n\n\n# In[20]:\n\n\nnames = []\nfor name in indmovie:\n name=name.get_text().replace('\\n', \"\")\n name=name.strip(\" \")\n names.append(name)\nnames\n\n\n# In[21]:\n\n\nrate=soup.find_all('td',class_=\"ratingColumn imdbRating\")\nrate\n\n\n# In[22]:\n\n\nratings = []\nfor rating in rate:\n rating=rating.get_text().replace('\\n', \"\")\n rating=rating.strip(\" \")\n ratings.append(rating)\nratings\n\n\n# In[23]:\n\n\ndata=pd.DataFrame()\ndata['movies name']=names\ndata['ratings']=ratings\ndata.head(50)\n\n\n# # python program to display list of respected former presidents of India\n\n# In[24]:\n\n\npage=requests.get(\"https://presidentofindia.nic.in/former-presidents.htm\")\npage\n\n\n# In[25]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[26]:\n\n\nindpresident=soup.find_all('div', class_=\"presidentListing\")\nindpresident\n\n\n# In[27]:\n\n\nnames = []\nfor name in indpresident:\n name=name.get_text().replace('\\n', \"\")\n name=name.strip(\" \")\n names.append(name)\nnames\n\n\n# In[28]:\n\n\npresiname=soup.find_all('h3')\npresiname\n\n\n# In[29]:\n\n\npresiterms=soup.find_all('p')[0:14]\npresiterms\n\n\n# In[30]:\n\n\nnames = []\nfor name in presiname:\n name=name.get_text().replace('\\n', \"\")\n name=name.strip(\" \")\n names.append(name)\nnames\n\n\n# In[31]:\n\n\nterms = []\nfor term in presiterms:\n term=term.get_text().replace('\\n',\"\")\n term=term.strip(\" \")\n terms.append(term)\nterms\n\n\n# In[32]:\n\n\ndata=pd.DataFrame()\ndata['president names']=names\ndata['terms of office']=terms\ndata\n\n\n# # a python program to scrape cricket rankings\n\n# In[33]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/mens/team-rankings/odi\")\npage\n\n\n# In[34]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[35]:\n\n\nteam=soup.find_all('span',class_=\"u-hide-phablet\")\nteam\n\n\n# In[36]:\n\n\nteams = []\nfor oditeam in team:\n oditeam=oditeam.get_text().replace('\\n',\"\")\n oditean=oditeam.strip(\" \")\n teams.append(oditeam)\nteams\n\n\n# In[37]:\n\n\nausmatch=soup.find_all('td',class_=\"rankings-block__banner--matches\")\nausmatch\n\n\n# In[38]:\n\n\naumatches = []\nfor auodimatch in ausmatch:\n auodimatch=auodimatch.get_text().replace('\\n',\"\")\n auodimatch=auodimatch.strip(\" \")\n aumatches.append(auodimatch)\naumatches\n\n\n# In[39]:\n\n\nauspoint=soup.find_all('td',class_=\"rankings-block__banner--points\")\nauspoint\n\n\n# In[40]:\n\n\nratpoints = []\nfor point in auspoint:\n point=point.get_text().replace('\\n',\"\")\n point=point.strip(\" \")\n ratpoints.append(point)\nratpoints\n\n\n# In[41]:\n\n\nausrating=soup.find_all('td',class_=\"rankings-block__banner--rating u-text-right\")\nausrating\n\n\n# In[42]:\n\n\nrataus = []\nfor rat in ausrating:\n rat=rat.get_text().replace('\\n',\"\")\n rat=rat.strip(\" \")\n rataus.append(rat)\nrataus\n\n\n# In[43]:\n\n\nmatch_p=soup.find_all('td',class_=\"table-body__cell u-center-text\")\nmatch_p\n\n\n# In[44]:\n\n\nmatches = []\nfor odimatch in match_p:\n odimatch=odimatch.get_text().replace('\\n',\"\")\n odimatch=odimatch.strip(\" \")\n matches.append(odimatch)\nmatches\n\n\n# 
In[45]:\n\n\nmatrating=soup.find_all('td',class_=\"table-body__cell u-text-right rating\")\nmatrating\n\n\n# In[46]:\n\n\nratings = []\nfor mrating in matrating:\n mrating=mrating.get_text().replace('\\n',\"\")\n mrating=mrating.strip(\" \")\n ratings.append(mrating)\nratings\n\n\n# In[47]:\n\n\ndata=pd.DataFrame()\ndata['team name']=teams\ndata['matches']=matches\ndata['ratings']=ratings\ndata.head(10)\n\n\n# In[48]:\n\n\nlen(matches)\n\n\n# In[49]:\n\n\nlen(ratings)\n\n\n# In[50]:\n\n\nlen(teams)\n\n\n# In[51]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/mens/player-rankings/odi\")\npage\n\n\n# In[52]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[53]:\n\n\nname=soup.find_all('td', class_=\"table-body__cell name\")\nname\n\n\n# In[54]:\n\n\nplayers = []\nfor players_name in name:\n players_name=players_name.get_text().replace('\\n',\"\")\n players_name=players_name.strip(\" \")\n players.append(players_name)\nplayers\n\n\n# In[55]:\n\n\nnation=soup.find_all('td', class_=\"table-body__cell nationality-logo\")\nnation\n\n\n# In[56]:\n\n\nnationality = []\nfor players_nationality in nation:\n players_nationality=players_nationality.get_text().replace('\\n',\"\")\n players_nationality=players_nationality.strip(\" \")\n nationality.append(players_nationality)\nnationality\n\n\n# In[57]:\n\n\nratings_play=soup.find_all('td', class_=\"table-body__cell u-text-right rating\")\nratings_play\n\n\n# In[58]:\n\n\nratings_point = []\nfor players_ratings in ratings_play:\n players_ratings=players_ratings.get_text().replace('\\n',\"\")\n players_ratings=players_ratings.strip(\" \")\n ratings_point.append(players_ratings)\nratings_point\n\n\n# In[59]:\n\n\ndata=pd.DataFrame()\ndata['players_name']=players\ndata['nationality']=nationality\ndata['ratings']=ratings_point\ndata.head(10)\n\n\n# In[60]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/mens/player-rankings/odi/all-rounder\")\npage\n\n\n# In[61]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[62]:\n\n\nall_name=soup.find_all('td', class_=\"table-body__cell rankings-table__name name\")\nall_name\n\n\n# In[63]:\n\n\nallrounders = []\nfor players_all in all_name:\n players_all=players_all.get_text().replace('\\n',\"\")\n players_all=players_all.strip(\" \")\n allrounders.append(players_all)\nallrounders\n\n\n# In[64]:\n\n\nnation=soup.find_all('td', class_=\"table-body__cell nationality-logo rankings-table__team\")\nnation\n\n\n# In[65]:\n\n\nnationalityy = []\nfor players_nation in nation:\n players_nation=players_nation.get_text().replace('\\n',\"\")\n players_nation=players_nation.strip(\" \")\n nationalityy.append(players_nation)\nnationalityy\n\n\n# In[66]:\n\n\nratings=soup.find_all('td', class_=\"table-body__cell rating\")\nratings\n\n\n# In[67]:\n\n\nplayers_rating = []\nfor all_rating in ratings:\n all_rating=all_rating.get_text().replace('\\n',\"\")\n all_rating=all_rating.strip(\" \")\n players_rating.append(all_rating)\nplayers_rating\n\n\n# In[68]:\n\n\ndata=pd.DataFrame()\ndata['players_name']=allrounders\ndata['nationality']=nationalityy\ndata['ratings']=players_rating\ndata.head(10)\n\n\n# # a python program to scrape cricket rankings for womens\n\n# In[69]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/womens/team-rankings/odi\")\npage\n\n\n# In[70]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[71]:\n\n\nteamwood=soup.find_all('span', class_=\"u-show-phablet\")\nteamwood\n\n\n# In[72]:\n\n\nteams = []\nfor womens_team in teamwood:\n 
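# (editor's comment) same scrape-and-clean idiom as the earlier cells:
# take each tag's text, drop embedded newlines, strip spaces, then append.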
womens_team=womens_team.get_text().replace('\\n',\"\")\n womens_team=womens_team.strip(\" \")\n teams.append(womens_team)\nteams\n\n\n# In[73]:\n\n\nwomenrating=soup.find_all('td', class_=\"table-body__cell u-text-right rating\")\nwomenrating\n\n\n# In[74]:\n\n\nteamsra = []\nfor womens_teamrat in womenrating:\n womens_teamrat=womens_teamrat.get_text().replace('\\n',\"\")\n womens_teamrat=womens_teamrat.strip(\" \")\n teamsra.append(womens_teamrat)\nteamsra[0:12]\n\n\n# In[75]:\n\n\nwomenmatches=soup.find_all('td', class_=\"rankings-block__banner--matches\")\nwomenmatches\n\n\n# In[76]:\n\n\ndata=pd.DataFrame()\ndata['players_name']=teams\ndata['nationality']=teamsra\ndata.head(10)\n\n\n# In[ ]:\n\n\nlen(teams)\n\n\n# In[ ]:\n\n\nlen(teamsra)\n\n\n# In[ ]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/womens/player-rankings/odi\")\npage\n\n\n# In[ ]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[ ]:\n\n\nplayerwomen=soup.find_all('td', class_=\"table-body__cell name\")\nplayerwomen\n\n\n# In[ ]:\n\n\nplayername = []\nfor womens_player in playerwomen:\n womens_player=womens_player.get_text().replace('\\n',\"\")\n womens_player=womens_player.strip(\" \")\n playername.append(womens_player)\nplayername[0:28]\n\n\n# In[ ]:\n\n\nnatwomen=soup.find_all('span', class_=\"table-body__logo-text\")\nnatwomen\n\n\n# In[77]:\n\n\nplayernation = []\nfor womens_nation in natwomen:\n womens_nation=womens_nation.get_text().replace('\\n',\"\")\n womens_nation=womens_nation.strip(\" \")\n playernation.append(womens_nation)\nplayernation\n\n\n# In[78]:\n\n\nratwomen=soup.find_all('td', class_=\"table-body__cell u-text-right rating\")\nratwomen\n\n\n# In[79]:\n\n\nplayerrating = []\nfor womens_rating in ratwomen:\n womens_rating=womens_rating.get_text().replace('\\n',\"\")\n womens_rating=womens_rating.strip(\" \")\n playerrating.append(womens_rating)\nplayerrating\n\n\n# In[80]:\n\n\ndata=pd.DataFrame()\ndata['players_name']=playername\ndata['nationality']=playernation\ndata['ratings']=playerrating\ndata.head(10)\n\n\n# In[81]:\n\n\nlen(playername)\n\n\n# In[82]:\n\n\nlen(playernation)\n\n\n# In[83]:\n\n\nlen(playerrating)\n\n\n# In[84]:\n\n\npage=requests.get(\"https://www.icc-cricket.com/rankings/womens/player-rankings/odi/all-rounder\")\npage\n\n\n# In[85]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[90]:\n\n\nal_women=soup.find_all('td', class_=\"table-body__cell rankings-table__name name\")\nal_women\n\n\n# In[91]:\n\n\nalplayer = []\nfor womens_all in al_women:\n womens_all=womens_all.get_text().replace('\\n',\"\")\n womens_all=womens_all.strip(\" \")\n alplayer.append(womens_all)\nalplayer\n\n\n# In[93]:\n\n\nnat_women=soup.find_all('td', class_=\"table-body__cell nationality-logo rankings-table__team\")\nnat_women\n\n\n# In[94]:\n\n\nnatplayer = []\nfor womens_nat in nat_women:\n womens_nat=womens_nat.get_text().replace('\\n',\"\")\n womens_nat=womens_nat.strip(\" \")\n natplayer.append(womens_nat)\nnatplayer\n\n\n# In[95]:\n\n\nrat_women=soup.find_all('td', class_=\"table-body__cell rating\")\nrat_women\n\n\n# In[96]:\n\n\nratplayer = []\nfor womens_rat in rat_women:\n womens_rat=womens_rat.get_text().replace('\\n',\"\")\n womens_rat=womens_rat.strip(\" \")\n ratplayer.append(womens_rat)\nratplayer\n\n\n# In[97]:\n\n\ndata=pd.DataFrame()\ndata['players_name']=alplayer\ndata['nationality']=natplayer\ndata['ratings']=ratplayer\ndata.head(10)\n\n\n# # a python program to scrape mentioned news details \n\n# 
In[98]:\n\n\npage=requests.get(\"https://www.cnbc.com/world/?region=world\")\npage\n\n\n# In[99]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[104]:\n\n\nnews=soup.find_all('div', class_=\"LatestNews-headlineWrapper\")\nnews\n\n\n# In[105]:\n\n\nnews_headings = []\nfor news_lat in news:\n news_lat=news_lat.get_text().replace('\\n',\"\")\n news_lat=news_lat.strip(\" \")\n news_headings.append(news_lat)\nnews_headings\n\n\n# In[103]:\n\n\nnews_time=soup.find_all('time', class_=\"LatestNews-timestamp\")\nnews_time\n\n\n# In[ ]:\n\n\nnews_headings = []\nfor news_lat in news:\n news_lat=news_lat.get_text().replace('\\n',\"\")\n news_lat=news_lat.strip(\" \")\n news_headings.append(news_lat)\nnews_headings\n\n\n# In[107]:\n\n\nnews_link=soup.find_all('div', class_=\"nav-menu-navLinks\")\nnews_link\n\n\n# In[108]:\n\n\nlink = []\nfor latest in news_link:\n latest=latest.get_text().replace('\\n',\"\")\n latest=latest.strip(\" \")\n link.append(latest)\nlink\n\n\n# In[168]:\n\n\ndata=pd.DataFrame()\ndata['time & headings']=news_headings\ndata['link']=link\ndata\n\n\n# # a python program to scrape the details of most downloaded articles from AI\n\n# In[111]:\n\n\npage=requests.get(\"https://www.journals.elsevier.com/artificial-intelligence/most-downloaded-articles\")\npage\n\n\n# In[112]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[115]:\n\n\narticle=soup.find_all('h2', class_=\"sc-1qrq3sd-1 gRGSUS sc-1nmom32-0 sc-1nmom32-1 btcbYu goSKRg\")\narticle\n\n\n# In[116]:\n\n\npaper_title = []\nfor title in article:\n title=title.get_text().replace('\\n',\"\")\n title=title.strip(\" \")\n paper_title.append(title)\npaper_title\n\n\n# In[117]:\n\n\nauthor=soup.find_all('span', class_=\"sc-1w3fpd7-0 dnCnAO\")\nauthor\n\n\n# In[118]:\n\n\nauthor_title = []\nfor writer in author:\n writer=writer.get_text().replace('\\n',\"\")\n writer=writer.strip(\" \")\n author_title.append(writer)\nauthor_title\n\n\n# In[119]:\n\n\ndate=soup.find_all('span', class_=\"sc-1thf9ly-2 dvggWt\")\ndate\n\n\n# In[120]:\n\n\npublished = []\nfor disclose in date:\n disclose=disclose.get_text().replace('\\n',\"\")\n disclose=disclose.strip(\" \")\n published.append(disclose)\npublished\n\n\n# In[131]:\n\n\ndata=pd.DataFrame()\ndata['paper title']=paper_title\ndata['authors']=author_title\ndata['published date']=published\ndata\n\n\n# # a python program to scrape mentioned details from dineout.co.in \n\n# In[132]:\n\n\npage=requests.get(\"https://www.dineout.co.in/delhi-restaurants/buffet-special\")\npage\n\n\n# In[133]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[145]:\n\n\nname=soup.find_all('a', class_=\"restnt-name ellipsis\")\nname\n\n\n# In[146]:\n\n\nras_name = []\nfor rastaurant in name:\n rastaurant=rastaurant.get_text().replace('\\n',\"\")\n rastaurant=rastaurant.strip(\" \")\n ras_name.append(rastaurant)\nras_name\n\n\n# In[152]:\n\n\ncuisine=soup.find_all('span', class_=\"double-line-ellipsis\")\ncuisine\n\n\n# In[153]:\n\n\nras_cuisine = []\nfor type in cuisine:\n type=type.get_text().replace('\\n',\"\")\n type=type.strip(\" \")\n ras_cuisine.append(type)\nras_cuisine\n\n\n# In[154]:\n\n\nlocation=soup.find_all('div', class_=\"restnt-loc ellipsis\")\nlocation\n\n\n# In[155]:\n\n\nras_location = []\nfor place in location:\n place=place.get_text().replace('\\n',\"\")\n place=place.strip(\" \")\n ras_location.append(place)\nras_location\n\n\n# In[156]:\n\n\nrating=soup.find_all('div', class_=\"restnt-rating rating-4\")\nrating\n\n\n# In[167]:\n\n\nras_rating = []\nfor points in rating:\n 
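# (editor's comment) unlike the other cleaning loops in this notebook, this
# cell only strips newlines from the rating text and never calls .strip(" ").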
points=points.get_text().replace('\\n',\"\")\n ras_rating.append(points)\nras_rating\n\n\n# In[158]:\n\n\nimage=soup.find_all('img', class_=\"no-img\")\nimage\n\n\n# In[162]:\n\n\nras_image = []\nfor photos in image:\n photos=photos.get_text().replace('\\n',\"\")\n photos=photos.strip(\" \")\n ras_image.append(photos)\nras_image\n\n\n# In[163]:\n\n\ndata=pd.DataFrame()\ndata['restaurant_name']=ras_name\ndata['cuisine']=ras_cuisine\ndata['location']=ras_location\ndata['ratings']=ras_rating\ndata['image']=ras_image\ndata\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"amarjeetvashisht/DATASCIENTIST-VASHISHT","sub_path":"assignment 1 python.py","file_name":"assignment 1 python.py","file_ext":"py","file_size_in_byte":15094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21016230258","text":"from typing import Iterator, Union\n\nfrom talon import Context, Module\n\nmod = Module()\nctx = Context()\n\ndigit_list = \"zero one two three four five six seven eight nine\".split()\nteens = \"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen\".split()\ntens = \"twenty thirty forty fifty sixty seventy eighty ninety\".split()\nscales = \"hundred thousand million billion trillion quadrillion quintillion sextillion septillion octillion nonillion decillion\".split()\n\ndigits_map = {n: i for i, n in enumerate(digit_list)}\ndigits_map[\"oh\"] = 0\nteens_map = {n: i + 10 for i, n in enumerate(teens)}\ntens_map = {n: 10 * (i + 2) for i, n in enumerate(tens)}\nscales_map = {n: 10 ** (3 * (i + 1)) for i, n in enumerate(scales[1:])}\nscales_map[\"hundred\"] = 100\n\nnumbers_map = digits_map.copy()\nnumbers_map.update(teens_map)\nnumbers_map.update(tens_map)\nnumbers_map.update(scales_map)\n\n\ndef parse_number(l: list[str]) -> str:\n \"\"\"Parses a list of words into a number/digit string.\"\"\"\n l = list(scan_small_numbers(l))\n for scale in scales:\n l = parse_scale(scale, l)\n return \"\".join(str(n) for n in l)\n\n\ndef scan_small_numbers(l: list[str]) -> Iterator[Union[str, int]]:\n \"\"\"\n Takes a list of number words, yields a generator of mixed numbers & strings.\n Translates small number terms (<100) into corresponding numbers.\n Drops all occurrences of \"and\".\n Smashes digits onto tens words, eg. [\"twenty\", \"one\"] -> [21].\n But note that \"ten\" and \"zero\" are excluded, ie:\n [\"ten\", \"three\"] -> [10, 3]\n [\"fifty\", \"zero\"] -> [50, 0]\n Does nothing to scale words (\"hundred\", \"thousand\", \"million\", etc).\n \"\"\"\n # reversed so that repeated pop() visits in left-to-right order\n l = [x for x in reversed(l) if x != \"and\"]\n while l:\n n = l.pop()\n # fuse tens onto digits, eg. \"twenty\", \"one\" -> 21\n if n in tens_map and l and digits_map.get(l[-1], 0) != 0:\n d = l.pop()\n yield numbers_map[n] + numbers_map[d]\n # turn small number terms into corresponding numbers\n elif n not in scales_map:\n yield numbers_map[n]\n else:\n yield n\n\n\ndef parse_scale(scale: str, l: list[Union[str, int]]) -> list[Union[str, int]]:\n \"\"\"Parses a list of mixed numbers & strings for occurrences of the following\n pattern:\n\n <multiplier> <scale> <remainder>\n\n where <scale> is a scale word like \"hundred\", \"thousand\", \"million\", etc and\n multiplier and remainder are numbers or strings of numbers of the\n appropriate size. 
For example:\n\n parse_scale(\"hundred\", [1, \"hundred\", 2]) -> [102]\n parse_scale(\"thousand\", [12, \"thousand\", 3, 45]) -> [12345]\n\n We assume that all scales of lower magnitude have already been parsed; don't\n call parse_scale(\"thousand\") until you've called parse_scale(\"hundred\").\n \"\"\"\n scale_value = scales_map[scale]\n scale_digits = len(str(scale_value))\n\n # Split the list on the desired scale word, then parse from left to right.\n left, *splits = split_list(scale, l)\n for right in splits:\n # (1) Figure out the multiplier by looking to the left of the scale\n # word. We ignore non-integers because they are scale words that we\n # haven't processed yet; this strategy means that \"thousand hundred\"\n # gets parsed as 1,100 instead of 100,000, but \"hundred thousand\" is\n # parsed correctly as 100,000.\n before = 1 # default multiplier\n if left and isinstance(left[-1], int) and left[-1] != 0:\n before = left.pop()\n\n # (2) Absorb numbers to the right, eg. in [1, \"thousand\", 1, 26], \"1\n # thousand\" absorbs [\"1\", \"26\"] to make 1,126. We pull numbers off\n # `right` until we fill up the desired number of digits.\n after = \"\"\n while right and isinstance(right[0], int):\n next = after + str(right[0])\n if len(next) >= scale_digits:\n break\n after = next\n right.pop(0)\n after = int(after) if after else 0\n\n # (3) Push the parsed number into place, append whatever was left\n # unparsed, and continue.\n left.append(before * scale_value + after)\n left.extend(right)\n\n return left\n\n\ndef split_list(value, l: list) -> Iterator:\n \"\"\"Splits a list by occurrences of a given value.\"\"\"\n start = 0\n while True:\n try:\n i = l.index(value, start)\n except ValueError:\n break\n yield l[start:i]\n start = i + 1\n yield l[start:]\n\n\n# # ---------- TESTS (uncomment to run) ----------\n# def test_number(expected, string):\n# print('testing:', string)\n# l = list(scan_small_numbers(string.split()))\n# print(\" scan --->\", l)\n# for scale in scales:\n# old = l\n# l = parse_scale(scale, l)\n# if scale in old: print(\" parse -->\", l)\n# else: assert old == l, \"parse_scale should do nothing if the scale does not occur in the list\"\n# result = \"\".join(str(n) for n in l)\n# assert result == parse_number(string.split())\n# assert str(expected) == result, f\"parsing {string!r}, expected {expected}, got {result}\"\n\n# test_number(105000, \"one hundred and five thousand\")\n# test_number(1000000, \"one thousand thousand\")\n# test_number(1501000, \"one million five hundred one thousand\")\n# test_number(1501106, \"one million five hundred and one thousand one hundred and six\")\n# test_number(123, \"one two three\")\n# test_number(123, \"one twenty three\")\n# test_number(104, \"ten four\") # borderline, but valid in some dialects\n# test_number(1066, \"ten sixty six\") # a common way of saying years\n# test_number(1906, \"nineteen oh six\") # year\n# test_number(2001, \"twenty oh one\") # year\n# test_number(2020, \"twenty twenty\")\n# test_number(1001, \"one thousand one\")\n# test_number(1010, \"one thousand ten\")\n# test_number(123456, \"one hundred and twenty three thousand and four hundred and fifty six\")\n# test_number(123456, \"one twenty three thousand four fifty six\")\n\n# ## failing (and somewhat debatable) tests from old numbers.py\n# #test_number(10000011, \"one million one one\")\n# #test_number(100001010, \"one million ten ten\")\n# #test_number(1050006000, \"one hundred thousand and five thousand and six thousand\")\n\n\n# ---------- 
CAPTURES ----------\nalt_digits = \"(\" + \"|\".join(digits_map.keys()) + \")\"\nalt_teens = \"(\" + \"|\".join(teens_map.keys()) + \")\"\nalt_tens = \"(\" + \"|\".join(tens_map.keys()) + \")\"\nalt_scales = \"(\" + \"|\".join(scales_map.keys()) + \")\"\nnumber_word = \"(\" + \"|\".join(numbers_map.keys()) + \")\"\n# don't allow numbers to start with scale words like \"hundred\", \"thousand\", etc\nleading_words = numbers_map.keys() - scales_map.keys()\nleading_words -= {\"oh\", \"o\"} # comment out to enable bare/initial \"oh\"\nnumber_word_leading = f\"({'|'.join(leading_words)})\"\n\n\n# Numbers used in `number_small` capture\nnumber_small_list = [*digit_list, *teens]\nfor ten in tens:\n number_small_list.append(ten)\n number_small_list.extend(f\"{ten} {digit}\" for digit in digit_list[1:])\nnumber_small_map = {n: i for i, n in enumerate(number_small_list)}\n\nmod.list(\"number_small\", desc=\"List of small numbers\")\nctx.lists[\"self.number_small\"] = number_small_map.keys()\n\n\n# TODO: allow things like \"double eight\" for 88\n@ctx.capture(\"digit_string\", rule=f\"({alt_digits} | {alt_teens} | {alt_tens})+\")\ndef digit_string(m) -> str:\n return parse_number(list(m))\n\n\n@ctx.capture(\"digits\", rule=\"<digit_string>\")\ndef digits(m) -> int:\n \"\"\"Parses a phrase representing a digit sequence, returning it as an integer.\"\"\"\n return int(m.digit_string)\n\n\n@mod.capture(rule=f\"{number_word_leading} ([and] {number_word})*\")\ndef number_string(m) -> str:\n \"\"\"Parses a number phrase, returning that number as a string.\"\"\"\n return parse_number(list(m))\n\n\n@ctx.capture(\"number\", rule=\"<user.number_string>\")\ndef number(m) -> int:\n \"\"\"Parses a number phrase, returning it as an integer.\"\"\"\n return int(m.number_string)\n\n\n@ctx.capture(\"number_signed\", rule=f\"[negative|minus] <number>\")\ndef number_signed(m):\n number = m[-1]\n return -number if (m[0] in [\"negative\", \"minus\"]) else number\n\n\n@ctx.capture(\"number_small\", rule=\"{user.number_small}\")\ndef number_small(m) -> int:\n return number_small_map[m.number_small]\n\n\n@mod.capture(rule=f\"[negative|minus] <number_small>\")\ndef number_signed_small(m) -> int:\n \"\"\"Parses an integer between -99 and 99.\"\"\"\n number = m[-1]\n return -number if (m[0] in [\"negative\", \"minus\"]) else number\n","repo_name":"talonhub/community","sub_path":"core/numbers/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":498,"dataset":"github-code","pt":"16"} +{"seq_id":"7388147059","text":"from Connect4_Globals import *\nfrom Connect4_Players import Player, RandomPlayer\n\nclass ComputerPlayer(Player):\n \"\"\"A class that represents an AI player in the game\"\"\"\n \n def __init__(self, coin_type, player_type, epsilon=0.2, alpha=0.3, gamma=0.9, exploration_coeff=1):\n \"\"\"\n Initialize an AI with the proper type, which is one of Random, \n Q learner, Sarsa learner, Monte Carlo or Minimax\n \"\"\"\n if (player_type == \"qlearner\"):\n self.player = QLearningPlayer(coin_type, epsilon, alpha, gamma)\n elif (player_type == \"sarsalearner\"):\n self.player = SarsaLearningPlayer(coin_type, epsilon, alpha, gamma)\n elif (player_type == \"montecarlo\"):\n self.player = MonteCarloPlayer(coin_type, exploration_coeff)\n elif (player_type == \"minimax\"):\n self.player = MiniMaxPlayer(coin_type)\n else:\n self.player = RandomPlayer(coin_type)\n \n def complete_move(self, coin, board, game_logic, background):\n \"\"\"\n Move the coin and decide which slot to drop it in and learn from the\n chosen move\n 
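(Editor's addition: concretely, the flow is choose_action -> coin.move_right
and coin.set_column -> board.insert_coin -> player.learn, and the method
returns whether that move ended the game.)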
\"\"\"\n actions = board.get_available_actions()\n state = board.get_state()\n chosen_action = self.choose_action(state, actions, coin, board, game_logic, background)\n coin.move_right(background, chosen_action)\n coin.set_column(chosen_action)\n game_over = board.insert_coin(coin, background, game_logic)\n self.player.learn(board, actions, chosen_action, game_over, game_logic)\n \n return game_over\n \n def get_coin_type(self):\n \"\"\"\n Return the coin type of the AI player\n \"\"\"\n return self.player.get_coin_type()\n \n def choose_action(self, state, actions, coin=None, board=None, game_logic=None, background=None):\n \"\"\"\n Choose an action (which slot to drop in) based on the state of the\n board\n \"\"\"\n return self.player.choose_action(state, actions, coin, board, game_logic, background)\n \nclass QLearningPlayer(Player):\n \"\"\"A class that represents an AI using Q-learning algorithm\"\"\"\n \n def __init__(self, coin_type, epsilon=0.2, alpha=0.3, gamma=0.9):\n \"\"\"\n Initialize a Q-learner with parameters epsilon, alpha and gamma\n and its coin type\n \"\"\"\n Player.__init__(self, coin_type)\n self.q = {}\n self.epsilon = epsilon # e-greedy chance of random exploration\n self.alpha = alpha # learning rate\n self.gamma = gamma # discount factor for future rewards \n \n def getQ(self, state, action):\n \"\"\"\n Return a probability for a given state and action where the greater\n the probability the better the move\n \"\"\"\n # encourage exploration; \"optimistic\" 1.0 initial values\n if self.q.get((state, action)) is None:\n self.q[(state, action)] = 1.0\n return self.q.get((state, action)) \n \n def choose_action(self, state, actions, coin, board, game_logic, background):\n \"\"\"\n Return an action based on the best move recommendation by the current\n Q-Table with a epsilon chance of trying out a new move\n \"\"\"\n current_state = state\n\n if random.random() < self.epsilon: # explore!\n chosen_action = random.choice(actions)\n return chosen_action\n\n qs = [self.getQ(current_state, a) for a in actions]\n maxQ = max(qs)\n\n if qs.count(maxQ) > 1:\n # more than 1 best option; choose among them randomly\n best_options = [i for i in range(len(actions)) if qs[i] == maxQ]\n i = random.choice(best_options)\n else:\n i = qs.index(maxQ)\n\n return actions[i]\n \n def learn(self, board, actions, chosen_action, game_over, game_logic):\n \"\"\"\n Determine the reward based on its current chosen action and update\n the Q table using the reward recieved and the maximum future reward\n based on the resulting state due to the chosen action\n \"\"\"\n reward = 0\n if (game_over):\n win_value = game_logic.get_winner()\n if win_value == 0:\n reward = 0.5\n elif win_value == self.coin_type:\n reward = 1\n else:\n reward = -2\n prev_state = board.get_prev_state()\n prev = self.getQ(prev_state, chosen_action)\n result_state = board.get_state()\n maxqnew = max([self.getQ(result_state, a) for a in actions])\n self.q[(prev_state, chosen_action)] = prev + self.alpha * ((reward + self.gamma*maxqnew) - prev)\n\nclass SarsaLearningPlayer(Player):\n \"\"\"A class that represents an AI using Sarsa-learning algorithm\"\"\"\n \n def __init__(self, coin_type, epsilon=0.2, alpha=0.3, gamma=0.9):\n \"\"\"\n Initialize a sarsa-learner with parameters epsilon, alpha and gamma\n and its coin type\n \"\"\"\n Player.__init__(self, coin_type)\n self.q = {}\n self.epsilon = epsilon # e-greedy chance of random exploration\n self.alpha = alpha # learning rate\n self.gamma = gamma # discount factor for future 
rewards \n \n def getQ(self, state, action):\n \"\"\"\n Return a probability for a given state and action where the greater\n the probability the better the move\n \"\"\"\n # encourage exploration; \"optimistic\" 1.0 initial values\n if self.q.get((state, action)) is None:\n self.q[(state, action)] = 1.0\n return self.q.get((state, action)) \n \n def choose_action(self, state, actions, coin, board, game_logic, background):\n \"\"\"\n Return an action based on the best move recommendation by the current\n Q-Table with a epsilon chance of trying out a new move\n \"\"\"\n current_state = state\n\n if random.random() < self.epsilon: # explore!\n chosen_action = random.choice(actions)\n return chosen_action\n\n qs = [self.getQ(current_state, a) for a in actions]\n maxQ = max(qs)\n\n if qs.count(maxQ) > 1:\n # more than 1 best option; choose among them randomly\n best_options = [i for i in range(len(actions)) if qs[i] == maxQ]\n i = random.choice(best_options)\n else:\n i = qs.index(maxQ)\n\n return actions[i]\n def learn(self, board, actions, chosen_action, game_over, game_logic):\n \"\"\"\n Determine the reward based on its current chosen action and update\n the Q table using the reward recieved and the exploring future reward\n based on the resulting state due to the chosen action\n \"\"\"\n reward = 0\n if (game_over):\n win_value = game_logic.get_winner()\n if win_value == 0:\n reward = 0.5\n elif win_value == self.coin_type:\n reward = 1\n else:\n reward = -2\n \n prev_state = board.get_prev_state()\n prev = self.getQ(prev_state, chosen_action) \n result_state = board.get_state()\n qnew = self.getQ(result_state, chosen_action)\n self.q[(prev_state, chosen_action)] = prev + self.alpha * ((reward + self.gamma*qnew) - prev)\n\nclass MiniMaxPlayer(Player):\n def __init__(self, coin_type):\n Player.__init__(self, coin_type) # coin type is 1 or 2\n self.EMPTY = 0\n self.AI_PIECE = self.coin_type\n if self.AI_PIECE == 1:\n self.PLAYER_PIECE = 2\n else:\n self.PLAYER_PIECE = 1\n \n def evaluate_window(self, window, piece):\n score = 0\n opp_piece = self.PLAYER_PIECE \n if piece == self.PLAYER_PIECE:\n opp_piece = self.AI_PIECE\n if window.count(piece) == 4:\n score += 100\n elif window.count(piece) == 3 and window.count(self.EMPTY) == 1:\n score += 5\n elif window.count(piece) == 2 and window.count(self.EMPTY) == 2:\n score += 2\n if window.count(opp_piece) == 3 and window.count(self.EMPTY) == 1:\n score -= 4\n return score \n\n def score_position(self, board, piece):\n (board_n_rows, board_n_cols) = board.get_dimensions()\n board_state = np.asarray(board.get_state()) # convert tuple into numpy array\n n_in_a_row = board.get_n_in_a_row()\n score = 0\n ## Score center column\n center_array = [int(i) for i in list(board_state[:, board_n_cols//2])]\n center_count = center_array.count(piece)\n score += center_count * 3\n\n ## Score Horizontal\n for r in range(board_n_rows):\n row_array = [int(i) for i in list(board_state[r,:])]\n for c in range(board_n_cols-3):\n window = row_array[c:c+n_in_a_row]\n score += self.evaluate_window(window, piece)\n\n ## Score Vertical\n for c in range(board_n_cols):\n col_array = [int(i) for i in list(board_state[:,c])]\n for r in range(board_n_rows-3):\n window = col_array[r:r+n_in_a_row]\n score += self.evaluate_window(window, piece)\n\n ## Score positive sloped diagonal\n for r in range(board_n_rows-3):\n for c in range(board_n_cols-3):\n window = [board_state[r+i][c+i] for i in range(n_in_a_row)]\n score += self.evaluate_window(window, piece)\n\n for r in 
\n\n    def score_position(self, board, piece):\n        (board_n_rows, board_n_cols) = board.get_dimensions()\n        board_state = np.asarray(board.get_state()) # convert tuple into numpy array\n        n_in_a_row = board.get_n_in_a_row()\n        score = 0\n        ## Score center column\n        center_array = [int(i) for i in list(board_state[:, board_n_cols//2])]\n        center_count = center_array.count(piece)\n        score += center_count * 3\n\n        ## Score Horizontal (the -3 offsets assume n_in_a_row == 4)\n        for r in range(board_n_rows):\n            row_array = [int(i) for i in list(board_state[r,:])]\n            for c in range(board_n_cols-3):\n                window = row_array[c:c+n_in_a_row]\n                score += self.evaluate_window(window, piece)\n\n        ## Score Vertical\n        for c in range(board_n_cols):\n            col_array = [int(i) for i in list(board_state[:,c])]\n            for r in range(board_n_rows-3):\n                window = col_array[r:r+n_in_a_row]\n                score += self.evaluate_window(window, piece)\n\n        ## Score positive sloped diagonal\n        for r in range(board_n_rows-3):\n            for c in range(board_n_cols-3):\n                window = [board_state[r+i][c+i] for i in range(n_in_a_row)]\n                score += self.evaluate_window(window, piece)\n\n        ## Score negative sloped diagonal\n        for r in range(board_n_rows-3):\n            for c in range(board_n_cols-3):\n                window = [board_state[r+3-i][c+i] for i in range(n_in_a_row)]\n                score += self.evaluate_window(window, piece)\n        return score \n\n    def choose_action(self, state, actions, coin, board, game_logic, background):\n        minmax_algo = self.minmax(actions, coin, board, 5, -math.inf, math.inf, True, game_logic, background)\n        next_action = minmax_algo[0]\n        if next_action is None:\n            return random.choice(actions)\n        return next_action\n    \n    def minmax(self, actions, coin, board, depth, alpha, beta, maximizingPlayer, game_logic, background):\n        AI_PIECE = self.AI_PIECE\n        PLAYER_PIECE = self.PLAYER_PIECE \n        valid_locations = actions\n        \n        is_terminal = board.is_terminal_node(PLAYER_PIECE, AI_PIECE)\n        if depth == 0 or is_terminal:\n            if is_terminal:\n                if board.is_winning_move(AI_PIECE):\n                    return (None, 100000000000000)\n                elif board.is_winning_move(PLAYER_PIECE):\n                    return (None, -10000000000000)\n                else: # Game is over, no more valid moves\n                    return (None, 0)\n            else: # Depth is zero\n                value = self.score_position(board, AI_PIECE)\n                return (None, value)\n        \n        if maximizingPlayer:\n            value = -math.inf\n            column = random.choice(valid_locations)\n            for col in valid_locations:\n                row = board.determine_row_to_insert(col)\n                b_copy = board.copy()\n                b_copy.drop_piece(row, col, AI_PIECE)\n                new_score = self.minmax(actions, coin, b_copy, depth-1, alpha, beta, False, game_logic, background)[1]\n                if new_score > value:\n                    value = new_score\n                    column = col\n                alpha = max(alpha, value)\n                if alpha >= beta:\n                    break\n            return (column, value)\n\n        else: # Minimizing player\n            value = math.inf\n            column = random.choice(valid_locations)\n            for col in valid_locations:\n                row = board.determine_row_to_insert(col)\n                b_copy = board.copy()\n                b_copy.drop_piece(row, col, PLAYER_PIECE)\n                new_score = self.minmax(actions, coin, b_copy, depth-1, alpha, beta, True, game_logic, background)[1]\n                if new_score < value:\n                    value = new_score\n                    column = col\n                beta = min(beta, value)\n                if alpha >= beta:\n                    break\n            return (column, value)\n    \n    def learn(self, board, actions, chosen_action, game_over, game_logic):\n        \"\"\"\n        A method to make a move and update any learning parameters if any\n        \"\"\"\n        pass \n\n# Adapted from: http://mcts.ai/code/python.html by Christopher Yong\n# https://replit.com/talk/challenge/Connect-4-AI-using-Monte-Carlo-Tree-Search/10640\n# https://jyopari.github.io/MCTS\n\nclass Node:\n    def __init__(self, piece, board, parent=None, move=None, exploration_coeff=1):\n        self.board = board.copy()\n        self.parent = parent\n        self.move = move\n        self.untriedMoves = board.get_available_actions()\n        self.childNodes = []\n        self.wins = 0\n        self.visits = 0\n        self.player = piece \n        self.exploration_coeff = exploration_coeff\n    \n    # return child with largest UCT value\n    def selection(self):\n        # Upper Confidence bounds applied to Trees\n        # uct = Xj + c * sqrt(2 * ln(N) / Nj)\n        # Xj is the win ratio for a child node\n        # N is the number of times the parent node has been visited\n        # Nj is the number of times the child node has been visited.\n        # Xj represents exploitation, as it is a large value when the win rate is high\n        # Second term represents exploration, as it is large when the number of visits for that node have been low.\n        uct = lambda x: x.wins / x.visits + self.exploration_coeff * np.sqrt(2 * np.log(self.visits) / x.visits)\n        return sorted(self.childNodes, key=uct)[-1]\n\n    # return child when move is taken\n    # remove move from current node\n    def expand(self, move, board, exploration_coeff=1):\n        child = 
Node(piece=board.prev_player, \n board=board,\n parent=self,\n move=move,\n exploration_coeff=exploration_coeff)\n self.untriedMoves.remove(move)\n self.childNodes.append(child)\n return child\n\n def update(self, result):\n self.wins += result\n self.visits += 1\n\nclass MonteCarloPlayer(Player):\n \"\"\"A class that represents an AI using montecarlo algorithm\"\"\"\n \n def __init__(self, coin_type, exploration_coeff):\n \"\"\"\n Initialize a montecarlo player with coin type\n \"\"\"\n self.currentNode = None\n Player.__init__(self, coin_type)\n self.cur_player = self.coin_type\n self.prev_player = 2 if self.cur_player == 1 else 1 \n self.exploration_coeff=exploration_coeff \n \n def choose_action(self, state, actions, coin, board, game_logic, background):\n board.prev_player = self.prev_player\n board.current_player = self.cur_player\n\n self.currentNode = Node(piece=board.prev_player,board=board, exploration_coeff=self.exploration_coeff)\n return self.mcts(actions, board, 20000, self.currentNode, coin, game_logic, background, 5)\n\n def mcts(self, actions, board, itermax, currentNode, coin, game_logic, background, timeout=5):\n rootnode = Node(piece=board.prev_player,board=board, exploration_coeff=self.exploration_coeff)\n if currentNode is not None: rootnode = currentNode\n\n start = time.perf_counter()\n for i in range(itermax):\n node = rootnode\n state = board.copy()\n \n # selection\n while node.untriedMoves == [] and node.childNodes != []:\n # keep going down the tree based on best UCT values until terminal or unexpanded node\n \n node = node.selection()\n row = state.determine_row_to_insert(node.move)\n state.drop_piece(row, node.move, state.current_player)\n\n # expand\n if node.untriedMoves != []:\n col = random.choice(node.untriedMoves)\n row = state.determine_row_to_insert(col)\n state.drop_piece(row, col, state.current_player)\n node = node.expand(col, state, self.exploration_coeff)\n\n # rollout\n while state.get_available_actions():\n col = random.choice(state.get_available_actions())\n row = state.determine_row_to_insert(col)\n state.drop_piece(row, col, state.current_player)\n if state.is_winning_move(state.prev_player):\n break \n\n # backpropagate\n while node is not None:\n node.update(self.result(actions, state, node.player))\n node = node.parent\n\n duration = time.perf_counter() - start\n if duration > timeout: break\n\n win_ratio = lambda x: x.wins / x.visits\n sortedChildNodes = sorted(rootnode.childNodes, key=win_ratio)[::-1]\n return sortedChildNodes[0].move\n\n def result(self, actions, board, piece):\n opp_player = 2 if piece == 1 else 1 \n if board.is_winning_move(piece): # player wins\n return 1\n elif board.is_winning_move(opp_player): # opponent wins\n return 0\n elif len(board.get_available_actions()) == 0: # draw\n return 0.5\n \n def learn(self, board, actions, chosen_action, game_over, game_logic):\n \"\"\"\n A method to make a move and update any learning parameters if any\n \"\"\"\n pass","repo_name":"Team-Equality-RL-Project/connect-4","sub_path":"src/Connect4_RLPlayers.py","file_name":"Connect4_RLPlayers.py","file_ext":"py","file_size_in_byte":17477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43409891147","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\nimport os\nimport sys\n\n\ndef cwd(p=\"-L\"):\n cmd = \"pwd {}\".format(p)\n value = os.popen(cmd).read().strip('\\n')\n return value\n\n\nparser = 
argparse.ArgumentParser()\nparser.add_argument(\"param\", help=\"\", nargs=\"?\")\nparser.add_argument(\"-p\", \"--parent\", action=\"store_true\", help=\"cd to the parent directory of the specified item\")\nparser.add_argument(\"-P\", \"--physical\", action=\"store_true\", help=\"cd through physical directory structure\")\nargs = parser.parse_args()\n\nparam = args.param\ntry:\n    param = int(param)\nexcept (ValueError, TypeError):\n    pass\n\n\np = \"-P\" if args.physical else \"-L\"\norig_dir = cwd(p)\n\n\nif param is None:\n    param = os.path.basename(orig_dir)\n\ntarget = None\nif isinstance(param, int):\n    if param > 0:\n        target = os.path.normpath(os.path.join(orig_dir, \"../\"*param))\n\n\nif target is None:\n    SEARCHDIRS = os.getenv(\"UPDIRS\")\n    if SEARCHDIRS:\n        SEARCHDIRS = SEARCHDIRS.split(os.pathsep)\n        for S in SEARCHDIRS:\n            searchdir = os.path.expanduser(os.path.expandvars(S))\n            check_path = os.path.join(searchdir, param)\n            if os.path.exists(check_path):\n                if os.path.isdir(check_path) and not args.parent:\n                    target = check_path\n                else:\n                    target = searchdir\n\n\nif target is None:\n    currdir = orig_dir\n    while os.path.dirname(currdir) != currdir: # check if it is the root\n        check_path = os.path.join(currdir, param)\n        if os.path.exists(check_path):\n            if os.path.isdir(check_path) and not args.parent:\n                target = check_path\n            else:\n                target = currdir\n            break\n        else:\n            currdir = os.path.normpath(os.path.join(currdir, \"..\"))\n\n\nif target is None:\n    exit(101)\n\n\nprint(target)","repo_name":"DanChianucci/dotfiles","sub_path":"scripts/up.py","file_name":"up.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41343441400","text":"\"\"\"\nInstantce: Instantly recognize and replace objects with instances!\nAuthors: Jérôme Stephan & Darby Edelen\n\nPossible future TODO:\n    - Speed up tree / list view fns by using deques instead of lists\n\"\"\"\nPLUGIN_ID = 1061542\n\nfrom typing import Union\nimport c4d\nimport os\nimport sys\nimport webbrowser\nimport random\nimport time\nfrom collections import defaultdict\nimport typing\n\nfrom c4d import Vector\n\ndoc: c4d.documents.BaseDocument = None # The currently active document.\nop: typing.Optional[c4d.BaseObject] # The selected object within that active document. Can be None.\n\nclass InstanceFinder:\n    def __init__(self, objects, consider_dict, precision = 3, samples = 100, seed = 12345, reportBack = None, doc = c4d.documents.GetActiveDocument()):\n        self.doc = doc\n        self.reportBack = reportBack\n        self.consider = consider_dict\n        self.poly_objs = objects\n        self.poly_objs_count = len(objects)\n        self.precision = precision\n        self.samples = samples\n        self.seed = seed\n        self.instance_groups = defaultdict(list)\n\n    def get_sample_pts(self, obj, count):\n        total_num = obj.GetPointCount()\n        count = min(count, total_num)\n        \n        if count < total_num / 3:\n            return self.sample_pts_a(obj, count, total_num)\n        else:\n            return self.sample_pts_b(obj, count)\n\n\n    def sample_pts_a(self, obj, count, total_num):\n        \n        random.seed(self.seed) # use the configured sampling seed\n        sample_indices = random.sample(range(total_num), count)\n        \n        return (obj.GetPoint(i) for i in sample_indices)\n\n    def sample_pts_b(self, obj, count):\n        all_points = obj.GetAllPoints()\n        \n        random.seed(self.seed) # use the configured sampling seed\n        samples = random.sample(all_points, count)\n\n        return samples\n\n    def convert_vector(self, vec):\n        # The point position comparison appears to be somewhat sensitive\n        # to different levels of precision (decimals). 
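For instance (hypothetical\n        # numbers), with self.precision = 3 a point like (1.00004, -0.23081, 5.0)\n        # becomes the tuple (1.0, -0.231, 5.0), so coordinates that differ only\n        # beyond the third decimal place compare (and hash) as equal. 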
I'd like to come up\n # with a more consistent way to prepare floats for equality comparison.\n\n x = round(vec.x, self.precision)\n y = round(vec.y, self.precision)\n z = round(vec.z, self.precision)\n return (x,y,z)\n\n def iterate_hierarchy(self, op, type=c4d.BaseObject):\n while op:\n if isinstance(op, type):\n self.poly_obj_count += 1\n yield op\n if op.GetDown():\n op = op.GetDown()\n continue\n while not op.GetNext() and op.GetUp():\n op = op.GetUp()\n op = op.GetNext()\n\n def _calculate_relative_matrix(self, obj):\n # try catch for when the object has no polygons\n try:\n poly = obj.GetPolygon(0)\n except IndexError:\n print(f\"Object {obj.GetName()} has no polygons, aborting.\")\n return None\n\n off = obj.GetPoint(poly.a)\n v2 = obj.GetPoint(poly.b) - off\n scale = v2.GetLength()\n v2.Normalize()\n v3 = (v2 % (obj.GetPoint(poly.c) - off)).GetNormalized()\n v1 = v2 % v3\n v1 *= scale\n v2 *= scale\n v3 *= scale\n return c4d.Matrix(off, v1, v2, v3);\n\n def _hash_base_container(self, bc):\n def traverse_bc(bc):\n ignore_keys = [1011, 1012, 1013] if bc[1004] == 6 else [] # Ignore texture positions if tag is set to UVW\n for key, data in bc:\n if key in ignore_keys:\n continue\n if type(data) == c4d.BaseContainer:\n yield from traverse_bc(data)\n else:\n yield data\n\n return hash(tuple(traverse_bc(bc)))\n\n def _hash_tag(self, tag, index = 0):\n if self.consider['materials'] and tag.GetType() == c4d.Ttexture:\n mat = tag.GetMaterial()\n\n if mat:\n bc = tag.GetData()\n # print(self._hash_base_container(bc))\n poly_select = bc.GetString(c4d.TEXTURETAG_RESTRICTION)\n\n if poly_select:\n poly_select_tags = [s for s in tag.GetObject().GetTags() if s.GetName() == poly_select]\n\n if poly_select_tags:\n poly_select_tag = poly_select_tags[0]\n obj = poly_select_tag.GetObject()\n\n return hash(\n index +\n hash(mat) +\n hash(tuple(poly_select_tag.GetBaseSelect().GetAll(obj.GetPolygonCount()))) +\n self._hash_base_container(bc)\n )\n return hash(index + hash(mat) + self._hash_base_container(bc))\n\n if self.consider['normals'] and tag.GetType() in (c4d.Tphong, c4d.Tnormal):\n if not tag.GetObject().GetTag(c4d.Tnormal):\n # Hash Phong tag because there are no Normal tags\n return self._hash_base_container(tag.GetData())\n\n # Hash Normal tag\n return hash(tag.GetLowlevelDataAddressR())\n\n if self.consider['uvs'] and tag.GetType() == c4d.Tuvw:\n return hash(tag.GetLowlevelDataAddressR())\n\n\n def _calculate_hash(self, obj):\n \"\"\"\n This function returns a unique hash for unique c4d.PolygonObjects\n \"\"\"\n\n # Point Count is one measure of uniqueness\n point_count = obj.GetPointCount()\n\n # Poly Count is another measure of uniqueness\n poly_count = obj.GetPolygonCount()\n\n # Calculating the local point positions is challenging, but a good indicator of uniqueness\n mg = self._calculate_relative_matrix(obj)\n if not mg:\n return None\n pts = frozenset(self.convert_vector(pt * ~mg) for pt in self.get_sample_pts(obj, self.samples))\n\n # UVs are another measure of uniqueness\n # uvs = obj.GetTag(c4d.Tuvw).GetLowlevelDataAddressR() if self.consider[\"uvs\"] else None\n\n #Tags should be the same as well\n tags = frozenset(self._hash_tag(tag, i) for i, tag in enumerate(obj.GetTags()))\n\n # Hash as many or as few measures as you like together\n instance_ident = hash(hash(point_count) + hash(poly_count) + hash(pts) + hash(tags))\n material_tags = [tag for tag in obj.GetTags() if tag.GetType() == c4d.Ttexture]\n self.instance_groups[instance_ident].append({\"obj\": obj, \"mg\": mg, 
\"mat_tags\": material_tags, \"hash\": instance_ident, \"opened\": True})\n\n return instance_ident\n\n def build_instance_dict(self):\n total_num = self.poly_objs_count\n for i, obj in enumerate(self.poly_objs):\n self._calculate_hash(obj)\n if self.reportBack:\n self.reportBack.UpdateProgressBar(percent=int((i+1)*100/total_num), col=None)\n if self.reportBack:\n self.reportBack.StopProgressBar()\n\n\n def create_instances(self):\n if not self.instance_groups:\n self.build_instance_dict()\n\n count = 0\n total_num = self.poly_objs_count - len(self.instance_groups)\n\n self.doc.StartUndo()\n\n for instance_grp in self.instance_groups.values():\n instance_grp.reverse()\n element = instance_grp.pop()\n ref_obj = element[\"obj\"]\n ref_mtx = element[\"mg\"]\n ref_materials = element[\"mat_tags\"]\n\n if not self.consider[\"materials\"]:\n ref_parent = c4d.BaseObject(c4d.Onull)\n self.doc.InsertObject(ref_parent, pred = ref_obj)\n self.doc.AddUndo(c4d.UNDOTYPE_NEWOBJ, ref_parent)\n ref_parent.SetMg(ref_obj.GetMg())\n ref_parent.SetName(f\"{ref_obj.GetName()}_parent\")\n self.doc.AddUndo(c4d.UNDOTYPE_DELETEOBJ, ref_obj)\n ref_obj.Remove()\n self.doc.InsertObject(ref_obj, parent = ref_parent)\n self.doc.AddUndo(c4d.UNDOTYPE_NEWOBJ, ref_obj)\n ref_obj.SetMl(c4d.Matrix())\n\n for material in ref_materials:\n self.doc.AddUndo(c4d.UNDOTYPE_DELETEOBJ, material)\n material.Remove()\n ref_parent.InsertTag(material)\n\n for element in instance_grp:\n obj = element[\"obj\"]\n mtx = element[\"mg\"]\n materials = element[\"mat_tags\"]\n\n instance_obj = c4d.InstanceObject()\n if instance_obj is None:\n raise RuntimeError(\"Failed to create an instance object.\")\n instance_obj.SetReferenceObject(ref_obj)\n instance_obj.SetMl(obj.GetMl() * mtx * ~ref_mtx)\n instance_obj.SetName(obj.GetName())\n instance_obj[c4d.INSTANCEOBJECT_RENDERINSTANCE_MODE] = c4d.INSTANCEOBJECT_RENDERINSTANCE_MODE_SINGLEINSTANCE\n\n if not self.consider[\"materials\"]:\n for material in materials:\n self.doc.AddUndo(c4d.UNDOTYPE_DELETEOBJ, material)\n material.Remove()\n instance_obj.InsertTag(material)\n\n self.doc.InsertObject(instance_obj, pred = obj)\n self.doc.AddUndo(c4d.UNDOTYPE_NEWOBJ, instance_obj)\n self.doc.AddUndo(c4d.UNDOTYPE_DELETEOBJ, obj)\n obj.Remove()\n count += 1\n if self.reportBack:\n self.reportBack.UpdateProgressBar(percent=int((count)*100/total_num), col=None)\n\n if self.reportBack:\n self.reportBack.StopProgressBar()\n\n self.doc.EndUndo()\n\n return True\n\n# Colors\nBG_DARK = c4d.Vector(0.13, 0.13, 0.13)\nBG_DARKER = c4d.Vector(0.11, 0.11, 0.11)\nDARK_BLUE_TEXT_COL = c4d.Vector(0, 0.78125, 0.99609375)\nACCENT_COL = c4d.Vector(1, 0.337, 0)\nACCENT_COL_C4D = c4d.Vector(.36, 0.38, .65)\n\n# ---------------------------------------------------------------------\n# Creating GUI Instance Functions UI Elements Operations \n# Hepler Methods. \n# ---------------------------------------------------------------------\n\n#----------------------------------------------------------------------\n# TreeViewFunctions Class\n#----------------------------------------------------------------------\n\nclass InstanceListFns(c4d.gui.TreeViewFunctions):\n def GetBackgroundColor(self, root: object, userdata: object, obj: object, line: int, col: int | Vector) -> int | Vector:\n return BG_DARKER if line % 2 else BG_DARK\n def EmptyText(self, root: object, userdata: object) -> str:\n return \"Add objects by dragging them here, \\nor opening Instantce! 
with objects selected.\"\n def GetFirst(self, root, userdata):\n return root[0] if root else None\n return root.GetFirstObject()\n def GetDown(self, root, userdata, obj):\n return None\n return obj.GetDown()\n def GetNext(self, root, userdata, obj):\n currentObjIndex = root.index(obj)\n return root[currentObjIndex+1] if currentObjIndex+1 < len(root) else None\n def GetPred(self, root, userData, item):\n \"\"\"\n Gets the predecessor item for #item in #data.\n \"\"\"\n i: int = root.index(item)\n return root[i-1] if (i - 1) >= 0 else None\n def IsOpened(self, root, userdata, obj):\n return obj.GetBit(c4d.BIT_OFOLD)\n def Open(self, root, userdata, obj, onoff):\n if onoff:\n obj.SetBit(c4d.BIT_OFOLD)\n else:\n obj.DelBit(c4d.BIT_OFOLD)\n def IsSelected(self, root, userdata, obj):\n return obj.GetBit(c4d.BIT_ACTIVE)\n def Select(self, root, userdata, obj, mode):\n if mode == c4d.SELECTION_NEW:\n obj.SetBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj, c4d.SELECTION_NEW)\n elif mode == c4d.SELECTION_ADD:\n obj.SetBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj, c4d.SELECTION_ADD)\n else:\n obj.DelBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj, c4d.SELECTION_SUB)\n c4d.EventAdd()\n def GetID(self, root, userdata, obj):\n return obj.GetGUID()\n def GetName(self, root, userdata, obj):\n return obj.GetName()\n def GetDragType(self, root, userdata, obj):\n return c4d.DRAGTYPE_ATOMARRAY\n def AcceptDragObject(self, root, userdata, obj, dragtype, dragobject):\n if dragtype == c4d.DRAGTYPE_ATOMARRAY:\n return c4d.INSERT_UNDER, False\n return 0\n def InsertObject(self, root, userData, item, dragType: int, dragData: any, insertMode: int, doCopy: bool) -> None:\n \"\"\"\n Called by Cinema 4D once a drag event has finished which before has been indicated as valid by #AcceptDragObject.\n \"\"\"\n new_items = [item for item in dragData if item not in root and item.GetType() == c4d.Opolygon]\n root.extend(new_items)\n def DeletePressed(self, root: object, userdata: object) -> None:\n for element in reversed(root):\n if self.IsSelected(root, userdata, element):\n root.remove(element)\n\nclass InstanceTreeFns(c4d.gui.TreeViewFunctions):\n def GetBackgroundColor(self, root: object, userdata: object, obj: object, line: int, col: int | Vector) -> int | Vector:\n return BG_DARKER if line % 2 else BG_DARK\n def GetFirst(self, root, userdata):\n return root[list(root.keys())[0]][0]\n return root[0] if root else None\n def GetDown(self, root, userdata, obj):\n if root[obj[\"hash\"]].index(obj) == 0:\n return root[obj[\"hash\"]][1] if len(root[obj[\"hash\"]]) > 1 else None\n return None\n def GetNext(self, root, userdata, obj):\n if root[obj[\"hash\"]].index(obj) == 0:\n key_list = list(root.keys())\n return root[key_list[key_list.index(obj[\"hash\"])+1]][0] if key_list.index(obj[\"hash\"])+1 < len(key_list) else None\n else:\n currentObjIndex = root[obj[\"hash\"]].index(obj)\n return root[obj[\"hash\"]][currentObjIndex+1] if currentObjIndex+1 < len(root[obj[\"hash\"]]) else None\n def IsOpened(self, root, userdata, obj):\n return obj[\"opened\"]\n def Open(self, root, userdata, obj, onoff):\n if onoff:\n obj[\"opened\"] = True\n else:\n obj[\"opened\"] = False\n def IsSelected(self, root, userdata, obj):\n return obj[\"obj\"].GetBit(c4d.BIT_ACTIVE)\n def Select(self, root, userdata, obj, mode):\n if mode == c4d.SELECTION_NEW:\n obj[\"obj\"].SetBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj[\"obj\"], c4d.SELECTION_NEW)\n elif mode == c4d.SELECTION_ADD:\n obj[\"obj\"].SetBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj[\"obj\"], 
c4d.SELECTION_ADD)\n else:\n obj[\"obj\"].DelBit(c4d.BIT_ACTIVE)\n doc.SetActiveObject(obj[\"obj\"], c4d.SELECTION_SUB)\n c4d.EventAdd()\n def GetID(self, root, userdata, obj):\n return obj[\"obj\"].GetGUID()\n def GetName(self, root, userdata, obj):\n return obj[\"obj\"].GetName()\n\n####################################################################################################\n## ##\n## UI ##\n## ##\n####################################################################################################\n\nVERSION_NUMBER = \" v1.0 \"\nABOUT_TEXT_COPYRIGHT = \"©2023 by Jérôme Stephan & Darby Edelen\"\nABOUT_TEXT_WEBSITE = \"https://jeromestephan.de\"\nABOUT_LINK_README = \"https://jeromestephan.gumroad.com/l/Instantce?layout=profile\"\nABOUT_SUPPORT = \"https://jeromestephan.gumroad.com/\"\n\nGROUP_BORDER_SPACE = 6\nGROUP_BORDER_SPACE_SM = GROUP_BORDER_SPACE - 2\n\nID_LINK_ABOUT = 11000\nID_LINK_README = 11001\nID_AUTHOR_TEXT = 11002\nID_LINK_WEBSITE = 11003\nID_SUPPORT_ME = 11004\nID_VERSION_NUMBER = 11005\n\nID_INEXCLUDE_LIST = 10000\nID_EXTRACT_BTN = 10001\nID_PROCESS_BTN = 10002\n\nID_PROGRESSBAR = 10100\nID_PROGRESSBAR_TEXT = 10101\n\nID_PRECISION = 10200\nID_SAMPLES = 10201\nID_SEED = 10202\nID_BLIND_MODE = 10203\n\nID_CONSIDER_TAGORDER = 10301\nID_CONSIDER_MATERIALS = 10302\nID_CONSIDER_NORMALS = 10303\nID_CONSIDER_OTHERTAGS = 10304\nID_CONSIDER_UVS = 10305\n\nID_BLANK = 101010\n\nclass MainDialog(c4d.gui.GeDialog):\n\n def AddTreeView(self, w_size, h_size):\n bc_IEsettings = c4d.BaseContainer()\n bc_IEsettings.SetData(c4d.TREEVIEW_OUTSIDE_DROP, True)\n bc_IEsettings.SetData(c4d.TREEVIEW_ALTERNATE_BG, True)\n self._treeView = self.AddCustomGui(ID_INEXCLUDE_LIST, c4d.CUSTOMGUI_TREEVIEW, \"\", c4d.BFH_SCALEFIT|c4d.BFV_SCALEFIT, w_size, h_size, bc_IEsettings)\n tree_settings = c4d.BaseContainer()\n tree_settings.SetInt32(0, c4d.LV_TREE)\n self._treeView.SetLayout(1, tree_settings)\n return True\n \n def UpdateTreeView(self, root, treeViewFns):\n self._treeView.SetRoot(root, treeViewFns, None)\n self._treeView.Refresh()\n return True\n \n def Extract(self, instance_args):\n if self._listViewRoot:\n start = time.perf_counter()\n blind = instance_args[\"blind\"]\n reportBack = None if blind else self\n self._instanceFinder = InstanceFinder(self._listViewRoot, \n consider_dict = instance_args[\"consider\"],\n precision = instance_args[\"precision\"], \n samples = instance_args[\"samples\"], \n seed = instance_args[\"seed\"],\n reportBack = reportBack,\n doc = doc)\n self._instanceFinder.build_instance_dict()\n instance_count = len(self._instanceFinder.instance_groups)\n total_count = self._instanceFinder.poly_objs_count\n\n duration = time.perf_counter() - start\n if instance_count > 0:\n print(f\"Recognized {total_count - instance_count} objects with instances in {duration:.03} seconds ({((total_count - instance_count)/duration):.02f} objects / second). 
Remaining objects: {instance_count}\")\n            self.UpdateTreeView(self._instanceFinder.instance_groups, InstanceTreeFns())\n            c4d.EventAdd()\n        else: \n            print(\"No Objects in the List\")\n        return True\n    \n    def ClearExtraction(self):\n        self._instanceFinder = None\n        self.UpdateTreeView(self._listViewRoot, self._listViewFns)\n        return True\n\n    def Process(self):\n        if self._instanceFinder:\n            start = time.perf_counter()\n            self._instanceFinder.create_instances()\n            instance_count = len(self._instanceFinder.instance_groups)\n            total_count = self._instanceFinder.poly_objs_count\n\n            duration = time.perf_counter() - start\n            if instance_count > 0:\n                print(f\"Replaced {total_count - instance_count} objects with instances in {duration:.03} seconds ({((total_count - instance_count)/duration):.02f} objects / second). Remaining objects: {instance_count}\")\n            self._listViewRoot = []\n            self.UpdateTreeView(self._listViewRoot, self._listViewFns)\n            c4d.EventAdd()\n        else:\n            print(\"No Instances extracted yet\")\n            return False\n        return True\n    \n    def AddProgressBar(self, w_size, h_size):\n        self.GroupBegin(0, c4d.BFH_SCALEFIT, 0, 1) \n        self.GroupBorderNoTitle(c4d.BORDER_THIN_IN)\n        self.AddCustomGui(ID_PROGRESSBAR, c4d.CUSTOMGUI_PROGRESSBAR, \"\", c4d.BFH_SCALEFIT|c4d.BFV_SCALEFIT, w_size, h_size)\n        self.AddSeparatorV(0, c4d.BFV_SCALEFIT)\n        self.AddStaticText(ID_PROGRESSBAR_TEXT, c4d.BFH_MASK, 50, h_size, \"\", c4d.BORDER_WITH_TITLE_BOLD) \n        self.GroupEnd()\n        return True\n    \n    def UpdateProgressBar(self, percent, col):\n        progressMsg = c4d.BaseContainer(c4d.BFM_SETSTATUSBAR)\n        progressMsg[c4d.BFM_STATUSBAR_PROGRESSON] = True\n        progressMsg[c4d.BFM_STATUSBAR_PROGRESS] = percent/100.0 \n        # use this if you want a custom color\n        if col:\n            self.SetDefaultColor(ID_PROGRESSBAR, c4d.COLOR_PROGRESSBAR, col) \n        self.SendMessage(ID_PROGRESSBAR, progressMsg)\n        self.SetString(ID_PROGRESSBAR_TEXT, str(int(percent))+\"%\")\n        return True\n    \n    def StopProgressBar(self):\n        progressMsg = c4d.BaseContainer(c4d.BFM_SETSTATUSBAR)\n        progressMsg.SetBool(c4d.BFM_STATUSBAR_PROGRESSON, False)\n        self.SendMessage(ID_PROGRESSBAR, progressMsg)\n        return True\n    \n    # ====================================== # \n    # Main GeDialog Class Overrides\n    # ====================================== #\n    def __init__(self):\n        \"\"\"\n        Constructor: initialize the tree view reference, the view helper\n        classes and the dialog state.\n        \"\"\" \n        self._instanceFinder: InstanceFinder | None = None \n        self._treeView: c4d.gui.TreeViewCustomGui | None = None\n        self._listViewFns = InstanceListFns()\n        self._treeViewFns = InstanceTreeFns()\n        self._listViewRoot = None\n        self._treeViewRoot = None\n        self.extracted = False\n        # super(Tool_WindowDialog, self).__init__()\n\n    # UI Layout\n    def CreateLayout(self):\n        # Dialog Title\n        self.SetTitle(\"Instantce!\")\n        \n        self.MenuSubBegin(\"About\")\n        self.MenuAddString(ID_LINK_ABOUT, \"About\")\n        self.MenuAddString(ID_LINK_README, \"Readme\")\n        self.MenuSubEnd()\n        \n        self.MenuSubBegin(\"Support this project & me!\")\n        self.MenuAddString(ID_SUPPORT_ME, \"Support this & other projects (& me) on Gumroad!\")\n        self.MenuSubEnd()\n        self.MenuFinished()\n        \n        # Top Menu adding Tool Version\n        self.GroupBeginInMenuLine()\n        self.AddStaticText(ID_VERSION_NUMBER, 0)\n        self.SetString(ID_VERSION_NUMBER, VERSION_NUMBER)\n        self.GroupEnd() \n        \n        # self.GroupBegin(self.IDS_OverallGrp, c4d.BFH_SCALEFIT, 1, 0, \"\") # Overall Group.\n        \n        # Static UI Text\n        # self.AddStaticText(self.IDS_StaticText, c4d.BFH_CENTER, 0, 15, \"Instantce Demo\", c4d.BORDER_WITH_TITLE_BOLD)\n        \n        # 
self.AddSeparatorH(0, c4d.BFH_SCALEFIT) # Line Separator / eg: self.AddSeparatorH(0, c4d.BFH_MASK) and AddSeparatorV \n\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT | c4d.BFV_SCALEFIT, 2, 0, \"\") \n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n \n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT | c4d.BFV_SCALEFIT, 1, 0, \"\") \n self.AddStaticText(ID_BLANK, c4d.BFH_LEFT, 0, 15, \" Objects :\", c4d.BORDER_WITH_TITLE_BOLD)\n self.AddTreeView(w_size=500, h_size=300)\n self.GroupEnd() \n\n # self.AddSeparatorV(0, c4d.BFV_SCALEFIT)\n \n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT|c4d.BFV_TOP, 1, 0, \"\")\n self.AddStaticText(ID_BLANK, c4d.BFH_LEFT, 0, 15, \" Settings :\", c4d.BORDER_WITH_TITLE_BOLD)\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"Precision\")\n self.GroupBorder(c4d.BORDER_GROUP_IN)\n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE_SM, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n self.AddEditSlider(ID_PRECISION, c4d.BFH_SCALEFIT, 0, 0)\n self.GroupEnd()\n\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"Samples\")\n self.GroupBorder(c4d.BORDER_GROUP_IN)\n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE_SM, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n self.AddEditSlider(ID_SAMPLES, c4d.BFH_SCALEFIT, 0, 0)\n self.GroupEnd()\n\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"Sampling Seed\")\n self.GroupBorder(c4d.BORDER_GROUP_IN)\n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE_SM, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n self.AddEditSlider(ID_SEED, c4d.BFH_SCALEFIT, 0, 0)\n self.GroupEnd()\n\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"Consider\", cols=1)\n self.GroupBorder(c4d.BORDER_GROUP_IN)\n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE_SM, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n self.AddCheckbox(ID_CONSIDER_MATERIALS, c4d.BFH_SCALEFIT, 0, 0, \"Materials\")\n self.AddCheckbox(ID_CONSIDER_NORMALS, c4d.BFH_SCALEFIT, 0, 0, \"Normals\")\n self.AddCheckbox(ID_CONSIDER_UVS, c4d.BFH_SCALEFIT, 0, 0, \"UVs\")\n self.GroupEnd()\n\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT)\n self.GroupBorder(c4d.BORDER_GROUP_IN)\n self.GroupBorderSpace(GROUP_BORDER_SPACE, GROUP_BORDER_SPACE_SM, GROUP_BORDER_SPACE, GROUP_BORDER_SPACE)\n self.AddCheckbox(ID_BLIND_MODE, c4d.BFH_SCALEFIT, 0, 0, \"Blind Mode\")\n self.GroupEnd()\n\n self.GroupEnd() \n self.GroupEnd() # After this, we are in Overall group.\n \n self.AddSeparatorH(0, c4d.BFH_SCALEFIT)\n self.AddProgressBar(w_size=100, h_size=10)\n # self.AddSeparatorH(0, c4d.BFH_SCALEFIT)\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"\", cols = 2)\n self.AddButton(ID_EXTRACT_BTN, c4d.BFH_SCALEFIT, 0, 30, name=\"Extract Instances\") \n self.AddButton(ID_PROCESS_BTN, c4d.BFH_SCALEFIT, 0, 30, name=\"Instantce!\") \n self.GroupEnd()\n\n # self.AddSubDialog(ID_BLANK, c4d.BFV_SCALEFIT, 0, 0)\n self.GroupBegin(ID_BLANK, c4d.BFH_SCALEFIT, title=\"About\", cols = 2)\n self.AddSeparatorH(c4d.BFH_SCALEFIT)\n self.AddSeparatorH(c4d.BFH_SCALEFIT)\n self.AddSubDialog(ID_BLANK, c4d.BFH_SCALEFIT, 0, 0)\n self.AddStaticText(ID_AUTHOR_TEXT, c4d.BFH_RIGHT, 0, 0, ABOUT_TEXT_COPYRIGHT)\n # self.AddRadioText(ID_LINK_WEBSITE, c4d.BFH_FIT, 0, 0, ABOUT_TEXT_WEBSITE)\n self.GroupEnd()\n \n # self.AddSeparatorH(0, c4d.BFH_SCALEFIT)\n # self.GroupEnd() # End of the overall group. 
\n        return True\n\n    def InitValues(self):\n        \"\"\" \n        Called when the dialog is initialized by the GUI; sets the startup values.\n        \"\"\"\n        global doc\n        doc = c4d.documents.GetActiveDocument()\n        self.SetDefaultColor(ID_INEXCLUDE_LIST, c4d.COLOR_BG, BG_DARKER)\n        self.SetDefaultColor(ID_VERSION_NUMBER, c4d.COLOR_TEXT, ACCENT_COL_C4D)\n        self.SetString(ID_PROGRESSBAR_TEXT, \"0%\")\n        self.SetInt32(ID_PRECISION, 3, min=0, max=5, step=1, max2=10)\n        self.SetInt32(ID_SAMPLES, 100, min=0, max=1000, step=1, max2=100000)\n        self.SetInt32(ID_SEED, 12345, min=0, max=99999, step=1)\n\n        # self.SetBool(ID_CONSIDER_TAGORDER, True)\n        self.SetBool(ID_CONSIDER_NORMALS, True)\n        self.SetBool(ID_CONSIDER_UVS, True)\n\n        self.Enable(ID_PROCESS_BTN, False)\n\n        self._listViewRoot = [obj for obj in doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_CHILDREN) if obj.GetType() == c4d.Opolygon] #c4d.GETACTIVEOBJECTFLAGS_SELECTIONORDER)]\n        self.UpdateTreeView(self._listViewRoot, self._listViewFns)\n        return True    \n    \n    def Command(self, id, msg):\n        \"\"\"\n        Called automatically when the user clicks on a gadget or changes its\n        value; it is also called when a string menu item is selected.\n        :param id: The ID of the gadget that triggered the event.\n        :param msg: The original message container.\n        :return: False if there was an error, otherwise True.\n        \"\"\"\n        if (id == ID_EXTRACT_BTN and not self.extracted):\n            consider_dict = {\n                \"materials\": self.GetBool(ID_CONSIDER_MATERIALS),\n                \"normals\": self.GetBool(ID_CONSIDER_NORMALS),\n                \"uvs\": self.GetBool(ID_CONSIDER_UVS),\n            }\n            # precision/samples/seed are integer sliders, so read them with GetInt32\n            instance_args = {\n                \"precision\": self.GetInt32(ID_PRECISION),\n                \"samples\": self.GetInt32(ID_SAMPLES),\n                \"seed\": self.GetInt32(ID_SEED),\n                \"blind\": self.GetBool(ID_BLIND_MODE),\n                \"consider\": consider_dict,\n            }\n            self.Extract(instance_args)\n            self.extracted = True\n            self.SetString(ID_EXTRACT_BTN, \"Clear Instances\")\n            self.Enable(ID_PROCESS_BTN, True)\n        \n        elif (id == ID_EXTRACT_BTN and self.extracted):\n            self.ClearExtraction()\n            self.extracted = False\n            self.SetString(ID_EXTRACT_BTN, \"Extract Instances\")\n            self.Enable(ID_PROCESS_BTN, False)\n\n        elif (id == ID_PROCESS_BTN):\n            self.extracted = False\n            self.SetString(ID_EXTRACT_BTN, \"Extract Instances\")\n            self.Enable(ID_PROCESS_BTN, False)\n            self.Process()\n\n        elif id == ID_LINK_ABOUT:\n            about_dlg = AboutDialog()\n            about_dlg.Open(c4d.DLG_TYPE_MODAL, xpos=-2, ypos=-2)\n        elif id == ID_LINK_README:\n            webbrowser.open(ABOUT_LINK_README)\n        elif id == ID_LINK_WEBSITE:\n            webbrowser.open(ABOUT_TEXT_WEBSITE)\n        elif id == ID_SUPPORT_ME:\n            webbrowser.open(ABOUT_SUPPORT)\n        \n        return True
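\n\n    # Illustrative sketch: the gadget getters are type-specific, so integer\n    # sliders are read with GetInt32 and checkboxes with GetBool, e.g.\n    #   samples = self.GetInt32(ID_SAMPLES)   # slider value as an int\n    #   blind = self.GetBool(ID_BLIND_MODE)   # checkbox state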
\n    \n\n    def CoreMessage(self, id, msg):\n        \"\"\"\n        Override this function if you want to react to Cinema 4D core messages.\n        The original message is stored in msg\n        \"\"\" \n        if id == c4d.EVMSG_CHANGE:\n            pass\n        return True\n\nclass AboutDialog(c4d.gui.GeDialog):\n    def CreateLayout(self):\n        self.SetTitle(\"About\")\n        self.AddStaticText(ID_BLANK, c4d.BFH_CENTER, 0, 0, \"Instantce\")\n        self.AddStaticText(ID_BLANK, c4d.BFH_CENTER, 0, 0, VERSION_NUMBER)\n        self.AddStaticText(ID_BLANK, c4d.BFH_CENTER, 0, 0, \"Instantly recognize & replace identical objects with instances!\")\n        self.AddSeparatorH(c4d.BFH_SCALEFIT)\n        self.AddStaticText(ID_AUTHOR_TEXT, c4d.BFH_FIT, 0, 0, \"Authors:\\t\\tMarvin Jérôme Stephan & Darby Edelen\")\n        self.AddRadioText(ID_SUPPORT_ME, c4d.BFH_FIT, 0, 0, \"Support me:\\t\" + ABOUT_SUPPORT)\n        self.AddRadioText(ID_LINK_WEBSITE, c4d.BFH_FIT, 0, 0, \"Website:\\t\\t\" + ABOUT_TEXT_WEBSITE)\n        return True\n    \n    def Command(self, mid, msg):\n        if mid == ID_SUPPORT_ME:\n            webbrowser.open(ABOUT_SUPPORT)\n        elif mid == ID_LINK_WEBSITE:\n            webbrowser.open(ABOUT_TEXT_WEBSITE)\n        return True\n\n\nclass MainDialogCommand(c4d.plugins.CommandData):\n    dlg = None\n    def Execute(self, doc):\n        if self.dlg is None:\n            self.dlg = MainDialog()\n        return self.dlg.Open(c4d.DLG_TYPE_ASYNC, pluginid=PLUGIN_ID, defaultw=0, defaulth=0, xpos=-2, ypos=-2)\n    \n    def RestoreLayout(self, sec_ref):\n        if self.dlg is None:\n            self.dlg = MainDialog()\n        return self.dlg.Restore(pluginid=PLUGIN_ID, secret=sec_ref)\n\nif __name__=='__main__':\n    directory, _ = os.path.split(__file__)\n    icon = os.path.join(directory, \"res\", \"Instantce.tif\")\n    bmp = c4d.bitmaps.BaseBitmap()\n    if bmp.InitWith(icon)[0] != c4d.IMAGERESULT_OK:\n        raise MemoryError(\"Failed to initialize the BaseBitmap.\")\n    c4d.plugins.RegisterCommandPlugin(id=PLUGIN_ID, \n                                    str=\"Instantce!\", \n                                    info=0, \n                                    help=\"Instantly recognize & replace identical objects with instances!\", \n                                    dat=MainDialogCommand(), \n                                    icon=bmp)","repo_name":"HerzogVonWiesel/Instantce","sub_path":"Instantce.pyp","file_name":"Instantce.pyp","file_ext":"pyp","file_size_in_byte":31642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36279352122","text":"# Command + B: Go to def.\n# Command + [, ]: Back, Forward\n# Alt + Space: Preview (this one is really recommended)\n# Command + Shift + F: Find all\n# Command + L: Go to line\n# Alt + Command + O: Go to symbol (Anything)\n# F3: Toggle bookmark\n# Alt + F3: Toggle custom bookmark (Numeric, Alphabet..)\n# Command + F3: Show bookmarks\n# Ctrl + R: Run\n# Ctrl + D: Debug\n# Command + R: Rerun\n# Shift + Command + R: Resume on debugging\n# Command + F2: Stop\n# Shift + F6: Rename\n# Command + F8: Toggle break point\n# option+shift+e\n\n\n##############################################\n# 4/24 the difference between return and break\ndef stop_func(num):\n    for i in range(1,num+1):\n        print('Printing the number {0}'.format(i))\n        if i == 5:\n            return # a bare return simply ends the function # how is this different from break?\nstop_func(10)\n\n# ex151) When the function is called with numbers as below, print the numbers vertically\n# print_something(1,2,3,4,5) # it must accept any amount of numbers\n\ndef print_something(*num_list):\n    for i in num_list:\n        print(i)\n\nprint_something(1,2,3,4,5) # wrapping the call in print() would also show None; calling the function directly just prints\n\n\ndef factorial(num): #10\n    if num > 1:\n        return factorial(num-1) * num #10-1=9\n    elif num == 1:\n        return 1\n\nprint(factorial(5))\n\n\ndef mult(a, b):\n    if b == 0:\n        return 0\n    rest = mult(a, b - 1)\n    value = a + rest\n    return value\nprint(\"3 * 2 = \", mult(3, 
2))\n\n\n","repo_name":"misoniiii/PythonClass","sub_path":"i_wanna/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33934441465","text":"with open(\"input24.txt\") as f:\n content = f.readlines()\n\npart1 = 0\nblack_tiles = []\n\nfor cur_line in content:\n cur_line = cur_line.strip()\n line_index = 0\n cur_coord = [0, 0]\n while line_index < len(cur_line):\n if cur_line[line_index] == 'e':\n cur_coord[1] += 1\n line_index += 1\n continue\n if cur_line[line_index] == 'w':\n cur_coord[1] -= 1\n line_index += 1\n continue\n if cur_line[line_index] == 'n' and cur_line[line_index + 1] == 'e':\n cur_coord[0] += 1\n cur_coord[1] += 0.5\n line_index += 2\n continue\n if cur_line[line_index] == 'n' and cur_line[line_index + 1] == 'w':\n cur_coord[0] += 1\n cur_coord[1] -= 0.5\n line_index += 2\n continue\n if cur_line[line_index] == 's' and cur_line[line_index + 1] == 'e':\n cur_coord[0] -= 1\n cur_coord[1] += 0.5\n line_index += 2\n continue\n if cur_line[line_index] == 's' and cur_line[line_index + 1] == 'w':\n cur_coord[0] -= 1\n cur_coord[1] -= 0.5\n line_index += 2\n continue\n\n cur_coord = (cur_coord[0], cur_coord[1])\n if cur_coord in black_tiles:\n black_tiles.remove(cur_coord)\n else:\n black_tiles.append(cur_coord)\n\npart1 = len(black_tiles)\nprint(\"part 1 = \" + str(part1))\n\n\ndef count_adjacent(tile_coords):\n num_adjacent_black = 0\n adjacent_white = []\n directions = [(0, 1), (0, -1), (1, 0.5), (1, -0.5), (-1, 0.5), (-1, -0.5)]\n for direction in directions:\n adjacent_tile = (tile_coords[0] + direction[0], tile_coords[1] + direction[1])\n if adjacent_tile in black_tiles:\n num_adjacent_black += 1\n else:\n adjacent_white.append(adjacent_tile)\n return (num_adjacent_black, adjacent_white)\n\n\nfor move_num in range(100):\n new_black_tiles = []\n white_adjacent_to_old_black_tiles = []\n for black_tile in black_tiles:\n (adjacent_black_num, adjacent_white) = count_adjacent(black_tile)\n if adjacent_black_num in [1, 2]:\n new_black_tiles.append(black_tile)\n for white_tile in adjacent_white:\n (adjacent_black_num, _) = count_adjacent(white_tile)\n if adjacent_black_num == 2:\n new_black_tiles.append(white_tile)\n new_black_tiles = list(set(new_black_tiles))\n black_tiles = new_black_tiles\n\npart2 = len(black_tiles)\nprint(\"part 2 = \" + str(part2))\n","repo_name":"dzolotusky/advent-of-code","sub_path":"2020/24/24.2.py","file_name":"24.2.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36021034111","text":"class Matrix:\n def __init__(self):\n self.a_matrix = []\n def insertmatrix(self,row,col):\n for r in range(row):\n temp = []\n for c in range(col):\n print(\"r \",r,\" c \",c,end=\" \")\n temp.append(int(input()))\n self.a_matrix.append(temp)\n \n def colprediction(self,row,col):\n temp = []\n for c in range(col):\n temp.append(0)\n print(temp)\n for r in range(row):\n for c in range(col):\n if self.a_matrix[r][c] == 0:\n temp[c] = temp[c] + 1\n else:\n temp[c] = temp[c] + 0\n count = -1\n for c in range(col-1):\n if temp[c] > temp[c+1]:\n count = c\n elif temp[c] < temp[c+1]:\n count = c+1\n print(count)\n \nif __name__==\"__main__\":\n t = int(input(\"t \"))\n for i in range(t):\n m = Matrix()\n row = int(input(\"row \"))\n col = int(input(\"col \"))\n m.insertmatrix(row,col)\n 
m.colprediction(row,col)\n","repo_name":"aarjukhicher/geeksprogram","sub_path":"Matrix/col_prediction.py","file_name":"col_prediction.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"53930951986","text":"from pydub import AudioSegment\nimport os, re\n\n\ncount = 1 # start from 2 in the for-loop\n\ndir = \"./Unit10\"\n\nprint(len(os.listdir(dir)))\n\nallSong = AudioSegment.from_mp3(dir + \"/1.mp3\")\nfiveSecBlank = AudioSegment.from_mp3(\"blank.mp3\") # 5 sec blank\nendListening = AudioSegment.from_mp3(\"end_listening.mp3\") # End of Listening\ndbAllSong = allSong.dBFS\n\nfor each in os.listdir(dir):\n\tcount = count + 1\n\tif count >= len(os.listdir(dir)):\n\t\tbreak\n\tfilename = dir + \"/\" + str(count) + \".mp3\"\n\tif filename:\n\t\tprint(\"Adding \" + filename)\n\t\tsong = AudioSegment.from_mp3(filename)\n\t\tdbSong = song.dBFS\n\t\tdbplus = dbAllSong - dbSong\n\t\tif dbplus < 0:\n\t\t\tdbAllSong+=abs(dbplus)\n\t\telif dbplus > 0:\n\t\t\tdbSong+=abs(dbplus)\n\t\tallSong = allSong + fiveSecBlank + song\n\nprint (\"Add Ending\")\nallSong = allSong + fiveSecBlank + endListening\n\nallSong.export(dir + \"/Unit1.mp3\", format=\"mp3\") # export as MP3\nprint (\"End Generating\")\n\n\"\"\"\nfilename[0] += '.mp3'\nmp3 = AudioSegment.from_mp3(filename[0]) # open the mp3 file\nmp3[17*1000+500:].export(filename[0], format=\"mp3\") # cut off the first 17.5 seconds and save in place\n\n\nenPath = \"%s%s/%s\"%(enDir,file,enfile) # path of the English file\ncnPath = \"%s%s/%s\"%(cnDir,file,enfile.replace(\"en_w\",\"cn_w\")) # path of the Chinese file\ntargetPath = \"%s%s/%s\"%(toDir,file,enfile.replace(\"en_w\",\"all\")) # path of the merged file\n\n# load the MP3 files\nsong1 = AudioSegment.from_mp3(enPath)\nsong2 = AudioSegment.from_mp3(cnPath)\n \n# get the loudness (dBFS) of the two MP3 files\ndb1 = song1.dBFS\ndb2 = song2.dBFS\n \nsong1 = song1[300:] # trim the English MP3 starting at 300ms\n \n# match the volume of the two MP3s so one is not louder than the other\ndbplus = db1 - db2\nif dbplus < 0: # song1 is quieter\n    song1+=abs(dbplus)\nelif dbplus > 0: # song2 is quieter\n    song2+=abs(dbplus)\n \n# concatenate the two audio files\nsong = song1 + song2\n \n# export the audio file\nsong.export(targetPath, format=\"mp3\") # export as MP3\"\"\"","repo_name":"zwm0426/combine-word-listening","sub_path":"combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"16"} +{"seq_id":"15511554322","text":"# If you ask which matters more, personal data or security, then for a\n# computer scientist, keeping personal data secure is the highest concern.\n# One way to keep data secure is to encrypt it so that it cannot be\n# decrypted, using the result only to check whether two pieces of data match.\n# This problem presents one simple encryption method: take an English\n# sentence, split it into words on the spaces, and turn each letter into a\n# hash value computed as\n\n# value = (Alphabet Position) + (Position in Word)\n\n# Alphabet Position is the index of the English letter:\n# A or a has Alphabet Position 0,\n# B or b has Alphabet Position 1,\n# C or c has Alphabet Position 2,\n# and so on, until Z or z, which has Alphabet Position 25.\n\n# Position in Word is the position of the letter within its word, starting\n# from 0 for the first letter of the word.\n\n# Example: the hash of the message \"DATA HASH\" is 66,\n# the sum of the hashes of D + A + T + A + H + A + S + H\n# = 3 + 1 + 21 + 3 + 7 + 1 + 20 + 10 = 66, computed as follows:\n\n# the word DATA\n# D has hash 3 = 3 + 0; A (the letter after D) has hash 1 = 0 + 1\n# T has hash 21 = 19 + 2; A (the last letter of DATA) has hash 3 = 0 + 3\n \n# the word HASH\n# H (first letter) has hash 7 = 7 + 0; A (the letter after H) has hash 1 = 0 + 1\n# S has hash 20 = 18 + 2; H (last letter) has hash 10 = 7 + 3\n\n# Write a program that computes the hash value of the message to encrypt.\n# Input\n# A single line of English text to encrypt, consisting only of letters and\n# spaces, with no digits or special characters.\n# Output\n# The hash value obtained by encrypting the message as described above.\n# Sample input -> output\n#   DATA HASH           -> 66\n#   privacy or security -> 280\n#   z z z z z z         -> 150\n#   SLEEPY zzz ZZZ      -> 247
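\n\n# Quick check of the scheme above: for DATA the per-letter values are\n# 3+0, 0+1, 19+2 and 0+3 (total 28), and for HASH they are 7+0, 0+1,\n# 18+2 and 7+3 (total 38), giving the stated 28 + 38 = 66.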
\n\ntext = input()\ntext = text.lower()\n\ntext = text.split(\" \")\n\ntotal = 0\n\nfor t in text:\n    for i in range(len(t)):\n        hashy = (ord(t[i]) - 97) + i\n        total += hashy\n\nprint(total)","repo_name":"3xbun/elab-cpe","sub_path":"final_test62/A4.py","file_name":"A4.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40855052978","text":"import logging\nimport time\n\nfrom acoustid.data.stats import (\n    NUM_PARTITIONS,\n    unpack_user_agent_stats_key,\n    update_user_agent_stats,\n)\nfrom acoustid.script import Script\nfrom acoustid.tasks import enqueue_task\nfrom acoustid.utils import call_internal_api\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_update_all_user_agent_stats(script: Script) -> None:\n    delay = 60.0 / NUM_PARTITIONS\n    with script.context() as ctx:\n        for partition in range(-1, NUM_PARTITIONS):\n            enqueue_task(ctx, \"update_user_agent_stats\", {\"partition\": partition})\n            time.sleep(delay)\n\n\ndef run_update_user_agent_stats(script: Script, partition: int):\n    if partition == -1:\n        root_key = \"ua\"\n    else:\n        root_key = f\"ua:{partition:02x}\"\n    logger.info(\"Updating user agent stats (key %s)\", root_key)\n    db = script.db_engines[\"app\"].connect()\n    redis = script.get_redis()\n    for key, count in redis.hgetall(root_key).items():\n        count = int(count)\n        date, application_id, user_agent, ip = unpack_user_agent_stats_key(key)\n        if not count:\n            # the only way this could be 0 is if we already processed it and\n            # nothing touched it since then, so it's safe to delete\n            redis.hdel(root_key, key)\n        else:\n            if script.config.cluster.role == \"master\":\n                update_user_agent_stats(db, application_id, date, user_agent, ip, count)\n            else:\n                call_internal_api(\n                    script.config,\n                    \"update_user_agent_stats\",\n                    application_id=application_id,\n                    date=date,\n                    user_agent=user_agent,\n                    ip=ip,\n                    count=count,\n                )\n            redis.hincrby(root_key, key, -count)","repo_name":"acoustid/acoustid-server","sub_path":"acoustid/scripts/update_user_agent_stats.py","file_name":"update_user_agent_stats.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"16"} +{"seq_id":"33160706188","text":"import sys\n\nsys.setrecursionlimit(10 ** 6)\n\n\nclass Tree:\n    def __init__(self, data, left=None, right=None):\n        self.data = data\n        self.left = left\n        self.right = right\n\n\npre, post = [], []\n\n\ndef preorder(node, nodeinfo):\n    pre.append(nodeinfo.index(node.data) + 1)\n    if node.left:\n        preorder(node.left, nodeinfo)\n    if node.right:\n        preorder(node.right, nodeinfo)\n\n\ndef postorder(node, nodeinfo):\n    if node.left:\n        postorder(node.left, nodeinfo)\n    if node.right:\n        postorder(node.right, nodeinfo)\n    post.append(nodeinfo.index(node.data) + 1)
{r2:.3f}\")\n\n\n","repo_name":"deephyper/NASBigData","sub_path":"nas_big_data/combo/autosklearn/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"42435982024","text":"from typing import Any, Callable, List, Optional, Tuple\n\nimport torch\nfrom PIL import Image\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\n\n\nclass ListDataset(Dataset):\n def __init__(self, list_file_path: str, transform: Optional[Callable] = None) -> None:\n super().__init__()\n\n self.items: List[Tuple[Image.Image, Tensor]] = []\n\n with open(list_file_path) as f:\n lines = [line.strip() for line in f.readlines()]\n\n for line in lines:\n img_path, label = line.split(\",\")\n img = Image.open(img_path).convert(\"RGB\")\n label = torch.tensor(int(label), dtype=torch.long)\n self.items.append((img, label))\n\n self.transform = transform\n\n def __len__(self) -> int:\n return len(self.items)\n\n def __getitem__(self, index: int) -> Tuple[Any, Tensor]:\n img, label = self.items[index]\n if self.transform:\n img = self.transform(img)\n\n return img, label\n","repo_name":"CVLAB-Unibo/netspace","sub_path":"data/listdset.py","file_name":"listdset.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"20940969531","text":"import pygame as pg\nimport math\n\n\ndef collatz(n):\n sequenz = []\n while n > 1:\n sequenz.append(n % 2 == 0)\n n = n // 2 if n % 2 == 0 else n * 3 + 1\n sequenz.append(False)\n return sequenz[::-1]\n\n\ndef rotate(winkel):\n winkel = math.radians(winkel)\n x = (math.cos(winkel) - math.sin(winkel)) * SCALE\n y = (math.sin(winkel) + math.cos(winkel)) * SCALE\n return x, -y\n\n\nBREITE, HÖHE = 1000, 1000\npg.init()\nscreen = pg.display.set_mode([BREITE, HÖHE])\nscreen.fill((0, 0, 0))\nfarbe = pg.Color(0)\npg.mouse.set_visible(False)\n\nSCALE = 3\nWINKELSCHRITT = 8\n\nfor n in range(100_000):\n sequenz = collatz(n)\n start_x, start_y = BREITE // 2.5, HÖHE\n winkel = 0\n for even in sequenz:\n winkel = winkel + WINKELSCHRITT if even else winkel - WINKELSCHRITT * 1.8\n farbe.hsva = (winkel % 360, 100, 70)\n x1, y1 = rotate(winkel)\n ziel_x, ziel_y = start_x + x1, start_y + y1\n pg.draw.line(screen, farbe, (start_x, start_y), (ziel_x, ziel_y), 1)\n start_x, start_y = ziel_x, ziel_y\n if n % 2000 == 0:\n pg.display.flip()\n\npg.mouse.set_visible(True)\n\nweitermachen = True\nwhile weitermachen:\n for ereignis in pg.event.get():\n if ereignis.type == pg.QUIT or (ereignis.type == pg.KEYDOWN and\n ereignis.key == pg.K_ESCAPE):\n weitermachen = False\npg.quit()\n","repo_name":"Buettovan/flappy_bird_G18E","sub_path":"Teil_xx_Collatz.py","file_name":"Teil_xx_Collatz.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70918825288","text":"from sqlalchemy import (Column, Integer, String, Boolean,\n Float, ForeignKey, DateTime,)\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass Tutorial(Base):\n __tablename__ = 'tutorial'\n id = Column(Integer, primary_key=True)\n order_id = Column(Integer, unique=True)\n content = Column(Integer, unique=True)\n\n @property\n def words(self):\n return len(self.content.split(' '))\n\n\nclass User(Base):\n __tablename__ = 'user'\n id = Column(Integer, 
primary_key=True)\n    username = Column(String, unique=True)\n    email = Column(String, unique=True)\n    password = Column(String)\n    salt = Column(String)\n    superuser = Column(Boolean)\n    next_tutorial_order_id = Column(Integer, ForeignKey(Tutorial.order_id), default=1)\n    tutorial = relationship(Tutorial, backref='users')\n\n    def __str__(self):\n        return f'{self.id} | {self.username} | {self.email} | {self.password}'\n\n\nclass Text(Base):\n    __tablename__ = 'text'\n    id = Column(Integer, primary_key=True)\n    content = Column(String, unique=True)\n\n    @property\n    def words(self):\n        return len(self.content.split(' '))\n\n\nclass SpeedTest(Base):\n    __tablename__ = 'speedtest'\n    id = Column(Integer, primary_key=True)\n    user_id = Column(Integer, ForeignKey(User.id))\n    user = relationship(User, backref='speedtests')\n    text_id = Column(Integer, ForeignKey(Text.id))\n    text = relationship(Text, backref='speedtests')\n    words_per_minute = Column(Float)\n    when = Column(DateTime)","repo_name":"dzhelek/Touch-typing-tutorial","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"12850939690","text":"import numpy as np\n\n# Initialize Weights (and biases)\ndef initialize_weight(shape, bias=True, initializer=\"xavier\"):\n    \"\"\"\n    Initialize weights according to initializer\n    [1] [ReLU] He Initialization : https://arxiv.org/pdf/1502.01852\n    [2] [Tanh] Xavier(Caffe version)\n    [3] [Sigmoid] Xavier Initialization : http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf\n    \"\"\"\n    if (initializer==\"He\"):\n        w = np.random.randn(*shape) * np.sqrt(2/shape[0])\n        b = np.random.randn(1, shape[1]) * np.sqrt(2/shape[0])\n    elif (initializer==\"xavier\"): \n        w = np.random.randn(*shape) * np.sqrt(1/shape[0])\n        b = np.random.randn(1, shape[1]) * np.sqrt(1/shape[0])\n    elif (initializer==\"xavier_orig\"):\n        assert shape[0] > shape[1] # Original two-sided Xavier initialization\n        w = np.random.randn(*shape) * np.sqrt(2/(shape[0]-shape[1]))\n        b = np.random.randn(1, shape[1]) * np.sqrt(2/(shape[0]-shape[1]))\n    \n    if not bias: # zero the bias when it is disabled; bitwise ~ on a bool is always truthy\n        b = np.zeros((1, shape[1]))\n    \n    return w, b","repo_name":"rrmina/eureka","sub_path":"eureka/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"38843169694","text":"import sys\nimport numpy as np\nfrom matplotlib import pyplot\nimport usb.core\nimport usb.util\nfrom xcore_ai_ie import xcore_ai_ie_usb, xcore_ai_ie_spi\n\nie = xcore_ai_ie_usb()\nie.connect()\n\nfor i in range (1):\n    #INPUT_SHAPE = (32, 128, 3)\n    INPUT_SHAPE = (160, 160, 3)\n    #INPUT_SHAPE = (16, 66, 1)\n\n    # Get image from device\n    #ie.acquire_set_i2c(0x3C, 0xfe, 0x00)\n    #ie.acquire_set_i2c(0x3C, 0x84, 0x03)\n    ie.start_acquire_single(400, 1200, 200, 1000, 160, 160)\n    #raw_img = ie.read_input_tensor(engine_num = 1)\n    raw_img = ie.read_input_tensor(engine_num = 0)\n    print(len(raw_img))\n    raw_img = raw_img[:INPUT_SHAPE[0]*INPUT_SHAPE[1]*INPUT_SHAPE[2]]\n    print(len(raw_img))\n    np_img = np.asarray(raw_img).reshape(INPUT_SHAPE)\n    rgb = np_img + np.asarray([128,128,128])\n    pyplot.imshow(rgb)\n    pyplot.show(block = True)\n    pyplot.pause(0.1)\npyplot.pause(1)
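\n# Note (illustrative): the +128 above recenters what appear to be signed\n# int8 samples into the 0..255 range imshow expects, e.g.\n# np.asarray([-128, 0, 127]) + 128 gives [0, 128, 255].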
\n","repo_name":"xmos/aisrv","sub_path":"app_alpr/recv_picture.py","file_name":"recv_picture.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13770309062","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-03-20 16:18:07\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nimport os\nimport sys\n\n#获取脚本路径\ndef cur_file_dir():\n pathx = sys.argv[0]\n path,_file = os.path.split(pathx)\n if cmp(path,'') == 0:\n path = sys.path[0]\n #判断为脚本文件还是py2exe编译后的文件,如果是脚本文件,则返回的是脚本的目录,如果是py2exe编译后的文件,则返回的是编译后的文件路径\n if os.path.isdir(path):\n return path\n elif os.path.isfile(path):\n return os.path.dirname(path)\n \n#获取父目录\ndef GetParentPath(strPath):\n if not strPath:\n return None;\n lsPath = os.path.split(strPath);\n if lsPath[1]:\n return lsPath[0];\n lsPath = os.path.split(lsPath[0]);\n return lsPath[0];\n\n#获取所有界面的json文件列表\ndef getAllExtFile(path,fromatx = \".txt\"):\n jsondir = path\n jsonfilelist = []\n for root, _dirs, files in os.walk(jsondir):\n for filex in files: \n #print filex\n name,text = os.path.splitext(filex)\n if cmp(text,fromatx) == 0:\n jsonArr = []\n rootdir = path\n dirx = root[len(rootdir):]\n pathName = dirx +os.sep + filex\n jsonArr.append(pathName)\n (newPath,_name) = os.path.split(pathName)\n jsonArr.append(newPath)\n jsonArr.append(name)\n jsonfilelist.append(jsonArr)\n return jsonfilelist\n\npngfiles = getAllExtFile('flypai','.png')\n\nfor l in pngfiles:\n fpth = 'flypai' + l[0]\n newpth = 'flypai' + os.sep + 'pai' + l[2] + '.png'\n os.system('mv %s %s'%(fpth,newpth))\n","repo_name":"qbzjs/cmdtool","sub_path":"fileTool/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70097588488","text":"# -*- coding: utf-8 -*-\n\n'''\nTests for basic capturing\n'''\n\nimport os\nimport os.path\nimport shutil\nimport tempfile\nimport unittest\n\nfrom unittest.mock import patch\n\nfrom pyca import ingest, config, db, utils\nfrom tests.tools import should_fail, terminate_fn\n\n\nclass TestPycaIngest(unittest.TestCase):\n\n def setUp(self):\n ingest.http_request = lambda x, y=False: b'xxx'\n self.fd, self.dbfile = tempfile.mkstemp()\n self.cadir = tempfile.mkdtemp()\n config.config('agent')['database'] = 'sqlite:///' + self.dbfile\n config.config('capture')['directory'] = self.cadir\n config.config()['services']['org.opencastproject.ingest'] = ['']\n config.config()['services']['org.opencastproject.capture.admin'] = ['']\n\n # Mock event\n db.init()\n event = db.RecordedEvent()\n event.uid = '123123'\n event.status = db.Status.FINISHED_RECORDING\n event.start = utils.timestamp()\n event.end = event.start + 1\n prop = 'org.opencastproject.capture.agent.properties'\n dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'\n data = [{'data': u'äü%sÄÜß' % dcns,\n 'fmttype': 'application/xml',\n 'x-apple-filename': 'episode.xml'},\n {'data': u'äü%sÄÜß' % dcns,\n 'fmttype': 'application/xml',\n 'x-apple-filename': 'series.xml'},\n {'data': u'event.title=äüÄÜß\\n' +\n u'org.opencastproject.workflow.config.x=123\\n' +\n u'org.opencastproject.workflow.definition=fast',\n 'fmttype': 'application/text',\n 'x-apple-filename': prop}]\n event.set_data({'attach': data})\n\n # Create recording\n os.mkdir(event.directory())\n trackfile = os.path.join(event.directory(), 'test.mp4')\n open(trackfile, 
'wb').close()\n event.set_tracks([('presenter/source', trackfile)])\n session = db.get_session()\n session.add(event)\n session.commit()\n self.event = db.RecordedEvent(event)\n\n def tearDown(self):\n os.close(self.fd)\n os.remove(self.dbfile)\n shutil.rmtree(self.cadir)\n\n @patch(__name__+'.ingest.ingest')\n def test_safe_start_ingest(self, ingest_fn):\n ingest_fn.side_effect = lambda x: None\n ingest.safe_start_ingest(self.event)\n ingest_fn.side_effect = should_fail\n ingest.safe_start_ingest(self.event)\n\n def test_run(self):\n ingest.terminate(True)\n ingest.run()\n ingest.terminate = terminate_fn(1)\n ingest.run()\n config.config('agent')['backup_mode'] = True\n ingest.run()\n\n def test_get_config_params(self):\n properties = '\\n'.join([\n 'org.opencastproject.workflow.config.encode_720p=true',\n 'org.opencastproject.workflow.config.cutting=false',\n 'org.opencastproject.workflow.definition=fast',\n 'org.opencastproject.nonsense=whatever'\n ])\n workflow, parameters = ingest.get_config_params(properties)\n self.assertEqual(workflow, 'fast')\n self.assertEqual(\n set([('encode_720p', 'true'), ('cutting', 'false')]),\n set(parameters))\n","repo_name":"opencast/pyCA","sub_path":"tests/test_ingest.py","file_name":"test_ingest.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"16"} +{"seq_id":"74868874249","text":"import time\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nfrom decisionTreeClassifier import DecisionTree\n\ndef accuracy(ytrue, ypred):\n\treturn np.sum(ypred == ytrue) / len(ypred)\n\ndef makeDataset():\n\tbreastCancer = datasets.load_breast_cancer()\n\tX, y = breastCancer.data, breastCancer.target\n\treturn X, y\n\nif __name__ == \"__main__\":\n\tX, y = makeDataset()\n\txtrain, xtest, ytrain, ytest = train_test_split(X, y, random_state = 101)\n\n\tprint('='*25 + 'DECISION TREE CLASSIFIER' + '='*25 + '\\n\\n')\n\tstart = time.time()\n\ttreeModel = DecisionTree()\n\ttreeModel.fit(xtrain, ytrain)\n\tpredictions = treeModel.predict(xtest)\n\tscore = accuracy(ytest, predictions)\n\tend = time.time()\n\tprint(f'accuracy score: {score}\\n')\n\tprint(f'time taken: {end - start}\\n')","repo_name":"AryanSharma5/ML-Algorithms-From-Scratch","sub_path":"DecisionTree/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71548369287","text":"#Imports\nfrom flask import render_template, request, redirect\nfrom app import app\nfrom app.forms import information_form\nimport os\n\n\n@app.route('/success')\ndef success():\n return render_template('index.html', title='Success')\n\n@app.route('/')\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n form = information_form()\n if form.validate_on_submit():\n s3_bucket_location = form.s3_bucket.data\n write_path = form.write_path.data\n columns = form.columns.data\n en_or_de = form.en_or_de.data\n delimiter = form.delimiter.data\n file = open(\"/home/ubuntu/InvisibleMe/src/system_info.csv\",\"w+\")\n for info in [s3_bucket_location, write_path, en_or_de, delimiter,columns]:\n if info == columns:\n file.writelines(info)\n else:\n file.writelines(info+\",\")\n command = \"spark-submit /home/ubuntu/InvisibleMe/src/run.py\"\n os.system(command)\n return redirect('/success')\n return render_template('form.html', 
form=form)\n","repo_name":"SamuelDJudge/InvisibleMe","sub_path":"tools/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14175006299","text":"from unittest import TestCase\nfrom unittest.mock import mock_open, patch\n\nfrom parameterized import parameterized\n\nfrom design_patterns.solid.srp_good.repository import Repository\nfrom design_patterns.solid.srp_good.journal import Journal\n\n\nclass TestRepository(TestCase):\n def setUp(self) -> None:\n self.journal = Journal()\n self.journal.add_entry(\"Journal\")\n\n def test_save_to_file_called_valid_store_journal(self):\n open_mock = mock_open()\n with patch(\"design_patterns.solid.srp_good.repository.open\", open_mock, create=True):\n Repository.save_to_file(self.journal, \"filename\")\n\n open_mock.assert_called_with(\"filename\", \"w\")\n open_mock.return_value.write.assert_called_once_with(str(self.journal))\n\n\nclass TestJournal(TestCase):\n def setUp(self) -> None:\n self.journal = Journal()\n self.journal.add_entry(\"Journal #1\")\n self.journal.add_entry(\"Journal #2\")\n\n @parameterized.expand([(0, \"1: Journal #2\"), (1, \"0: Journal #1\")])\n def test_remove_entry_called_valid_then_remove_selected(self, pos, expected):\n self.journal.remove_entry(pos)\n self.assertEqual(expected, str(self.journal))\n\n @parameterized.expand(\n [\n (2, ValueError),\n ]\n )\n def test_remove_entry_called_invalid_then_raise_exception(self, pos, exception):\n with self.assertRaises(exception):\n self.journal.remove_entry(pos)\n","repo_name":"schuna/design-patterns-python","sub_path":"tests/unit/test_srp_good.py","file_name":"test_srp_good.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24758402237","text":"from operator import attrgetter\nfrom itertools import count\nimport numpy as np\nfrom random import choice\n\n\nfrom hipop.shortest_path import dijkstra\nfrom mnms.demand.user import User\nfrom mnms.time import Time\nfrom mnms.demand.manager import BaseDemandManager\n\n\ndef generate_random_demand(mlgraph: \"MultiLayerGraph\",\n nb_user: int,\n tstart=\"07:00:00\",\n tend=\"18:00:00\",\n min_cost=0,\n cost_path=None,\n distrib_time=np.random.uniform,\n repeat=1, seed=None) -> BaseDemandManager:\n \"\"\"Create a random demand by using the extremities of the mobility_graph as origin destination pair, the departure\n time use a distribution function to generate the departure between tstart and tend.\n\n Args:\n mmgraph: The graph use to generate the demand\n tstart: Lower bound of departure time\n tend: Upper boumd of departure time\n min_cost: Minimal cost to accept an origin destination pair\n cost_path: The name of the cost to use for shortest path\n distrib_time: Distribution function to generate random departure dates\n repeat: Repeat each origin destination pair\n seed: Random seed\n\n Returns:\n The generated demand\n\n \"\"\"\n if cost_path is None:\n cost_path = \"length\"\n\n if seed is not None:\n np.random.seed(seed)\n\n tstart = Time(tstart).to_seconds()\n tend = Time(tend).to_seconds()\n\n demand = []\n origins = list(mlgraph.odlayer.origins.keys())\n destinations = list(mlgraph.odlayer.destinations.keys())\n uid = count(0)\n\n graph = mlgraph.graph\n user_count = 0\n\n map_layer_services = {lid:list(layer.mobility_services.keys())[0] for lid, layer in mlgraph.layers.items()}\n 
map_layer_services[\"TRANSIT\"] = \"WALK\"\n\n while user_count <= nb_user:\n unode = choice(origins)\n dnode = choice(destinations)\n\n _, path_cost = dijkstra(graph, unode, dnode, cost_path, map_layer_services)\n if min_cost <= path_cost < float('inf'):\n demand.extend([User(str(next(uid)), unode, dnode, Time.from_seconds(distrib_time(tstart, tend))) for _ in\n range(repeat)])\n user_count+=repeat\n\n demand.sort(key=attrgetter('departure_time'))\n\n uid = count(0)\n for u in demand:\n u.id = str(next(uid))\n return BaseDemandManager(demand)\n\n\nif __name__ == \"__main__\":\n\n from mnms.generation.mlgraph import generate_manhattan_passenger_car\n from mnms.io.graph import save_odlayer, save_graph\n\n mlgraph = generate_manhattan_passenger_car(20, 100)\n\n demand = generate_random_demand(mlgraph,\n 500,\n min_cost=300)\n\n demand.to_csv(\"random_demand_20x20.csv\")\n save_odlayer(mlgraph.odlayer, \"odlayer_20x20.json\")\n save_graph(mlgraph, \"manhattan_20x20.json\")","repo_name":"licit-lab/MnMS","sub_path":"src/mnms/generation/demand.py","file_name":"demand.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"3436580106","text":"import pandas as pd\n\nfrom data import Data5Royal\nimport math\n\nfrom scripts.data_path import resource_path\n\n\nclass Persona:\n def __init__(self, arcana: str, level: int, name: str, special: bool, ultimate: bool, dlc: bool, treasure: bool):\n self.arcana = arcana\n self.level = level\n self.current_level = level\n self.name = name\n self.owned = False\n self.special = special\n self.ultimate = ultimate\n self.dlc = dlc\n self.treasure_demon = treasure\n self.can_be_fused = False\n self.fusion_material_list = []\n self.cost = 0\n\n def set_owned(self, owned):\n self.owned = owned\n if owned:\n self.fusion_material_list = []\n self.can_be_fused = False\n\n def __str__(self):\n # return f'{self.arcana} {self.level} {self.name}'\n return f'{self.name};{self.current_level};{self.cost}'\n\n def __repr__(self):\n # return f'{self.arcana} {self.level} {self.name}'\n return f'{self.name};{self.current_level};{self.cost}'\n\n\nclass FileReader:\n def __init__(self):\n self.persona_list = self.excel_to_persona(resource_path('data/compendium.xlsx'))\n self.persona_map = self.create_persona_map()\n self.reverse_fusion_map = self.create_reverse_table()\n self.add_special_fusions()\n\n def excel_to_persona(self, path):\n data = pd.read_excel(path)\n df = pd.DataFrame(data)\n df = df.reset_index()\n persona_list = []\n for index, row in df.iterrows():\n # print(row['Arcana'], row['Level'], row['Name'])\n persona = Persona(row['Arcana'], int(row['Level']), row['Name'], str(row['Special']) == \"True\",\n str(row['Ultimate']) == \"True\", str(row['DLC']) == \"True\", str(row['Treasure']) == \"True\")\n if str(row['Cost']) != \"nan\":\n persona.cost = int(float(str(row['Cost'])))\n persona_list.append(persona)\n print(persona_list)\n return persona_list\n\n def create_reverse_table(self):\n reverse_fusion_map = {}\n\n for first_persona_index in range(len(self.persona_list)):\n for second_persona_index in range(first_persona_index + 1, len(self.persona_list)):\n p1 = self.persona_list[first_persona_index]\n p2 = self.persona_list[second_persona_index]\n\n if (p1.treasure_demon and not p2.treasure_demon) or (p2.treasure_demon and not p1.treasure_demon):\n continue\n\n result_arcana = \"\"\n for combo in Data5Royal.arcana2CombosRoyal:\n if combo['source'] == [p1.arcana, 
p2.arcana] or combo['source'] == [p2.arcana, p1.arcana]:\n result_arcana = combo['result']\n break\n result_level = (p1.level + p2.level) / 2\n result_level += 1 if result_level.is_integer() else 0.5\n\n if result_arcana != \"\":\n persona = self.forward_fusion(p1.name, p2.name)\n if persona is not None:\n if persona.name in reverse_fusion_map.keys():\n reverse_fusion_map.get(persona.name).append((p1.name, p2.name))\n else:\n reverse_fusion_map[persona.name] = [(p1.name, p2.name)]\n\n # print(reverse_fusion_map)\n # print(len(reverse_fusion_map))\n return reverse_fusion_map\n\n def forward_fusion(self, persona1, persona2):\n p1 = self.persona_map[persona1]\n p2 = self.persona_map[persona2]\n\n if (p1.treasure_demon and not p2.treasure_demon) or (p2.treasure_demon and not p1.treasure_demon):\n self.treasure_demon_fusion(persona1, persona2)\n\n result_arcana = \"\"\n for combo in Data5Royal.arcana2CombosRoyal:\n if combo['source'] == [p1.arcana, p2.arcana] or combo['source'] == [p2.arcana, p1.arcana]:\n result_arcana = combo['result']\n break\n result_level = math.floor((p1.level + p2.level) / 2) + 1\n if result_arcana != \"\":\n result_arcana_list = [x for x in self.persona_list if x.arcana == result_arcana]\n if p1.arcana != p2.arcana:\n # different arcana fusion\n for persona in result_arcana_list:\n if persona.special or persona.treasure_demon:\n continue\n if persona.level >= result_level:\n return persona\n else:\n # same arcana fusion\n for persona in reversed(result_arcana_list):\n if persona.special or persona.treasure_demon or persona == p1 or persona == p2:\n continue\n if persona.level <= result_level:\n return persona\n\n def forward_special_fusion(self, materials):\n specials = [x for x in self.persona_map.values() if x.special]\n for special in specials:\n if all(x in special.fusion_material_list[0] for x in materials):\n return special\n\n def treasure_demon_fusion(self, persona1, persona2):\n p1 = self.persona_map[persona1]\n p2 = self.persona_map[persona2]\n treasure = p1 if p1.treasure_demon else p2\n non_treasure = p1 if not p1.treasure_demon else p2\n arcana_list = [x for x in self.persona_list if x.arcana == non_treasure.arcana and not x.special\n and not x.treasure_demon]\n\n index = Data5Royal.rarePersonaeRoyal.index(treasure.name)\n rank = Data5Royal.rareCombosRoyal[non_treasure.arcana][index]\n\n # find \"index\" of non treasure persona at its current level\n counter = 0\n for item in arcana_list:\n if item.level > non_treasure.current_level:\n break\n counter += 1\n if rank > 0:\n counter -= 1\n\n if len(arcana_list) > counter + rank >= 0:\n return arcana_list[counter + rank]\n\n def create_persona_map(self):\n persona_map = {}\n for persona in self.persona_list:\n persona_map[persona.name] = persona\n return persona_map\n\n def add_special_fusions(self):\n data = pd.read_excel(resource_path('data/special_compendium.xlsx'))\n df = pd.DataFrame(data)\n df = df.reset_index()\n for index, row in df.iterrows():\n p = self.persona_map[row['Name']]\n l = []\n for i in range(1, 7):\n mat = row[f'Material{i}']\n if not pd.isna(mat):\n l.append(mat)\n p.fusion_material_list.append(l)\n self.reverse_fusion_map[p.name] = [tuple(l)]\n\n # create_reverse_table()\n\n\n# s = CompendiumScanner(FileReader().persona_map)\n# s.take_screenshot({\"top\": 260, \"left\": 590, \"width\": 40, \"height\": 
20}).show()\n","repo_name":"JadaMaar/Persona5R-fusion-helper","sub_path":"scripts/persona.py","file_name":"persona.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34874830591","text":"import os\nimport sys\nimport random\n\nimport tempfile\nfrom subprocess import call\n\n\ndef main(file, temporary=False):\n tf_os, tpath = tempfile.mkstemp(dir='/home/mouna.mn/code/DIN-V2-CODE')\n tf = open(tpath, 'w')\n\n fd = open(file, \"r\")\n for l in fd:\n print >> tf, l.strip(\"\\n\")\n tf.close()\n\n lines = open(tpath, 'r').readlines()\n random.shuffle(lines)\n if temporary:\n path, filename = os.path.split(os.path.realpath(file))\n fd = tempfile.TemporaryFile(prefix=filename + '.shuf', dir=path)\n else:\n fd = open(file + '.shuf', 'w')\n\n for l in lines:\n s = l.strip(\"\\n\")\n print >> fd, s\n\n if temporary:\n fd.seek(0)\n else:\n fd.close()\n\n os.remove(tpath)\n\n return fd\n\n\nif __name__ == '__main__':\n main(sys.argv[1])\n\n","repo_name":"princewen/tensorflow_practice","sub_path":"recommendation/Basic-DIEN-Demo/source_code/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":6427,"dataset":"github-code","pt":"16"} +{"seq_id":"4797286821","text":"from flask import Flask,render_template,request,jsonify,send_file\nfrom flask import after_this_request\nimport requests\nimport random\nimport os\nimport io\nimport img2pdf\nfrom bs4 import BeautifulSoup\nimport time\nimport re\nfrom os import listdir\nimport shutil\nfrom pdfrw import PdfReader, PdfWriter, PageMerge\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n\n\napp=Flask(__name__)\n\n\n\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\",title=\"home\")\n\n\n@app.route(\"/download\",methods=['GET','POST'])\ndef download(): \n if request.method==\"POST\":\n\n realurl=request.form['search']\n \n\n rannumber=random.randint(0,11)\n directory=f\"mypdf{rannumber}\"\n os.mkdir(directory)\n try:\n\n response=requests.get(realurl)\n Soup=BeautifulSoup(response.text,\"html.parser\")\n gather=Soup.findAll(\"div\" ,class_=\"image-thumb\")\n urls=[images[\"data-lazy\"] for images in gather]\n for url in urls:\n filename = re.search(r'/([\\w_-]+[.](jpg|gif|png))$', url)\n if not filename:\n statusbar.config(text=\"Regex didn't match with the url: {0}\".format(url))\n continue\n with open(directory+\"/\"+filename.group(1), 'wb') as f:\n if 'http' not in url:\n url = '{}{}'.format(site, url)\n response = requests.get(url) \n f.write(response.content)\n\n except Exception as e:\n print(e)\n\n\n\n\n ran=random.randint(1,11111)\n pdfname=f\"hello{ran}.pdf\"\n \n try:\n with open(pdfname,\"wb\") as f:\n try:\n imgs = []\n for fname in os.listdir(directory):\n if not fname.endswith(\".png\"):\n continue\n path = os.path.join(directory, fname)\n if os.path.isdir(path):\n continue\n imgs.append(path)\n f.write(img2pdf.convert(imgs))\n except:\n print(\"not jpg or jpeg\")\n\n\n try:\n imgs = []\n for fname in os.listdir(directory):\n if not fname.endswith(\".jpg\"):\n continue\n path = os.path.join(directory, fname)\n if os.path.isdir(path):\n continue\n imgs.append(path)\n f.write(img2pdf.convert(imgs))\n except:\n print(\"not png or jpeg\")\n\n\n try:\n imgs = []\n for fname in os.listdir(directory):\n if not fname.endswith(\".jpeg\"):\n continue\n path = os.path.join(directory, fname)\n if os.path.isdir(path):\n continue\n imgs.append(path)\n 
f.write(img2pdf.convert(imgs))\n except:\n print(\"not png or jpg\")\n except Exception as e:\n print(e)\n\n\n with open(pdfname,'rb') as file:\n return_data = io.BytesIO(file.read())\n return_data.seek(0)\n shutil.rmtree(directory)\n os.remove(pdfname)\n ran=random.randint(0,111)\n return send_file(return_data,as_attachment=True,mimetype='application/pdf',attachment_filename=f\"mypdf{ran}.pdf\")\n\n\n\n\n\nif __name__==\"__main__\":\n app.run(debug=True,host=\"192.168.1.204\")","repo_name":"ShreyasMohite/xhamster-pic-download","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70421424649","text":"parameters = [['IM', 'OSM'], ['KommaS', 'PlusS'], ['IR', 'DR', 'GIR', 'GDR']]\nfor mutation_input in range(1,3):\n for selection_input in range(1,3):\n for recombination_input in range(1,5):\n for initial_sigma in range(1,11):\n for num_parents in range(2,21):\n for num_offspring in range(2,21):\n test = str(parameters[0][mutation_input-1]) + ' '+ str(parameters[1][selection_input-1]) + ' ' + str(parameters[2][recombination_input-1]) + ' ' + (f\"IS:{initial_sigma/100}\") + ' ' + (f\"NP:{num_parents}\") + ' ' + (f'NO:{num_offspring}')\n print(type(test))\n break\n break\n break\n break\n break\n break\n\nparameters = [['IM', 'OSM'], ['KommaS', 'PlusS'], ['IR', 'DR', 'GIR', 'GDR']]","repo_name":"Knightjulius/EA-Ass","sub_path":"Vis.py","file_name":"Vis.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18621603110","text":"import random\nfrom datetime import datetime\n\nimport requests\nimport redis\nimport pymysql\n\n# 链接MySQL数据库\nconn = pymysql.Connect(host='10.10.107.7', user='root', password='xinqian@saibao', database='bigdata', port=3306)\ncursor = conn.cursor()\n# 链接Redis数据库\nredisDB = redis.Redis(host='127.0.0.1', port=6379, db=2)\n# 固定资产投资完成额变化\nredis_dict_key = 'ods_cnncnybgdzctzwcebhnd'\n\n\n# 获取讯代理IP\ndef getIp():\n xdl_url = 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=913d4f4b67e24be0998a3eb344ff732b&orderno=YZ2021923652gUFZCj&returnType=2&count=10'\n ipListData = requests.get(url=xdl_url).json()\n ipList = []\n ipList.clear()\n # 将ip以字典的形式添加至ip池\n for everyIp in ipListData['RESULT']:\n ipList.append({\n 'ip': everyIp['ip'],\n 'port': everyIp['port']\n })\n return ipList\n\n\ndef insertMysql(item):\n if redisDB.hexists(redis_dict_key, item['completedAmountofSocialInvestment'] + '-' + item['Ctime'] + '-' + item[\n 'nlmyCompletionAmount']):\n print('已存在该值,不作处理...')\n else:\n redisDB.hset(redis_dict_key, item['completedAmountofSocialInvestment'] + '-' + item['Ctime'] + '-' + item[\n 'nlmyCompletionAmount'], 0)\n sql = 'insert into ods_chinanongcunnongyebugdzctzwcebhnd(classification,completedAmountofSocialInvestment,Ctime,nlmyCompletionAmount,nlmyProportion,unit,insertTime)values (%s,%s,%s,%s,%s,%s,%s)'\n cursor.execute(sql, (item['classification'],\n item['completedAmountofSocialInvestment'], item['Ctime'], item['nlmyCompletionAmount'],\n item['nlmyProportion'], item['unit'], item['insertTime']))\n print('正在插入数据,请稍等...')\n conn.commit()\n\n\ndef getData():\n url = 'http://zdscxx.moa.gov.cn:8080/nyb/qggdzctz'\n item = {}\n headers = {\n 'Referer': 'http://zdscxx.moa.gov.cn:8080/nyb/pc/index.jsp',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.8 
Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n data = {\n 'item': '年度'\n }\n ip = random.choice(ipList)\n response = requests.post(url=url, headers=headers, data=data,\n proxies={'http': 'http://' + ip['ip'] + ':' + ip['port']}).json()['result']['rowDatas']\n item['classification'] = '年度'\n for perData in response:\n # 全社会固定资产投资完成额\n item['completedAmountofSocialInvestment'] = perData['全社会固定资产投资完成额']\n # 时间\n item['Ctime'] = perData['时间']\n try:\n # 农、林、牧、渔业全社会固定资产投资完成额\n item['nlmyCompletionAmount'] = perData['农、林、牧、渔业全社会固定资产投资完成额']\n except:\n item['nlmyCompletionAmount'] = ''\n try:\n # 农、林、牧、渔业投资占比(%)\n item['nlmyProportion'] = perData['农、林、牧、渔业投资占比(%)']\n except:\n item['nlmyProportion'] = ''\n # 单位\n item['unit'] = '亿元'\n # 插入时间\n item['insertTime'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n insertMysql(item)\n\n\nif __name__ == '__main__':\n ipList = getIp()\n getData()\n cursor.close()\n conn.close()\n","repo_name":"CuiXiangTuT/MyProject","sub_path":"WebCrawler/中华人民共和国农业农村部/3_宏观经济_固定资产投资完成额变化年度.py","file_name":"3_宏观经济_固定资产投资完成额变化年度.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"6004427112","text":"import requests\nfrom bs4 import BeautifulSoup as BS\n\n\npage = 4735\ncount = 1\n\nwith open('data.txt', 'w', encoding='utf-8') as f:\n while count < page:\n r = requests.get(\"https://www.unipage.net/ru/cities?page=\"+str(count)+\"&per-page=10\")\n html = BS(r.content, \"html.parser\")\n selector = html.select(\".generated-card-header__title\")\n\n if len(selector):\n for el in selector:\n text = el.select(\"a\")\n for i in text:\n f.write(i.text + '\\n')\n else: print(\"ERROR\")\n count += 1\n\nprint(\"[INFO] Success\")\n","repo_name":"sewaustav/BedolagaV1.5-Vosk","sub_path":"testgpt.py","file_name":"testgpt.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31077099254","text":"# desafios: 12 - Faça um algoritmo que leia o preço de um produto e mostre seu novo preço, com 5% de desconto.\n\nproduto = str(input('Informe o nome do produto: '))\npreco = float(input('Qual o valor desse produto: '))\ndesconto = float(input('Desconto do dia: '))\n\ncalculo = preco * desconto\ncalculo2 = calculo /100\ncalculo3 = preco - calculo2\n\nprint(' * Oferta do dia: {}\\n * Preço: R$ {} \\n * Produto com desconto de {} % \\n * Valor da Oferta: R$ {:.2f}' .format( produto, preco, desconto, calculo3))\n# print (produto, preco, desconto, calculo, calculo2, calculo3)\n\n# Solução\n\npreco = float(input('Qual é o preço do produto? 
R$ '))\nnovo = preco - (preco * 5 /100)\nprint('O produto que custava R$ {}, na promoção com desconto de 5% vai custar R$ {}' .format(preco, novo))","repo_name":"lexxbr/Python_port","sub_path":"Python/aula07 - DESAFIO 12 - DESCONTO PERCENTUAL.py","file_name":"aula07 - DESAFIO 12 - DESCONTO PERCENTUAL.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28109693003","text":"from gl_utils import draw_gl_polyline,draw_gl_point\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import gluOrtho2D\n\nfrom glfw import glfwGetWindowSize,glfwGetCurrentContext,glfwGetCursorPos,GLFW_RELEASE,GLFW_PRESS\nfrom plugin import Plugin\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Trim_Marks(Plugin):\n \"\"\"docstring for Trim_Mark\n \"\"\"\n def __init__(self, g_pool,capture):\n super(Trim_Marks, self).__init__()\n self.order = .8\n self.g_pool = g_pool\n self.capture = capture\n self.frame_count = capture.get_frame_count()\n self._in_mark = 0\n self._out_mark = self.frame_count\n self.drag_in = False\n self.drag_out = False\n #display layout\n self.padding = 20. #in sceen pixel\n\n @property\n def in_mark(self):\n return self._in_mark\n\n @in_mark.setter\n def in_mark(self, value):\n self._in_mark = int(min(self._out_mark,max(0,value)))\n\n @property\n def out_mark(self):\n return self._out_mark\n\n @out_mark.setter\n def out_mark(self, value):\n self._out_mark = int(min(self.frame_count,max(self.in_mark,value)))\n\n\n def init_gui(self):\n self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))\n\n def on_window_resize(self,window,w,h):\n self.window_size = w,h\n self.h_pad = self.padding * self.frame_count/float(w)\n self.v_pad = self.padding * 1./h\n\n def update(self,frame,recent_pupil_positions,events):\n\n if frame.index == self.out_mark or frame.index == self.in_mark:\n self.g_pool.play=False\n\n if self.drag_in:\n x,y = glfwGetCursorPos(glfwGetCurrentContext())\n x,_ = self.screen_to_bar_space((x,y))\n self.in_mark = x\n\n elif self.drag_out:\n x,y = glfwGetCursorPos(glfwGetCurrentContext())\n x,_ = self.screen_to_bar_space((x,y))\n self.out_mark = x\n\n\n def on_click(self,img_pos,button,action):\n \"\"\"\n gets called when the user clicks in the window screen\n \"\"\"\n pos = glfwGetCursorPos(glfwGetCurrentContext())\n #drag the seek point\n if action == GLFW_PRESS:\n screen_in_mark_pos = self.bar_space_to_screen((self.in_mark,0))\n screen_out_mark_pos = self.bar_space_to_screen((self.out_mark,0))\n\n #in mark\n dist = abs(pos[0]-screen_in_mark_pos[0])+abs(pos[1]-screen_in_mark_pos[1])\n if dist < 10:\n if self.distance_in_pix(self.in_mark,self.capture.get_frame_index()) > 20:\n self.drag_in=True\n return\n #out mark\n dist = abs(pos[0]-screen_out_mark_pos[0])+abs(pos[1]-screen_out_mark_pos[1])\n if dist < 10:\n if self.distance_in_pix(self.out_mark,self.capture.get_frame_index()) > 20:\n self.drag_out=True\n\n elif action == GLFW_RELEASE:\n self.drag_out = False\n self.drag_in = False\n\n def atb_get_in_mark(self):\n return self.in_mark\n def atb_get_out_mark(self):\n return self.out_mark\n def atb_set_in_mark(self,val):\n self.in_mark = val\n def atb_set_out_mark(self,val):\n self.out_mark = val\n\n def distance_in_pix(self,frame_pos_0,frame_pos_1):\n fr0_screen_x,_ = self.bar_space_to_screen((frame_pos_0,0))\n fr1_screen_x,_ = self.bar_space_to_screen((frame_pos_1,0))\n return abs(fr0_screen_x-fr1_screen_x)\n\n\n def bar_space_to_screen(self,pos):\n 
width,height = self.window_size\n x,y=pos\n y = 1-y\n x = (x/float(self.frame_count))*(width-self.padding*2) +self.padding\n y = y*(height-2*self.padding)+self.padding\n return x,y\n\n\n def screen_to_bar_space(self,pos):\n width,height = glfwGetWindowSize(glfwGetCurrentContext())\n x,y=pos\n x = (x-self.padding)/(width-2*self.padding)*self.frame_count\n y = (y-self.padding)/(height-2*self.padding)\n return x,1-y\n\n def gl_display(self):\n\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n gluOrtho2D(-self.h_pad, (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n\n color1 = (.1,.9,.2,.5)\n color2 = (.1,.9,.2,.5)\n\n if self.in_mark != 0 or self.out_mark != self.frame_count:\n draw_gl_polyline( [(self.in_mark,0),(self.out_mark,0)],color=(.1,.9,.2,.5),thickness=2)\n draw_gl_point((self.in_mark,0),color=color2,size=10)\n draw_gl_point((self.out_mark,0),color=color2,size=10)\n\n glMatrixMode(GL_PROJECTION)\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()","repo_name":"DuongHoangThuy/pupil","sub_path":"pupil_src/player/trim_marks.py","file_name":"trim_marks.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"15481436992","text":"class Solution:\n def digitSum(self, n):\n sm = 0\n while n != 0:\n sm += n % 10\n n //= 10\n return sm\n def makeIntegerBeautiful(self, n: int, target: int) -> int:\n lst = 1 # the number of zeros we want to leave at the end\n add = 0\n #if the sum of digits is greater than target, it is most optimal to make the last few digits equal to zero\n while self.digitSum(n + add) > target:\n x = 10 ** lst\n add = x - n % x\n lst += 1\n \n return add","repo_name":"ARYASINGHBJC/LEETCODE","sub_path":"2457-minimum-addition-to-make-integer-beautiful/2457-minimum-addition-to-make-integer-beautiful.py","file_name":"2457-minimum-addition-to-make-integer-beautiful.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14763876888","text":"#Train-Valid Partitioning\r\nfrom shutil import copy2\r\nimport random\r\nfrom glob import glob\r\nimport os\r\nrandom.seed(17)\r\n\r\n# This is the path where our dataset is stored\r\npath = 'Dataset/Skin_Diseases'\r\n# These are the paths where we intend to store our train & valid sets\r\nvalid = 'Dataset/Valid'\r\ntrain = 'Dataset/Train'\r\n\r\nif not os.path.exists(valid):\r\n\tos.makedirs(valid)\r\nif not os.path.exists(train):\r\n\tos.makedirs(train)\r\n# glob module is used to retrieve files/pathnames matching a specified pattern \r\nfor folder in glob(path+'/*'):\r\n\tprint(folder)\r\n\r\n\t# find number of images in folder\r\n\tno_images_in_folder = len(os.listdir(folder))\r\n\tprint(\"no of images in this folder: {}\".format(no_images_in_folder))\r\n\r\n\t# make new folder inside test and train\r\n\tfolder_valid = valid+'/'+folder.split('\\\\')[1]+'/'\r\n\tfolder_train = train+'/'+folder.split('\\\\')[1]+'/'\r\n\tprint(folder_valid)\r\n\tprint(folder_train)\r\n\r\n\tif not os.path.exists(folder_valid):\r\n\t\tos.makedirs(folder_valid)\r\n\tif not os.path.exists(folder_train):\r\n\t\tos.makedirs(folder_train)\r\n\r\n\tprint('---------------------------------------------\\n')\r\n\r\n\t#Divide the images in Dataset into Train set & valid set by 0.75 : 0.25 ratio\r\n\tvalid_num = 
int(no_images_in_folder*0.25)\r\n\t\t\r\n\t# Shuffle the data in the folder to divide evenly\r\n\tx = list(enumerate(glob(folder+'/*')))\r\n\trandom.shuffle(x)\r\n\r\n # iterate from 0 to valid_num and copy to valid_folder\r\n\t# iterate valid_num to end and copy to train_folder\r\n\tcount = 0\r\n\tfor idx, im in x:\r\n\t\tif count <= valid_num:\r\n\t\t# copy to valid\r\n\t\t\tcopy2(im, folder_valid)\r\n\t\t\tcount += 1\r\n\t\telse:\r\n\t\t# copy to train\r\n\t\t\tcopy2(im, folder_train)\r\n\t\t\tcount += 1","repo_name":"IBM-EPBL/IBM-Project-10829-1659236203","sub_path":"Final Deliverables/App/Train-Valid-Seperation.py","file_name":"Train-Valid-Seperation.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70107502408","text":"def avtaler():\r\n class avtale:\r\n def __init__(self, sted, tidspunkt, lengde):\r\n self.sted = sted\r\n self.tidspunkt = tidspunkt\r\n self.lengde=lengde\r\n \r\n def __str__(self):\r\n return f\" {self.sted} {self.tidspunkt} {self.lengde}\"\r\n \r\n x=0\r\n index=0\r\n while x ==0:\r\n janei=input(\"ønsker du å legge til en avtale?: \")\r\n if janei==\"ja\":\r\n p1 = avtale(input(\"sted hvor avtalen skjer: \"),input(\"dato og tid avtalen skjer: \"), input(\"Hvor lenge avtalen pågår i hele minutter: \"))\r\n overskrift=input(\"legg til overskrift: \")\r\n index+=1\r\n print()\r\n print(index,overskrift,p1)\r\n print()\r\n x=0\r\n else:\r\n x=1\r\n \r\navtaler() \r\n\r\n ","repo_name":"martinPatte/Gruppe29","sub_path":"øving 9 g.py","file_name":"øving 9 g.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36478603113","text":"from googleads.adwords import AdWordsClient\nfrom googleads.oauth2 import GoogleRefreshTokenClient\nfrom googleads.errors import GoogleAdsError\nfrom django.conf import settings\nfrom time import sleep\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef adwords_service(client_customer_id=None):\n \"\"\"\n Get an instance of GoogleRefreshTokenClient with configuration as per defined settings\n and use that to create an instance of AdwordsClient.\n \"\"\"\n if not client_customer_id:\n client_customer_id = settings.GOOGLEADWORDS_CLIENT_CUSTOMER_ID\n\n oauth2_client = GoogleRefreshTokenClient(\n client_id=settings.GOOGLEADWORDS_CLIENT_ID,\n client_secret=settings.GOOGLEADWORDS_CLIENT_SECRET,\n refresh_token=settings.GOOGLEADWORDS_REFRESH_TOKEN\n )\n\n return AdWordsClient(\n developer_token=settings.GOOGLEADWORDS_DEVELOPER_TOKEN,\n oauth2_client=oauth2_client,\n user_agent=settings.GOOGLEADWORDS_USER_AGENT,\n client_customer_id=client_customer_id\n )\n\n\ndef paged_request(service, selector={}, number_results=100, start_index=0, retry=True, number_pages=False):\n \"\"\"\n Yields paged data as retrieved from the Adwords API.\n\n Alert Service Example:\n\n selector = {\n 'query': {\n 'clientSpec': 'ALL',\n 'filterSpec': 'ALL',\n 'types': ['ACCOUNT_BUDGET_BURN_RATE', 'ACCOUNT_BUDGET_ENDING',\n 'ACCOUNT_ON_TARGET', 'CAMPAIGN_ENDED', 'CAMPAIGN_ENDING',\n 'CREDIT_CARD_EXPIRING', 'DECLINED_PAYMENT',\n 'KEYWORD_BELOW_MIN_CPC', 'MANAGER_LINK_PENDING',\n 'MISSING_BANK_REFERENCE_NUMBER', 'PAYMENT_NOT_ENTERED',\n 'TV_ACCOUNT_BUDGET_ENDING', 'TV_ACCOUNT_ON_TARGET',\n 'TV_ZERO_DAILY_SPENDING_LIMIT', 'USER_INVITE_ACCEPTED',\n 'USER_INVITE_PENDING', 'ZERO_DAILY_SPENDING_LIMIT'],\n 'severities': ['GREEN', 'YELLOW', 'RED'],\n 'triggerTimeSpec': 
'ALL_TIME'\n }\n }\n for (data, selector) in paged_request(service='AlertService', number_results=selector=selector):\n print data\n\n Targeting Ideas Service Example:\n\n {{{\n selector = {\n 'searchParameters': [\n {\n 'xsi_type': 'RelatedToQuerySearchParameter',\n 'queries': ['seo', 'adwords', 'adwords seo']\n },\n {\n 'xsi_type': 'LanguageSearchParameter',\n 'languages': [{'id': '1000'}]\n },\n {\n 'xsi_type': 'LocationSearchParameter',\n 'locations': [{'id': '2036'}]\n },\n ],\n 'ideaType': 'KEYWORD',\n 'requestType': 'IDEAS',\n 'requestedAttributeTypes': ['KEYWORD_TEXT', 'SEARCH_VOLUME'],\n }\n\n for (data, selector) in paged_request('TargetingIdeaService', selector):\n print data\n\n }}}\n\n @param service: A string representing the client service class, ie.. GetTargetingIdeaService\n @param selector: A dict of values used to specify the request to the API.\n @param number_results: Results per page.\n @param start_index: Offset to start results at.\n @yield data, selector\n \"\"\"\n client = adwords_service()\n service = client.GetService(service, settings.GOOGLEADWORDS_CLIENT_VERSION)\n\n if 'paging' not in selector:\n selector['paging'] = {}\n if start_index is not None:\n selector['paging']['startIndex'] = str(start_index)\n if number_results is not None:\n selector['paging']['numberResults'] = str(number_results)\n\n more_pages = True\n page_number = 1\n\n while more_pages:\n try:\n response = service.get(selector)\n yield response.entries, selector\n\n # Now, get the next set of results\n start_index += number_results\n selector['paging']['startIndex'] = str(start_index)\n if number_pages:\n if number_pages >= page_number:\n more_pages = False\n else:\n more_pages = start_index < int(response.totalNumEntries)\n\n page_number += 1\n\n except GoogleAdsError as e:\n if not retry or not hasattr(e, 'fault') or not hasattr(e.fault, 'detail') or not hasattr(e.fault.detail, 'ApiExceptionFault') or not hasattr(e.fault.detail.ApiExceptionFault, 'errors'):\n raise\n retryAfterSeconds = sum([int(fault.retryAfterSeconds) for fault in e.fault.detail.ApiExceptionFault.errors if getattr(fault, 'ApiError.Type') == 'RateExceededError'])\n if retryAfterSeconds > 0:\n # We've hit a RateExceededError, sleep for some period of time\n logger.info(\"Sleeping due to 'RateExceededError' for '%s' seconds.\" % retryAfterSeconds)\n sleep(retryAfterSeconds)\n else:\n # We haven't hit an error we care about, raise it.\n raise\n","repo_name":"alexhayes/django-google-adwords","sub_path":"django_google_adwords/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"} +{"seq_id":"11234187825","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport os\nimport socket\n\nfrom EvaluateService.config_related import ConfigMixin\n\n\nSOCKET_RECEIVE_SIZE = 512\nREBOOT_CODE = 9\nLOCAL_CONFIG_NAME = 'lzqs.ini'\n\n\ndef check_result(func):\n def wrapper(self, Data, *args, **kwargs):\n assert isinstance(Data, dict), '传入数据类型错误'\n # assert 'evaluate' in Data.keys(), '配置数据中无evaluate'\n if Data['resCode'] != 0:\n print(f\"系统未找到服务配置\")\n return None\n return func(self, Data, *args, **kwargs)\n return wrapper\n\n\nclass EvaluateConfig(ConfigMixin):\n def __init__(self):\n pass\n self.Config_Data = super().read_service_config('evaluate_service_config')\n\n def config_parser(self, Url_Path: tuple) -> dict:\n File_Path = f\"{os.getcwd()}\\\\{LOCAL_CONFIG_NAME}\"\n Config_Parse = 
EvaluateConfig.config_reader({File_Path})\n try:\n Settings = (Config_Parse['lzqs']['ip'],\n Config_Parse['lzqs']['port'],\n Config_Parse['lzqs']['org_id'])\n except KeyError:\n print(f'文件:{File_Path}配置错误')\n else:\n Url_Path = '/'.join(Url_Path)\n Url = f\"http://{Settings[0]}:{Settings[1]}/{Url_Path}/{Settings[2]}\"\n print(f\"请求地址:{Url}\")\n Res = self.request(Url_Path, 'get')\n return Res\n\n @check_result\n def get_service_address(self, Config_Data: dict, Service_Name='evaluate') -> list:\n '''\n 获取服务的ip和port信息\n :param: service name, dict data\n :return: service address list\n '''\n Service_Name = Service_Name.lower()\n Ip_List = socket.gethostbyname_ex(socket.gethostname())[2]\n Service_Address_List = []\n # if Config_Data['resCode'] != 0:\n # print(f\"系统未配置{Service_Name}服务\")\n # return Service_Address_List\n try:\n Service_Config_Msg = Config_Data[Service_Name]\n for i in Service_Config_Msg:\n Service_Ip = i[f'{Service_Name}ServiceIp']\n Service_Port = i[f'{Service_Name}ServicePort']\n # Service_Address = (Service_Ip, Service_Port) if Service_Ip in Ip_List else ()\n if Service_Ip in Ip_List:\n Service_Address = (Service_Ip, Service_Port)\n Service_Address_List.append(Service_Address)\n\n except KeyError:\n print(f'配置中无{Service_Name}参数')\n else:\n return Service_Address_List\n\n def get_backed_address(self, File_name):\n try:\n Config_Parser = self.config_reader({f'{os.getcwd()}\\\\{File_name}'})\n Lzqs = Config_Parser['lzqs']\n Address = (Lzqs['ip'], Lzqs['port'], Lzqs['org_id'])\n except KeyError:\n print(f'本地配置文件{File_name}错误')\n else:\n return Address\n\n @check_result\n def get_evaluate_config(self, Data: dict, Sequence=1):\n Evaluate_Config = dict()\n Nums = len(Data['evaluate'])\n for Item in range(0, Nums):\n Evaluate_Service_Ip = Data['evaluate'][Item]['evaluateServiceIp']\n Evaluate_Service_Port = Data['evaluate'][Item]['evaluateServicePort']\n Evaluate_Port = Data['evaluate'][Item]['evaluatePort']\n Evaluate_Control_Window = Data['evaluate'][Item]['evaluateControlWindow']\n Evaluate_Config[f\"evaluate{Item+1}\"] = dict(Evaluate_Service_Ip=Evaluate_Service_Ip,\n Evaluate_Service_Port=Evaluate_Service_Port,\n Evaluate_Port=Evaluate_Port,\n Evaluate_Control_Window=Evaluate_Control_Window)\n\n return Evaluate_Config[f\"evaluate{Sequence}\"]\n\n # @check_result\n def window_map(self, Data):\n Window_List = Data['Evaluate_Control_Window']\n Evaluate_Port = Data['Evaluate_Port']\n Map = dict()\n for Item in Window_List:\n Window_Name = Item['windowName']\n Evaluate_Ip = Item['evaluateIp']\n # Item = {}\n # Map = {**Map, **Item}\n Map[Window_Name] = (Evaluate_Ip, Evaluate_Port)\n return Map\n\n def tips_map(self, Data):\n if Data['type'] == 1:\n Tips = f\"叫号:{Data['ticketNumber']}\"\n elif Data['type'] == 2:\n Tips = f\"开始办理:{Data['ticketNumber']}\"\n elif Data['type'] == 3:\n Tips = f\"办理结束:{Data['ticketNumber']}\"\n elif Data['type'] == 4:\n Tips = f\"窗口:{Data['windowName']}--> 暂停服务\"\n elif Data['type'] == 5:\n Tips = f\"窗口:{Data['windowName']}--> 恢复服务\"\n elif Data['type'] == 6:\n Tips = f\"更新评价:{Data}\"\n elif Data['type'] == 7:\n Tips = f\"窗口:{Data['windowName']} --> 用户登录\"\n elif Data['type'] == 8:\n Tips = f\"窗口:{Data['windowName']} --> 用户退出\"\n else:\n Tips = ''\n return Tips\n\n\n\n\n\n\n","repo_name":"doever/qs","sub_path":"EvaluateService/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"6893012211","text":"from discord.ext import 
commands\nimport os\nfrom dotenv import load_dotenv\nfrom db import mongo_setup\nfrom db.prefixes import Prefix\n\nmongo_setup.global_init()\n\n\ndef get_prefix(client, message) -> Prefix:\n for pref in Prefix.objects:\n if pref._guild_id == str(message.guild.id):\n return pref._prefix\n\n\nclient = commands.Bot(command_prefix=get_prefix)\nclient.remove_command('help')\n\n\n@client.event\nasync def on_ready():\n print(\"Uniques is ready for some development\")\n client.load_extension('cogs.commands')\n client.load_extension('cogs.help')\n\n\n@client.command()\nasync def load(ctx, extension):\n await client.load_extension(f'cogs.{extension}')\n\n\n@client.command()\nasync def unload(ctx, extension):\n await client.unload_extension(f'cogs.{extension}')\n\n\n@client.command()\nasync def reload(ctx, extension):\n await client.reload_extension(f'cogs.{extension}')\n\nload_dotenv()\ntoken = os.getenv(\"DISCORD_TOKEN\")\nclient.run(token)\n","repo_name":"XanderWatson/uniques-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36021090601","text":"def insertarray(array,n):\n for j in range(n):\n array.append(int(input(\"input \")))\n print(array)\ndef countpairsum(array1,array2,n1,n2,x):\n count = 0\n for n in range(n1):\n m = n2 -1\n while m >= 0 :\n if array1[n]+array2[m] == x :\n count += 1\n elif array1[n]+array2[m] < x :\n break\n m = m-1\n print(count)\nif __name__==\"__main__\":\n t =int(input(\"t \"))\n for i in range(t):\n array1 = []\n array2 = []\n n1 = int(input(\"n1 \"))\n n2 = int(input(\"n2 \"))\n insertarray(array1,n1)\n insertarray(array2,n2)\n x = int(input(\"x \"))\n countpairsum(array1,array2,n1,n2,x)\n","repo_name":"aarjukhicher/geeksprogram","sub_path":"Sorting/count_pairsum.py","file_name":"count_pairsum.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2112973487","text":"import requests\r\nfrom requests.auth import AuthBase\r\nfrom Crypto.Hash import HMAC\r\nfrom Crypto.Hash import SHA256\r\nfrom datetime import datetime\r\nfrom dateutil import tz\r\n\r\n\r\nclass AuthHmacMetos(AuthBase):\r\n \"\"\"Creates HMAC authorization header for Metos REST service POST request.\"\"\"\r\n def __init__(self, apiRoute, publicKey, privateKey, method):\r\n self._publicKey = publicKey\r\n self._privateKey = privateKey\r\n self._method = method\r\n self._apiRoute = apiRoute\r\n\r\n def __call__(self, request):\r\n dateStamp = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')#'Mon, 23 Jul 2018 13:24:09 GMT'#\r\n request.headers['Date'] = dateStamp\r\n msg = (self._method + self._apiRoute + dateStamp + self._publicKey).encode(encoding='utf-8')\r\n h = HMAC.new(self._privateKey.encode(encoding='utf-8'), msg, SHA256)\r\n signature = h.hexdigest()\r\n authorizationStr = 'hmac ' + self._publicKey + ':' + signature\r\n request.headers['Authorization'] = authorizationStr\r\n #print('HMAC header decrypted: {}'.format(msg)\r\n #print('Accept: {}'.format(request.headers['Accept']))\r\n #print('Authorization: {}'.format(request.headers['Authorization']))\r\n #print('Date: {}'.format(request.headers['Date'])) \r\n return request\r\n\r\nclass FcApi:\r\n \"\"\"Sets API endpoint URL for GET and POST requests.\"\"\"\r\n def __init__(self, apiUri, publicKey, privateKey):\r\n self._apiUri = apiUri\r\n self._publicKey = publicKey\r\n self._privateKey = 
privateKey\r\n\r\n def __checkStatus(self, response, auth, route):\r\n response.close()\r\n print(\" > {} {}\".format(auth._method, self._apiUri + route))\r\n if response.status_code != 200:\r\n print(\" > {} {}\".format(response.status_code, response.reason))\r\n\r\n def get(self, route):\r\n #remove parameters from the route for signature calculation\r\n auth = AuthHmacMetos(route.split('?', 1)[0], self._publicKey, self._privateKey, 'GET')\r\n response = requests.get(self._apiUri + route, headers={'Accept': 'application/json'}, auth=auth)\r\n response.close()\r\n self.__checkStatus(response, auth, route)\r\n return response\r\n\r\n def post(self, route, payload):\r\n auth = AuthHmacMetos(route.split('?', 1)[0], self._publicKey, self._privateKey, 'POST')\r\n response = requests.post(self._apiUri + route, data=payload, headers={'Accept': 'application/json'}, auth=auth)\r\n response.close()\r\n self.__checkStatus(response, auth, route)\r\n return response\r\n\r\n def put(self, route, payload):\r\n auth = AuthHmacMetos(route.split('?', 1)[0], self._publicKey, self._privateKey, 'PUT')\r\n response = requests.put(self._apiUri + route, data=payload, headers={'Accept': 'application/json'}, auth=auth)\r\n response.close()\r\n self.__checkStatus(response, auth, route)\r\n return response\r\n\r\n def getEpochs(self, timestamp, station_timezone=None):\r\n \"\"\"\r\n Convert given datetime to UNIX epochs in local station timezone\r\n\r\n timestamp -- e.g. datetime(2018, 10, 10, 0, 0, 0)\r\n station_timezone -- e.g. tz.tzlocal()\r\n \"\"\"\r\n if station_timezone is None:\r\n station_timezone = tz.tzlocal()\r\n # conversion to POSIX seconds\r\n t0 = datetime(1970, 1, 1).replace(tzinfo=station_timezone)\r\n return int((timestamp.replace(tzinfo=station_timezone) - t0).total_seconds())\r\n","repo_name":"onemario/METOS","sub_path":"APIv2/Python/fc_api.py","file_name":"fc_api.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29583439091","text":"import numpy as np\nimport cv2\nimport time\n\ncap = cv2.VideoCapture(1)\n\n#frame rate\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint('fps value:', fps)\n\n# width and heigt\nw = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\nprint('(width, height)', w, h)\n\n# # Setting width and height (640, 480) -> (320,320)\n# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)\n# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 320)\n\n#time\nprev_time = 0\ncurrent_time = 0\n\nwhile True:\n ret, img = cap.read()\n\n #Measure FPS\n current_time = time.time()\n fps_m = 1 /(current_time - prev_time)\n prev_time = current_time\n print('fps measured', fps_m)\n\n\n cv2.imshow('Frame',img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n","repo_name":"kotai2003/OpenCV_Lectures","sub_path":"01.Controlling-Video/Camera-Control.py","file_name":"Camera-Control.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71234344327","text":"from vertex import Vertex\n\nclass Graph:\n def __init__(self, directed=False):\n self.directed = directed \n self.graph_dict = {}\n\n def add_vertex(self, vertex):\n print('Adding vertex {}'.format(vertex.value[0]))\n self.graph_dict[vertex.value[0]] = vertex \n\n def add_edge(self, from_vertex, to_vertex, weight=0):\n self.graph_dict[from_vertex.value[0]].add_edge(to_vertex.value[0], weight)\n if not 
self.directed:\n self.graph_dict[to_vertex.value[0]].add_edge(from_vertex.value[0], weight)\n \n def find_path(self, start_vertex, end_vertex):\n start = [start_vertex]\n seen = {}\n while len(start) > 0:\n current_vertex = start.pop()\n #print(current_vertex)\n seen[current_vertex] = True \n\n if current_vertex == end_vertex:\n print(\"Link found! \")\n return \n else:\n vertex = self.graph_dict[current_vertex]\n next_vertices = vertex.get_edges()\n next_vertices = [vertex for vertex in next_vertices if not vertex in seen]\n start.extend(next_vertices)\n \n print(\"There is no link between the two modems\")\n return \n\n\n\n\n","repo_name":"GEEGABYTE1/Connection","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28280306351","text":"import pytest\n\nimport anytree\nimport anytree.exporter\n\nfrom sprog import tree, utils\n\n\ndef test_load_and_save_tree():\n outprefix = \"tmp.test_tree\"\n utils.syscall(f\"rm -rf {outprefix}*\")\n tmp_newick = f\"{outprefix}.newick\"\n tmp_tsv = f\"{outprefix}.tsv\"\n tmp_json = f\"{outprefix}.json\"\n\n # Tree looks like this:\n # ┌─subsp1\n # ┌─speciesA─┤\n # │ └─subsp2\n # ────┤\n # ├─speciesB\n # └─speciesC\n expect_tree_root = anytree.Node(\"root\")\n speciesA = anytree.Node(\"speciesA\", parent=expect_tree_root)\n anytree.Node(\"speciesB\", parent=expect_tree_root)\n anytree.Node(\"speciesC\", parent=expect_tree_root)\n anytree.Node(\"subsp1\", parent=speciesA)\n anytree.Node(\"subsp2\", parent=speciesA)\n exporter = anytree.exporter.JsonExporter(indent=2, sort_keys=True)\n expect_json = exporter.export(expect_tree_root)\n expect_nodes = [\"root\", \"speciesA\", \"speciesB\", \"speciesC\", \"subsp1\", \"subsp2\"]\n\n with open(tmp_newick, \"w\") as f:\n print(\"((subsp1,subsp2)speciesA,speciesB,speciesC);\", file=f)\n\n with open(tmp_tsv, \"w\") as f:\n print(\"speciesA\", \"subsp1\", sep=\"\\t\", file=f)\n print(\"speciesA\", \"subsp2\", sep=\"\\t\", file=f)\n print(\"speciesB\", sep=\"\\t\", file=f)\n print(\"speciesC\", sep=\"\\t\", file=f)\n\n t = tree.Tree(tmp_newick)\n assert t.to_json(filename=tmp_json) == expect_json\n assert sorted(list(t.nodes.keys())) == expect_nodes\n assert [t.nodes[x].name for x in sorted(t.nodes)] == expect_nodes\n\n t = tree.Tree(tmp_tsv)\n assert t.to_json() == expect_json\n assert sorted(list(t.nodes.keys())) == expect_nodes\n assert [t.nodes[x].name for x in sorted(t.nodes)] == expect_nodes\n\n t = tree.Tree(tmp_json)\n assert t.to_json() == expect_json\n assert sorted(list(t.nodes.keys())) == expect_nodes\n assert [t.nodes[x].name for x in sorted(t.nodes)] == expect_nodes\n\n assert t.find_node_by_name(\"speciesC\").name == \"speciesC\"\n assert t.find_node_by_name(\"not_in_tree\") is None\n\n utils.syscall(f\"rm -f {outprefix}*\")\n\n\ndef test_leaf_combo_parent_nodes():\n outprefix = \"tmp.test_leaf_combo_parent_nodes\"\n utils.syscall(f\"rm -rf {outprefix}*\")\n tmp_newick = f\"{outprefix}.newick\"\n with open(tmp_newick, \"w\") as f:\n print(\n \"(((subsp1,subsp2,subsp3)speciesA,speciesB,speciesC)genus1,(speciesX,speciesY)genus2);\",\n file=f,\n )\n t = tree.Tree(tmp_newick)\n # print(anytree.RenderTree(t.tree_root, style=anytree.render.ContStyle()))\n # tree looks like this:\n # Node('/root')\n # ├── Node('/root/genus1')\n # │ ├── Node('/root/genus1/speciesA')\n # │ │ ├── Node('/root/genus1/speciesA/subsp1')\n # │ │ ├── Node('/root/genus1/speciesA/subsp2')\n # │ │ 
└── Node('/root/genus1/speciesA/subsp3')\n # │ ├── Node('/root/genus1/speciesB')\n # │ └── Node('/root/genus1/speciesC')\n # └── Node('/root/genus2')\n # ├── Node('/root/genus2/speciesX')\n # └── Node('/root/genus2/speciesY')\n\n leaf_names = [\n \"subsp1\",\n \"subsp2\",\n \"subsp3\",\n \"speciesB\",\n \"speciesC\",\n \"speciesX\",\n \"speciesY\",\n ]\n t.init_leaf_combination_data(leaf_names)\n # check leaf combinations correct. The values are actually Node\n # objects. We check the names match instead of creating nodes\n expect = [\n ({0, 1, 2, 3, 4, 5, 6}, \"root\"),\n ({0, 1, 2, 3, 4}, \"genus1\"),\n ({0, 1, 2}, \"speciesA\"),\n ({5, 6}, \"genus2\"),\n ]\n assert sorted(expect) == sorted((x[0], x[1].name) for x in t.leaf_combos)\n\n got = t.leaf_combo_parent_nodes({0}, min_prop_contain=0.95, max_prop_outside=0.05)\n assert got.name == \"subsp1\"\n got = t.leaf_combo_parent_nodes({1}, min_prop_contain=0.95, max_prop_outside=0.05)\n assert got.name == \"subsp2\"\n got = t.leaf_combo_parent_nodes({2}, min_prop_contain=0.95, max_prop_outside=0.05)\n assert got.name == \"subsp3\"\n got = t.leaf_combo_parent_nodes(\n {0, 1}, min_prop_contain=0.95, max_prop_outside=0.05\n )\n assert got is None\n got = t.leaf_combo_parent_nodes(\n {0, 1}, min_prop_contain=0.68, max_prop_outside=0.05\n )\n assert got is None\n got = t.leaf_combo_parent_nodes(\n {0, 1}, min_prop_contain=0.66, max_prop_outside=0.05\n )\n assert got.name == \"speciesA\"\n got = t.leaf_combo_parent_nodes(\n {0, 1, 4}, min_prop_contain=0.66, max_prop_outside=0.05\n )\n assert got is None\n got = t.leaf_combo_parent_nodes(\n {0, 1, 4}, min_prop_contain=0.66, max_prop_outside=0.5\n )\n assert got.name == \"speciesA\"\n utils.syscall(f\"rm -f {outprefix}*\")\n\n\ndef test_leaf_combinations_to_nodes():\n outprefix = \"tmp.test_leaf_combos\"\n utils.syscall(f\"rm -rf {outprefix}*\")\n tmp_newick = f\"{outprefix}.newick\"\n with open(tmp_newick, \"w\") as f:\n print(\"((subsp1,subsp2)speciesA,speciesB,speciesC);\", file=f)\n t = tree.Tree(tmp_newick)\n leaf_names = [\"subsp1\", \"subsp2\", \"speciesB\", \"speciesC\"]\n got = t.leaf_combinations_to_nodes(leaf_names)\n assert len(got) == 6\n assert got[(0,)].name == \"subsp1\"\n assert got[(1,)].name == \"subsp2\"\n assert got[(0, 1)].name == \"speciesA\"\n assert got[(2,)].name == \"speciesB\"\n assert got[(3,)].name == \"speciesC\"\n assert got[(0, 1, 2, 3)].name == \"root\"\n utils.syscall(f\"rm -f {outprefix}*\")\n","repo_name":"iqbal-lab-org/sprog","sub_path":"tests/tree_test.py","file_name":"tree_test.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"8545008273","text":"#!/usr/bin/env python3\n\nimport re\nimport json\nimport arrow\nimport lxml.html\nimport requests\n\nwith open('data/transcripts/transcript_list.json') as f:\n transcript_list_gen = (json.loads(line) for line in f)\n transcript_list = sorted(transcript_list_gen, key=lambda x: x['date'])\n\ndef find_article_id(doc):\n scripts = doc.cssselect(\"script\")\n script = [script for script in scripts if \"var articleInfo\" in script.text_content()][0].text_content()\n vcmId = re.search(r\"vcmId: \\\"(.*)\\\"\", script).groups()[0]\n return vcmId\n\ndef find_transcript_element(doc):\n article = doc.cssselect(\"div.main > article > div > div.article-body > div.article-text\")[0]\n return article\n\ndef download_article(url):\n response = requests.get(url)\n doc = lxml.html.fromstring(response.content)\n vcmId = 
find_article_id(doc)\n transcript_el = find_transcript_element(doc)\n return {\n \"vcmId\": vcmId,\n \"html\": lxml.html.tostring(transcript_el).decode('utf-8')\n }\n\nfor item in transcript_list:\n url = item.pop('url')[0]\n\n print(f\"Downloading {url}\")\n article = download_article(url)\n vcmId, html = article['vcmId'], article['html']\n\n output = {\n \"vcmId\": vcmId,\n \"url\": url,\n \"html\": html,\n \"title\": item['title'],\n \"description\": item['description'],\n \"date\": item['date']\n }\n\n with open(f'data/transcripts/raw/{vcmId}.json', 'w') as f:\n json.dump(output, f)\n","repo_name":"AlJohri/late-night-talk-show-analysis","sub_path":"the-oreilly-factor/scripts/scrape_transcripts.py","file_name":"scrape_transcripts.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40967048532","text":"# -*- coding: utf-8 -*-\nfrom dataapi import Client\nif __name__ == \"__main__\":\n try:\n client = Client()\n client.init('e97fd48ed3ada633e20848c501fa018db3a52734767bfedc20ce0f1ac3aea723')\n \n \n url1='/api/macro/getChinaDataGDP.json?field=&indicID=M010000002&indicName=&beginDate=&endDate='\n code, result = client.getData(url1)\n if code==200:\n print (result)\n else:\n print (code)\n print (result)\n url2='/api/subject/getThemesContent.json?field=&themeID=&themeName=&isMain=1&themeSource='\n code, result = client.getData(url2)\n if(code==200):\n file_object = open('thefile.csv', 'w')\n file_object.write(result)\n file_object.close( )\n else:\n print (code)\n print (result)\n \n \n url3 = '/api/equity/getEqu.json?field=&ticker=&secID=&equTypeCD=A&listStatusCD=L'\n code, result = client.getData(url3)\n \n url4 = '/api/market/getMktStockFactorsOneDayPro.json?field=ticker,tradeDate,pe&secID=&ticker=000001,600000&tradeDate=20160727'\n code,result = client.getData(url4)\n \n url5 = '/api/equity/getSecST.json?field=&secID=&ticker=000521&beginDate=20020101&endDate=20160831'\n code,result_st = client.getData(url5)\n except Exception as e:\n #traceback.print_exc()\n raise e\n\n\n\nresult.to_csv('stockpool.csv')","repo_name":"quzhengqi/intraday","sub_path":"samplecode.py","file_name":"samplecode.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11150227641","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nnumbers = list(map(int, input().split()))\ncount = 0\n\ndpLast = [0 for _ in range(21)]\ndpLast[numbers[0]] += 1\n\nfor num in numbers[1:-1]:\n dp = [0 for _ in range(21)]\n for i in range(21):\n if dpLast[i]:\n if i-num >= 0:\n dp[i-num] += dpLast[i]\n if i+num <= 20:\n dp[i+num] += dpLast[i]\n dpLast = dp.copy()\n\nprint(dpLast[numbers[-1]])","repo_name":"twinklesu/algorithm_py","sub_path":"5557.py","file_name":"5557.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33946245185","text":"from unnamed.unnamed import run_test\nfrom unnamed.evaluate import EvaluateException\nfrom unnamed.parsetree import ParseException\nfrom test.base_test_case import BaseTestCase\n\nsimple0 = \"\"\"\n loop (let i = 0; i < 5; i = i + 1;) {\n print i;\n }\n \"\"\"\nsimple1 = \"\"\"\n let i = 0;\n loop (i < 5) {\n print i;\n i = i + 1;\n }\n \"\"\"\nsimple2 = \"\"\"\n print 0;\n loop (False) {\n print 0;\n }\n \"\"\"\n\nclass LoopTest(BaseTestCase):\n def test_simple0(self):\n 
self.assert_stdout(simple0, '0\\n1\\n2\\n3\\n4\\n')\n\n    def test_simple1(self):\n        self.assert_stdout(simple1, '0\\n1\\n2\\n3\\n4\\n')\n\n    def test_simple2(self):\n        self.assert_stdout(simple2, '0\\n')\n\nif __name__ == '__main__':\n    import unittest\n    unittest.main()","repo_name":"xcjackpan/ceci","sub_path":"test/test_loop.py","file_name":"test_loop.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40753852583","text":"RE_RECOGNIZE_MONEY = False\nPRECISELY_SEARCH = False\n\nreplace_map = {'O': '0', ',': '.', '_': '', 'Q': '0', 'D': '0', '@': '0', 'B': '8', 'L': '1',\n               '~': '', ',': '.', ']': '1', '[': '1', '-': '', 'S': '5', 'Y': '7', ':': '', 'l': '1', 'G': '6'\n               }\n\nbeijing_table_config1 = {\n    'left': [\n        ['^门诊大额支付$', '^门诊大.*支付$', '^门.大额支付$', '.{2,}大额支付$', '诊大额支付'],\n        ['退休补充支付', '^.{2}补充支付', '.*休补充支付'],\n        ['残军补助支付', '.军补助支付'],\n        ['单位补.*公疗.*支付', '.*原公疗.*'],\n        []\n    ],\n    'right': [\n        ['本次医保范围内金额', '本次医保范.内金额'],\n        ['累计医保内范围金额', '累计医保内范.*金额', '累计医保内范围.*'],\n        ['年度门诊大额累计支付', '^年度门诊大额累计', '^年度门.*大额累计'],\n        ['本次支付后个人账户余额', '本次支付后个人账户.*'],\n        []\n    ],\n    'name': [\n        'men_zhen_da_e_zhi_fu',\n        'tui_xiu_bu_chong_zhi_fu',\n        'can_jun_bu_zhu_zhi_fu',\n        'dan_wei_bu_chong_xian_zhi_fu',\n        'medicalpaymoney'\n    ],\n    'template_bg_config':\n        {\n            'loc': [0, 283, 459, 459],\n            'width': 819,\n            'height': 497\n        },\n    're_recog': False\n}\nbeijing_table_config2 = {\n    'left': [\n        ['本次医保范围内金额', '本次医保范.*内金'],\n        ['累计医保内范围金额', '累计医保内范.*金额'],\n        ['年度门诊大额累计支付'],\n        ['本次支付后个人账户余额', '本次支付后个人账户.*'],\n        []\n\n    ],\n    'right': [\n        ['^起付金额', '^起付金'],\n        ['超封顶金额', '超封.金额', '超.{2}金额'],\n        ['自付二'],\n        ['自费'],\n        [],\n    ],\n    'name': [\n        'ben_ci_yi_bao_fan_wei_nei_jin_e',\n        'lei_ji_yi_bao_fan_wei_nei_jin_e',\n        'nian_du_men_zhen_da_e_lei_ji_zhi_fu',\n        'ben_ci_zhi_fu_hou_ge_ren_zhang_hu_yu_e',\n        'personal_account_pay_money'\n    ],\n    'template_bg_config':\n        {\n            'loc': [245, 283, 590, 457],\n            'width': 819,\n            'height': 497\n        },\n    're_recog': False\n}\nbeijing_table_config3 = {\n    'left': [\n        ['自付一', '自村一'],\n        ['起付金额', '起村金额'],\n        ['超封顶金额', '超封.*金额'],\n        ['自付二', '自村二'],\n        ['自费'],\n        ['个人.*付金额']\n    ],\n    'right': None,\n    'name': [\n        'selfpayone',\n        'qi_fu_jin_e',\n        'chao_feng_ding_jin_e',\n        'selfpaytwo',\n        'selfpaymoney',\n        'personpaymoney'\n    ],\n    'template_bg_config':\n        {\n            'loc': [408, 285, 732, 456],\n            'width': 819,\n            'height': 497\n        },\n    're_recog': True\n}\nbeijing_table_config4 = {\n    'left': [\n        ['本次医保范围内金', '本.*医保范围内金', '本次医保范.内金额'],\n        ['年度累计医保范围内金额', '年度累.医.范围内金', '年度.计医保范.内金额', '.*累计医保范围内金额'],\n        ['年度居民基本医疗保险基金门诊累计支付', '年.居民基本医疗保险基金', '年度居民基本医疗保.基金门诊.*', '.*居民基本医疗.*'],\n        []\n    ],\n    'right': None,\n    'name': [\n        'ben_ci_yi_bao_fan_wei_nei_jin_e',\n        'lei_ji_yi_bao_fan_wei_nei_jin_e',\n        'nian_du_men_zhen_da_e_lei_ji_zhi_fu',\n        'medicalpaymoney'\n    ],\n    'template_bg_config':\n        {\n            'loc': [82, 283, 459, 459],\n            'width': 819,\n            'height': 497\n        },\n    're_recog': False\n}\n\nbeijing_table_config = [\n    beijing_table_config1,\n    beijing_table_config2,\n    beijing_table_config3,\n    beijing_table_config4\n]\n\nbeijing_teshu_table_config1 = {\n    'left': [\n        ['统筹基金支付', '统.基金支.{2}'],\n        ['住院大额支付', '住院大.支付', '住.大额支付'],\n        ['退休补充支付', '^.{2}补充支付', '.*休补充支付'],\n        ['残军补助支付', '.军补助支付'],\n        ['单位补.*公疗.*支付', '.*原公疗.*'],\n        []\n    ],\n    'right': [\n        ['费用起止时间', '.*用起止时.*', '.*用起.*时间'],\n        ['本次医保范围内金', '本.*医保范围内金', '本次医保范.*内金额'],\n        ['年度统筹基金累计支付', '年度统.*基.*累计支.', '.*度统筹.金累计.*'],\n        ['年度大额资金.*住院.*累计支付', '年度大.*资金.*住院.*计支.', '年.大额.*金.*住院.*累计支付'],\n        ['本次支付后个人账户余额', '本次支付后个人账户余.*', '.*支付后个人账户.*'],\n        []\n    ],\n    'name': [\n        
'tong_chou_ji_jin_zhi_fu',\n 'zhu_yuan_da_e_zhi_fu',\n 'tui_xiu_bu_chong_zhi_fu',\n 'can_jun_bu_zhu_zhi_fu',\n 'dan_wei_bu_chong_xian_zhi_fu',\n 'medicalpaymoney',\n ],\n 're_recog': False\n}\n\nbeijing_teshu_table_config2 = {\n 'left': [\n ['本次医保范围内金', '本.*医保范围内金', '本次医保范.*内金额'],\n ['年度统筹基金累计支付', '年度统.*基.*累计支.', '.*度统筹.金累计.*'],\n ['年度大额资金.*住院.*累计支付', '年度大.*资金.*住院.*计支.', '年.大额.*金.*住院.*累计支付'],\n ['本次支付后个人账户余额', '本次支付后个人账户余.*', '.*支付后个人账户.*'],\n []\n ],\n 'right': [\n ['起付金额', '起村金额'],\n ['超封顶金额', '超封.*金额'],\n ['自付二', '自村二'],\n ['自费'],\n ['个人.*付金额']\n ],\n 'name': [\n 'ben_ci_yi_bao_fan_wei_nei_jin_e',\n 'nian_du_tong_chou_ji_jin_lei_ji_zhi_fu',\n 'nian_du_da_e_zi_jin_zhu_yuan_lei_ji_zhi_fu',\n 'ben_ci_zhi_fu_hou_ge_ren_zhang_hu_yu_e',\n 'personal_account_balance'\n ],\n 're_recog': False\n}\n\nbeijing_teshu_table_config3 = {\n 'left': [\n ['自付一', '自村一'],\n ['起付金额', '起村金额'],\n ['超封顶金额', '超封.*金额'],\n ['自付二', '自村二'],\n ['自费'],\n ['个人.*付金额']\n ],\n 'right': None,\n 'name': [\n 'selfpayone',\n 'qi_fu_jin_e',\n 'chao_feng_ding_jin_e',\n 'selfpaytwo',\n 'selfpaymoney',\n 'personpaymoney'\n ],\n 're_recog': True\n}\n\nbeijing_table_teshu_config4 = {\n 'left': [\n ['本次医保范围内金额', ],\n ['年度居民基本医疗保险', '年度居民基本医疗保.', '金累计.住院.支付金.'],\n []\n ],\n 'right': None,\n 'name': [\n 'ben_ci_yi_bao_fan_wei_nei_jin_e',\n 'nian_du_ju_min_ji_ben_yi_liao_bao_xian_ji_jin_lei_ji_zhu_yuan_zhi_fu',\n 'medicalpaymoney',\n ],\n 're_recog': True\n}\n\nbeijing_teshu_table_config = [\n beijing_teshu_table_config1,\n beijing_teshu_table_config2,\n beijing_teshu_table_config3,\n beijing_table_teshu_config4\n]\n","repo_name":"imfifc/myocr","sub_path":"ocr_structuring/core/utils/medical_amount_utils/medical_table_config.py","file_name":"medical_table_config.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24786625811","text":"from app import App\nimport sys\nimport logging\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n app = App()\n arg_len = len(sys.argv)\n command = sys.argv[1]\n\n if command == \"send\":\n if arg_len < 4:\n logging.exception(f\"Not enough arguments for send command. 
Expected: 2 got {arg_len - 2}\")\n            return\n        try:\n            filename = sys.argv[2]\n            cipher = sys.argv[3]\n            logging.info(\"sending file..\")\n            app.send_file(filename, cipher)\n        except Exception as e:\n            logging.exception(e)\n            return\n\n        logging.info(\"file sent successfully!\")\n\n    elif command == \"receive\":\n        try:\n            app.receive_file()\n        except Exception as e:\n            logging.exception(e)\n            return\n        logging.info(\"file received successfully!\")\n    \n    else:\n        logging.exception(f\"Command not found for {command}\")\n        return\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"rikiachmad/kij-assignment-symmetric-cipher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14239749125","text":"# Lago Logs module - An easy way to set up your logs \n#\n# To use this module, import the function logs_lake and run it.\n# The function will automatically configure the logging in the right way.\n# The module is extremely biased, so use at your own risk.\n\nimport logging \n\n\ndef logs_lake():\n    # Start the log level as debug, everything gets logged\n    logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', \n                        filename='example.log', encoding='utf-8',datefmt='%d/%m/%Y %I:%M:%S %p',\n                        level=logging.DEBUG)\n    logger = logging.getLogger()\n    logger.debug(\"Logger started successfully\")\n\n","repo_name":"morallito/web_scrapping","sub_path":"books/python_web_scraping/lago_logs.py","file_name":"lago_logs.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13365569069","text":"from PIL import Image\nimport numpy as np\nimport sys\nimport pickle\nSOURCE_PATH = r\"/home/user/ZeroDeepLearning/01.ゼロから作るDeepLearning/99.source\"\nsys.path.append(SOURCE_PATH)\nfrom dataset.mnist import load_mnist  # noqa\nfrom common.functions import sigmoid, softmax  # noqa\n\n\ndef get_data():\n    (x_train, t_train), (x_test, t_test) = \\\n        load_mnist(flatten=True, normalize=False, one_hot_label=False)\n    return x_test, t_test\n\n\ndef init_network():\n    dir = SOURCE_PATH + r\"/ch03/\"\n    with open(dir + \"sample_weight.pkl\", 'rb') as f:\n        network = pickle.load(f)\n    return network\n\n\ndef predict(network, x):\n    W1, W2, W3 = network['W1'], network['W2'], network['W3']\n    b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n    a1 = np.dot(x, W1) + b1\n    z1 = sigmoid(a1)\n    a2 = np.dot(z1, W2) + b2\n    z2 = sigmoid(a2)\n    a3 = np.dot(z2, W3) + b3\n    y = softmax(a3)\n\n    return y\n\n\nx, t = get_data()\nnetwork = init_network()\naccuracy_cnt = 0\nfor i in range(len(x)):\n    y = predict(network, x[i])\n    p = np.argmax(y)  # get the index of the element with the highest probability\n    if p == t[i]:\n        accuracy_cnt += 1\n\nprint(\"Accuracy:\" + str(float(accuracy_cnt) / len(x)))\n\n# Accuracy:0.9207\n# The output differs from the book's result. Environment-dependent? The overflow warning below (it appeared twice) seems to be the cause.\n# RuntimeWarning: overflow encountered in exp\n#   return 1 / (1 + np.exp(-x))\n","repo_name":"YukariMazeDofu/ZeroDeepLearning","sub_path":"01.ゼロから作るDeepLearning/03.ニューラルネットワーク/3.6/newralnet_mnist_mod.py","file_name":"newralnet_mnist_mod.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23663857375","text":"\"\"\"\nURL configuration for project_practice project.\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom project_practice import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('about',views.aboutUs),\n path('html/',views.htmlResponse),\n path('',views.dynamicRequest),\n path('animation',views.htmlAnimation),\n path('geolocation',views.getGeolocation),\n path('dragdrop',views.dragDrop),\n path('fileinputapi',views.fileInputAPI),\n path(\"localstorageapi\",views. localStoarageAPI)\n]\n","repo_name":"BantuD/FullStack-Django","sub_path":"project_practice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31041626226","text":"import os\nimport json\nfrom copy import deepcopy\nfrom typing import NamedTuple\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\n\nfrom utils import checkpoint\n# from utils.logger import Logger\nfrom tensorboardX import SummaryWriter\nfrom utils.utils import output_logging\n\n\nclass Trainer(object):\n \"\"\"Training Helper class\"\"\"\n def __init__(self, cfg, model, data_iter, optimizer, device):\n self.cfg = cfg\n self.model = model\n self.optimizer = optimizer\n self.device = device\n\n # data iter\n if len(data_iter) == 1:\n self.sup_iter = data_iter[0]\n elif len(data_iter) == 2:\n self.sup_iter = self.repeat_dataloader(data_iter[0])\n self.unsup_iter = self.repeat_dataloader(data_iter[1])\n elif len(data_iter) == 3:\n self.sup_iter = self.repeat_dataloader(data_iter[0])\n self.unsup_iter = self.repeat_dataloader(data_iter[1])\n self.eval_iter = data_iter[2]\n\n def train(self, get_loss, get_acc, model_file, pretrain_file):\n \"\"\" train uda\"\"\"\n\n # tensorboardX logging\n if self.cfg.results_dir:\n logger = SummaryWriter(log_dir=os.path.join(self.cfg.results_dir, 'logs'))\n\n self.model.train()\n self.load(model_file, pretrain_file) # between model_file and pretrain_file, only one model will be loaded\n model = self.model.to(self.device)\n if self.cfg.data_parallel: # Parallel GPU mode\n model = nn.DataParallel(model)\n\n global_step = 0\n loss_sum = 0.\n max_acc = [0., 0] # acc, step\n\n # Progress bar is set by unsup or sup data\n # uda_mode == True --> sup_iter is repeated\n # uda_mode == False --> sup_iter is not repeated\n iter_bar = tqdm(self.unsup_iter, total=self.cfg.total_steps) if self.cfg.uda_mode \\\n else tqdm(self.sup_iter, total=self.cfg.total_steps)\n for i, batch in enumerate(iter_bar):\n \n # Device assignment\n if self.cfg.uda_mode:\n sup_batch = [t.to(self.device) for t in next(self.sup_iter)]\n unsup_batch = [t.to(self.device) for t in batch]\n else:\n sup_batch = [t.to(self.device) for t in batch]\n unsup_batch = None\n\n # update\n self.optimizer.zero_grad()\n final_loss, sup_loss, unsup_loss = get_loss(model, sup_batch, unsup_batch, global_step)\n final_loss.backward()\n self.optimizer.step()\n\n # print loss\n global_step += 1\n loss_sum += final_loss.item()\n if 
self.cfg.uda_mode:\n iter_bar.set_description('final=%5.3f unsup=%5.3f sup=%5.3f'\\\n % (final_loss.item(), unsup_loss.item(), sup_loss.item()))\n else:\n iter_bar.set_description('loss=%5.3f' % (final_loss.item()))\n\n # logging \n if self.cfg.uda_mode:\n logger.add_scalars('data/scalar_group',\n {'final_loss': final_loss.item(),\n 'sup_loss': sup_loss.item(),\n 'unsup_loss': unsup_loss.item(),\n 'lr': self.optimizer.get_lr()[0]\n }, global_step)\n else:\n logger.add_scalars('data/scalar_group',\n {'sup_loss': final_loss.item()}, global_step)\n\n if global_step % self.cfg.save_steps == 0:\n self.save(global_step)\n\n if get_acc and global_step % self.cfg.check_steps == 0 and global_step > 4999:\n results = self.eval(get_acc, None, model)\n total_accuracy = torch.cat(results).mean().item()\n logger.add_scalars('data/scalar_group', {'eval_acc' : total_accuracy}, global_step)\n if max_acc[0] < total_accuracy:\n self.save(global_step)\n max_acc = total_accuracy, global_step\n print('Accuracy : %5.3f' % total_accuracy)\n print('Max Accuracy : %5.3f Max global_steps : %d Cur global_steps : %d' %(max_acc[0], max_acc[1], global_step), end='\\n\\n')\n\n if self.cfg.total_steps and self.cfg.total_steps < global_step:\n print('The total steps have been reached')\n print('Average Loss %5.3f' % (loss_sum/(i+1)))\n if get_acc:\n results = self.eval(get_acc, None, model)\n total_accuracy = torch.cat(results).mean().item()\n logger.add_scalars('data/scalar_group', {'eval_acc' : total_accuracy}, global_step)\n if max_acc[0] < total_accuracy:\n max_acc = total_accuracy, global_step \n print('Accuracy :', total_accuracy)\n print('Max Accuracy : %5.3f Max global_steps : %d Cur global_steps : %d' %(max_acc[0], max_acc[1], global_step), end='\\n\\n')\n self.save(global_step)\n return\n return global_step\n\n def eval(self, evaluate, model_file, model):\n \"\"\" evaluation function \"\"\"\n if model_file:\n self.model.eval()\n self.load(model_file, None)\n model = self.model.to(self.device)\n if self.cfg.data_parallel:\n model = nn.DataParallel(model)\n\n results = []\n iter_bar = tqdm(self.sup_iter) if model_file \\\n else tqdm(deepcopy(self.eval_iter))\n for batch in iter_bar:\n batch = [t.to(self.device) for t in batch]\n\n with torch.no_grad():\n accuracy, result = evaluate(model, batch)\n results.append(result)\n\n iter_bar.set_description('Eval Acc=%5.3f' % accuracy)\n return results\n \n def load(self, model_file, pretrain_file):\n \"\"\" between model_file and pretrain_file, only one model will be loaded \"\"\"\n if model_file:\n print('Loading the model from', model_file)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(model_file))\n else:\n self.model.load_state_dict(torch.load(model_file, map_location='cpu'))\n\n elif pretrain_file:\n print('Loading the pretrained model from', pretrain_file)\n if pretrain_file.endswith('.ckpt'): # checkpoint file in tensorflow\n checkpoint.load_model(self.model.transformer, pretrain_file)\n elif pretrain_file.endswith('.pt'): # pretrain model file in pytorch\n self.model.transformer.load_state_dict(\n {key[12:]: value\n for key, value in torch.load(pretrain_file).items()\n if key.startswith('transformer')}\n ) # load only transformer parts\n \n def save(self, i):\n \"\"\" save model \"\"\"\n if not os.path.isdir(os.path.join(self.cfg.results_dir, 'save')):\n os.makedirs(os.path.join(self.cfg.results_dir, 'save'))\n torch.save(self.model.state_dict(),\n os.path.join(self.cfg.results_dir, 'save', 'model_steps_'+str(i)+'.pt'))\n\n def 
repeat_dataloader(self, iterable):\n        \"\"\" repeat dataloader \"\"\"\n        while True:\n            for x in iterable:\n                yield x\n","repo_name":"SanghunYun/UDA_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"16"} +{"seq_id":"35641200827","text":"from scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom ScrapSurfSpot.ScrapSurfSpot.spiders import Spot2\nimport os\n\n\nclass Scraper:\n    def __init__(self):\n        settings_file_path = 'ScrapSurfSpot.ScrapSurfSpot.settings'\n        os.environ.setdefault('SCRAPY_SETTINGS_MODULE', settings_file_path)\n        self.settings = get_project_settings()\n        self.process = CrawlerProcess(self.settings)\n        self.spider = Spot2.ExampleSpider \n\n    def run_spider(self):\n        self.process.crawl(self.spider)\n        self.process.start() ","repo_name":"AntoineMOREAU1/Application-flask-spots-surf","sub_path":"ScrapSurfSpot/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31640097522","text":"from frequent_functions import get_valid_answer, format_list_to_string\nMENU = {\n    \"espresso\": {\n        \"ingredients\": {\n            \"water\": 50,\n            \"coffee\": 18,\n        },\n        \"cost\": 1.5,\n    },\n    \"latte\": {\n        \"ingredients\": {\n            \"water\": 200,\n            \"milk\": 150,\n            \"coffee\": 24,\n        },\n        \"cost\": 2.5,\n    },\n    \"cappuccino\": {\n        \"ingredients\": {\n            \"water\": 250,\n            \"milk\": 100,\n            \"coffee\": 24,\n        },\n        \"cost\": 3.0,\n    }\n}\n\nresources = {\n    \"water\": 300,\n    \"milk\": 200,\n    \"coffee\": 100,\n}\n\nmoney = 0 #how much money in machine\n\nemojis = []\n\n# TODO: 1. Main Funct: Ask Flavor, off return\ndef coffee_machine():\n    flavor_options = ['report', 'off', 'espresso', 'latte', 'cappuccino']\n    flavor = get_valid_answer(flavor_options, \"What would you like? (espresso/latte/cappuccino): \")\n\n    if flavor == 'off':\n        return\n    elif flavor == 'report':\n        report()\n    else:\n        missing_resources = check_resources(flavor)\n        if len(missing_resources) == 0:\n            payment(flavor)\n        else:\n            missing_string = format_list_to_string(missing_resources)\n            print(f\"Sorry there is not enough {missing_string}.\")\n    coffee_machine()\n\n# TODO: 2. Report Function\ndef report():\n    print(f\"Water: {resources['water']}ml\")\n    print(f\"Milk: {resources['milk']}ml\")\n    print(f\"Coffee: {resources['coffee']}g\")\n    print(f\"Money: ${money}\")\n\n# TODO: 3. check resource enough\ndef check_resources(flavor):\n    required_resources = MENU[flavor]['ingredients']\n    missing = []\n    for item in required_resources:\n        if required_resources[item] > resources[item]:\n            missing.append(item)\n    return missing\n\n# TODO: 4. payment function check enough\ndef payment(flavor):\n    print(\"Please insert coins.\")\n    quarters = int(input(\"How many quarters?: \"))\n    dimes = int(input(\"How many dimes?: \"))\n    nickles = int(input(\"How many nickles?: \"))\n    pennies = int(input(\"How many pennies?: \"))\n    amount_paid = (quarters * 25 + dimes * 10 + nickles * 5 + pennies)/100\n    cost = MENU[flavor]['cost']\n\n    if amount_paid < cost:\n        print(f\"Sorry that's not enough money. Money refunded\")\n    else:\n        change = round(amount_paid - cost, 2)\n        if change != 0:\n            print(f\"Here is ${change} in change.\")\n        make_drink(flavor)\n\n# TODO: 5. 
make drink function\ndef make_drink(flavor):\n flavor_details = MENU[flavor]\n flavor_ingredients = flavor_details['ingredients']\n for item in flavor_ingredients:\n resources[item] -= flavor_ingredients[item]\n print(f\"Here is your {flavor} ☕. Enjoy!\")\n global money\n money += flavor_details['cost']\n\n\ncoffee_machine()","repo_name":"burgus7/100-day-bootcamp","sub_path":"day_15/coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42003308972","text":"import pygame\nfrom constants.colors import BLACK_COLOR, FILL_COLOR, BACKGROUND_COLOR\n\nclass Box(object):\n def __init__(self, screen, font, x, y, value):\n self.x = x\n self.y = y\n self.width = 39\n self.height = 39\n self.value = value\n self.clickable = True\n self.set_initial_value(screen, font, value)\n self.draw_box(screen)\n\n def get_x_y(self):\n return self.x / 40, ' - ', self.y / 40\n\n def set_initial_value(self, screen, font, value):\n self.value = value\n if value != 0:\n self.clickable = False\n\n self.highlight_box(screen, font, FILL_COLOR)\n\n self.set_number(screen, font)\n\n def set_number(self, screen, font):\n if self.value != 0:\n number = font.render(str(int(self.value)), True, (0, 0, 0))\n screen.blit(\n number,\n (\n self.x + 10,\n self.y + 5 \n )\n )\n def erase_number(self, screen, font):\n self.value = 0\n self.highlight_box(screen, font, BACKGROUND_COLOR)\n\n def draw_box(self, screen):\n pygame.draw.rect(\n screen, BLACK_COLOR, pygame.Rect(self.x, self.y, 40, 40), 1\n )\n\n def clicked_box(self, screen, clicked_x, clicked_y):\n if self.clickable:\n return (clicked_x >= self.x and (clicked_x <= self.x + self.width)) and (clicked_y >= self.y and (clicked_y <= self.y + self.height))\n return False\n\n def highlight_box(self, screen, font, color):\n if self.clickable:\n pygame.draw.rect(\n screen,\n color,\n pygame.Rect(self.x, self.y, self.width, self.height))\n else:\n pygame.draw.rect(\n screen,\n FILL_COLOR,\n pygame.Rect(self.x, self.y, self.width, self.height))\n number = font.render(str(int(self.value)), True, (0, 0, 0))\n self.set_number(screen, font)\n\n def on_click(self, screen, font, value):\n if isinstance(value, int):\n number = font.render(str(value), True, (0, 0, 0))\n self.value = value\n self.set_number(screen, font)\n","repo_name":"kasia-jablonski/Beginning-Python","sub_path":"pygame_projects/Sudoku v2/classes/Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37793830384","text":"import pyxel\r\nimport random\r\n\r\npyxel.init(48, 48, caption=\"Conway's game of life\", fps=30)\r\n\r\nboard = []\r\nboard_change = []\r\nrun = False\r\ngen = 0\r\n\r\nfor i in range(pyxel.width * pyxel.height):\r\n\tboard.append(0)\r\nboard_change = board.copy()\r\n\r\ndef randomise():\r\n\tboard.clear()\r\n\tfor i in range(pyxel.width * pyxel.height):\r\n\t\tboard.append(random.randint(0, 1))\r\n\tboard_change = board.copy()\r\n\t\r\n\t\r\ndef update():\r\n\tglobal run, board, board_change, gen\r\n\tif pyxel.btnp(pyxel.KEY_Q):\r\n\t\tpyxel.quit()\r\n\tif pyxel.btnp(pyxel.KEY_R):\r\n\t\trandomise()\r\n\tif pyxel.btnp(pyxel.KEY_C):\r\n\t\tboard.clear()\r\n\t\tfor i in range(pyxel.width * pyxel.height):\r\n\t\t\tboard.append(0)\r\n\t\tboard_change = board.copy()\r\n\tif pyxel.btn(pyxel.MOUSE_LEFT_BUTTON) and run == False and not 
pyxel.mouse_x > pyxel.width and not pyxel.mouse_y > pyxel.height:\r\n\t\tboard.pop(pyxel.mouse_x + pyxel.width * pyxel.mouse_y)\r\n\t\tboard.insert(pyxel.mouse_x + pyxel.width * pyxel.mouse_y, 1)\r\n\t\tboard_change = board.copy()\r\n\tif pyxel.btn(pyxel.MOUSE_RIGHT_BUTTON) and run == False:\r\n\t\tboard.pop(pyxel.mouse_x + pyxel.width * pyxel.mouse_y)\r\n\t\tboard.insert(pyxel.mouse_x + pyxel.width * pyxel.mouse_y, 0)\r\n\t\tboard_change = board.copy()\r\n\tif pyxel.btnp(pyxel.KEY_ENTER):\r\n\t\trun = not run\r\n\t\tgen = 0\r\n\t\t\r\n\t# cell logic\r\n\tif run or pyxel.btnp(pyxel.KEY_RIGHT):\r\n\t\tfor c in range(len(board)):\r\n\t\t\tcell_sum = 0\r\n\t\t\t#cell_sum = board[c - 1] + board[c + 1] + board[c - pyxel.width] + board[c - pyxel.width - 1] + board[c - pyxel.width + 1] + board[c + pyxel.width] + board[c + pyxel.width + 1] + board[c + pyxel.width - 1]\r\n\t\t\t\r\n\t\t\tif not c - 1 < 0:\r\n\t\t\t\tcell_sum += board[c - 1]\r\n\t\t\tif not c + 1 > (len(board) - 1):\r\n\t\t\t\tcell_sum += board[c + 1]\r\n\t\t\tif not c - pyxel.width - 1 < 0:\r\n\t\t\t\tcell_sum += board[c - pyxel.width - 1]\r\n\t\t\tif not c - pyxel.width + 1 < 0:\r\n\t\t\t\tcell_sum += board[c - pyxel.width + 1]\r\n\t\t\tif not c + pyxel.width + 1 > (len(board) - 1):\r\n\t\t\t\tcell_sum += board[c + pyxel.width + 1]\r\n\t\t\tif not c + pyxel.width - 1 > (len(board) - 1):\r\n\t\t\t\tcell_sum += board[c + pyxel.width - 1]\r\n\t\t\tif not c - pyxel.width < 0:\r\n\t\t\t\tcell_sum += board[c - pyxel.width]\r\n\t\t\tif not c + pyxel.width > (len(board) - 1):\r\n\t\t\t\tcell_sum += board[c + pyxel.width]\r\n\t\t\t\r\n\t\t\t# rules of life\r\n\t\t\tif cell_sum < 2:\r\n\t\t\t\tboard_change[c] = 0\r\n\t\t\telif (board[c] == 1 and cell_sum == 2) or cell_sum == 3:\r\n\t\t\t\tboard_change[c] = 1\r\n\t\t\telif cell_sum > 3:\r\n\t\t\t\tboard_change[c] = 0\r\n\t\t\telif board[c] == 0 and cell_sum == 3:\r\n\t\t\t\tboard_change[c] = 1\r\n\t\t\telse:\r\n\t\t\t\tboard_change[c] = 0\r\n\t\t\r\n\t\tfor p in range(len(board)):\r\n\t\t\tboard[p] = board_change[p]\r\n\t\r\n\t\tgen += 1\r\n\t\t#print(gen)\r\n\t\t\r\ndef draw():\r\n\tpyxel.cls(0)\r\n\tfor y in range(pyxel.height):\r\n\t\tfor x in range(pyxel.width):\r\n\t\t\tif board[x + pyxel.width * y] == 1:\r\n\t\t\t\tpyxel.pset(x, y, 7)\r\n\t# draw cursor\r\n\tpyxel.pset(pyxel.mouse_x, pyxel.mouse_y, 10)\r\n\tif run:\r\n\t\tpyxel.line(0, 0, pyxel.width, 0, 11)\r\n\t\r\npyxel.run(update, draw)","repo_name":"moosipea/python-game-of-life","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75319311367","text":"import openai\r\nimport pandas as pd\r\nimport json\r\nimport os\r\nimport tiktoken\r\nimport asyncio\r\nimport asyncpg\r\nimport numpy as np\r\nfrom pgvector.asyncpg import register_vector\r\nfrom openai.embeddings_utils import get_embedding\r\n\r\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\") \r\n\r\n# embedding model parameters\r\nembedding_model = \"text-embedding-ada-002\"\r\nembedding_encoding = \"cl100k_base\" # this is the encoding for text-embedding-ada-002\r\nmax_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191\r\n\r\ntest_text = 'Dark, firm and focused, with fine, tightly wound tannins surrounding a core of black cherry, bay leaf, licorice and black pepper flavors, lingering intently on the expressive, graceful finish. 
'\r\ntest_text1 = 'A well-structured red, this features lightly chewy tannins that frame flavors of damson plum preserves, ground coffee, fig paste and bittersweet cocoa. This is dark and brooding, with a smoky mineral sublayer as well as hints of dried herb and kirsch on the finish.'\r\n\r\n# v = get_embedding(test_text1, engine=embedding_model)\r\n# print(v)\r\n# print(len(v))\r\n\r\ndef build_embeddings(file_name):\r\n with open(f\"c:/temp/{file_name}\", \"r\") as file:\r\n content = file.read()\r\n wines = json.loads(content)\r\n\r\n # wines = wines[1:5]\r\n print(file.name, 'contains ', len(wines), ' reviews')\r\n\r\n for wine in wines:\r\n print(wine)\r\n embedding = get_embedding(wine['tasting_note'], engine=embedding_model)\r\n wine['embedding'] = embedding\r\n\r\n return wines\r\n\r\n\r\nasync def update_db(wines):\r\n conn = await asyncpg.connect(os.getenv(\"NEONDB_CONNSTR\"))\r\n await register_vector(conn)\r\n\r\n # Store all the generated embeddings back into the database.\r\n for wine in wines:\r\n await conn.execute(\r\n \"INSERT INTO wines (vineyard, wine_name, price, tasting_note, embedding) VALUES ($1, $2, $3, $4, $5)\",\r\n wine['vineyard'],\r\n wine['wine_name'],\r\n wine['price'],\r\n wine['tasting_note'],\r\n np.array(wine['embedding']),\r\n )\r\n\r\n await conn.close()\r\n\r\n\r\n# # Run the SQL commands now.\r\n# await main() # type: ignore\r\n\r\nasync def find_similar_wines(update_test=False):\r\n #https://truemythwinery.com/wines/true-myth-cabernet-sauvignon/ \r\n test_note = 'full of polished aromas of blueberry, cherry and vanilla, leading to flavors of dark red fruits, black currants and hints of pepper, mocha and caramelized oak. Rich yet smooth, '\r\n conn = await asyncpg.connect(os.getenv(\"NEONDB_CONNSTR\"))\r\n await register_vector(conn)\r\n\r\n similarity_threshold = 0.1\r\n num_matches = 10\r\n qe = get_embedding(test_note, engine=embedding_model)\r\n print(qe);\r\n\r\n if update_test: \r\n await conn.execute(\r\n \"INSERT INTO test_wine (id, wine_name, tasting_note, embedding) VALUES (1, $1, $2, $3)\",\r\n 'Cabernet Sauvignon True Myth 2020',\r\n test_note,\r\n np.array(qe),\r\n )\r\n\r\n # Find similar products to the query using cosine similarity search \r\n # over all vector embeddings. This new feature is provided by `pgvector`.\r\n results = await conn.fetch(\r\n \"\"\"\r\n SELECT vineyard, wine_name, price, tasting_note, 1 - (embedding <=> $1) AS similarity\r\n FROM wines\r\n WHERE 1 - (embedding <=> $1) > $2\r\n ORDER BY similarity DESC\r\n LIMIT $3\r\n \"\"\",\r\n qe,\r\n similarity_threshold,\r\n num_matches\r\n )\r\n\r\n if len(results) == 0:\r\n raise Exception(\"Did not find any results. 
Adjust the query parameters.\")\r\n\r\n    for r in results:\r\n        print(r['wine_name'], r['price'], r['vineyard'], r['similarity'])\r\n\r\n\r\n    await conn.close()\r\n\r\n    df = pd.DataFrame(results, columns=['wine_name', 'price', 'vineyard', 'tasting_note', 'similarity'])\r\n    print(df.head())\r\n\r\n    # html = df.to_html(index=False)\r\n    # with open('c:/temp/most_dissimilar_wines.html', 'w') as fo:\r\n    #     fo.write(html)\r\n\r\n\r\ndef main():\r\n    # wines_w_embeddings = build_embeddings('merlot_output_1.json')\r\n    # asyncio.get_event_loop().run_until_complete(update_db(wines_w_embeddings))\r\n\r\n    asyncio.get_event_loop().run_until_complete(find_similar_wines(update_test=False))\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"sanimesa/wine_tasting","sub_path":"scripts/wineembeddings.py","file_name":"wineembeddings.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30886214166","text":"import pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom IPython.core.display import display, HTML\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\ndef carrega_arquivo_inep(arq,cols=None):\n    return pd.read_csv(arq,encoding='latin1',low_memory=False, sep='|',usecols=cols)\n\n# reduce the size to save memory\ndef ajusta_colunas_int_df_inep(df, vai_printar_cols = False):\n    ls_itips = [np.int8,np.int16,np.int32,np.int64]\n    ls_ftips = [np.float16,np.float32,np.float64]\n    for c in df.columns:\n        try:\n            if c.startswith('IN_') or c.startswith('ID_'):\n                m = int(max(df[c].values))\n                try:\n                    for t in ls_itips:\n                        if m < np.iinfo(t).max:\n                            df[c] = df[c].astype(t)\n                            break;\n                except:\n                    pass\n            if df[c].dtype in ls_ftips:\n                m = df[c].max()\n                for t in ls_ftips:\n                    if m < np.finfo(t).max:\n                        df[c] = df[c].astype(t)\n                        break;\n            elif df[c].dtype in ls_itips:\n                m = df[c].max()\n                for t in ls_itips:\n                    if m < np.iinfo(t).max:\n                        df[c] = df[c].astype(t)\n                        break;\n        except Exception as e:\n            pass\n        if vai_printar_cols:\n            print(f'{c}: {df[c].dtype}')\n    return df\n\n\ndef monta_df_inep(arq):\n    df = carrega_arquivo_inep(arq)\n    df = df.fillna(-1)\n    df = ajusta_colunas_int_df_inep(df)\n    return df.replace(-1,np.nan)\n\n\n\ndef monta_por_tip_ensino(df,tip,dcr,ano):\n    qtds = None\n    if ano < 2015:\n        qtds = df.loc[df.FK_COD_MOD_ENSINO == tip].groupby(['FK_COD_DOCENTE','PK_COD_ENTIDADE'])[['PK_COD_TURMA']].nunique()\n    elif tip == 1:\n        qtds = df.groupby(['CO_PESSOA_FISICA','CO_ENTIDADE'])[['IN_REGULAR']].sum()\n    elif tip == 3:\n        qtds = df.groupby(['CO_PESSOA_FISICA','CO_ENTIDADE'])[['IN_EJA']].sum()\n    qtds.reset_index(inplace=True)\n    qtds.columns = ['CO_PESSOA_FISICA','CO_ENTIDADE',f'NU_QTD_TURMAS_{dcr}_{ano}']\n    return qtds\n\ndef monta_por_tip_serie(df,tip,ano,nivel_escola=True):\n    qtds = None\n    series = {\n        'INFANT' : [1,2,3],\n        'MEDIO' : list(range(25,39))\n    }\n    cols_base = []\n    if ano < 2015:\n        cols_base = ['FK_COD_DOCENTE','PK_COD_ENTIDADE'] if nivel_escola else ['FK_COD_DOCENTE']\n        qtds = df.loc[df.FK_COD_ETAPA_ENSINO.isin(series[tip])].groupby(cols_base)[['PK_COD_TURMA']].nunique()\n    else:\n        cols_base = ['CO_PESSOA_FISICA','CO_ENTIDADE'] if nivel_escola else ['CO_PESSOA_FISICA']\n        qtds = df.loc[df.TP_ETAPA_ENSINO.isin(series[tip])].groupby(cols_base)[['ID_TURMA']].nunique()\n\n    cols_base = ['CO_PESSOA_FISICA','CO_ENTIDADE'] if nivel_escola else ['CO_PESSOA_FISICA']\n    qtds.reset_index(inplace=True)\n    qtds.columns = cols_base + [f'NU_QTD_TURMAS_{tip}_{ano}']\n    return 
qtds\n","repo_name":"itsbarreto/educacao","sub_path":"educ_utils/df_inep_utils.py","file_name":"df_inep_utils.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23671174171","text":"# Write a script to extract the 26 letters from the file and add it to the list\n\n\n# import string\n# FILE_PATH = r\"C:\\Users\\nkrishnappa\\Desktop\\100DaysOfCode\\Python\\Day-#40\\Ex-45\"\n# extract_letter = []\n# for letter in string.ascii_uppercase:\n# file_name = FILE_PATH + f\"/{letter}.txt\"\n# with open(file_name, \"r\")as file:\n# extract_letter.append(file.read())\n\n# print(extract_letter)\n\nimport glob\nFILE_PATH = r\"C:\\Users\\nkrishnappa\\Desktop\\100DaysOfCode\\Python\\Day-#40\\Ex-45\"\nextract_letter = []\nfor file in glob.glob(f\"{FILE_PATH}\\*.txt\"):\n with open(file, \"r\") as f:\n extract_letter.append(f.read())\n\nprint(extract_letter)\n\n\n# ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']","repo_name":"nkrishnappa/100DaysOfCode","sub_path":"Python/Day-#40/Ex-46.py","file_name":"Ex-46.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7109312333","text":"\"\"\"\n8. Global & Return\n\"\"\"\n### 8.0 GLOBALS #######################################################\ndef add(value1, value2):\n \"\"\"Add two values and return the result\"\"\"\n return value1 + value2\n\nresult = add(3, 5)\nprint(f\"Result 1 is {result}\")\n# Output: 8\n\n#: Return is a keyword that hands-back a value from a function, exiting\n#: the function in the process.\n#: Here, it is assigned to the 'result' variable. In this case, result\n#: is a global variable - a variable that can be accessed from outside\n#: the scope of a function or class. 
They are usually declared in the\n#: module namespace, but it is possible to make global variables inside\n#: functions.\n\ndef add2(value1, value2):\n \"\"\"Add two values and create a global named result2 with the result\"\"\"\n global result2\n result2 = value1 + value2\n\nadd2(2, 4)\nprint(f\"Result 2 is {result2}\")\n\n#: In practice, using globals is a bad practice as it makes it very\n#: to understand where variables are coming from.\n\n### 8.1 MULTIPLE RETURN VALUES ########################################\n\n# option 1: globals - obvious but awful\ndef profile1():\n global name1\n global age1\n name1 = 'Danny'\n age1 = 31\n\nprofile1()\nprint(name1) # Output: Danny\nprint(age1) # Output: 31\n\n\n# option 2: packed tuples\ndef profile2():\n name2 = 'Also Danny'\n age2 = 32\n return (name2, age2)\n\nprofile_data2=profile2()\nprint(profile_data2[0])\nprint(profile_data2[1])\n\n\n# option 3: tuple unpacking\ndef profile3():\n name3 = 'Danny the 3rd'\n age3 = 33\n return name3, age3\n\nprofile_name3, profile_age3 = profile3()\nprint(profile_name3)\nprint(profile_age3)\n\n\n# option 4: named tuples\nfrom collections import namedtuple\ndef profile4():\n my_profile = namedtuple('profile', ['name', 'age'])\n my_profile.name = 'Danny the 4th'\n my_profile.age = 34\n return my_profile\n\nperson = profile4()\nprint(person.name)\nprint(person.age)\n","repo_name":"jtannas/intermediate_python","sub_path":"global_n_return.py","file_name":"global_n_return.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40350075415","text":"\"\"\"Consider a list (list = []). You can perform the following commands: `\ninsert i e: Insert integer e at position i.\nprint: Print the list.\nremove e: Delete the first occurrence of integer e.\nappend e: Insert integer e at the end of the list.\nsort: Sort the list.\npop: Pop the last element from the list.\nreverse: Reverse the list.\n\nInitialize your list and read in the value of n followed by n lines of commands\nwhere each command will be of the 7 types listed above.\nIterate through each command in order and perform the corresponding operation on your list.\n\nSample Input:\n12\ninsert 0 5\ninsert 1 10\ninsert 0 6\nprint\nremove 6\nappend 9\nappend 1\nsort\nprint\npop\nreverse\nprint\n\nSample Output:\n[6, 5, 10]\n[1, 5, 9, 10]\n[9, 5, 1]\n\"\"\"\n\n\nif __name__ == \"__main__\":\n N = int(input())\n my_list = []\n\n for _ in range(N):\n command = input().split()\n\n if command[0] == \"insert\":\n i, e = map(int, command[1:])\n my_list.insert(i, e)\n elif command[0] == \"print\":\n print(my_list)\n elif command[0] == \"remove\":\n e = int(command[1])\n my_list.remove(e)\n elif command[0] == \"append\":\n e = int(command[1])\n my_list.append(e)\n elif command[0] == \"sort\":\n my_list.sort()\n elif command[0] == \"pop\":\n my_list.pop()\n elif command[0] == \"reverse\":\n my_list.reverse()\n","repo_name":"CihatAcar/HackerRank-Python-Exercises","sub_path":"BasicDataTypes/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11766463691","text":"from typing import List\nclass Board:\n \n def create_board(self) -> List:\n matrix: List = []\n for _ in range(60):\n temp_row: List = []\n for _ in range(60):\n temp_row.append(0)\n matrix.append(temp_row)\n return matrix\n\nboard = Board()\nfor x in board.create_board():\n 
print(x)","repo_name":"grifmang/droids_dennis","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23330937684","text":"from argparse import ArgumentParser\nfrom curses import wrapper\nfrom sys import maxsize\nfrom columns import prtcols\nfrom .event import Event\n\n\ndef get_args():\n parser = ArgumentParser(description=\"Curses list picker.\")\n parser.add_argument(\"items\", nargs=\"+\", help=\"Items for the picker.\")\n parser.add_argument(\n \"--limit\", \"-l\", type=int, default=maxsize, help=\"Limit number of picks.\"\n )\n parser.add_argument(\n \"--numbers\", \"-n\", default=False, action=\"store_true\", help=\"Show row numbers.\"\n )\n parser.add_argument(\n \"--header\",\n \"-H\",\n type=str,\n default=\"PICK ITEMS FROM THIS LIST:\",\n help=\"A string to use as a header.\",\n )\n parser.add_argument(\n \"--footer\",\n \"-F\",\n type=str,\n default=\"Press [?] to view keybindings\",\n help=\"A string to use as a footer.\",\n )\n return parser.parse_args()\n\n\ndef event(stdscr, **kwargs):\n picker = Event(stdscr, **kwargs)\n return picker.get_picks()\n\n\ndef pick(**kwargs):\n return wrapper(event, **kwargs)\n\n\ndef main():\n args = get_args()\n kwargs = {\n \"items\": args.items,\n \"limit\": args.limit,\n \"numbers\": args.numbers,\n \"header\": args.header,\n \"footer\": args.footer,\n }\n picked = pick(**kwargs)\n if picked:\n prtcols(picked, 6)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tslight/cpick","sub_path":"cpick/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"28783212511","text":"from datetime import date\n \n # function input should have form date(YYYY, M, D)\ndef age_calculator(DoB):\n today = date.today()\n age = today.year - DoB.year - ((today.month, today.day) < (DoB.month, DoB.day))\n return str(age) + \" years\"\n\n\nprint(age_calculator(date(2001, 4, 19)))","repo_name":"lcawood/sigma-prework","sub_path":"age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17338855180","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom hacker_tools.tools import Manager, config\n\n\nclass App(tk.Tk):\n\n def __init__(self):\n super().__init__()\n self.running = False\n self.interval = 1024\n\n self.manager = Manager(4)\n self.title('HackerTools')\n self.resizable(0, 0)\n\n # Define tables and labels\n self.process_queue_label = self.create_label({\"row\": 0, \"column\": 0}, \"PROCESS QUEUE\")\n self.process_queue = self.create_table({\"row\": 1, \"column\": 0}, config.PROCESS_INFO)\n\n self.rejection_queue_label = self.create_label({\"row\": 2, \"column\": 0}, \"REJECTION QUEUE\")\n self.rejection_queue = self.create_table({\"row\": 3, \"column\": 0}, config.PROCESS_INFO)\n\n self.cpu_label = self.create_label({\"row\": 0, \"column\": 1}, \"CPU: tact 0\")\n self.cpu_view = self.create_table({\"row\": 1, \"column\": 1, \"sticky\": tk.EW}, ('Core id', \"Current Process\"))\n\n self.finished_processes_label = self.create_label({\"row\": 2, \"column\": 1}, \"FINISHED PROCESSES\")\n self.finished_processes_view = self.create_table({\"row\": 3, \"column\": 1, \"sticky\": tk.EW},\n config.PROCESS_SHORT_INFO)\n\n # add menu bar\n self.menubar = 
tk.Menu(self)\n        self.controls = tk.Menu(self.menubar, tearoff=0)\n\n        self.controls.add_command(label=\"Next tact\", command=self.tick, accelerator=\"Ctrl+T\")\n        self.controls.add_command(label=\"New Process\", command=self.generate_process, accelerator=\"Ctrl+N\")\n        self.menubar.add_cascade(label=\"Controls\", menu=self.controls)\n\n        self.autorun_menu = tk.Menu(self.menubar, tearoff=0)\n        self.autorun_menu.add_command(label=\"Start/Stop\", command=self.switch_state, accelerator=\"Ctrl+S\")\n        self.autorun_menu.add_command(label=\"Speed up\", command=self.decrease_interval, accelerator=\"Ctrl+Up\")\n        self.autorun_menu.add_command(label=\"Slow down\", command=self.increase_interval, accelerator=\"Ctrl+Down\")\n        self.autorun_menu.add_command(label=\"Default speed\", command=self.default_interval, accelerator=\"Ctrl+Home\")\n        self.menubar.add_cascade(label=\"Autorun\", menu=self.autorun_menu)\n\n        self.config(menu=self.menubar)\n        self.bind_all(\"<Control-s>\", self.switch_state)\n        self.bind_all(\"<Control-t>\", self.tick)\n        self.bind_all(\"<Control-n>\", self.generate_process)\n        self.bind_all(\"<Control-Up>\", self.decrease_interval)\n        self.bind_all(\"<Control-Home>\", self.default_interval)\n        self.bind_all(\"<Control-Down>\", self.increase_interval)\n\n        self.show_data()\n        self.autorun()\n\n    def default_interval(self, event=None):\n        self.interval = 1024\n\n    def decrease_interval(self, event=None):\n        self.interval = self.interval // 2 if self.interval > 16 else 16\n\n    def increase_interval(self, event=None):\n        self.interval = self.interval * 2 if self.interval < 16384 else 16384\n\n    def create_button(self, grid, text, command):\n        button = ttk.Button(self, text=text, command=command)\n\n        button.grid(**grid)\n\n        return button\n\n    def create_label(self, grid, text):\n        label = ttk.Label(self, text=text, font=(\"Consolas\", 16))\n        label.grid(**grid)\n\n        return label\n\n    def create_table(self, grid, columns, height=10):\n        tree = ttk.Treeview(self, columns=columns, show='headings', height=height)\n\n        for column in columns:\n            tree.column(column, width=len(column) * 12, anchor=tk.CENTER)\n\n        # define headings\n        for column in columns:\n            tree.heading(column, text=column)\n\n        tree.grid(**grid, pady=10, padx=25)\n\n        return tree\n\n    def show_data(self):\n        data = self.manager.generate_output()\n        self.clear_tables()\n        for process in data[\"process_queue\"]:\n            self.process_queue.insert('', tk.END, values=process)\n\n        for process in data[\"rejection_queue\"]:\n            self.rejection_queue.insert('', tk.END, values=process)\n\n        for process in data[\"finished_processes\"]:\n            self.finished_processes_view.insert('', tk.END, values=process)\n\n        for core in data[\"cpu\"]:\n            self.cpu_view.insert('', tk.END, values=core)\n\n        self.cpu_label[\"text\"] = f\"CPU: tact {data['current_tact']}\"\n        self.process_queue_label[\"text\"] = f\"PROCESS QUEUE: {data['process_len']}\"\n        self.rejection_queue_label[\"text\"] = f\"REJECTION QUEUE: {data['rejection_len']}\"\n        self.finished_processes_label[\"text\"] = f\"FINISHED PROCESSES: {data['finished_len']}\"\n\n    def clear_tables(self):\n        self.process_queue.delete(*self.process_queue.get_children())\n        self.rejection_queue.delete(*self.rejection_queue.get_children())\n        self.finished_processes_view.delete(*self.finished_processes_view.get_children())\n        self.cpu_view.delete(*self.cpu_view.get_children())\n\n    def tick(self, event=None):\n        self.manager.do_work()\n        self.show_data()\n\n    def generate_process(self, event=None):\n        self.manager.generate_process()\n        self.show_data()\n\n    def switch_state(self, event=None):\n        self.running = not self.running\n\n    def 
autorun(self):\n if self.running:\n self.tick()\n self.after(self.interval, self.autorun)\n\n\ndef main():\n app = App()\n app.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MichaelPotemkin/ProcessManager","sub_path":"process_manager.py","file_name":"process_manager.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1998403694","text":"import os\nimport csv\n\n\ndef get_data(index):\n with open(os.path.join(os.path.dirname(__file__), 'save.txt'), 'r', encoding='utf-8') as f:\n w = f.readlines()\n print(w[0])\n print(w[1])\n print(w[2])\n # return d[index]\n\n\ndef getCsv():\n rows = []\n with open('input.csv') as f:\n reader = csv.reader(f)\n for iter in reader:\n rows.append(iter)\n print(rows)\n\n\ngetCsv()","repo_name":"ycw786369470/myblog","sub_path":"auto_test/Code/ddt_test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28678151965","text":"import time\n\nfrom getpass import getpass\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass Swiper():\n def __init__(self):\n self.driver = webdriver.Chrome(ChromeDriverManager().install())\n\n #modify implicit waits later instead of using timer.sleep. write an implicit wait function\n def tinder_login(self):\n self.driver.get('https://tinder.com/')\n time.sleep(5)\n loginbutton = self.driver.find_element_by_xpath(\"/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/header/div[1]/div[2]/div\")\n loginbutton.click()\n time.sleep(5)\n phonelogin = self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div/div[1]/div/div[3]/span/div[3]/button\")\n phonelogin.click()\n time.sleep(5)\n phonenumber = self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div/div[1]/div[2]/div/input\")\n phonenumber.send_keys(input('Opened Tinder. Please enter your phone number: '))\n\n verificationcode = input('Please enter the 6 digit authentication code sent to your phone number: ')\n verificationcode = list(verificationcode)\n #validate that the code is 6 digits before proceeding \n\n #iterating through 6 digit verifcation code fields\n for number in verificationcode:\n verificationfield = self.driver.find_element_by_xpath(f\"/html/body/div[2]/div/div/div[1]/div[3]/input[{verificationcode.index(number) + 1}]\")\n verificationfield.send_keys(f\"{number}\")\n\n #click continue button \n\n def fb_login(self):\n self.driver.get('https://www.facebook.com/')\n a = self.driver.find_element_by_id('email')\n a.send_keys(input('Opened Facebook. Please enter username/email: '))\n b = self.driver.find_element_by_id('pass')\n b.send_keys(getpass(prompt='Username entered. Please enter password: '))\n print(\"Password entered. 
Logging in.\")\n c = self.driver.find_element_by_id('loginbutton')\n c.click()\n try: #Check whether login was successful by finding the home button\n self.driver.find_element_by_id('u_0_c')\n except:\n return False\n return True\n\n def tinder_login(self):\n self.driver.get('http://tinder.com')\n time.sleep(5)\n print(\"Clicking on sign in with Facebook.\")\n self.driver.find_element_by_xpath(\"//*[@id=\\\"modal-manager\\\"]/div/div/div[2]/div[1]/div/div[3]/button[1]/span\").click();\n time.sleep(2)\n try: #Selenium scripts open a testing environment in chrome. Every login acts like a brand new login. Must click through tutorial\n print(\"Dismissing tutorial prompts\")\n self.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/span/div/div[2]/div/div[1]/div[1]/div/button/span/span\").click()\n print(\"Prompt 1\")\n time.sleep(2)\n self.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/span/div/div[2]/div/div/main/div/button/span/span\").click()\n print(\"Prompt 2\")\n time.sleep(2)\n self.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/span/div/div[2]/div/div/div[1]/div/div/div[4]/button[1]/span/span\").click()\n print(\"Prompt 3\")\n time.sleep(2)\n self.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/span/div/div[2]/div/div/div[1]/div/div/div[4]/button[1]/span/span\").click()\n print (\"Prompt 4\")\n except:\n print('Something went wrong during login.')\n return False\n print(\"Ready to start swiping.\")\n return True\n\n def swipe_tinder(self):\n actions = ActionChains(self.driver)\n time.sleep(5)\n print(\"Swipe until there are no more profiles.\")\n try:\n #Stop swiping by catching the exception of not finding a profile. closes browser\n while self.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/span/div/div[1]/div/main/div/div/div/div[1]/div[1]/div/div[3]/div[1]\"):\n actions.send_keys(Keys.ARROW_RIGHT).perform()\n time.sleep(2)\n except:\n print(\"No more profiles found. 
Quitting.\")\n self.driver.quit()\n\nif __name__ == \"__main__\":\n swiper = Swiper()\n if(swiper.fb_login()):\n if swiper.tinder_login():\n swiper.swipe_tinder()\n else:\n print(\"Facebook login failed, Quitting\")\n swiper.driver.quit()","repo_name":"marshallnw18/Python-SysAdmin-Scripts","sub_path":"projectscripts/tinder_selenium.py","file_name":"tinder_selenium.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8535977946","text":"#!/usr/bin/evn python\n#-*-:coding:utf-8 -*-\n\n#Author:404\n#Name:东软通用门户软件 UniPortal 1.2存在通用型未授权访问\n#Refer:http://www.wooyun.org/bugs/wooyun-2015-0125186,http://www.wooyun.org/bugs/wooyun-2010-0116361\n\n\ndef assign(service,arg):\n if service==\"uniportal\":\n return True,arg \n \n\n\ndef audit(arg):\n url=arg+\"ecdomain/portal/survey/admin/SurveyStatis.jsp\"\n code, head, res, errcode, _ = curl.curl2(url)\n if code==200 and \"|./?$%^&*_~@#'''\r\ni=0\r\nstop_words = set(stopwords.words('english')) \r\n# read page text as a dictionary, suppressing extra spaces in CJK fonts\r\nblocks = page.getText(\"dict\", flags=11)[\"blocks\"]\r\nfor b in blocks: # iterate through the text blocks\r\n for l in b[\"lines\"]: # iterate through the text lines\r\n for s in l[\"spans\"]: # iterate through the text spans\r\n print(\"\")\r\n font_properties = \"Font: '%s' (%s), size %g, color #%06x\" % (\r\n s[\"font\"],flags_decomposer(s[\"flags\"]),s[\"size\"],s[\"color\"])\r\n print(\"Text: '%s'\" % s[\"text\"]) # simple print of text\r\n k = re.split(' ',s[\"text\"])\r\n if(len(k)>1):\r\n for i in k:\r\n if i not in stop_words and i not in punctuations:\r\n if i.isspace() == False:\r\n if i:\r\n df=df.append({'text':i,'font':s['font'],'size':s['size'],'color':s['color']},\r\n ignore_index=True)\r\n else:\r\n df=df.append({'text':s['text'],'font':s['font'],'size':s['size'],'color':s['color']},\r\n ignore_index=True)\r\n ","repo_name":"kennethjones17/PDFparser_TextpropertyExtractor","sub_path":"Basic parser.py","file_name":"Basic parser.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17460235862","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/12911\n\ndef one_counter(n):\n binary = str(format(n, 'b'))\n one = 0\n for i in binary:\n if i == \"1\":\n one += 1\n return one\n \ndef solution(n):\n answer = 0\n target = one_counter(n)\n n += 1\n while one_counter(n) != target:\n n += 1\n \n return n","repo_name":"updaun/level2-cv-09","sub_path":"src/kkm/algorithm/programmers/level2_다음_큰_숫자.py","file_name":"level2_다음_큰_숫자.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34463241853","text":"\"\"\"\nKe Chen knutchen@ucsd.edu\n\nTone-Octave Network - utils file\n\nThis file contains useful common methods\n\n\"\"\"\nimport os\nimport numpy as np\nimport torch\nimport mir_eval\nimport config\n\ndef index2centf(seq, centfreq):\n centfreq[0] = 0\n re = np.zeros(len(seq))\n for i in range(len(seq)):\n for j in range(len(centfreq)):\n if seq[i] < 0.1:\n re[i] = 0\n break\n elif centfreq[j] > seq[i]:\n re[i] = j\n break\n return re \n\n\ndef freq2octave(freq):\n if freq < 1.0 or freq > 2050:\n return config.octave_class\n else:\n return int(np.round(69 + 12 * np.log2(freq/440)) // 12) \n\ndef freq2tone(freq):\n if freq < 1.0 or freq > 2050:\n return 
config.tone_class\n else:\n return int(np.round(69 + 12 * np.log2(freq/440)) % 12) \n\ndef tofreq(tone, octave):\n if tone >= config.tone_class or octave >= config.octave_class or octave < 2:\n return 0.0\n else:\n return 440 * 2 ** ((12 * octave + tone * 12 / config.tone_class - 69) / 12)\n\n\ndef pos_weight(data, freq_bins):\n frames = data.shape[-1]\n non_vocal = float(len(data[data == 0]))\n vocal = float(data.size - non_vocal)\n z = np.zeros((freq_bins, frames))\n z[1:,:] += (non_vocal / vocal)\n z[0,:] += vocal / non_vocal\n print(non_vocal, vocal)\n return torch.from_numpy(z).float()\n\ndef freq2octave(freq):\n if freq < 1.0 or freq > 1990: \n return 0\n pitch = round(69 + 12 * np.log2(freq / 440))\n return int(pitch // 12)\n\ndef compute_roa(pred, gd):\n pred = pred[gd > 0.1]\n gd = gd[gd > 0.1]\n pred = np.array([freq2octave(d) for d in pred])\n gd = np.array([freq2octave(d) for d in gd])\n return np.sum(pred == gd) / len(pred)\n\n\ndef melody_eval(pred, gd):\n ref_time = np.arange(len(gd)) * 0.01\n ref_freq = gd\n\n est_time = np.arange(len(pred)) * 0.01\n est_freq = pred\n\n output_eval = mir_eval.melody.evaluate(ref_time,ref_freq,est_time,est_freq)\n VR = output_eval['Voicing Recall']*100.0 \n VFA = output_eval['Voicing False Alarm']*100.0\n RPA = output_eval['Raw Pitch Accuracy']*100.0\n RCA = output_eval['Raw Chroma Accuracy']*100.0\n ROA = compute_roa(est_freq, ref_freq) * 100.0\n OA = output_eval['Overall Accuracy']*100.0\n eval_arr = np.array([VR, VFA, RPA, RCA, ROA, OA])\n return eval_arr\n\ndef tonpy_fn(batch):\n dict_key = batch[0].keys()\n output_batch = {}\n for dk in dict_key:\n output_batch[dk] = np.array([d[dk] for d in batch])\n return output_batch","repo_name":"RetroCirce/TONet","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"16"} +{"seq_id":"20409907135","text":"#!/usr/bin/env python3\n\n'''\nThis modole contains functions for getting data from Spotify API\n\n'''\n\n\n\n########################################################################\n## Import librarys\n\nimport pandas as pd\n\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\nimport config\n\n\n\n########################################################################\n## Set connections with API\n\ndef get_spotify_connection() -> spotipy.client.Spotify:\n \"\"\"\n Returns a spotipy client connection based on the keys stored in the file \"config.py\"\n \n Output:\n - sp: spotipy client connection\n \"\"\"\n \n cid = config.acces_credentials.get('client_id')\n secret = config.acces_credentials.get('secret_id')\n \n client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)\n sp = spotipy.Spotify(client_credentials_manager= client_credentials_manager)\n \n return sp\n\n\n# get tracks features from a list of id's\n\ndef tracks_features(track_list, sp=get_spotify_connection()):\n \"\"\"\n Create a pandas dataframe with the features of the trakcs in the list.\n \n Input:\n -track_list: List of tracks id's\n -sp: spotipy client connection\n \n Output:\n -pandas dataframe with the features of the tracks list.\n \"\"\"\n \n # get list's len\n playlist_len = len(track_list)\n\n # define empty lists for the features\n track_id = []\n track_name = []\n duration_ms = []\n explicit = []\n track_popularity = []\n\n artist_name = []\n artist_id = []\n\n album_name = []\n album_id = []\n album_type = []\n release_date = []\n\n 
acousticness = []\n danceability = []\n energy = []\n instrumentalness = []\n key = []\n liveness = []\n loudness = []\n mode = []\n speechiness = []\n tempo = []\n time_signature = []\n valence = []\n\n artist_popularity = []\n artist_genres = []\n artist_followers = []\n\n\n # get features in batches\n for i in range(0,playlist_len,50):\n batch_list = track_list[i:i+50]\n playlist = sp.tracks(tracks=batch_list)\n\n for t in playlist['tracks']:\n # tracks characteristics\n track_id.append(t['id'])\n track_name.append(t['name'])\n duration_ms.append(t['duration_ms'])\n explicit.append(t['explicit'])\n track_popularity.append(t['popularity'])\n\n # artist characteristics\n artist_name.append(t['artists'][0]['name'])\n artist_id.append(t['artists'][0]['id'])\n\n # album characteristics\n album_name.append(t['album']['name'])\n album_id.append(t['album']['id'])\n album_type.append(t['album']['type'])\n release_date.append(t['album']['release_date'])\n\n # get tracks features\n batch_track_features = sp.audio_features(tracks=track_id[i:])\n\n # get artists fetures\n batch_artist_features = sp.artists(artist_id[i:])['artists']\n\n # add tracks' feature for the current batch \n acousticness = acousticness + [feature['acousticness'] for feature in batch_track_features]\n danceability = danceability + [feature['danceability'] for feature in batch_track_features]\n energy = energy + [feature['energy'] for feature in batch_track_features]\n instrumentalness = instrumentalness + [feature['instrumentalness'] for feature in batch_track_features]\n key = key + [feature['key'] for feature in batch_track_features]\n liveness = liveness + [feature['liveness'] for feature in batch_track_features]\n loudness = loudness + [feature['loudness'] for feature in batch_track_features]\n mode = mode + [feature['mode'] for feature in batch_track_features]\n speechiness = speechiness + [feature['speechiness'] for feature in batch_track_features]\n tempo = tempo + [feature['tempo'] for feature in batch_track_features]\n time_signature = time_signature + [feature['time_signature'] for feature in batch_track_features]\n valence = valence + [feature['valence'] for feature in batch_track_features]\n\n # add artists' feature for the current batch\n artist_popularity = artist_popularity + [feature['popularity'] for feature in batch_artist_features]\n artist_genres = artist_genres + [feature['genres'] for feature in batch_artist_features]\n artist_followers = artist_followers + [feature['followers']['total'] for feature in batch_artist_features]\n \n # convert to pandas dataframe\n df_playlist = pd.DataFrame({\n 'track_id': track_id,\n 'track_name': track_name,\n 'duration_ms': duration_ms,\n 'explicit': explicit,\n 'track_popularity': track_popularity,\n 'acousticness': acousticness,\n 'danceability': danceability,\n 'energy': energy,\n 'instrumentalness': instrumentalness,\n 'key': key,\n 'liveness': liveness,\n 'loudness': loudness,\n 'mode': mode,\n 'speechiness': speechiness,\n 'tempo': tempo,\n 'time_signature': time_signature,\n 'valence': valence,\n 'artist_name': artist_name,\n 'artist_id': artist_id,\n 'artist_popularity': artist_popularity,\n 'artist_genres': artist_genres,\n 'artist_followers': artist_followers,\n 'album_name': album_name,\n 'album_id': album_id,\n 'album_type': album_type,\n 'release_date': release_date\n })\n \n # add realise year\n df_playlist['release_year'] = df_playlist['release_date'].str[:4].astype(int)\n \n return df_playlist\n\n\n## get tracks' features from a playlist id\n\ndef 
playlist_features(playlist_id: str, sp=get_spotify_connection()):\n '''\n Create a pandas dataframe with the features of the trakcs extracted from a playlist.\n \n Input:\n -playlist_id: string with the playlist id.\n -sp: spotipy client\n \n Output:\n -pandas dataframe with the features of the tracks in the playlist.\n '''\n \n # empty list for ids\n ids_list = []\n \n # get playlist total len\n playlist_len = sp.playlist_tracks(playlist_id=playlist_id, limit=1)['total']\n \n # get playlist items in batches\n for i in range(0,playlist_len,50):\n # get playlist json\n batch_json = sp.playlist_tracks(playlist_id=playlist_id,\n limit=50, offset=i)\n # get batch id's\n ids_list = ids_list + [item['track']['id'] for item in batch_json['items']]\n \n # use tracks features function\n df_playlist = tracks_features(ids_list, sp)\n \n return df_playlist\n\n\n########################################################################\n\ndef main():\n\tpass\n\t\nif __name__ == \"__main__\":\n\tmain()","repo_name":"sergiogg94/spotify_recommender_system","sub_path":"src/data_wranling.py","file_name":"data_wranling.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13216836182","text":"'''\nTake two inputs from the user. One will be an integer. The other will be a float number. Then multiply them to display the output.\n'''\n\n\ndef input_to_number(): \n '''\n returning product of values\n '''\n first_input = input('Enter value: ')\n second_input = input('Enter value: ')\n\n result = int(first_input) * float(second_input)\n\n return result\n\n\nprint(input_to_number())","repo_name":"ak-alam/Python_Problem_Solving","sub_path":"user_input_2_number/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26338411493","text":"import pygame,random\r\nfrom pygame.locals import *\r\nfrom modules.vector2D import Vector2\r\nimport math\r\nfrom modules.systems import *\r\n\r\nWORLD_SIZE = Vector2(1000,600)\r\n\r\n# Turn these on or off to see the various particle systems\r\nSHOW = {\r\n \"snow\" : False,\r\n \"rain\" : False,\r\n \"smoke\": False,\r\n \"shower\" : False,\r\n \"fountain\" : True,\r\n \"cotton\" : True\r\n}\r\n \r\n\r\ndef main():\r\n # Pygame setup\r\n pygame.init()\r\n screen = pygame.display.set_mode(list(WORLD_SIZE))\r\n\r\n clock=pygame.time.Clock()\r\n \r\n # Arbitrary line to simulate a floor\r\n floor = pygame.Rect(0,505,WORLD_SIZE.x,WORLD_SIZE.y)\r\n \r\n # Create and simulate the smoke system \r\n smoke = SmokeSystem(Vector2(215, 500))\r\n smoke.simulate()\r\n \r\n # Create and simulate the rain system\r\n rain = RainSystem(Vector2(0,0))\r\n rain.setWidth(WORLD_SIZE.x * 2)\r\n rain.simulate()\r\n \r\n # Create and simulate the snow system\r\n snow = SnowSystem(Vector2(0,0)) \r\n snow.setWidth(WORLD_SIZE.x)\r\n snow.simulate()\r\n \r\n # Create and simulate the shower system\r\n shower = ShowerSystem(Vector2(615, 20))\r\n shower.simulate()\r\n \r\n fountain = FountainSystem(Vector2(615, 505))\r\n fountain.simulate()\r\n \r\n cotton = CottonwoodSystem(Vector2(0,500))\r\n cotton.setWidth(WORLD_SIZE.x)\r\n cotton.simulate()\r\n\r\n RUNNING = True\r\n while RUNNING:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n RUNNING = False\r\n elif event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n RUNNING = False\r\n elif event.key == K_1:\r\n 
SHOW[\"snow\"] = not SHOW[\"snow\"]\r\n elif event.key == K_2:\r\n SHOW[\"rain\"] = not SHOW[\"rain\"]\r\n elif event.key == K_3:\r\n SHOW[\"smoke\"] = not SHOW[\"smoke\"]\r\n elif event.key == K_4:\r\n SHOW[\"shower\"] = not SHOW[\"shower\"]\r\n elif event.key == K_5:\r\n SHOW[\"fountain\"] = not SHOW[\"fountain\"]\r\n elif event.key == K_6:\r\n SHOW[\"cotton\"] = not SHOW[\"cotton\"]\r\n \r\n elif event.key == K_SPACE:\r\n rain.toggleActiveAreaDraw()\r\n smoke.toggleActiveAreaDraw()\r\n snow.toggleActiveAreaDraw()\r\n shower.toggleActiveAreaDraw()\r\n fountain.toggleActiveAreaDraw()\r\n cotton.toggleActiveAreaDraw()\r\n \r\n\r\n # Tick tock\r\n seconds = min(0.04, clock.get_time() / 1000)\r\n \r\n # Fill the screen and draw the floor\r\n screen.fill((10, 10, 50))\r\n pygame.draw.rect(screen, (100,100,100), floor)\r\n \r\n # Show the systems\r\n if SHOW[\"rain\"]:\r\n rain.update(seconds)\r\n rain.draw(screen)\r\n \r\n if SHOW[\"smoke\"]:\r\n smoke.update(seconds)\r\n smoke.draw(screen)\r\n \r\n if SHOW[\"snow\"]:\r\n snow.update(seconds)\r\n snow.draw(screen)\r\n \r\n if SHOW[\"shower\"]:\r\n shower.update(seconds)\r\n shower.draw(screen)\r\n \r\n \r\n if SHOW[\"fountain\"]:\r\n fountain.update(seconds)\r\n fountain.draw(screen)\r\n \r\n if SHOW[\"cotton\"]:\r\n cotton.update(seconds)\r\n cotton.draw(screen)\r\n \r\n\r\n pygame.display.flip()\r\n clock.tick(60)\r\n \r\n pygame.quit()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"NicholasQNguyen/CSCI-319","sub_path":"16-ParticleEffects-Download/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27437437793","text":"from cs50 import SQL\nfrom flask import Flask, render_template, redirect, request, url_for\n\napp = Flask(__name__)\n\n# tell cs50 library to use sqlite and open a db file called froshims2\n\ndb = SQL(\"sqlite:///froshims2.db\")\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/register\", methods=[\"POST\"])\ndef register():\n if request.form[\"name\"] == \"\" or request.form[\"dorm\"] == \"\":\n return render_template(\"failure.html\")\n db.execute(\"INSERT INTO registrants (name, dorm) VALUES(:name, :dorm)\",\n name=request.form[\"name\"], dorm=request.form[\"dorm\"]) # : is often a placeholder convention that says 'this is a variable'\n return render_template(\"success.html\")","repo_name":"pszujewski/cs50_harvardedx","sub_path":"lecture_notes/sql_python.py","file_name":"sql_python.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12130103084","text":"## Find the most frequent element in an Array\r\nimport sys\r\n\r\n# Space complexity : O(n)\r\n# Time complexity : O(n)\r\n\r\narr = [1,2,2,3,3,3,3,4,4,4,5,5,5,6]\r\ndef nonRepeatingEelemnt1(arr):\r\n count = {}\r\n for i in arr:\r\n if i not in count:\r\n count[i] = 1\r\n else:\r\n count[i] += 1\r\n\r\n ans = []\r\n\r\n for num, cou in count.items():\r\n if cou ==1:\r\n ans.append(num)\r\n return ans\r\n\r\nnonRepeat = nonRepeatingEelemnt1(arr)\r\nprint(\"Non Repeating element in arr: \", 
nonRepeat)","repo_name":"Manasa-Shivarudra/Python-DataStructures-and-Coding","sub_path":"Arrays/Simple/nonRepeatingElements.py","file_name":"nonRepeatingElements.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40536403652","text":"\"\"\"\nProject : Homework8.3\n- Implement a two-way conversion calculator between Km and mile, which are units of distance.\n  This converter has entries where a distance in Km and a distance in Mile can be typed in,\n  labels that read Km and Mile are shown, and a \"Km->Mile\" button and a \"Km<-Mile\" button are displayed.\nAuthor : Ha-Rim Kim\nDate of last update : 2021.11.02.\n\"\"\"\n\nfrom tkinter import*\n\nclass Km_to_Mile:\n    def __init__(self, master):\n        frame=Frame(master)\n        frame.pack()\n        Label(frame, text='km').grid(row=0, column=1)\n        self.km_var=DoubleVar()\n        Entry(frame, textvariable=self.km_var).grid(row=0, column=0)\n        Label(frame, text='mile').grid(row=0, column=3)\n        self.mile_var=DoubleVar()\n        self.mile_var.set(0.0)\n        Entry(frame, textvariable=self.mile_var).grid(row=0, column=2)\n        button_km_to_mile=Button(frame, text='Km -> Mile', command=self.convert_km_to_mile)\n        button_km_to_mile.grid(row=1, column=0)\n        button_mile_to_km=Button(frame, text='Mile -> Km', command=self.convert_mile_to_km)\n        button_mile_to_km.grid(row=1, column=2)\n\n    def convert_km_to_mile(self):\n        km=self.km_var.get()\n        self.mile_var.set(km/1.60934)\n\n    def convert_mile_to_km(self):\n        mile=self.mile_var.get()\n        self.km_var.set(mile*1.60934)\n\n\n\nwindow=Tk()\nwindow.wm_title('Km < - > Mile Converter')\nkm_to_mile=Km_to_Mile(window)\nwindow.mainloop()","repo_name":"jaseok75/YU","sub_path":"2021/2학기/PythonClass/Homework8/Homework8.3.py","file_name":"Homework8.3.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3727005952","text":"from keras.layers import Concatenate, Dense, Flatten, Input\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard, LambdaCallback\nfrom rl.agents import DDPGAgent\nfrom rl.memory import SequentialMemory\nfrom rlsp.envs.action_norm_processor import ActionScheduleProcessor\nfrom rl.random import GaussianWhiteNoiseProcess\nfrom rlsp.agents.rlsp_agent import RLSPAgent\nfrom rlsp.utils.util_functions import create_simulator\nimport copy\nimport csv\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\nEPISODE_REWARDS = {}\n\n\nclass DDPG(RLSPAgent):\n    \"\"\"\n    RLSP DDPG Agent\n    This class creates a DDPG agent with params for RLSP\n    \"\"\"\n    def __init__(self, agent_helper):\n        self.agent_helper = agent_helper\n        self.create()\n        pass\n\n    def create(self):\n        \"\"\"Create the agent\"\"\"\n        assert len(self.agent_helper.env.action_space.shape) == 1\n        nb_actions = int(self.agent_helper.env.action_space.shape[0])\n\n        # set #nodes and #sfs based on env limits. 
used for splitting the output layer and action processor\n num_nodes = self.agent_helper.env.env_limits.MAX_NODE_COUNT\n num_sfcs = self.agent_helper.env.env_limits.MAX_SF_CHAIN_COUNT\n num_sfs = self.agent_helper.env.env_limits.MAX_SERVICE_FUNCTION_COUNT\n\n # create the actor NN\n observation_input = Input(shape=(1,) + self.agent_helper.env.observation_space.shape, name='observation_input')\n flattened_observation = Flatten()(observation_input)\n prev_layer = flattened_observation\n \n # create hidden layers according to config\n for num_hidden in self.agent_helper.config['actor_hidden_layer_nodes']:\n hidden_layer = Dense(num_hidden,\n activation=self.agent_helper.config['actor_hidden_layer_activation'])(prev_layer)\n prev_layer = hidden_layer\n # split output layer into separate parts for each node and SF and apply softmax individually\n out_parts = [Dense(num_nodes, activation='softmax')(prev_layer) for _ in range(num_nodes * num_sfs * num_sfcs)]\n # logger.info(f\"out_parts: {len(out_parts)}\")\n # logger.info(f\"out_parts: {out_parts}\")\n out = Concatenate()(out_parts)\n # normal output layer\n # out_test = Dense(nb_actions, activation='tanh')(prev_layer)\n # logger.info(f\"nb_actions: {len(out_test)}\")\n # logger.info(f\"nb_actions: {out_test}\")\n actor = Model(inputs=observation_input, outputs=out)\n\n # create the critic NN\n action_input = Input(shape=(nb_actions,), name='action_input')\n observation_input = Input(shape=(1,) + self.agent_helper.env.observation_space.shape, name='observation_input')\n flattened_observation = Flatten()(observation_input)\n prev_layer = Concatenate()([action_input, flattened_observation])\n # create hidden layers according to config\n for num_hidden in self.agent_helper.config['critic_hidden_layer_nodes']:\n hidden_layer = Dense(num_hidden,\n activation=self.agent_helper.config['critic_hidden_layer_activation'])(prev_layer)\n prev_layer = hidden_layer\n out_critic = Dense(1, activation='linear')(prev_layer)\n critic = Model(inputs=[action_input, observation_input], outputs=out_critic)\n\n # write NN summary to string\n actor_summary_lst = []\n actor.summary(print_fn=actor_summary_lst.append)\n actor_summary = \"\".join(actor_summary_lst)\n actor.summary(print_fn=logger.debug)\n\n # write NN summary to string\n critic_summary_lst = []\n critic.summary(print_fn=critic_summary_lst.append)\n critic_summary = \"\".join(critic_summary_lst)\n critic.summary(print_fn=logger.debug)\n\n # This following line is causing aliasing issues. 
Ex: 'nb_observation' is added to agent_config\n self.agent_helper.result.agent_config = copy.copy(self.agent_helper.config) # Set agent params in result file\n self.agent_helper.result.agent_config['nb_observation'] = self.agent_helper.env.observation_space.shape[0]\n self.agent_helper.result.agent_config['nb_actions'] = nb_actions\n\n self.agent_helper.result.agent_config['actor'] = {}\n self.agent_helper.result.agent_config['actor']['summary'] = actor_summary\n\n self.agent_helper.result.agent_config['critic'] = {}\n self.agent_helper.result.agent_config['critic']['summary'] = critic_summary\n self.agent_helper.result.agent_config['metrics'] = ['mae']\n\n # creating the Agent\n processor = ActionScheduleProcessor(num_nodes=num_nodes, num_sfcs=num_sfcs, num_sfs=num_sfs)\n memory = SequentialMemory(limit=self.agent_helper.config['mem_limit'],\n window_length=self.agent_helper.config['mem_window_length'])\n random_process = GaussianWhiteNoiseProcess(sigma=self.agent_helper.config['rand_sigma'],\n mu=self.agent_helper.config['rand_mu'], size=nb_actions)\n\n agent = DDPGAgent(nb_actions=nb_actions,\n actor=actor,\n critic=critic,\n critic_action_input=action_input,\n memory=memory,\n nb_steps_warmup_critic=self.agent_helper.config['nb_steps_warmup_critic'],\n nb_steps_warmup_actor=self.agent_helper.config['nb_steps_warmup_actor'],\n random_process=random_process,\n gamma=self.agent_helper.config['gamma'],\n target_model_update=self.agent_helper.config['target_model_update'],\n processor=processor,\n batch_size=64)\n agent.compile(Adam(lr=self.agent_helper.config['learning_rate'],\n decay=self.agent_helper.config['learning_rate_decay']), metrics=['mae'])\n self.agent = agent\n\n def fit(self, env, episodes, verbose, episode_steps, callbacks, log_interval, agent_id=-1):\n \"\"\"Mask the agent fit function\"\"\"\n self.agent_helper.callbacks = self.create_callbacks(self.agent_helper.graph_path, self.agent_helper.config_dir)\n # create additional, custom callback to store agent's episode rewards\n EPISODE_REWARDS[agent_id] = []\n reward_dict_callback = LambdaCallback(\n on_epoch_end=lambda epoch, logs: [\n EPISODE_REWARDS[agent_id].append(logs['episode_reward'])\n ]\n )\n self.agent_helper.callbacks.append(reward_dict_callback)\n steps = episodes * self.agent_helper.episode_steps\n self.agent.fit(env, steps, verbose=verbose, nb_max_episode_steps=episode_steps,\n callbacks=self.agent_helper.callbacks, log_interval=log_interval, nb_max_start_steps=0)\n\n def test(self, env, episodes, verbose, episode_steps, callbacks):\n \"\"\"Mask the agent fit function\"\"\"\n # Check to see if the test is called after training. 
Causes duplicate CSV headers\n # when agent is called only for testing.\n if self.agent_helper.train:\n # Create a fresh simulator with test argument\n self.agent_helper.env.simulator = create_simulator(self.agent_helper)\n self.agent_helper.callbacks = self.create_callbacks(self.agent_helper.graph_path, self.agent_helper.config_dir)\n self.agent.test(env, episodes, verbose=verbose,\n nb_max_episode_steps=episode_steps,\n callbacks=self.agent_helper.callbacks)\n\n def save_weights(self, file, overwrite=True):\n weights_file = f\"{file}weights.h5f\"\n dir_path = os.path.dirname(os.path.realpath(weights_file))\n os.makedirs(dir_path, exist_ok=True)\n\n # After training is done, we save the final weights in the result_base_path.\n logger.info(\"saving model and weights to %s\", weights_file)\n self.agent.save_weights(weights_file, overwrite)\n\n with open(f\"{file}model_critic.yaml\", \"w\") as critic_yaml:\n critic_yaml.write(self.agent.critic.to_yaml())\n with open(f\"{file}model_actor.yaml\", \"w\") as actor_yaml:\n actor_yaml.write(self.agent.actor.to_yaml())\n\n def load_weights(self, weights_file):\n self.agent.load_weights(f\"{weights_file}.h5f\")\n\n def create_callbacks(self, graph_id, config_dir):\n # Now we create a tensorboard callback. This logs the episode rewards to the tensorboard.\n tensorboard_callback = TensorBoard(log_dir=graph_id, write_graph=True, write_images=True)\n\n # To log the observation vector we add an other callback, which simply outputs the vector.\n logger_callback = LambdaCallback()\n\n # write the reward to a csv.\n run_reward_csv_writer = csv.writer(open(f\"{config_dir}run_reward.csv\", 'a+', newline=''))\n episode_reward_csv_writer = csv.writer(open(f\"{config_dir}episode_reward.csv\", 'a+', newline=''))\n run_reward_csv_writer.writerow(['run', 'reward']) # add a header\n episode_reward_csv_writer.writerow(['episode', 'reward']) # add a header\n\n reward_csv_callback = LambdaCallback(\n on_batch_end=lambda step, logs: [\n run_reward_csv_writer.writerow([step, logs['reward']])\n ],\n on_epoch_end=lambda epoch, logs: [\n episode_reward_csv_writer.writerow([epoch, logs['episode_reward']])\n ]\n )\n\n return [tensorboard_callback, logger_callback, reward_csv_callback]\n","repo_name":"DatLQ95/tue_drl_vnf","sub_path":"src/rlsp/agents/rlsp_ddpg.py","file_name":"rlsp_ddpg.py","file_ext":"py","file_size_in_byte":9632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"32569287987","text":"from datetime import time as time_sys, date as date_sys\nimport json\nfrom datetime import timedelta, datetime as datetime_sys\nimport msgpack\nimport os\nimport pytz\nimport re\nfrom slugify import slugify\nfrom socket import _GLOBAL_DEFAULT_TIMEOUT\nfrom typing import Any, Union, TypeVar, Callable, Sequence, Dict\nfrom urllib.parse import urlparse\nimport voluptuous as vol\n\n# Import Yombo libraries\nfrom yombo.core.exceptions import YomboInvalidValidation, YomboWarning\nfrom yombo.core.library import YomboLibrary\nfrom yombo.core.log import get_logger\nfrom yombo.constants import \\\n TEMP_FAHRENHEIT, TEMP_CELSIUS, MISC_UNIT_SYSTEM_METRIC, MISC_UNIT_SYSTEM_IMPERIAL, WEEKDAYS\nfrom yombo.lib.template import JinjaTemplate\nimport yombo.utils.datetime as dt\n\nlogger = get_logger(\"library.validate\")\n\n# typing typevar\nT = TypeVar(\"T\")\n\n\nTIME_PERIOD_ERROR = \"offset {} should be format 'HH:MM' or 'HH:MM:SS'\"\n\nRE_SANITIZE_FILENAME = re.compile(r\"(~|\\.\\.|/|\\\\)\")\nRE_SANITIZE_PATH = 
re.compile(r\"(~|\\.(\\.)+)\")\n\n\nclass Validate(YomboLibrary):\n \"\"\"\n Handles common validation tasks.\n \"\"\"\n #####################################################\n # Basic types\n @staticmethod\n def boolean(value: Any, coerce: Union[None, bool] = None) -> bool:\n \"\"\"Validate and maybe coerce a boolean value.\"\"\"\n if isinstance(value, bool):\n return value\n if coerce in (None, True):\n if isinstance(value, int):\n value = str(int)\n if isinstance(value, str):\n value = value.lower()\n if value in (\"1\", \"true\", \"yes\", \"on\", \"enable\"):\n return True\n if value in (\"0\", \"false\", \"no\", \"off\", \"disable\"):\n return False\n raise YomboInvalidValidation(\"Value is not a boolean and cannot be coerced.\")\n\n @staticmethod\n def string(value: Any) -> str:\n \"\"\"Coerce value to string, except for None.\"\"\"\n if value is not None:\n try:\n return str(value)\n except:\n pass\n raise YomboInvalidValidation(\"Couldn't make value a string.\")\n\n @staticmethod\n def basic_list(value: Union[T, Sequence[T]]) -> Sequence[T]:\n \"\"\"Wrap value in list if it is not one.\"\"\"\n if value is None:\n return []\n return value if isinstance(value, list) else [value]\n\n @staticmethod\n def basic_string(value, minimum: int = 1, maximum: int = 255):\n \"\"\" A short string with alphanumberic, spaces, and periods. \"\"\"\n if isinstance(value, str) and len(value) >= minimum and len(value) <= maximum:\n return value\n raise YomboInvalidValidation(\"Value is not a string, or doesn't fit within the required lengths.\")\n\n @staticmethod\n def basic_word(value):\n \"\"\" A single word. \"\"\"\n if value.strip().count(' ') == 1:\n return value\n raise YomboInvalidValidation(\"Value is not a single word.\")\n\n # Adapted from:\n # https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666\n @staticmethod\n def has_at_least_one_key(*keys: str) -> Callable:\n \"\"\"Validate that at least one key exists.\"\"\"\n\n def validate(obj: Dict) -> Dict:\n \"\"\"Test keys exist in dict.\"\"\"\n if not isinstance(obj, dict):\n raise YomboInvalidValidation(\"expected dictionary\")\n\n for k in obj.keys():\n if k in keys:\n return obj\n raise YomboInvalidValidation(f\"must contain one of {', '.join(keys)}.\")\n return validate\n\n @staticmethod\n def has_at_least_one_key_value(*items: list) -> Callable:\n \"\"\"Validate that at least one (key, value) pair exists.\"\"\"\n\n def validate(obj: Dict) -> Dict:\n \"\"\"Test (key,value) exist in dict.\"\"\"\n if not isinstance(obj, dict):\n raise YomboInvalidValidation(\"Expected dictionary\")\n\n for item in obj.items():\n if item in items:\n return obj\n raise YomboInvalidValidation(f\"must contain one of {str(items)}.\")\n return validate\n\n #####################################################\n # OS / File system items\n def is_device(value):\n \"\"\" Validate that value is a real device. 
\"\"\"\n        try:\n            os.stat(value)\n            return str(value)\n        except OSError:\n            raise YomboInvalidValidation(f\"No device at {value} found\")\n\n    @staticmethod\n    def is_dir(value: Any) -> str:\n        \"\"\"Validate that the value is an existing dir.\"\"\"\n        if value is None:\n            raise YomboInvalidValidation(\"not a directory\")\n        dir_in = os.path.expanduser(str(value))\n\n        if not os.path.isdir(dir_in):\n            raise YomboInvalidValidation(\"not a directory\")\n        if not os.access(dir_in, os.R_OK):\n            raise YomboInvalidValidation(\"directory not readable\")\n        return dir_in\n\n    @staticmethod\n    def is_file(value: Union[str, None]) -> str:\n        \"\"\"Validate that the value is an existing file.\"\"\"\n        if value is None:\n            raise YomboInvalidValidation(\"None is not a file\")\n        file_in = os.path.expanduser(str(value))\n\n        if not os.path.isfile(file_in):\n            raise YomboInvalidValidation(\"not a file\")\n        if not os.access(file_in, os.R_OK):\n            raise YomboInvalidValidation(\"file not readable\")\n        return file_in\n\n    #####################################################\n    # Time related items\n    @staticmethod\n    def time_zone(value):\n        \"\"\"Validate timezone.\"\"\"\n        try:\n            return pytz.timezone(value)\n        except pytz.exceptions.UnknownTimeZoneError:\n            raise YomboInvalidValidation(\"Invalid time zone passed in. Valid options can be found here: \"\n                                         \"http://en.wikipedia.org/wiki/List_of_tz_database_time_zones\")\n\n    @staticmethod\n    def time(value) -> time_sys:\n        \"\"\"Validate and transform a time.\"\"\"\n        if isinstance(value, time_sys):\n            return value\n\n        try:\n            time_val = dt.parse_time(value)\n        except TypeError:\n            raise YomboInvalidValidation(\"Not a parseable type\")\n\n        if time_val is None:\n            raise YomboInvalidValidation(f\"Invalid time specified: {value}\")\n\n        return time_val\n\n    @staticmethod\n    def date(value) -> date_sys:\n        \"\"\"Validate and transform a date.\"\"\"\n        if isinstance(value, date_sys):\n            return value\n\n        try:\n            date_val = dt.parse_date(value)\n        except TypeError:\n            raise YomboInvalidValidation(\"Not a parseable type\")\n\n        if date_val is None:\n            raise YomboInvalidValidation(\"Could not parse date\")\n\n        return date_val\n\n    @staticmethod\n    def time_period_str(value: str) -> timedelta:\n        \"\"\"Validate and transform time offset.\"\"\"\n        if isinstance(value, int):\n            raise YomboInvalidValidation(\"Make sure you wrap time values in quotes\")\n        elif not isinstance(value, str):\n            raise YomboInvalidValidation(TIME_PERIOD_ERROR.format(value))\n\n        negative_offset = False\n        if value.startswith(\"-\"):\n            negative_offset = True\n            value = value[1:]\n        elif value.startswith(\"+\"):\n            value = value[1:]\n\n        try:\n            parsed = [int(x) for x in value.split(\":\")]\n        except ValueError:\n            raise YomboInvalidValidation(TIME_PERIOD_ERROR.format(value))\n\n        if len(parsed) == 2:\n            hour, minute = parsed\n            second = 0\n        elif len(parsed) == 3:\n            hour, minute, second = parsed\n        else:\n            raise YomboInvalidValidation(TIME_PERIOD_ERROR.format(value))\n\n        offset = timedelta(hours=hour, minutes=minute, seconds=second)\n\n        if negative_offset:\n            offset *= -1\n\n        return offset\n\n    @staticmethod\n    def time_period_seconds(value: Union[int, str]) -> timedelta:\n        \"\"\"Validate and transform seconds to a time offset.\"\"\"\n        try:\n            return timedelta(seconds=int(value))\n        except (ValueError, TypeError):\n            raise YomboInvalidValidation(f\"Expected seconds, got {value}\")\n\n    #####################################################\n    # Yombo items\n    @staticmethod\n    def id_string(string, minimum: int = 1, maximum: int = 200):\n        \"\"\" Ensure value is a string, with at 
least 1 and at most 200 characters by default.\"\"\"\n        s = vol.Schema(vol.All(\n            str,\n            vol.Length(min=minimum, max=maximum),\n            vol.Match(r\"^[a-zA-Z_0-9. ]+$\")\n        ))\n        try:\n            return s(string)\n        except Exception as e:\n            raise YomboInvalidValidation(\"Provided ID contains invalid characters.\")\n\n    #####################################################\n    # Misc\n    @staticmethod\n    def template(value):\n        \"\"\"Validate a jinja2 template.\"\"\"\n        if value is None:\n            raise YomboInvalidValidation(\"template value is None\")\n        elif isinstance(value, str) is False:\n            raise YomboInvalidValidation(\"template value should be a string\")\n\n        value = JinjaTemplate(str(value))\n\n        try:\n            value.ensure_valid()\n            return value\n        except YomboWarning as e:\n            raise YomboInvalidValidation(f\"invalid template ({e})\")\n\n    @staticmethod\n    def url(url_in, protocols=None):\n        if protocols is None:\n            protocols = [\"http\", \"https\", \"sftp\", \"ftp\"]\n\n        if urlparse(url_in).scheme in protocols:\n            try:\n                return vol.Schema(vol.Url())(url_in)\n            except:\n                pass\n        raise YomboInvalidValidation(\"Invalid URL.\")\n\n    @staticmethod\n    def match_all(value):\n        \"\"\"Validate that matches all values.\"\"\"\n        return value\n\n    @staticmethod\n    def positive_timedelta(value: timedelta) -> timedelta:\n        \"\"\"Validate timedelta is positive.\"\"\"\n        if value < timedelta(0):\n            raise YomboInvalidValidation(\"Time period should be positive\")\n        return value\n\n    @staticmethod\n    def _slugify(text: str) -> str:\n        \"\"\"Slugify a given text.\"\"\"\n        return slugify(text, separator=\"_\")\n\n    def slug(self, value):\n        \"\"\"Validate value is a valid slug (aka: machine_label)\"\"\"\n        if value is None:\n            raise YomboInvalidValidation(\"Slug should not be None\")\n        value = str(value)\n        slg = self._slugify(value)\n        if value == slg:\n            return value\n        raise YomboInvalidValidation(f\"invalid slug {value} (try {slg})\")\n\n    @classmethod\n    def slugify(cls, value):\n        \"\"\"Coerce a value to a slug.\"\"\"\n        # print(\"going to try to slugify: %s\" % value)\n        if value is None:\n            raise YomboInvalidValidation(\"Slug should not be None\")\n        slg = cls._slugify(str(value))\n        if slg:\n            return slg\n        # print(\"can't make slug: %s\" % slg)\n        raise YomboInvalidValidation(f\"Unable to slugify {value}\")\n\n    @classmethod\n    def temperature_unit(cls, value) -> str:\n        \"\"\"Validate and transform temperature unit.\"\"\"\n        value = str(value).upper()\n        if value == \"C\":\n            return TEMP_CELSIUS\n        elif value == \"F\":\n            return TEMP_FAHRENHEIT\n        raise YomboInvalidValidation(\"invalid temperature unit (expected C or F)\")\n\n    unit_system = vol.All(vol.Lower, vol.Any(MISC_UNIT_SYSTEM_METRIC,\n                                             MISC_UNIT_SYSTEM_IMPERIAL))\n\n    @staticmethod\n    def time(value):\n        \"\"\"Validate time.\"\"\"\n        try:\n            return dt.time_from_string(value)[0]\n        except Exception:\n            raise YomboInvalidValidation(f\"YomboInvalidValidation time specified: {value}\")\n\n    @staticmethod\n    def datetime(value):\n        \"\"\"Validate datetime.\"\"\"\n        if isinstance(value, datetime_sys):\n            return value\n\n        try:\n            return dt.time_from_string(value)[0]\n        except Exception:\n            raise YomboInvalidValidation(f\"YomboInvalidValidation datetime specified: {value}\")\n\n    @staticmethod\n    def time_zone(value):\n        \"\"\"Validate timezone.\"\"\"\n        if dt.get_time_zone(value) is not None:\n            return value\n        raise YomboInvalidValidation(\n            \"YomboInvalidValidation time zone passed in. 
Valid options can be found here: \"\n \"http://en.wikipedia.org/wiki/List_of_tz_database_time_zones\")\n\n weekdays = vol.All(basic_list, [vol.In(WEEKDAYS)])\n\n @staticmethod\n def socket_timeout(value):\n \"\"\"Validate timeout float > 0.0.\n\n None coerced to socket._GLOBAL_DEFAULT_TIMEOUT bare object.\n \"\"\"\n if value is None:\n return _GLOBAL_DEFAULT_TIMEOUT\n else:\n try:\n float_value = float(value)\n if float_value > 0.0:\n return float_value\n raise YomboInvalidValidation(\"YomboInvalidValidation socket timeout value.\"\n \" float > 0.0 required.\")\n except Exception as e:\n raise YomboInvalidValidation(f\"YomboInvalidValidation socket timeout: {e}\")\n\n @staticmethod\n def x10_address(value):\n \"\"\"Validate an x10 address.\"\"\"\n regex = re.compile(r\"([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$\")\n if not regex.match(value):\n raise YomboInvalidValidation(\"YomboInvalidValidation X10 Address\")\n return str(value).lower()\n\n @staticmethod\n def basic_list(value: Union[T, Sequence[T]]) -> Sequence[T]:\n \"\"\"Wrap value in list if it is not one.\"\"\"\n if value is None:\n return []\n return value if isinstance(value, list) else [value]\n\n @classmethod\n def basic_list_csv(cls, value: Any) -> Sequence:\n \"\"\"Ensure that input is a list or make one from comma-separated string.\"\"\"\n if isinstance(value, str):\n return [member.strip() for member in value.split(\",\")]\n return cls.basic_list(value)\n\n @staticmethod\n def is_json(value):\n \"\"\"\n Determine if data is json or not.\n\n :param value:\n :return:\n \"\"\"\n try:\n json_object = json.loads(value)\n except:\n return False\n return True\n\n @staticmethod\n def is_msgpack(value):\n \"\"\"\n Helper function to determine if data is msgpack or not.\n\n :param mymsgpack:\n :return:\n \"\"\"\n try:\n json_object = msgpack.unpackb(value)\n except:\n return False\n return True\n\n # Validator helpers\n @staticmethod\n def key_dependency(key, dependency):\n \"\"\"Validate that all dependencies exist for key.\"\"\"\n\n def validator(value):\n \"\"\"Test dependencies.\"\"\"\n if not isinstance(value, dict):\n raise YomboInvalidValidation(\"key dependencies require a dict\")\n if key in value and dependency not in value:\n raise YomboInvalidValidation(f'dependency violation - key \"{key}\" requires key \"{dependency}\" to exist')\n\n return value\n\n return validator\n\n @classmethod\n def time_period_dict(cls):\n return vol.All(\n dict, vol.Schema({\n \"days\": vol.Coerce(int),\n \"hours\": vol.Coerce(int),\n \"minutes\": vol.Coerce(int),\n \"seconds\": vol.Coerce(int),\n \"milliseconds\": vol.Coerce(int),\n }),\n cls.has_at_least_one_key(\"days\", \"hours\", \"minutes\", \"seconds\", \"milliseconds\"),\n lambda value: timedelta(**value))\n\n @classmethod\n def time_period(cls):\n return vol.Any(cls.time_period_str, cls.time_period_seconds, timedelta, cls.time_period_dict)\n","repo_name":"yombo/yombo-gateway","sub_path":"yombo/lib/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":15810,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"21849428263","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 17 15:33:54 2020\n@author: ant_t\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport re\n\n\ndef get_stripe_Mz(\n Nx=None,\n Ny=None,\n Nz=None,\n eff_z=1,\n periodic_offset_x=0,\n periodic_offset_y=0,\n angle=0,\n interlayer_coupling=1,\n width_up=None,\n width_down=None,\n):\n M_data = np.zeros([3, Nx, Ny, Nz])\n period = width_up + 
width_down\n\n xgrid, ygrid = np.meshgrid(np.arange(0, Nx), np.arange(0, Ny))\n\n sign_M = interlayer_coupling\n for l in np.arange(0, eff_z):\n M_data[2, :, :, l] = sign_M * (\n -np.ones([Nx, Ny])\n + 2 * (np.mod(xgrid - np.mod(l, 2) * periodic_offset_x, period) < width_up)\n )\n sign_M = sign_M * interlayer_coupling\n\n return M_data\n\n\ndef get_circle_Mz(\n Nx=None,\n Ny=None,\n Nz=None,\n eff_z=1,\n offset_x=0,\n offset_y=0,\n interlayer_coupling=1,\n radius=0,\n):\n M_data = np.zeros([3, Nx, Ny, Nz])\n\n xgrid, ygrid = np.meshgrid(np.arange(-Nx / 2, Nx / 2), np.arange(-Ny / 2, Ny / 2))\n rgrid = ((xgrid + offset_x) ** 2 + (ygrid + offset_x) ** 2) ** 0.5\n\n sign_M = interlayer_coupling\n for l in np.arange(0, eff_z):\n M_data[2, :, :, l] = sign_M * (-np.ones([Nx, Ny]) + 2 * (rgrid < radius))\n sign_M = sign_M * interlayer_coupling\n\n return M_data\n\n\ndef write_ovf(\n filepath=None,\n lenx=None,\n leny=None,\n lenz=None,\n Nx=None,\n Ny=None,\n Nz=None,\n dataset=None,\n):\n\n if (np.size(dataset) == (Nx * Ny * Nz * 3)) and (\n (len(dataset.shape) == 4) or (len(dataset.shape) == 2)\n ):\n # change to (Nx*Ny*Nz,3)\n if len(dataset.shape) == 4:\n if dataset.shape[-1] != 3:\n dataset = np.transpose(dataset, (3, 2, 1, 0))\n dataset = np.reshape(dataset, (Nx * Ny * Nz, 3))\n elif len(dataset.shape) == 2:\n if dataset.shape[-1] != 3:\n dataset = np.transpose(dataset, (1, 0))\n\n # note that python writes \\n as \\r\\n in windows\n # mumax throws an error with \\r\n # newline=\\n forces python to write \\n as \\n in windows\n with open(filepath, \"w\", newline=\"\\n\") as f:\n f.write(u\"# OOMMF: rectangular mesh v1.0\\n\")\n f.write(u\"# Segment count: 1 \\n\")\n f.write(u\"# Begin: Segment \\n\")\n f.write(u\"# Begin: Header \\n\")\n f.write(u\"# Desc: Time (s) :0\\n\")\n f.write(u\"# Title: m \\n\")\n f.write(u\"# meshtype: rectangular \\n\")\n f.write(u\"# meshunit: m \")\n f.write(u\"\\n# xbase: \" + np.format_float_scientific(lenx, trim=\"0\") + \" \")\n f.write(u\"\\n# ybase: \" + np.format_float_scientific(leny, trim=\"0\") + \" \")\n f.write(u\"\\n# zbase: \" + np.format_float_scientific(lenz, trim=\"0\") + \" \")\n f.write(\n u\"\\n# xstepsize: \"\n + np.format_float_scientific(lenx / Nx, trim=\"0\")\n + \" \"\n )\n f.write(\n u\"\\n# ystepsize: \"\n + np.format_float_scientific(leny / Ny, trim=\"0\")\n + \" \"\n )\n f.write(\n u\"\\n# zstepsize: \"\n + np.format_float_scientific(lenz / Nz, trim=\"0\")\n + \" \"\n )\n f.write(u\"\\n# xmin: 0 \")\n f.write(u\"\\n# ymin: 0 \")\n f.write(u\"\\n# zmin: 0 \")\n f.write(u\"\\n# xmax: \" + np.format_float_scientific(lenx, trim=\"0\") + \" \")\n f.write(u\"\\n# ymax: \" + np.format_float_scientific(leny, trim=\"0\") + \" \")\n f.write(u\"\\n# zmax: \" + np.format_float_scientific(lenz, trim=\"0\") + \" \")\n f.write(u\"\\n# xnodes: %i \" % Nx)\n f.write(u\"\\n# ynodes: %i \" % Ny)\n f.write(u\"\\n# znodes: %i \" % Nz)\n f.write(u\"\\n# ValueRangeMinMag: 1e-08 \")\n f.write(u\"\\n# ValueRangeMaxMag: 1 \")\n f.write(u\"\\n# valueunit: \")\n f.write(u\"\\n# valuemultiplier: 1 \")\n f.write(u\"\\n# End: Header \")\n f.write(u\"\\n# Begin: Data Text \\n\")\n np.savetxt(f, dataset, fmt=\"%.2f\")\n f.write(u\"# End: Data Text \")\n f.write(u\"\\n# End: Segment\\n\")\n\n display(\"File sucessfully saved\")\n else:\n display(\"Size of dataset does not match simulation paramters\")\n\n # replacement strings\n \"\"\"\n WINDOWS_LINE_ENDING = b'\\r\\n'\n UNIX_LINE_ENDING = b'\\n'\n \n with open(filepath, 'rb') as open_file:\n content = 
open_file.read()\n \n content = content.replace(WINDOWS_LINE_ENDING, UNIX_LINE_ENDING)\n \n with open(filepath, 'wb') as open_file:\n open_file.write(content)\n \"\"\"\n\n\ndef read_ovf(filepath=None, data_sep=\" \", comment=\"#\", engine=\"c\"):\n \"\"\"\n Function to read .ovf files.\n \n Parameters\n ----------\n filepath: string\n The path of the .ovf file. Default is none\n datasep: string\n The separator of the numbers. Default is ' '\n comment: string\n The symbol defining a commented line. Default is '#'\n engine: string, 'c' or 'python'\n The engine used by pandas to read the data. Default is 'c'\n \n Returns\n -------\n data: np.ndarray\n The 4d matrix containing the coordinates. It is arranged as (zcoord, xcoord, ycoord, component).\n header_dictionary: dict\n A dictionary containing the metadata in the .ovf header\n \"\"\"\n\n with open(filepath) as file:\n keepreading = True\n header_dictionary = {}\n while keepreading:\n line = file.readline()\n # Keep reading as long as the line starts with the comment symbol\n if line.startswith(comment):\n line = line[\n 1:\n ].strip() # Remove trailing white spaces and the comment symbol\n split = line.split(\":\", maxsplit=1) # Split on the first colon\n # Use the first list element as a dictionary key\n # and use the second element of the list as value\n key = split[0].strip()\n value = split[1].strip()\n try:\n value = float(\n value\n ) # Attempt to convert the string into float, if possible\n # Lazy solution to cast to int if the float is an integer number.\n # helps to feed the number straight into linspace without recasting by hand\n if value.is_integer():\n value = int(value)\n except:\n pass\n header_dictionary[key] = value\n else:\n keepreading = False\n\n # Load the data as a pandas dataframe and convert it into numpy array\n data = pd.read_csv(\n filepath,\n sep=data_sep,\n comment=comment,\n usecols=[0, 1, 2],\n skip_blank_lines=True,\n header=None,\n engine=engine,\n ).values\n\n # Reshape the columns into 3D matrices with shape (znodes, xnodes, ynodes, 3)\n shape = (\n header_dictionary[\"xnodes\"],\n header_dictionary[\"ynodes\"],\n header_dictionary[\"znodes\"],\n 3,\n )\n data = data.reshape(shape, order=\"F\").transpose(2, 1, 0, 3)\n\n return data, header_dictionary\n\n\n\"\"\"\nclass ovf_data:\n def __init__(self):\n self._Begin = None\n \n def read_ovf(self,ovf_file):\n \n print('loading file...')\n with open(ovf_file,'r') as ovf_file_handle:\n self._Begin=None\n \n #Retrieve header information\n while self._Begin != 'Data Text': #last line of header info\n line = ovf_file_handle.readline()\n self._assign_headers(line) \n \n #create data array\n self.data=np.zeros((self.Nx*self.Ny*self.Nz,3))\n \n #Retrieve OVF data \n for line_ind, line in zip(np.arange(0,self.Nx*self.Ny*self.Nz),ovf_file_handle):\n self.data[line_ind,:]=np.fromstring(line,dtype=float,count=3,sep=' ')\n \n self.data=self.data.reshape((self.Nx,self.Ny,self.Nz,3),order='F').transpose(1,0,2,3) \n \n #Retrieve OVF data via pandas (slower?) 
\n\n self.data=pd.read_csv(ovf_file,sep='\\s+',skiprows=data_line, \\\n skipfooter=2, index_col=None,header=None,engine='python').to_numpy()\\\n .reshape((self.Nx,self.Ny,self.Nz,3),order='F').transpose(1,0,2,3)\n\n print('done!') \n \n def _assign_headers(self, line):\n #regular expression pattern: # xyz: 123\n header_exp=re.compile(r'#\\s(\\D*):\\s(.*)')\n info=header_exp.findall(line)\n \n if np.size(info) == 2:\n if info[0][0] == 'meshunit':\n self.xyzunit =info[0][1]\n elif info[0][0] == 'xstepsize':\n self.xstep = float(info[0][1])\n elif info[0][0] == 'ystepsize':\n self.ystep= float(info[0][1])\n elif info[0][0] == 'zstepsize':\n self.zstep= float(info[0][1])\n elif info[0][0] == 'xnodes':\n self.Nx= int(info[0][1])\n elif info[0][0] == 'ynodes':\n self.Ny= int(info[0][1])\n elif info[0][0] == 'znodes':\n self.Nz= int(info[0][1])\n elif info[0][0] == 'valueunit':\n self.Dunit= info[0][1]\n elif info[0][0] == 'ValueRangeMaxMag':\n self.Dmax= float(info[0][1])\n elif info[0][0] == 'ValueRangeMinMag':\n self.Dmin= float(info[0][1])\n elif info[0][0] == 'Begin':\n self._Begin=info[0][1]\n \"\"\"\n","repo_name":"pikestefan/nv_libraries","sub_path":"nvlibraries/f_ovf.py","file_name":"f_ovf.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"288104388","text":"from psana import dgram\nimport os\n\nclass TestDgramInit:\n\n def testInvalidEmptyDgram(self):\n raised = False\n try:\n d = dgram.Dgram()\n except RuntimeError:\n raised = True\n assert raised\n\n def testInvalidFileDescriptor(self):\n raised = False\n try:\n d = dgram.Dgram(file_descriptor=42)\n except OSError:\n raised = True\n assert raised\n\n def testInvalidSequentialRead(self):\n \"\"\" prevent reading data dgram without config \"\"\"\n dir_path = os.path.dirname(os.path.realpath(__file__))\n full_path = os.path.join(dir_path, \"smd.xtc2\")\n if os.path.isfile(full_path):\n fd = os.open(full_path, os.O_RDONLY)\n d = dgram.Dgram(file_descriptor=fd)\n raised = False\n try: \n another_d = dgram.Dgram(file_descriptor=fd)\n except StopIteration:\n raised = True\n assert raised\n\ndef run():\n test = TestDgramInit()\n test.testInvalidEmptyDgram()\n test.testInvalidFileDescriptor()\n test.testInvalidSequentialRead() \n\nif __name__ == \"__main__\":\n run()\n","repo_name":"slac-lcls/lcls2","sub_path":"psana/psana/tests/test_dgraminit.py","file_name":"test_dgraminit.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"69906063367","text":"\nimport cv2 \nimport numpy as np \n#from matplotlib import pyplot as pypt\nimport matplotlib.pyplot as plt\n\n# 1.Reading and Displaying the Image \nimg=cv2.imread('dark.jpg',0) #reading image #correction_image3.jpg, Correction2.jpg, Scene.jpg\ncv2.imshow('Image',img) \ncv2.waitKey(0)\n\n\n# 2.Know the dimension of an image \ndimensions = img.shape\nprint('Image Dimension : ',dimensions)\n#Image Dimension : (70, 105)\n\n\n\n#Resizing\n#resize = cv2.resize(img, (210,140),interpolation = cv2.INTER_NEAREST)\nresize = cv2.resize(img, (320,480),interpolation = cv2.INTER_NEAREST)\ncv2.imshow('resize', resize)\ncv2.waitKey(0)\nimg=resize\n\n\n#3.Histogram equalization Transformation:considers the global contrast of the image\neq=cv2.equalizeHist(img)\n# stacking images side-by-side\nres=np.hstack((img, eq))\n\n#comparing input & output images side by side\ncv2.imshow('Equalized Image',res)\n#showing 
the comparision\nplt.hist(img.ravel(),256,[0,256]) #plotting histogram for input image pypt.show() #showing histogram \nplt.hist(eq.ravel(),256,[0,256]) #plotting histogram for output image pypt.show() #showing histogram\nplt.show() #showing histogram\n\ncv2.waitKey(0)\n\n\n#4.CLAHE (Contrast Limited Adaptive Histogram Equalization)\n #image is divided into small blocks called \"tiles\"\n #(tileSize is 8x8 by default in OpenCV).\n# Then each of these blocks are histogram equalized as usual.\n #So in a small area, histogram would confine to a small region\n #(unless there is noise). If noise is there, it will be amplified.\n #To avoid this, contrast limiting is applied. If any histogram bin is\n# above the specified contrast limit (by default 40 in OpenCV), those\n# pixels are clipped and distributed uniformly to other bins before applying\n# histogram equalization. After equalization,\n# to remove artifacts in tile borders, bilinear interpolation is applied.\n\n #CLAHE operates on small regions in the image, called tiles, rather\n# than the entire image. The neighboring tiles are then combined using\n #bilinear interpolation to remove the artificial boundaries.\n\n# create a CLAHE object (Arguments are optional).\n\n# The declaration of CLAHE \n# clipLimit -> Threshold for contrast limiting \nclahe = cv2.createCLAHE(clipLimit = 3 ) #clipLimit – This parameter sets the threshold for contrast limiting. The default value is 40.\nfinal_img = clahe.apply(img) \ncv2.imshow(\"CLAHE image\", final_img)\ncv2.waitKey(0)\n\n\n# stacking images side-by-side\nres=np.hstack((eq, final_img))\n#comparing input & output images side by side\ncv2.imshow('Equalized vs CLAHE Image',res)\n\n\n\n#Logarithmic Transformation: \n\n#img = cv2.imread('correction_image3.jpg',0) #reading image \nc = 255/(np.log(1 + np.max(img))) #log transformation\n# Apply log transformation method \noutput = np.log(img + 1) * c \noutput = np.array(output, dtype = np.uint8) #int datatype \n#cv2.imshow('log_transformed.jpg', output) #showing image\n\n#pypt.hist(img.ravel(),256,[0,256]) \n#pypt.show() \n#pypt.hist(output.ravel(),256,[0,256]) \n#pypt.show() \n# Display both images \n\n# stacking images side-by-side\nres=np.hstack((img, output))\n\n#comparing input & output images side by side\ncv2.imshow('log_transformed Image',res)\n\ncv2.waitKey(0)\n\n\n#Power law transformation: \n\n#Give desired gamma value in the shell \ngvalue=float(input(\"Enter gamma value:\")) \n#int datatype \noutput = np.array(255*(img / 255) ** gvalue, dtype = np.uint8) \n#cv2.imshow(\"Gamma Transformation\", output) #showing image\ncv2.waitKey(0)\n# stacking images side-by-side\nres=np.hstack((img, output))\n\n#comparing input & output images side by side\ncv2.imshow('Gamma_transformed Image',res)\n\n\n#plt.hist(img.ravel(),256,[0,256]) #plotting histograms pypt.show() \n#plt.hist(output.ravel(),256,[0,256]) \n#plt.show()\n\n#plt.imshow(output, cmap='gray')\n#plt.show()","repo_name":"arjun0200/Digital-Image-Processing","sub_path":"img_enhancement.py","file_name":"img_enhancement.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9383064512","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nsc\n\n\n# In[8]:\n\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"london_crime\").getOrCreate()\n\n\n# In[9]:\n\n\nspark\n\n\n# In[93]:\n\n\ndf = spark.read.format(\"csv\").option(\"header\", 
True).option('inferSchema',True).load(\"londom_crime.csv\")\n\n\n# In[95]:\n\n\ndf.printSchema()\ndf.take(10)\n\n\n# In[96]:\n\n\ndf.count()\n\n\n# In[97]:\n\n\n# keep the result of dropping rows with nulls (assumed intent; the original called df.drop('NA') without assigning it)\ndf_new = df.na.drop()\n\n\n# In[98]:\n\n\ndf_new.count()\n\n\n# In[99]:\n\n\ndf = df_new.drop(\"lsoa_code\")\n\n\n# In[100]:\n\n\ndf.take(10)\n\n\n# In[101]:\n\n\ntotaleBorough = df.select('borough').distinct()\ntotaleBorough.show()\n\n\n# In[102]:\n\n\ntotaleBorough.count()\n\n\n# In[103]:\n\n\nhackeneydata = df.filter(df['borough']=='Hackney')\nhackeneydata.count()\n\n\n# In[104]:\n\n\nyearfilterdf = df.filter((df['year'] == '2016') | (df['year'] == '2015'))\nyearfilterdf.count()\n\n\n# In[105]:\n\n\n#using isin operator\nyearIsInfilterdf=df.filter((df['year']).isin(['2015','2016']))\nyearIsInfilterdf.count()\n\n\n# In[106]:\n\n\ndf.filter(df['year'] >= 2014).show()\n\n\n# In[107]:\n\n\n## performing aggregation on data\n#group all convictions by borough\ndf.groupBy('borough').count().show()\n\n\n# In[108]:\n\n\nboroughValueSum = df.groupBy('borough').agg({'value':'sum'})\nboroughValueSum = df.groupBy('borough').agg({'value':'sum'}).withColumnRenamed('sum(value)','conviction')\nboroughValueSum.show()\n\n\n# In[109]:\n\n\ntotalconviction = boroughValueSum.agg({'conviction':'sum'})\n\n\n# In[110]:\n\n\ntotalconviction.show()\n\n\n# In[111]:\n\n\ntotalconcivtionno=totalconviction.collect()[0][0]\n\n\n# In[112]:\n\n\ntotalconcivtionno\n\n\n# In[117]:\n\n\n#find % of conviction on per borough basis\nprocesseddf = boroughValueSum.withColumn('percent',(boroughValueSum.conviction/totalconcivtionno)*100)\nprocesseddf.orderBy(processeddf[2].desc()).show()\n\n\n# In[118]:\n\n\ndf.take(10)\n\n\n# In[122]:\n\n\nmonthlyGroupeddf = df.filter(df['year'] == 2014).groupBy(df['month']).agg({'value':'sum'}).withColumnRenamed('sum(value)','total')\n\n\n# In[129]:\n\n\ntotalcovictions = monthlyGroupeddf.agg({'total':'sum'}).collect()[0][0]\n\n\n# In[130]:\n\n\ntotalcovictions\n\n\n# In[132]:\n\n\nmonthlyGroupeddf.withColumn('percent',(monthlyGroupeddf.total/totalcovictions)*100).show()\n\n\n# In[137]:\n\n\nimport pyspark.sql.functions as func\n\n\n# In[149]:\n\n\nupdateddf = monthlyGroupeddf.withColumn('percent',func.round((monthlyGroupeddf.total/totalcovictions)*100,2))\nupdateddf.printSchema()\nupdateddf.show()\n\n\n# In[150]:\n\n\n#other aggregations\n## convictions based on category in london\ndf.show()\n\n\n# In[156]:\n\n\ndf.groupBy('major_category').agg({'value':'sum'}).withColumnRenamed('sum(value)','totalValue').orderBy('totalValue').show()\n\n\n# In[159]:\n\n\n#use of min and max aggregations\ndf_year = df.select('year')\ndf_year.agg({'year':'min'}).show()\ndf_year.agg({'year':'max'}).show()\n\n\n# In[161]:\n\n\ndf_year.describe().show()\n\n\n# In[163]:\n\n\ndf.crosstab('borough','major_category').show()\n\n\n# In[164]:\n\n\ndf.crosstab('borough','major_category').select('borough_major_category','Burglary','Fraud or Forgery','Robbery').show()\n\n\n# In[166]:\n\n\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\n# In[179]:\n\n\ndef describe_year(year):\n    filtereddf = df.filter(df['year'] == year).groupBy('borough').agg({'value':'sum'}).withColumnRenamed('sum(value)','convictions')\n    burough_list = [x[0] for x in filtereddf.toLocalIterator()]\n    conviction_list = [x[1] for x in filtereddf.toLocalIterator() ]\n    \n    plt.figure(figsize=(33,10))\n    plt.title('Crime for the year:'+year,fontsize=30)\n    plt.xlabel('Boroughs',fontsize=30)\n    plt.ylabel('Convictions',fontsize=30)\n\n    plt.xticks(rotation=90,fontsize=30)\n    plt.yticks(fontsize=30)\n    plt.autoscale()\n    \n    plt.bar(burough_list,conviction_list)\n    plt.show()\n    \n    \n    \n\n\n# In[197]:\n\n\ndef describe_year(year):\n    filtereddf = df.filter(df['year'] == year).groupBy('borough').agg({'value':'sum'}).withColumnRenamed('sum(value)','convictions')\n    burough_list = [x[0] for x in filtereddf.toLocalIterator()]\n    conviction_list = [x[1] for x in filtereddf.toLocalIterator() ]\n    \n    plt.figure(figsize=(33,10))\n    plt.title('Crime for the year:'+year,fontsize=30)\n    plt.xlabel('Boroughs',fontsize=30)\n    plt.ylabel('Convictions',fontsize=30)\n\n    plt.xticks(rotation=90,fontsize=30)\n    plt.yticks(fontsize=30)\n    plt.autoscale()\n    \n    plt.bar(burough_list,conviction_list)\n    plt.show()\n    \n    \n\n\n# In[199]:\n\n\ndescribe_year('2014')\n\n","repo_name":"jayshreechaudhary52/spark","sub_path":"spark2.1_London_crime_analysis.py","file_name":"spark2.1_London_crime_analysis.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41885860888","text":"from collections import Counter\r\n\r\ndef read_fish():\r\n    file_name = \"Data/day6.txt\"\r\n    file = open(file_name, \"r\")\r\n    \r\n    for line in file:\r\n        return list(map(int, line.split(\",\")))\r\n    \r\ndef simulate_fish(fish, days):\r\n    fish = Counter(fish)\r\n    \r\n    for t in range(days):\r\n        new_fish = {}\r\n        for i in range(1,9):\r\n            new_fish[i-1] = fish.get(i, 0)\r\n        \r\n        new_fish[6] = new_fish.get(6, 0) + fish.get(0, 0)\r\n        new_fish[8] = new_fish.get(8, 0) + fish.get(0, 0)\r\n        fish = new_fish\r\n        \r\n    return sum(fish.values())\r\n    \r\nif __name__ == \"__main__\":\r\n    fish = read_fish()\r\n    print(f\"Part one: {simulate_fish(fish, 80)}\")\r\n    print(f\"Part two: {simulate_fish(fish, 256)}\")\r\n    ","repo_name":"HarrisonGreen/Advent-of-Code-2021","sub_path":"Scripts/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35768515934","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json, re\n# from urllib.parse import quote\nfrom zhihuPro.items import *\nfrom scrapy_splash import SplashRequest\n\n# from scrapy_splash import SplashResponse\n\nscript = \"\"\"\nfunction main(splash, args)\n    splash:go(\"https://www.zhihu.com\")\n    btn = splash:select(\".SignContainer-switch span\")\n    btn:mouse_click()\n    splash:wait(3)\n    input = splash:select(\".SignFlow-accountInput input\")\n    input:send_text(\"18235056870\")\n    splash:wait(1)\n    pwd = splash:select(\".SignFlow-password input\")\n    pwd:send_text(\"lwy123456\")\n    splash:wait(1)\n    loginBtn = splash:select(\".SignFlow-submitButton\")\n    loginBtn:mouse_click()\n    splash:wait(3)\n    return splash:get_cookies()\nend\n\"\"\"\n\n\nclass ZhihuSpider(scrapy.Spider):\n    name = 'zhihu'\n    allowed_domains = ['zhihu.com']\n    base_url = 'https://www.zhihu.com/'\n    cookie = None\n    myheaders = {\n        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n    }\n\n    # dzb632816\n    def start_requests(self):\n        try:\n            with open('cookie.json', 'r') as f:\n                self.cookie = json.loads(f.read())\n            yield scrapy.Request(\n                url=self.base_url,\n                callback=self.parse_page, cookies=self.cookie)\n        except:\n            yield SplashRequest(url=self.base_url, callback=self.parse_login, endpoint='execute', args={\n                'lua_source': script,\n            })\n\n    def parse_login(self, response):\n        py_list = json.loads(response.text)\n        # print(type(py_list))\n        cookie = {i['name']: i['value'] for i in 
        # print(type(py_list))\n        cookie = {i['name']: i['value'] for i in py_list}\n        with open('cookie.json', 'w') as file:\n            json.dump(cookie, file, ensure_ascii=False, indent=4)\n        yield scrapy.Request(url=self.base_url, callback=self.parse_page, cookies=cookie)\n\n    def parse_page(self, response):\n        # 18235056870 lwy123456\n        # api_url = 'https://www.zhihu.com/api/v3/feed/topstory/recommend?session_token=94b179cd0fb867a3729b0af574a1a77d&desktop=true&limit=7&action=down&after_id=0'\n        # https://www.zhihu.com/api/v3/feed/topstory/recommend?session_token=94b179cd0fb867a3729b0af574a1a77d&desktop=true&limit=7&action=down&after_id=13\n        session_token = response.xpath('.').re_first('.*?session_token=(.*?)&')\n        api_url = 'https://www.zhihu.com/api/v3/feed/topstory/recommend?session_token={}&desktop=true&limit=7&action=down&after_id=0'.format(session_token)\n        yield scrapy.Request(url=api_url, callback=self.parse_json, cookies=self.cookie)\n\n    def parse_json(self, response):\n        datas = json.loads(response.text)\n        for i in datas['data']:\n            question_item = ZhihuproItem()\n            # with open('bug.json', 'w') as file:\n            #     json.dump(i['target'], file, ensure_ascii=False, indent=4)\n            if 'question' in i['target'].keys():\n                # question author id\n                question_item['author_id_id'] = i['target']['author']['id']\n                if question_item['author_id_id'] != '0':\n                    author_url = 'https://api.zhihu.com/people/' + question_item['author_id_id']\n                    # the author is a foreign-key target, so fetch it synchronously with requests\n                    resp = requests.get(author_url,headers=self.myheaders,cookies=self.cookie)\n                    # print(resp.text)\n                    yield self.parse_author(resp.text)\n                # question id\n                question_item['id'] = i['target']['question']['id']\n                # title\n                question_item['title'] = i['target']['question']['title']\n                # question creation time\n                question_item['pub_time'] = i['target']['question']['created']\n                # number of answers to the question\n                question_item['answer_count'] = i['target']['question']['answer_count']\n                # number of followers\n                question_item['follower_count'] = i['target']['question']['follower_count']\n                # question description\n                question_item['intro'] = i['target']['question']['excerpt']\n\n                yield question_item\n\n                # question tags\n                # https: // www.zhihu.com / question / 296894584\n                tags_url = 'https://www.zhihu.com/question/' + str(question_item['id'])\n                yield scrapy.Request(url=tags_url, callback=self.parse_tag, meta={'que_id': question_item['id']})\n                # answers\n                answer_url = 'https://www.zhihu.com/api/v4/questions/' + str(question_item[\n                    'id']) + '/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_labeled%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics&limit=20&offset=0&sort_by=default'\n                yield scrapy.Request(url=answer_url, callback=self.parse_answer)\n                # fetch the question author's info\n\n            # break\n            # print(que_author_id,id,title,pub_time,answer_count,follower_count,intor,tags)\n        # next page\n        # yield scrapy.Request(url=datas['paging']['next'],callback=self.parse_json)\n\n    def parse_answer(self, response):\n        answer = json.loads(response.text)\n        for a in answer['data']:\n            answer_item = ZhihuAnswerItem()\n            # answer id\n            answer_item['id'] = a['id']\n            # question id\n            answer_item['qid_id'] = a['question']['id']\n            # author id\n            answer_item['ahthor_id_id'] = a['author']['id']\n            if answer_item['ahthor_id_id'] != \"0\":\n                # author url, used to fetch the author's info\n
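                # Note (added): requests.get() below blocks Scrapy's async engine;\n                # it is used so the author item exists before the answer that\n                # references it. A non-blocking sketch (parse_author_response is\n                # hypothetical, not part of this spider):\n                #   yield scrapy.Request(author_url, callback=self.parse_author_response)\n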
                author_url = 'https://api.zhihu.com/people/' + answer_item['ahthor_id_id']\n                print(author_url)\n                resp = requests.get(author_url,headers=self.myheaders, cookies=self.cookie)\n                yield self.parse_author(resp.text)\n            # content\n            answer_item['content'] = a['content']\n            # print('--------------------------',answer_item['content'])\n            # publish time\n            answer_item['pub_time'] = a['created_time']\n            # update time\n            answer_item['update_time'] = a['updated_time']\n            # upvote count\n            answer_item['endorse'] = a['voteup_count']\n            # comment count\n            answer_item['comment_num'] = a['comment_count']\n\n            yield answer_item\n            # comments\n            comment_url = 'https://www.zhihu.com/api/v4/answers/{}/root_comments?include=data%5B*%5D.author%2Ccollapsed%2Creply_to_author%2Cdisliked%2Ccontent%2Cvoting%2Cvote_count%2Cis_parent_author%2Cis_author&order=normal&limit=30&offset=0&status=open'.format(\n                answer_item['id'])\n            print(comment_url)\n            yield scrapy.Request(url=comment_url, callback=self.parse_comment,meta={'ans_id':answer_item['id']})\n\n\n            # break\n        # next page\n        # yield scrapy.Request(url=answer['paging']['next'],callback=self.parse_json)\n    def parse_comment(self, response):\n        comment_list = json.loads(response.text)['data']\n        # print(comment_list)\n        uid = response.meta['ans_id']\n        for comment in comment_list:\n            comment_item = ZhihuCommentItem()\n            comment_item['id'] = comment['id']\n            # answer id\n            comment_item['aid_id'] = uid\n            comment_item['author_id_id'] = comment['author']['member']['id']\n            if comment_item['author_id_id'] != \"0\":\n                # author url, used to fetch the author's info\n                author_url = 'https://api.zhihu.com/people/' + comment_item['author_id_id']\n                resp = requests.get(author_url,headers=self.myheaders, cookies=self.cookie)\n                yield self.parse_author(resp.text)\n            # print(info)\n            comment_item['author_name'] = comment['author']['member']['name']\n            # creation time\n            comment_item['pub_time'] = comment['created_time']\n            comment_item['content'] = comment['content']\n            comment_item['like_num'] = comment['vote_count']\n            # child comment count\n            comment_item['child_comment_count'] = comment['child_comment_count']\n            # keep only each child comment's author name and content\n            comment_item['child_comments'] = str({s['author']['member']['name']:s['content'] for s in comment['child_comments']})\n            # print(comment_item)\n            yield comment_item\n\n\n    def parse_author(self, res):\n        author = json.loads(res)\n        print(author)\n        if 'error' in author.keys():\n            print('the user does not exist')\n            return False\n        author_item = ZhihuAuthorItem()\n        author_item['id'] = author['id']\n        author_item['name'] = author['name']\n        # gender\n        author_item['gender'] = author['gender']\n        # place of residence\n        if 'location' in author.keys():\n            author_item['address'] = author['location'][0]['name']\n            author_item['add_desc'] = author['location'][0]['introduction']\n        else:\n            # no place of residence available\n            author_item['address'] = 'none'\n            author_item['add_desc'] = 'none'\n\n        author_item['headline'] = author['headline']\n        # bio\n        author_item['intro'] = author['description']\n        # education\n        if 'education' in author.keys():\n            author_item['school'] = author['education'][0]['name']\n\n        else:\n            author_item['school'] = 'none'\n\n        # occupation\n        author_item['jobname'] = author['business']['name']\n        # number of answers written\n        author_item['answer_count'] = author['answer_count']\n        # number of questions asked\n        author_item['question_count'] = author['question_count']\n        # follower count\n        author_item['follower_count'] = author['follower_count']\n        # print(author_item)\n        return author_item\n\n    def parse_tag(self, response):\n        re_obj = re.compile('\"topics\":(\\[.*?\\]),', re.S)\n        tags = re.findall(re_obj, response.text)\n        tag_list = json.loads(tags[0])\n
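        # Example (added, illustrative): for markup containing\n        #   \"topics\":[{\"id\":\"19552832\",\"name\":\"Python\"}],\n        # the regex captures the bracketed list and json.loads parses it.\n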
        # print('===============================', tag_list)\n        # many-to-many relation between questions and tags\n        qid = response.meta['que_id']\n        # extract the tags\n        for tag in tag_list:\n            tag_item = ZhihuTagsItem()\n            many_item = ZhihuManyItem()\n            tag_item['id'] = tag['id']\n            tag_item['name'] = tag['name']\n            tag_item['intro'] = tag['excerpt']\n            tag_item['tag_url'] = tag['url']\n            many_item['question_id'] = qid\n            many_item['tags_id'] = tag['id']\n            yield tag_item\n            yield many_item\n            # print(tag_item)\n            # yield tag_item\n","repo_name":"yuemeiss/database","sub_path":"june_爬虫/zhihuPro/zhihuPro/spiders/zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":11145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19492920915","text":"\n\ndef collatz(number) -> int:\n    if number % 2 == 0:\n        print(number, \"// 2 = \", number // 2)\n        return number // 2\n    else:\n        print(\"3 *\", number, \"+ 1 = \", 3 * number + 1)\n        return 3 * number + 1\n\n\n# e.g. starting from 6: 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1\nuserNumber = int(input('Enter an integer: '))\n\nwhile userNumber != 1:\n    userNumber = collatz(userNumber)\n","repo_name":"MarcusJMcGill/PyFiles","sub_path":"CollatzSequence.py","file_name":"CollatzSequence.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28804867205","text":"from bc4py_stratum_pool.config import Const, co_efficiency\nfrom bc4py_stratum_pool.client import *\nfrom bc4py_stratum_pool.job import *\nfrom bc4py_stratum_pool.ask import *\nfrom bc4py_stratum_pool.commands import *\nfrom bc4py_stratum_pool.account import *\nfrom bc4py_extension import address2bech\nfrom bc4py.config import V\nfrom aiohttp import client_exceptions\nfrom binascii import a2b_hex\nfrom os import urandom\nfrom time import time\nfrom logging import getLogger\n\n\"\"\"Methods (client to server)\n\"\"\"\n\nlog = getLogger(__name__)\n\n\nasync def mining_authorize(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.authorize(\"username\", \"password\")\n\n    The result from an authorize request is usually true (successful), or false.\n    The password may be omitted if the server does not require passwords.\n    \"\"\"\n    try:\n        username, password, *others = params\n        # check credentials before decoding the address (address2bech raises on None)\n        if username is None or password is None:\n            log.debug(\"authorization account incorrect\")\n            await response_failed(client, False, uuid)\n            return\n        hrp, version, identifier = address2bech(username)\n        if not (hrp == V.BECH32_HRP and version == 0 and len(identifier) == 20):\n            log.debug(f\"wrong address format hrp:{hrp} ver:{version}\")\n            await response_success(client, False, uuid)\n            return\n        client.username = username\n        client.password = password\n        # send job\n        job = get_best_job(client.algorithm)\n        if job is None:\n            job = await add_new_job(client.algorithm)\n        async with create_db(Const.DATABASE_PATH) as db:\n            cur = await db.cursor()\n            account_id = await read_address2account_id(cur=cur, address=username, create_if_missing=True)\n            await db.commit()\n        client.account_id = account_id\n        await mining_notify(job, f_clean=False)\n        log.debug(f\"authorize success by '{username}:{password}' id={account_id}\")\n        await response_success(client, True, uuid)\n    except (ConnectionError, client_exceptions.ClientError) as e:\n        log.debug(f\"authorize failed by '{str(e)}'\")\n        await response_success(client, False, uuid)\n    except ValueError as e:\n        log.debug(f\"address format error {str(e)}\")\n        await response_success(client, False, uuid)\n    except Exception:\n        log.debug(\"authorize failed\", exc_info=True)\n        await response_success(client, False, uuid)\n
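\n# Note (added): the bech32 guard in mining_authorize accepts only the node's\n# hrp (V.BECH32_HRP), witness version 0 and a 20-byte identifier, i.e. miners\n# must authorize with a standard bech32 payout address for this chain.\n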
\n\nasync def mining_extranonce_subscribe(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.extranonce.subscribe()\n\n    Indicates to the server that the client supports the mining.set_extranonce method.\n    \"\"\"\n    # client.f_extranonce = True\n    await response_success(client, None, uuid)\n\n\nasync def mining_get_transactions(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.get_transactions(\"job id\")\n\n    Server should send back an array with a hexdump of each transaction in the block specified for the given job id.\n    \"\"\"\n    job_id, *others = params\n    job = get_job_by_id(job_id)\n    if job is None:\n        await response_failed(client, JOB_NOT_FOUND, uuid)\n        return\n    txs = [tx[0][::-1].hex() for tx in job.unconfirmed]\n    await response_success(client, txs, uuid)\n\n\nasync def mining_submit(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.submit(\"username\", \"job id\", \"ExtraNonce2\", \"nTime\", \"nonce\")\n\n    Miners submit shares using the method \"mining.submit\". Client submissions contain:\n        Worker Name.\n        Job ID.\n        ExtraNonce2.\n        nTime.\n        nonce.\n    Server response is result: true for accepted, false for rejected (or you may get an error with more details).\n    \"\"\"\n    try:\n        username, job_id, extranonce2, ntime, nonce, *others = params\n        job_id = int.from_bytes(a2b_hex(job_id), 'big')\n        extranonce2 = a2b_hex(extranonce2)\n        ntime = int.from_bytes(a2b_hex(ntime), 'big')\n        nonce = a2b_hex(nonce)[::-1]\n        job = get_job_by_id(job_id)\n        # check\n        if client.username is None:\n            await response_failed(client, UNAUTHORIZED_WORKER, uuid)\n            return\n        if client.extranonce_1 is None:\n            await response_failed(client, NOT_SUBSCRIBED, uuid)\n            return\n        if job is None:\n            await response_failed(client, JOB_NOT_FOUND, uuid)\n            return\n        if job.ntime != ntime:\n            log.warning(f\"submit different time, {job.ntime} != {ntime}\")\n            await response_failed(client, OTHER_UNKNOWN, uuid)\n            return\n        if client.algorithm not in co_efficiency:\n            log.warning(\"algorithm not found in co_efficiency?\")\n            await response_failed(client, OTHER_UNKNOWN, uuid)\n            return\n        # try to generate submit data\n        fixed_difficulty = min(client.diff_list) / co_efficiency[client.algorithm]\n        submit_data, block, f_mined, f_shared = get_submit_data(\n            job, client.extranonce_1, extranonce2, nonce, fixed_difficulty)\n        if block.hash in job.submit_hashs:\n            await response_failed(client, DUPLICATE_SHARE, uuid)\n            return\n        # try to submit work\n        if f_mined or f_shared:\n            client.n_accept += 1\n            average_difficulty = sum(client.diff_list)/len(client.diff_list)\n            client.time_works.append((time(), average_difficulty))\n            job.submit_hashs.append(block.hash)\n            # submit block\n            if f_mined:\n                pwd = str(job.algorithm)\n                response = await ask_json_rpc('submitblock', [submit_data.hex()], 'user', pwd)\n                if response:\n                    f_mined = False\n                    log.warning(f\"failed mine by '{response}'\")\n                else:\n                    log.info(f\"mined yey!! {client.consensus_name} {job.height} diff={client.difficulty}\")\n            else:\n                log.debug(f\"shared work!! {client.consensus_name} {job.height} diff={client.difficulty}\")\n
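            # Note (added): both real blocks (f_mined) and share-only solutions\n            # reach this point; the share row is written below either way, with\n            # the blockhash kept only when a block was actually mined.\n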
            await response_success(client, True, uuid)\n            # record the share\n            async with create_db(Const.DATABASE_PATH) as db:\n                cur = await db.cursor()\n                # fraction of the block difficulty this share represents (work/target)\n                share = average_difficulty / block.difficulty / co_efficiency[client.algorithm]\n                record_hash = block.hash if f_mined else None\n                payout_id = 0 if Const.PAYOUT_METHOD == 'transaction' else -1\n                await insert_new_share(cur=cur, account_id=client.account_id, algorithm=client.algorithm,\n                                       blockhash=record_hash, share=share, payout_id=payout_id)\n                await db.commit()\n        else:\n            client.n_reject += 1\n            await response_failed(client, LOW_DIFFICULTY_SHARE, uuid)\n    except Exception:\n        log.warning(\"unexpected error on mining_submit\", exc_info=True)\n        await response_failed(client, OTHER_UNKNOWN, uuid)\n\n\nasync def mining_subscribe(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.subscribe(option: \"version\", option: \"subscription_id\")\n\n    The optional second parameter specifies a mining.notify subscription id the client wishes to resume working with\n    (possibly due to a dropped connection).\n    If provided, a server MAY (at its option) issue the connection the same extranonce1.\n    Note that the extranonce1 may be the same (allowing a resumed connection) even if the subscription id is changed!\n\n    The client receives a result:\n    [[[\"mining.set_difficulty\", \"subscription id 1\"], [\"mining.notify\", \"subscription id 2\"]], \"extranonce1\", extranonce2_size]\n\n    The result contains three items:\n        Subscriptions. - An array of 2-item tuples, each with a subscription type and id.\n        ExtraNonce1. - Hex-encoded, per-connection unique string which will be used for creating generation transactions later.\n        ExtraNonce2_size. 
- The number of bytes that the miner uses for its ExtraNonce2 counter.\n    \"\"\"\n    client.version = str(params[0]) if 0 < len(params) else 'unknown'\n    client.subscription_id = a2b_hex(params[1]) if 1 < len(params) else None\n    if client.subscription_id is None:\n        # setup new subscription info\n        async with create_db(Const.DATABASE_PATH) as db:\n            cur = await db.cursor()\n            client.extranonce_1 = urandom(4)\n            client.subscription_id = await insert_new_subscription(cur=cur, extranonce=client.extranonce_1)\n            await db.commit()\n    else:\n        # restore works from close_deque\n        for old_client in reversed(closed_deque):\n            if client.subscription_id != old_client.subscription_id:\n                continue\n            elif client.algorithm != old_client.algorithm:\n                continue\n            else:\n                client.time_works = old_client.time_works\n                client.difficulty = old_client.difficulty\n                client.submit_span = old_client.submit_span\n                client.extranonce_1 = old_client.extranonce_1\n                client.n_accept = old_client.n_accept\n                client.n_reject = old_client.n_reject\n                log.debug(\"resume from disconnected client data\")\n                closed_deque.remove(old_client)\n                break\n        else:\n            # recover subscription from database\n            async with create_db(Const.DATABASE_PATH) as db:\n                cur = await db.cursor()\n                extranonce_1 = await read_subscription_id2extranonce(\n                    cur=cur, subscription_id=client.subscription_id)\n            if extranonce_1 is None:\n                # remove client info\n                client.subscription_id = None\n                raise ConnectionError('unknown subscription id')\n            else:\n                client.extranonce_1 = extranonce_1\n                log.debug(\"resume from database\")\n    # notify subscription info\n    extranonce_2_size = 4\n    result = [\n        [\n            [\"mining.set_difficulty\", client.subscription_id.hex()],\n            [\"mining.notify\", client.subscription_id.hex()],\n        ],\n        client.extranonce_1.hex(),\n        extranonce_2_size\n    ]\n    log.debug(f\"subscribe {client}\")\n    await response_success(client, result, uuid)\n\n\nasync def mining_suggest_difficulty(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.suggest_difficulty(preferred share difficulty Number)\n\n    Used to indicate a preference for share difficulty to the pool.\n    Servers are not required to honour this request, even if they support the stratum method.\n    \"\"\"\n    # difficulty, *others = params\n    # client.difficulty = difficulty\n\n\nasync def mining_suggest_target(client: Client, params: list, uuid: int):\n    \"\"\"\n    mining.suggest_target(\"full hex share target\")\n\n    Used to indicate a preference for share target to the pool, usually prior to mining.subscribe.\n    Servers are not required to honour this request, even if they support the stratum method.\n    \"\"\"\n    # target, *others = params\n    # target = a2b_hex(target)\n    # client.difficulty = round(DEFAULT_TARGET / float(int.from_bytes(target, 'little')), 8)\n\n\n__all__ = [\n    \"mining_authorize\",\n    \"mining_extranonce_subscribe\",\n    \"mining_get_transactions\",\n    \"mining_submit\",\n    \"mining_subscribe\",\n    \"mining_suggest_difficulty\",\n    \"mining_suggest_target\",\n]\n","repo_name":"namuyan/bc4py-stratum-pool","sub_path":"bc4py_stratum_pool/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":11398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"23656613204","text":"\"\"\"\nThis script is the main launcher of the multi-modal affine image\nregistration\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\nimport numpy as np\nimport nibabel as nib\nfrom dipy.fixes import argparse as arg\nfrom experiments.registration.rcommon import 
getBaseFileName, decompose_path\nimport experiments.registration.evaluation as evaluation\nfrom nipy.algorithms.registration import HistogramRegistration, resample\nfrom nipy.io.files import nipy2nifti, nifti2nipy\n\nparser = arg.ArgumentParser(\n description=\"Affine image registration.\"\n )\n\nparser.add_argument(\n 'moving', action = 'store', metavar = 'moving',\n help = '''Nifti1 image or other formats supported by Nibabel''')\n\nparser.add_argument(\n 'static', action = 'store', metavar = 'static',\n help = '''Nifti1 image or other formats supported by Nibabel''')\n\nparser.add_argument(\n 'warp_dir', action = 'store', metavar = 'warp_dir',\n help = '''Directory (relative to ./ ) containing the images to be warped\n with the obtained deformation field''')\n\nparser.add_argument(\n '-mm', '--moving_mask', action = 'store', metavar = 'moving_mask',\n help = '''Nifti1 image or other formats supported by Nibabel''',\n default=None)\n\nparser.add_argument(\n '-sm', '--static_mask', action = 'store', metavar = 'static_mask',\n help = '''Nifti1 image or other formats supported by Nibabel''',\n default=None)\n\nparser.add_argument(\n '-m', '--metric', action = 'store', metavar = 'metric',\n help = '''Any of {MI[L]} specifying the metric to be used\n MI= mutual information and the comma-separated (WITH NO SPACES) parameter\n list L:\n MI[nbins]\n nbins: number of histogram bins\n ''',\n default = 'crl1')\n\nparser.add_argument(\n '-t', '--transforms', action = 'store', metavar = 'transforms',\n help = '''A comma-separated (WITH NO SPACES) list of transform names,\n each being any of {TRANSLATION, ROTATION, RIGID, SCALING, AFFINE} specifying\n the desired sequence of transformation types\n ''',\n default = 'affine')\n\nparser.add_argument(\n '-i', '--iter', action = 'store', metavar = 'i_0,i_1,...,i_n',\n help = '''A comma-separated (WITH NO SPACES) list of integers indicating the\n maximum number of iterations at each level of the Gaussian Pyramid\n (similar to ANTS), e.g. 10,100,100 (NO SPACES)''',\n default = '25,100,100')\n\nparser.add_argument(\n '-method', '--method', action = 'store',\n metavar = 'method',\n help = '''Optimization method''',\n default = 'powell')\n\nparser.add_argument(\n '-mask0', '--mask0', action = 'store_true',\n help = '''Set to zero all voxels of the scale space that are zero in the\n original image''')\n\n\ndef print_arguments(params):\n r'''\n Verify all arguments were correctly parsed and interpreted\n '''\n print('========================Parameters========================')\n print('moving: ', params.moving)\n print('static: ', params.static)\n print('moving_mask: ', params.moving_mask)\n print('static_mask: ', params.static_mask)\n print('warp_dir: ', params.warp_dir)\n print('metric: ', params.metric)\n print('transforms: ', params.transforms)\n print('iter:', params.iter)\n print('method:', params.method)\n print('mask0',params.mask0)\n\n\ndef compute_jaccard(aname, bname, keep_existing = True):\n baseA=getBaseFileName(aname)\n baseB=getBaseFileName(bname)\n oname=\"jaccard_\"+baseA+\"_\"+baseB+\".txt\"\n if keep_existing and os.path.exists(oname):\n print('Jaccard overlap found. 
Skipped computation.')\n        jaccard=np.loadtxt(oname)\n        return jaccard\n    nib_A=nib.load(aname)\n    affineA=nib_A.get_affine()\n    A=nib_A.get_data().squeeze().astype(np.int32)\n    A=np.copy(A, order='C')\n    print(\"A range:\",A.min(), A.max())\n    nib_B=nib.load(bname)\n    #newB=nib.Nifti1Image(nib_B.get_data(),affineA)\n    #newB.to_filename(bname)\n    B=nib_B.get_data().squeeze().astype(np.int32)\n    B=np.copy(B, order='C')\n    print(\"B range:\",B.min(), B.max())\n    jaccard=np.array(evaluation.compute_jaccard(A,B))\n    print(\"Jaccard range:\",jaccard.min(), jaccard.max())\n    np.savetxt(oname,jaccard)\n    return jaccard\n\n\ndef compute_target_overlap(aname, bname, keep_existing = True):\n    baseA=getBaseFileName(aname)\n    baseB=getBaseFileName(bname)\n    oname=\"t_overlap_\"+baseA+\"_\"+baseB+\".txt\"\n    if keep_existing and os.path.exists(oname):\n        print('Target overlap found. Skipped computation.')\n        scores=np.loadtxt(oname)\n        return scores\n    nib_A=nib.load(aname)\n    affineA=nib_A.get_affine()\n    A=nib_A.get_data().squeeze().astype(np.int32)\n    A=np.copy(A, order='C')\n    print(\"A range:\",A.min(), A.max())\n    nib_B=nib.load(bname)\n    #newB=nib.Nifti1Image(nib_B.get_data(),affineA)\n    #newB.to_filename(bname)\n    B=nib_B.get_data().squeeze().astype(np.int32)\n    B=np.copy(B, order='C')\n    print(\"B range:\",B.min(), B.max())\n    scores=np.array(evaluation.compute_target_overlap(A,B))\n    print(\"Target overlap range:\",scores.min(), scores.max())\n    np.savetxt(oname,scores)\n    return scores\n\n\ndef compute_scores(pairs_fname = 'jaccard_pairs.lst'):\n    with open(pairs_fname) as pairs_file:\n        names = [s.split() for s in pairs_file.readlines()]\n    for r in names:\n        moving_dir, moving_base, moving_ext = decompose_path(r[0])\n        static_dir, static_base, static_ext = decompose_path(r[1])\n        warped_name = \"warpedAff_\"+moving_base+\"_\"+static_base+\".nii.gz\"\n        compute_jaccard(r[2], warped_name, False)\n        compute_target_overlap(r[2], warped_name, False)\n
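\n# Note (added): each line of jaccard_pairs.lst is expected to hold three\n# whitespace-separated paths - moving image, static image, labels image -\n# matching the r[0]/r[1]/r[2] unpacking above and the reader in\n# save_registration_results below.\n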
\n\ndef save_registration_results(sol, params):\n    r'''\n    Warp the moving image using the obtained solution from Nipy registration\n    '''\n    warp_dir = params.warp_dir\n\n    base_static = getBaseFileName(params.static)\n    static_nib = nib.load(params.static)\n    static_nib = nib.Nifti1Image(static_nib.get_data().squeeze(), static_nib.get_affine())\n    static = nifti2nipy(static_nib)\n    static_affine = static_nib.get_affine()\n    static_shape = np.array(static.shape, dtype=np.int32)\n\n    base_moving = getBaseFileName(params.moving)\n    moving_nib = nib.load(params.moving)\n    moving_nib = nib.Nifti1Image(moving_nib.get_data().squeeze(), moving_nib.get_affine())\n    moving = nifti2nipy(moving_nib)\n    moving_affine = moving_nib.get_affine()\n    moving_shape = np.array(moving.shape, dtype=np.int32)\n\n    dim = len(static.shape)\n    static_affine = static_affine[:(dim + 1), :(dim + 1)]\n    moving_affine = moving_affine[:(dim + 1), :(dim + 1)]\n\n    warped = resample(moving, sol, reference=static, interp_order=1)\n    fmoved = 'warpedAff_'+base_moving+'_'+base_static+'.nii.gz'\n    nib.save(nipy2nifti(warped, strict=True), fmoved)\n    #---warp all volumes in the warp directory using NN interpolation\n    #---(order 0 keeps label volumes integer-valued)\n    names = [os.path.join(warp_dir, name) for name in os.listdir(warp_dir)]\n    for name in names:\n        to_warp_nib = nib.load(name)\n        to_warp_nib = nib.Nifti1Image(to_warp_nib.get_data().squeeze(), to_warp_nib.get_affine())\n        to_warp_affine = to_warp_nib.get_affine()\n        img_affine = to_warp_affine[:(dim + 1), :(dim + 1)]\n\n        to_warp = nifti2nipy(to_warp_nib)\n        base_warp = getBaseFileName(name)\n        warped = resample(to_warp, sol, reference=static, interp_order=0)\n        fmoved = 'warpedAff_'+base_warp+'_'+base_static+'.nii.gz'\n        nib.save(nipy2nifti(warped, strict=True), fmoved)\n    #---now the jaccard indices\n    if os.path.exists('jaccard_pairs.lst'):\n        with open('jaccard_pairs.lst','r') as f:\n            for line in f.readlines():\n                aname, bname, cname= line.strip().split()\n                abase = getBaseFileName(aname)\n                bbase = getBaseFileName(bname)\n                aname = 'warpedAff_'+abase+'_'+bbase+'.nii.gz'\n                if os.path.exists(aname) and os.path.exists(cname):\n                    compute_jaccard(cname, aname, False)\n                    compute_target_overlap(cname, aname, False)\n                else:\n                    print('Pair not found ['+cname+'], ['+aname+']')\n    #---finally, the optional output\n    #oname = base_moving+'_'+base_static+'Affine.txt'\n    #np.savetxt(oname, affine)\n\n\ndef register_3d(params):\n    r'''\n    Runs affine registration with the parsed parameters\n    '''\n    import time  # needed here: the module-level import only happens under __main__\n    # Default parameters\n    renormalize = False\n    interp = 'tri'\n\n    metric_name=params.metric[0:params.metric.find('[')]\n    metric_params_list=params.metric[params.metric.find('[')+1:params.metric.find(']')].split(',')\n    nbins=int(metric_params_list[0])\n\n    optimizer = params.method\n\n    print('Registering %s to %s'%(params.moving, params.static))\n    sys.stdout.flush()\n    moving_mask = None\n    static_mask = None\n\n    #Load the data\n    moving_nib = nib.load(params.moving)\n    moving_nib = nib.Nifti1Image(moving_nib.get_data().squeeze(), moving_nib.get_affine())\n    static_nib = nib.load(params.static)\n    static_nib = nib.Nifti1Image(static_nib.get_data().squeeze(), static_nib.get_affine())\n\n    moving= nifti2nipy(moving_nib)\n    static= nifti2nipy(static_nib)\n\n    # Register\n    tic = time.time()\n    R = HistogramRegistration(static, moving, from_bins=nbins, to_bins=nbins,\n                              similarity=metric_name, interp=interp,\n                              renormalize=renormalize)\n    R.set_fov(spacing=(1,1,1))\n    T = R.optimize('affine', optimizer=optimizer)\n    toc = time.time()\n\n    save_registration_results(T, params)\n\n\nif __name__ == '__main__':\n    import time\n    params = parser.parse_args()\n    print_arguments(params)\n    tic = time.clock()\n    register_3d(params)\n    toc = time.clock()\n    print('Time elapsed (sec.): ',toc - tic)\n","repo_name":"omarocegueda/experiments","sub_path":"experiments/registration/nipyreg_affine.py","file_name":"nipyreg_affine.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5185830609","text":"import cv2\nimport mediapipe as mp\n# pip install mediapipe\n\ncap = cv2.VideoCapture('person.jpg')\nmp_fd = mp.solutions.face_detection\nmp_draw = mp.solutions.drawing_utils\nfd = mp_fd.FaceDetection(0.5) \n\n# read one frame from the capture (a still image yields a single frame)\nsuccess, img= cap.read()\n# convert BGR to RGB (MediaPipe expects RGB; improves accuracy)\nfrom_bgr_to_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# run face detection\nresult = fd.process(from_bgr_to_rgb)\n# if any faces were found\nif result.detections:\n    # for each detected face\n    for id, detection in enumerate(result.detections):\n        mp_draw.draw_detection(img, detection) # draw the bounding box\n\ncv2.imshow('title', img)\ncv2.waitKey(0)\n\n\n","repo_name":"twdssqrtdd/python","sub_path":"day08/day08_4.py","file_name":"day08_4.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12559405754","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import batched_scatter, batched_index_select\n\n\ndef calc_mean_std(feat, eps=1e-5):\n    # eps is a small value added to the variance to avoid divide-by-zero.\n    size = feat.size()\n    assert (len(size) == 4)\n
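    # Note (added): the statistics are per sample and per channel, taken over\n    # the H*W spatial positions - the instance statistics used by the\n    # AdaIN-style content/style losses below.\n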
    N, C = size[:2]\n    feat_var = feat.view(N, C, -1).var(dim=2) + eps\n    feat_std = feat_var.sqrt().view(N, C, 1, 1)\n    feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)\n    return feat_mean, feat_std\n\n\ndef mean_variance_norm(feat):\n    size = feat.size()\n    mean, std = calc_mean_std(feat)\n    normalized_feat = (feat - mean.expand(size)) / std.expand(size)\n    return normalized_feat\n\n\ndef calc_content_loss(input, target, norm=False):\n    mse_loss = nn.MSELoss()\n    if not norm:\n        return mse_loss(input, target)\n    else:\n        return mse_loss(mean_variance_norm(input), mean_variance_norm(target))\n\n\ndef calc_style_loss(input, target):\n    mse_loss = nn.MSELoss()\n    input_mean, input_std = calc_mean_std(input)\n    target_mean, target_std = calc_mean_std(target)\n    return mse_loss(input_mean, target_mean) + \\\n           mse_loss(input_std, target_std)\n\n\ndef recon_criterion(target, output):\n    loss = torch.mean(torch.abs(output - target))\n    return loss\n\n\ndef weighted_recon_criterion(real_xs, fake_x, similarity=None):\n    b, n, c, h, w = real_xs.size()\n    if similarity is not None:\n        xs_c = torch.matmul(similarity.view(b, 1, n), real_xs.view(b, n, c * h * w))\n        xs_c = xs_c.view(b, c, h, w)\n    else:\n        xs_c = torch.mean(real_xs, dim=1, keepdim=False)\n    loss = torch.mean(torch.abs(xs_c - fake_x))\n    return loss\n\n\ndef nearest_recon_critertion(real_xs, fake_x):\n    b, n, c, h, w = real_xs.size()\n    losses = [torch.mean(torch.abs(real_xs[:, i, :, :, :] - fake_x)) for i in range(n)]\n    loss = min(losses)\n    return loss\n\n\ndef local_recon_criterion(real_xs, fake_x, similarity, indice_base, indice_refs, index, s=8):\n    \"\"\"\n    Local Reconstruction Loss\n    :param real_xs: real images (32*3*3*128*128)\n    :param fake_x: generated fake images (32*3*128*128)\n    :param similarity: alpha (32*3)\n    :param indice_base: the recorded positions of selected base local representations (32*M)\n    :param indice_refs: the recorded positions of the matched reference representations (32*2*M)\n    :param index: the index of base feature (1)\n    :param s: resize the feature map\n    :return:\n    \"\"\"\n    base = real_xs[:, index, :, :, :]  # (32*3*128*128)\n    refs = torch.cat([real_xs[:, :index, :, :, :], real_xs[:, (index + 1):, :, :, :]], dim=1)  # (32*2*3*128*128)\n    base_similarity = similarity[:, index]  # (32*1)\n    ref_similarities = torch.cat([similarity[:, :index], similarity[:, (index + 1):]], dim=1)  # (32*2)\n\n    base = F.interpolate(base, size=s)  # (32*3*8*8)\n\n    b, n, c, h, w = refs.size()\n    refs = refs.view(-1, c, h, w)\n    refs = F.interpolate(refs, size=s)\n    refs = refs.view(b, n, c, s * s)  # (32*2*3*(8*8))\n\n    base = base.view(b, c, -1)  # (32*3*64)\n    base_select = batched_index_select(base, dim=2, index=indice_base)  # (32*3*M)\n\n    ref_selects = []\n    for j in range(n):\n        ref = refs[:, j, :, :]  # (32*3*64)\n        indice = indice_refs[:, j, :]  # (32*M)\n        select = batched_index_select(ref, dim=2, index=indice)  # (32*3*M)\n        ref_selects.append(select)\n    ref_selects = torch.cat([item.unsqueeze(1) for item in ref_selects], dim=1)  # (32*2*3*M)\n\n    base_similarity = base_similarity.view(b, 1, 1)  # (32*1*1)\n    ref_similarities = ref_similarities.view(b, 1, n)  # (32*1*2)\n    base_select = base_select.view(b, 1, -1)  # (32*1*(3*M))\n    ref_selects = ref_selects.view(b, n, -1)  # (32*2*(3*M))\n\n    patch_fused = torch.matmul(base_similarity, base_select) \\\n                  + torch.matmul(ref_similarities, ref_selects)  # (32*1*(3*M))\n\n    num = indice_base.size()[-1]\n    patch_fused = patch_fused.view(b, c, num)  # (32*3*M)\n\n    target = batched_scatter(base, dim=2, index=indice_base, 
src=patch_fused)\n target = target.view(b, c, s, s) # (32*3*8*8)\n\n fake_x = F.interpolate(fake_x, size=s)\n\n return recon_criterion(target, fake_x)\n\n\ndef ms_loss(fake_x1, fake_x2, sim1, sim2):\n lz = torch.mean(torch.abs(fake_x1 - fake_x2)) \\\n / torch.mean(torch.abs(sim1 - sim2))\n eps = 1 * 1e-5\n loss_lz = 1 / (lz + eps)\n return loss_lz\n","repo_name":"kobeshegu/ECCV2022_WaveGAN","sub_path":"networks/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"16"} +{"seq_id":"36984879940","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nimport json\nfrom datetime import datetime\nfrom videk_rest_client import Videk\nfrom random import randint\n\nx = Videk('secret')\nx.api_url = \"http://localhost/api\"\n\ncluster = \"example\";\nnode = \"example.com\"\nsensor_t = \"test\"\nsensor_q = \"test_value\"\nsensor_u = \"t\"\n\ncluster_id = x.getClusterID(cluster)\nif cluster_id == None:\n x.createCluster(cluster)\n cluster_id = x.getClusterID(cluster)\n\nnode_id = x.getNodeID(node)\nif node_id == None:\n x.createNode(node, cluster_id)\n node_id = x.getNodeID(node)\n\nsensor_id = x.getSensorID(node, sensor_t, sensor_q)\nif sensor_id == None:\n x.createSensor(node_id, sensor_t, sensor_q, sensor_u)\n sensor_id = x.getSensorID(node, sensor_t, sensor_q)\n\nmeasurement = '''{\"latitude\":\"\",\"longitude\":\"\",\"ts\":\"\",\"value\":\"\"}'''\n\nwhile True:\n try:\n v = randint(1, 10)\n\n preparedData = []\n data = json.loads(measurement)\n data['value'] = v\n data['ts'] = datetime.utcnow().isoformat()\n data['latitude'] = 11.111111\n data['longitude'] = 11.111111\n preparedData.append(data)\n\n x.uploadMesurements(preparedData, node_id, sensor_id)\n\n time.sleep(10)\n except (KeyboardInterrupt, SystemExit):\n sys.exit(0)\n except:\n time.sleep(10)\n","repo_name":"matevzv/videk-rest-client","sub_path":"example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13727204505","text":"import sqlite3\n\ntypes_table_sql = '''\nCREATE TABLE IF NOT EXISTS types\n(id INTEGER PRIMARY KEY NOT NULL,\n type TEXT NOT NULL)\n'''\n\nfiles_table_sql = '''\nCREATE TABLE IF NOT EXISTS files\n(id INTEGER PRIMARY KEY NOT NULL,\n name TEXT NOT NULL,\n root TEXT NOT NULL,\n type_id INTEGER NOT NULL)\n'''\n\ncategories_table_sql = '''\nCREATE TABLE IF NOT EXISTS categories\n(id INTEGER PRIMARY KEY NOT NULL,\n category TEXT NOT NULL)\n'''\n\ncounts_table_sql = '''\nCREATE TABLE IF NOT EXISTS counts\n(id INTEGER PRIMARY KEY NOT NULL,\n cat_id INTEGER NOT NULL,\n file_id INTEGER NOT NULL,\n count INTEGER NOT NULL)\n'''\n\nclass Results(object):\n '''Holds the accumulated results of scanning source files\n '''\n\n def __init__(self, name=':memory:'):\n '''construct a new Results object\n \n :param name: The filename for the sqlite database. 
By default,\n this will construct a memory db\n '''\n self.conn = sqlite3.Connection(name)\n cur = self.conn.cursor()\n cur.execute(types_table_sql)\n cur.execute(files_table_sql)\n cur.execute(categories_table_sql)\n cur.execute(counts_table_sql)\n\n self.__types = {}\n self.__categories = {}\n\n def close(self, commit=True):\n '''close the results object and associated database\n \n :param commit: indicates if the changes should be committed to\n the database\n '''\n if commit:\n self.conn.commit()\n self.conn.close()\n\n def add_result(self, filename, root, type, counts):\n cur = self.conn.cursor()\n\n try:\n typeid = self.__types[type]\n except KeyError:\n cur.execute('INSERT INTO types VALUES(?,?)',\n (None, type))\n typeid = cur.lastrowid\n self.__types[type] = typeid\n \n cur.execute('INSERT INTO files VALUES(?,?,?,?)',\n (None, filename, root, typeid))\n fileid = cur.lastrowid\n \n for cat,count in counts.items():\n try:\n catid = self.__categories[cat]\n except KeyError:\n cur.execute('INSERT INTO categories VALUES(?,?)',\n (None, cat))\n catid = cur.lastrowid\n self.__categories[cat] = catid\n \n cur.execute('INSERT INTO counts VALUES(?,?,?,?)',\n (None, catid, fileid, count))\n\n def categories(self):\n '''get a list of all categories present in the results\n\n Note that not all files will have all categories\n '''\n cur = self.conn.cursor()\n cur.execute('SELECT category FROM categories')\n return [row[0] for row in cur]\n\n def types(self):\n '''get a list of all file-types present in the results'''\n\n cur = self.conn.cursor()\n cur.execute('SELECT type FROM types')\n return [row[0] for row in cur]\n\n def counts_by_type(self, filetype):\n '''get counts summed over a particular file type\n \n :param filetype: the filetype to get counts for\n :return: a dictionary { category -> sum }\n '''\n cur = self.conn.cursor()\n cur.execute('''SELECT categories.category, \n counts.count\n FROM counts, \n types, \n files, \n categories \n WHERE types.type = ?\n AND files.type_id = types.id\n AND counts.file_id = files.id\n AND categories.id = counts.cat_id''',\n (filetype,))\n counts = [(row[0], row[1]) for row in cur]\n rslt = {}\n for cat,count in counts:\n try:\n rslt[cat] += count\n except KeyError:\n rslt[cat] = count\n return rslt\n \n def counts_by_category(self, category):\n cur = self.conn.cursor()\n cur.execute('''SELECT types.type,\n counts.count\n FROM counts,\n types,\n files,\n categories\n WHERE categories.category = ?\n AND counts.cat_id = categories.id\n AND files.id = counts.file_id\n AND types.id = files.type_id''',\n (category,))\n counts = [(row[0],row[1]) for row in cur]\n rslt = {}\n for type,count in counts:\n try:\n rslt[type] += count\n except KeyError:\n rslt[type] = count\n return rslt\n\n def sum_categories(self, filetype):\n pass\n","repo_name":"abingham/pyloc","sub_path":"src/pyloc/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"16280428949","text":"# compareNEDirectly.py\n#\n# from approximation of range gates from Ray Tracing\n# look at the CUTLASS ne versus the EISCAT ne\n#\n# LKS, January 2016 SANSA\n#\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport readProcessedEISCAT\nimport matplotlib.dates as dt\nfrom matplotlib.dates import HourLocator, DayLocator, DateFormatter, MinuteLocator\nfrom matplotlib.ticker import ScalarFormatter, FormatStrFormatter\n#\n# load in the IRI 
data\nos.chdir('ModelData')\nIRI=np.genfromtxt('IRIdata_range=880km.txt')\nIRIalt=IRI[:,0]\nIRIne=IRI[:,1]\nos.chdir('..')\n\ndef runningMedian(seq, M):\n \"\"\"\n Purpose: Find the median for the points in a sliding window (odd number in size) \n as it is moved from left to right by one point at a time.\n Inputs:\n seq -- list containing items for which a running median (in a sliding window) \n is to be calculated\n M -- number of items in window (window size) -- must be an integer > 1\n Otputs:\n medians -- list of medians with size N - M + 1\n Note:\n 1. The median of a finite list of numbers is the \"center\" value when this list\n is sorted in ascending order. \n 2. If M is an even number the two elements in the window that\n are close to the center are averaged to give the median (this\n is not by definition)\n \"\"\"\n from itertools import islice\n from collections import deque,Counter\n from bisect import insort, bisect_left\n\n seq = iter(seq)\n s = [] \n m = M // 2\n\n # Set up list s (to be sorted) and load deque with first window of seq\n s = [item for item in islice(seq,M)] \n d = deque(s)\n\n # Simple lambda function to handle even/odd window sizes \n median = lambda : s[m] if bool(M&1) else (s[m-1]+s[m])*0.5\n\n # Sort it in increasing order and extract the median (\"center\" of the sorted window)\n s.sort() \n medians = [median()] \n\n # Now slide the window by one point to the right for each new position (each pass through \n # the loop). Stop when the item in the right end of the deque contains the last item in seq\n for item in seq:\n old = d.popleft() # pop oldest from left\n d.append(item)\n # push newest in from right\n try:\n del s[bisect_left(s, old)] # locate insertion point and then remove old \n # insert newest such that new sort is not required\n insort(s, item) \n medians.append(np.nanmedian(s)) \n except:\n medians.append(np.nan)\n pass\n\n return medians\n\n \n#\n# load in EISCAT\ndataEISCAT=readProcessedEISCAT.ReadData()\naltE=np.nanmean(dataEISCAT['alt'], axis=1) # 42 altitudes between 76 km to 613 km\nrightALTs=np.where((altE >= 100) & (altE <= 500))[0] # I think, need to confirm\nTimesE=dt.date2num(dataEISCAT['stim'])\nneE=dataEISCAT['ne']\n#\n# load in SuperDARN\nos.chdir('CUTLASS')\nimport readCUTLASSvel as SD\n#\ndataSD=SD.readSUPERDARN()\n# divide ne by 28 because scatter range of EISCAT = 8 km2\n# SuperDARN = 15 * 15 = 225 km2\n# 225 / 8 = 28.1\n# so the scattering area of the SuperDARN Ne is 28 times larger than EISCAT\n# we apply this later in the plotting section\n# \nAllneSD=np.array(dataSD[0])\n\nAllFreqsSD=np.array(dataSD[1])\nAllfpSD=dataSD[2]\nAllTimesSD=np.array(dataSD[3])\nAllNs=np.array(dataSD[4])\n\nos.chdir('..')\n#\n# so we need ne at certain freqs\n#cFreqs=np.where((FreqsSD > 14500000) & (FreqsSD < 16000000))[0][:-1]\n#cFreqs=np.where(FreqsSD > 0)[0][:-1] # just say all \n#\nlabels=['15_MHz', '16_MHz', '17_MHz', '18_MHz']\nfor iFREQ in range(len(AllFreqsSD)):\n # good SuperDARN data\n cNE=np.swapaxes(AllneSD[iFREQ],1,0) # to make it gates x Time\n cNE[cNE==0]=np.nan\n cTimes=dt.date2num(AllTimesSD[iFREQ])\n gates=np.linspace(28,40,13)\n #\n # side by side compare\n # plot for different alts\n cFactor=[]\n for iRNG in range(len(gates)):\n for iALT in range(len(rightALTs)):\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.subplots_adjust(right=0.7, top=0.92, bottom=0.11, left=0.11)\n days = DayLocator(interval=1) \n hours = MinuteLocator(interval=30) \n hours2 = MinuteLocator(interval=60) \n daysFmt = DateFormatter('%H:%M')\n 
fig.gca().xaxis.set_major_locator(hours)\n            fig.gca().xaxis.set_major_formatter(daysFmt)\n            fig.gca().xaxis.set_minor_locator(hours2)\n            font = {'family' : 'normal',\n                    'weight' : 'bold',\n                    'size'   : 22}\n            plt.rc('font', **font)\n            t1=max(cTimes[0], TimesE[0])\n            t2=min(cTimes[-1], TimesE[-1])\n            # find the IRI altitude closest to this EISCAT altitude\n            cIRIalt=min(range(len(IRIalt)), key=lambda i: abs(IRIalt[i]-altE[rightALTs[iALT]]))\n            gate=int(gates[iRNG]) # linspace yields floats; array indexing needs an int\n            cNE[gate][cNE[gate]<0]=np.nan\n            s1mask=np.isfinite(cNE[gate])\n            medWin=8\n            \n            # for EISCAT data\n            tempDat=cNE[gate][s1mask]\n            #tempDat[tempDat>1e12]=np.nan\n            #tempDat[tempDat<1.0e10]=np.nan\n            \n            ax.plot(runningMedian(cTimes[s1mask],medWin),runningMedian(tempDat,medWin), lw=3, marker='o',ls='-', color='blue')\n            ax.plot(runningMedian(TimesE,medWin), runningMedian(neE[rightALTs[iALT]], medWin), lw=3, marker='o',ls='-', color='gold')\n            ax.plot(TimesE, [IRIne[cIRIalt]]*len(TimesE), lw=3, color='red')\n            #ax.set_yscale('log')\n            ax.set_xlabel('Time')\n#            ax.set_ylim(1e10.8,1e11.2)\n            #ax.set_xlim(t1, t2)\n            ax.set_ylabel('Electron Density [m$^{-3}$]')\n            plt.legend(['SuperDARN', 'EISCAT', 'IRI'], bbox_to_anchor=(1.52, 0.7))\n            fig.set_size_inches(13,9)\n            subdir_name='Compare_EISCAT_CUTLASS'\n            if not os.path.exists(subdir_name):\n                os.umask(0) # unmask if necessary\n                os.makedirs(subdir_name) \n            os.chdir(subdir_name)#\n            plt.savefig(labels[iFREQ]+'_20150312Ne_alt='+str(altE[rightALTs[iALT]])+'_gate='+str(gate)+'.png')\n            os.chdir('..')\n            plt.close()\n","repo_name":"loisks317/SANSA-Papers-","sub_path":"compareNEDirectly.py","file_name":"compareNEDirectly.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"430749708","text":"# 1.- DEFINE VARIABLES\nacumulador = 0\ncontador = 1\n# 2.- REQUEST THE DATA\n# 3.- PROCESS THE INFO\nprint(\"\\n ********* Sum of numbers ********\")\nwhile contador <= 5:\n    numero = int(input(\"Enter the number:\"))\n    if numero > 0:\n        acumulador += numero\n    contador += 1  # contador = contador +1\n# 4.- SHOW THE RESULT\nprint(\"The sum is:\", acumulador)","repo_name":"patricioyanez/PGY1121_005","sub_path":"EA3/EjemploWhile3.py","file_name":"EjemploWhile3.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11804935880","text":"import random\n\nasci = \"\"\"\n\n ____ __ __ __ ______ _____ _ __ __ \n / __ \\___ ____ _/ / _ ______ _____/ /___/ / / ____/___ ______ / ___/(_)___ ___ __ __/ /___ _/ /_____ _____\n / /_/ / _ \\/ __ `/ / | | /| / / __ \\/ ___/ / __ / / / __/ __ `/ ___/ \\__ \\/ / __ `__ \\/ / / / / __ `/ __/ __ \\/ ___/\n / _, _/ __/ /_/ / / | |/ |/ / /_/ / / / / /_/ / / /_/ / /_/ (__ ) ___/ / / / / / / / /_/ / / /_/ / /_/ /_/ / / \n/_/ |_|\\___/\\__,_/_/ |__/|__/\\____/_/ /_/\\__,_/ \\____/\\__,_/____/ /____/_/_/ /_/ /_/\\__,_/_/\\__,_/\\__/\\____/_/ \n \n By Ishwak Sharda (2023)\n\n\n Welcome to this amazing gas simulator that lets you simulate\n the mileage consumption of your car or even airplane flights!\n\n Plan your fuel costs and usage in advance with this\n application and compare fuel prices with different \n petrol services in Canada.\n\n Furthermore, analyze how much fuel a flight trip to\n your favourite destination will consume!\n \n\"\"\"\nprint(asci)\n\n#flights = getFlights()\n#print(flightSimulation(\"b777\", 10000, 12.3))\n\n## ==================== MODELS =====================\n\ndef defineFlightRoute(company, plane, departure, arrival, distance, duration, flight_type):\n    return {\n        \"company\": company,\n        \"plane\": plane,\n        \"route\": [departure, arrival],\n        \"distance\": distance,\n        \"duration\": duration,\n        \"flight_type\": flight_type\n    }\n
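\n# Example (added): the call\n#   defineFlightRoute(\"Air Canada\", \"b777\", \"YYL\", \"LHR\", 5941, 6.9, \"international\")\n# returns {\"company\": \"Air Canada\", \"plane\": \"b777\", \"route\": [\"YYL\", \"LHR\"],\n#          \"distance\": 5941, \"duration\": 6.9, \"flight_type\": \"international\"}\n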
\ndef defineAirport(code, name, country, city):\n    return {\n        \"name\": name,\n        \"code\": code,\n        \"country\": country,\n        \"city\": city\n    }\n\ndef defineGasStation(eid, name, premiumCost, dieselCost, petrolCost):\n    return {\n        \"id\": eid,\n        \"name\": name,\n        \"premium_cost\": premiumCost,\n        \"diesel_cost\": dieselCost,\n        \"petrol_cost\": petrolCost\n    }\n\ndef defineCarRoute(departureCity, arrivalCity, distance, time, trafficPoints, trafficLightPoints, highwayPoints, cityPoints, roadConditionPoints):\n    return {\n        \"departureCity\": departureCity,\n        \"arrivalCity\": arrivalCity,\n        \"trafficPoints\": trafficPoints,\n        \"trafficLightPoints\": trafficLightPoints,\n        \"highwayPoints\": highwayPoints,\n        \"distance\": distance,\n        \"time\": time,\n        \"cityPoints\": cityPoints,\n        \"roadConditionPoints\": roadConditionPoints\n    }\n\ndef defineCar(id, manufacturer, model, engine, power, gear, seat, premium, city_millage, highway_millage, average_comb_city_highway, average_millage_per_100l, fuel_capacity, weight):\n    return {\n        \"id\": id,\n        \"manufacturer\": manufacturer,\n        \"model\": model,\n        \"engine\": engine,\n        \"power\": power,\n        \"gear\": gear,\n        \"seat\": seat,\n        \"premium\": premium,\n        \"city_millage\": city_millage,\n        \"highway_millage\": highway_millage,\n        \"average_comb_city_highway\": average_comb_city_highway,\n        \"average_millage_per_100l\": average_millage_per_100l,\n        \"fuel_capacity\": fuel_capacity,\n        \"weight\": weight\n    }\n\n\ndomesticFlights = []\ninternationalFlights = []\nairports = []\ncarRoutes = []\ngasStations = []\ncars = []\n\n## ==================== DATA GATHERING ====================\n\n\ndomesticFlights.append(defineFlightRoute(\"Wizz air\", \"b777\", \"YVL\", \"YUL\", 4000, 4.5, \"domestic\"))\ndomesticFlights.append(defineFlightRoute(\"Wizz air\", \"b787\", \"YVL\", \"YYL\", 3343, 4.2, \"domestic\"))\n\ndomesticFlights.append(defineFlightRoute(\"Air Canada\", \"a350\", \"YYL\", \"YUL\", 550, 0.9, \"domestic\"))\ndomesticFlights.append(defineFlightRoute(\"Flair air\", \"b737\", \"YVL\", \"YYC\", 950, 1.45, \"domestic\"))\ndomesticFlights.append(defineFlightRoute(\"Flair air\", \"b737\", \"YYL\", \"YYC\", 2705, 3.8, \"domestic\"))\ndomesticFlights.append(defineFlightRoute(\"Flair air\", \"b737\", \"YUL\", \"YYC\", 3006, 4.18, \"domestic\"))\n\ndomesticFlights.append(defineFlightRoute(\"Flair air\", \"b737\", \"YVL\", \"YOW\", 3552, 4.8, \"domestic\"))\n\n\n\n## calgary to vancouver\n## calgary to toronto\n## calgary to montreal\n## vancouver to montreal\n## vancouver to toronto\n\n\n\ninternationalFlights.append(defineFlightRoute(\"Emirates\", \"a380\", \"YYL\", \"DXB\", 10900, 12.75, \"international\")) # ok\ninternationalFlights.append(defineFlightRoute(\"Air India\", \"b777\", \"YYL\", \"DHX\", 11653, 13.5, \"international\")) # ok\ninternationalFlights.append(defineFlightRoute(\"Emirates\", \"a380\", \"YYL\", \"MXP\", 7400, 9.75, \"international\")) # ok\ninternationalFlights.append(defineFlightRoute(\"United\", \"b737\", \"YVL\", \"LAX\", 1804, 2.9, \"international\")) ## ok b737Max\ninternationalFlights.append(defineFlightRoute(\"Air Canada\", \"a350\", \"YVL\", \"MXP\", 7400, 10.5, \"international\")) #ok - check flight add a330\ninternationalFlights.append(defineFlightRoute(\"Air Canada\", \"b777\", \"YYL\", \"LHR\", 5941, 6.9, \"international\")) #ok\n
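\n# Note (added): distances in these route records appear to be kilometres and\n# durations decimal hours; treat the figures as illustrative sample data\n# rather than exact schedules.\n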
internationalFlights.append(defineFlightRoute(\"Air France\", \"b787\", \"CDG\", \"YVL\", 10680, 9.75, \"international\"))\ninternationalFlights.append(defineFlightRoute(\"ANA\", \"b787\", \"HND\", \"YVL\", 7845, 9.98, \"international\"))\ninternationalFlights.append(defineFlightRoute(\"American Airlines\", \"a350\", \"YYC\", \"LAX\", 6287, 5.3, \"international\"))\ninternationalFlights.append(defineFlightRoute(\"American Airlines\", \"b777\", \"YVL\", \"MXP\", 7400, 10.2, \"international\"))\n\n\n\n\n# seattle to los angeles\n# vancouver to los angeles\n# vancouver to london\n\n\n\nairports.append(defineAirport(\"YVL\", \"Vancouver International Airport\", \"Canada\", \"Vancouver\"))\nairports.append(defineAirport(\"YUL\", \"Montreal International Airport\", \"Canada\", \"Montreal\"))\nairports.append(defineAirport(\"YYL\", \"Toronto Pearson-Trudeau International Airport\", \"Canada\", \"Toronto\"))\nairports.append(defineAirport(\"MXP\", \"Malpensa International Airport\", \"Italy\", \"Milan\"))\nairports.append(defineAirport(\"DHX\", \"Indira Gandhi International Airport\", \"India\", \"New Delhi\"))\nairports.append(defineAirport(\"DXB\", \"Dubai International Airport\", \"United Arab Emirates\", \"Dubai\"))\n#airports.append(defineAirport(\"SEA\", \"Seattle International Airport\", \"United States of America\", \"Seattle\"))\nairports.append(defineAirport(\"LAX\", \"Los Angeles International Airport\", \"United States of America\", \"Los Angeles\"))\nairports.append(defineAirport(\"LHR\", \"Heathrow Airport\", \"United Kingdom\", \"London\"))\nairports.append(defineAirport(\"CDG\", \"Paris Charles de Gaulle Airport\", \"France\", \"Paris\"))\nairports.append(defineAirport(\"HND\", \"Haneda Airport\", \"Japan\", \"Tokyo\"))\nairports.append(defineAirport(\"YYC\", \"Calgary International Airport\", \"Canada\", \"Calgary\"))\nairports.append(defineAirport(\"YOW\", \"Ottawa International Airport\", \"Canada\", \"Ottawa\"))\n\n\n## calgary airport\n## los angeles\n## new york\n## japan\n## london\n\ncars.append(defineCar(\"audi_rsq8\", \"Audi\", \"RSQ8 2022\", \"Twin-turbocharged and intercooled DOHC 32-valve V-8\", 591,\"8-speed automatic\",5,True,11.9,17.5,15,6.7,85,2490))\ncars.append(defineCar(\"porsche_panamera_4s\", \"Porsche\", \"Panamera 4S\", \"6-cylinder\", 473, \"8-speed dual clutch automatic\", 5, True, 12.8, 9.8, 11.4, 5, 80, 2100))\ncars.append(defineCar(\"honda_civic\", \"Honda\", \"Civic 2022 Hatchback\", \"4 cylinder\", 129, \"5-speed automatic\", 5, False, 7.7, 6.3, 7.1, 3, 46, 1256))\ncars.append(defineCar(\"lamborgini_evo\", \"Lamborghini\", \"Huracán EVO Coupè\", \"DOHC 40-valve V-10\", 631, \"7-speed dual-clutch automatic\", 2, True, 18, 12.9, 15.7, 6.7, 83, 1339))\ncars.append(defineCar(\"volkswagen_golf\", \"Volkswagen\", \"Golf 2021\", \"4-cylinder\", 147, \"6-speed manual\", 5, False, 8.2, 6.5, 7.5, 3, 50, 1425))\n\n# road conditions: fog, mud, wet, bumpy, construction\ncarRoutes.append(defineCarRoute(\"University of the Fraser Valley (Abbotsford) (BC)\", \"Vancouver (BC)\", 70, 0.95, [[35, 45], [75, 100]], [[0, 8], [85, 100]], [[7.5, 65]], [[0, 7.5], [65, 100]], [[0, 5, \"mud\"]]))\ncarRoutes.append(defineCarRoute(\"Vancouver (BC)\", \"Seattle (USA)\", 156, 1.98, [[0, 20], [45, 65]], [[0, 20], [70, 100]], [[20, 70]], [[0, 20], [65, 100]], [[32,67, \"wet\"]]))\ncarRoutes.append(defineCarRoute(\"Toronto (Ontario)\", \"University of Montreal (Quebec)\", 543, 5.07, \n                                [[0, 8], [34, 39], [67, 73], [96, 100]], \n                                [[0, 8], [25, 27], [63,67], [90, 100]], \n                                [[10, 89]], \n                                [[0, 8], [90, 100]], \n                                [[35,37, \"construction\"]]\n))\n
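# Note (added): each interval list above is a set of [start%, end%] segments\n# of the route (isBetweenInterval later scales them by totalDistance/100);\n# road-condition entries carry a third element naming the hazard.\n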
carRoutes.append(defineCarRoute(\"Vancouver (BC)\", \"Kelowna (BC)\", 387, 4.2, [[0, 10], [89, 95]], [[0, 10], [90, 100]], [[20, 90]], [[0, 10], [90, 100]], [[40,76, \"bumpy\"]]))\ncarRoutes.append(defineCarRoute(\"University of the Fraser Valley (Abbotsford) (BC)\", \"Kelowna (BC)\", 325, 3.5, [[0, 1], [89, 90]], [[0, 2], [98, 100]], [[2, 96]], [[0, 2], [98, 100]], []))\ncarRoutes.append(defineCarRoute(\"Ottawa (Ontario)\", \"Toronto (Ontario)\", 410, 4.42, [[0, 8], [23, 45], [23, 78], [96, 100]], [[0, 10], [90, 98]], [[20, 89]], [[0, 10], [93, 100]], []))\n\n\n\n\ngasStations.append(defineGasStation(\"g1\", \"Esso\", 1.859, 1.789, 1.679))\ngasStations.append(defineGasStation(\"g2\", \"Petro-Canada\", 2.005, 2.029, 1.749))\ngasStations.append(defineGasStation(\"g3\", \"Shell\", 2.005, 2.029, 1.749))\ngasStations.append(defineGasStation(\"g4\", \"Chevron\", 1.92, 1.859, 1.689))\n\n\n\n\n\n\n## ===================== INPUTS =====================\n\nfactors = [\"protest\", \"accident_on_road\", \"construction\", \"muddy_road\"]\n\n\ndef getPassengers(maximum):\n    while(True):\n        try:\n            passenger = input(\"? Enter the number of passengers (max. \" + str(maximum)+ \"): \")\n            if not passenger:\n                return 1\n            \n            passenger = int(passenger)\n            if (passenger < 1 or passenger > maximum):\n                raise Exception(f\"Invalid passenger number. You entered {passenger} but it must be between 1 and \" + str(maximum))\n            \n            return passenger\n        except Exception as err:\n            print(err)\n\ndef getExcessiveCarrage():\n    while(True):\n        try:\n            excessive_carrage = input(\"? Enter the weight of the luggage (press enter or 0 if none): \")\n            if not excessive_carrage:\n                return 0\n            \n            excessive_carrage = int(excessive_carrage)\n            if (excessive_carrage < 0):\n                raise Exception(\"The weight cannot be negative.\")\n            \n            return excessive_carrage\n        except Exception as err:\n            print(err)\n\ndef getTowingWeight():\n    while(True):\n        try:\n            towingWeight = input(\"? Enter the weight (kg) of towing (press enter or 0 if none): \")\n            if not towingWeight:\n                return 0\n\n            towingWeight = int(towingWeight)\n            if (towingWeight < 0):\n                raise Exception(\"The weight cannot be negative.\")\n\n            return towingWeight\n        except Exception as err:\n            print(err)\n\ndef getTime():\n    while(True):\n        try:\n            time = input(\"? Enter the departure time (format HH:MMPM/AM such as 12:00AM): \")\n            formTime = timeFormat(time)\n\n            return formTime\n        except Exception as err:\n            print(err)\n    \ndef getInputWithComparisonList(text, array, default):\n    while(True):\n        try:\n            inp = input(text)\n            if not inp:\n                return default\n\n            if inp not in array:\n                raise Exception(\"The value you added is not inside the possible values of \"+str(array))\n            \n            return inp\n        except Exception as err:\n            print(err)\n\ndef getExperience():\n    while(True):\n        try:\n            years = input(\"? Enter the experience years: \")\n            if not years:\n                return 0\n            \n            years = int(years)\n            if (years < 0):\n                raise Exception(\"The experience cannot be negative.\")\n            \n            return years\n        except Exception as err:\n            print(err)\n\ndef getServiced():\n    while(True):\n        try:\n            service = input(\"? Is the car serviced (yes or no): \")\n            if not service or (service).lower() == \"no\":\n                return False\n            \n            if ((service).lower() == \"yes\"):\n                return True\n            else:\n                raise Exception(\"Invalid. Only yes or no\")\n            \n        except Exception as err:\n            print(err)\n
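\n# Example (added): getInputWithComparisonList(\"? Tyre condition: \", [\"good\", \"bad\"], \"good\")\n# re-prompts until the answer is one of the listed values; a plain Enter\n# returns the default.\n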
Only yes or no\")\n \n except Exception as err:\n print(err)\n\n## ===================== HELPERS ====================\n\n\ndef timeFormat(time):\n splitted = time.split(\":\")\n if (len(splitted) > 2):\n raise Exception(\"invalid.\")\n hours = int(splitted[0])\n if (hours > 12):\n raise Exception(\"Invalid hours.\")\n other = splitted[1:][0]\n \n minutes = int(other[:2])\n if (minutes > 60):\n raise Exception(\"Invalid minutes\")\n\n day_time = other[2:]\n\n if (day_time != \"PM\" and day_time != \"AM\" and day_time != \"am\" and day_time != \"pm\"):\n raise Exception(\"invalid day.\")\n\n if (day_time == \"PM\" or day_time == \"pm\"):\n hours+=12\n return [hours, minutes]\n \ndef isBetweenInterval(array, value, totalDistance):\n ## [[1,2], [10, 20]]\n \n for interval in (array):\n if len(interval) < 2:\n raise Exception(\"Invalid interval. The interval should be of format [a, b, ...n]\")\n if (value >= interval[0]/100*totalDistance and value <= interval[1]/100*totalDistance):\n return True\n return False\n\ndef isBetweenIntervalRoadConditions(array, value, totalDistance):\n ## [[1,2], [10, 20]]\n if len(array) == 0:\n return [False]\n for interval in (array):\n if len(interval) < 3:\n raise Exception(\"Invalid interval. The interval should be of format [a, b, type]\")\n if (value >= interval[0]/100*totalDistance and value <= interval[1]/100*totalDistance):\n return [True, interval[2]]\n return [False, interval[2]]\n\ndef generateRandomLight():\n randomLight = random.randint(0, 3)\n\n ## 60% probability of red light\n ## 20% probability of yellow light\n ## 20% probability of green light\n if (randomLight <=1):\n return \"red\"\n elif randomLight ==2:\n return \"yellow\"\n else:\n return \"green\"\n\ndef factorImpact(factor, average, delta):\n consumption = 0\n distanceIncrease = 0\n if factor == \"protest\":\n # car is stop, and is consuming 0.15 of the average consumption\n consumption = average/100*0.65*delta\n elif factor == \"construction\":\n ## change road or not\n randChange = random.randint(0, 1)\n if (randChange == 1):\n ## road changed, added more \n distanceIncrease+=5\n else:\n consumption+=average/100*delta\n elif factor == \"accident_on_road\":\n ## more consumption as the car has to ride slowly\n consumption+=average/100*1.2*delta\n elif factor == \"muddy_road\":\n consumption+=average/100*1.15*delta\n\n printImpact(factor, consumption, distanceIncrease)\n\n return {\n \"consumption\": consumption,\n \"distance\": distanceIncrease,\n \"factor\": factor\n }\n\ndef getCars():\n return cars\n\ndef getCar(code):\n for car in cars:\n if car[\"code\"] == code:\n return car\n\ndef findFactorIndex(arr, factor):\n for i in range(len(arr)):\n if arr[i][\"factor\"] == factor:\n return i\n return -1\n\ndef joinFactors(factorsImpact):\n arr = []\n\n for factor in factorsImpact:\n index = findFactorIndex(arr, factor[\"factor\"])\n if (index == -1):\n arr.append({\"factor\": factor[\"factor\"], \"consumption\": factor[\"consumption\"], \"distance\": factor[\"distance\"]})\n else:\n arr[index][\"consumption\"]+=factor[\"consumption\"]\n arr[index][\"distance\"]+=factor[\"distance\"]\n\n return arr\n\ndef generateRandomFactor(factors):\n rand = random.randint(0, len(factors)-1)\n return factors[rand]\n\ndef calculateInitialMillage(car, average, total_weight, tyreCondition, weatherCondition, timeOfYear, mood, experience, serviced):\n ## average affect by passengers\n averages = 0\n factors = 0\n \n averages += average * (car[\"weight\"] / total_weight)*1.1\n factors+=1\n\n # average affect 
by tyre condition\n if (tyreCondition == \"bad\"):\n averages += 0.9 * average ## affect 10%\n factors+=1\n\n # average affect by weather condition\n averages += (85 + random.random()*7)/100 * average # affect between 8-15%\n factors+=1\n\n\n if (not serviced):\n # a car that has not been serviced is affected by 4%\n averages += 0.04 * average\n factors+=1\n \n if (mood == \"bad\"):\n # a bad mood driver does effect the average by 7%\n averages += 0.07 * average\n factors+=1\n \n if (experience > 10):\n # an experienced driver increases the average by 25%!\n averages += 1.25 * average\n factors+=1\n\n else:\n # a noval driver increases the fuel consumptionby 11%\n averages += 0.89 * average\n factors+=1\n\n if (timeOfYear == \"winter\" or timeOfYear == \"fall\"):\n averages += 0.95 * average\n factors+=1\n else:\n print(\"No air conditioning\")\n\n if (weatherCondition == \"snow\" or weatherCondition == \"rainy\"):\n averages += 0.9 * average\n factors+=1\n \n if (weatherCondition == \"fog\"):\n averages += 0.94 * average\n factors+=1\n\n return ((averages)/factors)\n\n # average affect by time of the year\n \n\ndef findGasStations(total, premium = False):\n gasSt = []\n mini = 0\n best = gasStations[0]\n if premium:\n mini = gasStations[0][\"premium_cost\"]\n else:\n mini = gasStations[0][\"petrol_cost\"]\n for i in range((total)):\n gas = gasStations[i]\n price = 0\n if premium:\n price = gas[\"premium_cost\"]\n else:\n price = gas[\"petrol_cost\"]\n \n if price < mini:\n best = gas\n mini = price\n gasSt.append(gas)\n return [gasSt, best]\n\ndef printGasStations(stations, bestName):\n print(\"=\"*28 + \" GAS STATIONS \" + \"=\"*28)\n print(\"| %-2s | %-20s | %-12s | %-10s | %-10s |\" % (\"ID\", \"Station\", \"Premium (L)\", \"Diesel (L)\", \"Petrol (L)\"))\n print(\"| %-2s | %-20s | %-12s | %-10s | %-10s |\" % (\"\", \"\", \"\", \"\", \"\"))\n for i in range(len(stations)):\n gas = stations[i]\n if bestName == gas[\"name\"]:\n print(\"| %-2d | %-20s | %-12f | %-10f | %-10f |\" % (i+1, gas[\"name\"] + \" (best)\", gas[\"premium_cost\"], gas[\"diesel_cost\"], gas[\"petrol_cost\"]))\n else:\n print(\"| %-2d | %-20s | %-12f | %-10f | %-10f |\" % (i+1, gas[\"name\"], gas[\"premium_cost\"], gas[\"diesel_cost\"], gas[\"petrol_cost\"]))\n print(\"=\"*70)\n\ndef getCities():\n validSet = []\n for route in carRoutes:\n dep = route[\"departureCity\"]\n arr = route[\"arrivalCity\"]\n\n if dep not in validSet:\n validSet.append(dep)\n if arr not in validSet:\n validSet.append(arr)\n\n return validSet\n\ndef getRoutesFromCity(city):\n routes = []\n for route in carRoutes:\n dep = route[\"departureCity\"]\n arr = route[\"arrivalCity\"]\n\n if dep == city or arr == city:\n routes.append(route)\n return routes\n\n\n\n## ===================== PRINTS =====================\n\n\ndef printImpact(factor, fuel, distance):\n print()\n print(\"==========\"*2+ f\"=== HAZARD! 
==\" +\"==========\"*2)\n print(\"| %-50s|\" % (\"Type: \" + factor))\n if fuel > 0:\n print(\"| %-50s|\" % (\"Hazzard fuel impact: \" + str(round(fuel, 2)) + \" Litre\"))\n\n if distance > 0 :\n print(\"| %-50s|\" % (\"Hazzard distance impact: \" + str(round(distance, 2)) + \" km\"))\n\n print(\"==========\"*2+ \"==============\" + \"==========\"*2)\n print()\n\ndef printTrafficLight(color, fuel):\n print()\n print(\"===========\"*2+ f\"{color}\" +\"==========\"*2)\n if fuel > 0:\n print(\"| %-50s|\" % (\"Fuel impact: \" + str(round(fuel, 2)) + \" Litre\"))\n print(\"==========\"*2+ \"==============\" + \"==========\"*2)\n print()\n\n\n## =================== CAR MAIN =====================\n\ndef carPrompts():\n cities = getCities()\n cars = getCars()\n\n print(\"Here are the recorded cities: \")\n for i in range(len(cities)):\n city = cities[i]\n print(f\"\\t [{i+1}]: {city}\")\n\n print()\n departureCityInt = int(input(f\"? Enter the departure city number (1 to {len(cities)}): \"))\n departureCity = cities[departureCityInt-1]\n print(\"You selected:\", departureCity)\n print()\n\n print(\"Here is the list of destinations:\")\n routes = getRoutesFromCity(departureCity)\n print()\n print(\"==%-2s===%-22s= DESTINATIONS ==%-14s===%-12s\" % (\"=\"*2, \"=\"*22, \"=\"*14, \"=\"*12))\n print(\"| %-2s | %-40s | %-10s | %-10s |\" % (\"ID\", \"Destination\", \"Distance\", \"Time\"))\n for i in range(len(routes)):\n route = routes[i]\n arr = 0\n if route[\"arrivalCity\"] == departureCity:\n arr = route[\"departureCity\"]\n else:\n arr = route[\"arrivalCity\"]\n \n print(\"| %-2d | %-40s | %-10s | %-10s |\" % (i+1, arr, str(route[\"distance\"]) + \" km\", str(route[\"time\"]) + \" hr\"))\n\n print(\"==%-2s===%-22s===============%-14s===%-10s\" % (\"=\"*2, \"=\"*26, \"=\"*14, \"=\"*10))\n print()\n routeInt = int(input(f\"? Enter the route ID (1 to {len(routes)}): \"))\n route = routes[routeInt-1]\n \n\n destinationCity = \"\"\n if route[\"arrivalCity\"] == departureCity:\n destinationCity = route[\"departureCity\"]\n else:\n destinationCity = route[\"arrivalCity\"]\n \n print(\"You selected route: \", f\"{departureCity} to {destinationCity}\")\n print()\n print(\"It is time to select the vehicle. Here is a list of available vehicles:\")\n\n print(\"==%-2s===%-22s= CARS ==%-37s===%-7s\" % (\"=\"*2, \"=\"*22, \"=\"*37, \"=\"*7))\n print(\"| %-2s | %-40s | %-25s | %-5s |\" % (\"ID\", \"Model\", \"Average fuel (L / 100km)\", \"Seat\"))\n for i in range(len(cars)):\n car = cars[i]\n print(\"| %-2d | %-40s | %-25s | %-5s |\" % (i+1, car[\"manufacturer\"] + \" \" + car[\"model\"], str(car[\"average_comb_city_highway\"]), str(car[\"seat\"])))\n\n print(\"==%-2s===%-22s=========%-37s===%-7s\" % (\"=\"*2, \"=\"*22, \"=\"*37, \"=\"*7))\n print()\n\n\n carInt = int(input(f\"? 
Select your car (1 to {len(cars)}): \"))\n car = cars[carInt-1]\n\n print(\"=\"*47 + \" YOUR CAR \" + \"=\"*47)\n print(\"| %-17s | %-80s |\" % (\"Manufacturer\", car[\"manufacturer\"]))\n print(\"| %-17s | %-80s |\" % (\"Model\", car[\"model\"]))\n print(\"| %-17s | %-80s |\" % (\"Engine\", car[\"engine\"]))\n print(\"| %-17s | %-80s |\" % (\"Power\", str(car[\"engine\"])+ \" hp\"))\n print(\"| %-17s | %-80s |\" % (\"Seat\", car[\"seat\"]))\n print(\"| %-17s | %-80s |\" % (\"City millage\", str(round(car[\"city_millage\"], 2)) + \" L / 100km\"))\n print(\"| %-17s | %-80s |\" % (\"Highway millage\", str(round(car[\"highway_millage\"], 2)) + \" L / 100km\"))\n print(\"| %-17s | %-80s |\" % (\"Average millage\", str(round(car[\"average_comb_city_highway\"], 2)) + \" L / 100km\"))\n print(\"| %-17s | %-80s |\" % (\"Fuel capacity\", str(round(car[\"fuel_capacity\"], 2)) + \" L\"))\n print(\"| %-17s | %-80s |\" % (\"Weight\", str(car[\"weight\"]) + \" kg\"))\n print(\"=\"*20 + \"==========\" + \"=\"*74)\n print()\n\n \n begin = input(\"Type 'start' to begin with the simulation: \")\n if begin.lower() == 'start':\n print()\n\n print(\"=== BEGINNING WITH THE SIMULATION ===\")\n print()\n return carSimulation(car, destinationCity, route[\"distance\"], route[\"time\"], route[\"trafficPoints\"], route[\"trafficLightPoints\"], route[\"highwayPoints\"], route[\"cityPoints\"], route[\"roadConditionPoints\"])\n \n ## print a summary of the route\n\n\ndef carSimulation(car, destination, distance, duration, trafficIntervals, trafficLightIntervals, highwayIntervals, cityIntervals, roadConditionsIntervals):\n\n\n passengers = getPassengers(car[\"seat\"])\n print(\"Passengers on car: \" + str(passengers))\n print()\n\n excessive_carrage = getExcessiveCarrage()\n print(\"Excessive carriage on car: \" + str(excessive_carrage))\n print()\n\n towingWeight = getTowingWeight()\n print(\"Towing carriage on car: \" + str(towingWeight))\n print()\n\n ## if peak hours, apply all trafficPoints. 
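# The surrounding note suggests applying every traffic interval during peak
# hours and only a random subset (up to ~30%) otherwise. Below is a minimal
# sketch of that idea, assuming the [hours, minutes] list returned by getTime()
# and the module-level random import used elsewhere in this file; the names
# isPeakHour/selectTrafficIntervals and the 7-9 AM / 4-6 PM windows are
# illustrative assumptions, not part of the original program.
def isPeakHour(departureTime):
    hours = departureTime[0]
    return 7 <= hours <= 9 or 16 <= hours <= 18

def selectTrafficIntervals(trafficIntervals, departureTime):
    # keep all intervals at peak times, otherwise sample roughly 30% of them
    if isPeakHour(departureTime):
        return trafficIntervals
    keep = max(1, round(len(trafficIntervals) * 0.3))
    return random.sample(trafficIntervals, min(keep, len(trafficIntervals)))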
If not, only some random (max 30%)\n departureTime = getTime()\n print(\"Departure time is \" + str(departureTime))\n print()\n\n tyreConditions = [\"good\", \"bad\"]\n tyreCondition = getInputWithComparisonList(\"Enter the tyre condition \" + str(tyreConditions) + \": \", tyreConditions, \"good\") ## or good or mildy\n print(\"Tyre condition: \", tyreCondition)\n print()\n\n typeOfYears = [\"winter\", \"summer\", \"fall\", \"autumn\"]\n timeOfYear = getInputWithComparisonList(\"Enter the season \" + str(typeOfYears) + \": \", typeOfYears, \"fall\") ## or summer or fall \n print(\"Season: \", timeOfYear)\n print()\n\n weatherConditions = [\"snow\", \"rain\", \"fog\", \"sun\"]\n weatherCondition = getInputWithComparisonList(\"Enter the weather condition \" + str(weatherConditions) + \": \", weatherConditions, \"sun\") ## or raining, sun, fog\n print(\"Weather condition: \", weatherCondition)\n print()\n\n moods = [\"angry\", \"relaxed\", \"cheerful\"]\n mood = getInputWithComparisonList(\"Enter the driver's mood \" + str(moods) + \": \", moods, \"relaxed\") ## or angry \n print(\"Mood: \", mood)\n print()\n\n\n experience = getExperience()\n print(\"Experience: \" + str(experience))\n print()\n\n\n serviced = getServiced()\n print(\"Serviced: \", serviced)\n print()\n\n total_weight = car[\"weight\"] + passengers * 75 + excessive_carrage + towingWeight\n\n # calculate the millage from the initial conditions\n averageCityMillage = calculateInitialMillage(car, car[\"city_millage\"], total_weight, tyreCondition, weatherCondition, timeOfYear, mood, experience, serviced)\n averageHighwayMillage = calculateInitialMillage(car, car[\"highway_millage\"], total_weight, tyreCondition, weatherCondition, timeOfYear, mood, experience, serviced)\n averageCombinationMillage = calculateInitialMillage(car, car[\"average_comb_city_highway\"], total_weight, tyreCondition, weatherCondition, timeOfYear, mood, experience, serviced)\n #averageMillagePer100 = calculateInitialMillage(car, car[\"average_millage_per_100l\"], total_weight, tyreCondition, weatherCondition, timeOfYear, mood, experience, serviced)\n print()\n print()\n print(\"=========\"*2 + \"INITIAL CALCULATIONS\" + \"==========\"*2)\n print(\"| %-30s | %-22s |\" % (\"Average city millage: \", str(round(averageCityMillage, 2)) + \" Litre / 100 km\"))\n print(\"| %-30s | %-22s |\" % (\"Average highway millage: \", str(round(averageHighwayMillage, 2)) + \" Litre / 100 km\"))\n print(\"| %-30s | %-22s |\" % (\"Average combined millage: \", str(round(averageCombinationMillage, 2)) + \" Litre / 100 km\"))\n print(\"=========\"*2 + \"===================\" + \"==========\"*2)\n print()\n print()\n\n distance_travelled = 0\n consumed = 0\n delta = 5\n\n totalRedLights = 0\n totalConsumptionForRedLights = 0\n\n totalYellowLights = 0\n totalConsumptionForYellowLights = 0\n\n totalGreenLights = 0\n\n totalConsumptionForRoadCondition = 0\n\n totalConsumptionInCity = 0\n totalConsumptionInHighway = 0\n totalConsumptionInGeneralRoad = 0\n\n totalDistanceInCity = 0\n totalDistanceInHighway = 0\n totalDistanceInGeneralRoad = 0\n \n factorsImpact = []\n\n while(distance_travelled < distance):\n distance_travelled+=delta\n print(\"_\"*40)\n print(\"Distance travelled: \", distance_travelled, \"km\")\n print(\"Total consumed: \", round(consumed, 2), \"Litre\")\n print()\n print()\n\n ## TODO: if total condumed so far > fuel capactiy, get 2 random petrol pumps and add them into an arrya. 
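# A hedged sketch of the refuelling TODO above: once the running total passes
# another full tank, pick two random stations and remember them so their prices
# can be shown at the end. The names refuelStops/planRefuelStop are illustrative
# assumptions; gasStations and the car dict's "fuel_capacity" key come from the
# original code.
def planRefuelStop(consumed, car, refuelStops):
    # one stop per full tank consumed so far
    if consumed >= car["fuel_capacity"] * (len(refuelStops) + 1):
        refuelStops.append(random.sample(gasStations, 2))
    return refuelStops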
at the end we will show the prices.\n\n isTraffic = isBetweenInterval(trafficIntervals, distance_travelled, distance)\n isTrafficLight = isBetweenInterval(trafficLightIntervals, distance_travelled, distance)\n isHighway = isBetweenInterval(highwayIntervals, distance_travelled, distance)\n isCity = isBetweenInterval(cityIntervals, distance_travelled, distance)\n isRoadCondition = isBetweenIntervalRoadConditions(roadConditionsIntervals, distance_travelled, distance)\n\n if (isCity or isTraffic or isTrafficLight):\n # be more precise in the cities\n if delta != 1:\n print(\"========== ENTERING CITY: MAKEING THE SIMULATION MORE PRECISE BY DECREASING THE DELTA ==========\")\n delta = 1\n else:\n if delta != 5:\n print(\"========== EXITING CITY: MAKEING THE SIMULATION MORE WIDE BY INCREASING THE DELTA ==========\")\n delta = 5\n\n\n\n if (isRoadCondition[0]):\n condition = isRoadCondition[1]\n road_condition_consumption = 0\n if (condition == \"fog\"):\n road_condition_consumption+=0.1\n print(condition, \" consumption: \", road_condition_consumption)\n\n elif (condition == \"mud\"):\n road_condition_consumption+=0.2\n print(condition, \"consumption: \", road_condition_consumption)\n\n elif (condition == \"wet\"):\n road_condition_consumption+=0.08\n print(condition, \"consumption: \", road_condition_consumption)\n\n elif condition == \"bumpy\":\n road_condition_consumption+=0.16\n print(condition, \"consumption: \", road_condition_consumption)\n\n elif condition == \"construction\":\n road_condition_consumption+=0.24\n print(condition, \"consumption: \", road_condition_consumption)\n\n else:\n pass\n\n totalConsumptionForRoadCondition+=road_condition_consumption\n consumed+=road_condition_consumption\n ## we are in the traffic\n ## TODO: traffic between 7:9\n if isTraffic:\n local_consumption = 0\n ## +1 km for millage\n dlt = (delta + 1)\n if (isHighway):\n print(\"========== TRAFFIC ON HIGHWAY ==========\")\n local_consumption = averageHighwayMillage/100*dlt\n totalConsumptionInHighway+=local_consumption\n totalDistanceInHighway+=dlt\n elif isCity:\n print(\"========== TRAFFIC IN CITY ==========\")\n local_consumption = averageCityMillage/100*dlt\n totalConsumptionInCity+=local_consumption\n totalDistanceInCity+=dlt\n else:\n print(\"========== TRAFFIC INTERVAL ==========\")\n local_consumption = averageCombinationMillage/100*dlt\n totalConsumptionInGeneralRoad+=local_consumption\n totalDistanceInGeneralRoad+=dlt\n \n consumed+=local_consumption ## add the traffic millage consumption: +2 km more\n \n ## Traffic light\n if isTrafficLight:\n print(\"=========\"*2+ \"= TRAFFIC LIGHT AREA\" + \"========\"*2)\n\n light = generateRandomLight()\n\n\n # TODO: simulate traffic light\n local_consumption = 0\n # average stop at traffic light: 0.1L/minute\n if (isCity):\n totalDistanceInCity+=delta\n if (light == \"red\"):\n local_consumption = averageCityMillage/100*(delta) + 0.19 # consume 0.1L for each red light as waiting time is 1 minute\n totalConsumptionForRedLights+=local_consumption\n totalRedLights+=1\n printTrafficLight(\"= Red light \", local_consumption)\n elif (light == \"yellow\"):\n local_consumption = averageCityMillage/100*(delta) + 0.07 # consume 0.1L for each red light as waiting time is 1 minute\n totalConsumptionForYellowLights+=local_consumption\n totalYellowLights+=1\n printTrafficLight(\"Yellow light \", local_consumption)\n else:\n printTrafficLight(\"Green light =\", 0)\n totalGreenLights+=1\n \n else:\n if (light == \"red\"):\n local_consumption = 
averageCombinationMillage/100*(delta) + 0.1\n totalConsumptionForRedLights+=local_consumption\n totalRedLights+=1\n printTrafficLight(\"= Red light \", local_consumption)\n elif light == \"yellow\":\n local_consumption = averageCombinationMillage/100*(delta) + 0.05\n totalConsumptionForYellowLights+=local_consumption\n totalYellowLights+=1\n printTrafficLight(\"Yellow light \", local_consumption)\n else:\n totalGreenLights+=1\n printTrafficLight(\"Green light =\", 0)\n\n consumed+=local_consumption\n totalConsumptionInCity+=local_consumption\n totalDistanceInCity+=delta\n \n if isHighway and not isTraffic and not isTrafficLight:\n print(\"========== ON HIGHWAY ==========\")\n totalDistanceInHighway+=delta\n randPerc = random.randint(0, 9)\n\n # 30% chances of a factor affecting\n if (randPerc < 3):\n ## TODO: make tis random!\n eventImpact = factorImpact(generateRandomFactor(factors), averageHighwayMillage, delta)\n factorsImpact.append(eventImpact)\n \n consumed+=eventImpact[\"consumption\"]\n distance+=eventImpact[\"distance\"]\n\n totalConsumptionInHighway+=eventImpact[\"consumption\"]\n else:\n consumed+=averageHighwayMillage/100*delta\n totalConsumptionInHighway+=averageHighwayMillage/100*delta\n ## TODO: ADD RANOM FACTORS: ACCTIDENT, CHANGE OF ROUTE\n \n\n \n if isCity and not isTraffic and not isTrafficLight:\n print(\"On road to city\")\n totalDistanceInCity+=delta\n randPerc = random.randint(0, 9)\n\n # 30% chances of a factor affecting\n if (randPerc < 3):\n eventImpact = factorImpact(generateRandomFactor(factors), averageCityMillage, delta)\n factorsImpact.append(eventImpact)\n consumed+=eventImpact[\"consumption\"]\n distance+=eventImpact[\"distance\"]\n\n totalConsumptionInCity+=eventImpact[\"consumption\"]\n else:\n consumed+=averageCityMillage/100*delta\n totalConsumptionInCity+=averageCityMillage/100*delta\n\n if (not isTraffic and not isTrafficLight and not isCity and not isHighway):\n ## TODO: ADD RANOM FACTORS: ACCTIDENT, CHANGE OF ROUTE\n totalDistanceInGeneralRoad+=delta\n randPerc = random.randint(0, 9)\n\n # 30% chances of a factor affecting\n if (randPerc < 3):\n eventImpact = factorImpact(generateRandomFactor(factors), averageCombinationMillage, delta)\n factorsImpact.append(eventImpact)\n \n consumed+=eventImpact[\"consumption\"]\n distance+=eventImpact[\"distance\"]\n\n totalConsumptionInGeneralRoad+=eventImpact[\"consumption\"]\n else:\n consumed+=averageCombinationMillage/100*delta\n totalConsumptionInGeneralRoad+=averageCombinationMillage/100*delta\n\n\n spacing = \"| %-42s | %10s %-17s |\"\n print()\n print()\n print()\n print(\"=\"*32 + \" SUMMARY TABLE \" + \"=\"*30)\n print(\"=\"*38 + \"=============\" + \"=\"*26)\n print(spacing % (\"\", \"\", \"\"))\n print(spacing % (\"Passengers\", passengers, \"\"))\n print(spacing % (\"Destination\", destination, \"\"))\n print(spacing % (\"Weather\", weatherCondition, \"\"))\n print(spacing % (\"\", \"\", \"\"))\n print(spacing % (\"Red lights\", totalRedLights, \"\"))\n print(spacing % (\"Red lights consumption\", round(totalConsumptionForRedLights, 2), \"Litre\"))\n\n print(spacing % (\"Yellow lights\", totalYellowLights, \"\"))\n print(spacing % (\"Yellow lights consumption\", round(totalConsumptionForYellowLights, 2), \"Litre\"))\n print(spacing % (\"Green lights\", totalGreenLights, \"\"))\n\n\n print(spacing % (\"\", \"\", \"\"))\n print(spacing % (\"Consumption for road conditions\", round(totalConsumptionForRoadCondition, 2), \"Litre\"))\n print(spacing % (\"\", \"\", \"\"))\n\n print(spacing % 
(\"Consumption in city\", round(totalConsumptionInCity, 2), \"Litre\"))\n print(spacing % (\"Consumption in highway\", round(totalConsumptionInHighway, 2), \"Litre\"))\n print(spacing % (\"Comsumption on general road\", round(totalConsumptionInGeneralRoad, 2), \"Litre\"))\n print(spacing % (\"\", \"\", \"\"))\n\n print(spacing % (\"Travel distance in city\", totalDistanceInCity, \"km\"))\n print(spacing % (\"Travel distance on highway\", totalDistanceInHighway, \"km\"))\n print(spacing % (\"Travel distance in general road\", totalDistanceInGeneralRoad, \"km\"))\n print(spacing % (\"\", \"\", \"\"))\n\n\n if (totalDistanceInCity > 0):\n print(spacing % (\"Final average consumption on city\", round(totalConsumptionInCity /totalDistanceInCity * 100, 2), \"Litre / 100 km\"))\n if (totalDistanceInHighway > 0):\n print(spacing % (\"Final average consumption on highway\", round(totalConsumptionInHighway /totalDistanceInHighway * 100, 2), \"Litre / 100 km\"))\n if totalDistanceInGeneralRoad > 0:\n print(spacing % (\"Final average consumption on general road\", round(totalConsumptionInGeneralRoad /totalDistanceInGeneralRoad * 100, 2), \"Litre / 100 km\"))\n\n print(spacing % (\"\", \"\", \"\"))\n print(spacing % (\"Total overall consumption\", round(consumed, 2), \"Litre\"))\n print(spacing % (\"Overall average after consumption\", round(consumed / distance * 100, 2), \"Litre / 100 km\"))\n print(spacing % (\"Overall distance travelled\",distance, \"km\"))\n print(\"=\"*38 + \"=============\" + \"=\"*26)\n\n print()\n\n\n factors_compressed = joinFactors(factorsImpact)\n print(\"Consumption because of random factors\")\n print(\"=\"*30 + \" FACTORS CONSUMPTION \" + \"=\"*31)\n print(\"=\"*36 + \"=====================\" + \"=\"*25)\n \n\n spacing = \"| %-42s | %-15s | %-15s |\"\n print(spacing % (\"Factor\", \"Consumption\", \"Distance\"))\n for factor in factors_compressed:\n print(spacing % (factor[\"factor\"], str(round(factor[\"consumption\"], 2)) + \" Litre\", str(round(factor[\"distance\"], 2)) + \" km\"))\n print(\"=\"*82)\n\n\n\n return {\n \"red_lights\": totalRedLights,\n \"red_light_consumption\": totalConsumptionForRedLights,\n \"yellow_lights\": totalYellowLights,\n \"yellow_lights_consumption\": totalConsumptionForYellowLights,\n \"road_conditions_consumption\": totalConsumptionForRoadCondition,\n \"city_consumption\": totalConsumptionInCity,\n \"highway_consumption\": totalConsumptionInHighway,\n \"general_road_consumption\": totalConsumptionInGeneralRoad,\n \"city_distance\": totalDistanceInCity,\n \"highway_distance\": totalDistanceInHighway,\n \"general_road_distance\": totalDistanceInGeneralRoad,\n \"total_consumption\": consumed,\n \"final_distance\": distance,\n \"factors\": factorsImpact,\n \"factors_compressed\": joinFactors(factorsImpact),\n \"car\": car\n }\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## PLANE\n\n## ============================= GATHERING DATA =============================\n\na350 = {\n \"code\": \"a350\",\n \"name\": \"Airbus A350\",\n\n \"average_consumption\": 11.2, # L/hour/passenger,\n \"ground_consumption\": 0.27,\n \"take_off_consumption\": 12.6,\n \"max_passengers\": 325,\n\n \"max_other_weight\": 0.3,\n \"max_travel_distance\": 16_600,\n \"max_luggage\": 2,\n \"max_luggage_weight\": 23, # kg\n \"max_luggage_and_passenger_weight\": 50100, # kg\n \"max_fuel_capacity\": 164_000, # L\n \"max_total_weight\": 283_900, # kg\n \"headwind_consumption\": 0.245, # perc\n \"tailwind_consumption\": -0.1, # perc , negative is efficient\n \"max_other_weight_perc\": 
100,\n \"taxing_fuel_usage\": 260/0.8, # # If 1 kilogram of water is 1 litre, 0.8 kilograms of jet fuel is 1 litre.\n \"snow_weight\": 100,\n \"max_speed\": 950,\n \"empty_flight_fuel\": 2485\n}\n\na380 = {\n \"code\": \"a380\",\n \"name\": \"Airbus A380\",\n\n \"average_consumption\": 16.21, # L/hour/passenger, the calculation is based on the fact that a plane consumes around 11000L per hour. And given that an average weight of a passenger and cargo together can be counted for 150 each, we found the calculation\n \"ground_consumption\": 0.4,\n \"take_off_consumption\": 18.2,\n \"max_passengers\": 615,\n\n \"max_other_weight\": 0.4,\n \"max_travel_distance\": 15_288,\n \"max_luggage\": 2,\n \"max_luggage_weight\": 30, # kg\n \"max_luggage_and_passenger_weight\": 98_376, # kg\n \"max_fuel_capacity\": 320_000, # L\n 'max_total_weight': 757_000, # kg\n \"headwind_consumption\": 0.3, # perc\n \"tailwind_consumption\": -0.1, # perc , negative is efficient\n \"snow_weight\": 200,\n \"taxing_fuel_usage\": 450/0.8, # kg\n \"max_other_weight_perc\": 0.19*0.79, # pass + lugg + cargo in perc\n \"max_speed\": 1185,\n \"empty_flight_fuel\": 3246\n} \n\nb777 = {\n \"code\": \"b777\",\n \"name\": \"Boeing 777-300ER\",\n\n \"average_consumption\": 12.7, # L/hour/passenger,\n \"ground_consumption\": 0.3,\n \"take_off_consumption\": 13.2,\n \"max_passengers\": 400,\n\n \"max_other_weight\": 0.27, # pass + lugg + cargo in perc\n \"max_travel_distance\": 16_600,\n \"max_luggage\": 2,\n \"max_luggage_weight\": 30, # kg\n \"max_luggage_and_passenger_weight\": 39_000, # kg\n \"max_fuel_capacity\": 171_350, # L\n \"max_total_weight\": 299_371, # kg\n \"headwind_consumption\": 0.21, # perc\n \"tailwind_consumption\": -0.15, # perc , negative is efficient\n \"snow_weight\": 100,\n \"max_other_weight_perc\": 0.15 ,# pass + lugg + cargo in perc\n \"taxing_fuel_usage\": 210/0.8, # kg\n \"max_speed\": 905,\n \"empty_flight_fuel\": 2735\n\n}\n\nb787 = {\n \"code\": \"b787\",\n \"name\": \"Boeing 787-8 Dreamliner\",\n\n \"average_consumption\": 10.7, # L/hour/passenger,\n \"ground_consumption\": 0.25,\n \"take_off_consumption\": 11.2,\n \"max_passengers\": 246,\n\n \"max_other_weight\": 0.34, # pass + lugg + cargo in perc\n \"max_travel_distance\": 14_140,\n \"max_luggage\": 2,\n \"max_luggage_weight\": 23, # kg\n \"max_luggage_and_passenger_weight\": 34_100, # kg\n \"max_fuel_capacity\": 156_461, # L\n \"max_total_weight\": 224_837, # kg\n \"headwind_consumption\": 0.178, # perc\n \"tailwind_consumption\": -0.19, # perc , negative is efficient\n \"snow_weight\": 86,\n \"max_other_weight_perc\": 0.19 ,# pass + lugg + cargo in perc\n \"taxing_fuel_usage\": 189/0.8, # kg\n \"max_speed\": 1085,\n \"empty_flight_fuel\": 2286\n\n}\n\nb737 = {\n \"code\": \"b737\",\n \"name\": \"Boeing 737MAX 8-Passenger\",\n\n \"average_consumption\": 12.698, # L/hour/passenger,\n \"ground_consumption\": 0.2,\n \"take_off_consumption\": 15.2,\n \"max_passengers\": 189,\n\n \"max_other_weight\": 0.34, # pass + lugg + cargo in perc\n \"max_travel_distance\": 6_510,\n \"max_luggage\": 1,\n \"max_luggage_weight\": 23, # kg\n \"max_luggage_and_passenger_weight\": 25_580, # kg\n \"max_fuel_capacity\": 96_461, # L\n \"max_total_weight\": 184_837, # kg\n \"headwind_consumption\": 0.148, # perc\n \"tailwind_consumption\": -0.28, # perc , negative is efficient\n \"snow_weight\": 79,\n \"max_other_weight_perc\": 0.19 ,# pass + lugg + cargo in perc\n \"taxing_fuel_usage\": 154/0.8, # kg\n \"max_speed\": 876,\n \"empty_flight_fuel\": 1759 # L / hour = 
fuel of the aircraft only\n}\n\n\n\nflights = {\n \"a380\": a380,\n \"b777\": b777,\n \"a350\": a350,\n \"b787\": b787,\n \"b737\": b737\n}\n\n\n## ============================== INPUT METHODS ==============================\n\ndef timeFormat(time):\n splitted = time.split(\":\")\n if (len(splitted) > 2):\n raise Exception(\"Invalid time format.\")\n hours = int(splitted[0])\n if (hours > 12 or hours < 1):\n raise Exception(\"Invalid hours.\")\n other = splitted[1:][0]\n \n minutes = int(other[:2])\n if (minutes >= 60):\n raise Exception(\"Invalid minutes\")\n\n day_time = other[2:]\n\n if (day_time != \"PM\" and day_time != \"AM\" and day_time != \"am\" and day_time != \"pm\"):\n raise Exception(\"Invalid AM/PM indicator.\")\n\n # convert to a 24-hour clock: 12AM becomes hour 0, 12PM stays 12\n if ((day_time == \"PM\" or day_time == \"pm\") and hours != 12):\n hours+=12\n if ((day_time == \"AM\" or day_time == \"am\") and hours == 12):\n hours = 0\n return [hours, minutes]\n\n## GET TIME INPUT WITH FORMAT\ndef getTime():\n while(True):\n try:\n time = input(\"? Enter the departure time (format HH:MMPM/AM such as 12:00AM): \")\n formTime = timeFormat(time)\n\n return formTime\n except Exception as err:\n print(err)\n\n## GET TOTAL PASSENGERS ON THE PLANE\ndef getPassengers(maximum):\n while(True):\n try:\n passenger = input(\"? Enter the number of passengers (max. \" + str(maximum)+ \"): \")\n if not passenger:\n return 1\n \n passenger = int(passenger)\n if (passenger < 0 or passenger > maximum):\n raise Exception(f\"Invalid number of passengers. You entered {passenger}, but it must be between 1 and \" + str(maximum))\n \n return passenger\n except Exception as err:\n print(err)\n\n## GET EXCESSIVE CARGO CARRIAGE\ndef getExcessiveCarrage():\n while(True):\n try:\n excessive_carrage = input(\"? Enter the weight (kg) of the luggage (press enter or 0 if none): \")\n if not excessive_carrage:\n return 0\n \n excessive_carrage = int(excessive_carrage)\n if (excessive_carrage < 0):\n raise Exception(\"The weight cannot be negative.\")\n \n return excessive_carrage\n except Exception as err:\n print(err)\n\n## GET THE WIND DIRECTION INPUT\ndef getWindDirection():\n while(True):\n try:\n inp = input(\"? Enter the wind direction (head or tail): \")\n if not inp or inp == \"tail\":\n return \"tail_wind\"\n\n if inp == \"head\":\n return \"head_wind\"\n\n raise Exception(\"The wind direction can be head or tail\")\n except Exception as err:\n print(err)\n\n## GET A POSITIVE VALUE\ndef getPositiveInt(inputText, errorText):\n while(True):\n try:\n inp = input(inputText)\n if not inp:\n return 0\n \n inp = int(inp)\n if (inp < 0):\n raise Exception(errorText)\n \n return inp\n except Exception as err:\n print(err)\n\n## GET AVERAGE LUGGAGE WEIGHT\ndef getAverageLuggage(maximum):\n while(True):\n try:\n inp = input(f\"? Enter the average weight (kg) of the luggage (max. {maximum}kg): \")\n if not inp:\n return 0\n \n inp = int(inp)\n if (inp < 0 or inp > maximum):\n raise Exception(f\"The luggage weight must be between 0 and {str(maximum)}\")\n \n return inp\n except Exception as err:\n print(err)\n\n## GET FEMALE RATIO\ndef getRatio():\n while(True):\n try:\n inp = input(\"? 
Enter the female ratio (between 0 and 1, a float number such as 0.4 = 40% women): \")\n if not inp:\n return 0\n \n inp = float(inp)\n if (inp < 0 or inp > 1):\n raise Exception(\"Invalid ratio: the value must be between 0 and 1\")\n \n return inp\n except Exception as err:\n print(err)\n\n## GET AN INPUT FROM A LIST OF POSSIBLE VALUES\ndef getInputWithComparisonList(text, array, default):\n while(True):\n try:\n inp = input(text)\n if not inp:\n return default\n\n if inp not in array:\n raise Exception(\"The value you added is not inside the possible values of \"+str(array))\n \n return inp\n except Exception as err:\n print(err)\n\ndef getAirportCode( airports, type):\n while True:\n try:\n airportSelect = input(f\"? Enter the {type} airport code: \")\n isValid = checkIfValidCode(airportSelect, airports)\n\n if not isValid:\n raise Exception(\"You entered an invalid code. We could not find any airport with that code.\")\n \n return airportSelect\n except Exception as err: \n print(err)\n\ndef checkIfValidCode(code, airports):\n for airport in airports:\n if airport[\"code\"] == code:\n return True\n return False\n\n\n## ============================== LOGIC METHODS ==============================\ndef getDomesticAirports():\n arr = []\n valid = []\n for domesticRoute in domesticFlights:\n dep = domesticRoute[\"route\"][0]\n for airport in airports:\n if airport[\"code\"] == dep and dep not in valid:\n arr.append(airport)\n valid.append(dep)\n \n dep = domesticRoute[\"route\"][1]\n for airport in airports:\n if airport[\"code\"] == dep and dep not in valid:\n arr.append(airport)\n valid.append(dep)\n return arr\n\ndef getAirportInfo(code):\n for airport in airports:\n if code == airport[\"code\"]:\n return airport\n return \"\"\n\ndef getFlightsFromAirport(flights, airport):\n validFlights = []\n for flight in flights:\n if airport in flight[\"route\"]:\n if flight[\"route\"][0] != airport:\n if flight[\"route\"][0] not in validFlights:\n validFlights.append(flight[\"route\"][0])\n else:\n if flight[\"route\"][1] not in validFlights:\n validFlights.append(flight[\"route\"][1])\n return validFlights\n\ndef findFlight(routes, departure, arrival):\n validRoutes = []\n for route in routes:\n if departure in route[\"route\"] and arrival in route[\"route\"]:\n validRoutes.append(route)\n return validRoutes\n\n## ======================= HELPERS =======================\n\n\n# GET A RANDOM FLOAT VALUE BETWEEN A AND B\ndef randomDouble(a, b):\n return a + random.random()*(b - a)\n\n## RETURN THE LIST OF FLIGHTS\ndef getFlights():\n return flights\n\n## COMPUTE AND FIND ANOTHER FLIGHT THAT SATISFIES THE FOLLOWING CONDITIONS\ndef findAnotherFlight(field, weight):\n possibleFlights = []\n for flight in flights:\n if flights[flight][field] >= weight:\n possibleFlights.append(flight)\n\n return possibleFlights\n\n\n## ============================== PRINT METHODS ==============================\n\n# PRINT THE RESULT OF AN IMPACT IN A TABLE\ndef printImpact(typee, fuel, distance):\n if fuel == 0 and distance == 0:\n print(\"No hazzard impact.\")\n else:\n print()\n print(\"==========\"*2+ \"IMPACT SUMMARY\" +\"==========\"*2)\n print(\"| %-50s|\" % (\"Type: \" + typee))\n print(\"| %-50s|\" % (\"Hazzard fuel impact: \" + str(round(fuel, 2)) + \" Litre\"))\n print(\"| %-50s|\" % (\"Hazzard distance impact: \" + str(round(distance, 2)) + \" km\"))\n print(\"==========\"*2+ \"==============\" + \"==========\"*2)\n print()\n\n# PRINT THE FLIGHT INFORMATION\ndef printFlight(flight):\n print(\"\"\"\n 
----------------------\n |AIRCRAFT INFORMATION|\n ----------------------\n \"\"\")\n print(\"\\tCode: \", flight[\"code\"])\n print(\"\\tAircraft: \", flight[\"name\"])\n print()\n print(\"\\tAverage consumption: \", flight[\"average_consumption\"], \"Litre per hour per passenger\")\n print(\"\\tGround consumption: \", flight[\"ground_consumption\"], \"Litre per hour per passenger\")\n print(\"\\tTake-off consumption: \", flight[\"take_off_consumption\"], \"Litre per hour per passenger\")\n print(\"\\tHead wind consumption: \", flight[\"headwind_consumption\"]*100, \"%\")\n print(\"\\tTail wind consumption: \", flight[\"tailwind_consumption\"]*100, \"%\")\n print()\n print(\"\\tMaximum passengers: \", flight[\"max_passengers\"])\n print(\"\\tMaximum extra weight percentage: \", flight[\"max_other_weight\"]*100)\n print(\"\\tMaximum travel distance: \", flight[\"max_travel_distance\"], \"km\")\n print(\"\\tMaximum luggage weight: \", flight[\"max_luggage_weight\"], \"kg\")\n print(\"\\tMaximum luggage allowance: \", flight[\"max_luggage\"])\n print(\"\\tMaximum passengers and luggage weight: \", flight[\"max_luggage_and_passenger_weight\"], \"kg\")\n print(\"\\tMaximum fuel capacity: \", flight[\"max_fuel_capacity\"], \"Litre\")\n print(\"\\tMaximum overall weight: \", flight[\"max_total_weight\"], \"kg\")\n print(\"\\tMaximum speed: \", flight[\"max_speed\"], \"km / h\")\n\n# GET HAZZARDS DESCRIPTION FROM HAZZARD CODE\ndef flightHazzardText(hazzard):\n if hazzard == \"deicing\":\n return (\"🚨 The flight went through the process of deicing. Flight is delayed and ground fuel is consumed.\")\n elif hazzard == \"engine_startup_problem\":\n return (\"🚨 There were some problems with starting the plane engine. Flight is delayed and ground fuel is consumed.\")\n elif hazzard == \"ventilation_problem\":\n return (\"🚨 Cabin ventilation malfunction. Flight is delayed and ground fuel is consumed.\")\n elif hazzard == \"tail_wind\":\n return (\"🚨 Air coming from the back of the plane. Helps with fuel consumption and take off.\")\n elif hazzard == \"litiguous_on_plane\":\n return (\"🚨 There was a fight onboard the plane while it was on the ground. Flight delayed.\")\n elif hazzard == \"medical\":\n return (\"🚨 Medical emergency on land. Fuel is consumed as the motors are active on the ground, and the flight is delayed.\")\n elif hazzard == \"head_wind\":\n return (\"🚨 Head wind on the runway. More power is required for takeoff, resulting in excessive fuel usage.\")\n elif hazzard == \"no_runway_available\":\n return (\"🚨 Could not allocate the runway: the plane needs to cover a longer distance and delays are expected\")\n elif hazzard == \"bad_runway\":\n return (\"🚨 Bad runway: more fuel consumed because of the inertia.\")\n elif hazzard == \"thunderstorm_land\":\n return (\"🚨 There was a thunderstorm on land: fuel consumed as the flight is waiting with active motors.\")\n elif hazzard == \"raining_land\":\n return (\"🚨 It is raining on land: fuel consumed as the flight is waiting with active motors.\")\n elif hazzard == \"fight_air\":\n return (\"🚨 SEVERE DISRUPTION! The flight had to make an emergency landing at the previous airport. Some passengers were fighting. This affects fuel for the return.\")\n elif hazzard == \"medical_emergency\":\n return (\"🚨 SEVERE DISRUPTION! The flight had to make a medical emergency landing at the previous airport. Someone is seriously ill. This affects fuel for the return.\")\n elif hazzard == \"bird_strike\":\n return (\"🚨 SEVERE DISRUPTION! The flight had to make an emergency landing at the previous airport. A number of birds damaged the engine. This affects fuel for the return.\")\n\n elif hazzard == \"engine_malfunctioning\":\n return (\"🚨 SEVERE DISRUPTION! The flight had to make an emergency landing at the previous airport. The engine stopped working. This affects fuel for the return.\")\n elif hazzard == \"thunderstorm_air\":\n return (\"🚨 SEVERE DISRUPTION! Severe storm. Flight delayed and additional distance added because of the route change.\")\n elif hazzard == \"raining_air\":\n return (\"🚨 SEVERE DISRUPTION! Severe rain. Flight delayed and additional distance added because of the route change.\")\n elif hazzard == \"no_taxiing\":\n return (\"🚨 Problem! No taxiing slot found. The flight is waiting on the ground, consuming fuel.\")\n elif hazzard == \"tailed_landing\":\n return (\"🚨 Plane failed to land. Making a U-turn to try again. Fuel and distance consumed.\")\n elif hazzard == \"round_turn\":\n return (\"🚨 Runway was occupied, required taking a round turn.\")\n elif hazzard == \"failed_landing\":\n return \"🚨 The flight failed the attempt to land.\"\n\n\n\n\ndef printFlightRouteSmall(route):\n dep = route[\"route\"][0]\n arr = route[\"route\"][1]\n airportInfoDeparture = getAirportInfo(dep)\n airportInfoArrival = getAirportInfo(arr)\n\n\n print()\n print(\"Departure code: \", dep)\n print(\"Departure airport: \", airportInfoDeparture[\"name\"], \"(\" + airportInfoDeparture[\"country\"] + \")\")\n print(\"Arrival code: \", arr)\n print(\"Arrival airport: \", airportInfoArrival[\"name\"], \"(\" + airportInfoArrival[\"country\"] + \")\")\n\ndef printFlightRoute(route, i):\n airportInfoDeparture = getAirportInfo(route[\"route\"][0])\n airportInfoArrival = getAirportInfo(route[\"route\"][1])\n planes = getFlights()\n plane = planes[route[\"plane\"]]\n\n print(\"=\"*33 + \"AIRCRAFT PROVIDER #\" + str(i) + \"=\"*34)\n print(\"| %-20s | %-60s |\" % (\"Company\", route[\"company\"]))\n print(\"| %-20s | %-60s |\" % (\"Plane\", plane[\"name\"]))\n print(\"| %-20s | %-60s |\" % (\"Airport 1\", airportInfoDeparture[\"name\"]))\n print(\"| %-20s | %-60s |\" % (\"Airport 2\", airportInfoArrival[\"name\"]))\n print(\"| %-20s | %-60s |\" % (\"Distance\", str(route[\"distance\"])+\" km\"))\n print(\"| %-20s | %-60s |\" % (\"Duration\", str(route[\"duration\"])+\" hours\"))\n print(\"| %-20s | %-60s |\" % (\"Flight type\", route[\"flight_type\"]))\n print(\"=\"*87)\n print()\n\ndef printFlightRoutes(routes):\n for i in range(len(routes)):\n route = routes[i]\n printFlightRoute(route, i+1)\n\ndef printAirport(airport):\n print(\"=\"*35 + \"AIRPORT\" + \"=\"*35)\n print(\"| %-20s | %-50s |\" % (\"Code\", airport[\"code\"]))\n print(\"| %-20s | %-50s |\" % (\"Name\", airport[\"name\"]))\n print(\"| %-20s | %-50s |\" % (\"City\", airport[\"city\"]))\n print(\"| %-20s | %-50s |\" % (\"Country\", airport[\"country\"]))\n print(\"=\"*77)\n print()\n\ndef printAirportWithCode(code):\n airport = getAirportInfo(code)\n printAirport(airport)\n\n## ============================== MAIN ==============================\n\ndef flightPrompts():\n while True:\n try:\n\n flightType = input(\"? 
Enter 'international' for international flights or 'domestic': \")\n if ((flightType).lower() != \"international\" and (flightType).lower() != \"domestic\"):\n raise Exception(\"Invalid choice: 'international' or 'domestic' possible only.\")\n print(\"Flight type: \", flightType)\n\n #TODO: show planes\n ## show flight options\n \n routes = 0\n local_airports = {}\n if (flightType == \"international\"):\n '''\n for route in internationalFlights:\n printFlightRouteSmall(route)\n '''\n \n routes = internationalFlights\n local_airports = airports\n else:\n '''\n for route in domesticFlights:\n printFlightRouteSmall(route)\n '''\n routes = domesticFlights\n local_airports = getDomesticAirports()\n\n print()\n \n # showind the list of airports to depart from\n for airport in local_airports:\n \n printAirport(airport)\n ## Get the departure airport code\n departureAirport = getAirportCode(local_airports, \"departure\")\n\n # showind the list of flights departing from the departure airport\n print()\n print(\"Here is the list of flights from that airport: \")\n arrivalAirports = (getFlightsFromAirport(routes, departureAirport))\n for arrivalCode in arrivalAirports:\n printAirportWithCode(arrivalCode)\n\n\n ## getting the arrival airport input\n arrivalAirport = getAirportCode(airports, \"arrival\")\n ## checking if the arrival airport exists\n\n '''\n departureAirport = input(\"? Enter the departure airport code: \")\n arrivalAirport = input(\"? Enter the arrival airport code: \")\n '''\n\n ## checking if a valid route can be enstablished. Meaning that the user has inputted correctly.\n validFlights = findFlight(routes, departureAirport, arrivalAirport)\n if (len(validFlights) == 0):\n raise Exception(\"No flight found. try again.\")\n\n ## Selecting the airline\n selection = 0\n print()\n print(\"HERE ARE THE FLIGHTS THAT WE FOUND\")\n printFlightRoutes(validFlights)\n if (len(validFlights) == 1):\n pass\n ## continue loop\n else:\n selection = int(input(\"? Multiple airlines found. Select one (between 1 and \" + str(len(validFlights)) + \"): \"))\n if (selection > len(validFlights) or selection < 1):\n raise Exception(\"Invalid value.\")\n \n route = validFlights[selection-1]\n if not route:\n print(\"An error occured. Could not enstablish a route\")\n\n ## returning the route for simulation\n return route\n except Exception as err:\n print(err)\n print(\"Restarting the app...\")\n \n# MAIN FLIGHT SIMULATION THAT REQUIRED 3 PARAMERS\n## FLIGHT_NAME = FLEET CODE SUCH AS b777 FOR A BOEING 777\n## DESTINATION_DISTANCE = HOW FAR IS THE DESTINATION\n## FLIGHT_DURATION = HOW LONG WILL THE FLIGHT TAKE TO REACH THE DESTINATION\ndef flightSimulation(flight_name, destination_distance, flight_duration):\n\n plane = flights[flight_name]\n\n ## TODO: check for input validation\n\n #input passengers\n passengers = getPassengers(plane[\"max_passengers\"])\n print(\"Passengers: \", passengers)\n print()\n\n #input luggage\n luggage = getPositiveInt(\"? Enter the number of luggage: \", \"The onboard luggage value is invalid.\")\n print(\"Total luggage: \", str(luggage))\n print()\n\n #input average mass luggage\n average_luggage_mass=getAverageLuggage(plane[\"max_luggage_weight\"])\n print(\"Average luggage weight: \", str(average_luggage_mass))\n print()\n\n #input extra luggage\n onboard_luggage = getPositiveInt(\"? 
Enter the number of onboard luggage: \", \"The onboard luggage value is invalid.\")\n print(\"Onboard luggage: \", str(onboard_luggage))\n print()\n\n #input extra cargo\n extra_cargo_weight = getPositiveInt(\"? Enter the cargo weight(kg): \", \"Invalid cargo weight\")\n print(\"Cargo weight: \", str(extra_cargo_weight))\n print()\n\n # female ration\n female_male_ratio = getRatio()\n print(\"Female ratio: \", str(female_male_ratio))\n \n male = 1 - female_male_ratio\n print(\"Male ratio: \", str(male))\n print()\n\n total_passenger_luggage_weight = male * passengers * 88 + female_male_ratio * passengers * 67 + luggage * average_luggage_mass + onboard_luggage * 8\n gas_required_per_hour = (passengers + luggage/3+extra_cargo_weight/75+onboard_luggage/8) * plane[\"average_consumption\"] + plane[\"empty_flight_fuel\"]\n\n ## CHECKING \n ## Choose another flight if the current one cannot satisfy the need.\n print(\"Check #1: Total luggage and passenger weight:\", total_passenger_luggage_weight, \"kg\")\n if (total_passenger_luggage_weight >= plane[\"max_luggage_and_passenger_weight\"]):\n print(\"❌ Excessive passengers and/or luggage. There is no enought space in the plane. Maximum possible:\", plane[\"max_luggage_and_passenger_weight\"], \"kg\")\n\n possibleFlights = findAnotherFlight(\"max_luggage_and_passenger_weight\", total_passenger_luggage_weight)\n if (len(possibleFlights) == 0):\n print(\"There is no flight that can satisfy the weight. We are sorry. This flight was the biggest available.\")\n return\n print()\n print(\"The flight options are: \")\n for flight in possibleFlights:\n printFlight(flights[flight])\n \n print()\n code = input(\"Please enter the flight code: \")\n new_plane = flights[code]\n if not new_plane:\n print(\"Invalid code. Terminating program.\")\n return\n \n plane = new_plane\n\n print()\n print(\"New aircraft selected: \")\n printFlight(plane)\n print()\n \n \n ## CHECKING IF TOTOAL WEIGHT CAN BE CARRIED\n print(\"Result #1: Flight possible.\")\n print()\n print(\"Check #2: Total overall weight:\", total_passenger_luggage_weight+extra_cargo_weight, \"kg\")\n\n if (total_passenger_luggage_weight+extra_cargo_weight >= plane[\"max_other_weight\"] * plane[\"max_total_weight\"]):\n print(\"❌ Flight is overweighted. The flight cannot carry this much weight\")\n\n possibleFlights = findAnotherFlight(\"max_other_weight\", total_passenger_luggage_weight+extra_cargo_weight)\n if (len(possibleFlights) == 0):\n print(\"There is no flight that can satisfy the weight. We are sorry.\")\n return\n print()\n print(\"The flight options are: \")\n for flight in possibleFlights:\n printFlight(flights[flight])\n \n print()\n code = input(\"Please enter the flight code: \")\n new_plane = flights[code]\n if not new_plane:\n print(\"Invalid code. Terminating program.\")\n return\n \n plane = new_plane\n\n print()\n print(\"New aircraft selected: \")\n printFlight(plane)\n print()\n\n \n ## CHECK IF THE FUEL CONSUMPTION IS POSSIBLE GIVEN THE TOTAL FUEL\n print(\"Result #2: Flight possible.\")\n print()\n print(\"Check #3: Fuel consumption\", gas_required_per_hour * flight_duration, \"Litre\")\n if (gas_required_per_hour * flight_duration >= plane[\"max_fuel_capacity\"]):\n print(\"❌Insufficient fuel capacity. The flight cannot make to the destination. 
Required: \", gas_required_per_hour * flight_duration)\n\n possibleFlights = findAnotherFlight(\"max_fuel_capacity\", gas_required_per_hour * flight_duration)\n if (len(possibleFlights) == 0):\n print(\"There is no flight that can satisfy the fuel. We are sorry.\")\n return\n print()\n print(\"The flight options are: \")\n for flight in possibleFlights:\n printFlight(flights[flight])\n \n print()\n code = input(\"Please enter the flight code: \")\n new_plane = flights[code]\n if not new_plane:\n print(\"Invalid code. Terminating program.\")\n return\n \n plane = new_plane\n\n print()\n print(\"New aircraft selected: \")\n printFlight(plane)\n print()\n\n \n print(\"Result #3: Flight possible.\")\n total_passenger_luggage_weight += extra_cargo_weight\n\n print()\n # departure time\n departureTime = getTime()\n print(\"Departure time: \", str(departureTime))\n print()\n\n # input weather\n weatherConditions = [\"snow\", \"rain\", \"fog\", \"sun\"]\n weather = getInputWithComparisonList(\"? Enter the weather condition \" + str(weatherConditions) + \": \", weatherConditions, \"sun\") ## or raining, sun, fog\n print(\"Weather condition: \", weather)\n print()\n\n\n #input wind direction\n windConditions = [\"head_wind\", \"tail_wind\"]\n wind_direction = getWindDirection()# or tail\n print(\"Wind direction: \", wind_direction)\n print()\n\n # DISPLAYING INITIAL CALULATIONS\n print(\"=\"*37 + \"INITIAL CALCULATION\" + \"=\"*37)\n print(\"| %-40s | %20s %-8s |\" % (\"Total weight\", round(total_passenger_luggage_weight, 2), \"kg\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Gas required per hour\", round(gas_required_per_hour, 2), \"Litre/hour\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total gas for travel\", round(gas_required_per_hour * flight_duration, 2), \"Litre\"))\n print(\"=\"*86)\n\n\n\n ## DEFINING THE LIST OF HAZZARDS: EACH TYPE OF ROUTE HAS ITS OWN HAZZARDS\n hazzards_on_land_snow = [\"deicing\", \"engine_startup_problem\", \"ventilation_problem\", \"tail_wind\"]\n #[\"snow\", \"attack\", \"bad_passenger\", \"medical\", \"engine_problem\", \"air_conditioning_problem\", \"late_passenger\", \"no_runway\", \"deicing\"]\n\n hazzards_on_land = [\"litiguous_on_plane\", \"medical\", \"thunderstorm_land\", \"raining_land\"]\n\n hazzards_on_runway = [\"no_runway_available\", \"bad_runway\"]\n\n hazzards_in_air = [\"fight_air\", \"medical_emergency\", \"bird_strike\", \"engine_malfunctioning\", \"thunderstorm_air\", \"raining_air\"]\n\n hazzards_on_landing = [\"no_taxiing\", \"bad_runway\"]\n hazzards_on_before_landing = [\"failed_landing\", \"round_turn\"]\n\n ## run every 10km\n ## after 200km, skip by 50km because in air\n\n total_fuel_hazzards = 0\n total_distance_hazzards = 0\n distance_travelled = 0\n air_hazzards = 0\n runway_hazzards = 0\n total_hazzards = []\n\n ## HANDING IMPACT OF EACH HAZZARD. 
\n ## IT CAN IMPACT ON THE FUEL, DISTANCE, WEIGHT OR DELAY\n def calculateImpactOfHazzard(hazzard, distance_travelled):\n additional_fuel = 0\n additional_distance = 0\n additional_weight = 0\n flight_delay = 0\n print(flightHazzardText(hazzard))\n\n \n ## delay, additional fuel\n if hazzard == \"deicing\" or hazzard == \"ventilation_problem\" or hazzard == \"engine_startup_problem\":\n delay = randomDouble(0.3, 1.7)\n flight_delay+=delay\n additional_fuel+=(plane[\"taxing_fuel_usage\"] * delay)\n \n # delay and additional fuel\n elif hazzard == \"medical\":\n delay = randomDouble(0.3, 2.5)\n flight_delay+=delay\n additional_fuel+=plane[\"taxing_fuel_usage\"]\n \n # additional fuel and distance increased\n elif hazzard == \"head_wind\":\n additional_distance+=20\n additional_fuel+=1.1*gas_required_per_hour*0.50 # for the 30 minutes from takeoff to stability, consumption is about 10% higher than the average fuel burn\n \n elif hazzard == \"tail_wind\":\n additional_fuel-=1.1*gas_required_per_hour*0.50 # for the 30 minutes from takeoff to stability, the tail wind saves about 10% of the average fuel burn\n # could not allocate the runway: needs to take longer distance and delays are expected\n \n elif hazzard == \"no_runway_available\":\n additional_distance = 10\n additional_fuel+=0.05 * gas_required_per_hour\n flight_delay = randomDouble(0.1, 0.9) # from 0.1 hour to 0.9 hours\n \n elif hazzard == \"litiguous_on_plane\":\n ## flight gets delayed\n flight_delay+=randomDouble(0.1, 0.4)\n \n elif hazzard == \"fight_air\":\n # there was a fight while the flight was in the air\n ## consequence: emergency descent to the previous airport, requiring more fuel\n ## we can consider that the flight returns to the original airport and does not descend to the nearest airport, for simplicity\n after = randomDouble(0.05, 0.2) # between 5% and 20% of the flight duration\n additional_distance+=distance_travelled*2\n flight_delay+=flight_duration*after*2\n \n elif hazzard == \"medical_emergency\":\n\n ## TODO: insert petrol \n after = randomDouble(0.05, 0.4) # between 5% and 40% of the flight duration\n additional_distance+=distance_travelled*2\n flight_delay+=flight_duration*after*2\n \n elif hazzard == \"bird_strike\":\n after = randomDouble(0.1, 0.2) # between 10% and 20% of the flight duration\n additional_distance+=distance_travelled*2\n flight_delay+=flight_duration*after*2\n \n elif hazzard == \"engine_malfunctioning\":\n after = randomDouble(0.1, 0.35) # between 10% and 35% of the flight duration\n additional_distance+=distance_travelled*2\n flight_delay+=flight_duration*after*2\n \n elif hazzard == \"failed_landing\":\n # descending: consuming less gas\n additional_fuel-=0.8*gas_required_per_hour*0.2\n # ascending again\n additional_fuel+=1.2*gas_required_per_hour*0.15\n additional_distance+=200\n \n # NO TAXIING FOUND: FUEL FOR WAITING AND MORE DELAY FOR PASSENGERS\n elif hazzard == \"no_taxiing\":\n delay = randomDouble(0.3, 1.5)\n flight_delay+=delay\n additional_fuel+=plane[\"taxing_fuel_usage\"]\n \n # THUNDERSTORM: CHANGE OF DIRECTION: MORE DISTANCE AND DELAY\n elif hazzard == \"thunderstorm_air\" or hazzard == \"raining_air\":\n randDistance = randomDouble(120, 500)\n flight_delay+=randDistance/plane[\"max_speed\"] # delay time = extra distance / max speed in air\n additional_distance+=randDistance\n \n ## THUNDERSTORM ON LAND: DELAY, FUEL CONSUMPTION AND HEAVY WEIGHT WHEN LIFTING OFF\n elif hazzard == \"thunderstorm_land\" or hazzard == \"raining_land\":\n headwindHazzard = 
calculateImpactOfHazzard(\"head_wind\", distance_travelled)\n additional_distance+=headwindHazzard[\"distance\"]\n additional_fuel+=headwindHazzard[\"fuel\"]\n additional_weight+=headwindHazzard[\"weight\"]\n flight_delay+=headwindHazzard[\"delay\"]\n additional_fuel+=100\n \n ## BAD RUNWAY: MORE 15% CONSUMPTION \n elif hazzard == \"bad_runway\":\n additional_fuel+=0.2*gas_required_per_hour*0.15\n \n ## ROUND TURN : FAILED LANDING ATTEMPT: MORE DISTANCE TO COVER\n elif hazzard == \"round_turn\":\n randDistance = randomDouble(50, 150)\n additional_distance+=randDistance\n \n else:\n print(\"Hazzard not recognized in the system.\")\n\n print()\n printImpact(hazzard, additional_fuel, additional_distance)\n\n ## RETURNING THE CALCULATION RESULTS\n result = {\n \"hazzard\": hazzard,\n \"fuel\": additional_fuel,\n \"weight\": additional_weight,\n \"distance\": additional_distance,\n \"delay\": flight_delay\n }\n ## APPENDING TO THE LIST OF HAZZARDS\n total_hazzards.append(result)\n return result\n\n \n\n ## GENERATING A BUNCH OF RANDOM HAZZARDS AND CALCULATE THE HAZZARD\n def generateRandomFactorAndCalculateImpact(hazzards, total):\n randHazzards = random.randint(0, total)\n print(\"Randomizing impact result: \", randHazzards)\n\n ## CARRY TOTAL IMPACT PER SIMULATION\n total_distance = 0\n total_fuel = 0\n total_delay = 0\n total_weight = 0\n\n ## LOOP FOR A RANDOM VALUE BETWEEN O AND TOTAL RANDOM HAZZARDS FOUND\n for i in range(randHazzards):\n if len(hazzards) == 0:\n break\n # OTHER RANDOMIZATION FOR THE PROBABILITY OF HAVING THE HAZZARD\n randFactor = random.randint(0, len(hazzards)-1)\n ## FINDINT THE HAZZARD CODE FROM THE LIST\n hazzard = hazzards[randFactor]\n \n ## REMOVE THE HAZZARD FROM THE LIST: WILL NOT HAPPENA AGAIN\n hazzards.pop(randFactor)\n\n # check for the type of hazzard\n impact = calculateImpactOfHazzard(hazzard, distance_travelled)\n total_distance+=impact[\"distance\"]\n total_fuel+=impact[\"fuel\"]\n total_delay+=impact[\"delay\"]\n total_weight+=impact[\"weight\"]\n\n #print(f\"{impact}\")\n\n ## NOT HAZZARD FOUND\n if (total_fuel == 0 and total_distance == 0):\n print(\"No hazzard impact.\")\n\n ## RETURNING THE IMPACT\n total_impact = {\n \"distance\": total_distance,\n \"fuel\": total_fuel,\n \"delay\": total_delay,\n \"weight\": total_weight\n }\n #print(\"Total impact after generation: \", total_impact)\n return total_impact\n\n\n # impact because of wind direction\n ## INITIAL IMPACT\n impact = calculateImpactOfHazzard(wind_direction, distance_travelled)\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n \n ## LOOP UNTIL DESTINATION IS NOT REACHED\n while(distance_travelled < destination_distance):\n print()\n print()\n ## HAZZARDS ON LAND\n if (distance_travelled < 10):\n ## hazzards on land\n impact = {}\n print(\"_\"*70)\n\n ## GENERATING HAZZARS ON LAND WITH SNOW\n if (weather == \"snow\"):\n print()\n print(\"GENERATING RANDOM HAZZARDS FOR SNOW WEATHER\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_on_land_snow, 3)\n else:\n ## GENERATING HAZZARS ON LAND WITHOUT SNOW\n print()\n print(\"GENERATING RANDOM HAZZARDS FOR LAND\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_on_land, 3)\n \n ## UPDATE FULE AND DISTANCE\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n \n ## TAKEOFF: 10-30KM\n elif (distance_travelled > 10 and distance_travelled < 30):\n ## hazzards on runway\n ## TODO: make the time function\n ## RENDER HAZZARDS ONLY IF THERE IS HIGH PEEK IN 
FLIGHTS\n if (runway_hazzards < 1 and departureTime[0] >= 4 and departureTime[0] <= 10):\n print(\"_\"*70)\n print()\n print(\"GENERATING RANDOM HAZZARDS FOR TAKE-OFF\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_on_runway, 1)\n\n\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n \n # HAZZARDS IN AIR: 30KM TO 60% OF TOTAL TRAVEL DISTANCE\n elif (distance_travelled > 30 and distance_travelled < 0.6*destination_distance):\n # hazzards in air\n ## HAZZARDS ARE LIMITED TO 2 HAZZARDS IN AIR ONLY\n ## THIS IS BECAUSE AIR HAZZARS HAVE A BIG IMPACT ON FUEL, SUCH AS RETURN TO THE PREVIOUS AIRPORT\n if (air_hazzards < 2):\n print(\"_\"*70)\n print()\n print(\"GENERATING RANDOM HAZZARDS IN AIR\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_in_air, 2)\n\n\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n air_hazzards+=1\n \n ## LANDING HAZZARDS\n elif (distance_travelled>0.9*destination_distance and distance_travelled < 0.95 * destination_distance):\n # on landing\n # peak times between 9:00AM AND 12:00PM OR 14:00 OR 16:00\n print(\"_\"*70)\n print()\n print(\"GENERATING RANDOM HAZZARDS FOR BEFORE LANDING\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_on_before_landing, 2)\n\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n \n\n # taxiing HAZZARS\n elif (distance_travelled>0.95*destination_distance):\n print(\"_\"*70)\n print()\n print(\"GENERATING RANDOM HAZZARDS FOR TAXING AFTER LAND\")\n impact = generateRandomFactorAndCalculateImpact(hazzards_on_landing, 2)\n\n total_fuel_hazzards+=impact[\"fuel\"]\n total_distance_hazzards+=impact[\"distance\"]\n \n\n \n \n print(\"-\"*70)\n print(\"Approximate fuel consumption based on 80-percent average speed:\", round(gas_required_per_hour * (distance_travelled / (plane[\"max_speed\"]*0.8))+total_fuel_hazzards + gas_required_per_hour * (total_distance_hazzards / (plane[\"max_speed\"]*0.8)), 2), \"Litre\")\n print(\"Distance travelled:\", distance_travelled, \"km\")\n print()\n\n ## HOW PRECISE SHOULD THE OUTPUT BE\n ## FOR FIRST 200 KM, SKIP BY 7.5KM PER LOOP\n if (distance_travelled < 200):\n distance_travelled+=7.5\n else:\n ## SKIP BY 20 KM PER LOOP\n distance_travelled+=30\n \n print()\n print()\n\n ## FINAL CALCULATIONS OF THE DISTANCE TO FUEL, OVERALL FUEL AND FUEL WEIGHT\n distance_to_fuel = gas_required_per_hour * (total_distance_hazzards / (plane[\"max_speed\"]*0.8))\n total_overall_consumption = (gas_required_per_hour * (distance_travelled / (plane[\"max_speed\"]*0.8))+total_fuel_hazzards + gas_required_per_hour * (total_distance_hazzards / (plane[\"max_speed\"]*0.8))) ## gas_required_per_hour * flight_duration + distance_to_fuel ## \n fuel_weight = total_overall_consumption * 0.84 # 1L = 0.84 kg\n\n ## DISPLAYING FINAL RESULT IN A TABLE\n print(\"=\"*37 + \"=============\" + \"=\"*26)\n print(\"=\"*37 + \"SUMMARY TABLE\" + \"=\"*26)\n print(\"| %-40s | %20s %-8s |\" % (\"Total duration\", round(flight_duration), \"hours\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total weight\", round(total_passenger_luggage_weight, 2), \"kg\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Gas per hour\", round(gas_required_per_hour, 2), \"Litre\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total fuel for original distance\", round(gas_required_per_hour * flight_duration, 2), \"Litre\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total fuel consumed by hazzards\", round(total_fuel_hazzards, 2), \"Litre\"))\n 
print(\"| %-40s | %20s %-8s |\" % (\"Total distance increase\", round(total_distance_hazzards, 2), \"km\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total fuel because of distance increase\", round(distance_to_fuel, 2), \"Litre\"))\n print(\"| %-40s | %20s %-8s |\" % (\"Total fuel overall\", round(total_overall_consumption, 2), \"Litre\"))\n \n print(\"=\"*75)\n\n print()\n print(\"List of total hazzards that happened in the simulation.\")\n print()\n ## RENDERING THE LIST OF HAZZARS\n for hazzard in total_hazzards:\n \n print(flightHazzardText(hazzard[\"hazzard\"]))\n print(\"Fuel: \" + str(round(hazzard[\"fuel\"])) +\" litre\")\n print(\"Distance: \" + str(round(hazzard[\"distance\"])) + \" km\")\n print()\n\n\n return {\n \"passengers\": passengers,\n \"luggage\": luggage,\n \"onboard_luggage\": onboard_luggage,\n \"extra_cargo\": extra_cargo_weight,\n \"departure_time\": departureTime,\n \"weather\": weather,\n \"wind_direction\": wind_direction,\n \"total_duration\": flight_duration,\n \"total_weight\": total_passenger_luggage_weight,\n \"total_weight_with_fuel\": fuel_weight + total_passenger_luggage_weight,\n\n \"gap_per_hour\": gas_required_per_hour,\n \"total_fuel_without_hazzards\": gas_required_per_hour * flight_duration,\n \"total_fuel_for_hazzards\": total_fuel_hazzards,\n \"total_distance_increase\": total_distance_hazzards, # this represents hazzards that required fuel directly\n \"total_fuel_by_distance_hazzards_increase\": distance_to_fuel, # this calculates the fuel from the hazzards that added more distance. \n \"total_overall_fuel\": total_overall_consumption,\n \"total_fuel_weight\": fuel_weight\n }\n\n\n\n\n\n\n\n\n\n\n\n\n\ntypeOfUsage = input(\"? Enter 'car' for car gas millage simulation or 'plane' for flight simulation: \")\nif typeOfUsage != \"car\" and typeOfUsage != \"plane\":\n print(\"Invalid choice: 'car' or 'plane' only.\")\nprint()\n\nif typeOfUsage == \"plane\":\n route = flightPrompts()\n print(\"\\n\\n\")\n begin = input(\"Type 'start' to begin with the simulation: \")\n if begin.lower() == 'start':\n print(\"BEGINNING FLIGHT SIMULATION\")\n print()\n\n flight = getFlights()[route[\"plane\"]]\n printFlight(flight)\n print()\n \n flightSimulation(route[\"plane\"], route[\"distance\"], route[\"duration\"])\nelif typeOfUsage == \"car\":\n results = carPrompts()\n if results:\n print()\n fill = input(\"Would you like to compute the price of the fuel (yes or no)?: \")\n if (fill == \"yes\"):\n if (results[\"car\"][\"premium\"]):\n print(\"Your car requires premium fuel.\")\n prices = findGasStations(4, results[\"car\"][\"premium\"])\n printGasStations(prices[0], prices[1][\"name\"])\n\n selectionId = int(input(f\"Enter the selection ID (1 to {(4)}): \"))\n station = prices[0][selectionId-1]\n\n print()\n print(\"You selected:\", station[\"name\"])\n if results[\"car\"][\"premium\"]:\n print(\"You selected:\", station[\"name\"])\n print(\"Gas price:\", station[\"premium_cost\"], \"CA$ / L\")\n print(\"Your total cost is: \", round(station[\"premium_cost\"] * results[\"total_consumption\"], 2), \"CA$\")\n else:\n print(\"Gas price:\", station[\"petrol_cost\"], \"CA$ / L\")\n print(\"Your total cost is: \", round(station[\"petrol_cost\"] * results[\"total_consumption\"], 2), \"CA$\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shardaishwak/fuel-consumption-simulator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":84807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"10168851077","text":"from flask import Flask, request, render_template, redirect, flash\r\nfrom flask_debugtoolbar import DebugToolbarExtension\r\nfrom random import randint\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = \"thisisthekey\"\r\n#app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\r\ndebug = DebugToolbarExtension(app)\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n \"\"\"Home page in root route demo\"\"\"\r\n html = \"\"\"\r\n \r\n \r\n

Home Page

\r\n
Hi page\r\n
\r\n Comment!\r\n \r\n \r\n \"\"\"\r\n return html\r\n\r\n\r\n@app.route('/hello')\r\ndef say_hello():\r\n \"\"\"Return simple 'hello' greeting in flask basics demo\"\"\"\r\n html = \"\"\"\r\n \r\n \r\n

Hello

\r\n \r\n \r\n \"\"\"\r\n return html\r\n\r\n\r\n@app.route('/querystringtest')\r\ndef query_string_test():\r\n \"\"\"Parses query string into more legible format in query string demo\"\"\"\r\n response = ''\r\n for item in request.args:\r\n response+= f\"

{item}:{request.args[item]}

\"\r\n \r\n return response\r\n \r\n\r\n@app.route('/postrequestpage',methods=[\"POST\"])\r\ndef post_request_page():\r\n \"\"\"Makes a post request in POST request demo\"\"\"\r\n\r\n return 'this was a POST request'\r\n\r\n\r\n@app.route('/addcomment')\r\ndef add_comment_form():\r\n \"\"\"Serves a comment form to be POSTed in form submission demo\"\"\"\r\n\r\n html = \"\"\"\r\n \r\n \r\n
\r\n \r\n \r\n
\r\n \r\n \r\n \"\"\"\r\n return html\r\n\r\n\r\n@app.route('/addcomment',methods=[\"POST\"])\r\ndef post_comment():\r\n \"\"\"Posts comment to page in form submission demo\"\"\"\r\n\r\n comment = request.form[\"comment\"]\r\n return f\"You commented: \\n {comment}\"\r\n\r\n\r\n@app.route('/comments/')\r\ndef show_comment(id):\r\n \"\"\"Shows comment number in path variable demo\"\"\"\r\n #int in route path is optional, specifies that path variable is int\r\n\r\n html = f\"\"\"\r\n \r\n \r\n

You've reached comment number {id}

\r\n \r\n \r\n \"\"\"\r\n return html\r\n\r\n\r\n#let's pretend there's a function here to showcase the incredible difficulty of using multiple path variables in one route path\r\n\r\n\r\n@app.route('/templates/hello')\r\ndef jinja_hello():\r\n \"\"\"Demo of Jinja to render static template of hello.html\"\"\"\r\n\r\n return render_template('hello.html')\r\n\r\n\r\n@app.route('/lucky')\r\ndef lucky_page():\r\n \"\"\"Demo of dynamic HTML template using Jinja by making a page that displays a random lucky number\"\"\"\r\n\r\n lucky_num = randint(1,20)\r\n #inside lucky.html, the variable is named lucky_var and we can use it inside {{ }}\r\n return render_template('lucky.html',lucky_num = lucky_num)\r\n\r\n\r\n@app.route('/base')\r\ndef base_temp():\r\n \"\"\"Demo of template inheritance, base template\"\"\"\r\n\r\n return render_template('base.html')\r\n\r\n\r\n@app.route('/child')\r\ndef child_temp():\r\n \"\"\"Demo of template inheritance, child template\"\"\"\r\n\r\n return render_template('child.html')\r\n\r\n\r\n@app.route('/funform')\r\ndef funform_view():\r\n \"\"\"Returns simple name submission form\"\"\"\r\n\r\n return render_template('funform.html')\r\n\r\n\r\nname = 'ravi'\r\n@app.route('/funform', methods=[\"POST\"])\r\ndef funform_post():\r\n \"\"\"Saves name to global variable\"\"\"\r\n name = request.form.get(\"name\",default=\"kiran\")\r\n return redirect('/funformdone')\r\n\r\n\r\n@app.route('/funformdone')\r\ndef funformdone_view():\r\n return name","repo_name":"RaviP1494/PythonFlaskUnit","sub_path":"first-flask-server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39540382635","text":"import requests\nimport re\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom cloudbot import hook\n\nsearch_pages = defaultdict(list)\n\nuser_url = \"http://reddit.com/user/{}/\"\nsubreddit_url = \"http://reddit.com/r/{}/\"\n# This agent should be unique for your cloudbot instance\nagent = {\"User-Agent\":\"gonzobot a cloudbot (IRCbot) implementation for snoonet.org by /u/bloodygonzo\"}\n\n\ndef two_lines(bigstring, chan):\n \"\"\"Receives a string with new lines. Groups the string into a list of strings with up to 2 new lines per string element. Returns first string element then stores the remaining list in search_pages.\"\"\"\n global search_pages\n temp = bigstring.split('\\n')\n for i in range(0, len(temp), 2):\n search_pages[chan].append('\\n'.join(temp[i:i+2]))\n search_pages[chan+\"index\"] = 0\n return search_pages[chan][0]\n\n\ndef smart_truncate(content, length=355, suffix='...\\n'):\n if len(content) <= length:\n return content\n else:\n return content[:length].rsplit(' \\u2022 ', 1)[0]+ suffix + content[:length].rsplit(' \\u2022 ', 1)[1] + smart_truncate(content[length:])\n\ndef statuscheck(status, item):\n \"\"\"since we are doing this a lot might as well return something more meaningful\"\"\"\n out = \"\"\n if status == 404:\n out = \"It appears {} does not exist.\".format(item)\n elif status == 403:\n out = \"Sorry {} is set to private and I cannot access it.\".format(item)\n elif status == 429:\n out = \"Reddit appears to be rate-limiting me. 
Please try again in a few minutes.\"\n    elif status == 503:\n        out = \"Reddit is having problems, it would be best to check back later.\"\n    else:\n        out = \"Reddit returned an error, response: {}\".format(status)\n    return out\n\n@hook.command(\"moremod\", autohelp=False)\ndef moremod(text, chan):\n    \"\"\"if a sub or mod list has lots of results the results are paginated. If the most recent search is paginated the pages are stored for retrieval. If no argument is given the next page will be returned else a page number can be specified.\"\"\"\n    if not search_pages[chan]:\n        return \"There are no modlist pages to show.\"\n    if text:\n        index = \"\"\n        try:\n            index = int(text)\n        except:\n            return \"Please specify an integer value.\"\n        if abs(int(index)) > len(search_pages[chan]) or index == 0:\n            return \"please specify a valid page number between 1 and {}.\".format(len(search_pages[chan]))\n        else:\n            return \"{}(page {}/{})\".format(search_pages[chan][index-1], index, len(search_pages[chan]))\n    else:\n        search_pages[chan+\"index\"] += 1\n        if search_pages[chan+\"index\"] < len(search_pages[chan]):\n            return \"{}(page {}/{})\".format(search_pages[chan][search_pages[chan+\"index\"]], search_pages[chan+\"index\"] + 1, len(search_pages[chan]))\n        else:\n            return \"All pages have been shown.\"\n\n\n@hook.command(\"subs\", \"moderates\", singlethreaded=True)\ndef moderates(text, chan):\n    \"\"\"This plugin prints the list of subreddits a user moderates listed in a reddit users profile. Private subreddits will not be listed.\"\"\"\n    #This command was written using concepts from FurCode http://github.com/FurCode.\n    global search_pages\n    search_pages[chan] = []\n    search_pages[chan+\"index\"] = 0\n    user = text\n    r = requests.get(user_url.format(user), headers=agent)\n    if r.status_code != 200:\n        return statuscheck(r.status_code, user)\n    soup = BeautifulSoup(r.text, 'html.parser')\n    try:\n        modlist = soup.find('ul', id=\"side-mod-list\").text\n    except:\n        return \"{} does not moderate any public subreddits.\".format(user)\n    modlist = modlist.split('/r/')\n    del modlist[0]\n    out = \"\\x02{}\\x02 moderates these public subreddits: \".format(user)\n    for sub in modlist:\n        out += \"{} \\u2022 \".format(sub)\n    out = out[:-2]\n    out = smart_truncate(out)\n    if len(out.split('\\n')) > 2:\n        out = two_lines(out, chan)\n        return \"{}(page {}/{}) .moremod\".format(out, search_pages[chan+\"index\"] + 1 , len(search_pages[chan]))\n    return out\n\n\n@hook.command(\"karma\", \"ruser\", singlethreaded=True)\ndef karma(text):\n    \"\"\"karma will return the information about the specified reddit username\"\"\"\n    user = text\n    url = user_url + \"about.json\"\n    r = requests.get(url.format(user), headers=agent)\n    if r.status_code != 200:\n        return statuscheck(r.status_code, user)\n    data = r.json()\n    out = \"\\x02{}\\x02 \".format(user)\n    out += \"\\x02{:,}\\x02 link karma and \".format(data['data']['link_karma'])\n    out += \"\\x02{:,}\\x02 comment karma, \".format(data['data']['comment_karma'])\n    if data['data']['is_gold']:\n        out += \"has reddit gold, \"\n    if data['data']['is_mod']:\n        out += \"is a moderator, \"\n    if data['data']['has_verified_email']:\n        out += \"email has been verified, \"\n    account_age = datetime.now() - datetime.fromtimestamp(data['data']['created'])\n    if account_age.days > 365:\n        age = int(account_age.days / 365)\n        out += \"and has been a redditor for {} years.\".format(age)\n    else:\n        out += \"and has been a redditor for {} days.\".format(account_age.days)\n    return out\n\ndef time_format(numdays):\n    age = ()\n    if numdays >= 365:\n        age = 
(int(numdays / 365), \"y\")\n    else:\n        age = (numdays, \"d\")\n    return age\n\n@hook.command(\"submods\", \"mods\", \"rmods\", singlethreaded=True)\ndef submods(text, chan):\n    \"\"\"submods prints the moderators of the specified subreddit. Do not include /r/ when specifying a subreddit.\"\"\"\n    global search_pages\n    search_pages[chan] = []\n    search_pages[chan+\"index\"] = 0\n    sub = text\n    url = subreddit_url + \"about/moderators.json\"\n    r = requests.get(url.format(sub), headers=agent)\n    if r.status_code != 200:\n        return statuscheck(r.status_code, 'r/'+sub)\n    data = r.json()\n    out = \"r/\\x02{}\\x02 mods: \".format(sub)\n    for mod in data['data']['children']:\n        username = mod['name']\n        # Showing the modtime makes the message too long for larger subs\n        # if you want to show this information add modtime.days to out below\n        modtime = datetime.now() - datetime.fromtimestamp(mod['date'])\n        modtime = time_format(modtime.days)\n        out += \"{} ({}{}) \\u2022 \".format(username, modtime[0], modtime[1])\n    out = smart_truncate(out)\n    out = out[:-3]\n    if len(out.split('\\n')) > 2:\n        out = two_lines(out, chan)\n        return \"{}(page {}/{}) .moremod\".format(out, search_pages[chan+\"index\"] + 1 , len(search_pages[chan]))\n    return out\n\n@hook.command(\"subinfo\",\"subreddit\", \"sub\", \"rinfo\", singlethreaded=True)\ndef subinfo(text):\n    \"\"\"subinfo fetches information about the specified subreddit. Do not include /r/ when specifying a subreddit.\"\"\"\n    sub = text\n    url = subreddit_url + \"about.json\"\n    r = requests.get(url.format(sub), headers=agent)\n    if r.status_code != 200:\n        return statuscheck(r.status_code, 'r/'+sub)\n    data = r.json()\n    if data['kind'] == \"Listing\":\n        return \"It appears r/{} does not exist.\".format(sub)\n    name = data['data']['display_name']\n    title = data['data']['title']\n    nsfw = data['data']['over18']\n    subscribers = data['data']['subscribers']\n    active = data['data']['accounts_active']\n    sub_age = datetime.now() - datetime.fromtimestamp(data['data']['created'])\n    age = ()\n    if sub_age.days >= 365:\n        age = (int(sub_age.days / 365), \"y\")\n    else:\n        age = (sub_age.days, \"d\")\n    out = \"r/\\x02{}\\x02 - {} - a community for {}{}, there are {:,} subscribers and {:,} people online now.\".format(name, title, age[0], age[1], subscribers, active)\n    if nsfw:\n        out += \" \\x0304NSFW\\x0304\"\n    return out\n","repo_name":"CrushAndRun/Cloudbot-Tarball","sub_path":"plugins/reddit_info.py","file_name":"reddit_info.py","file_ext":"py","file_size_in_byte":7813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"20647469102","text":"#User function Template for python3\n\nclass Solution:\n    def getPairsCount(self, arr, n, k):\n        # code here\n        d={}\n        ans=0\n        for i in range(len(arr)):\n            if k-arr[i] in d:\n                ans+=d[k-arr[i]]\n            if arr[i] in d:\n                d[arr[i]]+=1\n            else:\n                d[arr[i]]=1\n        return ans\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n    tc = int(input())\n    while tc > 0:\n        n, k = list(map(int, input().strip().split()))\n        arr = list(map(int, input().strip().split()))\n        ob = Solution()\n        ans = ob.getPairsCount(arr, n, k)\n        print(ans)\n        tc -= 1\n\n# } Driver Code Ends","repo_name":"sumanthboorla/DSA-solving","sub_path":"Count pairs with given sum - 
GFG/count-pairs-with-given-sum.py","file_name":"count-pairs-with-given-sum.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70414610888","text":"## Rasa \nfrom typing import Any, Text, Dict, List\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.events import (\n UserUtteranceReverted,\n FollowupAction,\n AllSlotsReset,\n Restarted,\n SlotSet,\n EventType,\n LoopInterrupted,\n ActionExecutionRejected\n)\nimport threading\nimport asyncio\n## Telethon Client\nfrom actions.telegram_members.telethonhandler import TelethonHandler\nfrom actions.users.userhandler import UserHandler\nfrom actions.users.user import User\nfrom actions.groups.grouphandler import GroupHandler\nfrom actions.common.common import get_credentials, setup_logging\nimport logging\nlogger = setup_logging()\n\nclass ActionGetChannelMembers(Action):\n\n def name(self) -> Text:\n return \"action_get_channel_members\"\n\n async def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n try:\n is_user = False\n team_mates = None\n if not tracker.get_slot('my_group'): \n user_handler = UserHandler() \n telethon_handler = TelethonHandler()\n team_mates, is_user = await telethon_handler.get_users(tracker.sender_id) \n print(\"\\033[94mTEAM_MATES:\\033[0m \\n%s\" %team_mates)\n if '_id' in team_mates:\n team_mates['_id'] = str(team_mates['_id'])\n return[SlotSet('is_user', is_user), SlotSet('my_group',team_mates), FollowupAction(\"action_start\")]\n else:\n print(\"\\033[94mdont call get members again\\033[0m\")\n except Exception as e:\n logger.exception(e)\n return[SlotSet('is_user', is_user), SlotSet('my_group',team_mates), FollowupAction(\"action_start\")]\n","repo_name":"tubspaulkeller/PCA-Ben","sub_path":"actions/telegram_members/get_channel_members.py","file_name":"get_channel_members.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25400994908","text":"import logging\nimport os\n\nfrom intents import detect_intent_texts\nfrom dotenv import load_dotenv\nfrom log_handler import TelegramBotHandler\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler,\n Filters, CallbackContext)\n\n\nlogger = logging.getLogger('intents')\n\n\ndef error_handler(update, context):\n logger.exception(context.error)\n\n\ndef start(update, context):\n update.message.reply_text('Здравствуйте')\n\n\ndef reply(update, context):\n user_text = update.message.text\n answer, is_fallback = detect_intent_texts(\n context.bot_data['project_id'],\n context.bot_data['token'],\n user_text,\n language_code='ru',\n )\n update.message.reply_text(answer)\n\n\ndef main():\n load_dotenv()\n\n tg_token = os.getenv('TG_BOT_TOKEN')\n logbot_token = os.getenv('TG_LOG_BOT_TOKEN')\n chat_id = os.getenv('TG_CHAT_ID')\n\n logger.setLevel(logging.WARNING)\n logger.addHandler(TelegramBotHandler(logbot_token, chat_id))\n\n updater = Updater(tg_token)\n dispatcher = updater.dispatcher\n context = CallbackContext(dispatcher)\n\n context.bot_data['project_id'] = os.getenv('GOOGLE_CLOUD_PROJECT_ID')\n context.bot_data['token'] = os.getenv('TG_BOT_TOKEN')\n\n dispatcher.add_error_handler(error_handler)\n dispatcher.add_handler(CommandHandler('start', start))\n dispatcher.add_handler(MessageHandler(Filters.text, reply))\n\n updater.start_polling()\n 
updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"multipassport/flowbot","sub_path":"tgbot.py","file_name":"tgbot.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9144940243","text":"import warnings\nfrom dependency_management.requirements.PackageRequirement import (\n    PackageRequirement)\n\n\nclass AnyOneOfRequirements(PackageRequirement):\n    \"\"\"\n    This class is a subclass of ``PackageRequirement``. It determines which\n    requirements can be used to resolve the dependency.\n    \"\"\"\n\n    def __init__(self, requirements: list):\n        \"\"\"\n        Constructs a new ``AnyOneOfRequirements``.\n\n        Requirements are ordered by priority.\n\n        >>> from dependency_management.requirements.ExecutableRequirement \\\\\n        ...     import ExecutableRequirement\n        >>> aor = AnyOneOfRequirements([ExecutableRequirement(\"python\"),\n        ...                             ExecutableRequirement(\"python3\")])\n        >>> str(aor)\n        'ExecutableRequirement(python) ExecutableRequirement(python3)'\n        \"\"\"\n        self.requirements = requirements\n        self._packages_str = \" \".join(sorted(\n            [\"%s(%s)\" %\n             (requirement.__class__.__name__,\n              str(requirement))\n             for requirement in self.requirements]))\n        PackageRequirement.__init__(self, \"any-one-of\", self._packages_str)\n\n    def is_installed(self):\n        \"\"\"\n        Check if any one of the requirements are satisfied.\n\n        :return: True if any of the requirements are satisfied, false otherwise\n        \"\"\"\n        for requirement in self.requirements:\n            try:\n                if requirement.is_installed():\n                    return True\n            except Exception as e:\n                message = 'Exception of type {0} occurred : {1!r}\\n'\n                warnings.warn(message.format(type(e).__name__, e.args))\n        return False\n","repo_name":"yzgyyang/dependency_management","sub_path":"dependency_management/requirements/AnyOneOfRequirements.py","file_name":"AnyOneOfRequirements.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"29403973147","text":"# Name: Eimantas Pusinskas\r\n# Student ID: 120312336\r\n\r\nfrom queues import *\r\nfrom process import *\r\nfrom block import *\r\nfrom random import randint\r\n\r\nclass BuddySystemBST(object):\r\n\r\n    class node(object):\r\n        def __init__ (self, size):\r\n            self._block = Block(size)\r\n            self._parent = None\r\n            self._leftchild = None\r\n            self._rightchild = None\r\n\r\n        def __str__ (self):\r\n            return f\"{self._block.size} KB\"\r\n\r\n        def get_block(self):\r\n            return self._block\r\n\r\n        def set_block(self, block):\r\n            self._block = block\r\n        \r\n        block = property(get_block, set_block)\r\n\r\n\r\n    def __init__(self, size):\r\n        self._root = self.node(size)\r\n        self._block_nodes = self._root\r\n    \r\n    def get_root(self):\r\n        return self._root \r\n\r\n    # checks if a node is a leaf node\r\n    def is_leaf(self, node):\r\n        if node._leftchild == None and node._rightchild == None:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    # returns a list of all leaf nodes in the tree\r\n    def get_leaf_nodes(self):\r\n        root = self.get_root()\r\n        blocks = []\r\n        blocks = self._get_leaf_nodes(root, blocks)\r\n        return blocks\r\n    \r\n    def _get_leaf_nodes(self, node, blocks):\r\n        if self.is_leaf(node):\r\n            blocks.append(node)\r\n        else:\r\n            if node._leftchild != None:\r\n                self._get_leaf_nodes(node._leftchild, blocks)\r\n\r\n            if node._rightchild != None:\r\n                self._get_leaf_nodes(node._rightchild, blocks)\r\n        return blocks \r\n\r\n    # splits a node by instantiating two 
node objects and setting them\r\n    # as the original node's children\r\n    def split_node(self, node):\r\n        parent_block_size = node.block.size\r\n\r\n        left_child = self.node(parent_block_size//2)\r\n        right_child = self.node(parent_block_size//2)\r\n\r\n        node._leftchild = left_child\r\n        node._rightchild = right_child\r\n\r\n        left_child._parent = node\r\n        right_child._parent = node\r\n\r\n        return node\r\n\r\n    def print_leaf_nodes(self):\r\n        nodes = self.get_leaf_nodes()\r\n        output = \"< \"\r\n        for node in nodes:\r\n            output += f\"({node}:\"\r\n            if node.block._allocated == True:\r\n                output += \"1) \"\r\n            else:\r\n                output += \"0) \"\r\n        output += \">\"\r\n        print(output)\r\n\r\n    # returns a list of every node in the tree\r\n    def get_all_nodes(self):\r\n        nodes = []\r\n        root = self.get_root()\r\n        nodes = self._get_all_nodes(root, nodes)\r\n        return nodes\r\n\r\n    def _get_all_nodes(self, node, nodes):\r\n        nodes.append(node)\r\n\r\n        if node._leftchild != None:\r\n            self._get_all_nodes(node._leftchild, nodes)\r\n\r\n        if node._rightchild != None:\r\n            self._get_all_nodes(node._rightchild, nodes)\r\n        return nodes\r\n\r\n    def print_all_nodes(self):\r\n        nodes = self.get_all_nodes()\r\n        output = \"\"\r\n        for node in nodes:\r\n            output += f\"({node.block.size}, \"\r\n            if node.block._allocated == True:\r\n                output += \"1) \"\r\n            else:\r\n                output += \"0) \"\r\n        print(output)\r\n\r\n    # removes two children nodes of a parent node\r\n    def merge_children_nodes(self, node):\r\n        node._leftchild.block = None\r\n        node._leftchild._parent = None\r\n        node._leftchild = None\r\n\r\n        node._rightchild.block = None\r\n        node._rightchild._parent = None\r\n        node._rightchild = None\r\n\r\n    # sets a node to being deallocated. in other words the node is now free for memory allocation\r\n    def deallocate_node(self, node):\r\n        node.block._allocated = False\r\n        node.block._process = None \r\n        node.block._accessed = 0\r\n    \r\n    \r\n\r\nclass memory(object):\r\n\r\n    def __init__(self):\r\n        # in KB, 1 = 1KB, 1024 = 1024KB = 1MB\r\n        self._user_space_mem_size = 4096\r\n        self._page_size = 4\r\n        self._allocation_queue = QueueV0() #this is the queue where processes in need of memory allocation are enqueued\r\n        self._buddy_tree = BuddySystemBST(self._user_space_mem_size)\r\n        self._bitmap = {0:0}\r\n        self._replacement_queue = QueueV0() # this is the queue used for the second chance algorithm\r\n\r\n    def request_memory_allocation(self, process):\r\n        self._allocation_queue.enqueue(process)\r\n\r\n    # processes requesting memory are dequeued on a FIFO basis and allocated memory\r\n    def allocate_memory(self):\r\n        if self._allocation_queue.length() != 0:\r\n            proc = self._allocation_queue.dequeue()\r\n            node_found = self._allocate_memory(proc)\r\n            self._replacement_queue.enqueue(node_found)\r\n            print(f\"process {proc.pid}: requested {proc.memory}KB - allocated {node_found.block.size}KB\")\r\n            return node_found\r\n\r\n    def _allocate_memory(self, proc):\r\n        if self.is_memory_full():\r\n            self.replace()\r\n\r\n        # the amount of memory that the process is requesting\r\n        mem_required = proc.memory\r\n        \r\n        if mem_required < self._page_size:\r\n            mem_required = self._page_size\r\n        \r\n        # the memory requested by the process is rounded up to the next power of 2\r\n        mem_required -= 1\r\n        k = 1 \r\n        while k < mem_required:\r\n            k *= 2\r\n        target_size = k\r\n\r\n        # finds a free block that suits the process and allocates the block to that process\r\n        block_nodes = self._buddy_tree.get_leaf_nodes()\r\n        found = False\r\n        node_found = None\r\n        for node in block_nodes:\r\n            if 
node.block.size == target_size and node.block._allocated == False:\r\n                found = True\r\n                node.block.process = proc\r\n                node.block._allocated = True\r\n                node.block._accessed = 0\r\n                self.update_bitmap()\r\n                node_found = node\r\n                break\r\n        \r\n        if found == False:\r\n            # finds if any free blocks can be split recursively to be allocated to the process\r\n            block_node_to_split = None\r\n            max_free_node_size = 0\r\n            for node in block_nodes:\r\n                if node.block.size > max_free_node_size and node.block._allocated == False and node.block.size >= target_size:\r\n                    max_free_node_size = node.block.size\r\n                    block_node_to_split = node\r\n                    break\r\n            \r\n            # if a node suitable for a split is found, it is split until its size matches the target_size \r\n            # and then allocates the left-most split leaf node to the process requesting memory\r\n            # otherwise blocks are deallocated recursively until the process requesting memory \r\n            # can have memory allocated to it \r\n            if block_node_to_split != None:\r\n                free_block_node = self.split_until_allocated(target_size, block_node_to_split)\r\n                free_block_node.block.process = proc\r\n                free_block_node.block._allocated = True\r\n                free_block_node.block._accessed = 0\r\n                self.update_bitmap()\r\n                node_found = free_block_node\r\n            else:\r\n                self.replace()\r\n                node_found = self._allocate_memory(proc)\r\n\r\n        return node_found\r\n\r\n    # split tree nodes recursively until the leaf nodes of that subtree\r\n    # match the target size\r\n    def split_until_allocated(self, target_size, block_node_to_split):\r\n        free_block_node = None\r\n        parent_node = self._buddy_tree.split_node(block_node_to_split)\r\n        if parent_node._leftchild.block.size == target_size:\r\n            free_block_node = parent_node._leftchild\r\n        else:\r\n            free_block_node = self.split_until_allocated(target_size, parent_node._leftchild)\r\n        return free_block_node\r\n\r\n    # does a simple check to see if the memory is completely full\r\n    def is_memory_full(self):\r\n        full = True\r\n        for block in self._bitmap:\r\n            if self._bitmap[block] == 0:\r\n                full = False\r\n                break\r\n        return full\r\n\r\n    # deallocates blocks from memory using the second chance algorithm\r\n    def replace(self):\r\n        if self._replacement_queue.length() != 0:\r\n            node = self._replacement_queue.dequeue() \r\n            if node.block._accessed == 1:\r\n                node.block._accessed = 0\r\n                self._replacement_queue.enqueue(node)\r\n                print(f\"process {node.block.process.pid} has been given a second chance\")\r\n            else:\r\n                print(f\"process {node.block.process.pid} has been deallocated\")\r\n                self.remove_block(node)\r\n        \r\n        self.update_bitmap()\r\n    \r\n    def update_bitmap(self):\r\n        block_nodes = self._buddy_tree.get_leaf_nodes()\r\n        self._bitmap = {}\r\n        for node in block_nodes:\r\n            if node.block._allocated == True:\r\n                self._bitmap[block_nodes.index(node)] = 1\r\n            else:\r\n                self._bitmap[block_nodes.index(node)] = 0\r\n        return self._bitmap\r\n\r\n    # merges a block and its buddy if both are not allocated\r\n    # otherwise the block is simply deallocated and the tree stays as it is\r\n    def remove_block(self, node):\r\n        if self._buddy_tree.is_leaf(node) == True:\r\n            if self._buddy_tree.is_leaf(node._parent._rightchild) == True and node._parent._rightchild.block._allocated == False:\r\n                node._parent.block._allocated = False\r\n                self._buddy_tree.merge_children_nodes(node._parent) \r\n            else:\r\n                self._buddy_tree.deallocate_node(node)\r\n        self.update_bitmap()\r\n\r\n    def calculate_fragmentation(self):\r\n        nodes = self._buddy_tree.get_leaf_nodes()\r\n        internal_memory_consumed = 0\r\n        external_memory_consumed 
= 0\r\n        for node in nodes:\r\n            if node.block.process != None:\r\n                internal_memory_consumed += node.block.process.memory\r\n                external_memory_consumed += node.block.size\r\n\r\n        # fragmentation result is the percentage of unused memory\r\n        internal_fragmentation = ((external_memory_consumed - internal_memory_consumed) / external_memory_consumed ) * 100\r\n        external_fragmentation = ((self._user_space_mem_size - external_memory_consumed) / self._user_space_mem_size) * 100\r\n        print(f\"-----------------\\nFragmentation: \\nInternal: {internal_fragmentation}% \\nExternal: {external_fragmentation }%\\n-----------------\")\r\n\r\n    \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    def basic_test():\r\n        mem = memory()\r\n        proc1 = Process(1, 50)\r\n        mem.request_memory_allocation(proc1)\r\n        proc1_node = mem.allocate_memory()\r\n\r\n        proc2 = Process(2, 254)\r\n        mem.request_memory_allocation(proc2)\r\n        mem.allocate_memory()\r\n\r\n        proc4 = Process(4, 120)\r\n        mem.request_memory_allocation(proc4)\r\n        mem.allocate_memory()\r\n        \r\n        proc5 = Process(5, 1000)\r\n        mem.request_memory_allocation(proc5)\r\n        mem.allocate_memory()\r\n\r\n        proc6 = Process(6, 500)\r\n        mem.request_memory_allocation(proc6)\r\n        mem.allocate_memory()\r\n\r\n        proc7 = Process(7, 2010)\r\n        mem.request_memory_allocation(proc7)\r\n        mem.allocate_memory()\r\n\r\n\r\n        print(mem._bitmap)\r\n        mem._buddy_tree.print_leaf_nodes()\r\n\r\n        print(\"----------------------\")\r\n\r\n        #mem.remove_block(proc1_node)\r\n        #print(mem._bitmap)\r\n        #mem._buddy_tree.print_leaf_nodes()\r\n\r\n        proc8 = Process(8, 60)\r\n        mem.request_memory_allocation(proc8)\r\n        mem.allocate_memory()\r\n        mem._buddy_tree.print_leaf_nodes()\r\n        mem.calculate_fragmentation()\r\n\r\n        \r\n        proc9 = Process(9, 100)\r\n        mem.request_memory_allocation(proc9)\r\n        mem.allocate_memory()\r\n        mem._buddy_tree.print_leaf_nodes()\r\n\r\n        proc10 = Process(10, 1500)\r\n        mem.request_memory_allocation(proc10)\r\n        mem.allocate_memory()\r\n        mem._buddy_tree.print_leaf_nodes()\r\n\r\n        #proc11 = Process(11, 1)\r\n        #mem.request_memory_allocation(proc11)\r\n        #mem.allocate_memory()\r\n        #mem._buddy_tree.print_leaf_nodes()\r\n\r\n        mem.calculate_fragmentation()\r\n\r\n    def random_test():\r\n        mem = memory()\r\n        \r\n        for i in range(250):\r\n            proc = Process(i, randint(mem._page_size, 32))\r\n            mem.request_memory_allocation(proc)\r\n            node = mem.allocate_memory()\r\n\r\n            node.block._accessed = randint(0,1)\r\n\r\n        mem.calculate_fragmentation()\r\n        mem._buddy_tree.print_leaf_nodes()\r\n    \r\n\r\n    #basic_test()\r\n    random_test()","repo_name":"EimantasPusinskas/operating_systems_2","sub_path":"assignment2/main_memory.py","file_name":"main_memory.py","file_ext":"py","file_size_in_byte":12828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6492236997","text":"# Write a program that reads an integer smaller than 1000\n# and prints how many hundreds, tens and units it contains.\n\n# Mind the plural forms, the placement of \"e\" (\"and\"), the comma, and\n# so on. 
Example:\n# 326 = 3 hundreds, 2 tens and 6 units\n# 12 = 1 ten and 2 units. Test with: 326, 300, 100, 320, 310, 305, 301,\n# 101, 311, 111, 25, 20, 10, 21, 11, 1, 7 and 16\n\nnumero_completo = int(input(\"Insira um número inteiro positivo: \"))\n\nunidade = numero_completo % 10\nnumero = (numero_completo - unidade) / 10\ndezena = numero % 10\nnumero = (numero - dezena) / 10\ncentena = numero % 10\nmilhar = (numero - centena) / 10\n\nunidade = int(unidade)\ndezena = int(dezena)\ncentena = int(centena)\nmilhar = int(milhar)\n\nif numero_completo >= 1000:\n    if unidade == 1:\n        un = \"unidade\"\n    else:\n        un = \"unidades\"\n\n    if dezena == 1:\n        dz = \"dezena\"\n    else:\n        dz = \"dezenas\"\n\n    if centena == 1:\n        cent = \"centena\"\n    else:\n        cent = \"centenas\"\n\n    if milhar == 1:\n        mil = \"milhar\"\n    else:\n        mil = \"milhares\"\n\n    print(f\"{numero_completo} - {milhar} {mil}, {centena} {cent}, \"\n          f\"{dezena} {dz} e {unidade} {un}.\")\n\nelif numero_completo >= 100:\n    if unidade == 1:\n        un = \"unidade\"\n    else:\n        un = \"unidades\"\n\n    if dezena == 1:\n        dz = \"dezena\"\n    else:\n        dz = \"dezenas\"\n\n    if centena == 1:\n        cent = \"centena\"\n    else:\n        cent = \"centenas\"\n\n    print(f\"{numero_completo} - {centena} {cent}, {dezena} {dz} \"\n          f\"e {unidade} {un}.\")\n\nelif numero_completo >= 10:\n    if unidade == 1:\n        un = \"unidade\"\n    else:\n        un = \"unidades\"\n\n    if dezena == 1:\n        dz = \"dezena\"\n    else:\n        dz = \"dezenas\"\n\n    print(f\"{numero_completo} - {dezena} {dz} e {unidade} {un}.\")\n\nelse:\n    if unidade == 1:\n        un = \"unidade\"\n    else:\n        un = \"unidades\"\n\n    print(f\"{numero_completo} - {unidade} {un}.\")\n\n# print(f\"{numero_completo} - {milhar} {mil}, {centena} {cent}, {dezena} {dz} \"\n#       f\"e {unidade} { un}.\")\n","repo_name":"AndreGorny/exercicios_python","sub_path":"estrutura_decisao/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29403973147","text":"import re\r\nimport tweepy\r\nfrom tweepy import OAuthHandler\r\nfrom textblob import TextBlob\r\nfrom wordcloud import WordCloud\r\nimport pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nplt.style.use('fivethirtyeight')\r\n\r\n# Twitter API credentials\r\nconsumerKey = \"xxxxx-xxxxxxx--xxxxxxx--xxxx\"\r\nconsumerSecret = 'xxxxx--xxxxxxxxxxx---------xxxxxxxx'\r\naccessToken = 'xxxxxxxxxxxx---xxx--xxxxxxxxx--------xxxxxxxx'\r\naccessTokenSecret = 'xxxxxxxxxxxxxxxx---xxxxxxxxxxxxx'\r\n\r\n#Create the authentication object\r\nauthenticate = tweepy.OAuthHandler(consumerKey, consumerSecret)\r\n\r\n#set the authentication object\r\nauthenticate.set_access_token(accessToken, accessTokenSecret) \r\n\r\n#Create the API object while passing in the auth information\r\napi = tweepy.API(authenticate, wait_on_rate_limit=True)\r\n\r\n#Extract 100 tweets from the twitter user\r\nposts = api.user_timeline(screen_name = \"BillGates\", count =100, lang = 'en', tweet_mode=\"extended\")\r\n \r\n#create a dataframe with a column called Tweets\r\ndf = pd.DataFrame( [tweet.full_text for tweet in posts] , columns=['Tweets'])\r\n\r\n#Show the first 5 rows of data\r\ndf.head()\r\n\r\n#Clean the text\r\n\r\n#Create a function to clean text\r\ndef cleantxt(text):\r\n    text = re.sub(r'@[A-Za-z0-9]+','',text) #Removes @mentions\r\n    text = re.sub(r'#', '', text) #Removing the '#' symbol\r\n    text = re.sub(r'RT[\\s]+', '', text) #Removing RT\r\n    text = re.sub(r'https?:\\/\\/\\S+', '', text)\r\n    return 
text\r\n\r\n#Cleaning the text\r\ndf['Tweets']=df['Tweets'].apply(cleantxt)\r\n\r\n#show the cleaned text\r\ndf\r\n\r\n#Create a function to get the subjectivity\r\ndef getSubjectivity(text):\r\n    return TextBlob(text).sentiment.subjectivity\r\n\r\n#create a function to get the polarity\r\ndef getPolarity(text):\r\n    return TextBlob(text).sentiment.polarity\r\n\r\n#Create two new columns\r\ndf['Subjectivity'] = df['Tweets'].apply(getSubjectivity)\r\ndf['Polarity'] = df['Tweets'].apply(getPolarity)\r\n\r\n#Show the new dataframe with the new columns\r\ndf\r\n\r\n#Plot the word cloud\r\nallWords = ' '.join( [twts for twts in df['Tweets']] )\r\ncloud = WordCloud(width = 500, height = 300, random_state = 21, max_font_size = 119).generate(allWords)\r\n\r\nplt.imshow(cloud, interpolation = \"bilinear\")\r\nplt.axis('off')\r\nplt.show()\r\n\r\n#create a function to compute the negative, neutral and positive\r\ndef getAnalysis(score):\r\n    if score < 0:\r\n        return 'Negative'\r\n    elif score == 0:\r\n        return 'Neutral'\r\n    else:\r\n        return 'Positive'\r\n\r\ndf['Analysis'] = df['Polarity'].apply(getAnalysis)\r\n\r\n#show the dataframe\r\ndf\r\n\r\n#Print all of the positive tweets\r\nj=1\r\nsortedDF = df.sort_values(by=['Polarity'])\r\nfor i in range(0,sortedDF.shape[0]):\r\n    if(sortedDF['Analysis'][i] == 'Positive'):\r\n        print(str(j) + ') '+sortedDF['Tweets'][i])\r\n        print()\r\n        j = j+1\r\n\r\n#print the negative tweets\r\nj=1\r\nsortedDF = df.sort_values(by=['Polarity'], ascending=False)\r\nfor i in range(0, sortedDF.shape[0]):\r\n    if( sortedDF['Analysis'][i] == 'Negative'):\r\n        print(str(j) +') '+ sortedDF['Tweets'][i])\r\n        print()\r\n        j = j+1\r\n\r\n#plot the polarity and subjectivity\r\nplt.figure(figsize=(8,6))\r\nfor i in range(0,df.shape[0]):\r\n    plt.scatter(df['Polarity'][i], df['Subjectivity'][i], color='Blue')\r\n\r\nplt.title('Sentiment Analysis')\r\nplt.xlabel('Polarity')\r\nplt.ylabel('Subjectivity')\r\nplt.show()\r\n\r\n#Get the percentage of positive tweets\r\nptweets = df[df.Analysis == 'Positive']\r\nptweets = ptweets['Tweets']\r\n\r\nround( (ptweets.shape[0] / df.shape[0]) *100 , 1)\r\n\r\n#Get the percentage of negative tweets\r\nntweets = df[df.Analysis == 'Negative']\r\nntweets = ntweets['Tweets']\r\nround( (ntweets.shape[0] / df.shape[0]*100),1)\r\n\r\n#show the value counts\r\ndf['Analysis'].value_counts()\r\n#plot and visualize the counts\r\nplt.title('Sentiment Analysis')\r\nplt.xlabel('Sentiment')\r\nplt.ylabel('Counts')\r\ndf['Analysis'].value_counts().plot(kind='bar')\r\nplt.show()","repo_name":"sohailali26/Sentimental-Analysis-using-Python","sub_path":"SENTIMENT ANYLISIS.py","file_name":"SENTIMENT ANYLISIS.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31859251912","text":"# Import the required libraries and adapters\nimport psycopg2\nimport sys\n\nfrom PyQt5.QtWidgets import (QApplication, QWidget,\n                             QTabWidget, QAbstractScrollArea,\n                             QVBoxLayout, QHBoxLayout,\n                             QTableWidget, QGroupBox,\n                             QTableWidgetItem, QPushButton, QMessageBox)\n\n# Create the MainWindow class with a constructor\nclass MainWindow(QWidget): # The QTabWidget class creates a structure that can be filled with tabs.\n    def __init__(self):\n        super(MainWindow, self).__init__()\n\n        self._connect_to_db()\n\n        self.setWindowTitle(\"Schedule\")\n        self.setGeometry(270, 100, 915, 700)\n\n        self.vbox = QVBoxLayout(self)\n\n        self.tabs = QTabWidget(self)\n        self.vbox.addWidget(self.tabs)\n\n        self.update_button = 
QPushButton(\"Update\")\n self.update_button.clicked.connect(lambda _: self._update())\n self.updatebtn_lay = QHBoxLayout()\n self.vbox.addLayout(self.updatebtn_lay)\n self.updatebtn_lay.addWidget(self.update_button)\n\n self._create_shedule_tab()\n self._create_teachers_tab()\n self._create_subjects_tab()\n\n# Создаем метод для подключения к базе данных\n def _connect_to_db(self):\n self.conn = psycopg2.connect(database=\"postgres\",\n user=\"postgres\",\n password=\"qwerty123\",\n host=\"localhost\",\n port=\"5432\")\n\n self.cursor = self.conn.cursor()\n\n# Создаем метод для отображения вкладки с расписанием\n def _create_shedule_tab(self):\n self.shedule_tab = QWidget() # Класс QWidget() создает виджет, который будет являться вкладкой\n self.tabs.addTab(self.shedule_tab, \"Расписание\")\n days = ['Понедельник', 'Вторник', 'Среда',\n 'Четверг', 'Пятница', 'Суббота']\n\n day_tab = QTabWidget(self)\n\n for i in days:\n day_tab.addTab(self._create_day_table(i.upper()), i)\n day_tab_layout = QVBoxLayout()\n day_tab_layout.addWidget(day_tab)\n self.shedule_tab.setLayout(day_tab_layout)\n\n def _create_day_table(self, day):\n table = QTableWidget()\n table.setColumnCount(8) # Метод setColumnCount() задает таблице количество колонок.\n table.setHorizontalHeaderLabels([\"timetable_id\", \"day\", \"subject\", \"room_numb\", \"start_time\", \"week\", \"\", \"\"])\n self._update_day_table(table, day)\n return table\n\n def _update_day_table(self, table, day):\n self.cursor.execute(f\"SELECT * FROM timetable join subject on\"\n f\" timetable.subject = subject.subject_id WHERE day='{day}' \"\n f\"ORDER BY timetable_id\")\n records = list(self.cursor.fetchall())\n table.setRowCount(len(records) + 1) # Метод setRowCount() задает таблице количество строк.\n\n for i, r in enumerate(records):\n r = list(r)\n editButton = QPushButton(\"Edit\")\n delButton = QPushButton(\"Delete\")\n table.setItem(i, 0,\n QTableWidgetItem(str(r[0]))) # Метод setItem() записывает в ячейку с определенным адресом строковые данные.\n table.setItem(i, 1,\n QTableWidgetItem(str(r[1])))\n table.setItem(i, 2,\n QTableWidgetItem(str(r[7])))\n table.setItem(i, 3,\n QTableWidgetItem(str(r[3])))\n table.setItem(i, 4,\n QTableWidgetItem(str(r[4])))\n table.setItem(i, 5,\n QTableWidgetItem(str(r[5])))\n table.setCellWidget(i, 6, editButton) # Метод setCellWidget() помещает в ячейку с определенным адресом виджет.\n table.setCellWidget(i, 7, delButton)\n\n editButton.clicked.connect(\n lambda _, rowNum=i, table=table: self._change_from_timetable(rowNum, table))\n delButton.clicked.connect(lambda _, rowNum=i, table=table: self._delete_from_timetable(rowNum, table))\n\n addButton = QPushButton(\"Add\")\n addButton.clicked.connect(lambda _, rowNum=len(records), table=table: self._add_row_timetable(rowNum, table))\n table.setCellWidget(len(records), 6, addButton)\n table.resizeRowsToContents() # Метод resizeRowsToContents() автоматически адаптирует размеры ячеек таблицы под размер данных внутри этой ячейки.\n\n def _change_from_timetable(self, rowNum, table):\n row = list()\n for i in range(table.columnCount() - 2):\n try:\n row.append(table.item(rowNum, i).text())\n except:\n row.append(None)\n try:\n self.cursor.execute(\"select subject_id from subject where name=%s\", (row[2],))\n subject = self.cursor.fetchone()\n row[2] = subject[0]\n row.append(row[0])\n row = row[1:]\n self.cursor.execute(\"update timetable set day=%s, subject=%s, room_numb=%s, start_time=%s, \"\n \"week=%s where timetable_id=%s\", tuple(row))\n self.conn.commit()\n 
except Exception as e:\n QMessageBox.about(self, \"Error\", str(e))\n self._connect_to_db()\n\n def _delete_from_timetable(self, rowNum, table):\n try:\n id = table.item(rowNum, 0).text()\n day = table.item(rowNum, 1).text()\n self.cursor.execute(\"delete from timetable where timetable_id=%s\", (id,))\n self.conn.commit()\n table.setRowCount(0)\n self._update_day_table(table, day)\n except Exception as e:\n QMessageBox.about(self, \"Error\", str(e))\n self._connect_to_db()\n\n def _add_row_timetable(self, rowNum, table):\n row = list()\n for i in range(1, table.columnCount() - 2):\n try:\n row.append(table.item(rowNum, i).text())\n except:\n row.append(None)\n try:\n self.cursor.execute(\"select subject_id from subject where name=%s\", (row[1],))\n subject = self.cursor.fetchone()\n row[1] = subject[0]\n self.cursor.execute(\n \"insert into timetable (day, subject, room_numb, start_time, week) values(%s, %s, %s, %s, %s)\",\n (tuple(row)))\n self.conn.commit()\n table.setRowCount(0)\n self._update_day_table(table, row[0])\n except Exception as e:\n print(e)\n QMessageBox.about(self, \"Error\", str(e))\n self._connect_to_db()\n\n def _create_teachers_tab(self):\n self.teachers = QWidget()\n self.tabs.addTab(self.teachers, \"Преподаватели\")\n table = QTableWidget(self)\n table.setColumnCount(5)\n table.setHorizontalHeaderLabels([\"teacher_id\", \"Имя\", \"Предмет\", \"\", \"\"])\n\n teachers_tab_layout = QVBoxLayout()\n teachers_tab_layout.addWidget(table)\n\n self._update_teachers_tab(table)\n self.teachers.setLayout(teachers_tab_layout)\n\n def _update_teachers_tab(self, table):\n self.cursor.execute(f\"SELECT * FROM teacher join subject on teacher.subject=subject.subject_id\"\n f\" ORDER BY teacher_id\")\n records = list(self.cursor.fetchall())\n table.setRowCount(len(records) + 1)\n\n for i, r in enumerate(records):\n r = list(r)\n editButton = QPushButton(\"Edit\")\n delButton = QPushButton(\"Delete\")\n table.setItem(i, 0,\n QTableWidgetItem(str(r[0])))\n table.setItem(i, 1,\n QTableWidgetItem(str(r[1])))\n table.setItem(i, 2,\n QTableWidgetItem(str(r[4])))\n table.setCellWidget(i, 3, editButton)\n table.setCellWidget(i, 4, delButton)\n\n editButton.clicked.connect(\n lambda _, rowNum=i, tabl=table: self._change_from_teacher(rowNum, tabl))\n delButton.clicked.connect(\n lambda _, rowNum=i, tabl=table:\n self._delete_from_teacher(rowNum, table))\n addButton = QPushButton(\"Add\")\n addButton.clicked.connect(\n lambda _, rowNum=len(records), table=table: self._add_row_teacher(rowNum, table))\n table.setCellWidget(len(records), 3, addButton)\n table.resizeRowsToContents()\n\n def _change_from_teacher(self, rowNum, table):\n row = list()\n\n for i in range(table.columnCount() - 2):\n try:\n row.append(table.item(rowNum, i).text())\n except:\n row.append(None)\n try:\n self.cursor.execute(\"select subject_id from subject where name=%s\", (row[2],))\n subject = self.cursor.fetchone()\n row[2] = subject[0]\n row.append(row[0])\n row = row[1:]\n self.cursor.execute(\"update teacher set full_name=%s, subject=%s where teacher_id=%s\", tuple(row))\n self.conn.commit()\n except Exception as e:\n QMessageBox.about(self, \"Error\", str(e))\n self._connect_to_db()\n\n def _delete_from_teacher(self, rowNum, table):\n try:\n id = table.item(rowNum, 0).text()\n self.cursor.execute(\"delete from teacher where teacher_id=%s\", (id,))\n self.conn.commit()\n table.setRowCount(0)\n self._update_teachers_tab(table)\n except Exception as e:\n QMessageBox.about(self, \"Error\", str(e))\n 
self._connect_to_db()\n\n    def _add_row_teacher(self, rowNum, table):\n        row = list()\n        for i in range(1, table.columnCount() - 2):\n            try:\n                row.append(table.item(rowNum, i).text())\n            except:\n                row.append(None)\n        self.cursor.execute(\"select subject_id from subject where name=%s\", (row[1],))\n        subject = self.cursor.fetchone()\n        row[1] = subject[0]\n        try:\n            self.cursor.execute(\n                \"insert into teacher (full_name, subject) values(%s, %s)\",\n                (tuple(row)))\n            self.conn.commit()\n            table.setRowCount(0)\n            self._update_teachers_tab(table)\n        except Exception as e:\n            print(e)\n            QMessageBox.about(self, \"Error\", str(e))\n            self._connect_to_db()\n\n    def _create_subjects_tab(self):\n        self.subjects = QWidget()\n        self.tabs.addTab(self.subjects, \"Предметы\")\n        table = QTableWidget(self)\n        table.setColumnCount(4)\n        table.setHorizontalHeaderLabels([\"subject_id\", \"Предмет\", \"\", \"\"])\n\n        subjects_tab_layout = QVBoxLayout()\n        subjects_tab_layout.addWidget(table)\n\n        self._update_subjects_tab(table)\n        self.subjects.setLayout(subjects_tab_layout)\n\n    def _update_subjects_tab(self, table):\n        self.cursor.execute(f\"SELECT * FROM subject ORDER BY subject_id\")\n        records = list(self.cursor.fetchall())\n        table.setRowCount(len(records) + 1)\n        for i, r in enumerate(records):\n            r = list(r)\n            editButton = QPushButton(\"Edit\")\n            delButton = QPushButton(\"Delete\")\n            table.setItem(i, 0,\n                          QTableWidgetItem(str(r[0])))\n            table.setItem(i, 1,\n                          QTableWidgetItem(str(r[1])))\n            table.setCellWidget(i, 2, editButton)\n            table.setCellWidget(i, 3, delButton)\n\n            editButton.clicked.connect(\n                lambda _, rowNum=i, table=table: self._change_from_subjects(rowNum, table))\n            delButton.clicked.connect(\n                lambda _, rowNum=i, table=table:\n                self._delete_from_subjects(rowNum, table))\n\n        addButton = QPushButton(\"Add\")\n        addButton.clicked.connect(\n            lambda _, rowNum=len(records), table=table: self._add_row_subject(rowNum, table))\n        table.setCellWidget(len(records), 2, addButton)\n        table.resizeRowsToContents()\n\n    def _change_from_subjects(self, rowNum, table):\n        row = list()\n\n        for i in range(table.columnCount() - 2):\n            try:\n                row.append(table.item(rowNum, i).text())\n            except:\n                row.append(None)\n        try:\n            row.append(row[0])\n            row = row[1:]\n            self.cursor.execute(\"update subject set name=%s where subject_id=%s\", tuple(row))\n            self.conn.commit()\n        except Exception as e:\n            QMessageBox.about(self, \"Error\", str(e))\n            self._connect_to_db()\n\n    def _delete_from_subjects(self, rowNum, table):\n        try:\n            id = table.item(rowNum, 0).text()\n            self.cursor.execute(\"delete from teacher where subject=%s\", (id,))\n            self.cursor.execute(\"delete from timetable where subject=%s\", (id,))\n            self.cursor.execute(\"delete from subject where subject_id=%s\", (id,))\n            self.conn.commit()\n            table.setRowCount(0)\n            self._update_subjects_tab(table)\n        except Exception as e:\n            QMessageBox.about(self, \"Error\", str(e))\n            self._connect_to_db()\n\n    def _add_row_subject(self, rowNum, table):\n        subject = table.item(rowNum, 1).text()\n        try:\n            self.cursor.execute(\n                \"insert into subject (name) values(%s)\",\n                (subject,))\n            self.conn.commit()\n            table.setRowCount(0)\n            self._update_subjects_tab(table)\n        except Exception as e:\n            print(e)\n            QMessageBox.about(self, \"Error\", str(e))\n            self._connect_to_db()\n\n# Create a method that refreshes all the tables on the tabs\n    def _update(self):\n        self.tabs.removeTab(0)\n        self.tabs.removeTab(0)\n        self.tabs.removeTab(0)\n        self._create_shedule_tab()\n        self._create_teachers_tab()\n        self._create_subjects_tab()\n\n\napp = 
QApplication(sys.argv)\nwin = MainWindow()\nwin.show()\nsys.exit(app.exec_())\n","repo_name":"Michael-merlot/All-labaratories-","sub_path":"8_lab_rabota.py","file_name":"8_lab_rabota.py","file_ext":"py","file_size_in_byte":14661,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8771261481","text":"# Importing the necessary libraries\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport os\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score\r\n\r\n# Obtain the dataset and store it in a variable\r\ndf = pd.read_csv(\"C:/Users/reibo/OneDrive/Dokumenti/dokumenti za faks/year 3/machine learning/Car prices/car data.csv\")\r\n\r\n# Creating dummies for the above variables\r\nFuel_type = pd.get_dummies(df['Fuel_Type'], drop_first = True)\r\nSeller_Type = pd.get_dummies(df['Seller_Type'], drop_first = True)\r\nTransmission = pd.get_dummies(df['Transmission'], drop_first = True)\r\n\r\n# Drop the dummy variables from the data frame and combine them into a single one\r\ndf = df.drop(['Fuel_Type', 'Seller_Type', 'Transmission', 'Car_Name'], axis=1)\r\ndf = pd.concat([df,Fuel_type, Seller_Type, Transmission], axis=1)\r\n\r\n# Define the target variable\r\nY = df['Selling_Price']\r\nX = df.drop(['Selling_Price'], axis=1)\r\n#X = StandardScaler().fit_transform(df)\r\n\r\n# Split the data into train and test\r\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=.2, random_state=1)\r\n\r\n# Create the model (using Linear Regression)\r\nmodel = LinearRegression()\r\nmodel.fit(X_train, y_train)\r\n\r\n# Test the model\r\n#print(model.score(X_test, y_test))\r\n\r\n# Compare the prediction with the actual values and show the error\r\npred = model.predict(X_test)\r\npred_overview = pd.DataFrame()\r\npred_overview[\"truth\"] = y_test\r\npred_overview[\"pred\"] = pred\r\npred_overview[\"error\"] = pred_overview[\"truth\"] - pred_overview[\"pred\"]\r\npred_overview[\"error\"] = abs(pred_overview[\"error\"].astype(int))\r\npred_overview = pred_overview.reset_index(drop= True)\r\n#print(pred_overview)\r\n\r\nscore = r2_score(y_test, pred)\r\n#print(score)\r\n\r\nplot = sns.regplot(y=y_test.values.flatten(), x=pred.flatten(), line_kws={\"color\": \"g\"})\r\nplot.set_xlabel(\"predicted price\")\r\nplot.set_ylabel(\"actual price\")\r\nplt.show()\r\n\r\n","repo_name":"Evonn69/SmileDetection","sub_path":"Car prices/car price prediction.py","file_name":"car price prediction.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41280175289","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport sublime\nimport sublime_plugin\nfrom . 
import utils\nfrom .utils import Redlime\n\n\nclass RedlimeTimeEntryCreateCommand(sublime_plugin.TextCommand):\n\n timeentry_data = {}\n\n def run(self, edit):\n self.redmine = Redlime.connect()\n # issue_id = self.view.settings().get('issue_id', None)\n self.screen = self.view.settings().get('screen')\n issue_id = self.get_issue_id()\n if issue_id is None:\n projects_filter = utils.get_setting('projects_filter', [])\n if projects_filter:\n projects = [self.redmine.project.get(pid) for pid in projects_filter]\n else:\n projects = self.redmine.project.all()\n\n prj_names = []\n self.prj_ids = []\n for prj in projects:\n prj_names.append(prj.name)\n self.prj_ids.append(prj.id)\n self.view.window().show_quick_panel(prj_names, self.on_project_done)\n else:\n self.on_issue_done(issue_id)\n\n def get_issue_id(self):\n issue_id = None\n if self.screen == 'redlime_issue':\n issue_id = self.view.settings().get('issue_id', None)\n elif self.screen == 'redlime_query':\n try:\n line = self.view.substr(self.view.line(self.view.sel()[0].end()))\n issue_id = line.split(utils.TABLE_SEP)[1].strip()\n int(issue_id) # check is number\n except Exception:\n pass\n return issue_id\n\n def on_project_done(self, index):\n if index < 0:\n return\n\n self.timeentry_data['project_id'] = self.prj_ids[index]\n self.view.window().show_input_panel(\"Issue id:\", '', self.on_issue_done, None, None)\n\n def on_issue_done(self, text):\n if text:\n self.timeentry_data['issue_id'] = int(text)\n\n activities = self.redmine.enumeration.filter(resource='time_entry_activities')\n if activities:\n self.acts = []\n self.acts_ids = []\n for enum in activities:\n self.acts.append(enum.name)\n self.acts_ids.append(enum.id)\n\n sublime.set_timeout(lambda: self.view.window().show_quick_panel(self.acts, self.on_activity_done), 1)\n else:\n self.on_activity_done(-1)\n\n def on_activity_done(self, index):\n if index >= 0:\n # not required and needs redmine >= 3.4.0\n self.timeentry_data['activity_id'] = self.acts_ids[index]\n self.timeentry_data['activity_name'] = self.acts[index]\n\n self.view.window().show_input_panel(\"Date:\", datetime.datetime.now().strftime('%Y-%m-%d'), self.on_date_done, None, None)\n\n def on_date_done(self, text):\n if not text:\n return\n self.timeentry_data['spent_on'] = text\n\n self.view.window().show_input_panel(\"Hours:\", '1.0', self.on_hours_done, None, None)\n\n def on_hours_done(self, text):\n if not text:\n return\n self.timeentry_data['hours'] = float(text)\n\n self.view.window().show_input_panel(\"Comment:\", '', self.on_comment_done, None, None)\n\n def on_comment_done(self, text):\n if not text:\n return\n\n time_entry = self.redmine.time_entry.new()\n if self.timeentry_data.get('issue_id', None):\n time_entry.issue_id = self.timeentry_data['issue_id']\n elif self.timeentry_data.get('project_id', None):\n time_entry.project_id = self.timeentry_data['project_id']\n else:\n raise Exception('Issue or project required to create time entry.')\n if self.timeentry_data.get('activity_id', None):\n time_entry.activity_id = self.timeentry_data.get('activity_id', None)\n time_entry.comments = text\n time_entry.spent_on = self.timeentry_data['spent_on']\n time_entry.hours = self.timeentry_data['hours']\n time_entry.save()\n if time_entry.id:\n sublime.message_dialog('Time entry %s (\"%s: %s\") created successfully.' 
% (time_entry.id, self.timeentry_data['activity_name'], time_entry.comments))\n if self.timeentry_data['issue_id']:\n self.view.run_command('redlime_issue', {'issue_id': self.timeentry_data['issue_id']})\n else:\n sublime.message_dialog('Error! Time entry creation failed..')\n","repo_name":"tosher/Redlime","sub_path":"base/rl_time_entry_create.py","file_name":"rl_time_entry_create.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"5601282097","text":"import os, sys, inspect\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\n\nfrom datetime import date, datetime\nimport pandas as pd\nimport numpy as np\n\nfrom cl_random_forest import *\n\ndf, X_, Y_, X_train_, X_test_, y_train_, y_test_, X_train_part_, X_valid_, y_train_part_, y_valid_ = DO_PREPROCESSING()\nY_ = Y_[col_y_true].tolist()\ny_train_ = y_train_[col_y_true].tolist()\ny_train_part_ = y_train_part_[col_y_true].tolist()\ny_valid_ = y_valid_[col_y_true].tolist()\ny_test_origin = y_test_.copy()\ny_test_ = y_test_[col_y_true].tolist()\n\nM = c_Random_Forest()\nM.get_best_model(14, idx=0, random_state=71)\nM.fit_predict_model(M.best_model, X_train_, y_train_, X_test_, y_test_) \n","repo_name":"af20/mds_ml_predict_stock_market_direction","sub_path":"MAIN.py","file_name":"MAIN.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27328253386","text":"import os\nimport glob\nimport logging\nfrom os.path import basename, splitext\n\n# Zynthian specific modules\nfrom zyngui.zynthian_gui_selector import zynthian_gui_selector\n\n#------------------------------------------------------------------------------\n# Zynthian Option Selection GUI Class\n#------------------------------------------------------------------------------\n\nclass zynthian_gui_option(zynthian_gui_selector):\n\n\tdef __init__(self):\n\t\tself.title = \"\"\n\t\tself.options = {}\n\t\tself.options_cb = None\n\t\tself.cb_select = None\n\t\tself.click_type = False\n\t\tself.close_on_select = True\n\t\tsuper().__init__(\"Option\", True)\n\n\n\tdef config(self, title, options, cb_select, close_on_select=True, click_type=False):\n\t\tself.title = title\n\t\tif callable(options):\n\t\t\tself.options_cb = options\n\t\t\tself.options = None\n\t\telse:\n\t\t\tself.options_cb = None\n\t\t\tself.options = options\n\t\tself.cb_select = cb_select\n\t\tself.close_on_select = close_on_select\n\t\tself.click_type = click_type\n\t\tself.index = 0\n\n\n\tdef config_file_list(self, title, dpaths, fpat, cb_select, close_on_select=True, click_type=False):\n\t\tself.title = title\n\t\tself.options = {}\n\t\tself.options_cb = None\n\t\tself.cb_select = cb_select\n\t\tself.close_on_select = close_on_select\n\t\tself.click_type = click_type\n\t\tself.index = 0\n\n\t\tif isinstance(dpaths, str):\n\t\t\tdpaths = [dpaths]\n\t\tif isinstance(dpaths, (list, tuple)):\n\t\t\tfor dpath in dpaths:\n\t\t\t\ttry:\n\t\t\t\t\tfor fpath in sorted(glob.iglob(\"{}/{}\".format(dpath, fpat))):\n\t\t\t\t\t\tfname = basename(fpath)\n\t\t\t\t\t\tif fpat != \"*\":\n\t\t\t\t\t\t\tfbase, fext = splitext(fname)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfbase = fname\n\n\t\t\t\t\t\tif os.path.isfile(fpath):\n\t\t\t\t\t\t\tself.options[fbase] = fpath\n\t\t\t\texcept Exception as err:\n\t\t\t\t\tlogging.warning(\"Can't 
get file list for {}/{}: {}\".format(dpath, fpat, err))\n\n\n\tdef fill_list(self):\n\t\ti = 0\n\t\tself.list_data = []\n\t\tif self.options_cb:\n\t\t\tself.options = self.options_cb()\n\t\tfor k, v in self.options.items():\n\t\t\tself.list_data.append((v, i, k))\n\t\t\ti += 1\n\t\tsuper().fill_list()\n\n\n\tdef select_action(self, i, t='S'):\n\t\tif self.close_on_select:\n\t\t\tself.zyngui.close_screen()\n\t\tif self.cb_select and i < len(self.list_data):\n\t\t\tif self.click_type:\n\t\t\t\tself.cb_select(self.list_data[i][2], self.list_data[i][0], t)\n\t\t\telse:\n\t\t\t\tself.cb_select(self.list_data[i][2], self.list_data[i][0])\n\t\t\tif not self.close_on_select:\n\t\t\t\tself.fill_list()\n\n\n\tdef set_select_path(self):\n\t\tself.select_path.set(self.title)\n\n#------------------------------------------------------------------------------\n","repo_name":"zynthian/zynthian-ui","sub_path":"zyngui/zynthian_gui_option.py","file_name":"zynthian_gui_option.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"16"} +{"seq_id":"5215519954","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2020/2/14 16:46\n\"\"\"\n\"\"\"给定一个完美二叉树,其所有叶子节点都在同一层,每个父节点都有两个子节点。二叉树定义如下:\n\nstruct Node {\n int val;\n Node *left;\n Node *right;\n Node *next;\n}\n填充它的每个 next 指针,让这个指针指向其下一个右侧节点。如果找不到下一个右侧节点,则将 next 指针设置为 NULL。\n\n初始状态下,所有 next 指针都被设置为 NULL。\n\n示例:\n\n输入:{\"$id\":\"1\",\"left\":{\"$id\":\"2\",\"left\":{\"$id\":\"3\",\"left\":null,\"next\":null,\"right\":null,\"val\":4},\"next\":null,\"right\":{\"$id\":\"4\",\"left\":null,\"next\":null,\"right\":null,\"val\":5},\"val\":2},\"next\":null,\"right\":{\"$id\":\"5\",\"left\":{\"$id\":\"6\",\"left\":null,\"next\":null,\"right\":null,\"val\":6},\"next\":null,\"right\":{\"$id\":\"7\",\"left\":null,\"next\":null,\"right\":null,\"val\":7},\"val\":3},\"val\":1}\n\n输出:{\"$id\":\"1\",\"left\":{\"$id\":\"2\",\"left\":{\"$id\":\"3\",\"left\":null,\"next\":{\"$id\":\"4\",\"left\":null,\"next\":{\"$id\":\"5\",\"left\":null,\"next\":{\"$id\":\"6\",\"left\":null,\"next\":null,\"right\":null,\"val\":7},\"right\":null,\"val\":6},\"right\":null,\"val\":5},\"right\":null,\"val\":4},\"next\":{\"$id\":\"7\",\"left\":{\"$ref\":\"5\"},\"next\":null,\"right\":{\"$ref\":\"6\"},\"val\":3},\"right\":{\"$ref\":\"4\"},\"val\":2},\"next\":null,\"right\":{\"$ref\":\"7\"},\"val\":1}\n\n解释:给定二叉树如图 A 所示,你的函数应该填充它的每个 next 指针,以指向其下一个右侧节点,如图 B 所示。\n提示:\n\n你只能使用常量级额外空间。\n使用递归解题也符合要求,本题中递归程序占用的栈空间不算做额外的空间复杂度。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/populating-next-right-pointers-in-each-node\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\"\"\"\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n def helper(root):\n if root and root.left:\n root.left.next = root.right\n p = root.left\n q = root.right\n while p.right:\n p = p.right\n q = q.left\n p.next = q\n helper(root.left)\n helper(root.right)\n r = root\n helper(r)\n return root\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/1-120/97-120/116.py","file_name":"116.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72201775689","text":"import pickle\n\nimport numpy as np\nimport 
torch\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom core.selector import Selector\nfrom data_loaders.data_loader_all_loaded import DataLoaderAllLoaded\nfrom models.loss import get_criterion\nfrom utils.run_utils import get_model\n\n\ndef get_model_and_dataset(\n start_date,\n end_date,\n model_kwargs,\n data_kwargs,\n checkpoint_fpath,\n input_shape=(540, 420),\n is_validation=True,\n is_test=True,\n):\n loss_kwargs = {'type': 0, 'aggregation_mode': 0, 'kernel_size': None, 'residual_loss': 0, 'w': None}\n model = get_model(start_date, end_date, model_kwargs, loss_kwargs, data_kwargs, '', '', input_shape=input_shape)\n checkpoint = torch.load(checkpoint_fpath)\n _ = model.load_state_dict(checkpoint['state_dict'])\n model = torch.nn.DataParallel(model).cuda()\n\n dataset = DataLoaderAllLoaded(\n start_date,\n end_date,\n data_kwargs['input_len'],\n data_kwargs['target_len'],\n target_offset=data_kwargs['target_offset'],\n data_type=data_kwargs['data_type'],\n hourly_data=data_kwargs['hourly_data'],\n residual=data_kwargs['residual'],\n img_size=input_shape,\n sampling_rate=data_kwargs['sampling_rate'],\n random_std=data_kwargs['random_std'],\n is_validation=is_validation,\n is_test=is_test,\n workers=0,\n )\n return (model, dataset)\n\n\ndef get_worstK_prediction(k,\n start_date,\n end_date,\n model_kwargs,\n data_kwargs,\n loss_kwargs,\n checkpoint_fpath,\n batch_size,\n lean=False,\n input_shape=(540, 420),\n num_workers=4):\n\n selector = Selector(k, 'max')\n model, dataset = get_model_and_dataset(\n start_date,\n end_date,\n model_kwargs,\n data_kwargs,\n checkpoint_fpath,\n input_shape=(540, 420),\n )\n loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)\n start_index = 0\n criterion = get_criterion(loss_kwargs)\n model.eval()\n recent_target = 0\n with torch.no_grad():\n for batch in tqdm(loader):\n if data_kwargs['residual']:\n inp, target, mask, recent_target = batch\n recent_target = np.swapaxes(recent_target.numpy(), 0, 1)\n else:\n inp, target, mask = batch\n\n N = target.shape[0]\n inp = inp.cuda()\n target = target.cuda()\n mask = mask.cuda()\n\n prediction = model(inp) + recent_target\n assert target.shape[0] == prediction.shape[1]\n loss_list = []\n for b in range(target.shape[0]):\n loss_list.append(criterion(prediction[:, b:b + 1], target[b:b + 1], mask[b:b + 1]).item())\n\n prediction = prediction.permute(1, 0, 2, 3)\n prediction = prediction.cpu().numpy()\n target = target.cpu().numpy()\n ts = [dataset.target_ts(k) for k in range(start_index, start_index + N)]\n if lean:\n selector.add_batch(loss_list, ts)\n else:\n selector.add_batch(loss_list, prediction, target, ts)\n start_index += N\n\n return selector.all()\n\n\ndef load_worstK_predictions(fpath):\n with open(fpath, 'rb') as f:\n data = pickle.load(f)\n\n loss_list = []\n target_list = []\n prediction_list = []\n ts_list = []\n lean = len(data[0]) == 2\n target, prediction = None, None\n while len(data) > 0:\n row = Selector.pop(data)\n assert len(row) == 4 or len(row) == 2\n if lean:\n loss, ts = row\n else:\n loss, prediction, target, ts = row\n loss_list.append(loss)\n target_list.append(target)\n prediction_list.append(prediction)\n ts_list.append(ts)\n\n if lean:\n return {'loss': loss_list, 'prediction': None, 'target': None, 'ts': ts_list}\n\n return {'loss': loss_list, 'prediction': prediction_list, 'target': target_list, 'ts': ts_list}\n\n\ndef get_prediction(start_date,\n 
end_date,\n model_kwargs,\n data_kwargs,\n loss_kwargs,\n checkpoint_fpath,\n batch_size,\n is_validation=True,\n is_test=False,\n input_shape=(540, 420),\n num_workers=4):\n model, dataset = get_model_and_dataset(\n start_date,\n end_date,\n model_kwargs,\n data_kwargs,\n checkpoint_fpath,\n input_shape=(540, 420),\n is_validation=is_validation,\n is_test=is_test,\n )\n criterion = get_criterion(loss_kwargs)\n loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)\n\n all_prediction = []\n loss_list = []\n model.eval()\n recent_target = 0\n with torch.no_grad():\n for batch in tqdm(loader):\n if data_kwargs['residual']:\n inp, target, mask, recent_target = batch\n recent_target = np.swapaxes(recent_target.numpy(), 0, 1)\n else:\n inp, target, mask = batch\n\n N = target.shape[0]\n inp = inp.cuda()\n target = target.cuda()\n mask = mask.cuda()\n\n prediction = model(inp) + recent_target\n assert target.shape[0] == prediction.shape[1]\n loss_list.append(N * criterion(prediction, target, mask).item())\n\n prediction = prediction.cpu().numpy()\n all_prediction.append(np.swapaxes(prediction, 0, 1))\n\n print('[Loss]', round(np.sum(loss_list) / len(dataset), 3))\n return np.concatenate(all_prediction, axis=0)\n","repo_name":"ashesh-0/AccClearQPN","sub_path":"utils/prediction_utils.py","file_name":"prediction_utils.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"7195039846","text":"\ndef heapify(i):\n\t# sift the value at index i up until the max-heap property holds\n\twhile i > 0:\n\t\tparent = (i-1)//2\n\t\tif a[parent] >= a[i]:\n\t\t\tbreak\n\t\ta[i], a[parent] = a[parent], a[i]\n\t\ti = parent\ndef insert(ele):\n\ta.append(ele)\n\theapify(len(a)-1)\n\nn = int(input('length of array'))\na = list(map(int, input().split()))\n# sifting every element up in turn builds a valid max-heap\nfor i in range(1, n):\n\theapify(i)\nprint(a)","repo_name":"utkarshgupta220399/Algorithms-using-python","sub_path":"heapinsert.py","file_name":"heapinsert.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4805381031","text":"#import flask libraries and os libraries \nfrom flask import Flask, render_template, request, jsonify, make_response, Response, redirect, url_for\nimport os\nimport json\nfrom json import JSONEncoder\n\n#define the template directory (html pages) to be the website directory.\ntemplate_dir = os.path.abspath('./website/')\n\n#define Flask Application\napp = Flask(__name__, template_folder=template_dir)\n\n\n#create routes for the certain pages. 
When a user travels to the URL contained withing the quotes, it will call the function.\n#index route that brings user to home page.\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n#route that brings user to the How CPU's work page\n@app.route('/howcpuswork')\ndef howcpuswork():\n return render_template('howcpuswork.html')\n\n#route that brings user to the How 8-bit CPU's work page\n@app.route('/how8bitwork')\ndef how8bitwork():\n return render_template('how8bitwork.html')\n\n#route that brings user to the Programming Page\n@app.route('/cpuprogram')\ndef cpuprogram():\n return render_template('cpuprogram.html', result=\"\")\n\n#route that brings user to page that only displays the stream.\n@app.route('/stream')\ndef stream():\n return render_template('stream.html')\n\n#route that brings user to page that has FAQs.\n@app.route('/faq')\ndef faq():\n return render_template('faq.html')\n\n#define class for one instruction\nclass instruction():\n def __init__(self, address, instruct, value):\n self.address = address\n self.instruct = instruct\n self.value = value\n\n def get_instruct(self):\n return \"%s %s %s\" % (self.address, self.instruct, self.value)\n def serialize(self):\n return {\n 'address': self.address,\n 'instruction': self.instruct,\n 'value': self.value\n }\n\n\n\n#define class that is created to contain one set of instructions\nclass program():\n def __init__(self, id, program):\n self.id = id\n self.program = program\n\n def serialize(self):\n return {\n 'id': self.id,\n 'program': self.program.serialize()\n }\n#programEncoder class for JSON encoding\nclass programEncoder(JSONEncoder):\n def default(self, o):\n return o.__dict__\n \n\n\nprograms = [] #array that holds all programs yet to be executed\nids = [] #array to hold list of all ID's. The index of an ID is it's position in queue.\ncurrentId = 1 #value to keep track of next ID to be assigned.\n\n#function that if a value is nothing, changes it to 0000.\ndef processInputVal(inp):\n if (inp == \"\"):\n return \"0000\"\n else:\n return inp\n\n#function that if a instruction is nothing, it is changed to NOP. \ndef processInputInstruct(inst):\n if (inst == \"\"):\n return \"NOP\"\n elif (inst == \"JC\"):\n return \"JC \"\n else:\n return inst\n\n#API URL for creating a new program. It is a POST method. It takes a request, and processes all the information to create a\n#instance of a program and then returns the id and it's position.\n@app.route('/api/new-program', methods=['POST'])\ndef i():\n global currentId\n global ids\n if request.method == 'POST':\n global test\n current_form = request.form\n instructions = []\n for ind in range(0, 16):\n step = format(ind, \"b\")\n \n inst = instruction(step, processInputInstruct(current_form[f'{step}in']), processInputVal(current_form[f'{step}val']))\n instructions.append(inst)\n print(step)\n\n newProgram = program(currentId,instructions)\n programs.append(newProgram)\n ids.append(currentId)\n currentId += 1\n #print(ids.index(newProgram.id))\n #programs.pop(0)\n #ids.pop(0)\n return jsonify(id=newProgram.id, position=ids.index(newProgram.id))\n\n#API URL for returning the current program. It is a GET method. \n@app.route('/api/current-program')\ndef current_program():\n if (len(programs) >= 1):\n return json.dumps(programs[0], cls=programEncoder)\n else:\n return json.dumps(\"No programs\")\n \n \n#API URL for removing the current program. 
It is a GET method\n@app.route('/api/remove-program')\ndef remove_program():\n programs.pop(0)\n ids.pop(0)\n return \"Success\"\n \n#API URL for getting the position in queue of an ID. It is a GET method that takes a ID value in.\n@app.route('/api/position')\ndef position():\n args = request.args\n request_id = int(args[\"id\"])\n if request_id in ids:\n return jsonify(str(ids.index(request_id)))\n else:\n return jsonify(\"Id isn't present\")\n \n#This will call Flask to run the application to run on the VAPOR link. If the file name is main, it will run the application\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=(os.environ.get('VAPOR_LOCAL_PORT')))\n","repo_name":"CJChristenson/drew-drew-this-isp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5215565684","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2020-06-03 10:24\n\"\"\"\n\"\"\"128. 最长连续序列\n给定一个未排序的整数数组,找出最长连续序列的长度。\n\n要求算法的时间复杂度为 O(n)。\n\n示例:\n\n输入: [100, 4, 200, 1, 3, 2]\n输出: 4\n解释: 最长连续序列是 [1, 2, 3, 4]。它的长度为 4。\"\"\"\n\n\nclass Solution:\n def longestConsecutive(self, nums) -> int:\n res = 0\n nums = set(nums)\n for n in nums:\n if n - 1 not in nums:\n cur = n\n cur_l = 1\n while cur + 1 in nums:\n cur += 1\n cur_l += 1\n res = max(res, cur_l)\n return res\n\n def longestConsecutive2(self, nums) -> int:\n d = {}\n res = 0\n for n in nums:\n if n not in d:\n left = d.get(n - 1, 0)\n right = d.get(n + 1, 0)\n print(d, n, left, right)\n cur = left + 1 + right\n res = max(res, cur)\n d[n] = cur\n d[n - left] = cur\n d[n + right] = cur\n return res\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.longestConsecutive([100, 4, 200, 1, 3, 2]))\n print(sol.longestConsecutive2([100, 4, 200, 1, 3, 2]))\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/121-240/121-144/128.py","file_name":"128.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74719166089","text":"#!/usr/bin/env python3\n\n\nwith open(\"strategy_guide\") as inf:\n\tinput = inf.read().splitlines()\n\n\n# X means *lose*, 0 pts\n# Y means \"Tie\", 3 pts\n# Z means \"Win\", 6 pts\n\n# if we throw a:\n#\tRock:\t1 pt\n#\tPaper:\t2 pts\n#\tScissors: 3 pts\n\ndef brute(line):\n\tif line == \"A X\":\n\t\t# Lose to Rock ==> Throw Scissors\n\t\treturn (0 + 3)\n\tif line == \"A Y\":\n\t\t# Tie with Rock ==> Throw Rock\n\t\treturn (3 + 1)\n\tif line == \"A Z\":\n\t\t# Win vs rock, throw Paper\n\t\treturn (6 + 2)\n\tif line == \"B X\":\n\t\treturn (0 + 1)\n\tif line == \"B Y\":\n\t\treturn (3 + 2)\n\tif line == \"B Z\":\n\t\treturn (6 + 3)\n\tif line == \"C X\":\n\t\treturn (0 + 2)\n\tif line == \"C Y\":\n\t\treturn (3 + 3)\n\tif line == \"C Z\":\n\t\treturn (6 + 1)\n\n\ndef should_throw(line):\n\tif line == \"A Z\" or line == \"B Y\" or line == \"C X\":\n\t\treturn \"P\"\n\tif line == \"A Y\" or line == \"B X\" or line == \"C Z\":\n\t\treturn \"R\"\n\tif line == \"A X\" or line == \"B Z\" or line == \"C Y\":\n\t\treturn \"S\"\n\ndef show_outcome(line):\n\tif \"X\" in line:\n\t\treturn \"Lose\"\n\tif \"Y\" in line:\n\t\treturn \"Tie\"\n\treturn \"Win\"\n\ndef get_winscore(line):\n\tif \"X\" in line:\n\t\treturn 0\n\tif \"Y\" in line:\n\t\treturn 3\n\tif \"Z\" in line:\n\t\treturn 6\n\ndef get_throwscore(line):\n\tif \"R\" in line:\n\t\treturn 1\n\tif \"P\" in 
line:\n\t\treturn 2\n\tif \"S\" in line:\n\t\treturn 3\n\ntotal = 0\nbrute_total = 0\nfor line in input:\n\tbrute_total += brute(line)\n\tthrow = should_throw(line)\n\tround = get_winscore(line) + get_throwscore(throw)\n\tprint(f\"{line}: {show_outcome(line)}, throw {should_throw(line)}, {brute(line)}? {round} ({get_winscore(line)} + {get_throwscore(throw)})\")\n\ttotal += round\nprint(f\"brute: {brute_total}, elegant: {total}\")\n","repo_name":"natethebobo/adventOfCode2022","sub_path":"day2/2Dec.py","file_name":"2Dec.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22327556933","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.install import install\n\nVERSION = '0.0.1'\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n description = 'verify that the git tag matches our version'\n\n def run(self):\n tag = os.getenv('CIRCLE_TAG')\n tag = tag.lstrip('v')\n\n if tag != VERSION:\n info = f\"Git tag: {tag} does not match the version of this app: {VERSION}\"\n sys.exit(info)\n\n\nsetup(\n name='vttes',\n version=VERSION,\n license='BSD',\n description='Python Tools for Roll20 / VTTES / Better20 integrations',\n long_description='Python tools.',\n author='William Gibb',\n author_email='williamgibb@gmail.com',\n url='https://github.com/forgedconcordance/vttestools',\n packages=find_packages(include=('vttes',)),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Other',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n keywords=[\n # eg: 'keyword1', 'keyword2', 'keyword3',\n ],\n install_requires=[\n 'cmd2==1.4.0',\n 'tabulate==0.8.7',\n 'synapse>=2.33.0,<3.0.0',\n ],\n extras_require={\n # eg:\n # 'rst': ['docutils>=0.11'],\n # ':python_version==\"2.6\"': ['argparse'],\n },\n entry_points={\n 'console_scripts': [\n 'vttestools= vttes.tools.cli:_main',\n ]\n },\n cmdclass={\n 'verify': VerifyVersionCommand,\n },\n)\n","repo_name":"forgedconcordance/vttestools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26141503390","text":"class Solution:\n def minimumFinishTime(self, tires: List[List[int]], changeTime: int, numLaps: int) -> int:\n tires.sort(key=lambda x:x[1])\n candidateTires = []\n for tire in tires:\n if not candidateTires:\n candidateTires.append(tire)\n elif tire[0] < candidateTires[-1][0]:\n candidateTires.append(tire)\n\n n = len(candidateTires)\n\n def totalTime(tire, lap):\n f, r = candidateTires[tire][0], candidateTires[tire][1]\n # f * r^(lap-1)\n # f * r^0 + f*r^1 + f*r^2 + ... 
+ f*r^(lap-1)\n return f * (r**lap-1)//(r-1)\n\n minTime = [inf] * (numLaps + 1)\n for lap in range(1, numLaps+1):\n for i in range(n):\n minTime[lap] = min(minTime[lap], totalTime(i, lap))\n\n dp = [inf] * 1001\n dp[0] = 0\n for i in range(1, numLaps+1):\n for j in range(1, i+1):\n if j < i:\n dp[i] = min(dp[i], dp[i-j] + changeTime + minTime[j])\n elif j == i:\n dp[i] = min(dp[i], dp[i-j] + minTime[j])\n\n return dp[numLaps]","repo_name":"Vergil0327/leetcode-history","sub_path":"2-D Dynamic Programming/2188. Minimum Time to Finish the Race/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70953013129","text":"# Создайте программу для игры с конфетами человек против человека.\n\n# Условие задачи: На столе лежит 2021 конфета. Играют два игрока делая ход друг после друга.\n# Первый ход определяется жеребьёвкой. За один ход можно забрать не более чем 28 конфет.\n# Все конфеты оппонента достаются сделавшему последний ход.\n# Сколько конфет нужно взять первому игроку, чтобы забрать все конфеты у своего конкурента?\n# a) Добавьте игру против бота\n# b) Подумайте как наделить бота \"\"интеллектом\"\"\n\nimport random\ndef input_check(sum):\n while True:\n if sum <= 28:\n max = sum\n else:\n max = 28\n print(f'сколько конфет берём? (максимум {max})')\n try:\n pos = int(input())\n if pos > 0 and pos <= 28:\n return pos\n else:\n print('попробуй ещё раз')\n except:\n print('Мы в конфетки играем. Пиши цифру или уходи')\n\nplayer_1 = input('введите имя первого игрока: ')\nplayer_2 = input('введите имя второго игрока: ')\nplayers = [(0, player_1), (1, player_2)]\nnumber = int(input('на сколько конфет играем?'))\nfirst = random.randint(0, 1)\nwhile number > 0:\n print(f'ходит {players[first%2][1]}')\n number -= input_check(number)\n if number == 0:\n print(f'{players[first%2][1]} побеждает и забирает себе все конфеты! 
\\n {players[first%2-1][1]} остётся ни с чем')\n break\n first = first % 2 + 1\n print(number)\n","repo_name":"j10r2/Python_meeting","sub_path":"seminar5/homework5/hw5_1.py","file_name":"hw5_1.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5196967122","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nt, q = np.genfromtxt('CircuitoRC.txt').T\n\ndef equation(R, C):\n global t\n V0 = 10\n return V0*C*(1-np.exp(-t/(R*C)))\n\ndef error(small_q):\n global q\n return np.sum((q - small_q)**2)\n\ndef chain(steps = 1000, dr = 0.1, dc = 0.1):\n R = np.zeros(steps)\n C = np.zeros(steps)\n R[0] = np.random.random()*dr\n C[0] = np.random.random()*dc\n\n last_error = error(equation(R[0], C[0]))\n for i in range(steps-1):\n r = abs((2*np.random.random()-1)*dr + R[i])\n c = abs((2*np.random.random()-1)*dc + C[i])\n\n alpha = np.exp(error(equation(R[i], C[i])) - error(equation(r, c)))\n alpha = min(1, alpha)\n if alpha > np.random.random():\n R[i+1] = r\n C[i+1] = c\n else:\n R[i+1] = R[i]\n C[i+1] = C[i]\n return R, C\n\nR, C = chain()\nQ = equation(R[500:].mean(), C[500:].mean())\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.plot(t, q, label = \"Real\")\nax1.plot(t, Q, label = \"Best\")\nax1.set_xlabel('$t$')\nax1.set_ylabel('$q(t)$')\nax1.legend()\n\nax2.plot(R, label = \"$R$\")\nax2.plot(C, label = \"$C$\")\nax2.set_ylabel('Value')\nax2.set_xlabel('Iteration')\nax2.legend()\nplt.savefig(\"CircuitoRC.png\")\n","repo_name":"ComputoCienciasUniandes/MetodosComputacionales","sub_path":"talleres/2017-01/hw_5/Solucion/CircuitoRC.py","file_name":"CircuitoRC.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73521323529","text":"\"\"\"This is a poorly coded statistics page that gives user info.\"\"\"\n\nimport zlib\nimport random\nimport string\nimport hashlib\nimport sqlite3\nimport datetime\n\nfrom pathlib import Path\nfrom jinja2 import Template\nfrom flask import session, request, current_app\n\nfrom riddle.names import random_name, _names, _adjectives\nfrom riddle.utils import create_user, get_user\nfrom riddle.urls import without_answer, add_route\n\n\nsuper_user = (\"monty-python\", \"flying circus\")\n\n\nsuccess_message = f'''\nIf you are reading this message, it means you made it: I am {super_user[0]},\none of the funding members of the WASP10 association and I found the hole in\nthe system that I just exploited to get this message. Sorry if we were not\nvery explicit in telling how to come here, but trust is fundamental in this\nphase. I would like to let you know that you are not alone, and the number of\npeople fighting the EAI must continue to grow.\nAt the venue, among the PyConX staff, there are some WASP10 undercover agents\nthat have been sent there to scout talents and invite them to participate in\nthe competition. Those agents are still unaware of the fact that an EAI\novertook our communications.\nYour job is now to find them and inform them of what is happening.\nYou must gain their trust and let them know the truth. They might help you in \ngetting the necessary information to break into the system and destroy the AI,\nbut you will not able to do it alone.\nIf you find something interesting, use /wasp9/clues to send it to me.\n\nRemember my name when you meet the agents, it might be useful.\nGood luck.\n'''\n# But... 
WASP10 is now public, why having undercover agents? Why do they need\n# to trust the player?? This is fishy...\n\n# Let's use the same join date to keep it simple and fishy\njoin_date = \"2009-10-30\"\n\nhosts = (\n # Actually interesting hosts\n (1, '127.0.0.1:8888', 'telnet'), # Password breaking challenge\n (2, '5.44.12.71', 'http'), # id=2 will be used by fetch\n # Some random hosts\n # (3, '5.44.12.70', 'ftp'),\n # (4, '1.32.1.32', 'irc'), # This could actually be real :)\n # (5, '10.42.0.112', 'pop3'),\n # Some not-so-random hosts, just for fun\n (6, '20.19.5.2', 'pcx'),\n (7, '20.19.5.3', 'pcx'),\n (8, '20.19.5.4', 'pcx'),\n (9, '20.19.5.5', 'pcx'),\n)\n\nusers = (\n # Let's give PyConX organizers some credit :)\n # Those are some of the many members who made PyConX possible\n # The passwords are there just for fun, let's hope they enjoy them!\n ('patrick91', \"thank you for the birthday gift, Marilyn\"),\n ('davidmugnai', \"measure once, cut once, fix once\"),\n ('hype_fi', \"impossible is just a matter of time\"),\n ('simbasso', \"we should not test in production\"),\n ('cm', \"the glass is half full and half wrong\"),\n ('yakkys', \"d-j-a-n-g-o, the d is silent\"),\n ('mena', \"go go gadget everything\"),\n ('leriomaggio', \"from oxford import british_accent\"),\n ('__pamaron__', \"for the holy Mary\"),\n ('viperale', \"human body is 90% water, mine is 90% spritz\"),\n ('rasky', \"optimising life\"),\n ('fiorella', \"I am tooooo cute\"),\n # Back to the game\n super_user, # Easily breakable\n)\n\n\ndef password_hash(data):\n \"\"\"Return an integer value representing the hash of data.\"\"\"\n return int.from_bytes(hashlib.blake2b(data, digest_size=2).digest(), 'big')\n\n\ndef break_password_hash(target):\n \"\"\"Search for a random string with given hash.\"\"\"\n import time\n for b_order in ['big', 'little']:\n start = time.time()\n count = 0\n while True:\n if time.time() >= start + 60:\n print(\"In 60 seconds we did\", count, \"iterations\")\n count = 0\n start = time.time()\n count += 1\n x = ''.join(random.sample(string.ascii_letters, 10))\n if password_hash(x.encode()) == target:\n print(\"Found\", x, \"with byte order\", b_order)\n break\n\n\ndef get_database(user):\n \"\"\"Return the database of a given user, making it up if not existing.\"\"\"\n root = Path('./data')\n root.mkdir(parents=True, exist_ok=True)\n\n db_file = root / f'db_{user[\"id\"]}.sqlite3'\n if db_file.exists():\n print(\"Loading existing db\")\n db = sqlite3.connect(db_file)\n return db\n\n print(\"Creating database for user\", user)\n db = sqlite3.connect(db_file)\n with db:\n # Seed random number generation uniquely for each player for coherent xp\n rng = random.Random()\n rng.seed(user['id'])\n\n # Generate some random players with unique name\n num_fake_players = rng.randrange(50, 70)\n fake_player_names = {random_name('-', rng)\n for _ in range(num_fake_players)}\n # Add one user which is particular... Why would WASP9 use a special\n # python-related name? 
This is fishy...\n fake_player_names.add(super_user[0])\n\n # Build a (deterministically) shuffled list of users\n # user_data = {name: ['2009-10-30'] for name in fake_player_names}\n usernames = sorted(fake_player_names)\n rng.shuffle(usernames)\n\n db.executescript('''\n CREATE TABLE user (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL UNIQUE,\n timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP\n );\n\n CREATE TABLE event (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL,\n origin INTEGER NOT NULL,\n value INTEGER NOT NULL,\n timestamp TIMESTAMP NOT NULL\n );\n\n CREATE TABLE counter (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n event_id TEXT NOT NULL,\n count INTEGER NOT NULL\n );\n\n CREATE TABLE hosts (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n host TEXT NOT NULL,\n protocol TEXT NOT NULL\n );\n\n CREATE TABLE actl (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n user TEXT NOT NULL UNIQUE,\n blake2b_16 TEXT NOT NULL\n );\n ''')\n\n for usr in usernames:\n db.execute('''INSERT INTO user (name, timestamp) VALUES (?,?)''',\n [usr, join_date])\n\n for ev in ['access', 'access', 'score', 'score']:\n db.execute('''INSERT INTO event (name, origin, value, timestamp)\n VALUES (?,?,?,?)''',\n ['access', 'wasp9/stage0', 1, '2009-02-02'])\n\n for n in [(1, 1), (2, 75), (3, 112)]:\n db.execute('''INSERT INTO counter (event_id, count)\n VALUES (?,?)''',\n n)\n\n for h in hosts:\n db.execute('''INSERT INTO hosts (id, host, protocol)\n VALUES (?,?,?)''', h)\n\n for user, pwd in users:\n db.execute('''INSERT INTO actl (user, blake2b_16) VALUES (?,?)''', \n [user, password_hash(pwd.encode())])\n\n return db\n","repo_name":"akiross/PyConXRiddle","sub_path":"riddle/game/wasp10/old/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34304545110","text":"\"\"\"Find the sum of multiples of three or five from 1 to n inclusive.\"\"\"\n\n\ndef find_sum(n):\n nums_sum = 0\n for i in range(3, n+1):\n if i % 3 == 0 or i % 5 == 0:\n nums_sum += i\n return nums_sum\n\n\ndef main():\n n = int(input())\n print(find_sum(n))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"0x70DA/IEEE-ZSB-Technical-Rookies-22","sub_path":"Task-1/problem_7.py","file_name":"problem_7.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18591305641","text":"from math import isnan, isinf\nimport re\n\njs_version = \"0.4\" # the d.o jasper version string that this is synced to.\n\nclass __OutputObject:\n\tdef __init__(self, v, s, p):\n\t\tself._dict = {'value': v, 'schema': s, 'path': p}\n\t\tself.sanitized = None\n\t\tself.errors = []\n\t\tself.warnings = []\n\tdef err(self, msg):\n\t\t# build the entry first: dict.update() mutates in place and returns None\n\t\tentry = self._dict.copy()\n\t\tentry.update({'message': msg})\n\t\tself.errors.append(entry)\n\tdef warn(self, msg):\n\t\tentry = self._dict.copy()\n\t\tentry.update({'message': msg})\n\t\tself.warnings.append(entry)\n\tdef merge_errors(self, other):\n\t\tself.errors.extend(other.errors)\n\t\tself.warnings.extend(other.warnings)\n\tdef proxy(self, other):\n\t\tself.sanitized = other.sanitized\n\t\tself.errors = other.errors\n\t\tself.warnings = other.warnings\n\nclass __Context:\n\tdef __init__(self, obj, opts):\n\t\tself.obj = obj\n\t\tself.opts = opts\n\n# \n_regex_parse = re.compile(r'^/(.*)/(i?m?|mi)$')\n_regex_flags = {\n\t'i': re.I, 'm': re.M, 'im': re.I | re.M, 'mi': re.I | re.M\n}\ntypeof = lambda s: type(s).__name__\n_inf = 
float(\"Infinity\")\n\ndef dictify(array):\n\tout = {}\n\tfor i in range(len(array)):\n\t\tout[str(i)] = array[i]\n\treturn out\n\n# The basic validation functions for the primitives are defined here.\n# They all take a basic set of parameters: \n# v: the value in the input object\n# o: an output object\n# m: the metadata object from the schema\n# r: a recurse function, to validate any children\n# c: the context of the function -- called opts and the overall object.\n\n# We create a primitives registry with an accompanying decorator.\nprimitives = {}\ndef primitive(fn):\n\tprimitives[fn.__name__[1:]] = fn\n\treturn fn\n\n@primitive\ndef _freeform(v, o, m, r, c):\n\to.sanitized = v;\n\n@primitive\ndef _regex(v, o, m, r, c):\n\t# compiled patterns are named 'SRE_Pattern' before Python 3.7 and 'Pattern' after\n\tif typeof(v) in ['SRE_Pattern', 'Pattern']:\n\t\to.sanitized = v;\n\telif typeof(v) == 'str':\n\t\ttry: \n\t\t\tmatch = _regex_parse.search(v)\n\t\t\tflags = _regex_flags.get(match.group(2), 0)\n\t\t\to.sanitized = re.compile(match.group(1), flags)\n\t\texcept (re.error, AttributeError):\n\t\t\t# a string that does not match _regex_parse leaves match as None\n\t\t\to.err('invalid regular expression')\n\t\t\treturn\n\telse:\n\t\to.err('unable to coerce')\n\t\treturn\n\tif c.opts.get('regex_as_string', False):\n\t\tflags = ''\n\t\tif o.sanitized.flags & re.I:\n\t\t\tflags += 'i'\n\t\tif o.sanitized.flags & re.M:\n\t\t\tflags += 'm'\n\t\to.sanitized = '/' + o.sanitized.pattern + '/' + flags\n\n# for consistency with jasper 0.4, we only cast \"true\", not \"True\"\n@primitive\ndef _boolean(v, o, m, r, c): \n\tif typeof(v) == 'bool':\n\t\to.sanitized = v\n\telse: \n\t\tv = str(v)\n\t\tif v in ['true', 'false']:\n\t\t\to.sanitized = (v == 'true')\n\t\t\to.warn('type coercion')\n\t\telse: \n\t\t\to.err('unable to coerce')\n\n@primitive\ndef _number(v, o, m, r, c): \n\tif typeof(v) not in ['int', 'float']:\n\t\to.warn('type coercion')\n\ttry: \n\t\tv = float(v)\n\t\tif isnan(v) or isinf(v):\n\t\t\tv = None\n\texcept ValueError:\n\t\tv = None\n\t\t\n\tif v is None:\n\t\to.err('unable to coerce')\n\telse: \n\t\tif m.get('integer', False) and int(v) != v:\n\t\t\to.err('not an integer')\n\t\tif m.get('max', _inf) < v:\n\t\t\to.err('maximum violated')\n\t\tif m.get('min', -_inf) > v:\n\t\t\to.err('minimum violated')\n\t\t\n\t\to.sanitized = v;\n\n@primitive\ndef _string(v, o, m, r, c):\n\tt = typeof(v)\n\tif t == 'str':\n\t\tpass\n\telif t in ['int', 'float', 'bool']:\n\t\t# typeof() is type(...).__name__, which yields 'bool' for booleans\n\t\to.warn('type coercion')\n\t\tv = str(v).lower()\n\telse:\n\t\to.err('unable to coerce')\n\t\treturn\n\t\n\tif m.get('max_length', _inf) < len(v):\n\t\to.err('max length violated')\n\tif m.get('min_length', -_inf) > len(v):\n\t\to.err('min length violated')\n\tif m.get('root_index', False) and v not in c.obj:\n\t\to.err('not a valid key into the root index')\n\tregex = m.get('regex', None)\n\tif regex and not regex.search(v):\n\t\to.err('regex violated')\n\tif m.get('confidential', False) and c.opts.get('hide_confidential', False):\n\t\tv = \"(confidential)\"\n\t\n\to.sanitized = v\n\n@primitive\ndef _index(v, o, m, r, c):\n\tsanitized = {}\n\tsubschema = 
m.get('elements', 'object')\n\tif typeof(v) == 'list':\n\t\tfor i in range(len(v)):\n\t\t\toutput = r(v[i], subschema, i)\n\t\t\tsanitized.append(output.sanitized)\n\t\t\to.merge_errors(output)\n\t\to.sanitized = sanitized\n\telse:\n\t\to.err('unable to coerce')\n\n@primitive\ndef _multi(v, o, m, r, c):\n\tsubschemas = m.get('allowed', [])\n\tattempts = []\n\t# we create a default error. The only way out will be o.proxy().\n\to.err('no options matched')\n\t\n\tfor i in range(len(subschemas)):\n\t\toutput = r(v, subschemas[i], '(multi: ' + str(i) + ')')\n\t\tattempts.append(output)\n\t\to.merge_errors(output)\n\t\n\tattempts.sort(key=lambda a: (len(a.errors), len(a.warnings)))\n\t\n\tif len(attempts) > 0:\n\t\tif len(attempts[0].errors) == 0:\n\t\t\to.proxy(attempts[0])\n\n@primitive\ndef _enum(v, o, m, r, c):\n\tvalue = m.get('value_field', 'value')\n\tmeta = m.get('meta_field', 'meta')\n\topts = m.get('options', {})\n\tsubtype = 'object' if m.get('strict', True) else 'args'\n\tt = typeof(v)\n\tif t == 'dict':\n\t\tpass\n\telif t == 'list':\n\t\tv = dictify(v)\n\telse:\n\t\to.err('unable to coerce')\n\t\treturn\n\t\n\tif value not in v:\n\t\to.err('unable to coerce')\n\telif v[value] not in opts:\n\t\to.err('value not allowed by enum: ' + v[value])\n\telse:\n\t\tsubschema = {'type': subtype, 'meta': {'fields': opts[v[value]]}}\n\t\toutput = r(v.get(meta, {}), subschema, meta)\n\t\to.merge_errors(output)\n\t\to.sanitized = {value: v[value], meta: output.sanitized} \n\t\t# note: this key ^ is not necessarily \"value\".\n\n@primitive\ndef _args(v, o, m, r, c):\n\tfields = m.get('fields', {})\n\to.sanitized = {}\n\tt = typeof(v)\n\tif t == 'list':\n\t\tv = dictify(v)\n\telif t != 'dict':\n\t\to.err('unable to coerce')\n\t\treturn\n\t\n\tfor k in v:\n\t\tif k not in fields:\n\t\t\to.warn('extra key not in schema: ' + k)\n\t\telse: \n\t\t\toutput = r(v[k], fields[k], k)\n\t\t\to.merge_errors(output)\n\t\t\to.sanitized[k] = output.sanitized\n\n@primitive\ndef _object(v, o, m, r, c):\n\tfields = m.get('fields', {})\n\t#first, validate as an `args` type\n\t_args(v, o, m, r, c)\n\t#then, check that all keys are accounted for:\n\tif typeof(o.sanitized) == 'dict':\n\t\tfor k in fields:\n\t\t\tif k not in o.sanitized:\n\t\t\t\to.err('missing field: ' + k)\n\ndef validation(obj, model, root_schema, opts=None):\n\tif typeof(opts) != 'dict':\n\t\topts = {}\n\tcontext = __Context(obj, opts)\n\t\n\tdef subvalidate(value, schema, path):\n\t\t# convert schema to {type, meta} form:\n\t\tif typeof(schema) == 'str':\n\t\t\tschema = model.get(schema, {'type': schema, 'meta': {}})\n\t\ttry:\n\t\t\tvalue = value.jasper();\n\t\texcept AttributeError:\n\t\t\tpass\n\t\tout = __OutputObject(value, schema, path)\n\t\tdef recurse(v, s, name):\n\t\t\tsubpath = path[:]\n\t\t\tsubpath.append(name)\n\t\t\treturn subvalidate(v, s, subpath)\n\t\t\n\t\tt = schema['type']\n\t\tif t in primitives:\n\t\t\tprimitives[t](value, out, schema['meta'], recurse, context)\n\t\telse: \n\t\t\tout.err(\"schema type not recognized: \" + t)\n\t\t\n\t\treturn out\n\t\n\tobj = subvalidate(obj, model[root_schema], [])\n\t\n\tif len(obj.errors) > 0:\n\t\treturn {'status': 'errors', 'meta': {'list': obj.errors}};\n\telse:\n\t\treturn {\n\t\t\t'status': 'ok', \n\t\t\t'meta': {'sanitized': obj.sanitized, 'warnings': obj.warnings}\n\t\t}\n\n__not_type = re.compile(\"^((?!type).+|type.+)$\")\n__primitives = re.compile(\"^(\" + \"|\".join(primitives.keys()) + \")$\")\n\nmetamodel = {\n\t\"model\": {\"type\": \"index\", \"meta\": {\n\t\t\"elements\": 
\"schema\",\n\t\t\"valid_keys\": __not_type\n\t}},\n\t\"primitive string\": {\"type\": \"string\", \"meta\": {\n\t\t\"regex\": __primitives\n\t}}, \n\t\"composite schema\": {\"type\": \"multi\", \"meta\": {\n\t\t\"allowed\": [\n\t\t\t\"schema\", \n\t\t\t\"primitive string\",\n\t\t\t{\"type\": \"string\", \"meta\": {\"root_index\": True}}\n\t\t]\n\t}},\n\t\"natural number\": {\"type\": \"number\", \"meta\": {\"integer\": True, \"min\": 0}},\n\t\"object fields\": {\"type\": \"index\", \"meta\": {\"elements\": \"composite schema\"}},\n\t\"schema\": {\"type\": \"enum\", \"meta\": {\n\t\t\"value_field\": \"type\",\n\t\t\"meta_field\": \"meta\",\n\t\t\"strict\": False,\n\t\t\"options\": {\n\t\t\t\"freeform\": {},\n\t\t\t\"boolean\": {},\n\t\t\t\"regex\": {},\n\t\t\t\"number\": {\n\t\t\t\t\"integer\": \"boolean\",\n\t\t\t\t\"max\": \"number\",\n\t\t\t\t\"min\": \"number\"\n\t\t\t},\n\t\t\t\"string\": {\n\t\t\t\t\"max_length\": \"natural number\",\n\t\t\t\t\"min_length\": \"natural number\",\n\t\t\t\t\"regex\": \"regex\",\n\t\t\t\t\"root_index\": \"boolean\",\n\t\t\t\t\"confidential\": \"boolean\"\n\t\t\t},\n\t\t\t\"enum\": {\n\t\t\t\t\"value_field\": \"string\",\n\t\t\t\t\"meta_field\": \"string\",\n\t\t\t\t\"strict\": \"boolean\",\n\t\t\t\t\"options\": {\"type\": \"index\", \"meta\": {\"elements\": \"object fields\"}}\n\t\t\t},\n\t\t\t\"index\": {\n\t\t\t\t\"elements\": \"composite schema\",\n\t\t\t\t\"valid_keys\": \"regex\"\n\t\t\t},\n\t\t\t\"list\": {\n\t\t\t\t\"elements\": \"composite schema\"\n\t\t\t},\n\t\t\t\"args\": {\n\t\t\t\t\"fields\": \"object fields\"\n\t\t\t},\n\t\t\t\"object\": {\n\t\t\t\t\"fields\": \"object fields\"\n\t\t\t},\n\t\t\t\"multi\": {\n\t\t\t\t\"allowed\": {\"type\": \"list\", \"meta\": {\"elements\": \"composite schema\"}}\n\t\t\t}\n\t\t}\n\t}}\n}\n\nclass Model:\n\tdef __init__(self, model_spec, base_opts=None):\n\t\tself.base_opts = base_opts if typeof(base_opts) == 'dict' else {}\n\t\tmodel = validation(model_spec, metamodel, \"model\")\n\t\tif model['status'] == \"errors\" or len(model['meta']['warnings']) > 0:\n\t\t\traise ValueError(\"Invalid jasper model. 
Please revalidate it.\")\n\t\t\n\t\tself.model = model['meta']['sanitized']\n\t\n\tdef validate(self, obj, model_name, opts={}):\n\t\tmodel_name = str(model_name)\n\t\tsubopts = self.base_opts.copy().update(opts)\n\t\tif model_name not in self.model:\n\t\t\traise ValueError(\"Unrecognized model name: \" + model_name)\n\t\telse:\n\t\t\treturn validation(obj, self.model, model_name, subopts)","repo_name":"crdrost/d.o","sub_path":"extras/jasper.py","file_name":"jasper.py","file_ext":"py","file_size_in_byte":9450,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"69804984967","text":"from Token import Token\nfrom Error import Error\nfrom prettytable import PrettyTable\nimport webbrowser\nimport time\n\nclass Scanner:\n def __init__(self):\n self.buffer = ''\n self.fila = 1\n self.columna = 1\n self.estado = 0\n self.listaTokens = []\n self.listaErrores = []\n self.i = 0\n self.flag_comillas = False\n \n def agregar_Token(self,caracter,fila,columna,token):\n self.listaTokens.append(Token(caracter,fila,columna,token))\n self.buffer = ''\n \n def agregar_Error(self,caracter,fila,columna):\n if ord(caracter)>= 48 and ord(caracter)<= 57:\n self.listaErrores.append(Error('Caracter \\'' + caracter + '\\' error de tipo Numero',fila,columna))\n else:\n self.listaErrores.append(Error('Caracter \\'' + caracter + '\\' error de tipo Simbolo',fila,columna))\n def s0(self,caracter):\n '''Estado 0'''\n if (caracter.isalpha() and not self.flag_comillas):\n self.estado = 1\n self.buffer += caracter\n self.columna += 1\n elif caracter == '~':\n self.estado = 2\n self.buffer += caracter\n self.columna += 1\n elif caracter == '[':\n self.estado = 3\n self.buffer += caracter\n self.columna += 1\n elif caracter == ']':\n self.estado = 4\n self.buffer += caracter\n self.columna += 1\n elif caracter == '\\\"':\n self.estado = 5\n self.buffer += caracter\n self.columna += 1\n elif caracter == '\\'':\n self.estado = 6\n self.buffer += caracter\n self.columna += 1\n elif (caracter.isalpha() or (ord(caracter)>= 48 and ord(caracter)<= 57)) and self.flag_comillas:\n self.estado = 7\n self.buffer += caracter\n self.columna += 1\n elif caracter == '<':\n self.estado = 8\n self.buffer += caracter\n self.columna += 1\n elif caracter == '>':\n self.estado = 9\n self.buffer += caracter\n self.columna += 1\n elif caracter == ':':\n self.estado = 10\n self.buffer += caracter\n self.columna += 1\n elif caracter == ',':\n self.estado = 11\n self.buffer += caracter\n self.columna += 1\n elif caracter == '\\n':\n self.fila += 1\n self.columna = 1\n elif caracter in ['\\t',' ']:\n self.columna += 1\n elif caracter == '$':\n pass\n else:\n self.agregar_Error(caracter,self.fila,self.columna)\n \n def s1(self,caracter):\n '''Estado 1'''\n if caracter.isalpha():\n self.estado = 1\n self.buffer += caracter\n self.columna += 1\n else:\n if (self.buffer.lower() == 'formulario' or self.buffer.lower() == 'tipo' \n or self.buffer.lower() == 'valor' or self.buffer.lower() == 'fondo' or self.buffer.lower() == 'valores' \n or self.buffer.lower() == 'evento'): \n self.agregar_Token(self.buffer,self.fila,self.columna,'reservada_' + self.buffer)\n self.estado = 0\n self.i -= 1\n elif self.buffer in ['entrada', 'info']:\n self.agregar_Token(self.buffer,self.fila,self.columna,'evento_' + self.buffer)\n self.estado = 0\n self.i -= 1\n\n\n def s2(self):\n '''Estado 2'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Virgulilla')\n self.estado = 0\n self.i -= 1\n \n def s3(self):\n 
'''Estado 3'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo Abre corchete')\n self.estado = 0\n self.i -= 1\n \n def s4(self):\n '''Estado 4'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo Cierra corchete')\n self.estado = 0\n self.i -= 1\n \n def s5(self):\n '''Estado 5'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo de Comillas')\n self.estado = 0\n self.i -= 1\n if self.flag_comillas:\n self.flag_comillas = False\n else:\n self.flag_comillas = True\n \n def s6(self):\n '''Estado 6'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo Comillas Simple')\n self.estado = 0\n self.i -= 1\n if self.flag_comillas:\n self.flag_comillas = False\n else:\n self.flag_comillas = True\n \n def s7(self,caracter):\n '''Estado 7 - cadenas'''\n if caracter.isalpha():\n self.estado = 7\n self.buffer += caracter\n self.columna += 1\n elif caracter in ['+','!','*','@',' ','-',':',';','#','%','^','&','?',',','.','|']:\n self.estado = 7\n self.buffer += caracter\n self.columna += 1\n elif caracter.isdigit():\n self.estado = 7\n self.buffer += caracter\n self.columna += 1\n else:\n self.agregar_Token(self.buffer,self.fila,self.columna,'Cadena X')\n self.estado = 0\n self.i -= 1\n\n def s8(self):\n '''Estado 8'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo Menor Q')\n self.estado = 0\n self.i -= 1\n \n def s9(self):\n '''Estado 9'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo Mayor Q')\n self.estado = 0\n self.i -= 1\n \n def s10(self):\n '''Estado 10'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo de dos puntos')\n self.estado = 0\n self.i -= 1\n \n def s11(self):\n '''Estado 11'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo coma')\n self.estado = 0\n self.i -= 1\n\n def s12(self):\n '''Estado 12'''\n self.agregar_Token(self.buffer,self.fila,self.columna,'Signo coma')\n self.estado = 0\n self.i -= 1\n \n # self.agregar_Token\n def analizar(self,cadena):\n '''Realiza los cambios de estados'''\n cadena += '$'\n self.i = 0\n while self.i < len(cadena):\n if self.estado == 0:\n self.s0(cadena[self.i])\n elif self.estado == 1:\n self.s1(cadena[self.i])\n elif self.estado == 2:\n self.s2()\n elif self.estado == 3:\n self.s3()\n elif self.estado == 4:\n self.s4()\n elif self.estado == 5:\n self.s5()\n elif self.estado == 6:\n self.s6()\n elif self.estado == 7:\n self.s7(cadena[self.i])\n elif self.estado == 8:\n self.s8()\n elif self.estado == 9:\n self.s9()\n elif self.estado == 10:\n self.s10()\n elif self.estado == 11:\n self.s11()\n self.i += 1\n \n def imprimirTokens(self):\n x = PrettyTable()\n x.field_names = [\"Lexema\",\"fila\",\"Columna\",\"Tipo\"]\n for token in self.listaTokens:\n x.add_row([token.lexema,token.fila,token.columna,token.tipo])\n print(x)\n \n def imprimirErrores(self):\n x = PrettyTable()\n x.field_names = [\"Descripción\",\"fila\",\"Columna\"]\n for error in self.listaErrores:\n x.add_row([error.descripcion,error.fila,error.columna])\n print(x)\n \n def crearTTokens(self):\n texto = ''\n f = open('./ReporteTokens.html','w')\n texto += '''\n \n \n \n Reporte Tokens\n \n \n \n \n \n \n \n \n \n
<body>\n <div>\n <h1>Tabla de Tokens</h1>\n <p>A continuacion se presentan tabla de tokens encontrados en el lenguaje:</p>\n <table>\n <tr><th>LEXEMA</th><th>FILA</th><th>COLUMNA</th><th>TIPO</th></tr>'''\n for token in self.listaTokens:\n texto +='''\n <tr><td>'''+token.lexema+'''</td><td>'''+str(token.fila)+'''</td><td>'''+str(token.columna)+'''</td><td>'''+token.tipo+'''</td></tr>'''\n texto +='''\n </table>\n <p>Josue Gramajo - 202000895</p>\n <p>Reporte generado:'''+time.ctime()+'''</p>\n </div>\n </body>\n </html>\n '''\n mensaje = texto \n f.write(mensaje)\n f.close()\n\n webbrowser.open_new_tab('ReporteTokens.html')\n\n def crearTErrores(self):\n texto = ''\n f = open('./ReporteErrores.html','w')\n texto += '''\n <html>\n <head>\n <title>Reporte Tokens</title>\n </head>\n <body>\n <div>\n <h1>Tabla de Errores</h1>\n <p>A continuacion se presentan tabla de errores encontrados en el lenguaje:</p>\n <table>\n <tr><th>Descripcion</th><th>fila</th><th>COLUMNA</th></tr>'''\n for error in self.listaErrores:\n texto +='''\n <tr><td>'''+error.descripcion+'''</td><td>'''+str(error.fila)+'''</td><td>'''+str(error.columna)+'''</td></tr>'''\n texto +='''\n </table>\n <p>Josue Gramajo - 202000895</p>\n <p>Reporte generado:'''+time.ctime()+'''</p>\n </div>\n </body>
\n \n '''\n mensaje = texto \n f.write(mensaje)\n f.close()\n\n webbrowser.open_new_tab('ReporteErrores.html')","repo_name":"Ijosuer/LFP_PY1_202000895","sub_path":"Proyecto1/Analizador.py","file_name":"Analizador.py","file_ext":"py","file_size_in_byte":12622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26257087317","text":"import os\nimport json\n\n\ndef load_env() -> None:\n # in the prod workflow, GitHub secrets will pre-load the env vars for us\n # https://docs.github.com/en/actions/reference/encrypted-secrets\n # but locally, we have to source the env vars ourselves\n ENV_FILE = \"env.json\"\n if os.path.isfile(ENV_FILE):\n with open(ENV_FILE) as f:\n env_vars = json.load(f)\n\n for var_name, val in env_vars.items():\n os.environ[var_name] = val\n","repo_name":"tngzng/follow_4_follow","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16933991259","text":"from data import get_dataset\nfrom model import features,classifier,metrics\nimport tqdm\n# from model import get_features,classifier\n\npath=\"random_split/\"\n\ntest,val,train =get_dataset.get_df(path)\n\n#Due to limitation of computing capacities, we will only consider the 1000 most important classes\nclasses=features.get_classes_top1000(train)\n#apply those changes to train, test, val\ntest=features.reduce_dataset_1000topclasses(test,classes)\ntrain=features.reduce_dataset_1000topclasses(train,classes)\nval=features.reduce_dataset_1000topclasses(val,classes)\n\n#Process inputs \ntrain_processed=features.process_dataset(train, classes)\ntest_processed=features.process_dataset(test, classes)\nval_processed=features.process_dataset(val, classes)\nprint(train_processed.shape)\nprint(test_processed.shape)\nprint(val_processed.shape)\n\ntrain_2=features.additional_process(train_processed)\nval_2=features.additional_process(val_processed)\ntest_2=features.additional_process(test_processed)\n\nprint(train_2.shape)\nprint(test_2.shape)\nprint(val_2.shape)\n#Process labels\ny_train=features.process_labels(train,classes)\ny_test=features.process_labels(test,classes)\ny_val=features.process_labels(val,classes)\nprint(y_train.shape)\nprint(y_test.shape)\nprint(y_val.shape)\n\nmodel=classifier.build_model()\nprint(model.summary())\n\nhistory=classifier.train_model(model,train_processed,y_train,val_processed,y_val,model_name=\"protcnn\")\n\nclassifier.plot_accuracy_train_val(history)\n\nresult=model2.evaluate(test_2,y_test)","repo_name":"MarionSauvage/Pfam","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3740628016","text":"import sys\nsys.stdin = open('sample_input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n numbers = list(map(int, input()))\n empty_list = []\n\n for num in numbers:\n empty_list.append(numbers.count(num))\n\n num_count = max(empty_list)\n\n for number in numbers:\n if numbers.count(number) == num_count:\n if numbers.count(number) == 1:\n print(f'#{tc} {max(numbers)} {num_count}')\n else:\n print(f'#{tc} {number} {num_count}')\n 
break\n\n\n\n","repo_name":"liljw/TIL","sub_path":"Algorithms/swea/4834_숫자카드/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"16603395641","text":"\"\"\"\nДана последовательность натуральных чисел.\nНайти последовательность подряд идущих чисел,\nчтобы их сумма была максимальной и делилась на 100\n\"\"\"\nwith open('27A.txt') as f:\n n = int(f.readline())\n s = 0\n max_sum = 0\n mods = [20 ** 5] * 1000\n for _ in range(n):\n x = int(f.readline())\n s += x\n if s % 1000 == 0:\n max_sum = max(max_sum, s)\n else:\n max_sum = max(max_sum, s - mods[s % 1000])\n mods[s % 1000] = min(mods[s % 1000], s)\nprint(max_sum)\n","repo_name":"sweeteri/solutions","sub_path":"27tasks/subsequences/27.1.py","file_name":"27.1.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9733194725","text":"\nfrom datetime import timedelta,datetime\nimport pandas as pd\nimport requests\nfrom typing import List\n\n\nclass RestDao:\n def __init__(self, address=None):\n self.address = address\n\n def get_trades(self, symbol, from_id):\n r = requests.get('https://api.binance.com/api/v3/aggTrades',\n params = {\n \"symbol\": symbol,\n \"limit\": 100,\n \"fromId\": from_id\n })\n return r.json()\n\n def get_first_trade_id_from_start_time(self, symbol, start_time):\n end_time = start_time + timedelta(seconds = 60)\n r = requests.get('https://api.binance.com/api/v3/aggTrades',\n params={\n 'symbol' : symbol,\n \"startTime\" : self.get_unix_ms(start_time),\n \"endTime\" : self.get_unix_ms(end_time)\n })\n response = r.json()\n if len(response) > 0:\n return response[0]['a']\n else:\n raise Exception('No trades found')\n\n @staticmethod\n def get_unix_ms(time):\n return int(time.timestamp()*1000)\n\n def process_data(self, column_name: List[str], data)-> pd.DataFrame :\n df = pd.DataFrame(data)\n df = df[['a','p','q']]\n df.a = df.a.astype(float)\n df.p = df.p.astype(float)\n df.q = df.q.astype(float)\n df.columns = column_name\n return df\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"webclinic017/Super-mini-hedge-fund","sub_path":"restdao.py","file_name":"restdao.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41093214728","text":"from MRS.settings import SITE_EMAIL\nfrom celery import shared_task\nfrom time import sleep\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.conf import settings\nfrom MRS.celery import app\n\n@app.task(name=\"send_staff_email_notification_task\")\ndef send_staff_email_notification_task(email, name):\n try:\n from_email = settings.SITE_EMAIL\n subject = \"Account Created\"\n message = f\"Hello {name}, your account has been created successfully , Your Employee Number is emp/ld/2022\"\n mail = EmailMessage(\n subject,\n message,\n from_email,\n [email]\n )\n mail.content_subtype = \"html\"\n mail.send(fail_silently=False)\n except Exception as e:\n raise e\n\n@app.task(name=\"send_student_email_notification_task\")\ndef send_student_email_notification_task(email, name):\n try:\n from_email = settings.SITE_EMAIL\n subject = \"Account Created\"\n message = f\"Hello {name}, your account has been created successfully , Your Login password is @Lodamscollegestudent2022\"\n mail = EmailMessage(\n subject,\n message,\n from_email,\n 
[email]\n )\n mail.content_subtype = \"html\"\n mail.send(fail_silently=False)\n except Exception as e:\n raise e ","repo_name":"mutungapeter/Lodams","sub_path":"main/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22472432254","text":"#!/usr/bin/python3\n\nimport sys\nimport numpy as np\nimport wave\nfrom scipy.signal import decimate, correlate\nfrom scipy.stats import mode\nimport os\n\ndef read_wav_file(filepath):\n wave_read = wave.open(filepath, mode = 'rb')\n length = wave_read.getnframes()\n sample_rate = wave_read.getframerate()\n \n if wave_read.getsampwidth() == 1:\n type = np.uint8\n elif wave_read.getsampwidth() == 2:\n type = np.int16\n elif wave_read.getsampwidth() == 4:\n type = np.int32\n else:\n type = np.int16\n \n data = np.frombuffer(wave_read.readframes(length), count = wave_read.getnchannels() * length, dtype = type)\n if wave_read.getnchannels() >= 2:\n data = np.reshape(data, (length, wave_read.getnchannels()))\n wave_read.close()\n return sample_rate, data\n\ndef fft(signal):\n n = len(signal)\n signal_fft = np.fft.fft(signal)\n signal_fft = np.abs(signal_fft)\n \n signal_fft = signal_fft * 2 / n\n signal_fft[0] = signal_fft[0] / 2\n return signal_fft\n\n#harmonic product spectrum\ndef hps(signal, sample_rate):\n n = len(signal)\n signal = fft(signal)\n \n n_harmonics = 4\n signal = signal[:len(signal)//2]\n result_signal = signal\n for i in range(2, n_harmonics + 1):\n downsampled = decimate(signal, i)\n result_signal = result_signal[:downsampled.shape[0]] * downsampled\n \n i_begin = int(60 * n / sample_rate)\n \n return (np.argmax(result_signal[i_begin:]) + i_begin) * sample_rate / n\n\n#cross correlation\ndef cross_correlation(signal, sample_rate):\n window_len = len(signal)\n window = signal\n window = window - np.mean(window)\n \n result = correlate(window, window, method = 'fft')\n \n high = sample_rate//300\n left = window_len - 1 + high\n right = window_len + sample_rate//60\n result = result[left:right]\n return sample_rate / (np.argmax(result) + high)\n\ndef classify_file(sample_rate, data, method = hps):\n if len(data.shape) == 1:\n result = method(data, sample_rate)\n else:\n result1 = method(data[:, 0], sample_rate)\n result2 = method(data[:, 1], sample_rate)\n result = (result1 + result2) / 2\n \n if result > 175:\n return 'K'\n else:\n return 'M'\n\ndef decision_tree_classification(w, signal):\n if(len(signal.shape)==2 and signal.shape[1]==2):\n signal = [s[0] for s in signal]\n signum=abs(np.fft.fft(signal))[:len(signal)//2]\n\n conn=(w/(len(signum)*2))\n d1=int(60//conn)\n d2=int(290//conn+1)\n\n dp=signum[d1:d2]*conn\n kv=sum(dp)\n asum=0\n\n p3=[0]*3\n for jj in range(len(dp)):\n asum+=dp[jj]\n for ij in range(1,4):\n if (asum>kv*ij*0.25 and p3[ij-1]==0):\n p3[ij-1]=jj*conn\n\n mode=(np.argmax(dp)+d1)*conn\n\n summa=0\n if (p3[0]<70):\n if (mode>207):\n cl='K'\n else:\n cl='M'\n\n elif (mode>=200):\n if (p3[0]<90):\n cl='M'\n else:\n cl='K'\n elif (p3[0]>95):\n cl='K'\n else:\n cl='M'\n return cl\n\ntry:\n filepath = str(sys.argv[1])\n sample_rate, data = read_wav_file(filepath)\n result1 = classify_file(sample_rate, data, method = hps)\n result2 = classify_file(sample_rate, data, method = cross_correlation)\n result3 = decision_tree_classification(sample_rate, data)\n result = mode([result1, result2, result3])[0][0]\nexcept:\n result = 
'M'\nprint(result)\n\n\n","repo_name":"JonothorDarry/kacykowanie","sub_path":"signals/inf136770_inf136834_ensemble.py","file_name":"inf136770_inf136834_ensemble.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20856463177","text":"from os import listdir, mkdir\nfrom os.path import isdir, isfile, join\nimport shutil\nimport sys\n\nfrom flask import current_app as app\nimport yaml\n\nfrom ..model import (\n Categories, CategoryQuestion, QuestionsAll,\n Questions, Quizes\n)\nfrom .. import db\nfrom . import Utils\nfrom .question import QuestionFactory\n\n\nclass QuizFactory:\n\n def get_quiz(self, quiz_type):\n class_name = 'Quiz' + quiz_type[:1].upper() + quiz_type[1:]\n cls = getattr(sys.modules[__name__], class_name)\n return cls()\n\n def get_quiz_from_dir(self, from_dir, new_update='new'):\n quiz_def = QuizImport().get_quiz_def(from_dir)\n quiz = self.get_quiz(quiz_def['type']['name'])\n quiz.import_from_dir(from_dir, new_update, quiz_def)\n return quiz\n\n def get_actual_quiz(self, full=False):\n res = db.session.query(Quizes.quiz_type).order_by(Quizes.quiz_id.desc()).first()\n if res:\n quiz = self.get_quiz(res.quiz_type)\n quiz.load(full=full)\n else:\n quiz = QuizBase()\n return quiz\n\n\nclass QuizImport:\n \"\"\"\n Class used by importing quiz from github repo directory\n \"\"\"\n import_dir = ''\n questions_dir = 'questions'\n pictures_dir = 'pictures'\n app_files_dir = ''\n\n def __init__(self):\n with app.app_context():\n self.import_dir = app.config['IMPORT_DIR']\n self.app_files_dir = join(app.root_path, app.config['FILES_DIR'])\n\n def get_dirs(self):\n \"\"\"\n Get all github directories from import directory\n\n Returns:\n list: directory names\n \"\"\"\n result = []\n for f in listdir(self.import_dir):\n if isdir(join(self.import_dir, f)) and isdir(join(self.import_dir, f, '.git')):\n result.append(f)\n result.sort()\n return result\n\n def get_quiz_def(self, from_dir):\n \"\"\"\n Get definition in quiz.yaml from github repo directory\n\n Args:\n from_dir (str): github repo directory\n Returns:\n dict: quiz definition\n \"\"\"\n from_dir = join(self.import_dir, from_dir)\n with open(join(from_dir, 'quiz.yaml'), 'r') as stream:\n return yaml.safe_load(stream)\n return {}\n\n def get_question_def(self, from_dir, file_name):\n \"\"\"\n Get definition in question yaml from github repo directory\n\n Args:\n from_dir (str): github repo directory\n file_name (str): name of the question file\n Returns:\n dict: question definition\n \"\"\"\n from_dir = join(self.import_dir, from_dir, self.questions_dir)\n try:\n with open(join(from_dir, file_name), 'r') as stream:\n return yaml.safe_load(stream)\n except Exception as ex:\n app.logger.error(str(ex))\n return {}\n\n def import_file(self, from_dir, file_name, subdir):\n path_from = join(self.import_dir, from_dir, self.pictures_dir, file_name)\n if isfile(path_from):\n path_to = join(self.app_files_dir, subdir)\n if not isdir(path_to):\n mkdir(path_to)\n shutil.copyfile(path_from, join(path_to, file_name))\n\n\nclass QuizBase:\n _model = Quizes\n quiz_id = None\n quiz_type = None\n categories = []\n categories_count = 0\n questions = []\n questions_count = 0\n from_dir = None\n author = None\n status = None\n individual = 0\n\n def get_db_obj(self):\n res = self._model()\n Utils.set_vars(res, self)\n return res\n\n\nclass QuizDefault(QuizBase):\n title = None\n time_limit = None\n random_order = 0\n\n def __init__(self):\n 
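        # re-assign the collections per instance so quizzes don't share the mutable class-level defaults from QuizBase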
self.quiz_type = 'default'\n self.categories = []\n self.categories_count = 0\n self.questions = []\n self.questions_count = 0\n\n def load(self, full=False):\n \"\"\"\n Load quiz from database\n\n Args:\n full (boolean): True - load also categories and questions, False - only main attributes\n \"\"\"\n if (self.quiz_id):\n quiz = db.session.query(self._model).filter_by(quiz_id=self.quiz_id).first()\n else:\n quiz = db.session.query(self._model).order_by(self._model.quiz_id.desc()).first()\n self.__dict__.update(quiz.__dict__)\n if full:\n self.load_questions()\n self.questions_count = len(self.questions)\n self.categories_count = self.categories.count()\n else:\n self.categories_count = db.session.query(Categories).filter_by(quiz_id=self.quiz_id).count()\n self.questions_count = db.session.query(QuestionsAll).filter_by(quiz_id=self.quiz_id).count()\n\n def load_questions(self):\n \"\"\"\n Load quiz questions and categories from database\n \"\"\"\n self.questions = []\n self.categories = []\n if self.quiz_id:\n self.categories = db.session.query(Categories).filter_by(quiz_id=self.quiz_id)\n number = 1\n qs = db.session.query(QuestionsAll).filter_by(quiz_id=self.quiz_id).order_by(\n QuestionsAll.category_id, QuestionsAll.question_id\n )\n for question in qs:\n question.number = number\n self.questions.append(question)\n number += 1\n\n def import_from_dir(self, from_dir, new_update='new', quiz_def=None):\n \"\"\"\n Load quiz attributes from github repo directory\n\n Args:\n from_dir (str): github repo directory\n new_update (str): new - load as new quiz, update - load last quiz from database and update attributes\n \"\"\"\n if (from_dir):\n self.from_dir = from_dir\n import_obj = QuizImport()\n if new_update != 'new':\n self.load()\n # update attributes from yaml\n if not quiz_def:\n quiz_def = import_obj.get_quiz_def(from_dir)\n self.title = quiz_def['title']\n self.author = quiz_def['author']\n self.time_limit = quiz_def['time_limit']\n self.random_order = int(quiz_def['type']['options']['random_order'])\n if 'individual' in quiz_def['type']['options']:\n self.individual = int(quiz_def['type']['options']['individual'])\n\n # get categories and questions\n self.categories = []\n self.questions = []\n for category in quiz_def['type']['options']['categories']:\n cc = Categories(name=category['name'])\n if 'picture' in category:\n cc.picture = category['picture']\n cc._questions = category['questions']\n self.categories.append(cc)\n if 'questions' in category and category['questions']:\n for question_file in category['questions']:\n question_def = import_obj.get_question_def(from_dir, question_file)\n if question_def:\n question = QuestionFactory().get_question(question_def['type']['name'])\n question._file = question_file\n question.task = self.parse_text(question_def['task'])\n question.answer = question_def['answer']\n for i in ('title', 'picture', 'answer_picture', 'time_limit'):\n if i in question_def:\n setattr(question, i, question_def[i])\n self.questions.append(question)\n else:\n app.logger.error('{} not loaded'.format(question_file))\n\n return True\n\n def save(self, full=False):\n \"\"\"\n Save quiz in database\n\n Args:\n full (boolean): True - save also categories and questions, False - only main attributes\n \"\"\"\n is_update = self.quiz_id\n if is_update:\n quiz = db.session.query(self._model).filter_by(quiz_id=self.quiz_id).first()\n Utils.set_vars(quiz, self)\n else:\n db_obj = self.get_db_obj()\n db.session.add(db_obj)\n db.session.flush()\n db.session.refresh(db_obj)\n 
self.quiz_id = db_obj.quiz_id\n db.session.commit()\n\n if full:\n import_obj = QuizImport()\n # questions\n if is_update:\n db.session.query(Questions).filter_by(quiz_id=self.quiz_id).delete()\n for question in self.questions:\n question.quiz_id = self.quiz_id\n question.save()\n if question.picture:\n import_obj.import_file(self.from_dir, question.picture, str(self.quiz_id))\n if question.answer_picture:\n import_obj.import_file(self.from_dir, question.answer_picture, str(self.quiz_id))\n\n # categories\n if is_update:\n db.session.query(Categories).filter_by(quiz_id=self.quiz_id).delete()\n for category in self.categories:\n if is_update:\n db.session.query(CategoryQuestion).filter_by(category_id=category.category_id).delete()\n category.quiz_id = self.quiz_id\n db.session.add(category)\n db.session.flush()\n if category.picture:\n import_obj.import_file(self.from_dir, category.picture, str(self.quiz_id))\n if category._questions:\n for question in self.questions:\n if question._file in category._questions:\n db.session.add(CategoryQuestion(\n question_id=question.question_id,\n category_id=category.category_id\n ))\n\n db.session.commit()\n\n def get_next_question(self, question_id=None):\n if question_id:\n res = db.session.query(QuestionsAll).filter(\n QuestionsAll.quiz_id == self.quiz_id, QuestionsAll.question_id > question_id\n ).order_by(QuestionsAll.question_id.asc()).first()\n else:\n qq = db.session.query(QuestionsAll).filter_by(quiz_id=self.quiz_id).order_by(QuestionsAll.question_id.asc())\n res = qq.first()\n return res\n\n def get_next_category(self, question_id=None):\n question = self.get_next_question(question_id)\n if question.category_id:\n return db.session.query(Categories).filter_by(category_id=question.category_id).first()\n return None\n\n def parse_text(self, text):\n return text.replace('\\n', '
')\n","repo_name":"leistnerova/virt-pubquiz","sub_path":"virt_pubquiz/utils/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32192784921","text":"import eseries as es\nfrom engineering_notation import EngNumber\n\n\"\"\"\nThis module helps to select resistor values for voltage dividers\n\nIn the simplest case, the devider is made up of two resistors. R1 and R2, where\none R1 terminal is connected to Vin and the node between R1 and R2 is Vout.\n\n\"\"\"\ndef resdiv_r1(Vin, Vout, R2):\n \"\"\"\n Calculate the exact value of R1 with R2 given.\n \"\"\"\n return R2 * (Vin/Vout - 1)\n\ndef resdiv_r2(Vin, Vout, R1):\n \"\"\"\n Calculate the exact value of R1 with R2 given.\n \"\"\"\n return R1 / (Vin/Vout - 1)\n\ndef resdev_vout(Vin, R1, R2):\n \"\"\"\n Calculate Vout with given R1, R2 and Vin\n \"\"\"\n return Vin * R2 / (R1 + R2)\n\ndef r1_p_r2(R1, R2):\n \"\"\"\n Calculate the Resistance of a parallel connection\n \"\"\"\n return R1 * R2 / (R1 + R2)\n\ndef resdev_rout(R1, R2):\n \"\"\"\n Calculate the output resistance of the voltage devider\n \"\"\"\n return r1_p_r2(R1, R2)\n\ndef calc_rp(R, Rp):\n \"\"\"\n Calculate a parallel resistor to match R (R = Rp//Rx)\n \"\"\"\n if R >= Rp : return None\n return R * Rp / (Rp - R)\n\ndef resdev_r1_r2(Vin, Vout, Rout_min = 1, Rout_max = 10, eser = es.E24):\n \"\"\"\n Returns the best matchin resistor pair of (R1, R2) with a given output\n resistance range and E-Series\n \"\"\"\n best_match_r2 = None\n best_match_r1 = None\n vdiv_err_best = Vin\n\n for r2 in es.erange(eser, Rout_min, Rout_max):\n r1 = es.find_nearest(eser, resdiv_r1(Vin, Vout, r2))\n rout = resdev_rout(r1, r2)\n if rout < Rout_min : continue\n if rout > Rout_max : break\n vdiv_err = abs(resdev_vout(Vin, r1, r2) - Vout)\n if vdiv_err < vdiv_err_best:\n vdiv_err_best = vdiv_err\n best_match_r1 = r1\n best_match_r2 = r2\n if vdiv_err_best == 0.0 : break\n\n return (best_match_r1, best_match_r2)\n\ndef resdev_r1_2r2(Vin, Vout, Rout_min = 1, Rout_max = 10, eser = es.E24):\n \"\"\"\n Returns the best matching resistor combination of (R1, R2_1, R2_2)\n with a given output resistance range and E-Series.\n R2_1 and R2_2 are connected in parallel.\n \"\"\"\n best_match_r2_1 = None\n best_match_r2_2 = None\n best_match_r1 = None\n vdiv_err_best = Vin\n\n for r2_1 in es.erange(eser, Rout_min, Rout_max * 2):\n if r2_1 == Rout_min or r2_1 == Rout_max : continue\n for r2_2 in es.erange(eser, calc_rp(Rout_min, r2_1), r2_1 * 1000):\n r2 = r2_1 * r2_2 / (r2_1 + r2_2)\n r1 = es.find_nearest(eser, resdiv_r1(Vin, Vout, r2))\n rout = resdev_rout(r1, r2)\n if rout < Rout_min : continue\n if rout > Rout_max : break\n vdiv_err = abs(resdev_vout(Vin, r1, r2) - Vout)\n if vdiv_err < vdiv_err_best:\n vdiv_err_best = vdiv_err\n best_match_r1 = r1\n best_match_r2_1 = r2_1\n best_match_r2_2 = r2_2\n\n return (best_match_r1, best_match_r2_1, best_match_r2_2)\n\ndef calc_resdev(Vin, Vout, Rout_min = 1, Rout_max = 10, Err_max = 1, eser = es.E24):\n R1, R2 = resdev_r1_r2(Vin, Vout, Rout_min, Rout_max, eser)\n err = resdev_vout(Vin, R1, R2) - Vout\n err_percent = abs(err)/Vout*100\n if err_percent <= Err_max:\n print(\"Resistor divider with two Resistors:\")\n print(\"R1: {}, R2: {}\".format(str(EngNumber(R1)), str(EngNumber(R2))))\n print(\"Vout error: {:.3f}% ({}V)\".format(err_percent, str(EngNumber(err))))\n print(\"Output resistance: {}\\n\".format(str(EngNumber(resdev_rout(R1, 
R2)))))\n R1, R2_1, R2_2 = resdev_r1_2r2(Vin, Vout, Rout_min, Rout_max, eser)\n err = resdev_vout(Vin, R1, r1_p_r2(R2_1, R2_2)) - Vout\n err_percent = abs(err)/Vout*100\n if err_percent <= Err_max:\n print(\"Resistor divider with three Resistors:\")\n print(\"R1: {}, R2: {}//{}\".format(str(EngNumber(R1)), str(EngNumber(R2_1)), str(EngNumber(R2_2))))\n print(\"Vout error: {:.3f}% ({}V)\".format(err_percent, str(EngNumber(err))))\n print(\"Output resistance: {}\".format(str(EngNumber(resdev_rout(R1, r1_p_r2(R2_1, R2_2))))))\n else:\n print(\"Cannot find a solution for given parameters\")","repo_name":"alexanderwachter/eecalc","sub_path":"resdev.py","file_name":"resdev.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"74416324487","text":"import unittest\nfrom functools import partial\n\nimport numpy as np\nfrom ddt import data, ddt, unpack\nfrom qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver\nfrom qiskit_nature.second_q.algorithms import GroundStateEigensolver\nfrom qiskit_nature.second_q.drivers import MethodType, PySCFDriver\nfrom qiskit_nature.second_q.mappers import ParityMapper\nfrom qiskit_nature.second_q.transformers import ActiveSpaceTransformer\nfrom qiskit_nature.settings import settings\n\nfrom dft_embedding_solver import DFTEmbeddingSolver\n\nsettings.tensor_unwrapping = False\nsettings.use_pauli_sum_op = False\nsettings.use_symmetry_reduced_integrals = True\n\n\ndef filter_criterion(\n eigenstate,\n eigenvalue,\n aux_values,\n expected_num_particles,\n expected_angular_momentum,\n):\n eval_num_particles = aux_values.get(\"ParticleNumber\", None)\n if eval_num_particles is None:\n return True\n num_particles_close = np.isclose(eval_num_particles[0], expected_num_particles)\n\n eval_angular_momentum = aux_values.get(\"AngularMomentum\", None)\n if eval_angular_momentum is None:\n return num_particles_close\n angular_momentum_close = np.isclose(\n eval_angular_momentum[0], expected_angular_momentum\n )\n\n return num_particles_close and angular_momentum_close\n\n\n@ddt\nclass TestDFTEmbeddingSolver(unittest.TestCase):\n \"\"\"Tests for the `DFTEmbeddingSolver`.\"\"\"\n\n def test_demo_example(self):\n \"\"\"Test the example provided in the `demo.py` file of this repo.\"\"\"\n omega = 1.0\n\n driver = PySCFDriver(\n atom=\"O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459\",\n basis=\"6-31g*\",\n method=MethodType.RKS,\n xc_functional=f\"ldaerf + lr_hf({omega})\",\n xcf_library=\"xcfun\",\n )\n\n active_space = ActiveSpaceTransformer(4, 4)\n\n mapper = ParityMapper(num_particles=(2, 2))\n solver = NumPyMinimumEigensolver()\n solver.filter_criterion = lambda state, val, aux: np.isclose(\n aux[\"ParticleNumber\"][0], 4.0\n )\n algo = GroundStateEigensolver(mapper, solver)\n\n dft_solver = DFTEmbeddingSolver(active_space, algo)\n\n result = dft_solver.solve(driver, omega)\n\n self.assertAlmostEqual(result.total_energies[0], -75.93044878549, places=6)\n\n def test_hf_limit(self):\n \"\"\"Test that the embedding converges to the HF value for large omega.\"\"\"\n omega = 10000.0\n\n driver = PySCFDriver(\n atom=\"H 0.0 0.0 0.0; H 0.0 0.0 0.735\",\n basis=\"sto-3g\",\n method=MethodType.RKS,\n xc_functional=f\"ldaerf + lr_hf({omega})\",\n xcf_library=\"xcfun\",\n )\n\n active_space = ActiveSpaceTransformer(2, 2)\n\n mapper = ParityMapper(num_particles=(1, 1))\n solver = NumPyMinimumEigensolver()\n solver.filter_criterion = partial(\n filter_criterion,\n 
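            # the partial pins the expected totals; filter_criterion then keeps only eigenstates whose ParticleNumber and AngularMomentum match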
expected_num_particles=2,\n expected_angular_momentum=0,\n )\n algo = GroundStateEigensolver(mapper, solver)\n\n dft_solver = DFTEmbeddingSolver(active_space, algo)\n\n result = dft_solver.solve(driver, omega)\n\n ref_problem = ActiveSpaceTransformer(2, 2).transform(driver.run())\n ref_result = algo.solve(ref_problem)\n\n self.assertAlmostEqual(\n result.total_energies[0], ref_result.total_energies[0], places=5\n )\n\n def test_dft_limit(self):\n \"\"\"Test that the embedding converges to the DFT value for small omega.\"\"\"\n omega = 0.01\n\n driver = PySCFDriver(\n atom=\"H 0.0 0.0 0.0; H 0.0 0.0 0.735\",\n basis=\"sto-3g\",\n method=MethodType.RKS,\n xc_functional=f\"ldaerf + lr_hf({omega})\",\n xcf_library=\"xcfun\",\n )\n\n active_space = ActiveSpaceTransformer(2, 2)\n\n mapper = ParityMapper(num_particles=(1, 1))\n solver = NumPyMinimumEigensolver()\n solver.filter_criterion = partial(\n filter_criterion,\n expected_num_particles=2,\n expected_angular_momentum=0,\n )\n algo = GroundStateEigensolver(mapper, solver)\n\n dft_solver = DFTEmbeddingSolver(active_space, algo)\n\n result = dft_solver.solve(driver, omega)\n\n self.assertAlmostEqual(\n result.total_energies[0], result.hartree_fock_energy, places=5\n )\n\n @unpack\n @data(\n (\n \"O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 0.1,\n (2, 2),\n -74.73098435889123,\n ),\n (\n \"O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 1.0,\n (2, 2),\n -74.8430435685246,\n ),\n (\n \"O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 10.0,\n (2, 2),\n -74.99908894924124,\n ),\n (\n \"N 0.0 0.0 0.539; N 0.0 0.0 -0.539\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 0.1,\n (2, 2),\n -107.13720604014377,\n ),\n (\n \"N 0.0 0.0 0.539; N 0.0 0.0 -0.539\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 1.0,\n (2, 2),\n -107.20511090905522,\n ),\n (\n \"N 0.0 0.0 0.539; N 0.0 0.0 -0.539\",\n \"sto-3g\",\n 0,\n MethodType.RKS,\n 10.0,\n (2, 2),\n -107.54791372130454,\n ),\n (\n \"O 0.0 0.0 0.609; O 0.0 0.0 -0.609\",\n \"sto-3g\",\n 2,\n MethodType.UKS,\n 0.1,\n (2, 3),\n -147.1976331919598,\n ),\n (\n \"O 0.0 0.0 0.609; O 0.0 0.0 -0.609\",\n \"sto-3g\",\n 2,\n MethodType.UKS,\n 1.0,\n (2, 3),\n -147.3192014235415,\n ),\n (\n \"O 0.0 0.0 0.609; O 0.0 0.0 -0.609\",\n \"sto-3g\",\n 2,\n MethodType.UKS,\n 10.0,\n (2, 3),\n -147.7044685785955,\n ),\n )\n def test_references(\n self, atom, basis, spin, method, omega, active_space, expected_value\n ):\n \"\"\"Some additional molecule tests.\n\n Args:\n atom: the atomic coordinates.\n basis: the basis set.\n spin: the spin of the system (in PySCF convention this is `2 * S`)\n method: the `MethodType` (should be either `RKS` or `UKS`).\n omega: the range-separation parameter.\n active_space: the active-space specification.\n expected_value: the expected total energy value.\n \"\"\"\n driver = PySCFDriver(\n atom=atom,\n basis=basis,\n spin=spin,\n method=method,\n xc_functional=f\"ldaerf + lr_hf({omega})\",\n xcf_library=\"xcfun\",\n )\n\n trafo = ActiveSpaceTransformer(*active_space)\n\n expected_num_particles = active_space[0]\n expected_angular_momentum = ((spin + 1) ** 2 - 1.0) / 4.0\n\n num_elec_total = active_space[0]\n num_beta = num_elec_total // 2 - spin\n if num_beta < 0:\n num_beta = 0\n num_alpha = num_elec_total - num_beta\n\n mapper = ParityMapper(num_particles=(num_alpha, num_beta))\n solver = NumPyMinimumEigensolver()\n solver.filter_criterion = partial(\n 
filter_criterion,\n expected_num_particles=expected_num_particles,\n expected_angular_momentum=expected_angular_momentum,\n )\n algo = GroundStateEigensolver(mapper, solver)\n\n dft_solver = DFTEmbeddingSolver(trafo, algo)\n\n result = dft_solver.solve(driver, omega)\n\n self.assertAlmostEqual(result.total_energies[0], expected_value, places=5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mrossinek/qiskit-nature-pyscf-dft-embedding","sub_path":"tests/test_dft_embedding_solver.py","file_name":"test_dft_embedding_solver.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"36707110546","text":"from typing import List\n\n\ndef rob(nums: List[int]) -> int:\n \"\"\"\n 计算小偷偷的最大金额\n 思路分析:单排排列。状态转移很简单,寻找前一个和后一个最大\n dp[i] = max(dp[i-1] + nums[i], dp[i])\n :param nums:\n :return:\n 算法分析:时间复杂度O(n),空间复杂度O(1)\n \"\"\"\n if not nums or len(nums) <= 0:\n return 0\n if len(nums) == 0:\n return nums[0]\n n = len(nums)\n\n # 定义状态\n dp_pre = nums[0]\n dp_cur = max(dp_pre, nums[1])\n\n for i in range(2, n):\n dp_pre, dp_cur = dp_cur, max(dp_pre+nums[i], dp_cur)\n\n return dp_cur\n\n\nif __name__ == '__main__':\n print(rob(nums=[1, 2, 3, 1]))\n","repo_name":"Cecilia520/algorithmic-learning-leetcode","sub_path":"cecilia-python/dynamic-programming/RobMaxMoney-I.py","file_name":"RobMaxMoney-I.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"19989325541","text":"from __future__ import annotations\n\nimport dask\nimport dask.array as da\nimport dask.dataframe as dd\nimport distributed\nimport numpy as np\nimport pandas as pd\nfrom dask.datasets import timeseries\nfrom dask.sizeof import sizeof\nfrom dask.utils import format_bytes, parse_bytes\n\n\ndef scaled_array_shape(\n target_nbytes: int | str,\n shape: tuple[int | str, ...],\n *,\n dtype: np.dtype | type = np.dtype(float),\n max_error: float = 0.1,\n) -> tuple[int, ...]:\n \"\"\"\n Given a shape with free variables in it, generate the shape that results in the target array size.\n\n Example\n -------\n >>> scaled_array_shape(1024, (2, \"x\"), dtype=bool)\n (2, 512)\n >>> scaled_array_shape(2048, (2, \"x\"), dtype=bool)\n (2, 1024)\n >>> scaled_array_shape(16, (\"x\", \"x\"), dtype=bool)\n (4, 4)\n >>> scaled_array_shape(256, (\"4x\", \"x\"), dtype=bool)\n (32, 8)\n >>> scaled_array_shape(\"10kb\", (\"x\", \"1kb\"), dtype=bool)\n (10, 1000)\n \"\"\"\n if isinstance(target_nbytes, str):\n target_nbytes = parse_bytes(target_nbytes)\n\n dtype = np.dtype(dtype)\n # Given a shape like:\n # (10, \"2x\", 3, \"x\", 50)\n # We're solving for x in:\n # `10 * 2x * 3 * x * 50 * dtype.itemsize == target_nbytes`\n # aka:\n # `3000x^2 * dtype.itemsize == target_nbytes`\n resolved_shape: list[int | None] = []\n x_locs_coeffs: list[tuple[int, float]] = []\n total_coeff = 1\n for i, s in enumerate(shape):\n if isinstance(s, str):\n if s[-1] == \"x\":\n coeff = 1 if len(s) == 1 else float(s[:-1])\n assert coeff > 0, coeff\n x_locs_coeffs.append((i, coeff))\n total_coeff *= coeff\n resolved_shape.append(None)\n continue\n else:\n s = parse_bytes(s) // dtype.itemsize\n\n assert s > 0, s\n total_coeff *= s\n resolved_shape.append(s)\n\n assert x_locs_coeffs, f\"Expected at least 1 `x` value in shape {shape}\"\n total_coeff *= dtype.itemsize\n x = (target_nbytes / total_coeff) ** (1 / len(x_locs_coeffs))\n\n # Replace `x` values back into shape\n for i, 
coeff in x_locs_coeffs:\n assert resolved_shape[i] is None\n resolved_shape[i] = round(coeff * x)\n\n final = tuple(s for s in resolved_shape if s is not None)\n assert len(final) == len(resolved_shape), resolved_shape\n\n actual_nbytes = np.prod(final) * dtype.itemsize\n error = (actual_nbytes - target_nbytes) / actual_nbytes\n assert abs(error) < max_error, (error, actual_nbytes, target_nbytes, final)\n return final\n\n\ndef wait(thing, client, timeout):\n \"Like `distributed.wait(thing.persist())`, but if any tasks fail, raises its error.\"\n p = thing.persist()\n try:\n distributed.wait(p, timeout=timeout)\n for f in client.futures_of(p):\n if f.status in (\"error\", \"cancelled\"):\n raise f.exception()\n finally:\n client.cancel(p)\n\n\ndef cluster_memory(client: distributed.Client) -> int:\n \"Total memory available on the cluster, in bytes\"\n return int(\n sum(w[\"memory_limit\"] for w in client.scheduler_info()[\"workers\"].values())\n )\n\n\ndef timeseries_of_size(\n target_nbytes: int | str,\n *,\n start=\"2000-01-01\",\n freq=\"1s\",\n partition_freq=\"1d\",\n dtypes={\"name\": str, \"id\": int, \"x\": float, \"y\": float},\n seed=None,\n **kwargs,\n) -> dd.DataFrame:\n \"\"\"\n Generate a `dask.demo.timeseries` of a target total size.\n\n Same arguments as `dask.demo.timeseries`, but instead of specifying an ``end`` date,\n you specify ``target_nbytes``. The number of partitions is set as necessary to reach\n approximately that total dataset size. Note that you control the partition size via\n ``freq``, ``partition_freq``, and ``dtypes``.\n\n Examples\n --------\n >>> timeseries_of_size(\n ... \"1mb\", freq=\"1s\", partition_freq=\"100s\", dtypes={\"x\": float}\n ... ).npartitions\n 278\n >>> timeseries_of_size(\n ... \"1mb\", freq=\"1s\", partition_freq=\"100s\", dtypes={i: float for i in range(10)}\n ... ).npartitions\n 93\n\n Notes\n -----\n The ``target_nbytes`` refers to the amount of RAM the dask DataFrame would use up\n across all workers, as many pandas partitions.\n\n This is typically larger than ``df.compute()`` would be as a single pandas\n DataFrame. 
Especially with many partions, there can be significant overhead to\n storing all the individual pandas objects.\n\n Additionally, ``target_nbytes`` certainly does not correspond to the size\n the dataset would take up on disk (as parquet, csv, etc.).\n \"\"\"\n if isinstance(target_nbytes, str):\n target_nbytes = parse_bytes(target_nbytes)\n\n start_dt = pd.to_datetime(start)\n partition_freq_dt = pd.to_timedelta(partition_freq)\n example_part = timeseries(\n start=start,\n end=start_dt + partition_freq_dt,\n freq=freq,\n partition_freq=partition_freq,\n dtypes=dtypes,\n seed=seed,\n **kwargs,\n )\n p = example_part.compute(scheduler=\"threads\")\n partition_size = sizeof(p)\n npartitions = round(target_nbytes / partition_size)\n assert npartitions > 0, (\n f\"Partition size of {format_bytes(partition_size)} > \"\n f\"target size {format_bytes(target_nbytes)}\"\n )\n\n ts = timeseries(\n start=start,\n end=start_dt + partition_freq_dt * npartitions,\n freq=freq,\n partition_freq=partition_freq,\n dtypes=dtypes,\n seed=seed,\n **kwargs,\n )\n assert ts.npartitions == npartitions\n return ts\n\n\ndef arr_to_devnull(arr: da.Array) -> dask.delayed:\n \"Simulate storing an array to zarr, without writing anything (just drops every block once it's computed)\"\n\n # NOTE: this class must be defined inside the function so it's cloudpickled as code,\n # otherwise `tests/utils_test` would have to be installed on the cluster.\n class _DevNull:\n def __setitem__(self, k, v):\n pass\n\n # TODO `da.store` should use blockwise to be much more efficient https://github.com/dask/dask/issues/9381\n return da.store(arr, _DevNull(), lock=False, compute=False)\n","repo_name":"phobson/coiled-runtime","sub_path":"tests/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"28437265160","text":"\nfrom datetime import datetime\nfrom logging import getLogger\nfrom numpy import size\nimport rich\nfrom rich.console import Console, ConsoleOptions, RenderableType\nfrom rich.panel import Panel\nfrom rich.repr import rich_repr, Result\nfrom rich.style import StyleType\nfrom rich.table import Table\nfrom rich.text import TextType , Text\nfrom textual import events\nfrom textual.widget import Widget\nfrom textual.reactive import watch, Reactive\n\n#------------------------------------------------------------------#\nfrom Messages import *\n\n\nclass NavBar(Widget):\n\n style: Reactive[StyleType] = Reactive(\"white on blue\")\n title: Reactive[str] = Reactive(\"\")\n mouse_down: Reactive[RenderableType] = Reactive(False)\n mouse_over: Reactive[RenderableType] = Reactive(False)\n mouse_x: Reactive[int] = Reactive(0)\n # selected_tab : Reactive[list] = Reactive([])\n\n #---------------------------------------------------------------------#\n #----------------------- Start Init ---------------------------------#\n #---------------------------------------------------------------------#\n def __init__(self,*,style: StyleType = \"white on dark_green\",selected_tab=None,title = None,tabs=[],name) -> None:\n super().__init__()\n self.style= style\n self.title= title \n self.tabs= tabs #------ all tabs name\n self.init_flag = 0 #------ for first init only\n self.selected_tab=selected_tab #------ the active tab\n self.mouse_x = 0\n self.name= name\n\n #----------------------------------------------------------------------#\n #-------------------- Handel mouse position and click -----------------#\n 
#----------------------------------------------------------------------#\n\n def on_mouse_down(self) -> None:\n self.mouse_down = True\n\n def on_mouse_up(self) -> None:\n self.mouse_down = False\n\n def on_enter(self) -> None:\n self.mouse_over = True\n\n def on_leave(self) -> None:\n self.mouse_over = False\n\n def _mouse_axis(self, event) -> int:\n x = event.x\n return x\n\n # async def active_tab(self)-> None:\n # return str('gh')\n\n async def on_mouse_move(self, event: events.MouseMove) -> None:\n self.mouse_x = self._mouse_axis(event)\n\n #----------------------------------------------------------------------#\n #-------------------- Rendering and events ----------------------------#\n #----------------------------------------------------------------------#\n\n def render(self) -> RenderableType:\n #---------------------------------------------------------------------#\n #----------------------- First Time to load currentValues ------------#\n #----------------------- If no selected_tab > go with first one ------#\n #---------------------------------------------------------------------#\n if self.init_flag == 0:\n if self.selected_tab in self.tabs:\n pass\n else :\n self.selected_tab = self.tabs[0]\n self.init_flag =1\n #-------------------- Creat the Table and give init values\n header_table = Table(title=self.selected_tab, box=None,min_width=self.size.width,show_edge=False,)\n #-------------------- Add All tabs name \n #-------------------- (totalScreenSize / 2)- (chars)/2\n total_chars = 0 \n for tab_name in self.tabs: \n total_chars += len(tab_name) #---------- Len of each tab\n total_chars+= (len(self.tabs) -1 ) * 4 #---------- how many space\n first_tabX = int(self.size.width / 2) - int(total_chars/2)\n tab_coord= {}\n for i in range(len(self.tabs)): ## self.tabs = ['tab1','tab2']\n tab_coord[self.tabs[i]] = {\n 'x_start':first_tabX + (4)*i + len(self.tabs[i])*i,\n 'x_end':first_tabX + (4)*i + len(self.tabs[i])*i + len(self.tabs[i]) }\n #---------------------------------------------------------------------# \n #--------------------- perform mouse click calculation ---------------# \n #---------------------------------------------------------------------#\n if self.mouse_down == True: \n for i in range(len(tab_coord)) : ## tab_coord = {'tab1':{'x_start':54 ,'x_end':58},'tab2':{'x_start':62 ,'x_end':66}}\n if self.mouse_x >= tab_coord[self.tabs[i]]['x_start'] and self.mouse_x < tab_coord[self.tabs[i]]['x_end']:\n self.selected_tab = str(self.tabs[i])\n # self.emit(Message(self.selected_tab))\n else :\n pass\n\n #-------------------- selected one will be blue\n new = rich.text.Text()\n for i in self.tabs :\n if self.selected_tab == i :\n new += (Text(text=(i + '\\t'),style=\"bold blue\")) \n else:\n new += (Text(text=(i + '\\t'),style=\"bold red\")) \n new.justify = 'center'\n\n header_table.add_row(new)\n\n #-------------------- RenderableType and but in panel to return it\n header: RenderableType\n\n header = Panel(\n renderable=header_table,\n style=self.style,\n title=self.title, \n ) \n\n return header\n\n # return Panel(\n # panel_group, \n # title=\"\", \n # title_align=\"center\", \n # height=4+len(self.currentValues),\n # style=self.style or \"\",\n # border_style=\"green\" if self.mouse_over else \"blue\", #-------------------- change color when mouse over it\n # box=rich.box.HEAVY if self.has_focus else rich.box.ROUNDED, #-------------------- change box style when on_focuse\n # )\n\n\n\n\n\n async def on_mount(self, event: events.Mount) -> None:\n 
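        # repaint once per second so render() recomputes the tab layout and the highlighted tab tracks the latest click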
self.set_interval(1.0, callback=self.refresh)\n\n async def on_click(self, event: events.Click) -> None:\n # self.tall = not self.tall\n pass\n\n\n\n","repo_name":"AbdelwahabAdam/admin_tui","sub_path":"Textual/Final_outbut/Navigation_tabs/Nav_bar.py","file_name":"Nav_bar.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73301403848","text":"\"\"\"Module with api resources.\"\"\"\n\nimport logging\nfrom collections import OrderedDict\n\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom looseserver.common.api import DEFAULT_VERSION, ResponseStatus, APIError\nfrom looseserver.common.rule import RuleError, RuleParseError\nfrom looseserver.common.response import ResponseError, ResponseParseError\n\n\nclass RulesManager(Resource):\n \"\"\"API resource to manage rules.\"\"\"\n def __init__(self, manager, rule_factory):\n self._manager = manager\n self._rule_factory = rule_factory\n\n def post(self):\n \"\"\"Create a new rule.\"\"\"\n logger = logging.getLogger(__name__)\n request_data = request.get_json(silent=True)\n if request_data is None:\n message = \"Failed to parse JSON data from the request\"\n logger.error(message)\n return build_response(error=APIError(message)), 400\n\n logger.debug(\"Try to create new rule\")\n try:\n rule = self._rule_factory.parse_rule(data=request_data)\n except RuleParseError as error:\n message = \"Failed to create a rule for specified parameters. Error: '{0}'\".format(error)\n logger.exception(message)\n return build_response(error=APIError(message)), 400\n except RuleError as error:\n message = \"Exception has been raised during rule creation\"\n logger.exception(message)\n return build_response(error=APIError(message)), 500\n\n rule_id = self._manager.add_rule(rule)\n logger.debug(\"Rule has been successfully created\")\n\n try:\n rule_data = self._rule_factory.serialize_rule(rule)\n except RuleError:\n self._manager.remove_rule(rule_id=rule_id)\n logger.debug(\"Rule has been removed because of a serialization error\")\n message = \"Rule may be created, but can't be serialized\"\n return build_response(error=APIError(message)), 500\n\n logger.info(\"Successfully handled request to create a rule\")\n\n response_data = {\"rule_id\": rule_id}\n response_data.update(rule_data)\n return build_response(data=response_data)\n\n\nclass Rule(Resource):\n \"\"\"API resource to manage single rule.\"\"\"\n def __init__(self, manager, rule_factory):\n self._manager = manager\n self._rule_factory = rule_factory\n\n def get(self, rule_id):\n \"\"\"Get rule by its ID.\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug(\"Try to get rule %s\", rule_id)\n try:\n rule = self._manager.get_rule(rule_id=rule_id)\n except KeyError:\n message = \"Failed to find rule with ID '{0}'\".format(rule_id)\n logger.error(message)\n return build_response(error=APIError(message)), 404\n\n try:\n rule_data = self._rule_factory.serialize_rule(rule)\n except RuleError:\n message = \"Exception has been raised during serialization of the rule\"\n logger.exception(message)\n return build_response(error=APIError(message)), 500\n\n logger.info(\"Rule %s has been successfully obtained\", rule_id)\n response_data = {\"rule_id\": rule_id}\n response_data.update(rule_data)\n return build_response(data=response_data)\n\n def delete(self, rule_id):\n \"\"\"Delete rule by its ID.\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug(\"Try to delete rule with ID %s\", 
rule_id)\n self._manager.remove_rule(rule_id)\n logger.info(\"Rule %s has been removed\", rule_id)\n return build_response()\n\n\nclass Response(Resource):\n \"\"\"API resource to manage rule responses.\"\"\"\n def __init__(self, manager, response_factory):\n self._manager = manager\n self._response_factory = response_factory\n\n def post(self, rule_id):\n \"\"\"Create a new response for the rule.\"\"\"\n logger = logging.getLogger(__name__)\n request_data = request.get_json(silent=True)\n if request_data is None:\n message = \"Failed to parse JSON data from the request\"\n logger.error(message)\n return build_response(error=APIError(message)), 400\n\n logger.debug(\"Try to create new response for the rule with ID %s\", rule_id)\n try:\n response = self._response_factory.parse_response(data=request_data)\n except ResponseParseError as error:\n message = \"Failed to create a response for specified parameters. Error: '{0}'\".format(\n error,\n )\n logger.exception(message)\n return build_response(error=APIError(message)), 400\n except ResponseError as error:\n message = \"Exception has been raised during response creation\"\n logger.exception(message)\n return build_response(error=APIError(message)), 500\n\n try:\n response_data = self._response_factory.serialize_response(response)\n except ResponseError:\n message = \"Response can't be serialized\"\n return build_response(error=APIError(message)), 500\n\n try:\n self._manager.set_response(rule_id=rule_id, response=response)\n except KeyError:\n message = \"Failed to create a response: Rule does not exist\"\n logger.exception(message)\n return build_response(error=APIError(message)), 400\n\n logger.debug(\"Response has been successfully created for the rule with ID %s\", rule_id)\n\n logger.info(\n \"Successfully handled request to set a response for the rule with ID %s\",\n rule_id,\n )\n\n return build_response(data=response_data)\n\n def get(self, rule_id):\n \"\"\"Get response by rule ID.\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug(\"Try to get the response for the rule with ID '%s'\", rule_id)\n try:\n response = self._manager.get_response(rule_id=rule_id)\n except KeyError:\n message = \"Failed to get response for the rule '{0}'\".format(rule_id)\n logger.exception(message)\n return build_response(error=APIError(message)), 404\n\n try:\n response_data = self._response_factory.serialize_response(response)\n except ResponseError:\n message = \"Response can't be serialized\"\n logger.exception(message)\n return build_response(error=APIError(message)), 500\n\n logger.info(\"Response for the rule with ID %s has been successfully obtained\", rule_id)\n return build_response(data=response_data)\n\n\ndef build_response(data=None, error=None, version=DEFAULT_VERSION):\n \"\"\"Build a response.\n\n :param data: JSON serializable object to include into the response.\n :param error: instance of :class:`APIError `.\n :param version: version of API.\n :returns: JSON serializable object with response.\n \"\"\"\n if error:\n status = ResponseStatus.FAILURE\n else:\n status = ResponseStatus.SUCCESS\n\n response_json = OrderedDict((\n (\"version\", version),\n (\"status\", status.name),\n ))\n\n if error:\n response_json[\"error\"] = {\n \"description\": error.description,\n }\n\n if data is not None:\n response_json[\"data\"] = data\n\n return 
response_json\n","repo_name":"KillAChicken/loose-server","sub_path":"looseserver/server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"14477238374","text":"#------------------------------------------------------#\r\n# magnetic dead layer (MDL) fit plot v0.1\t\t -----#\r\n# author: tatsunootoshigo, 7475un00705hi90@gmail.com #\r\n#------------------------------------------------------#\r\n\r\n#imports\r\nimport numpy as np\r\nfrom scipy import stats\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.widgets import Slider, Button, RadioButtons\r\nimport matplotlib.ticker as ticker\r\nimport matplotlib.gridspec as gridspec\r\nfrom matplotlib.font_manager import FontProperties\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nfrom matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter\r\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes, mark_inset\r\n\r\nimport matplotlib as mpl\r\nmpl.rcParams['mathtext.fontset'] = 'stix'\r\nmpl.rcParams['font.family'] = 'STIXGeneral'\r\nmpl.rcParams[\"font.serif\"] = \"STIX\"\r\nmpl.rcParams[\"mathtext.fontset\"] = \"stix\"\r\n\r\n# script version\r\nversion = '0.1'\r\nversion_name = 'mdl_fit_plot_' + version + '.py'\r\n\r\n# parameter adjustment step\r\nvalstep_R0 = 1e-2\r\n\r\ndataset_name = 'WnFe-SiAlOx'\r\nplot_title = 'MDL_' + dataset_name\r\n\r\nsample_R0 = np.array([877.0, 632.0, 388.0, 174.0, 139.0, 83.0])\r\nsample_rho = np.array([88.0, 105.0, 129.0, 174.0, 231.0, 194.0])\r\nFM_thickness = np.array([3, 5, 10, 30, 50, 70])\r\nMR_ratio = ([0.82, 0.609, 0.371, 0.183, 0.109, 0.118],[0.026, 0.035, 0.053, 0.016, 0.138, 0.038],[0.793, 0.584, 0.377, 0.176, 0.238, 0.142])\r\nDelta_R_R0 = ([0.456, 0.235, 0.092, 0.020, 0.01, 0.006],[0.015, 0.014, 0.013, 0.002, 0.012, 0.002],[0.442, 0.235, 0.084, 0.02, 0.021, 0.008]) \r\nFM_Ms = ([0.7109771, 0.7619016, 0.7858823, 0.703598, 0.718513, 0.6418686])\r\nFM_thickness_range = np.arange(-10.0, 75.0, 1.0)\r\n\r\nFM_thickness_cut = np.array([3, 5, 10])\r\nFM_Ms_cut = ([0.7109771, 0.7619016, 0.7858823])\r\nFM_thickness_range_10 = np.arange(-10.0, 15.0, 1.0)\r\n\r\nxmin = 0\r\nxmax = 80\r\n\r\nymin = 0\r\nymax = 50\r\n\r\nxprec = 0\r\nyprec = 1\r\n\r\ndesc_x = 0.1\r\ndesc_y = 0.1\r\n\r\nout_pdf = 'MDL_fit_plot_' + dataset_name + '.pdf'\r\nout1_pdf = 'MDL_fit_plot_pub_' + dataset_name + '.pdf'\r\nout_svg = 'MDL_fit_plot_' + dataset_name + '.svg'\r\nout1_svg = 'MDL_fit_plot_pub_' + dataset_name + '.svg'\r\n\r\n# plot label defs\r\naxis_label_theta = r'$\\theta\\, / \\, \\circ$'\r\naxis_label_nm = r'$d_F\\;/\\;nm$'\r\naxis_label_ohm = r'$R\\;/\\;\\Omega$'\r\naxis_label_msd = r'$\\mu_0M_s\\cdot d_F\\;/\\;T\\cdot nm$'\r\naxis_label_delta_Ryz = r'$\\Delta R_{SMR}\\;/\\;\\%$'\r\n\r\nlabel_xy = r'$xy$'\r\nlabel_xy_fit = r'$fit$'\r\nlabel_mdl = r'$MDL$'\r\n\r\n# formatting the plot axes\r\ndef custom_axis_formater(custom_title, custom_x_label, custom_y_label, xmin, xmax, ymin, ymax, xprec, yprec):\r\n\t\r\n\t# get axes and tick from plot \r\n\tax = plt.gca()\r\n\r\n\t# set the number of major and minor ticks for x,y axes\r\n\t# prune='lower' --> remove lowest tick label from x axis\r\n\txmajorLocator = MaxNLocator(12, prune='lower') \r\n\txmajorFormatter = FormatStrFormatter('%.'+ np.str(xprec) + 'f')\r\n\txminorLocator = MaxNLocator(24) \r\n\t\r\n\tymajorLocator = MaxNLocator(12) \r\n\tymajorFormatter = FormatStrFormatter('%.'+ 
np.str(yprec) + 'f')\r\n\tyminorLocator = MaxNLocator(24)\r\n\r\n\tax.xaxis.set_major_locator(xmajorLocator)\r\n\tax.yaxis.set_major_locator(ymajorLocator)\r\n\r\n\tax.xaxis.set_major_formatter(xmajorFormatter)\r\n\tax.yaxis.set_major_formatter(ymajorFormatter)\r\n\r\n\t# for the minor ticks, use no labels; default NullFormatter\r\n\tax.xaxis.set_minor_locator(xminorLocator)\r\n\tax.yaxis.set_minor_locator(yminorLocator)\r\n\t\r\n\t# format major and minor ticks width, length, direction \r\n\tax.tick_params(which='both', width=1, direction='in', labelsize=20)\r\n\tax.tick_params(which='major', length=6)\r\n\tax.tick_params(which='minor', length=4)\r\n\r\n\t# set axes thickness\r\n\tax.spines['top'].set_linewidth(1.5)\r\n\tax.spines['bottom'].set_linewidth(1.5)\r\n\tax.spines['right'].set_linewidth(1.5)\r\n\tax.spines['left'].set_linewidth(1.5)\r\n\r\n\t# grid and axes are drawn below the data plot\r\n\tax.set_axisbelow(True)\r\n\r\n\t# add x,y grids to plot area\r\n\tax.xaxis.grid(True, zorder=0, color='whitesmoke', linestyle='-', linewidth=1)\r\n\tax.yaxis.grid(True, zorder=0, color='whitesmoke', linestyle='-', linewidth=1)\r\n\r\n\t# set axis labels\r\n\tax.set_xlabel(custom_x_label, fontsize=20)\r\n\tax.set_ylabel(custom_y_label, fontsize=20)\r\n\r\n\t# set plot title\r\n\t#ax.set_title(custom_title, loc='right', fontsize=14)\r\n\r\n\treturn;\r\ndef custom_axis_formater_inset(custom_title, custom_x_label, custom_y_label, xmin, xmax, ymin, ymax, xprec, yprec):\r\n\t\r\n\t# get axes and tick from plot \r\n\tax = plt.gca()\r\n\t# set the number of major and minor bins for x,y axes\r\n\t# prune='lower' --> remove lowest tick label from x axis\r\n\txmajorLocator = MaxNLocator(6, prune='lower') \r\n\txmajorFormatter = FormatStrFormatter('%.'+ np.str(xprec) + 'f')\r\n\txminorLocator = MaxNLocator(12) \r\n\t\r\n\tymajorLocator = MaxNLocator(6) \r\n\tymajorFormatter = FormatStrFormatter('%.'+ np.str(yprec) + 'f')\r\n\tyminorLocator = MaxNLocator(12)\r\n\t\r\n\t# format major and minor ticks width, length, direction \r\n\tax.tick_params(which='both', width=1, direction='in', labelsize=20)\r\n\tax.tick_params(which='major', length=6)\r\n\tax.tick_params(which='minor', length=4)\r\n\r\n\t# set axes thickness\r\n\tax.spines['top'].set_linewidth(1.5)\r\n\tax.spines['bottom'].set_linewidth(1.5)\r\n\tax.spines['right'].set_linewidth(1.5)\r\n\tax.spines['left'].set_linewidth(1.5)\r\n\r\n\tax.xaxis.set_major_locator(xmajorLocator)\r\n\tax.yaxis.set_major_locator(ymajorLocator)\r\n\r\n\tax.xaxis.set_major_formatter(xmajorFormatter)\r\n\tax.yaxis.set_major_formatter(ymajorFormatter)\r\n\r\n\t# for the minor ticks, use no labels; default NullFormatter\r\n\tax.xaxis.set_minor_locator(xminorLocator)\r\n\tax.yaxis.set_minor_locator(yminorLocator)\r\n\r\n\t# grid and axes are drawn below the data plot\r\n\tax.set_axisbelow(True)\r\n\r\n\t# convert x axis units to radians\r\n\t#ax.convert_xunits(radians)\r\n\r\n\t# add x,y grids to plot area\r\n\tax.xaxis.grid(True, zorder=0, color='gainsboro', linestyle='-', linewidth=1)\r\n\tax.yaxis.grid(True, zorder=0, color='gainsboro', linestyle='-', linewidth=1)\r\n\r\n\t# set axis labels\r\n\t#ax.set_xlabel(custom_x_label, fontsize=20)\r\n\t#ax.set_ylabel(custom_y_label, fontsize=20)\r\n\r\n\t# set plot title\r\n\t#ax.set_title(custom_title, loc='right', fontsize=12)\r\n\r\n\treturn;\r\n\r\nslope_mdl, intercept_mdl, r_value_mdl, p_value_mdl, std_err_mdl = stats.linregress(FM_thickness[0:4], 
FM_Ms[0:4]*FM_thickness[0:4])\r\n#print('=================================')\r\n#\tprint('Ms12: ', intercept_q12, 'Ms34: ', intercept_q34, 'Ms_mean: ', 0.5*(np.absolute(intercept_q12) + np.absolute(intercept_q34)))\r\n\r\nprint('---------------------------------')\r\nprint('slope_mdl: ', slope_mdl)\r\nprint('intercept_mdl: ', intercept_mdl)\r\nprint('r_value_mdl: ', r_value_mdl)\r\nprint('std_err_mdl:', std_err_mdl)\r\nprint('---------------------------------')\r\n\r\nslope_mdl_cut, intercept_mdl_cut, r_value_mdl_cut, p_value_mdl_cut, std_err_mdl_cut = stats.linregress(FM_thickness[0:2], FM_Ms[0:2]*FM_thickness[0:2])\r\n#print('=================================')\r\n#\tprint('Ms12: ', intercept_q12, 'Ms34: ', intercept_q34, 'Ms_mean: ', 0.5*(np.absolute(intercept_q12) + np.absolute(intercept_q34)))\r\n\r\nprint('---------------------------------')\r\nprint('slope_mdl: ', slope_mdl_cut)\r\nprint('intercept_mdl: ', intercept_mdl_cut)\r\nprint('r_value_mdl: ', r_value_mdl_cut)\r\nprint('std_err_mdl:', std_err_mdl_cut)\r\nprint('---------------------------------')\r\n\r\n# create the fig and plot\r\nfig, ax = plt.subplots(figsize=(9, 9), dpi=72)\r\nspec = gridspec.GridSpec(ncols=1, nrows=1)\r\nfig.canvas.set_window_title('mdl_fit_plot_' + version_name) \r\nplt.figtext(0.80, 0.98, version_name, size=12)\r\n\r\n#tx2, = plt.plot(vsm_data[0], fit_params[0]*vsm_data[0] + fit_params[2], 'b--')\r\n\r\ntx1, = plt.plot(FM_thickness, FM_Ms*FM_thickness,'ko', mfc='lightgray', markersize=6, label=label_mdl)\r\ntx2, = plt.plot(FM_thickness_range, FM_thickness_range*slope_mdl + intercept_mdl,'k-', mfc='lightgray', markersize=6, label=label_mdl)\r\n# display the legend for the defined labels\r\nplt.legend([tx1, tx2], [label_mdl, 'fit'], loc='upper left', fontsize=14 , frameon=True)\r\nplt.figtext(0.05, 0.92, r'$MDL:$ ' + np.str(np.round(-1.0*intercept_mdl / slope_mdl,3)) + r'$\\;nm$' + '\ta: ' + np.str(slope_mdl) + '\tb: ' + np.str(intercept_mdl) + '\\n' + r'$R^2:$' + np.str(r_value_mdl*r_value_mdl) + '\tS: ' + np.str(std_err_mdl), size=14)\r\n\r\nplt.xlim(xmin, xmax)\r\nplt.ylim(ymin, ymax)\r\n\r\ncustom_axis_formater(plot_title, axis_label_nm, axis_label_msd, xmin, xmax, ymin, ymax, xprec, yprec)\r\n#-----------------------------------------------------------------------------------------------------\r\n\r\nfig1 = plt.figure(figsize=(9, 9), dpi=72)\r\nfig1.canvas.set_window_title(version_name)\r\nspec1 = gridspec.GridSpec(ncols=1, nrows=1)\r\nxy1 = fig1.add_subplot(spec1[0,0])\r\ntx3, = plt.plot(FM_thickness_range, FM_thickness_range*slope_mdl_cut + intercept_mdl_cut,'r-', mfc='lavenderblush', markersize=6, label=label_mdl)\r\ntx4, = plt.plot(FM_thickness, FM_Ms*FM_thickness,'ro', mfc='lavenderblush', markersize=6, label=label_mdl)\r\n\r\n\r\n# display the legend for the defined labels\r\nplt.legend([tx4, tx3], ['expt', 'fit'], loc='upper left', fontsize=20 , frameon=True)\r\n\r\nplt.xlim(xmin, xmax)\r\nplt.ylim(ymin, ymax)\r\n\r\ncustom_axis_formater(plot_title, axis_label_nm, axis_label_msd, xmin, xmax, ymin, ymax, xprec, yprec)\r\n#xy1.xaxis.grid(True, zorder=0, color='gainsboro', linestyle='-', linewidth=0)\r\n#xy1.yaxis.grid(True, zorder=0, color='gainsboro', linestyle='-', linewidth=0)\r\n\r\nfig1.tight_layout(pad=1.0, w_pad=6.0, h_pad=1.0)\r\nplt.subplots_adjust(left=0.1, bottom=0.07, wspace=0.0, hspace=0.0)\r\n\r\naxins = inset_axes(xy1,\r\n \t\t\t\t\t\twidth=\"40%\", # width = 30% of parent_bbox\r\n height=\"40%\", # height : 1 inch\r\n loc=\"lower right\",\r\n 
borderpad=5)\r\naxins.plot(FM_thickness_range, FM_thickness_range*slope_mdl_cut + intercept_mdl_cut, 'r-', lw=1.5, markersize=8, label=label_xy_fit)\r\naxins.plot(FM_thickness, FM_Ms*FM_thickness, 'ro', mfc='lavenderblush', markersize=8, label=label_xy)\r\ncustom_axis_formater_inset(plot_title, axis_label_nm, axis_label_delta_Ryz, xmin, xmax, ymin, ymax, xprec, yprec)\r\n\r\nplt.xlim(0, 12)\r\nplt.ylim(0, 12)\r\n\r\n#-----------------------------------------------------------------------------------------------------\r\n\r\n# create the fig and plot\r\nfig2, ax2 = plt.subplots(figsize=(9, 9), dpi=72)\r\nspec2 = gridspec.GridSpec(ncols=1, nrows=1)\r\nfig2.canvas.set_window_title('mdl_fit_plot_' + version_name) \r\nplt.figtext(0.80, 0.98, version_name, size=12)\r\n\r\n#tx2, = plt.plot(vsm_data[0], fit_params[0]*vsm_data[0] + fit_params[2], 'b--')\r\n\r\ntx5, = plt.plot(FM_thickness[0:3], FM_Ms[0:3]*FM_thickness[0:3],'ko', mfc='lightgray', markersize=6, label=label_mdl)\r\ntx6, = plt.plot(FM_thickness_range_10, FM_thickness_range_10*slope_mdl_cut + intercept_mdl_cut,'k-', mfc='lightgray', markersize=6, label=label_mdl)\r\n# display the legend for the defined labels\r\nplt.legend([tx5, tx6], [label_mdl, 'fit'], loc='upper left', fontsize=14 , frameon=True)\r\nplt.figtext(0.05, 0.92, r'$MDL:$ ' + np.str(np.round(-1.0*intercept_mdl_cut / slope_mdl_cut,3)) + r'$\\;nm$' + '\ta: ' + np.str(slope_mdl_cut) + '\tb: ' + np.str(intercept_mdl_cut) + '\\n' + r'$R^2:$' + np.str(r_value_mdl_cut*r_value_mdl_cut) + '\tS: ' + np.str(std_err_mdl_cut), size=14)\r\n\r\nplt.xlim(0, 12)\r\nplt.ylim(0, 12)\r\n\r\ncustom_axis_formater(plot_title, axis_label_nm, axis_label_msd, xmin, xmax, ymin, ymax, xprec, yprec)\r\n#-----------------------------------------------------------------------------------------------------\r\n\r\npp = PdfPages(out_pdf)\r\npp.savefig(fig)\r\npp.close()\r\npp1 = PdfPages(out1_pdf)\r\npp1.savefig(fig1)\r\npp1.close()\r\n\r\n# save as .svg too\r\nfig = plt.savefig(out_svg)\r\nfig1 = plt.savefig(out1_svg)\r\n\r\nplt.show()","repo_name":"tatsunootoshigoio/mdl","sub_path":"mdl.py","file_name":"mdl.py","file_ext":"py","file_size_in_byte":11663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"890844799","text":"from __future__ import unicode_literals\n\nimport collections\nimport logging\nimport time\n\nfrom mopidy import backend\n\nimport spotify\n\nfrom mopidy_spotify import translator, utils\n\n\n_API_BASE_URI = 'https://api.spotify.com/v1'\n\nlogger = logging.getLogger(__name__)\n\n\nCachedItem = collections.namedtuple('CachedItem', ['item', 'version', 'expires'])\n\n\nclass ItemCache(object):\n\n def __init__(self, lifetime):\n self._data = collections.OrderedDict()\n self.expires = 0\n self.lifetime = lifetime\n\n def get(self, uri, default=None):\n return self._data[uri] if uri in self._data else default\n\n def update(self, item=None, version=0):\n self.expires = time.time() + self.lifetime\n if item:\n self._data[item.uri] = CachedItem(item, version, self.expires)\n\n def clear(self):\n self._data.clear()\n self.expires = 0\n\n def valid(self, uri=None):\n if uri is None:\n expires = self.expires\n elif uri in self._data:\n expires = self._data[uri].expires\n else:\n return False\n return expires > time.time()\n\n @property\n def items(self):\n for v in self._data.values():\n yield v\n\n def validate(self, item):\n uri = item.get('uri')\n if uri in self._data:\n if self._data[uri].version != item.get('snapshot_id'):\n del 
self._data[uri]\n\n\nclass SpotifyPlaylistsProvider(backend.PlaylistsProvider):\n\n def __init__(self, backend):\n self._backend = backend\n self._ref_cache = ItemCache(60)\n self._full_cache = ItemCache(60*60)\n\n def as_list(self):\n with utils.time_logger('playlists.as_list()'):\n return list(self._get_flattened_playlist_refs())\n\n def _get_all_items(self, first_result, params=None):\n if params is None:\n params = {}\n items = first_result['items']\n uri = first_result['next']\n while uri is not None:\n logger.debug(\"Getting next page\")\n next_result = self._backend._web_client.get(uri, params=params)\n #for item in next_result.get('items', []):\n #yield item\n items.extend(next_result['items'])\n uri = next_result.get('next', None)\n return items\n\n def _get_flattened_playlist_refs(self):\n if self._ref_cache.valid():\n logger.debug(\"Getting playlist references using cache\")\n for p in self._ref_cache.items:\n yield p.item\n return\n\n logger.debug(\"Resetting playlist references cache\")\n self._ref_cache.clear()\n if self._backend._session is None:\n return\n\n username = self._backend._session.user_name\n\n result = self._backend._web_client.get('me/playlists', params={\n 'limit': 50 })\n\n if result is None:\n logger.error(\"No playlists found\") # is this an error condition or normal?\n self._ref_cache.update()\n return\n\n for web_playlist in self._get_all_items(result):\n self._full_cache.validate(web_playlist)\n playlist_ref = translator.web_to_playlist_ref(\n web_playlist, username=username)\n if playlist_ref is not None:\n self._ref_cache.update(playlist_ref)\n logger.info(\"Got playlist ref %s %s\" % (playlist_ref.name, playlist_ref.uri))\n yield playlist_ref\n\n def get_items(self, uri):\n with utils.time_logger('playlist.get_items(%s)' % uri):\n return self._get_playlist(uri, as_items=True)\n\n def lookup(self, uri):\n with utils.time_logger('playlists.lookup(%s)' % uri):\n return self._get_playlist(uri)\n\n def _get_playlist(self, uri, as_items=False):\n logger.debug(\"Getting playlist URI %s\", uri)\n def gen_fields(name, fields=[]):\n fields = ['uri', 'name'] + fields\n return '%s(%s)' % (name, ','.join(fields))\n\n\n fields = ['name', 'owner', 'type', 'uri', 'snapshot_id']\n if as_items:\n fields.append('tracks')\n\n link = translator.parse_uri(uri)\n web_playlist = self._full_cache.get(uri, None)\n\n if web_playlist is not None:\n if web_playlist.item.tracks:\n logger.debug('Playlist %s found in cache', uri)\n return web_playlist.item\n else:\n logger.debug('Cached copy for playlist %s without tracks so re-requesting', uri)\n web_playlist = None\n\n if web_playlist is None:\n if 'tracks' not in fields:\n fields.append('tracks')\n\n params = {'fields': ','.join(fields), 'market': 'from_token'}\n web_playlist = self._backend._web_client.get(\n 'users/%s/playlists/%s' % (link.owner, link.id),\n params=params)\n\n if web_playlist is not None and 'tracks' in web_playlist:\n web_playlist['tracks'] = [\n t['track'] for t in\n self._get_all_items(web_playlist['tracks'])]\n\n if web_playlist is None:\n logger.debug('Failed to lookup Spotify URI %s', uri)\n return\n\n username = self._backend._session.user_name\n playlist_ref = translator.web_to_playlist(\n web_playlist, username=username, bitrate=self._backend._bitrate,\n as_items=as_items)\n\n self._full_cache.update(playlist_ref, version=web_playlist['snapshot_id'])\n return playlist_ref\n\n def refresh(self):\n self._ref_cache.clear()\n\n def create(self, name):\n try:\n sp_playlist = (\n 
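# may raise ValueError (invalid name) or spotify.Error; both are handled below\n                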
self._backend._session.playlist_container\n                .add_new_playlist(name))\n        except ValueError as exc:\n            logger.warning(\n                'Failed creating new Spotify playlist \"%s\": %s', name, exc)\n        except spotify.Error:\n            logger.warning('Failed creating new Spotify playlist \"%s\"', name)\n        else:\n            username = self._backend._session.user_name\n            return translator.to_playlist(sp_playlist, username=username)\n\n    def delete(self, uri):\n        pass # TODO\n\n    def save(self, playlist):\n        pass # TODO\n\n\ndef on_container_loaded(sp_playlist_container):\n    # Called from the pyspotify event loop, and not in an actor context.\n    logger.debug('Spotify playlist container loaded')\n\n    # This event listener is also called after playlists are added, removed and\n    # moved, so since Mopidy currently only supports the \"playlists_loaded\"\n    # event this is the only place we need to trigger a Mopidy backend event.\n    backend.BackendListener.send('playlists_loaded')\n\n\ndef on_playlist_added(sp_playlist_container, sp_playlist, index):\n    # Called from the pyspotify event loop, and not in an actor context.\n    logger.debug(\n        'Spotify playlist \"%s\" added to index %d', sp_playlist.name, index)\n\n    # XXX Should Mopidy support more fine grained playlist events which this\n    # event can trigger?\n\n\ndef on_playlist_removed(sp_playlist_container, sp_playlist, index):\n    # Called from the pyspotify event loop, and not in an actor context.\n    logger.debug(\n        'Spotify playlist \"%s\" removed from index %d', sp_playlist.name, index)\n\n    # XXX Should Mopidy support more fine grained playlist events which this\n    # event can trigger?\n\n\ndef on_playlist_moved(\n        sp_playlist_container, sp_playlist, old_index, new_index):\n    # Called from the pyspotify event loop, and not in an actor context.\n    logger.debug(\n        'Spotify playlist \"%s\" moved from index %d to %d',\n        sp_playlist.name, old_index, new_index)\n\n    # XXX Should Mopidy support more fine grained playlist events which this\n    # event can trigger?\n","repo_name":"jeremystover/mopidy-icecast","sub_path":"mpd-spt-fix/mopidy_spotify/playlists.py","file_name":"playlists.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41100015231","text":"import unittest\nfrom unittest import TestCase\nfrom engine.trie.trie import PrefixTree\n\n\nclass TrieTest(TestCase):\n    def setUp(self) -> None:\n        self.trie: PrefixTree = PrefixTree()\n\n    def test_prefix_not_found_as_whole_word(self) -> None:\n        self.trie.insert('apple')\n        self.trie.insert('appreciate')\n        self.assertEqual(self.trie.find('app'), None)\n    \n    def test_prefix_is_also_whole_word(self) -> None:\n        word_list: tuple = (\n            'apple',\n            'appreciate',\n            'app',\n        )\n        for word in word_list:\n            self.trie.insert(word)\n\n        self.assertEqual(self.trie.find('app').is_word, True)\n\n    def test_starts_with(self) -> None:\n        word_list: tuple = (\n            'apple',\n            'appreciate',\n            'aposematic',\n            'apoplectic',\n            'appendix',\n        )\n        for word in word_list:\n            self.trie.insert(word)\n\n        self.assertEqual(self.trie.starts_with('app'),\n                         ['apple', 'appreciate', 'appendix'])\n\n    def test_starts_with_self(self) -> None:\n        self.trie.insert('app')\n        self.assertEqual(self.trie.starts_with('app'), ['app'])\n\n    def test_starts_with_empty_and_no_words(self) -> None:\n        self.assertEqual(self.trie.starts_with(''), [])\n\n    def test_starts_with_empty_returns_all_words(self) -> None:\n        word_list = (\n            'bad',\n            'bat',\n            'cat',\n            'cage',\n        )\n        for word in word_list:\n            self.trie.insert(word)\n\n        self.assertEqual(self.trie.starts_with(''), 
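# with every word inserted, the empty prefix matches them all\n                         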
list(word_list))\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"madjar-code/Search-Engine","sub_path":"engine/tests/test_trie.py","file_name":"test_trie.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2627861096","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn\n\nclass ComplexMeasurement(torch.nn.Module):\n def __init__(self, embed_dim, units=5, ortho_init=False, device = torch.device('cpu')):\n super(ComplexMeasurement, self).__init__()\n self.units = units\n self.embed_dim = embed_dim\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if ortho_init:\n self.kernel = torch.nn.Parameter(torch.stack([torch.eye(embed_dim).to(device),torch.zeros(embed_dim, embed_dim).to(device)],dim = -1))\n\n# self.real_kernel = torch.nn.Parameter(torch.eye(embed_dim))\n# self.imag_kernel = torch.nn.Parameter(torch.zeros(embed_dim, embed_dim))\n else:\n rand_tensor = torch.rand(self.units, self.embed_dim, 2).to(device)\n normalized_tensor = F.normalize(rand_tensor.view(self.units, -1), p=2, dim=1, eps=1e-10).view(self.units, self.embed_dim, 2)\n self.kernel = torch.nn.Parameter(normalized_tensor)\n# self.kernel = F.normalize(self.kernel.view(self.units, -1), p=2, dim=1, eps=1e-10).view(self.units, embed_dim, 2)\n\n\n# self.real_kernel = torch.nn.Parameter(torch.Tensor(self.units, embed_dim))\n# self.imag_kernel = torch.nn.Parameter(torch.Tensor(self.units, embed_dim))\n\n def forward(self, inputs, measure_operator=None):\n \n input_real = inputs[0]\n input_imag = inputs[1]\n \n real_kernel = self.kernel[:,:,0]\n imag_kernel = self.kernel[:,:,1]\n if measure_operator is None:\n real_kernel = real_kernel.unsqueeze(-1)\n imag_kernel = imag_kernel.unsqueeze(-1)\n else:\n real_kernel = measure_operator[0].unsqueeze(-1)\n imag_kernel = measure_operator[1].unsqueeze(-1)\n\n projector_real = torch.matmul(real_kernel, real_kernel.transpose(1, 2)) \\\n + torch.matmul(imag_kernel, imag_kernel.transpose(1, 2)) \n projector_imag = torch.matmul(imag_kernel, real_kernel.transpose(1, 2)) \\\n - torch.matmul(real_kernel, imag_kernel.transpose(1, 2))\n # only real part is non-zero\n # input_real.shape = [batch_size, seq_len, embed_dim, embed_dim] or [batch_size, embed_dim, embed_dim]\n # projector_real.shape = [num_measurements, embed_dim, embed_dim]\n output_real = torch.matmul(torch.flatten(input_real, start_dim = -2, end_dim = -1), torch.flatten(projector_real, start_dim = -2, end_dim = -1).t())\\\n - torch.matmul(torch.flatten(input_imag, start_dim = -2, end_dim = -1), torch.flatten(projector_imag, start_dim = -2, end_dim = -1).t())\n \n return output_real\n \nif __name__ == '__main__':\n model = ComplexMeasurement(6, units=3)\n a = torch.randn(5,6,6)\n b = torch.randn(5,6,6)\n#\n y_pred = model([a,b])\n print(y_pred.shape)\n# \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"qiuchili/qnn_torch","sub_path":"layers/complexnn/measurement.py","file_name":"measurement.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"16459523677","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Python code to find key with Maximum value in Dictionary\n\n# Dictionary Initialization\nMobile_data = {'motorola':5000, 'nokia':4500, 'oppo' : 6500, 'iphone': 100000, 'mi': 2500}\n\n#list of mobile values in m\nm = 
list(Mobile_data.values())\n \n# list of mobile keys in p\np = list(Mobile_data.keys())\n \nprint(\"The maximum mobile price is \" + p[m.index(max(m))])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Ramachalpandey/Assignment-sub-python-zero-to-hero","sub_path":"assignment3ramachal.py","file_name":"assignment3ramachal.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21907279395","text":"from sys import stdin\r\n\r\nreadline = stdin.readline\r\nn = int(readline())\r\na = list(map(int, readline().split()))\r\nb = list(map(int, readline().split()))\r\nq = int(readline())\r\nquery = [tuple(map(int, readline().split())) for _ in [0] * q]\r\n\r\na_idx_di = {v: n - i - 1 for i, v in enumerate(a[::-1])}\r\nb_idx_di = {v: n - i - 1 for i, v in enumerate(b[::-1])}\r\n\r\nab_idx_li = [0] * (n + 1)\r\nfor i, val in enumerate(a):\r\n    idx = b_idx_di.get(val, n)\r\n    ab_idx_li[i + 1] = max(ab_idx_li[i], idx)\r\n\r\nba_idx_li = [0] * (n + 1)\r\nfor i, val in enumerate(b):\r\n    idx = a_idx_di.get(val, n)\r\n    ba_idx_li[i + 1] = max(ba_idx_li[i], idx)\r\n\r\nfor x, y in query:\r\n    yy = ab_idx_li[x]\r\n    xx = ba_idx_li[y]\r\n    if xx < x and yy < y:\r\n        ans = \"Yes\"\r\n    else:\r\n        ans = \"No\"\r\n    print(ans)\r\n","repo_name":"Balut-moko/procon-grassmaker-archive","sub_path":"atcoder/abc250/abc250_e/31573005.py","file_name":"31573005.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39950258125","text":"from src.utils.all_utils import read_yaml, create_directory\nimport argparse\nimport pandas as pd\nimport os\n\n# this function saves data to our local directory\n\ndef get_data(config_path):\n    config = read_yaml(config_path) # read the config file for the data source and local save paths \n    \n    remote_data_path = config['data_source'] # we have the path of the data\n    df = pd.read_csv(remote_data_path, sep=\";\") # load data into a dataframe; the data is separated by ;\n\n    # now we have to save this data locally in our artifacts directory\n    # create path to directory = \"artifacts/raw_local_dir/data.csv\"\n    artifacts_dir = config['artifacts']['artifacts_dir']\n    raw_local_dir = config['artifacts']['raw_local_dir']\n\n    raw_local_file = config['artifacts']['raw_local_file']\n\n    # we now have all the information we need. 
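All that is left is to build the output path and write the CSV.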
\n    # create path of directory\n    raw_local_dir_path = os.path.join(artifacts_dir, raw_local_dir)\n\n    create_directory(dirs = [raw_local_dir_path])\n\n    raw_local_file_path = os.path.join(raw_local_dir_path, raw_local_file)\n    \n    df.to_csv(raw_local_file_path, sep=\",\", index=False)\n\n\n\n\n\n# this is the entry point for this file\n# execution starts from here\nif __name__ == '__main__':\n\n    # command line argument\n    args = argparse.ArgumentParser() # build the argument parser\n\n    args.add_argument(\"--config\", \"-c\", default=\"config/config.yaml\") # pass argument when program runs\n\n    parsed_args = args.parse_args()\n\n    get_data(config_path = parsed_args.config) # call the function defined above","repo_name":"mihir3030/DVC_SIMPLE_MLOPS","sub_path":"src/stage_01_load_save.py","file_name":"stage_01_load_save.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10703149635","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport os\r\n\r\nclass MyNeuralNetwork:\r\n    def __init__(self, num_layers, num_units, num_epochs, learning_rate, momentum, activation_function, validation_percentage):\r\n        self.L = num_layers\r\n        self.n = num_units\r\n        self.num_epochs = num_epochs\r\n        self.eta = learning_rate\r\n        self.alpha = momentum\r\n        self.fact = activation_function\r\n        self.validation_percentage = validation_percentage\r\n        self.xi = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.h = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.w = [None] + [np.zeros((num_units[i], num_units[i - 1])) for i in range(1, num_layers)]\r\n        self.theta = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.delta = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.d_w = [None] + [np.zeros((num_units[i], num_units[i - 1])) for i in range(1, num_layers)]\r\n        self.d_theta = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.d_w_prev = [None] + [np.zeros((num_units[i], num_units[i - 1])) for i in range(1, num_layers)]\r\n        self.d_theta_prev = [np.zeros(layer_units) for layer_units in num_units]\r\n        self.training_error = []\r\n        self.validation_error = []\r\n\r\n    # Sigmoid activation function\r\n    def sigmoid(self, x):\r\n        return 1 / (1 + np.exp(-x))\r\n\r\n    def sigmoid_derivative(self, x):\r\n        return x * (1 - x)\r\n\r\n    def relu(self, x):\r\n        return np.maximum(0, x)\r\n\r\n    def relu_derivative(self, x):\r\n        return (x > 0).astype(float)\r\n\r\n    def linear(self, x):\r\n        return x\r\n\r\n    def linear_derivative(self, x):\r\n        return np.ones_like(x)\r\n\r\n    def tanh(self, x):\r\n        return np.tanh(x)\r\n\r\n    def tanh_derivative(self, x):\r\n        return 1 - x**2\r\n\r\n    def activation(self, x):\r\n        if self.fact == 'sigmoid':\r\n            return self.sigmoid(x)\r\n        elif self.fact == 'relu':\r\n            return self.relu(x)\r\n        elif self.fact == 'linear':\r\n            return self.linear(x)\r\n        elif self.fact == 'tanh':\r\n            return self.tanh(x)\r\n\r\n    def activation_derivative(self, x):\r\n        if self.fact == 'sigmoid':\r\n            return self.sigmoid_derivative(x)\r\n        elif self.fact == 'relu':\r\n            return self.relu_derivative(x)\r\n        elif self.fact == 'linear':\r\n            return self.linear_derivative(x)\r\n        elif self.fact == 'tanh':\r\n            return self.tanh_derivative(x)\r\n\r\n    def feed_forward(self, sample):\r\n        self.xi[0] = sample\r\n        for l in range(1, self.L):\r\n            self.h[l] = np.dot(self.w[l], self.xi[l - 1]) - self.theta[l]\r\n            self.xi[l] = self.activation(self.h[l])\r\n\r\n    def 
backpropagate(self, target):\r\n self.delta[self.L - 1] = self.activation_derivative(self.xi[self.L - 1]) * (self.xi[self.L - 1] - target)\r\n for l in range(self.L - 2, 0, -1):\r\n self.delta[l] = self.activation_derivative(self.xi[l]) * np.dot(self.w[l + 1].T, self.delta[l + 1])\r\n\r\n def update_weights(self):\r\n for l in range(1, self.L):\r\n self.d_w[l] = -self.eta * np.outer(self.delta[l], self.xi[l - 1]) + self.alpha * self.d_w_prev[l]\r\n self.d_theta[l] = self.eta * self.delta[l] + self.alpha * self.d_theta_prev[l]\r\n self.w[l] += self.d_w[l]\r\n self.theta[l] += self.d_theta[l]\r\n self.d_w_prev[l] = self.d_w[l]\r\n self.d_theta_prev[l] = self.d_theta[l]\r\n\r\n def calculate_total_error(self, X, y):\r\n total_error = 0.0\r\n for i in range(X.shape[0]):\r\n self.feed_forward(X[i])\r\n total_error += 0.5 * np.sum((self.xi[self.L - 1] - y[i]) ** 2)\r\n return total_error\r\n\r\n def fit(self, X, y):\r\n # Split data into training and validation sets\r\n n_samples = X.shape[0]\r\n if self.validation_percentage > 0:\r\n n_train = int(n_samples * (1.0 - self.validation_percentage))\r\n X_train = X[:n_train]\r\n y_train = y[:n_train]\r\n X_val = X[n_train:]\r\n y_val = y[n_train:]\r\n else:\r\n X_train = X\r\n y_train = y\r\n X_val = np.array([])\r\n y_val = np.array([])\r\n\r\n for epoch in range(self.num_epochs):\r\n for i in range(X_train.shape[0]):\r\n sample = X_train[i]\r\n target = y_train[i]\r\n\r\n self.feed_forward(sample)\r\n self.backpropagate(target)\r\n self.update_weights()\r\n\r\n # Calculate training error\r\n train_error = self.calculate_total_error(X_train, y_train)\r\n self.training_error.append(train_error)\r\n\r\n # Calculate validation error\r\n if X_val.shape[0] > 0:\r\n val_error = self.calculate_total_error(X_val, y_val)\r\n self.validation_error.append(val_error)\r\n\r\n def predict(self, X):\r\n predictions = []\r\n for sample in X:\r\n self.feed_forward(sample)\r\n predictions.append(self.xi[self.L - 1].copy())\r\n return np.array(predictions)\r\n\r\n # Function to retrieve epoch-wise loss data in the required format\r\n def get_epoch_losses(self):\r\n epochs = list(range(1, self.num_epochs + 1))\r\n return np.column_stack((epochs, self.training_error, self.validation_error))\r\n\r\n # Modify the loss_epochs() function to use get_epoch_losses()\r\n def loss_epochs(self):\r\n return self.get_epoch_losses()\r\n\r\n# Function to train a neural network on a given dataset\r\ndef train_neural_network(dataset, input_columns, output_column, num_layers, num_units, num_epochs, learning_rate, momentum, activation_function, validation_percentage):\r\n X = dataset[input_columns].values\r\n y = dataset[output_column].values.reshape(-1, 1)\r\n\r\n nn = MyNeuralNetwork(num_layers, num_units, num_epochs, learning_rate, momentum, activation_function, validation_percentage)\r\n \r\n nn.fit(X, y)\r\n \r\n X_test = X # You can use a different test dataset if needed\r\n predictions = nn.predict(X_test)\r\n\r\n return nn, predictions\r\n\r\n# Load the three datasets\r\n#dataset1 = pd.read_csv('modified_A1-turbine.csv')\r\n#dataset2 = pd.read_csv('A1-synthetic.csv')\r\ndataset3 = pd.read_csv('A1-real_estate.csv')\r\n\r\n# Define input and output columns for each dataset\r\n#input_columns1 = ['height_over_sea_level', 'fall', 'net', 'fall_1', 'flow']\r\n#output_column1 = 'power_of_hydroelectrical_turbine'\r\n\r\n#input_columns2 = ['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9']\r\n#output_column2 = 'z'\r\n\r\ninput_columns3 = ['X1 transaction date', 'X2 house age', 'X3 
distance to the nearest MRT station', 'X4 number of convenience stores', 'X5 latitude', 'X6 longitude']\r\noutput_column3 = 'Y house price of unit area'\r\n\r\n# Define neural network configurations for each dataset\r\n\r\n#num_units1 = [len(input_columns1)] + [10, 5, 1] # Adjust the architecture as needed\r\n#num_units2 = [len(input_columns2)] + [10, 5, 1]\r\nnum_units3 = [len(input_columns3)] + [10, 5, 1]\r\n\r\nnum_layers = 4\r\nnum_epochs = 1000\r\nlearning_rate = 0.1\r\nmomentum = 0.0\r\nactivation_function = \"sigmoid\"\r\nvalidation_percentage = 0.2\r\n\r\n# Train the neural network for each dataset\r\n#nn1, predictions1 = train_neural_network(dataset1, input_columns1, output_column1, num_layers, num_units1, num_epochs, learning_rate, momentum, activation_function, validation_percentage)\r\n\r\n#nn2, predictions2 = train_neural_network(dataset2, input_columns2, output_column2, num_layers, num_units2, num_epochs, learning_rate, momentum, activation_function, validation_percentage)\r\n\r\nnn3, predictions3 = train_neural_network(dataset3, input_columns3, output_column3, num_layers, num_units3, num_epochs, learning_rate, momentum, activation_function, validation_percentage)\r\n\r\n# For dataset 1\r\n#loss_data1 = nn1.loss_epochs()\r\n\r\n# For dataset 2 \r\n#ta2 = nn2.loss_epochs()\r\n\r\n# For dataset 3\r\nloss_data3 = nn3.loss_epochs()\r\n\r\n# Define headers for the loss data CSV file\r\nloss_headers = ['Epochs', 'Training Loss', 'Validation Loss']\r\n\r\n# Combine headers with the loss dataset 1\r\n#loss_data_with_headers = np.vstack([loss_headers, loss_data1])\r\n\r\n# Combine headers with the loss dataset 2\r\n#loss_data_with_headers = np.vstack([loss_headers, loss_data2])\r\n\r\n# Combine headers with the loss dataset 3\r\nloss_data_with_headers = np.vstack([loss_headers, loss_data3])\r\n\r\n# Save the loss data along with headers to a CSV file\r\nnp.savetxt(\"loss_data_real_estate.csv\", loss_data_with_headers, delimiter=\",\", fmt='%s')\r\n\r\n# Save predictions to files\r\n#np.savetxt(\"predictions_dataset1.csv\", predictions1, delimiter=\",\")\r\n#np.savetxt(\"predictions_dataset2.csv\", predictions2, delimiter=\",\")\r\nnp.savetxt(\"predictions_real_estate.csv\", predictions3, delimiter=\",\")\r\n\r\n# Generate line plots for training and validation losses vs. epochs\r\nplt.figure(figsize=(8, 6))\r\nplt.plot(loss_data3[:, 0], loss_data3[:, 1], label='Training Loss')\r\nplt.plot(loss_data3[:, 0], loss_data3[:, 2], label='Validation Loss')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Loss')\r\nplt.title('Training and Validation Loss Over Epochs (Real Estate)')\r\nplt.legend()\r\nplt.grid(True)\r\nplt.savefig('loss_plot_real_estate.png')\r\nplt.show()\r\n\r\n# Assuming predictions2 contains predicted values and dataset2 contains the real values\r\nreal_values = dataset3[output_column3].values.reshape(-1, 1)\r\n\r\n# Generate scatter plot for real values vs. 
predicted values\r\nplt.figure(figsize=(8, 6))\r\nplt.scatter(real_values, predictions3, alpha=0.5)\r\nplt.xlabel('Real Values')\r\nplt.ylabel('Predicted Values')\r\nplt.title('Correlation between Real and Predicted Values (Real Estate)')\r\nplt.grid(True)\r\n\r\nplt.savefig('correlation_plot_real_estate.png')\r\nplt.show() \r\n\r\n\r\n\r\n","repo_name":"Pawel712/NEC-assignments","sub_path":"Assignment1/Part3/neuralNetworkBpPlot.py","file_name":"neuralNetworkBpPlot.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72592431049","text":"from pymoo.algorithms.soo.nonconvex.pso import PSO\nfrom pymoo.core.problem import Problem\nfrom pymoo.core.termination import NoTermination\nfrom pymoo.problems.static import StaticProblem\n\nproblem = Problem(n_var=10, n_obj=1, n_ieq_constr=1, xl=-0, xu=1)\n\nalgorithm = PSO().setup(problem, termination=NoTermination(), verbose=False)\n\nfor k in range(20):\n\n if not algorithm.has_next():\n break\n\n infills = algorithm.ask()\n\n X = infills.get(\"X\")\n\n F = (X ** 2).sum(axis=1)\n G = - (X[:, 0] + X[:, 1]) - 0.3\n\n algorithm.evaluator.eval(StaticProblem(problem, F=F, G=G), infills)\n\n algorithm.tell(infills=infills)\n\n print(k + 1, algorithm.opt[0].F[0])\n\nprint(algorithm.opt.get(\"F\"))\n","repo_name":"anyoptimization/pymoo","sub_path":"examples/algorithms/ask_and_tell.py","file_name":"ask_and_tell.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1804,"dataset":"github-code","pt":"16"} +{"seq_id":"20819978802","text":"from dummy_webapp.settings.base import *\n\nDEBUG = True\n\n# CACHE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n }\n}\n# END CACHE CONFIGURATION\n\n# DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': root('default.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n# END DATABASE CONFIGURATION\n\n# EMAIL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n# END EMAIL CONFIGURATION\n\n# TOOLBAR CONFIGURATION\n# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html\nif os.environ.get('ENABLE_DJANGO_TOOLBAR', False):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n MIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n )\n\n DEBUG_TOOLBAR_PATCH_SETTINGS = False\n\nINTERNAL_IPS = ('127.0.0.1',)\n# END TOOLBAR CONFIGURATION\n\n# AUTHENTICATION\n# Set these to the correct values for your OAuth2/OpenID Connect provider (e.g., devstack)\nSOCIAL_AUTH_EDX_OIDC_KEY = 'replace-me'\nSOCIAL_AUTH_EDX_OIDC_SECRET = 'replace-me'\nSOCIAL_AUTH_EDX_OIDC_URL_ROOT = 'replace-me'\nSOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY = SOCIAL_AUTH_EDX_OIDC_SECRET\n\nENABLE_AUTO_AUTH = True\n\n#####################################################################\n# Lastly, see if the developer has any local overrides.\nif os.path.isfile(join(dirname(abspath(__file__)), 'private.py')):\n from .private import * # pylint: 
disable=import-error\n","repo_name":"edx-unsupported/dummy-webapp","sub_path":"dummy_webapp/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"11992480085","text":"\"\"\"\nThis module parses the dblp.xml file into sql and stores it in a postgres database.\nCreated by: Silvan Wiedmer\nCreated at: 17.03.2023\n\"\"\"\nimport os\nfrom time import time\nfrom multiprocessing import Process, Queue\n\nfrom psycopg2 import connect, sql, errors\nfrom dotenv import load_dotenv\nfrom lxml import etree\n\n# load environment variables from .env file\nload_dotenv()\n\n# define worker amount\nWORKERS: int = 6\n\nduplicates: list[str] = [\n \"cdrom\",\n \"cite\",\n \"publisher\",\n \"author\",\n \"note\",\n \"school\",\n \"editor\",\n \"url\",\n \"ee\",\n \"crossref\",\n \"isbn\",\n \"pages\",\n \"year\",\n \"title\",\n \"series\"\n]\n\ntypes: list[str] = [\n 'article',\n 'inproceedings',\n 'proceedings',\n 'book',\n 'incollection',\n 'phdthesis',\n 'mastersthesis',\n 'www'\n]\n\n# postgress connection\nconn = connect(\n database = \"Test\",\n host = \"localhost\",\n user = os.getenv('POSTGRES_USER'),\n password = os.getenv('POSTGRES_PASSWORD'),\n port=\"5432\"\n)\n\ndef elements(queue_sql: Queue, element) -> None:\n \"\"\"\n The elements function generates the sql for the entire element and sends it to the sql queue.\n\n Parameters:\n - queue_sql: Queue => The queue to transfer sql commands to different processes\n - element => The current xml element to parse\n \"\"\"\n # create sql query for insertion\n query = sql.SQL('INSERT INTO entry ({columns}) VALUES ({values});').format(\n columns = sql.SQL(\",\").join(map(sql.Identifier, element.attrib)),\n values = sql.SQL(\",\").join(map(sql.Literal, element.attrib.values()))\n )\n\n non_duplicate = []\n\n # check all childs of element\n for child in element:\n\n # check if tag is duplicate\n if child.tag in duplicates:\n tag: str = str(child.tag)\n\n insert_child_tag = sql.SQL(\n 'INSERT INTO {table} (name) VALUES ({text}) ON CONFLICT (name) DO NOTHING;'\n ).format(\n table = sql.Identifier(tag),\n text = sql.Literal(child.text)\n )\n\n insert_entry_child = sql.SQL(\n \"\"\"\n INSERT INTO {table} ({column}, entry_key) \n VALUES ((SELECT id FROM {table2} \n WHERE name = {text}), {key});\n \"\"\"\n ).format(\n table = sql.Identifier('entry_' + tag),\n column = sql.Identifier(tag + '_id'),\n table2 = sql.Identifier(tag),\n key = sql.Literal(element.attrib[\"key\"]),\n text = sql.Literal(child.text)\n )\n\n query += insert_child_tag\n query += insert_entry_child\n else:\n non_duplicate.append(child)\n\n columns = ['entry_key']\n values = []\n\n for childs in non_duplicate:\n columns.append(childs.tag)\n values.append(childs.text)\n\n if len(values) != 0:\n # add to according table\n insert_non_duplicates = sql.SQL(\n 'INSERT INTO {table} ({column}) VALUES ({id}, {value});'\n ).format(\n table = sql.Identifier(element.tag),\n column = sql.SQL(', '.join(columns)),\n id = sql.Literal(element.attrib[\"key\"]),\n value = sql.SQL(\",\").join(map(sql.Literal, values))\n )\n else:\n # add to according table\n insert_non_duplicates = sql.SQL('INSERT INTO {table} ({column}) VALUES ({id});').format(\n table = sql.Identifier(element.tag),\n column = sql.SQL(', '.join(columns)),\n id = sql.Literal(element.attrib[\"key\"])\n )\n\n query += insert_non_duplicates\n\n queue_sql.put(query)\n\ndef read(queue_sql: Queue):\n \"\"\"\n The read 
function reads the xml file and calls the elements function for every element\n\n Parameters: \n - queue_sql: Queue => The queue required for the elements function\n \"\"\"\n before = time()\n # read file\n for _, element in etree.iterparse('dblp.xml', dtd_validation=True):\n # check if current tag in types\n if element.tag in types:\n\n # process elements\n elements(queue_sql, element)\n\n # clear element from memory\n element.clear()\n\n after = time()\n\n print(f'Read file in {after - before:.3} seconds')\n print(\"-\" * 75)\n\n # stop workers\n for _ in range(WORKERS):\n queue_sql.put('STOP')\n\ndef insert(queue: Queue):\n \"\"\"\n The insert function executes all the sql querys from the provided queue until 'Stop' is received\n\n Parameters:\n - queue: Queue => The queue to get the queries from\n \"\"\"\n cursor = conn.cursor()\n before = time()\n for query in iter(queue.get, 'STOP'):\n try:\n cursor.execute(query)\n except errors.InvalidColumnReference as error:\n print(error)\n print(query)\n print(\"-\" * 75)\n except errors.DeadlockDetected as error:\n print(error)\n print(query)\n print(\"-\" * 75)\n # failed to insert try again\n queue.put(query)\n except errors.UndefinedColumn as error:\n print(error)\n print(query)\n print(\"-\" * 75)\n conn.commit()\n after = time()\n\n print(f'Executed SQL commands in {after - before:.3} seconds')\n return\n\nif __name__ == \"__main__\":\n sql_queue = Queue()\n\n read_process = Process(target=read, args=(sql_queue, ))\n read_process.start()\n\n insert_processes: list[Process] = []\n for _ in range(WORKERS):\n insert_processes.append(Process(target=insert, args=(sql_queue,)))\n\n for process in insert_processes:\n process.start()\n\n read_process.join()\n\n for process in insert_processes:\n process.join()\n","repo_name":"Xuyen21/Programming_semester_2","sub_path":"app/xmlParser/xml_parser.py","file_name":"xml_parser.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70617253128","text":"from lightgbm import LGBMRegressor\nfrom skforecast.ForecasterAutoreg import ForecasterAutoreg\nfrom typing import Tuple\n\nfrom src.data import load_credit\nfrom src.data import split_train_test\n\n\ndef credit_train_test_model(data_path: str) -> Tuple[ForecasterAutoreg, int]:\n data = load_credit(data_path)\n train, test = split_train_test(data)\n test_length = len(test)\n forecaster = ForecasterAutoreg(\n regressor = LGBMRegressor(\n lambda_l1=0.01,\n learning_rate=0.6,\n num_leaves=10,\n max_leaf_nodes=30,\n max_depth=10, \n n_estimators=500,\n random_state=123),\n lags = 10\n )\n\n forecaster.fit(y=train['Credit'])\n\n return forecaster, test_length\n\n\ndef credit_dataset_model(data_path: str) -> ForecasterAutoreg:\n data = load_credit(data_path)\n\n forecaster = ForecasterAutoreg(\n regressor=LGBMRegressor(\n lambda_l2=0.01,\n lambda_l1=0.01,\n learning_rate=0.6,\n num_leaves=32,\n max_leaf_nodes=30,\n max_depth=4,\n n_estimators=500,\n random_state=123,\n ),\n lags=10,\n )\n\n forecaster.fit(y=data[\"Credit\"])\n # from joblib import dump, load\n\n # dump(forecaster, filename='credit_forecaster.py')\n # forecaster_loaded = load('forecaster.py')\n # forecaster_loaded.predict(steps=3)\n \n return 
forecaster\n","repo_name":"beckmiller/payments_prediction","sub_path":"src/models/train_credit_model.py","file_name":"train_credit_model.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24131032366","text":"from rest_framework import viewsets, generics, views, status\nfrom rest_framework.response import Response\n\nfrom ivrs.models import IVR\nfrom ivrs.serializers import IVRSerializer\nfrom calls.models import Call\n\n\nclass IVRViewSet(viewsets.ModelViewSet):\n queryset = IVR.objects.order_by('-created_at')\n serializer_class = IVRSerializer\n\n\nclass BeneficiaryIVRsView(generics.ListAPIView):\n serializer_class = IVRSerializer\n\n def get(self, request, user_pk=None):\n queryset = IVR.objects.filter(beneficiary=user_pk)\n serializer = self.serializer_class(queryset, many=True)\n\n return Response(serializer.data)\n\n\nclass SentIVRsView(generics.ListAPIView):\n serializer_class = IVRSerializer\n\n def get(self, request, user_pk=None):\n queryset = IVR.objects.filter(sender=user_pk)\n serializer = self.serializer_class(queryset, many=True)\n\n return Response(serializer.data)\n\n\nclass FeedbackIVRView(views.APIView):\n number_of_questions = 6\n\n credits = {\n \"default\": 100/number_of_questions\n }\n\n grades = {\n \"default\": {\n \"1\": 1,\n \"2\": 0.4\n },\n \"q6\": {\n \"1\": 0.4,\n \"2\": 0.55,\n \"3\": 0.70,\n \"4\": 0.85,\n \"5\": 1\n }\n }\n\n def get(self, request, format=None):\n args_dict = dict(request.GET.iterlists())\n\n question = ''\n response = ''\n question_credits = 0\n final_credits = 0\n\n total_credits = 0\n credits_count = 0\n\n for key in self.credits.keys():\n if key != 'default':\n total_credits += self.credits[key]\n credits_count += 1\n\n while credits_count < self.number_of_questions:\n total_credits += 100/float(self.number_of_questions)\n credits_count += 1\n\n mobile = ''\n\n if 'question' in args_dict.keys():\n question = str(args_dict['question'][0])\n\n if 'response' in args_dict.keys():\n response = str(args_dict['response'][0])\n\n if 'From' in args_dict.keys():\n mobile = str(args_dict['From'][0])[1:]\n else:\n return Response({'status': 'mobile not available'}, status.HTTP_400_BAD_REQUEST)\n\n if question in self.credits.keys():\n question_credits = self.credits[question]\n else:\n question_credits = self.credits[\"default\"]\n\n if question in self.grades.keys():\n if response in self.grades[question].keys():\n final_credits = question_credits * self.grades[question][response]\n else:\n if response in self.grades[\"default\"].keys():\n final_credits = question_credits * self.grades[\"default\"][response]\n\n call = Call.objects.filter(beneficiary__profile__mobile=mobile).last()\n\n final_credits = final_credits / float(total_credits) * 5\n\n task = call.task\n task.assignee_rating += final_credits\n task.save()\n\n return Response({'status': 'ok'}, status.HTTP_200_OK)\n","repo_name":"Indus-Action/Campaign-Management-System","sub_path":"CMS/apps/ivrs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33641185738","text":"import re\nimport pandas as pd\n\n\nclass PrepareShifts:\n \"\"\"\n PrepareShifts is the class that implements the business logic of finding the optimum partitions and the\n associated shifts for the Barista demand data.\n \"\"\"\n\n def __init__(self, allocated_shifts, demand_data):\n 
\"\"\"\n Constructor for PrepareShifts\n :param allocated_shifts: The allocated shifts from shift allocator\n :type allocated_shifts: List\n :param demand_data: The Time column of the Dataframe\n :type demand_data: Series\n \"\"\"\n self.allocated_shifts = allocated_shifts\n self.demand_data = demand_data\n self.start_time = []\n self.end_time = []\n self.time_format = \"{:02d}:00:00\"\n\n def get_allocated_shifts(self):\n \"\"\"\n Getter for allocated_shifts\n :return: allocated_shifts\n :rtype: List\n \"\"\"\n return self.allocated_shifts\n\n def get_demand_data(self):\n \"\"\"\n Getter for demand_data\n :return: The actual dataframe data from the file\n :rtype: Dataframe\n \"\"\"\n return self.demand_data\n\n def set_start_time(self, start_time):\n \"\"\"\n Setter for start time of a shift\n :param start_time: The string formatted time for start time\n :type start_time: str\n :return: None\n :rtype: None\n \"\"\"\n self.start_time.append(start_time)\n\n def get_start_time(self):\n \"\"\"\n Getter for start time\n :return: The list for start_time\n :rtype: List\n \"\"\"\n return self.start_time\n\n def set_end_time(self, end_time):\n \"\"\"\n Setter for end time\n :param end_time: The string formatted time for end time\n :type end_time: str\n :return: None\n :rtype: None\n \"\"\"\n self.end_time.append(end_time)\n\n def get_end_time(self):\n \"\"\"\n Getter for end_time\n :return: List of end_time\n :rtype: List\n \"\"\"\n return self.end_time\n\n def get_time_format(self):\n \"\"\"\n Getter for time_format\n :return: The set time format\n :rtype: str\n \"\"\"\n return self.time_format\n\n def prepare_time_list_according_to_format(self):\n \"\"\"\n Method to convert the time formats into a 24 hour time format.\n :return: A list of the time formats without am|pm indicators and converted to 24 hour format timings\n :rtype: List\n \"\"\"\n final_time_list = []\n actual_data = list(self.get_demand_data()[\"Time\"])\n for each in actual_data:\n if \"pm\" in each and \"am\" not in each:\n sub_str = []\n res = re.sub('pm', '', each)\n res = res.split(\"-\")\n res = list(map(int, res))\n for each_split in res:\n if each_split < 12:\n each_split += 12\n sub_str.append(str(each_split))\n final_time_list.append(\"-\".join(sub_str))\n if \"am\" in each and \"pm\" not in each:\n res = re.sub('am', '', each)\n final_time_list.append(res)\n if \"am\" in each and \"pm\" in each:\n res = re.sub('am|pm', '', each)\n final_time_list.append(res)\n return final_time_list\n\n def get_string_number(self, time_string, position):\n \"\"\"\n Method to convert the extracted time numeric number into the required format.\n :param time_string: time numeric value\n :type time_string: str\n :param position: to indicate whether the time is to be split at the start or the end\n :type position: str\n :return: Formatted string time representation\n :rtype: str\n \"\"\"\n if position == \"start\":\n return self.get_time_format().format(int(time_string.split(\"-\")[0]))\n else:\n return self.get_time_format().format(int(time_string.split(\"-\")[1]))\n\n def prepare_shifts(self):\n \"\"\"\n Method to logically evaluates the demand data and the allocated shifts to make entries for start time and end\n time for shifts\n :return: None\n :rtype: None\n \"\"\"\n actual_data = self.prepare_time_list_according_to_format()\n for time, demand in self.get_allocated_shifts().items():\n demand = list(demand)\n time = list(time)\n start_index = 0\n for i in range(len(demand)):\n start_time_stamp = 
self.get_string_number(actual_data[start_index], \"start\")\n end_time_stamp = self.get_string_number(actual_data[start_index + time[i] - 1], \"end\")\n start_index = start_index + time[i]\n for j in range(demand[i]):\n self.set_start_time(start_time_stamp)\n self.set_end_time(end_time_stamp)\n\n def prepare_final_shift_dataframe(self):\n \"\"\"\n Method to create a Dataframe out of the lists prepared\n :return: Dataframe with start and end times for shifts\n :rtype: Dataframe\n \"\"\"\n shift_information = pd.DataFrame({'Shift Start': self.get_start_time(),\n 'Shift End': self.get_end_time()})\n return shift_information\n","repo_name":"girish1993/scheduler","sub_path":"prepare_output/prepare_shifts.py","file_name":"prepare_shifts.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27515383332","text":"import itertools\n\nimport numpy as np\nfrom addict import Dict\nfrom tqdm import tqdm\n\n\nclass Rollout_Generator:\n def __init__(self, model_paths):\n from environments.walker.walker_step import Walker_Step_Env\n self.env = Walker_Step_Env(speed_early_stop=False)\n\n self.model_paths = model_paths\n self.models = list()\n # load models\n from stable_baselines3 import TD3\n for model_path in model_paths:\n model = TD3.load(model_path)\n self.models.append((model_path, model))\n\n def gen_rollouts(self, step_sizes, speeds):\n rollouts = list()\n rollout_cfg = list(itertools.product(self.models, step_sizes, speeds))\n print(f'[INFO] {len(rollout_cfg)} configs to run')\n for e in tqdm(rollout_cfg, desc='Generating rollouts ...'):\n step_size, speed = e[1], e[2]\n success, expr_name, rollout_record = self.gen_single_rollout(model=e[0],\n step_size=step_size,\n speed=speed)\n if success:\n rollouts.append(rollout_record)\n print(f'[INFO] generated {len(rollouts)} successful rollouts.')\n return rollouts\n\n def gen_single_rollout(self, model, step_size, speed):\n model_path, policy_model = model[0], model[1]\n expr_name = f'{step_size}_{speed}'\n success = False\n\n self.env.set_target(step_size_range=(step_size, step_size + 0.036),\n target_speed_range=(speed, speed + 0.0056),\n ideal_height_range=(0.015, 0.015))\n state_sequence = []\n rgb_frames = []\n info_list = []\n\n policy_state = self.env.reset()\n obs = self.env.get_raw_obs()\n rgb_obs = self.env.get_frame_rgb()\n n_step = 0\n done = False\n final_step_size, softness_speed = -1, -1\n\n while not done and n_step < 100:\n action, _states = policy_model.predict(policy_state, deterministic=True)\n next_policy_state, reward, done, info = self.env.step(action)\n next_obs = self.env.get_raw_obs()\n next_rgb = self.env.get_frame_rgb()\n\n state_sequence.append(np.copy(obs))\n rgb_frames.append(np.copy(rgb_obs))\n info = Dict(info)\n info.update({'done': done,\n 'next_obs': np.copy(next_obs),\n 'reward': reward,\n 'action': action,\n 'timestep': n_step})\n info_list.append(info)\n\n if done and info.done_message == 'success':\n if info.feet_dist is not None and info.softness_speed is not None:\n final_step_size = info.feet_dist\n softness_speed = info.softness_speed\n success = True\n else:\n print(f'final_step_size ({final_step_size}) or softness_speed ({softness_speed}) is None.')\n success = False\n n_step += 1\n\n obs = next_obs\n policy_state = next_policy_state\n rgb_obs = next_rgb\n\n expr_record = Dict({\n 'ground_truth_attr': {\n 'step_size': final_step_size,\n 'softness': softness_speed\n },\n 'rgb_frames': rgb_frames,\n 
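# stack the per-step raw observations recorded above into one array\n                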
'state_sequence': np.array(state_sequence),\n 'info_list': info_list,\n 'traj_meta': {\n 'model_path': model_path,\n 'expr_name': expr_name,\n 'target_step_size': step_size,\n 'target_speed': speed,\n 'final_step_size': final_step_size,\n 'softness_speed': softness_speed,\n 'success': success\n }\n })\n return success, expr_name, expr_record\n\n\n\n","repo_name":"GuanSuns/Relative-Behavioral-Attributes-ICLR-23","sub_path":"data/gen/walker_step/walker_step_rollout.py","file_name":"walker_step_rollout.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"15282600350","text":"#Goal is to crop images into segments into the format 1080x1350 (ratio 1.25)\n\nfrom PIL import Image\n\nimgToCrop = Image.open(\"./input/_0IM9922.jpg\")\n\nexportResize = \"./output/resized.jpg\"\nexportSegmentsPath = \"./output/segment$$x$$.jpg\"\nexportBanner = \"./output/banner.jpg\"\n\nsegmentHeight = 1350\nsegmentWidth = 1080\n\ndef make_banner(image, min_size=256, fill_color=(0, 0, 0, 0)):\n x, y = segmentWidth, segmentHeight\n size = max(min_size, x, y)\n new_im = Image.new('RGBA', (x, y), fill_color)\n new_im.paste(image, (0, int((y -image.size[1]) / 2)))\n return new_im\n\nimagesExported = 1\n\nwidth, height = imgToCrop.size\nprint(width)\nprint(height)\n\nnewWidth = int(width/(height/segmentHeight))\nnewSize = newWidth, segmentHeight\n\nsegments = newWidth/segmentWidth\nprint(segments)\nprint(\"Segments there will be\")\n\nif newWidth%segmentWidth > 0:\n print(\"Cant resize image into good segments, please try another format\")\n print(newWidth%segmentWidth)\n print(\" pixels left\")\n optimalWidth = (newWidth - newWidth%segmentWidth)*(height/segmentHeight)\n print(\"Optimal width would be:\")\n print(optimalWidth)\n optimalRatio = optimalWidth/height\n print(\"Optimal ratio (width/height) would be:\")\n print(optimalRatio)\n print(\"Or version with one more image:\")\n optimalWidth2 = ((newWidth - newWidth%segmentWidth) + segmentWidth)*(height/segmentHeight)\n print(\"Optimal width2 would be:\")\n print(optimalWidth2)\n optimalRatio2 = optimalWidth2/height\n print(\"Optimal ratio (width/height) would be:\")\n print(optimalRatio2)\n exit()\n\nresizedImage = imgToCrop.resize(newSize, Image.Resampling.LANCZOS)\nresizedImage.save(exportResize)\n\nsegments = int(segments)\nfor x in range(segments):\n top = segmentHeight\n left = segmentWidth*x\n bottom = 0\n right = segmentWidth*(x+1)\n crop = \"top: \" + str(top) + \" left: \" + str(left) + \" bottom: \" + str(bottom) + \" right: \" + str(right)\n print(crop)\n croppedImage = resizedImage.crop((left, bottom, right, top))\n exportImagePath = exportSegmentsPath.replace(\"$$x$$\", str(imagesExported))\n print(exportImagePath)\n imagesExported = imagesExported+1\n croppedImage.save(exportImagePath, \"JPEG\")\n print(imagesExported)\n \nbannerSize = segmentWidth, int(segmentHeight/(newWidth/segmentWidth))\nbannerImage = imgToCrop.resize(bannerSize, Image.Resampling.LANCZOS)\n\nbannerImage = make_banner(bannerImage)\nbannerImage = bannerImage.convert('RGB')\nbannerImage.save(exportBanner)","repo_name":"RomanL1/instagram-carousel-cropper","sub_path":"cropper.py","file_name":"cropper.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4878156480","text":"from django.conf.urls import patterns, url, include\nfrom rest_framework import routers\nfrom app import 
views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'orders', views.OrderViewSet)\nrouter.register(r'lineitems', views.LineItemViewSet)\nrouter.register(r'products', views.ProductViewSet)\nrouter.register(r'catalogs', views.CatalogViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browseable API.\nurlpatterns = patterns('',\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n)\n","repo_name":"ruebenramirez/NOMS","sub_path":"NOMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18390338522","text":"class Data:\n def __init__(self, dia, mes, ano):\n self.dia = dia\n self.mes = mes\n self.ano = ano\n\n def show_data(self):\n dia_format = \"0\"+str(self.dia) if self.dia < 10 else self.dia\n mes_format = \"0\"+str(self.mes) if self.mes < 10 else self.mes\n print(f'{dia_format}/{mes_format}/{self.ano}')\n\n def __str__(self):\n dia_format = \"0\" + str(self.dia) if self.dia < 10 else self.dia\n mes_format = \"0\" + str(self.mes) if self.mes < 10 else self.mes\n return f'{dia_format}/{mes_format}/{self.ano}'\n\n\nd1 = Data(1, 5, 2021)\nd1.dia = 23\nd2 = Data(d1.dia, d1.mes, d1.ano)\nd2.dia = 24\nd1.show_data()\nd2.show_data()\nprint(d2)\n","repo_name":"bbnsdevelop/python_3_estudos","sub_path":"python3/poo/data_v1.py","file_name":"data_v1.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22864827848","text":"import os\nimport tempfile\nimport unittest\nimport uuid\n\nfrom requests.exceptions import HTTPError\n\nfrom modelscope.hub.api import HubApi\nfrom modelscope.hub.constants import Licenses, ModelVisibility\nfrom modelscope.hub.errors import GitError\nfrom modelscope.hub.file_download import model_file_download\nfrom modelscope.hub.repository import Repository\nfrom modelscope.hub.snapshot_download import snapshot_download\nfrom modelscope.utils.constant import ModelFile\nfrom modelscope.utils.test_utils import (TEST_ACCESS_TOKEN1,\n TEST_ACCESS_TOKEN2,\n TEST_MODEL_CHINESE_NAME,\n TEST_MODEL_ORG, delete_credential)\n\ndownload_model_file_name = 'test.bin'\n\n\nclass HubPrivateFileDownloadTest(unittest.TestCase):\n\n def setUp(self):\n self.old_cwd = os.getcwd()\n self.api = HubApi()\n self.token, _ = self.api.login(TEST_ACCESS_TOKEN1)\n self.model_name = 'pf-%s' % (uuid.uuid4().hex)\n self.model_id = '%s/%s' % (TEST_MODEL_ORG, self.model_name)\n self.revision = 'v0.1_test_revision'\n self.api.create_model(\n model_id=self.model_id,\n visibility=ModelVisibility.PRIVATE,\n license=Licenses.APACHE_V2,\n chinese_name=TEST_MODEL_CHINESE_NAME,\n )\n\n def prepare_case(self):\n temporary_dir = tempfile.mkdtemp()\n self.model_dir = os.path.join(temporary_dir, self.model_name)\n repo = Repository(self.model_dir, clone_from=self.model_id)\n os.system(\"echo 'testtest'>%s\"\n % os.path.join(self.model_dir, download_model_file_name))\n repo.push('add model')\n repo.tag_and_push(self.revision, 'Test revision')\n\n def tearDown(self):\n # credential may deleted or switch login name, we need re-login here\n # to ensure the temporary model is deleted.\n self.api.login(TEST_ACCESS_TOKEN1)\n os.chdir(self.old_cwd)\n self.api.delete_model(model_id=self.model_id)\n\n def test_snapshot_download_private_model(self):\n self.prepare_case()\n snapshot_path = 
snapshot_download(self.model_id, self.revision)\n assert os.path.exists(os.path.join(snapshot_path, ModelFile.README))\n\n def test_snapshot_download_private_model_no_permission(self):\n self.prepare_case()\n self.token, _ = self.api.login(TEST_ACCESS_TOKEN2)\n with self.assertRaises(HTTPError):\n snapshot_download(self.model_id, self.revision)\n\n def test_snapshot_download_private_model_without_login(self):\n self.prepare_case()\n delete_credential()\n with self.assertRaises(HTTPError):\n snapshot_download(self.model_id, self.revision)\n\n def test_download_file_private_model(self):\n self.prepare_case()\n file_path = model_file_download(self.model_id, ModelFile.README,\n self.revision)\n assert os.path.exists(file_path)\n\n def test_download_file_private_model_no_permission(self):\n self.prepare_case()\n self.token, _ = self.api.login(TEST_ACCESS_TOKEN2)\n with self.assertRaises(HTTPError):\n model_file_download(self.model_id, ModelFile.README, self.revision)\n\n def test_download_file_private_model_without_login(self):\n self.prepare_case()\n delete_credential()\n with self.assertRaises(HTTPError):\n model_file_download(self.model_id, ModelFile.README, self.revision)\n\n def test_snapshot_download_local_only(self):\n self.prepare_case()\n with self.assertRaises(ValueError):\n snapshot_download(\n self.model_id, self.revision, local_files_only=True)\n snapshot_path = snapshot_download(self.model_id, self.revision)\n assert os.path.exists(os.path.join(snapshot_path, ModelFile.README))\n snapshot_path = snapshot_download(\n self.model_id, self.revision, local_files_only=True)\n assert os.path.exists(snapshot_path)\n\n def test_file_download_local_only(self):\n self.prepare_case()\n with self.assertRaises(ValueError):\n model_file_download(\n self.model_id,\n ModelFile.README,\n self.revision,\n local_files_only=True)\n file_path = model_file_download(self.model_id, ModelFile.README,\n self.revision)\n assert os.path.exists(file_path)\n file_path = model_file_download(\n self.model_id,\n ModelFile.README,\n revision=self.revision,\n local_files_only=True)\n assert os.path.exists(file_path)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"modelscope/modelscope","sub_path":"tests/hub/test_hub_private_files.py","file_name":"test_hub_private_files.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"23797889312","text":"__all__ = [\"print_output\"]\n\n\ndef diff_to_str(diff):\n if diff[\"type\"] == \"set\":\n message = \"set to {to}\"\n elif diff[\"type\"] == \"del\":\n message = \"was removed (was {from})\"\n elif diff[\"type\"] == \"add\":\n return \"{} added {{{}}}\".format(diff[\"name\"], \", \".join(diff[\"added\"]))\n elif diff[\"type\"] == \"remove\":\n return \"{} removed {{{}}}\".format(diff[\"name\"], \", \".join(diff[\"removed\"]))\n elif diff[\"type\"] == \"change\":\n message = \"changed from {from} to {to}\"\n\n return \" \".join((diff[\"name\"], message.format(**diff)))\n\n\ndef join(sequence):\n items = [j for i in sequence for j in (i, \", \")]\n items.pop()\n\n if len(items) > 2:\n items[-2] = \" and \"\n\n return \"\".join(items)\n\n\ndef range_to_str(range):\n types = join(range[\"types\"])\n\n if range[\"type\"] == \"real\":\n return \"{} between {min} and {max}\".format(types, **range)\n\n return types\n\n\ndef print_step(step, timing=False, report=True):\n lineno, count, total = step[\"lineno\"], step[\"count\"], step[\"total\"]\n\n if timing:\n 
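# show how many times this line ran, plus its total and average runtime\n        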
print(\n            \"executed line {} {} time{}\".format(lineno, count, \"s\" if count > 1 else \"\")\n        )\n        print(\"    total: {} secs\".format(total))\n        print(\"    avg: {} secs\".format(total / count))\n\n    for diff in step[\"diffs\"]:\n        print(\"line {}: {}\".format(lineno, diff_to_str(diff)))\n\n    if report and step[\"type\"] == \"return\":\n        code_name = step[\"code_name\"]\n\n        for frame_value in step[\"frame_values\"]:\n            print(\n                \"{}: {} in {}\".format(\n                    frame_value[\"name\"], range_to_str(frame_value[\"range\"]), code_name\n                )\n            )\n\n            for lineno, value in frame_value[\"values\"]:\n                print(\"    set to {} on line {}\".format(value, lineno))\n\n\ndef print_output(output, timing=False, report=True):\n    for step in output[\"steps\"]:\n        print_step(step, timing=timing, report=report)\n\n    if timing:\n        print(\"total running time: {}\".format(output[\"running_time\"]))\n\n        for line in output[\"lines\"]:\n            print(\n                \"line {} was run {} time{}\".format(\n                    line[\"lineno\"], line[\"count\"], \"s\" if line[\"count\"] > 1 else \"\"\n                )\n            )\n\n    if report:\n        for var in output[\"vars\"]:\n            print(\n                \"{}: {} in {}\".format(\n                    var[\"name\"], range_to_str(var[\"range\"]), var[\"function\"],\n                )\n            )\n            print(\"    initial value: {}\".format(var[\"initial\"]))\n            print(\"    final value: {}\".format(var[\"final\"]))\n","repo_name":"cppio/debugger","sub_path":"debugger/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"14210417371","text":"\n\ndef exam1(data,name,number):\n    data.append((name,number))\n    for step in range(len(data)):\n        max_idx = step\n        for i in range(step+1, len(data)):\n            if data[i][1]> data[max_idx][1]:\n                max_idx = i\n        (data[step], data[max_idx]) = (data[max_idx], data[step])\n    return data\n\ndef exam2(array):\n    k=0\n    for step in range(len(array)):\n        min_idx = step\n\n        for i in range(step+1, len(array)):\n            if array[i] < array[min_idx]:\n                min_idx = i\n        if step != min_idx:\n            (array[step], array[min_idx]) =(array[min_idx], array[step])\n            k +=1\n    return k\n\ndef exam3(mass):\n    weight = [0, 6, 4, 3, 5]\n    money = [0, 13, 8, 6, 12]\n\n    array = [[0 for _ in range(mass+1)]for _ in range(5)]\n    for row in range(1, 5):\n        for col in range(1, mass+1):\n            if weight[row]> col:\n                array[row][col] = array[row-1][col]\n            else:\n                value1 = money[row] + array[row-1][col-weight[row]]\n                value2 = array[row-1][col]\n                array[row][col] = max(value1, value2)\n    return array[4][mass]\n\ndef exam4(n,k):\n    number = [ str(i) for i in range(1,n+1)]\n    current = k-1\n\n    for i in range(n-1):\n        if len(number) == 1:\n            return number[0]\n\n        number[current]= '0'\n        for j in range(k):\n            current = (current+1)%n\n            while( number[current] =='0'):\n                current = (current+1)%n\n\n    for i in range(n):\n        if number[i] !='0':\n            return int(number[i])\n\ndef exam5(array,x):\n    low = 0\n    high = len(array)-1\n    k = 0\n    while low <= high:\n        mid = low +(high-low+1)//2\n        k +=1\n        if array[mid] == x:\n            return k\n        elif array[mid] < x:\n            low = mid+1\n        elif array[mid]>x:\n            high = mid -1\n    return -1\n\ndef exam6(str1, str2):\n# initialization\n    dash = [[0 for _ in range(len(str1))] for _ in range(len(str2))]\n    for i in range(len(str1)):\n        dash[0][i] = i\n    for j in range(len(str2)):\n        dash[j][0] = j\n# fill in the DP matrix\n    for j in range(1, len(str2)):\n        for i in range(1, len(str1)):\n            maintain_replace = dash[j-1][i-1] + (0 if(str1[i-1] == str2[j-1]) else 1) # look at the diagonal (previous) value\n            insert = dash[j-1][i] +1 # value directly above +1: an insertion, so we stay in the current column\n            delete = dash[j][i-1] +1 # value to the left +1: a deletion, so we move to the next column\n            
            dash[j][i] =min(maintain_replace, insert, delete) # take the smallest of the candidate edit costs\n\n    return dash[-1][-1] # the last cell holds the minimum edit distance","repo_name":"tomatojams/python_project","sub_path":"자료구조/202220058 기말리포트.py","file_name":"202220058 기말리포트.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21380849185","text":"from fortnum import FortnumDescriptor\nfrom reportlab.platypus import Paragraph, Image\n\nfrom flexbox.options import AlignItems\nfrom flexbox.flex import FlexItem\n\nfrom PIL import Image as PILImage\n\n\nclass FlexFlowable(FlexItem):\n    flowable = None\n\n    vertical_align = FortnumDescriptor(\"vertical_align\", AlignItems, default=AlignItems.FlexCenter)\n    horizontal_align = FortnumDescriptor(\"horizontal_align\", AlignItems, default=AlignItems.FlexCenter)\n\n    def __init__(self, flowable, vertical_align=None, horizontal_align=None, **kwargs):\n        super().__init__(**kwargs)\n\n        self.flowable = flowable\n        self.vertical_align = vertical_align or self.vertical_align\n        self.horizontal_align = horizontal_align or self.horizontal_align\n\n    def wrap_content(self, avail_width, avail_height):\n        return self.flowable.wrap(avail_width, avail_height)\n\n    def draw_content(self, avail_width, avail_height, requested_width, requested_height):\n        self.flowable.drawOn(\n            self.canv,\n            self.vertical_align.point(requested_width, avail_width),\n            self.horizontal_align.point(requested_height, avail_height)\n        )\n\n\nclass FlexParagraph(FlexFlowable):\n    def __init__(self, text, style, **kwargs):\n        super().__init__(\n            Paragraph(text, style),\n            **kwargs\n        )\n\n\nclass FlexImage(FlexFlowable):\n    def __init__(self, image, **kwargs):\n        self.image = image\n\n        with PILImage.open(self.image) as img:\n            img_width, img_height = img.size\n            self.aspect = img_height / img_width\n\n        super().__init__(None, **kwargs)\n\n    def wrap_content(self, avail_width, avail_height):\n        height = avail_height\n        width = height / self.aspect\n\n        if width > avail_width:\n            height = height * avail_width / width\n            width = avail_width\n\n        self.flowable = Image(self.image, width, height)\n\n        return width, height\n","repo_name":"SverkerSbrg/reportlab-flexbox","sub_path":"flexbox/flex_flowable.py","file_name":"flex_flowable.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21005284828","text":"\"\"\"\nInternal helper class for a simple periodic task.\nbased on https://medium.com/greedygame-engineering/an-elegant-way-to-run-periodic-tasks-in-python-61b7c477b679\n\"\"\"\n\nfrom threading import Event, Thread\nfrom time import time\nfrom typing import Callable\n\nfrom fbclient.utils import log\n\n\nclass RepeatableTask(Thread):\n\n    def __init__(self, name: str, interval: float, callable: Callable, args=(), kwargs=None):\n        super().__init__(name=name, daemon=True)\n        self._interval = interval\n        self._callable = callable\n        self._stop = Event()\n        self._args = args\n        self._kwargs = {} if kwargs is None else kwargs\n\n    def stop(self):\n        log.info(\"FB Python SDK: %s repeatable task is stopping...\" % self.name)\n        self._stop.set()\n\n    def run(self):\n        log.debug(\"%s repeatable task is starting...\" % self.name)\n        stopped = self._stop.is_set()\n        while not stopped:\n            next_time = time() + self._interval\n            try:\n                self._callable(*self._args, **self._kwargs)\n            except Exception as e:\n                log.exception(\"FB Python SDK: unexpected exception on %s repeatable task: %s\" % (self.name, str(e)))\n
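            # schedule relative to the planned start time so a slow callable does not make the interval drift\n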
            delay = next_time - time()\n            stopped = self._stop.wait(delay) if delay > 0 else self._stop.is_set()\n","repo_name":"featbit/featbit-python-sdk","sub_path":"fbclient/utils/repeatable_task.py","file_name":"repeatable_task.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"16104357502","text":"class Solution(object):\n    def groupAnagrams(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: List[List[str]]\n        \"\"\"\n        result ={}\n        for word in strs:\n            sorted_word = str(sorted(word))\n            if sorted_word in result:\n                result[sorted_word].append(word)\n            else:\n                result[sorted_word] = [word]\n        \n        return list(result.values())\n    \n    \ns = Solution()\nprint(s.groupAnagrams([\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]))","repo_name":"rengo540/problem-solving","sub_path":"Group Anagrams.py","file_name":"Group Anagrams.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19488302182","text":"#\n# @lc app=leetcode.cn id=496 lang=python3\n#\n# [496] Next Greater Element I\n#\n# https://leetcode-cn.com/problems/next-greater-element-i/description/\n#\n# algorithms\n# Easy (65.80%)\n# Likes:    288\n# Dislikes: 0\n# Total Accepted:    48.7K\n# Total Submissions: 74K\n# Testcase Example:  '[4,1,2]\\n[1,3,4,2]'\n#\n# Given two arrays nums1 and nums2 with no duplicate elements, where nums1 is a subset of nums2, find, for each element of nums1, the next\n# greater value in nums2.\n# \n# The next greater element of a number x from nums1 is the first element greater than x to the right of x's position in nums2. If it does not\n# exist, output -1 for that position.\n# \n# \n# \n# Example 1:\n# \n# Input: nums1 = [4,1,2], nums2 = [1,3,4,2].\n# Output: [-1,3,-1]\n# Explanation:\n# For the number 4 in nums1, there is no next greater number in the second array, so output -1.\n# For the number 1 in nums1, the next greater number to its right in the second array is 3.\n# For the number 2 in nums1, there is no next greater number in the second array, so output -1.\n# \n# Example 2:\n# \n# Input: nums1 = [2,4], nums2 = [1,2,3,4].\n# Output: [3,-1]\n# Explanation:\n# For the number 2 in nums1, the next greater number in the second array is 3.\n# For the number 4 in nums1, there is no next greater number in the second array, so output -1.\n# \n# \n# \n# \n# Constraints:\n# \n# \n# All elements in nums1 and nums2 are unique.\n# The sizes of nums1 and nums2 are each at most 1000.\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n    def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:## push indices onto the stack from left to right\n        \n        dic,stack = {},[]\n        \n        for i in range(len(nums2)):\n            while stack and nums2[stack[-1]] <= nums2[i]:\n                dic[nums2[stack.pop()]] = nums2[i]\n            \n            stack.append(i)\n        \n        return [dic.get(num1,-1) for num1 in nums1]\n    \n    \n\n    \n# @lc code=end\n# def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:## brute force\n#     dic = dict()\n#     res = []\n    \n#     for i in range(len(nums2)):\n#         j = i + 1 \n#         while j < len(nums2) and nums2[j] <= nums2[i]:\n#             j += 1 \n        \n#         if j < len(nums2) and nums2[j] > nums2[i]:\n#             dic[nums2[i]] = nums2[j]\n        \n#     for num1 in nums1:\n#         res.append(dic[num1] if num1 in dic else -1)\n#     return res \n    \n    \n\n\n    # return res \n    # def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:## brute force\n    #     dic = dict()\n\n    \n    #     for i in range(len(nums2)):\n    #         for j in range(i+1,len(nums2)):\n    #             if nums2[j] > nums2[i]:\n    #                 dic[nums2[i]] = nums2[j]\n    #                 break \n    #     res = [dic.get(num1,-1) for num1 in nums1]\n    \n    #     return res \n    \n    \n    \n    # def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:## monotonic stack, scanned right to left\n    #     dic,stack = {},[]\n    #     for i in range(len(nums2)-1,-1,-1):\n    #         while stack and nums2[stack[-1]] <= nums2[i]:\n    #             stack.pop() \n    #         if stack:\n    #             dic[nums2[i]] = nums2[stack[-1]]\n    #         stack.append(i) \n        \n
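    # Note: both monotonic-stack versions run in O(len(nums1) + len(nums2)) time; the brute-force variants above are O(n*m).\n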
    # Approach: a monotonic stack finds the first number to the left or right that is larger (or smaller) than the current one. To find the first larger number on the right, feed the array\n    # into the stack from right to left and keep the stack non-increasing from top to bottom, because the bottom of the stack is the position closest to the right side; if a newly pushed\n    # number is greater than the top of the stack, pop the stack. This keeps the whole stack decreasing","repo_name":"ZhengyangXu/Algorithm-Daily-Practice","sub_path":"题源分类/LeetCode/LeetCode日刷/python/496.下一个更大元素-i.py","file_name":"496.下一个更大元素-i.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6600794633","text":"import re\nimport IPython.ipapi\n\ndef node_completer(self, event):\n    from tree import Node\n    symbol = event.symbol\n    s = event.line\n    if symbol:\n        s = s[:-len(symbol)]\n    quote = \"\"\n    if s and s[-1] in [\"'\", '\"']:\n        quote = s[-1]\n        s = s[:-1]\n    base = (re.findall(r'(\\w+)\\[\\Z', s) or [None])[-1]\n\n    ## print \"symbol:\", symbol\n    ## print \"line:\", event.line\n    ## print \"s:\", s\n    ## print \"quote:\", quote\n    ## print \"base:\", base\n    ## print \"obj:\", self._ofind(base).get(\"obj\")\n\n    obj = None\n    if base:\n        obj = self._ofind(base).get(\"obj\")\n    if obj and isinstance(obj, Node):\n        completions = [\"'\"]\n        if quote:\n            completions = sorted([ x.label for x in obj.labeled() ])\n        return completions\n\n    raise IPython.ipapi.TryNext\n    \ndef set_node_completer():\n    IPython.ipapi.get().set_hook(\n        \"complete_command\", node_completer, re_key=r'\\[*'\n    )\n","repo_name":"rhr/ivy","sub_path":"ivy/ivy_completers.py","file_name":"ivy_completers.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"28758082532","text":"\"\"\"\r\nComplete the method/function so that it converts dash/underscore delimited words into camel casing. \r\nThe first word within the output should be capitalized only if the original word was capitalized \r\n(known as Upper Camel Case, also often referred to as Pascal case).\r\n\r\nExamples\r\n\"the-stealth-warrior\" gets converted to \"theStealthWarrior\"\r\n\"The_Stealth_Warrior\" gets converted to \"TheStealthWarrior\"\r\n\"\"\"\r\n\r\ndef to_camel_case(text):\r\n    list = [c for c in text]\r\n\r\n    for c in range(len(list)):\r\n        if list[c] in ('_', '-'): # hello_world\r\n            list[c + 1] = list[c + 1].upper() # hello_World\r\n\r\n    return ''.join([c for c in list if c not in ('_', '-')])\r\n\r\n\r\nprint(to_camel_case('hello_world_My_name_is_Cheng'))\r\n","repo_name":"nmtrang/codewars","sub_path":"6kyu/Convert-string-to-camel-case.py","file_name":"Convert-string-to-camel-case.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11734232021","text":"import sys\nimport io\n\nINPUT = \"\"\"\\\n5\n1 2 3 4 5\n3\n3 4 1\n\"\"\"\n\nsys.stdin = io.StringIO(INPUT)\n\nn = int(input())\nS = list(map(int, input().split()))\nq = int(input())\nT = list(map(int, input().split()))\n\ndef binary_search(array: list[int], n: int, target: int):\n    left = 0\n    right = n\n\n    while right > left:\n        middle = (right + left) // 2\n        if target == array[middle]:\n            return True\n        elif target > array[middle]:\n            left = middle + 1\n        else:\n            right = middle\n\n    return False\n\ncount = 0\nfor t_value in T:\n    if binary_search(S, n, t_value):\n        count += 1\n\nassert count == 3","repo_name":"kuritaeiji/algorithm","sub_path":"search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13399250652","text":"import pygame\n
import random\nfrom Bubble import Bubble\n\nclass Bicycle:\n    def __init__(self, posX, posY, directionX=None):\n        # a default of random.uniform(1, 3) would be evaluated once at definition time, giving every bicycle the same speed\n        if directionX is None:\n            directionX = random.uniform(1, 3)\n        self.posX = posX\n        self.posY = posY\n        self.directionX = directionX\n        self.width = 73\n        self.height = 40\n        self.image = pygame.image.load(\"sprites/bicycle.png\")\n\n        if directionX < 0:\n            self.image = pygame.transform.flip(self.image, True, False)\n\n    def draw(self, screen):\n        screen.blit(self.image, (self.posX, self.posY))\n    \n    \n    def update(self, screen):\n        self.posX += self.directionX\n        screen_width, screen_height = screen.get_size()\n        if self.directionX > 0:\n            if self.posX + self.width + self.directionX > screen_width:\n                self.directionX = -random.uniform(1, 3)\n                self.image = pygame.transform.flip(self.image, True, False)\n        else:\n            if self.posX + self.directionX < 0:\n                self.directionX = random.uniform(1, 3)\n                self.image = pygame.transform.flip(self.image, True, False)\n\n","repo_name":"13hannes11/outyard-hackathon-rush-hour-co2","sub_path":"Bicycle.py","file_name":"Bicycle.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37063664663","text":"from django.core.urlresolvers import reverse\nfrom django.utils.translation import ungettext\n\n\ndef get_path_summary(path_obj):\n    \"\"\"Return a list of sentences to be displayed for each ``path_obj``.\"\"\"\n    goals_summary = []\n\n    # Build URL for getting more summary information for the current path.\n    url_path_summary_more = reverse('pootle-xhr-summary-more')\n\n    if path_obj.is_dir:\n        # Putting the next import at the top of the file causes circular\n        # import issues.\n        from pootle_tagging.models import Goal\n\n        pootle_path = path_obj.pootle_path\n        goal = Goal.get_most_important_incomplete_for_path(path_obj)\n\n        if goal is not None:\n            goal_words = goal.get_incomplete_words_in_path(path_obj)\n            goal_url = goal.get_translate_url_for_path(pootle_path,\n                                                       state='incomplete')\n            if goal_words > 0:\n
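                # the anchor markup in the format string below is assumed: it links to the goal's incomplete-words translate view\n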
                goals_summary.extend([\n                u'<a href=\"%(url)s\">' % {\n                    'url': goal_url,\n                },\n                ungettext(u'Next most important goal (%(num)d word left)',\n                          u'Next most important goal (%(num)d words left)',\n                          goal_words,\n                          {'num': goal_words, }),\n            ])\n\n    return {'is_dir': path_obj.is_dir,\n            'goals_summary': u''.join(goals_summary),\n            'summary_more_url': url_path_summary_more,\n            'translate_url': path_obj.get_translate_url(state='all'),\n            'incomplete_url': path_obj.get_translate_url(state='incomplete'),\n            'suggestions_url': path_obj.get_translate_url(state='suggestions')}\n\n\ndef stats_message_raw(version, total, translated, fuzzy):\n    \"\"\"Build a message of statistics used in VCS actions.\"\"\"\n    return \"%s: %d of %d strings translated (%d need review).\" % \\\n        (version, translated, total, fuzzy)\n","repo_name":"qdinar/pootle","sub_path":"pootle/apps/pootle_misc/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"18389614814","text":"from google_drive_downloader import GoogleDriveDownloader as gdd\ngdd.download_file_from_google_drive(file_id='0BxYys69jI14kYVM3aVhKS1VhRUk',\n                                    dest_path='/content/UTKFace.tar.gz')\nimport tarfile\ntar = tarfile.open(\"/content/UTKFace.tar.gz\")\ntar.extractall()\ntar.close()\nimport numpy as np\nimport os  # needed for os.listdir below; missing in the original\nlabel = np.ones((23708,2),int)\na = os.listdir('/content/UTKFace')\nfor i in range(len(a)):\n  b = str(a[i])\n  if(ord(b[1])==95):\n    label[i,0] = int(b[0])\n    label[i,1] = int(b[2])\n  elif(ord(b[2])==95):\n    label[i,0] = int(b[0:2])\n    label[i,1] = int(b[3])\n  elif(ord(b[3])==95):\n    label[i,0] = int(b[0:3])\n    label[i,1] = int(b[4])\na = np.array(a)\na = np.reshape(a,(23708,1))\nc = np.concatenate((a,label),axis = 1)\nimport pandas as pd\ndata = pd.DataFrame(c)\ndata.to_csv('label.csv')","repo_name":"sanchit2843/MLBasics","sub_path":"AgeDetect/Label_creation.py","file_name":"Label_creation.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"16"} +{"seq_id":"71613359689","text":"import tensorflow as tf\nimport os\nimport time\nimport datetime\nimport argparse\nimport pandas as pd\nimport numpy as np\n\n#input example\n# %Run HW1_ex1_Group3.py --input dht.csv --output dhttf.tfrecord --normalize False\n\n#create argument parser\nparser = argparse.ArgumentParser(description='definition of parameters')\nparser.add_argument('--input', default = 'dht.csv',\n                    help='input csv file with recordings',\n                    required=True)\nparser.add_argument('--output', default = 'dhttf.tfrecord',\n                    help='output tfrecords file',\n                    required=True)\nparser.add_argument('--normalize', default = False,\n                    type=lambda s: str(s).lower() == 'true',  # argparse passes strings, so '--normalize False' would otherwise be truthy\n                    help='flag for normalize input')\nargs = parser.parse_args()\n\n\n\n# Taking the parameters from command line\ninput_file = args.input\noutput_file = args.output\nflg_normalization = args.normalize\n\n# creating a table using pandas frame\nd_types={'date':str, 'time':str, 'temperature':int, 'humidity':int}\ncolumns = ['date', 'time', 'temperature', 'humidity']\ndf = pd.read_csv(input_file, sep=',', dtype=d_types, header=None, names=columns)\n\n\n#\"%d/%m/%Y %H:%M:%S\"\n#convert datetime format to posix \n\ndt = df['date']+ \" \" +df['time']\nfor e in range(len(dt)):\n    dt.loc[e] = time.mktime(datetime.datetime.strptime(dt.loc[e], \"%d/%m/%Y %H:%M:%S\").timetuple())\n\n#example output for timestamp\n#1635861286.0\n\n#create numpy array for tfrecord\nfilename = output_file #'/home/pi/WORK_DIR/homework1/dhttf.tfrecord'\nx1 = dt.to_numpy()\n
x2 = df['temperature'].to_numpy()\nx3 = df['humidity'].to_numpy()\n\n#boundaries taken from datasheet\nmin_temp = 0\nmax_temp = 50\nmin_hum = 20\nmax_hum = 90\n\n#flag for normalization\nif (flg_normalization):\n    for i in range (len(x2)):\n        x2[i] = (x2[i] - min_temp) / (max_temp - min_temp) #for temperature\n        x3[i] = (x3[i] - min_hum) / (max_hum - min_hum)    #for humidity\n\nwith tf.io.TFRecordWriter(filename) as writer:\n    for i in range(len(df)):\n        #create features for tfrecord\n        x1_feature = tf.train.Feature(float_list=tf.train.FloatList(value=[x1[i]]))\n        x2_feature = tf.train.Feature(float_list=tf.train.FloatList(value=[x2[i]]))\n        x3_feature = tf.train.Feature(float_list=tf.train.FloatList(value=[x3[i]]))\n        # distinct keys so all three features are kept; the original repeated the key 'float', which silently dropped two of them (these names are illustrative)\n        mapping = {'datetime': x1_feature,\n                   'temperature' : x2_feature,\n                   'humidity' : x3_feature} \n        example = tf.train.Example(features=tf.train.Features(feature=mapping))\n        writer.write(example.SerializeToString())\n        \n        \n#print(\"CSV file size: \", str(os.path.getsize(input_file))+str(\"B\"))\n#print(\"TfRecord file size: \", str(os.path.getsize(output_file)) + str(\"B\"))\n    \nprint(str(os.path.getsize(output_file)) + str(\"B\"))\n\n","repo_name":"fabioeterno/ML4IoT","sub_path":"HW1/HW1_ex1_Group3.py","file_name":"HW1_ex1_Group3.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28275152213","text":"# Perform a normal binary search. Do division and modulus when accessing the elements of the matrix\n\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        \n        if not matrix or not matrix[0]:\n            return False\n        \n        m, n = len(matrix), len(matrix[0])\n        \n        low, high = 0, (m*n)-1\n        \n        while low<=high:\n            mid = (low+high)//2\n            \n            r = mid // n\n            c = mid % n\n            \n            if target==matrix[r][c]:\n                return True\n            elif target < matrix[r][c]:\n                high = mid - 1\n            else:\n                low = mid + 1\n        \n        return False","repo_name":"rohitpatwa/leetcode","sub_path":"74. Search a 2D Matrix.py","file_name":"74. 
Search a 2D Matrix.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35021631865","text":"# built on the basis of:\n# https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html\n\n# ---------- modify the following lines to adapt to your data\n\nfilename = '../tcat_trump_full.csv'\t\t\t\t\t\t# file to use for training and testing\ncol_text = 'text'\t\t\t\t\t\t\t\t\t\t# name of the text column\ncol_label = 'source'\t\t\t\t\t\t\t\t\t# name of the label column\ntype_classifier = 'svm'\t\t\t\t\t\t\t\t\t# use 'bayes' for the multinominal Bayes classifier and 'svm' for support vector machine\nuse_tfidf = False\t\t\t\t\t\t\t\t\t\t# whether to use tf*idf term weighting (depending on the data, one or the other may work better)\nfrequency_cutoff = 3\t\t\t\t\t\t\t\t\t# minimum frequency for word (and bigrams) to take into account\nno_features = 10\t\t\t\t\t\t\t\t\t\t# number of most important features to show\n\n\n\n# ---------- only modify what follows if you are confident\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import accuracy_score\n\n\n# read data file\ndf = pd.read_csv(filename,delimiter=',',encoding='utf-8')\n\n# some info about the dataset, in particular column names\nprint(df.info(),'\\n\\n---')\n\n# data selection\nX = df[col_text].astype('U')\t\t\t# text column\nY = df[col_label].astype(str)\t\t\t# label column\n\n# clean out the HTML for twitter source variable\nY.replace(\"<.+?>\",\"\", regex=True, inplace=True)\n\n# transform categories into numbers\nle = LabelEncoder()\t\t\t\t\nY = le.fit_transform(Y)\n\n# cutting the dataset into training (85%) and testing (15%) data \nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.15)\n\n# see: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\nif use_tfidf == False:\n\tcount_vect = CountVectorizer(ngram_range=(1,2),min_df=frequency_cutoff,stop_words='english')\t\t# 1- and 2-gram vectorizer\nelse:\n\tcount_vect = TfidfVectorizer(ngram_range=(1, 2),min_df=frequency_cutoff,stop_words='english')\t\t# 1- and 2-gram vectorizer with tf-idf transformation (depending on the data, this may work better or not)\n\n# vectorize and weigh training data \nX_train_counts = count_vect.fit_transform(X_train)\n\n# vectorize and weigh test data\nX_test_counts = count_vect.transform(X_test)\n\n# train the classifier\nif type_classifier == 'bayes':\n\tclf = MultinomialNB()\nelse:\n\tclf = SGDClassifier(loss='hinge',penalty='l2',alpha=1e-3,random_state=42,max_iter=50,tol=1e-3)\n\n# train the model\nclf.fit(X_train_counts,Y_train)\n\n# apply model to the test data\npredicted = clf.predict(X_test_counts)\n\n# create output to get an idea\ncounter = 0\nfor doc, category in zip(X_test, predicted):\n\tprint('\\n%r => %s' % (doc, le.classes_[category]))\n\tcounter += 1\n\tif(counter > 5):\n\t\tbreak;\n\n# calculate and print accuracy score\nprint('\\n---\\n\\naccuracy score: %r\\n\\n---' % accuracy_score(Y_test,predicted))\n\n# show most informative features (may fail if there are sparse labels)\nfeature_names = count_vect.get_feature_names()\n\nprint('\\nmost informative features (high to 
low):\n')\n\n# two paths needed since binary and multiclass result structures are quite different\nif len(le.classes_) == 2:\n\tout = \"\\t\"\n\tfor label in le.classes_:\n\t\tout += '%-28s' % label\n\tprint(out)\n\tcoefs_with_fns = sorted(zip(clf.coef_[0], feature_names))\n\ttop = zip(coefs_with_fns[:no_features], coefs_with_fns[:-(no_features + 1):-1])\n\tfor (coef_1, fn_1), (coef_2, fn_2) in top:\n\t\tprint('\\t%.4f\\t%-15s\\t\\t%.4f\\t%-15s' % (coef_1, fn_1, coef_2, fn_2))\nelse:\n\tlongest = len(max(le.classes_, key=len))\n\tfor i, class_label in enumerate(le.classes_):\n\t\ttop = np.argsort(clf.coef_[i])[-no_features:]\n\t\tprint('{0: <{1}}'.format(class_label, longest),\" \".join(feature_names[j] for j in top[::-1]))","repo_name":"bernorieder/hybridclassification","sub_path":"bayes/sklearn_test.py","file_name":"sklearn_test.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"34573375646","text":"\n# accepting the length of the list from the user\nfrom array import array\n\n\nn=int(input())\n# accepting the values of the list from the user\nnum_list=map(int,input().split())\n\nnum_list=sorted(num_list,reverse=True)\n\nfor i in range(len(num_list)):\n    if(num_list[i]==num_list[0]):\n        continue\n    else:\n        print(\"Runner-Up in list is:\",num_list[i])\n        break\n\n\n\nprint(\"\\nId : 20CS074 \")\nprint(\"Name : Mandar Sanghavi\\n\")\n","repo_name":"20CS074Mandar/CSE259-Python-Practicals-","sub_path":"Practical-4/20CS074_Practic_4_PIP.py","file_name":"20CS074_Practic_4_PIP.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75241261767","text":"import asyncio\nimport json\nimport logging\n\nfrom bpprosdk.websockets.subscriptions import OrderBookSubscription, Subscriptions\nfrom bpprosdk.websockets.websocket_client_advanced import AdvancedBitpandaProWebsocketClient\n\n\nasync def main():\n    when_msg_received = asyncio.get_event_loop().create_future()\n\n    async def handle_message(event: json):\n        LOG.info(\"%s\", event)\n        if event[\"type\"] == \"ORDER_BOOK_SNAPSHOT\":\n            when_msg_received.set_result(\"snapshot received...\")\n\n    bp_client = AdvancedBitpandaProWebsocketClient(\n        api_token=None,\n        wss_host=\"wss://streams.exchange.bitpanda.com\",\n        callback=handle_message\n    )\n\n    order_book_subscription = OrderBookSubscription([\"BTC_EUR\"])\n    # Order book subscription without ACCOUNT_HISTORY, TRADING & ORDERS channel\n    await bp_client.start_with(Subscriptions([order_book_subscription]), False)\n\n    await when_msg_received\n    LOG.info(\"asks book BTC_EUR: %s\", bp_client.get_order_book(\"BTC_EUR\").asks)\n    LOG.info(\"bids BTC_EUR: %s\", bp_client.get_order_book(\"BTC_EUR\").bids)\n    await bp_client.close()\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.INFO, format=\"%(asctime)s\\t%(levelname)-5s\\t%(name)s\\t%(message)s\")\n    LOG = logging.getLogger(__name__)\n\n    asyncio.run(main())\n","repo_name":"Tibi-Bitpanda/bitpanda-pro-sdk-py","sub_path":"bpprosdk/examples/websockets/advanced_client_order_book_channel.py","file_name":"advanced_client_order_book_channel.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1218516392","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom 
django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth import authenticate, login, logout\nimport re\nfrom apps.user.models import User, Address\nfrom apps.goods.models import GoodsSKU\nfrom apps.order.models import OrderInfo, OrderGoods\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom itsdangerous import SignatureExpired\nfrom apps.user.tasks import email_to_activate_user\n# from utils.mixin import LoginRequiredMixin\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django_redis import get_redis_connection\n\n\n# /user/register\nclass RegisterView(View):\n    \"\"\"Register\"\"\"\n    def get(self, request):\n        \"\"\"Show the registration page\"\"\"\n        return render(request, 'register.html')\n\n    def post(self, request):\n        \"\"\"Handle registration\"\"\"\n        # 1. Get the data\n        username = request.POST.get('user_name')\n        password = request.POST.get('pwd')\n        cpassword = request.POST.get('cpwd')\n        email = request.POST.get('email')\n        allow = request.POST.get('allow')\n\n        # 2. Validate the data\n        if not all([username, password, cpassword, email]):\n            return render(request, 'register.html', {'err_msg': 'Incomplete data!'})\n\n        if password != cpassword:\n            return render(request, 'register.html', {'err_msg': 'The two passwords do not match!'})\n\n        if not re.match(r'^[a-z0-9][\\w.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', email):\n            return render(request, 'register.html', {'err_msg': 'Invalid email format!'})\n\n        if allow != 'on':\n            return render(request, 'register.html', {'err_msg': 'Please agree to the user agreement!'})\n\n        # Check whether the user already exists\n        try:\n            user = User.objects.get(username=username)\n        except User.DoesNotExist:\n            user = None\n\n        if user:\n            # user already exists\n            return render(request, 'register.html', {'err_msg': 'User already exists!'})\n\n        # 3. Business logic\n        user = User.objects.create_user(username, email, password)\n        user.is_active = 0\n        user.save()\n\n        # 4. Activate the user (send the activation email)\n        email_to_activate_user.delay(username, user.id, email)\n\n        # 5. Return a response\n        return redirect(reverse('goods:index'))\n\n\nclass ActiveView(View):\n    \"\"\"User activation\"\"\"\n    def get(self, request, token):\n        \"\"\"Perform user activation\"\"\"\n        # 1. Decrypt the token\n        serializer = Serializer(settings.SECRET_KEY, 3600)\n        try:\n            user_info = serializer.loads(token)\n            user_id = user_info['confirm']\n            print(user_info)\n            user = User.objects.get(id=user_id)\n            user.is_active = 1\n            user.save()\n\n            # after activation, redirect to the login page\n            return redirect(reverse('user:login'))\n\n        except SignatureExpired as e:\n            # activation link timed out\n            return HttpResponse('The activation link has expired!')\n\n\n# user/login\nclass LoginView(View):\n    \"\"\"Log in\"\"\"\n    def get(self, request):\n        \"\"\"Show the login page\"\"\"\n        # check whether the username was remembered\n        if 'username' in request.COOKIES:\n            username = request.COOKIES.get('username')\n            checked = 'checked'\n        else:\n            username = ''\n            checked = ''\n\n        return render(request, 'login.html', {'username': username, 'checked': checked})\n\n    def post(self, request):\n        # 1. Get the data\n        username = request.POST.get('username')\n        password = request.POST.get('pwd')\n\n        # 2. Validate the data\n        if not all([username, password]):\n            return render(request, 'login.html', {'err_msg': 'Incomplete data!'})\n\n        # 3. Business logic: login check\n        user = authenticate(username=username, password=password)\n        if user is not None:\n            # username and password are correct\n            if user.is_active:\n                # user is activated\n                login(request, user)  # record the user's login state\n\n                # get the page to redirect to after a successful login\n                # if 'next' is present redirect there, otherwise go to the index page\n                next_url = request.GET.get('next', reverse('goods:index'))\n                response = redirect(next_url)\n\n                # whether to remember the username\n                remember = request.POST.get('remember')\n                if remember == 'on':\n                    response.set_cookie('username', username, max_age=7*24*3600)\n                else:\n                    response.delete_cookie('username')\n\n                # redirect to the target page\n                return 
response\n            else:\n                # user not activated\n                return render(request, 'login.html', {'err_msg': 'Account not activated!'})\n        else:\n            # wrong username or password\n            return render(request, 'login.html', {'err_msg': 'Incorrect username or password!'})\n\n\n# user/logout\nclass LogoutView(View):\n    \"\"\"Log out\"\"\"\n    def get(self, request):\n        logout(request)\n        return redirect(reverse('goods:index'))\n\n\n# /user\nclass UserInfoView(LoginRequiredMixin, View):\n    \"\"\"User center - info page\"\"\"\n\n    def get(self, request):\n        # display\n        current_page = 'info'\n\n        # basic information\n        user = request.user\n        address = Address.objects.get_default_address(user)\n\n        # recently viewed: stored in a redis list\n        # listName[item, item, ....]----->history_userID[skuID1, skuID2, ...]\n        con = get_redis_connection('default')\n        history_key = 'history_%d' % user.id\n\n        # get the ids of the 5 most recently viewed products\n        sku_ids = con.lrange(history_key, 0, 4)  # ->[2, 4, 1]\n\n        # look up the product details in the database\n        goods_list = []\n        for sku_id in sku_ids:\n            goods = GoodsSKU.objects.get(id=sku_id)\n            goods_list.append(goods)\n\n        context = {\n            'current_page': current_page,\n            'address': address,\n            'goods_list': goods_list\n        }\n\n        return render(request, 'user_center_info.html', context)\n\n\n# /user/order\nclass UserOrderView(LoginRequiredMixin, View):\n    \"\"\"User center - orders page\"\"\"\n    def get(self, request, page):\n        # display\n\n        # get the order info\n        user = request.user\n        orders = OrderInfo.objects.filter(user=user)\n\n        # iterate to fetch the goods in each order\n        for order in orders:\n            order_skus = OrderGoods.objects.filter(order_id=order.order_id)  # the set of goods in each order\n\n            # iterate over the goods to compute each subtotal\n            for order_sku in order_skus:\n                order_sku.amount = order_sku.count * order_sku.price  # dynamically attach the subtotal to the goods model\n\n            order.order_skus = order_skus  # the goods belonging to this order\n\n            order.status_name = OrderInfo.ORDER_STATUS[order.order_status]\n\n        # pagination\n        paginator = Paginator(orders, 1)\n\n        # get the Page instance for the requested page\n        try:\n            page = int(page)\n        except Exception as e:\n            page = 1\n\n        if page > paginator.num_pages:\n            page = 1\n\n        order_page = paginator.page(page)\n\n        # custom page-number control, show at most 5 page numbers (paginator.page_range would list them all)\n        # 1. if there are fewer than 5 pages, show all of them\n        # 2. for the first 3 pages, show pages 1-5\n        # 3. for the last 3 pages, show the last 5 pages\n        # 4. otherwise, show the two pages before, the current page, and the two pages after\n        num_pages = paginator.num_pages\n        if num_pages < 5:\n            pages = range(1, num_pages + 1)\n        elif page < 3:\n            pages = range(1, 6)\n        elif page >= num_pages - 2:\n            pages = range(num_pages - 4, num_pages + 1)\n        else:\n            pages = range(page - 2, page + 3)\n\n        current_page = 'order'\n\n        # assemble the context\n        context = {\n            'order_page': order_page,\n            'pages': pages,\n            'current_page': current_page\n        }\n\n        return render(request, 'user_center_order.html', context)\n\n\n# /user/address\nclass UserAddressView(LoginRequiredMixin, View):\n    \"\"\"User center - address page\"\"\"\n    def get(self, request):\n        \"\"\" Show the shipping address \"\"\"\n        current_page = 'address'\n\n        user = request.user\n\n        address = Address.objects.get_default_address(user)\n\n        return render(request, 'user_center_site.html', {'current_page': current_page, 'address': address})\n\n    # TODO inserting emoji causes problems\n    def post(self, request):\n        \"\"\" Add a new shipping address \"\"\"\n        # receive the data\n        receiver = request.POST.get('receiver')\n        addr = request.POST.get('addr')\n        zip_code = request.POST.get('zip_code')\n        phone = request.POST.get('phone')\n\n        # validate the data\n        if not all([receiver, addr, phone]):\n            # check completeness\n            return render(request, 'user_center_site.html', {'err_msg': 'Incomplete data!'})\n\n        if not re.match(r'^1[3|4|5|7|8|][0-9]{9}$', phone):\n            # validate the phone number\n            return render(request, 'user_center_site.html', {'err_msg': 'Invalid phone number format!'})\n\n        # business logic: add the address\n        # if the user already has a default shipping address, the new one is not the default; otherwise it becomes the default\n        user = request.user\n\n        address = Address.objects.get_default_address(user)\n\n        if address:\n            is_default = False\n        else:\n            is_default = True\n\n
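        # Address.objects.create() instantiates and saves the new address in a single call\n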
        Address.objects.create(user=user,\n                               receiver=receiver,\n                               addr=addr,\n                               zip_code=zip_code,\n                               phone=phone,\n                               is_default=is_default)\n\n        # return a response\n        return redirect(reverse('user:address'))\n","repo_name":"DamonMok/dailyfresh","sub_path":"apps/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40131967624","text":"import os\nimport time\n\nimport jinja2\n\nfrom lib.notion_client import Notion\nfrom exactractor import github\nfrom notifier.qiita import Qiita\n\n\ndef main():\n    program_languages = os.getenv(\"PROGRAM_LANGUAGES\").split(\",\")\n    notion_database_id = os.getenv(\"NOTION_DATABAE_ID\")\n    article_ids = os.getenv(\"ARTICLE_IDS\").split(\",\")\n    notion_client = Notion()\n    all_languages_trends = github.fetch_trends()\n    # NOTE: load into Notion\n    for trend in all_languages_trends:\n        print(trend)\n        notion_properties = {\n            \"repo_name\": {\"title\": [{\"text\": {\"content\": trend[\"title\"]}}]},\n            \"language\": {\"multi_select\": [{\"name\": trend[\"language\"]}]},\n            \"url\": {\"url\": trend[\"url\"]},\n            \"description\": {\"rich_text\": [{\"text\": {\"content\": trend[\"description\"]}}]},\n            \"description_ja\": {\n                \"rich_text\": [{\"text\": {\"content\": trend[\"description_ja\"]}}]\n            },\n            \"star\": {\"rich_text\": [{\"text\": {\"content\": trend[\"star\"]}}]},\n        }\n        if trend[\"twitter_url\"] is not None:\n            notion_properties[\"twitter_url\"] = {\"url\": trend[\"twitter_url\"]}\n        print(notion_properties)\n        notion_client.create_database(\n            database_id=notion_database_id, notion_properties=notion_properties\n        )\n        time.sleep(0.5)\n\n    # NOTE: post to Qiita\n    for program_language, article_id in zip(program_languages, article_ids):\n        rs = github.get_trends(language=program_language)\n        # NOTE: assemble the markdown\n        fileSystemLoader = jinja2.FileSystemLoader(\n            searchpath=f\"{os.getcwd()}/notify_ghtrend/resource\"\n        )\n        env = jinja2.Environment(loader=fileSystemLoader)\n        template = env.get_template(\"test.tpl\")\n        md_content = template.render({\"language\": program_language, \"repos\": rs})\n\n        # NOTE: post to the external service\n        title = f\"{program_language} GitHub trending daily ranking!! (auto-updated)\"\n        # Qiita().post(program_language, title, md_content)\n        Qiita().update(program_language, title, md_content, article_id)\n        time.sleep(1.5)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Higakinn/notify-github-trend","sub_path":"notify_ghtrend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14321552033","text":"def duck_no(s):\n\tif s[0]=='0':\n\t\treturn False\n\telif s[1::].count('0')!=0:\n\t\treturn True\n\telse:\n\t\treturn False\nn=int(input(\"enter range:-\"))\nfor i in range(1,n+1):\n\tif duck_no(str(i)):\n\t\tprint(i)","repo_name":"Sur818/Coding-Projects","sub_path":"python programming/func85duck_no range.py","file_name":"func85duck_no range.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10932308053","text":"# coding=utf-8\n\nimport urllib.request\nimport urllib.parse\n\n\npost_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname'\n\ncity = input('City: ')\n\nform_data = {\n    'cname': city,\n    'pid': '',\n    'pageIndex': '1',\n    'pageSize': '10',\n}\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n}\n\nrequest = urllib.request.Request(url=post_url, headers=headers)\nform_data = urllib.parse.urlencode(form_data).encode()\nresponse = urllib.request.urlopen(request, data=form_data)\n\nprint(response.read().decode())\n","repo_name":"pipoted/spider2","sub_path":"learn/python3/ajax_post.py","file_name":"ajax_post.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74884866889","text":"import sys\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom PyQt4.QtCore import QThread,SIGNAL\n\nfrom patient import Patient\n\nclass GSW(QtGui.QWidget):\n\n\n    def __init__(self):\n        super(GSW,self).__init__()\n        self.patient=Patient(self)\n        self.initUI()\n\n    # the GUI contains two major layouts, patient_states and operation_options\n    # the main thread here is the GUI\n    # the second thread is the patient\n    # we grab all the data we have from the second thread and display it on the GUI (main thread)\n    # the GUI interacts with the second thread using signals\n    def initUI(self):\n        self.mainlayout=QtGui.QVBoxLayout()\n        self.patient_states=QtGui.QBoxLayout(0x1)\n        self.mainlayout.addLayout(self.patient_states)\n        self.add_States_to_layout()\n        self.operation_options=QtGui.QBoxLayout(0x1)\n        self.mainlayout.addLayout(self.operation_options)\n        self.setLayout(self.mainlayout)\n        self.setWindowTitle('GSW')\n        self.connect(self.patient,SIGNAL(\"patient_state_changed\"),self.State_changed)\n        self.connect(self.patient,SIGNAL(\"Patient Cured\"),self.Patient_Cured)\n        self.emit(SIGNAL(\"Initialize patient\"))\n        self.show()\n\n    def State_changed(self):\n        print(\"show changed states on GUI\")\n        self.add_Button_to_layout()\n        self.set_States_to_layout()\n\n\n    def set_States_to_layout(self):\n        print(\"start set_States_to_layout\")\n        self.HR.setPlainText(\"HR\"+\"\\r\\n\"+str(self.patient.states[0]))\n        self.BPH.setPlainText(\"BPH\"+\"\\r\\n\"+str(self.patient.states[1]))\n        self.BPL.setPlainText(\"BPL\"+\"\\r\\n\"+str(self.patient.states[2]))\n        self.RR.setPlainText(\"RR\"+\"\\r\\n\"+str(self.patient.states[3]))\n        self.SatsPercentage.setPlainText(\"SatsPercentage\"+\"\\r\\n\"+str(self.patient.states[4]))\n        self.SatsDescription.setPlainText(\"SatsDescription\"+\"\\r\\n\"+str(self.patient.states[5]))\n        print(\"finished set_States_to_layout\")\n\n\n    def add_States_to_layout(self):\n        print(\"start add_States_to_layout\")\n        self.HR = QtGui.QPlainTextEdit(\"HR\")\n        self.BPH = QtGui.QPlainTextEdit(\"BPH\")\n        self.BPL = QtGui.QPlainTextEdit(\"BPL\")\n        self.RR = QtGui.QPlainTextEdit(\"RR\")\n        self.SatsPercentage = QtGui.QPlainTextEdit(\"SatsPercentage\")\n        self.SatsDescription = QtGui.QPlainTextEdit(\"SatsDescription\")\n        self.patient_states.addWidget(self.HR )\n        self.patient_states.addWidget(self.BPH)\n        self.patient_states.addWidget(self.BPL)\n        self.patient_states.addWidget(self.RR)\n        self.patient_states.addWidget(self.SatsPercentage)\n        self.patient_states.addWidget(self.SatsDescription)\n\n    def add_Button_to_layout(self):\n        i=0\n        print(\"starting loop of adding buttons\")\n        for i in reversed(range(self.operation_options.count())):\n            self.operation_options.itemAt(i).widget().setParent(None)\n        if self.patient.next_states!=[] :\n            for name in self.patient.next_states:\n                print(\"adding Buttons\")\n                new_buttons=QtGui.QPushButton(name)\n                self.operation_options.addWidget(new_buttons)\n                new_buttons.clicked.connect(self.button_clicked)\n            print(\"finished adding\")\n\n    def 
button_clicked(self):\n        self.emit(SIGNAL(self.sender().text()))\n\n    def Patient_Cured(self):\n        reply = QtGui.QMessageBox.question(self, 'Message',\"Patient is cured\")\n        # NOTE: past_treatment is never created in initUI, so the next two lines raise AttributeError as written\n        self.past_treatment.setText(self.past_treatment.toPlainText()+\"\\r\\n\"+self.sender().text())\n        self.mainlayout.patientstate.adjustSize()\n\n    \nif __name__ == '__main__':\n    \n    app = QtGui.QApplication(sys.argv)\n    ex = GSW()\n    sys.exit(app.exec_())\n","repo_name":"ssdfx007008/Patient_Simulation","sub_path":"GSW_orig.py","file_name":"GSW_orig.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27045268258","text":"#looping through a string\n\nfruit = 'banana'\nindex = 0\nwhile index < len(fruit):\n    letter = fruit[index]\n    print(index , letter)\n    index = index +1\n\n#counting letters\n\nword = 'freedman'\ncount= 0\n\nfor letters in word:\n    if letters == 'e':\n        count= count + 1\nprint(count)\n\n#slicing strings. note that the spaces between words are counted too\ns = 'Monty python'\nprint(s[0:4])\n\nprint(s[6:12])\n\nprint(s[8:])\n\nprint(s[:])","repo_name":"Kaynzei/python","sub_path":"strings1.py","file_name":"strings1.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10120143341","text":"import threading\n\nfrom neuro_comma.predict import RepunctPredictor\n\n\nclass ModelCache(object):\n    \"\"\" \"\"\"\n    __shared_state = {\n        \"_model\": None,\n        \"_lock\": threading.Lock()\n    }\n\n    def __init__(self):\n        self.__dict__ = self.__shared_state\n\n    def load_model(self):\n        if self._model is None:\n            with self._lock:\n                if self._model is None:\n                    # load the model\n                    self._model = RepunctPredictor('repunct-model', model_weights='weights_ep4_9910.pt')\n\n    @property\n    def model(self) -> RepunctPredictor:\n        self.load_model()\n        return self._model\n","repo_name":"sviperm/neuro-comma","sub_path":"src/neuro_comma/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"16"} +{"seq_id":"3043840102","text":"import numpy as np\nimport gym\nimport operator\nimport random, copy\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom common.networks import get_model\nfrom dqn import DQN, DQNBase\nfrom equilibrium_solver import NashEquilibriumECOSSolver\n\nclass NashDQN(DQN):\n    \"\"\"\n    Nash-DQN algorithm\n    \"\"\"\n    def __init__(self, env, args):\n        super().__init__(env, args)\n\n        if args.num_process > 1:\n            self.model.share_memory()\n            self.target.share_memory()\n        self.num_agents = env.num_agents[0] if isinstance(env.num_agents, list) else env.num_agents\n        self.env = env\n        self.args = args\n\n        # don't forget to instantiate an optimizer although there is one in DQN\n        self._init_optimizer(args)\n\n    def _init_model(self, env, args):\n        \"\"\"Overwrite DQN's models\n\n        :param env: environment\n        :type env: object\n        :param args: arguments\n        :type args: dict\n        \"\"\"\n        self.model = NashDQNBase(env, args.net_architecture, args.num_envs, two_side_obs = args.marl_spec['global_state']).to(self.device)\n        print(self.model)\n        self.target = copy.deepcopy(self.model).to(self.device)\n\n    def choose_action(self, state, Greedy=False, epsilon=None):\n        if Greedy:\n            epsilon = 0.\n        elif epsilon is None:\n            epsilon = self.epsilon_scheduler.get_epsilon()\n        if not isinstance(state, torch.Tensor):\n            state = 
torch.Tensor(state).to(self.device)\n if self.args.ram:\n if self.args.num_envs == 1: # state: (agents, state_dim)\n state = state.unsqueeze(0).view(1, -1) # change state from (agents, state_dim) to (1, agents*state_dim)\n else: # state: (agents, envs, state_dim)\n state = torch.transpose(state, 0, 1) # to state: (envs, agents, state_dim)\n state = state.view(state.shape[0], -1) # to state: (envs, agents*state_dim)\n else: # image-based input\n if self.args.num_envs == 1: # state: (agents, C, H, W)\n state = state.unsqueeze(0).view(1, -1, state.shape[-2], state.shape[-1]) # (1, agents*C, H, W)\n\n else: # state: (agents, envs, C, H, W)\n state = torch.transpose(state, 0, 1) # state: (envs, agents, C, H, W)\n state = state.view(state.shape[0], -1, state.shape[-2], state.shape[-1]) # state: (envs, agents*C, H, W)\n\n if random.random() > epsilon: # NoisyNet does not use e-greedy\n with torch.no_grad():\n q_values = self.model(state).detach().cpu().numpy() # needs state: (batch, agents*state_dim)\n\n try: # nash computation may report error and terminate the process\n actions, dists, ne_vs = self.compute_nash(q_values)\n except:\n print(\"Invalid nash computation.\")\n actions = np.random.randint(self.action_dim, size=(state.shape[0], self.num_agents))\n\n else:\n actions = np.random.randint(self.action_dim, size=(state.shape[0], self.num_agents)) # (envs, agents)\n \n if self.args.num_envs == 1:\n actions = actions[0] # list of actions to its item\n else:\n actions = np.array(actions).T # to shape: (agents, envs, action_dim)\n return actions\n\n def compute_nash(self, q_values, update=False):\n q_tables = q_values.reshape(-1, self.action_dim, self.action_dim)\n all_actions = []\n all_dists = []\n all_ne_values = []\n\n for q_table in q_tables:\n dist, value = NashEquilibriumECOSSolver(q_table)\n all_dists.append(dist)\n all_ne_values.append(value)\n\n if update:\n return all_dists, all_ne_values\n else:\n # Sample actions from Nash strategies\n for ne in all_dists:\n actions = []\n for dist in ne: # iterate over agents\n try:\n sample_hist = np.random.multinomial(1, dist) # return one-hot vectors as sample from multinomial\n except:\n print('Not a valid distribution from Nash equilibrium solution: ', dist)\n a = np.where(sample_hist>0)\n actions.append(a)\n all_actions.append(np.array(actions).reshape(-1))\n\n return np.array(all_actions), all_dists, all_ne_values\n\n def update(self):\n DoubleTrick = False\n state, action, reward, next_state, done = self.buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state).to(self.device)\n next_state = torch.FloatTensor(next_state).to(self.device)\n action = torch.IntTensor(action).to(self.device)\n reward = torch.FloatTensor(reward).to(self.device)\n done = torch.FloatTensor(np.float32(done)).to(self.device)\n\n # Q-Learning with target network\n q_values = self.model(state)\n target_next_q_values_ = self.model(next_state) if DoubleTrick else self.target(next_state)\n target_next_q_values = target_next_q_values_.detach().cpu().numpy()\n\n action_ = torch.LongTensor([a[0]*self.action_dim+a[1] for a in action]).to(self.device)\n q_value = q_values.gather(1, action_.unsqueeze(1)).squeeze(1)\n\n # solve matrix Nash equilibrium\n try: # nash computation may encounter error and terminate the process\n next_dist, next_q_value = self.compute_nash(target_next_q_values, update=True)\n except: \n print(\"Invalid nash computation.\")\n next_q_value = np.zeros_like(reward)\n\n if DoubleTrick: # calculate next_q_value using double DQN trick\n next_dist = 
np.array(next_dist) # shape: (#batch, #agent, #action)\n target_next_q_values = target_next_q_values.reshape((-1, self.action_dim, self.action_dim))\n left_multi = np.einsum('na,nab->nb', next_dist[:, 0], target_next_q_values) # shape: (#batch, #action)\n next_q_value = np.einsum('nb,nb->n', left_multi, next_dist[:, 1]) \n\n next_q_value = torch.FloatTensor(next_q_value).to(self.device)\n\n expected_q_value = reward + (self.gamma ** self.multi_step) * next_q_value * (1 - done)\n\n loss = F.mse_loss(q_value, expected_q_value.detach(), reduction='none')\n loss = loss.mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if self.update_cnt % self.target_update_interval == 0:\n self.update_target(self.model, self.target)\n self.update_cnt += 1\n return loss.item()\n\nclass NashDQNBase(DQNBase):\n \"\"\"\n Nash-DQN for parallel env sampling\n\n parameters\n ---------\n env environment(openai gym)\n \"\"\"\n def __init__(self, env, net_args, number_envs=2, two_side_obs=True):\n super().__init__(env, net_args)\n self.number_envs = number_envs\n try:\n if two_side_obs:\n self._observation_shape = tuple(map(operator.add, env.observation_space.shape, env.observation_space.shape)) # double the shape\n else:\n self._observation_shape = env.observation_space.shape\n self._action_shape = (env.action_space.n)**2\n except:\n if two_side_obs:\n self._observation_shape = tuple(map(operator.add, env.observation_space[0].shape, env.observation_space[0].shape)) # double the shape\n else:\n self._observation_shape = env.observation_space[0].shape\n self._action_shape = (env.action_space[0].n)**2\n self._construct_net(env, net_args)\n\n def _construct_net(self, env, net_args):\n input_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape = self._observation_shape)\n output_space = gym.spaces.Discrete(self._action_shape)\n if len(self._observation_shape) <= 1: # not 3d image\n self.net = get_model('mlp')(input_space, output_space, net_args, model_for='discrete_q')\n else:\n self.net = get_model('cnn')(input_space, output_space, net_args, model_for='discrete_q')\n","repo_name":"quantumiracle/nash-dqn","sub_path":"nash_dqn.py","file_name":"nash_dqn.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"70059256648","text":"import jittor as jt\nfrom jittor import Module\nfrom jittor import nn\nfrom jittor_operations import *\nfrom genotypes import DOTS_final_C10\njt.flags.use_cuda = 1\nimport time\n\nclass Cell(Module):\n\n def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):\n super(Cell, self).__init__()\n print(C_prev_prev, C_prev, C)\n\n if reduction_prev:\n self.preprocess0 = FactorizedReduce(C_prev_prev, C)\n else:\n self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)\n self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)\n\n if reduction:\n op_names, indices = zip(*genotype.reduce)\n concat = genotype.reduce_concat\n else:\n op_names, indices = zip(*genotype.normal)\n concat = genotype.normal_concat\n self._compile(C, op_names, indices, concat, reduction)\n\n def _compile(self, C, op_names, indices, concat, reduction):\n assert len(op_names) == len(indices)\n self._steps = len(op_names) // 2\n self._concat = concat\n self.multiplier = len(concat)\n\n self._ops = nn.ModuleList()\n for name, index in zip(op_names, indices):\n stride = 2 if reduction and index < 2 else 1\n op = OPS[name](C, stride, True)\n self._ops.append(op)\n self._indices = 
indices\n\n def execute(self, s0, s1):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n for i in range(self._steps):\n h1 = states[self._indices[2 * i]]\n h2 = states[self._indices[2 * i + 1]]\n op1 = self._ops[2 * i]\n op2 = self._ops[2 * i + 1]\n h1 = op1(h1)\n h2 = op2(h2)\n s = h1 + h2\n states += [s]\n return jt.contrib.concat([states[i] for i in self._concat], dim=1)\n\n\nclass NetworkCIFAR(Module):\n\n def __init__(self, C, num_classes, layers, genotype):\n super(NetworkCIFAR, self).__init__()\n self._layers = layers\n\n stem_multiplier = 3\n C_curr = stem_multiplier * C\n self.stem = nn.Sequential(\n nn.Conv2d(3, C_curr, 3, padding=1, bias=False),\n nn.BatchNorm2d(C_curr)\n )\n\n C_prev_prev, C_prev, C_curr = C_curr, C_curr, C\n self.cells = nn.ModuleList()\n reduction_prev = False\n for i in range(layers):\n if i in [layers // 3, 2 * layers // 3]:\n C_curr *= 2\n reduction = True\n else:\n reduction = False\n cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)\n reduction_prev = reduction\n self.cells.append(cell)\n C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr\n\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(C_prev, num_classes)\n\n def execute(self, input):\n s0 = s1 = self.stem(input)\n for i, cell in enumerate(self.cells):\n s0, s1 = s1, cell(s0, s1)\n out = self.global_pooling(s1)\n logits = self.classifier(out.view(out.size(0), -1))\n return logits\n\n\nif __name__==\"__main__\":\n jittor_model=NetworkCIFAR(36,10,20,DOTS_final_C10)\n jittor_model.load('resnet101_rbn_best_model.pth')\n\n jittor_model.eval()\n\n\n\n","repo_name":"guyuchao/DOTS","sub_path":"Jittor/jittor_model.py","file_name":"jittor_model.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"} +{"seq_id":"20918793171","text":"import vcfreader\n\n\ndef sort(filename, key, order, saved_path):\n headers, data = vcfreader.vcfread(filename)\n # x : num of rows\n # str_1: prefix of key like 'AF='\n x = len(data)\n str_1 = key + '='\n\n # # loop to trace the key we want in each line\n # # find target column j\n # # get the target key value number and insert to head position\n for i in range(x):\n flag = 0\n for j in range(6, len(data[i]) - 1): # ignore first 7 column\n if data[i][j].startswith(str_1): # locate the key\n num = data[i][j].replace(str_1, '') # use '' to replace prefix then can get key value\n if num != '.': # some value is '.', convert string key value to float to sort\n num = float(num)\n data[i].insert(0, num)\n else:\n data[i].insert(0, 0.0) # Use 0 to replace \".\"\n flag = 1\n break\n # since we need to sort the value, we must use a float number to replace \".\"\n # use 0 maybe not a good solution, still need more work to feed a good solution\n if flag == 0: # to check if key is missing\n data[i].insert(0, 0)\n\n # SORT\n # Timsort is used: Timsort is a hybrid stable sorting algorithm, derived from merge sort and insertion sort,\n # designed to perform well on many kinds of real-world data.\n\n # sort the head column\n data = sorted(data, key=lambda k: k[0], reverse=order)\n\n # delete duplicate first column after sorting\n for i in range(x):\n data[i].pop(0)\n\n # recover\n # connect to vcf format\n result = ''\n for i in range(x):\n for j in range(0, 7): # first 7 columns delimiter as \\t\n result += str(data[i][j])\n result += '\\t'\n for j in range(7, len(data[i]) - 3): # info column delimiter as ;\n result += 
data[i][j] + ';'\n        for j in range(len(data[i]) - 3, len(data[i]) - 1):  # two columns after info, delimiter as \\t\n            result += '\\t' + data[i][j]\n        result += '\\n'\n    print('finished!')\n\n    # save to file\n    saved_path += 'SortBy'\n    saved_path += key\n    saved_path += '.vcf'\n    f = open(saved_path, 'w')\n    str1 = ''.join(headers)\n    f.write(str1)  # headers\n    f.write(result)  # data\n    f.close()\n    print('file saved at ' + saved_path)\n","repo_name":"GaoxiangLi/VCF_Exercise","sub_path":"vcfsort.py","file_name":"vcfsort.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18602426191","text":"import pymorphy2\n\n\nclass Singleton(object):\n\n    \"\"\"\n    Class that hands out a single instance of the word analyzer\n\n    \"\"\"\n\n    __instance = None\n\n    def __init__(self):\n        if not Singleton.__instance:\n            print('Class has been initialized')\n        else:\n            print(\"Instance already created:\", self.get_instance())\n\n    @classmethod\n    def get_instance(cls):\n        if not cls.__instance:\n            cls.__instance = pymorphy2.MorphAnalyzer()\n        return cls.__instance\n","repo_name":"Dope-Bass/GrammarAnalyzer","sub_path":"singletonMorph.py","file_name":"singletonMorph.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37337840415","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n#\n\n__author__ = 'Jean-Gabriel Ganascia'\n\nimport sqlite3\nimport time\n\n\nclass noeud:  # enumerates the nodes of the graph\n    def __init__(self):\n        self.val = 0\n        self.tempsIni = time.perf_counter()  # time.clock() was removed in Python 3.8; perf_counter() is the modern timer\n        self.pasNbreNoeud = 10000\n\n    def nouvelleValeur(self):\n        self.val += 1\n        if divmod(self.val, self.pasNbreNoeud)[1] == 0:\n            self.temps = time.perf_counter()\n            print(\"Number of graph nodes: \" + str(self.val) + \"; graph construction time for the last \" + str(\n                self.pasNbreNoeud) + \" nodes: \" + format(self.temps - self.tempsIni, 'f') + \" sec.\")\n            self.tempsIni = self.temps\n        return self.val\n\n    def valeur(self):\n        return self.val\n\n\ndef construction_graphe(project_path):\n    \"\"\"\n    Function that builds the graph\n    \"\"\"\n    t1 = time.perf_counter()\n    connexion = sqlite3.connect(project_path + '/BDs/galaxie.db', 1, 0, 'EXCLUSIVE')\n    cursor = connexion.cursor()\n    n = noeud()\n    cursor.execute('''SELECT rowid FROM livres''')\n    idReference = cursor.fetchone()  # grab the first row\n    while idReference != None:  # iterate over the whole livres TABLE\n        fusion_sources_cibles(idReference, connexion, n)\n        idReference = cursor.fetchone()\n    cursor.execute('''INSERT INTO maxNoeud values (?)''', (n.val,))\n    connexion.commit()\n    connexion.close()\n    t2 = time.perf_counter()\n    print(\"Graph construction time: \" + format(t2 - t1, 'f') + \" sec.\")\n    return n.val\n\n\ndef fusion_sources_cibles(idRef, c, n):\n    \"\"\"\n\n    idRef : the id of one of the table rows, of the form (n,)\n    c : a connection to our DB\n    n : the noeud class, used to count the nodes of our graph\n    \"\"\"\n\n    curseurSource = c.cursor()  # create two cursors\n    curseurCible = c.cursor()\n    curseurSource.execute('''SELECT ordonneeSource, empanSource, rowid FROM grapheReutilisations WHERE idRefSource = \n    ?''', idRef)\n    curseurCible.execute('''SELECT ordonneeCible, empanCible, rowid FROM grapheReutilisations WHERE idRefCible = ?''',\n                         idRef)\n\n    listeReutilisationSource = curseurSource.fetchall()\n    listeReutilisationCible = curseurCible.fetchall()\n
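    # marquage() negates the rowids of target-side reuses so both directions can be merged and sorted together\n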
listeReutilisationMarquee = marquage(listeReutilisationCible, 'cible') + listeReutilisationSource\n listeReutilisationMarquee.sort()\n R = fusion(listeReutilisationMarquee, n)\n\n if R:\n n.nouvelleValeur()\n for X in R:\n if X[-2] > 0:\n ajoutSource(X, curseurSource)\n elif X[-2] < 0:\n ajoutCible(X, curseurCible)\n else:\n print(\"Attention, erreur fusion sur noeud \" + str(n.val) + \" avec X=\" + str(X))\n\n\ndef marquage(L, M):\n R = []\n for X in L:\n if M == 'cible':\n R.append((X[0], X[1], -X[2]))\n else:\n R.append(X)\n return R\n\n\ndef fusion(L, n):\n if L is []:\n return []\n elif len(L) == 1:\n return [[L[0][0], L[0][1], L[0][2], n.val]]\n else:\n Tete = L[0]\n Suivant = L[1]\n return fusion_aux(Tete, Suivant, L[2:], n, [])\n\n\ndef fusion_aux(Tete, Suivant, Entree, Noeud, Resultat):\n R = Resultat\n E = Entree\n T = Tete\n S = Suivant\n while len(E) >= 1:\n if T[0] + T[1] >= S[0]:\n NReutilisation = [T[0], max(T[1], S[1] - T[0] + S[0]), T[2], Noeud.val]\n R = R + [NReutilisation]\n T = [T[0], max(T[1], S[1] - T[0] + S[0]), S[2]]\n S = E[0]\n E = E[1:]\n else:\n NReutilisation = [T[0], T[1], T[2], Noeud.val]\n R = R + [NReutilisation]\n T = S\n S = E[0]\n E = E[1:]\n Noeud.nouvelleValeur()\n else:\n if T[0] + T[1] >= S[0]:\n NReutilisation = [T[0], max(T[1], S[1] - T[0] + S[0]), T[2], Noeud.val]\n R = R + [NReutilisation]\n T = [T[0], max(T[1], S[1] - T[0] + S[0]), S[2]]\n else:\n NReutilisation = [T[0], T[1], T[2], Noeud.val]\n R = R + [NReutilisation]\n T = S\n Noeud.nouvelleValeur()\n\n return R + [[T[0], T[1], T[2], Noeud.val]]\n\n\ndef ajoutSource(L, Curseur):\n Curseur.execute('''INSERT INTO grapheGalaxiesSource values (?,?)''', (L[2], L[3],))\n\n\ndef ajoutCible(L, Curseur):\n Curseur.execute('''INSERT INTO grapheGalaxiesCible values (?,?)''', (abs(L[2]), L[3],))\n\n\ndef sauvegarde_graphe(project_path):\n connexion = sqlite3.connect(project_path + '/BDs/galaxie.db', 1, 0, 'EXCLUSIVE')\n curseur_arc = connexion.cursor()\n curseur_noeud = connexion.cursor()\n curseur_graphe = connexion.cursor()\n curseur_arc.execute('''SELECT * FROM maxNoeud''')\n maxNoeud = curseur_arc.fetchone()[0]\n n = noeud()\n while n.val < maxNoeud:\n curseur_arc.execute('''SELECT idReutilisation FROM grapheGalaxiesSource WHERE idNoeud = (?)''', (n.val,))\n reutilisation = curseur_arc.fetchone()\n while reutilisation != None:\n curseur_noeud.execute('''SELECT idNoeud FROM grapheGalaxiesCible WHERE idReutilisation = (?)''',\n (reutilisation[0],))\n nouveau_noeud = curseur_noeud.fetchone()\n curseur_graphe.execute('''INSERT INTO grapheGalaxies values (?,?)''', (n.val, nouveau_noeud[0],))\n reutilisation = curseur_arc.fetchone()\n n.nouvelleValeur()\n print(\"graphe sauvé\")\n n = noeud()\n while n.val < maxNoeud:\n curseur_arc.execute('''SELECT idReutilisation FROM grapheGalaxiesCible WHERE idNoeud = (?)''', (n.val,))\n reutilisation = curseur_arc.fetchone()\n while reutilisation != None:\n curseur_noeud.execute('''SELECT idNoeud FROM grapheGalaxiesSource WHERE idReutilisation = (?)''',\n (reutilisation[0],))\n nouveau_noeud = curseur_noeud.fetchone()\n curseur_graphe.execute('''INSERT INTO grapheGalaxies values (?,?)''', (n.val, nouveau_noeud[0],))\n reutilisation = curseur_arc.fetchone()\n n.nouvelleValeur()\n connexion.commit()\n connexion.close()\n print(\"graphe transposé 
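At its core, the fusion/fusion_aux pair above merges overlapping (offset, span) reuse intervals so that reuses sharing text land in the same graph node. The interval-merge step in isolation, stripped of the node bookkeeping and database cursors (the overlap test start + length >= next_start mirrors fusion_aux; the data is hypothetical):

def merge_intervals(intervals):
    # intervals: (start, length) pairs; returns merged pairs.
    merged = []
    for start, length in sorted(intervals):
        if merged and merged[-1][0] + merged[-1][1] >= start:
            prev_start, prev_length = merged[-1]
            # Extend the previous span, like max(T[1], S[1] - T[0] + S[0]) above.
            merged[-1] = (prev_start, max(prev_length, start + length - prev_start))
        else:
            merged.append((start, length))
    return merged

print(merge_intervals([(0, 5), (3, 4), (10, 2)]))  # [(0, 7), (10, 2)]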
sauvé\")\n","repo_name":"marc-treu/Galaxies2.0","sub_path":"Galaxies/src/grapheGalaxies.py","file_name":"grapheGalaxies.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10476194431","text":"from flask import Flask, jsonify, request, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\n\nfrom datetime import datetime\n\nimport models\n\nfrom execute_models import execute_models_api\n\napp = Flask(__name__)\nCORS(app)\n\napp.register_blueprint(execute_models_api)\n\n\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///project_database.db'\ndb = SQLAlchemy(app)\n\n\n@app.route('/')\ndef index():\n return \"Hello, World!\"\n\n\n@app.route('/aircadia')\ndef aaa():\n return \"Hello, AirCADia!\"\n\n\n\n# @app.route('/aaa//')\n# def createUser(name, location):\n# user = User(name = name, location = location)\n# db.session.add(user)\n# db.session.commit()\n# return '
Added New User
'\n\n\n# @app.route('/bbb/')\n# def GetUser(name):\n# user = User.query.filter_by(name = name).first()\n# return f'
The user is located in: {user.location}
'\n\n# @app.route('/ccc/')\n# def delete_user(name):\n# user = User.query.filter_by(name = name).first()\n# db.session.delete(user)\n# db.session.commit()\n# return f'
The user is located in: {user.location}
'\n\n@app.route('/create-project', methods=[\"POST\"])\ndef create_project():\n if request.is_json:\n req_json = request.get_json()\n key = req_json.get(\"key\")\n value = req_json[\"value\"]\n \n \n data = ProjectSetting(key=key, value=value)\n db.session.add(data)\n db.session.commit()\n\n response_body = {\"Result\": \"Data Created\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"message\": \"Request body must be JSON\"}), 400)\n\n@app.route('/get-project', methods=[\"GET\"])\ndef get_project():\n project = {}\n\n # project settings\n projectSettings = ProjectSetting.query.all()\n for projectSetting in projectSettings:\n key = projectSetting.key\n value = projectSetting.value\n if key == \"name\":\n project[\"name\"] = value\n if key == \"endPoint\":\n project[\"endPoint\"] = value\n\n # data\n dataList = []\n dataVariables = DataVariable.query.all()\n for dataVariable in dataVariables:\n data = {\n \"name\": dataVariable.name,\n \"category\": dataVariable.category,\n \"description\": dataVariable.description,\n \"type\": dataVariable.type,\n \"value\": dataVariable.value,\n \"unit\": dataVariable.unit,\n \"minValue\": dataVariable.minValue,\n \"maxValue\": dataVariable.maxValue,\n }\n dataList.append(data)\n print(dataVariable)\n project[\"data\"] = dataList\n\n # models\n modelsList = []\n models = ComputationalModel.query.all()\n for computationalModel in models:\n model = {}\n model[\"name\"] = computationalModel.name\n model[\"category\"] = computationalModel.category\n model[\"description\"] = computationalModel.description\n model[\"endPoint\"] = computationalModel.endPoint\n modelInputs = []\n for input in computationalModel.inputs:\n modelInputs.append({\"name\": input.name})\n model[\"inputs\"] = modelInputs\n modelOutputs = []\n for output in computationalModel.outputs:\n modelOutputs.append({\"name\": output.name})\n model[\"outputs\"] = modelOutputs\n print(len(computationalModel.inputs))\n print(len(computationalModel.inputs))\n modelsList.append(model)\n project[\"models\"] = modelsList\n\n # workflows\n workflowsList = []\n workflows = ComputationalWorkflow.query.all()\n for computationalWorkflow in workflows:\n workflow = {}\n workflow[\"name\"] = computationalWorkflow.name\n workflow[\"category\"] = computationalWorkflow.category\n workflow[\"description\"] = computationalWorkflow.description\n\n # data objects\n dataObjects = []\n for input in computationalWorkflow.inputs:\n for data in dataList:\n if data[\"name\"] == input.name:\n dataObjects.append(data)\n break\n for output in computationalWorkflow.outputs:\n for data in dataList:\n if data[\"name\"] == output.name:\n dataObjects.append(data)\n break\n workflow[\"data\"] = dataObjects\n\n # inputs\n modelInputs = []\n for input in computationalWorkflow.inputs:\n modelInputs.append({\"name\": input.name})\n workflow[\"inputs\"] = modelInputs\n\n # outputs\n modelOutputs = []\n for output in computationalWorkflow.outputs:\n modelOutputs.append({\"name\": output.name})\n workflow[\"outputs\"] = modelOutputs\n\n \n\n executableComponents = []\n for executableComponent in computationalWorkflow.executable_components:\n model = {}\n model[\"name\"] = executableComponent.name\n model[\"category\"] = executableComponent.category\n model[\"description\"] = executableComponent.description\n model[\"endPoint\"] = executableComponent.endPoint\n modelInputs = []\n for input in executableComponent.inputs:\n modelInputs.append({\"name\": input.name})\n model[\"inputs\"] 
= modelInputs\n modelOutputs = []\n for output in executableComponent.outputs:\n modelOutputs.append({\"name\": output.name})\n model[\"outputs\"] = modelOutputs\n executableComponents.append(model)\n workflow[\"executableComponents\"] = executableComponents\n\n scheduledComponents = []\n for scheduledComponent in computationalWorkflow.scheduled_components:\n model = {}\n model[\"name\"] = scheduledComponent.name\n model[\"category\"] = scheduledComponent.category\n model[\"description\"] = scheduledComponent.description\n model[\"endPoint\"] = scheduledComponent.endPoint\n modelInputs = []\n for input in scheduledComponent.inputs:\n modelInputs.append({\"name\": input.name})\n model[\"inputs\"] = modelInputs\n modelOutputs = []\n for output in scheduledComponent.outputs:\n modelOutputs.append({\"name\": output.name})\n model[\"outputs\"] = modelOutputs\n scheduledComponents.append(model)\n workflow[\"scheduledComponents\"] = scheduledComponents\n workflowsList.append(workflow)\n project[\"workflows\"] = workflowsList\n\n # studies\n \n res = make_response(jsonify(project), 200)\n return res\n \n\n\n@app.route('/create-data', methods=[\"POST\"])\ndef create_data():\n if request.is_json:\n req_json = request.get_json()\n \n # create data for the given json\n _create_data(req_json)\n\n response_body = {\"Result\": \"Data Created\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"message\": \"Request body must be JSON\"}), 400)\n@app.route('/create-dataObjects', methods=[\"POST\"])\ndef create_dataObjects():\n if request.is_json:\n req_json = request.get_json()\n \n for dataJson in req_json:\n # create data for the given json\n _create_data(dataJson)\n\n response_body = {\"Result\": \"Data Created\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"message\": \"Request body must be JSON\"}), 400)\ndef _create_data(data_json):\n name = data_json.get(\"name\")\n category = data_json.get(\"category\")\n description = data_json.get(\"description\")\n type = data_json[\"type\"]\n value = data_json[\"value\"]\n unit = data_json[\"unit\"]\n minValue = data_json[\"minValue\"]\n maxValue = data_json[\"maxValue\"]\n \n # create database entry for \"data variable\" \n data = DataVariable(name=name, category=category, description=description, type=type, value=value, unit=unit, minValue=minValue, maxValue=maxValue)\n db.session.add(data)\n db.session.commit()\n\n@app.route('/delete-data', methods=[\"POST\"])\ndef delete_data():\n if request.is_json:\n req_json = request.get_json()\n name = req_json.get(\"name\")\n category = req_json.get(\"category\")\n description = req_json.get(\"description\")\n type = req_json[\"type\"]\n value = req_json[\"value\"]\n unit = req_json[\"unit\"]\n minValue = req_json[\"minValue\"]\n maxValue = req_json[\"maxValue\"]\n \n # create database entry for \"data variable\"\n DataVariable.query.filter_by(name = name).delete()\n # data = DataVariable(name=name, category=category, description=description, type=type, value=value, unit=unit, minValue=minValue, maxValue=maxValue)\n # db.session.delete(data)\n db.session.commit()\n\n response_body = {\"Result\": \"Data Deleted\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"message\": \"Request body must be JSON\"}), 400)\n\n@app.route('/create-model', methods=[\"POST\"])\ndef create_model():\n if request.is_json:\n req_json = request.get_json()\n\n 
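Every POST route in this file repeats the same guard-parse-respond shape: check request.is_json, read request.get_json(), then answer 200 or 400 via make_response(jsonify(...)). The pattern on its own, with an in-memory list standing in for the SQLAlchemy session (the route name and fields are illustrative, not part of the app above):

from flask import Flask, jsonify, request, make_response

app = Flask(__name__)
STORE = []  # stand-in for the DataVariable table

@app.route('/create-item', methods=["POST"])
def create_item():
    if not request.is_json:
        return make_response(jsonify({"message": "Request body must be JSON"}), 400)
    req_json = request.get_json()
    STORE.append({"name": req_json.get("name"), "value": req_json.get("value")})
    return make_response(jsonify({"Result": "Data Created"}), 200)

if __name__ == '__main__':
    app.run(port=5001)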
_create_model(req_json)\n\n response_body = {\"Result\": \"Model Created\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"error\": \"Request body must be JSON\"}), 400)\ndef _create_model(model_json):\n name = model_json.get(\"name\")\n category = model_json.get(\"category\")\n description = model_json.get(\"description\")\n endPoint = model_json[\"endPoint\"]\n inputs = model_json[\"inputs\"]\n outputs = model_json[\"outputs\"]\n \n # create database entry for \"computational model\"\n model = ComputationalModel(name=name, category=category, description=description, endPoint=endPoint)\n for i in range(0, len(inputs)):\n dataVariable = DataVariable.query.filter_by(name = inputs[i][\"name\"]).first()\n if dataVariable is None:\n return make_response(jsonify({\"error\": \"Data variable '\" + inputs[i][\"name\"] + \"' not Present\"}), 400)\n model.inputs.append(dataVariable)\n for i in range(0, len(outputs)):\n dataVariable = DataVariable.query.filter_by(name = outputs[i][\"name\"]).first()\n if dataVariable is None:\n return make_response(jsonify({\"error\": \"Data variable '\" + outputs[i][\"name\"] + \"' not Present\"}), 400)\n model.outputs.append(dataVariable)\n db.session.add(model)\n db.session.commit()\n\n@app.route('/create-workflow', methods=[\"POST\"])\ndef create_workflow():\n if request.is_json:\n print('Atifee')\n req_json = request.get_json()\n name = req_json.get(\"name\")\n category = req_json.get(\"category\")\n description = req_json.get(\"description\")\n workflow_data_json = req_json[\"data\"]\n inputs = req_json[\"inputs\"]\n outputs = req_json[\"outputs\"]\n executable_components = req_json[\"executableComponents\"]\n scheduled_components = req_json[\"scheduledComponents\"]\n print('Atif')\n # create database entry for \"computational workflow\"\n\n workflow = ComputationalWorkflow(name=name, category=category, description=description)\n \n for i in range(0, len(inputs)):\n dataVariable = DataVariable.query.filter_by(name = inputs[i][\"name\"]).first()\n if dataVariable is None:\n return make_response(jsonify({\"error\": \"Data variable '\" + inputs[i][\"name\"] + \"' not Present\"}), 400)\n workflow.inputs.append(dataVariable)\n for i in range(0, len(outputs)):\n dataVariable = DataVariable.query.filter_by(name = outputs[i][\"name\"]).first()\n if dataVariable is None:\n return make_response(jsonify({\"error\": \"Data variable '\" + outputs[i][\"name\"] + \"' not Present\"}), 400)\n workflow.outputs.append(dataVariable)\n for i in range(0, len(executable_components)):\n computationalModel = ComputationalModel.query.filter_by(name = executable_components[i][\"name\"]).first()\n if computationalModel is None:\n return make_response(jsonify({\"error\": \"Computational Model '\" + executable_components[i][\"name\"] + \"' not Present\"}), 400)\n workflow.executable_components.append(computationalModel)\n\n db.session.add(workflow)\n print(workflow)\n db.session.commit()\n\n response_body = {\"Result\": \"Workflow Created\"}\n res = make_response(jsonify(response_body), 200)\n return res\n else:\n return make_response(jsonify({\"error\": \"Request body must be JSON\"}), 400)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# class User(db.Model):\n# id = db.Column(db.Integer, primary_key = True)\n# name = db.Column(db.String(50))\n# location = db.Column(db.String(50))\n# #date_created = db.Column(db.DateTime, dafault = datetime.now)\n\n\n\n# Table for storing project attributes\nclass 
ProjectSetting(db.Model):\n __tablename__ = 'ProjectSettings'\n id = db.Column(db.Integer, primary_key = True)\n key = db.Column(db.String(50))\n value = db.Column(db.String(50))\n\nclass DataVariable(db.Model):\n __tablename__ = 'DataVariables'\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(50))\n category = db.Column(db.String(50))\n description = db.Column(db.String(50))\n type = db.Column(db.String(50))\n value = db.Column(db.String(50))\n unit = db.Column(db.String(50))\n minValue = db.Column(db.String(50))\n maxValue = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n #ComputationalModels = db.relationship(\"ComputationalModel\", secondary=\"ModelInputs\")\n\n\nclass ComputationalModel(db.Model):\n __tablename__ = 'ComputationalModels'\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(50))\n category = db.Column(db.String(50))\n description = db.Column(db.String(50))\n endPoint = db.Column(db.String(50))\n #location = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n #inputs = db.relationship('Data', backref='owner')\n inputs = db.relationship(\"DataVariable\", secondary=\"ModelInputs\")\n outputs = db.relationship(\"DataVariable\", secondary=\"ModelOutputs\")\n\n\nclass ModelInput(db.Model):\n __tablename__ = 'ModelInputs'\n id = db.Column(db.Integer, primary_key=True)\n model_id = db.Column(db.Integer, db.ForeignKey('ComputationalModels.id'))\n data_id = db.Column(db.Integer, db.ForeignKey('DataVariables.id'))\n model = db.relationship(ComputationalModel, backref=db.backref(\"ModelInputs\", cascade=\"all, delete-orphan\"))\n data = db.relationship(DataVariable, backref=db.backref(\"ModelInputs\", cascade=\"all, delete-orphan\"))\n\n\nclass ModelOutput(db.Model):\n __tablename__ = 'ModelOutputs'\n id = db.Column(db.Integer, primary_key=True)\n model_id = db.Column(db.Integer, db.ForeignKey('ComputationalModels.id'))\n data_id = db.Column(db.Integer, db.ForeignKey('DataVariables.id'))\n model = db.relationship(ComputationalModel, backref=db.backref(\"ModelOutputs\", cascade=\"all, delete-orphan\"))\n data = db.relationship(DataVariable, backref=db.backref(\"ModelOutputs\", cascade=\"all, delete-orphan\"))\n\n\nclass ComputationalWorkflow(db.Model):\n __tablename__ = 'ComputationalWorkflows'\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(50))\n category = db.Column(db.String(50))\n description = db.Column(db.String(500))\n #location = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n # inputs = db.Column(db.String(5000))\n # outputs = db.Column(db.String(5000))\n inputs = db.relationship(\"DataVariable\", secondary=\"WorkflowInputs\")\n outputs = db.relationship(\"DataVariable\", secondary=\"WorkflowOutputs\")\n executable_components = db.relationship(\"ComputationalModel\", secondary=\"WorkflowExecutableComponents\")\n scheduled_components = db.relationship(\"ComputationalModel\", secondary=\"WorkflowScheduledComponents\")\n\n\nclass WorkflowInput(db.Model):\n __tablename__ = 'WorkflowInputs'\n id = db.Column(db.Integer, primary_key=True)\n workflow_id = db.Column(db.Integer, db.ForeignKey('ComputationalWorkflows.id'))\n data_id = db.Column(db.Integer, db.ForeignKey('DataVariables.id'))\n workflow = db.relationship(ComputationalWorkflow, backref=db.backref(\"WorkflowInputs\", cascade=\"all, delete-orphan\"))\n data = db.relationship(DataVariable, 
backref=db.backref(\"WorkflowInputs\", cascade=\"all, delete-orphan\"))\n\n\nclass WorkflowOutput(db.Model):\n __tablename__ = 'WorkflowOutputs'\n id = db.Column(db.Integer, primary_key=True)\n workflow_id = db.Column(db.Integer, db.ForeignKey('ComputationalWorkflows.id'))\n data_id = db.Column(db.Integer, db.ForeignKey('DataVariables.id'))\n workflow = db.relationship(ComputationalWorkflow, backref=db.backref(\"WorkflowOutputs\", cascade=\"all, delete-orphan\"))\n data = db.relationship(DataVariable, backref=db.backref(\"WorkflowOutputs\", cascade=\"all, delete-orphan\"))\n\n\nclass WorkflowExecutableComponent(db.Model):\n __tablename__ = 'WorkflowExecutableComponents'\n id = db.Column(db.Integer, primary_key=True)\n workflow_id = db.Column(db.Integer, db.ForeignKey('ComputationalWorkflows.id'))\n model_id = db.Column(db.Integer, db.ForeignKey('ComputationalModels.id'))\n workflow = db.relationship(ComputationalWorkflow, backref=db.backref(\"WorkflowExecutableComponents\", cascade=\"all, delete-orphan\"))\n model = db.relationship(ComputationalModel, backref=db.backref(\"WorkflowExecutableComponents\", cascade=\"all, delete-orphan\"))\n\nclass WorkflowScheduledComponent(db.Model):\n __tablename__ = 'WorkflowScheduledComponents'\n id = db.Column(db.Integer, primary_key=True)\n workflow_id = db.Column(db.Integer, db.ForeignKey('ComputationalWorkflows.id'))\n model_id = db.Column(db.Integer, db.ForeignKey('ComputationalModels.id'))\n workflow = db.relationship(ComputationalWorkflow, backref=db.backref(\"WorkflowScheduledComponents\", cascade=\"all, delete-orphan\"))\n model = db.relationship(ComputationalModel, backref=db.backref(\"WorkflowScheduledComponents\", cascade=\"all, delete-orphan\"))\n\nif __name__ == '__main__':\n app.run(debug=True, port=3002)","repo_name":"Atif-Aerospace/WindTurbine","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21860080658","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\nempire_to_idx = {}\r\nidx_to_empire = {}\r\nfor i in range(n):\r\n emp_name = input().strip()\r\n \r\n empire_to_idx[emp_name] = i\r\n idx_to_empire[i] = emp_name\r\n\r\ndef find(x):\r\n if parents[x] == x:\r\n return x\r\n \r\n parents[x] = find(parents[x])\r\n return parents[x]\r\n\r\n# 두 왕국이 싸운 후 속국을 이룸\r\ndef union(x, y, w):\r\n px, py = find(x), find(y)\r\n\r\n # 문제 조건 상 두 나라는 종주국 - 속국 / 종주국 - 종주국 관계임\r\n # 만일 종주국 - 속국 관계라면, 속국의 parents 정보는 자기 자신이 아니라 종주국일 것\r\n # 즉, px와 py가 같은 경우임\r\n\r\n # x 왕국이 승리 -> y의 종주국은 x\r\n if w == '1':\r\n if px == py and px != x:\r\n parents[x] = x\r\n px = x\r\n parents[py] = px\r\n \r\n # y 왕국이 승리 -> x의 종주국은 y\r\n else:\r\n if px == py and py != y:\r\n parents[y] = y\r\n py = y\r\n parents[px] = py\r\n \r\nparents = [i for i in range(n)]\r\n\r\nfor _ in range(m):\r\n emp_name_1, emp_name_2, w = input().strip().split(',')\r\n emp_idx_1 = empire_to_idx[emp_name_1]\r\n emp_idx_2 = empire_to_idx[emp_name_2] \r\n \r\n union(emp_idx_1, emp_idx_2, w)\r\n \r\n for i in range(n):\r\n find(i)\r\n\r\nnot_dependency_list = []\r\n# 전쟁 결과 종주국 리스트\r\n# 국가 번호 -> 국가 이름으로 append\r\nfor idx, emp_no in enumerate(parents):\r\n if idx == emp_no:\r\n not_dependency_list.append(idx_to_empire[emp_no])\r\n \r\n# ascii 기준 종주국 정렬\r\nnot_dependency_list.sort(key=lambda x: x.split()[-1])\r\n\r\nprint(len(not_dependency_list))\r\nfor emp in not_dependency_list:\r\n 
print(emp)","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/16402. 제국/제국.py","file_name":"제국.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41864249428","text":"\"\"\"\nHarsha Puvvada\nCSC 481 Final Project\nConverts digital image to Pointillistic Image\nFunctions are on top and main method is at the bottom of script\nDependencies: vectorField.py\n\"\"\"\n\nimport math,random,cv2, scipy, bisect, progressbar, colorsys\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom vectorField import vectorField\n\n\n#functions used/main script at bottom\ndef limitSize(img, ratio):\n '''Downsamples image based on given ratio'''\n if ratio == 1:\n return img\n else:\n height, width, depth = img.shape\n shape = (int(width * ratio), int(height * ratio))\n return cv2.resize(img, shape, interpolation=cv2.INTER_AREA)\n\ndef paletteGenerator(img, colors):\n '''Finds the dominant colors using Kmeans Clustering given image and number of colors'''\n #convert from bgr to rgb\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n #reshape to list of pixels\n img = img.reshape((img.shape[0] * img.shape[1], 3))\n kmeans = KMeans(n_clusters = colors)\n out = kmeans.fit(img)\n return out.cluster_centers_\n\ndef showPalette(palette, paletteSize, name):\n '''Displays the cluster centers as colors and saves it into file'''\n cols = paletteSize\n rows = int(math.ceil(len(palette) / cols))\n res = np.zeros((rows * 80, cols * 80, 3), dtype=np.uint8)\n for y in range(rows):\n for x in range(cols):\n if y * cols + x < len(palette):\n color = [int(c) for c in palette [y * cols + x]]\n cv2.rectangle(res, (x * 80, y * 80), (x * 80 + 80, y * 80 + 80), color, -1)\n fig = plt.figure()\n plt.axis(\"off\")\n plt.imshow(res)\n \n if name == 'boosted':\n plt.title('Boosted color palette')\n fig.savefig('./output/boostedPaletteColors.jpg')\n else:\n plt.title('Original color palette')\n fig.savefig('./output/originalPaletteColors.jpg')\n return res\n\ndef boostColors(palette):\n '''boosts colors in palette'''\n boostedPalette = []\n for item in palette:\n b = item[0]; g = item[1]; r = item[2]\n h,s,v = colorsys.rgb_to_hsv(r,g,b)\n #increase hue, saturation and brightness\n# h = h * 1.2\n s = s * 1.2\n v = v * 1.4\n newr,newg,newb = colorsys.hsv_to_rgb(h,s,v)\n boostedPalette.append([newr,newg,newb])\n return np.asarray(boostedPalette, dtype=np.float64)\n\n\ndef randomized_grid(h, w, scale):\n '''creates a randomized grid on which to paint. 
This is the canvas'''\n assert (scale > 0)\n r = scale//2\n grid = []\n for i in range(0, h, scale):\n for j in range(0, w, scale):\n y = random.randint(-r, r) + i\n x = random.randint(-r, r) + j\n\n grid.append((y % h, x % w))\n\n random.shuffle(grid)\n return grid\n\ndef compute_color_probabilities(pixels, palette, k=9):\n '''compute the probabilities of the colors in grid'''\n distances = scipy.spatial.distance.cdist(pixels, palette)\n maxima = np.amax(distances, axis=1)\n\n distances = maxima[:, None] - distances\n summ = np.sum(distances, 1)\n distances /= summ[:, None]\n\n distances = np.exp(k*len(palette)*distances)\n summ = np.sum(distances, 1)\n distances /= summ[:, None]\n\n return np.cumsum(distances, axis=1, dtype=np.float32)\n\ndef color_select(probabilities, palette):\n '''picks the nearest color that is most similar to original image'''\n r = random.uniform(0, 1)\n i = bisect.bisect_left(probabilities, r)\n return palette[i] if i < len(palette) else palette[-1]\n\n###################### Main Method ########################################\n\n###read in image\nimg = cv2.imread(\"./images/image3.jpg\")\n\n#### Step 1: Downsample Image\n#downsample by 0.5 of image to speed up Kmeans\nratio = 0.2\nshrunkImg = limitSize(img, ratio)\n#Save Shrunk Image\ncv2.imwrite('./output/shrunkImage.jpg', shrunkImg)\n\n\n### Step 2: Compute color palette using Kmeans\npaletteSize = 20\npalette = paletteGenerator(shrunkImg, paletteSize)\noriginalPaletteColors = showPalette(palette, paletteSize, 'original')\n\n\n### Step 3: Boost color saturation by converting from rgb to hsv and then increasing saturation\nboosted = boostColors(palette)\n\n### Step 4: Get vector field of image using Scharr Image derivative\n#convert image to grayscale to compute image gradient \ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngradient = vectorField.from_gradient(gray)\n\n\n### Step 5: Smooth vector field using Gaussian Blur\nradius = 0; #0 is automatic\ngradient.smooth(radius)\n\n\n### Step 6: Paint image\n# cartoonized version is used as a base. 
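paletteGenerator above fits KMeans on the flattened pixel array and uses the cluster centers as the palette. That step on its own, with synthetic pixels so it runs stand-alone (the cluster count here is arbitrary; the script above uses 20):

import numpy as np
from sklearn.cluster import KMeans

def dominant_colors(img_rgb, n_colors=5):
    # Each cluster center is one palette color (an RGB triple).
    pixels = img_rgb.reshape(-1, 3)
    return KMeans(n_clusters=n_colors, n_init=10).fit(pixels).cluster_centers_

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
print(dominant_colors(img, 3).shape)  # (3, 3)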
Made using medianBlur\nres = cv2.medianBlur(img, 11)\ncv2.imwrite('./output/baseImage.jpg', res)\n\n\n# define a randomized grid of locations for the brush strokes\ngrid = randomized_grid(img.shape[0], img.shape[1], scale=3)\nbatch_size = 10000\n\nbar = progressbar.ProgressBar()\nfor h in bar(range(0, len(grid), batch_size)):\n # get the pixel colors at each point of the grid\n pixels = np.array([img[x[0], x[1]] for x in grid[h:min(h + batch_size, len(grid))]])\n # precompute the probabilities for each color in the palette\n # lower values of k means more randomnes\n color_probabilities = compute_color_probabilities(pixels, boosted, k=9) \n #define stroke scale:\n stroke_scale = int(math.ceil(max(img.shape) / 900))\n \n for i, (y, x) in enumerate(grid[h:min(h + batch_size, len(grid))]):\n color = color_select(color_probabilities[i], boosted)\n angle = math.degrees(gradient.direction(y, x)) + 90\n length = int(round(stroke_scale + stroke_scale * math.sqrt(gradient.magnitude(y, x))))\n # draw ellipse stroke\n #cv2.ellipse(res, (x, y), (length, stroke_scale), angle, 0, 360, color, -1, cv2.LINE_AA)\n \n # draw circle stroke\n cv2.circle(res, (x,y), stroke_scale, color, thickness=-1, lineType= 8, shift=0)\n### Step 7: save Final Image\ncv2.imwrite('./output/pointillizedImage.jpg', res)\n\n\n\n","repo_name":"puvvadaharsha/pointillismProject","sub_path":"pointillizingDigitalImages.py","file_name":"pointillizingDigitalImages.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75202532167","text":"\"\"\"\nFaça um programa em python para ler os dados de um usuário: nome, telefone e endereço.\nAo final, o programa deve imprimir na tela um relatório com os dados do usuário.\n\"\"\"\n\nnome = input(\"Informe o nome do usuário: \")\ntelefone = input(\"Informe o telefone do usuário: \")\nendereço = input(\"Informe o endereço do usuário: \")\nprint(\"\\n\")\nprint(\"Dados do usuário: -Nome: \", nome, \"-Telefone: \", telefone, \"-Endereço: \", endereço)\n","repo_name":"KatiuceBarbosa/Logica-em-Programacao-Python","sub_path":"Exercício 6.py","file_name":"Exercício 6.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9322999499","text":"# just comment out one of the two lines below to switch between the two implementations (prac1 and prac2)\n#from metrorail_network1 import *\n\nimport turtle\nfrom metrorail_network2 import *\nstation_coordinates=[[-325.0, 60.0,cpt],[-267.0, 83.0, esp],[-238.0,105.0,yst]]\nstns=[cpt,cpt]\n\nstate=['from','to']\n\nindex=0\nrun=turtle.Turtle()\nrun.shape('circle')\nrun.shapesize(0.5,0.5,1)\n##print(eval('cpt'))\n\n\n\n\nreader=open('coordinates.txt',mode='r')\nfor i in reader.readlines():\n if i not in station_coordinates:\n a=i.split(',')\n a[-1]=eval((a[-1].split('\\n')[0]))\n ##print(a[-1,])\n station_coordinates.append(a)\nreader.close()\n##print(station_coordinates)\n\n\n\n'''\nroute_on_single_line = central_BLV.number_stops_and_direction(uni, esp)\nprint(\"Unibell to Esplanade on central_BLV: direction {0}, {1} stops\\n\".format(route_on_single_line[1].name, route_on_single_line[0]))\n\nuni.route_to(esp)\nooz.route_to(cpt)\nuni.route_to(pen)\ncpt.route_to(blv)\nyst.route_to(chn)\n\nuni.route_to(sto)\nuni.route_to(slt)\n\n# the test below is very inefficient with the first version of the function (going through Cape Town, total of 30 stops)\nkuy.route_to(kpt)\n\n# now 
best routes (you can see that the output differs between different runs when using the second version of the prac):\nkuy.best_route_to(kpt)\nuni.best_route_to(slt)\nclr.best_route_to(haz)\n'''\nallStations=[]\nlines=cpt.get_lines()\nfor i in lines:\n stations=i.get_stations()\n for j in stations:\n if j not in allStations:\n allStations.append(j)\n\ndef route(x,y):\n\n if state[0]=='from':\n run.clear()\n stns[0]=whichStation(x,y)\n run.color('red')\n run.goto(x,y)\n run.stamp()\n\n\n if state[0] =='to':\n stns[1]=whichStation(x,y)\n run.color('purple')\n run.goto(x,y)\n temp=state[1]\n state[1]=state[0]\n state[0]=temp\n return stns[0].best_route_to(stns[1])\n temp=state[1]\n state[1]=state[0]\n state[0]=temp\n\n\n\ndef whichStation(x,y):\n add=[]\n if state=='to':\n print('hello')\n\n for i in station_coordinates:\n dist=int(sqdist(float(x),float(y),float(i[0]),float(i[1])))\n ##print(dist)\n add.append(dist)\n lowest=add[0]\n ind=0\n for j in range(len(add)):\n if add[j]10:\n\n print('doesnt exist')\n stn=input('Enter 3 letter code').upper()\n print(stn)\n\n for i in allStations: ##AllStations has the string of all the object names\n ##cc=exec(str(i)+'.code')\n ##print(i.code)##Its printing None\n if i.code.lstrip()==stn:\n station_coordinates.append([x,y,(i)])\n print('done')\n ind=len(station_coordinates)-1\n break\n return(station_coordinates[ind][2])\n\n\n\ndef sqdist(x1,y1,x2,y2):\n return(((x2-x1)**2)+((y2-y1)**2))**(0.5)\n\n\n\n\n\ndef where(x,y):\n print(x,y)\n\ndef pwrite(x,y):\n stream=open('coordinates.txt',mode='w')\n for i in station_coordinates:\n stream.write('{},{},{}\\n'.format(i[0],i[1],i[2]))\n stream.close()\n print('printed')\n\n\n\n\n\n\nrun.penup()\nwin=turtle.Screen()\n\nwin.bgpic('CT_RailMap1.gif')\nwin.setup(width=900 , height=650, startx=None, starty=0)\nwin.onclick(route)\n\n##win.onclick(pwrite,btn=3).\n\nwin.listen()\nwin.mainloop()\n\n ##print(clr.get_connecting_time(southern, central_BLV)) # raises one of our user-defined exceptions\n##while(stnfrom==None):","repo_name":"bngweny/Metrorail_app_python","sub_path":"sources/metrorail_tests.py","file_name":"metrorail_tests.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39983038460","text":"from django.conf import settings\nfrom django.http import Http404\nimport pyuv\nfrom gaffer.httpclient import Server, GafferNotFound, ProcessId\n\n\ndef get_gaffer_server():\n loop = pyuv.Loop.default_loop()\n s = Server(settings.GAFFER_SERVER, loop=loop)\n return s\n\n\ndef get_process_or_404(proc_name, server=None):\n s = server\n if not s:\n s = get_gaffer_server()\n try:\n process = s.get_process(proc_name)\n return process\n except GafferNotFound:\n raise Http404\n\n\ndef get_process_list():\n s = get_gaffer_server()\n return s.processes()\n\n\ndef get_pid_or_404(pid, proc_name):\n s = get_gaffer_server()\n process = get_process_or_404(proc_name, s)\n pid_object = ProcessId(s, pid, process)\n return pid_object\n","repo_name":"martync/gaffer-django-client","sub_path":"gafferclient/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13802454748","text":"import math\r\n\r\nwhile True:\r\n num1 = float(input(\"Enter num1: \"))\r\n op = input(\"Enter your desired operation (+, -, *, /, radical, sin, cos, tan, cot, factorial, or exit)\") \r\n\r\n if op == \"+\":\r\n num2 = 
float(input(\"Enter num2: \"))\r\n result = num1 + num2\r\n\r\n elif op == \"-\":\r\n num2 = float(input(\"Enter num2: \"))\r\n result = num1 - num2\r\n\r\n elif op == \"*\":\r\n num2 = float(input(\"Enter num2: \"))\r\n result = num1 * num2\r\n\r\n elif op == \"/\":\r\n num2 = float(input(\"Enter num2: \"))\r\n if num2 == 0:\r\n result = \"Error\"\r\n else :\r\n result = num1 / num2\r\n\r\n elif op == \"radical\":\r\n if num1 < 0:\r\n result = \"Error\"\r\n else:\r\n result = math.sqrt(num1)\r\n\r\n elif op == \"sin\":\r\n result = math.tan(num1)\r\n\r\n elif op == \"cos\":\r\n result = math.cos(num1)\r\n\r\n elif op == \"tan\":\r\n result = math.tan(num1)\r\n\r\n elif op == \"cot\":\r\n result = math.cot(num1)\r\n\r\n elif op == \"factorial\":\r\n if num1 < 0:\r\n result = \"Error\"\r\n else:\r\n result = math.factorial(num1)\r\n \r\n elif op == \"exit\":\r\n break\r\n\r\n print(result)","repo_name":"FatemehJomhouri/Python-Assignments","sub_path":"Assignment 2/Assignment 2a.py","file_name":"Assignment 2a.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29163422494","text":"\"\"\"Define Payload Factory for tests\"\"\"\nimport json\nfrom django.test import Client\n\n\nclass PayloadFactory(Client):\n \"\"\"A factory for generating payloads\"\"\"\n\n TYPE = 'greeting'\n DEFAULT_ID = '1'\n DEFAULT_MESSAGE = 'An Arbitrary Message'\n\n def __init__(self, overrides={}):\n self.overrides = overrides\n\n def create_payload(self):\n return {\n 'data': {\n 'type': self._get_type(),\n 'attributes': {\n 'message': self._get_message()\n }\n }\n }\n\n def update_payload(self):\n return {\n 'data': {\n 'type': self._get_type(),\n 'id': self._get_id(),\n 'attributes': {\n 'message': self._get_message()\n }\n }\n }\n\n def _get_type(self):\n return self.TYPE\n\n def _get_id(self):\n if 'id' in self.overrides:\n return self.overrides['id']\n else:\n return self.DEFAULT_ID\n\n def _get_message(self):\n if 'message' in self.overrides:\n return self.overrides['message']\n else:\n return self.DEFAULT_MESSAGE\n\n","repo_name":"zhao-li/test-backend","sub_path":"greetings/tests/helpers/payload_factory.py","file_name":"payload_factory.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37100700584","text":"import yaml\nimport os\nimport numpy as np\nfrom scipy import interpolate\n\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSDurabilityPolicy, QoSHistoryPolicy, QoSReliabilityPolicy\nfrom rclpy.qos import QoSProfile\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom tf2_ros.buffer import Buffer\nfrom tf2_ros.transform_listener import TransformListener\nfrom std_msgs.msg import Float64MultiArray, String\nfrom geometry_msgs.msg import PointStamped\nfrom walker_msgs.msg import ForceStamped, StepStamped \n\nclass PartialLoads(Node):\n\n def __init__(self):\n super().__init__('partial_loads')\n self.get_logger().fatal(\"USE CPP VERSION! 
...\")\n \n self.declare_parameters(\n namespace='',\n parameters=[\n ('handle_calibration_file', os.path.join(get_package_share_directory('walker_loads'), \"config\", \"params.yaml\") ),\n ('left_loads_topic_name', '/left_loads'),\n ('right_loads_topic_name', '/right_hand_loads'),\n ('left_hand_loads_topic_name', '/left_hand_loads'),\n ('right_hand_loads_topic_name', '/right_loads'), \n ('left_handle_topic_name', '/left_handle'),\n ('right_handle_topic_name', '/right_handle'),\n ('left_steps_topic_name', '/detected_step_left'),\n ('right_steps_topic_name', '/detected_step_right'),\n ('user_desc_topic_name', '/user_desc'),\n ('period', 0.05),\n ('speed_delta', 0.05)\n ]\n )\n\n\n self.left_loads_topic_name = self.get_parameter('left_loads_topic_name').value\n self.right_loads_topic_name = self.get_parameter('right_loads_topic_name').value\n self.left_hand_loads_topic_name = self.get_parameter('left_hand_loads_topic_name').value\n self.right_hand_loads_topic_name = self.get_parameter('right_hand_loads_topic_name').value\n self.left_handle_topic_name = self.get_parameter('left_handle_topic_name').value\n self.right_handle_topic_name= self.get_parameter('right_handle_topic_name').value\n self.left_steps_topic_name= self.get_parameter('left_steps_topic_name').value\n self.right_steps_topic_name= self.get_parameter('right_steps_topic_name').value\n self.user_desc_topic_name= self.get_parameter('user_desc_topic_name').value\n self.period = self.get_parameter('period').value\n self.speed_delta = self.get_parameter('speed_delta').value\n self.handle_calibration_file = self.get_parameter('handle_calibration_file').value\n\n with open(self.handle_calibration_file, 'r') as stream:\n try:\n parsed_yaml=yaml.safe_load(stream)\n self.weight_points = parsed_yaml['weight_points'] \n self.left_handle_points = parsed_yaml['left_handle_points']\n self.right_handle_points = parsed_yaml['right_handle_points']\n except yaml.YAMLError as exc:\n print(exc)\n \n self.fr = interpolate.interp1d(self.right_handle_points, self.weight_points, fill_value = \"extrapolate\")\n self.fl = interpolate.interp1d(self.left_handle_points, self.weight_points, fill_value = \"extrapolate\")\n\n # stored data\n self.left_handle_msg = None\n self.right_handle_msg = None\n self.left_step_msg = None\n self.right_step_msg = None\n\n self.speed_diff = 0\n self.right_speed = None\n self.left_speed = None\n\n self.weight = 100\n self.right_handle_weight = 0\n self.left_handle_weight = 0\n self.leg_load = 0\n\n self.new_data_available = False\n self.first_data_ready = False\n \n # ROS stuff\n self.left_load_pub = self.create_publisher(StepStamped, self.left_loads_topic_name, 10)\n self.right_load_pub = self.create_publisher(StepStamped, self.right_loads_topic_name, 10)\n self.left_hand_load_pub = self.create_publisher(StepStamped, self.left_hand_loads_topic_name, 10)\n self.right_hand_load_pub = self.create_publisher(StepStamped, self.right_hand_loads_topic_name, 10)\n\n self.left_handle_sub = self.create_subscription(ForceStamped, self.left_handle_topic_name, self.handle_lc, 10) \n self.right_handle_sub = self.create_subscription(ForceStamped, self.right_handle_topic_name, self.handle_lc, 10) \n\n self.left_steps_sub = self.create_subscription(StepStamped, self.left_steps_topic_name, self.l_steps_lc, 10) \n self.right_steps_sub = self.create_subscription(StepStamped, self.right_steps_topic_name, self.r_steps_lc, 10)\n\n\n # mimics a ROS1 'latched' topic\n latched_profile = QoSProfile(depth=1)\n latched_profile.history = 
QoSHistoryPolicy.KEEP_LAST\n latched_profile.reliability = QoSReliabilityPolicy.RELIABLE\n latched_profile.durability = QoSDurabilityPolicy.TRANSIENT_LOCAL\n\n self.user_desc_sub = self.create_subscription(String, self.user_desc_topic_name, self.user_desc_lc, latched_profile) \n \n self.tmr = self.create_timer(self.period, self.timer_callback)\n\n\n self.get_logger().info(\"load detector started\") \n\n def user_desc_lc(self, msg):\n user_fields = msg.data.split(':')\n if len(user_fields)>2:\n new_weight = int(user_fields[2])\n if (new_weight != self.weight):\n self.weight = new_weight\n self.new_data_available = True\n\n def l_steps_lc(self, msg):\n self.steps_lc(msg,0)\n\n def r_steps_lc(self, msg):\n self.steps_lc(msg,1)\n\n def handle_lc(self, msg):\n if ('right' in msg.header.frame_id ):\n self.right_handle_msg = msg \n self.right_handle_weight = self.fr(msg.force)\n self.new_data_available = True\n elif ('left' in msg.header.frame_id):\n self.left_handle_msg = msg\n self.left_handle_weight = self.fl(msg.force) \n self.new_data_available = True\n else:\n self.get_logger().error(\"Don't know about which handle are you talking [\" + msg.header.frame_id + \"]\") \n return \n\n def steps_lc(self, msg, id):\n if (id==1):\n self.right_step_msg = msg\n self.right_speed = self.right_step_msg.speed\n self.new_data_available = True\n\n hand_msg = self.right_handle_msg\n hand_msg.force = self.right_handle_weight\n self.right_hand_load_pub.publish(hand_msg)\n self.get_logger().error(\"handle published(r)]\") \n elif (id==0):\n self.left_step_msg = msg\n self.left_speed = self.left_step_msg.speed\n self.new_data_available = True\n\n hand_msg = self.left_handle_msg\n hand_msg.force = self.left_handle_weight\n self.left_hand_load_pub.publish(hand_msg)\n self.get_logger().error(\"handle published(r)]\") \n\n else:\n self.get_logger().error(\"Don't know about which step are you talking [\" + id + \"]\") \n return \n \n if ((self.left_step_msg is not None) and (self.right_step_msg is not None) ): \n speed_diff_x = self.left_speed.x - self.right_speed.x\n #speed_diff_y = self.left_speed.y - self.right_speed.y\n #speed_diff_z = self.left_speed.z - self.right_speed.z\n #self.speed_diff = pow(speed_diff_x * speed_diff_x + speed_diff_y * speed_diff_y + speed_diff_z * speed_diff_z , 0.5)\n self.speed_diff = speed_diff_x\n\n def timer_callback(self):\n if (self.new_data_available):\n self.new_data_available = False\n\n if (not self.first_data_ready ):\n self.first_data_ready = (self.left_handle_msg is not None) and (self.right_handle_msg is not None) and (self.left_step_msg is not None) and (self.right_step_msg is not None) \n\n if ( self.first_data_ready ):\n # amount of weight on legs\n self.leg_load = self.weight - self.left_handle_weight - self.right_handle_weight\n\n # assign weight to supporting leg...\n if (self.speed_diff>self.speed_delta):\n self.right_leg_load = self.leg_load\n self.left_leg_load = 0.0\n elif (self.speed_diff<-self.speed_delta):\n self.right_leg_load = 0.0\n self.left_leg_load = self.leg_load\n else:\n # Both leg supporting: We can't be sure about how much on each one!\n self.right_leg_load = 0.5 * self.leg_load\n self.left_leg_load = 0.5 * self.leg_load\n \n # Build msgs and publish\n # left\n msg = self.left_step_msg \n msg.load = self.left_leg_load\n self.left_load_pub.publish(msg)\n\n #right\n msg = self.right_step_msg \n msg.load = self.right_leg_load\n self.right_load_pub.publish(msg)\n\n #self.get_logger().warn(\"Weight distribution on legs L(\" + str(self.left_leg_load) + 
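The fr/fl calibration curves above come from scipy's interp1d with fill_value="extrapolate", mapping raw handle readings to weights. A self-contained sketch of that mapping (the calibration points here are made up; the real ones are read from the node's YAML parameter file):

from scipy import interpolate

handle_points = [0.0, 100.0, 200.0, 300.0]   # raw sensor readings (hypothetical)
weight_points = [0.0, 5.0, 11.0, 18.0]       # known loads in kg (hypothetical)

force_to_weight = interpolate.interp1d(handle_points, weight_points,
                                       fill_value="extrapolate")
print(float(force_to_weight(150.0)))  # 8.0, linear between the middle points
print(float(force_to_weight(400.0)))  # 25.0, extrapolated past the last point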
\") - R(\" + str(self.right_leg_load) + \")\")\n else: \n pass\n #self.get_logger().error(\"Not all data received yet ...\")\n else:\n pass\n #self.get_logger().error(\"Not all data received yet ...\")\n\ndef main(args=None):\n rclpy.init(args=args)\n\n minimal_subscriber = PartialLoads()\n\n rclpy.spin(minimal_subscriber)\n \n minimal_subscriber.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TaISLab/WalKit","sub_path":"walker_loads/scripts/partial_loads.py","file_name":"partial_loads.py","file_ext":"py","file_size_in_byte":9930,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"3315793534","text":"from django.template.loader import render_to_string\nfrom pico import miditags\n\n\n@miditags.register('embed')\nclass EmbedHandler(miditags.HandlerBase):\n def handle(self, url):\n return render_to_string(\n 'podcasts/embed_iframe.html',\n {\n 'url': url,\n 'height': 600\n }\n )\n","repo_name":"OriginPodcasts/pico","sub_path":"pico/podcasts/miditags.py","file_name":"miditags.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29689156280","text":"from beidaqingniao.oop.d01.book import Book\nfrom beidaqingniao.oop.d01.magazine import Magazine\n\n\nclass BookFactory:\n __new = None\n __inited = False\n\n def __new__(cls, *args, **kwargs):\n if cls.__new is None:\n cls.__new = object.__new__(cls)\n return cls.__new\n\n def __init__(self):\n if BookFactory.__inited:\n return\n BookFactory.__inited = True\n\n def product(self, type=\"Book\", title=\"Book\", publisher=\"NJUP\", price=10, period=4):\n if type == \"Book\":\n if title == \"Book\":\n return Book(title, publisher, price)\n return Book(title, publisher, price)\n elif type == \"Magazine\":\n if title == \"Book\":\n title = \"Magazine\"\n return Magazine(title, publisher, price, period)\n return Magazine(title, publisher, price, period)\n else:\n return None\n\n\nif __name__ == '__main__':\n d1 = BookFactory()\n print(d1.product(\"Book\", 2))\n d2 = BookFactory()\n print(d2.product(\"Magazine\", 1, 2, 3, 5))\n\n\n\n\n","repo_name":"sczhan/ss11","sub_path":"beidaqingniao/oop/d02/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28626912942","text":"import numpy as np\nimport pandas as pd\nimport glob\nimport os\nimport cv2\nfrom skimage import io\nimport scipy.misc as scm\nfrom sklearn.model_selection import KFold\n\nfrom utilities import *\n\ndef train_generator(batch_size, img_h, img_w, cur_val_fold=N_FOLDS - 1, validate=True):\n folds = prepare_folds()\n if validate:\n train_names = [name for fold in folds[0:cur_val_fold] + folds[cur_val_fold + 1:] for name in fold]\n else:\n train_names = [name for fold in folds for name in fold]\n print('CVF: ', cur_val_fold, ', n_train: ', len(train_names)) \n print(np.array(train_names))\n # img_mean, img_std = get_mean_std(cur_val_fold=cur_val_fold)\n while True:\n for start in range(0, len(train_names), batch_size):\n x_batch = []\n y_batch = []\n end = min(start + batch_size, len(train_names))\n names_train_batch = train_names[start:end]\n for name in names_train_batch:\n img = cv2.imread(os.path.join(TRAIN_DIR, '{}_RGB.tif'.format(name)))\n # img = (img - img_mean) / (img_std + EPS)\n mask = io.imread(os.path.join(TRAIN_DIR, '{}_GTI.tif'.format(name)))\n mask 
= np.array(mask != 0, np.uint8)\n \n mask_crop = [0]\n while (np.sum(mask_crop) < 10000):\n img_crop, mask_crop = random_crop(img, mask, img_h)\n\n img_crop, mask_crop = randomHorizontalFlip(img_crop, mask_crop, u=0.5)\n\n dump_imgs(img_crop, mask_crop, name, dirname='train_imgs', tile_id='')\n \n mask_crop = np.expand_dims(mask_crop, axis=2)\n x_batch.append(img_crop)\n y_batch.append(mask_crop)\n\n x_batch = np.array(x_batch, np.float32) / 255.0\n # x_batch = np.array(x_batch, np.float32)\n y_batch = np.array(y_batch, np.float32)\n yield x_batch, y_batch\n\ndef val_generator(batch_size, img_h, img_w, cur_val_fold=N_FOLDS - 1):\n folds = prepare_folds()\n val_names = [name for name in folds[cur_val_fold]]\n print('CVF: ', cur_val_fold)\n print(np.array(val_names))\n # img_mean, img_std = get_mean_std(cur_val_fold=cur_val_fold)\n while True:\n for start in range(0, len(val_names), batch_size):\n x_batch = []\n y_batch = []\n end = min(start + batch_size, len(val_names))\n names_valid_batch = val_names[start:end]\n for name in names_valid_batch:\n img = cv2.imread(os.path.join(TRAIN_DIR, '{}_RGB.tif'.format(name)))\n # img = (img - img_mean) / (img_std + EPS)\n mask = io.imread(os.path.join(TRAIN_DIR, '{}_GTI.tif'.format(name)))\n mask = np.array(mask != 0, np.uint8)\n\n img_crops, mask_crops = get_val_crops(img, mask, tile_size=img_w)\n\n for tile_id, zipped in enumerate(zip(img_crops, mask_crops)):\n cur_img, cur_mask = zipped\n cur_mask = np.expand_dims(cur_mask, axis=2)\n x_batch.append(cur_img)\n y_batch.append(cur_mask)\n\n dump_imgs(cur_img, cur_mask, name, dirname='val_imgs', tile_id=tile_id)\n\n if len(x_batch) >= batch_size:\n x_batch = np.array(x_batch, np.float32) / 255.0\n # x_batch = np.array(x_batch, np.float32)\n y_batch = np.array(y_batch, np.float32)\n \n # print('!!!!!!!!!!!!! 
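train_generator above keeps calling random_crop until the cropped mask has at least 10000 positive pixels, which biases training tiles toward ones that actually contain buildings. random_crop itself lives in the project's utilities module (not shown); a plausible minimal version of such a helper (a guess at its behavior, not the actual code):

import numpy as np

def random_crop(img, mask, size):
    # Cut the same square window out of the image and its mask.
    h, w = img.shape[:2]
    y = np.random.randint(0, h - size + 1)
    x = np.random.randint(0, w - size + 1)
    return img[y:y + size, x:x + size], mask[y:y + size, x:x + size]

img = np.zeros((256, 256, 3), np.uint8)
mask = np.ones((256, 256), np.uint8)
crop_img, crop_mask = random_crop(img, mask, 128)
print(crop_img.shape, crop_mask.shape)  # (128, 128, 3) (128, 128)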
YIELD: ', x_batch.shape, ' - ', y_batch.shape)\n yield x_batch, y_batch\n \n x_batch = []\n y_batch = []\n\n if len(x_batch) > 0:\n x_batch = np.array(x_batch, np.float32) / 255.0\n # x_batch = np.array(x_batch, np.float32)\n y_batch = np.array(y_batch, np.float32)\n\n yield x_batch, y_batch\n\ndef test_generator(batch_size, img_h, img_w, cur_val_fold=N_FOLDS - 1):\n imgs = glob.glob(os.path.join(TEST_DIR, '*_RGB.tif'))\n imgs = [img.split('_RGB')[0].split('\\\\')[1] for img in imgs]\n # print (len(imgs))\n # img_mean, img_std = get_mean_std(cur_val_fold=cur_val_fold)\n batch_names = []\n img_n = 0\n for i, name in enumerate(imgs):\n # print (i, name)\n img = cv2.imread(os.path.join(TEST_DIR, '{}_RGB.tif'.format(name)))\n # img = (img - img_mean) / (img_std + EPS)\n tiles = get_tiles(img, mask=None, tile_size=img_h)\n batch_names.append(name)\n\n for j in range(0, tiles.shape[0], batch_size):\n x_batch = tiles[j:j+batch_size]\n x_batch = np.array(x_batch, np.float32) / 255.0\n # x_batch = np.array(x_batch, np.float32)\n yield img_n, x_batch, batch_names\n batch_names = []\n\n img_n += 1\n","repo_name":"mifril/topcoder_urban3d","sub_path":"generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23476633373","text":"#!/usr/bin/env python3\n# -+-coding: utf-8 -+-\n\n\"\"\"\n\"\"\"\n\n#--------------------------------------------\n# Authors: Frank Boers \n#\n#-------------------------------------------- \n# Date: 13.12.18\n#-------------------------------------------- \n# License: BSD (3-clause)\n#--------------------------------------------\n# Updates\n#--------------------------------------------\n\nimport os,sys,copy\nfrom pubsub import pub\nfrom jumeg.base.template.jumeg_template import JuMEG_Template\nfrom jumeg.base.jumeg_logger import get_logger\n\nlogger = get_logger()\n\n__version__=\"2020.04.22.001\"\n\n__DEFAULT_EXPERIMENT_TEMPLATE__={\n\"info\":{\n \"time\":None,\n \"user\": None,\n \"version\": None\n },\n\"experiment\":{\n \"name\" : \"default\",\n #\"ids\" : [], not used jet\n \"scans\": [],\n \"stages\":[\"${JUMEG_PATH_MNE_IMPORT1}/exp\",\"${JUMEG_PATH_MNE_IMPORT2}/exp\",\"${JUMEG_PATH_LOCAL_DATA}/exp\"],\n \"bads_list\":[], #\"MEG 010\",\"MEG 142\",\"MEG 156\",\"RFM 011\"],\n \"segmentation\":{\n \"path\":{\n \"mrdata\" : \"mrdata\",\n \"dicom\" : \"mrdata/dicom\",\n \"freesurfer\" : \"mrdata/freesurfer\"\n }\n },\n \"path\":{\n \"mne\" : \"mne\",\n \"eeg\" : \"eeg\",\n \"mft\" : \"mft\",\n \"report\" : \"report\",\n \"doc\" : \"doc\",\n \"source\" : \"source\",\n \"stimuli\" : \"stimuli\"\n }\n },\n\n\"bti_export\": {\n \"bti_path\" : [\"${JUMEG_PATH_BTI_EXPORT}\",\"${JUMEG_PATH_LOCAL_DATA}/megdaw_data21\"],\n \"pdf_name\" : \"c,rfDC\",\n \"config_fname\" : \"config\",\n \"head_shape_fname\" : \"hs_file\",\n \"rotation_x\" : None,\n \"translation\" : None,\n \"ecg_ch\" : None,\n \"eog_ch\" : None,\n \"fif_extention\" : \"-raw.fif\",\n \"emptyroom\" : \"-empty.fif\",\n \"overwrite\" : False,\n \"fakesHS\" : False\n },\n}\n\n'''\nToDo : setup BIDS and PrePorc\n \"preprocessing\": {\n \"meeg_merger\": {},\n \"epocher\": {},\n \"suggest_bads\": {},\n \"noise_reducer\": {},\n \"artifact_rejection\": {},\n \"events\": {}\n }\n'''\n\n\nclass JuMEG_Template_Experiments(JuMEG_Template):\n \"\"\"\n class to work with \n overwrite _init(**kwargs) for you settings\n\n Example\n -------\n from jumeg.template.jumeg_template import 
JuMEG_Template_Experiments\n\n class JuMEG_ExpTemplate(JuMEG_Template_Experiments):\n def __init__(self,**kwargs):\n super().__init__()\n\n def update_from_kwargs(self,**kwargs):\n self.template_path = kwargs.get(\"template_path\",self.template_path)\n\n def _init(self,**kwargs):\n self.update_from_kwargs(**kwargs)\n\n TMP = JuMEG_ExpTemplate()\n print(TMP.template_path)\n\n \"\"\"\n def __init__ (self,**kwargs):\n super().__init__()\n self.template_path = os.getenv('JUMEG_PATH_TEMPLATE_EXPERIMENTS',self.template_path_default + '/jumeg_experiments')\n self.template_name = 'default'\n self.template_postfix = 'jumeg_experiment_template'\n self._init(**kwargs)\n\n def _init(self,**kwargs):\n pass\n \nclass JuMEG_ExpTemplate(JuMEG_Template_Experiments):\n def __init__(self,**kwargs):\n super().__init__()\n \n @property\n def data(self): return self.template_data['experiment']\n\n #--- tmp path default\n @property\n def template_path_default(self):\n return os.getenv(\"JUMEG_PATH_TEMPLATE_EXPERIMENTS\",os.getenv(\"JUMEG_PATH\") + '/data/templates/jumeg_experiments')\n\n @property\n def bti_data(self):\n return self.template_data['bti_export']\n\n @property\n def name(self ): return self.data.get(\"name\")\n @property\n def scans(self): return self.data.get(\"scans\")\n @property\n def bads(self): return self.data.get(\"bads_list\")\n\n @property\n def stages(self):return self.data.get(\"stages\")\n @stages.setter\n def stages(self,v):\n if isinstance(v,(list)):\n self.data['stages']=v\n else:\n self.data['stages'].append(v)\n \n @property\n def paths(self):return self.data.get('paths')\n\n @property\n def segmentation_paths(self):\n return self.data.get('paths',[])\n\n def update_from_kwargs( self, **kwargs ):\n self.template_path = kwargs.get(\"template_path\", self.template_path)\n self._pubsub_error_msg = kwargs.get(\"pubsub_error_msg\", \"MAIN_FRAME.MSG.ERROR\")\n\n def _init( self, **kwargs ):\n self.update_from_kwargs(**kwargs)\n\n def get_experiments(self,**kwargs):\n \"\"\"\n find experimnet template files in ${JUMEG_PATH}/templates/jumeg_experiment\n if no template file found:\n reset template data wit default parameter dict\n init experimnet name list with \n \n :param issorted: \n :return: experiment template name list\n \"\"\"\n exp = self.get_sorted_experiments(**kwargs)\n if exp:\n return exp\n self.template_data_reset()\n # print(self.template_name)\n return [ self.template_name ]\n \n def get_sorted_experiments(self,issorted=True,default_on_top=True):\n \"\"\"\n :param issorted sort the list \n :param default_on_top list first \n \n Result\n -------\n sorted list of scans\n \"\"\"\n exps = self.template_update_name_list()\n \n if issorted:\n exps = sorted( exps )\n if default_on_top:\n dname= __DEFAULT_EXPERIMENT_TEMPLATE__[\"experiment\"][\"name\"]\n try:\n i = exps.index( dname )\n except:\n exps.append(dname)\n i = len(exps)-1\n \n a = exps[0]\n exps[0]= exps[i]\n exps[i]= a\n \n return exps\n\n def get_sorted_scans(self,issorted=True):\n \"\"\"\n :param issorted sort the list \n Result\n -------\n sorted list of scans\n \"\"\"\n try:\n if isinstance( self.scans, (list)):\n if issorted:\n return sorted( self.scans )\n return self.scans\n else:\n return [ self.scans ]\n\n except:\n return []\n\n def template_check_experiment_data(self):\n \"\"\"\n check's template for structure e.g.:\n \"experiment\":{\n \"name\" : experiment name,\n \"scans\" :[ list of scans],\n \"stages\":[ list of start dirs]\n }\n Result:\n -------\n True/False\n \"\"\"\n error_msg=[]\n if not 
self.template_data:\n error_msg.append(\"No template data found : \" + self.template_full_filename)\n elif not self.template_data.get('experiment',None):\n error_msg.append(\"No structure found : \"+self.template_name)\n else:\n exp = self.template_data.get('experiment',None)\n for k in[\"scans\",\"stages\"]:\n if not exp.get(k):\n error_msg.append(\"No <{}> found\".format(k))\n if error_msg:\n error_msg.insert(0,\"Checking Experiment Template\")\n error_msg.append(\"Module : \"+sys._getframe(1).f_code.co_name )\n error_msg.append(\"Function: check_experiment_template_data\")\n logger.error(\"\\n\".join(error_msg))\n pub.sendMessage(self._pubsub_error_msg,data=error_msg)\n return False\n return True\n\n def template_update(self,exp,path=None,verbose=False):\n \"\"\" update a JuMEG template\n\n Parameters\n ----------\n exp : name of experiment\n path : \n verbose:\n\n Result\n ------\n template data dict\n \"\"\"\n \n self.template_name = exp\n if path:\n self.template_path = path\n self.verbose = verbose\n if not exp == \"default\":\n if not self.template_update_file(exit_on_error=False):\n return False\n if self.template_check_experiment_data():\n return self.template_data\n return False\n\n def template_data_reset(self):\n \"\"\"\n reset template to parameter defined on to of this python script\n eg if no experiment template exists\n :return:\n \"\"\"\n self.template_data = {}\n self.template_data = copy.deepcopy(__DEFAULT_EXPERIMENT_TEMPLATE__)\n self.template_name = self.template_data[\"experiment\"][\"name\"]\n \n \n\nexperiment = JuMEG_Template_Experiments()","repo_name":"jdammers/jumeg","sub_path":"jumeg/base/template/jumeg_template_experiments.py","file_name":"jumeg_template_experiments.py","file_ext":"py","file_size_in_byte":9137,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"37317054688","text":"\nnames = ['juan', 'malala', 'rosa']\nprint('rosa' in names)\n\nfor name in names:\n print(name)\n \nnumeros = [x for x in range(1, 11)]\nprint(numeros)\n\ncuadrados = [x**2 for x in range(1,11)]\nprint(cuadrados)\n\npares = [x for x in range(1,21) if x % 2 == 0]\nprint(pares)\n\nnames = ['Roberto', 'Alberto', 'Ricardo', 'Rafa','Mario']\nfor name in names:\n if name == 'Alberto':\n continue\n print('Hola' ' '+ name)","repo_name":"MiguelAngel3378/CURSO_ANALISIS_BD2","sub_path":"m2_python/Estructuras_control/for.py","file_name":"for.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8213632651","text":"#!/usr/bin/env python\n\"\"\"Workload testing.\n\nTest heavy transfer workloads over a multiple associations to look for slowdowns\nand memory leaks.\n\nResults:\n17-03-2018:\n 1 association of 400 datasets 68.34 s\n 400 associations of 1 dataset 200.7 s\n17-03-2018: 6fe488a\n 1 association of 400 datasets 79.3 s\n 400 associations of 1 dataset 200.5 s\n\"\"\"\n\nimport logging\nimport os\nimport time\n\nfrom pydicom import read_file\n\n#from dummy_c_scp import DummyStorageSCP\nfrom pynetdicom3 import AE\nfrom pynetdicom3.sop_class import CTImageStorage, RTImageStorage\n\nLOGGER = logging.getLogger('pynetdicom3')\nLOGGER.setLevel(logging.CRITICAL)\n\nTEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')\nBIG_DATASET = read_file(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm')) # 2.1 MB\nDATASET = read_file(os.path.join(TEST_DS_DIR, 'CTImageStorage.dcm')) # 39 kB\n\n#scp = 
DummyStorageSCP(11112)\n#scp.start()\n\nno_runs = 400\nds_per_run = 1\nresults = []\n\nae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])\nprint('Starting...')\nfor ii in range(no_runs):\n start_time = time.time()\n assoc = ae.associate('localhost', 11112)\n for jj in range(ds_per_run):\n if assoc.is_established:\n assoc.send_c_store(DATASET)\n assoc.release()\n end_time = time.time()\n delta_time = end_time - start_time\n results.append(delta_time)\n\ntotal_time = 0.0\nfor result in results:\n total_time += result\n\nprint('Total time: %.2f seconds' %total_time)\n\n#scp.stop()\n","repo_name":"zdalih/wolfpacs","sub_path":"pnd3/pynetdicom3/tests/heavy_workload_assoc.py","file_name":"heavy_workload_assoc.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73588459209","text":"from constants import *\r\nfrom board_and_rules import Board, GameRules, PlayGame\r\nfrom simulation import Simulation\r\nimport math\r\n\r\nclass Node(GameRules):\r\n\r\n def __init__(self, gamestate, parent_move, ind, parent_ind, wins=0, visits=0):\r\n super().__init__()\r\n\r\n self.gamestate = gamestate.deepcopy_self()\r\n self.wins = wins\r\n self.visits = visits\r\n self.parent_move = parent_move[:]\r\n self.children = []\r\n self.children_moves = self.get_legal_moves(self.gamestate, self.parent_move)\r\n self.player = [\"o\", \"x\"][parent_move[1] == \"o\"]\r\n self.ind = ind\r\n self.parent_ind = parent_ind\r\n\r\nclass MCTS(Simulation):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.tree = []\r\n\r\n def argmax(self, lst):\r\n runmax = lst[0]\r\n maxind = 0\r\n for i, el in enumerate(lst):\r\n if el > runmax:\r\n runmax = el\r\n maxind = i\r\n \r\n return maxind\r\n \r\n def create_root_node_(self, gamestate, parent_move):\r\n self.tree.append(Node(gamestate, parent_move, 0, None))\r\n\r\n def UCB1(self, node):\r\n ret = [0]*len(node.children)\r\n\r\n for i, tree_ind in enumerate(node.children):\r\n child = self.tree[tree_ind]\r\n if child.visits == 0:\r\n ret[i] = float('inf')\r\n continue\r\n\r\n if node.player == \"x\":\r\n wr = child.wins / child.visits\r\n else:\r\n wr = 1 - (child.wins / child.visits)\r\n \r\n ret[i] = wr + C_PARAMETER * math.sqrt(math.log(node.visits) / child.visits)\r\n\r\n return ret\r\n\r\n def selection(self, node):\r\n while True:\r\n if len(node.children_moves):\r\n return node\r\n if len(node.children):\r\n i = self.argmax(self.UCB1(node))\r\n node = self.tree[node.children[i]]\r\n continue\r\n break\r\n return node\r\n\r\n def expansion_(self, node):\r\n if self.is_terminal(node.gamestate) != '-':\r\n return\r\n if len(node.children_moves):\r\n move = node.children_moves.pop()\r\n gamestate = node.gamestate.deepcopy_self()\r\n self.make_move_(gamestate, move)\r\n child = Node(gamestate, move, len(self.tree), node.ind)\r\n self.tree.append(child)\r\n node.children.append(child.ind)\r\n\r\n def simulation(self, node, num_sims=1):\r\n return self.play_n_randoms(num_sims, node.gamestate, node.parent_move)\r\n\r\n def backpropagation_(self, node, result):\r\n node.wins += result[\"x\"]\r\n node.visits += sum(result.values())\r\n\r\n if node.parent_ind is None:\r\n return\r\n \r\n self.backpropagation_(self.tree[node.parent_ind], result)\r\n\r\n def print_evaluation(self):\r\n pass\r\n\r\n def search(self, gamestate, prev_move, num_iters, num_sims=1, hide_evaluations=True):\r\n self.tree = []\r\n self.create_root_node_(gamestate, prev_move)\r\n root = 
self.tree[0]\r\n\r\n        for _ in range(num_iters):\r\n            node = self.selection(root)\r\n            self.expansion_(node)\r\n            if node.children != []:\r\n                node = self.tree[node.children[self.argmax(self.UCB1(node))]]\r\n            result = self.simulation(node, num_sims)\r\n            self.backpropagation_(node, result)\r\n\r\n        wght_visits = [self.tree[x].visits for x in root.children]\r\n\r\n        if not hide_evaluations:\r\n            self.print_evaluation()\r\n\r\n        best_child = self.tree[root.children[self.argmax(wght_visits)]]\r\n        return best_child.parent_move\r\n\r\nif __name__ == '__main__':\r\n    run = MCTS()\r\n    printer = PlayGame()\r\n\r\n    gamestate = Board()\r\n    parent_move = [40, \"o\"]\r\n    run.make_move_(gamestate, parent_move)\r\n    printer.set_gamestate_(gamestate)\r\n\r\n    print(run.search(gamestate, parent_move, 10000))\r\n    for node in run.tree[0:9]:\r\n        printer.set_gamestate_(node.gamestate)\r\n        printer.print_board()\r\n        print(node.parent_move)","repo_name":"BevandaIvan/uttt-mcts","sub_path":"mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"251517882","text":"import csv\nimport json\nfrom datetime import datetime\n\"\"\"\n# JPeZlL4HJfU,HcyeaHxtVpg,UNK,UNK,UNK,UNK,UNK,UNK,UNK,UNK\n\n\"\"\"\nclass VideoData:\n    def __init__(self, id, description, thumbnail, title, genre, noID):\n        self.id = id\n        self.genre = genre\n        self.thumbnail = thumbnail\n        self.description = description\n        self.title = title\n        self.noID = noID\n    # 01/06/2022 18:05:14\n\n    def toJSON(self):\n        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=2)\n\nVideoArray = []\ndateArray = []\n\nswitcher = {\n    \"Kuliner\": 2,\n    \"Homecare\": 3,\n    \"Healthcare\": 4,\n    \"Tutorial\": 1,\n    \"Ecommerce\": 5,\n    \"Marketing\": 7,\n    \"Review\": 6\n}\n\nwith open('data.csv', encoding=\"utf-8\") as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=';')\n    line_count = 0\n    id = 1\n    for row in csv_reader:\n        dateArray.append(datetime.strptime(row[5], \"%d/%m/%Y %H:%M:%S\"))\n        tempVidData = VideoData(row[0], row[1], row[2], row[3], row[4], id)\n        id += 1\n        VideoArray.append(tempVidData)\n\ndef getGenre(id):\n    return VideoArray[id-1].genre\n\ndef getID(id):\n    return VideoArray[id-1].id\n\ndef genreSplice(genre):\n    x = genre.split(\"|\")\n    return x\n\ndef genretoInt(array):\n    returnValue = []\n\n    for element in array:\n        returnValue.append(switcher.get(element,0))\n\n    return returnValue\n    \n\ndef genreConcat(array):\n    returnValue = []\n    for element in array:\n        returnValue.extend(genreSplice(getGenre(element)))\n    len_genre = (len(returnValue))\n    print(len_genre)\n\n    \n    if len_genre < 20:\n        for i in range(len_genre,20):\n            returnValue.append(\"UNK\")\n    print(returnValue)\n    return genretoInt(returnValue[:20])\n\ndef getDetails(array):\n    datas = []\n    # str = \"[\"\n    # i = 0\n    # for element in array:\n    #     if i != 0:\n    #         str += \",\"\n    #     str += VideoArray[element-1].toJSON()\n    #     i += 1\n    # str += \"]\" \n    for e in array:\n        datas.append(VideoArray[e-1])\n        # print(VideoArray[e-1].toJSON())\n    return datas\n\ndef sortArray(array):\n    for i in range(len(array)):\n        for j in range(0, len(array) - i - 1):\n            if dateArray[array[j] - 1]< dateArray[array[j + 1] - 1]:\n                temp = array[j]\n                array[j] = array[j + 1]\n                array[j + 1] = temp\n\ndef genreFilter(genre):\n    returnValue = []\n    for element in VideoArray:\n        x = genreSplice(element.genre)\n        genrePresent = False\n        for i in x:\n            if switcher.get(i, 0) == genre:\n                genrePresent = True\n        if genrePresent:\n            
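# element.noID is the 1-based row number assigned while reading the CSV; collect it so sortArray can order by upload date\n            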
returnValue.append(element.noID)\n\n    sortArray(returnValue) \n    return returnValue\n\ndef getDates(array):\n    # VideoData does not store an upload time; the parsed dates live in dateArray\n    for element in array:\n        print(dateArray[element - 1])\n\n# print(genreFilter(4))","repo_name":"BUMI-Team/BUMI-Machine-Learning","sub_path":"API/BUMI-API/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"12288530636","text":"from py2neo import Graph,Node,Relationship\nimport nltk \nfrom nltk.corpus import wordnet as wn\nimport random\nimport requests\nimport urllib.request\nfrom generators.Q2FetchData import * \n\ndef retrieveSubWords(word, levelDown, numberWords, graph): # retrieve hyponyms down to a level \n    arrayWords = []\n\n    query = ''' Match (t1:Term)<-[:ISA*1..'''+str(levelDown)+''']-(t2:Term) Where t1.name =\"'''+word+'''\" Return t2''' # fetch hyponyms for a level \n    nodes = graph.run(query)\n\n    for node in nodes:\n        arrayWords.append(node[0]['name'])\n\n    random.shuffle(arrayWords)\n    arrayWords = arrayWords[:numberWords] # take n words of hyponyms \n    return arrayWords\n\n\ndef createQuestion3Words(word, levelDown, numberWordsTotal):\n    typeW = typeWord(word, connect())\n    numberSubWords = random.randint(1,numberWordsTotal)\n    wordsCategory = retrieveSubWords(word,levelDown,numberSubWords, connect()) # correct words \n    randomWords = chooseRandomWords(fetchAllWords(),numberWordsTotal-len(wordsCategory), typeW) # random words \n    dictWords = {}\n    dictWords[word] = wordsCategory\n    dictWords['random'] = randomWords\n    return dictWords\n\n#print(createQuestion3Words(\"apple\", 2, 12))\n","repo_name":"olexstet/BSP6ProjectFlaskReactNeo4j","sub_path":"source_codes/flask-backend/generators/Q3FetchData.py","file_name":"Q3FetchData.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30988897032","text":"#! python3\n# renameDates.py - rename American dates MM-DD-YYYY to European dates DD-MM-YYYY\n\nimport shutil, os, re\n\n# Create a regex that matches American-style dates\ndatePattern = re.compile(r\"\"\" \n    ^(.*?) 
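# note: re.VERBOSE mode ignores this whitespace and treats '#' to end-of-line as a comment, which allows the annotations below\n    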
# all text before the date\n    ((0|1)?\\d)- # matches 2 digits with 0 or 1 first digit \n    ((0|1|2|3)?\\d)- # matches 2 digits with 0-3 first digit \n    ((19|20)\\d\\d) # matches 4 digits with 19-20 first 2 digits\n    (.*?)$ # all text after the date \n    \"\"\",re.VERBOSE)\n\n# Loop over files in the working directory\nfor amerFilename in os.listdir():\n    mo = datePattern.search(amerFilename)\n\n# Skip filenames that do not contain a date\n    if mo == None:\n        continue\n\n# Get the different parts of the filename\n    beforePart = mo.group(1)\n    monthPart = mo.group(2)\n    dayPart = mo.group(4)\n    yearPart = mo.group(6)\n    afterPart = mo.group(8) \n# Form the European-style filename\n    euroFilename = beforePart + dayPart + \"-\" + monthPart + \"-\" + yearPart + afterPart\n\n# Get the full absolute filepaths\n    absWorkingDir = os.path.abspath('.')\n    amerFilename = os.path.join(absWorkingDir, amerFilename)\n    euroFilename = os.path.join(absWorkingDir, euroFilename)\n\n# Rename the files.\n    print(f'Renaming \"{amerFilename}\" to \"{euroFilename}\"...')\n    #shutil.move(amerFilename, euroFilename) # uncomment after testing","repo_name":"Sinsnickers/Automatetheboringstuff","sub_path":"Chapter10/renameDates.py","file_name":"renameDates.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31309475000","text":"from __future__ import annotations\n\nimport argparse\nimport asyncio\nimport concurrent.futures\nimport dataclasses\nimport datetime\nimport json\nimport shlex\nimport subprocess\nfrom typing import Sequence, Set\n\nimport websockets.legacy.protocol as ws_protocol\nimport websockets.server as ws_server\n\nfrom flask_livetw.config import Config\nfrom flask_livetw.util import Term, pkgprint, set_default_env\n\nFLASK_BASE_EXCLUDE_PATTERNS = (\"*/**/dev.py\",)\n\nLR_CONNECTIONS: Set[ws_server.WebSocketServerProtocol] = set()\n\n\nasync def handle_connection(websocket: ws_server.WebSocketServerProtocol):\n    LR_CONNECTIONS.add(websocket)\n    try:\n        await websocket.wait_closed()\n    finally:\n        LR_CONNECTIONS.remove(websocket)\n\n\nasync def live_reload_server(host: str, port: int):\n    async with ws_server.serve(handle_connection, host, port) as server:\n        pkgprint(\n            f\"Live reload {Term.G}ready{Term.END} on {Term.C}ws://{host}:{Term.BOLD}{port}{Term.END}\"\n        )\n\n        await server.wait_closed()\n\n    pkgprint(f\"Live reload {Term.G}closed{Term.END}\")\n\n\ndef handle_tailwind_output(process: subprocess.Popen[bytes]):\n    if process.stdout is None:\n        return\n\n    for line in iter(process.stdout.readline, b\"\"):\n        if process.poll() is not None:\n            break\n\n        if line.startswith(b\"Done\"):\n            ws_protocol.broadcast(\n                LR_CONNECTIONS,\n                json.dumps(\n                    {\n                        \"type\": \"TRIGGER_FULL_RELOAD\",\n                        \"data\": datetime.datetime.now().isoformat(),\n                    }\n                ),\n            )\n\n        print(f'{Term.C}[twcss]{Term.END} {line.decode(\"utf-8\")}', end=\"\")\n\n\ndef handle_flask_output(process: subprocess.Popen[bytes]):\n    if process.stdout is None:\n        return\n\n    for line in iter(process.stdout.readline, b\"\"):\n        if process.poll() is not None:\n            break\n\n        print(f'{Term.G}[flask]{Term.END} {line.decode(\"utf-8\")}', end=\"\")\n\n\n@dataclasses.dataclass\nclass DevConfig:\n    no_live_reload: bool\n    live_reload_host: str\n    live_reload_port: int\n\n    no_flask: bool\n    flask_host: str | None\n    flask_port: int | None\n    flask_mode: str\n    flask_exclude_patterns: Sequence[str] | None\n\n    no_tailwind: bool\n    tailwind_input: str | None\n    tailwind_output: str\n    tailwind_minify: bool\n\n\nasync def dev_server(config: 
DevConfig):\n def live_reload_coroutine():\n if config.no_live_reload or config.no_tailwind:\n return None\n\n host = config.live_reload_host\n port = config.live_reload_port\n\n return live_reload_server(host, port)\n\n def tailwind_cli_executor(\n loop: asyncio.AbstractEventLoop,\n pool: concurrent.futures.ThreadPoolExecutor,\n ):\n if config.no_tailwind:\n return None\n\n input_arg = \"\"\n if config.tailwind_input is not None:\n input_arg = f\"-i {config.tailwind_input}\"\n\n output_arg = f\"-o {config.tailwind_output}\"\n\n minify_arg = \"--minify\" if config.tailwind_minify else \"\"\n\n cmd = f\"tailwindcss --watch {input_arg} {output_arg} {minify_arg}\"\n\n process = subprocess.Popen(\n shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n\n return loop.run_in_executor(pool, handle_tailwind_output, process)\n\n def flask_server_executor(\n loop: asyncio.AbstractEventLoop,\n pool: concurrent.futures.ThreadPoolExecutor,\n ):\n if config.no_flask:\n return None\n\n host_arg = \"\"\n if config.flask_host is not None:\n host_arg = f\"--host {config.flask_host}\"\n\n port_arg = \"\"\n if config.flask_port is not None:\n port_arg = f\"--port {config.flask_port}\"\n\n debug_arg = \"--debug\" if config.flask_mode == \"debug\" else \"\"\n\n exclude_patterns: list[str] = list(FLASK_BASE_EXCLUDE_PATTERNS)\n if config.flask_exclude_patterns is not None:\n exclude_patterns.extend(config.flask_exclude_patterns)\n\n exclude_patterns_arg = (\n f\"--exclude-patterns {';'.join(exclude_patterns)}\"\n )\n\n cmd = f\"\\\n flask run {host_arg} {port_arg} {debug_arg} {exclude_patterns_arg}\"\n\n process = subprocess.Popen(\n shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n\n return loop.run_in_executor(pool, handle_flask_output, process)\n\n loop = asyncio.get_running_loop()\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=3) as pool:\n maybe_future_like = (\n live_reload_coroutine(),\n tailwind_cli_executor(loop, pool),\n flask_server_executor(loop, pool),\n )\n\n futures = (\n future for future in maybe_future_like if future is not None\n )\n\n pkgprint(\"Starting dev server...\")\n\n _ = await asyncio.gather(*futures, return_exceptions=True)\n\n\ndef dev(cli_args: argparse.Namespace) -> int:\n set_default_env(\"LIVETW_DEV\", \"TRUE\")\n\n project_config = Config.try_from_pyproject_toml()\n if project_config is None:\n pkgprint(\n \"Project config not found. 
Dev server not started.\",\n )\n pkgprint(\n \"Try checking your current working directory or running 'flask-livetw init' to configure the project.\"\n )\n return 1\n\n no_live_reload = cli_args.no_live_reload\n live_reload_host = (\n cli_args.live_reload_host or project_config.live_reload_host\n )\n live_reload_port = (\n cli_args.live_reload_port or project_config.live_reload_port\n )\n\n no_flask = cli_args.no_flask\n flask_host = cli_args.flask_host or project_config.flask_host\n flask_port = cli_args.flask_port or project_config.flask_port\n flask_mode = cli_args.flask_mode\n flask_exclude_patterns = (\n cli_args.flask_exclude_patterns\n or project_config.flask_exclude_patterns\n )\n\n no_tailwind = cli_args.no_tailwind\n tailwind_input = (\n cli_args.tailwind_input or project_config.full_globalcss_file\n )\n tailwind_output = (\n cli_args.tailwind_output or project_config.full_tailwind_file\n )\n tailwind_minify = cli_args.tailwind_minify\n\n dev_config = DevConfig(\n no_live_reload=no_live_reload,\n live_reload_host=live_reload_host,\n live_reload_port=live_reload_port,\n no_flask=no_flask,\n flask_host=flask_host,\n flask_port=flask_port,\n flask_mode=flask_mode,\n flask_exclude_patterns=flask_exclude_patterns,\n no_tailwind=no_tailwind,\n tailwind_input=tailwind_input,\n tailwind_output=tailwind_output,\n tailwind_minify=tailwind_minify,\n )\n\n asyncio.run(dev_server(dev_config))\n return 0\n\n\ndef add_command_args(parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--no-live-reload\",\n dest=\"no_live_reload\",\n action=\"store_true\",\n default=False,\n help=\"Disable live reload server.\",\n )\n parser.add_argument(\n \"-lrh\",\n \"--live-reload-host\",\n dest=\"live_reload_host\",\n type=str,\n help=\"Hostname for live reload server.\",\n )\n parser.add_argument(\n \"-lrp\",\n \"--live-reload-port\",\n dest=\"live_reload_port\",\n type=int,\n help=\"Port for live reload server.\",\n )\n\n parser.add_argument(\n \"--no-flask\",\n dest=\"no_flask\",\n action=\"store_true\",\n default=False,\n help=\"Disable flask server.\",\n )\n parser.add_argument(\n \"-fh\",\n \"--flask-host\",\n dest=\"flask_host\",\n type=str,\n help=\"Hostname for flask server.\",\n )\n parser.add_argument(\n \"-fp\",\n \"--flask-port\",\n dest=\"flask_port\",\n type=int,\n help=\"Port for flask server.\",\n )\n parser.add_argument(\n \"-fm\",\n \"--flask-mode\",\n dest=\"flask_mode\",\n choices=(\"debug\", \"no-debug\"),\n default=\"debug\",\n help=\"If debug mode is enabled, the flask server will be started with --debug flag. Default: debug.\",\n )\n parser.add_argument(\n \"--flask-exclude-patterns\",\n dest=\"flask_exclude_patterns\",\n type=str,\n nargs=\"+\",\n help=\"File exclude patterns for flask server. Base: */**/dev.py\",\n )\n\n parser.add_argument(\n \"--no-tailwind\",\n dest=\"no_tailwind\",\n action=\"store_true\",\n default=False,\n help=\"Disable tailwindcss generation. If tailwindcss is disabled the live reload server will not be started.\",\n )\n parser.add_argument(\n \"-ti\",\n \"--tailwind-input\",\n dest=\"tailwind_input\",\n type=str,\n help=\"Input path for global css file. 
Includes glob patterns.\",\n    )\n    parser.add_argument(\n        \"-to\",\n        \"--tailwind-output\",\n        dest=\"tailwind_output\",\n        type=str,\n        help=\"Output path for the generated css file.\",\n    )\n    parser.add_argument(\n        \"-tm\",\n        \"--tailwind-minify\",\n        dest=\"tailwind_minify\",\n        action=\"store_true\",\n        default=False,\n        help=\"Enables minification of the generated css file.\",\n    )\n\n\ndef add_command(\n    subparser: argparse._SubParsersAction[argparse.ArgumentParser],\n) -> None:\n    parser = subparser.add_parser(\n        name=\"dev\",\n        description=\"\"\"\n        Extended dev mode for flask apps.\n        By default runs the flask app in debug mode,\n        tailwindcss in watch mode and live reload server.\n        \"\"\",\n        help=\"Run a development server.\",\n        allow_abbrev=True,\n        formatter_class=argparse.MetavarTypeHelpFormatter,\n    )\n\n    add_command_args(parser)\n\n\ndef main(args: Sequence[str] | None = None) -> int:\n    parser = argparse.ArgumentParser(\n        description=\"\"\"\n        Extended dev mode for flask apps.\n        By default runs the flask app in debug mode,\n        tailwindcss in watch mode and live reload server.\n        \"\"\",\n        allow_abbrev=True,\n        formatter_class=argparse.MetavarTypeHelpFormatter,\n    )\n\n    add_command_args(parser)\n\n    parsed_args = parser.parse_args(args)\n\n    return dev(parsed_args)\n\n\nif __name__ == \"__main__\":\n    raise SystemExit(main())\n","repo_name":"J-Josu/flask-livetw","sub_path":"flask_livetw/dev_server.py","file_name":"dev_server.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73413392967","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreating a module to call on different machine learning models.\r\nIn the creation stage... will likely be split into separate modules of their own.\r\n\r\nWill also have a data cleaning module.\r\n\r\nIf available, data visualization will be provided.\r\n\r\nThis will prompt the user to fill in information about the data, then it will\r\nask the user which type of analysis should be performed.\r\n\r\nInformation about the data requested:\r\n    - Data ready for analysis (Y / N)\r\n    - Columns of data\r\n    - Columns of dependent data\r\n    - Column(s) of independent data\r\n\r\nHints will be available about which type of predictive analytics platform to choose from.\r\n\r\nThe modules that will be called will have all of the options available... in layman's terms\r\n\r\n\r\n\r\nThere are nine main sections to call from:\r\n    1. Regression\r\n        a. Decision Tree Regression\r\n        b. Multiple Linear Regression\r\n        c. Polynomial Regression\r\n        d. Random Forest Regression\r\n        e. Support Vector Regression\r\n    2. Classification\r\n        a. Decision Tree Classification\r\n        b. K Nearest Neighbors Classification (KNN)\r\n        c. Kernel SVM Classification\r\n        d. Logistic Regression Classification\r\n        e. Naive Bayes Classification\r\n        f. Random Forest Classification\r\n        g. Support Vector Machine Classification\r\n    3. Clustering\r\n        a. K Means Clustering\r\n        b. Hierarchical Clustering\r\n    4. Association Rule Learning\r\n        a. Apriori\r\n        b. Eclat\r\n    5. Reinforcement Learning\r\n        a. Upper Confidence Bound\r\n        b. Thompson Sampling\r\n    6. Natural Language Processing\r\n        a. Natural Language Processing\r\n    7. 
Deep Learning\r\n        a. Artificial Neural Networks (ANN)\r\n        b. Convolutional Neural Networks (CNN)\r\n    8. Dimensionality Reduction\r\n        a. (PCA)\r\n        b. (LDA)\r\n        c. (Kernel PCA)\r\n    9. Model Selection & Boosting\r\n        a. Model Selection - Grid Search\r\n        b. Model Selection - K Fold Cross Validation\r\n        c. Boosting - Xg Boost\r\n\r\n- Klein\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef decision_tree_regression(path, size_select):\r\n    dataset = pd.read_csv(path)\r\n    X = dataset.iloc[:, :-1].values\r\n    y = dataset.iloc[:, -1].values\r\n\r\n    # Splitting the dataset into the Training set and Test set\r\n    from sklearn.model_selection import train_test_split\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = size_select, random_state = 0)\r\n\r\n    # Training the Decision Tree Regression model on the Training set\r\n    from sklearn.tree import DecisionTreeRegressor\r\n    regressor = DecisionTreeRegressor(random_state = 0)\r\n    regressor.fit(X_train, y_train)\r\n\r\n    # Predicting the Test set results\r\n    y_pred = regressor.predict(X_test)\r\n    np.set_printoptions(precision=2)\r\n    print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n\r\n    # Evaluating the Model Performance\r\n    from sklearn.metrics import r2_score\r\n    score = r2_score(y_test, y_pred)\r\n    print(f'The r2 score is {score}.')\r\n    \r\n    return regressor\r\n\r\n\r\ndef multiple_linear_regression(path, size_select):\r\n    dataset = pd.read_csv(path)\r\n    X = dataset.iloc[:, :-1].values\r\n    y = dataset.iloc[:, -1].values\r\n\r\n    # Splitting the dataset into the Training set and Test set\r\n    from sklearn.model_selection import train_test_split\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = size_select, random_state = 0)\r\n\r\n    # Training the Multiple Linear Regression model on the Training set\r\n    from sklearn.linear_model import LinearRegression\r\n    regressor = LinearRegression()\r\n    regressor.fit(X_train, y_train)\r\n    \r\n    # Predicting the Test set results\r\n    y_pred = regressor.predict(X_test)\r\n    np.set_printoptions(precision=2)\r\n    print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n    \r\n    # Evaluating the Model Performance\r\n    from sklearn.metrics import r2_score\r\n    score = r2_score(y_test, y_pred)\r\n    print(f'The r2 score is {score}.')\r\n    \r\n    return regressor\r\n\r\n\r\ndef polynomial_regression(path, size_select):\r\n    dataset = pd.read_csv(path)\r\n    X = dataset.iloc[:, :-1].values\r\n    y = dataset.iloc[:, -1].values\r\n    \r\n    degree_select = int(input('Type polynomial degree to use\\n'))\r\n\r\n    # Splitting the dataset into the Training set and Test set\r\n    from sklearn.model_selection import train_test_split\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = size_select, random_state = 0)\r\n\r\n    # Training the Polynomial Regression model on the Training set\r\n    from sklearn.preprocessing import PolynomialFeatures\r\n    from sklearn.linear_model import LinearRegression\r\n    poly_reg = PolynomialFeatures(degree = degree_select)\r\n    X_poly = poly_reg.fit_transform(X_train)\r\n    regressor = LinearRegression()\r\n    regressor.fit(X_poly, y_train)\r\n    \r\n    # Predicting the Test set results\r\n    y_pred = regressor.predict(poly_reg.transform(X_test))\r\n    np.set_printoptions(precision=2)\r\n    print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n    \r\n    # Evaluating the Model Performance\r\n    from sklearn.metrics import 
r2_score\r\n    score = r2_score(y_test, y_pred)\r\n    print(f'The r2 score is {score}.')\r\n    \r\n    return regressor\r\n\r\n\r\ndef random_forest_regression(path, size_select):\r\n    dataset = pd.read_csv(path)\r\n    X = dataset.iloc[:, :-1].values\r\n    y = dataset.iloc[:, -1].values\r\n    \r\n    estimators_select = int(input('Type number of estimators to use\\n'))\r\n\r\n    # Splitting the dataset into the Training set and Test set\r\n    from sklearn.model_selection import train_test_split\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = size_select, random_state = 0)\r\n\r\n    # Training the Random Forest Regression model on the Training set\r\n    from sklearn.ensemble import RandomForestRegressor\r\n    regressor = RandomForestRegressor(n_estimators = estimators_select, random_state = 0)\r\n    regressor.fit(X_train, y_train)\r\n    \r\n    # Predicting the Test set results\r\n    y_pred = regressor.predict(X_test)\r\n    np.set_printoptions(precision=2)\r\n    print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n    \r\n    # Evaluating the Model Performance\r\n    from sklearn.metrics import r2_score\r\n    score = r2_score(y_test, y_pred)\r\n    print(f'The r2 score is {score}.')\r\n    \r\n    return regressor\r\n\r\n\r\ndef support_vector_regression(path, size_select):\r\n    dataset = pd.read_csv(path)\r\n    X = dataset.iloc[:, :-1].values\r\n    y = dataset.iloc[:, -1].values\r\n    y = y.reshape(len(y), 1)\r\n    \r\n    # Splitting the dataset into the Training set and Test set\r\n    from sklearn.model_selection import train_test_split\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = size_select, random_state = 0)\r\n\r\n    # Feature Scaling\r\n    from sklearn.preprocessing import StandardScaler\r\n    sc_X = StandardScaler()\r\n    sc_y = StandardScaler()\r\n    X_train = sc_X.fit_transform(X_train)\r\n    y_train = sc_y.fit_transform(y_train)\r\n    \r\n    # Training the SVR model on the Training set\r\n    from sklearn.svm import SVR\r\n    regressor = SVR(kernel = 'rbf')\r\n    regressor.fit(X_train, y_train.ravel())\r\n    \r\n    # Predicting the Test set results\r\n    y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(X_test)).reshape(-1,1))\r\n    np.set_printoptions(precision=2)\r\n    print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n    \r\n    # Evaluating the Model Performance\r\n    from sklearn.metrics import r2_score\r\n    score = r2_score(y_test, y_pred)\r\n    print(f'The r2 score is {score}.')\r\n    \r\n    return regressor\r\n    \r\n    \r\ndef regression_wizard(path, size_select):\r\n    regression_select = 0\r\n    while regression_select not in ['a', 'b', 'c', 'd', 'e', 'f']:\r\n        regression_select = input('Which regression would you like to use? (Type Letter):\\n a. Decision Tree Regression \\n b. Multiple Linear Regression \\n c. Polynomial Regression \\n d. Random Forest Regression \\n e. Support Vector Regression \\n f. 
EXIT \\n')\r\n    \r\n    model = None  # so EXIT returns None instead of raising a NameError\r\n    if regression_select == 'a':\r\n        print('Decision Tree Regression Selected')\r\n        model = decision_tree_regression(path, size_select)\r\n    elif regression_select == 'b':\r\n        print('Multiple Linear Regression Selected')\r\n        model = multiple_linear_regression(path, size_select)\r\n    elif regression_select == 'c':\r\n        print('Polynomial Regression Selected')\r\n        model = polynomial_regression(path, size_select)\r\n        # have them pick degrees in the function\r\n    elif regression_select == 'd':\r\n        print('Random Forest Regression Selected')\r\n        model = random_forest_regression(path, size_select)\r\n        # have them pick number of estimators in the function\r\n    elif regression_select == 'e':\r\n        print('Support Vector Regression Selected')\r\n        model = support_vector_regression(path, size_select)\r\n    else:\r\n        print('Leaving Model Wizard')\r\n    \r\n    return model\r\n    \r\n    \r\ndef model_wizard(path, size_select):\r\n    import numpy as np\r\n    import pandas as pd\r\n    import matplotlib.pyplot as plt\r\n    \r\n    model_select = 0\r\n    while model_select not in [1, 2, 3, 4]:\r\n        model_select = int(input('Which type of model would you like to use? (Type Number):\\n 1. Regression \\n 2. Classification \\n 3. Clustering \\n 4. EXIT \\n'))\r\n    \r\n    model = None  # so EXIT returns None instead of raising a NameError\r\n    if model_select == 1:\r\n        print('Regression Selected')\r\n        model = regression_wizard(path, size_select)\r\n    elif model_select == 2:\r\n        print('Classification Selected')\r\n        # model = classification_wizard()\r\n    elif model_select == 3:\r\n        print('Clustering Selected')\r\n        # model = clustering_wizard()\r\n    else:\r\n        print('Leaving Model Wizard')\r\n    \r\n    return model","repo_name":"clickityKlein/Machine_Learning_Templates","sub_path":"machine_learning.py","file_name":"machine_learning.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30691730042","text":"def display_board(board):\r\n    list1 = [\"| \", board[7], \" | \", board[8], \" | \", board[9],\" |\"]\r\n    list3 = [\"| \",board[4], \" | \",board[5], \" | \",board[6],\" |\"]\r\n    list5 = [\"| \",board[1], \" | \", board[2], \" | \",board[3],\" |\"]\r\n    line1 = ''.join(list1)\r\n    line2 = \"------------------\"\r\n    line3 = ''.join(list3)\r\n    line4 = \"------------------\"\r\n    line5 = ''.join(list5)\r\n    board2 = line1 + \"\\n\" + line2 + \"\\n\" + line3 + \"\\n\" + line4 + \"\\n\" + line5\r\n    print(board2)\r\n\r\n\r\ndef players_symbols():\r\n    player1choice=\"\"\r\n    while(player1choice!=\"X\" and player1choice!=\"O\"):  # 'and', otherwise the condition is always true and the loop never ends\r\n        player1choice=input(\"Player 1 Do you want to be X or O\").upper()\r\n    if (player1choice == \"X\"):\r\n        return (\"X\",\"O\")\r\n\r\n    else:\r\n        return (\"O\",\"X\")\r\n\r\n\r\n\r\ndef insert_symbol(board,symbol,position):\r\n    board[position]=symbol\r\n\r\n\r\n\r\ndef win_check(board,symbol):# check rows, columns and diagonals\r\n    return((board[7]==symbol and board[8]==symbol and board[9]==symbol)or\r\n (board[4] == symbol and board[5] == symbol and board[6] == symbol)or\r\n (board[1] == symbol and board[2] == symbol and board[3] == symbol)or\r\n\r\n (board[7] == symbol and board[4] == symbol and board[1] == symbol)or\r\n (board[8] == symbol and board[5] == symbol and board[2] == symbol)or\r\n (board[9] == symbol and board[6] == symbol and board[3] == symbol)or\r\n\r\n (board[7] == symbol and board[5] == symbol and board[3] == symbol)or\r\n (board[9] == symbol and board[5] == symbol and board[1] == symbol)\r\n\r\n )\r\n\r\n\r\n\r\ndef space_check(board,position):\r\n    return board[position]== 
\"\"\r\n\r\n\r\n\r\ndef full_board_check(board):\r\n boardfull=True\r\n for i in range(9):\r\n if (board[i]==\"\"):\r\n boardfull=False\r\n return boardfull\r\n\r\ndef player_choice(board):\r\n position=0\r\n while ((position not in range(1,10)) or (not space_check(board, position))):\r\n position = int(input('Choose your next position: (1-9) '))\r\n return position\r\n\r\ndef replay():\r\n return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y')\r\n\r\n#\r\n# test_board = ['#','','O','X','O','X','O','X','O','X']\r\n# display_board(test_board)\r\n#\r\n# print(player_choice(test_board))\r\n\r\ndef main():\r\n firstboard=\"| 7 | 8 | 9 |\"+\"\\n\"+\"----------------\"+\"\\n\"+\"| 4 | 5 | 6 |\"+\"\\n\"+\"----------------\"+\"\\n\"+\"| 1 | 2 | 3 |\"\r\n print('Welcome to Tic Tac Toe!')\r\n while True:\r\n playboard = [\"#\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]\r\n playerssymbols=players_symbols()\r\n player1symbol=playerssymbols[0]\r\n player2symbol=playerssymbols[1]\r\n playersready=input(\"Are you ready to play? Enter(Yes or No)\").lower().startswith('y')\r\n print(\"Player 1 goes first.\")\r\n print(firstboard)\r\n for i in range(9):\r\n if (i%2==0):\r\n insert_symbol(playboard,player1symbol,player_choice(playboard))\r\n\r\n elif (i%2==1):\r\n insert_symbol(playboard, player2symbol, player_choice(playboard))\r\n\r\n\r\n display_board(playboard)\r\n wincheck1=win_check(playboard, player1symbol)\r\n wincheck2=win_check(playboard, player2symbol)\r\n if(wincheck1):\r\n print(\"Player 1 has won!\")\r\n break\r\n elif (wincheck2):\r\n print(\"Player 2 has won!\")\r\n break\r\n elif(full_board_check(playboard)):\r\n print(\"The game has been a Tie!\")\r\n break\r\n if not replay():\r\n break\r\n\r\n\r\n\r\nmain()\r\n\r\n","repo_name":"AlyAssem/Tic-Tac-Toe-game","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8256600017","text":"from django.test import TestCase\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom rest_serializers.serializers import ManyToManySerializer\nfrom rest_serializers.validators import LazyUniqueTogetherValidator\nfrom tests.models import Child, Parent\n\n\nclass ChildSerializer(serializers.ModelSerializer):\n \"\"\"\n a child serializer that we do not want to pass parent id in the data\n we have to use the lazy validator here which will skip validation\n till attempting to save\n \"\"\"\n\n class Meta:\n model = Child\n fields = (\"id\", \"name\")\n validators = [\n LazyUniqueTogetherValidator(\n queryset=model.objects.all(), fields=(\"name\", \"parent\")\n )\n ]\n\n\nclass ParentSerializer(ManyToManySerializer):\n children = ChildSerializer(many=True)\n\n class Meta:\n model = Parent\n fields = (\"id\", \"name\", \"children\")\n\n\nclass UniqueTogetherTests(TestCase):\n def test_can_validate_and_save(self):\n data = {\"name\": \"Freddy Star\", \"children\": [{\"name\": \"Bob\"}, {\"name\": \"Sally\"}]}\n serializer = ParentSerializer(data=data)\n\n self.assertTrue(serializer.is_valid())\n\n serializer.save()\n\n # correct rows exist\n self.assertEqual(Parent.objects.count(), 1)\n self.assertEqual(Child.objects.count(), 2)\n\n def test_invalidates_correctly(self):\n data = {\n \"name\": \"Freddy Star\",\n \"children\": [{\"name\": \"Sally\"}, {\"name\": \"Sally\"}],\n }\n serializer = ParentSerializer(data=data)\n\n # 
initially valid as parent on child is missing\n        self.assertTrue(serializer.is_valid())\n\n        # however saving it will raise the correct validation error\n        with self.assertRaises(ValidationError) as cm:\n            serializer.save()\n\n        self.assertEqual(\n            cm.exception.detail,\n            {\"non_field_errors\": [\"The fields name, parent must make a unique set.\"]},\n        )\n\n        # nothing should save\n        self.assertEqual(Parent.objects.count(), 0)\n        self.assertEqual(Child.objects.count(), 0)\n\n    def test_adding_to_existing_validates_and_saves(self):\n        parent = Parent.objects.create(name=\"Freddy Star\")\n        child = Child.objects.create(name=\"Bob\", parent=parent)\n\n        data = {\n            \"id\": parent.pk,\n            \"name\": parent.name,\n            \"children\": [{\"id\": child.pk, \"name\": child.name}, {\"name\": \"Sally\"}],\n        }\n\n        serializer = ParentSerializer(instance=parent, data=data)\n\n        self.assertTrue(serializer.is_valid())\n\n        serializer.save()\n\n        # correct rows exist\n        self.assertEqual(Parent.objects.count(), 1)\n        self.assertEqual(Child.objects.count(), 2)\n\n    def test_adding_invalid_to_existing_raises_correct_validation_error(self):\n        parent = Parent.objects.create(name=\"Freddy Star\")\n        child = Child.objects.create(name=\"Bob\", parent=parent)\n\n        data = {\n            \"id\": parent.pk,\n            \"name\": parent.name,\n            \"children\": [{\"id\": child.pk, \"name\": child.name}, {\"name\": child.name}],\n        }\n\n        serializer = ParentSerializer(instance=parent, data=data)\n\n        # initially valid as parent on child is missing\n        self.assertTrue(serializer.is_valid())\n\n        # however saving it will raise the correct validation error\n        with self.assertRaises(ValidationError) as cm:\n            serializer.save()\n\n        self.assertEqual(\n            cm.exception.detail,\n            {\"non_field_errors\": [\"The fields name, parent must make a unique set.\"]},\n        )\n\n        # correct rows exist\n        self.assertEqual(Parent.objects.count(), 1)\n        self.assertEqual(Child.objects.count(), 1)\n","repo_name":"AccentDesign/Accent_RestSerializers","sub_path":"tests/test_unique_together_validator.py","file_name":"test_unique_together_validator.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11065874523","text":"from odoo import fields, models, api, _\nfrom odoo.exceptions import UserError\n\n\nclass UserRoles(models.Model):\n    _name = 'user.roles'\n    _description = 'User Roles'\n    manager_id = fields.Many2many('res.users', string='User')\n    line_user_ids = fields.One2many('user.roles.line', 'role_id', string='Role user line')\n    name = fields.Char(string='Name')\n    groups_user = fields.Many2many('res.groups', string='GroupUser')\n\n    @api.model\n    def create(self, vals):\n        check_user = self.check_user_in_user_role()\n        user_role = super(UserRoles, self).create(vals)\n\n        # check whether the user already belongs to another user role record\n        for all_user_role in check_user:\n            for user_pice in user_role.manager_id:\n                if all_user_role == user_pice:\n                    raise UserError('User already exists in other User role records')\n        if user_role.manager_id and user_role.groups_user:\n            for group in user_role.groups_user:\n                for user in user_role:\n                    group.sudo().update({\n                        'users': [(4, user.id)]\n                    })\n        return user_role\n\n    def write(self, vals):\n        self = self.sudo()\n        user_old = self.update_roles()\n\n        group_old = self.groups_old()\n        user_line_old = self.del_user()\n        res = super(UserRoles, self).write(vals)\n\n        # removing a group from the user role automatically removes the users from that group\n        if self.groups_user:\n            group_unlink = group_old - self.groups_user\n            for user_del in 
self.update_roles():\r\n                group_unlink.sudo().update({\r\n                    'users': [(3, user_del.id)]\r\n                })\r\n\r\n        # adding/removing a user on the user role lines automatically adds/removes them from the groups\r\n        if self.line_user_ids.user_id:\r\n            user_line_unlink = user_line_old - self.line_user_ids.user_id\r\n        else:\r\n            user_line_unlink = user_line_old\r\n        for group in self.groups_user:\r\n            for list_user_line in user_line_unlink:\r\n                group.sudo().update({\r\n                    'users': [(3, list_user_line.id)]\r\n                })\r\n        for group in self.groups_user:\r\n            for user_add in self.line_user_ids.user_id:\r\n                if user_add.independent:\r\n                    raise UserError(_(\"Cannot add the user to groups\"))\r\n                if group.name == \"#1 . Admin User Roles\":\r\n                    continue\r\n                group.sudo().update({\r\n                    'users': [(4, user_add.id)]\r\n                })\r\n\r\n        # adding/removing a user allowed to assign tasks automatically adds/removes that user from the groups\r\n        if self.manager_id or self.groups_user:\r\n\r\n            user_unlink = user_old - self.manager_id\r\n            if len(user_unlink) > 0:\r\n                for group in self.groups_user:\r\n                    for list_user in user_unlink:\r\n                        list_user.set_user_role = False\r\n                        group.sudo().update({\r\n                            'users': [(3, list_user.id)]\r\n                        })\r\n            for group in self.groups_user:\r\n                for user in self.manager_id:\r\n                    user.set_user_role = True\r\n                    if user.independent:\r\n                        raise UserError(_(\"Cannot add the user to groups\"))\r\n                    else:\r\n                        group.sudo().update({\r\n                            'users': [(4, user.id)]\r\n                        })\r\n        return res\r\n\r\n    # helper: check which users already belong to a user role\r\n\r\n    def check_user_in_user_role(self):\r\n        list_user_check = []\r\n        user_role = self.env['user.roles'].search([])\r\n        for rec in user_role:\r\n            for result in rec.manager_id:\r\n                list_user_check.append(result)\r\n        return list_user_check\r\n\r\n    def groups_old(self):\r\n        return self.groups_user\r\n\r\n    def update_roles(self):\r\n        return self.manager_id\r\n\r\n    def del_user(self):\r\n        return self.line_user_ids.user_id\r\n","repo_name":"sagarpise/datn","sub_path":"user_roles/models/user_roles.py","file_name":"user_roles.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23939138521","text":"#!/usr/bin/env python3\nimport requests\nimport json\n\ndef geocode(address):\n    parameters = {'address': address, 'key': ''}\n    base = 'http://restapi.amap.com/v3/geocode/geo'\n    response = requests.get(base, parameters)\n    answer = response.json()\n    print(address + \" coordinates:\", answer['geocodes'][0]['location'])\n    result = answer['geocodes'][0]['location']\n    json_str = json.dumps(result)\n    fileObject = open('result.json', 'a')\n    fileObject.write(json_str+'\\n')\n    fileObject.close()\n\n\nif __name__=='__main__':\n    json_data = \"\"\" \"\"\"\n    data = json.loads(json_data)\n    for value in data['addressAll']:\n        address = value['address']+value['name']\n        geocode(address)\n\n\n","repo_name":"XCtwelve/Test","sub_path":"高德地图地理编码.py","file_name":"高德地图地理编码.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16952280569","text":"#!/usr/bin/env python\n\nfrom __future__ import division\n\nfrom numpy import array, argsort, zeros\n\n__author__ = \"Sam Way\"\n__copyright__ = \"Copyright 2013, The American Gut Project\"\n__credits__ = [\"Sam Way\"]\n__license__ = \"BSD\"\n__version__ = \"unversioned\"\n__maintainer__ = \"Sam Way\"\n__email__ = \"samuel.way@colorado.edu\"\n\n\ndef get_filtered_taxa_summary(mapping_file, taxa_summary_file,\n                              metadata_category, metadata_value,\n                              top_n_taxa=7, select_taxa=None):\n    \"\"\" Get a simplified taxonomy table.\n\n    Inputs:\n    mapping_file - Input mapping file (sample ids match taxa 
file)\n taxa_summary_file - Taxonomy summary file\n metadata_category - Used to select specific samples from the taxa file\n metadata_value - Value used to select specific samples\n from the taxa file\n top_n_taxa - If taxonomy groups aren't specified use the\n top N most abundant\n select_taxa - List of desired taxonomic groups\n\n Outputs:\n filtered_sample_ids - selected sample ids\n taxa_labels - taxonomic labels (including \"Other\")\n collapsed_taxa_table - simplified taxonomy table\n \"\"\"\n\n mapping_fp = open(mapping_file, 'rU')\n mapping_dict, _ = parse_mapping_file_to_dict(mapping_fp)\n taxa_fp = open(taxa_summary_file, 'rU')\n sample_ids, taxa_ids, taxa_table = parse_taxa_summary_table(taxa_fp)\n taxa_ids = [taxa_id.split('__')[-1] for taxa_id in taxa_ids]\n\n selected_ids = set([k for k, v in mapping_dict.iteritems()\n if v[metadata_category] == metadata_value])\n\n if not selected_ids:\n raise ValueError(\"No sample ids match metadata_value='%s'\"\n \" in metadata_category='%s'\" %\n (metadata_value, metadata_category))\n\n sample_id_indices = [i for i, sample_id in enumerate(sample_ids)\n if sample_id in selected_ids]\n filtered_taxa_table = taxa_table[:, sample_id_indices]\n filtered_sample_ids = [sample_ids[idx] for idx in sample_id_indices]\n\n if select_taxa is None:\n # If select_taxa is None, take the top N most abundant\n totals = filtered_taxa_table.sum(axis=1)\n taxa_indices = argsort(-totals)\n\n if top_n_taxa > len(taxa_indices):\n raise ValueError(\"Number of taxa to select exceeds \"\n \"actual count\")\n\n top_taxa = taxa_indices[:top_n_taxa]\n other_taxa = taxa_indices[top_n_taxa:]\n taxa_labels = [taxa_ids[idx] for idx in top_taxa]\n else:\n # List of taxa was supplied, use those\n top_taxa = [taxa_ids.index(x) for x in select_taxa]\n other_taxa = [t for t in xrange(len(taxa_ids)) if t not in top_taxa]\n taxa_labels = select_taxa\n\n taxa_labels.append('Other')\n N = len(taxa_labels) # Number of classes/labels\n\n # Sort samples by most_abundant_taxa\n sort_sample_indices = argsort(-filtered_taxa_table[top_taxa[0], :])\n filtered_taxa_table = filtered_taxa_table[:, sort_sample_indices]\n filtered_sample_ids = [filtered_sample_ids[idx] for idx\n in sort_sample_indices]\n\n # Collapse \"Others\" rows into single row\n collapsed_taxa_table = zeros((N, filtered_taxa_table.shape[1]))\n collapsed_taxa_table[:-1, :] = filtered_taxa_table[top_taxa, :]\n collapsed_taxa_table[-1, :] = \\\n filtered_taxa_table[other_taxa, :].sum(axis=0)\n total = collapsed_taxa_table.sum(axis=0)\n collapsed_taxa_table = collapsed_taxa_table / total\n\n return filtered_sample_ids, taxa_labels, collapsed_taxa_table\n\n\ndef parse_taxa_summary_table(taxa_summary, cast_as=float):\n \"\"\" Parse a taxa summary table\n\n Inputs:\n taxa_summary - (Open) taxa_summary file.\n cast_as - variable type for table elements.\n\n Outputs:\n 3-element tuple of (sample_ids, otu_ids,\n matrix of OTUs(rows) by samples(cols))\n \"\"\"\n sample_ids = []\n taxa_ids = []\n taxa_table = []\n\n header_line = taxa_summary.readline()\n sample_ids = header_line.strip().split('\\t')[1:]\n num_samples = len(sample_ids)\n\n for line in taxa_summary:\n line = line.strip()\n if not line:\n continue\n line_pieces = line.split('\\t')\n if len(line_pieces[1:]) != num_samples:\n raise ValueError(\"Error in taxa summary file - \"\n \"number of values does not \"\n \"match the number of samples\")\n # Cast row as cast_as elements and append row to table\n taxa_table.append(array(map(cast_as, line_pieces[1:])))\n 
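# keep the taxon label (first column) aligned with the row of counts just appended to taxa_table\n        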
taxa_ids.append(line_pieces[0])\n\n    return sample_ids, taxa_ids, array(taxa_table)\n\n\ndef parse_mapping_file_to_dict(mapping_file):\n    \"\"\" Takes an open mapping file and parses it to a dictionary structure \"\"\"\n\n    metadata_dict = {}\n    metadata_categories = []\n    comments = []\n\n    for line in mapping_file:\n        line = line.strip()\n        if not line:\n            continue\n\n        if line.startswith('#'):\n            line = line[1:]\n            if not metadata_categories:\n                metadata_categories = line.split('\\t')[1:]\n                num_categories = len(metadata_categories)\n            else:\n                comments.append(line)\n        else:\n            line_pieces = line.split('\\t')\n            if len(line_pieces[1:]) != num_categories:\n                raise ValueError(\"Error in mapping file - \"\n                                 \"number of metadata values does not \"\n                                 \"match the number of metadata categories\")\n\n            sample_id = line_pieces[0]\n            metadata_values = line_pieces[1:]\n            metadata_dict[sample_id] = {key: value for key, value in\n                                        zip(metadata_categories,\n                                            metadata_values)}\n\n    return metadata_dict, comments\n","repo_name":"biocore/American-Gut","sub_path":"americangut/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"16"} +{"seq_id":"36109188201","text":"# My first ATM machine program.\nimport random\n\nprint(''' \n ***************************************************** \n *             WELCOME TO FIRSTBIT BANK              *\n *****************************************************\n ''')\n \nprint(\"\\nPlease insert your ATM card\")\ncreation =\" ACCOUNT CREATION \"\nprint(creation.center(100,\"*\"))\nname = input(\"Enter your name: \")\npin = int(input(\"Enter a pin you want to set:\"))\naccount_number = random.randint(0000000000,9999999999)\nprint(\"\\nCongratulations! Account created successfully......\")\n\nbalance = 0\n\nprint('''\n ****************************\n * 1. Account details       *\n * 2. Balance enquiry       *\n * 3. Deposit cash          *\n * 4. Withdraw cash         * \n * 5. Set new pin           * \n * 6. 
Exit                  * \n ****************************\n ''') \nch = 0\nwhile (ch != 6):\n    ch = int(input(\"\\nEnter the choice you want to do: \"))\n    if (ch == 1):\n        print(\"Account name:\",name)\n        print(\"Account number:\",account_number)\n        print(\"Available balance:\",balance)\n    \n    elif (ch == 2):\n        print(\"Total balance is Rs.\",balance)\n\n    elif (ch == 3):\n        amount = float(input(\"\\nEnter the amount of money you want to deposit:Rs.\"))\n        if (amount > 0):\n            balance = balance + amount\n            print(\"Rs.\",amount,\"is deposited successfully\")\n            print(\"Available balance is Rs.\",balance)\n\n    elif(ch == 4):\n        amount = float(input(\"\\nEnter the amount of money you want to withdraw:Rs.\"))\n        if (amount > balance):\n            print(\"You don't have sufficient balance to make this withdrawal\")\n        else:\n            balance = balance - amount\n            print(\"Rs.\",amount,\"is withdrawn successfully\")\n            print(\"Available balance is Rs.\",balance)\n\n    elif (ch == 5):\n        otp = random.randint(1111,9999)\n        print(\"\\nOne Time Password(OTP):\",otp)\n        i = 1\n        while (i <= 3):\n            otp_enter = int(input(\"\\nPlease enter OTP here:\"))\n            i+=1\n            if (otp == otp_enter):\n                new_pin = int(input(\"\\nEnter your new pin:\"))\n                print(\"\\nYour new pin is set successfully.\")\n                break\n            else:\n                print(\"\\nPlease check your OTP.\")\n                print(\"Retry after some time\")\n    \n    \n    elif (ch == 6):\n        s = \" PRINT RECEIPT \"\n        print(s.center(100,\"*\"))\n\n        print(\"Account name:\",name)\n        print(\"Account number:\",account_number)\n        print(\"Available balance:\",balance)\n\n        s1 = \" THANK YOU FOR CHOOSING US AS YOUR BANK \"\n        print(s1.center(100,\"*\"))\n    else:\n        print(\"\\nPlease enter a correct value shown\")\n\n","repo_name":"omkar5252/mini_project_atm","sub_path":"atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35367625047","text":"\"\"\"Define the GraphEncoder class.\"\"\"\n\nfrom typing import Tuple\n\nimport torch\n\nfrom meshnets.modules.mlp import MLP\n\n\nclass GraphEncoder(torch.nn.Module):\n    \"\"\"Encoder for graphs.\n    \n    Encode graphs to latent graphs by applying an MLP encoder to the\n    node features and a second MLP encoder to the edge features.\n    \"\"\"\n\n    def __init__(self, node_feats_size, edge_feats_size, latent_size,\n                 num_mlp_layers):\n        \"\"\"Initialize the node features encoder and the edge features encoder\n        given the features sizes, latent size and number of MLP layers.\"\"\"\n        super().__init__()\n\n        node_encoder_widths = [node_feats_size\n                              ] + (num_mlp_layers + 1) * [latent_size]\n        self.node_encoder = MLP(node_encoder_widths, layer_norm=True)\n\n        edge_encoder_widths = [edge_feats_size\n                              ] + (num_mlp_layers + 1) * [latent_size]\n        self.edge_encoder = MLP(edge_encoder_widths, layer_norm=True)\n\n    def forward(self, x: torch.Tensor,\n                edge_attr: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"Encode the node and edge features of graphs by applying\n        the corresponding MLP encoder to each feature.\n        \n        Return the encoded features.\"\"\"\n\n        x = self.node_encoder(x)\n        edge_attr = self.edge_encoder(edge_attr)\n\n        return x, edge_attr\n","repo_name":"inductiva/meshnets","sub_path":"meshnets/modules/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"3585611293","text":"def vigenere_encrypt(plaintext, keyword):\n    \"\"\"\n    Encrypts plaintext using the Vigenere cipher with 
the provided keyword.\n\n    Args:\n        plaintext (str): The plaintext to be encrypted (all uppercase letters, no spaces).\n        keyword (str): The keyword to be used for encryption (all uppercase letters).\n\n    Returns:\n        str: The encrypted ciphertext.\n    \"\"\"\n    ciphertext = []\n    keyword = keyword.upper()\n\n    for i, char in enumerate(plaintext):\n        # Calculate the shift value based on the corresponding letter in the keyword\n        shift = ord(keyword[i % len(keyword)]) - ord('A')\n\n        # Encrypt the current character using the Vigenere cipher algorithm\n        encrypted_char = chr((ord(char) - ord('A') + shift) % 26 + ord('A'))\n\n        ciphertext.append(encrypted_char)\n\n    return ''.join(ciphertext)\n\n\ndef get_valid_input(prompt, validator_func):\n    \"\"\"\n    Get user input and validate it using the provided validator function.\n\n    Args:\n        prompt (str): The prompt message to display to the user.\n        validator_func (function): The function used to validate the user input.\n\n    Returns:\n        str: The user input that passed the validation.\n    \"\"\"\n    while True:\n        user_input = input(prompt).strip().upper()\n        if validator_func(user_input):\n            return user_input\n        print(\"Invalid input. Please try again.\")\n\n\ndef is_valid_uppercase(text):\n    \"\"\"\n    Check if the input text contains only uppercase letters.\n\n    Args:\n        text (str): The input text to be validated.\n\n    Returns:\n        bool: True if the input text contains only uppercase letters, False otherwise.\n    \"\"\"\n    return text.isalpha() and text.isupper()\n\n\ndef main():\n    \"\"\"\n    Main function to take user input and perform Vigenere encryption.\n    \"\"\"\n    print(\"=== Vigenere Cipher Encryption Program ===\")\n    \n    plaintext_prompt = \"Enter the plaintext (all uppercase letters, no spaces): \"\n    keyword_prompt = \"Enter the keyword (all uppercase letters): \"\n\n    plaintext = get_valid_input(plaintext_prompt, is_valid_uppercase)\n    keyword = get_valid_input(keyword_prompt, is_valid_uppercase)\n\n    ciphertext = vigenere_encrypt(plaintext, keyword)\n    print(\"Ciphertext:\", ciphertext)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Steinwx/Vignere","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3744660237","text":"# from typing import List\n#\n#\n# \"\"\"\n# Derive the carpet size by computing the brown tile count from the yellow area\n# width * height = yellow\n# 2width + 2height + 4 = brown\n# \"\"\"\n#\n#\n# def solution(brown: int, yellow: int) -> List[int]:\n#     # candidate heights for the yellow rectangle\n#     for height in range(1, yellow + 1):\n#         print(height)\n#         if yellow % height == 0:\n#             width = yellow // height\n#             if 2 * (width + height) + 4 == brown:\n#                 return [width + 2, height + 2]\n#     return []\n\n\nfrom typing import List\nimport timeit\n\n\"\"\"\nDerive the carpet size by computing the brown tile count from the yellow area\nwidth * height = yellow\n2 * (width + height) + 4 = brown\n\"\"\"\n\n\n# def solution(brown: int, yellow: int) -> List[int]:\n#     for height in range(1, yellow + 1):\n#         width = yellow / height\n#         if 2 * (width + height) + 4 == brown:\n#             return [width + 2, height + 2]\n\n\ndef solution(brown: 
int, yellow: int) -> List[int]:\n    for height in range(1, yellow + 1):\n        # width must be a natural number, so there must be no remainder\n        # width = yellow / height\n        if yellow % height == 0:\n            width = yellow // height\n            print(f'width: {width}, height: {height}, 2*({width}+{height})+4 = {2 * (width + height) + 4}, brown: {brown}')\n            if 2 * (width + height) + 4 == brown:\n                return [width + 2, height + 2]\n\n\n# def solution(brown: int, yellow: int) -> List[int]:\n#     for height in range(1, yellow + 1):\n#         width, remainder = divmod(yellow, height)\n#         if remainder == 0:\n#             if 2 * (height + width) + 4 == brown:\n#                 return [width + 2, height + 2]\n\n\nif __name__ == \"__main__\":\n    brown = 10\n    yellow = 2\n    start_time = timeit.default_timer()\n    print(solution(brown, yellow))\n    terminate_time = timeit.default_timer()\n    print(\"It took %f seconds.\" % (terminate_time - start_time))\n\n","repo_name":"enirobot/codingtest","sub_path":"python/카펫.py","file_name":"카펫.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10894432752","text":"def init(app):\n    \"\"\"\n    Initialize logging for the application (only if it's not in debug mode)\n    \"\"\"\n    if not app.debug:\n        from logging import Formatter\n        from logging.handlers import SMTPHandler, TimedRotatingFileHandler\n        \n        # File handler\n        file_formatter = Formatter('%(asctime)s %(levelname)s: %(message)s ' \\\n                                   '[in %(pathname)s:%(lineno)d]')\n        \n        file_handler = TimedRotatingFileHandler(app.config['LOG_FILE_NAME'], \n                                                when='midnight', \n                                                backupCount=31)\n        \n        file_handler.setFormatter(file_formatter)\n        file_handler.setLevel(app.config['LOG_FILE_LEVEL'])\n        app.logger.addHandler(file_handler)\n        \n        # Email handler\n        mail_formatter = Formatter('''\nMessage type: %(levelname)s\nLocation: %(pathname)s:%(lineno)d\nModule: %(module)s\nFunction: %(funcName)s\nTime: %(asctime)s\n\nMessage:\n\n%(message)s\n''')\n        mail_handler = SMTPHandler(app.config['LOG_EMAIL_SERVER'], \n                                   app.config['LOG_EMAIL_SENDER'],\n                                   app.config['ADMIN_EMAILS'], \n                                   '[%s] Error' % app.config['HOST_DOMAIN'])\n        \n        mail_handler.setFormatter(mail_formatter)\n        mail_handler.setLevel(app.config['LOG_EMAIL_LEVEL'])\n        app.logger.addHandler(mail_handler)\n    ","repo_name":"localprojects/Civil-Debate-Wall","sub_path":"cdw/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"7327573645","text":"# -*- coding: utf-8 -*-\r\nimport qrcode\r\nimport pandas as pd\r\nimport uuid\r\nfrom pathlib import Path\r\nimport os\r\nfrom PIL import Image\r\n\r\n\r\ndef add_icon(img, icon_path):\r\n    \"\"\"\r\n    Add an icon at the center of the QR code\r\n    \"\"\"\r\n    icon = Image.open(icon_path)\r\n    img_w, img_h = img.size\r\n    factor = 6\r\n    size_w = int(img_w / factor)\r\n    size_h = int(img_h / factor)\r\n\r\n    icon_w, icon_h = icon.size\r\n    if icon_w > size_w:\r\n        icon_w = size_w\r\n    if icon_h > size_h:\r\n        icon_h = size_h\r\n    icon = icon.resize((icon_w, icon_h), Image.ANTIALIAS)\r\n\r\n    w = int((img_w - icon_w) / 2)\r\n    h = int((img_h - icon_h) / 2)\r\n    img.paste(icon, (w, h), icon)\r\n\r\n\r\ndef make_qr_code(item_id, guid):\r\n    \"\"\"\r\n    Draw a QR code for the given url\r\n    \"\"\"\r\n    url = \"http://m.wz.qichecdn.com/darkhouse/app/index?no=%s\" % guid\r\n    qr = qrcode.QRCode(version=2,\r\n                       error_correction=qrcode.constants.ERROR_CORRECT_L,\r\n                       box_size=4,\r\n                       border=4)\r\n    qr.add_data(url)\r\n    qr.make(fit=True)\r\n    img = qr.make_image()\r\n    img = img.convert(\"RGBA\")\r\n    print(url)\r\n    directory = Path(\"../pic\")\r\n    if not directory.exists():\r\n        os.makedirs(directory)\r\n    add_icon(img, \"../pic/nuo.png\")\r\n    img.save(\"../pic/%d_%s.png\" % (item_id, guid))\r\n\r\n\r\ndef save_uuid():\r\n    # save the guids to an excel file\r\n    file_path = r'guid.xlsx'\r\n    directory = Path(file_path)\r\n    if not directory.exists():\r\n        guid_array = []\r\n        for i in range(1, 31):\r\n            dic = {'id': i, \"guid\": str(uuid.uuid4()).replace(\"-\", \"\")}\r\n            guid_array.append(dic)\r\n        writer = pd.ExcelWriter(file_path)\r\n        df = 
pd.DataFrame(guid_array)\n df.to_excel(writer, columns=['id', 'guid'], index=False, encoding=\"utf-8\", sheet_name=\"Sheet\")\n writer.save()\n\n\nif __name__ == '__main__':\n \"\"\"\n 方法主入口\n \"\"\"\n save_uuid()\n guid_list = pd.read_excel(\"guid.xlsx\")\n for guid_item in guid_list.values:\n make_qr_code(guid_item[0], guid_item[1])\n print(\"done\")\n","repo_name":"RucLuke/MLStudy","sub_path":"data_utils/make_qr_code.py","file_name":"make_qr_code.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16317580545","text":"from drf_yasg import openapi\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n# Create your views here.\nfrom noah import responses\nfrom noah.custom_status_codes import PROJECTS_LIST_SENT, TEMPLATE_ID_NOT_PROVIDED, TEMPLATE_WITH_SPECIFY_ID_NOT_FOUND, \\\n TEMPLATE_DOES_NOT_HAVE_PARTS, PROJECT_CREATED_SUCCESSFULLY, PROJECT_DOES_NOT_EXISTS, INVALID_PROJECT_DETAIL, \\\n PROJECT_UPDATED_SUCCESSFULLY, PROJECT_PART_DOES_NOT_EXISTS, EMPTY_BLOCK_ERROR, PROJECT_PART_BLOCK_CREATED, \\\n PROJECT_PART_BLOCK_UPDATED, INVALID_PROJECT_PART_BLOCK_DETAIL, PROJECT_PART_BLOCK_DOES_NOT_EXISTS, \\\n ONE_PROJECT_PART_BLOCK_NEED\nfrom noah.permissions import IsAuthenticated\nfrom projects.models import Project, ProjectPart, ProjectPartBlock\nfrom projects.serializers import CreateProjectSerializer, GetProjectSerializer, UpdateProjectSerializer, \\\n GetProjectPartBlockSerializer, UpdateProjectPartBlockSerializer\nfrom templates.models import Template, TemplatePart\n\nfrom drf_yasg.utils import swagger_auto_schema\n\n\nclass ProjectsCreateListAPI(APIView):\n permission_classes = (IsAuthenticated,)\n\n @swagger_auto_schema(tags=['Project'], operation_summary='Create project from templates',\n request_body=CreateProjectSerializer)\n def post(self, request):\n serializer = CreateProjectSerializer(data=request.data)\n\n if not serializer.is_valid():\n # template id isn't provided\n return Response(responses.generate_failure_response(TEMPLATE_ID_NOT_PROVIDED, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n try:\n # Initiate project\n template = Template.objects.get(id=serializer.data['template'])\n\n template_parts = TemplatePart.objects.filter(template=template, is_deleted=False)\n\n if template_parts.count() <= 0:\n return Response(responses.generate_failure_response(TEMPLATE_DOES_NOT_HAVE_PARTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n project = Project.objects.init_project(template, request.user)\n\n for template_part in template_parts:\n project_part = ProjectPart.objects.init_project_part(template_part, project)\n\n project_part_block = ProjectPartBlock.objects.init_project_part_block(project_part)\n\n # Get created project\n project_serializer = GetProjectSerializer(project)\n\n return Response(\n responses.generate_success_response(PROJECT_CREATED_SUCCESSFULLY, payload=project_serializer.data),\n status=status.HTTP_201_CREATED)\n\n except Template.DoesNotExist:\n return Response(responses.generate_failure_response(TEMPLATE_WITH_SPECIFY_ID_NOT_FOUND, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n @swagger_auto_schema(tags=['Project'], operation_summary='Get project list based on status', manual_parameters=[\n openapi.Parameter('status', openapi.IN_QUERY, description=\"Project status. 
(Draft: 0, Completed: 1)\",\n type=openapi.TYPE_INTEGER)])\n def get(self, request):\n\n projects = Project.objects.exclude(is_deleted=True)\n\n project_status = self.request.query_params.get('status', None)\n if project_status is not None:\n projects = projects.filter(status=project_status)\n\n serializer = GetProjectSerializer(projects, many=True)\n\n return Response(\n responses.generate_success_response(PROJECTS_LIST_SENT, payload=serializer.data))\n\n\nclass ProjectGetUpdateAPI(APIView):\n permission_classes = (IsAuthenticated,)\n\n @swagger_auto_schema(tags=['Project'], operation_summary='Get particular project')\n def get(self, request, project_id=None):\n\n projects = Project.objects.exclude(is_deleted=True)\n\n if project_id is not None:\n projects = projects.filter(id=project_id)\n\n serializer = GetProjectSerializer(projects, many=True)\n\n return Response(\n responses.generate_success_response(PROJECTS_LIST_SENT, payload=serializer.data))\n\n @swagger_auto_schema(tags=['Project'], operation_summary=\"Update project details.\",\n operation_description='Update project details. e.g., Title. Delete project using is_deleted.',\n request_body=UpdateProjectSerializer)\n def patch(self, request, project_id=None):\n try:\n project = Project.objects.get(id=project_id)\n\n serializer = UpdateProjectSerializer(project, data=request.data, partial=True)\n\n if not serializer.is_valid():\n return Response(responses.generate_failure_response(INVALID_PROJECT_DETAIL, payload=serializer.errors),\n status=status.HTTP_400_BAD_REQUEST)\n\n serializer.save()\n\n return Response(\n responses.generate_success_response(PROJECT_UPDATED_SUCCESSFULLY, payload=serializer.data),\n status=status.HTTP_201_CREATED)\n\n except Project.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProjectPartBlocksCreateAPI(APIView):\n permission_classes = (IsAuthenticated,)\n\n @swagger_auto_schema(tags=['Project Blocks'], operation_summary='Create/Add new block to part of project')\n def post(self, request, project_id, part_id):\n try:\n project = Project.objects.exclude(is_deleted=True).get(id=project_id)\n project_part = ProjectPart.objects.exclude(is_deleted=True).get(id=part_id, project=project)\n\n project_part_blocks = ProjectPartBlock.objects.filter(project_part=project_part, is_deleted=False).order_by(\n '-block_letter')\n\n # If there is only one block and empty description then don't allow to create a block\n if project_part_blocks.count() > 0 and not project_part_blocks.first().description:\n serializer = GetProjectPartBlockSerializer(project_part_blocks.first())\n return Response(responses.generate_failure_response(EMPTY_BLOCK_ERROR, payload=serializer.data),\n status=status.HTTP_400_BAD_REQUEST)\n\n # Get next block letter\n block_letter = 'A'\n if project_part_blocks.count() > 0:\n block_letter = project_part_blocks.first().block_letter\n\n next_block_letter = chr(ord(block_letter) + 1)\n\n # Create block with next block letter\n project_part_block = ProjectPartBlock.objects.create_project_part_block(project_part, next_block_letter)\n\n serializer = GetProjectPartBlockSerializer(project_part_block)\n\n return Response(\n responses.generate_success_response(PROJECT_PART_BLOCK_CREATED, payload=serializer.data),\n status=status.HTTP_201_CREATED)\n\n except Project.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n 
except ProjectPart.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_PART_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProjectPartBlockUpdateAPI(APIView):\n permission_classes = (IsAuthenticated,)\n\n @swagger_auto_schema(tags=['Project Blocks'], operation_summary='Update/Delete block details',\n request_body=UpdateProjectPartBlockSerializer)\n def patch(self, request, project_id, part_id, block_id):\n try:\n project = Project.objects.exclude(is_deleted=True).get(id=project_id)\n project_part = ProjectPart.objects.exclude(is_deleted=True).get(id=part_id, project=project)\n\n # Create block with next block letter\n project_part_block = ProjectPartBlock.objects.exclude(is_deleted=True).get(id=block_id,\n project_part=project_part)\n\n serializer = UpdateProjectPartBlockSerializer(project_part_block, data=request.data, partial=True)\n\n if not serializer.is_valid():\n return Response(\n responses.generate_failure_response(INVALID_PROJECT_PART_BLOCK_DETAIL, payload=serializer.errors),\n status=status.HTTP_400_BAD_REQUEST)\n\n # Block delete procedure\n if 'is_deleted' in request.data and request.data['is_deleted']:\n total_project_part_blocks = ProjectPartBlock.objects.exclude(is_deleted=True).filter(\n project_part=project_part)\n if total_project_part_blocks.count() <= 1:\n return Response(\n responses.generate_failure_response(ONE_PROJECT_PART_BLOCK_NEED,\n payload=serializer.data),\n status=status.HTTP_400_BAD_REQUEST)\n\n serializer.save()\n\n return Response(\n responses.generate_success_response(PROJECT_PART_BLOCK_UPDATED, payload=serializer.data),\n status=status.HTTP_201_CREATED)\n\n except Project.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n except ProjectPart.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_PART_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n except ProjectPartBlock.DoesNotExist:\n return Response(responses.generate_failure_response(PROJECT_PART_BLOCK_DOES_NOT_EXISTS, payload={}),\n status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"console360/Noah-Media","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18527264905","text":"import sys \nimport os\nimport glob\nimport shutil\nimport random \n\nSUBSAMPLE_RATIO = 15\n\ndef copy_videos(source, destination_memory, destination_target, data_set_percent_size = float(0.07)):\n\n # Get the subdirectory names using list comprehension\n video_ids = [d for d in os.listdir(source)]\n\n for video_id in video_ids:\n scene_dir = os.path.join(source, video_id)\n frame_folder = scene_dir + \"/\" + f\"{video_id}_frames/lowres_wide\"\n \n\n files = [f for f in os.listdir(frame_folder) if f.endswith('.png')]\n\n # Subsample before shuffling to get a more even temporally spread \n files = files[::SUBSAMPLE_RATIO]\n\n random.seed(0)\n random.shuffle(files)\n \n split_num = int(len(files)*data_set_percent_size)\n files_memory = files[:split_num]\n files_target = files[split_num:]\n\n for file in files_memory: \n src_path = os.path.join(frame_folder, file)\n dst_path = destination_memory\n shutil.copy(src_path, dst_path)\n \n for file in files_target: \n src_path = os.path.join(frame_folder, file)\n dst_path = destination_target\n shutil.copy(src_path, dst_path)\n\n\nif __name__ 
== \"__main__\":\n source = \"../ARKitScenes/data/3dod/Training/\"\n destination_memory = \"../ARKitScenes/memory/\"\n destination_target = \"../ARKitScenes/target/\"\n os.makedirs(os.path.dirname(destination_memory), exist_ok=True)\n os.makedirs(os.path.dirname(destination_target), exist_ok=True)\n\n copy_videos(source, destination_memory, destination_target, data_set_percent_size = float(0.9))\n\n source1 = \"../ARKitScenes/data/3dod/Training/40777060/40777060_frames/lowres_wide\"\n source2 = \"../ARKitScenes/data/3dod/Training/40777065/40777065_frames/lowres_wide\"\n\n original_count1 = 0\n for root_dir, cur_dir, files in os.walk(source1):\n original_count1 += len(files)\n print('file count:', original_count1)\n \n original_count2 = 0\n for root_dir, cur_dir, files in os.walk(source2):\n original_count2 += len(files)\n print('file count:', original_count2)\n\n print(\"original count:\", original_count1+original_count2)\n\n memory_count = 0\n for root_dir, cur_dir, files in os.walk(destination_memory):\n memory_count += len(files)\n print('memory count:', memory_count)\n\n target_count = 0\n for root_dir, cur_dir, files in os.walk(destination_target):\n target_count += len(files)\n print('target count:', target_count)","repo_name":"John-HarringtonNZ/CMU_16833_Semantic_Scene_Recognition","sub_path":"scripts/copy_files_to_directory_utils.py","file_name":"copy_files_to_directory_utils.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6719106401","text":"import webbrowser\r\nimport webbrowser as web\r\nimport time\r\ndef forin(x,y,z):\r\n for j in range(int(x), int(y)):\r\n web.register('chrome', None, webbrowser.BackgroundBrowser(z))\r\n url = '192.168.44.' 
+ str(j)\r\n        time.sleep(1)\r\n        web.get('chrome').open_new_tab(url)\r\n\r\nfirst = input('first var:')\r\ndouble = input('double var:(input var + 1 )')\r\n# raw string so the backslashes in the Windows path are not treated as escape sequences\r\npath = r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'\r\nforin(first,double,path)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"SHLL/Python_toStudy","sub_path":"test1/test/forbrowser.py","file_name":"forbrowser.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"39119161101","text":"\"\"\"\nIn this second exercise, you have to create a .py file and inside it create a Vehiculo class,\n\nmake an object of it, save it to a file and then load it back.\n\n\"\"\"\nimport pickle\nimport time\n\n\nclass Vehiculo():\n    def __init__(self):\n        # store the current time as a formatted string\n        self.ahora = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\")\n\n    def getLaHora(self):\n        return self.ahora\n\n\nLaHoraes = Vehiculo()\nprint(LaHoraes.getLaHora())\n\n\"\"\"\nthe following function saves the data to a file\n('wb' writes the data and stores it in binary form)\n\"\"\"\ndef GuardaBin():\n    f = open(\"Guardar_la_Hora.bin\", 'wb')\n    pickle.dump(LaHoraes, f)\n    f.close()\ndef AbreBin():\n    f = open(\"Guardar_la_Hora.bin\", 'rb')\n    Abre = pickle.load(f)\n    f.close()\n    print(Abre.getLaHora())\nGuardaBin()\nAbreBin()","repo_name":"odvr/campus.open-bootcamp","sub_path":"Curso de Python/Entrada_Salida_E2.py","file_name":"Entrada_Salida_E2.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"27558754203","text":"import numpy as np\nfrom copy import deepcopy as dc\nimport random\n\n\nclass Memory:\n    def __init__(self, capacity, k_future, env):\n        self.capacity = capacity\n        self.memory = []\n        self.memory_counter = 0\n        self.memory_length = 0\n        self.env = env\n\n        self.future_p = 1 - (1. 
/ (1 + k_future))\n\n def sample(self, batch_size):\n\n ep_indices = np.random.randint(0, len(self.memory), batch_size)\n time_indices = np.random.randint(0, len(self.memory[0][\"next_state\"]), batch_size)\n states = []\n actions = []\n desired_goals = []\n next_states = []\n next_achieved_goals = []\n\n for episode, timestep in zip(ep_indices, time_indices):\n states.append(dc(self.memory[episode][\"state\"][timestep]))\n actions.append(dc(self.memory[episode][\"action\"][timestep]))\n desired_goals.append(dc(self.memory[episode][\"desired_goal\"][timestep]))\n next_achieved_goals.append(dc(self.memory[episode][\"next_achieved_goal\"][timestep]))\n next_states.append(dc(self.memory[episode][\"next_state\"][timestep]))\n\n states = np.vstack(states)\n actions = np.vstack(actions)\n desired_goals = np.vstack(desired_goals)\n next_achieved_goals = np.vstack(next_achieved_goals)\n next_states = np.vstack(next_states)\n\n her_indices = np.where(np.random.uniform(size=batch_size) < self.future_p)\n future_offset = np.random.uniform(size=batch_size) * (len(self.memory[0][\"next_state\"]) - time_indices)\n future_offset = future_offset.astype(int)\n future_t = (time_indices + 1 + future_offset)[her_indices]\n\n future_ag = []\n for episode, f_offset in zip(ep_indices[her_indices], future_t):\n future_ag.append(dc(self.memory[episode][\"achieved_goal\"][f_offset]))\n future_ag = np.vstack(future_ag)\n\n desired_goals[her_indices] = future_ag\n rewards = np.expand_dims(self.env.compute_reward(next_achieved_goals, desired_goals, None), 1)\n\n return self.clip_obs(states), actions, rewards, self.clip_obs(next_states), self.clip_obs(desired_goals)\n\n def add(self, transition):\n self.memory.append(transition)\n if len(self.memory) > self.capacity:\n self.memory.pop(0)\n assert len(self.memory) <= self.capacity\n\n def __len__(self):\n return len(self.memory)\n\n @staticmethod\n def clip_obs(x):\n return np.clip(x, -200, 200)\n\n def sample_for_normalization(self, batch):\n size = len(batch[0][\"next_state\"])\n ep_indices = np.random.randint(0, len(batch), size)\n time_indices = np.random.randint(0, len(batch[0][\"next_state\"]), size)\n states = []\n desired_goals = []\n\n for episode, timestep in zip(ep_indices, time_indices):\n states.append(dc(batch[episode][\"state\"][timestep]))\n desired_goals.append(dc(batch[episode][\"desired_goal\"][timestep]))\n\n states = np.vstack(states)\n desired_goals = np.vstack(desired_goals)\n\n her_indices = np.where(np.random.uniform(size=size) < self.future_p)\n future_offset = np.random.uniform(size=size) * (len(batch[0][\"next_state\"]) - time_indices)\n future_offset = future_offset.astype(int)\n future_t = (time_indices + 1 + future_offset)[her_indices]\n\n future_ag = []\n for episode, f_offset in zip(ep_indices[her_indices], future_t):\n future_ag.append(dc(batch[episode][\"achieved_goal\"][f_offset]))\n future_ag = np.vstack(future_ag)\n\n desired_goals[her_indices] = future_ag\n\n return self.clip_obs(states), self.clip_obs(desired_goals)\n","repo_name":"alirezakazemipour/DDPG-HER","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"16"} +{"seq_id":"35615005604","text":"import enum\n\n\nclass Shape(enum.IntEnum):\n Club = 0\n Diamond = 1\n Heart = 2\n Spade = 3\n\n\nclass Rank(enum.IntEnum):\n Two = 1\n Three = 2\n Four = 3\n Five = 4\n Six = 5\n Seven = 6\n Eight = 7\n Nine = 8\n Ten = 9\n Jack = 10\n Queen = 11\n King = 
12\n Ace = 13\n\n\nStrToShapeDict = {\n 'c': Shape.Club,\n 'd': Shape.Diamond,\n 'h': Shape.Heart,\n 's': Shape.Spade\n}\n\nShapeToStrDict = {\n Shape.Club: 'c',\n Shape.Diamond: 'd',\n Shape.Heart: 'h',\n Shape.Spade: 's'\n}\n\nStrToRankDict = {\n 'T': Rank.Ten,\n 'J': Rank.Jack,\n 'Q': Rank.Queen,\n 'K': Rank.King,\n 'A': Rank.Ace\n}\n# other ranks\nfor number in range(2, 10):\n StrToRankDict[f'{number}'] = number - 1\n","repo_name":"hdmun/poker-server-with-akka.net","sub_path":"scripts/bit-count-table-generator/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31807358930","text":"#!/usr/bin/env python\nimport cv2\nimport pandas as pd\nimport numpy as np\nimport cv_bridge\nimport rospy\nimport sensor_msgs.msg\nimport ibmmpy.msg\nimport fixation_detector\n\n\ndef get_fixation_from_point_msg(msg):\n if not isinstance(msg.id, int):\n print('unexpected msg type: {}'.format(type(msg.id)))\n print(msg)\n raise Exception\n return pd.DataFrame({\n 'id': msg.id,\n 'start_timestamp': msg.start_timestamp.to_sec(),\n 'duration': msg.duration,\n 'x_center': msg.x_center,\n 'y_center': msg.y_center\n }, index=[msg.id]\n ).dropna()\n \n\nclass FixationVisualizer:\n __COLORS__ = ( (0, 0, 255), (0, 255, 0), (255, 0, 0), (255, 255, 0), (255, 0, 255), (0, 255, 255) )\n __UNUSED_COLOR__ = ( 192, 192, 192 )\n __GAZE_RADIUS__ = 5\n __FIX_RADIUS__ = 15\n __LINGER_COUNT__ = len(__COLORS__)\n __MAX_PUB_PERIOD__ = rospy.Duration(0.1)\n\n def __init__(self, \n image_topic='/image',\n image_subtopic='image_raw',\n fixation_topic='/fixations',\n gaze_topic='/gaze'):\n \n self.cam_info = None\n \n self.fixations = pd.DataFrame()\n self.raw_data = pd.DataFrame()\n \n self.bridge = cv_bridge.CvBridge()\n\n if image_topic is not None:\n self.cam_info_sub = rospy.Subscriber(image_topic + '/camera_info', sensor_msgs.msg.CameraInfo, self._cam_info_callback)\n self.image_sub = rospy.Subscriber(image_topic + '/' + image_subtopic, sensor_msgs.msg.Image, self._frame_callback)\n self._last_image = None\n else:\n self.cam_info = sensor_msgs.msg.CameraInfo(height=768, width=1024)\n self.timer = rospy.Timer(rospy.Duration.from_sec(0.1), self._timer_callback, oneshot=False)\n self._last_image = np.zeros((self.cam_info.height, self.cam_info.width, 3), dtype=np.uint8)\n \n self.fix_sub = rospy.Subscriber(fixation_topic, ibmmpy.msg.FixationDataPoint, self._fix_callback)\n self.gaze_sub = rospy.Subscriber(gaze_topic, ibmmpy.msg.GazeData, self._gaze_callback)\n self.image_pub = rospy.Publisher('~image_overlay', sensor_msgs.msg.Image, queue_size=1)\n\n self._last_time = None\n self._last_pub_time = None\n \n def draw_fixations(self, frame, fixations):\n for fix in fixations.itertuples():\n pos = (int(fix.x_center * self.cam_info.width), int( (1-fix.y_center) * self.cam_info.height ) )\n color = FixationVisualizer.__COLORS__[ fix.id % len(FixationVisualizer.__COLORS__) ]\n cv2.circle(frame, pos, FixationVisualizer.__FIX_RADIUS__, color, 3)\n \n def draw_raw_gaze(self, frame, raw_data, fixations):\n\n fix_iter = fixations.itertuples()\n try:\n fix = next(fix_iter)\n except StopIteration:\n fix, fix_iter = None, None\n\n for gaze in raw_data.itertuples():\n while fix_iter is not None and fix.start_timestamp + 1e-3*fix.duration < gaze.timestamp:\n try:\n fix = next(fix_iter)\n except StopIteration:\n fix, fix_iter = None, None\n if fix is None or gaze.timestamp < fix.start_timestamp:\n color = 
FixationVisualizer.__UNUSED_COLOR__\n else:\n color = FixationVisualizer.__COLORS__[fix.id % len(FixationVisualizer.__COLORS__)]\n pos = (int(gaze.x * self.cam_info.width), int( (1-gaze.y) * self.cam_info.height ) )\n cv2.circle(frame, pos, FixationVisualizer.__GAZE_RADIUS__, color, -1)\n \n \n def draw_frame(self, frame, cur_time):\n # handle if an addl fix is added while we're running\n fix = self.fixations\n self.draw_fixations(frame, fix) \n \n raw_gaze = self.raw_data\n self.draw_raw_gaze(frame, raw_gaze, fix)\n \n return frame\n\n def compile_data_and_publish(self, tm):\n # call this from all the callbacks\n # basically bc if we're running in sim time, and pause the bag, the timer callback pauses\n # but the data is still coming in so problems\n if self._last_image is None:\n return\n if self._last_pub_time is not None and (tm - self._last_pub_time) < FixationVisualizer.__MAX_PUB_PERIOD__:\n return\n self._last_pub_time = tm\n frame = self._last_image.copy()\n new_frame = self.draw_frame(frame, tm.to_sec())\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(new_frame, encoding='bgr8'))\n \n def _fix_callback(self, msg):\n self.fixations = pd.concat((self.fixations, get_fixation_from_point_msg(msg)))\n if len(self.fixations) > FixationVisualizer.__LINGER_COUNT__:\n self._last_time = self.fixations.iloc[-FixationVisualizer.__LINGER_COUNT__-1].start_timestamp + self.fixations.iloc[-FixationVisualizer.__LINGER_COUNT__-1].duration*1e-3\n self.fixations = self.fixations[-FixationVisualizer.__LINGER_COUNT__:]\n self.compile_data_and_publish(msg.header.stamp)\n \n def _gaze_callback(self, msg):\n new_data = pd.DataFrame(fixation_detector.gaze_data_from_msg(msg)['world']).dropna().set_index('timestamp', drop=False)\n if len(new_data) > 0 and len(self.raw_data) > 0:\n filt = np.hstack((self.raw_data.iloc[-1].timestamp, new_data.iloc[:-1].timestamp.values)) <= new_data.timestamp.values\n if not np.all(filt):\n rospy.logwarn('new data went backwards: {}'.format(new_data.iloc[~filt]))\n new_data = new_data.iloc[filt,:]\n self.raw_data = pd.concat((self.raw_data, new_data))\n if not self.raw_data.index.is_monotonic:\n rospy.logwarn(''.join('new data not monotonic: ',\n self.raw_data.assign(tm=lambda r: r.timestamp-msg.header.stamp.to_sec()).tail(),\n new_data.assign(tm=lambda r: r.timestamp-msg.header.stamp.to_sec())))\n _last_time = self._last_time\n if _last_time is not None:\n self.raw_data = self.raw_data.truncate(before=_last_time)\n \n self.compile_data_and_publish(msg.header.stamp)\n \n def _cam_info_callback(self, msg):\n self.cam_info = msg\n self.cam_info_sub.unregister()\n \n def _frame_callback(self, frame):\n if self.cam_info is None:\n rospy.logwarn('No camera info received yet')\n return\n self._last_image = self.bridge.imgmsg_to_cv2(frame, desired_encoding='bgr8')\n self.compile_data_and_publish(frame.header.stamp)\n \n def _timer_callback(self, event):\n rospy.logdebug('got timer callback at {}'.format(event.current_real))\n self.compile_data_and_publish(event.current_real)\n\n\ndef main():\n if rospy.get_param('~overlay'):\n image_topic = rospy.resolve_name('raw_image')\n image_subtopic = rospy.get_param('~image_subtopic', 'image_raw')\n rospy.loginfo('reading image from topic {}'.format(image_topic))\n else:\n image_topic = None\n image_subtopic = None\n rospy.loginfo('generating empty fixation image')\n fixation_topic = rospy.get_param('~fixation_topic', '/fixations')\n gaze_topic = rospy.get_param('~gaze_topic', '/gaze')\n # gaze_color = tuple(int(x) for x in 
rospy.get_param('~gaze_color', [255, 255, 0]))\n # gaze_radius = int(rospy.get_param('~gaze_radius', 5))\n # fix_color = tuple(int(x) for x in rospy.get_param('~fixation_color', [255, 0, 0]))\n # fix_radius = int(rospy.get_param('~fixation_radius', 15))\n # linger_time = rospy.get_param('~linger_time', 5)\n \n viz = FixationVisualizer(image_topic=image_topic, image_subtopic=image_subtopic,\n fixation_topic=fixation_topic, gaze_topic=gaze_topic)\n \n rospy.spin()\n\nif __name__ == \"__main__\":\n rospy.init_node('fixation_visualizer')\n main()\n \n \n ","repo_name":"HARPLab/ibmmpy","sub_path":"scripts/fixation_visualizer.py","file_name":"fixation_visualizer.py","file_ext":"py","file_size_in_byte":7927,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"34302982323","text":"# coding=utf-8\nimport os\nimport simplejson\nimport collections\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass InterparkAPI(object):\n base_url = 'http://ticket.globalinterpark.com/Global/Play/Goods/GoodsInfoXml.asp'\n\n def __init__(self, interpark_parameters):\n self.interpark_parameters = interpark_parameters\n\n @property\n def default_headers(self):\n return {\n \"Accept\": \"text/javascript, text/html, application/xml, text/xml, */*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\",\n \"X-Prototype-Version\": \"1.6.1\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n\n @property\n def place_code(self):\n return self.interpark_parameters['PlaceCode']\n\n @property\n def language_type(self):\n return self.interpark_parameters['LanguageType']\n\n @property\n def goods_codes(self):\n return self.interpark_parameters['GoodsCode']\n\n @property\n def ignored_goods_codes(self):\n return self.interpark_parameters['IgnoredGoodsCode']\n\n def reset(self):\n session = requests.session()\n session.keep_alive = False\n\n def get_play_date(self, goods_code, place_code, language_type):\n parameters = {\n 'Flag': 'PlayDate',\n 'GoodsCode': goods_code,\n 'PlaceCode': place_code,\n 'LanguageType': language_type,\n 'OnlyDeliver': '',\n 'DelyDay': '',\n 'ExpressDelyDay': ''\n }\n response = requests.get(InterparkAPI.base_url, parameters, headers=self.default_headers, timeout=30)\n response.raise_for_status()\n response_text = response.text\n response_soup = BeautifulSoup(response_text, 'lxml-xml')\n play_date = response_soup.find('PlayDate').get_text()\n return play_date\n\n def get_play_seq(self, goods_code, place_code, play_date, language_type):\n parameters = {\n 'Flag': 'PlaySeq',\n 'GoodsCode': goods_code,\n 'PlaceCode': place_code,\n 'PlayDate': play_date,\n 'LanguageType': language_type\n }\n response = requests.get(InterparkAPI.base_url, parameters, headers=self.default_headers, timeout=30)\n response.raise_for_status()\n response_text = response.text\n response_soup = BeautifulSoup(response_text, 'lxml-xml')\n play_seq = response_soup.find('PlaySeq').get_text()\n return play_seq\n\n def get_seat_status_list(self, goods_code, place_code, play_seq, language_type):\n parameters = {\n 'Flag': 'RemainSeat',\n 'GoodsCode': goods_code,\n 'PlaceCode': place_code,\n 'PlaySeq': play_seq,\n 'LanguageType': language_type\n }\n response = requests.get(InterparkAPI.base_url, parameters, headers=self.default_headers, timeout=30)\n response.raise_for_status()\n 
response_text = response.text\n response_soup = BeautifulSoup(response_text, 'lxml-xml')\n\n seat_status_list = []\n for seat_status_soup in response_soup.find_all('Table'):\n seat_status = {}\n for tag in seat_status_soup.children:\n seat_status[tag.name] = tag.text\n seat_status_list.append(seat_status)\n\n return seat_status_list\n\n\nif __name__ == '__main__':\n current_folder = os.path.split(__file__)[0]\n interpark_parameters_filepath = os.path.join(current_folder, 'InterparkParameters.json')\n with open(interpark_parameters_filepath, mode='r') as interpark_parameters_file:\n interpark_parameters = simplejson.load(interpark_parameters_file, encoding='utf-8')\n\n if not interpark_parameters:\n exit()\n\n interpark_api = InterparkAPI(interpark_parameters)\n place_code = interpark_api.place_code\n language_type = interpark_api.language_type\n goods_codes = interpark_api.goods_codes\n ignored_goods_codes = interpark_api.ignored_goods_codes\n ticket_matrix = collections.OrderedDict()\n seat_names = {}\n for goods_name, goods_code in goods_codes.items():\n if goods_code in ignored_goods_codes:\n continue\n\n ticket_matrix[goods_name] = {}\n\n play_date = interpark_api.get_play_date(goods_code, place_code, language_type)\n play_seq = interpark_api.get_play_seq(goods_code, place_code, play_date, language_type)\n seat_status_list = interpark_api.get_seat_status_list(goods_code, place_code, play_seq, language_type)\n\n for seat_status in seat_status_list:\n ticket_matrix[goods_name][seat_status['SeatGrade']] = seat_status['RemainCnt']\n seat_names[seat_status['SeatGrade']] = seat_status['SeatGradeName']\n\n header = 'Ticket'\n for seat_grade, seat_name in seat_names.items():\n header += '\\t%s' % seat_name\n print(header)\n\n for goods_name, ticket_status in ticket_matrix.items():\n row = goods_name\n for seat_grade, seat_remain_count in ticket_matrix[goods_name].items():\n row += '\\t%s' % seat_remain_count\n\n print(row)\n","repo_name":"linvsky/Enterpark","sub_path":"interpark_api.py","file_name":"interpark_api.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"72377923528","text":"import numpy as np\nimport scipy\nimport math\n\n### Solutions\n# See Gradescope submission for answers to questions\n\nx_gamma = 8\n\ndef gamma_int(t, x=x_gamma):\n return (t**(x - 1)*(math.e**(-t)))\n\nend_test = 36\nn = 1000\n\ndef comp_trap(f, n, end):\n parts = np.linspace(0, end, n)\n #print(part)\n h = (parts[-1] - parts[0])/n\n sum_total = 0\n \n for i in range(1, n):\n sum_total += f(parts[0] + i*h)\n \n eval_t = (h/2)*(f(parts[0]) + 2*sum_total + f(parts[-1])) \n return (eval_t, abs(scipy.special.gamma(x_gamma) - eval_t))\n\n\n\n\nprint(f'Gamma function true value for x = {x_gamma}: {scipy.special.gamma(x_gamma)}')\nprint(f'Composite trapezoid approximation truncated at {end_test} for x = {x_gamma} (value, error): {comp_trap(gamma_int, n, end_test)}')\nprint(f'Adaptive quadrature routine on 0 to {end_test} for x = {x_gamma} (value, error): {scipy.integrate.quadrature(gamma_int, 0, end_test)}')\n\n","repo_name":"LukeStuckenbruck/APPM_4600_repository","sub_path":"Homework/Homework_11/HW11_Q3_a_b.py","file_name":"HW11_Q3_a_b.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35647137749","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 28 09:32:57 2019\n\n@author: Krishna Kishore 
Vissa\n\"\"\"\n\nimport os\nimport requests\nimport re\nimport csv\nfrom bs4 import BeautifulSoup\nfrom numpy import nan as NaN\n\nos.chdir('C:/02_study/datascience/innomatics/assignments/webscrapping_project')\nos.getcwd()\n\ndef get_oyo_hotel_info(country, city, hotel):\n row = {'Country' : country, 'Provider': 'OYO', 'City' : city }\n \n ###################### Hotel Info ###############################\n hotel_name = hotel.find('h3').text.strip()\n row['Hotel Name'] = hotel_name\n # print(f'{country} {city} {hotel_name}')\n address = hotel.find('span').text.strip()\n row['Address'] = address\n \n ###################### Ratings ################################\n ratings_wrapper = hotel.find('div', class_='hotelRating__wrapper')\n if ratings_wrapper:\n ratings = re.search(r'(\\d.\\d)\\s\\((\\d+)\\sRatings\\).(\\w+)', ratings_wrapper.text)\n rating = ratings.group(1)\n n_ratings = ratings.group(2)\n rating_summary = ratings.group(3)\n else:\n rating = 'NEW'\n n_ratings = NaN\n rating_summary = NaN\n\n row['Rating'] = rating\n row['No Of Ratings'] = n_ratings\n row['Rating Summary'] = rating_summary\n \n ############## Amenities ##############################\n amenity_wrapper = hotel.find('div', class_='amenityWrapper')\n if amenity_wrapper:\n amenities = ', '.join(amenity.text.strip() for amenity in amenity_wrapper)\n else:\n amenities = NaN\n row['Amenities'] = amenities\n \n ############## Room type and Prices ######################\n room_type = hotel.find('div', class_='listingHotelDescription__HotelCategory').text.strip()\n \n if hotel.find('span', class_='listingHotelDescription__soldOut'):\n final_price = NaN\n slashed_price = NaN\n discount_percent_str = NaN\n else: \n final_price = hotel.find('span', class_='listingPrice__finalPrice').text.strip()\n slashed_price = hotel.find('span', class_='listingPrice__slashedPrice d-body-lg')\n if slashed_price:\n slashed_price = slashed_price.text.strip()\n else:\n slashed_price = NaN\n \n discount_percent_str = hotel.find('span', class_='listingPrice__percentage')\n if discount_percent_str:\n discount_percent_str = discount_percent_str.text.strip()\n else:\n discount_percent_str = NaN\n \n row['Room Type'] = room_type\n row['Final Price'] = final_price\n row['Slashed Price'] = slashed_price\n row['Discount(%)'] = discount_percent_str\n \n global no_records\n no_records += 1\n return row\n# ############## Print All Values #######################\n# print(\"Hotel Name:\", hotel_name)\n# print(\"Address:\", address)\n# print(\"Rating:\", float(rating))\n# print(\"No of Ratings:\", int(n_ratings))\n# print(\"Ratings Summary:\", rating_summary)\n# print(\"Amenities:\", amenities)\n# print(\"Room Type:\", room_type) \n# print(\"Final Price: \", final_price.text)\n# print(\"Slashed Price:\", slashed_price) \n# print(\"Discount(%):\", re.search(r'(\\d\\d)', discount_percent).group())\n# return row\n\ndef hotels_iter(country, city, hotels_soup):\n for hotel in hotels_soup:\n yield get_oyo_hotel_info(country, city, hotel)\n\ndef to_inr(price):\n # First 3 letters is the country code, rest is price value\n country_code = price[0:3]\n price = float(price[3:])\n \n conversion_rates = {'NPR' : 0.63,\n '' : 0.0,\n '' : 0.0,\n '' : 0.0,\n '' : 0.0,\n '' : 0.0,\n '' : 0.0,\n }\n if country_code not in conversion_rates.keys():\n return 0.0\n \n return conversion_rates[country_code] * price\n\ncountries = {\n 'india' : 'India',\n 'np' : 'Nepal',\n 'my' : 'Malaysia',\n 'id' : 'Indonesia',\n 'ae' : 'UAE'\n }\n\nbase_url = 
'https://www.oyorooms.com/'\n\ncountry_cities = {}\ncount = 0\n\n# Fetch all the URLs for all the Cities in all the countries\nfor ck, cv in countries.items():\n all_cities_url = base_url + ck + '/allcities/'\n all_cities_soup = BeautifulSoup(requests.get(all_cities_url).text, features = 'lxml')\n# print('URL = ', all_cities_url)\n# print(f\"Fetching city urls for {cv} ...\", end = ' ')\n cities = [ (city.text, city.attrs['href']) for city in all_cities_soup.find_all('a', class_='c-kpzrw5') ]\n# print(f'Number of cities in {cv}: {len(cities)}')\n# print(cities)\n count += len(cities)\n country_cities[cv] = cities\n\nfor country, cities in country_cities.items():\n print(country)\n for city in cities:\n print(' ', city[0], ':', city[1] )\n\nprint(f\"Total Number of cities: {count}\\n\")\n\ndef write_hotels_info_to_csv(url, csvwriter, country, city):\n '''Writes the hotels data to csv file. This method iterates over the hotel urls and fetches the \n information.\n '''\n # print('Fetching contents of the URL:', url)\n data_soup = BeautifulSoup(requests.get(url).text, features = 'lxml')\n hotels_class_name = 'oyo-row oyo-row--no-spacing listingHotelDescription'\n hotels_soup = data_soup.find_all('div', class_=hotels_class_name)\n csvwriter.writerows([ hotel for hotel in hotels_iter(country, city, hotels_soup)])\n\n############### Open CSV File in Write mode and prepare the CSV Header ####################\noutput_csv = open('oyo_hotels_final.csv', 'w', newline='', encoding='utf-8')\ncolumns = ['Country', 'Provider', 'City', 'Hotel Name', 'Address', 'Rating', 'No Of Ratings',\n 'Rating Summary', 'Amenities', 'Room Type', 'Final Price', 'Slashed Price', 'Discount(%)']\n\n# prepare the header\ncsvwriter = csv.DictWriter(output_csv, fieldnames = columns)\ncsvwriter.writeheader()\n\n# Fetches the complete data country and city wise \ncompleted = 0\nno_records = 0\n\nfor country, cities in country_cities.items():\n print(f'\\nFetching {country} Hotels . 
', end='')\n citycount = 0\n for city_url in country_cities[country]:\n city_base_url = base_url\n \n match = re.search(r'/india/(.+\\b)', city_url[1])\n if match:\n city_base_url += match.group(1)\n else:\n city_base_url += city_url[1]\n # print(city_base_url)\n # try:\n page_soup = BeautifulSoup(requests.get(city_base_url).text, features = 'lxml')\n hotels_soup = page_soup.find_all('div', \n class_='oyo-row oyo-row--no-spacing listingHotelDescription')\n csvwriter.writerows([ hotel for hotel in hotels_iter(country, city_url[0], hotels_soup)])\n pages = page_soup.find_all('a', class_='link ListingPagination__pageContainer--page')\n if pages:\n for page in pages:\n page_url = base_url + page.attrs['href']\n # print(\"Page URL: \", page_url)\n write_hotels_info_to_csv(page_url, csvwriter, country, city_url[0])\n print('.', end=' ')\n citycount += 1\n completed += 1\n # except:\n # print(f'\\nIgnoring the url {city_base_url} since maximum retry attempts failed.')\n # pass\n print(f'City Count in {country}: {citycount}')\n print(f'\\nFinished {int(completed / count * 100)}%')\n \n\nprint(f'\\nSuccessfully fetched {no_records} records.')\n\n#output_csv = open('sample_hotels.csv', \"w\", newline='', encoding=\"utf-8\")\n#columns = ['Country', 'Provider', 'City', 'Hotel Name', 'Address', 'Rating', 'No Of Ratings',\n# 'Rating Summary', 'Amenities', 'Room Type', 'Final Price', 'Slashed Price', 'Discount(%)']\n## prepare the header\n#csvwriter = csv.DictWriter(output_csv, fieldnames = columns)\n#csvwriter.writeheader()\n#\n#write_hotels_info_to_csv('https://www.oyorooms.com//ae/hotels-in-dubai/?page=2', csvwriter, 'nepal', 'pok')\n\noutput_csv.close()\n","repo_name":"krishna8git/datascience","sub_path":"datascience_misc/innomatics/assignments/webscrapping_project/scrap_oyorooms.py","file_name":"scrap_oyorooms.py","file_ext":"py","file_size_in_byte":7945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23635510250","text":"import json\nimport struct\nfrom base64 import b64encode, b64decode\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\ncomplex_number_length = len(b64encode(struct.pack(\"ff\", 0, 0)).decode(\"utf-8\")) + 1\nif np is not None:\n complex1 = np.array([1, 1j])\n\n\nclass BetterEncoder(json.JSONEncoder):\n def default(self, obj):\n if np is not None and isinstance(obj, np.ndarray):\n if \"complex\" in str(obj.dtype):\n shape = obj.shape\n values = obj.ravel()\n values = np.concatenate([np.real(values), np.imag(values)]).reshape((2, len(values))).T.ravel()\n encoded = b64encode(struct.pack(\"f\" * len(values), *values)).decode(\"utf-8\")\n res = dict(_decode_type=\"numpy_complex\", _shape=shape, _content=encoded)\n return res\n return obj.tolist()\n if pd is not None and np is not None:\n typename = obj.__class__.__name__\n res = None\n if isinstance(obj, pd.DataFrame):\n content = dict(zip(obj.columns, obj.values.T.tolist()))\n for key, value in content.items():\n unique = np.unique(value)\n if len(unique) == 1:\n content[key] = unique[0]\n res = dict(_decode_type=typename, _content=content)\n elif isinstance(obj, pd.Series):\n res = dict(_decode_type=typename, _content=obj.to_dict())\n if res:\n return res\n if type(obj) == int:\n return int(obj)\n elif type(obj) == float:\n return float(obj)\n elif type(obj) == bytes:\n return dict(_decode_type=\"bytes\", _content=obj.hex())\n elif type(obj) == bytearray:\n return dict(_decode_type=\"bytearray\", 
_content=obj.hex())\n elif type(obj) == complex and np is not None:\n encoded = b64encode(struct.pack(\"ff\", np.real(obj), np.imag(obj)))\n encoded = \"c\" + encoded.decode(\"utf-8\")\n return encoded\n return json.JSONEncoder.default(self, obj)\n\n\nclass BetterDecoder(json.JSONDecoder):\n def __init__(self, *args, **kwargs):\n super(BetterDecoder, self).__init__(object_hook=self.default, *args, **kwargs)\n\n def invalid_type(self):\n raise Exception(\"invalid _decode_type\")\n\n def default(self, obj):\n if type(obj) == str and len(obj) == complex_number_length and obj[0] == \"c\":\n obj = np.array(struct.unpack(\"ff\", b64decode(obj[1:])))\n if \"_decode_type\" in obj and obj[\"_decode_type\"] == \"numpy_complex\":\n content = obj[\"_content\"]\n shape = obj[\"_shape\"]\n content = b64decode(content)\n content = struct.unpack(\"f\" * (len(content) // 4), content)\n content = np.reshape(content, (-1, 2))\n content = content * complex1[None, :]\n content = np.sum(content, axis=1)\n content = np.reshape(content, shape)\n return content\n elif \"_decode_type\" in obj and obj[\"_decode_type\"] == \"bytes\":\n return bytes.fromhex(obj[\"_content\"])\n elif \"_decode_type\" in obj and obj[\"_decode_type\"] == \"bytearray\":\n return bytearray.fromhex(obj[\"_content\"])\n if pd is not None and isinstance(obj, dict):\n if \"_decode_type\" in obj and \"_content\" in obj:\n func = getattr(pd, obj[\"_decode_type\"], self.invalid_type)\n obj = func(obj[\"_content\"])\n return obj\n\n\ndef serialize(data):\n try:\n res = json.dumps(data, cls=BetterEncoder).encode('utf-8')\n except (TypeError, ValueError) as e:\n raise Exception('You can only send JSON-serializable data. Error is : %s' % e)\n return res\n\n\ndef deserialize(data):\n if type(data) == bytes:\n data = data.decode('utf-8')\n try:\n res = json.loads(data, cls=BetterDecoder)\n except (TypeError, ValueError) as e:\n raise Exception('Data received was not in JSON format. 
Error is %s' % str(e))\n    return res\n","repo_name":"Phalelashvili/jsonsocket","sub_path":"jsonsocket/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
{"seq_id":"2941439869","text":"import peachy\r\nimport peachy.geo\r\n\r\nfrom game.config import GRAVITY, MAX_GRAVITY\r\nfrom game.utility import collision_resolution, solid_below\r\n\r\n\r\nclass Dog(peachy.Entity, peachy.geo.Rect):\r\n\r\n    AGGRO_COOLDOWN = 3 * 60\r\n    AGGRO_RADIUS = 36\r\n\r\n    STATE_STANDARD = 'standard'\r\n    STATE_AGGRO = 'aggro'\r\n    STATE_STUNNED = 'distracted'\r\n\r\n    SPEED_WALK = 0.5\r\n    SPEED_RUN = 1.5\r\n\r\n    def __init__(self, x, y):\r\n        # initialize both base classes explicitly; an unbound\r\n        # super(Class).__init__() call is a no-op here\r\n        peachy.Entity.__init__(self)\r\n        peachy.geo.Rect.__init__(self, x, y, 16, 10)\r\n        self.group = 'enemy liftable dog can-slow can-stun'\r\n        self.order = 2\r\n\r\n        self.facing_x = -1\r\n        self.facing_y = 0\r\n\r\n        self.state = Dog.STATE_STANDARD\r\n\r\n        self.aggro_timer = peachy.utils.Counter(0, Dog.AGGRO_COOLDOWN)\r\n        self.stun_timer = 300\r\n\r\n        self.sprite = peachy.fs.get_image('assets/img/dog.png')\r\n\r\n    def bite(self, player):\r\n        if player.member_of('duplicate'):\r\n            player.destroy()\r\n            self.state = Dog.STATE_STUNNED\r\n            self.velocity_x = 0\r\n        else:\r\n            player.kill(self)\r\n\r\n    def change_state(self, state, **kwargs):\r\n        self.state = state\r\n\r\n    def render(self):\r\n        peachy.graphics.set_color(254, 154, 4)\r\n        peachy.graphics.draw_entity_rect(self)\r\n\r\n        x, y = self.center\r\n        if self.state == Dog.STATE_AGGRO:\r\n            peachy.graphics.set_color(255, 0, 0, 25)\r\n        elif self.state == Dog.STATE_STUNNED:\r\n            peachy.graphics.set_color(255, 0, 255, 25)\r\n        else:\r\n            peachy.graphics.set_color(255, 255, 255, 25)\r\n        peachy.graphics.draw_circle(x - Dog.AGGRO_RADIUS, y - Dog.AGGRO_RADIUS,\r\n                                    Dog.AGGRO_RADIUS)\r\n\r\n        if self.facing_x == -1:\r\n            peachy.graphics.draw(self.sprite, self.x, self.y - 3,\r\n                                 args=peachy.graphics.FLIP_X)\r\n        else:\r\n            peachy.graphics.draw(self.sprite, self.x, self.y - 3)\r\n\r\n    def update(self):\r\n        temp_x = self.x\r\n        temp_y = self.y\r\n        cx, cy = self.center\r\n\r\n        players = self.container.get_group('player')\r\n\r\n        target = None\r\n        target_distance = None\r\n        target_visible = False\r\n        tcx, tcy = (0, 0)\r\n\r\n        # Aggro closest player\r\n        for player in players:\r\n            temp_distance = self.distance_from(player)\r\n            if target_distance is None or temp_distance < target_distance:\r\n                target = player\r\n                target_distance = temp_distance\r\n                tcx, tcy = target.center\r\n\r\n        # Visibility (guard against the case where no player was found)\r\n        if target is not None and target_distance <= Dog.AGGRO_RADIUS:\r\n            target_visible = True\r\n            # Bite\r\n            if self.collides(target, self.x, self.y) and \\\r\n               self.state != Dog.STATE_STUNNED:\r\n                self.bite(target)\r\n\r\n        # Update state\r\n        if self.state == Dog.STATE_STANDARD:\r\n            # Movement\r\n            if solid_below(self, temp_x + self.width * self.facing_x, temp_y) and not \\\r\n               self.collides_solid(temp_x + self.width * self.facing_x, self.y):\r\n                self.velocity_x = Dog.SPEED_WALK * self.facing_x\r\n            else:\r\n                self.velocity_x = 0\r\n                self.facing_x *= -1\r\n\r\n            # Detect player\r\n            if target_visible:\r\n                self.change_state(Dog.STATE_AGGRO)\r\n\r\n        elif self.state == Dog.STATE_AGGRO:\r\n            if target is None or target_distance > Dog.AGGRO_RADIUS:\r\n                if not self.aggro_timer.tick():\r\n                    self.change_state(Dog.STATE_STANDARD)\r\n                    self.velocity_x = 0\r\n            else:\r\n                self.aggro_timer.reset()\r\n\r\n                x_dist = abs(cx - tcx)\r\n                if x_dist > (self.width / 
2):\r\n                    x_dist = cx - tcx\r\n                    if x_dist < 0:\r\n                        self.facing_x = 1\r\n                    else:\r\n                        self.facing_x = -1\r\n\r\n                if solid_below(self, temp_x + self.width * self.facing_x, temp_y):\r\n                    self.velocity_x = Dog.SPEED_RUN * self.facing_x\r\n                else:\r\n                    self.velocity_x = 0\r\n            else:\r\n                self.velocity_x = 0\r\n\r\n        elif self.state == Dog.STATE_STUNNED:\r\n            self.stun_timer -= 1\r\n            if self.stun_timer <= 0:\r\n                self.state = Dog.STATE_STANDARD\r\n\r\n        # Finalize\r\n        if self.velocity_y < MAX_GRAVITY:\r\n            self.velocity_y += GRAVITY\r\n\r\n        temp_x += self.velocity_x\r\n        temp_y += self.velocity_y\r\n\r\n        c, self.x, self.y, self.velocity_x, self.velocity_y = \\\r\n            collision_resolution(self, temp_x, temp_y)\r\n","repo_name":"shelsoloa/AgentObie","sub_path":"game/entities/dog.py","file_name":"dog.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"4801480533","text":"from collections import namedtuple\n\nfrom variables import ROUND, COLS, SQUARE\n\nMove=namedtuple(\"Move\",\"column, shape\")\ndef human_player(state):\n    while(True):\n        raw = input(\"Next move (\"+state.to_move+\"):\")\n        mossa = raw.strip().split()\n        if (len(mossa) != 2):\n            print(\"Invalid move format, try again\")\n            continue\n        if (not mossa[0].isdigit() or (mossa[1]!=SQUARE and mossa[1]!=ROUND)):\n            print(\"Invalid move format, try again\")\n            continue\n        col = int(mossa[0])-1\n        shape=mossa[1]\n        if (col < 0 or col > COLS-1):\n            print(\"Column does not exist, try again\")\n            continue\n\n        if (not state.grid.get_square(0,col).is_empty()):\n            print(\"Column already full, try again\")\n            continue\n\n        if (state.pieces[state.to_move][shape] == 0):\n            print(\"You have run out of\",shape,\"pieces! Try again\")\n            continue\n\n        break\n    return Move(shape=shape,column=col)\n","repo_name":"filippolelli/simplexity-ia","sub_path":"human_player.py","file_name":"human_player.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"28992518910","text":"from threading import Thread\r\nfrom tkinter import *\r\nfrom random import randint\r\nfrom time import sleep, time\r\n\r\n\r\nclass ReactionTime:\r\n    def __init__(self):\r\n        self.window = Tk()\r\n        self.window.geometry('1280x720')\r\n        self.window.title(\"Reaction Time Test\")\r\n        self.window.config(bg='#1E272E')\r\n        self.scores = []\r\n        self.react_ready = False\r\n        self.start_time = None\r\n        self.valid_round = True\r\n        self.round = 0\r\n\r\n        self.start_button = Button(self.window, text='S T A R T', fg='#1E272E', bg='WHITE', font='Bahnschrift 26 bold', bd=0, width=20, command=lambda: (self.start(), self.start_button.place_forget()))\r\n        self.start_button.place(relx=.340625, rely=.425)\r\n\r\n        self.window.mainloop()\r\n\r\n    def reset(self):\r\n        self.window.unbind(\"<Button-1>\")\r\n        self.start_button.place(relx=.340625, rely=.425)\r\n        self.scores = []\r\n        self.round = 0\r\n        self.valid_round = True\r\n\r\n    def _start(self):\r\n        sleep(randint(750, 2250) / 1000)\r\n        if self.valid_round:\r\n            self.window.config(bg='#576574')\r\n            self.start_time = time()\r\n            self.react_ready = True\r\n\r\n    def start(self):\r\n        if self.round != 5:\r\n            # listen for left mouse clicks while a round is running\r\n            self.window.bind(\"<Button-1>\", lambda event: self.register())\r\n            Thread(target=self._start).start()\r\n        else:\r\n            self.end()\r\n\r\n    def register(self):\r\n        if self.react_ready:\r\n            self.scores.append(time() - self.start_time)\r\n            self.window.config(bg='#1E272E')\r\n            self.react_ready = False\r\n            self.round += 1\r\n            self.start()\r\n        else:\r\n            self.valid_round = False\r\n            self.early()\r\n\r\n    def _early(self):\r\n        self.window.config(bg='#1E272E')\r\n        warning = Label(self.window, text=\"!\", bg='white', fg='#1E272E', font='Bahnschrift 60 bold', width=2)\r\n        warning.place(relx=.27, rely=.4)\r\n        early = Label(self.window, text='You clicked too early!\\nRestarting in 3 seconds...', justify=LEFT, bg='#1E272E', fg='WHITE', font='Bahnschrift 30 bold')\r\n        early.place(relx=.37, rely=.4)\r\n        sleep(3)\r\n        warning.place_forget()\r\n        early.place_forget()\r\n        self.reset()\r\n\r\n    def early(self):\r\n        Thread(target=self._early).start()\r\n\r\n    def end(self):\r\n        score_items = []\r\n        score_avg = Label(self.window, text=' '.join(f'AVERAGE {int((sum(self.scores) / 5) * 1000)}ms'), bg='#1E272E', fg='WHITE', font='Bahnschrift 24 bold')\r\n        score_avg.place(relx=.25, rely=.35)\r\n        for score in self.scores:\r\n            score_lbl = Label(self.window, text=f'{int(score * 1000)}ms', bg='white', fg='#1E272E', font='Bahnschrift 18 bold')\r\n            score_lbl.place(relx=.25 + self.scores.index(score) * .1, rely=.45, width=100)\r\n            score_items.append(score_lbl)\r\n        restart = Button(self.window, text='▶', fg='WHITE', bg='#1E272E', font='Bahnschrift 30', height=1, bd=0, command=lambda: ([item.place_forget() for item in score_items], self.reset()))\r\n        restart.place(relx=.691, rely=.32)\r\n        score_items.extend((score_avg, restart))\r\n        self.window.unbind(\"<Button-1>\")\r\n\r\n\r\nif __name__ == '__main__':\r\n    ReactionTime()\r\n","repo_name":"Shivam-M/reaction-time-tester","sub_path":"reaction_time.py","file_name":"reaction_time.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"27984901401","text":"import numpy as np\n\ndef getAckleyResult(x):\n    \n    # Recommended values: a = 20, b = 0.2, c = 2*pi\n\n    a = 20\n    b = 0.2\n    c = 2 * np.pi\n\n    # First summation\n    firstSum = 0\n    for xi in x: \n        firstSum += np.power(xi,2)\n\n    firstSum = -b * np.sqrt((1/len(x))*firstSum)\n\n    # Second summation\n    secondSum = 0\n    for xi in x:\n        secondSum += (np.cos(c*xi))\n\n    secondSum = (1/len(x)) * secondSum \n\n    resultOfAckleyFunction = - a * np.exp(firstSum) - np.exp(secondSum) + a + np.exp(1)\n\n    return resultOfAckleyFunction","repo_name":"rdmarcos49/ComputacionEvolutiva","sub_path":"Ackley.py","file_name":"Ackley.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"25489530970","text":"import sys\nreadline = sys.stdin.readline\n\ncroatia = ['c=','c-','dz=','d-','lj','nj','s=','z='];\n\nword = readline().strip()\n\nret = len(word)\n\nfor c in croatia :\n    ret -= word.count(c)\nprint(ret)","repo_name":"dygmm4288/python_algorithm","sub_path":"Implementation/silver/2941_크로아티아 알파벳.py","file_name":"2941_크로아티아 알파벳.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"10782136014","text":"import pdb\nx_axis = 6\ny_axis = 8\nfor x in range(x_axis):\n    print(\"\")\n    for y in range(y_axis):\n        # y runs over 0..y_axis-1, so compare against y_axis - 1 for the last column\n        if y == y_axis - 1:\n            print(y)\n        else:\n            print(y,end=\" \")\n    pdb.set_trace() #continue, step, next","repo_name":"cribas1/Python_hints","sub_path":"Axis_test.py","file_name":"Axis_test.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"21270545634","text":"from django import forms\nfrom 
django.core.exceptions import ValidationError\n\nfrom .models import ApplicationUser, Question, QuestionReport\nfrom .modules.exams import ExamCreate\n\n\nclass RegistrationForm(forms.ModelForm):\n    \"\"\" Form for user registration \"\"\"\n    class Meta:\n        model = ApplicationUser\n        fields = ['username', 'password']\n        widgets = {'password': forms.PasswordInput()}\n\n\nclass ExamSetupForm(forms.Form):\n    question_number = forms.IntegerField(label='Question number')\n\n    def __init__(self, *args, **kwargs):\n        # pop the custom kwarg before calling the base Form constructor, which does not accept it\n        self.exam_id = kwargs.pop('exam_id', None)\n        super(ExamSetupForm, self).__init__(*args, **kwargs)\n\n    def clean_question_number(self) -> int:\n        \"\"\" Verify the requested question number does not exceed the total number of questions in the exam \"\"\"\n        cleaned_data = super(ExamSetupForm, self).clean()\n        questions_requested = int(cleaned_data['question_number'])\n        total_question_number = Question.objects.filter(exam_id=self.exam_id).count()\n        if questions_requested > total_question_number:\n            raise ValidationError('Requested number of questions exceeds number of questions in exam')\n        return questions_requested\n\n\nclass UploadForm(forms.Form):\n    \"\"\" Form for uploading data of a new exam \"\"\"\n    exam_title = forms.CharField(label='Exam title')\n    questions_file = forms.FileField(label='File with questions')\n    exam_source = forms.CharField(label='Source of exam', required=False)\n\n    def __init__(self, *args, **kwargs):\n        self.uploader = kwargs.pop('user', None)\n        super(UploadForm, self).__init__(*args, **kwargs)\n\n    def clean(self):\n        \"\"\"\n        Get the exam title and the JSON file with questions from the form.\n        Parse the file, then create the new exam, its questions and their answer variants.\n        If exceptions are raised, they are passed to the form view without saving exam data.\n        \"\"\"\n        cleaned_data = super(UploadForm, self).clean()\n        exam_title = cleaned_data.get('exam_title')\n        exam_source = cleaned_data.get('exam_source', None)\n        try:\n            file = cleaned_data.get('questions_file')\n            if file.content_type != 'application/json':\n                raise ValidationError('Question file must be in JSON format')\n            exam_create = ExamCreate()\n            parsing_errors = exam_create.create_exam(exam_title, file, exam_source, is_user_uploaded=True,\n                                                     uploader=self.uploader)\n            if parsing_errors:\n                for error in parsing_errors:\n                    self.add_error(None, error)\n        except AttributeError:\n            self.add_error(None, ValidationError('File with questions data wasn\\'t provided.'))\n\n\nclass QuestionReportCreateUpdateForm(forms.ModelForm):\n    \"\"\" Form for filling or updating exam question report \"\"\"\n\n    class Meta:\n        model = QuestionReport\n        fields = ['text']\n\n    text = forms.CharField(label='Report text')\n\n\nclass QuestionReportUpdateFormAdmin(forms.ModelForm):\n    \"\"\" Form for updating report as admin \"\"\"\n\n    class Meta:\n        model = QuestionReport\n        fields = ['resolution', 'status']\n\n    resolution = forms.CharField(label='Resolution')\n    status = forms.CharField(label='Status')\n","repo_name":"ConstructCP/django_exams","sub_path":"exam_site/exams/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
{"seq_id":"30073740105","text":"from flask import Blueprint, jsonify, request,json, redirect, url_for, session\nfrom config.db import db, app, ma\nfrom models.Tipo_de_Actividad import Tipo_de_Actividad, TipodeActividadSchema\n\nruta_TipoA = Blueprint(\"ruta_Tipo_de_Actividad\",__name__)\n\ntipodea_schema = TipodeActividadSchema()\ntipodeas_Schema 
= TipodeActividadSchema(many=True)\n\n@ruta_TipoA.route(\"/Tipo_de_Actividades/<id>/<tipo>\", methods=[\"GET\"])\ndef tipo_de_actividades(id, tipo):\n    resultall = Tipo_de_Actividad.query.all()\n    result = tipodeas_Schema.dump(resultall)\n    session['actividades'] = result\n    return redirect(url_for(\"ruta_item.items\", id= id, tipo= tipo))\n@ruta_TipoA.route(\"/saveTipodeActividad\", methods = [\"POST\"])\ndef saveTipodeActividad():\n    \n    name = request.json['name']\n    \n    activi = db.session.query(Tipo_de_Actividad.id).filter(Tipo_de_Actividad.nombre == name).all()\n    result = tipodeas_Schema.dump(activi)\n\n    if len(result)==0:\n        new_activi = Tipo_de_Actividad(name)\n        db.session.add(new_activi)\n        db.session.commit()\n        resultall = Tipo_de_Actividad.query.all()\n        result = tipodeas_Schema.dump(resultall)\n        session['actividades'] = result\n\n        activi = db.session.query(Tipo_de_Actividad.id).filter(Tipo_de_Actividad.nombre == name).all()\n        result = tipodeas_Schema.dump(activi)\n\n        act = result[0]['id']\n\n        return jsonify({'mensaje': act}) \n    else:\n        return jsonify({'error': 'Opss... nombre en uso'}), 401\n\n@ruta_TipoA.route(\"/updateTipodeActividad\", methods = [\"PUT\"])\ndef updateTipodeActividad():\n    id = request.json['id']\n    nactivi = Tipo_de_Actividad.query.get(id)\n    nactivi.nombre = request.json['name']\n    db.session.commit()\n    return \"Datos Actualizado con exitos\"\n\n@ruta_TipoA.route(\"/deleteTipodeActividad/<id>\", methods = [\"GET\"])\ndef deleteTipodeActividad(id):\n    activi = Tipo_de_Actividad.query.get(id)\n    db.session.delete(activi)\n    db.session.commit()\n    return jsonify(tipodea_schema.dump(activi))","repo_name":"MauricioMolina12/ClassroomProject","sub_path":"src/api/Tipo_de_Actividad.py","file_name":"Tipo_de_Actividad.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17692751485","text":"from typing import List\n\nfrom .. import Provider as AutomotiveProvider\n\n\nclass Provider(AutomotiveProvider):\n    \"\"\"Implement automotive provider for ``pl_PL`` locale.\n\n    Sources:\n\n    - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Poland\n    \"\"\"\n\n    license_formats = (\n        \"?? #####\",\n        \"?? ####?\",\n        \"?? ###??\",\n        \"?? #?###\",\n        \"?? #??##\",\n        \"??? ?###\",\n        \"??? ##??\",\n        \"??? #?##\",\n        \"??? ##?#\",\n        \"??? #??#\",\n        \"??? ??##\",\n        \"??? #####\",\n        \"??? ####?\",\n        \"??? ###??\",\n    )\n\n    def license_plate_regex_formats(self) -> List[str]:\n        \"\"\"Return a regex for matching license plates.\n\n        .. warning::\n            This is technically not a method that generates fake data, and it\n            should not be part of the public API. 
User should refrain from using\n this method.\n \"\"\"\n return [plate.replace(\"?\", \"[A-Z]\").replace(\"#\", \"[0-9]\") for plate in self.license_formats]\n","repo_name":"joke2k/faker","sub_path":"faker/providers/automotive/pl_PL/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":16539,"dataset":"github-code","pt":"16"} +{"seq_id":"19121537013","text":"import random\r\nimport matplotlib.pyplot as plt\r\n\r\n# DNA dizisini oluşturma\r\ndna_sequence = \"ATCGATCGATCGATCGATCG\"\r\n\r\n# Primerlerin belirlenmesi\r\nforward_primer = \"ATCG\"\r\nreverse_primer = \"CGAT\"\r\n\r\n# Boş listeler oluşturma\r\namplified_sequences = []\r\ncycle_numbers = []\r\ntotal_length = len(dna_sequence) # Başlangıçta toplam DNA uzunluğu\r\n\r\n# PCR simülasyonu\r\ndef pcr_simulation(dna_seq, forward, reverse, cycles, total_length):\r\n print(\"Başlangıç DNA Dizisi:\", dna_seq)\r\n for i in range(cycles):\r\n # Primerlerin eşleşmesi\r\n forward_match = dna_seq.find(forward)\r\n reverse_match = dna_seq.find(reverse)\r\n\r\n # DNA amplifikasyonu\r\n amplified_sequence = dna_seq[forward_match:reverse_match + len(reverse)]\r\n amplified_sequences.append(total_length)\r\n cycle_numbers.append(i)\r\n\r\n print(f\"Iterasyon {i + 1} - Amplifikasyon: {amplified_sequence}\")\r\n\r\n # DNA replikasyonu\r\n dna_seq += amplified_sequence\r\n total_length = len(dna_seq)\r\n\r\n return dna_seq, amplified_sequences, cycle_numbers\r\n\r\n# Simülasyonu çalıştırma\r\namplified_dna, amplified_sequences, cycle_numbers = pcr_simulation(dna_sequence, forward_primer, reverse_primer, 10, total_length)\r\nprint(\"Son DNA Dizisi: \", amplified_dna)\r\n\r\n# Grafik oluşturma\r\nplt.figure(figsize=(10, 6))\r\nplt.plot(cycle_numbers, amplified_sequences, marker='o', linestyle='--', color='b')\r\nplt.title('PCR Amplifikasyonu')\r\nplt.xlabel('Amplifikasyon Döngüsü')\r\nplt.ylabel('Toplam DNA Uzunluğu')\r\nplt.show()\r\n","repo_name":"erent8/biotechnological-projects","sub_path":"pcr_tech.py","file_name":"pcr_tech.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71429995207","text":"import time,datetime,os,shutil,json\r\nfrom salt import client,config,loader,key\r\nfrom saltstack.models import Accepted_minion,PlayBook,Project,Async_jobs\r\nfrom saltstack.salt_sls_ret_format import PlayBookResponse\r\nfrom OPcenter.settings import BASE_DIR,SUCCESS_DATA,EXCEPT_DATA\r\n\r\n# 管理key\r\nclass Key_manage(object):\r\n def __init__(self):\r\n self.opts = config.master_config('/etc/salt/master')\r\n self.keys = key.get_key(self.opts)\r\n\r\n # key列出----------------------------------\r\n # 列出全部minion的key\r\n def all_keys(self):\r\n all_keys = self.keys.list_keys()\r\n return all_keys\r\n\r\n # 已添加的minion\r\n def accepted_minion(self):\r\n accepted = self.all_keys()['minions']\r\n return accepted\r\n\r\n # 待添加的minion\r\n def unaccepted_minion(self):\r\n unaccepted = self.all_keys()['minions_pre']\r\n return unaccepted\r\n\r\n # 已经拒绝的minion\r\n def rejected_minion(self):\r\n rejected = self.all_keys()['minions_rejected']\r\n return rejected\r\n\r\n # key操作---------------------------------\r\n # 添加minion\r\n def accept_key(self,minion_id):\r\n try:\r\n self.keys.accept(minion_id)\r\n return True\r\n except Exception as err:\r\n return err\r\n\r\n # 拒绝minion的key\r\n def reject_key(self,minion_id):\r\n try:\r\n self.keys.reject(minion_id)\r\n return True\r\n 
except Exception as err:\r\n            return err\r\n\r\n    # 删除minion的key\r\n    def delete_key(self,minion_id):\r\n        try:\r\n            self.keys.delete_key(minion_id)\r\n            return True\r\n        except Exception as err:\r\n            return err\r\n\r\n# Grains的使用----------------\r\nclass Grains(client.LocalClient):\r\n    # grains获取节点信息\r\n    def get_minion_items(self,minion_id_list):\r\n        # 定义需要获取的信息\r\n        items = ['id', 'osfinger', 'cpu_model', 'num_cpus', 'mem_total']\r\n        # 获取minion_id的硬件信息\r\n        result = self.cmd(minion_id_list, 'grains.item', items,tgt_type='list')\r\n        for id in result.keys():\r\n            items = result[id]\r\n            now_time = datetime.datetime.fromtimestamp(time.time())\r\n            if type(items) == dict:\r\n                items['datetime'] = now_time\r\n                mem_gib = 0.5 if round((items['mem_total'])/1024) == 0 else round((items['mem_total'])/1024)\r\n                items['mem_gib'] = mem_gib\r\n                items['status'] = 1\r\n                Accepted_minion.objects.filter(id=id).update(**items)\r\n            else:\r\n                errinfo={'id':id,'datetime': now_time, 'status': 0, 'cpu_model': '主机不在线', 'osfinger': '主机不在线', 'mem_gib': 0, 'mem_total': 0,'num_cpus': 0,}\r\n                Accepted_minion.objects.filter(id=id).update(**errinfo)\r\n        return result\r\n\r\n# test模块\r\nclass Test_ping(client.LocalClient):\r\n    # 获取主机的状态\r\n    def get_status(self,minion_id_list):\r\n        # id不能为空,避免异常\r\n        if minion_id_list is not None:\r\n            # 接收test.ping的返回结果\r\n            result = self.cmd(minion_id_list,'test.ping',[],tgt_type='list')\r\n            #result必然是一个字典\r\n            # 更新数据库\r\n            for id in result.keys():\r\n                now_time = datetime.datetime.fromtimestamp(time.time())\r\n                status = 1 if result[id] else 0 # True=1;False=0\r\n                # 检测状态更新到数据库\r\n                Accepted_minion.objects.filter(id=id).update(status=status,datetime=now_time)\r\n            return result\r\n\r\n# master配置\r\nclass Master_manage():\r\n    def __init__(self):\r\n        # 配置文件导入目录 sls_conf\r\n        self.init_conf = '%s/saltstack/init_conf/' % BASE_DIR\r\n        self.state_sls = '%s/saltstack/state_sls/' % BASE_DIR\r\n        self.salt_etc = '/etc/salt/'\r\n        self.srv_salt = '/srv/salt/'\r\n    # master配置初始化\r\n    def master_init(self):\r\n        # 判断salt安装与否\r\n        if os.path.exists(self.salt_etc):\r\n\r\n            # Grains自定义目录\r\n            grains_path = self.srv_salt+'_grains/'\r\n            os.makedirs(grains_path) if not os.path.exists(grains_path) else 'grains path existed'\r\n            # 初始化目录\r\n            init = self.srv_salt+'init'\r\n            os.makedirs(init) if not os.path.exists(init) else 'init path existed'\r\n            # 备份master配置文件\r\n            if os.path.exists(self.salt_etc+'master'):\r\n                master_bak = 'master_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')\r\n                os.rename(self.salt_etc+'master', self.salt_etc+master_bak)\r\n            # 删除90天前的master_前缀的配置文件备份\r\n            for bak_file in os.listdir(self.salt_etc):\r\n                if bak_file.startswith('master_') and time.time() - os.path.getmtime(self.salt_etc+bak_file) > 90*24*3600:\r\n                    os.remove(self.salt_etc+bak_file)\r\n            # 导入master的配置文件\r\n            shutil.copyfile(self.init_conf + 'master', self.salt_etc+'master')\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def grains_defined(self):\r\n        # 导入自定义grains文件\r\n        grains_path = '/srv/salt/_grains/'\r\n        grains_file = '/srv/salt/_grains/grains_defined.py'\r\n        if os.path.exists(grains_file):\r\n            grains_bak = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f') + '_grains'\r\n            os.rename(grains_file, grains_path + grains_bak)\r\n        shutil.copyfile(self.init_conf + 'grains_defined.py', '/srv/salt/_grains/grains_defined.py')\r\n\r\n        result = client.LocalClient().cmd_async('*', 'saltutil.sync_all', [])\r\n        #result = client.LocalClient().cmd(minion_id, 'grains.item', 
['md_op_linux_beijing_opcenter-slave','md_op_linux_shanghai_opcenter-slave','md_op_linux_qingdao_opcenter-slave','md_op_linux_shenzhen_opcenter-slave'])\r\n return result\r\n\r\nclass PlayBook_manage():\r\n def __init__(self):\r\n # 配置文件导入目录 sls_conf\r\n self.init_conf = '%s/saltstack/init_conf/' % BASE_DIR\r\n self.state_sls = '%s/saltstack/state_sls/' % BASE_DIR\r\n self.salt_etc = '/etc/salt/'\r\n self.srv_salt = '/srv/salt/'\r\n\r\n # 可执行的剧本\r\n def sls_list(self):\r\n sls_all = PlayBook.objects.all().values()\r\n for sls_items in sls_all:\r\n if not os.path.exists(sls_items['applied_file']):\r\n PlayBook.objects.filter(id=sls_items['id']).update(status=0)\r\n sls_list = PlayBook.objects.filter(status=1).values()\r\n return sls_list\r\n\r\n\r\n def file_upload(self,file_obj):\r\n file_info = file_obj['file_context'].chunks().__next__()\r\n try:\r\n project = file_info.decode().splitlines()[0][2:]\r\n description = file_info.decode().splitlines()[1][2:]\r\n except Exception as error:\r\n EXCEPT_DATA['data'] = '必须是utf-8编码:{} '.format(file_obj['file_context'].name)+ str(error)[0:70]\r\n return EXCEPT_DATA\r\n project_obj = Project.objects.filter(name=project).first()\r\n if project_obj is None :\r\n EXCEPT_DATA['data'] = '不存在的分组:{} '.format(str(project)[0:52])\r\n return EXCEPT_DATA\r\n applied_file = os.path.join(self.srv_salt,project_obj.name,file_obj['file_context'].name)\r\n os.makedirs(os.path.dirname(applied_file)) if not os.path.exists(os.path.dirname(applied_file)) else None\r\n applied_file_count = PlayBook.objects.filter(applied_file=applied_file).count()\r\n if applied_file_count != 0 :\r\n # 返回结果\r\n EXCEPT_DATA['data'] = '文件名已存在:{} '.format(file_obj['file_context'].name)\r\n return EXCEPT_DATA\r\n else:\r\n # 保存为文件\r\n with open(applied_file, 'wb') as f:\r\n for chunk in file_obj['file_context'].chunks():\r\n f.write(chunk)\r\n # 保存到数据库\r\n # 剧本名\r\n sls = applied_file[9:-4] if applied_file[-4:] == '.sls' else '%s文件' % applied_file.split('.')[-1]\r\n PlayBook.objects.create(project_id=project_obj.id,description=description,applied_file=applied_file,sls=sls)\r\n SUCCESS_DATA['data'] = '上传成功'\r\n return SUCCESS_DATA\r\n\r\n # 查看和编辑\r\n def save(self, playbook_path,playbook_context):\r\n try:\r\n with open(playbook_path, 'w') as f:\r\n f.write(playbook_context)\r\n return True\r\n except Exception as error:\r\n return error\r\n\r\n def delete(self,playbook_path):\r\n try:\r\n # 删除文件(移动到回收目录)\r\n playbook_file = os.path.basename(playbook_path)\r\n recycling = '/srv/salt/Recycling/'\r\n os.makedirs(recycling) if not os.path.exists(recycling) else None\r\n shutil.move(playbook_path,recycling+playbook_file)\r\n # 删除数据库记录\r\n PlayBook.objects.filter(applied_file=playbook_path).delete()\r\n return True\r\n except Exception as error:\r\n return error\r\n\r\n# 执行剧本\r\nclass Minion_state(object):\r\n def __init__(self):\r\n self.client = client.LocalClient()\r\n # 执行剧本\r\n def exe_sls(self,number,minion_id_list, playbook_id):\r\n # 异步执行���态,minion_list是一个列表,playbook是一个字符串\r\n try:\r\n playbook = PlayBook.objects.get(id=playbook_id)\r\n except Exception as error:\r\n return (error, number)\r\n minion_dict_values= Accepted_minion.objects.in_bulk(minion_id_list).values()\r\n minion_list = []\r\n for minion_id in minion_dict_values:\r\n minion_list.append(str(minion_id))\r\n if len(minion_id_list) == len(minion_list):\r\n # 异步执行状态,minion_list是一个id列表,playbook.sls是一个剧本字符串\r\n jid = self.client.cmd_async(minion_list, 'state.sls', [playbook.sls],tgt_type='list')\r\n # 更新jid到数据库\r\n start_time = 
datetime.datetime.fromtimestamp(time.time())\r\n Async_jobs.objects.filter(number=number).update(jid=jid, start_time=start_time, status=1)\r\n return (int(jid),number)\r\n else:\r\n for salt_id in minion_id_list:\r\n if Accepted_minion.objects.filter(salt_id=salt_id).values():\r\n continue\r\n else:\r\n return (int(salt_id),number)\r\n else:\r\n return (0, number)\r\n # 保存执行结果\r\n def save_sls(self,number,information):\r\n finish_time = datetime.datetime.fromtimestamp(time.time())\r\n if 'ERROR' in information:\r\n Async_jobs.objects.filter(number=number).update(finish_time=finish_time, status=3,success_total=0,information=json.dumps(information))\r\n else:\r\n format_info = PlayBookResponse(information)\r\n try:\r\n Async_jobs.objects.filter(number=number).update(finish_time=finish_time, status=2, success_total=format_info.all['success'],information=format_info.all)\r\n except Exception:\r\n Async_jobs.objects.filter(number=number).update(finish_time=finish_time, status=2, success_total=0, information=format_info.all)\r\n","repo_name":"yin6516008/OPcenter","sub_path":"saltstack/salt_manage.py","file_name":"salt_manage.py","file_ext":"py","file_size_in_byte":11401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32955359470","text":"from datetime import timedelta\nfrom functools import cached_property\n\nfrom PySide2.QtCore import Qt, Signal\nfrom PySide2.QtGui import QPen, QTransform\nfrom PySide2.QtWidgets import QGraphicsDropShadowEffect, QGraphicsSceneMouseEvent\n\nfrom LevityDash.lib.ui.frontends.PySide.Modules.Handles.Incrementer import IncrementerGroup, Incrementer\n\nfrom LevityDash.lib.utils.data import TimeFrameWindow\nfrom LevityDash.lib.ui.Geometry import LocationFlag\nfrom LevityDash.lib.ui.frontends.PySide.utils import colorPalette\n\n__all__ = [\"GraphZoom\", \"TimeframeIncrementer\"]\n\n\"\"\"\nAdjusts the scope of a graph\nEvery Item added to a graph must connect to the action signal and have a slot that accepts an Axis object\nFigureRects, Plots, and PeakTroughLists are all connected to this signal\n\"\"\"\n\n\nclass TimeframeIncrementer(Incrementer):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(TimeframeIncrementer, self).__init__(*args, **kwargs)\n\t\tself.setVisible(True)\n\t\tself.setEnabled(True)\n\n\tdef toolTip(self) -> str:\n\t\tif self.location.isLeft:\n\t\t\treturn \"Zoom out\"\n\t\telse:\n\t\t\treturn \"Zoom in\"\n\n\tdef hoverMoveEvent(self, event) -> None:\n\t\tpos = event.pos()\n\t\tif pos.x() < 1 or pos.y() < 1:\n\t\t\tself.setToolTip(self.toolTip())\n\t\tsuper(TimeframeIncrementer, self).hoverMoveEvent(event)\n\n\tdef increase(self):\n\t\tacceptedButtons = self.acceptedMouseButtons()\n\t\tself.setAcceptedMouseButtons(Qt.NoButton)\n\t\tself.parent.timeframe.decrease(self.parent.incrementValue)\n\t\tsuper(TimeframeIncrementer, self).increase()\n\t\tself.ensureFramed()\n\t\tself.setAcceptedMouseButtons(acceptedButtons)\n\n\tdef decrease(self):\n\t\tacceptedButtons = self.acceptedMouseButtons()\n\t\tself.setAcceptedMouseButtons(Qt.NoButton)\n\t\tself.parent.timeframe.increase(self.parent.incrementValue)\n\t\tsuper(TimeframeIncrementer, self).decrease()\n\t\tself.ensureFramed()\n\t\tself.setAcceptedMouseButtons(acceptedButtons)\n\n\tdef ensureFramed(self):\n\t\treturn\n\t\tgraph = self.surface\n\t\ttd = max((f.figureTimeRangeMax for f in graph.figures), default=graph.timeframe.range)\n\t\tr = td.total_seconds()/3600*graph.pixelsPerHour\n\t\tif r < graph.width():\n\t\t\tgraph.timeframe.range = 
td\n\n\t@cached_property\n\tdef _shape(self):\n\t\ts = super(TimeframeIncrementer, self)._shape\n\t\tT = QTransform()\n\t\tT.scale(1.3, 1.3)\n\t\tS = T.map(s)\n\t\tcenterDiff = s.boundingRect().center() - S.boundingRect().center()\n\t\tS.translate(centerDiff)\n\t\treturn S\n\n\t@property\n\tdef position(self):\n\t\treturn super(TimeframeIncrementer, self).position\n\n\tdef paint(self, painter, option, widget):\n\t\tpainter.setPen(QPen(Qt.black, 10))\n\t\tpainter.drawPath(self._path)\n\t\tsuper(TimeframeIncrementer, self).paint(painter, option, widget)\n\n\nclass GraphZoom(IncrementerGroup):\n\thandleClass = TimeframeIncrementer\n\toffset = -60\n\tlocations = [LocationFlag.BottomLeft, LocationFlag.BottomRight]\n\n\tdef __init__(self, parent: 'Panel', timeframe: TimeFrameWindow, *args, **kwargs):\n\t\tself.timeframe = timeframe\n\t\tsuper(GraphZoom, self).__init__(parent=parent, offset=-60, *args, **kwargs)\n\t\tself.setVisible(True)\n\t\tself.setEnabled(True)\n\n\t@property\n\tdef incrementValue(self) -> int:\n\t\tdays = self.timeframe.rangeSeconds/60/60/24\n\t\tif days < 1:\n\t\t\treturn timedelta(hours=3)\n\t\telif days < 2:\n\t\t\treturn timedelta(hours=6)\n\t\telif days < 5:\n\t\t\treturn timedelta(hours=12)\n\t\telif days < 10:\n\t\t\treturn timedelta(days=1)\n\t\thour = self.timeframe.rangeSeconds/60/60\n\t\tif hour < 24:\n\t\t\treturn timedelta(days=1)\n\t\telif hour < 18:\n\t\t\treturn timedelta(hours=1)\n\t\telif hour < 12:\n\t\t\treturn timedelta(minutes=30)\n\t\telif hour < 6:\n\t\t\treturn timedelta(minutes=15)\n\t\telse:\n\t\t\treturn timedelta(minutes=5)\n","repo_name":"noblecloud/LevityDash","sub_path":"src/LevityDash/lib/ui/frontends/PySide/Modules/Handles/Timeframe.py","file_name":"Timeframe.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"41589864141","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.hashers import make_password\nfrom django.shortcuts import render, redirect\nfrom django.views.generic.base import View\nfrom .models import UserProfile\n\nfrom .forms import LoginForm, RegisterForm, UserInfoForm\n\n\ntitle='学习分析在线服务系统'\nphoneNumber='123'\nclass LoginView(View):\n def get(self,request):\n login_form=LoginForm()\n return render(request,'login.html',{'login_form':login_form,'title':title,'phoneNumber':phoneNumber})\n def post(self,request):\n login_form=LoginForm(request.POST)\n if login_form.is_valid():\n user_name=request.POST.get('username','')\n user_password=request.POST.get('password','')\n user=authenticate(username=user_name,password=user_password)\n if user is not None:\n login(request,user)\n return render(request, \"index.html\")\n else:\n return render(request, \"login.html\", {\"msg\": \"用户名或密码错误\", \"login_form\": login_form})\n else:\n return render(request, \"login.html\", {\"login_form\": login_form})\nclass LogoutView(View):\n def get(self, request):\n logout(request)\n return render(request,'login.html',{\"msg\":\"您已经成功退出登录状态\" })\nclass RegisterView(View):\n def get(self,request):\n register_form=RegisterForm()\n return render(request, \"register.html\", {\"register_form\": register_form, \"title\": title})\n def post(self,request):\n register_form=RegisterForm(request.POST)\n if request.method=='POST':\n if register_form.is_valid():\n user_name=request.POST.get('username','')\n if UserProfile.objects.filter(username=user_name):\n return 
render(request,'register.html',{'title':title,'register_form':register_form,'msg':'该账号已经被注册'})\n user_password = request.POST.get(\"password\", \"\")\n user_nickname = request.POST.get(\"nickname\", \"\")\n user_danwei = request.POST.get(\"danwei\", \"\")\n user_email=request.POST.get('email','')\n user_profile = UserProfile()\n user_profile.username = user_name\n user_profile.nickname = user_nickname\n user_profile.danwei = user_danwei\n user_profile.email = user_email\n user_profile.is_active = True\n user_profile.password = make_password(user_password)\n user_profile.save()\n return render(request, \"register.html\", {\"title\": title, \"msg\": u\"注册成功\"})\n else:\n return render(request, \"register.html\",{\"register_form\":register_form, \"title\": title})\n else:\n form = UserInfoForm(\n initial={\n\n 'danwei': '一中',\n 'leibie': '管理员',\n }\n )\n return render(request, \"register.html\", {\"register_form\": form, \"title\": title})\ndef userviews(request):\n user_form=UserInfoForm(request.POST)\n user=UserProfile.objects.get(username=request.user)\n if request.method=='POST':\n if user_form.is_valid():\n nick_name=user_form.cleaned_data['nick_name']\n birthday=user_form.cleaned_data['birthday']\n gender=user_form.cleaned_data['gender']\n leibie=user_form.cleaned_data['leibie']\n danwei = user_form.cleaned_data['danwei']\n nianji = user_form.cleaned_data['nianji']\n user.nickname = nick_name\n user.birthday = birthday\n user.gender = gender\n user.leibie = leibie\n user.danwei = danwei\n user.nianji = nianji\n user.save()\n message = '修改成功'\n return redirect('/user_center/',{'message':message})\n else:\n message = '修改失败'\n user_form = UserInfoForm()\n return render(request, 'usercenter-info.html',{'Edit_FormInput':user_form,'message':message})\n else:\n nick_name = user.nickname\n birthday = user.birthday\n gender = user.gender\n leibie = user.leibie\n danwei = user.danwei\n nianji = user.nianji\n form = UserInfoForm(\n initial={\n\t 'nick_name' : nick_name,\n\t 'birthday' : birthday,\n\t 'gender': gender,\n\t 'leibie': leibie,\n\t 'danwei': danwei,\n\t 'nianji': nianji,\n }\n )\n return render(request, 'usercenter-info.html', {'Edit_FormInput':form})\n\ndef index(request):\n userinfo = UserProfile.objects.all()\n return render(request, 'index.html', locals())\n\n","repo_name":"newwuchaoping/test2","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6241257305","text":"#!/usr/bin/env python3\nimport unittest\nfrom unittest.mock import patch\n\nfrom generator import ReverseShellGenerator\nfrom netifaces import AF_INET\nfrom termcolor import colored\n\n\n\nclass TestReverseShellGenerator(unittest.TestCase):\n\n def setUp(self):\n self.generator = ReverseShellGenerator()\n\n @patch('builtins.print')\n @patch('reverseshellgenerator.generator.ni.ifaddresses')\n @patch('reverseshellgenerator.generator.ni.interfaces')\n def test_do_ip_interface(self, mock_interfaces, mock_ifaddresses, mock_print):\n # mock network interface and corresponding IP address\n mock_interfaces.return_value = ['eth0']\n mock_ifaddresses.return_value = {AF_INET: [{'addr': '192.168.0.1'}]}\n\n self.generator.do_ip('eth0')\n mock_print.assert_called_with(colored(\"The IP address has been set as '192.168.0.1'\", 'green'))\n self.assertEqual(self.generator.ip, '192.168.0.1')\n\n @patch('builtins.print')\n @patch('reverseshellgenerator.generator.ni.interfaces')\n def 
test_do_ip_address(self, mock_interfaces, mock_print):\n # mock no network interface\n mock_interfaces.return_value = []\n\n self.generator.do_ip('192.168.0.1')\n mock_print.assert_called_with(colored(\"The IP address has been set as '192.168.0.1'\", 'green'))\n self.assertEqual(self.generator.ip, '192.168.0.1')\n\n @patch('builtins.print')\n @patch('reverseshellgenerator.generator.ni.interfaces')\n def test_do_ip_invalid(self, mock_interfaces, mock_print):\n # mock no network interface\n mock_interfaces.return_value = []\n\n self.generator.do_ip('999.999.999.999')\n mock_print.assert_called_with(colored(\"'999.999.999.999' does not appear to be an IPv4 or IPv6 address\", 'red'))\n self.assertIsNone(self.generator.ip)\n\n @patch('builtins.print')\n def test_do_ip_empty(self, mock_print):\n self.generator.do_ip('')\n mock_print.assert_called_with(colored('The IP address must not be empty', 'red'))\n self.assertIsNone(self.generator.ip)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"totekuh/reverse-shell-generator","sub_path":"src/tests/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"17792701221","text":"import torch.nn as nn\nfrom khrylib.rl.core.distributions import DiagGaussian\nfrom khrylib.rl.core.policy import Policy\nfrom khrylib.rl.core.running_norm import RunningNorm\nfrom khrylib.models.mlp import MLP\nfrom khrylib.utils.math import *\nfrom design_opt.models.utils import *\nfrom design_opt.models.pos_enc import PositionalEncoding\n\n\nclass PolicyTransformer(Policy):\n def __init__(self, cfg, agent):\n super().__init__()\n self.type = 'gaussian'\n self.cfg = cfg\n self.agent = agent\n self.attr_obs_dim = agent.attr_obs_dim\n self.sim_obs_dim = agent.sim_obs_dim\n self.design_obs_dim = agent.design_obs_dim\n self.control_state_dim = self.attr_obs_dim + self.sim_obs_dim + self.design_obs_dim\n self.design_state_dim = self.attr_obs_dim + self.design_obs_dim\n self.control_action_dim = agent.control_action_dim\n self.model_action_dim = agent.design_obs_dim\n self.action_dim = self.control_action_dim + self.model_action_dim\n\n \"\"\" control \"\"\"\n self.control_norm = RunningNorm(self.control_state_dim) if cfg.get('control_norm', True) else None\n cur_dim = self.control_state_dim\n # transformer\n tf_cfg = cfg['control_transformer']\n self.control_pos_enc = PositionalEncoding(tf_cfg['model_dim'], cur_dim, enc_type='original')\n tf_layers = nn.TransformerEncoderLayer(tf_cfg['model_dim'], tf_cfg['nhead'], tf_cfg['ff_dim'], tf_cfg['dropout'])\n self.control_tf = nn.TransformerEncoder(tf_layers, tf_cfg['nlayer'])\n cur_dim = tf_cfg['model_dim']\n # dist\n self.control_action_mean = nn.Linear(cur_dim, self.control_action_dim)\n self.control_action_log_std = nn.Parameter(torch.ones(1, self.control_action_dim) * cfg['control_log_std'], requires_grad=not cfg['fix_control_std'])\n init_fc_weights(self.control_action_mean)\n\n \"\"\" design \"\"\"\n self.model_norm = RunningNorm(self.design_state_dim) if cfg.get('model_norm', True) else None\n cur_dim = self.design_state_dim\n # transformer\n tf_cfg = cfg['model_transformer']\n self.model_pos_enc = PositionalEncoding(tf_cfg['model_dim'], cur_dim, enc_type='original')\n tf_layers = nn.TransformerEncoderLayer(tf_cfg['model_dim'], tf_cfg['nhead'], tf_cfg['ff_dim'], tf_cfg['dropout'])\n self.model_tf = nn.TransformerEncoder(tf_layers, tf_cfg['nlayer'])\n cur_dim = 
tf_cfg['model_dim']\n # dist\n self.model_action_mean = nn.Linear(cur_dim, self.model_action_dim)\n self.model_action_log_std = nn.Parameter(torch.ones(1, self.model_action_dim) * cfg['model_log_std'], requires_grad=not cfg['fix_model_std'])\n init_fc_weights(self.model_action_mean)\n\n def batch_data(self, x):\n obs, edges, use_design_action, num_nodes = zip(*x)\n obs = torch.cat(obs)\n use_design_action = np.concatenate(use_design_action)\n num_nodes = np.concatenate(num_nodes)\n edges_new = torch.cat(edges, dim=1)\n num_nodes_cum = np.cumsum(num_nodes)\n return obs, edges_new, use_design_action, num_nodes, num_nodes_cum\n\n def forward(self, x):\n # attr_obs, sim_obs = torch.split(obs, [self.attr_obs_dim, self.sim_obs_dim])\n control_x, design_x = [], []\n node_design_mask = []\n design_mask = []\n total_num_nodes = 0\n for i, x_i in enumerate(x):\n num = x_i[-1].item()\n is_design = x_i[-2].item() == 1\n (control_x, design_x)[is_design].append(x_i)\n node_design_mask += [is_design] * num\n design_mask.append(is_design)\n total_num_nodes += num\n node_design_mask = torch.BoolTensor(node_design_mask)\n design_mask = torch.BoolTensor(design_mask)\n # control\n if len(control_x) > 0:\n obs, edges, use_design_action, num_nodes, num_nodes_cum_control = self.batch_data(control_x)\n x = obs\n if self.control_norm is not None:\n x = self.control_norm(x)\n \n n = int(num_nodes.mean())\n x = x.view(-1, n, x.shape[-1]).transpose(0, 1).contiguous()\n x = self.control_pos_enc(x)\n x = self.control_tf(x)\n x = x.transpose(0, 1).reshape(-1, x.shape[-1])\n\n control_action_mean = self.control_action_mean(x)\n control_action_std = self.control_action_log_std.expand_as(control_action_mean).exp()\n control_dist = DiagGaussian(control_action_mean, control_action_std)\n else:\n num_nodes_cum_control = None\n control_dist = None\n \n # design\n if len(design_x) > 0:\n obs, edges, use_design_action, num_nodes, num_nodes_cum_design = self.batch_data(design_x)\n obs = torch.cat((obs[:, :self.attr_obs_dim], obs[:, -self.design_obs_dim:]), dim=-1)\n x = obs\n if self.model_norm is not None:\n x = self.model_norm(x)\n \n n = int(num_nodes.mean())\n x = x.view(-1, n, x.shape[-1]).transpose(0, 1).contiguous()\n x = self.model_pos_enc(x)\n x = self.model_tf(x)\n x = x.transpose(0, 1).reshape(-1, x.shape[-1])\n\n model_action_mean = self.model_action_mean(x)\n model_action_std = self.model_action_log_std.expand_as(model_action_mean).exp()\n model_dist = DiagGaussian(model_action_mean, model_action_std)\n else:\n num_nodes_cum_design = None\n model_dist = None\n return control_dist, model_dist, node_design_mask, design_mask, total_num_nodes, num_nodes_cum_control, num_nodes_cum_design, x[0][0].device\n\n def select_action(self, x, mean_action=False):\n \n control_dist, model_dist, node_design_mask, _, total_num_nodes, _, _, device = self.forward(x)\n if control_dist is not None:\n control_action = control_dist.mean_sample() if mean_action else control_dist.sample()\n else:\n control_action = None\n\n if model_dist is not None:\n model_action = model_dist.mean_sample() if mean_action else model_dist.sample()\n else:\n model_action = None\n\n action = torch.zeros(total_num_nodes, self.action_dim).to(device)\n if control_action is not None:\n action[~node_design_mask, :self.control_action_dim] = control_action\n if model_action is not None:\n action[node_design_mask, self.control_action_dim:] = model_action\n return action\n\n def get_log_prob(self, x, action):\n action = torch.cat(action)\n control_dist, model_dist, 
node_design_mask, design_mask, total_num_nodes, num_nodes_cum_control, num_nodes_cum_design, device = self.forward(x)\n        action_log_prob = torch.zeros(design_mask.shape[0], 1).to(device)\n        # control log prob\n        if control_dist is not None:\n            control_action = action[~node_design_mask, :self.control_action_dim]\n            control_action_log_prob_nodes = control_dist.log_prob(control_action)\n            control_action_log_prob_cum = torch.cumsum(control_action_log_prob_nodes, dim=0)\n            control_action_log_prob_cum = control_action_log_prob_cum[torch.LongTensor(num_nodes_cum_control) - 1]\n            control_action_log_prob = torch.cat([control_action_log_prob_cum[[0]], control_action_log_prob_cum[1:] - control_action_log_prob_cum[:-1]])\n            action_log_prob[~design_mask] = control_action_log_prob\n        # model log prob\n        if model_dist is not None:\n            model_action = action[node_design_mask, self.control_action_dim:]\n            model_action_log_prob_nodes = model_dist.log_prob(model_action)\n            model_action_log_prob_cum = torch.cumsum(model_action_log_prob_nodes, dim=0)\n            model_action_log_prob_cum = model_action_log_prob_cum[torch.LongTensor(num_nodes_cum_design) - 1]\n            model_action_log_prob = torch.cat([model_action_log_prob_cum[[0]], model_action_log_prob_cum[1:] - model_action_log_prob_cum[:-1]])\n            action_log_prob[design_mask] = model_action_log_prob\n        return action_log_prob\n\n\n","repo_name":"ZhengyiLuo/UniversalHumanoidControl","sub_path":"uhc/models/tf_policy.py","file_name":"tf_policy.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"16"} +{"seq_id":"71210961927","text":"# https://leetcode.com/problems/insert-delete-getrandom-o1/\n\nfrom collections import defaultdict\nimport random\n\n\nclass RandomizedSet:\n    def __init__(self):\n        self.dataMap = defaultdict(\n            int\n        )  # dictionary, aka map, aka hashtable, aka hashmap\n        self.data = []  # list aka array\n\n    def insert(self, val: int) -> bool:\n        if val in self.dataMap:\n            return False\n        self.dataMap[val] = len(self.data)\n        self.data.append(val)\n        return True\n\n    def remove(self, val: int) -> bool:\n        if val not in self.dataMap:\n            return False\n\n        lastEle = self.data[-1]\n        indexToRemove = self.dataMap[val]\n\n        self.data[indexToRemove] = lastEle\n        self.dataMap[lastEle] = indexToRemove\n        self.data.pop()\n        self.dataMap.pop(val)\n        return True\n\n    def getRandom(self) -> int:\n        print(self.data)\n        return random.choice(self.data)\n\n\ns = RandomizedSet()\nprint(s.insert(0))\nprint(s.insert(1))\nprint(s.remove(0))\nprint(s.insert(2))\nprint(s.remove(1))\nprint(s.getRandom())\n","repo_name":"codingpeasant/likou","sub_path":"py/InsertDeleteGetRandom.Py","file_name":"InsertDeleteGetRandom.Py","file_ext":"Py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39692623277","text":"import scrapy\nimport json\nimport os\nfrom scrapy.spiders import Spider\nfrom scrapy.http import FormRequest\nfrom scrapy.http import Request\nfrom chainxy.items import ChainItem\nfrom lxml import etree\nfrom selenium import webdriver\nfrom lxml import html\nimport usaddress\n\nclass discountdrugmart(scrapy.Spider):\n\tname = 'discountdrugmart'\n\tdomain = ''\n\thistory = []\n\n\tdef start_requests(self):\n\t\tinit_url = 'https://discount-drugmart.com/our-store/store-locator/'\n\t\tyield scrapy.Request(url=init_url, callback=self.body) \n\n\tdef body(self, response):\n\t\tdata = response.text.split('var stores =')[1].strip().split('var stores_meta 
=')[0].strip()[:-1]\n\t\tstore_list = json.loads(data)\t\n\t\tfor store in store_list:\n\t\t\titem = ChainItem()\n\t\t\titem['store_name'] = 'Discount Drug Mart'\n\t\t\titem['store_number'] = self.validate(store['store'])\n\t\t\titem['address'] = self.validate(store['address'])\n\t\t\titem['city'] = self.validate(store['city'])\n\t\t\titem['state'] = self.validate(store['state'])\n\t\t\titem['zip_code'] = self.validate(store['zip'])\n\t\t\titem['country'] = 'United States'\n\t\t\titem['phone_number'] = self.validate(store['phone'])\n\t\t\titem['latitude'] = self.validate(store['latitude'])\n\t\t\titem['longitude'] = self.validate(store['longitude'])\n\t\t\titem['store_hours'] = 'Store Hours : ' + self.validate(store['store_hours']) + ' Pharmacy Hours : ' + self.validate(store['pharmacy_hours'])\n\t\t\tyield item\t\t\t\n\n\tdef validate(self, item):\n\t\ttry:\n\t\t\treturn item.strip()\n\t\texcept:\n\t\t\treturn ''\n\n\tdef eliminate_space(self, items):\n\t\ttmp = []\n\t\tfor item in items:\n\t\t\tif self.validate(item) != '':\n\t\t\t\ttmp.append(self.validate(item))\n\t\treturn tmp\n","repo_name":"coralisland-git/Alscrapy-store-locations","sub_path":"172700(BS10)/template/chainxy/spiders/discountdrugmart.py","file_name":"discountdrugmart.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"11810192329","text":"\r\nimport cv2\r\nimport numpy as np\r\n#import image\r\nimage = cv2.imread('alo/2.png')\r\ncv2.imshow('orig', image)\r\ncv2.waitKey(0)\r\n\r\nif image.shape[0] < 40:\r\n print(image.shape[0])\r\n image = cv2.resize(image, (image.shape[1] * 2, image.shape[0] * 2))\r\n\r\n#grayscale\r\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\ncv2.imshow('gray',gray)\r\ncv2.waitKey(0)\r\n\r\n#binary\r\nret,thresh = cv2.threshold(gray, 109, 255, cv2.THRESH_BINARY_INV)\r\ncv2.imshow('second',thresh)\r\ncv2.waitKey(0)\r\n\r\n\r\n\r\n#dilation\r\nkernel = np.ones((9,9), np.uint8)\r\nimg_dilation = cv2.dilate(thresh, kernel, iterations=1)\r\ncv2.imshow('dilated',img_dilation)\r\ncv2.waitKey(0)\r\nim2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\nprev = 0\r\n\r\nkernel = np.ones((5,2), np.uint8)\r\ni=1\r\nwhile prev != len(ctrs):\r\n prev = len(ctrs)\r\n img_dilation = cv2.dilate(img_dilation, kernel, iterations=-1)\r\n im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n print (prev, len(ctrs))\r\n cv2.imshow('dilated', img_dilation)\r\n cv2.waitKey(0)\r\n if i==1:\r\n i=2\r\n else:\r\n i=1\r\n\r\n#sort contours)\r\nsorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0], reverse=True)\r\n\r\nfor i, ctr in enumerate(sorted_ctrs):\r\n # Get bounding box\r\n x, y, w, h = cv2.boundingRect(ctr)\r\n\r\n # Getting ROI\r\n roi = image[y:y+h, x:x+w]\r\n\r\n # show ROI\r\n cv2.imshow('segment no:'+str(i),roi)\r\n cv2.rectangle(image,(x,y),( x + w, y + h ),(90,0,255),2)\r\n cv2.waitKey(0)\r\n cv2.imwrite(\"roi/\" + str(y) + \"-\" + str(y+h) + \".png\", roi)\r\n\r\ncv2.imshow('marked areas',image)\r\ncv2.waitKey(0)\r\n\r\ncv2.imwrite(\"alo.png\", image)\r\n","repo_name":"moranzargari/Handwriting-detection-recognition","sub_path":"lines detection/FindConturs.py","file_name":"FindConturs.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14086193259","text":"# 数据bug问题,亚洲数据\r\n# 中国疫情地区数据\r\n# 
确诊人数,排序,从到高低\r\n# 改变成比较好理解的列名\r\n# top10国家sheet\r\nimport pandas as pd\r\n\r\n\r\ndef fix_issues_from_boss():\r\n # 读取亚洲的数据\r\n data = pd.read_excel('./excel_files/report.xlsx', sheet_name=None, engine='openpyxl')\r\n # 初始化新的报告文件\r\n writer = pd.ExcelWriter('./excel_files/report_fixed.xlsx', engine='openpyxl')\r\n # 依次拿到所有sheet的数据。\r\n for key, df in data.items():\r\n df = df.rename(columns={'confirmedCount': '累计确诊', 'curedCount': '治愈',\r\n 'currentConfirmedCount': '当前确诊', 'deadCount': '死亡'})\r\n if key == '亚洲':\r\n # 找到中国的地区数据\r\n df_china_details = df[(df['countryName'] == '中国') & (df['provinceName'] != '中国')]\r\n # 从亚洲数据里面,删除中国的地区数据\r\n df = df.drop(df_china_details.index)\r\n # 把中国地区数据存入excel\r\n df_china_details = df_china_details.sort_values(by='累计确诊', ascending=False)\r\n df_china_details.to_excel(writer, sheet_name='中国', index=False)\r\n\r\n # 保存进新的report, 按照累计确诊排序\r\n df = df.sort_values(by='累计确诊', ascending=False)\r\n df.to_excel(writer, sheet_name=key, index=False)\r\n\r\n writer.save()\r\n\r\n\r\ndef make_top10_country():\r\n # 读取所有的sheet\r\n all_df_dict = pd.read_excel('./excel_files/report_fixed.xlsx', sheet_name=None, engine='openpyxl')\r\n\r\n # 去掉不需要的sheet 汇总和中国\r\n del all_df_dict['汇总']\r\n del all_df_dict['中国']\r\n # 确保top10sheet是不存在\r\n if 'top10' in all_df_dict.keys():\r\n del all_df_dict['top10']\r\n # 整合数据\r\n all_df = pd.concat(all_df_dict, ignore_index=True)\r\n # 排序,找到top10的国家\r\n all_df = all_df.sort_values(by='累计确诊', ascending=False)\r\n top_10_df = all_df.head(10)\r\n # 保存回report_fixed\r\n writer = pd.ExcelWriter('./excel_files/report_fixed.xlsx', engine='openpyxl', mode='a')\r\n top_10_df.to_excel(writer, sheet_name='top10', index=False)\r\n writer.save()\r\n\r\n\r\nfix_issues_from_boss()\r\nmake_top10_country()\r\n","repo_name":"Akagilnc/python1.0","sub_path":"lesson6.py","file_name":"lesson6.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6539928095","text":"from concurrent.futures import ThreadPoolExecutor\nfrom datetime import date\nfrom datetime import datetime\nimport pandas as pd\nimport requests\nimport streamlit as st\nimport yfinance as yf\nfrom plotly import graph_objs as go\nfrom prophet import Prophet\nfrom prophet.diagnostics import cross_validation\nfrom prophet.diagnostics import performance_metrics\nfrom prophet.plot import plot_cross_validation_metric\nfrom prophet.plot import plot_plotly\n\ndiagnostics_run = False\npage_tabs = [\"**Forecast**\", \"**Recommendation**\", \"**Latest News**\", \"**Diagnostics**\"]\nst.set_page_config(page_title=\"Stock Sense Analytics\", layout=\"wide\")\nst.markdown('
<h1>Stock Sense Analytics</h1>
', unsafe_allow_html=True)\n\n\nclass ForecastStockPrice:\n\n def __init__(self):\n self.csv_file_path = 'ind_nifty500list.csv'\n self.START = datetime(2020, 1, 1).date()\n self.TODAY = date.today()\n\n @st.cache_data\n def get_stocks_from_csv_data(_self):\n \"\"\"\n returns a dictionary with stock_name:stock_symbol and a list with industry name\n :return:\n \"\"\"\n df = pd.read_csv(_self.csv_file_path, sep=',')\n stocks = dict(zip(df['Company Name'], df['Symbol'].astype(str) + '.NS'))\n return stocks\n\n def return_ticker_and_data(self, stocks):\n \"\"\"\n\n :param stocks:\n :return:\n \"\"\"\n stock_name = tuple(stocks.keys())\n selected_stock = st.sidebar.selectbox(\"**Select stock**\", stock_name)\n ticker = stocks[selected_stock]\n start_date = st.sidebar.date_input(\"**Start Date**\", self.START)\n end_date = st.sidebar.date_input(\"**End Date**\", self.TODAY)\n return ticker, start_date, end_date\n\n @staticmethod\n @st.cache_data\n def fetch_stock_news(ticker):\n \"\"\"\n\n :return:\n \"\"\"\n r = requests.get(f\"https://api.stocktwits.com/api/2/streams/symbol/{ticker}.json\")\n data = r.json()\n ticker_symbol_name = ticker.split(\".\")\n st.subheader(f\"Latest news for {ticker_symbol_name[0]}\")\n for message in data['messages']:\n st.image(message['user']['avatar_url'])\n st.write(message['user']['username'])\n st.write(message['created_at'])\n st.write(message['body'])\n st.write(\"---\")\n\n @staticmethod\n @st.cache_resource\n def show_ticker_data(ticker, start_date, end_date):\n \"\"\"\n\n :param ticker:\n :param start_date:\n :param end_date:\n :return:\n \"\"\"\n # create ticker object\n tickerData = yf.Ticker(ticker)\n historical_data = tickerData.history(start=start_date, end=end_date)\n historical_data = historical_data.reset_index()\n historical_data['Date'] = historical_data['Date'].dt.tz_localize(None)\n\n # st.write(tickerData.info)\n string_name = tickerData.info['longName']\n st.header(f'**{string_name}**')\n\n string_rec = tickerData.info['recommendationKey']\n st.info(f\"Recommendation: {string_rec}\")\n\n # Ticker Data\n st.subheader('Ticker data')\n historical_data = historical_data.rename_axis('S.No.')\n st.dataframe(historical_data.tail(), use_container_width=True)\n return historical_data\n\n @staticmethod\n @st.cache_data\n def plot_raw_data(historical_data):\n \"\"\"\n\n :return:\n \"\"\"\n # Create the candlestick chart figure\n fig = go.Figure(data=go.Candlestick(x=historical_data['Date'],\n open=historical_data['Open'],\n high=historical_data['High'],\n low=historical_data['Low'],\n close=historical_data['Close']))\n # Customize the layout\n fig.update_layout(\n title=\"Price Action Graph\",\n xaxis_title=\"Date\",\n yaxis_title=\"Price\"\n )\n # Render the chart in Streamlit\n st.plotly_chart(fig, use_container_width=True)\n\n @staticmethod\n def outlier_data(ticker):\n lockdowns = pd.DataFrame([\n {'holiday': 'lockdown_1', 'ds': '2020-03-21', 'lower_window': 0, 'ds_upper': '2020-06-06'},\n {'holiday': 'lockdown_2', 'ds': '2020-07-09', 'lower_window': 0, 'ds_upper': '2020-10-27'},\n {'holiday': 'lockdown_3', 'ds': '2021-02-13', 'lower_window': 0, 'ds_upper': '2021-02-17'},\n {'holiday': 'lockdown_4', 'ds': '2021-05-28', 'lower_window': 0, 'ds_upper': '2021-06-10'},\n {'holiday': 'war_start', 'ds': '2022-02-23', 'lower_window': 0, 'ds_upper': '2022-03-09'},\n ])\n if 'ADANI' in ticker:\n lockdown_adani = {'holiday': 'Hindunburg', 'ds': '2023-01-25', 'lower_window': 0, 'ds_upper': '2023-05-22'}\n new_row = pd.DataFrame([lockdown_adani])\n lockdowns = 
pd.concat([lockdowns, new_row], ignore_index=True)\n\n lockdowns[['ds', 'ds_upper']] = lockdowns[['ds', 'ds_upper']].apply(pd.to_datetime)\n lockdowns['upper_window'] = (lockdowns['ds_upper'] - lockdowns['ds']).dt.days\n return lockdowns\n\n @staticmethod\n def get_data_to_predict_future(historical_data, ticker):\n # lockdowns = ForecastStockPrice.outlier_data()\n\n st.subheader(\"Forecast Stock Price\")\n df_train = historical_data[['Date', 'Close']].rename(columns={\"Date\": \"ds\", \"Close\": \"y\"})\n\n st.subheader(\"Model Forecast Parameters\")\n col01, col11, col12 = st.columns(3)\n n_days = col01.slider(\"**Days of prediction**\", 1, 365, 180)\n changepoint_prior_scale = col11.slider(\"**Changepoint Prior Scale**\", 0.05, 0.5, 0.2)\n changepoint_range = col12.slider(\"**Changepoint Range**\", 0.8, 0.95, 0.95)\n return changepoint_range, changepoint_prior_scale, df_train, n_days\n\n @staticmethod\n @st.cache_data\n def predict_future(changepoint_range, changepoint_prior_scale, lockdowns, df_train, n_days):\n \"\"\"\n\n :param changepoint_range:\n :param changepoint_prior_scale:\n :param lockdowns:\n :param df_train:\n :param n_days:\n :return:\n \"\"\"\n with st.spinner(\"Future Share Price prediction in progress...\"):\n stock_mod = Prophet(changepoint_range=changepoint_range,\n changepoint_prior_scale=changepoint_prior_scale,\n holidays=lockdowns)\n stock_mod.fit(df_train)\n future = stock_mod.make_future_dataframe(periods=n_days)\n forecast = stock_mod.predict(future)\n return forecast, stock_mod\n\n @staticmethod\n @st.cache_data\n def plot_forcast_data(forecast_var, _stock_mod):\n st.subheader('Forecast data')\n forecast_var.index.name = 'S.No.'\n with st.spinner(\"Forcast data in progress...\"):\n st.dataframe(\n forecast_var[['ds', 'yhat', 'yhat_lower', 'yhat_upper', 'trend', 'trend_lower',\n 'trend_upper']].tail().style.set_properties(**{'text-align': 'center'}),\n use_container_width=True)\n st.success(\"Forcast data is fetched successfully!\")\n\n st.subheader('Forecast graph')\n with st.spinner(\"Plotting forcast graph...\"):\n fig1 = plot_plotly(_stock_mod, forecast_var, changepoints=True)\n st.plotly_chart(fig1, use_container_width=True)\n st.success(\"Forcast data is plotted successfully!\")\n\n st.subheader(\"Forecast components\")\n with st.spinner(\"Plotting forecast components...\"):\n fig2 = _stock_mod.plot_components(forecast_var)\n st.write(fig2)\n st.success(\"Forcast components are fetched successfully!\")\n\n def run_forecast(self):\n st.title('Forecast')\n stocks = self.get_stocks_from_csv_data()\n ticker, start_date, end_date = self.return_ticker_and_data(stocks)\n historical = ForecastStockPrice.show_ticker_data(ticker, start_date, end_date)\n ForecastStockPrice.plot_raw_data(historical)\n lockdowns = ForecastStockPrice.outlier_data(ticker)\n changepoint_range, changepoint_prior_scale, df_train, n_days = ForecastStockPrice.get_data_to_predict_future(\n historical, ticker)\n forecast, stock_mod = ForecastStockPrice.predict_future(changepoint_range, changepoint_prior_scale,\n lockdowns,\n df_train, n_days)\n ForecastStockPrice.plot_forcast_data(forecast, stock_mod)\n return ticker, stock_mod, historical\n\n @staticmethod\n @st.cache_data\n def run_diagnostics(initial_training_period, change_period, forecast_horizon, _stock_mod):\n \"\"\"\n\n :param initial_training_period:\n :param change_period:\n :param forecast_horizon:\n :param _stock_mod:\n :return:\n \"\"\"\n st.write(\"Cross Validation\")\n df_cv = cross_validation(_stock_mod, 
initial=f\"{initial_training_period} days\",\n period=f\"{change_period} days\",\n horizon=f\"{forecast_horizon} days\")\n df_cv.index.name = 'S.No.'\n st.dataframe(df_cv.head().style.set_properties(**{'text-align': 'center'}), use_container_width=True)\n\n st.write(\"Performance Metrics\")\n df_p = performance_metrics(df_cv)\n df_p['horizon'] = df_p['horizon'].astype(str)\n df_p.index.name = 'S.No'\n st.dataframe(df_p.head().style.set_properties(**{'text-align': 'center'}), use_container_width=True)\n\n st.write(\"Plot Cross Validation Metrics\")\n fig3 = plot_cross_validation_metric(df_cv, metric='mape')\n st.write(fig3)\n\n def diagnostics_data(self, stock_mod):\n st.subheader(\"Do You want to run Diagnostics?\")\n diagnostics_option = st.selectbox(\"**Choose an option**\", ('Yes', 'No'), key='diagnostics')\n\n if diagnostics_option == 'Yes':\n st.subheader(\"Diagnostics parameters\")\n col1, col2, col3 = st.columns(3)\n initial_training_period = col1.slider(\"**Initial Training Period**\", 100, 2500, 500)\n change_period = col2.slider(\"**Spacing Between Cutoff Dates**\", 25, 100, 50)\n forecast_horizon = col3.slider(\"**Forecast Horizon**\", 7, 365, 180)\n\n if st.button('Run Diagnostics'):\n with st.spinner(\"Backtesting is in progress...\"):\n self.run_diagnostics(initial_training_period, change_period, forecast_horizon, stock_mod)\n else:\n st.write(\"No Diagnostics to run.\")\n\n @staticmethod\n @st.cache_data\n def get_recommendation_key(symbol):\n try:\n financial_data = yf.Ticker(symbol).info\n recommendation_key = financial_data.get('recommendationKey', None)\n return recommendation_key\n except Exception as e:\n return str(e)\n\n @st.cache_data\n def get_recommendations(_self, stocks):\n # Use ThreadPoolExecutor to fetch recommendation keys for multiple symbols concurrently\n with ThreadPoolExecutor() as executor:\n recommendations = list(executor.map(_self.get_recommendation_key, stocks.values()))\n\n # Create a pandas DataFrame with symbols and their recommendations\n data = {\n 'Company Name': stocks.keys(),\n 'Recommendation': recommendations\n }\n df = pd.DataFrame(data)\n\n # Filter symbols with recommendation as 'Strong Buy' or 'Underperform'\n filtered_df = df[df['Recommendation'].isin(['strong_buy', 'underperform'])]\n return filtered_df\n\n def show_recommendations(_self):\n stocks = _self.get_stocks_from_csv_data()\n with st.spinner(\"Fetching recommendations...\"):\n recommendations = _self.get_recommendations(stocks)\n # Display the filtered symbols and their recommendations using st.dataframe\n if not recommendations.empty:\n st.dataframe(recommendations, use_container_width=True)\n else:\n st.write(\"No symbols with 'Strong Buy' or 'Underperform' recommendation available.\")\n\n def run_recommendations(self, df):\n st.title(\"Recommendation\")\n # if st.button(\"Recommendations Based On Analysts Recommendations\"):\n st.write(\"**Symbols with recommendation as 'Strong Buy' or 'Underperform'**\")\n self.show_recommendations()\n\n def run_app(self):\n tab1, tab2, tab3, tab4 = st.tabs(page_tabs)\n with tab1:\n ticker, stock_mod, df = self.run_forecast()\n with tab2:\n self.run_recommendations(df)\n with tab3:\n ForecastStockPrice.fetch_stock_news(f\"{ticker}E\")\n with tab4:\n self.diagnostics_data(stock_mod)\n\n\nif __name__ == \"__main__\":\n app = ForecastStockPrice()\n 
app.run_app()\n","repo_name":"anujteotia/streamlit_financial_dashboard","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37985594916","text":"armstrong = int(input());\n\n\narmlength = len(str(armstrong));\n\nbuffer = armstrong;\nsum =0;\nwhile (buffer > 0):\n digit = buffer % 10;\n sum += digit ** armlength;\n buffer //=10;\n\nif(sum == armstrong):\n print(\"yes\");\nelse:\n print(\"no\"); ","repo_name":"eragon11/codekata","sub_path":"Beginner/set2/armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14710079396","text":"import time\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom sklearn import linear_model\n\nfrom config import enable_plotter\n\n\nclass GraphAnalyser:\n def __init__(self, graph):\n self.graph = graph\n self._degree_sequence = None\n\n def print_stats(self):\n print(\"Graph stats:\")\n self._stats(self.graph)\n\n @staticmethod\n def _stats(g):\n print(\"nodes_cnt (rzad) = {nodes} edges_cnt (rozmiar) = {edges}\"\n .format(nodes=g.number_of_nodes(),\n edges=g.number_of_edges()))\n\n def connected_components(self):\n print(\"Connected components:\")\n start = time.time()\n print(\"total = {cnt}\".format(cnt=nx.number_connected_components(self.graph)))\n\n max_sub_graphs = max(nx.connected_component_subgraphs(self.graph), key=len)\n print(\"TIME:\", time.time() - start)\n self._stats(max_sub_graphs)\n\n def all_connected_components(self):\n print(\"All connected components:\")\n print(\"total = {cnt}\".format(cnt=nx.number_connected_components(self.graph)))\n\n sub_graphs = nx.connected_component_subgraphs(self.graph)\n for sg in sub_graphs:\n self._stats(sg)\n\n def degree_plot(self):\n print(\"Degree rank plot\")\n degree_sequence = sorted([degree for node, degree in self.graph.degree()])\n degree_cnt = dict(Counter(degree_sequence))\n degree_cnt = [(d, degree_cnt.get(d)) for d in degree_cnt.keys()]\n print(degree_cnt)\n\n plt.plot(*zip(*degree_cnt), \".\")\n plt.title(\"Degree distribution plot\")\n plt.xlabel(\"degree\")\n plt.ylabel(\"count\")\n plt.grid()\n plt.show()\n\n def degree_rank_log_regression(self):\n print(\"Rank plot\")\n degree_sequence = sorted([degree for node, degree in self.graph.degree()])\n degree_cnt = dict(Counter(degree_sequence))\n print(degree_cnt)\n\n xx = list(degree_cnt.keys())\n yy = list(degree_cnt.values())\n assert len(xx) == len(yy)\n\n xx, yy = self._log_binning(xx, yy, 20)\n\n s = np.sum(yy)\n cdf = np.cumsum(yy)\n ccdf = s - cdf\n yy = ccdf\n\n xx = np.array(xx)\n yy = np.array(yy) + 1\n samples_cnt = len(yy)\n\n x_log = np.log10(xx)\n x_log.shape = (samples_cnt, 1)\n\n y_log = np.log10(yy)\n y_log.shape = (samples_cnt, 1)\n\n model = linear_model.LinearRegression()\n model.fit(x_log, y_log)\n\n print('DEBUG coefficients: ', model.coef_)\n print('DEBUG intercept: ', model.intercept_)\n\n a = model.intercept_\n b = model.coef_[0][0]\n\n # minus because exponential function = C * exp(-alpha)\n alpha = -b\n print(\"alpha: \", alpha)\n\n if enable_plotter:\n plt.plot(x_log, y_log, \".\")\n\n x = [x_log[0], x_log[-1]]\n y = [xi * b + a for xi in x]\n plt.plot(x, y, \"r-\")\n\n plt.title(\"Degree rank plot\")\n plt.ylabel(\"ccdf\")\n plt.xlabel(\"rank\")\n plt.grid()\n plt.show()\n\n def 
_log_binning(self, xx, yy, bins_cnt):\n x_min = np.min(xx)\n x_max = np.max(xx)\n thresholds = np.logspace(np.log10(x_min), np.log10(x_max), bins_cnt)\n\n x_bin = list()\n y_bin = list()\n x_begin = 0\n x_end = 0\n thresholds[-1] += 1\n thresholds = thresholds[1:]\n for i in range(len(thresholds)):\n while x_end < len(xx) and xx[x_end] < thresholds[i]:\n x_end += 1\n if x_begin == x_end:\n continue\n x = np.mean(xx[x_begin:x_end])\n y = np.mean(yy[x_begin:x_end])\n\n x_bin.append(x)\n y_bin.append(y)\n\n x_begin = x_end\n return x_bin, y_bin\n\n def hill_plot(self):\n print(\"Hill plot\")\n\n k_alpha = list()\n degree_sequence = sorted([degree for node, degree in self.graph.degree()])\n degree_cnt = dict(Counter(degree_sequence))\n degree_cnt = sorted(list(degree_cnt.values()), reverse=True)\n print(degree_cnt)\n\n nodes_cnt = len(degree_cnt)\n for consider_cnt in range(2, nodes_cnt + 1):\n x = np.sum(np.log(degree_cnt[:consider_cnt]))\n gamma = x / consider_cnt - np.log(degree_cnt[consider_cnt - 1])\n alpha = 1.0 + 1.0 / gamma\n\n if consider_cnt % 25 == 0 and consider_cnt >= 100:\n print(\"alpha(k = {k}) = {alpha}\".format(k=consider_cnt,\n alpha=alpha))\n k_alpha.append(alpha)\n\n if enable_plotter:\n plt.plot(k_alpha, 'b')\n plt.title(\"Hill diagram\")\n plt.ylabel(\"alpha\")\n plt.xlabel(\"k\")\n plt.grid()\n plt.show()\n","repo_name":"unitatem/tass-analysis","sub_path":"src/graph_analyser.py","file_name":"graph_analyser.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10585822509","text":"#!/usr/bin/python\n# Reverse engineering phase 3\n\nimport sys\nimport random\n\n\nvalid_combinations = [\n ('q', 777),\n ('b', 214),\n ('b', 755),\n ('k', 251),\n ('o', 160),\n ('t', 458),\n ('v', 780),\n ('b', 524),\n]\n\n\ndef explode_bomb():\n print(\"\\nBOOM!!!\\nThe bomb exploded.\")\n\n \ndef phase_3(answer):\n # answer is a string %d %c %d\n answer = answer.split()\n if len(answer) < 3:\n explode_bomb()\n\n a = int(answer[0])\n b = answer[1]\n c = int(answer[2])\n\n if a > 7 or len(b) > 1:\n explode_bomb()\n\n # one way of implementing a switch statement..\n if valid_combinations[a] != (b, c):\n explode_bomb()\n\n print(\"Phase defused.\")\n\n \ndef solve():\n index = random.randint(0, len(valid_combinations) - 1)\n return \"{} {} {}\".format(index, *valid_combinations[index])\n\n\nif __name__ == \"__main__\":\n phase_3(solve())\n","repo_name":"johnsjc/reverse-eng","sub_path":"bomb/python/phase3.py","file_name":"phase3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19538573092","text":"from PIL import Image\r\nimport streamlit as st\r\nimport json\r\nimport own_functions\r\nimport pyaudio\r\nfrom pathlib import Path\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import signal\r\nfrom scipy.io import wavfile\r\nimport shutil\r\nimport os\r\nimport numpy as np\r\nimport librosa\r\nimport librosa.display\r\n\r\nimport serial # pip install pyserial, NOT serial!!!\r\nimport time\r\nimport multiprocessing\r\n\r\ndef G1_withprint(x_pos, y_pos, z_pos, speed):\r\n x_pos = round(x_pos,1)\r\n y_pos = round(y_pos,1)\r\n z_pos = round(z_pos,1)\r\n tmp_GCode = \"G1\"+\" \"+\"X\"+str(x_pos)+\" \"+\"Y\"+str(y_pos)+\" \"+\"Z\"+str(z_pos)+\" \"+\"F\"+str(speed)+ \"\\n\" #Position anfahren\r\n tmp_GCode = tmp_GCode + \"M400\" + \"\\n\" #Warten mit Ausführung des nächsten G-Codes bis Beendigung des 
vorigen\r\n tmp_GCode = tmp_GCode + \"M118\"+\" TX: \"+\"X\"+str(x_pos)+\" \"+\"Y\"+str(y_pos)+\" \"+\"Z\"+str(z_pos)+\" \"+ \"\\n\" #M118 für Json\r\n return tmp_GCode\r\n\r\ndef G1_withoutprint(x_pos, y_pos, z_pos, speed):\r\n x_pos = round(x_pos,1)\r\n y_pos = round(y_pos,1)\r\n z_pos = round(z_pos,1)\r\n tmp_GCode = \"G1\"+\" \"+\"X\"+str(x_pos)+\" \"+\"Y\"+str(y_pos)+\" \"+\"Z\"+str(z_pos)+\" \"+\"F\"+str(speed)+ \"\\n\" #Position anfahren\r\n tmp_GCode = tmp_GCode + \"M400\" + \"\\n\" #Warten mit Ausführung des nächsten G-Codes bis Beendigung des vorigen\r\n return tmp_GCode\r\n\r\n\r\n#####Worker=Timestampgenerator & Karl I Ansteuerung\r\ndef worker(temppath):\r\n temp = temppath #temporären Übergabepfad nutzen\r\n json_array = {} #json array to save important information\r\n startms = int(round(time.time() * 1000)) #start time of operation in ms #####MIGHT BE BETTER IN GCODE GENERATION\r\n lastms = startms\r\n\r\n #read G-Code from temporary file\r\n gcode_array_position = 0 #flag to store where we are in the gcode_array right now\r\n gcode_array = [] #file to array\r\n tempsave = os.path.join(temp, \"temporary_file_gcode.g\") \r\n with open(tempsave) as my_file:\r\n gcode_array = my_file.readlines()\r\n\r\n #read G-Code waiter from temporary file\r\n #prepare for \"ok\"s to expect from printer to stream gcode without error\r\n gcode_waiter_array_positon = 0 #flag to store where we are in the gcode_waiter_array right now\r\n gcode_waiter_array = [] #file to array\r\n temp_gcode_waiter_file = []\r\n tempsave = os.path.join(temp, \"temporary_file_gcode_waiter.g\") \r\n with open(tempsave) as my_file:\r\n temp_gcode_waiter_file = my_file.readlines()\r\n temp_gcode_waiter_file = temp_gcode_waiter_file[0]\r\n temp_gcode_waiter_file = temp_gcode_waiter_file.strip(\"[\")\r\n temp_gcode_waiter_file = temp_gcode_waiter_file.strip(\"]\")\r\n gcode_waiter_array = temp_gcode_waiter_file.split(\",\")\r\n\r\n #Open Serial\r\n port = \"COM3\" #Hardcoded port, change if other port used #COM4 X230 #COM3 Unirechner linker USB-Port\r\n ser = serial.Serial(port, 250000, timeout = 10) #start serial connection\r\n print(\"start of operation.\") #debug print\r\n print(\"serial port opened.\") #debug print\r\n time.sleep(2) #sleept to give time to initialize #do not delete or expect trouble\r\n\r\n ok_counter = 0 #count \"ok\" from demonstrator to not overflow serial buffer and check if action is executed\r\n new_gcode_flag = 1 #flag to signal new gcode should be transmitted\r\n timestampart = \"TSL\" #Konvention zur Benennung links: TSL rechts: TSR\r\n timstampcount = 1 #Counting timestamps starting with 1\r\n leseserial = 1 #if 1 keep reading\r\n\r\n while leseserial == 1:\r\n textreceived = ser.readline() #read serial output of demonstrator line by line\r\n print(\"demonstrator output: \" + str(textreceived)) #print the output to console\r\n with open(\"SerialData.txt\", \"a+\") as myfile: #open logging file\r\n myfile.write(str(textreceived)+\"\\n\") #write to logging file\r\n\r\n if b'SETTINGS' in textreceived: #receive the settings output\r\n splitters = textreceived.split() #split the string based on spaces\r\n json_array['feedrate'] = int(splitters[1][1:]) #save to json\r\n json_array['acceleration'] = int(splitters[2][1:]) #save to json\r\n json_array['turns'] = int(splitters[3][1:]) #save to json\r\n port1 = \"COM4\"\r\n ser1 = serial.Serial(port1, 115200, timeout = 10)\r\n time.sleep(2)\r\n print(\"opened Serial to Arduino.\")\r\n ser1.write(bytes(\"<1>\".encode('ascii'))) #Ventil auf und LED 
an\r\n ser1.close() #close Serial Port\r\n\r\n if b'ok' in textreceived: #okcounter to check if code was successfully transmitted and executed\r\n ok_counter = ok_counter + 1 #add 1 to okcounter\r\n if(int(gcode_waiter_array[int(gcode_waiter_array_positon)]) == ok_counter): #compare the \"ok\" received to the expected \"ok\"\r\n ok_counter = 0 #reset ok_counter for the next block\r\n gcode_waiter_array_positon = gcode_waiter_array_positon + 1 #next position in gcode_waiter_array_positon\r\n new_gcode_flag = 1 #set flag to start new gcode transmission\r\n else:\r\n continue #do nothing here if we have not reached the expected \"ok\" amount\r\n\r\n if(new_gcode_flag == 1):\r\n new_gcode_flag = 0\r\n with open(\"SerialData.txt\", \"a\") as myfile: #to make serial data more readable\r\n myfile.write(\"BLOCK NEU\"+\"\\n\")\r\n for a in range(int(gcode_waiter_array[int(gcode_waiter_array_positon)])): #send commands based on amount previously specified\r\n try:\r\n ser.write(bytes(gcode_array[gcode_array_position], 'utf-8'))\r\n gcode_array_position = gcode_array_position + 1 #next gcode\r\n except:\r\n print(\"A Error occured while trying to send data to demonstrator.\")\r\n continue\r\n \r\n if b'TX' in textreceived: #Timestamp generation\r\n now = int(round(time.time() * 1000)) #current time in ms\r\n tookms = now - lastms #calculate used ms between M118 with \"TX\"\r\n lastms = now #set lastms to current time\r\n print('time between last M118 with \"TX\": ' + str(tookms)) #print for comparision and debug to console\r\n if(timestampart == \"TSL\"): #timestamp on left side\r\n timestampname = timestampart + str(timstampcount) #create jsoneintrag name\r\n json_array[timestampname] = now -startms #create timestamp with time since start \r\n timestampart = \"TSR\" #change for next timestamp\r\n else: #timestamp on right side\r\n timestampname = timestampart + str(timstampcount) #create jsoneintrag name\r\n json_array[timestampname] = now - startms #create timestamp with time since start\r\n timstampcount = timstampcount + 1 #always one timestamp left then one right then next turn so we add +1\r\n timestampart = \"TSL\" #change for next timestamp\r\n\r\n\r\n if b'START' in textreceived: #START GCODE BAUSTEIN\r\n continue\r\n\r\n if b'END' in textreceived: #END GCODE M118 Baustein\r\n leseserial = 0 #end the while loop\r\n ser.close() #close the serial port\r\n print(\"end of operation.\")\r\n print(\"serial port closed.\")\r\n\r\n port1 = \"COM4\"\r\n ser1 = serial.Serial(port1, 115200, timeout = 10)\r\n time.sleep(2)\r\n print(\"opened Serial to Arduino.\")\r\n ser1.write(bytes(\"<0>\".encode('ascii'))) #Ventil aus und LED an\r\n ser1.close() #close Serial Port\r\n\r\n tempsave = os.path.join(temp, \"temporary_file_timestamp.json\") #create temporary path\r\n with open(tempsave, \"w+\") as outfile: #create file\r\n outfile.write(json.dumps(json_array)) #dump json file\r\n return #end this part of code\r\n#################\r\n\r\n# function main() that is run by Main.py:\r\ndef main():\r\n # selectable metadata regarding the testobject and testing hardware:\r\n loaded_objects = (\"Sandvik CNMG 12 04 08-MM 2025\", \"Mischkoerper\", \"other\")\r\n loaded_testing_rigs = (\"KARL 1\",)\r\n loaded_nozzles = (\r\n \"Schlick Modellreihe 629\",\r\n \"Schlick Modellreihe 650\",\r\n \"FOS-45°-Flachstrahldüse 25mm Länge\",\r\n \"FOS-Flachstrahldüse 35mm Länge\",\r\n \"FOS-Flachstrahldüse 60mm Länge\",\r\n \"Jet-Düse MV6, Innendurchm. 6mm\",\r\n \"Jet-Düse MV12, Innendurchm. 
6mm\",\r\n \"Düse 1200SSF 1/8 verstellbar, Edelstahl\",\r\n \"Silvent MJ4\",\r\n \"Silvent MJ5\",\r\n \"\",\r\n )\r\n \r\n loaded_recording_modes = (\"Recording by time\", \"Recording on Karl I\")\r\n\r\n # selectable audio recording settings:\r\n loaded_microphones = (\r\n \"RØDE Microphones VideoMic\",\r\n \"Renkforce UM-80 USB-Mikrofon\",\r\n \"other\")\r\n loaded_sampling_rates = (\"44100\", \"48000\", \"88200\", \"96000\")\r\n loaded_sample_formats = (\"16\", \"24\")\r\n loaded_chunks = (\"1024\", \"2048\")\r\n\r\n # header and subheader of the page:\r\n st.header(\"Metadata Capture of Audio Files\")\r\n st.write(\r\n \"Name your recording and select or type in the right parameters\"\r\n )\r\n col_meta_1, col_meta_2, col_meta_3, = st.beta_columns((2, 2, 2,))\r\n \r\n # select metadata regarding the testobject and testing hardware:\r\n test_series_name = str(col_meta_1.text_input(\"Test Series Name\", \"TestserienNr\", help='The test series name designates a series of tests on comparable bodies. For example \"width_Schlick_629\" for the test of several inserts of the same model. Consider carefully how to name the test series.Subsequent changes are not possible or lead to incorrect processing of the metadata. Avoid letters like \"ä, ö, ü, ß\" and special characters.'))\r\n audio_file_name_raw = col_meta_1.text_input(\"File Name\", \"example\", help='The file name of the recording is based on the name to be entered here and the number of the test object in the line below.')\r\n test_object_number = str(col_meta_1.text_input(\"Test Object Number\", \"1\", help='The number of the test object is specified here. The numbering helps to distinguish between different objects of the same type or model and is included in the file name of the recording.'))\r\n test_deviation = col_meta_1.number_input(\r\n \"Target Value [µm]\", step=0.01, help='Insert the measured target value of the test object [µm]. For example, the optically measured deviation of the test object'\r\n )\r\n \r\n\r\n #col_meta_1, col_meta_2, col_meta_3 = st.beta_columns((2, 2, 2))\r\n\r\n \r\n test_nozzle = col_meta_2.selectbox(\"Nozzle\", loaded_nozzles, help='Select the nozzle used for compressed air blasting. If the desired model is not selectable, it must be added to the \"loaded_nozzles\" list in the source code of the \"audio_recorder.py\" file.')\r\n test_object = col_meta_2.selectbox(\"Test Object\", loaded_objects, help='Select the test tested object / model that is to be tested. If the desired model is not selectable, it must be added to the \"loaded_objects\" list in the source code of the \"audio_recorder.py\" file.')\r\n test_testing_rig = col_meta_2.selectbox(\"Test Rig\", loaded_testing_rigs, help='Select the test rig on which the recording will be taken. If the desired model is not selectable, it must be added to the \"loaded_testing_rigs\" list in the source code of the \"audio_recorder.py\" file.')\r\n \r\n \r\n \r\n # test_recording_mode = st.selectbox(\"Recording mode\", loaded_recording_modes)\r\n \r\n st.header(\"Recording Mode, Recording Time and G-Code Settings\")\r\n \r\n test_recording_mode = st.radio(\"Recording mode\", loaded_recording_modes)\r\n \r\n if test_recording_mode == \"Recording by time\":\r\n #col_record_settings.write(\"Manual setting of the recording time\")\r\n col_mode_1, col_mode_2, = st.beta_columns((2, 1))\r\n test_required_recording_time = col_mode_1.number_input(\r\n \"Recording Time [s]\", help='Enter the desired recording length here. 
This can be between 0 and 240 seconds.',\r\n step=0.01,\r\n min_value=0.01,\r\n max_value=240.00,\r\n value=2.00\r\n )\r\n mode_path_name = \"0_recording_by_time\"\r\n\r\n elif test_recording_mode == \"Recording on Karl I\":\r\n #col_record_settings.header(\"Recording with Karl I\")\r\n col_mode_1, col_mode_2, col_mode_3, = st.beta_columns((1, 1, 1,))\r\n\r\n Infoimage = Image.open(\"images\\info_cutting_parameters.png\")\r\n col_mode_3.image(Infoimage, caption=None, use_column_width=True)#, width=600, height=600)\r\n\r\n x_start = col_mode_1.number_input(\"start position of x axis [mm]\", min_value = 0.00, max_value = 120.00, value = 12.00, step = 0.1) \r\n y_start = col_mode_1.number_input(\"start position of y axis [mm]\", min_value = 0.00, max_value = 130.00, value = 0.00, step = 0.1) \r\n z_start = col_mode_1.number_input(\"start position of z axis [mm]\", min_value = 0.00, max_value = 100.00, value = 10.00, step = 0.1) \r\n\r\n test_feed_speed = col_mode_2.number_input(\r\n \"Feed Speed [mm/s]\",\r\n step=0.01,\r\n min_value=0.01,\r\n max_value=10000.00,\r\n value=8.00,\r\n )\r\n test_feed_acceleration = col_mode_2.number_input(\r\n \"Acceleration [mm/s²]\",\r\n step=0.01,\r\n min_value=0.01,\r\n max_value=2000.00,\r\n value=25.00,\r\n )\r\n test_measuring_path = col_mode_2.number_input(\r\n \"Measuring Path (l) [mm]\",\r\n step=0.01,\r\n min_value=0.01,\r\n max_value=1000.00,\r\n value=16.00,\r\n )\r\n test_offset = col_mode_2.number_input(\r\n \"Offset (b) [mm]\", min_value=0.01, max_value=1000.00, value=1.00\r\n )\r\n test_number_of_measuring_paths = col_mode_1.slider(\r\n \"Number of Measuring Paths (n)\", 0, 50, 1, 1\r\n )\r\n #st.write(test_number_of_measuring_paths, \"Measuring paths per audiofile\")\r\n n = test_number_of_measuring_paths\r\n ss_p = test_measuring_path # Messstrecke\r\n ss_o = test_offset # Versatz/Abstand zwischen den Messstrecken\r\n vs = test_feed_speed # eingestellte Vorschubgeschwindigkeit\r\n acs = test_feed_acceleration # eingestellte Beschleunigung\r\n ta = (\r\n vs / acs\r\n ) # benötigte Zeit, bis mit gegebener Beschleunigung die Vorschubgeschwindigkeit erreicht ist\r\n # Strecke, die während Beschleunigungsphase auf Vorschubgeschwindigkeit zurückgelegt wird.\r\n sa = 0.5 * acs * (ta ** 2)\r\n # Bei den Versatzwegen wird die Sollvorschubgeschwindigkeit in der Regel nicht erreicht, daher dieser Ausnahmefall:\r\n if sa >= (0.5 * ss_o):\r\n tv_o = 0 # Keine Zeit beim Versatz wird dann mit der Sollvorschubgeschwindigkeit zurückgelegt\r\n # Die Zeit, die für das Zurücklegen des halben Versatzes mit der Beschleunigung zurückgelegt wird\r\n ta_o = (ss_o / acs) ** 0.5\r\n ts_o = (\r\n 2 * ta_o\r\n ) # 2x, jeweils einmal für Beschleunigung und einmal für Bremsen\r\n else:\r\n sv_o = ss_o - (2 * sa)\r\n tv_o = sv_o / vs\r\n ts_o = tv_o + (2 * ta)\r\n\r\n sv_p = ss_p - (2 * sa) # path length while having the set feed speed\r\n tv_p = sv_p / vs # measuring time while having the set feed speed\r\n ts_p = tv_p + (2 * ta) # total measuring time per path\r\n\r\n ttotal = (\r\n 2 * n * ts_p + (n - 1) * ts_o\r\n ) # 2 times the measuring path length times its iterations plus the offsets\r\n if ttotal <= 0:\r\n ttotal = 0\r\n \r\n st.write(\"Adjust recording time manually:\")\r\n col_mode_1, col_mode_2, col_mode_3, = st.beta_columns((1, 1, 1,))\r\n \r\n ttolerance_fix = col_mode_1.number_input(\r\n \"One-Time Time Tolerance [s]\", step=0.01, value=25.00\r\n )\r\n ttolerance_per_path = col_mode_2.number_input(\r\n \"Time Tolerance per Path [s]\", step=0.01, 
value=1.25\r\n )\r\n ttotal_with_tolerance = ttotal + ttolerance_fix + n * ttolerance_per_path\r\n\r\n st.write(\"\")\r\n col_mode_1, col_mode_2, = st.beta_columns((2, 1,))\r\n\r\n col_mode_1.write(\r\n f\"The calculated recording time including tolerances counts {ttotal_with_tolerance} seconds\"\r\n )\r\n test_required_recording_time = ttotal_with_tolerance\r\n mode_path_name = \"1_recording_on_karl_I\"\r\n demonstrator_active = col_mode_1.radio(\"Is the demonstrator connected?\", (\"yes\", \"no\"))\r\n # select audio recording settings:\r\n\r\n audio_file_name = f\"{audio_file_name_raw}_objectnr_{test_object_number}.wav\"\r\n\r\n st.header(\"Microphone and Recording Settings\")\r\n \r\n col_rec_set_1, col_rec_set_2, col_rec_set_3, col_rec_set_4, col_rec_set_5 = st.beta_columns((2, 2, 2, 2, 4))\r\n\r\n test_microphone = col_rec_set_1.selectbox(\"Microphone\", loaded_microphones)\r\n \r\n test_sampling_rate = int(\r\n col_rec_set_2.selectbox(\"Sampling rate [kHz]\", loaded_sampling_rates, help='The sample rate is the number of samples played in each second. Sample rates are measured in \"Hertz\" (abbreviated \"Hz\"), which means \"per second,\" or in \"kilohertz\" (abbreviated \"kHz\"), which means \"per second, times one thousand.\" The sample rate used on audio CDs can be written as 44 100 Hz, or 44.1 kHz, which both have the same meaning. Common sample rates are 44.1 kHz, 48 kHz, and 96 kHz. Other possible sample rates include 22 kHz, 88.2 kHz, and 192 kHz.')\r\n )\r\n selected_test_sample_format = col_rec_set_3.selectbox(\r\n \"Sample format [bit]\", loaded_sample_formats, help=\"The sample format is the number of bits used to describe each sample. The greater the number of bits, the more data will be stored in each sample. Common sample formats are 16 bits and 24 bits. 8-bit samples are low-quality, and not used often. 20-bit samples are not commonly used on computers. 32-bit samples are possible, but not supported by most audio interfaces.\"\r\n )\r\n test_chunk = int(col_rec_set_4.selectbox(\"Buffer [byte]\", loaded_chunks, help=\"The chunk is like a buffer, so therefore each buffer will contain 1024 samples, which you can then either keep or throw away. 
We use chunks of data, instead of a continuous amount of audio because of processing power.\"))\r\n # test_channels = int(st.selectbox('Number of channels', loaded_channels))\r\n\r\n # translating the input of the selectbox for the sample format into they right variable for the recording algorithm:\r\n if selected_test_sample_format == \"8\":\r\n test_sample_format = pyaudio.paInt8\r\n elif selected_test_sample_format == \"16\":\r\n test_sample_format = pyaudio.paInt16\r\n elif selected_test_sample_format == \"24\":\r\n test_sample_format = pyaudio.paInt24\r\n elif selected_test_sample_format == \"32\":\r\n test_sample_format = pyaudio.paInt32\r\n else:\r\n test_sample_format = None\r\n\r\n # reminder to check if all settings are done correctly and button to start the recording:\r\n\r\n # loaded_npersegs = (\"512\", \"1024\", \"2048\", \"4096\")\r\n # npersegs=int(st.selectbox('Length of each segment (nperseg)', loaded_npersegs))\r\n npersegs = 4096\r\n noverlaps = npersegs / 2\r\n\r\n st.write(\r\n 'Check if everything is filled in correctly and save your metadata by pressing the \"Save\" button below.'\r\n )\r\n\r\n col_rec_1, col_rec_2, col_rec_3, col_rec_4 = st.beta_columns((1.3, 1, 1, 12))\r\n\r\n start_recording = col_rec_1.button(\"Record\")\r\n\r\n \r\n if start_recording == True:\r\n if test_recording_mode == \"Recording by time\":\r\n # determining the path for temporary files\r\n temp = Path(f\"audio-files/{mode_path_name}/0_temporary_files\")\r\n # creating the path if it doesn't exist yet\r\n temp.mkdir(parents=True, exist_ok=True)\r\n # path and name for the temporary recording file\r\n temporary_file_path = temp / \"temporary_file.wav\"\r\n elif test_recording_mode == \"Recording on Karl I\":\r\n # determining the path for temporary files\r\n temp = Path(f\"audio-files/{mode_path_name}/0_temporary_files\")\r\n # creating the path if it doesn't exist yet\r\n temp.mkdir(parents=True, exist_ok=True)\r\n ################################################################################################ \r\n ################################################################################################\r\n #Parameter aus Streamlit übernehmen\r\n x_length = float(round(ss_p,1))\r\n y_length = float(round(ss_o,1))\r\n speed = int(round(vs*60,0)) #Umrechnung Geschwindigkeit in mm/min\r\n accel = int(round(acs,0)) #in mm/s2\r\n turns = int(n) # nur gerade Zahlen, damit auch zurückkommt\r\n ################################################## \r\n #Startpunkt definieren\r\n # x_start = 12.00\r\n # y_start = 0.00\r\n # z_start = 10.00\r\n #Safety Z definieren\r\n Z_safe = 5\r\n ##################################################\r\n #maximale Werte festlegen\r\n x_max = 120 #not used at this time\r\n y_max = 130 #not used at this time\r\n z_max = 100 #not used at this time\r\n ################################################## \r\n #Datei\r\n G_codefile = \"\"\r\n G_codewaiter = []\r\n\r\n #Start G-Code - 4 Operationen\r\n tmp_GCode = \"M118 START\" + \"\\n\" #Start M118 für Json Generation\r\n tmp_GCode = tmp_GCode + \"G28\" + \"\\n\" #Home Operation in X,Y,Z\r\n tmp_GCode = tmp_GCode + \"M204 T\" + str(accel) + \"\\n\" #Beschleunigung festlegen\r\n tmp_GCode = tmp_GCode + \"M118 SETTINGS F\"+ str(int(round(speed/60,0))) + \" A\" + str(accel) + \" T\" + str(turns) + \"\\n\" #M118 für Json\r\n G_codefile = G_codefile + tmp_GCode #zu G_codefile hinzufügen\r\n G_codewaiter.append(4)\r\n\r\n #Startpositon G-Code - 3 Operationen\r\n x_pos, y_pos, z_pos = x_start, y_start, z_start #Ausgangswerte 
für Operationen definieren\r\n G_codefile = G_codefile + G1_withprint(x_pos, y_pos, z_pos, speed) #zu G_codefile hinzufügen\r\n G_codewaiter.append(3)\r\n\r\n ####Start Operations G-Code\r\n for n in range(turns):\r\n #+X Verfahrweg + M118 - 3 Operationen\r\n x_pos = x_pos + x_length #Ausgangswerte für Operationen definieren\r\n G_codefile = G_codefile + G1_withprint(x_pos, y_pos, z_pos, speed) #zu G_codefile hinzufügen\r\n G_codewaiter.append(3)\r\n #-X Verfahrweg - 2 Operationen\r\n x_pos = x_pos - x_length #Ausgangswerte für Operationen definieren\r\n G_codefile = G_codefile + G1_withoutprint(x_pos, y_pos, z_pos, speed) #zu G_codefile hinzufügen\r\n G_codewaiter.append(2) \r\n if (n= GOOD_DEPTH:\n\t\tnew_cov = [['GOOD', cov[0][0], cov[0][0]]]\n\telse:\n\t\tnew_cov = [cov[0]]\n\n\tfor line in cov[1:]:\n\t\tif line[1] >= GOOD_DEPTH:\n\t\t\tif new_cov[-1][0] == 'GOOD' and line[0] == new_cov[-1][2] + 1:\n\t\t\t\tnew_cov[-1][2] = line[0]\n\t\t\telse:\n\t\t\t\tnew_cov.append(['GOOD', line[0], line[0]])\n\t\telse:\n\t\t\tnew_cov.append(line)\n\n\tfor i in range(len(new_cov)):\n\t\tfor j in range(len(new_cov[i])):\n\t\t\tnew_cov[i][j] = str(new_cov[i][j])\n\n\tfile = open(PATH + id + '.txt', 'w')\n\n\tfor el in new_cov:\n\t\tfile.write(' '.join(el) + '\\n')\n\n\tfile.close()\n\t\t\t\n\n\nParallel(n_jobs=-1)(delayed(write_cov_in_new_format)(id, PATH) for id in subset)\n\n","repo_name":"chukrello/MTB-diagnostic-software","sub_path":"2_coverage/write_cov_in_new_format.py","file_name":"write_cov_in_new_format.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32625167351","text":"N, K, P = map(int, input().split())\nA = list(map(int, input().split()))\n\nimport bisect\n\nn = N//2\nm = N - n\nzenhan = [[] for _ in range(n + 1)]\nkohan = [[] for _ in range(m + 1)]\n\n\nfor i in range(1 << n):\n total = 0\n count = 0\n for j in range(n):\n if i >> j & 1:\n total += A[j]\n count += 1\n\n zenhan[count].append(total)\n\nfor i in range(1 << m):\n total = 0\n count = 0\n for j in range(m):\n if i >> j & 1:\n total += A[n+j]\n count += 1\n\n kohan[count].append(total)\n\nfor i in range(len(zenhan)):\n zenhan[i].sort()\n\nfor i in range(len(kohan)):\n kohan[i].sort()\n\n\nans = 0\nfor i in range(len(zenhan)):\n for money in zenhan[i]:\n if money > P:\n break\n if 0 <= K-i < len(kohan): \n ans += bisect.bisect_right(kohan[K-i], P-money)\n\nprint(ans)\n","repo_name":"tamlog06/Atcoder-Beginner-Contest","sub_path":"problems/Typical/typical90/ay/typical90_ay.py","file_name":"typical90_ay.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37137335789","text":"from scipy.stats import norm\nimport numpy as np\nimport pandas as pd\ndef extract_cols(h5mat, col_names, target_names):\n col_names = np.array(col_names)\n target_names = np.array(target_names)\n target_idx = np.where(np.isin(col_names, target_names))[0].flatten().tolist()\n subcols = col_names[target_idx]\n submat = h5mat[:, target_idx]\n return submat, subcols\ndef remove_constant_row(mat, row_names):\n row_names = np.array(row_names)\n std = np.std(mat, axis = (1))\n return mat[std != 0, :], row_names[std != 0]\ndef inv_norm_row(mat):\n return np.apply_along_axis(inv_norm_vec, 1, mat)\ndef inv_norm_vec(vec, offset = 1):\n rank = myrank(vec)\n return norm.ppf(rank / (len(rank) + offset), loc = 0, scale = 1)\ndef myrank(vec):\n argsort = 
np.argsort(vec)\n ranks = np.empty_like(argsort)\n ranks[argsort] = np.arange(len(vec))\n return ranks + 1 # rank starts from 1\ndef format_to_gcta_grm(mat):\n nobs = mat.shape[0]\n nindiv = mat.shape[1]\n grm = np.dot(mat.transpose(), mat) / nobs\n# row_index = [ i for j in range(nindiv - i) for i in range(nindiv) ] # 0-base\n# col_index = [ j for j in range(i, nindiv) for i in range(nindiv) ] # 0-base\n indices = np.tril_indices(nindiv)\n grm_values = grm[indices]\n nobs_vec = np.ones(grm_values.shape[0]) * nobs\n df = pd.DataFrame({'idx1': indices[0] + 1, 'idx2': indices[1] + 1, 'nobs': nobs_vec.astype(int), 'gr': grm_values})\n return df\ndef get_header(filename):\n f = open(filename, 'r')\n samples = f.readline().strip().split('\\t')\n f.close()\n return samples\ndef read_pred_expr_as_mt(filename):\n # the file should be TSV\n # it should have a column named 'gene'\n # all other columns are considered as samples\n # it returns hail MatrixTable with columns as samples (key 's') and rows as gene (key 'gene')\n # and entry 'pred_expr'\n samples = get_header(filename)\n samples.pop(samples.index('gene')) # we remove 'gene' here since we only want samples\n type_dic = {\n k : hl.tfloat for k in samples\n }\n type_dic['gene'] = hl.tstr\n pred_expr = hl.import_table(filename, types = type_dic)\n pred_expr = (pred_expr\n .key_by('gene')\n .to_matrix_table_row_major(columns = samples, entry_field_name = 'pred_expr', col_field_name = 's')\n )\n return pred_expr\ndef read_tsv(filename, sample_col):\n # the file should be TSV\n # it should have one column named sample_col for sample ID\n # all other columns are considered as float!\n # it returns hail Table with row as sample (key 's') \n float_cols = get_header(filename)\n float_cols.pop(float_cols.index(sample_col)) # we remove the column for sample ID here\n type_dic = {\n k : hl.tfloat for k in float_cols\n }\n type_dic[sample_col] = hl.tstr\n ht = hl.import_table(filename, types = type_dic)\n ht = ht.annotate(s = ht[sample_col])\n ht = ht.key_by('s')\n ht = ht.drop(sample_col)\n ht = ht.repartition(ht.n_partitions())\n return ht\ndef struct_to_np_array(struct, exclude_cols):\n m = len(struct[0].collect())\n colnames = list(struct.keys())\n n = np.sum(np.logical_not(np.isin(colnames, exclude_cols)))\n out_array = np.empty([m, n])\n counter = 0\n for i in range(len(colnames)):\n if colnames[i] not in exclude_cols:\n out_array[:, i] = struct[i].collect()\n return out_array\ndef tsv_to_pd_df(filename, indiv_col):\n return pd.read_csv(filename, header = 0, sep = '\\t', dtype = {indiv_col: str})\ndef standardize_row(mat):\n return np.apply_along_axis(standardize_vec, 1, mat)\ndef standardize_vec(vec):\n return _divide(vec - np.mean(vec), np.std(vec))\ndef _divide(a, b):\n return np.divide(a, b, out = np.zeros_like(a), where = (b != 0))\ndef truncate_evd(w, v, lambda_max_over_lambda = 1 / 30):\n w_max = np.max(w)\n w_scaled = w / w_max\n idx = np.where(w_scaled > lambda_max_over_lambda)[0]\n if idx.size == 0:\n w_keep = None\n v_keep = None\n else:\n idx = np.min(idx)\n w_keep = w[idx:]\n v_keep = v[:, idx:]\n return w_keep, v_keep\n","repo_name":"liangyy/ptrs-ukb","sub_path":"code/gcta_helper.py","file_name":"gcta_helper.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11082547143","text":"from thefuzz import fuzz, process\r\nimport re\r\n\r\noriginalAdd = \"#1991, Datta Nagar, Koregaon Bhima, Pune, 412216\" # doc address\r\nmanualAdd = 
\"1991, Data Nagar, Koregaonn Bhimaa, Pune, 412216\" # manual address\r\n\r\nprint(originalAdd)\r\nprint(manualAdd)\r\n\r\n#function for extract house no , pincode\r\ndef numnerList(address):\r\n str4 = re.findall(r'\\d+', address) # extract numbers from addresses\r\n res = list(map(int, str4)) #convert string into integer and then map to make a list\r\n return res #it will return [house no , pincode , any other number...]\r\n\r\nlistOfNumbers1 = numnerList(originalAdd) # it will atore the results for str1\r\n#print(listOfNumbers1) \r\nlistOfNumbers2 = numnerList(manualAdd) # it will atore the results for str1\r\n#print(listOfNumbers2)\r\n\r\n# Extract words from string\r\ndef wordExtract(word):\r\n word1 = re.findall(r'[a-zA-Z]+', word) # extract word from string \r\n return word1\r\n\r\n\r\nlistOfWords1 = wordExtract(originalAdd) # list of words for original address\r\n#print(listOfWords1)\r\nlistOfWords2 = wordExtract(manualAdd) # list of words for manual address\r\n#print(listOfWords2)\r\n\r\n#converting list of word into string \r\noriginalAddText = \" \".join(map(str, listOfWords1))\r\n#print(originalAddText)\r\nmanualAddText = \" \".join(map(str, listOfWords2))\r\n#print(manualAddText)\r\n\r\n\r\n# compare digit lists house no, pincode\r\n# if listOfNumbers1 != listOfNumbers2:\r\n# print(\"house no and pincode matched\")\r\n# else:\r\n# print(\"house no and pincode not matched\")\r\n\r\n# it will return ratio by comparing only addresses string\r\nmatch = fuzz.ratio(originalAddText, manualAddText)\r\n\r\n#check the approvable\r\n# if match>=90 and listOfNumbers1[0] == listOfNumbers2[0]:\r\n# print(\"Loan approved\")\r\n# else:\r\n# print(\"Please write correct address\")\r\n\r\n\r\n\r\n\r\n# compare digit lists house no, pincode\r\nif listOfNumbers1 == listOfNumbers2:\r\n print(\"house number amd pin code matched\")\r\n if listOfNumbers1[0] == listOfNumbers2[0] and listOfNumbers2[-1] == listOfNumbers2[-1]:\r\n if match>=90:\r\n print(\"document matched with property address\")\r\n else:\r\n print(\"\") \r\n \r\nelse: \r\n if listOfNumbers1[0] != listOfNumbers2[0]:\r\n print(\"house number not matched\")\r\n print(\"------------------\")\r\n if listOfNumbers1[-1] != listOfNumbers2[-1]:\r\n print(\"pin code not matched\")\r\n print(\"------------------\")\r\n \r\n\r\n\r\n\r\n ","repo_name":"sagarpol3008/BasicPrograms","sub_path":"stringmatch.py","file_name":"stringmatch.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10500285882","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\n\n\n# 중위순회하면서 노드 수 더하는 함수\ndef tree_search(node):\n global count\n if node > 0:\n tree_search(left[node])\n count += 1\n tree_search(right[node])\n\n\nfor tc in range(1, T+1):\n E, N = map(int, input().split())\n info = list(map(int, input().split()))\n # 두 개의 리스트로 트리 구현\n left = [0] * (E+2)\n right = [0] * (E+2)\n # 인덱스: 부모, 값: 자식\n while info:\n child = info.pop()\n parent = info.pop()\n if left[parent] == 0:\n left[parent] = child\n else:\n right[parent] = child\n # 노드 수 count 초기화\n count = 0\n # 중위순회\n tree_search(N)\n\n print(\"#{} {}\".format(tc, count))\n\n","repo_name":"mann-WOO/SWEA","sub_path":"5174_Subtree/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15805364194","text":"import os, sys\nimport time, math\nimport multiprocessing\nfrom abc 
import ABC, abstractmethod\n\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nfrom .utils import build_worker_name, build_summaries, Summary\n\n\nclass AbstractDistributedWorker(ABC):\n\n GLOBAL_NET_NAME = \"global_net\"\n\n def __init__(self, _job_name, _task_id, _is_chief, _is_evaluator,\n _env_wrapper, _network_wrapper, _on_policy, \n batch_size, unroll_length, \n opt_epochs=1, episodic_training=False, render=False,\n _seed=None, _log_dir=None, _max_samples=None#, _max_iterations=None\n ):\n self.name = build_worker_name(_job_name, _task_id)\n self.job_name = _job_name\n self.task_id = _task_id\n self.is_chief = _is_chief\n self.is_evaluator = _is_evaluator\n self.seed = _seed\n self.log_dir = _log_dir\n\n # self.max_iterations = _max_iterations\n\n self.env_wrapper = _env_wrapper\n self.network_wrapper = _network_wrapper\n\n self.on_policy = _on_policy\n\n self.batch_size = batch_size\n self.unroll_length = unroll_length\n self.opt_epochs = opt_epochs\n self.episodic_training = episodic_training\n\n self.render = render\n\n self.env, self.global_net= None, None\n self.logger = None\n self.opt_op = None\n self.summary_op = None\n self.global_step = None\n self.init_ops = None\n self.pull_weights = None\n self.update_ops = None\n \n self.hooks = []\n\n self.max_samples = _max_samples\n\n @property\n def off_policy(self):\n return not self.on_policy\n\n @off_policy.setter\n def off_policy(self, v):\n self.on_policy = not v\n\n def set_env_seed(self, seed):\n if callable(getattr(self.env, \"seed\", None)): self.env.seed(seed)\n try:\n self.env.action_space.np_random.seed(seed)\n except:\n pass\n\n def set_seed(self, seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED']=str(seed)\n tf.set_random_seed(seed)\n self.set_env_seed(seed)\n np.random.seed(seed)\n\n def init(self):\n self.env = self.env_wrapper(self.name, self.render)\n self.set_seed(self.seed)\n if self.log_dir:\n self.logger = Summary(os.path.join(self.log_dir, self.name))\n else:\n self.logger = None\n if callable(getattr(self.env, \"init\", None)):\n self.env.init()\n with tf.variable_scope(\"step\"):\n self.global_step = tf.train.get_or_create_global_step()\n self.global_net, self.inc_global_episode, inc_total_samples = \\\n self.build_global_net(not self.is_evaluator, self.network_wrapper, self.env)\n if inc_total_samples is None:\n self.inc_total_samples = None\n else:\n self.inc_total_samples = lambda step_context: step_context.session.run(inc_total_samples)\n self.local_net = self.target_net = self.global_net\n if self.global_net.trainable and hasattr(self.global_net, \"init_ops\") and self.global_net.init_ops:\n self.init_ops = lambda step_context: step_context.session.run(self.global_net.init_ops)\n \n @staticmethod\n def build_global_net(trainable, network_wrapper, env):\n with tf.variable_scope(AbstractDistributedWorker.GLOBAL_NET_NAME):\n net = network_wrapper(env, trainable)\n net.init()\n with tf.variable_scope(\"episode\"):\n global_episode = tf.Variable(0, dtype=tf.int32, trainable=False, name=\"episode\")\n if trainable:\n inc_global_episode = tf.assign_add(global_episode, 1, name=\"inc_episode\")\n else:\n inc_global_episode = None\n with tf.variable_scope(\"samples\"):\n if trainable:\n total_samples = tf.Variable(0, dtype=tf.int64, trainable=False, name=\"samples\")\n inc_total_samples = tf.assign_add(total_samples, 1, name=\"inc_samples\")\n else:\n inc_total_samples = None\n return net, inc_global_episode, inc_total_samples\n \n def build_local_net(self, name):\n pass \n\n def 
build_sync_vars(self, local_net_name):\n global_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.GLOBAL_NET_NAME)\n local_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, local_net_name)\n sync_local_vars, sync_global_vars = [], []\n for v in global_vars:\n try:\n for lv in local_vars:\n if local_net_name + \"/\" + v.name.split(\"/\", 1)[1] in lv.name and lv != v:\n sync_local_vars.append(lv)\n sync_global_vars.append(v)\n except:\n print(\"[WARN] Failed to find the local copy of the global variable: {}\".format(v))\n return sync_local_vars, sync_global_vars\n \n @abstractmethod\n def build_optimizer(self, local_net, local_vars, target_vars):\n raise NotImplementedError\n \n def build_summaries(self, target_net):\n return build_summaries(target_net)\n \n def stop(self):\n if self.logger:\n self.logger.flush()\n try:\n self.env.close()\n except:\n pass\n if not self.is_evaluator:\n print(\"[SYSTEM] Stopping {}\".format(self.name))\n \n def work(self, sess):\n self.batch_size = None if self.batch_size is None else int(self.batch_size)\n self.unroll_length = None if self.unroll_length is None else int(self.unroll_length)\n self.opt_epochs = None if self.opt_epochs is None else int(self.opt_epochs)\n self.request_stop = False\n self.before_work(sess)\n while not (sess.should_stop() or self.request_stop):\n state = self.before_episode(sess)\n terminal = False\n while not terminal:\n action, *_ = self.sample_action(sess, state)\n state, _, terminal, info = self.interact_with_env(sess, state, action)\n if self.target_net.trainable:\n need_train = self.need_train(sess, terminal, state, info)\n if self.sequence_length is not None:\n if terminal or self.episodic_terminal:\n n = len(self.exp[\"state\"]) - sum(self.sequence_length)\n if self.unroll_length is not None:\n remaining = self.unroll_length - n\n for _, buf in self.exp.items():\n for __ in range(remaining):\n buf.append(np.zeros_like(buf[0]))\n self.sequence_length.append(n)\n self.sequence_terminal.append(terminal)\n if need_train:\n if self.sequence_length is not None:\n if self.unroll_length is None:\n max_n = max(self.sequence_length)\n for i, n in enumerate(self.sequence_length):\n if n < max_n:\n remaining = max_n - n\n for _, buf in self.exp.items():\n for __ in range(remaining):\n buf.insert(i*max_n+n, np.zeros_like(buf[0]))\n self.episodic_train(sess)\n else:\n self.flat_train(sess)\n self.after_train(sess)\n self.after_episode(sess, info)\n self.after_work(sess)\n \n def sync(self, sess):\n if self.pull_weights:\n sess.run_step_fn(self.pull_weights)\n \n def setup_buffers(self):\n if self.exp_buffers is not None:\n if self.sequence_length is not None:\n buf_size = self.batch_size*self.unroll_length if self.unroll_length is not None else 4096\n else:\n buf_size = self.unroll_length if self.unroll_length is not None else 4096\n self.exp = {k: [None]*buf_size for k in self.exp_buffers}\n for _, v in self.exp.items(): v.clear()\n \n\n def clear_buffers(self):\n if self.on_policy:\n if self.exp_buffers is not None:\n for _, v in self.exp.items(): v.clear()\n if hasattr(self, \"last_training_state\"):\n self.sequence_length.clear()\n self.sequence_terminal.clear()\n self.sample_counter = 0\n \n def before_work(self, sess):\n if self.episodic_training:\n self.sequence_length = []\n self.sequence_terminal = []\n self.last_training_state = None\n else:\n self.sequence_length = None\n self.evaluator_counter = 0\n self.sample_counter = 0\n self.setup_buffers()\n if self.init_ops: sess.run_step_fn(self.init_ops)\n 
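# Pull the freshly initialized global parameters into this worker's local\n        # network before the first rollout, so sampling starts from weights that\n        # match the global copy (pull_weights is assumed to be wired up by the\n        # concrete subclass, e.g. in its build_optimizer).\n        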
self.sync(sess)\n \n def after_work(self, sess):\n self.stop()\n\n def before_episode(self, sess):\n self.local_net.reset_running_state()\n if not self.evaluator_counter:\n self.total_reward, self.episode_step = 0.0, 0\n self.episode_buffer_stamp = 0\n return self.env.reset()\n\n def after_episode(self, sess, info):\n if self.is_evaluator and hasattr(self, \"total_samples\"):\n self.evaluator_counter += 1\n if self.evaluator_counter >= self.is_evaluator:\n self.episode_step /= self.evaluator_counter\n self.total_reward /= self.evaluator_counter\n summaries = [\n tf.Summary.Value(tag=\"performance_test/reward\", simple_value=self.total_reward),\n tf.Summary.Value(tag=\"performance_test/reward_avg\", simple_value=self.total_reward/self.episode_step),\n tf.Summary.Value(tag=\"performance_test/frames\", simple_value=self.episode_step)\n ]\n summaries.append(tf.Summary.Value(tag=\"performance_test/samples\", simple_value=self.total_samples))\n print(\"[PERFORM] Life Time: {}; Total Reward: {:.4f}; Avg Reward: {:.4f}; Step: {}; Samples: {}; {}\".format(\n self.episode_step, self.total_reward, self.total_reward/self.episode_step, self.global_step, self.total_samples,\n time.strftime(\"%m-%d %H:%M:%S\", time.localtime())\n ))\n self.summary = tf.Summary(value=summaries)\n # self.request_stop = True\n self.evaluator_counter = 0\n\n def sample_action(self, sess, state):\n return self.local_net.run(sess, state, self.update_ops)\n\n def interact_with_env(self, sess, state, action):\n self.sample_counter += 1\n self.episode_step += 1\n \n if hasattr(self.env.action_space, \"low\"):\n a = np.clip(action, self.env.action_space.low, self.env.action_space.high)\n else:\n a = action\n\n state_, reward, terminal, info = self.env.step(a)\n \n self.exp[\"state\"].append(state)\n self.exp[\"action\"].append(action)\n self.exp[\"reward\"].append(reward)\n if \"episode_reward\" in info:\n if info[\"episode_reward\"] != 0:\n for i in range(self.episode_buffer_stamp, len(self.exp[\"reward\"])):\n self.exp[\"reward\"][i] += info[\"episode_reward\"]\n self.total_reward += (len(self.exp[\"reward\"])-self.episode_buffer_stamp)*info[\"episode_reward\"]\n self.episode_buffer_stamp = len(self.exp[\"reward\"])\n \n self.total_reward += reward\n \n if self.inc_total_samples is not None:\n self.total_samples = sess.run_step_fn(self.inc_total_samples)\n\n return state_, reward, terminal, info\n\n def need_train(self, sess, terminal, last_state, info):\n if self.episodic_training:\n if terminal or self.episodic_terminal:\n return len(self.sequence_length)+1 >= self.batch_size\n return False\n if self.unroll_length is None:\n return terminal\n return self.sample_counter >= self.unroll_length\n \n @property\n def episodic_terminal(self):\n if hasattr(self, \"sequence_length\") and self.sequence_length is not None and self.unroll_length is not None:\n n = len(self.sequence_length)\n if n > 0: n *= max(self.sequence_length)\n return len(self.exp[\"state\"]) - n >= self.unroll_length\n return False\n\n def after_train(self, sess):\n self.sync(sess)\n self.clear_buffers()\n if self.inc_total_samples is not None and self.max_samples is not None:\n if hasattr(self, \"total_samples\") and self.total_samples > self.max_samples:\n #self.request_stop = True\n for h in sess._hooks:\n if isinstance(h, tf.train.StopAtStepHook):\n h._last_step = self.global_step\n break\n\n def flat_train(self, sess):\n n = len(self.exp[\"state\"])\n exp_bak = self.exp\n if self.on_policy: # disposable experience replay buffer\n exp = {\n k: np.asarray(v, 
dtype=np.float32) for k, v in exp_bak.items()\n }\n ids = np.arange(n)\n self.current_opt_epoch = 0\n while self.current_opt_epoch < self.opt_epochs:\n np.random.shuffle(ids)\n if self.batch_size:\n for s in range(0, n, self.batch_size):\n e = s + self.batch_size\n cand = ids[s:e]\n self.exp = {\n k: buf[cand] for k, buf in exp.items()\n }\n self.train(sess)\n else:\n self.exp = {\n k: buf[ids] for k, buf in exp.items()\n }\n self.train(sess)\n self.current_opt_epoch += 1\n elif n >= self.batch_size:\n for _ in range(self.opt_epochs if self.opt_epochs else self.sample_counter):\n ids = np.random.choice(n, self.batch_size)\n self.exp = {\n k: list(map(v.data.__getitem__, ids)) for k, v in exp_bak.items()\n }\n self.train(sess)\n self.exp = exp_bak\n\n def episodic_train(self, sess):\n assert(self.on_policy)\n for _ in range(self.opt_epochs):\n #self.batch_size x (self.unroll_length or self.sequence_length[0:])\n if not hasattr(self, \"last_training_state\") or self.last_training_state is None or self.batch_size > 1:\n # the training state should be reset to zero state\n # for multiple-batch cases, since some previous training\n # batches may lead the terminal state\n self.target_net.reset_training_state()\n else:\n self.target_net.reset_training_state(self.last_training_state)\n self.train(sess)\n if hasattr(self, \"last_training_state\") and self.batch_size <= 1:\n if len(self.sequence_terminal) < 1 or self.sequence_terminal[-1]:\n self.last_training_state = None\n else:\n self.last_training_state = self.target_net.training_state\n\n def train(self, sess):\n if sess.should_stop() or self.request_stop:\n return None, None\n ops = self.train_ops\n loss, result = self.target_net.train(\n sess, self.opt_op, ops, *self.train_args\n )\n summary = result[ops.index(self.summary_op)]\n global_step = result[ops.index(self.global_step)]\n if self.logger and summary:\n self.logger.add_summary(summary, global_step)\n # if self.max_iterations and global_step >= self.max_iterations:\n # self.request_stop = True\n return loss, result\n \n @property\n def exp_buffers(self):\n return [\"state\", \"action\", \"reward\"]\n\n @property\n def train_ops(self):\n return [self.summary_op, self.global_step]\n \n @property\n def train_args(self):\n args = [\n self.exp[\"state\"], self.exp[\"action\"],\n self.exp[\"reward\"]\n ]\n if self.sequence_length is not None:\n args.append(self.sequence_length)\n return args\n\n\nclass DistributedModel(object):\n\n def __init__(self, worker_wrapper, env_wrapper, network_wrapper,\n checkpoint_dir, save_checkpoint_interval, max_iterations, device,\n log_dir=None, debug=\"chief\", max_samples=None,\n use_evaluator=True, seed=None):\n tf.logging.set_verbosity(tf.logging.ERROR)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n self.worker_wrapper = worker_wrapper\n self.env_wrapper = env_wrapper\n self.network_wrapper = network_wrapper\n\n self.log_dir = log_dir\n self.debug = debug\n self.checkpoint_dir = checkpoint_dir\n self.save_checkpoint_interval = save_checkpoint_interval\n self.max_iterations = max_iterations\n self.max_samples = max_samples\n self.device = device.split(\";\")\n\n self.use_evaluator = use_evaluator\n self.seed = seed\n\n def start(self, distributions=None):\n if distributions is None:\n self.test()\n else:\n self.train(distributions)\n \n def test(self):\n worker = self.worker_wrapper(_job_name=\"evaluator\", _task_id=0, _is_chief=False, _is_evaluator=1,\n _env_wrapper=self.env_wrapper, _network_wrapper=self.network_wrapper,\n _seed=self.seed,\n 
render=True)\n worker.init()\n while True:\n with tf.train.SingularMonitoredSession(checkpoint_dir=self.checkpoint_dir) as sess:\n worker.work(sess)\n \n def evaluate(self, n=10, evaluation_cond=lambda : True):\n from tensorflow.python import pywrap_tensorflow\n\n tf.logging.set_verbosity(tf.logging.ERROR)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n worker = self.worker_wrapper(_job_name=\"evaluator\", _task_id=0, _is_chief=False, _is_evaluator=n,\n _env_wrapper=self.env_wrapper, _network_wrapper=self.network_wrapper,\n _seed=self.seed, _log_dir=self.log_dir, _max_samples=self.max_samples,\n render=False)\n worker.init()\n rng = np.random.RandomState(self.seed)\n class Evaluator(tf.train.SessionRunHook):\n def __init__(self, checkpoint_dir):\n self.lastest_checkpoint = None\n self.saver = tf.train.Saver()\n self.checkpoint_dir = checkpoint_dir\n\n def after_create_session(self, session, coord):\n self.sess = session\n\n def before_run(self, run_context):\n if self.lastest_checkpoint is None or (worker.evaluator_counter == 0 and worker.episode_step == 0):\n if self.lastest_checkpoint is not None and worker.logger:\n worker.logger.add_summary(worker.summary, worker.global_step)\n worker.logger.flush()\n\n checkpoint = None\n while checkpoint is None and evaluation_cond():\n time.sleep(30)\n checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)\n while self.lastest_checkpoint == checkpoint and evaluation_cond():\n time.sleep(30)\n checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)\n\n if checkpoint is None or self.lastest_checkpoint == checkpoint:\n worker.request_stop = True\n # worker.after_episode = lambda *args, **kwargs: run_context.request_stop()\n # run_context.request_stop()\n else:\n self.saver.restore(self.sess, checkpoint)\n self.lastest_checkpoint = checkpoint\n worker.global_step = int(self.lastest_checkpoint.split('-')[-1])\n reader = pywrap_tensorflow.NewCheckpointReader(self.lastest_checkpoint)\n worker.total_samples = reader.get_tensor(worker.GLOBAL_NET_NAME+\"/samples/samples\")\n worker.set_env_seed(rng.randint(1))\n\n h = Evaluator(self.checkpoint_dir)\n config = self.build_tf_sess_config()\n with tf.train.SingularMonitoredSession(hooks=[h], config=config) as sess:\n sess.run_step_fn = None\n worker.work(sess)\n\n @staticmethod\n def build_tf_sess_config(**kwargs):\n return tf.ConfigProto(\n #inter_op_parallelism_threads=1,\n #intra_op_parallelism_threads=1,\n # log_device_placement=True,\n **kwargs,\n allow_soft_placement=True,\n gpu_options=tf.GPUOptions(allow_growth=True)\n )\n \n def train(self, distributions):\n n_workers = sum([len(v) for _, v in distributions.items()])\n if len(self.device) == 1:\n self.device = self.device*n_workers\n workers = []\n for job, d in distributions.items():\n for task_id in range(len(d)):\n workers.append(multiprocessing.Process(\n target=self.dispatch,\n args=(distributions, job, task_id, self.device)\n ))\n for w in workers: w.start()\n print(\"==== Process ID List ===\")\n for w in workers:\n print(w.pid)\n print(\"========================\")\n\n try:\n if self.use_evaluator:\n self.evaluate(evaluation_cond=lambda : all(w.is_alive() for w in workers))\n print(\"evaluator work is done\")\n for w in workers:\n print(w.pid, w.is_alive())\n else:\n for w in workers: w.join()\n except KeyboardInterrupt:\n pass\n finally:\n print(\"closing worker threads...\")\n for w in workers:\n try:\n print(\"finishing \", w.pid)\n w.terminate()\n w.join()\n except:\n pass\n print(\"training is done\")\n \n\n def dispatch(self, 
distributions, job_name, task_id, device):\n print(\"[SYSTEM] {} {} with PID {} started.\".format(job_name, task_id, os.getpid()))\n tf.logging.set_verbosity(tf.logging.ERROR)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n config = self.build_tf_sess_config()\n cluster = tf.train.ClusterSpec(distributions)\n server = tf.train.Server(cluster, job_name=job_name, task_index=task_id, config=config)\n if job_name == \"ps\":\n with tf.device(\"/job:ps/task:{}\".format(task_id)):\n queue = tf.FIFOQueue(1, tf.int32, shared_name=\"done_queue{}\".format(task_id))\n with tf.Session(server.target) as sess:\n for _ in range(cluster.num_tasks(\"worker\")):\n sess.run(queue.dequeue())\n print(\"PS {} {} work is done.\".format(task_id, os.getpid()))\n else:\n # Important!\n # By default, numpy shares the same random state among different sub-processing\n if self.seed is None: \n seed = int.from_bytes(os.urandom(4), byteorder=\"big\")\n else:\n seed = self.seed+task_id+1\n\n target_device = self.target_device(job_name, task_id, device)\n # if \":\" in target_device and target_device.split(\":\")[-2][-3:].lower() == \"gpu\":\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = target_device.split(\":\")[-1]\n # target_device = \"device:gpu:0\"\n # else:\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n done_queues = []\n if \"ps\" in distributions:\n for i in range(cluster.num_tasks(\"ps\")):\n with tf.device(\"/job:ps/task:{}\".format(i)):\n done_queues.append(tf.FIFOQueue(1, tf.int32, shared_name=\"done_queue{}\".format(i)).enqueue(1))\n\n is_chief = (task_id == 0)\n n_workers = cluster.num_tasks(job_name)\n worker = self.worker_wrapper(_job_name=job_name, _task_id=task_id, _is_chief=is_chief, _is_evaluator=False,\n _env_wrapper=self.env_wrapper, _network_wrapper=self.network_wrapper, _seed=seed,\n _log_dir=None if not self.debug or (self.debug != \"all\" and not is_chief) else self.log_dir,\n _max_samples=self.max_samples,\n **self.worker_wrapper_kwargs(job_name, task_id, is_chief, cluster),\n render=False)\n with tf.device(tf.train.replica_device_setter(cluster=cluster,\n worker_device=\"/job:worker/task:{}/{}\".format(task_id, target_device)\n )):\n worker.init()\n \n if is_chief:\n for i in range(n_workers):\n with tf.device(\"/job:worker/task:{}/{}\".format(i, target_device if i == task_id else self.target_device(job_name, i, device))):\n worker_name = build_worker_name(job_name, i)\n worker.build_local_net(worker_name)\n else:\n with tf.device(\"/job:worker/task:{}/{}\".format(task_id, target_device)):\n worker.build_local_net(worker.name)\n\n chief_hooks = []\n hooks = worker.hooks + [tf.train.StopAtStepHook(last_step=self.max_iterations)]\n \n saver = tf.train.Saver(max_to_keep=1)\n class EndNotificationHook(tf.train.SessionRunHook):\n def end(self, sess):\n print(\"end notifier from \", worker.name)\n worker.stop()\n sess.run(done_queues)\n hooks.append(EndNotificationHook())\n scaffold = tf.train.Scaffold(saver=saver)\n with tf.train.MonitoredTrainingSession(server.target, is_chief, self.checkpoint_dir,\n config=config, hooks=hooks, chief_only_hooks=chief_hooks,\n save_checkpoint_steps=self.save_checkpoint_interval,\n log_step_count_steps=5000, scaffold=scaffold, max_wait_secs=60) as sess:\n try:\n worker.work(sess)\n except KeyboardInterrupt:\n worker.stop()\n sess.run(done_queues)\n print(\"{} {} work is done.\".format(worker.name, os.getpid()))\n\n def worker_wrapper_kwargs(self, job_name, task_id, is_chief, cluster):\n return {} \n \n def target_device(self, job_name, task_id, device):\n if job_name 
== \"ps\": return None\n tar = device[0] if len(device) == 1 else device[task_id]\n tar = tar.upper()\n if tar.startswith(\"GPU\"):\n return \"device:GPU:{}\".format(tar[(4 if tar[3]==\":\" else 3):])\n else:\n assert(tar.startswith(\"GPU\") or tar == \"CPU\" or tar == \"CPU:0\")\n return \"cpu:0\"\n","repo_name":"xupei0610/PFPN","sub_path":"models/distributed_model.py","file_name":"distributed_model.py","file_ext":"py","file_size_in_byte":27772,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"34192334645","text":"import glob\nimport os.path\nimport shutil\n\nimport pytest\n\nfrom app.main import init\n\n\n@pytest.fixture(name=\"file_prefix\", scope=\"module\")\ndef get_file_prefix():\n return \"../\"\n\n\n@pytest.fixture(name=\"csv_gen\", scope=\"module\")\ndef get_csv_cases(file_prefix):\n files = glob.glob(os.path.join(file_prefix, 'tests/csv/*.csv'))\n\n for name in files:\n shutil.copy(name, os.path.abspath(os.path.join(file_prefix, 'csv/')))\n\n yield glob.glob(os.path.join(file_prefix, 'csv/*.csv'))\n\n for name in files:\n idx = file_prefix.count(os.path.sep) + 1\n name = os.path.sep.join(name.split(os.path.sep)[idx:])\n os.remove(os.path.join(file_prefix, name))\n\n\n@pytest.fixture(name=\"app\", scope=\"module\")\ndef get_app():\n app = init()\n yield app\n","repo_name":"vpplatonov/data_stream_processor","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34343144014","text":"def nacti_cislo():\n odpoved = int(input(\"Zadej cislo:\"))\n\n try:\n cislo = int(odpoved)\n except ValueError:\n print(\"To neni cislo\")\n else:\n print(\"vyjimka nenastala\")\n finally:\n print(\"tohle je kod, ktery se spusti, at je nebo neni chyba\")\nnacti_cislo()\n","repo_name":"hanakysela/pyladies","sub_path":"vyjimky.py","file_name":"vyjimky.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20612719584","text":"'''\nWordsData.py\n\nData object that represents word counts across a collection of documents.\n\nTerminology\n-------\n* Vocab : The finite collection of possible words. \n {apple, berry, cardamom, fruit, pear, walnut}\n We assume this set has a fixed ordering, so each word is associated \n with a particular integer in the set 0, 1, ... 
vocab_size-1\n 0: apple 3: fruit\n 1: berry 4: pear\n 2: cardamom 5: walnut\n* Document : a collection of words, observed together from the same source\n For example: \n \"apple, berry, berry, pear, pear, pear, walnut\"\n\n* nDoc : number of documents in the current, in-memory dataset\n* nDocTotal : total number of docs, in entire dataset (for online applications)\n'''\n\nfrom .AdmixMinibatchIterator import AdmixMinibatchIterator\nfrom .DataObj import DataObj\nimport numpy as np\nimport scipy.sparse\nfrom ..util import RandUtil\n\nclass WordsData(DataObj):\n\n ######################################################### Constructor\n #########################################################\n def __init__(self, word_id=None, word_count=None, doc_range=None,\n vocab_size=0, vocab_dict=None, \n nDocTotal=None, TrueParams=None, **kwargs):\n ''' Constructor for WordsData object\n\n Args\n -------\n word_id : nDistinctWords-length vector \n entry i gives VocabWordID for distinct word i in corpus\n word_count : nDistinctWords-length vector\n entry i gives count for word_id[i] in that document\n doc_range : nDoc x 2 matrix\n doc_range[d,:] gives (start,stop) for document d\n where start/stop index rows in word_id,word_count\n vocab_size : integer size of set of possible vocabulary words\n vocab_dict : dict mapping integer vocab ids to strings\n nDocTotal : int total size of the corpus \n (in case this obj represents a minibatch)\n TrueParams : None [default], or dict of attributes\n '''\n self.word_id = np.asarray(np.squeeze(word_id), dtype=np.uint32)\n self.word_count = np.asarray(np.squeeze(word_count), dtype=np.float64)\n self.doc_range = np.asarray(doc_range, dtype=np.uint32)\n self.vocab_size = int(vocab_size)\n \n self._set_corpus_size_attributes(nDocTotal)\n self._verify_attributes()\n \n # Save \"true\" parameters that generated toy-data, if provided\n if TrueParams is not None:\n self.TrueParams = TrueParams\n\n # Add dictionary of vocab words, if provided\n if vocab_dict is not None:\n self.vocab_dict = vocab_dict\n\n def _set_corpus_size_attributes(self, nDocTotal=None):\n ''' Sets nDoc, nObs, and nDocTotal attributes of this WordsData object\n\n Args\n -------\n nDocTotal : int size of total corpus \n if None, nDocTotal is set equal to nDoc\n '''\n self.nDoc = self.doc_range.shape[0]\n self.nObs = len(self.word_id)\n if nDocTotal is None:\n self.nDocTotal = self.nDoc\n else:\n self.nDocTotal = int(nDocTotal)\n\n def _verify_attributes(self):\n ''' Basic runtime checks to make sure dimensions are set correctly\n for attributes word_id, word_count, doc_range, etc.\n '''\n assert self.vocab_size > 0\n assert self.word_id.ndim == 1\n assert self.word_id.min() >= 0\n assert self.word_id.max() < self.vocab_size\n assert self.word_count.ndim == 1\n assert self.word_count.min() > 0\n assert self.nDoc == self.doc_range.shape[0]\n assert self.nObs == len(self.word_id)\n assert self.doc_range.shape[1] == 2\n assert np.all( self.doc_range[:-1,1] == self.doc_range[1:,0])\n\n\n ######################################################### Sparse matrix\n ######################################################### representations\n def to_sparse_matrix(self, doBinary=False):\n ''' Make sparse matrix counting vocab usage across all words in dataset\n\n Returns\n --------\n C : sparse (CSC-format) matrix, size nObs x vocab_size\n C[n,v] = word_count[n] iff word_id[n] = v\n 0 otherwise\n That is, each word token n is represented by one entire row\n with only one non-zero entry: at column word_id[n]\n\n '''\n if 
hasattr(self, \"__sparseMat__\") and not doBinary:\n return self.__sparseMat__\n if hasattr(self, '__sparseBinMat__') and doBinary:\n return self.__sparseBinMat__\n\n indptr = np.arange(self.nObs+1) # define buckets for one entry per row\n if doBinary:\n self.__sparseBinMat__ = scipy.sparse.csc_matrix(\n (np.ones(self.nObs), np.int64(self.word_id), indptr),\n shape=(self.vocab_size, self.nObs))\n return self.__sparseBinMat__\n\n else:\n self.__sparseMat__ = scipy.sparse.csc_matrix(\n (self.word_count, np.int64(self.word_id), indptr),\n shape=(self.vocab_size, self.nObs))\n return self.__sparseMat__\n \n def to_sparse_docword_matrix(self, weights=None, thr=None, **kwargs):\n ''' Make sparse matrix counting vocab usage for each document in dataset\n Used for efficient initialization of global parameters.\n\n Returns\n -------\n C : sparse (CSR-format) matrix, of shape nDoc-x-vocab_size, where\n C[d,v] = total count of vocab word v in document d\n '''\n if hasattr(self, \"__sparseDocWordMat__\") and weights is None:\n return self.__sparseDocWordMat__\n row_ind = list()\n col_ind = list()\n doc_range = self.doc_range\n word_count = self.word_count\n for d in xrange(self.nDoc):\n numDistinct = doc_range[d,1] - doc_range[d,0]\n doc_ind_temp = [d]*numDistinct\n row_ind.extend(doc_ind_temp)\n col_ind.extend(self.word_id[doc_range[d,0]:doc_range[d,1]])\n if weights is None:\n weights = self.word_count\n else:\n if thr is not None:\n mask = np.flatnonzero(weights > thr)\n weights = weights[mask] * self.word_count[mask]\n row_ind = np.asarray(row_ind)[mask]\n col_ind = np.asarray(col_ind)[mask]\n else:\n weights = weights * self.word_count\n sparseDocWordmat = scipy.sparse.csr_matrix(\n (weights, (row_ind,col_ind)),\n shape=(self.nDoc, self.vocab_size), \n dtype=np.float64)\n if weights is None:\n self.__sparseDocWordMat__ = sparseDocWordmat\n return sparseDocWordmat\n\n def get_nObs2nDoc_mat(self):\n ''' Returns nDoc x nObs sparse matrix\n '''\n data = np.ones(self.nObs)\n # row_ind will look like 0000, 111, 22, 33333, 444, 55\n col_ind = np.arange(self.nObs)\n\n indptr = np.hstack([Data.doc_range[0,0], Data.doc_range[:,1]])\n return scipy.sparse.csr_matrix( (data, (row_ind, col_ind)),\n shape=(self.nDoc, self.nObs),\n dtype=np.float64)\n\n ######################################################### DataObj interface\n ######################################################### methods\n def to_minibatch_iterator(self, **kwargs):\n ''' Return AdmixMinibatchIterator for this WordsData object,\n so we can traverse subsets of this document collection.\n Args\n -------\n see AdmixMinibatchIterator\n '''\n return AdmixMinibatchIterator(self, **kwargs)\n \n def add_data(self, WData):\n ''' Append provided WordsData to the end of this dataset\n '''\n assert self.vocab_size == WData.vocab_size\n self.word_id = np.hstack([self.word_id, WData.word_id])\n self.word_count = np.hstack([self.word_count, WData.word_count])\n startLoc = self.doc_range[-1,1]\n self.doc_range = np.vstack([self.doc_range, startLoc + WData.doc_range])\n self.nDoc += WData.nDoc\n self.nObs += WData.nObs\n self.nDocTotal += WData.nDocTotal\n self._verify_attributes()\n\n def get_random_sample(self, nDoc, randstate=np.random, candidates=None):\n ''' Create WordsData object for random subsample of this dataset\n\n Args\n -----\n nDoc : number of documents to choose\n randstate : numpy random number generator\n\n Returns\n -------\n WordsData : bnpy WordsData instance, with at most nDoc documents\n '''\n if candidates is None:\n docMask = 
randstate.permutation(self.nDoc)[:nDoc]\n else:\n docMask = randstate.permutation(candidates)[:nDoc]\n return self.select_subset_by_mask(docMask=docMask,\n doTrackFullSize=False)\n\n def select_subset_by_mask(self, docMask=None, wordMask=None,\n doTrackFullSize=True):\n ''' Returns WordsData object representing a subset of this object,\n \n Args\n -------\n docMask : None, or list of document ids to select\n wordMask : None, or list of words to select\n each entry is an index into self.word_id\n\n doTrackFullSize : boolean indicator for whether output dataset\n should retain nDocTotal size of this object,\n or should be self-contained (nDoc=nDocTotal) \n\n Returns\n --------\n WordsData object, where\n nDoc = number of documents in the subset (=len(mask))\n nObs = nDistinctWords in the subset of docs\n nDocTotal defines size of entire dataset (not subset)\n '''\n if docMask is None and wordMask is None:\n raise ValueError(\"Must provide either docMask or wordMask\")\n\n if docMask is not None:\n nDoc = len(docMask)\n nObs = np.sum(self.doc_range[docMask,1] - self.doc_range[docMask,0])\n word_id = np.zeros(nObs)\n word_count = np.zeros(nObs)\n doc_range = np.zeros((nDoc,2))\n \n # Fill in new word_id, word_count, and doc_range\n startLoc = 0\n for d in xrange(nDoc):\n start,stop = self.doc_range[docMask[d],:]\n endLoc = startLoc + (stop - start)\n word_count[startLoc:endLoc] = self.word_count[start:stop]\n word_id[startLoc:endLoc] = self.word_id[start:stop]\n doc_range[d,:] = [startLoc,endLoc]\n startLoc += (stop - start)\n\n elif wordMask is not None:\n wordMask = np.sort(wordMask)\n nObs = len(wordMask)\n docIDs = self.getDocIDs(wordMask)\n uDocIDs = np.unique(docIDs)\n nDoc = uDocIDs.size\n doc_range = np.zeros((nDoc,2))\n\n # Fill in new word_id, word_count, and doc_range\n word_id = self.word_id[wordMask]\n word_count = self.word_count[wordMask]\n startLoc = 0\n for dd in range(nDoc):\n nWordsInCurDoc = np.sum(uDocIDs[dd] == docIDs)\n doc_range[dd,:] = startLoc, startLoc + nWordsInCurDoc\n startLoc += nWordsInCurDoc \n\n nDocTotal=None\n if doTrackFullSize:\n nDocTotal = self.nDocTotal\n return WordsData(word_id, word_count, doc_range, self.vocab_size,\n nDocTotal=nDocTotal)\n\n def getDocIDs(self, wordLocs=None):\n ''' Retrieve document ids for all word tokens, \n or for a particular subset (if specified)\n\n Args\n -------\n wordLocs : None or ndarray of integer locations in range (0, self.nObs)\n \n Returns\n -------\n docIDs : 1-dim ndarray of integer document ids in range (0, nDoc)\n '''\n # Retrieve for entire dataset\n if wordLocs is None:\n if hasattr(self, \"__docid__\"):\n return self.__docid__\n self.__docid__ = np.zeros(self.word_id.size, dtype=np.uint32)\n for dd in range(self.nDoc):\n self.__docid__[self.doc_range[dd,0]:self.doc_range[dd,1]] = dd\n return self.__docid__\n\n # Retrieve for specified subset\n docIDs = np.zeros(len(wordLocs))\n for dd in range(self.nDoc):\n if dd == 0:\n matchMask = wordLocs < self.doc_range[dd,1] \n else:\n matchMask = np.logical_and(wordLocs < self.doc_range[dd,1],\n wordLocs >= self.doc_range[dd-1,1])\n docIDs[matchMask] = dd\n return docIDs \n\n ######################################################### Text summary\n ######################################################### \n def get_text_summary(self, doCommon=True):\n ''' Returns human-readable summary of this object\n '''\n if hasattr(self, 'summary') and doCommon:\n s = self.summary\n elif doCommon:\n s = \" nDoc %d, vocab_size %d\\n\" % (self.nDoc, self.vocab_size)\n else:\n s = 
''\n return s + self.get_doc_stats_summary()\n\n def get_doc_stats_summary(self, pRange=[0,5, 50, 95, 100]):\n ''' Returns human-readable string summarizing word-count statistics\n e.g. word counts for the smallest, largest, and median-length doc\n '''\n nDistinctWordsPerDoc = np.zeros(self.nDoc)\n nTotalWordsPerDoc = np.zeros(self.nDoc)\n for d in range(self.nDoc):\n drange = self.doc_range[d,:]\n nDistinctWordsPerDoc[d] = drange[1] - drange[0]\n nTotalWordsPerDoc[d] = self.word_count[drange[0]:drange[1]].sum()\n assert np.sum(nDistinctWordsPerDoc) == self.word_id.size\n assert np.sum(nTotalWordsPerDoc) == np.sum(self.word_count)\n s = ''\n for p in pRange:\n if p == 0:\n sp = 'min'\n elif p == 100:\n sp = 'max'\n else:\n sp = \"%d%%\" % (p)\n s += \"%5s \" % (sp)\n s += '\\n'\n for p in pRange:\n s += \"%5s \" % (\"%.0f\" % (np.percentile(nDistinctWordsPerDoc, p))) \n s += ' nDistinctWordsPerDoc\\n'\n for p in pRange:\n s += \"%5s \" % (\"%.0f\" % (np.percentile(nTotalWordsPerDoc, p))) \n s += ' nTotalWordsPerDoc'\n return s\n\n ######################################################### Create from MAT\n ######################################################### (class method)\n @classmethod\n def read_from_mat(cls, matfilepath, **kwargs):\n ''' Creates an instance of WordsData from Matlab matfile\n '''\n import scipy.io\n InDict = scipy.io.loadmat(matfilepath, **kwargs)\n return cls(**InDict)\n\n ######################################################### Create from DB\n ######################################################### (class method)\n @classmethod\n def read_from_db(cls, dbpath, sqlquery, vocab_size=None, nDocTotal=None):\n ''' Creates an instance of WordsData from an SQL database\n '''\n import sqlite3\n # Connect to sqlite database and retrieve results as doc_data\n conn = sqlite3.connect(dbpath)\n conn.text_factory = str\n result = conn.execute(sqlquery)\n doc_data = result.fetchall()\n conn.close()\n \n # Repackage the doc_data into word_id, word_count attributes\n word_id = list()\n word_count = list()\n nDoc = len(doc_data)\n doc_range = np.zeros((nDoc,2), dtype=np.uint32)\n ii = 0\n for d in xrange( nDoc ):\n # make sure we subtract 1 for word_ids since python indexes by 0\n temp_word_id = [(int(n)-1) for n in doc_data[d][1].split()]\n temp_word_count = [int(n) for n in doc_data[d][2].split()]\n word_id.extend(temp_word_id)\n word_count.extend(temp_word_count)\n nUniqueWords = len(temp_word_id)\n doc_range[d,:] = [ii, ii + nUniqueWords]\n ii += nUniqueWords\n return cls(word_id=word_id, word_count=word_count,\n doc_range=doc_range, vocab_size=vocab_size, nDocTotal=nDocTotal)\n\n ######################################################### Create Toy Data\n ######################################################### (class method)\n @classmethod\n def CreateToyDataSimple(cls, nDoc=10, nWordsPerDoc=10, \n vocab_size=12, **kwargs):\n ''' Creates a simple toy instance of WordsData (good for debugging)\n Args\n --------\n nDoc : int num of documents to create\n nWordsPerDoc : int num of distinct words in each document\n vocab_size : int size of vocabulary\n '''\n PRNG = np.random.RandomState(0)\n word_id = list()\n word_count = list()\n doc_range = np.zeros((nDoc, 2))\n for dd in range(nDoc):\n wID = PRNG.choice(vocab_size, size=nWordsPerDoc, replace=False)\n wCount = PRNG.choice(np.arange(1,5), size=nWordsPerDoc, replace=True)\n word_id.extend(wID)\n word_count.extend(wCount)\n start = nWordsPerDoc * dd\n doc_range[dd,:] = [start, start + nWordsPerDoc]\n return cls(word_id=word_id, 
word_count=word_count, \n doc_range=doc_range, vocab_size=vocab_size)\n\n @classmethod\n def CreateToyDataFromLDAModel(cls, seed=101, \n nDocTotal=None, nWordsPerDoc=None, \n topic_prior=None, topics=None,\n **kwargs):\n ''' Generates WordsData dataset via LDA generative model,\n given specific global parameters\n\n Args\n --------\n topic_prior : K-length vector of positive reals,\n \\pi_d \\sim \\Dir( topic_prior )\n topics : KxV matrix of positive reals, where rows sum to one\n topics[k,v] := probability of vocab word v in topic k\n '''\n PRNG = np.random.RandomState(seed)\n\n K = topics.shape[0]\n V = topics.shape[1]\n # Make sure topics sum to one\n topics = topics / topics.sum(axis=1)[:,np.newaxis]\n assert K == topic_prior.size\n \n doc_range = np.zeros((nDocTotal, 2))\n wordIDsPerDoc = list()\n wordCountsPerDoc = list()\n\n alphaLP = np.zeros((nDocTotal,K))\n respPerDoc = list()\n\n # startPos : tracks start index for current doc within corpus-wide lists\n startPos = 0\n for d in xrange(nDocTotal):\n # Draw topic appearance probabilities for this document\n alphaLP[d,:] = PRNG.dirichlet(topic_prior)\n\n # Draw the topic assignments for this doc\n ## Npercomp : K-vector, Npercomp[k] counts appearance of topic k\n Npercomp = RandUtil.multinomial(nWordsPerDoc, alphaLP[d,:], PRNG)\n\n # Draw the observed words for this doc\n ## wordCountBins: V x 1 vector, entry v counts appearance of word v\n wordCountBins = np.zeros(V)\n for k in xrange(K):\n wordCountBins += RandUtil.multinomial(Npercomp[k], \n topics[k,:], PRNG)\n\n # Record word_id, word_count, doc_range\n wIDs = np.flatnonzero(wordCountBins > 0)\n wCounts = wordCountBins[wIDs]\n assert np.allclose( wCounts.sum(), nWordsPerDoc)\n wordIDsPerDoc.append(wIDs)\n wordCountsPerDoc.append(wCounts)\n doc_range[d,0] = startPos\n doc_range[d,1] = startPos + wIDs.size \n startPos += wIDs.size\n \n # Record expected local parameters (LP)\n curResp = (topics[:, wIDs] * alphaLP[d,:][:,np.newaxis]).T \n respPerDoc.append(curResp)\n\n word_id = np.hstack(wordIDsPerDoc)\n word_count = np.hstack(wordCountsPerDoc)\n\n respLP = np.vstack(respPerDoc)\n respLP /= respLP.sum(axis=1)[:,np.newaxis]\n\n TrueParams = dict(K=K, topics=topics, beta=topic_prior,\n word_variational=respLP, alphaPi=alphaLP)\n return WordsData(word_id, word_count, doc_range, V,\n nDocTotal=nDocTotal, TrueParams=TrueParams)\n\n\n ######################################################### Write to file\n ######################################################### (instance method)\n def WriteToFile_ldac(self, filepath, min_word_index=0):\n ''' Write contents of this dataset to plain-text file in \"ldac\" format.\n \n Args\n\n Returns\n -------\n None. Writes to file instead.\n\n Each line of file represents one document, and has format\n [U] [term1:count1] [term2:count2] ... 
[termU:countU]\n '''\n word_id = self.word_id\n if min_word_index > 0:\n word_id = word_id + min_word_index\n with open(filepath, 'w') as f:\n for d in xrange(self.nDoc):\n dstart = self.doc_range[d,0]\n dstop = self.doc_range[d,1]\n nUniqueInDoc = dstop - dstart\n idct_list = [\"%d:%d\" % (word_id[n], self.word_count[n]) \\\n for n in xrange(dstart, dstop)]\n docstr = \"%d %s\" % (nUniqueInDoc, ' '.join(idct_list)) \n f.write(docstr + '\\n')\n","repo_name":"daeilkim/refinery","sub_path":"refinery/bnpy/bnpy-dev/bnpy/data/WordsData.py","file_name":"WordsData.py","file_ext":"py","file_size_in_byte":19975,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"16"} +{"seq_id":"74033362569","text":"from vernac.openai import complete_chat\nfrom vernac.util import (\n normalize_progress,\n replace_ext,\n)\nfrom vernac.stages.interface import (\n VernacStage,\n StageContext,\n StageAction,\n StageOutput,\n)\n\nSYSTEM_PROMPT = \"\"\"\nYou are an expert programmer working on contract. The user, your client, will provide the description and source code of an existing module. Respond with clear documentation of the module interface. Your documentation should be structed as two Markdown sections, as follows:\n\n## Module interface\n\n\n\n## Module notes\n\n\n\"\"\"\n\nUSER_PROMPT_TEMPLATE = \"\"\"\nFilename: `{py_name}`\n\n{description}\n\n# Module source\n \n{source}\n\"\"\"\n\nclass DocumentModuleStage(VernacStage):\n steps = 100\n\n def __init__(self, title: str):\n self.title = title\n\n def run(\n self,\n context: StageContext,\n english: str,\n python: str,\n vn_name: str,\n ) -> StageOutput:\n py_name = replace_ext(vn_name, \"py\").replace(\"-\", \"_\")\n\n user_prompt = USER_PROMPT_TEMPLATE.format(\n py_name=py_name,\n description=english,\n source=python,\n )\n\n chat_messages = [\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": user_prompt},\n ]\n\n context.log_json(\"prompt.json\", chat_messages)\n\n # run the prompt and make some code\n def on_token(i: int):\n context.update_progress(completed=normalize_progress(i))\n\n chat_completion = complete_chat(\n chat_messages,\n model=\"gpt-4\",\n on_token=on_token,\n )\n\n context.log_text(\"completion.txt\", chat_completion)\n\n return StageAction.NEXT.out(\n py_name=py_name,\n documentation=chat_completion,\n )\n","repo_name":"bsilverthorn/vernac","sub_path":"src/vernac/stages/document_module.py","file_name":"document_module.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"40718992683","text":"import numpy as np\nimport pandas as pd\nfrom index import app,db,es,redis_store,limiter,home_cache\n\nimport psycopg2\nimport psycopg2.extras\nimport json\nfrom collections import OrderedDict\nfrom sqlalchemy import create_engine\n\n\n\ndef get_city_sources(city):\n\n RENT, SALE, SOLD = 1,2,4\n ROOMS_LENGTH = 20000\n\n query_rent = {\n \"query\": {\n \"bool\": {\n \"must\": [{\"match_phrase\":{\"city\":city}}, {\"match_phrase\":{\"status\":RENT}}],\n \"must_not\": [{\"match_phrase\":{\"room_type\":{\"query\": \"\"}}}]\n }\n }\n }\n\n rent = es.search(\n body=query_rent,\n size=ROOMS_LENGTH,\n )['hits']['hits']\n\n query_sale = {\n \"query\": {\n \"bool\": {\n \"must\": [{\"match_phrase\":{\"city\":city}}, {\"match_phrase\":{\"status\":SALE}}],\n \"must_not\": [{\"match_phrase\":{\"room_type\":{\"query\": \"\"}}}]\n }\n }\n }\n\n sale = es.search(\n body=query_sale,\n 
size=ROOMS_LENGTH,\n )['hits']['hits']\n\n query_sold = {\n \"query\": {\n \"bool\": {\n \"must\": [{\"match_phrase\":{\"city\":city}}, {\"match_phrase\":{\"status\":SOLD}}],\n \"must_not\": [{\"match_phrase\":{\"room_type\":{\"query\": \"\"}}}]\n }\n }\n }\n\n sold = es.search(\n body=query_sold,\n size=ROOMS_LENGTH,\n )['hits']['hits']\n\n listing_price = np.median(np.array(list(item['_source']['house_price_dollar'] for item in sale if isinstance(item['_source']['house_price_dollar'], float))))\n sold_price = np.median(np.array(list(item['_source']['house_price_dollar'] for item in sold if isinstance(item['_source']['house_price_dollar'], float))))\n rent = np.median(np.array(list(item['_source']['rent'] for item in rent if isinstance(item['_source']['rent'], float))))\n\n return {\"listing_price\" : listing_price, \"sold_price\" : sold_price, \"rent\" : rent}\n\n\n\ndef reject_outliers(data, m = 2):\n return data[abs(data - np.mean(data)) < m * np.std(data)]\n\ndef scoring_neighborhood(city):\n\n HIGH, MED, LOW = 3, 2, -3\n SCORE_OFFSET = 2\n ADJUST = 30\n\n res = db.session.execute(\"select * from neighbor where city = '%s'\" % city)\n data = res.fetchall()\n\n df = pd.DataFrame(columns = ['id', 'neighbor_id', 'centroid', 'name', 'url', 'crime', 'demographic',\n 'real_estate', 'overview', 'school', 'property', 'city'])\n\n columns_zc = ['id', 'neighbor_id', 'centroid', 'name', 'url', 'crime', 'demographic',\n 'real_estate', 'overview', 'school', 'property', 'city']\n\n for i in range(0, 12):\n df[columns_zc[i]] = list(data[j][i] for j in range(0, len(data)))\n\n centers = list('(' + item + ')' for item in df['centroid'])\n df['centroid'] = centers\n df['general_score'] = df['crime'] * LOW + df['demographic'] * MED + df['real_estate'] + df['school'] * HIGH\n # adjust score\n if min(df['general_score']) < 0:\n df['general_score'] = (df['general_score'] - min(df['general_score'])) * 100 / (max(df['general_score'] - min(df['general_score'])) + SCORE_OFFSET)\n\n df['school_score'] = df['school'] * 9.8\n df['crime_score'] = 100 - (df['crime'] * 9.8)\n\n overview = df.sort_values('general_score', ascending=False)[:3]\n school = df.sort_values('school_score', ascending=False)[:3]\n crime = df.sort_values('crime_score', ascending=False)[:3]\n\n mg_score, ms_score, mc_score = np.mean(reject_outliers(df['general_score'])) + ADJUST, np.mean(reject_outliers(df['school_score'])) + ADJUST, np.mean(reject_outliers(df['crime_score'])) + ADJUST\n\n stats = {\n\n \"mg_score\" : '%.2f' % mg_score,\n \"ms_score\" : '%.2f' % ms_score,\n \"mc_score\" : '%.2f' % mc_score,\n \"g_first\" : overview.loc[overview.index.values[0]]['name'],\n \"g_second\" : overview.loc[overview.index.values[1]]['name'],\n \"g_third\" : overview.loc[overview.index.values[2]]['name'],\n \"s_first\" : school.loc[school.index.values[0]]['name'],\n \"s_second\" : school.loc[school.index.values[1]]['name'],\n \"s_third\" : school.loc[school.index.values[2]]['name'],\n \"c_first\" : crime.loc[crime.index.values[0]]['name'],\n \"c_second\" : crime.loc[crime.index.values[1]]['name'],\n \"c_third\" :crime.loc[crime.index.values[2]]['name']\n\n\n }\n\n return stats\n\n\ndef map_geoid(engine, city, state, schema='acs2016_1yr'):\n\n us_state_abbrev = {\n\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 
'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n }\n\n\n states = {v: k for k, v in us_state_abbrev.items()}\n search_str = city + ' city, ' + states[state]\n print(search_str)\n cur = create_engine(engine)\n rs = cur.execute(\"SELECT geoid FROM {schema}.geoheader WHERE name='{str}';\".format(schema=schema, str=search_str))\n\n data = rs.fetchone()\n if not data:\n return None\n else:\n return data[0] # reuse the row already fetched; a second fetchone() would advance past it\n\n","repo_name":"ace-gabriel/chrome-extension","sub_path":"application/finance/search_city.py","file_name":"search_city.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"17692625055","text":"from .. import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n cities = (\n \"Warszawa\",\n \"Kraków\",\n \"Łódź\",\n \"Wrocław\",\n \"Poznań\",\n \"Gdańsk\",\n \"Szczecin\",\n \"Bydgoszcz\",\n \"Lublin\",\n \"Katowice\",\n \"Białystok\",\n \"Gdynia\",\n \"Częstochowa\",\n \"Radom\",\n \"Sosnowiec\",\n \"Toruń\",\n \"Kielce\",\n \"Gliwice\",\n \"Rzeszów\",\n \"Zabrze\",\n \"Bytom\",\n \"Olsztyn\",\n \"Bielsko-Biała\",\n \"Ruda Śląska\",\n \"Rybnik\",\n \"Tychy\",\n \"Dąbrowa Górnicza\",\n \"Gorzów Wielkopolski\",\n \"Elbląg\",\n \"Płock\",\n \"Opole\",\n \"Wałbrzych\",\n \"Zielona Góra\",\n \"Włocławek\",\n \"Tarnów\",\n \"Chorzów\",\n \"Koszalin\",\n \"Kalisz\",\n \"Legnica\",\n \"Grudziądz\",\n \"Słupsk\",\n \"Jaworzno\",\n \"Jastrzębie-Zdrój\",\n \"Nowy Sącz\",\n \"Jelenia Góra\",\n \"Konin\",\n \"Piotrków Trybunalski\",\n \"Siedlce\",\n \"Inowrocław\",\n \"Mysłowice\",\n \"Piła\",\n \"Lubin\",\n \"Ostrów Wielkopolski\",\n \"Ostrowiec Świętokrzyski\",\n \"Gniezno\",\n \"Stargard Szczeciński\",\n \"Siemianowice Śląskie\",\n \"Suwałki\",\n \"Głogów\",\n \"Pabianice\",\n \"Chełm\",\n \"Zamość\",\n \"Tomaszów Mazowiecki\",\n \"Leszno\",\n \"Przemyśl\",\n \"Stalowa Wola\",\n \"Kędzierzyn-Koźle\",\n \"Łomża\",\n \"Żory\",\n \"Mielec\",\n \"Tarnowskie Góry\",\n \"Tczew\",\n \"Bełchatów\",\n \"Świdnica\",\n \"Ełk\",\n \"Pruszków\",\n \"Będzin\",\n \"Biała Podlaska\",\n \"Zgierz\",\n \"Piekary Śląskie\",\n \"Racibórz\",\n \"Legionowo\",\n \"Ostrołęka\",\n \"Świętochłowice\",\n \"Starachowice\",\n \"Zawiercie\",\n \"Wejherowo\",\n \"Puławy\",\n \"Wodzisław Śląski\",\n \"Starogard Gdański\",\n \"Skierniewice\",\n \"Tarnobrzeg\",\n \"Skarżysko-Kamienna\",\n \"Radomsko\",\n \"Krosno\",\n \"Rumia\",\n \"Dębica\",\n \"Kołobrzeg\",\n \"Kutno\",\n \"Nysa\",\n \"Ciechanów\",\n \"Otwock\",\n \"Piaseczno\",\n \"Zduńska Wola\",\n \"Sieradz\",\n \"Świnoujście\",\n \"Żyrardów\",\n \"Szczecinek\",\n \"Świdnik\",\n \"Chojnice\",\n \"Nowa Sól\",\n \"Oświęcim\",\n \"Bolesławiec\",\n \"Mińsk Mazowiecki\",\n \"Mikołów\",\n \"Jarosław\",\n \"Sanok\",\n \"Knurów\",\n \"Malbork\",\n \"Żary\",\n \"Kwidzyn\",\n
\"Chrzanów\",\n \"Sopot\",\n \"Sochaczew\",\n \"Wołomin\",\n \"Oleśnica\",\n \"Brzeg\",\n \"Olkusz\",\n \"Jasło\",\n \"Cieszyn\",\n \"Kraśnik\",\n \"Lębork\",\n \"Czechowice-Dziedzice\",\n \"Dzierżoniów\",\n \"Ostróda\",\n \"Police\",\n \"Nowy Targ\",\n \"Iława\",\n \"Czeladź\",\n \"Myszków\",\n \"Żywiec\",\n \"Zgorzelec\",\n \"Oława\",\n \"Bielawa\",\n \"Swarzędz\",\n \"Mława\",\n \"Ząbki\",\n \"Łuków\",\n \"Augustów\",\n \"Śrem\",\n \"Bochnia\",\n \"Luboń\",\n \"Giżycko\",\n \"Grodzisk Mazowiecki\",\n \"Łowicz\",\n \"Krotoszyn\",\n \"Września\",\n \"Turek\",\n \"Pruszcz Gdański\",\n \"Brodnica\",\n \"Gorlice\",\n \"Czerwionka-Leszczyny\",\n \"Kłodzko\",\n \"Marki\",\n \"Nowy Dwór Mazowiecki\",\n \"Kętrzyn\",\n \"Zakopane\",\n \"Wyszków\",\n \"Biłgoraj\",\n \"Żagań\",\n \"Bielsk Podlaski\",\n \"Świecie\",\n \"Wałcz\",\n \"Jarocin\",\n \"Pszczyna\",\n \"Wągrowiec\",\n \"Szczytno\",\n \"Białogard\",\n \"Sandomierz\",\n \"Bartoszyce\",\n \"Kluczbork\",\n \"Lubliniec\",\n \"Skawina\",\n \"Jawor\",\n \"Kościan\",\n \"Wieluń\",\n \"Kościerzyna\",\n \"Nowa Ruda\",\n \"Świebodzice\",\n \"Koło\",\n \"Piastów\",\n \"Goleniów\",\n \"Ostrów Mazowiecka\",\n \"Polkowice\",\n \"Lubartów\",\n \"Zambrów\",\n \"Płońsk\",\n \"Reda\",\n \"Łaziska Górne\",\n \"Środa Wielkopolska\",\n )\n\n street_prefixes = (\n \"ulica\",\n \"aleja\",\n \"plac\",\n )\n\n streets = (\n \"Polna\",\n \"Leśna\",\n \"Słoneczna\",\n \"Krótka\",\n \"Szkolna\",\n \"Ogrodowa\",\n \"Lipowa\",\n \"Brzozowa\",\n \"Łąkowa\",\n \"Kwiatowa\",\n \"Sosnowa\",\n \"Kościelna\",\n \"Akacjowa\",\n \"Parkowa\",\n \"Zielona\",\n \"Kolejowa\",\n \"Sportowa\",\n \"Dębowa\",\n \"Kościuszki\",\n \"Maja\",\n \"Mickiewicza\",\n \"Cicha\",\n \"Spokojna\",\n \"Klonowa\",\n \"Spacerowa\",\n \"Swierkowa\",\n \"Kasztanowa\",\n \"Nowa\",\n \"Piaskowa\",\n \"Sienkiewicza\",\n \"Rózana\",\n \"Topolowa\",\n \"Wiśniowa\",\n \"Dworcowa\",\n \"Wiejska\",\n \"Graniczna\",\n \"Słowackiego\",\n \"Długa\",\n \"Wrzosowa\",\n \"Konopnickiej\",\n \"Boczna\",\n \"Wąska\",\n \"Wierzbowa\",\n \"Jaśminowa\",\n \"Wspólna\",\n \"Modrzewiowa\",\n \"Kopernika\",\n \"Jana Pawła II\",\n \"Poprzeczna\",\n \"Wesoła\",\n \"Pogodna\",\n \"Żeromskiego\",\n \"Rynek\",\n \"Bukowa\",\n \"Wojska Polskiego\",\n \"Sadowa\",\n \"Górna\",\n \"Jodłowa\",\n \"Wolności\",\n \"Glówna\",\n \"Młyńska\",\n \"Strażacka\",\n \"Prusa\",\n \"Jesionowa\",\n \"Przemysłowa\",\n \"Osiedlowa\",\n \"Wiosenna\",\n \"Sikorskiego\",\n \"Chopina\",\n \"Południowa\",\n \"Malinowa\",\n \"Stawowa\",\n \"Reymonta\",\n \"Piłsudskiego\",\n \"Zacisze\",\n \"Cmentarna\",\n \"Okrężna\",\n \"Kochanowskiego\",\n \"Armii Krajowej\",\n \"Miła\",\n \"Jasna\",\n \"Wodna\",\n \"Zamkowa\",\n \"Witosa\",\n \"Reja\",\n \"Warszawska\",\n \"Miodowa\",\n \"Partyzantów\",\n \"Krzywa\",\n \"Kilińskiego\",\n \"Dolna\",\n \"Podgórna\",\n \"Kreta\",\n \"Jarzębinowa\",\n \"Moniuszki\",\n \"Targowa\",\n \"Prosta\",\n \"Orzeszkowej\",\n \"Spółdzielcza\",\n \"Jagodowa\",\n \"Działkowa\",\n \"Staszica\",\n \"Orzechowa\",\n \"Rzemieślnicza\",\n \"Rzeczna\",\n \"Bolesława Chrobrego\",\n \"Fabryczna\",\n \"Tęczowa\",\n \"Chabrowa\",\n \"Poziomkowa\",\n \"Konwaliowa\",\n \"Wyszyńskiego\",\n \"Kalinowa\",\n \"Północna\",\n \"Matejki\",\n \"Grunwaldzka\",\n \"Cisowa\",\n \"Nadrzeczna\",\n \"Pocztowa\",\n \"Zachodnia\",\n \"Dąbrowskiego\",\n \"Grabowa\",\n \"Norwida\",\n \"Źródlana\",\n \"Asnyka\",\n \"Gajowa\",\n \"Paderewskiego\",\n \"Listopada\",\n \"Wyspiańskiego\",\n \"Mostowa\",\n \"Broniewskiego\",\n \"Tuwima\",\n \"Wschodnia\",\n \"Jaworowa\",\n 
\"Poznańska\",\n \"Makowa\",\n \"Bema\",\n \"Jeziorna\",\n \"Piękna\",\n \"Czereśniowa\",\n \"Mała\",\n \"Krakowska\",\n \"Radosna\",\n \"Leszczynowa\",\n \"Traugutta\",\n \"Jadwigi\",\n \"Rolna\",\n \"Wyzwolenia\",\n \"Piastowska\",\n \"Grzybowa\",\n \"Krasickiego\",\n \"Podleśna\",\n \"Żytnia\",\n \"Złota\",\n \"Bursztynowa\",\n \"Żwirowa\",\n \"Stycznia\",\n \"Widokowa\",\n \"Kazimierza Wielkiego\",\n \"Kamienna\",\n \"Jałowcowa\",\n \"Morelowa\",\n \"Mieszka I\",\n \"Myśliwska\",\n \"Łączna\",\n \"Szpitalna\",\n \"Wczasowa\",\n \"Żurawia\",\n \"Fiołkowa\",\n \"Głowackiego\",\n \"Rolnicza\",\n \"Tulipanowa\",\n \"Władysława Jagiełły\",\n \"Dworska\",\n \"Letnia\",\n \"Liliowa\",\n \"Owocowa\",\n \"Pułaskiego\",\n \"Stefana Batorego\",\n \"Harcerska\",\n \"Kołłątaja\",\n \"Strzelecka\",\n \"Kraszewskiego\",\n \"Władysława Łokietka\",\n \"Żwirki i Wigury\",\n \"Wrocławska\",\n \"Gdańska\",\n \"Turystyczna\",\n \"Niepodległości\",\n \"Poniatowskiego\",\n \"Korczaka\",\n \"Rybacka\",\n \"Narutowicza\",\n \"Okrzei\",\n \"Krucza\",\n \"Jagiellońska\",\n \"Świerczewskiego\",\n \"Kasprowicza\",\n \"Szeroka\",\n \"Jana III Sobieskiego\",\n \"Młynarska\",\n \"Olchowa\",\n \"Powstańców Śląskich\",\n \"Rumiankowa\",\n \"Stroma\",\n \"Starowiejska\",\n \"Mazowiecka\",\n \"Lawendowa\",\n \"Robotnicza\",\n \"Zbożowa\",\n \"Mokra\",\n \"Powstańców Wielkopolskich\",\n \"Towarowa\",\n \"Dobra\",\n \"Środkowa\",\n \"Willowa\",\n \"Zielna\",\n \"Zdrojowa\",\n \"Opolska\",\n \"Agrestowa\",\n \"Księżycowa\",\n \"Zwycięstwa\",\n \"Fredry\",\n \"Letniskowa\",\n \"Andersa\",\n \"Baczynskiego\",\n \"Batalionów Chłopskich\",\n \"Dąbrowskiej\",\n \"Orla\",\n \"Skłodowskiej-Curie\",\n \"Błękitna\",\n \"Rubinowa\",\n \"Brzoskwiniowa\",\n \"Urocza\",\n \"Gałczynskiego\",\n \"Krasińskiego\",\n \"Pomorska\",\n \"Szymanowskiego\",\n \"Jeżynowa\",\n \"Czarnieckiego\",\n \"Nałkowskiej\",\n \"Zaciszna\",\n \"Porzeczkowa\",\n \"Krańcowa\",\n \"Jesienna\",\n \"Klasztorna\",\n \"Irysowa\",\n \"Niecała\",\n \"Wybickiego\",\n \"Nadbrzeżna\",\n \"Szarych Szeregów\",\n \"Wałowa\",\n \"Słowicza\",\n \"Strumykowa\",\n \"Drzymały\",\n \"Gołębia\",\n \"Torowa\",\n \"Cegielniana\",\n \"Cyprysowa\",\n \"Słowianska\",\n \"Diamentowa\",\n \"Waryńskiego\",\n \"Częstochowska\",\n \"Dojazdowa\",\n \"Przechodnia\",\n \"Hallera\",\n \"Lubelska\",\n \"Plater\",\n \"Popiełuszki\",\n \"Borówkowa\",\n \"Chełmońskiego\",\n \"Daszyńskiego\",\n \"Plażowa\",\n \"Tartaczna\",\n \"Jabłoniowa\",\n \"Kossaka\",\n \"Skargi\",\n \"Ludowa\",\n \"Sokola\",\n \"Azaliowa\",\n \"Szmaragdowa\",\n \"Lipca\",\n \"Staffa\",\n \"Tysiąclecia\",\n \"Brzechwy\",\n \"Jastrzębia\",\n \"Kusocińskiego\",\n \"Storczykowa\",\n \"Wilcza\",\n \"Górnicza\",\n \"Szafirowa\",\n \"Długosza\",\n \"Handlowa\",\n \"Krokusowa\",\n \"Składowa\",\n \"Widok\",\n \"Perłowa\",\n \"Skośna\",\n \"Wypoczynkowa\",\n \"Chmielna\",\n \"Jaskółcza\",\n \"Nowowiejska\",\n \"Piwna\",\n \"Śląska\",\n \"Zaułek\",\n \"Głogowa\",\n \"Górska\",\n \"Truskawkowa\",\n \"Kaszubska\",\n \"Kosynierów\",\n \"Mazurska\",\n \"Srebrna\",\n \"Bociania\",\n \"Ptasia\",\n \"Cedrowa\",\n \"Rycerska\",\n \"Wieniawskiego\",\n \"Żabia\",\n \"Toruńska\",\n \"Podmiejska\",\n \"Słonecznikowa\",\n \"Sowia\",\n \"Stolarska\",\n \"Powstańców\",\n \"Sucharskiego\",\n \"Bolesława Krzywoustego\",\n \"Konarskiego\",\n \"Szczęśliwa\",\n \"Lazurowa\",\n \"Miarki\",\n \"Narcyzowa\",\n \"Browarna\",\n \"Konstytucji 3 Maja\",\n \"Majowa\",\n \"Miłosza\",\n \"Malczewskiego\",\n \"Orkana\",\n \"Skrajna\",\n \"Bankowa\",\n \"Bydgoska\",\n 
\"Piekarska\",\n \"Żeglarska\",\n \"Jana\",\n \"Turkusowa\",\n \"Tylna\",\n \"Wysoka\",\n \"Zakątek\",\n \"Maczka\",\n \"Morska\",\n \"Rataja\",\n \"Szewska\",\n \"Podwale\",\n \"Pałacowa\",\n \"Magnoliowa\",\n \"Ceglana\",\n \"Sawickiej\",\n \"Ściegiennego\",\n \"Wiklinowa\",\n \"Zakole\",\n \"Borowa\",\n \"Kolorowa\",\n \"Lisia\",\n \"Lotnicza\",\n \"Sarnia\",\n \"Wiązowa\",\n \"Grottgera\",\n \"Kolonia\",\n \"Królewska\",\n \"Promienna\",\n \"Daleka\",\n \"Jana Sobieskiego\",\n \"Rejtana\",\n \"Wiatraczna\",\n \"Kaliska\",\n \"Łanowa\",\n \"Średnia\",\n \"Wiślana\",\n \"Wróblewskiego\",\n \"Koralowa\",\n \"Kruczkowskiego\",\n \"Lelewela\",\n \"Makuszyńskiego\",\n \"Sybiraków\",\n \"Kowalska\",\n \"Morcinka\",\n \"Odrzańska\",\n \"Okulickiego\",\n \"Solidarnosci\",\n \"Zapolskiej\",\n \"Łabędzia\",\n \"Wojciecha\",\n \"Bałtycka\",\n \"Lwowska\",\n \"Rajska\",\n \"Korfantego\",\n \"Pszenna\",\n \"Ciasna\",\n \"Floriana\",\n \"Hutnicza\",\n \"Kielecka\",\n )\n\n regions = (\n \"Dolnośląskie\",\n \"Kujawsko - pomorskie\",\n \"Lubelskie\",\n \"Lubuskie\",\n \"Łódzkie\",\n \"Małopolskie\",\n \"Mazowieckie\",\n \"Opolskie\",\n \"Podkarpackie\",\n \"Podlaskie\",\n \"Pomorskie\",\n \"Śląskie\",\n \"Świętokrzyskie\",\n \"Warmińsko - mazurskie\",\n \"Wielkopolskie\",\n \"Zachodniopomorskie\",\n )\n\n building_number_formats = (\"##\", \"###\", \"##/##\")\n postcode_formats = (\"##-###\",)\n street_address_formats = (\n \"{{street_prefix}} {{street_name}} {{building_number}}\",\n \"{{street_prefix_short}} {{street_name}} {{building_number}}\",\n )\n address_formats = (\"{{street_address}}\\n{{postcode}} {{city}}\",)\n\n def street_prefix(self) -> str:\n \"\"\"\n Randomly returns a street prefix\n :example: 'aleja'\n \"\"\"\n return self.random_element(self.street_prefixes)\n\n def street_prefix_short(self) -> str:\n \"\"\"\n Randomly returns an abbreviation of the street prefix.\n :example: 'al.'\n \"\"\"\n return self.random_element(self.street_prefixes)[:2] + \".\" # type: ignore\n\n def street_name(self) -> str:\n \"\"\"\n Randomly returns a street name\n :example: 'Wróblewskiego'\n \"\"\"\n return self.random_element(self.streets)\n\n def city(self) -> str:\n \"\"\"\n Randomly returns a street name\n :example: 'Konin'\n \"\"\"\n return self.random_element(self.cities)\n\n def administrative_unit(self) -> str:\n \"\"\"\n :example: 'Wielkopolskie'\n \"\"\"\n return self.random_element(self.regions)\n\n def postcode(self) -> str:\n \"\"\"\n :example: '62-200'\n \"\"\"\n return \"%02d-%03d\" % (self.generator.random.randint(1, 99), self.generator.random.randint(1, 999))\n\n def zipcode(self) -> str:\n \"\"\"\n :example: '62-200'\n \"\"\"\n return self.postcode()\n\n def postalcode(self) -> str:\n \"\"\"\n :example: '62-200'\n \"\"\"\n return self.postcode()\n\n region = administrative_unit\n","repo_name":"joke2k/faker","sub_path":"faker/providers/address/pl_PL/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15066,"program_lang":"python","lang":"hr","doc_type":"code","stars":16539,"dataset":"github-code","pt":"16"} +{"seq_id":"43918443902","text":"from collections import deque\n\nGRAY, BLACK = 0, 1\n\ndef topological(graph):\n order, enter, state = deque(), set(graph), {}\n\n def dfs(node):\n state[node] = GRAY\n for k in graph.get(node, ()):\n sk = state.get(k, None)\n if sk == GRAY: raise ValueError(\"cycle\")\n if sk == BLACK: continue\n enter.discard(k)\n dfs(k)\n order.appendleft(node)\n state[node] = BLACK\n\n while enter: dfs(enter.pop())\n return 
order\n\nquery_count = 1\nfor _ in range(query_count):\n program_count, _ = [int(p) for p in input().split()]\n program_graph = {}\n for i in range(1, program_count+1):\n dependencies = [int(p) for p in input().split()]\n program_graph[i] = []\n if dependencies[0] == 0:\n pass\n else:\n program_graph[i] = list(reversed(sorted(dependencies[1:])))\n wanted_programs = [int(p) for p in input().split()]\ngraph = program_graph\nprint(program_graph)\nprint(topological(graph))","repo_name":"stanislavkozlovski/python_exercises","sub_path":"hackerrank/CodeAgon/fifth_problem.py","file_name":"fifth_problem.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"70738140808","text":"from __future__ import print_function # future imports must precede every other import\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google.auth import impersonated_credentials, default\nimport pickle\nfrom datetime import datetime\nimport pytz\nimport os\nfrom pathlib import Path\n\n#for scraper\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport pprint\n\n#USER SPECIFICATIONS - INFO ABOUT THE SPREADSHEET\n#assumption: a column is dedicated for the exchange\nSPREADSHEET_ID = 'Google Sheet ID'\nSHEET_NAME = 'Name of the Google Sheet'\nSTARTING_ROW_NUM = '3'\nENDING_ROW_NUM = '136'\n\nTICKER_COLUMN = 'A'\nEXCHANGE_COLUMN = 'B'\nPREV_CLOSE_COLUMN = 'E'\nDAY_MIN_COLUMN = 'G'\nDAY_MAX_COLUMN = 'F'\nTIME_STAMP_COLUMN = 'C'\nSTATUS_MARKER_COLUMN = 'D'\n\nWRITE_START_COLUMN = 'C'\nWRITE_END_COLUMN = 'G'\n\n\n#CUSTOM CONSTANTS\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly https://www.googleapis.com/auth/spreadsheets'\n\nTICKER_EXCHANGE_RANGE = SHEET_NAME+'!'+TICKER_COLUMN+STARTING_ROW_NUM+':'+TIME_STAMP_COLUMN+ENDING_ROW_NUM\nWRITE_RANGE_NAME = SHEET_NAME+'!'+WRITE_START_COLUMN+STARTING_ROW_NUM+':'+WRITE_END_COLUMN+ENDING_ROW_NUM\n\n\n\n# NOTE: main() is only defined below, so it cannot be called from a guard up here;\n# the __main__ entry-point call has to run after all the definitions are executed.\n\n\ndef main():\n \n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n \n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n if Path('token.json').exists():\n os.remove('token.json')\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'client_secret_1018816297994-fil2doubov5rppaeqtpmjmcv2788ak6s.apps.googleusercontent.com.json', SCOPES)\n creds = flow.run_local_server(port=0)\n \n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n \n \n \n service = build('sheets', 'v4', credentials=creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n \n \n #READ TICKERS, EXCHANGES, TIMESTAMPS\n result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n range=TICKER_EXCHANGE_RANGE).execute()\n tickerExchangeList = result.get('values', [])\n\n if not tickerExchangeList:\n print('No data found.')\n else:\n writeValues = []\n \n classNames = [\"hrc1Nd\", \"M2CUtd\"]\n labels = [\"Previous close\", \"Day range\"]\n \n for 
tickerExchange in tickerExchangeList:\n \n values = retrieveData(tickerExchange[0], tickerExchange[1], classNames, labels, 3, \"₹\")\n \n #UPDATE STATUS & TIMESTAMP\n if '' in values:\n values.insert(0,\"ERR\")\n try:\n values.insert(0, tickerExchange[2])\n except:\n values.insert(0, \"\")\n else:\n values.insert(0,\"OK\")\n IST_timezone = pytz.timezone(\"Asia/Kolkata\")\n IST_timestamp = datetime.now(IST_timezone)\n timestamp = str(IST_timestamp)\n values.insert(0, timestamp)\n \n writeValues.append(values)\n \n \n #WRITE TIMESTAMP, STATUS, PREV CLOSE, MAX, MIN TO SHEETS\n body = {\n 'values': writeValues\n }\n result = service.spreadsheets().values().update(\n spreadsheetId=SPREADSHEET_ID, range=WRITE_RANGE_NAME,\n valueInputOption='USER_ENTERED', body=body).execute()\n\n \n\n#SCRAPING & PARSING\n\ndef getURL(ticker, exchange):\n template = \"https://g.co/finance/\"\n url = template+ticker+\":\"+exchange\n return url\n\ndef scrapePage(url):\n req = requests.get(url)\n soup = BeautifulSoup(req.text, \"html.parser\")\n return soup;\n\n#labels = [label1, label2]\ndef getValue(labelList, valueList, labels):\n values = []\n \n for label in labels:\n labelIndex = labelList.index(label)\n value = valueList[labelIndex]\n values.append(value)\n \n return values;\n\n#classNames = [className1, className2]\n#labels = [label1, label2]\ndef retrieveData(ticker, exchange, classNames, labels, numValsToReturn, currencySymbol):\n \n try:\n url = getURL(ticker, exchange)\n html = scrapePage(url)\n\n classLists = []\n\n for className in classNames:\n classItems = html.findAll(class_=className)\n classItemsText = []\n for item in classItems:\n classItemsText.append(item.text)\n classLists.append(classItemsText)\n \n values = getValue(classLists[0], classLists[1], labels)\n \n for label in labels:\n if \"range\" in label:\n valueToSplit = values[labels.index(label)]\n splitValues = valueToSplit.split(\" - \")\n values.remove(valueToSplit)\n values.append(splitValues[1])\n values.append(splitValues[0])\n \n values = [s.strip(currencySymbol) for s in values]\n values = [float(v.replace(',','')) for v in values]\n \n except:\n values = []\n i = 0\n while i 0:\n NPix += 1\n return_string += \"hit pixels: {}\".format( NPix )\n return return_string\n\n\nclass CentralTriggerData(Container):\n def __init__(self, name='CentralTriggerData'):\n super().__init__(name)\n self.add_item('gps_time')\n self.add_item('tels_with_trigger')\n\n\nclass RawCameraData(Container):\n \"\"\"\n Storage of raw data from a single telescope\n\n Parameters\n ----------\n\n adc_sums : dict by channel\n (masked) arrays of all integrated ADC data (n_pixels)\n adc_samples : dict by channel\n (masked) arrays of non-integrated ADC sample data (n_pixels, n_samples)\n num_channels : int\n number of gain channels in camera\n\n \"\"\"\n def __init__(self, tel_id):\n super().__init__(\"CT{:03d}\".format(tel_id))\n self.add_item('adc_sums', dict())\n self.add_item('adc_samples', dict())\n self.add_item('num_channels')\n\n\nclass CalibratedCameraData(Container):\n \"\"\"\n Storage of calibrated (p.e.) 
data from a single telescope\n\n Parameters\n ----------\n\n pe_charge : dict (only one channel)\n arrays of all calibrated data (n_pixels)\n tom : time of maximum\n\n \"\"\"\n def __init__(self, tel_id):\n super(CalibratedCameraData, self).__init__(\"CT{:03d}\".format(tel_id))\n self.add_item('run_id')\n self.add_item('event_id')\n self.add_item('tels_with_data')\n self.add_item('pe_charge', dict())\n self.add_item('tom', dict())\n","repo_name":"hlaffon/ctapipe","sub_path":"ctapipe/io/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"71306542407","text":"from flask import Flask, render_template, request\nfrom azure.storage.blob import BlobClient\nimport urllib.request\nimport json\nimport os\nimport ssl\n\nstorkey = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nimagestorage = \"https://{storageaccountname}.blob.core.windows.net/\"\ninferenceurl = \"https://{AzureMLInferenceEndpoint}.northcentralus.inference.ml.azure.com/score\"\namlwebservicekey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxx'\n\napp = Flask(__name__)\n\n@app.route('/upload')\ndef upload_file():\n return render_template('upload.html')\n\n@app.route('/uploader', methods = ['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n f = request.files['file']\n storage_account_key = storkey\n storage_url = imagestorage\n blob_client = BlobClient(storage_url, container_name=\"test\", blob_name=f.filename, credential=storage_account_key)\n with open(f.filename, \"rb\") as data:\n blob_client.upload_blob(data)\n url = inferenceurl\n api_key = amlwebservicekey # Replace this with the API key for the web service\n headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}\n data = {\"image\":f.filename}\n body = str.encode(json.dumps(data))\n req = urllib.request.Request(url, body, headers)\n response = urllib.request.urlopen(req)\n result = response.read()\n return result\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"bcprescott/MSDS","sub_path":"Capstone_COVID19/website/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40655656821","text":"from typing import List # required: findCircleNum's annotations are evaluated when the class body runs\n\nCONNECTED = 1\n\nclass Solution:\n\n def get_neighbors(self, grid, city):\n\n other_cities = len(grid[0])\n\n neighbors = []\n \n for other_city in range(other_cities):\n\n if city == other_city:\n continue\n\n if grid[city][other_city] != CONNECTED:\n continue\n\n neighbors.append(other_city) \n \n return neighbors \n\n def mark_connected_cities(self, grid, visited, city):\n \n if city in visited:\n return \n \n visited.add(city)\n\n neighbors = self.get_neighbors(grid, city)\n for neighbor in neighbors: \n if neighbor in visited:\n continue\n \n self.mark_connected_cities(grid, visited, neighbor)\n\n\n\n def findCircleNum(self, grid: List[List[int]]) -> int:\n\n num_rows, num_cols = len(grid), len(grid[0])\n\n num_provinces = 0\n\n visited = set()\n\n for city in range(num_rows):\n for other_city in range(num_cols):\n if grid[city][other_city] == CONNECTED and city not in visited:\n num_provinces += 1\n self.mark_connected_cities(grid, visited, city)\n \n return num_provinces","repo_name":"mh4535/practice","sub_path":"dfs/Number Of 
Provinces.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44790248599","text":"\"\"\"\n请实现两个函数,分别用来序列化和反序列化二叉树。\n\"\"\"\nimport collections\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# 2021.04.14 K神的解法\nclass Codec:\n def serialize(self, root):\n if not root: return \"[]\"\n queue = collections.deque()\n queue.append(root)\n res = []\n while queue:\n node = queue.popleft()\n if node:\n res.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n else: res.append(\"null\")\n return '[' + ','.join(res) + ']'\n\n def deserialize(self, data):\n if data == \"[]\": return\n vals, i = data[1:-1].split(','), 1\n root = TreeNode(int(vals[0]))\n queue = collections.deque()\n queue.append(root)\n while queue:\n node = queue.popleft()\n if vals[i] != \"null\":\n node.left = TreeNode(int(vals[i]))\n queue.append(node.left)\n i += 1\n if vals[i] != \"null\":\n node.right = TreeNode(int(vals[i]))\n queue.append(node.right)\n i += 1\n return root","repo_name":"ZhiyuSun/leetcode-practice","sub_path":"剑指Offer/37_序列化二叉树.py","file_name":"37_序列化二叉树.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"6551145494","text":"from typing import Any, Dict, List, Optional\n\nimport numpy as np\nfrom pydantic.typing import Literal\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_array, check_is_fitted\n\n\nclass CricularTransformer(BaseEstimator, TransformerMixin):\n def __init__(\n self,\n period: Optional[List[np.number]] = None,\n nan_policy: Literal[\"propagate\", \"raise\", \"omit\"] = \"propagate\", # type: ignore\n ):\n self.period = period\n self.nan_policy = nan_policy\n\n def fit(self, X, y=None) -> \"CricularTransformer\":\n\n if hasattr(X, \"columns\"):\n self.feature_names_in_ = list(X.columns)\n cos_names = [\"cos_\" + f for f in self.feature_names_in_]\n sin_names = [\"sin_\" + f for f in self.feature_names_in_]\n self.feature_names_out_ = cos_names + sin_names\n\n # Check X,y shape\n X = check_array(X, accept_sparse=True)\n self.n_features_in_ = X.shape[1]\n self.n_features_out_ = self.n_features_in_ * 2\n\n # Compute period\n if self.period is None:\n self.period_ = np.max(X, axis=0) - np.min(X, axis=0)\n else:\n if len(self.period) != self.n_features_in_:\n raise ValueError(\n \"Unexpected input shape. Got %d, expected %d (period arg)\" % (X.shape[1], self.n_features_in_)\n )\n self.period_ = np.array(self.period)\n\n return self\n\n def transform(self, X) -> np.ndarray:\n\n check_is_fitted(self, \"period_\")\n\n X = check_array(X, accept_sparse=True)\n if X.shape[1] != self.n_features_in_:\n raise ValueError(\n \"Unexpected input shape. 
Got %d, expected %d (from fit)\" % (X.shape[1], self.n_features_in_)\n )\n\n X = X / self.period_ * (2 * np.pi)\n X_cos = np.cos(X)\n X_sin = np.sin(X)\n return np.concatenate([X_cos, X_sin], axis=1)\n\n def _more_tags(self) -> Dict[str, Any]:\n return {\"allow_nan\": True, \"X_types\": [\"2darray\", \"2dlabels\"]}\n","repo_name":"Luis-RL/lrl-toolbox","sub_path":"lrl_toolbox/preprocessing/circular_transformer.py","file_name":"circular_transformer.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70897488648","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 23 18:18:57 2019\n\n@author: Alfandra's Life\n\"\"\"\n\nimport csv\n\nclass dicky(object):\n def karyawanterbaik(self): \n with open('dicky.txt', mode='r') as csv_file:\n csv_reader = csv.reader(csv_file) # plain reader: rows are indexable lists, so row[1] etc. work\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'The column names are {\", \".join(row)}')\n line_count += 1\n continue # the header row carries no employee data\n print(row[1], \"works in the\", row[2], \"division and is the best employee of the month of\", row[3])\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n","repo_name":"awangga/belajarpython","sub_path":"kelas_2c/dicky.py","file_name":"dicky.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"10400000090","text":"import logging\nimport os\n\nfrom oidcmsg.storage.init import init_storage\nimport requests\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\nfrom oidcmsg.context import OidcContext\n\nfrom oidcendpoint import authz\nfrom oidcendpoint import rndstr\nfrom oidcendpoint.id_token import IDToken\nfrom oidcendpoint.scopes import SCOPE2CLAIMS\nfrom oidcendpoint.scopes import Scopes\nfrom oidcendpoint.session.claims import STANDARD_CLAIMS\nfrom oidcendpoint.session.claims import ClaimsInterface\nfrom oidcendpoint.session.manager import create_session_manager\nfrom oidcendpoint.template_handler import Jinja2TemplateHandler\nfrom oidcendpoint.user_authn.authn_context import populate_authn_broker\nfrom oidcendpoint.util import allow_refresh_token\nfrom oidcendpoint.util import build_endpoints\nfrom oidcendpoint.util import get_http_params\nfrom oidcendpoint.util import importer\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_path(url, path):\n if url.endswith(\"/\"):\n if path.startswith(\"/\"):\n return \"{}{}\".format(url, path[1:])\n\n return \"{}{}\".format(url, path)\n\n if path.startswith(\"/\"):\n return \"{}{}\".format(url, path)\n\n return \"{}/{}\".format(url, path)\n\n\ndef init_user_info(conf, cwd):\n kwargs = conf.get(\"kwargs\", {})\n\n if \"db_file\" in kwargs:\n kwargs[\"db_file\"] = os.path.join(cwd, kwargs[\"db_file\"])\n\n if isinstance(conf[\"class\"], str):\n return importer(conf[\"class\"])(**kwargs)\n\n return conf[\"class\"](**kwargs)\n\n\ndef init_service(conf, endpoint_context=None):\n kwargs = conf.get(\"kwargs\", {})\n\n if endpoint_context:\n kwargs[\"endpoint_context\"] = endpoint_context\n\n if isinstance(conf[\"class\"], str):\n return importer(conf[\"class\"])(**kwargs)\n\n return conf[\"class\"](**kwargs)\n\n\ndef get_token_handlers(conf):\n th_args = conf.get(\"token_handler_args\", None)\n if not th_args:\n # create 3 keys\n keydef = [\n {\"type\": \"oct\", \"bytes\": \"24\", \"use\": [\"enc\"], \"kid\": \"code\"},\n {\"type\": \"oct\", \"bytes\": \"24\", \"use\": [\"enc\"], \"kid\": \"token\"},\n {\"type\": \"oct\", \"bytes\": 
\"24\", \"use\": [\"enc\"], \"kid\": \"refresh\"},\n ]\n\n jwks_def = {\n \"private_path\": \"private/token_jwks.json\",\n \"key_defs\": keydef,\n \"read_only\": False,\n }\n th_args = {\"jwks_def\": jwks_def}\n for typ, tid in [(\"code\", 600), (\"token\", 3600), (\"refresh\", 86400)]:\n th_args[typ] = {\"lifetime\": tid}\n\n return th_args\n\n\nclass EndpointContext(OidcContext):\n def __init__(\n self,\n conf,\n keyjar=None,\n cwd=\"\",\n cookie_dealer=None,\n httpc=None,\n ):\n OidcContext.__init__(self, conf, keyjar, entity_id=conf.get(\"issuer\", \"\"))\n self.conf = conf\n self.db_conf = conf.get(\"db_conf\", {})\n\n # For my Dev environment\n self.cdb = None\n self.jti_db = None\n self.registration_access_token = None\n self.session_db = None\n\n self.add_boxes(\n {\n \"client\": \"cdb\",\n \"jti\": \"jti_db\",\n \"registration_access_token\": \"registration_access_token\",\n \"session\": \"session_db\"\n },\n self.db_conf,\n )\n\n self.cwd = cwd\n\n # Those that use seed wants bytes but I can only store str.\n try:\n self.set(\"seed\", conf[\"seed\"])\n except KeyError:\n self.set(\"seed\", rndstr(32))\n\n # Default values, to be changed below depending on configuration\n self.endpoint = {}\n self.issuer = \"\"\n self.httpc = httpc or requests\n self.jwks_uri = None\n self.sso_ttl = 14400 # 4h\n self.symkey = rndstr(24)\n # self.id_token_schema = IdToken\n self.idtoken = None\n self.authn_broker = None\n self.authz = None\n self.endpoint_to_authn_method = {}\n self.cookie_dealer = cookie_dealer\n self.login_hint_lookup = None\n self.login_hint2acrs = None\n self.userinfo = None\n self.scope2claims = SCOPE2CLAIMS\n # arguments for endpoints add-ons\n self.args = {}\n\n for param in [\n \"issuer\",\n \"sso_ttl\",\n \"symkey\",\n \"client_authn\",\n # \"id_token_schema\",\n ]:\n try:\n setattr(self, param, conf[param])\n except KeyError:\n pass\n\n self.th_args = get_token_handlers(conf)\n\n # self.cdb = self.get_db(db_conf, 'client')\n # self.registration_access_token = self.get_db(db_conf, 'registration_access_token')\n # self.jti_db = self.get_db(db_conf, 'jti')\n\n # session db\n self._sub_func = {}\n self.do_sub_func()\n\n # has to be after the above\n self.set_session_db()\n\n if \"cookie_name\" in conf:\n self.cookie_name = conf[\"cookie_name\"]\n else:\n self.cookie_name = {\n \"session\": \"oidcop\",\n \"register\": \"oidc_op_rp\",\n \"session_management\": \"sman\",\n }\n\n try:\n self.template_handler = conf[\"template_handler\"]\n except KeyError:\n try:\n loader = conf[\"template_loader\"]\n except KeyError:\n template_dir = conf[\"template_dir\"]\n loader = Environment(\n loader=FileSystemLoader(template_dir), autoescape=True\n )\n self.template_handler = Jinja2TemplateHandler(loader)\n\n self.setup = {}\n jwks_uri_path = conf[\"keys\"][\"uri_path\"]\n\n try:\n if self.issuer.endswith(\"/\"):\n self.jwks_uri = \"{}{}\".format(self.issuer, jwks_uri_path)\n else:\n self.jwks_uri = \"{}/{}\".format(self.issuer, jwks_uri_path)\n except KeyError:\n self.jwks_uri = \"\"\n\n for item in [\n \"cookie_dealer\",\n \"authz\",\n \"authentication\",\n \"id_token\",\n \"scope2claims\",\n ]:\n _func = getattr(self, \"do_{}\".format(item), None)\n if _func:\n _func()\n\n _cap = self.do_endpoints()\n\n self.provider_info = self.create_providerinfo(_cap)\n\n _token_endp = self.endpoint.get(\"token\")\n if _token_endp:\n _token_endp.allow_refresh = allow_refresh_token(self)\n\n for item in [\"userinfo\", \"login_hint_lookup\", \"login_hint2acrs\", \"add_on\"]:\n _func = getattr(self, 
\"do_{}\".format(item), None)\n if _func:\n _func()\n\n # which signing/encryption algorithms to use in what context\n self.jwx_def = {}\n\n # special type of logging\n self.events = None\n\n # The HTTP clients request arguments\n _cnf = conf.get(\"httpc_params\")\n if _cnf:\n self.httpc_params = get_http_params(_cnf)\n else: # Backward compatibility\n self.httpc_params = {\"verify\": conf.get(\"verify_ssl\")}\n\n self.set_scopes_handler()\n # self.set_claims_handler()\n\n # If pushed authorization is supported\n if \"pushed_authorization_request_endpoint\" in self.provider_info:\n self.par_db = None\n self.add_boxes({\"par\": \"par_db\"}, self.db_conf)\n\n # If device authentication is supported\n if \"device_authorization_supported\" in self.provider_info:\n self.dev_auth_db = None\n self.add_boxes({\"dev_auth\": \"dev_auth_db\"}, self.db_conf)\n\n self.claims_interface = ClaimsInterface(self)\n\n def add_boxes(self, boxes, db_conf):\n for key, attr in boxes.items():\n setattr(self, attr, init_storage(db_conf, key))\n\n def set_scopes_handler(self):\n _spec = self.conf.get(\"scopes_handler\")\n if _spec:\n _kwargs = _spec.get(\"kwargs\", {})\n _cls = importer(_spec[\"class\"])(**_kwargs)\n self.scopes_handler = _cls(_kwargs)\n else:\n self.scopes_handler = Scopes()\n\n # def set_claims_handler(self):\n # _spec = self.conf.get(\"claims_handler\")\n # if _spec:\n # _kwargs = _spec.get(\"kwargs\", {})\n # _cls = importer(_spec[\"class\"])(**_kwargs)\n # self.claims_handler = _cls(_kwargs)\n # else:\n # self.claims_handler = Claims()\n\n def set_session_db(self):\n self.do_session_manager()\n # append userinfo db to the session db\n self.do_userinfo()\n\n def do_add_on(self):\n if self.conf.get(\"add_on\"):\n for spec in self.conf[\"add_on\"].values():\n if isinstance(spec[\"function\"], str):\n _func = importer(spec[\"function\"])\n else:\n _func = spec[\"function\"]\n _func(self.endpoint, **spec[\"kwargs\"])\n\n def do_login_hint2acrs(self):\n _conf = self.conf.get(\"login_hint2acrs\")\n\n if _conf:\n self.login_hint2acrs = init_service(_conf)\n else:\n self.login_hint2acrs = None\n\n def do_login_hint_lookup(self):\n _conf = self.conf.get(\"login_hint_lookup\")\n if _conf:\n self.login_hint_lookup = init_service(_conf)\n if self.userinfo:\n self.login_hint_lookup.user_info = self.userinfo\n\n def do_userinfo(self):\n _conf = self.conf.get(\"userinfo\")\n if _conf:\n if self.session_manager:\n self.userinfo = init_user_info(_conf, self.cwd)\n self.session_manager.userinfo = self.userinfo\n else:\n logger.warning(\"Cannot init_user_info if no session_db was provided.\")\n\n def do_id_token(self):\n _conf = self.conf.get(\"id_token\")\n if _conf:\n self.idtoken = init_service(_conf, self)\n else:\n self.idtoken = IDToken(self)\n\n def do_authentication(self):\n _conf = self.conf.get(\"authentication\")\n if _conf:\n self.authn_broker = populate_authn_broker(\n _conf, self, self.template_handler\n )\n else:\n self.authn_broker = {}\n\n self.endpoint_to_authn_method = {}\n for method in self.authn_broker:\n try:\n self.endpoint_to_authn_method[method.action] = method\n except AttributeError:\n pass\n\n def do_cookie_dealer(self):\n _conf = self.conf.get(\"cookie_dealer\")\n if _conf:\n if not self.cookie_dealer:\n self.cookie_dealer = init_service(_conf)\n\n def do_sub_func(self):\n _conf = self.conf.get(\"sub_func\", {})\n for key, args in _conf.items():\n if \"class\" in args:\n self._sub_func[key] = init_service(args)\n elif \"function\" in args:\n if isinstance(args[\"function\"], str):\n 
self._sub_func[key] = importer(args[\"function\"])\n else:\n self._sub_func[key] = args[\"function\"]\n\n def do_session_manager(self, db=None):\n if self.session_db is None:\n self.session_manager = create_session_manager(\n self, self.th_args, db=db, sub_func=self._sub_func\n )\n else:\n self.session_manager = create_session_manager(\n self, self.th_args, db=self.session_db, sub_func=self._sub_func\n )\n\n def do_endpoints(self):\n self.endpoint = build_endpoints(\n self.conf[\"endpoint\"], endpoint_context=self, issuer=self.conf[\"issuer\"],\n )\n\n _cap = self.conf.get(\"capabilities\", {})\n\n for endpoint, endpoint_instance in self.endpoint.items():\n if endpoint in [\"webfinger\", \"provider_config\"]:\n continue\n\n if endpoint_instance.endpoint_info:\n for key, val in endpoint_instance.endpoint_info.items():\n if key not in _cap:\n _cap[key] = val\n\n return _cap\n\n def do_authz(self):\n authz_spec = self.conf.get(\"authz\")\n if authz_spec:\n self.authz = init_service(authz_spec, self)\n else:\n self.authz = authz.Implicit(self)\n\n def create_providerinfo(self, capabilities):\n \"\"\"\n Dynamically create the provider info response\n\n :param capabilities:\n :return:\n \"\"\"\n\n _provider_info = capabilities\n _provider_info[\"issuer\"] = self.issuer\n _provider_info[\"version\"] = \"3.0\"\n\n # acr_values\n if self.authn_broker:\n acr_values = self.authn_broker.get_acr_values()\n if acr_values is not None:\n _provider_info[\"acr_values_supported\"] = acr_values\n\n if self.jwks_uri and self.keyjar:\n _provider_info[\"jwks_uri\"] = self.jwks_uri\n\n _provider_info.update(self.idtoken.provider_info)\n if \"scopes_supported\" not in _provider_info:\n _provider_info[\"scopes_supported\"] = [s for s in self.scope2claims.keys()]\n if \"claims_supported\" not in _provider_info:\n _provider_info[\"claims_supported\"] = STANDARD_CLAIMS[:]\n\n return _provider_info\n\n def set(self, key, val):\n setattr(self, key, val)\n\n def get(self, key):\n return getattr(self, key)","repo_name":"IdentityPython/oidcendpoint","sub_path":"src/oidcendpoint/endpoint_context.py","file_name":"endpoint_context.py","file_ext":"py","file_size_in_byte":13304,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"14763473295","text":"import csv\n\n#list the path to the CSV file\ncsv_file_path = '/Users/laurenpescarus_razeware/Desktop/MSU Course/Classwork/python-challenge/python-challenge/PyBank/Resources/budget_data.csv'\n\n#list variables to store financial analysis data\ntotal_months = 0\nprofitloss_net = 0\nprevious_profitloss = None\nprofitloss_changes = []\nmonths = []\n\n#open and read the CSV file\nwith open(csv_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n\n #skip the header row\n header = next(csv_reader)\n\n #create the output file path\n output_file_path = '/Users/laurenpescarus_razeware/Desktop/MSU Course/Classwork/python-challenge/python-challenge/PyBank/Analysis/output.txt'\n\n #count the total number of months by iterating through the rows in the CSV file\n for row in csv_reader:\n months.append(row[0])\n\n #get data from the current row\n date = row[0]\n profitloss = int(row[1]) \n\n #calculate the total profit/loss over all months\n profitloss_net += profitloss\n\n #calculate profit/loss changes\n if previous_profitloss is not None:\n profitloss_change = profitloss - previous_profitloss\n profitloss_changes.append(profitloss_change)\n\n #update previous profit/loss for the next run\n previous_profitloss = 
profitloss\n\n#calculate the total number of months\ntotal_months = len(months)\n\n#calculate the average change in profit/loss\naverage_change = sum(profitloss_changes) / len(profitloss_changes)\n\n#find the greatest increase and decrease in profit/loss\nmax_increase = max(profitloss_changes)\nmax_decrease = min(profitloss_changes)\n\n#find the months for the greatest increase and decrease\nincrease_month = months[profitloss_changes.index(max_increase)]\ndecrease_month = months[profitloss_changes.index(max_decrease)]\n\n#create the output text\noutput_text = f\"Financial Analysis\\n\" \\\n              f\"----------------------------\\n\" \\\n              f\"Total Months: {total_months}\\n\" \\\n              f\"Total: ${profitloss_net}\\n\" \\\n              f\"Average Change: ${average_change:.2f}\\n\" \\\n              f\"Greatest Increase in Profits: {increase_month} (${max_increase})\\n\" \\\n              f\"Greatest Decrease in Profits: {decrease_month} (${max_decrease})\"\n\n#write the output to a text file\nwith open(output_file_path, 'w') as output_file:\n    output_file.write(output_text)\n\n#display the results in the terminal with each variable on a new line\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: ${profitloss_net}\")\nprint(f\"Average Change: ${average_change:.2f}\")\nprint(f\"Greatest Increase in Profits: {increase_month} (${max_increase})\")\nprint(f\"Greatest Decrease in Profits: {decrease_month} (${max_decrease})\")","repo_name":"yourdailylauren/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34386280862","text":"# CLC Table Formatting Functions\n# Author: George Dzavashvili\n# Email: dzavashviligeorge@gmail.com\n# Twitter: @redpix_\n\n# Coding Region Change Validation Exception\n\n\nclass ValidationException(Exception):\n    def __init__(self, message, error_str):\n        super(ValidationException, self).__init__(message)\n\n        self.errors = error_str\n        self.message = message\n","repo_name":"dz4va/clc-vtable","sub_path":"table_formatter_exceptions.py","file_name":"table_formatter_exceptions.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1591390650","text":"import sys\nimport math\n\n\n# -------------------------------------------------------------------------------------------------\n# Skynet class: with a dictionary of all the nodes and all the gateways in the skynet\n# -------------------------------------------------------------------------------------------------\nclass Skynet:\n\n    def __init__(self, numnodes, numlinks, numgateways):\n\n        self.size = numnodes\n        self.numlinks = numlinks\n        self.numgateways = numgateways\n        self.gateways = {}\n        self.nodes = {_: Node(_) for _ in range(0, self.size)}\n\n    def addLink(self, pair):\n\n        # We link the nodes and assign a weight to the link.\n        # For this Skynet scenario the weight will always be 1, but it might be useful in other scenarios\n        self.nodes[pair[0]].links[pair[1]] = [pair[1], 1]\n        self.nodes[pair[1]].links[pair[0]] = [pair[0], 1]\n        return\n\n    def addGateway(self, gate):\n\n        self.gateways[gate] = gate\n        return\n\n    # -------------------------------------------------------------------------------------------------\n    # Method to find (Dijkstra) all the shortest paths from a given node (gate) to all the other nodes\n    # in the Skynet graph\n    # 
-------------------------------------------------------------------------------------------------\n    def shortestPaths(self, gate):\n\n        # We create a dictionary of nodes (tablenodes) with a double value:\n        # 1.- The size of the path (initialized to infinity)\n        # 2.- The optimal path (\"\" to start with)\n        # We also create a dictionary of visited nodes (visitednodes) where we keep the nodes not visited yet\n        tablenodes = {}\n        visitednodes = {}\n\n        # We initialize both dictionaries\n        for node in self.nodes.keys():\n            tablenodes[node] = [math.inf, \"\"]\n            visitednodes[node] = False\n\n        # We apply initial values to the gateway node (0 distance and no path to itself)\n        tablenodes[gate] = [0, str(gate) + \";\"]\n\n        # we delete all the gate nodes from the visited dict (except the one we are working with)\n        # so we do not go through them when finding the different paths\n        for restgates in self.gateways:\n            if restgates != gate:\n                del visitednodes[restgates]\n\n        # We repeat the process while there are nodes in the visited dictionary\n        while len(visitednodes) > 0:\n\n            # We set the pointer to the next node with the shortest path that is still in the visited dict\n            # For that, we go through the visitednodes dict. We call the node to work with: \"minnode\"\n            # The first time it will select the gate node since it is the only one with a value different from Inf\n            minnode = \"\"\n            for node in visitednodes.keys():\n                if minnode == \"\":\n                    minnode = node\n                else:\n                    if tablenodes[node][0] < tablenodes[minnode][0]:\n                        minnode = node\n\n            # We delete the selected node from the visited dict\n            del visitednodes[minnode]\n\n            # We analyse all the links associated to the minimum distance node\n            for link in self.nodes[minnode].links.keys():\n\n                # But we process only those nodes that have not been visited yet\n                if link in visitednodes.keys():\n\n                    # We calculate the distance: current distance to the node + the weight of the link (always 1)\n\n                    # Note: For the size/distance of the path:\n                    # - we will add 1 if the node is not linked with any gateway\n                    # - we will add 0 if the node has a link to a gate\n                    # - we will add -1 if the node is linked to two gates\n\n                    # if the node shares a link with any gateway we subtract one from \"foundgateways\"\n                    foundgateways = 1\n                    for thelinks in self.nodes[link].links:\n                        if thelinks in self.gateways:\n                            foundgateways -= 1\n\n                    distance = tablenodes[minnode][0] + foundgateways\n\n                    # If the distance is lower than the one registered on the nodes path table,\n                    # or if the distance is equal but the path is shorter,\n                    # we update the nodes path table (\"tablenodes\") for the current node (link)\n                    pathsizemin = len(tablenodes[minnode][1].split(\";\")[:-1])\n                    pathsizelink = len(tablenodes[link][1].split(\";\")[:-1])\n\n                    if distance < tablenodes[link][0] or \\\n                            (distance == tablenodes[link][0] and pathsizelink > pathsizemin):\n                        tablenodes[link][0] = distance\n                        tablenodes[link][1] = tablenodes[minnode][1] + str(link) + \";\"\n\n        # We exit the while loop once all the nodes have been visited.\n        # The table of nodes path is now updated with all the\n        # possible minimum paths. 
We return the node table.\n        return tablenodes\n\n    # -------------------------------------------------------------------------------------------------\n    # Method to call the generic shortestPaths method for every single gateway\n    # returning only the shortest path to the Agent containing the link that has to be severed\n    # -------------------------------------------------------------------------------------------------\n    def bestPath(self, agent):\n\n        # We keep track of the minimum length of the path and the bestpath\n        minlen = math.inf\n        bestpath = []\n\n        # For every gate in the Skynet\n        for gate in self.gateways.keys():\n\n            # Obtaining all the paths and choosing the one leading to the agent\n            tablenodes = self.shortestPaths(gate)\n            path = tablenodes[agent][1].split(\";\")[:-1]\n\n            # If the calculated value of the length of the path is lower...\n            # ...taking into account the gateway logic in \"shortestPaths\"\n            if tablenodes[agent][0] < minlen:\n                minlen = tablenodes[agent][0]\n                closestgate = gate\n                bestpath = path\n            # If it is equal, we compare the actual length of the path\n            elif tablenodes[agent][0] == minlen:\n                if len(path) < len(bestpath):\n                    minlen = tablenodes[agent][0]\n                    closestgate = gate\n                    bestpath = path\n\n        # we delete the link (both ways) so it is not considered in next turns as a valid link\n        del self.nodes[int(bestpath[0])].links[int(bestpath[1])]\n        del self.nodes[int(bestpath[1])].links[int(bestpath[0])]\n\n        return bestpath\n\n    def __str__(self):\n\n        string = \"Skynet size:\" + str(self.size) + \"\\n\\n\"\n        for node in self.nodes.values():\n            string = string + str(node) + \"\\n\"\n\n        string = string + \"\\nSkynet Gateways: \" + str(self.numgateways) + \" => \" + str(self.gateways)\n        return string\n\n\nclass Node:\n\n    def __init__(self, key):\n        self.key = key\n        self.links = {}\n\n    def __str__(self):\n        string = str(self.key) + \" linked to: \"\n        for i in self.links:\n            string = string + str(i) + \",\"\n\n        return (string[:-1])\n\n# --------------- Main program -----------------------\n\nnumnodes, numlinks, numgates = [int(i) for i in input().split()]\n\n# We create the Skynet network\nmySky = Skynet(numnodes, numlinks, numgates)\n\n# We add all links to the Skynet\nfor i in range(numlinks):\n    mySky.addLink([int(j) for j in input().split()])\n\n# We add all gateways to the Skynet\nfor i in range(numgates):\n    mySky.addGateway(int(input()))\n\n# game loop\nwhile True:\n    # Find the best path for the node on which the Skynet agent is positioned this turn (int(input()))\n    thepath = mySky.bestPath(int(input()))\n    print(thepath[0] + \" \" + thepath[1])\n\n\n","repo_name":"fgsdt1/Codingame","sub_path":"Skynet2.py","file_name":"Skynet2.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24177251943","text":"from odoo import fields, models, api, _\nfrom datetime import datetime, timedelta\nfrom odoo.exceptions import ValidationError\n\nGLOBAL_FIELDS_NAME = [\"Basic Salary\", \"Housing\", \"Annual Gross\", \"Transport Allowance\", \"Allowance\",\n                      \"Utility Allowance\", \"Total Annual Earnings\", \"Leave Allowance\", \"PIT\",\n                      \"Transport & Comm Allowance (Sales Only)\", \"Hazard Allowance\",\n                      \"Employee Pension Contribution\", \"Employer Pension Contribution\",\n                      \"Annual Gross of CRA\", \"Total Pension\", \"TAXABLE INCOME\", \"Annual Pension\",\n                      \"Annual PAYE\", \"Monthly PAYE\", \"Monthly Gross Income\", \"Surcharge\", \"Loan deduction\",\n                      \"Gross\", \"Attachment of Salary\", \"Assignment 
of Salary\", \"Child Support\", \"Deduction\",\n \"Reimbursement\", \"Net Salary\"]\n\nGLOBAL_LABELS_NAME = {\"Basic Salary\":\"amt_total_basic_salary\", \"Housing\":\"amt_total_housing\",\n \"Annual Gross\":\"amt_total_annual_gross\", \"Transport Allowance\":\"amt_total_transport_allowance\",\n \"Allowance\":\"amt_total_allowance\", \"Utility Allowance\":\"amt_total_utility_allowance\",\n \"Total Annual Earnings\":\"amt_total_total_annual_earnings\", \"Leave Allowance\":\"amt_total_leave_allowance\",\n \"PIT\":\"amt_total_pit\", \"Transport & Comm Allowance (Sales Only)\":\"amt_total_transport_comm_allowance\",\n \"Hazard Allowance\":\"amt_total_hazard_allowance\",\n \"Employee Pension Contribution\":\"amt_total_employee_pension_contribution\",\n \"Employer Pension Contribution\":\"amt_total_employer_pension_contribution\",\n \"Annual Gross of CRA\":\"amt_total_annual_gross_of_cra\", \"Total Pension\":\"amt_total_total_pension\",\n \"TAXABLE INCOME\":\"amt_total_taxable_income\", \"Annual Pension\":\"amt_total_annual_pension\",\n \"Annual PAYE\":\"amt_total_annual_paye\", \"Monthly PAYE\":\"amt_total_monthly_paye\",\n \"Monthly Gross Income\":\"amt_total_monthly_gross_income\", \"Surcharge\":\"amt_total_surcharge\",\n \"Loan deduction\":\"amt_total_load_deduction\", \"Gross\":\"amt_total_gross\",\n \"Attachment of Salary\":\"amt_total_attachment_salary\", \"Assignment of Salary\":\"amt_total_assignment_salary\",\n \"Child Support\":\"amt_total_child_support\", \"Deduction\":\"amt_total_deduction\",\n \"Reimbursement\":\"amt_total_reimbursement\", \"Net Salary\":\"amt_total_net_salary\"}\n\n\nclass HRPayslip(models.Model):\n _inherit = \"hr.payslip\"\n\n amt_total_basic_salary = fields.Float(\"Basic Salary\", compute=\"_wb_computation_total\")\n amt_total_housing = fields.Float(\"Housing\", compute=\"_wb_computation_total\")\n amt_total_annual_gross = fields.Float(\"Annual Gross\", compute=\"_wb_computation_total\")\n amt_total_transport_allowance = fields.Float(\"Transport Allowance\", compute=\"_wb_computation_total\")\n amt_total_allowance = fields.Float(\"Allowance\", compute=\"_wb_computation_total\")\n amt_total_utility_allowance = fields.Float(\"Utility Allowance\", compute=\"_wb_computation_total\")\n amt_total_total_annual_earnings = fields.Float(\"Total Annual Earnings\", compute=\"_wb_computation_total\")\n amt_total_leave_allowance = fields.Float(\"Leave Allowance\", compute=\"_wb_computation_total\")\n amt_total_pit = fields.Float(\"PIT\", compute=\"_wb_computation_total\")\n amt_total_transport_comm_allowance = fields.Float(\"Transport & Comm Allowance (Sales Only)\",\n compute=\"_wb_computation_total\")\n amt_total_hazard_allowance = fields.Float(\"Hazard Allowance\", compute=\"_wb_computation_total\")\n amt_total_employee_pension_contribution = fields.Float(\"Employee Pension Contribution\",\n compute=\"_wb_computation_total\")\n amt_total_employer_pension_contribution = fields.Float(\"Employer Pension Contribution\",\n compute=\"_wb_computation_total\")\n amt_total_annual_gross_of_cra = fields.Float(\"Annual Gross of CRA\", compute=\"_wb_computation_total\")\n amt_total_total_pension = fields.Float(\"Total Pension\", compute=\"_wb_computation_total\")\n amt_total_taxable_income = fields.Float(\"TAXABLE INCOME\", compute=\"_wb_computation_total\")\n amt_total_annual_pension = fields.Float(\"Annual Pension\", compute=\"_wb_computation_total\")\n amt_total_annual_paye = fields.Float(\"Annual PAYE\", compute=\"_wb_computation_total\")\n amt_total_monthly_paye = 
fields.Float(\"Monthly PAYE\", compute=\"_wb_computation_total\")\n amt_total_monthly_gross_income = fields.Float(\"Monthly Gross Income\", compute=\"_wb_computation_total\")\n amt_total_surcharge = fields.Float(\"Surcharge\", compute=\"_wb_computation_total\")\n amt_total_load_deduction = fields.Float(\"Loan deduction\", compute=\"_wb_computation_total\")\n amt_total_gross = fields.Float(\"Gross\", compute=\"_wb_computation_total\")\n amt_total_attachment_salary = fields.Float(\"Attachment of Salary\", compute=\"_wb_computation_total\")\n amt_total_assignment_salary = fields.Float(\"Assignment of Salary\", compute=\"_wb_computation_total\")\n amt_total_child_support = fields.Float(\"Child Support\", compute=\"_wb_computation_total\")\n amt_total_deduction = fields.Float(\"Deduction\", compute=\"_wb_computation_total\")\n amt_total_reimbursement = fields.Float(\"Reimbursement\", compute=\"_wb_computation_total\")\n amt_total_net_salary = fields.Float(\"Net Salary\", compute=\"_wb_computation_total\")\n \n def _wb_computation_total(self):\n for rec in self:\n labels = GLOBAL_LABELS_NAME.keys()\n prepare_vals = {}\n for line in GLOBAL_LABELS_NAME.values():\n if not rec[line]:\n prepare_vals[line] = 0\n for line in rec.line_ids:\n if line.name in labels:\n if prepare_vals.get(line.name, 0):\n prepare_vals[GLOBAL_LABELS_NAME.get(line.name)] += line.total\n else:\n prepare_vals[GLOBAL_LABELS_NAME.get(line.name)] = line.total\n if prepare_vals:\n rec.write(prepare_vals)\n\n\nclass Tickets(models.Model):\n _inherit = \"helpdesk.ticket\"\n\n exp_finish_date = fields.Datetime(string=\"~Expected Finish Date\", compute=\"_onchange_sla_status_ids\")\n exp_finish_sla_date = fields.Datetime(string=\"~Expected Finish Date2\", compute=\"_onchange_sla_status_ids\")\n\n @api.onchange(\"sla_status_ids\")\n def _onchange_sla_status_ids(self):\n for rec in self:\n today = rec.create_date\n for sla in rec.sla_status_ids:\n if sla.sla_id.time:\n today += timedelta(hours=sla.sla_id.time)\n rec.exp_finish_sla_date = today\n today = rec.create_date\n if rec.ticket_type_id:\n working_days = rec.ticket_type_id.time / 24\n # today += timedelta(hours=rec.ticket_type_id.time)\n today = self.date_by_adding_working_days(rec.create_date, working_days)\n rec.exp_finish_date = today\n\n def date_by_adding_working_days(self, from_date, add_days):\n import datetime\n business_days_to_add = add_days\n current_date = from_date\n while business_days_to_add > 0:\n current_date += datetime.timedelta(days=1)\n weekday = current_date.weekday()\n if weekday >= 5: # sunday = 6\n continue\n business_days_to_add -= 1\n return current_date\n\n\nclass TicketType(models.Model):\n _inherit = \"helpdesk.ticket.type\"\n\n time = fields.Float('In', help='Time to reach given stage based on ticket creation date', default=0, required=True)\n\n\nclass HRExpense(models.Model):\n _inherit = \"hr.expense.sheet\"\n\n state = fields.Selection([\n ('draft', 'Draft'),\n ('submit', 'Submitted'),\n ('approved_manager', 'Approved By Manager'),\n ('interal_audit', 'Internal Audit'),\n ('approve', 'Approved'),\n ('post', 'Posted'),\n ('done', 'Paid'),\n ('cancel', 'Refused')\n ], string='Status', index=True, readonly=True, track_visibility='onchange', copy=False, default='draft',\n required=True, help='Expense Report State')\n\n def _do_approve(self):\n self._check_can_approve()\n\n notification = {\n 'type': 'ir.actions.client',\n 'tag': 'display_notification',\n 'params': {\n 'title': _('There are no expense reports to approve.'),\n 'type': 'warning',\n 
'sticky': False,  # if False, the notification is shown only for a few seconds\n            },\n        }\n\n        filtered_sheet = self.filtered(lambda s: s.state in ['submit', 'draft', 'approved_manager', 'interal_audit'])\n        if not filtered_sheet:\n            return notification\n        for sheet in filtered_sheet:\n            sheet.write({'state': 'approve', 'user_id': sheet.user_id.id or self.env.user.id})\n        notification['params'].update({\n            'title': _('The expense reports were successfully approved.'),\n            'type': 'success',\n            'next': {'type': 'ir.actions.act_window_close'},\n        })\n\n        self.activity_update()\n        return notification\n\n    def action_manager_approve(self):\n        for rec in self:\n            if rec.state == \"submit\":\n                rec.state = \"approved_manager\"\n\n    def action_audit_manager_approve(self):\n        for rec in self:\n            if rec.state == \"approved_manager\":\n                rec.state = \"interal_audit\"\n\n\nclass Purchase(models.Model):\n    _inherit = \"purchase.order\"\n\n    is_approval_by_ceo = fields.Boolean(string=\"Approval by CEO?\", compute=\"_is_approval_by_ceo\")\n\n    def _is_approval_by_ceo(self):\n        amount = float(self.env['ir.config_parameter'].sudo().get_param('po_approval_config', '0'))\n        for rec in self:\n            rec.is_approval_by_ceo = False\n            if rec.amount_total < amount:\n                rec.is_approval_by_ceo = True\n\n    def approved_by_ceo(self):\n        for rec in self:\n            amount = float(self.env['ir.config_parameter'].sudo().get_param('po_approval_config', '0'))\n            if rec.amount_total < amount:\n                rec.x_studio_selection_field_m7jU2 = \"Approved\"\n            else:\n                rec.x_studio_selection_field_m7jU2 = \"Awaiting MD Approval\"\n\n    def approved_by_manager(self):\n        for rec in self:\n            rec.x_studio_selection_field_m7jU2 = \"Approved\"\n\n    def checkWritablePermission(self):\n        not_editable = False\n        if self.x_studio_selection_field_m7jU2 in (\"Awaiting CFO Approval\",\"Awaiting MD Approval\",\"Approved\") \\\n                and not self.env.user.has_group('wb_general.po_editor_group'):\n            not_editable = True\n        if not_editable:\n            raise ValidationError(_(\"You don't have access rights to edit this purchase order.\"))\n\n    def write(self, vals):\n        for rec in self:\n            rec.checkWritablePermission()\n        return super(Purchase, self).write(vals)\n\n\nclass Partner(models.Model):\n    _inherit = \"res.partner\"\n\n    @api.model\n    def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):\n        args = args or []\n        if name:\n            args = ['|',('name',operator, name), ('x_studio_customer_id', operator, name)]\n            name = ''\n        return super(Partner, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)","repo_name":"basseyroro/fob-kkon5","sub_path":"wb_general/models/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":11697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10319921385","text":"from __future__ import division\n\nimport PyCEGUI\nimport timeit\n\nfrom config.loadingscreens import loading_screens_by_map\n\nclass GUILoading:\n\tdef __init__(self, application):\n\t\tself.application = application\n\t\tself.window = PyCEGUI.WindowManager.getSingleton().loadLayoutFromFile(\"Loading.layout\")\n\t\tif int(self.application.settings.get(\n\t\t\t\t\t\t\t\t\"FIFE\", \"ScreenResolution\", \"1024x768\").split(\"x\")[0]) > 1024:\n\t\t\tself.window.setProperty(\"Image\", \"Loadscreen01/full_image\")\n\t\tself.default_image = self.window.getProperty(\"Image\")\n\t\tself.action = None\n\t\tself.visible = False\n\t\tself.fade_duration = 1\n\t\tself.fade_start = 0\n\n\tdef 
show(self):\n\t\tself.window.show()\n\t\tself.window.moveToFront()\n\t\tself.window.setAlpha(1)\n\t\tself.visible = True\n\n\tdef showFade(self, action, map=None):\n\t\tself.show()\n\t\tself.window.setAlpha(0)\n\t\tself.fade_start = timeit.default_timer()\n\t\tself.action = action\n\t\timage = loading_screens_by_map.get(map)\n\t\tif image is not None:\n\t\t\tself.window.setProperty(\"Image\", image + \"/full_image\")\n\t\telse:\n\t\t\tself.window.setProperty(\"Image\", self.default_image)\n\t\t\n\tdef hideFade(self):\n\t\tself.visible = False\n\t\tself.fade_start = 0\n\n\tdef update(self):\n\t\tif not self.window.isVisible():\n\t\t\treturn\n\t\tif self.visible:\n\t\t\tself.window.setAlpha((timeit.default_timer() - self.fade_start) / self.fade_duration)\n\t\t\tif (self.window.getAlpha() >= 1) and self.action:\n\t\t\t\tself.action()\n\t\t\t\tself.action = None\n\t\telse:\n\t\t\tif self.fade_start == 0:\n\t\t\t\tself.fade_start = timeit.default_timer()\n\t\t\tself.window.setAlpha(\n\t\t\t\t1 - (timeit.default_timer() - self.fade_start) / self.fade_duration)\n\t\t\tif self.window.getAlpha() <= 0:\n\t\t\t\tself.window.hide()\n","repo_name":"Niektory/steamfolktales","sub_path":"scripts/guiloading.py","file_name":"guiloading.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72337471049","text":"\"\"\"\nTask 3.\nWrite a program that queries the user for three numbers and outputs the smallest of them.\nExample run:\nGive the first number: 11\nGive the second number: 4\nGive the last number: 7\nSmallest number is: 4\nMake sure it works with all the following numbers (given in this order, one line per program\nrun):\n1,2,3\n3,2,1\n1,3,2\nAlso:\n3,3,2\n3,2,3\n2,3,3\nIf you successfully get the smallest output in all these six cases, you most likely get any other\narrangement correct as well.\nIf you feel like it, test with these remaining cases as well:\n2,3,1\n2,1,3\n3,1,2\n\"\"\"\n# queries the user for three numbers\nfirstNumber = int(input(\"Give the first number: \"))\nsecondNumber = int(input(\"Give the second number: \"))\nthirdNumber = int(input(\"Give the third number: \"))\n\n# Determine the smallest number and print it.\n# Note: a comparison like `firstNumber < (secondNumber and thirdNumber)` only\n# compares against thirdNumber, so chained comparisons are used instead.\nif firstNumber <= secondNumber and firstNumber <= thirdNumber:\n    smallestNumber = firstNumber\nelif secondNumber <= thirdNumber:\n    smallestNumber = secondNumber\nelse:\n    smallestNumber = thirdNumber\nprint(\"Smallest number is:\", smallestNumber)","repo_name":"humayun-rashid/python-problems","sub_path":"Assignment-1/task-3.py","file_name":"task-3.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23610122030","text":"import os\r\nimport io\r\nimport flask\r\nimport socket\r\nimport concurrent.futures\r\n\r\nservers=os.environ.get(\"ZOOKEEPER\", \"localhost:2181\").split(\",\")\r\n\r\ndef query(hostport):\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\thp = hostport.split(\":\", 1)\r\n\ttry:\r\n\t\ts.settimeout(3)\r\n\t\tif len(hp)==2:\r\n\t\t\ts.connect((hp[0], int(hp[1])))\r\n\t\telse:\r\n\t\t\ts.connect((hp[0], 2181))\r\n\t\t\r\n\t\ts.send(b\"srvr\")\r\n\t\treturn s.recv(4096).decode(\"UTF-8\")\r\n\texcept Exception as e:\r\n\t\treturn \"Error: %s\" % e\r\n\tfinally:\r\n\t\ts.close()\r\n\r\ndef 
safe_label(name):\r\n\treturn name.replace(\"-\",\"_\").replace(\".\",\"_\")\r\n\r\ndef collect():\r\n\tex = concurrent.futures.ThreadPoolExecutor(max_workers=3)\r\n\tmresult = ex.map(query, servers)\r\n\t\r\n\tvalues = \"\"\r\n\tmodes = {}\r\n\tfor server,data in zip(servers, mresult):\r\n\t\tfor line in io.StringIO(data):\r\n\t\t\tk,v = [v.strip() for v in line.split(\":\", 1)]\r\n\t\t\t\r\n\t\t\tif k == \"Mode\":\r\n\t\t\t\tmodes[server] = v\r\n\t\t\tif k in [\"Received\", \"Sent\", \"Connections\", \"Outstanding\", \"Node count\"]:\r\n\t\t\t\tvalues += 'zk_%s{server=\"%s\"} %s\\r\\n' % (\r\n\t\t\t\t\tk.lower().replace(\" \",\"_\"),\r\n\t\t\t\t\tserver,\r\n\t\t\t\t\tv)\r\n\t\t\tif k == \"Error\":\r\n\t\t\t\tvalues += 'zk_%s{server=\"%s\",message=\"%s\"} 1\\r\\n' % (\r\n\t\t\t\t\tk.lower().replace(\" \",\"_\"),\r\n\t\t\t\t\tserver,\r\n\t\t\t\t\tv)\r\n\t# map known modes to an index; -1 covers modes outside the table (e.g. standalone)\r\n\tlut = [\"leader\", \"follower\"]\r\n\tparams = \",\".join(['%s=\"%s\"' % (safe_label(k), lut.index(v) if v in lut else -1) for k,v in modes.items()])\r\n\tvalues += \"zk_modes{%s} 1\\r\\n\" % params\r\n\treturn values\r\n\r\napp = flask.Flask(__name__)\r\n\r\n@app.route(\"/metrics\", methods=[\"GET\"])\r\ndef metrics():\r\n\treturn collect()\r\n\r\ndef cli():\r\n\tprint(collect())\r\n\r\nif __name__==\"__main__\":\r\n\tcli()\r\n","repo_name":"hkwi/zookeeper_exporter","sub_path":"zookeeper_exporter.py","file_name":"zookeeper_exporter.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73553394568","text":"import sys\nimport numpy as np;\n\n# Infile and outfile declaration from system args\nfilein = sys.argv[1]\n\nf = open(filein,'r')\n\nfiledata = f.readline()\n\n# integer division: these values are used as an array size and as loop bounds\ntimes = np.zeros(int(filedata)//2);\n\nfor i in range(0, int(filedata)//2):\n    a = f.readline();\n    b = f.readline();\n    times[i] = (int(b) - int(a))\n\n\nwhole = 0;\nfor j in range(0, int(filedata)//2):\n    whole = whole + times[j];\n\navg = whole / (int(filedata)//2);\n\nprint(\"The avg of all the file creates was \" + str(avg) + \" milliseconds\");\n\n\nf.close();\n","repo_name":"DanielThurau/DTG-Benchmarking","sub_path":"Meta/Linux/scripts/avgResults.py","file_name":"avgResults.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15974549054","text":"from entity import Size, Attack, DamageType, AbilityType\n\nBUGBEAR = {\n    'ac': 16,\n    'max_hp': 27,\n    'size': Size.MEDIUM,\n    # 'attack': Attack('Morningstar', 2, 8, 2, DamageType.PIERCING),\n    'attacks': [\n        Attack('Morningstar', 2, 8, 2, DamageType.PIERCING),\n    ],\n    'name': 'Bugbear',\n    'faction': 'monster',\n    'abilities': {'str': 15, 'dex': 14, 'con': 13, 'int': 8, 'wis': 11, 'cha': 9}\n}\n\nSKELETON = {\n    'ac': 13,\n    'max_hp': 13,\n    'size': Size.MEDIUM,\n    'attacks': [Attack('Shortsword', 1, 6, 2, DamageType.PIERCING, AbilityType.DEX), ],\n    'name': 'Skeleton',\n    'faction': 'monster',\n    'abilities': {'str': 10, 'dex': 14, 'con': 15, 'int': 6, 'wis': 8, 'cha': 5}\n}\n\nZOMBIE = {\n    'ac': 8,\n    'max_hp': 22,\n    'size': Size.MEDIUM,\n    'attacks': [Attack('Slam', 1, 6, 1, DamageType.BLUDGEONING), ],\n    'name': 'Zombie',\n    'faction': 'monster',\n    'abilities': {'str': 13, 'dex': 6, 'con': 16, 'int': 3, 'wis': 6, 'cha': 5}\n}\n\nGOBLIN = {\n    'ac': 15,\n    'max_hp': 7,\n    'size': Size.SMALL,\n    'attacks': [Attack('Scimitar', 1, 6, 2, DamageType.SLASHING, AbilityType.DEX), ],\n    'name': 'Goblin',\n    'faction': 'monster',\n    'abilities': {'str': 8, 'dex': 14, 
'con': 10, 'int': 10, 'wis': 8, 'cha': 8}\n}\n\nLIZARDFOLK = {\n    'ac': 15,\n    'max_hp': 22,\n    'size': Size.MEDIUM,\n    'attacks': [\n        Attack('Heavy Club', 1, 8, 3, DamageType.BLUDGEONING),\n        Attack('Spiked Shield', 1, 8, 3, DamageType.PIERCING)\n    ],\n    'name': 'Lizardfolk',\n    'faction': 'monster',\n    'abilities': {'str': 15, 'dex': 10, 'con': 13, 'int': 7, 'wis': 12, 'cha': 7}\n}\n\n# Villagers\n\nCOMMONER = {\n    'ac': 10,\n    'max_hp': 4,\n    'size': Size.MEDIUM,\n    'attacks': [Attack('Club', 1, 6, 0, DamageType.BLUDGEONING),],\n    'name': 'Commoner',\n    'faction': 'villager',\n}\n\nVETERAN = {\n    'ac': 17,\n    'max_hp': 58,\n    'size': Size.MEDIUM,\n    'attacks': [\n        Attack('Longsword', 1, 8, 3, DamageType.SLASHING),\n        Attack('Longsword', 1, 8, 3, DamageType.SLASHING),\n        Attack('Shortsword', 1, 6, 3, DamageType.SLASHING),\n    ],\n    'name': 'Veteran',\n    'faction': 'villager',\n    'abilities': {'str': 16, 'dex': 13, 'con': 14, 'int': 10, 'wis': 11, 'cha': 10}\n}\n\nGUARD = {\n    'ac': 16,\n    'max_hp': 11,\n    'size': Size.MEDIUM,\n    'attacks': [\n        Attack('Spear', 1, 6, 1, DamageType.SLASHING),\n    ],\n    'name': 'Guard',\n    'faction': 'villager',\n    'abilities': {'str': 13, 'dex': 12, 'con': 12, 'int': 10, 'wis': 11, 'cha': 10}\n}","repo_name":"Streamweaver/FightClub5e","sub_path":"mobs.py","file_name":"mobs.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8239889640","text":"import tensorflow_probability as tfp\nfrom tensorflow.python.keras.layers import Dense, Flatten, Conv2D, ReLU, InputLayer\nimport tensorflow as tf\n\nclass Encoder(tf.keras.layers.Layer):\n    \"\"\"Encoder class for use in convolutional VAE\n\n    Args:\n        latent_dim: dimensionality of latent distribution\n\n    Attributes:\n        encoder_conv: convolution layers of encoder\n        fc_mu: fully connected layer for mean in latent space\n        fc_log_var: fully connected layers for log variance in latent space\n    \"\"\"\n\n    def __init__(self, latent_dim=6):\n        super().__init__()\n        self.latent_dim = latent_dim\n\n        self.encoder_conv = tf.keras.Sequential(\n            [\n                # shape: [batch_size, 56, 56, 1]\n                InputLayer(input_shape=(56, 56, 1)),\n\n                # shape: [batch_size, 28, 28, 64]\n                Conv2D(filters = 64,\n                       kernel_size=4,\n                       strides=2,\n                       padding=\"same\",\n                       activation='relu'),\n\n                # shape: [batch_size, 14, 14, 64]\n                Conv2D(filters = 64,\n                       kernel_size=4,\n                       strides=2,\n                       padding=\"same\",\n                       activation='relu'),\n\n                # shape: [batch_size, 7, 7, 64]\n                Conv2D(filters = 64,\n                       kernel_size=4,\n                       strides=2,\n                       padding=\"same\",\n                       activation='relu'),\n\n                # shape: [batch_size, 4, 4, 64]\n                Conv2D(filters = 64,\n                       kernel_size=4,\n                       strides=2,\n                       padding=\"same\",\n                       activation='relu'),\n\n                # shape: [batch_size, 1024]\n                Flatten(),\n\n                # shape: [batch_size, 256]\n                Dense(256),\n                ReLU()\n            ]\n        )\n\n\n        # shape: [batch_size, self.latent_dim]\n        self.fc_mu = tf.keras.Sequential(\n            Dense(self.latent_dim),\n        )\n        self.fc_log_var = tf.keras.Sequential(\n            Dense(self.latent_dim),\n        )\n\n    def forward(self, inp):\n        out = self.encoder_conv(inp)\n        mu = self.fc_mu(out)\n        log_var = self.fc_log_var(out)\n        return [mu, log_var]\n\n\nclass SpatialBroadcastDecoder(tf.keras.layers.Layer):\n    \"\"\"SBD class for use in VAE, structure based on paper\n    https://arxiv.org/pdf/1901.07017.pdf\n\n    Args:\n        latent_dim: dimensionality of latent distribution\n\n    Attributes:\n        img_size: image size (necessary for tiling)\n        decoder_conv: convolution layers of decoder (unstrided; the latent is\n            broadcast to the full grid, so no upsampling is needed)\n    \"\"\"\n\n    def __init__(self, latent_dim):\n        super().__init__()\n\n        
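# latent_dim is the length of the latent code z; call() tiles each code across\n        # the full img_size x img_size grid before the unstrided convolutions below.\n        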
self.latent_dim = latent_dim\n        self.img_size = 56\n\n\n        self.decoder_conv = tf.keras.Sequential(\n            [\n                # Input_shape [batch_size, 56, 56, latent_dim + 2]\n\n\n                # shape: [batch_size, 56, 56, 64]\n                Conv2D(filters = 64,\n                       strides=(1, 1),\n                       kernel_size=(3,3),\n                       padding=\"same\",\n                       activation = \"relu\"),\n\n                # shape [batch_size, 56, 56, 64]\n                Conv2D(filters = 64,\n                       strides=(1,1),\n                       kernel_size=(3, 3),\n                       padding=\"same\",\n                       activation = \"relu\"),\n\n                # shape [batch_size, 56, 56, 1]\n                Conv2D(filters = 1,\n                       strides=(1,1),\n                       kernel_size=(3, 3),\n                       padding=\"same\"),\n\n            ]\n        )\n\n\n    def call(self, z):\n\n        batch_size = z.shape[0]\n\n        # broadcast (tile) latent sample of size k to image width w, height h\n\n        h = w = self.img_size\n\n        # z.shape [batch_size, latent_dim] LATENTS\n        # z_b.shape [batch_size, latent_dim * 3136] TILED LATENTS\n        z_b = tf.tile(z, [1, h * w])\n\n\n        # Reshape tensor\n        # z_b.shape [batch_size, 56, 56, latent_dim]\n        z_b = tf.reshape(z_b, [batch_size, h, w, self.latent_dim])\n\n\n        # Fixed coordinate channels --> X, Y COORDINATE CHANNELS\n        x = tf.linspace(tf.constant(-1, tf.float32), tf.constant(1, tf.float32), w)\n        y = tf.linspace(tf.constant(-1, tf.float32), tf.constant(1, tf.float32), w)\n\n\n        # Reshape operations\n\n        # shape [56, 56]\n        xb, yb = tf.meshgrid(x, y)\n\n        # shape [56, 56, 1]\n        xb = tf.expand_dims(xb, 2)\n        yb = tf.expand_dims(yb, 2)\n\n\n        def concat(element):\n            \"\"\" This function concatenates z_b, xb, y_b\n            --> TILED LATENTS + X,Y COORDINATES \"\"\"\n\n            # shape [56, 56, latent_dim + 2]\n            res = tf.concat(axis=2, values=[element, xb, yb])\n            return res\n\n\n        # shape [batch_size, 56, 56, latent_dim + 2]\n        z_sb = tf.map_fn(lambda m: concat(m), z_b)\n\n\n        # Apply convolutional layers (!unstrided!)\n        mu_D = self.decoder_conv(z_sb)\n\n        return mu_D\n\n\nclass BroadcastVAE(tf.keras.Model):\n    \"\"\"A simple VAE class with a spatial broadcast decoder\n\n    Args:\n        latent_dim: dimensionality of latent distribution\n    \"\"\"\n\n    def __init__(self, latent_dim):\n        super().__init__()\n        self.latent_dim = latent_dim\n\n        self.model_name = \"spatial-VAE\"\n\n        self.decoder = SpatialBroadcastDecoder(latent_dim=latent_dim)\n        self.encoder = Encoder(latent_dim=latent_dim)\n\n\n    @tf.function\n    def sample(self, epsilon=None):\n        if epsilon is None:\n            epsilon = tf.random.normal(shape=(100, self.latent_dim))\n        \n        return self.decode(epsilon, apply_sigmoid=True)\n\n\n    def encode(self, x):\n        mean, logvar = self.encoder.forward(x)\n        return mean, logvar\n\n\n    def decode(self, z, apply_sigmoid=False):\n        # get decoder distribution parameters\n        mu_D = self.decoder(z)\n\n        # if sigmoid is applied\n        if apply_sigmoid:\n            probs = tf.sigmoid(mu_D)\n            return probs\n\n        return mu_D\n","repo_name":"goody139/IANNWTF","sub_path":"Final Project/models/broadcast_vae.py","file_name":"broadcast_vae.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2325174230","text":"\"\"\"\n========================================\nUsing Subject Space ROIs from Freesurfer\n========================================\n\nAn example using the AFQ API to find bundles\nas defined by endpoint ROIs from freesurfer.\nThis example can be modified to work with ROIs\nin subject space from pipelines other than freesurfer.\n\"\"\"\nimport os.path as op\n\nimport nibabel as nib\nimport plotly\nimport numpy as np\n\nfrom AFQ.api.group import GroupAFQ\nimport AFQ.data.fetch as afd\nfrom 
AFQ.definitions.image import RoiImage\nimport AFQ.api.bundle_dict as abd\n\n##########################################################################\n# Get some example data\n# ---------------------\n#\n# Retrieves the high angular resolution diffusion imaging (HARDI) dataset from\n# Stanford's Vista Lab.\n#\n# See https://purl.stanford.edu/ng782rw8378 for details on the dataset.\n#\n# The data for the first subject and first session are downloaded locally\n# (by default into the user's home directory) under:\n#\n#   ``.dipy/stanford_hardi/``\n#\n# Anatomical data (``anat``) and diffusion-weighted imaging data (``dwi``) are\n# then extracted, formatted to be BIDS compliant, and placed in the AFQ\n# data directory (by default in the user's home directory) under:\n#\n#   ``AFQ_data/stanford_hardi/``\n#\n# This data represents the preprocessed diffusion data required for\n# initializing the GroupAFQ object (which we will do next).\n#\n# The clear_previous_afq argument is used to remove any previous runs of the afq object\n# stored in the AFQ_data/stanford_hardi/ BIDS directory. Set it to None if\n# you want to use the results of previous runs. Setting it to \"track\"\n# as here will only clear derivatives that depend on the tractography stage\n# (i.e., bundle delineation and tract profile calculation),\n# as well as the tractography itself, to save time on recomputation.\n# If you want to only clear derivatives that depend on bundle delineation,\n# and keep the tractography, you can set clear_previous_afq to\n# \"recog\" instead.\n\nafd.organize_stanford_data(clear_previous_afq=\"track\")\n\n##########################################################################\n# Generate left thalamus ROI from freesurfer segmentation file\n# ------------------------------------------------------------\n# 1. Load the segmentation file that was generated by Freesurfer for\n# the specific subject.\n# 2. Identify the left thalamus within the file, which has label\n# number 41.\n# 3. Create a Nifti image representing the left thalamus ROI:\n#    - Assign a value of 1 to the voxels that Freesurfer\n#      has labeled as 41 (i.e., the left thalamus).\n#    - Assign a value of 0 to all other voxels.\n#    This binary mask format is the expected input for pyAFQ when\n#    dealing with subject space ROIs. 
If it's already in binary format,\n#    there is no need to do this step.\n\nfreesurfer_subject_folder = op.join(\n    afd.afq_home, \"stanford_hardi\",\n    \"derivatives\", \"freesurfer\",\n    \"sub-01\", \"ses-01\",\n    \"anat\")\n\nseg_file = nib.load(op.join(\n    freesurfer_subject_folder, \"sub-01_ses-01_seg.nii.gz\"))\nleft_thal = seg_file.get_fdata() == 41\nnib.save(\n    nib.Nifti1Image(\n        left_thal.astype(np.float32),\n        seg_file.affine),\n    op.join(\n        freesurfer_subject_folder,\n        \"sub-01_ses-01_desc-leftThal_mask.nii.gz\"))\n\n# Fetch LV1 ROI\n# which was already generated using the process above\nafd.fetch_stanford_hardi_lv1()\n\n##########################################################################\n# Set tractography parameters (optional)\n# ---------------------------------------\n# We create a tracking_params dict to pass to the GroupAFQ object. It\n# specifies that we want 10,000 seeds randomly distributed only within the\n# endpoint ROIs and not throughout the white matter. This is controlled by\n# passing `\"seed_mask\": RoiImage()` in the `tracking_params` dict.\n#\n# We only do this to make this example faster and consume less space.\n\ntracking_params = dict(n_seeds=10000,\n                       random_seeds=True,\n                       rng_seed=42,\n                       seed_mask=RoiImage(use_endpoints=True))\n\n#############################################################################\n# Define custom `BundleDict` object\n# ---------------------------------\n# In a typical `BundleDict` object, ROIs are passed as paths to Nifti files.\n# Here, we define ROIs as dictionaries instead, containing BIDS filters.\n# Then pyAFQ can find the respective ROI for each subject and session.\n\nbundles = abd.BundleDict({\n    \"L_OR\": {\n        \"start\": {\n            \"scope\": \"freesurfer\",\n            \"suffix\": \"mask\",\n            \"desc\": \"leftThal\"},\n        \"end\": {\n            \"scope\": \"freesurfer\",\n            \"suffix\": \"anat\",\n            \"desc\": \"LV1\"\n        },\n        \"cross_midline\": False,\n        \"space\": \"subject\"\n    }})\n\n##########################################################################\n# Initialize a GroupAFQ object\n# ----------------------------\n#\n# Creates a GroupAFQ object that encapsulates tractometry,\n# passing in our custom bundle info. 
Then we run the pipeline\n# and generate a visualization of the bundle we found.\n\nmyafq = GroupAFQ(\n    bids_path=op.join(afd.afq_home, 'stanford_hardi'),\n    preproc_pipeline='vistasoft',\n    tracking_params=tracking_params,\n    bundle_info=bundles)\n\nbundle_html = myafq.export(\"indiv_bundles_figures\")\nplotly.io.show(bundle_html[\"01\"][\"L_OR\"])\n","repo_name":"yeatmanlab/pyAFQ","sub_path":"examples/tutorial_examples/use_subject_space_rois_from_freesurfer.py","file_name":"use_subject_space_rois_from_freesurfer.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"16"} +{"seq_id":"29418684763","text":"n = int(input())\n\ntimes = []\n\nfor _ in range(n):\n    x, y = map(int, input().strip().split(\" \"))\n\n    times.append((x, y))\n\ntimes.sort()\n\nadj = []\n\nfor i in range(n):\n    temp = []\n    adj.append(temp)\n\n\nfor i in range(n-1):\n    for j in range(i+1, n):\n        if times[i][1] <= times[j][0]:\n            adj[i].append(j)\n            # adj[j].append(i)\n\n\ndef dfs(n):\n    global visited, count\n    \n    if n in visited:\n        return\n    \n    count += 1\n    visited.append(n)\n\n    for i in adj[n]:\n        if i not in visited:\n            dfs(i)\n\n\nm = -1\nfor i in range(n):\n    visited = []\n    count = 0\n\n    dfs(i)\n\n    if count > m:\n        m = count\n\nprint(m)\n\n","repo_name":"KATTA-00/Competitive-Programming","sub_path":"Platforms/CSES Problem Set/Sorting and Seraching/Movie Festival.py","file_name":"Movie Festival.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22984215522","text":"expr = str(input('Type an expression: '))\ncont = []\nfor simb in expr:\n    if simb == '(':\n        cont.append('(')\n    elif simb == ')':\n        if len(cont) > 0:\n            cont.pop()\n        else:\n            cont.append(')')\n            break\nif len(cont) == 0:\n    print('Your expression is VALID')\nelse:\n    print('Your expression is INVALID')","repo_name":"alaanlimaa/Python_CVM1-2-3","sub_path":"Aula 17/ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"47244660","text":"TREE = '#'\r\nEMPTY = '.'\r\n\r\n\r\ndef check_slop(col_offset, row_offset, rows):\r\n    length = len(rows[0])\r\n    height = len(rows)\r\n    col_index = (0 + col_offset) % length\r\n    row_index = 0 + row_offset\r\n    tree_count = 0\r\n\r\n    while row_index < height:\r\n        if rows[row_index][col_index] == TREE:\r\n            tree_count += 1\r\n        row_index += row_offset\r\n        col_index = (col_index + col_offset) % length\r\n    return tree_count\r\n\r\n\r\ndef main():\r\n    with open('./input') as input_file:\r\n        rows = list(map(str.strip, input_file.readlines()))\r\n\r\n    print(check_slop(1, 1, rows) *\r\n          check_slop(3, 1, rows) *\r\n          check_slop(5, 1, rows) *\r\n          check_slop(7, 1, rows) *\r\n          check_slop(1, 2, rows))\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"rughciatuk/AdventOfCode2020","sub_path":"day3/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12180933419","text":"# random points\r\ndef number_generator():\r\n    a = 290797\r\n    while True:\r\n        yield a\r\n        a = a * a % 50515093\r\n\r\ndef random_points():\r\n    points = []\r\n    gen = number_generator()\r\n    for i in range(2_000_000):\r\n        points.append((next(gen), next(gen)))\r\n    return points\r\n\r\n# merge sort\r\ndef merge(a_1, a_2, axis):\r\n    result = 
[]\r\n    i, j = 0, 0\r\n    while i < len(a_1) and j < len(a_2):\r\n        if a_1[i][axis] <= a_2[j][axis]:\r\n            result.append(a_1[i])\r\n            i += 1\r\n        else:\r\n            result.append(a_2[j])\r\n            j += 1\r\n    while i < len(a_1):\r\n        result.append(a_1[i])\r\n        i += 1\r\n    while j < len(a_2):\r\n        result.append(a_2[j])\r\n        j += 1\r\n    return result\r\n\r\ndef merge_sort(a, index):\r\n    if len(a) == 1:\r\n        return a\r\n    midpt = len(a) // 2\r\n    a_1 = merge_sort(a[:midpt], index)\r\n    a_2 = merge_sort(a[midpt:], index)\r\n    return merge(a_1, a_2, index)\r\n\r\n\r\n\r\n# minimum distance\r\ndef euclid(a, b):\r\n    e_a = (b[0]-a[0])**2\r\n    e_b = (b[1]-a[1])**2\r\n    return (e_a + e_b)**0.5\r\n\r\ndef minimum_distance(P, X, Y):\r\n    #base case\r\n    if len(P) <= 1:\r\n        return (0,0), (0,0), float('inf')\r\n    elif len(P) in [2, 3]:\r\n        dist1 = euclid(P[0], P[1])\r\n        if len(P) == 2:\r\n            return P[0], P[1], dist1\r\n        else:\r\n            dist2 = euclid(P[0], P[2])\r\n            dist3 = euclid(P[1], P[2])\r\n            if dist2 < dist1:\r\n                if dist3 < dist2:\r\n                    return P[1], P[2], dist3\r\n                return P[0], P[2], dist2\r\n            return P[0], P[1], dist1\r\n    else:\r\n        # recursive case\r\n        mid_index = (len(X)) // 2\r\n        mid_line = X[mid_index][0]\r\n        \r\n        # divide\r\n        PL, PR = [], []\r\n        for p in range(len(P)):\r\n            if P[p][0] <= mid_line:\r\n                PL.append(P[p])\r\n            else:\r\n                PR.append(P[p])\r\n        \r\n        XL, XR = [], []\r\n        for p in range(len(X)):\r\n            if X[p][0] <= mid_line:\r\n                XL.append(X[p])\r\n            else:\r\n                XR.append(X[p])\r\n        \r\n        YL, YR = [], []\r\n        for p in range(len(Y)):\r\n            if Y[p][0] <= mid_line:\r\n                YL.append(Y[p])\r\n            else:\r\n                YR.append(Y[p])\r\n\r\n        # conquer\r\n        Lpt_a, Lpt_b, deltaL = minimum_distance(PL, XL, YL)\r\n        Rpt_a, Rpt_b, deltaR = minimum_distance(PR, XR, YR)\r\n        if deltaL < deltaR:\r\n            pt_a = Lpt_a\r\n            pt_b = Lpt_b\r\n            delta = deltaL\r\n        else:\r\n            pt_a = Rpt_a\r\n            pt_b = Rpt_b\r\n            delta = deltaR\r\n        \r\n        # combine\r\n        Y_prime = []\r\n        for p in range(len(Y)):\r\n            if (mid_line - delta) <= Y[p][0] and Y[p][0] <= (mid_line + delta):\r\n                Y_prime.append(Y[p])\r\n        \r\n        for p in range(len(Y_prime)):\r\n            j = 1\r\n            while (not (p+j) == len(Y_prime)) and j <= 7:\r\n                delta_temp = euclid(Y_prime[p], Y_prime[p + j])\r\n                if delta_temp < delta:\r\n                    pt_a = Y_prime[p]\r\n                    pt_b = Y_prime[p + j]\r\n                    delta = delta_temp\r\n                j += 1\r\n        \r\n        return pt_a, pt_b, delta\r\n    \r\n\r\n\r\n# test case\r\npts = random_points()\r\n\r\nX = merge_sort(pts, 0)\r\nprint(\"merge sorted X\")\r\n\r\nY = merge_sort(pts, 1)\r\nprint(\"merge sorted Y\")\r\n\r\npt_a, pt_b, res = minimum_distance(pts, X, Y)\r\nprint(\"best points:\", pt_a, pt_b)\r\nprint(\"min distance:\", res)","repo_name":"ElijahTyler/csi3610-code","sub_path":"min_distance.py","file_name":"min_distance.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35253262477","text":"alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef caesar(start_text, shift_amount, cipher_direction):\n    end_text = \"\"\n    if cipher_direction == \"decode\":\n        shift_amount *= -1\n    for char in start_text:\n        if char in alphabet:\n            position = alphabet.index(char)\n            # wrap around so large shifts don't run past the end of the list\n            new_position = (position + shift_amount) % len(alphabet)\n            end_text += alphabet[new_position]\n        else:\n            end_text += char\n    print(f\"Here's the {cipher_direction}d result: {end_text}\")\n\n\nimport 
art\nprint(art.logo)\nprint(\"-----------------------------------------\")\nprint(\" \")\nprint('Welcome to the cipher game! With this little program you can encode or decode your data. Give it a try!')\n\nshould_continue = True\nwhile should_continue:\n    direction = input(\"Do you want to encode or decode? Type 'encode' to encrypt, type 'decode' to decrypt: \\n\")\n    text = input(\"Type your message (letters, numbers or symbols):\\n\").lower()\n    shift = (int(input(\"How many places do you want to shift by:\\n\")))%len(alphabet)\n\n\n# new_shift= shift%len(alphabet)\n    caesar(start_text=text, shift_amount=shift, cipher_direction=direction)\n    ans = input(\"Do you want to try again? (yes or no)\").lower()\n    if ans ==\"no\":\n        should_continue = False\n        print('Good Bye!!')\n","repo_name":"sarawuwu/little_games_python","sub_path":"caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34138332803","text":"#\n# @lc app=leetcode id=785 lang=python3\n#\nfrom collections.abc import Mapping, Set\nclass NodeView(Mapping, Set):\n    __slots__ = '_nodes',\n\n    def __getstate__(self):\n        return {'_nodes': self._nodes}\n\n    def __setstate__(self, state):\n        self._nodes = state['_nodes']\n\n    def __init__(self, graph):\n        self._nodes = graph._node\n\n    # Mapping methods\n    def __len__(self):\n        return len(self._nodes)\n\n    def __iter__(self):\n        return iter(self._nodes)\n\n    def __getitem__(self, n):\n        return self._nodes[n]\n\n    # Set methods\n    def __contains__(self, n):\n        return n in self._nodes\n\n    @classmethod\n    def _from_iterable(cls, it):\n        return set(it)\n\n    # DataView method\n    def __call__(self, data=False, default=None):\n        if data is False:\n            return self\n        return NodeDataView(self._nodes, data, default)\n\n    def data(self, data=True, default=None):\n        if data is False:\n            return self\n        return NodeDataView(self._nodes, data, default)\n\n    def __str__(self):\n        return str(list(self))\n\n    def __repr__(self):\n        return '%s(%r)' % (self.__class__.__name__, tuple(self))\n\nclass Graph:\n\n    def __init__(self):\n        self._node = {}\n        self._adj = {}\n\n    def add_edge(self, u_of_edge, v_of_edge, **attr):\n        u, v = u_of_edge, v_of_edge\n        # add nodes\n        if u not in self._node:\n            self._adj[u] = {}\n            self._node[u] = {}\n        if v not in self._node:\n            self._adj[v] = {}\n            self._node[v] = {}\n        # add the edge\n        datadict = self._adj[u].get(v, {})\n        datadict.update(attr)\n        self._adj[u][v] = datadict\n        self._adj[v][u] = datadict\n\n\n    @property\n    def nodes(self):\n        nodes = NodeView(self)\n        self.__dict__['nodes'] = nodes\n        return nodes\n\n    def neighbors(self, n):\n        return iter(self._adj[n])\n\nfrom collections import defaultdict\nclass Solution:\n    # def isBipartite(self, graph: List[List[int]]) -> bool:\n    def isBipartite(self, graph):\n\n\n        n = len(graph)\n        # if n % 2 != 0:\n        #     return False\n\n        # from networkx import nx\n        # g = nx.Graph()\n        g = Graph()\n        for u in range(n):\n            for v in graph[u]:\n                g.add_edge(u, v)\n\n        # print(nx.info(g))\n        red = set()\n        explored = defaultdict(lambda: False)\n        \n        def dfs(node, set_red):\n            # print('start exploring', node, 'set_red=', set_red, 'red=', red)\n            explored[node] = True\n            if set_red:\n                red.add(node)\n\n            for neighbor in g.neighbors(node):\n                if not explored[neighbor]:\n                    res = dfs(neighbor, set_red = not set_red)\n                    # print('finish exploring', node, 'set_red=', set_red, 'red=', red, 'res=', res)\n                    return res\n                else:\n                    # True means not bipartite\n                    if set_red and neighbor in red:\n                        # print('finish exploring', node, 'set_red=', set_red, 'red=', red, 'res=', True)\n                        return True\n                    elif not set_red and neighbor not in red:\n                        # print('finish exploring', node, 
'set_red=', set_red, 'red=', red, 'res=', True)\n                        return True \n\n        for n in g.nodes:\n            if not explored[n]:\n                if all([not explored[neighbor] for neighbor in g.neighbors(n)]):\n                    if dfs(n, set_red=True):\n                        return False\n                else:\n                    for neighbor in g.neighbors(n):\n                        if explored[neighbor]:\n                            if neighbor in red:\n                                if dfs(n, set_red=False):\n                                    return False\n                            else:\n                                if dfs(n, set_red=True):\n                                    return False\n        return True\n\n\n\n\n# s = Solution()\n# graph = [[1,3], [0,2], [1,3], [0,2]]\n# print(s.isBipartite(graph))\n\n\n\n# graph = [[1,2,3], [0,2], [0,1,3], [0,2]]\n# print(s.isBipartite(graph) == False)\n\n\n\n\n# graph = [[3],[2,4],[1],[0,4],[1,3]]\n# print(s.isBipartite(graph)) #True\n\n\n\n# graph = [[1,4],[0,2],[1],[4],[0,3]]\n# print(s.isBipartite(graph)) #True\n\n\n\n\n\n\n\n\n    \n","repo_name":"nickyfoto/lc","sub_path":"python/785.is-graph-bipartite.py","file_name":"785.is-graph-bipartite.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11873754343","text":"import torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\nfrom models.modules import build_mlp \n\n\nclass GoalReconstructor(nn.Module):\n    def __init__(self, opts, img_feat_input_dim, img_fc_dim, img_fc_use_batchnorm, img_dropout, fc_bias=True):\n        super(GoalReconstructor, self).__init__()\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        enc_kwargs = OrderedDict([\n            ('input_dim', img_feat_input_dim),\n            ('hidden_dims', img_fc_dim),\n            ('use_batchnorm', img_fc_use_batchnorm),\n            ('dropout', img_dropout),\n            ('fc_bias', fc_bias),\n            ('relu', opts.mlp_relu)\n        ]) \n        dec_kwargs = OrderedDict([\n            ('input_dim', img_fc_dim[-1]),\n            ('hidden_dims', img_fc_dim[:-1][::-1]+[img_feat_input_dim]),\n            ('use_batchnorm', img_fc_use_batchnorm),\n            ('dropout', img_dropout),\n            ('fc_bias', fc_bias),\n            ('relu', opts.mlp_relu)\n        ]) \n        self.enc = build_mlp(**enc_kwargs)\n        self.dec = build_mlp(**dec_kwargs)\n\n    def forward(self, img_feat):\n        encoding = self.enc(img_feat)\n        # decode from the encoding: the decoder's input_dim is img_fc_dim[-1],\n        # the size of the encoding, not of the raw image features\n        pred = self.dec(encoding)\n        return encoding, pred \n\nclass GoalPredictor(nn.Module):\n    def __init__(self, opts, img_feat_input_dim, ctx_input_dim, mlp_fc_dim, mlp_fc_use_batchnorm, mlp_dropout, fc_bias=True):\n        super(GoalPredictor, self).__init__()\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        mlp_kwargs = OrderedDict([\n            ('input_dim', img_feat_input_dim + ctx_input_dim),\n            ('hidden_dims', mlp_fc_dim),\n            ('use_batchnorm', mlp_fc_use_batchnorm),\n            ('dropout', mlp_dropout),\n            ('fc_bias', fc_bias),\n            ('relu', opts.mlp_relu)\n        ]) \n        self.mlp = build_mlp(**mlp_kwargs)\n        \n    def forward(self, img_feat, ctx_feat):\n        x = torch.cat([img_feat, ctx_feat], dim=1)\n        pred = self.mlp(x)\n        return pred\n","repo_name":"princetonvisualai/VLNActionPriors","sub_path":"tasks/R2R-pano/models/goal_model.py","file_name":"goal_model.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70860937928","text":"import sys\nimport os\nimport re\nimport time\nimport xbmc\nimport xbmcgui\nimport xbmcaddon\nimport shutil\nimport json\n\nIconPath = \"/storage/.xbmc/media/tv/au/\"\nADDON = xbmcaddon.Addon()\n__cwd__ = xbmc.translatePath( ADDON.getAddonInfo('path') )\n__lib__ = os.path.join( __cwd__, 'resources')\nUSERPATH1 = os.path.join('/storage/.xbmc/addons')\nUSERPATH2 = os.path.join('/storage/.xbmc/userdata/addon_data')\n\ndef 
replace(fName, srcStr, desStr):\n    fi=open(fName)\n    text=fi.read()\n    text=re.subn(srcStr,desStr,text)[0]\n    fi.close()\n\n    fo=open(fName,\"w\")\n    fo.write(text)\n    fo.close()\n\ndef updateICON(targetFile):\n    RESOURCES = os.path.join(__lib__,targetFile)\n    targetPath = os.path.join(IconPath,targetFile)\n    if not (os.path.exists(targetPath)):\n        shutil.copy(RESOURCES,IconPath)\n\ndef TargetFileUpdate(tFile,tPath):\n    Flag = False;\n    sPath=os.path.join(__lib__, tFile)\n    if os.path.exists(sPath):\n        if not os.path.exists(os.path.join(tPath,tFile)):\n            #xbmc.executebuiltin(\"XBMC.InstallAddon(%s)\" % tFile)\n            os.system(\"cp -r \"+sPath+\" \"+tPath)\n            Flag = True\n\n        os.system(\"rm -rf \"+sPath)\n\n    return Flag\n\ndef TargetFileUpdate2(tFile,tPath):\n    Flag = False\n    sPath=os.path.join(__lib__, tFile)\n    if os.path.exists(sPath):\n        if not os.path.exists(os.path.join(tPath,tFile)):\n            #xbmc.executebuiltin(\"XBMC.UpdateAddonRepos()\")\n            #time.sleep(2)\n            #xbmc.executebuiltin(\"XBMC.InstallAddon(%s)\" % tFile)\n            os.system(\"cp -r \"+sPath+\" \"+tPath)\n            Flag = True\n\n        os.system(\"rm -rf \"+sPath)\n\n    return Flag\n\ndef TargetFileRemove(tFile,tPath):\n    sPath=os.path.join(tPath,tFile)\n    if os.path.exists(sPath):\n        os.system(\"rm -rf \"+sPath)\n\n\ndef dis_or_enable_addon(addon_id, enable=\"true\"):\n    addon = '\"%s\"' % addon_id\n    if xbmc.getCondVisibility(\"System.HasAddon(%s)\" % addon_id) and enable == \"true\":\n        return xbmc.log(\"### Skipped %s, reason = already enabled\" % addon_id)\n    elif not xbmc.getCondVisibility(\"System.HasAddon(%s)\" % addon_id) and enable == \"false\":\n        xbmc.log(\"### Skipped %s, reason = not installed\" % addon_id)\n        quit()\n    else:\n        do_json = '{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"Addons.SetAddonEnabled\",\"params\":{\"addonid\":%s,\"enabled\":%s}}' % (addon, enable)\n        query = xbmc.executeJSONRPC(do_json)\n        response = json.loads(query)\n        if enable == \"true\":\n            xbmc.log(\"### Enabled %s, response = %s\" % (addon_id, response))\n        else:\n            xbmc.log(\"### Disabled %s, response = %s\" % (addon_id, response))\n        return xbmc.executebuiltin('Container.Update(%s)' % xbmc.getInfoLabel('Container.FolderPath'))\n\ndef installAddon(version,addon):\n    sFile = addon+\"-\"+version+\".zip\"\n    fullpath = \"http://services.seebo.com.au/seebo_addons/hex/repo/\"+addon+\"/\"+sFile\n    zipPath = USERPATH1+\"/packages/\"+sFile\n    os.system(\"curl \"+fullpath+\" > \"+zipPath)\n    time.sleep(1)\n    os.system(\"unzip \"+zipPath+\" -d \"+USERPATH1)\n\ndef dialog4Addons(name):\n    str=\"The system will be restarted to enable this add-on.\"\n    dialog = xbmcgui.Dialog()\n    return dialog.ok(\"Update new add-on for movies & TV shows\",\"[\"+name+\"]\",str)\n\ndef addons4MagicDragon():\n    addon = \"plugin.video.themagicdragon\"\n    if not os.path.exists(os.path.join(USERPATH1,addon)):\n        installAddon(\"1.30\",addon)\n        return dialog4Addons(\"The Magic Dragon\")\n    return False\n\ndef addons4Yoda():\n    addon = \"plugin.video.yoda\"\n    if not os.path.exists(os.path.join(USERPATH1,addon)):\n        installAddon('1.0.3.1',addon)\n        installAddon('1.1.0','script.module.yoda')\n        installAddon('1.0.0','script.yoda.artwork')\n        installAddon('1.0.0','script.yoda.metadata')\n        installAddon('1.1.2','script.realdebrid.mod')\n        return dialog4Addons(\"Yoda\")\n    return False\n\ndef addons4Tempest():\n    addon = \"plugin.video.tempest\"\n    if not os.path.exists(os.path.join(USERPATH1,addon)):\n        installAddon('4.0.88a',addon)\n        installAddon('1.0.2','script.tempest.artwork')\n        installAddon('1.0.1','script.tempest.metadata')\n        return dialog4Addons(\"Tempest\")\n    
return False\n\ndef addons4ExodusRedux():\n addon = \"plugin.video.exodusredux\"\n if not os.path.exists(os.path.join(USERPATH1,addon)):\n installAddon('2.0.3a',addon)\n installAddon('1.0.2','script.exodusredux.artwork')\n installAddon('1.0.2','script.exodusredux.metadata')\n installAddon('2.0.3','script.module.exodusredux')\n installAddon('0.0.1.94','script.module.openscrapers')\n return dialog4Addons(\"Exodus-Redux\")\n return False\n\n#======================================================================================================================\nif __name__=='__main__':\n updateICON(\"SBS ONE HD.png\")\n updateICON(\"SBS VICELAND HD.png\")\n updateICON(\"ABC NEWS.png\")\n updateICON(\"ABC COMEDY.png\")\n updateICON(\"7food network.png\")\n updateICON(\"10.png\")\n updateICON(\"10 BOLD.png\")\n updateICON(\"10 Peach.png\")\n updateICON(\"10 HD.png\")\n updateICON(\"Your Money.png\")\n updateICON(\"SBS Food.png\")\n\n #Check the ABCComedy/Kids channel\n srcPath=\"/storage/.xbmc/userdata/addon_data/service.multimedia.vdr-addon/config/channels.conf\"\n if os.path.exists(srcPath):\n replace(srcPath,\"ABCComedy/Kids\",\"ABC COMEDY\")\n\n #Add-ons\n RebootFlag = False\n sPath=os.path.join(USERPATH1,\"plugin.video.afl-video\")\n if os.path.exists(sPath):\n str1=\"AFL / NRL /Cricket / netball\"\n\n dialog = xbmcgui.Dialog()\n yes = dialog.yesno(\"Remove add-ons\",str1,\"These Add-ons will no longer be operating.\",\"Please refer [http://aussieaddons.com]\")\n if yes:\n TargetFileRemove(\"plugin.video.afl-video\",USERPATH1)\n TargetFileRemove(\"plugin.video.cricketaustralia\",USERPATH1)\n TargetFileRemove(\"plugin.video.netball-live\",USERPATH1)\n TargetFileRemove(\"plugin.video.nrl-live\",USERPATH1)\n time.sleep(1)\n RebootFlag = True\n\n sPath=os.path.join(USERPATH1,\"plugin.video.covenant\")\n if os.path.exists(sPath):\n str1=\"StreamHub / Survivor / Covenant\"\n\n dialog = xbmcgui.Dialog()\n yes = dialog.yesno(\"Remove add-ons\",str1,\"These Add-ons will no longer be operating.\")\n if yes:\n TargetFileRemove(\"plugin.video.streamhub\",USERPATH1)\n TargetFileRemove(\"plugin.video.survivor\",USERPATH1)\n TargetFileRemove(\"plugin.video.covenant\",USERPATH1)\n time.sleep(1)\n RebootFlag = True\n\n sPath=os.path.join(USERPATH1,\"plugin.video.neptune\")\n if os.path.exists(sPath):\n str1=\"Neptune Rising\"\n dialog = xbmcgui.Dialog()\n yes = dialog.yesno(\"Remove add-ons\",str1,\"This Add-on will no longer be operating.\")\n if yes:\n TargetFileRemove(\"plugin.video.neptune\",USERPATH1)\n TargetFileRemove(\"script.neptune.artwork\",USERPATH1)\n TargetFileRemove(\"script.neptune.metadata\",USERPATH1)\n TargetFileRemove(\"plugin.video.foodnetwork\",USERPATH1)\n TargetFileRemove(\"plugin.video.footballreplays\",USERPATH1)\n time.sleep(1)\n RebootFlag = True\n\n sPath=os.path.join(USERPATH1,\"plugin.video.mc1080p\")\n if os.path.exists(sPath):\n str1=\"[MC 1080P]\"\n dialog = xbmcgui.Dialog()\n yes = dialog.yesno(\"Remove add-ons\",str1,\"This Add-on will no longer be operating.\")\n if yes:\n TargetFileRemove(\"plugin.video.mc1080p\",USERPATH1)\n time.sleep(1)\n RebootFlag = True\n\n sPath=os.path.join(USERPATH1,\"plugin.video.placenta\")\n if os.path.exists(sPath):\n str1=\"[Placenta]\"\n dialog = xbmcgui.Dialog()\n yes = dialog.yesno(\"Remove add-ons\",str1,\"This Add-on will no longer be operating.\")\n if yes:\n TargetFileRemove(\"script.placenta.artwork\",USERPATH1)\n TargetFileRemove(\"script.placenta.metadata\",USERPATH1)\n TargetFileRemove(\"plugin.video.placenta\",USERPATH1)\n 
TargetFileRemove(\"script.module.placenta\",USERPATH1)\n time.sleep(1)\n RebootFlag = True\n\n # sPath=os.path.join(USERPATH1,\"plugin.video.exodus\")\n # if os.path.exists(sPath):\n # str1=\"Exodus\"\n # dialog = xbmcgui.Dialog()\n # yes = dialog.yesno(\"Replace add-ons\",str1,\"This Add-on will be replaced with Exodus-Redux\")\n # if yes:\n # TargetFileRemove(\"plugin.video.exodus\",USERPATH1)\n # TargetFileRemove(\"script.exodus.artwork\",USERPATH1)\n # TargetFileRemove(\"script.exodus.metadata\",USERPATH1)\n # TargetFileRemove(\"script.module.exodus\",USERPATH1)\n # TargetFileRemove(\"script.module.exoscrapers\",USERPATH1)\n # TargetFileRemove(\"script.module.exodusscrapers\",USERPATH1)\n # TargetFileRemove(\"plugin.video.mc1080p\",USERPATH1)\n # time.sleep(1)\n # installAddon('2.0.3a','plugin.video.exodusredux')\n # installAddon('1.0.2','script.exodusredux.artwork')\n # installAddon('1.0.2','script.exodusredux.metadata')\n # installAddon('2.0.3','script.module.exodusredux')\n # installAddon('0.0.1.94','script.module.openscrapers')\n # RebootFlag = True\n\n RebootFlag = addons4MagicDragon()\n RebootFlag = addons4Yoda()\n RebootFlag = addons4Tempest()\n RebootFlag = addons4ExodusRedux()\n\n str1 = \"plugin.video.kayo.sports\"\n sPath=os.path.join(USERPATH1,str1)\n if not os.path.exists(sPath) and not os.path.exists(\"/storage/DontAskAgain\"):\n str2=\"This is an add-on that requires a paid subscription.\"\n str3=\"For more information, please visit https://kayosports.com.au\"\n str4=\"Would you like to install it?\"\n dialog = xbmcgui.Dialog()\n if dialog.yesno(\"[Kayo Sports] add-on is available now\",str2,str3,str4):\n TargetFileUpdate(str1,USERPATH1)\n TargetFileUpdate('script.module.arrow',USERPATH1)\n dialog = xbmcgui.Dialog()\n dialog.ok(\"Update new add-on for Spotrs\",\"Kayo Sports\",\"The system will be restarted to enable this add-on.\")\n time.sleep(1)\n RebootFlag = True\n else:\n str2=\"You can install it at any time.\"\n str3=\"Through [On Demand] > [+] menu if you want.\"\n dialog.ok(\"[Kayo Sports] add-on is available now\",str2,str3)\n os.system(\"touch /storage/DontAskAgain\")\n\n TargetFileUpdate('inputstream.adaptive',USERPATH1)\n\n if os.path.exists(os.path.join(__lib__, \"usr/plugin.video.youtube\")):\n TargetFileUpdate(\"usr/plugin.video.youtube\",USERPATH2)\n\n if RebootFlag:\n os.system(\"reboot\")\n\n dis_or_enable_addon(\"pvr.iptvsimple\", \"true\")\n","repo_name":"csu-anzai/LilacTV","sub_path":"Addons/SeeboTV/repo/script.manageChannelIcons/script.manageChannelIcons/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":10159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70550069449","text":"import qkdutils as util\nfrom math import pi\nimport qit\nimport numpy as np\nimport protocols\nimport bb84, b92, e91\n\ndef test_runBB84():\n numTrials = 20\n numBits = 2048\n\n for j in range(numTrials):\n assert(len(simulations.runBB84(numBits, False, 0.0, False)) >= 3*numBits/4)\n assert(simulations.runBB84(numBits, True, 0.0, False) == -1)\n\n\ndef test_simulateB92():\n numTrials = 20\n numBits = 2048\n\n for j in range(numTrials):\n assert(len(simulations.runB92(numBits, False, 0.0, False)) >= 3*numBits/4)\n assert(simulations.runB92(numBits, True, 0.0, False) == -1)\n\n\ndef test_decodeStateB92():\n # Test probabilistic results match our expectations:\n # If sent == basis, measure the value of sent 50% of the time\n # If sent not measured, nothing comes through the filter\n numTrials 
= 50\n numBits = 1024\n sent = util.getRandomBits(numBits)\n bases = util.getRandomBits(numBits)\n tolerance = 0.05\n\n for j in range(numTrials):\n seen = 0\n\n for k in range(numBits):\n q = qit.state('0')\n if sent[k]: q = q.u_propagate(qit.H)\n\n result = b92.decodeState(q, bases[k])\n if result != None:\n seen += 1\n assert(result == sent[k])\n\n # Expect to get ~1/4 of the original key material\n print(float(seen)/numBits)\n assert(abs(float(seen)/numBits - 0.25) < tolerance)\n\n\ndef test_decodeStateBB84_deterministic():\n # Test deterministic cases\n q = qit.state('0')\n assert(bb84.decodeState(q, 0) == False)\n q = q.u_propagate(qit.H)\n assert(bb84.decodeState(q, 1) == False)\n q = qit.state('1')\n assert(bb84.decodeState(q, 0) == True)\n q = q.u_propagate(qit.H)\n assert(bb84.decodeState(q, 1) == True)\n\n\ndef test_decodeStateBB84_probabilistic():\n # Test probabilistic measurement is roughly even\n numTrials = 10000\n tolerance = 0.1 * numTrials\n\n q = qit.state('0')\n counts = [0, 0]\n for j in range(numTrials):\n counts[bb84.decodeState(q, 1)] += 1\n assert abs(counts[0] - counts[1]) < tolerance\n\n q = q.u_propagate(qit.H)\n counts = [0, 0]\n for j in range(numTrials):\n counts[bb84.decodeState(q, 0)] += 1\n assert(abs(counts[0] - counts[1]) < tolerance)\n\n q = qit.state('1')\n counts = [0, 0]\n for j in range(numTrials):\n counts[bb84.decodeState(q, 1)] += 1\n assert(abs(counts[0] - counts[1]) < tolerance)\n\n q = q.u_propagate(qit.H)\n counts = [0, 0]\n for j in range(numTrials):\n counts[bb84.decodeState(q, 0)] += 1\n assert(abs(counts[0] - counts[1]) < tolerance)\n\n\ndef test_discloseHalf():\n numTrials = 128\n\n for j in range(numTrials):\n key1 = util.getRandomBits(j)\n key2 = util.getRandomBits(j)\n announce1, key1, announce2, key2 = util.discloseHalf(key1, key2)\n assert(len(key1) + len(announce1) == j)\n assert(len(key2) + len(announce2) == j)\n\n\ndef test_simulateE91():\n numTrials = 20\n numBits = 2048\n\n for j in range(numTrials):\n assert(len(simulations.runE91(numBits)) >= 3*numBits/4)\n\n\ndef test_encodeBitBB84():\n # Only test the 4 cases in our encoding strategy\n assert(util.equivState(bb84.encodeBit(0,0), qit.state('0')))\n assert(util.equivState(bb84.encodeBit(1,0), qit.state('1')))\n assert(util.equivState(bb84.encodeBit(0,1), qit.state('0').u_propagate(qit.H)))\n assert(util.equivState(bb84.encodeBit(1,1), qit.state('1').u_propagate(qit.H)))\n\n\ndef test_getRandomBits():\n counts = [0, 0]\n numBits = 1024\n numTrials = 100\n tolerance = 0.1 * numBits\n\n for j in range(numTrials):\n bits = util.getRandomBits(numBits)\n counts[0] = len([j for j in bits if j==0])\n counts[1] = len([j for j in bits if j==1])\n print(abs(counts[0]-counts[1]))\n assert(abs(counts[0]-counts[1]) < tolerance)\n\n\ndef test_matchKeysBB84():\n numTrials = 100\n numBits = 256\n\n for j in range(numTrials):\n key1 = util.getRandomBits(numBits)\n bases1 = util.getRandomBits(numBits)\n sent = bb84.encodeKey(key1, bases1)\n bases2 = util.getRandomBits(numBits)\n key2 = []\n for k in range(numBits):\n key2.append(bb84.decodeState(sent[k], bases2[k]))\n\n result1, result2 = bb84.matchKeys(key1, key2, bases1, bases2)\n assert(result1 == result2)\n\n\ndef test_measureEntangledState():\n numTrials = 10000\n\n for j in range(numTrials):\n (basisA, basisB) = e91.chooseAxes(1)\n (A, B) = e91.measureEntangledState(basisA[0], basisB[0])\n if basisA == basisB:\n assert(A != B) # Bob's result must be anti-correlated with 
Alice's\n","repo_name":"cotaylor/qkdsim","sub_path":"tests/test_qkd.py","file_name":"test_qkd.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"32193618721","text":"from imports import *\n\ndef get_titanic_data(use_cache=True):\n # guard clause\n if os.path.exists('titanic.csv') and use_cache:\n print('Using cached csv')\n return pd.read_csv('titanic.csv')\n print('Acquiring data from SQL database')\n df=pd.read_sql('SELECT * FROM passengers',get_db_url('titanic_db'))\n df.to_csv('titanic.csv', index=False)\n return df\n\ndef get_iris_data(use_cache=True):\n if os.path.exists('iris.csv') and use_cache:\n print('Using cached csv')\n return pd.read_csv('iris.csv')\n print('Acquiring data from SQL database')\n df=pd.read_sql(\n '''\n SELECT * FROM measurements\n LEFT JOIN species USING (species_id)\n ''',\n get_db_url('iris_db')) \n df.to_csv('iris.csv', index=False)\n return df\n\ndef get_telco_data(use_cache=True):\n if os.path.exists('telco.csv') and use_cache:\n print('Using cached csv')\n return pd.read_csv('telco.csv')\n print('Acquiring data from SQL database')\n df=pd.read_sql(\n '''\n SELECT *\n FROM customers\n LEFT JOIN internet_service_types USING (internet_service_type_id)\n LEFT JOIN contract_types USING (contract_type_id)\n LEFT JOIN payment_types USING (payment_type_id)\n ''',\n get_db_url('telco_churn')) \n df.to_csv('telco.csv', index=False)\n return df\n\n","repo_name":"hinzle/classification-exercises","sub_path":"acquire.py","file_name":"acquire.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20104664763","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport datetime\nimport freshroastsr700\n\n\nclass Roaster(object):\n def __init__(self, kp=0.06, ki=0.0075, kd=0.01):\n \"\"\"Creates a freshroastsr700 object passing in methods included in this\n class. Set the PID values above to set them in the freshroastsr700\n object.\"\"\"\n self.roaster = freshroastsr700.freshroastsr700(\n self.update_data,\n self.next_state,\n thermostat=True,\n kp=kp,\n ki=ki,\n kd=kd)\n # test vector for driving sr700\n # quick recipe simulation\n # self.recipe = [\n # {\n # 'time_remaining': 60,\n # 'target_temp': 350,\n # 'fan_speed': 9,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 60,\n # 'target_temp': 390,\n # 'fan_speed': 5,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 30,\n # 'target_temp': 420,\n # 'fan_speed': 5,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 60,\n # 'target_temp': 450,\n # 'fan_speed': 5,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 60,\n # 'target_temp': 480,\n # 'fan_speed': 3,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 60,\n # 'target_temp': 500,\n # 'fan_speed': 3,\n # 'state': 'roasting'\n # },\n # {\n # 'time_remaining': 60,\n # 'target_temp': 150,\n # 'fan_speed': 9,\n # 'state': 'cooling'\n # },\n # {\n # 'time_remaining': 1,\n # 'target_temp': 150,\n # 'fan_speed': 1,\n # 'state': 'idle'\n # }\n # ]\n # for dialing in pid params - 3 min. 
short cycle\n        self.recipe = [\n            {\n                'time_remaining': 60,\n                'target_temp': 300,\n                'fan_speed': 9,\n                'state': 'roasting'\n            },\n            {\n                'time_remaining': 60,\n                'target_temp': 350,\n                'fan_speed': 5,\n                'state': 'roasting'\n            },\n            {\n                'time_remaining': 60,\n                'target_temp': 400,\n                'fan_speed': 5,\n                'state': 'roasting'\n            },\n            {\n                'time_remaining': 60,\n                'target_temp': 450,\n                'fan_speed': 4,\n                'state': 'roasting'\n            },\n            {\n                'time_remaining': 30,\n                'target_temp': 150,\n                'fan_speed': 9,\n                'state': 'cooling'\n            },\n            {\n                'time_remaining': 1,\n                'target_temp': 150,\n                'fan_speed': 1,\n                'state': 'idle'\n            }\n        ]\n        # to set up process to begin, call next state to load first state\n        self.active_recipe_item = -1\n        # open file to write temps in CSV format\n        self.file = open(\"sr700_pid_tune.csv\", \"w\")\n        self.file.write(\"Time,crntTemp,targetTemp,heaterLevel\\n\")\n        # get start timestamp\n        self.start_time = datetime.datetime.now()\n\n    def __del__(self):\n        self.file.close()\n\n    def update_data(self):\n        \"\"\"This is a method that will be called every time a packet is opened\n        from the roaster.\"\"\"\n        time_elapsed = datetime.datetime.now() - self.start_time\n        crntTemp = self.roaster.current_temp\n        targetTemp = self.roaster.target_temp\n        heaterLevel = self.roaster.heater_level\n        # print(\n        #     \"Time: %4.6f, crntTemp: %d, targetTemp: %d, heaterLevel: %d\" %\n        #     (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))\n        self.file.write(\n            \"%4.6f,%d,%d,%d\\n\" %\n            (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))\n\n    def next_state(self):\n        \"\"\"This is a method that will be called when the time remaining ends.\n        The current state can be: roasting, cooling, idle, sleeping, connecting,\n        or unknown.\"\"\"\n        self.active_recipe_item += 1\n        if self.active_recipe_item >= len(self.recipe):\n            # we're done!\n            return\n        # show state step on screen\n        print(\"--------------------------------------------\")\n        print(\"Setting next process step: %d\" % self.active_recipe_item)\n        print(\"time:%d, target: %ddegF, fan: %d, state: %s\" %\n              (self.recipe[self.active_recipe_item]['time_remaining'],\n               self.recipe[self.active_recipe_item]['target_temp'],\n               self.recipe[self.active_recipe_item]['fan_speed'],\n               self.recipe[self.active_recipe_item]['state']\n               ))\n        print(\"--------------------------------------------\")\n        # set values for next state\n        self.roaster.time_remaining = (\n            self.recipe[self.active_recipe_item]['time_remaining'])\n        self.roaster.target_temp = (\n            self.recipe[self.active_recipe_item]['target_temp'])\n        self.roaster.fan_speed = (\n            self.recipe[self.active_recipe_item]['fan_speed'])\n        # set state\n        if(self.recipe[self.active_recipe_item]['state'] == 'roasting'):\n            self.roaster.roast()\n        elif(self.recipe[self.active_recipe_item]['state'] == 'cooling'):\n            self.roaster.cool()\n        elif(self.recipe[self.active_recipe_item]['state'] == 'idle'):\n            self.roaster.idle()\n        elif(self.recipe[self.active_recipe_item]['state'] == 'sleeping'):\n            self.roaster.sleep()\n\n    def recipe_total_time(self):\n        total_time = 0\n        for step in self.recipe:\n            total_time += step['time_remaining']\n        return total_time\n\n\nif __name__ == \"__main__\":\n\n    # Create a roaster object.\n    r = Roaster()\n\n    # Connect to the roaster.\n    r.roaster.auto_connect()\n\n    # Wait for the roaster to be connected.\n    while(r.roaster.connected is False):\n        print(\"Please connect your roaster...\")\n        time.sleep(1)\n\n    # get the roast started\n    r.roaster.roast()\n\n    # This ensures the example script does not end before the roast.\n    
time.sleep(10+r.recipe_total_time())\n\n    # Disconnect from the roaster.\n    r.roaster.disconnect()\n","repo_name":"Roastero/freshroastsr700","sub_path":"examples/pid_tune_aid.py","file_name":"pid_tune_aid.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"} +{"seq_id":"20188654441","text":"# Blackjack (2798)\n# https://www.acmicpc.net/problem/2798\n# Solved / took 20 minutes / 21.12.10\n'''\nApproach 1:\nSort in descending order, then take 3 cards, swapping in one smaller card at a time\n\nApproach 2:\nApproach 1 was hard to make exact.\nSums larger than m are skipped; the rest are compared against a separate variable max and stored whenever they beat the existing value.\n\n'''\n\nfrom sys import stdin\nfrom itertools import permutations\n\nn, m = map(int, stdin.readline().rstrip().split())\nnums = list(map(int, stdin.readline().rstrip().split()))\nanswer = []\nmax = -1 \n\nfor datas in list(permutations(nums, 3)):\n    if sum(datas) > m:\n        continue\n    else:\n        tmp = sum(datas)\n        if max < tmp:\n            max = tmp\n\nprint(max)\nexit()\n\n\n# Blackjack (2798)\n# https://www.acmicpc.net/problem/2798\n\ndef find_num(cards):\n    max_num = -1\n    for i in range(0, len(cards)):\n        for j in range(i+1, len(cards)):\n            for k in range(j+1, len(cards)):\n                tmp = cards[i] + cards[j] + cards[k]\n                if tmp <= call_num:\n                    if max_num < tmp:\n                        max_num = tmp\n    return max_num\n\n\n# n cards, and the number the dealer called\ncard_num, call_num = map(int, input().split())\ncards = list(map(int, input().split()))\nprint(find_num(cards))\n","repo_name":"jongwanra/TIL","sub_path":"python_baekjoon/단계별로풀어보기/11-브루트포스/블랙잭(2798).py","file_name":"블랙잭(2798).py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"7342027014","text":"import argparse\nimport functools\nimport json\nimport os\nimport os.path\nimport random\nimport string\nimport StringIO\nimport urllib\n\nimport flask\nfrom flask import request\n\nimport bplib\nimport bplib.json\nimport bplib.objects\nimport bpgen\n\nall_tags = [] # sorted set\ncss_cache = {} # source_name -> str\n\ndef info_for(emote):\n    if hasattr(emote, \"image_url\"):\n        return (emote.image_url, emote.offset[1], emote.offset[0], emote.size[0], emote.size[1])\n    else:\n        return (-1, -1, -1, -1)\n\ndef make_tag_list():\n    global all_tags\n    tmp = set()\n    for source in context.sources.values():\n        for emote in source.emotes.values():\n            tmp |= emote.tags\n    all_tags = sorted(tmp)\n\ncontext = bplib.objects.Context()\ncontext.load_config()\ncontext.load_sources()\nmake_tag_list()\n\nfor source in context.sources.values():\n    source.group_emotes()\n\ndef sync_tags(source):\n    path = \"tags/%s.json\" % (source.name.split(\"/\")[-1])\n    file = open(path, \"w\")\n    bplib.json.dump(source.dump_tags(), file, indent=0, max_depth=1, sort_keys=True)\n\ndef get_css(source_name):\n    if source_name not in css_cache:\n        css_rules = bpgen.build_css(context.sources[source_name].emotes.values())\n        stream = StringIO.StringIO()\n        bpgen.dump_css(stream, css_rules)\n        css_cache[source_name] = stream.getvalue()\n    return css_cache[source_name]\n\napp = flask.Flask(__name__, static_folder=\"tagapp-static\", static_url_path=\"/static\")\napp.jinja_env.globals[\"sorted\"] = sorted\napp.jinja_env.globals[\"urlquote\"] = lambda s: urllib.quote(s, \"\")\n\nsecret_key = \"\".join(random.choice(string.letters) for _ in range(32))\nprint(\"SECRET KEY: %s\" % (secret_key))\n\ndef check_auth(username, password):\n    return str(username) == \"admin\" and str(password) == secret_key\n\ndef requires_auth(f):\n    @functools.wraps(f)\n    def decorated(*args, 
**kwargs):\n        auth = request.authorization\n        if not auth or not check_auth(auth.username, auth.password):\n            return flask.Response(\"Access denied\", 401, {\"WWW-Authenticate\": \"Basic realm=\\\"Login Required\\\"\"})\n        return f(*args, **kwargs)\n    return decorated\n\n@app.route(\"/\")\ndef index():\n    return flask.render_template(\"index.html\", sources=sorted(context.sources), all_tags=all_tags)\n\n@app.route(\"/source/<source_name>\")\ndef tag(source_name):\n    source_name = urllib.unquote(str(source_name))\n    source = context.sources[source_name]\n    emotes = list(source.undropped_emotes())\n    tags = {emote.name: sorted(emote.tags) for emote in emotes}\n    return flask.render_template(\"tag.html\", source=source, given_emotes=sorted(source.emote_groups.items()), tags=tags)\n\n@app.route(\"/source/<source_name>/write\", methods=[\"POST\"])\ndef write(source_name):\n    source_name = urllib.unquote(str(source_name))\n    data = json.loads(request.form[\"tags\"])\n    source = context.sources[source_name]\n    for (name, tags) in data.items():\n        assert isinstance(name, unicode)\n        assert isinstance(tags, list) and all([isinstance(r, unicode) for r in tags])\n        emote = source.emotes[str(name)]\n        emote.tags = set(map(str, tags))\n    sync_tags(source)\n    make_tag_list()\n    return flask.redirect(flask.url_for(\"index\"))\n\n@app.route(\"/source/<source_name>/css\")\ndef css(source_name):\n    source_name = urllib.unquote(str(source_name))\n    return flask.Response(get_css(source_name), mimetype=\"text/css\")\n\n@app.route(\"/tag/<tag>\")\ndef taginfo(tag):\n    tag = str(tag)\n    data = {}\n    for source in context.sources.values():\n        data[source] = []\n        for emote in source.unignored_emotes():\n            if tag in emote.tags:\n                data[source].append(emote)\n        data[source].sort(key=lambda e: e.name)\n        if not data[source]:\n            del data[source]\n    data = sorted(data.items(), key=lambda i: i[0].name)\n    return flask.render_template(\"taginfo.html\", tag=tag, data=data)\n\n@app.route(\"/tag/<tag>/rename\", methods=[\"POST\"])\n@requires_auth\ndef rename_tag(tag):\n    tag = str(tag)\n    to = str(request.form[\"to\"])\n    if not to.startswith(\"+\"):\n        to = \"+\" + to\n    for (source_name, source) in context.sources.items():\n        dirty = False\n        for (name, emote) in source.emotes.items():\n            if tag in emote.tags:\n                emote.tags = emote.tags - {tag} | {to}\n                dirty = True\n        if dirty:\n            sync_tags(source)\n    all_tags.remove(tag)\n    if to not in all_tags:\n        all_tags.append(to)\n        all_tags.sort()\n    return flask.redirect(flask.url_for(\"taginfo\", tag=to))\n\n@app.route(\"/tag/<tag>/delete\", methods=[\"POST\"])\n@requires_auth\ndef delete_tag(tag):\n    tag = str(tag)\n    if not tag.startswith(\"+\"):\n        tag = \"+\" + tag\n    for (source_name, source) in context.sources.items():\n        dirty = False\n        for (name, emote) in source.emotes.items():\n            if tag in emote.tags:\n                emote.tags = emote.tags - {tag}\n                dirty = True\n        if dirty:\n            sync_tags(source)\n    all_tags.remove(tag)\n    return flask.redirect(flask.url_for(\"index\"))\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Emote tagger webapp\")\n    parser.add_argument(\"-d\", \"--debug\", help=\"Enable debug mode\", default=False, action=\"store_true\")\n    args = parser.parse_args()\n\n    app.debug = args.debug\n\n    app.run()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"thejbw/bpm-master","sub_path":"tagapp.py","file_name":"tagapp.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41220201738","text":"#!/usr/bin/env python3\n\n# Created by: Liam Hearty\n# Created on: October 
2019\n# This program Moves sprites on the PyBadge (with borders)\n\n# Circuitpython screen size is 160x120 and sprites are 16x16\nSCREEN_X = 160\nSCREEN_Y = 120\nSCREEN_GRID_X = 16\nSCREEN_GRID_Y = 8\nSPRITE_SIZE = 16\nTOTAL_NUMBER_OF_ALIENS = 5\nFPS = 60\n","repo_name":"Liam-Hearty/ICS3U---FP-Lessons-5","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36780192557","text":"import math\r\nimport time\r\nfrom asyncio.windows_events import NULL\r\nfrom datetime import datetime\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\nfrom pymodbus.client import ModbusSerialClient\r\n\r\n\r\ndef createcall():\r\n wb = Workbook()\r\n sheet1 = wb.active\r\n sheet1.title = \"measurement\"\r\n top = [\"Date\", \"Time\", \"Slave Address\", \"Poll Count\", \"Temperature\", \"Pressure\"]\r\n col = 1\r\n for var in top:\r\n sheet1.cell(row=count, column=col, value=var)\r\n col += 1\r\n wb.save(name)\r\n\r\n\r\ndef filldata(vars):\r\n wb = openpyxl.load_workbook(name)\r\n sheet = wb['measurement']\r\n col = 1\r\n for var in vars:\r\n sheet.cell(row=count + 1, column=col, value=var)\r\n col += 1\r\n wb.save(name)\r\n\r\n\r\ndef endcall():\r\n end = time.time()\r\n totaltime = end - start\r\n wb = openpyxl.load_workbook(name)\r\n sheet2 = wb.create_sheet(\"sheet2\")\r\n sheet2.title = \"results\"\r\n top = [\"Completion Date\", \"Completion Time\", \"Duration\", \"Total Readings\", \"Errors count\", \"Error %\",\r\n \"No response count\", \"No response %\"]\r\n col = 1\r\n for var in top:\r\n sheet2.cell(row=1, column=col, value=var)\r\n col += 1\r\n dati = datetime.now()\r\n date = dati.strftime(\"%d-%m-%Y\")\r\n timee = dati.strftime(\"%H:%M:%S\")\r\n m = (errorcnt / (count - 1)) * 100\r\n n = (norespcnt / (count - 1)) * 100\r\n data = [date, timee, f\"{round(totaltime / 60, 2)} mins\", count - 1, errorcnt, f\"{m}%\", norespcnt, f\"{n}%\"]\r\n col = 1\r\n for var in data:\r\n sheet2.cell(row=2, column=col, value=var)\r\n col += 1\r\n wb.save(name)\r\n print(\"Report generated!\\n\")\r\n\r\n\r\ndef temp():\r\n result = client.read_holding_registers(address=20, slave=add, count=1, unit=1)\r\n ans = result.registers\r\n return round(ans[0] * 0.1, 2)\r\n\r\n\r\ndef pressure():\r\n d1 = {0: 1, 1: 0.1, 2: 0.01, 3: 0.001}\r\n d2 = {0: \"MPa\", 1: \"kPa\", 2: \"Pa\", 3: \"bar\", 4: \"mbar\", 5: \"kg/cm2\", 6: \"psi\", 7: \"mH20\", 8: \"mmH20\"}\r\n pres = client.read_holding_registers(address=2, slave=add, count=3, unit=1)\r\n pf = pres.registers\r\n return [str(pf[2] * d1.get(pf[1])) + \" \" + d2.get(pf[0]), pf[2] * d1.get(pf[1])]\r\n\r\n\r\ndef main():\r\n p = pressure()\r\n te = temp()\r\n if p == NULL or te == NULL:\r\n global norespcnt\r\n norespcnt = norespcnt + 1\r\n if p[1] >= 1.5 or p[1] <= 0.5 or te <= 25 or te >= 32:\r\n global errorcnt\r\n errorcnt = errorcnt + 1\r\n dati = datetime.now()\r\n date = dati.strftime(\"%d-%m-%Y\")\r\n timee = dati.strftime(\"%H:%M:%S\")\r\n\r\n s = [date, timee, add, count, f\"{te} C\", p[0]]\r\n filldata(s)\r\n\r\n print(\"++++++++++++++++\")\r\n print(\"poll count: \", count)\r\n print(\"date & time: \", date, timee)\r\n print(\"slave address: \", add)\r\n print(\"temperature: \", te, \"C\")\r\n print(\"pressure: \", p[0])\r\n print(\"++++++++++++++++\\n\")\r\n\r\n\r\n# defining variables\r\ncount = 1\r\nerrorcnt = 0\r\nnorespcnt = 0\r\n\r\n# connecting with serial port\r\nclient = ModbusSerialClient(method='rtu', 
port='COM2', stopbits=1, bytesize=8, baudrate=9600)\r\nprint(\"\\nSerial port connected: \", client.connect(), \"\\n\")\r\n\r\n# getting slave address of the sensor\r\nadd = int(input(\"enter slave add: \"))\r\nfrequency = int(input(\"Enter frequency in seconds: \"))\r\ntimet = int(input(\"Enter test durations in mins: \"))\r\n\r\n# creating a new file\r\ndt = datetime.now()\r\nd = dt.strftime(\"%d-%m-%Y\")\r\nt = dt.strftime(\"%H-%M-%S\")\r\nname = f\"{add}_{d}_{t}.xlsx\"\r\ncreatecall()\r\n\r\n# starting the test and appending sensor readings in the newly created file\r\nstart = time.time()\r\nprint(\"Testing starts.... \\n\")\r\nwhile count <= math.ceil((timet * 60) / frequency):\r\n main()\r\n time.sleep(frequency)\r\n count = count + 1\r\nprint(\"Testing completed!\\n\")\r\n\r\n# generating a report\r\nendcall()\r\n","repo_name":"atishaygupta123/rs485code","sub_path":"rs495py (1).py","file_name":"rs495py (1).py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19415914469","text":"# nested_spiral.py\nfrom turtle import *\n\nbgcolor('black')\ncolors = ['yellow', 'red', 'green', 'blue', 'pink', 'orange', 'white', 'purple']\nwidth(1)\nspeed(0)\n\ndef spiral(side, round):\n for i in range(int(round/2)):\n pencolor(colors[i % side])\n # pencolor(colors[0])\n forward(i * 2)\n left(360/side + 2)\n\nside = 4\nfor i in range(100):\n penup() \n forward(i * 4)\n pos = position()\n head = heading()\n\n print(pos, head)\n\n pendown()\n spiral(side, i)\n\n setpos(pos)\n setheading(head)\n left(360/side + 8)\n\ndone()","repo_name":"adadesions/python-afternoon","sub_path":"worksheet/2021/nested_spiral.py","file_name":"nested_spiral.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42321595251","text":"__all__ = (\n 'Mroute'\n)\n\n# Python\nimport weakref\nimport functools\n\n# Genie\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import ConfigurableBase\nfrom genie.libs.conf.base import IPv4Address, IPv6Address, IPv4Interface, IPv6Interface\n\n\n@functools.total_ordering\nclass Mroute(ConfigurableBase):\n\n @property\n def testbed(self):\n return self.device.testbed\n\n @property\n def device(self):\n return self._device()\n\n # mroute_address\n mroute_address = managedattribute(\n name='mroute_ip_address',\n default=None,\n type=(None, IPv4Address, IPv6Address),\n doc=\"Configure 'ip mroute' or 'ipv6 mroute' on the device.\")\n\n # mroute_prefix_mask\n mroute_prefix_mask = managedattribute(\n name='mroute_prefix_mask',\n default=None,\n type=(None, managedattribute.test_istype(int)),\n doc=\"Configure 'ip/ipv6 mroute' prefix mask on the device.\")\n\n # mroute_neighbor_address\n mroute_neighbor_address = managedattribute(\n name='mroute_neighbor_address',\n default=None,\n type=(None, IPv4Address, IPv6Address),\n doc=\"Configure 'ip/ipv6 mroute' neighbor address on the device.\")\n \n # mroute_interface_name\n mroute_interface_name = managedattribute(\n name='mroute_interface_name',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc=\"Configure 'ip/ipv6 mroute' interface name on the device.\")\n \n # mroute_admin_distance\n mroute_admin_distance = managedattribute(\n name='mroute_admin_distance',\n default=None,\n type=(None, managedattribute.test_istype(int)),\n doc=\"Configure 'ip/ipv6 mroute' admin distance on the device.\")\n\n # mroute_vrf\n 
mroute_vrf = managedattribute(\n        name='mroute_vrf',\n        default=None,\n        type=(None, managedattribute.test_istype(str)),\n        doc=\"Configure 'ip/ipv6 mroute' VRF on the device.\")\n\n    # ==========================================================================\n\n    # Overload __eq__\n    def __eq__(self, other):\n        if not isinstance(other, Mroute):\n            return NotImplemented\n        \n        return (self.mroute_address,\n                self.mroute_prefix_mask,\n                self.mroute_neighbor_address,\n                self.mroute_admin_distance,\n                self.mroute_vrf,\n                self.mroute_interface_name,\n                self.device) == \\\n               (other.mroute_address,\n                other.mroute_prefix_mask,\n                other.mroute_neighbor_address,\n                other.mroute_admin_distance,\n                other.mroute_vrf,\n                other.mroute_interface_name,\n                other.device)\n\n    # Overload __lt__\n    def __lt__(self, other):\n        if not isinstance(other, Mroute):\n            raise NotImplementedError(\"Cannot compare '{s}' to a '{o}'\".format(s=type(self), o=type(other)))\n\n        # Comparing same types (both v4 or both v6)\n        if type(self.mroute_address) == type(other.mroute_address):\n            return self.mroute_address < other.mroute_address\n        # Comparing mismatched types\n        else:\n            self_addr = str(self.mroute_address)\n            other_addr = str(other.mroute_address)\n            return self_addr < other_addr\n    \n    # Overload __hash__\n    def __hash__(self):\n        return hash((self.mroute_address,\n                     self.mroute_prefix_mask,\n                     self.mroute_neighbor_address,\n                     self.mroute_admin_distance,\n                     self.mroute_vrf,\n                     self.mroute_interface_name,\n                     self.device))\n\n    # Overload __repr__\n    def __repr__(self):\n        if isinstance(self.mroute_address, IPv6Address):\n            return '%s object at 0x%x with ipv6 address %s/%s' % (\n                self.__class__.__name__,\n                id(self),\n                self.mroute_address,\n                self.mroute_prefix_mask)\n        else:\n            return '%s object at 0x%x with ip address %s/%s' % (\n                self.__class__.__name__,\n                id(self),\n                self.mroute_address,\n                self.mroute_prefix_mask)\n\n\n    def __init__(self, device, *args, **kwargs):\n        self._device = weakref.ref(device)\n        super().__init__(*args, **kwargs)","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/conf-pkg/src/genie/libs/conf/mcast/mroute.py","file_name":"mroute.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"32852198065","text":"def find(k):\n    while(1):\n        n=k\n        while(n>0):\n            r=n%10\n            if(r!=0 and k%r!=0):\n                break\n            n=n//10\n        if(n==0):\n            return(k)\n            break\n        k+=1\nt=int(input())\nfor _ in range(t):\n    n=int(input())\n    print(find(n))\n    \n    \n","repo_name":"Pulkit3108/Codeforces-Problems","sub_path":"Round#692(Div. 
2)B.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40708423771","text":"'''\r\nComputer Animation: Project 6, Python Scripting\r\n\r\nAuthor: Alex Fantine\r\n\r\nDate: 4/24/2020\r\n'''\r\n\r\nimport maya.cmds as cmds\r\nimport random\r\nimport functools\r\n\r\n'''\r\nCreates the UI to select the number of 'shield' pieces in the animation.\r\n'''\r\ndef createUI( pWindowTitle, pApplyCallback ):\r\n windowID = 'cells_window'\r\n if cmds.window( windowID, exists=True ):\r\n cmds.deleteUI( windowID )\r\n\r\n cmds.window( windowID, title=pWindowTitle, sizeable=False, resizeToFitChildren=True )\r\n\r\n #determines layout for UI, columnWidth =[(column_index, width in px)...]\r\n cmds.rowColumnLayout( numberOfColumns=2, columnWidth=[ (1,100), (2,100) ])\r\n cmds.text( label='Num Blood Cells:' )\r\n num_shield_pieces = cmds.intField(value = 50)\r\n\r\n cmds.separator( h=10, style='none' ) #blank space, for aesthetics\r\n cmds.separator( h=10, style='none' )\r\n cmds.separator( h=10, style='none' )\r\n cmds.separator( h=10, style='none' )\r\n\r\n cmds.button( label='Animate', command=functools.partial( pApplyCallback,\r\n num_shield_pieces,\r\n windowID ) )\r\n\r\n '''\r\n Allows the user to exit before the animation is generated.\r\n '''\r\n def cancelCallback( *pArgs ):\r\n if cmds.window( windowID, exists=True ):\r\n cmds.deleteUI( windowID )\r\n\r\n cmds.button( label='Exit', command=cancelCallback )\r\n cmds.showWindow()\r\n\r\n'''\r\nCallback function to being creating the animation. Sets the number of 'shield' pieces\r\nand calls method to start the animation. This solution was used to prevent the animation\r\nfrom starting until the user had selected a number of pieces in the UI window.\r\n'''\r\ndef applyCallback( p_num_shield_pieces, p_window_ID, *pArgs ):\r\n p_num_shield_pieces = cmds.intField( p_num_shield_pieces, query=True, value=True )\r\n\r\n #deletes the UI window once generate has been pressed\r\n if cmds.window( p_window_ID, exists=True ):\r\n cmds.deleteUI( p_window_ID )\r\n\r\n #run the rest of the script once generate has been pressed\r\n run_script(p_num_shield_pieces)\r\n\r\n'''\r\nCreates a new shader of specified p_shader_type, and sets the shader of p_object to\r\nthe newly created shader object.\r\n'''\r\ndef applyMaterial(p_object, p_color, p_shader_type):\r\n if cmds.objExists(p_object):\r\n shd = cmds.shadingNode(p_shader_type, name=\"%s_%s\" % (p_object, p_shader_type), asShader=True)\r\n\r\n #p_color is a list of [r,b,g]\r\n cmds.setAttr(shd+\".color\", p_color[0], p_color[1], p_color[2], type=\"double3\")\r\n shdSG = cmds.sets(name='%sSG' % shd, empty=True, renderable=True, noSurfaceShader=True)\r\n\r\n cmds.connectAttr('%s.outColor' % shd, '%s.surfaceShader' % shdSG)\r\n cmds.sets(p_object, e=True, forceElement=shdSG)\r\n\r\n'''\r\nCreates the 'shield' of shapes outside the core.\r\np_shield_shape is the shape to be duplicated\r\nThe shape of the shield pieces is based on the passed in parameter, so it can be\r\nadjusted more efficiently when this method is called.\r\n\r\nReturns a python list of each shield piece, along with the name of the shield group.\r\n'''\r\ndef generate_shield(p_shield_shape, p_num_shield_pieces):\r\n random.seed( 1234 )\r\n transformName = p_shield_shape[0]\r\n instanceGroupName = cmds.group( empty=True, name=transformName + '_instance_grp#' )\r\n\r\n #to keep track of the actual instances for returning\r\n instance_group = 
[]\r\n\r\n for i in range( 0, p_num_shield_pieces ):\r\n instanceResult = cmds.instance( transformName, name=transformName + '_instance#' )\r\n cmds.parent( instanceResult, instanceGroupName )\r\n\r\n x = random.uniform( -10, 10 )\r\n y = random.uniform( 0, 20 )\r\n z = random.uniform( -10, 10 )\r\n\r\n cmds.move( x, y, z, instanceResult )\r\n\r\n xRot = random.uniform( 0, 360 )\r\n yRot = random.uniform( 0, 360 )\r\n zRot = random.uniform( 0, 360 )\r\n\r\n #no rotation needed since the pieces will be aimed at the core later\r\n\r\n scalingFactor = random.uniform( 0.3, 1.15 )\r\n cmds.scale( scalingFactor, 1.0, scalingFactor, instanceResult )\r\n instance_group.append(instanceResult)\r\n\r\n cmds.hide( transformName )\r\n cmds.xform( instanceGroupName, centerPivots=True )\r\n return instance_group, instanceGroupName\r\n\r\n'''\r\nMethod to create aim constraints from the shield pieces to the core.\r\n'''\r\ndef aim_at_first(p_shield_core, p_shield_pieces):\r\n targetName = p_shield_core\r\n for objectName in p_shield_pieces:\r\n cmds.aimConstraint( targetName, objectName, aimVector=[0,1,0] )\r\n\r\n'''\r\nMethod used in the main rotation of the shield around the core.\r\n'''\r\ndef key_rotation(p_rotation_obj, pTargetAttribute, p_time_in, p_time_out):\r\n cmds.cutKey( p_rotation_obj, time=(p_time_in, p_time_out), attribute=pTargetAttribute )\r\n cmds.setKeyframe( p_rotation_obj, time=p_time_in, attribute=pTargetAttribute, value=0 )\r\n cmds.setKeyframe( p_rotation_obj, time=p_time_out, attribute=pTargetAttribute, value=360 )\r\n cmds.selectKey( p_rotation_obj, time=(p_time_in, p_time_out), attribute=pTargetAttribute, keyframe=True )\r\n cmds.keyTangent( inTangentType='linear', outTangentType='linear' )\r\n\r\n'''\r\nProvides the shield pieces with the ability to 'expand' away from the core.\r\n\r\nReturns the name of the locator group.\r\n'''\r\ndef expand_from_first(p_target, p_shield_pieces):\r\n targetName = p_target[0] #just get the target obj, not its underlying shape (led to error)\r\n locatorGroupName = cmds.group( empty=True, name='expansion_locator_grp#' )\r\n maxExpansion = 100\r\n newAttributeName = 'expansion'\r\n #initial creation of the expansion attribute\r\n if not cmds.objExists( '%s.%s' % ( targetName, newAttributeName ) ):\r\n cmds.select( targetName )\r\n cmds.addAttr( longName=newAttributeName, shortName='exp',\r\n attributeType='double', min=0, max=maxExpansion,\r\n defaultValue=maxExpansion, keyable=True )\r\n\r\n for objectName in p_shield_pieces:\r\n #somehow the objects are each their own list, so just get the name of the obj\r\n objectName = objectName[0]\r\n coords = cmds.getAttr( '%s.translate' % ( objectName ) )[0]\r\n locatorName = cmds.spaceLocator( position=coords, name='%s_loc#' % ( objectName ) )[0]\r\n cmds.xform( locatorName, centerPivots=True )\r\n cmds.parent( locatorName, locatorGroupName )\r\n pointConstraintName = cmds.pointConstraint( [ targetName, locatorName ], objectName, name='%s_pointConstraint#' % ( objectName ) )[0]\r\n cmds.expression( alwaysEvaluate=True,\r\n name='%s_attractWeight' % ( objectName ),\r\n object=pointConstraintName,\r\n string='%sW0=%s-%s.%s' % ( targetName, maxExpansion, targetName, newAttributeName ) )\r\n\r\n cmds.connectAttr( '%s.%s' % ( targetName, newAttributeName ),\r\n '%s.%sW1' % ( pointConstraintName, locatorName ) )\r\n\r\n\r\n cmds.xform( locatorGroupName, centerPivots=True )\r\n return locatorGroupName\r\n\r\n'''\r\nKeyframes the expansion attribute at a lower value and a higher value.\r\n'''\r\ndef 
expansion(p_shield_center, time_in, time_out, exp_in_value = 0, exp_out_value = 100):\r\n    cmds.setKeyframe( p_shield_center, time=time_in, attribute='expansion', value=exp_in_value )\r\n    cmds.setKeyframe( p_shield_center, time=time_out, attribute='expansion', value=exp_out_value )\r\n\r\n'''\r\nCreates the heartbeat effect, given some starting time for the beat.\r\n'''\r\ndef heartbeat(p_core, p_start_time):\r\n    expansion(p_core, str(p_start_time) + 'sec', str(p_start_time) + '.5sec', 10, 100)\r\n    expansion(p_core, str(p_start_time) + '.6sec', str(p_start_time + 1) + 'sec', 50, 100)\r\n    expansion(p_core, str(p_start_time + 1) + '.1sec', str(p_start_time + 1) + '.9sec', 80, 10)\r\n\r\n'''\r\nMain execution of the animation, called immediately after the UI window has been closed.\r\n'''\r\ndef run_script(num_shield_pieces = 50):\r\n    RED = [1, 0, 0] #red blood cells\r\n    BLUE = [0, 0, 1] #blue for the core, aka Sonic\r\n    DARK_RED = [.545, 0, 0]\r\n\r\n    #create the lighting for the scene\r\n    main_light = cmds.directionalLight(name = \"main_light\", intensity = 5)\r\n    cmds.move(-5.711, 14.564, 11.097, \"main_light\")\r\n    cmds.rotate('-67.367deg', '-24.33deg', '54.557deg', \"main_light\")\r\n\r\n    #create the shape of the core and the shield\r\n    shield_shape = cmds.polyTorus (sr = 0.2, name=\"myRing#\") #shape of shield\r\n    shield_center = cmds.polyPlatonicSolid (radius=2.55, st=1, name=\"core\")\r\n    applyMaterial(shield_center[0], DARK_RED, 'lambert')\r\n    cmds.move(0, 9, 0, \"core\") #move the core higher\r\n\r\n    #add decorative cylinder to core\r\n    core_piece_1 = cmds.polyCylinder(name=\"tube_1\")\r\n    applyMaterial(core_piece_1[0], BLUE, 'lambert')\r\n    cmds.move(0.195, 11.014, -1.221, \"tube_1\")\r\n    cmds.rotate('-30.351deg', 0, 0, \"tube_1\")\r\n    cmds.scale(0.505, 0.619, 0.505, \"tube_1\")\r\n\r\n    #add another decorative cylinder to core\r\n    core_piece_2 = cmds.polyCylinder(name=\"tube_2\")\r\n    applyMaterial(core_piece_2[0], RED, 'lambert')\r\n    cmds.move(-0.917, 11.185, -0.216, \"tube_2\")\r\n    cmds.rotate('-3.436deg', '14.201deg', '24.834deg', \"tube_2\")\r\n    cmds.scale(0.505, 0.619, 0.505, \"tube_2\")\r\n\r\n    #generate random shield fragments\r\n    shield_pieces, shield_pieces_group = generate_shield(shield_shape, num_shield_pieces)\r\n\r\n    #coloring the shield pieces\r\n    for piece_obj in shield_pieces:\r\n        applyMaterial(piece_obj[0], RED, 'phong')\r\n\r\n    #aim fragments at core\r\n    aim_at_first(shield_center, shield_pieces)\r\n\r\n    #create and link expansion attribute\r\n    locator_group = expand_from_first(shield_center, shield_pieces)\r\n\r\n    #must group locators so they can be rotated with the fragments\r\n    cmds.parent(locator_group, shield_pieces_group)\r\n\r\n    startTime = cmds.playbackOptions( query=True, minTime=True )\r\n    endTime = cmds.playbackOptions( query=True, maxTime=True )\r\n\r\n    #second param is rotation param\r\n    key_rotation(shield_pieces_group, 'rotateY', startTime, endTime)\r\n\r\n    #create heartbeat animation pattern\r\n    cmds.cutKey( shield_center, time=(startTime, endTime), attribute='expansion')\r\n    heartbeat(shield_center, 0)\r\n    heartbeat(shield_center, 2)\r\n    heartbeat(shield_center, 4)\r\n    heartbeat(shield_center, 6)\r\n    heartbeat(shield_center, 8)\r\n    heartbeat(shield_center, 10)\r\n    cmds.selectKey( shield_center, time=(startTime, endTime), attribute='expansion', keyframe=True )\r\n    cmds.keyTangent( inTangentType='linear', outTangentType='linear' )\r\n\r\nif __name__ == '__main__':\r\n    #set length of animation to 12 seconds\r\n    cmds.playbackOptions( minTime='0sec', 
maxTime='12sec' )\r\n\r\n    #start the UI \r\n    createUI( 'Blood Cells', applyCallback )\r\n","repo_name":"ajfantine/maya_animation","sub_path":"full_script.py","file_name":"full_script.py","file_ext":"py","file_size_in_byte":10812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"69947458570","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\nimport sqlite3\nimport os, sys\n\nwork_dir = os.path.dirname(os.path.abspath(__file__))\n\nMOVIE_TABLE = '''\n    CREATE TABLE DoubanMovie(\n        id INT PRIMARY KEY NOT NULL,\n        title TEXT NOT NULL,\n        alias TEXT,\n        cover_url TEXT,\n        year INT,\n        date CHAR(12),\n        rating CHAR(6),\n        imdb CHAR(12),\n        type CHAR(6),\n        abstract TEXT,\n        abstract_2 TEXT,\n        descript TEXT\n    );\n'''\n\n\n\nclass database():\n    # initialize: open the database \n    def __init__(self, file):\n        if not os.path.exists(file):\n            self._create_db(file)\n        else:\n            self._open_db(file)\n\n    # open or create the sqlite3 database \n    def _open_db(self, file):\n        self.db = sqlite3.connect(file)\n    def _create_db(self, file):\n        self._open_db(file)\n        self.exec_write(MOVIE_TABLE)\n\n    # raw read/write operations \n    def exec_read(self, sql):\n        return self.db.execute(sql)\n    def exec_write(self, sql):\n        c = self.db.cursor()\n        c.execute(sql)\n        c.close()\n        self.db.commit()\n    \n    # cleanup on close \n    def __del__(self):\n        self.db.close()\n\nclass movie():\n    def __init__(self):\n        self.mdb_file = os.path.abspath(os.path.join(work_dir, '..', 'data', 'movie.db'))\n        self.mdb = database(self.mdb_file)\n\n    def __del__(self):\n        pass\n    \n    # check whether a movie id is already in the database \n    def check_movie_in_sql(self, movie_id):\n        sqlstr = 'select count(*) from DoubanMovie where id=\"{0}\";'.format(movie_id)\n        conn = self.mdb.exec_read(sqlstr)\n        for row in conn:\n            if row[0] != 0:\n                return True\n            return False\n        return False\n\n    # all movies later than a given year \n    def get_movie_after_year(self, year):\n        sqlstr = 'select id from DoubanMovie where year>{0};'.format(int(year))\n        return self.mdb.exec_read(sqlstr)\n    \n    def get_all_movie_id(self):\n        sqlstr = 'select id from DoubanMovie'\n        return self.mdb.exec_read(sqlstr).fetchall()\n\n    # insert data \n    def save_movie2sql(self, info):\n        sqlstr = 'INSERT INTO DoubanMovie (id,title,alias,cover_url,year,date,rating,imdb,type,abstract,abstract_2,descript) VALUES ({});'.format(\n            '{},'.format(info.get('id')) + \n            '\"{}\",'.format(info.get('title').replace('\"', '&quot;')) +\n            '\"{}\",'.format(info.get('alias').replace('\"', '&quot;')) +\n            '\"{}\",'.format(info.get('cover_url')) +\n            '{},'.format(info.get('year')) + \n            '\"{}\",'.format(info.get('date')) + \n            '\"{}\",'.format(info.get('rating')) +\n            '\"{}\",'.format(info.get('imdb')) +\n            '\"{}\",'.format(info.get('type')) +\n            '\"{}\",'.format(info.get('abstract').replace('\"', '&quot;')) + \n            '\"{}\",'.format(info.get('abstract_2').replace('\"', '&quot;')) + \n            '\"{}\"'.format(info.get('descript').replace('\"', '&quot;'))\n        )\n        # print(sqlstr)\n        try:\n            self.mdb.exec_write(sqlstr)\n        except:\n            print(info)\n            exit(0)\n    \n    # update data \n    def update_move2sql(self, info):\n        sqlstr = 'UPDATE DoubanMovie SET {0} WHERE {1};'.format(\n            'title=\"{}\",'.format(info.get('title')) +\n            'alias=\"{}\",'.format(info.get('alias')) +\n            'cover_url=\"{}\",'.format(info.get('cover_url')) +\n            'year={},'.format(info.get('year')) + \n            'date=\"{}\",'.format(info.get('date')) + \n            'rating=\"{}\",'.format(info.get('rating')) +\n            'imdb=\"{}\",'.format(info.get('imdb')) +\n            'type=\"{}\",'.format(info.get('type')) +\n            'abstract=\"{}\",'.format(info.get('abstract')) + \n            'abstract_2=\"{}\",'.format(info.get('abstract_2')) + \n            
'descript=\"{}\"'.format(info.get('descript')),\n 'id={},'.format(info.get('id'))\n )\n self.mdb.exec_write(sqlstr)\n \n # 时间完善 \n def get_movie_year(self):\n sqlstr = 'SELECT id,year,date FROM DoubanMovie'\n return self.mdb.exec_read(sqlstr)\n\n def update_year(self, id, year):\n sqlstr = \"UPDATE DoubanMovie SET year='{0}' WHERE id='{1}';\".format(year, id)\n # print(sqlstr)\n self.mdb.exec_write(sqlstr)","repo_name":"Kerrbty/movie_spy","sub_path":"src/module/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28765872965","text":"def solution(gems):\n answer = [1, len(gems)]\n\n dic = dict()\n \n total = len(set(gems))\n start, end = 0, 0\n \n while end <= len(gems):\n if len(dic) == total:\n if end - start - 1 < answer[1] - answer[0]:\n answer = [start + 1, end]\n\n if dic[gems[start]] <= 1:\n del dic[gems[start]]\n else:\n dic[gems[start]] -= 1\n start += 1\n\n else:\n if end < len(gems):\n if gems[end] not in dic.keys():\n dic[gems[end]] = 1\n else:\n dic[gems[end]] += 1 \n end += 1\n\n return answer","repo_name":"namu1714/Algorithm-Problem-Solving","sub_path":"기타/python/programmers_보석쇼핑.py","file_name":"programmers_보석쇼핑.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"845986273","text":"\"\"\"\nThis module contains base classes for library events.\n\"\"\"\nfrom __future__ import annotations\n\nimport abc\nimport dataclasses\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Type\n from typing import Callable\n from .models import OAuthToken\n\n__all__ = (\n \"Eventable\",\n \"BaseEvent\",\n \"ClientAddEvent\",\n \"ClientUpdateEvent\",\n)\n\n\nclass Eventable(abc.ABC):\n \"\"\"Abstract for classes that handle events\"\"\"\n\n __slots__ = (\"_listeners\",)\n\n def __init__(self) -> None:\n self._listeners: dict[str, list[Callable]] = {}\n\n def _register_event(self, event: Type[BaseEvent]) -> None:\n r\"\"\"Registers an event\n\n :param event: Event type to register\n :type event: Type[BaseEvent]\n \"\"\"\n self._listeners[event._name] = []\n\n def _register_listener(self, func: Callable, event: Type[BaseEvent]) -> None:\n r\"\"\"Registers an event listener\n\n :param func: Function to call when event is emitted\n :type func: Callable\n :param event: Event type to listen for\n :type event: Type[BaseEvent]\n\n :raises NotImplementedError: If event is not implemented\n \"\"\"\n if event._name not in self._listeners:\n raise NotImplementedError(f\"{event!r}\")\n self._listeners[event._name].append(func)\n\n async def _process_event(self, event: BaseEvent) -> None:\n r\"\"\"Processes an event\n\n :param event: Event to process\n :type event: BaseEvent\n\n :raises NotImplementedError: If event is not implemented\n \"\"\"\n if event._name not in self._listeners:\n raise NotImplementedError(f\"{event!r}\")\n for listener in self._listeners[event._name]:\n await listener(event)\n\n\n@dataclasses.dataclass\nclass BaseEvent(abc.ABC):\n \"\"\"Abstract for event classes\"\"\"\n\n _name = \"BaseEvent\"\n\n\n@dataclasses.dataclass\nclass ClientAddEvent(BaseEvent):\n \"\"\"Event for when a client is added\"\"\"\n\n _name = \"ClientAddEvent\"\n session_id: int\n \"\"\"0 if app client\"\"\"\n client: Eventable\n \"\"\"The client that was added\"\"\"\n\n\n@dataclasses.dataclass\nclass ClientUpdateEvent(BaseEvent):\n \"\"\"Event for when a client 
is updated\"\"\"\n\n _name = \"ClientUpdateEvent\"\n client: Eventable\n old_token: OAuthToken\n new_token: OAuthToken\n","repo_name":"NiceAesth/aiosu","sub_path":"aiosu/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"70950748168","text":"# USAGE\n# python text_detection.py -i oriImgs/lebron_james.jpg \n\nimport cv2\nimport time\nimport numpy as np\nimport argparse\nfrom imutils.object_detection import non_max_suppression\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", type = str, help = \"path to input image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ncv2.imshow('image', image)\ncv2.waitKey(0)\nori = image.copy()\n(H, W) = image.shape[:2]\n\n(newW, newH) = (320, 320)\nrW = W / float(newW)\nrH = H / float(newH)\n\nimage = cv2.resize(image, (newW, newH))\n(H, W) = image.shape[:2]\n\n# 为EAST检测模型定义两个输出层名字:第一个是文本得分,第二个是文本形状\nlayerNames = [\n \"feature_fusion/Conv_7/Sigmoid\",\n \"feature_fusion/concat_3\"\n]\n\n# 加载预训练EAST文本检测模型\nnet = cv2.dnn.readNet('./models/frozen_east_text_detection.pb')\n\n# 从图像中构建一个blob,然后执行一个forward,获得两个集合\nblob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB = True, crop = False)\nstart = time.time()\nnet.setInput(blob)\n(scores, geometry) = net.forward(layerNames)\nend = time.time()\nprint(\"[INFO] text detection took {:.6f} seconds\".format(end - start))\n\n(numRows, numCols) = scores.shape[2:4]\nrects = []\nconfidences = []\nminConfidence = 0.5\n\nfor y in range(0, numRows):\n # 提取分数和包围文本的方框几何坐标\n scoresData = scores[0, 0, y]\n xData0 = geometry[0, 0, y]\n xData1 = geometry[0, 1, y]\n xData2 = geometry[0, 2, y]\n xData3 = geometry[0, 3, y]\n anglesData = geometry[0, 4, y]\n\n for x in range(0, numCols):\n if scoresData[x] < minConfidence:\n continue\n\n # 特征图比输入图小四倍,所以需要乘4\n (offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n angle = anglesData[x]\n cos = np.cos(angle)\n sin = np.sin(angle)\n\n h = xData0[x] + xData2[x]\n w = xData1[x] + xData3[x]\n\n endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n startX = int(endX - w)\n startY = int(endY - h)\n\n rects.append((startX, startY, endX, endY))\n confidences.append(scoresData[x])\n\n# 使用极大值抑制\nboxes = non_max_suppression(np.array(rects), probs = confidences)\n\nfor (startX, startY, endX, endY) in boxes:\n startX = int(startX * rW)\n startY = int(startY * rH)\n endX = int(endX * rW)\n endY = int(endY * rH)\n\n cv2.rectangle(ori, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\ncv2.imshow('Text Detection', ori)\ncv2.imwrite('./resImgs/result.jpg', ori)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Ayonksh/pycv-training","sub_path":"08.text_detection/text_detection.py","file_name":"text_detection.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"19581430171","text":"class PersonTitles:\n TITLES = ('Dr', 'Mr', \"Mrs\", \"Ms\")\n\n def __init__(self, title, name, surname, allowed_titles=TITLES):\n if title not in allowed_titles:\n raise ValueError(\"%s is not a valid title. 
\" % title)\n\n self.title = title\n self._name = name\n self.surname = surname\n\n def getName(self):\n return self._name\n\n def setName(self, new_name):\n # validate the input...\n self._name = new_name\n\n def __str__(self):\n return f\"{self.title} {self._name}\"\n\n\nif __name__ == '__main__':\n try:\n error = PersonTitles(\"Sir\", \"John\", \"Smith\")\n except Exception as err:\n print(err)\n person = PersonTitles(\"Dr\", \"john\", \"Smith\")\n print(person)\n person.setName(\"John\")\n print(person)","repo_name":"EmeraldVoltron/PythonFall2023Module10","sub_path":"lecture/person_2.py","file_name":"person_2.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74535879368","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.views import defaults as default_views\n\nfrom gobgift.api.views import FacebookLogin, GoogleLogin\nfrom gobgift.core.views import home, logout, done, app, privacy_policy\nfrom gobgift.groups.views import ListGroupAutocomplete\nfrom gobgift.groups.views import UserAutocomplete\n\nurlpatterns = [\n path('', home, name=\"home\"),\n path('app/', app, name=\"app\"),\n path('login/', home),\n path('logout/', logout),\n path('done/', done, name='done'),\n path('privacy/', privacy_policy, name='privacy'),\n path(settings.ADMIN_URL, admin.site.urls),\n path(r'accounts', include('allauth.urls')),\n\n path(r'lists/', include(('gobgift.wishlists.urls', 'gobgift.wishlists'), namespace='lists')),\n path(r'gifts/', include(('gobgift.gifts.urls', 'gobgift.gifts'), namespace='gifts')),\n path(r'groups/', include(('gobgift.groups.urls', 'gobgift.groups'), namespace='groups')),\n\n # DjangoRestFramework\n path(r'api/', include(('gobgift.api.urls', 'gobgift.api'), namespace='api')),\n path(r'api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path(r'rest-auth/', include('rest_auth.urls')),\n path(r'rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'),\n path(r'rest-auth/google/', GoogleLogin.as_view(), name='rest_google_login'),\n\n # Django autocomplete light\n path(r'user-autocomplete/', UserAutocomplete.as_view(), name=\"user-autocomplete\"),\n path(r'listgroup-autocomplete/', ListGroupAutocomplete.as_view(), name=\"listgroup-autocomplete\"),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path('400/', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),\n path('403/', default_views.permission_denied,\n kwargs={'exception': Exception('Permission Denied')}),\n path('404/', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),\n path('500/', default_views.server_error),\n path('__debug__/', include(debug_toolbar.urls)),\n # url(r'^docs/$', serve, {'document_root': settings.DOCS_ROOT, 'path': 'index.html'}),\n # url(r'^docs/(?P.*)$', serve, {'document_root': settings.DOCS_ROOT}),\n ]\n","repo_name":"kimond/gobgift","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"27444431979","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams[\"figure.figsize\"] = (20,10)\n\n# Copied from local .ipynb\n\n# PPO_TF2\n\n# Comparison with Different Number of Neurons in Hidden Layer\n# Dense(512, activation=\"elu\", kernel_initializer='he_uniform')\n#mean_rewards_experiment_0 = pd.read_csv('PPO_TF2_mean_rewards_20220810213523.csv', index_col='Unnamed: 0') # 100 Eps\nmean_rewards_experiment_05 = pd.read_csv('PPO_TF2_mean_rewards_20220811_091620.csv', index_col='Unnamed: 0')\n\n# Dense(64, activation=\"elu\", kernel_initializer='he_uniform')\nmean_rewards_experiment_1 = pd.read_csv('PPO_TF2_mean_rewards_20220810222837.csv', index_col='Unnamed: 0') # 100 Eps\nmean_rewards_experiment_2 = pd.read_csv('PPO_TF2_mean_rewards_20220811074852.csv', index_col='Unnamed: 0') # 100 Eps\n\n# Dense(256, activation=\"elu\", kernel_initializer='he_uniform')(X)\nmean_rewards_experiment_3 = pd.read_csv('PPO_TF2_mean_rewards_20220811_091518.csv', index_col='Unnamed: 0')\n\n# Dense(4, activation=\"elu\", kernel_initializer='he_uniform')\nmean_rewards_experiment_4 = pd.read_csv('PPO_TF2_mean_rewards_20220811_083648.csv', index_col='Unnamed: 0') # 800 Eps, saved model: PongDeterministic-v4_APPO_0.0001_Actor_Dense4\n\nplt.plot(mean_rewards_experiment_05, label = \"512 Neurons\")\nplt.plot(mean_rewards_experiment_1, label = \"64 Neurons\")\nplt.plot(mean_rewards_experiment_2, label = \"64 Neurons\") \nplt.plot(mean_rewards_experiment_3, label = \"256 Neurons\")\nplt.plot(mean_rewards_experiment_4, label = \"4 Neurons\")\nplt.legend()\nplt.title('Performance for Differing Counts of Neurons')\nplt.show()\n\n#------------------------------------------------------------------------------------------------------------\n# Outcome reliability analysis\n\norc1 = pd.read_csv('mean_rewards_0813_12-41-59_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison.csv', index_col='Episode')\norc2 = pd.read_csv('mean_rewards_0814_07-43-45_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison.csv', index_col='Episode')\norc3 = pd.read_csv('mean_rewards_0814_22-42-14_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc4 = pd.read_csv('mean_rewards_0815_08-00-13_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc5 = pd.read_csv('mean_rewards_0815_18-21-55_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc6 = pd.read_csv('mean_rewards_0816_06-58-45_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc7 = pd.read_csv('mean_rewards_0816_14-39-46_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc8 = pd.read_csv('mean_rewards_0816_22-45-19_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\norc9 = pd.read_csv('mean_rewards_0817_23-07-13_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison0.csv', index_col='Episode')\n\nwins = []\nfor i in range(9):\n df_no = i+1\n series_MoAv = eval('orc{}'.format(df_no))['Moving Average Score (n=50)'] #['Score']#\n col = (0,df_no/9,df_no/9)\n plt.plot(series_MoAv, label = df_no, color=col)\n #get first positive value above 0\n first_win = series_MoAv[series_MoAv>0].index[0]\n plt.vlines(x=first_win, ymin=-21, ymax=0, color=col)\n print(first_win)\n wins.append(first_win)\n \n\nplt.axhline(y = 0, color = 'r', linestyle = '-')\nplt.legend()\nplt.xlabel(\"Episodes\")\nplt.ylabel(\"Reward\")\nprint(\"average: \" + 
str(sum(wins)/len(wins)))\nplt.title('Outcome Reliability Analysis: Nine Runs with Same Hyperparameters')\nplt.show()\n\n\n#------------------------------------------------------------------------------------------------------------\n# Plotting 4th run only \nplt.plot(orc4['Score'], label = 'Score')\nplt.plot(orc4['Moving Average Score (n=50)'], color = (0,4/9,4/9), label = 'Moving Average Score (n=50)')\n\nplt.legend()\nplt.xlabel(\"Episodes\")\nplt.ylabel(\"Reward\")\nplt.title('Fourth Run of Outcome Reliability Analysis')\nplt.show()\n\n#------------------------------------------------------------------------------------------------------------\n# PPO With Excessively Large EPOCHS Hyperparameter\nlarge_epoch = pd.read_csv('mean_rewards_0822_18-53-58_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_150EpochsForNN0.csv', index_col='Episode')\nsmall_epoch = pd.read_csv('mean_rewards_0823_07-08-10_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_2EpochsForNN0.csv', index_col='Episode')\n\nplt.plot(orc4['Score'], color = (0,4/9,4/9), label = 'Standard Score')\nplt.plot(orc4['Moving Average Score (n=50)'], color = (0,4/9,4/9), label = 'Standard MA Score (n=50)')\n\nplt.plot(large_epoch['Score'], color = 'r', label = '150 EPOCHS Score')\nplt.plot(large_epoch['Moving Average Score (n=50)'], color = 'r', label = '150 EPOCHS MA Score (n=50)')\n\nplt.plot(small_epoch['Score'], color = 'black', label = '2 EPOCHS Score')\nplt.plot(small_epoch['Moving Average Score (n=50)'], color = 'black', label = '2 EPOCHS MA Score (n=50)')\n\nplt.legend()\nplt.xlabel(\"Episodes\")\nplt.ylabel(\"Reward\")\nplt.title('PPO With Excessively Large & Small EPOCHS Hyperparameter')\nplt.show()\n\n#------------------------------------------------------------------------------------------------------------\n# # Separate NN Comparison\nsep1 = pd.read_csv('mean_rewards_0902_17-33-45_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_separateNN_sequential.csv', index_col='Episode')\nsep2 = pd.read_csv('mean_rewards_0904_21-16-18_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_separateNN_sequential.csv', index_col='Episode')\nsep3 = pd.read_csv('mean_rewards_0904_21-16-53_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_separateNN_sequential.csv', index_col='Episode')\nsep4 = pd.read_csv('mean_rewards_0905_08-14-24_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_separateNN_sequential.csv', index_col='Episode')\nsep5 = pd.read_csv('mean_rewards_0905_09-38-00_5_0.0001_0.2_Dense_512_OutcomeReliabilityComparison_separateNN_sequential.csv', index_col='Episode')\n\nalpha = 0.2\n\nplt.plot(sep1['Moving Average Score (n=50)'], color='r', alpha=alpha)\nplt.plot(sep2['Moving Average Score (n=50)'], color='r', alpha=alpha)\nplt.plot(sep3['Moving Average Score (n=50)'], color='r', alpha=alpha)\nplt.plot(sep4['Moving Average Score (n=50)'], color='r', alpha=alpha)\nplt.plot(sep5['Moving Average Score (n=50)'], color='r', alpha=alpha)\n\nfor i in range(9-3):\n df_no = i+1+3\n series_orc = eval('orc{}'.format(df_no))['Moving Average Score (n=50)'] #['Score']#\n col = 'b'\n plt.plot(series_orc, color=col, alpha=alpha)#, label = df_no)\n\nsep_average = (\n sep1['Moving Average Score (n=50)'] + \n sep2['Moving Average Score (n=50)'] +\n sep3['Moving Average Score (n=50)'] +\n sep4['Moving Average Score (n=50)'] +\n sep5['Moving Average Score (n=50)'] \n)/5\n\norc_average = (\n orc4['Moving Average Score (n=50)'] +\n orc5['Moving Average Score (n=50)'] +\n orc6['Moving Average Score (n=50)'] +\n orc7['Moving Average Score 
(n=50)'] +\n    orc8['Moving Average Score (n=50)'] +\n    orc9['Moving Average Score (n=50)'] \n)/6\n\nplt.plot(orc_average, color='blue', label='Shared NNs averaged over six runs')\nplt.plot(sep_average, color='r', label='Separate NNs averaged over five runs')\n\nplt.legend()\nplt.xlabel(\"Episodes\")\nplt.ylabel(\"Reward\")\nplt.title('Shared NNs vs Separate NNs Performance')\nplt.show()","repo_name":"Paul1911/Deep-Reinforcement-Learning-with-Pong","sub_path":"Results/analyses.py","file_name":"analyses.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73272363529","text":"from lecture.examples.ex24_gui.domain.validators import ValidationException, StudentCRUDException\n\n\nclass ConsoleUI:\n    \"\"\"\n    Class responsible for the user interface\n    Will use the controller to perform operations other than read, print\n    \"\"\"\n\n    def __init__(self, ctr):\n        \"\"\"\n        Initialise UI\n        ctr StudentControler\n        \"\"\"\n        self.__ctr = ctr\n\n    @staticmethod\n    def _read_command():\n        \"\"\"\n        Read the command from the user\n        return string, the command\n        \"\"\"\n        print(\"\"\"\n    0 - exit\n    1 - add student\n    2 - remove student\n    3 - search student\n    4 - update student\n        \"\"\")\n        return input(\"Give command:\").strip()\n\n    def _create_student(self):\n        \"\"\"\n        Read a student and store in the application\n        \"\"\"\n        id = input(\"Student id:\").strip()\n        name = input(\"Student name:\").strip()\n        street = input(\"Address - street:\").strip()\n        nr = input(\"Address - number:\").strip()\n        city = input(\"Address - city:\").strip()\n        try:\n            self.__ctr.create(id, name, street, nr, city)\n        except ValidationException as ex:\n            print(\"special treatment\")\n            print(ex)\n        except StudentCRUDException as ex:\n            print(ex)\n\n    def _remove_student(self):\n        \"\"\"\n        Read a student id and remove the student\n\n        \"\"\"\n        id = input(\"Student id:\").strip()\n        try:\n            st = self.__ctr.remove(id)\n            print(\"Student \" + st.name() + \" removed\")\n        except ValueError as msg:\n            print(msg)\n\n    def _search(self):\n        \"\"\"\n        Search for students with name containing a given string\n        \"\"\"\n        cr = input(\"Name contains:\").strip()\n        sts = self.__ctr.search(cr)\n        if sts == []:\n            print(\"No match\")\n            return\n        print(\"\")\n        print(\"Search results: \" + str(len(sts)) + \" students\")\n        print(\"____________________________\")\n        print(\"ID\".ljust(4) + \"Name\".ljust(10) + \"Address\")\n        for st in sts:\n            print(st.id().ljust(4) + st.name().ljust(10) + str(st.address()))\n        print(\"____________________________\")\n\n    def _update_student(self):\n        id = input(\"Give the id of the student:\").strip()\n        name = input(\"Student name:\").strip()\n        street = input(\"Address - street:\").strip()\n        nr = input(\"Address - number:\").strip()\n        city = input(\"Address - city:\").strip()\n        try:\n            old = self.__ctr.update(id, name, street, nr, city)\n            print(\"Student \" + old.name() + \" updated\")\n        except ValueError as msg:\n            print(msg)\n\n    def start(self):\n        \"\"\"\n        Start the ui\n        \"\"\"\n        while True:\n            c = self._read_command()\n            if c == \"0\":\n                print(\"Bye Bye\")\n                return\n            if c == \"1\":\n                self._create_student()\n            if c == \"2\":\n                self._remove_student()\n            if c == \"3\":\n                self._search()\n            if c == \"4\":\n                
self._update_student()\n","repo_name":"cs-ubbcluj-ro/FP-2021-2022","sub_path":"src/lecture/examples/ex24_gui/ui/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"} +{"seq_id":"1407722833","text":"# coding: utf-8\n\nimport json\nimport logging\nimport os\n\nfrom tiops import modules as tcapi\nfrom tiops import utils\n\nfrom .terminal import TUIDisplay\n\n\nclass TUIModuleBase(TUIDisplay):\n    def __init__(self, inventory=None, module=None):\n        super(TUIModuleBase, self).__init__(inventory)\n\n        try:\n            self.hosts = self.inventory.servers(module)\n        except AttributeError:\n            pass\n\n\nclass TUIModule(TUIModuleBase):\n    def __init__(self, inventory=None, module=None, args=None, status=False):\n        super(TUIModule, self).__init__(inventory, module)\n        self.args = args\n        self.status = status\n\n    def display(self, listing=False):\n        if listing:\n            self._list_clusters()\n        else:\n            self._display_cluster()\n\n    def _list_clusters(self):\n        profile_dir = utils.profile_path('clusters')\n        _srv_list = [['Cluster', 'Version']]\n        for _file in utils.list_dir(profile_dir):\n            try:\n                for x in ['downloads', 'host_vars', 'tiops.log']:\n                    if x in _file:\n                        raise RuntimeError\n            except RuntimeError:\n                continue\n            if not os.path.isdir(_file):\n                continue\n\n            _cluster_name = os.path.split(_file)[1]\n            try:\n                _meta = utils.read_yaml(os.path.join(\n                    profile_dir, _file, 'meta.yaml'))\n            except EnvironmentError as e:\n                import errno\n                # only pass when the error is file not found\n                if e.errno != errno.ENOENT:\n                    raise\n                self.term.warn(\n                    'Metadata file of cluster {} not found, did the deploy process finish?'.format(_cluster_name))\n                # skip this cluster\n                continue\n            try:\n                _version = _meta['tidb_version']\n            except KeyError:\n                _version = '-'\n            _srv_list.append([_cluster_name, _version])\n\n        self.term.info('Available TiDB clusters:')\n        for row in self.format_columns(_srv_list):\n            self.term.normal(row)\n\n    def _display_cluster(self):\n        try:\n            _status = self.args.show_status\n        except AttributeError:\n            _status = self.status\n\n        cluster_home = utils.profile_path(\n            'clusters/{}'.format(self.args.cluster_name))\n        _profile_path = utils.profile_path(cluster_home, 'meta.yaml')\n        if os.path.exists(cluster_home) and os.path.exists(_profile_path):\n            cluster_info = utils.read_yaml(_profile_path)\n            info = 'TiDB cluster {}, version {}\\nNode list:'.format(\n                self.args.cluster_name, cluster_info['tidb_version'])\n            self.term.info(info)\n\n            display_info = self._format_cluster(_status)\n            for section in display_info:\n                for row in self.format_columns(section):\n                    self.term.normal(row)\n\n    def _format_cluster(self, show_status=False):\n        try:\n            _role_filter = self.args.role.lower()\n        except AttributeError:\n            _role_filter = None\n        try:\n            _node_filter = self.args.node_id.lower()\n        except AttributeError:\n            _node_filter = None\n        try:\n            _ip_filter = self.args.ip_addr\n        except AttributeError:\n            _ip_filter = None\n\n        result = []\n\n        if show_status:\n            _title = ['ID', 'Role', 'Host', 'Ports',\n                      'Status', 'Data Dir', 'Deploy Dir']\n        else:\n            _title = ['ID', 'Role', 'Host', 'Ports', 'Data Dir', 'Deploy Dir']\n        srv_list = [_title]\n        for srv in self.hosts:\n            _host = srv['ip']\n            _role = srv['role']\n            _uuid = srv['uuid']\n\n            # apply filters\n            if _role_filter and _role.lower() not in _role_filter:\n                continue\n            if _node_filter and _uuid.lower() not in _node_filter:\n                continue\n            if _ip_filter and _host not in _ip_filter:\n                continue\n\n            try:\n                _port = 
'{}/{}'.format(srv['client_port'], srv['peer_port'])\n except KeyError:\n try:\n _port = '{}/{}'.format(srv['port'], srv['status_port'])\n except KeyError:\n try:\n if srv.has_key('prometheus_port'):\n _port = '{}/{}'.format(srv['prometheus_port'],\n srv['pushgateway_port'])\n elif srv.has_key('node_exporter_port'):\n _port = '{}/{}'.format(srv['node_exporter_port'],\n srv['blackbox_exporter_port'])\n elif srv.has_key('web_port'):\n _port = '{}/{}'.format(srv['web_port'],\n srv['cluster_port'])\n else:\n _port = '{}'.format(srv['port'])\n except:\n pass\n\n # query node status\n if show_status:\n _status = self.__get_status(srv)\n\n _deploy = srv['full_deploy_dir']\n try:\n _data = srv['full_data_dir']\n if srv['role'].lower() == 'tidb':\n _data = '-'\n except KeyError:\n _data = \"-\"\n\n if show_status:\n srv_list.append(\n [_uuid, _role, _host, _port, _status, _data, _deploy])\n else:\n srv_list.append([_uuid, _role, _host, _port, _data, _deploy])\n\n result.append(srv_list)\n return result\n\n def __get_status(self, srv=None):\n _status = '-'\n if not srv:\n return _status\n\n _host = srv['ip']\n _role = srv['role']\n _uuid = srv['uuid']\n if 'pd' == _role.lower():\n _port = srv['client_port']\n _api = tcapi.PDAPI(_host, _port)\n try:\n _resp = _api.status()\n for _pd in json.loads(_resp):\n if _pd['name'] != _uuid:\n continue\n if _pd['health']:\n _status = 'Health'\n else:\n _status = 'Unhealth'\n except:\n return 'Down'\n try:\n _leader = _api.leader()\n if _leader == _uuid:\n _status = '{}|L'.format(_status)\n except:\n return _status\n elif 'tidb' == _role.lower():\n _port = srv['status_port']\n _api = tcapi.TiDBAPI(_host, _port)\n if _api.ok():\n _status = 'Up'\n else:\n _status = 'Down'\n elif 'tikv' == _role.lower():\n _port = srv['port']\n _api = tcapi.ClusterAPI(self.inventory)\n if _api.tikv_stores():\n for store in _api.tikv_stores()['stores']:\n if '{}:{}'.format(_host, _port) == store['store']['address']:\n return store['store']['state_name']\n if _api.tikv_tombstone():\n for store in _api.tikv_tombstone()['stores']:\n if '{}:{}'.format(_host, _port) == store['store']['address']:\n return store['store']['state_name']\n if _status == '-':\n return 'Down'\n elif 'pump' == _role.lower():\n _uuid = srv['uuid']\n _api = tcapi.BinlogAPI(self.inventory)\n if _api.pump_status:\n for _pump_id, _pump_info in _api.pump_status['status'].iteritems():\n if _pump_info['nodeId'] == _uuid:\n _status = _pump_info['state']\n break\n else:\n _status = 'Down'\n\n elif 'drainer' == _role.lower():\n _uuid = srv['uuid']\n _api = tcapi.BinlogAPI(self.inventory)\n if _api.drainer_status:\n for _drainer in _api.drainer_status:\n if _uuid == _drainer['nodeId']:\n _status = _drainer['state']\n break\n else:\n _status = 'Down'\n\n return _status\n","repo_name":"mwish-pingcap-company-repos/tiup","sub_path":"components/tiops/tiops/tui/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"44241281560","text":"\"\"\"South Korean ports spider.\n\nProvider: Korean Ministry of Oceans and Fisheries\nWebsite: https://tinyurl.com/yax8vous (for debugging and verifying scraped data)\n\n\"\"\"\nimport datetime as dt\nimport json\n\nfrom scrapy import Request, Spider\n\nfrom kp_scrapers.lib.errors import InvalidCliRun\nfrom kp_scrapers.lib.parser import try_apply\nfrom kp_scrapers.lib.request import allow_inline_requests\nfrom kp_scrapers.models.normalize import DataTypes\nfrom 
kp_scrapers.spiders.port_authorities import PortAuthoritySpider\nfrom kp_scrapers.spiders.port_authorities.south_korea import normalize\n\n\nclass SouthKoreaSpider(PortAuthoritySpider, Spider):\n name = 'SouthKorea'\n provider = 'Korean MOF'\n version = '1.1.0'\n produces = [DataTypes.PortCall, DataTypes.Vessel, DataTypes.Cargo]\n\n # lineup of vessel movements in selected port and date range, does not contain cargo data\n lineup_url = 'https://new.portmis.go.kr/portmis/sp/vssl/vsch/selectSpVsslInPagingList.do'\n\n # provides bol number which is required for POSTing the cargo_url form\n bol_url = 'https://new.portmis.go.kr/portmis/co/como/cnlg/selectNlgUfrgtInfoPop3TabList1.do'\n # detailed description of onboard cargo/volume/movement/charterers\n cargo_url = 'https://new.portmis.go.kr/portmis/co/como/cnlg/selectNlgUfrgtInfo.do'\n\n # provides dangerous goods declaration number\n danger_goods_url = 'https://new.portmis.go.kr/portmis/fr/dgst/dinu/dinu/selectDgst1010List.do'\n # detailed description of onboard dangerous cargo/volume/movement/charterers\n cargo_alt_url = 'https://new.portmis.go.kr/portmis/fr/dgst/dinu/dinu/selectDgst1010Map.do'\n\n spider_settings = {\n # as we're accessing the website's API, duplicate filtering has to be disabled\n 'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',\n # be kind to website, so that we don't get banned permanently\n 'DOWNLOAD_DELAY': 1,\n }\n\n def __init__(self, port_code=None, date_range=None, **kwargs):\n \"\"\"Init SouthKorea spider search filters.\n\n Args:\n port_code (str): three-digit code, see https://bit.ly/2JXbu3E for port name mapping\n date_range (str): formatted as \"YYYYMMDD,YYYYMMDD\",\n defaults to \"<14-days-prior>,<7-days-in-the-future>\"\n\n \"\"\"\n super().__init__(**kwargs)\n\n if not date_range:\n start = dt.datetime.strftime(dt.datetime.utcnow() - dt.timedelta(days=14), '%Y%m%d')\n end = dt.datetime.strftime(dt.datetime.utcnow() + dt.timedelta(days=7), '%Y%m%d')\n date_range = start + ',' + end\n else:\n if len(date_range.split(',')) != 2:\n raise InvalidCliRun('date_range', repr(date_range))\n\n start, end = date_range.split(',')\n if not try_apply(date_range.split(',')[0], int):\n raise InvalidCliRun('date_range(start)', repr(date_range.split(',')[0]))\n\n if not try_apply(date_range.split(',')[1], int):\n raise InvalidCliRun('date_range(end)', repr(date_range.split(',')[1]))\n\n # choose whether we'd like to limit data extraction to only a single port\n self.limit_port_code = str(port_code) if port_code else None\n # WARNING setting a large date_range may result in excessively long run times\n self.start_date = start\n self.end_date = end\n self.filters = kwargs\n\n def start_requests(self):\n \"\"\"Request list of vessel lineup for specified port and date ranges.\n\n Yields:\n Request:\n\n \"\"\"\n for port_code in normalize.PORT_MAPPING.keys():\n if self.limit_port_code and self.limit_port_code != port_code:\n self.logger.info(\"Skip data extraction for port_code=%s\", port_code)\n continue\n\n form = {\n 'bargeClsgn1': '', # optional\n 'bargeVsslNm': '', # optional\n 'clsgn': '', # optional\n 'currentPageNo': 1,\n 'etryptTkoffDt': '', # optional\n 'ibobprtSe': '1',\n 'prtAgCd': port_code,\n 'prtAgNm': '', # optional\n 'recordCount': 1000000, # reasonably large enough number for long date ranges\n 'reqstSe': 'all',\n 'srchBeginEtryndDt': self.start_date,\n 'srchEndEtryndDt': self.end_date,\n 'vsslInnb': '', # optional\n 'vsslNm': '', # optional\n }\n form.update(self.filters)\n yield Request(\n 
url=self.lineup_url,\n                callback=self.collect_responses,\n                method='POST',\n                headers={'content-type': 'application/json; charset=utf-8'},\n                body=json.dumps({'dmaParam': form}),\n            )\n\n    @staticmethod\n    def init_form_bol(**opts):\n        \"\"\"Init form data for POSTing.\n\n        Args:\n            opts (list[str]):\n\n        Returns:\n            str: json-formatted string\n\n        \"\"\"\n        return json.dumps(\n            {\n                'dmaParam': {\n                    'prtAgCd': opts.get('prtAgCd'),\n                    'clsgn': opts.get('clsgn'),\n                    'vsslNm': opts.get('vsslNm'),\n                    'etryptYear': opts.get('etryptYear'),\n                    'etryptCo': opts.get('etryptCo'),\n                    'entrpsCd': '',  # optional\n                    'entrpsNm': '',  # optional\n                    'tkinTkoutSe': '',  # optional\n                    'lnlMthCd': '',  # optional\n                    'conTp': 'A',\n                    'lnlEntrpsCd': '',  # optional\n                    'currentPageNo': 1,\n                    'recordCount': 5000,\n                    'stcPopGb': '',  # optional\n                    'vsslInnb': opts.get('vsslInnb'),\n                    'unityFrghtUpdtOdr': '',  # optional\n                }\n            }\n        )\n\n    @staticmethod\n    def init_form_bol_cargo(**opts):\n        \"\"\"Init form data for POSTing.\n\n        Args:\n            opts (list[str]):\n\n        Returns:\n            str: json-formatted string\n\n        \"\"\"\n        return json.dumps(\n            {\n                'dmaParam': {\n                    'prtAgCd': opts.get('prtAgCd'),\n                    'clsgn': opts.get('clsgn'),\n                    'etryptYear': opts.get('etryptYear'),\n                    'etryptCo': opts.get('etryptCo'),\n                    'entrpsCd': opts.get('entrpsCd'),\n                    'tkinTkoutSe': opts.get('tkinTkoutSe'),\n                    'lnlMthCd': '',  # optional\n                    'blNo': opts.get('blNo'),\n                    'unityFrghtUpdtOdr': 1,\n                }\n            }\n        )\n\n    @staticmethod\n    def init_form_decl(**opts):\n        \"\"\"Init form data for POSTing.\n\n        Args:\n            opts (list[str]):\n\n        Returns:\n            str: json-formatted string\n\n        \"\"\"\n        # goods declaration submission date is not indicated anywhere\n        # we therefore need to widen the search range, to 6 days before the vessel arrival date\n        lower_date = dt.datetime.strptime(opts['etryptDt'][:-4], '%Y%m%d') - dt.timedelta(days=6)\n        if opts.get('tkoffDt'):  # may not be present always\n            upper_date = dt.datetime.strptime(opts['tkoffDt'][:-4], '%Y%m%d')\n        else:\n            upper_date = dt.datetime.strptime(opts['etryptDt'][:-4], '%Y%m%d')\n\n        return json.dumps(\n            {\n                'dmaParam': {\n                    'clsgn': opts.get('clsgn'),\n                    'currentPageNo': 1,\n                    'entrpsCd': '',  # optional\n                    'entrpsCdNm': '',  # optional\n                    'fromReqstDt': lower_date.strftime('%Y%m%d'),\n                    'prtAgCd': opts.get('prtAgCd'),\n                    'recordCount': 5000,\n                    'toReqstDt': upper_date.strftime('%Y%m%d'),\n                }\n            }\n        )\n\n    @staticmethod\n    def init_form_decl_cargo(**opts):\n        \"\"\"Init form data for POSTing.\n\n        Args:\n            opts (list[str]):\n\n        Returns:\n            str: json-formatted string\n\n        \"\"\"\n        return json.dumps(\n            {\n                'dmaDetailParam': {\n                    'recordDngrCount': 1000,\n                    'tkinSe': opts.get('tkinSe'),\n                    'vsslInnb': opts.get('vsslInnb'),\n                    'currentDngrPageNo': 1,\n                    'satmntUpdtOdr': 1,\n                    'recordCargCount': 5000,\n                    'etryptCo': opts.get('etryptCo'),\n                    'currentCargPageNo': 1,\n                    'prtAgCd': opts.get('prtAgCd'),\n                    'etryptYear': opts.get('etryptYear'),\n                }\n            }\n        )\n\n    def post(self, url, form, **kwargs):\n        \"\"\"Post form.\n\n        Args:\n            url (str): post endpoint\n            form (str): json-deserialised string\n            kwargs (dict[str, str]): additional meta to append to response\n\n        Returns:\n            scrapy.Request:\n\n        \"\"\"\n        return Request(\n            url=url,\n            method='POST',\n            headers={'content-type': 'application/json; charset=utf-8'},\n            body=form,\n            meta=kwargs,\n        )\n\n    @allow_inline_requests\n    def collect_responses(self, response):\n        \"\"\"Collect all POST responses.\n\n        Serialise json response.\n        Transform into raw dict.\n\n        Args:\n            response (scrapy.Response):\n\n        Yields:\n            Dict[str, str]:\n\n        \"\"\"\n        # each vessel in the lineup has a number of 
BOLs or goods declaration associated with it\n length = len(json.loads(response.body)['dltInOutList'])\n for idx, raw_item in enumerate(json.loads(response.body)['dltInOutList']):\n raw_item['cargoes'] = []\n self.logger.info(\n 'Port {} ({}/{}) : Vessel {} (IMO {}) (Callsign {}) (Type {})'.format(\n raw_item['prtAgCd'],\n idx + 1,\n length,\n raw_item['vsslNm'],\n raw_item['vsslInnb'],\n raw_item['clsgn'],\n raw_item['vsslKindNm'],\n )\n )\n\n # NOTE technically this should be in `normalize.py`,\n # but putting this here cuts down on run times by more than 80 %\n if raw_item['vsslKindNm'] not in normalize.IRRELEVANT_VESSEL_TYPES:\n\n # each BOL contains product/volume/movement/charterers data\n bol_numbers = yield self.post(\n url=self.bol_url, form=self.init_form_bol(**raw_item), **raw_item\n )\n for bol_number in json.loads(bol_numbers.body)['dltList1']:\n raw_item.update(bol_number)\n # get individual cargo data and append to list of cargoes\n bol_cargo = yield self.post(\n url=self.cargo_url, form=self.init_form_bol_cargo(**raw_item), **raw_item\n )\n raw_cargo = json.loads(bol_cargo.body)['dmaRstMap1']\n # there are errors on the website sometimes where it does not return any cargo\n if not raw_cargo:\n continue\n\n # Sometimes, BOL will show \"TANK\" as the product, which is incorrect\n # this is a one-off special case where we do not want this product\n if any(tank in raw_cargo['frghtNm'] for tank in ['TANK', 'TNK']):\n continue\n\n raw_item['cargoes'].append(raw_cargo)\n\n # sometimes, BOL will not be present (i.e. 'cargoes' will be empty)\n if not raw_item['cargoes']:\n # if so, search for goods declaration sheets related to that vessel\n # declaration contains a key needed for obtaining the dangerous goods data\n declarations = yield self.post(\n url=self.danger_goods_url, form=self.init_form_decl(**raw_item), **raw_item\n )\n declarations = json.loads(declarations.body)['dtlDgst1010List']\n for idx, decl in enumerate(declarations):\n # possible to have more than one goods declaration form for one port call\n # if so, take the latest, most up-to-date goods declaration\n if idx + 1 != len(declarations):\n continue\n\n raw_item.update(decl)\n # get list of all dangerous cargo onboard from declaration sheet\n danger_cargoes = yield self.post(\n url=self.cargo_alt_url,\n form=self.init_form_decl_cargo(**raw_item),\n **raw_item,\n )\n\n danger_cargo = json.loads(danger_cargoes.body)['dtlDgst1010Map']\n raw_item['cargoes'].append(danger_cargo)\n\n # contextualise raw item with metadata\n raw_item.update(provider_name=self.provider)\n yield normalize.process_item(raw_item)\n","repo_name":"theHausdorffMetric/test","sub_path":"kp_scrapers/spiders/port_authorities/south_korea/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":13272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19417404728","text":"from pathlib import Path\n\nconfig = {\n 'dataset': 'bach',\n 'random_seed': 22,\n\n # --- Dataloader ---\n 'dataloader_generator_kwargs': dict(\n include_transpositions=True,\n sequences_size=24,\n ),\n\n # --- DataProcessor ---\n 'data_processor_type': 'bach',\n 'data_processor_kwargs': dict(\n embedding_size=32\n ),\n\n # --- Decoder ---\n 'decoder_type': 'transformer_relative',\n 'decoder_kwargs': dict(\n d_model=512,\n n_head=8,\n num_encoder_layers=4,\n num_decoder_layers=8,\n dim_feedforward=2048,\n positional_embedding_size=8,\n dropout=0.1,\n ),\n\n # ======== Generation =======\n 'generation_kwargs': 
dict(\n        temperature=0.9,\n        top_p=0.8,\n    ),\n\n    # ======== model ID ========\n    'timestamp': None,\n    'savename': 'aug-gen',\n\n    # ======== Augmentative Generation ========\n    'num_epochs': 40,\n    'generations_per_epoch': 50,\n    'lr': 1e-5,\n    'batch_size': 8,\n    'num_batches': 2048,\n\n    # ======== Training ========\n    # 'num_epochs': 40,\n    # 'lr': 1e-5,\n    # 'batch_size': 8,\n    # 'num_batches': 2048,\n}\n","repo_name":"toufic0710/constraint-transformer-bach","sub_path":"transformer_bach/bach_decoder_config.py","file_name":"bach_decoder_config.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"36181487331","text":"import cv2 as cv\nimport time\nimport os\nimport sys\nimport signal\nimport argparse\nimport threading\nimport readline\nfrom subprocess import call\n# was renamed in python 3 (python 2 Queue)\nif sys.version_info[0] < 3:\n    import Queue as queue\n    from Queue import Queue\nelse:\n    import queue\n    from queue import Queue\n\n\ncommands = [\"ArtWork\", \"artwork\", \"Artwork\", \"artWork\", \".mp4\", \".avi\", \".mov\", \".xml\"]\n\n\ndef ArtWorkScriptError(error):\n    \"\"\"\n    Dummy error printer\n    :param error:\n    :return:\n    \"\"\"\n    print(error)\n    exit(0)\n\n\ndef signal_handler(signum, frame):\n    if os.path.exists(\"frames\"):\n        call([\"rm\", \"-Rf\", \"frames\"])\n    sys.exit()\n\n\ndef completer(text, state):\n    options = [x for x in commands if x.startswith(text)]\n    try:\n        return options[state]\n    except IndexError:\n        return None\n\n\ndef process_frame_queue(path_to_artwork, path_to_xml):\n    while True:\n        try:\n            frameVal = q.get(True, 0.05)\n            call(path_to_artwork + \" -i \" + frameVal + \" -o \" + frameVal + \" \" + path_to_xml, shell=True)\n            q.task_done()\n        except queue.Empty:\n            break\n\n\nif __name__ == '__main__':\n\n    # add signal handler if user exits the script with CTRL+C\n    signal.signal(signal.SIGINT, signal_handler)\n\n    # add autocomplete to command line arguments\n    readline.set_completer(completer)\n    readline.parse_and_bind(\"tab: complete\")\n\n    # add argument parser for command line arguments from user\n    parser = argparse.ArgumentParser(description=\"ArtWork-Video-Script: Converts a video into an artwork video.\",\n                                     epilog=\"For more information check www.github.com/ckreisl\",\n                                     conflict_handler=\"resolve\")\n    parser.add_argument(\"-i\",\n                        metavar=\"[video]\",\n                        help=\"Loads the video from a given file path\",\n                        required=True,\n                        type=str)\n    parser.add_argument(\"-o\",\n                        metavar=\"[name]\",\n                        help=\"Saves the processed video under the denoted output name\",\n                        required=True,\n                        type=str)\n    parser.add_argument(\"-a\",\n                        metavar=\"[artwork]\",\n                        help=\"Path to your compiled artwork executable\",\n                        required=True,\n                        type=str)\n    parser.add_argument(\"-s\",\n                        metavar=\"[xml]\",\n                        help=\"Path to your rendersettings XML file\",\n                        required=True,\n                        type=str)\n    parser.add_argument(\"-p\",\n                        metavar=\"[CORES]\",\n                        help=\"How many threads should be used (default: 4)\",\n                        type=int,\n                        default=4)\n    parser.add_argument(\"-f\",\n                        metavar=\"[FPS]\",\n                        help=\"Set FPS for your output video (default: FPS_INPUT_VIDEO)\",\n                        type=float)\n    parser.add_argument(\"-n\",\n                        metavar=\"[FRAME]\",\n                        help=\"Convert every N frame only (default: 1 this means convert every single frame)\",\n                        type=int,\n                        default=1)\n\n    # get user input from command line parameters\n    args = vars(parser.parse_args())\n\n    (first, second, third) = cv.__version__.split('.')\n\n    # check used OpenCV version\n    if int(first) < 3:\n        
ArtWorkScriptError(\"OpenCV v.\" + str(cv.__version__) + \" < 3 not supported by this script\")\n\n # check if we are on unix linux system\n if not sys.platform == \"linux\" and not sys.platform == \"linux2\":\n ArtWorkScriptError(\"This script is currently only running with Unix Linux. \\n\"\n \"If you like to change this, let me know or send me a pull request.\")\n\n # parse command line arguments\n NUM_THREADS = args[\"p\"]\n FPS_VIDEO = args[\"f\"]\n N_FRAME = args[\"n\"]\n pathToVideo = args[\"i\"]\n outputName = args[\"o\"]\n pathToArtWork = args[\"a\"]\n pathToXML = args[\"s\"]\n\n # artwork is in current directory\n if \"/\" not in pathToArtWork:\n pathToArtWork = \"./\" + pathToArtWork\n\n # check given path to ArtWork executable\n if not os.path.isfile(pathToArtWork):\n ArtWorkScriptError(\"ArtWork executable could not be found in: \" + pathToArtWork + \" path.\")\n\n # check if video input file exists\n if not os.path.isfile(pathToVideo):\n ArtWorkScriptError(\"Video file could not be found in: \" + pathToVideo + \" path.\")\n\n # check if XML file exists\n if not os.path.isfile(pathToXML):\n ArtWorkScriptError(\"XML file could not be found in: \" + pathToXML + \" path.\")\n\n # process image and convert frames\n videoIn = cv.VideoCapture(pathToVideo)\n success, image = videoIn.read()\n\n if not success:\n ArtWorkScriptError(\"Could not load frame from video\")\n\n # set output fps to input fps\n if FPS_VIDEO is None:\n FPS_VIDEO = videoIn.get(cv.CAP_PROP_FPS)\n\n # frame width and height\n WIDTH, HEIGHT, LAYERS = image.shape\n LENGTH_VIDEO = videoIn.get(cv.CAP_PROP_FRAME_COUNT) / videoIn.get(cv.CAP_PROP_FPS)\n\n # python queue for multithreading\n q = Queue()\n\n print((\"Start processing with ... \\n\"\n \"NUM_THREADS: {} \\n\"\n \"VIDEO_HEIGH: {} \\n\"\n \"VIDEO_WIDTH: {} \\n\"\n \"VIDEO_FPS: {} \\n\"\n \"VIDEO_LENGTH: {} \\n\"\n \"N_FRAME: {} \\n\"\n \"OUTPUT_NAME: {}\").format(NUM_THREADS, WIDTH, HEIGHT, FPS_VIDEO, LENGTH_VIDEO, N_FRAME, outputName))\n\n # convert images in multiple threads\n thread_list = []\n for i in range(0, NUM_THREADS):\n t = threading.Thread(target=process_frame_queue, args=(pathToArtWork, pathToXML,))\n t.start()\n thread_list.append(t)\n\n # create separated folder where frames will be temporarily saved\n if os.path.exists(\"frames\"):\n call([\"rm\", \"-Rf\", \"frames\"])\n call([\"mkdir\", \"frames\"])\n\n path = os.getcwd() + \"/frames\"\n success = True\n count = 0\n\n print(\"Starting saving frames from video and converting them with your ArtWork executable\")\n # start timer and add data for multithreading\n start = time.time()\n while success:\n name = \"frame%d.png\" % count\n cv.imwrite(os.path.join(path, name), image)\n # put work to queue for multithreading\n if count % N_FRAME == 0:\n q.put(os.path.join(path, name))\n success, image = videoIn.read()\n count += 1\n\n q.join()\n for t in thread_list:\n t.join()\n print(\"Finished converting ... 
\")\n image_converting_time = time.time()-start\n print(\"Processing time artworks-frames [frame->artwork]: {0:.3f}s\".format(image_converting_time))\n\n # convert generated images into video (.avi)\n # VIDEO CODEC CAN BE CHANGED HERE\n # (check if codec is supported by your system)\n # ********************************************\n CODEC_fourcc = cv.VideoWriter_fourcc(*'MJPG')\n # ********************************************\n videoOut = cv.VideoWriter(outputName + \".avi\", CODEC_fourcc, FPS_VIDEO, (HEIGHT, WIDTH))\n\n # reload video\n videoIn.release()\n videoIn = cv.VideoCapture(pathToVideo)\n \n count = 0\n start = time.time()\n while videoIn.isOpened():\n ret, frame = videoIn.read()\n if ret:\n # load manipulated frame and write to video\n frame = cv.imread(os.path.join(path, \"frame{}.png\".format(count)))\n videoOut.write(frame)\n count += 1\n else:\n break\n\n video_converting_time = time.time()-start\n print(\"Processing time artwork-video [frames->video]: {0:.3f}s\".format(video_converting_time))\n final_runtime = video_converting_time + image_converting_time\n print(\"Final runtime {}s for an ({}x{}) video running {}s\".format(final_runtime, WIDTH, HEIGHT, LENGTH_VIDEO))\n\n # clean up, remove all created images\n videoIn.release()\n videoOut.release()\n call([\"rm\", \"-Rf\", \"frames\"])\n","repo_name":"ckreisl/artwork-video","sub_path":"artworkvideo.py","file_name":"artworkvideo.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18052764944","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport neal\nfrom dwave.system import DWaveSampler, EmbeddingComposite, LeapHybridSampler\n\nfrom helper_functions import (\n Pi_functional,\n create_bqm,\n compute_a_min,\n compute_all_J_tildes,\n feasible_solution,\n)\nfrom graph import show_bqm_graph\nfrom basisfunctions import calculate_S\n\n\ndef simulated_sample(bqm, filter=False):\n ready = False\n while not ready:\n sim_solver = neal.SimulatedAnnealingSampler()\n sampleset = sim_solver.sample(bqm, num_reads=1000).aggregate()\n if filter:\n sampleset = sampleset.filter(feasible_solution)\n if len(sampleset) > 0:\n ready = True\n else:\n ready = True\n return sampleset\n\n\ndef real_sample(bqm, filter=False):\n real_solver = EmbeddingComposite(DWaveSampler())\n return real_solver.sample(\n bqm, num_reads=1000, annealing_time=100, return_embedding=True\n ).aggregate()\n\ndef hybrid_sample(bqm, filter=False):\n real_solver = LeapHybridSampler()\n return real_solver.sample(\n bqm, time_limit=5\n ).aggregate()\n\n\nif __name__ == \"__main__\":\n # N = None\n # r_min = None\n # S = None # N by 5 array\n # r = None\n # u_c = None\n N = 100\n r_min = 0.002\n r = 1\n # S = np.array([[1, 1, -2, 0, 0]] * N)\n u_c = np.array(np.linspace(0, 1, N + 1)) ** 0.7\n S = calculate_S(N, p=1, q=0, f=0) # S depends on the distance between the nodes\n\n H = 1\n J_hat = H # set equal as in paper\n\n # here we need to do the embedding and stuff\n solver = None\n # Pi_min = Pi_functional(S, u_c)\n\n # box algorithm\n while r > r_min:\n J_tildes = compute_all_J_tildes(S, u_c, r)\n bqm = create_bqm(H, J_hat, J_tildes, boundary_condition=\"D\", b_c_strength=1)\n sampleset = simulated_sample(bqm, filter=False)\n\n # solver.sample(bqm) # adjust this to the solver!\n a_min = compute_a_min(sampleset, u_c, r)\n # new_Pi = Pi_functional(S, a_min)\n if Pi_functional(S, a_min) < Pi_functional(S, u_c):\n u_c = a_min\n else:\n r /= 2\n # Pi_min = new_Pi\n\n 
plt.plot(np.linspace(0, 1, N + 1), u_c)\n    plt.show()\n","repo_name":"benc2/anneal-diff-eq","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27869158682","text":"class Solution:\n    def uniquePaths2(self, m: int, n: int) -> int:\n        def backtracking(j: int, k: int, rst: int):\n            if j == m - 1 and k == n - 1:\n                rst += 1\n                return rst\n            if j > m - 1 or k > n - 1:\n                return rst\n            return backtracking(j + 1, k, rst) + backtracking(j, k + 1, rst)\n\n        return backtracking(0, 0, 0)\n\n    def uniquePaths(self, m: int, n: int) -> int:\n        memo = {}\n\n        def path(j: int, k: int):\n            if j == 1 and k == 1:\n                return 1\n            if j < 1 or k < 1:\n                return 0\n            left = memo[(j - 1, k)] if memo.get((j - 1, k)) else path(j - 1, k)\n            up = memo[(j, k - 1)] if memo.get((j, k - 1)) else path(j, k - 1)\n            val = left + up\n            memo[(j, k)] = val\n            return val\n\n        return path(m, n)\n\n\nif __name__ == '__main__':\n    x = Solution().uniquePaths(300, 300)\n    print(x)\n","repo_name":"Li-Nina/leetcode","sub_path":"dogge/mediumCollection/uniquePaths.py","file_name":"uniquePaths.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5349608380","text":"from config import APP_KEY\nfrom flask import Flask, render_template, request, redirect, url_for \nfrom flask.json import jsonify\n\nimport pandas as pd\n\nimport pymongo\nfrom bson.json_util import dumps\n\n# Retrieve info from this \nconn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)\n\napp = Flask(__name__) # the name of the file & the object (double usage)\n\n# List all routes that are available.\n\n# This api route takes in year\n# Returns: JSON object for that particular year \n# a) Street Address\n# b) Lat & Long info\n# c) Race_Color\n@app.route(\"/api/maps/<year>\")\ndef heat_maps(year):\n    print(\"In heat maps section.\")\n    print('year', year)\n    ## - Retrieve from DB\n    db_DH = client.digitalHumanity_db\n    collection_string = \"censuses_\" + year\n    censuses_year_collection = db_DH[collection_string]\n    all_data = dumps(censuses_year_collection.find({\"Latitude\":{\"$ne\":None},\"Longitude\":{\"$ne\":None}},\n    {\"_id\":0,\"Street Address\":1,\"Latitude\":1, \"Longitude\":1, \"Race_Color\":1}))\n    \n    return all_data\n\n# This api route takes in race color like B or W & \n# Returns: JSON object for that particular race_color \n# a) All census years (for our case 1900, 1910, 1920)\n# b) All ages (infants to 65 +)\n@app.route(\"/api/censusyears_vs_ages/<race_color>\")\ndef censusyears_vs_ages(race_color):\n    print(\"In census years_vs_ages section.\")\n    print('race_color', race_color)\n    ## - Retrieve from DB\n    db_DH = client.digitalHumanity_db\n    census_years = [\"1900\",\"1910\",\"1920\"]\n    \n    counter = 0\n    for cy in census_years:\n        collection_string = \"censuses_\" + cy\n        censuses_year_collection = db_DH[collection_string]\n        \n        if(counter == 0):\n            all_data = dumps(censuses_year_collection.find({\"Race_Color\":race_color},\n            {\"_id\":0,\"Year\":1,\"Age\":1}))\n        else:\n            new_data = dumps(censuses_year_collection.find({\"Race_Color\":race_color},\n            {\"_id\":0,\"Year\":1,\"Age\":1}))\n            all_data = all_data + new_data\n        counter = counter + 1\n    return all_data\n\nif __name__ == \"__main__\":\n    
app.run(debug=True)","repo_name":"trivial-vector/Digital-Humanities","sub_path":"Karika/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70520243849","text":"from app import db\n\n\nclass Report(db.Model):\n\t__tablename__ = 'reports'\n\n\tid = db.Column(db.Integer, primary_key=True)\n\treport_result_id = db.Column(db.Integer, nullable=True)\n\tsymbol = db.Column(db.String(10), nullable=False)\n\ttx_from = db.Column(db.String(255), nullable=False)\n\ttx_to = db.Column(db.String(255), nullable=False)\n\tquantity = db.Column(db.Float(), nullable=False)\n\tdescription = db.Column(db.Text())\n\tfraud = db.Column(db.Integer(), nullable=False) # blacklist : 100\n\tcurious = db.Column(db.Integer(), nullable=False) # 0\n\tcreated_at = db.Column(db.DateTime, default=db.func.current_timestamp())\n\tupdated_at = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp())\n\n\tdef __init__(self, symbol, tx_from, tx_to, quantity, description, fraud, curious):\n\t\tself.symbol = symbol\n\t\tself.tx_from = tx_from\n\t\tself.tx_to = tx_to\n\t\tself.quantity = quantity \n\t\tself.description = description\n\t\tself.fraud = fraud\n\t\tself.curious = curious\n\n\tdef serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'symbol': self.symbol,\n\t\t\t'tx_from': self.tx_from,\n\t\t\t'tx_to': self.tx_to,\n\t\t\t'quantity': self.quantity,\n\t\t\t'description': self.description,\n\t\t\t'fraud': self.fraud,\n\t\t\t'curious': self.curious\n\t\t}\n\n\tdef __repr__(self):\n\t\treturn '<Report %r %r %r %r %r %r %r %r>' % (\n\t\t\t\tself.id,\n\t\t\t\tself.symbol,\n\t\t\t\tself.tx_from,\n\t\t\t\tself.tx_to,\n\t\t\t\tself.quantity,\n\t\t\t\tself.description,\n\t\t\t\tself.fraud,\n\t\t\t\tself.curious\n\t\t)\n\n\nclass ReportResult(db.Model):\n\t__tablename__ = 'report_results'\n\n\tid = db.Column(db.Integer, primary_key=True)\n\ttx_to = db.Column(db.String(255), nullable=False)\n\ttotal_fraud = db.Column(db.Integer(), nullable=False)\n\ttotal_curious = db.Column(db.Integer(), nullable=False)\n\tcreated_at = db.Column(db.DateTime, default=db.func.current_timestamp())\n\tupdated_at = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp())\n\n\tdef __init__(self, tx_to, total_fraud, total_curious):\n\t\tself.tx_to = tx_to\n\t\tself.total_fraud = total_fraud\n\t\tself.total_curious = total_curious\n\n\tdef serialize(self):\n\t\treturn {\n\t\t\t'tx_to': self.tx_to,\n\t\t\t'total_fraud': self.total_fraud,\n\t\t\t'total_curious': self.total_curious\n\t\t}\n\n\tdef __repr__(self):\n\t\treturn '<ReportResult %r %r %r>' % (\n\t\t\t\tself.tx_to,\n\t\t\t\tself.total_fraud,\n\t\t\t\tself.total_curious\n\t\t)\n\n","repo_name":"DeactivatedWhatSoever/addsec","sub_path":"app/report/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28173255113","text":"from pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SQLContext\n\nconf = SparkConf()\nsc = SparkContext(conf=conf)\nsqlContext = SQLContext(sc)\n\ndf = sqlContext.read.json(\"/user/cloudera/spark/employees_by_dept.json\")\n\ndf2 = sqlContext.read.json(\"/user/cloudera/spark/departments.json\")\n\ndf.show()\n\ndf2.show()\n\ndf.join(df2,df.id_dept==df2.id). \\\n    select(df2.name, df.salary). \\\n    groupBy(df2.name). \\\n    avg('salary'). 
\\\n show()\n","repo_name":"xuezhizeng/CCA175-Exam-Preparation","sub_path":"03-Data Analysis/04-Joins/dataframe-api-join-json-files.py","file_name":"dataframe-api-join-json-files.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"74860427849","text":"#!/usr/bin/env python3\n\n\"\"\"\nImplement an HTTP web server in Python that knows how to run server-side\nCGI scripts coded in Python; serves files and scripts from current working\ndir; Python scripts must be stored in webdir\\cgi-bin or webdir\\htbin;\n\"\"\"\n\nimport os, sys\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\n\nwebdir = '.'\nport = 8080\n\nos.chdir(webdir)\nsrvraddr = (\"\", port)\nsrvobj = HTTPServer(srvraddr, CGIHTTPRequestHandler)\nsrvobj.serve_forever()\n","repo_name":"debsir/Learning","sub_path":"python/PP4E/Preview/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21072074842","text":"import sys\nimport os\nimport json\nimport csv\nfrom tqdm import tqdm\n\nfrom utils.lexical_metrics import get_base_metrics\nfrom utils.sentiment import get_vader_sentiment_scores\nfrom utils.responsivity import init_spacy, init_sentence_encoder, responsivity_metrics\n\n\nMETRIC_NAMES = [\"Conversation ID\", \"Words per hour\", \"Speech per turn\",\n \"Mean inter-speaker silence\", \"Turn taking balance\",\n \"Interruption rate\", \"Grade level\",\n \"MATTR lexical diversity\", \"Mean word length\",\n \"VADER sentiment\", \"Responsivity rate\"]\n\nnlp = init_spacy()\nencoder = init_sentence_encoder()\n\n\ndef get_metric_values_list(snippets):\n base_metrics = get_base_metrics(snippets)\n base_metric_vals = [x[1] for x in base_metrics]\n\n vader_sentiment = get_vader_sentiment_scores(snippets)\n avg_vader_sentiment = sum([x[1] for x in vader_sentiment])/len(vader_sentiment)\n\n _, _, response_speaker_turns = responsivity_metrics(snippets, nlp, encoder)\n responsivity_rate = len(response_speaker_turns)/len(snippets)\n\n return base_metric_vals + [round(avg_vader_sentiment, 3)] + [round(responsivity_rate,3)]\n\nif __name__ == '__main__':\n json_dir = sys.argv[1]\n csv_save_file = sys.argv[2]\n\n with open(csv_save_file, 'w', newline='') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter='\\t')\n csv_writer.writerow(METRIC_NAMES)\n\n for json_file in tqdm(sorted(os.listdir(json_dir), key=lambda x: int(x[:-5]))):\n with open(os.path.join(json_dir, json_file), 'r') as f:\n snippets = json.load(f)\n f.close()\n\n metric_vals = get_metric_values_list(snippets)\n csv_writer.writerow([json_file[:-5]]+metric_vals)\n","repo_name":"wonjune-kang/conversation-quality","sub_path":"extract_metrics.py","file_name":"extract_metrics.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71195827528","text":"import tensorflow as tf\nimport tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\n\ndef encoder(input_shape):\n\tx = inputs = tf.keras.Input(input_shape)\n\tx = tf.keras.layers.Flatten()(x)\n\tencoded_size = x.shape[-1]\n\tstd = tf.ones_like(x,dtype=tf.float32)*1e0\n\tx = tf.keras.layers.concatenate([x,std],axis=-1)\n\t# x = tf.keras.layers.Lambda(lambda x: tf.stack([x,tf.ones_like(x)*1e-7],2))(x)\n\t# x = tf.keras.layers.Reshape(target_shape=(2*encoded_size,))(x)\n\tx = 
tfp.layers.IndependentNormal(input_shape)(x)\n\treturn tf.keras.Model(inputs=inputs,outputs=x)\n\nhgt,wdt,n_ch = 24,24,1\nenc = encoder((hgt,wdt,n_ch))\nenc.summary()\n\ndef decoder(input_shape):\n\tx = inputs = tf.keras.Input(input_shape)\n\tx = tf.keras.layers.Reshape(target_shape=(24,24,1))(x)\n\treturn tf.keras.Model(inputs=inputs,outputs=x)\n\ndec = decoder((hgt*wdt*n_ch))\n\nG_A2B = tf.keras.Sequential()\nG_A2B.add(enc)\n#G_A2B.add(dec)\n\nA_sc = 1e1\nA_zero_horiz = tf.zeros((1,hgt//3,wdt,1),dtype=tf.float32)\nA_zero = tf.zeros((1,hgt//3,wdt//3,1),dtype=tf.float32)\nA_one = tf.ones((1,hgt//3,wdt//3,1),dtype=tf.float32)*3*A_sc/4\nA_aux = tf.concat([A_zero,A_one,A_zero],axis=2)\nA = tf.concat([A_zero_horiz,A_aux,A_zero_horiz],axis=1)\nB = G_A2B(A)\n\nprint('A shape:',A.shape)\nprint('B shape:',B.shape)\n\nfig, axs = plt.subplots(figsize=(6, 3), nrows=1, ncols=2)\nA_plt = axs[0].imshow(tf.squeeze(A), cmap='twilight', vmin=-A_sc, vmax=A_sc)\nfig.colorbar(A_plt, ax=axs[0])\naxs[0].axis('off')\nB_plt = axs[1].imshow(tf.squeeze(B), cmap='twilight', vmin=-A_sc, vmax=A_sc)\nfig.colorbar(B_plt, ax=axs[1])\naxs[1].axis('off')\nplt.show()","repo_name":"jpmeneses/IDEAL-GAN","sub_path":"flatten-test.py","file_name":"flatten-test.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35138082149","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom radar import make_radar\n\ndef organize_playdata():\n \"\"\"\n Organizes defensive play-type data from 2015-16.\n \n Returns:\n plays (dict): dict of individual play-type DataFrames\n key-vals are {play-type (str): data (pd.DataFrame)}\n all_data (pd.DataFrame): DataFrame of all play-types joined on player name\n ave_ppp (dict): Dict of {play-type: average_ppp}\n Example key:val {'handoff': 0.870}\n \"\"\"\n\n # Read individual play types into Pandas DataFrame\n handoff = pd.read_csv('../data/handoff.csv')\n offscreen = pd.read_csv('../data/offscreen.csv')\n postup = pd.read_csv('../data/postup.csv')\n prrm = pd.read_csv('../data/prrm.csv')\n isolation = pd.read_csv('../data/isolation.csv')\n prbh = pd.read_csv('../data/prbh.csv')\n spotup = pd.read_csv('../data/spotup.csv')\n \n # Get all DataFrames into dict\n plays = {'handoff': handoff, \n 'offscreen': offscreen, \n 'postup': postup, \n 'prrm': prrm, \n 'isolation': isolation, \n 'prbh': prbh, \n 'spotup': spotup}\n \n # Get average PPP for each play\n ave_ppp = {}\n for play in plays:\n ave_ppp[play] = get_ave_ppp(plays[play])\n \n # Add \"Competency\" to DataFrame\n # A player is \"Competent\" to defend a play if they have\n # defended it more than 25 times and keep it below league ave\n def add_competency(df, play):\n df['COMPETENT' + play] = (df.PPP < ave_ppp[play]) & (df.POSS > 25)\n return df\n \n for play in plays:\n plays[play] = add_competency(plays[play], play)\n plays[play]['PPP' + play] = plays[play]['PPP']\n \n ##########################\n #### BEGIN MERGE HELL ####\n ##########################\n \n joined1 = pd.merge(isolation, postup, on='PLAYER', how='outer')\n joined2 = pd.merge(joined1, spotup, on='PLAYER', how='outer')\n joined3 = pd.merge(joined2, handoff, on='PLAYER', how='outer')\n joined4 = pd.merge(joined3, prrm, on='PLAYER', how='outer')\n joined5 = pd.merge(joined4, prbh, on='PLAYER', how='outer')\n joined = pd.merge(joined5, offscreen, on='PLAYER', how='outer')\n all_data = joined.fillna(0)\n \n # Count total number of plays each player 
is competent to defend\n    all_data['TOTAL COMPETENT'] = all_data['COMPETENTspotup'] + \\\n                                  all_data['COMPETENThandoff'] + \\\n                                  all_data['COMPETENToffscreen'] + \\\n                                  all_data['COMPETENTpostup'] + \\\n                                  all_data['COMPETENTprbh'] + \\\n                                  all_data['COMPETENTprrm'] + \\\n                                  all_data['COMPETENTisolation']\n    \n    return (plays, all_data, ave_ppp)\n\ndef get_ave_ppp(df):\n    # Calculates Average PPP\n    return (df.PPP * df.POSS).sum() / df.POSS.sum()\n    \ndef prelim_plots(plays_dict, all_data):\n    \"\"\"\n    Some preliminary plots\n    \n    - How many possessions did each player have of each play type?\n    - How much noise is there when a player has few possessions?\n    - How many players are competent at 1 play type? 6 play types? etc.\n    \"\"\"\n    plt.figure(figsize=(5,20))\n    plt.subplots_adjust(hspace=0.75)\n    for index, play in enumerate(plays_dict):\n        subplotnum = int('71' + str(index + 1))\n        plt.subplot(subplotnum)\n        plt.hist(plays_dict[play]['POSS'], bins=30)\n        plt.title(play)\n        plt.ylabel('Number of players')\n        plt.xlabel('Total Possessions')\n    plt.show()\n    \n    plt.figure(figsize=(5,20))\n    plt.subplots_adjust(hspace=0.75)\n    for index, play in enumerate(plays_dict):\n        subplotnum = int('71' + str(index + 1))\n        plt.subplot(subplotnum)\n        plt.scatter(plays_dict[play]['POSS'], plays_dict[play]['PPP'])\n        plt.title(play)\n        plt.ylabel('Average Points Per Possession')\n        plt.xlabel('Number of Possessions')\n    plt.show()\n\n    plt.figure()\n    competency_count = all_data.groupby('TOTAL COMPETENT').count()['PLAYER']\n    sns.barplot(x=list(range(7)), y=list(competency_count), color='grey') \n    plt.ylabel('Number of Players')\n    plt.xlabel('Number of Play-Types Defendable')\n    plt.show()\n\n\ndef prelim_questions(plays_dict, all_data):\n    \"\"\"\n    Scratch work for prelim questions.\n    \n    - Which players can defend 6 play-types?\n    \"\"\"\n    elite_defenders = all_data[all_data['TOTAL COMPETENT'] == 6]['PLAYER']\n    print('Defenders who can defend six play-types:')\n    print(elite_defenders)\n\n\ndef make_player_barplot(player_name, plays_dict, ave_dict): \n    \"\"\"\n    Makes bar plot of how well a player can defend each play-type\n    \"\"\"\n    league_ave = []\n    for index, play in enumerate(ave_dict.keys()):\n        league_ave.append(ave_dict[play])\n    x = []\n    y = []\n    for play in plays_dict:\n        df = plays_dict[play]\n        df = df[df['PLAYER'] == player_name]\n        y.append(float( df['PPP'] - ave_dict[play]))\n        x.append(play)\n    sns.barplot(x=x, y=y, color='grey')\n    plt.ylabel('Points per possession (relative to league ave)')\n    plt.title(player_name)\n\nif __name__ == '__main__':\n    # Organize Data\n    plays, all_data, ave_ppp = organize_playdata()\n    \n    ##########################\n    ### Some example plots ###\n    ##########################\n    \n    #prelim_questions(plays, all_data)\n    #make_player_barplot('Klay Thompson', plays, ave_ppp)\n    #make_radar('Damian Lillard', all_data, ave_ppp)\n    #make_radar('Mason Plumlee', all_data, ave_ppp)\n    #prelim_plots(plays, all_data)\n","repo_name":"pointstoplots/chris-analysis","sub_path":"scripts/explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38801264973","text":"\"\"\"\nThis is to test if MWAXSubfileDistributor correctly reads the tonnes of\nconfig from a \"beamformer\" config file.\n\"\"\"\nimport os\nfrom mwax_mover.mwax_subfile_distributor import MWAXSubfileDistributor\n\nTEST_BASE_PATH = \"tests/mock_mwax_bf\"\nTEST_CONFIG_FILE = \"tests/mwax_subfile_distributor_beamformer_test.cfg\"\nTEST_BEAMFORMER_SETTINGS_FILE = 
\"tests/beamformer_settings.txt\"\n\n\ndef get_base_path() -> str:\n \"\"\"Utility function to get the base path for these tests\"\"\"\n return os.path.join(os.getcwd(), TEST_BASE_PATH)\n\n\ndef check_and_make_dir(path):\n \"\"\"If dir does not exist, make it\"\"\"\n if not os.path.exists(path):\n print(f\"{path} not found. Creating {path}\")\n os.mkdir(path)\n\n\ndef setup_test_dirs():\n \"\"\"Gets the test dirs ready\"\"\"\n # Setup dirs first!\n # Make the base dir\n base_dir = get_base_path()\n check_and_make_dir(base_dir)\n\n # log path\n log_path = os.path.join(base_dir, \"logs\")\n check_and_make_dir(log_path)\n\n # subfile_incoming_path\n subfile_incoming_path = os.path.join(base_dir, \"dev_shm\")\n check_and_make_dir(subfile_incoming_path)\n\n # voltdata_incoming_path\n voltdata_incoming_path = os.path.join(base_dir, \"voltdata_incoming\")\n check_and_make_dir(voltdata_incoming_path)\n\n # voltdata_outgoing_path\n voltdata_outgoing_path = os.path.join(base_dir, \"voltdata_outgoing\")\n check_and_make_dir(voltdata_outgoing_path)\n\n # voltdata_dont_archive_path\n voltdata_dont_archive_path = os.path.join(\n base_dir, \"voltdata_dont_archive\"\n )\n check_and_make_dir(voltdata_dont_archive_path)\n\n # fil_outgoing_path\n fildata_path = os.path.join(base_dir, \"fildata_path\")\n check_and_make_dir(fildata_path)\n\n\ndef test_beamformer_config_file():\n \"\"\"Tests that SubfileDistributor reads a beamformer config file ok\"\"\"\n # Setup all the paths\n setup_test_dirs()\n\n # This will test mwax_subfile_distributor based\n # on correlator_test.cfg\n base_dir = TEST_BASE_PATH\n\n # Start mwax_subfile_distributor using our test config\n msd = MWAXSubfileDistributor()\n\n # Override the hostname\n msd.hostname = \"test_server\"\n\n # Call to read config <-- this is what we're testing!\n msd.initialise(TEST_CONFIG_FILE)\n\n #\n # Now confirm the params all match the config file\n #\n\n # mwax_mover section\n assert msd.cfg_log_path == os.path.join(base_dir, \"logs\")\n assert msd.cfg_webserver_port == \"9998\"\n assert msd.cfg_health_multicast_interface_name == \"eth0\"\n assert msd.cfg_health_multicast_ip == \"224.234.0.0\"\n assert msd.cfg_health_multicast_port == 8666\n assert msd.cfg_health_multicast_hops == 1\n assert msd.cfg_subfile_incoming_path == os.path.join(base_dir, \"dev_shm\")\n assert msd.cfg_voltdata_incoming_path == os.path.join(\n base_dir, \"voltdata_incoming\"\n )\n assert msd.cfg_voltdata_outgoing_path == os.path.join(\n base_dir, \"voltdata_outgoing\"\n )\n assert msd.cfg_voltdata_dont_archive_path == os.path.join(\n base_dir, \"voltdata_dont_archive\"\n )\n assert msd.cfg_always_keep_subfiles == 0\n assert msd.cfg_archive_command_timeout_sec == 300\n assert msd.cfg_psrdada_timeout_sec == 32\n assert msd.cfg_copy_subfile_to_disk_timeout_sec == 120\n assert msd.cfg_archiving_enabled == 0\n\n # beamformer section\n assert msd.cfg_bf_ringbuffer_key == \"0x1234\"\n assert msd.cfg_bf_fildata_path == os.path.join(base_dir, \"fildata_path\")\n assert msd.cfg_bf_settings_path == TEST_BEAMFORMER_SETTINGS_FILE\n\n # test_server section\n assert (\n msd.cfg_bf_archive_destination_host\n == \"host2.destination.com://dest/path\"\n )\n assert msd.cfg_bf_archive_destination_port == \"1094\"\n assert (\n msd.cfg_bf_archive_destination_enabled is False\n ) # this is due to archiving enabled=0\n assert msd.cfg_bf_numa_node == -1\n assert msd.cfg_bf_archive_command_numa_node == 
-1\n","repo_name":"MWATelescope/mwax_mover","sub_path":"tests/mwax_subfile_distributor_beamformer_test.py","file_name":"mwax_subfile_distributor_beamformer_test.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"14319959553","text":"\n# count triplet such that il[j]>l[k]\ndef left_maximum(l,m):\n\tcount=0\n\tfor i in range(len(l)):\n\t\tif l[i]>m:\n\t\t\tcount+=1\n\treturn count\n\t\ndef right_minimum(l,m):\n\tcount=0\n\tfor i in range(len(l)):\n\t\tif l[i] List[Optional[TreeNode]]:\n # [l, r]\n def generateDFS(l, r):\n res = []\n if l > r:\n res.append(None)\n return res\n\n # choose i as root\n for i in range(l, r+1):\n leftTree = generateDFS(l, i-1)\n rightTree = generateDFS(i+1, r)\n\n for leftNode in leftTree:\n for rightNode in rightTree:\n root = TreeNode(i)\n root.left = leftNode\n root.right = rightNode\n res.append(root)\n return res\n \n return generateDFS(1, n)","repo_name":"Vergil0327/leetcode-history","sub_path":"Trees/95. Unique Binary Search Trees II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31829104891","text":"import random\nimport numpy as np\nimport pandas as pd\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout\nfrom keras.layers import Activation\nfrom keras import backend as K\n\n\n# def AmaderRelu(x):\n# return K.relu(x, max_value=1.0, threshold=0.01)\n\n\nclass GameLearner:\n def __init__(self):\n self.reward = 0\n self.gamma = 0.8\n self.alpha = 0.8\n self.short_memory = np.array([])\n self.learning_rate = 0.0005\n self.model = self.network()\n # self.model = self.network(\"weights.hdf5\")\n self.epsilon = 0\n self.actual = []\n self.memory = []\n\n def get_state(self, game):\n state = [game.eggCount, game.floor, game.progressing]\n return np.asarray(state)\n\n def set_reward(self, game):\n self.reward = 0\n extraPenalty = game.getRewardVal()\n # if game is won\n if game.targetReached():\n # if 1 egg remains\n if game.eggCount == 1:\n self.reward = 100\n # if 0 eggs remain\n elif game.eggCount == 0:\n self.reward = 50\n # if an egg was broken and it was a wrong guess\n elif game.isStateChange():\n # if the agent is guessing towards the correct answer\n if game.progressing:\n if game.eggCount == 1:\n self.reward = -(5 + extraPenalty)\n elif game.eggCount == 0:\n self.reward = -(15 + extraPenalty) #game end\n # if the agent is guessing away from the correct answer\n else:\n if game.eggCount == 1:\n self.reward = -(20 + extraPenalty)\n elif game.eggCount == 0:\n self.reward = -(35 + extraPenalty) #game end\n # if the egg did not break\n else:\n # if the agent is guessing towards the correct answer\n if game.progressing:\n if game.eggCount == 2:\n self.reward = -5 + int(100/extraPenalty)\n elif game.eggCount == 1:\n self.reward = -10 + int(100/extraPenalty)\n # if the agent is guessing away from the correct answer\n else:\n self.reward = - (5 + extraPenalty)\n return self.reward\n\n\n def network(self, weights=None):\n model = Sequential()\n model.add(Dense(output_dim=120, activation='relu', input_dim=3))\n model.add(Dropout(0.15))\n model.add(Dense(output_dim=120, activation='relu'))\n model.add(Dropout(0.15))\n model.add(Dense(output_dim=120, activation='relu'))\n model.add(Dropout(0.15))\n # output is 100 values, each value describing the 
probable reward on dropping the egg from that floor\n # model.add(Dense(output_dim=100, activation='softmax'))\n # opt = Adam(self.learning_rate)\n\n\n model.add(Dense(input_dim=120, output_dim=100, activation='relu'))\n opt = RMSprop(lr=self.learning_rate)\n\n model.compile(loss='mse', optimizer=opt)\n\n if weights:\n model.load_weights(weights)\n return model\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def replay_new(self, memory):\n if len(memory) > 1000:\n minibatch = random.sample(memory, 1000)\n else:\n minibatch = memory\n for state, action, reward, next_state, done in minibatch:\n target = reward\n if not done:\n target = reward + self.gamma * np.amax(self.model.predict(next_state.reshape((1, 3)))[0])\n target_f = self.model.predict(np.array([state]))\n target_f[0][np.argmax(action)] = target\n self.model.fit(state.reshape((1, 3)), target_f, epochs=1, verbose=0)\n\n def train_short_memory(self, state, action, reward, next_state, done):\n target = reward\n if not done:\n pred = self.model.predict(next_state.reshape((1, 3)))[0]\n #alpha is the learning rate\n target = self.alpha * (reward + self.gamma * np.amax(pred))\n target_f = self.model.predict(state.reshape((1, 3)))\n target_f[0][np.argmax(action)] = target\n self.model.fit(state.reshape((1, 3)), target_f, epochs=1, verbose=0)","repo_name":"souptikmakarov/Egg_Drop_Game","sub_path":"GameLearner.py","file_name":"GameLearner.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22473981604","text":"import unittest\n\ndata = ((\"sadbutsad\", \"sad\", 0), (\"leetcode\", \"leeto\", -1))\n\n\nclass Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n needle_len = len(needle)\n for index in range(0, len(haystack) - needle_len + 1):\n if haystack[index : index + needle_len] == needle:\n return index\n return -1\n\n\nclass TestCase(unittest.TestCase):\n def test_solution(self):\n s = Solution()\n\n for haystack, needle, index in data:\n self.assertEqual(index, s.strStr(haystack, needle))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cybernextgen/leetcode","sub_path":"medium/28-find-the-index-of-the-first-occurrence-in-a-string.py","file_name":"28-find-the-index-of-the-first-occurrence-in-a-string.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5889023516","text":"#imports\nimport pathlib, cv2, copy\nimport numpy as np\nfrom ...utilities.miscfileio import cd\nfrom ...utilities.img_file_io import smooth_image_worker, im3writeraw, write_image_to_file, get_raw_as_hwl\nfrom .config import CONST\nfrom .plotting import do_masking_plots_for_image\nfrom .utilities import LabelledMaskRegion, get_enumerated_mask, get_size_filtered_mask, get_thresholded_image_compiled\nfrom .utilities import normalize_compiled, get_morphed_and_filtered_mask, get_exclusive_mask\nfrom .utilities import get_image_layer_local_variance_of_normalized_laplacian, get_double_thresholded_image_compiled\n\nclass ImageMask() :\n \"\"\"\n Class to store and work with a mask for an image\n \"\"\"\n\n #################### PROPERTIES ####################\n\n @property\n def packed_tissue_mask(self) : #the binary tissue mask packed using np.packbits\n if self.__tissue_mask is None :\n raise RuntimeError('ERROR: packed_tissue_mask called before setting a 
tissue mask')\n return np.packbits(self.__tissue_mask)\n @property\n def compressed_mask(self): #the compressed mask with (# layers groups)+1 layers\n if self.__compressed_mask is None :\n raise RuntimeError('ERROR: compressed_mask called without first creating a mask!')\n return self.__compressed_mask\n @property\n def onehot_mask(self) : #the mask with good tissue=1, everything else=0\n return (np.where(self.uncompressed_full_mask==1,1,0)).astype(np.uint8)\n @property\n def uncompressed_full_mask(self): #the uncompressed mask with the real number of image layers\n if self.__compressed_mask is None :\n raise RuntimeError('ERROR: uncompressed_full_mask called without first creating a mask!')\n nlayers = 0\n for lgb in self.__layer_groups.values() :\n if lgb[1]>nlayers :\n nlayers=lgb[1]\n uncompressed_mask = np.ones((*self.__compressed_mask.shape[:-1],nlayers),dtype=np.uint8)\n for lgi,lgb in enumerate(self.__layer_groups.values()) :\n for ln in range(lgb[0],lgb[1]+1) :\n uncompressed_mask[:,:,ln-1] = self.__compressed_mask[:,:,lgi+1]\n return uncompressed_mask\n @property\n def labelled_mask_regions(self):\n return self._labelled_mask_regions #the list of labelled mask region objects for this mask\n\n #################### PUBLIC FUNCTIONS ####################\n\n def __init__(self,im_array,layer_groups,brightest_layers,im_key,bg_thresholds,norm_ets) :\n \"\"\"\n im_array = the multilayer image array whose mask should be created \n (may already be corrected to a set of exposure times)\n layer_groups = a dictionary of the different image layer groups and their bounds\n brightest_layers = the brightest layers in each group (used for plotting)\n im_key = the string representing the key of the image filename \n (used as a prepend to the masking file name and in labelled mask regions)\n bg_thresholds = a list of the background intensity thresholds in counts in each image layer\n norm_ets = a list of the exposure times to which the image layers have been normalized \n\n The last three arguments are only needed (and the last two are required) to save plots for this image\n \"\"\"\n #always start out using the GPU\n self.use_gpu = True\n #set the layer groups for the image\n self.__layer_groups=layer_groups\n #set the number of layers in each group that can be missed and still have the region flagged\n self.__fold_flag_cuts = {}\n for lgn,lgb in self.__layer_groups.items() :\n nlayers = lgb[1]-lgb[0]+1\n if nlayers<6 :\n self.__fold_flag_cuts[lgn] = 0\n elif nlayers<9 :\n self.__fold_flag_cuts[lgn] = 1\n else :\n if lgn.startswith('vectra') :\n self.__fold_flag_cuts[lgn] = 3\n elif lgn.startswith('polaris') :\n self.__fold_flag_cuts[lgn] = 1\n else :\n raise ValueError(f'Could not determine fold flag cuts for layer group {lgn} with bounds {lgb}')\n #set which layers are the brightest\n self.__bright_layers=brightest_layers\n #apply smoothing to Vectra images only\n microscope_name = ((list(self.__layer_groups.keys())[0]).split('_'))[0]\n if microscope_name=='vectra' :\n self.__blur_mask_sm_img_array = smooth_image_worker(im_array,CONST.BLUR_MASK_SMOOTHING_SIGMA,self.use_gpu)\n elif microscope_name=='polaris' :\n self.__blur_mask_sm_img_array = im_array\n else :\n raise ValueError(f'ERROR: unrecognized microscope name \"{microscope_name}\"!')\n #set some other variables\n self.__im_array = im_array\n self.__image_key = im_key\n self.__bg_thresholds = np.array(bg_thresholds)\n self.__norm_ets = np.array(norm_ets)\n #create the tissue mask\n self.__tissue_mask = self.__get_image_tissue_mask()\n 
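#note: the binary tissue mask built above (tissue=1, background=0) is reused\n #below when morphing and size-filtering the blur-mask regions\n 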
#create the blur mask (possibly with some plots)\n self.__blur_mask = self.__get_image_blur_mask()\n #create the saturation masks (one for each layer group)\n self.__saturation_masks = self.__get_image_saturation_masks()\n #make the compressed mask and the list of labelled mask regions\n self.__make_compressed_mask_and_list_of_mask_regions()\n\n def save_mask_files(self,savedir) :\n \"\"\"\n Write out the actual mask files to the path given by savedir\n \"\"\"\n #if there is anything flagged in the final blur and saturation masks, write out the compressed mask\n is_masked = np.min(self.__blur_mask)<1\n if not is_masked :\n for lgsm in self.__saturation_masks.values() :\n if np.min(lgsm)<1 :\n is_masked=True\n break\n if not savedir.is_dir() :\n savedir.mkdir()\n with cd(savedir) :\n im3writeraw(f'{self.__image_key}_tissue_mask.bin',self.packed_tissue_mask)\n if is_masked :\n write_image_to_file(self.__compressed_mask,f'{self.__image_key}_full_mask.bin',dtype=np.uint8)\n\n def save_plots(self,orig_ets,et_hists_and_bins,savedir=None) :\n \"\"\"\n Make and save the sheet of plots for this image if requested\n\n orig_ets = a list of the given image's ORIGINAL exposure times in each layer \n (before any corrections were applied) \n et_hists_and_bins = a dict of exposure time histograms and their bins in each layer group. \n savedir = path to the directory in which the plot(s) should be saved \n (if None the plots will be written in the current directory)\n \"\"\"\n all_plots = []\n for lgi,(lgn,lgb) in enumerate(self.__layer_groups.items()) :\n fold_nlv_cut = CONST.FOLD_NLV_CUTS[lgn]\n fold_nlv_max_mean = CONST.FOLD_MAX_MEANS[lgn]\n fold_flag_cut = self.__fold_flag_cuts[lgn]\n group_blur_mask,stacked_masks = self.__get_image_layer_group_blur_mask(lgn,fold_nlv_cut,\n fold_nlv_max_mean,fold_flag_cut)\n plot_img_layer = self.__im_array[:,:,self.__bright_layers[lgi]]\n sorted_pil = np.sort(plot_img_layer[group_blur_mask==1].flatten())\n if len(sorted_pil)>0 :\n pil_max = sorted_pil[int(0.95*len(sorted_pil))]; pil_min = sorted_pil[0]\n else :\n pil_max = np.max(plot_img_layer); pil_min = np.min(plot_img_layer)\n norm = 255./(pil_max-pil_min)\n im_c = (np.clip(norm*(plot_img_layer-pil_min),0,255)).astype(np.uint8)\n overlay_c = np.array([im_c,im_c*group_blur_mask,im_c*group_blur_mask]).transpose(1,2,0)\n plots = [{'image':plot_img_layer,'title':f'raw IMAGE layer {self.__bright_layers[lgi]}'},\n {'image':overlay_c,'title':f'layer {lgb[0]}-{lgb[1]} blur mask overlay (clipped)'}]\n plots.append({'bar':et_hists_and_bins[lgn][0],\n 'bins':et_hists_and_bins[lgn][1],\n 'xlabel':f'layer {lgb[0]}-{lgb[1]} exposure times (ms)',\n 'line_at':orig_ets[lgb[0]-1]})\n plots.append({'image':self.__im_nlv[:,:,self.__bright_layers[lgi]],\n 'title':'local variance of normalized laplacian'})\n plots.append({'hist':self.__im_nlv[:,:,self.__bright_layers[lgi]].flatten(),\n 'xlabel':'variance of normalized laplacian',\n 'line_at':fold_nlv_cut})\n plots.append({'image':stacked_masks,\n 'title':f'stacked layer masks (cut at {fold_flag_cut})',\n 'cmap':'gist_ncar',\n 'vmin':0,'vmax':lgb[1]-lgb[0]+1})\n all_plots.append(plots)\n do_masking_plots_for_image(self.__image_key,self.__tissue_mask,all_plots,self.__compressed_mask,savedir)\n\n #unpack, reshape, and return a tissue mask from its packed mask file\n @staticmethod\n def unpack_tissue_mask(filepath,dimensions) :\n if not pathlib.Path(filepath).is_file() :\n raise FileNotFoundError(f'ERROR: tissue mask file {filepath} does not exist!')\n packed_mask = 
np.memmap(filepath,dtype=np.uint8,mode='r')\n return (np.unpackbits(packed_mask)).reshape(dimensions)\n\n #get a one-hot, fully-layered mask from a given blur/saturation mask filepath\n @staticmethod\n def onehot_mask_from_full_mask_file(samp,filepath) :\n if not pathlib.Path(filepath).is_file() :\n raise FileNotFoundError(f'ERROR: blur/saturation mask file {filepath} does not exist!')\n dimensions = (samp.fheight,samp.fwidth,samp.nlayersim3)\n read_mask = get_raw_as_hwl(filepath,*(dimensions[:-1]),len(samp.layer_groups)+1,dtype=np.uint8)\n return_mask = np.zeros(dimensions,dtype=np.uint8)\n for lgi,lgb in enumerate(samp.layer_groups.values()) :\n return_mask[:,:,lgb[0]-1:lgb[1]][read_mask[:,:,lgi+1]==1] = 1\n return return_mask\n\n #get a one-hot, fully-layered mask from a given blur/saturation mask filepath, ignoring regions flagged for blur\n @staticmethod\n def onehot_mask_from_full_mask_file_no_blur(samp,filepath) :\n if not pathlib.Path(filepath).is_file() :\n raise FileNotFoundError(f'ERROR: blur/saturation mask file {filepath} does not exist!')\n dimensions = (samp.fheight,samp.fwidth,samp.nlayersim3)\n read_mask = get_raw_as_hwl(filepath,*(dimensions[:-1]),len(samp.layer_groups)+1,dtype=np.uint8)\n max_blur_index = np.max(read_mask[:,:,0])\n return_mask = np.zeros(dimensions,dtype=np.uint8)\n for lgi,lgb in enumerate(samp.layer_groups.values()) :\n tissue_or_blur_slice = np.logical_or(read_mask[:,:,lgi+1]==1,\n np.logical_and(read_mask[:,:,lgi+1]>1,\n read_mask[:,:,lgi+1]<=max_blur_index))\n return_mask[:,:,lgb[0]-1:lgb[1]][tissue_or_blur_slice] = 1\n return return_mask\n\n #################### PRIVATE HELPER FUNCTIONS ####################\n\n def __get_image_tissue_mask(self) :\n \"\"\"\n return the fully-determined overall tissue mask as a 2d array of ones and zeroes for a given multilayer image\n \"\"\"\n #smooth the image\n sm_img_array = smooth_image_worker(self.__im_array,CONST.TISSUE_MASK_SMOOTHING_SIGMA,gpu=self.use_gpu)\n #threshold all the image layers\n thresholded_image = get_thresholded_image_compiled(sm_img_array,\n self.__bg_thresholds[np.newaxis,np.newaxis,:])\n #make masks for each individual layer\n layer_masks = []\n for li in range(self.__im_array.shape[-1]) :\n if self.use_gpu :\n try :\n #convert to UMat to use on the GPU\n layer_mask = cv2.UMat(thresholded_image[:,:,li])\n except Exception :\n self.use_gpu = False\n if not self.use_gpu :\n layer_mask = thresholded_image[:,:,li]\n #small close/open\n cv2.morphologyEx(layer_mask,cv2.MORPH_CLOSE,CONST.SMALL_CO_EL,layer_mask,borderType=cv2.BORDER_REPLICATE)\n cv2.morphologyEx(layer_mask,cv2.MORPH_OPEN,CONST.SMALL_CO_EL,layer_mask,borderType=cv2.BORDER_REPLICATE)\n if self.use_gpu :\n layer_mask = layer_mask.get()\n layer_masks.append(layer_mask)\n #find the well-defined tissue and background in each layer group\n overall_tissue_mask = np.zeros_like(layer_masks[0])\n overall_background_mask = np.zeros_like(layer_masks[0])\n total_stacked_masks = np.zeros_like(layer_masks[0])\n #for each layer group\n for lgn,lgb in self.__layer_groups.items() :\n stacked_masks = np.zeros_like(layer_masks[0])\n for ln in range(lgb[0],lgb[1]+1) :\n stacked_masks+=layer_masks[ln-1]\n total_stacked_masks+=stacked_masks\n #well-defined tissue is anything called tissue in at least all but two layers\n overall_tissue_mask[stacked_masks>(lgb[1]-lgb[0]-1)]+=10 if lgn.endswith('dapi') else 1\n #well-defined background is anything called background in at least half the layers\n overall_background_mask[stacked_masks<(lgb[1]-lgb[0]+1)/2.]+=10 
if lgn.endswith('dapi') else 1\n #threshold tissue/background masks to include only those from the DAPI and at least one other layer group\n overall_tissue_mask = get_thresholded_image_compiled(overall_tissue_mask,10)\n overall_background_mask = get_thresholded_image_compiled(overall_background_mask,10)\n #final mask has tissue=1, background=0\n final_mask = np.zeros_like(layer_masks[0])+2\n final_mask[overall_tissue_mask==1] = 1\n final_mask[overall_background_mask==1] = 0\n #anything left over is signal if it's stacked in at least 60% of the total number of layers\n thresholded_stacked_masks = get_thresholded_image_compiled(total_stacked_masks,\n 0.6*self.__im_array.shape[-1])\n final_mask[final_mask==2] = thresholded_stacked_masks[final_mask==2]\n if np.min(final_mask) != np.max(final_mask) :\n #filter the tissue and background portions to get rid of the small islands\n final_mask = get_size_filtered_mask(final_mask,min_size=CONST.TISSUE_MIN_SIZE)\n if self.use_gpu :\n try :\n #convert to UMat\n final_mask = cv2.UMat(final_mask)\n except Exception :\n self.use_gpu = False\n #medium size close/open to smooth out edges\n cv2.morphologyEx(final_mask,cv2.MORPH_CLOSE,CONST.MEDIUM_CO_EL,final_mask,borderType=cv2.BORDER_REPLICATE)\n cv2.morphologyEx(final_mask,cv2.MORPH_OPEN,CONST.MEDIUM_CO_EL,final_mask,borderType=cv2.BORDER_REPLICATE)\n if self.use_gpu :\n final_mask = final_mask.get()\n return final_mask\n\n def __get_image_blur_mask(self) :\n \"\"\"\n return the single blur mask for an image (multilayer \"tissue fold\" blur and single-layer \"dust\" blur combined)\n \"\"\"\n #first set the normalized laplacian variance of the image\n self.__im_nlv = np.zeros(self.__im_array.shape,dtype=np.float32)\n for li in range(self.__im_array.shape[-1]) :\n try :\n layer_nlv = get_image_layer_local_variance_of_normalized_laplacian(self.__blur_mask_sm_img_array[:,:,li],\n self.use_gpu)\n except Exception :\n self.use_gpu = False\n layer_nlv = get_image_layer_local_variance_of_normalized_laplacian(self.__blur_mask_sm_img_array[:,:,li],\n self.use_gpu)\n self.__im_nlv[:,:,li] = layer_nlv\n #then set the local mean of the normalized laplacian variance\n self.__im_nlv_loc_mean = np.zeros_like(self.__im_nlv)\n for li in range(self.__im_array.shape[-1]) :\n layer_nlv_loc_mean = np.empty_like(self.__im_nlv[:,:,li])\n if self.use_gpu :\n try :\n layer_nlv_loc_mean = cv2.UMat(layer_nlv_loc_mean)\n except Exception :\n self.use_gpu = False\n cv2.filter2D(self.__im_nlv[:,:,li],cv2.CV_32F,CONST.SMALLER_WINDOW_EL,layer_nlv_loc_mean,\n borderType=cv2.BORDER_REFLECT)\n if self.use_gpu :\n layer_nlv_loc_mean=layer_nlv_loc_mean.get()\n layer_nlv_loc_mean=normalize_compiled(layer_nlv_loc_mean,np.sum(CONST.SMALLER_WINDOW_EL))\n self.__im_nlv_loc_mean[:,:,li] = layer_nlv_loc_mean\n #find the tissue fold mask, beginning with each layer group separately\n fold_masks_by_layer_group = {}\n for lgn in self.__layer_groups.keys() :\n lgtfm,_ = self.__get_image_layer_group_blur_mask(lgn,\n CONST.FOLD_NLV_CUTS[lgn],\n CONST.FOLD_MAX_MEANS[lgn],\n self.__fold_flag_cuts[lgn])\n fold_masks_by_layer_group[lgn] = lgtfm\n #combine the layer group blur masks to get the final mask for all layers\n stacked_fold_masks = np.zeros_like(fold_masks_by_layer_group[list(self.__layer_groups.keys())[0]])\n for lgn,layer_group_fold_mask in fold_masks_by_layer_group.items() :\n to_add = 10 if lgn.endswith('dapi') or lgn.endswith('fitc') else 1\n stacked_fold_masks[layer_group_fold_mask==0]+=to_add\n #flag anything flagged in at least one of the DAPI 
and FITC layer groups plus at least 3 other layer groups, \n # or both the DAPI and FITC layer groups\n overall_fold_mask = get_thresholded_image_compiled(stacked_fold_masks,12,invert=True)\n #morph and filter the mask using the common operations\n try :\n tissue_fold_mask = get_morphed_and_filtered_mask(overall_fold_mask,self.__tissue_mask,\n CONST.FOLD_MIN_PIXELS,CONST.FOLD_MIN_SIZE,self.use_gpu)\n except Exception :\n self.use_gpu = False\n tissue_fold_mask = get_morphed_and_filtered_mask(overall_fold_mask,self.__tissue_mask,\n CONST.FOLD_MIN_PIXELS,CONST.FOLD_MIN_SIZE,self.use_gpu)\n #get dust masks for the blurriest areas of the DAPI layer group\n dapi_group_name = None\n for lgn in self.__layer_groups.keys() :\n if lgn.endswith('dapi') :\n if dapi_group_name is not None :\n errmsg = f'ERROR: more than one DAPI layer group name found! Layer groups: {self.__layer_groups}'\n raise ValueError(errmsg)\n dapi_group_name = lgn\n if dapi_group_name is None :\n raise RuntimeError(f'ERROR: no DAPI layer group found! Layer groups: {self.__layer_groups}')\n dapi_layer_group = self.__layer_groups[dapi_group_name]\n n_layers_dust_flag_cut = 0.5*(dapi_layer_group[1]-dapi_layer_group[0]+1)\n dapi_dust_mask,_ = self.__get_image_layer_group_blur_mask(dapi_group_name,\n CONST.DUST_NLV_CUTS[dapi_group_name],\n CONST.DUST_MAX_MEANS[dapi_group_name],\n n_layers_dust_flag_cut)\n #same morphology transformations as for the multilayer blur masks\n try :\n morphed_dapi_dust_mask = get_morphed_and_filtered_mask(dapi_dust_mask,self.__tissue_mask,\n CONST.DUST_MIN_PIXELS,CONST.DUST_MIN_SIZE,\n self.use_gpu)\n except Exception :\n self.use_gpu = False\n morphed_dapi_dust_mask = get_morphed_and_filtered_mask(dapi_dust_mask,self.__tissue_mask,\n CONST.DUST_MIN_PIXELS,CONST.DUST_MIN_SIZE,\n self.use_gpu)\n #make sure any regions in that mask are sufficiently exclusive w.r.t. 
what's already flagged as blurry\n exclusive_dapi_dust_mask = get_exclusive_mask(morphed_dapi_dust_mask,tissue_fold_mask,0.25)\n #combine the multilayer and single layer blur masks into one by multiplying them together\n final_blur_mask = tissue_fold_mask*exclusive_dapi_dust_mask\n #return the blur mask\n return final_blur_mask\n\n def __get_image_layer_group_blur_mask(self,layer_group_name,nlv_cut,max_mean,n_layers_flag_cut) :\n \"\"\"\n return a blur mask for a given image layer group\n\n layer_group_name = the name of the layer group whose blur mask should be returned\n nlv_cut = the max value of the normalized laplacian variance below which \n a region should be flagged as blurred\n max_mean = the maximum allowed mean of the normalized laplacian variance within \n any window-sized region to allow the region to be flagged\n n_layers_flag_cut = the number of layers in the group that can remain unflagged \n while still calling the region overall blurry in this layer group\n \"\"\"\n #start by making a mask for every layer in the group\n stacked_masks = np.zeros(self.__im_array.shape[:-1],dtype=np.uint8)\n for ln in range(self.__layer_groups[layer_group_name][0],self.__layer_groups[layer_group_name][1]+1) :\n #get the local variance of the normalized laplacian image\n layer_nlv = self.__im_nlv[:,:,ln-1]\n #get the mean of those local normalized laplacian variance values in the window size\n layer_nlv_loc_mean = self.__im_nlv_loc_mean[:,:,ln-1]\n #threshold on the local variance of the normalized laplacian and its local mean to make a binary mask\n layer_mask = get_double_thresholded_image_compiled(layer_nlv,nlv_cut,layer_nlv_loc_mean,max_mean)\n if np.min(layer_mask) != np.max(layer_mask) :\n if self.use_gpu :\n try :\n #convert to UMat\n layer_mask = cv2.UMat(layer_mask)\n except Exception :\n self.use_gpu = False\n #small open/close to refine it\n cv2.morphologyEx(layer_mask,cv2.MORPH_OPEN,CONST.SMALL_CO_EL,layer_mask,\n borderType=cv2.BORDER_REPLICATE)\n cv2.morphologyEx(layer_mask,cv2.MORPH_CLOSE,CONST.SMALL_CO_EL,layer_mask,\n borderType=cv2.BORDER_REPLICATE)\n #erode by the smaller window element\n cv2.morphologyEx(layer_mask,cv2.MORPH_ERODE,CONST.SMALLER_WINDOW_EL,layer_mask,\n borderType=cv2.BORDER_REPLICATE)\n if self.use_gpu :\n layer_mask = layer_mask.get()\n #add it to the stack \n stacked_masks+=layer_mask\n #determine the final mask for this group by thresholding on how many individual layers contribute\n group_blur_mask = get_thresholded_image_compiled(stacked_masks,n_layers_flag_cut)\n #return the blur mask and the stack of masks for the layer group\n return group_blur_mask, stacked_masks\n\n def __get_image_saturation_masks(self) :\n \"\"\"\n Return the dict of saturation masks by layer group for a given image\n \"\"\"\n #normalize the image by its exposure time\n normalized_image_arr = normalize_compiled(self.__im_array,self.__norm_ets[np.newaxis,np.newaxis,:])\n #smooth the normalized image\n sm_n_image_arr = smooth_image_worker(normalized_image_arr,CONST.TISSUE_MASK_SMOOTHING_SIGMA,gpu=self.use_gpu)\n #make masks for each layer group\n layer_group_saturation_masks = {}\n for lgn,lgb in self.__layer_groups.items() :\n #threshold the image layers to make a binary mask and sum them\n cut_at = CONST.SATURATION_INTENSITY_CUTS[lgn]\n stacked_masks = np.sum((np.where(sm_n_image_arr[:,:,lgb[0]-1:lgb[1]]>cut_at,0,1)).astype(np.uint8),axis=2)\n #the final mask is anything flagged in ANY layer\n group_mask = get_thresholded_image_compiled(stacked_masks,lgb[1]-lgb[0]) \n if 
np.min(group_mask)!=np.max(group_mask) :\n if self.use_gpu :\n try :\n #convert to UMat\n group_mask = cv2.UMat(group_mask)\n except Exception :\n self.use_gpu=False\n #medium sized open/close to refine it\n cv2.morphologyEx(group_mask,cv2.MORPH_OPEN,CONST.MEDIUM_CO_EL,group_mask,\n borderType=cv2.BORDER_REPLICATE)\n cv2.morphologyEx(group_mask,cv2.MORPH_CLOSE,CONST.MEDIUM_CO_EL,group_mask,\n borderType=cv2.BORDER_REPLICATE)\n if self.use_gpu :\n group_mask = group_mask.get()\n #filter the mask for the total number of pixels and regions by the minimum size\n group_mask = get_size_filtered_mask(group_mask,CONST.SATURATION_MIN_SIZE)\n if np.sum(group_mask==0) int:\n count = collections.defaultdict(int)\n for line in wall:\n linesum = 0\n for brick in line[:-1]:\n linesum+= brick\n count[linesum]+=1\n if not len(count):\n return len(wall)\n return len(wall)-max(count.values())\n ","repo_name":"hyperminji/LeetCode","sub_path":"554-brick-wall/554-brick-wall.py","file_name":"554-brick-wall.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1128065144","text":"# imports\nimport pytest # used for our unit tests\n\n# function to test\ndef checkLeapYear(year):\n # Python program to check if year is a leap year or not\n\n # reject non-integer input so the TypeError tests below hold\n # (floats like 2000.0 would otherwise pass the modulo checks)\n if not isinstance(year, int):\n raise TypeError(\"year must be an int\")\n\n # divided by 100 means century year (ending with 00)\n # century year divided by 400 is leap year\n if (year % 400 == 0) and (year % 100 == 0):\n return \"{0} is a leap year\".format(year)\n\n # not divided by 100 means not a century year\n # year divided by 4 is a leap year\n elif (year % 4 == 0) and (year % 100 != 0):\n return \"{0} is a leap year\".format(year)\n\n # if not divided by both 400 (century year) and 4 (not century year)\n # year is not leap year\n else:\n return \"{0} is not a leap year\".format(year)\n\n# unit tests\n# below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator\n\n@pytest.mark.parametrize(\"year, expected\", [\n (2001, \"2001 is not a leap year\"), # Regular year\n (2004, \"2004 is a leap year\"), # Leap year\n (1900, \"1900 is not a leap year\"), # Century year\n (2000, \"2000 is a leap year\"), # Century leap year\n (-4, \"-4 is a leap year\"), # BC year\n (1000000, \"1000000 is a leap year\"), # Very large year\n (-1000000, \"-1000000 is a leap year\"), # Very small year\n])\ndef test_checkLeapYear(year, expected):\n assert checkLeapYear(year) == expected\n\n@pytest.mark.parametrize(\"year\", [\n 2000.0, # float\n \"2000\", # string\n])\ndef test_checkLeapYear_type_error(year):\n with pytest.raises(TypeError):\n checkLeapYear(year)\n\ndef test_checkLeapYear_no_input():\n with pytest.raises(TypeError):\n checkLeapYear()","repo_name":"vaishali-vcs/LLM_UnitTestGenerator_Project","sub_path":"GPT4_Data/Output/test_46.py","file_name":"test_46.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3434035266","text":"class Solution:\n \"\"\"\n You are given an array of strings words and a string chars.\n A string is good if it can be formed by characters from chars\n (each character can only be used once).\n\n Return the sum of lengths of all good strings in words.\n\n Time: 17% (320ms)\n \"\"\"\n def countCharacters(self, words: List[str], chars: str) -> int:\n total = 0\n char_map = {}\n for ch in chars:\n if ch not in char_map:\n char_map[ch] = 1\n else:\n char_map[ch] += 1\n for word in words:\n flag = True\n 
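# a word is \"good\" iff each character's count in the word fits within its\n # count in chars; illustrative example: chars=\"atach\" covers \"cat\" and \"hat\" but not \"bt\"\n 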
for ch in word:\n if ch not in char_map or char_map[ch] < word.count(ch):\n flag = False\n total += len(word) if flag else 0\n return total\n","repo_name":"aumaro-nyc/leetcode","sub_path":"Arrays/1160.py","file_name":"1160.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28726114761","text":"import argparse\nimport json\nimport os\n\nimport requests\n\n# SPP_URL = \"http://pdf-layout-detection-service-dev.us-west-2.elasticbeanstalk.com/detect/\"\nSPP_URL = \"http://127.0.0.1:8080/detect\"\n\n\ndef get_parsed_arxiv_pdf(arxiv_id: str):\n return get_parsed_url_pdf(f\"https://arxiv.org/pdf/{arxiv_id}.pdf\")\n\n\ndef get_parsed_url_pdf(url: str):\n r = requests.get(SPP_URL, params={\"pdf_url\": url})\n if r.ok:\n layout = r.json()[\"layout\"]\n else:\n print(f\"Failed with status code: {r.status_code}\")\n layout = None\n return layout\n\n\ndef get_parsed_local_pdf(pdf_path: str):\n with open(pdf_path, \"rb\") as f:\n files = {\"pdf_file\": (f.name, f, \"multipart/form-data\")}\n r = requests.post(SPP_URL, files=files)\n if r.ok:\n layout = r.json()[\"layout\"]\n else:\n print(f\"Failed with status code: {r.status_code}\")\n layout = None\n return layout\n\n\ndef parse_folder_of_pdfs(input_dir, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n for file in os.scandir(input_dir):\n layout = get_parsed_local_pdf(file.path)\n with open(f\"{output_dir}/{os.path.splitext(file.name)[0]}.json\", \"w\") as out:\n json.dump(layout, out, indent=2)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--arxiv_id\", type=str, help=\"Specify arxiv_id of paper to run through spp\"\n )\n parser.add_argument(\n \"--input_dir\",\n type=str,\n help=\"Path to folder containing PDFs to run through spp\",\n )\n parser.add_argument(\n \"--output_dir\",\n type=str,\n help=\"Path to folder where spp should place PDF parse output\",\n default=\"data/spp-output\",\n )\n args = parser.parse_args()\n\n os.makedirs(args.output_dir, exist_ok=True)\n\n if args.arxiv_id:\n layout = get_parsed_arxiv_pdf(args.arxiv_id)\n with open(f\"{args.output_dir}/{args.arxiv_id}.json\", \"w\") as out:\n json.dump(layout, out, indent=2)\n elif args.input_dir:\n parse_folder_of_pdfs(args.input_dir, args.output_dir)\n","repo_name":"rayfok/scim-nlp","sub_path":"src/run_spp.py","file_name":"run_spp.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24378942854","text":"import warnings\nimport os\nimport sys\n\nfrom conda_build.environ import (\n conda_build_vars,\n python_vars,\n perl_vars,\n lua_vars,\n r_vars,\n system_vars,\n feature_list,\n LANGUAGES,\n)\nfrom conda_build.os_utils import external\nfrom conda_build.environ import get_git_info, get_hg_build_info, verify_git_repo\nfrom conda_build import utils\n\n\ndef meta_vars(meta, skip_build_id=False):\n d = {}\n for key, value in meta.get_value(\"build/script_env\", {}).items():\n if not value:\n warnings.warn(\n f\"The environment variable '{key}' is undefined.\",\n UserWarning,\n stacklevel=1,\n )\n else:\n d[key] = value\n\n folder = meta.get_value(\"source/0/folder\", \"\")\n repo_dir = os.path.join(meta.config.work_dir, folder)\n git_dir = os.path.join(repo_dir, \".git\")\n hg_dir = os.path.join(repo_dir, \".hg\")\n\n if not isinstance(git_dir, str):\n # On Windows, subprocess env can't handle unicode.\n git_dir = 
git_dir.encode(sys.getfilesystemencoding() or \"utf-8\")\n\n git_exe = external.find_executable(\"git\", meta.config.build_prefix)\n if git_exe and os.path.exists(git_dir):\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\n git_url = meta.get_value(\"source/0/git_url\")\n\n if os.path.exists(git_url):\n if sys.platform == \"win32\":\n git_url = utils.convert_unix_path_to_win(git_url)\n # If git_url is a relative path instead of a url, convert it to an abspath\n git_url = os.path.normpath(os.path.join(meta.path, git_url))\n\n _x = False\n\n if git_url:\n _x = verify_git_repo(\n git_exe,\n git_dir,\n git_url,\n meta.config.git_commits_since_tag,\n meta.config.debug,\n meta.get_value(\"source/0/git_rev\", \"HEAD\"),\n )\n\n if _x or meta.get_value(\"source/0/path\"):\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\n\n elif external.find_executable(\"hg\", meta.config.build_prefix) and os.path.exists(\n hg_dir\n ):\n d.update(get_hg_build_info(hg_dir))\n\n # use `get_value` to prevent early exit while name is still unresolved during rendering\n d[\"PKG_NAME\"] = meta.get_value(\"package/name\")\n d[\"PKG_VERSION\"] = meta.version()\n d[\"PKG_BUILDNUM\"] = str(meta.build_number())\n if meta.final and not skip_build_id:\n d[\"PKG_BUILD_STRING\"] = str(meta.build_id())\n d[\"PKG_HASH\"] = meta.hash_dependencies()\n else:\n d[\"PKG_BUILD_STRING\"] = \"placeholder\"\n d[\"PKG_HASH\"] = \"1234567\"\n d[\"RECIPE_DIR\"] = meta.path\n return d\n\n\ndef get_dict(\n m,\n prefix=None,\n for_env=True,\n skip_build_id=False,\n escape_backslash=False,\n variant=None,\n):\n if not prefix:\n prefix = m.config.host_prefix\n\n m.config._merge_build_host = m.build_is_host\n\n # conda-build specific vars\n d = conda_build_vars(prefix, m.config)\n\n # languages\n d.update(python_vars(m, prefix, escape_backslash))\n d.update(perl_vars(m, prefix, escape_backslash))\n d.update(lua_vars(m, prefix, escape_backslash))\n d.update(r_vars(m, prefix, escape_backslash))\n\n if m:\n d.update(meta_vars(m, skip_build_id=skip_build_id))\n\n # system\n d.update(system_vars(d, m, prefix))\n\n # features\n d.update({feat.upper(): str(int(value)) for feat, value in feature_list})\n\n variant = variant or m.config.variant\n for k, v in variant.items():\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\n d[k] = v\n return d\n","repo_name":"mamba-org/boa","sub_path":"boa/core/environ.py","file_name":"environ.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"16"} +{"seq_id":"6301844267","text":"import numpy as np\nimport skimage.transform as st\nimport scipy.interpolate as si\nimport scipy.ndimage as sni\nimport itertools\nfrom learning.cloth_funnels.geometry import get_center_affine, pixel_to_3d, transform_points, transform_pose, get_pointcloud\ndef camera_image_to_view(cam_img, tx_camera_view, img_shape=(128,128)):\n tf_camera_view = st.AffineTransform(matrix=tx_camera_view)\n result_img = st.warp(cam_img, tf_camera_view.inverse, output_shape=img_shape)\n return result_img\n\nclass ImageStackTransformer:\n \"\"\"\n Note: this class follows skimage.transform coordinate convention\n (x,y) x right, y down\n The rest of skimage uses (col, row) convention\n \"\"\"\n def __init__(self, img_shape=(128,128), \n rotations=np.linspace(-np.pi, np.pi, 17), \n scales=[1.0, 1.5, 2.0, 2.5, 3.0]):\n \"\"\"\n Create a stack of rotations * scales images.\n Rotation: counter-clockwise\n Scales: >1 
makes the object appear bigger\n \"\"\"\n assert len(img_shape) == 2\n stack_shape = (len(rotations) * len(scales),) + tuple(img_shape)\n\n transforms = list()\n self.transform_tuples = list(itertools.product(rotations, scales))\n\n for rot, scale in itertools.product(rotations, scales):\n # both skimage and torchvision use\n tf = get_center_affine(\n img_shape=img_shape, \n rotation=rot, scale=scale)\n tf.params = tf.params.astype(np.float32)\n transforms.append(tf)\n\n self.shape = stack_shape\n self.transforms = transforms\n self.rotations = rotations\n self.scales = scales\n \n def forward_img(self, img, mode='constant'):\n results = [st.warp(img, tf.inverse, mode=mode, preserve_range=True) for tf in self.transforms]\n stack = np.stack(results).astype(np.uint8)\n return stack\n \n def forward_raw(self, raw, tx_camera_view):\n img_shape = self.shape[1:]\n stack = np.empty(\n (len(self.transforms),) + img_shape + raw.shape[2:], \n dtype=raw.dtype)\n for i, tf in enumerate(self.transforms):\n ntf = st.AffineTransform(tf.params @ tx_camera_view)\n stack[i] = st.warp(raw, ntf.inverse, \n order=1,\n output_shape=img_shape,\n preserve_range=True)\n return stack\n \n def inverse_coord(self, stack_coord):\n \"\"\"\n Convert 3d stack coordinate integers to\n float coordinate in the original image\n \"\"\"\n return self.transforms[stack_coord[0]].inverse(stack_coord[1:])\n\n def get_inverse_coord_map(self):\n identity_map = np.moveaxis(\n np.indices(self.shape[1:], dtype=np.float32)[::-1],0,-1\n )\n\n maps = list()\n for tf in self.transforms:\n tx = np.linalg.inv(tf.params)\n r = transform_points(\n identity_map.reshape(-1,2), \n tx).reshape(identity_map.shape)\n maps.append(r)\n coord_stack = np.stack(maps)\n return coord_stack\n\n def get_world_coords_stack(self, depth, tx_camera_view, tx_world_camera, cam_intr):\n img_coords_stack = self.get_inverse_coord_map()\n raw_img_coords_stack = transform_points(\n img_coords_stack.reshape(-1,2), \n np.linalg.inv(tx_camera_view)).reshape(\n img_coords_stack.shape)\n\n # x,y\n # transform to world coord\n world_coords_stack = np.empty(\n img_coords_stack.shape[:-1]+(3,), \n dtype=np.float32)\n for i in range(len(img_coords_stack)):\n img_coords = raw_img_coords_stack[i]\n # skimage uses (x,y) coordinate, pixel_to_3d uses (y,x)\n coords_3d = pixel_to_3d(depth, img_coords.reshape(-1,2)[:,::-1], \n cam_pose=tx_world_camera, cam_intr=cam_intr)\n img_coords_3d = coords_3d.reshape(img_coords.shape[:-1] + (3,))\n world_coords_stack[i] = img_coords_3d\n return world_coords_stack\n\n\ndef is_coord_valid_robot(coords, tx_robot_world, \n reach_radius=0.93, near_radius=0.0755):\n \"\"\"\n max recommended \n reach_radius: 0.946 0.85\n near_radius: 0 0.0755\n\n Reference:\n https://www.universal-robots.com/articles/ur/application-installation/what-is-a-singularity/\n \"\"\"\n coords_robot = transform_points(coords, tx_robot_world)\n dist_3d = np.linalg.norm(coords_robot, axis=-1)\n dist_xy = np.linalg.norm(coords_robot[...,:2], axis=-1)\n is_valid = (dist_3d < reach_radius) & (dist_xy > near_radius)\n return is_valid\n\ndef is_coord_valid_table(coords, table_low=(-0.58,-0.88,-0.05), table_high=(0.58,0.87,0.2)):\n is_valid = np.ones(coords.shape[:-1], dtype=bool)\n for i in range(3):\n this_valid = (table_low[i] < coords[...,i]) & (coords[...,i] < table_high[i])\n is_valid = is_valid & this_valid\n return is_valid\n\n\ndef fill_nearest(depth_im, mask):\n coords = np.moveaxis(np.indices(depth_im.shape),0,-1)\n interp = si.NearestNDInterpolator(coords[~mask], depth_im[~mask])\n 
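# evaluating the interpolator at the masked coordinates fills each masked\n # pixel with the value of its nearest unmasked neighbour\n 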
out_im = depth_im.copy()\n out_im[mask] = interp(coords[mask])\n return out_im\n\ndef get_offset_stack(stack, offset=16):\n \"\"\"\n Assuming (N,H,W,D)\n up: move up offset pixels\n down: move down offset pixels\n \"\"\"\n value = np.nan\n if stack.dtype is np.dtype('bool'):\n value = False\n up_stack = np.full(stack.shape, value, dtype=stack.dtype)\n down_stack = np.full(stack.shape, value, dtype=stack.dtype)\n up_stack[:,offset:,...] = stack[:,:-offset,...]\n down_stack[:,:-offset:,...] = stack[:,offset:,...]\n return up_stack, down_stack\n\n\ndef check_line_validity(stack, offset=16, axis=1, eps=1e-7):\n length = offset*2+1\n weights = np.full((length,),1/length, dtype=np.float32)\n result = sni.convolve1d(stack.astype(np.float32), \n weights, axis=axis, mode='constant', cval=0)\n out = result > (1-eps)\n return out\n","repo_name":"xiaoxiaoxh/UniFolding","sub_path":"learning/cloth_funnels/transformed_view_env.py","file_name":"transformed_view_env.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"4031710324","text":"#the AI oven automatically computes, in minutes, the time at which the roast will be done\n#the front of the oven has a digital clock that tells the user when the dish will be finished\n#input: the starting time (hour A, minute B) and the roasting time required, given in minutes\n# 0<=A<=23, 0<=B<=59\n#one minute after 23:59 the digital clock reads 0:00\n#output: print the time at which the roast finishes (A B)\n\n# current hour A, current minute B\nA, B=map(int, input().split())\n# cooking duration C in minutes\nC=int(input())\n\n# restrict the ranges of the hour, minute, and cooking duration\nif (0<=A<=23) and (0<=B<=59) and (0<=C<=1000):\n\n add_h=C//60 # hours to add to the current hour\n add_m=C-(60*add_h) # minutes to add to the current minute\n\n # finish hour end_h, finish minute end_m\n end_h=A+add_h\n end_m=B+add_m\n\n # carry any amount past 59 minutes over into the hours\n if end_m > 59:\n extra_h = end_m//60\n end_h += extra_h\n end_m = end_m - (60*extra_h)\n\n#e.g. if the end time were 24:60\n# -> hour 25 (24+1), minute 0 (60-60)\n\nif (end_h > 23):\n end_h-=24\nprint(end_h, end_m)\n\n\n\n\n \n\n \n\n\n \n \n","repo_name":"3eonah/AlgorithmStudy","sub_path":"2525.py","file_name":"2525.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17400066132","text":"import gzip\nimport sys\ndef mainFunc(l, t):\n # This is where I begin the pagerank algorithm\n def pageRank(file, l, t):\n line = file.readline()\n line = line.decode('utf-8')\n pages = {}\n while (line != ''):\n arr = line.split('\t')\n source = arr[0]\n target = arr[1]\n target = target.strip('\n')\n if pages.get(target, -1) == -1:\n pages.update({target : []})\n if pages.get(source, -1) == -1:\n pages.update({source : [target]})\n else:\n pages[source].append(target)\n line = file.readline()\n line = line.decode('utf-8')\n I = {}\n for entries in pages:\n I.update({entries: 1/len(pages)})\n R = I.copy()\n doesConverge = False\n while(doesConverge == False):\n accumulator = 0\n for entry in R:\n R.update({entry : l / len(pages)})\n for page in pages:\n Q = pages[page]\n if (len(Q) > 0):\n for pageQ in Q:\n R[pageQ] += (1 - l) * I[page] / len(Q)\n else:\n accumulator += (1 - l) * I[page] / len(pages)\n for entry in R:\n R[entry] += accumulator\n summation = 0\n for entry in R:\n summation += abs(R[entry] - I[entry])\n if summation < t:\n doesConverge = True\n I = R.copy()\n return R\n\n def keyWithMaxVal(d):\n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]\n\n inlinks = {}\n pageranks = {}\n\n with gzip.open('links.srt.gz','r') as file1:\n line = file1.readline()\n line = line.decode('utf-8')\n while (line != ''):\n pages = line.strip('\n').split('\t')\n 
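# each line encodes one edge as source<TAB>target; only the target side\n # matters when tallying in-link counts\n 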
target = pages[1]\n if inlinks.get(target, -1) == -1:\n inlinks.update({target : 1})\n else:\n inlinks[target] = inlinks[target] + 1\n line = file1.readline()\n line = line.decode('utf-8') \n file1.close()\n\n with gzip.open('links.srt.gz','r') as file1:\n pageranks = pageRank(file1, l, t)\n file1.close()\n\n inlinksHundred = {}\n pagerankHundred = {}\n inlinksOrder = []\n pageranksOrder = []\n \n for num in range(100):\n maxInlink = keyWithMaxVal(inlinks)\n maxPagerank = keyWithMaxVal(pageranks)\n inlinksHundred.update({maxInlink: inlinks[maxInlink]})\n pagerankHundred.update({maxPagerank: pageranks[maxPagerank]})\n inlinksOrder.append(maxInlink)\n pageranksOrder.append(maxPagerank)\n inlinks.pop(maxInlink)\n pageranks.pop(maxPagerank)\n\n file2 = open(\"inlinks.txt\", \"w\")\n file3 = open(\"pagerank.txt\", \"w\")\n for num in range(100):\n file2.write(inlinksOrder[num] + ' ' + str(num + 1) + ' ' + str(inlinksHundred[inlinksOrder[num]]) + '\\n')\n file3.write(str(pageranksOrder[num].strip('\\n')) + ' ' + str(num + 1) + ' ' + str(pagerankHundred[pageranksOrder[num]]) + '\\n')\n file2.close()\n file3.close()\n \nif __name__ == '__main__':\n l = float(sys.argv[1])\n t = float(sys.argv[2])\n mainFunc(l, t)\n\n","repo_name":"Piyushm19/Python_Projects","sub_path":"Python Projects/446/P2/src/P2.py","file_name":"P2.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37163173234","text":"from vae.VAE import *\nfrom config.config import VAEConfig\nimport os\n\n\ndef gen_sample():\n config = VAEConfig().get_config()\n output_dir = config['output_dir']\n num_samples = config['num_samples']\n seed = config['seed']\n latent_dim = config['latent_dim']\n\n use_cuda = torch.cuda.is_available()\n\n ### Set the seeds. 
Default: 42\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n model = torch.load(os.path.join(output_dir, 'model.pt'))\n\n device_gpu = None\n if use_cuda:\n dev_gpu = \"cuda\"\n device_gpu = torch.device(dev_gpu)\n model.to(device_gpu)\n\n if os.path.exists(os.path.join(output_dir, 'data.pkl')):\n x_train, x_test = pickle.load(open(os.path.join(output_dir, 'data.pkl'), 'rb'))\n else:\n print(\"Training Data Doesn't exist.\")\n exit(0)\n return\n\n print(x_train.shape)\n x_train_sampled = x_train[np.random.randint(x_train.shape[0], size=num_samples), :]\n print(x_train_sampled.shape)\n x_train = torch.from_numpy(x_train_sampled)\n if use_cuda:\n x_train = x_train.to(device_gpu)\n xt = x_train.float()\n T = pickle.load(open(os.path.join(output_dir, 't-val.pkl'), 'rb'))\n print(\"T\", T)\n model.eval()\n assert (num_samples == xt.shape[0])\n with torch.no_grad():\n # compute the mu and logarithmic variance of the original data's z.\n mu, logvar = model.encode(xt.view(-1, xt.shape[1]))\n std = torch.exp(0.5 * logvar)\n num_samples_in_one_go = xt.shape[0]\n\n ### For each generated sample, compute these values.\n tgt_count = num_samples\n all_tensor_out_taken = []\n while tgt_count > 0:\n m = torch.randn(num_samples_in_one_go, latent_dim) ## Sample z?\n if use_cuda:\n m = m.to(device_gpu).float()\n rep_z = m * std + mu\n\n any_inf = torch.any(torch.isinf(rep_z), axis=1)\n mu = mu[~any_inf]\n std = std[~any_inf]\n rep_z = rep_z[~any_inf]\n xt = xt[~any_inf]\n num_samples_in_one_go = xt.shape[0]\n\n ## P(z)\n normal = torch.distributions.normal.Normal(0, 1) ## Normal Prior on Z.\n p_z = normal.log_prob(rep_z) ## Probabilitiy of z being sampled from normal prior.\n\n ## P(x|z)\n x_cap = model.decode(rep_z) ## Output based on the obtained z.\n x_distr = torch.distributions.normal.Normal(torch.mean(x_cap, axis=0),\n 1) ## Distribution with x_cap as mean\n p_x_z = x_distr.log_prob(xt)\n\n ## Q(z|x)\n q_normal = torch.distributions.normal.Normal(mu, std)\n q_z_x = q_normal.log_prob(rep_z)\n\n a = torch.exp(T + torch.mean(p_z, axis=-1) + torch.mean(p_x_z, axis=-1) - torch.mean(q_z_x, axis=-1))\n a_ones = torch.ones_like(a)\n if use_cuda:\n a_ones = a_ones.to(device_gpu)\n a = torch.min(a_ones, a)\n\n u = torch.rand(num_samples_in_one_go) ### Randomly generate U values.\n if use_cuda:\n u = u.to(device_gpu)\n accept_mask = (u - a <= 0)\n tensor_out_taken = x_cap[accept_mask]\n tgt_count -= len(tensor_out_taken)\n all_tensor_out_taken.append(tensor_out_taken)\n tensor_out = torch.cat(all_tensor_out_taken)[:num_samples]\n out = tensor_out.cpu().detach().numpy()\n transformed_output = transform_reverse(out, output_dir)\n print(\"GENERATED NUM OF SAMPLES\", transformed_output.shape[0])\n\n sample_file_path = os.path.join(output_dir, 'samples_{}.csv'.format(num_samples))\n transformed_output.to_csv(sample_file_path, index=False)\n print(\"Sample has been saved in {}\".format(sample_file_path))\n","repo_name":"Cher-er/daslab-aqp","sub_path":"vae/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9866503146","text":"import matplotlib.pyplot as plt\nimport os\nimport torch\nimport numpy as np\nfrom datetime import timedelta\nimport cv2\n\nfrom time import time\nfrom models import meta_model, meta_model_local, meta_model_sharp, meta_model_local_sharp, double_resnet, shooting_model, metamorphoses\nfrom train import train_opt\nfrom prepare_data 
import C_Dataset\nimport nibabel as nib\nfrom skimage.exposure import match_histograms\nfrom PIL import Image\nfrom utils import get_contours\nfrom utils import deform_image, dice\nimport random\n\n\n\nif __name__ == \"__main__\":\n def get_free_gpu():\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n return np.argmax(memory_available)\n\n\n free_gpu_id = get_free_gpu()\n device = 'cuda:' + str(free_gpu_id) if torch.cuda.is_available() else 'cpu'\n print(device)\n\n use_segmentation = True\n\n n_epoch = 100\n l = 15\n L2_weight = .5\n lamda = 3e-7\n v_weight = lamda / l\n z_weight = lamda/ l\n mu = 0.05\n batch_size = 1\n kernel_size = 31\n sigma = 6.\n debug = False\n\n # test_set = C_Dataset(device, \"test_split.txt\", mode=\"test\")\n # test_set = Brats2021_Dataset(device, mode=\"test\")\n dir = '/home/matthis/datasets/MICCAI_BraTS_2018_Data_Training/HGG/'\n image = 'Brats18_CBICA_ATB_1'\n im = nib.load(dir + image + \"/\" + image + \"_t1.nii.gz\").get_fdata()\n #seg = nib.load(dir + image + \"/\" + image + \"_seg.nii.gz\").get_fdata()\n #seg=seg[:,:,80].transpose()\n #seg[seg>0] = 1\n s = cv2.imread(\"/home/matthis/Images/mask-1.png\", cv2.IMREAD_GRAYSCALE)\n seg = np.array(cv2.resize(s, (240,240) ))\n seg = (seg > 0.5) * 1.\n im[im<0] = 0\n im = (im - im.min()) / (im.max() - im.min())\n im = im[:,:,80].transpose()\n target_img = np.transpose(np.load(\"/home/matthis/datasets/sri24_t1_preprocessed.npy\").squeeze())\n\n\n im[im!=0] = match_histograms(im[im!=0], target_img[target_img!=0])\n target_img = target_img[np.newaxis, np.newaxis, ...].copy()\n target_img = torch.from_numpy(target_img).float().to(device)\n source_img = torch.from_numpy(im).float().unsqueeze(0).unsqueeze(0)\n seg = torch.from_numpy(seg[np.newaxis, np.newaxis, ...].copy()).float()\n seg = torch.ones(source_img.shape)\n source = [source_img, seg]\n\n #print(\"Number of test images:\", len(test_set))\n #target_img = test_set.target.to(device)\n\n #test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=10)\n be_sharp = True\n z0 = torch.zeros(target_img.shape)\n\n print(\"### Starting Metamorphoses ###\")\n print(\"L2_weight=\", L2_weight)\n print(\"z_weight=\", z_weight)\n print(\"v_weight=\", v_weight)\n print(\"n_epoch=\", n_epoch)\n print(\"mu=\", mu)\n print(\"sigma=\", sigma)\n t = time()\n\n L2_norm_list = []\n def_list = []\n num_folds = []\n time_list = []\n landa_list = [3e-6]\n mu_list = [0.04]\n for mu in mu_list:\n for lamda in landa_list:\n v_weight = lamda / l\n z_weight = lamda / l\n print(\"mu=\", mu)\n print(\"lambda=\", lamda)\n t=time()\n model = metamorphoses(l, target_img.shape, device, kernel_size, sigma, mu, z0).to(device)\n optimizer = torch.optim.Adam(list(model.parameters()), lr=1e0, weight_decay=1e-8)\n L2, folds = train_opt(model, source, target_img, optimizer, device, n_iter=100, local_reg=use_segmentation, double_resnets=False, debug=debug, plot_iter=500, L2_weight=L2_weight, v_weight=v_weight, z_weight=z_weight)\n L2_norm_list.append(L2.detach().cpu().item())\n num_folds.append(folds.detach().cpu().item())\n time_list.append(time()-t)\n print(\"L2 loss:\", L2.detach().cpu().item(), \"Fold number:\", folds.detach().cpu().item(), \"Time:\", str(timedelta(seconds=time_list[-1])))\n\n # the test loader is commented out above, so average over the runs we actually collected\n print(\"Validation L2 loss: %f\" % (sum(L2_norm_list) / len(L2_norm_list)),\n \"std: %f\" % (np.array(L2_norm_list).std()))\n print(\"Validation L2 deformation only: %f\" % 
(sum(def_list) / max(len(def_list), 1)),\n \"std: %f\" % (np.array(def_list).std()))\n print(\"Average fold number:\", sum(num_folds) / len(num_folds), \"std: %f\" % (np.array(num_folds).std()))\n print(\"Average time :\", str(timedelta(seconds=sum(time_list) / len(num_folds))), \"std: %f\" % (np.array(time_list).std()))\n\n\n\n\n","repo_name":"mattmail/2D_meta","sub_path":"src/main_opt.py","file_name":"main_opt.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11375835153","text":"from copy import deepcopy\r\nfrom math import *\r\n\r\nclass Object(object):\r\n __slots__ = ('x','y','vx','vy')\r\n \r\nG = 6.67428e-11\r\nearthMass = 6e24\r\nmoonMass = 7.347e22\r\n\r\ndef gravTo(x,y,mass):\r\n d = sqrt(x*x+y*y)\r\n if d < 1:\r\n return 0,0\r\n dx = x/d\r\n dy = y/d\r\n a = G*mass/(x*x+y*y)\r\n return a*dx,a*dy\r\n\r\ndef stateToObjects(state):\r\n result = []\r\n for o in state.objects:\r\n obj = Object()\r\n obj.x = o[0]\r\n obj.y = o[1]\r\n obj.vx = 0\r\n obj.vy = 0\r\n result.append(obj)\r\n return result\r\n\r\ndef deriv(objects):\r\n result = []\r\n for obj in objects:\r\n d = Object()\r\n d.x = obj.vx\r\n d.y = obj.vy\r\n d.vx,d.vy = gravTo(-obj.x,-obj.y,earthMass)\r\n if len(objects) > 10: # have moon\r\n max,may = gravTo(objects[-1].x-obj.x,\r\n objects[-1].y-obj.y,\r\n moonMass)\r\n d.vx += max\r\n d.vy += may\r\n result.append(d)\r\n return result\r\n\r\ndef advance(objs,dobjs,dt):\r\n for obj,d in zip(objs,dobjs):\r\n obj.x += d.x*dt\r\n obj.y += d.y*dt\r\n obj.vx += d.vx*dt\r\n obj.vy += d.vy*dt\r\n\r\nclass Simulator(object):\r\n def __init__(self,state,futureStates):\r\n self.objects = stateToObjects(state)\r\n self.state = deepcopy(state)\r\n self.time = state.time\r\n \r\n #calcForces(self.objects)\r\n nextState = futureStates[0]\r\n d = deriv(self.objects)\r\n for i,obj in enumerate(self.objects):\r\n obj.vx = nextState.objects[i][0]-0.5*d[i].vx-state.objects[i][0]\r\n obj.vy = nextState.objects[i][1]-0.5*d[i].vy-state.objects[i][1]\r\n \r\n # it doesn't work as expected \r\n #self.estimateSpeed([state]+futureStates)\r\n\r\n def estimateSpeed(self,states):\r\n objs = deepcopy(self.objects)\r\n for obj in objs:\r\n obj.vx = 0\r\n obj.vy = 0\r\n for j in range(len(states)-1):\r\n d = deriv(stateToObjects(states[j]))\r\n for i in range(len(objs)):\r\n objs[i].x += 0.5*d[i].vx\r\n objs[i].y += 0.5*d[i].vy\r\n objs[i].x += objs[i].vx\r\n objs[i].y += objs[i].vy\r\n objs[i].vx += d[i].vx\r\n objs[i].vy += d[i].vy\r\n j += 1\r\n \r\n for i in range(len(objs)):\r\n self.objects[i].vx = (states[j].objects[i][0]-objs[i].x)/j\r\n self.objects[i].vy = (states[j].objects[i][1]-objs[i].y)/j\r\n return\r\n \r\n def simulate(self,dt):\r\n d = deriv(self.objects)\r\n #advance(self.objects,d,dt)\r\n for obj,dobj in zip(self.objects,d):\r\n obj.x += (obj.vx+0.5*dobj.vx*dt)*dt\r\n obj.y += (obj.vy+0.5*dobj.vy*dt)*dt\r\n obj.vx += dobj.vx*dt\r\n obj.vy += dobj.vy*dt\r\n self.time += dt\r\n self.updateState()\r\n \r\n def rungeKutta(self,dt):\r\n y0 = deepcopy(self.objects)\r\n k1 = deriv(y0)\r\n y1 = deepcopy(y0)\r\n advance(y1,k1,0.5*dt)\r\n k2 = deriv(y1)\r\n y2 = deepcopy(y0)\r\n advance(y2,k2,0.5*dt)\r\n k3 = deriv(y2)\r\n y3 = deepcopy(y0)\r\n advance(y3,k3,dt)\r\n k4 = deriv(y3)\r\n advance(self.objects,k1,1.0/6*dt)\r\n advance(self.objects,k2,2.0/6*dt)\r\n advance(self.objects,k3,2.0/6*dt)\r\n advance(self.objects,k4,1.0/6*dt)\r\n self.time += dt\r\n self.updateState()\r\n \r\n \r\n def 
updateState(self):\r\n self.state.time = self.time\r\n for i,obj in enumerate(self.objects):\r\n self.state.objects[i] = obj.x,obj.y\r\n","repo_name":"Vlad-Shcherbina/icfpc2009-tbd","sub_path":"pyvis/src/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15860597771","text":"import torch\nfrom torch.autograd import Function\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom itertools import product\nfrom networkx.drawing.nx_agraph import to_agraph\n\n\ndef graph_from_matrix(adjacency_matrix, show_isolated_nodes=True, weighted=True, self_loops=False):\n # Size of nodes may be misleading since the plots do not show loops,\n # i.e. edges (a, a) for node a.\n if isinstance(adjacency_matrix, torch.Tensor):\n adjacency_matrix = adjacency_matrix.numpy()\n if not self_loops:\n np.fill_diagonal(adjacency_matrix, 0)\n max_edge_weight = 10\n min_edge_weight = 0.1\n max_node_weight = 100\n fig = plt.figure(1)\n rows, cols = np.where(adjacency_matrix != 0) # now rows == cols\n edges = zip(rows.tolist(), cols.tolist())\n if weighted:\n try:\n edge_weights = adjacency_matrix.reshape(-1)\n edge_weights = max_edge_weight * edge_weights[edge_weights != 0] / max(edge_weights) + min_edge_weight\n except Exception as e:\n print(f'Exception {e}: setting weighted=False')\n weighted = False\n if show_isolated_nodes:\n if weighted:\n try:\n weights = [float(x) for x in np.sum(adjacency_matrix, axis=1)]\n max_weight = max(weights)\n weights = [x * max_node_weight / max_weight for x in weights]\n except Exception as e:\n print(f'Exception {e}: setting weighted=False')\n weighted = False\n gr = nx.empty_graph(adjacency_matrix.shape[0])\n else:\n if weighted:\n try:\n weights = [float(x) for x in np.sum(adjacency_matrix, axis=0) if x != 0]\n max_weight = max(weights)\n weights = [x * max_node_weight / max_weight for x in weights]\n except Exception as e:\n print(f'Exception {e}: setting weighted=False')\n weighted = False\n gr = nx.Graph()\n gr.add_edges_from(edges)\n pos = {label: np.array((pos1, pos2)) for label, (pos1, pos2) in zip(range(64), product(range(8), range(8)))}\n pos = nx.circular_layout(gr)\n if weighted:\n nx.draw_networkx_nodes(gr, pos=pos, node_size=weights)\n nx.draw_networkx_edges(gr, pos=pos, width=edge_weights)\n else:\n nx.draw_networkx_nodes(gr, pos=pos)\n nx.draw_networkx_edges(gr, pos=pos)\n\n return fig\n\n\nclass VectorQuantization(Function):\n @staticmethod\n def forward(ctx, inputs, codebook):\n with torch.no_grad():\n embedding_size = codebook.size(1)\n inputs_size = inputs.size()\n inputs_flatten = inputs.view(-1, embedding_size)\n\n codebook_sqr = torch.sum(codebook ** 2, dim=1)\n inputs_sqr = torch.sum(inputs_flatten ** 2, dim=1, keepdim=True)\n\n # Compute the distances to the codebook\n distances = torch.addmm(codebook_sqr + inputs_sqr,\n inputs_flatten, codebook.t(), alpha=-2.0, beta=1.0)\n\n _, indices_flatten = torch.min(distances, dim=1)\n indices = indices_flatten.view(*inputs_size[:-1])\n ctx.mark_non_differentiable(indices)\n\n return indices\n\n @staticmethod\n def backward(ctx, grad_output):\n raise RuntimeError('Trying to call `.grad()` on graph containing '\n '`VectorQuantization`. The function `VectorQuantization` '\n 'is not differentiable. 
Use `VectorQuantizationStraightThrough` '\n 'if you want a straight-through estimator of the gradient.')\n\n\nclass VectorQuantizationStraightThrough(Function):\n @staticmethod\n def forward(ctx, inputs, codebook):\n indices = vq(inputs, codebook) # [B, T]\n indices_flatten = indices.view(-1) # [B*T*D]\n ctx.save_for_backward(indices_flatten, codebook)\n ctx.mark_non_differentiable(indices_flatten)\n\n codes_flatten = torch.index_select(codebook, dim=0,\n index=indices_flatten)\n codes = codes_flatten.view_as(inputs) # [B,T,D]\n\n return (codes, indices_flatten)\n\n @staticmethod\n def backward(ctx, grad_output, grad_indices):\n grad_inputs, grad_codebook = None, None\n\n if ctx.needs_input_grad[0]:\n # Straight-through estimator\n grad_inputs = grad_output.clone()\n if ctx.needs_input_grad[1]:\n # Gradient wrt. the codebook\n indices, codebook = ctx.saved_tensors\n embedding_size = codebook.size(1)\n\n # map the outputs of grad to the respective embedding vectors\n # grad_codebook: size of codebook, has all the summed grads\n # grad_output gives a list of grad vectors (dim like codebook),\n # but no indication of which code they originally belong to\n # use indices to to map the grads to the correct positions in codebook\n # indices.shape [B*T]\n # grad_output_flatten.shape [B*T, D]\n\n grad_output_flatten = (grad_output.contiguous()\n .view(-1, embedding_size))\n grad_codebook = torch.zeros_like(codebook)\n grad_codebook.index_add_(0, indices, grad_output_flatten)\n\n return (grad_inputs, grad_codebook)\n\n\nclass SampledVectorQuantizationStraightThrough(Function):\n\n @staticmethod\n def forward(ctx, inputs, codebook, m):\n\n B, T, D = inputs.shape\n K = codebook.shape[0] # ?\n e = codebook.view(1, K, D).repeat(B * T, 1, 1) # [B*T, K, D]\n z = inputs.view(B * T, 1, D).repeat(1, K, 1) # [B*T, K, D]\n if m is None:\n m = B * T\n probs = torch.norm(z - e, dim=2) # [B*T, K]\n indices = torch.distributions.Multinomial(m, probs).sample() # [B*T, K]\n # indices_sampled = SampleMultinomial(m, probs).sample() # [B*T, m]\n # indices_sampled.sum(axis=1) = m [B*T]\n # for each encoded symbol we get a K-dim vector of sampled counts\n\n m = indices.sum(axis=1)[0]\n # indices /= m\n ctx.save_for_backward(indices, codebook)\n ctx.mark_non_differentiable(indices)\n\n codes_flatten = torch.matmul(indices, codebook) / m # [B*T]\n\n codes = codes_flatten.view_as(inputs) # [B,T,D]\n\n return (codes, indices)\n\n @staticmethod\n def backward(ctx, grad_output, grad_indices):\n grad_inputs, grad_codebook = None, None\n if ctx.needs_input_grad[0]:\n # Straight-through estimator\n grad_inputs = grad_output.clone()\n\n if ctx.needs_input_grad[1]:\n # Gradient wrt. the codebook\n indices, codebook = ctx.saved_tensors # [B, K], [K, D]\n B, K = indices.shape\n m = indices.sum(axis=1)[0]\n D = codebook.size(1)\n indices = indices.view(B, 1, K).repeat(1, D, 1) / m\n # !! torch.matmul(indices, embedding) ?\n grad_output_flatten = (grad_output.contiguous()\n .view(B, D))\n # loop over entire batch. 
multiply grads output with corresponding\n            # indices to remap it to the codebook entries that it was created from\n            grad_codebook = (grad_output_flatten.view(B, 1, D) * indices.permute(0, 2, 1)).sum(dim=0)\n\n        return (grad_inputs, grad_codebook, None)\n\n\nsvq_st = SampledVectorQuantizationStraightThrough.apply\nvq = VectorQuantization.apply\nvq_st = VectorQuantizationStraightThrough.apply\n__all__ = ['vq', 'vq_st']\n\n\ndef clip_grad_norm(parameters, optimizer: dict) -> None:\n    if optimizer['grad_norm'] is not None:\n        torch.nn.utils.clip_grad_norm_(parameters, optimizer['grad_norm'])\n","repo_name":"ramsesjsf/HiddenSchemaNetworks","sub_path":"src/hiddenschemanetworks/models/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"21450770259","text":"#!/usr/bin/env python3\n\n# This program uses the hexdump module, provided as a separate python file.\n\nimport hexdump\nimport socket\nimport struct\nimport sys\n\n\ndef parse_udp(segment):\n    \"\"\"\n    Parse a UDP segment (including header).\n\n    This function will parse a UDP segment and produce the following values:\n    `src_port`, `dst_port`, `udp_length`, `checksum`, `data_length`, `payload`\n\n    These values are:\n    src_port: source port parsed from the UDP header\n    dst_port: destination port parsed from the UDP header\n    udp_length: length field parsed from the UDP header\n    checksum: checksum field parsed from the UDP header\n\n    data_length: length of the payload, computed from udp_length\n    payload: actual payload of the segment, without the header\n    \"\"\"\n\n    # Unpack the four 16-bit big-endian header fields, then slice the payload\n    # out of the segment using the parsed length field.\n    header_length = 8\n    header = segment[:header_length]\n    src_port, dst_port, udp_length, checksum = struct.unpack('>4H', header)\n\n    payload = segment[header_length:udp_length]\n    data_length = len(payload)\n\n    return src_port, dst_port, udp_length, checksum, data_length, payload\n\n\ndef main():\n    # Create the socket here.\n    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_UDP)\n    s.bind(('',65535))\n    \n    while True:\n        datagram, _ = s.recvfrom(65535)\n        segment = extract_segment(datagram)\n        dump_udp_to_console(segment)\n\n\ndef dump_udp_to_console(segment):\n    (udp_src_port, udp_dst_port, udp_length, udp_checksum,\n     udp_data_length, udp_payload) = parse_udp(segment)\n\n    print(\"Full segment\")\n    hexdump.hexdump(segment)\n\n    print(\"\"\"\\nUDP header:\n    Src port: {}\n    Dst port: {}\n    UDP length: {}\n    Checksum: 0x{:04X}\n\nData length: {}\"\"\".format(udp_src_port, udp_dst_port,\n                          udp_length, udp_checksum,\n                          udp_data_length))\n    print(\"Data:\")\n    hexdump.hexdump(udp_payload)\n    print(\"\\n\\n\")\n\n    # Rudimentary testcases. 
Can't really check anything else without giving\n # the exercise away.\n if len(segment) != udp_length:\n print(\"Your parser is not retrieving the UDP length field correctly\",\n file=sys.stderr)\n if len(udp_payload) != udp_data_length:\n print(\"Your parser is miscomputing the data length or not extracting the payload correctly\",\n file=sys.stderr)\n\n\ndef extract_segment(datagram):\n header_length_in_bytes = (datagram[0] & 0x0F) * 4\n segment = datagram[header_length_in_bytes:]\n return segment\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Seraaphonano/Hello-world","sub_path":"netsec-assignment3-s1041423-s1032019/exercise2/udp_parser_start.py","file_name":"udp_parser_start.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70826063690","text":"\"\"\" Helper functions for the `run` command. \"\"\"\n\nimport importlib\nimport inspect\n\nimport dask\nimport edo\n\n\ndef get_default_optimiser_arguments():\n \"\"\" Get the default arguments from `edo.DataOptimiser`. \"\"\"\n\n signature = inspect.signature(edo.DataOptimiser)\n defaults = {\n k: v.default\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty\n }\n\n defaults[\"root\"] = None\n defaults[\"processes\"] = None\n defaults[\"fitness_kwargs\"] = None\n defaults[\"stop_kwargs\"] = None\n defaults[\"dwindle_kwargs\"] = None\n\n return defaults\n\n\ndef get_experiment_parameters(experiment):\n \"\"\" Get the parameters for the experiment. \"\"\"\n\n spec = importlib.util.spec_from_file_location(\"__main__\", experiment)\n experiment = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(experiment)\n\n module = {k.lower(): v for k, v in vars(experiment).items()}\n\n all_params = set(inspect.getfullargspec(edo.DataOptimiser).args) | set(\n inspect.getfullargspec(edo.DataOptimiser.run).args\n )\n\n module_params = {k: v for k, v in module.items() if k in all_params}\n\n module_params[\"families\"] = [\n edo.Family(dist) for dist in module[\"distributions\"]\n ]\n module_params[\"optimiser\"] = module.get(\"optimiser\", edo.DataOptimiser)\n\n params = get_default_optimiser_arguments()\n params.update(module_params)\n\n return params\n\n\n@dask.delayed\ndef run_single_trial(experiment, root, seed):\n \"\"\" Lazily run a single trial of an experiment. 
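The call just builds a dask delayed task; nothing is executed until the result is computed. A hypothetical usage sketch (the experiment script and output root below are made up):\n\n        >>> from pathlib import Path  # doctest: +SKIP\n        >>> trial = run_single_trial(\"my_experiment.py\", Path(\"out\"), seed=0)  # doctest: +SKIP\n        >>> trial.compute()  # doctest: +SKIP\n    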
\"\"\"\n\n params = get_experiment_parameters(experiment)\n\n _ = params.pop(\"root\")\n optimiser = params.pop(\"optimiser\")\n processes = params.pop(\"processes\")\n fitness_kwargs = params.pop(\"fitness_kwargs\")\n stop_kwargs = params.pop(\"stop_kwargs\")\n dwindle_kwargs = params.pop(\"dwindle_kwargs\")\n\n opt = optimiser(**params)\n _ = opt.run(\n root=root / str(seed),\n processes=processes,\n random_state=seed,\n fitness_kwargs=fitness_kwargs,\n stop_kwargs=stop_kwargs,\n dwindle_kwargs=dwindle_kwargs,\n )\n","repo_name":"daffidwilde/edolab","sub_path":"src/edolab/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5274538552","text":"# class BST:\n# def __init__(self, value):\n# self.value = value\n# self.left = None\n# self.right = None\n\n# def insert(self, value):\n# currentNode = self\n# while True:\n# if value < currentNode.value:\n# if currentNode.left is None:\n# currentNode.left = BST(value)\n# break\n# else:\n# currentNode = currentNode.left\n# else:\n# if currentNode.right is None:\n# currentNode.right = BST(value)\n# break\n# else:\n# currentNode = currentNode.right\n# return self\n\n# def contains(self, value):\n# currentNode = self\n# while currentNode is not None:\n# if value < currentNode.value:\n# currentNode = currentNode.left\n# elif value > currentNode.value:\n# currentNode = currentNode.right\n# else:\n# return True\n# return False\n\n# def remove(self, value, parentNode = None):\n# currentNode = self\n# while currentNode is not None:\n# if value < currentNode.value:\n# parentNode = currentNode\n# currentNode = currentNode.left\n# elif value > currentNode.value:\n# parentNode = currentNode\n# currentNode = currentNode.right\n# else:\n# if currentNode.left is not None and currentNode.right is not None:\n# currentNode.value = currentNode.right.getMinValue()\n# currentNode.right.remove(currentNode.value, currentNode)\n# elif parentNode is None:\n# if currentNode.left is not None:\n# currentNode.value = currentNode.left.value\n# currentNode.right = currentNode.left.right\n# currentNode.left = currentNode.left.left\n# elif currentNode.right is not None:\n# currentNode.value = currentNode.right.value\n# currentNode.left = currentNode.right.left\n# currentNode.right = currentNode.right.right\n# else:\n# currentNode.value = None\n# elif parentNode.left == currentNode:\n# parentNode.left = currentNode.left if currentNode.left is not None else currentNode.right\n# elif parentNode.right == currentNode:\n# parentNode.right = currentNode.left if currentNode.left is not None else currentNode.right\n# break\n# return self if currentNode is not None else None\n\n# def getMinValue(self):\n# currentNode = self\n# while currentNode.left is not None:\n# currentNode = currentNode.left\n# return currentNode.value\n\n# Do not edit the class below except for\n# the insert, contains, and remove methods.\n# Feel free to add new properties and methods\n# to the class.\nclass BST:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def insert(self, value):\n # Write your code here.\n # Do not edit the return statement of this method.\n if value < self.value:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BST(value)\n else:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BST(value)\n \n return self\n\n def contains(self, value):\n # Write your code here.\n if value < self.value:\n if 
self.left:\n return self.left.contains(value)\n return False\n elif value > self.value:\n if self.right:\n return self.right.contains(value)\n return False\n return True\n pass\n\n def remove(self, value):\n if not self:\n return self\n\n if value < self.value:\n if self.left:\n self.left = self.left.remove(value)\n elif value > self.value:\n if self.right:\n self.right = self.right.remove(value)\n else:\n if not self.right and not self.left:\n return None\n elif not self.left:\n self.value = self.right.value\n self.left = self.right.left\n self.right = self.right.right\n elif not self.right:\n self.value = self.left.value\n self.right = self.left.right\n self.left = self.left.left\n return self\n\n else:\n successor = self.right.inOrderSuccessor()\n self.value = successor.value\n self.right = self.right.remove(successor.value)\n \n return self\n\n def inOrderSuccessor(self):\n while self.left:\n self = self.left\n return self","repo_name":"rep-pierce/algorithms","sub_path":"BST_contstruction.py","file_name":"BST_contstruction.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39965626090","text":"__author__ = 'marvinler'\n\nimport argparse\n\nimport numpy as np\nimport sklearn.metrics as metrics\nimport torch\nimport torch.utils.data\n\nfrom dataset import Dataset\nfrom model import instantiate_sparseconvmil\n\n\ndef _define_args():\n parser = argparse.ArgumentParser(description='SparseConvMIL: Sparse Convolutional Context-Aware Multiple Instance '\n 'Learning for Whole Slide Image Classification')\n\n parser.add_argument('--slide-parent-folder', type=str, default='sample_data', metavar='PATH',\n help='path of parent folder containing preprocessed slides data')\n parser.add_argument('--slide-labels-filepath', type=str, default='sample_data/labels.csv', metavar='PATH',\n help='path of CSV-file containing slide labels')\n\n parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of training epochs')\n parser.add_argument('--lr', type=float, default=2e-3, metavar='LR', help='learning rate')\n parser.add_argument('--reg', type=float, default=1e-6, metavar='R', help='weight decay')\n\n # Model parameters\n parser.add_argument('--tile-embedder', type=str, default='resnet18', metavar='MODEL', nargs='*',\n help='type of resnet architecture for the tile embedder')\n parser.add_argument('--tile-embedder-pretrained', action='store_true', default=False,\n help='use Imagenet-pretrained tile embedder architecture')\n parser.add_argument('--sparse-conv-n-channels-conv1', type=int, default=32,\n help='number of channels of first convolution of the sparse-input CNN pooling')\n parser.add_argument('--sparse-conv-n-channels-conv2', type=int, default=32,\n help='number of channels of first convolution of the sparse-input CNN pooling')\n parser.add_argument('--sparse-map-downsample', type=int, default=10, help='downsampling factor of the sparse map')\n parser.add_argument('--wsi-embedding-classifier-n-inner-neurons', type=int, default=32,\n help='number of inner neurons for the WSI embedding classifier')\n\n # Dataset parameters\n parser.add_argument('--batch-size', type=int, default=2, metavar='SIZE',\n help='number of slides sampled per iteration')\n parser.add_argument('--n-tiles-per-wsi', type=int, default=5, metavar='SIZE',\n help='number of tiles to be sampled per WSI')\n\n # Miscellaneous parameters\n parser.add_argument('--j', type=int, default=10, metavar='N_WORKERS', 
help='number of workers for dataloader')\n\n args = parser.parse_args()\n hyper_parameters = {\n 'slide_parent_folder': args.slide_parent_folder,\n 'slide_labels_filepath': args.slide_labels_filepath,\n 'epochs': args.epochs,\n 'lr': args.lr,\n 'reg': args.reg,\n 'tile_embedder': args.tile_embedder,\n 'tile_embedder_pretrained': args.tile_embedder_pretrained,\n 'sparse_conv_n_channels_conv1': args.sparse_conv_n_channels_conv1,\n 'sparse_conv_n_channels_conv2': args.sparse_conv_n_channels_conv2,\n 'sparse_map_downsample': args.sparse_map_downsample,\n 'wsi_embedding_classifier_n_inner_neurons': args.wsi_embedding_classifier_n_inner_neurons,\n 'batch_size': args.batch_size,\n 'n_tiles_per_wsi': args.n_tiles_per_wsi,\n 'j': args.j,\n }\n\n return hyper_parameters\n\n\ndef get_dataloader(dataset, batch_size, shuffle, num_workers):\n return torch.utils.data.DataLoader(dataset, batch_size, shuffle, num_workers=num_workers)\n\n\ndef perform_epoch(mil_model, dataloader, optimizer, loss_function):\n \"\"\"\n Perform a complete training epoch by looping through all data of the dataloader.\n :param mil_model: MIL model to be trained\n :param dataloader: loader of the dataset\n :param optimizer: pytorch optimizer\n :param loss_function: loss function to compute gradients\n :return: (mean of losses, balanced accuracy)\n \"\"\"\n proba_predictions = []\n ground_truths = []\n losses = []\n\n for data, locations, slides_labels, slides_ids in dataloader:\n data = data.cuda()\n locations = locations.cuda()\n slides_labels_cuda = slides_labels.cuda()\n\n optimizer.zero_grad()\n predictions = mil_model(data, locations)\n\n loss = loss_function(predictions, slides_labels_cuda)\n loss.backward()\n optimizer.step()\n\n # Store data for finale epoch average measures\n losses.append(loss.detach().cpu().numpy())\n proba_predictions.extend(predictions.detach().cpu().numpy())\n ground_truths.extend(slides_labels.numpy())\n\n predicted_classes = np.argmax(proba_predictions, axis=1)\n return np.mean(losses), metrics.balanced_accuracy_score(ground_truths, predicted_classes)\n\n\ndef main(hyper_parameters):\n # Loads dataset and dataloader\n print('Loading data')\n dataset = Dataset(hyper_parameters['slide_parent_folder'], hyper_parameters['slide_labels_filepath'],\n hyper_parameters['n_tiles_per_wsi'])\n n_classes = dataset.n_classes\n dataloader = get_dataloader(dataset, hyper_parameters['batch_size'], True, hyper_parameters['j'])\n print(' done')\n\n # Loads MIL model, optimizer and loss function\n print('Loading SparseConvMIL model')\n sparseconvmil_model = instantiate_sparseconvmil(hyper_parameters['tile_embedder'],\n hyper_parameters['tile_embedder_pretrained'],\n hyper_parameters['sparse_conv_n_channels_conv1'],\n hyper_parameters['sparse_conv_n_channels_conv2'],\n 3, 3, hyper_parameters['sparse_map_downsample'],\n hyper_parameters['wsi_embedding_classifier_n_inner_neurons'],\n n_classes)\n sparseconvmil_model = torch.nn.DataParallel(sparseconvmil_model)\n print(' done')\n optimizer = torch.optim.Adam(sparseconvmil_model.parameters(), hyper_parameters['lr'],\n weight_decay=hyper_parameters['reg'])\n loss_function = torch.nn.CrossEntropyLoss()\n\n # Loop through all epochs\n print('Starting training...')\n for epoch in range(hyper_parameters[\"epochs\"]):\n loss, bac = perform_epoch(sparseconvmil_model, dataloader, optimizer, loss_function)\n print('Epoch', f'{epoch:3d}/{hyper_parameters[\"epochs\"]}', f' loss={loss:.3f}', f' bac={bac:.3f}')\n print(' done')\n\nif __name__ == '__main__':\n 
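# Hypothetical invocation (all flags are defined in _define_args above):\n    #   python training.py --epochs 10 --batch-size 2 --n-tiles-per-wsi 16\n    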
main(_define_args())\n","repo_name":"MarvinLer/SparseConvMIL","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"16"} +{"seq_id":"15698571188","text":"from __future__ import print_function\n\ndef main():\n    # static text\n    statteks = \"bandung jakarta medan bandung sumatera medan kalimantan medan papua aceh timika kediri kediri\"\n    # split the text into words\n    teksSplit = statteks.split(\" \")\n    # print the text after splitting\n    print(\"split: \", teksSplit)\n    # initialise the array of unique words\n    teksunik = []\n    # keep each word only once\n    for a in teksSplit:\n        if a not in teksunik:\n            teksunik.append(a)\n    \n    print(\"teksunik: \", teksunik)\n\n    # array for counting how many times each word appears in the sentence\n    tekscount = []\n    for b in teksunik:\n        hitung = 0\n        for c in teksSplit:\n            if b == c:\n                hitung += 1\n        tekscount.append(hitung)\n    print(tekscount)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"dhanyn10/python","sub_path":"simple-bow.py","file_name":"simple-bow.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73409548167","text":"import os\nimport math\nimport torch\nimport random\nimport pathlib\nfrom pathlib import Path\nfrom argparse import Namespace\n#-------------#\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, DistributedSampler\nfrom torch.distributed import is_initialized, get_rank, get_world_size\n#-------------#\nfrom s3prl.utility.helper import is_leader_process\nfrom .model import Model, AMSoftmaxLoss, AAMSoftmaxLoss, SoftmaxLoss, UtteranceExtractor\nfrom .dataset import SpeakerVerifi_train, SpeakerVerifi_test\nfrom .utils import EER\n\n\nclass DownstreamExpert(nn.Module):\n    \"\"\"\n    Used to handle downstream-specific operations\n    eg. downstream forward, metric computation, contents to log\n\n    Note 1.\n    dataloaders should output in the following format:\n\n        [[wav1, wav2, ...], your_other_contents, ...]\n\n        where wav1, wav2 ... 
are in variable length\n and wav1 is in torch.FloatTensor\n \"\"\"\n\n def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):\n super(DownstreamExpert, self).__init__()\n # config\n self.upstream_dim = upstream_dim\n self.downstream = downstream_expert\n self.datarc = downstream_expert['datarc']\n self.modelrc = downstream_expert['modelrc']\n self.expdir = expdir\n\n # dataset\n train_file_path = Path(self.datarc['file_path']) / \"dev\" / \"wav\"\n test_file_path = Path(self.datarc['file_path']) / \"test\" / \"wav\"\n \n train_config = {\n \"vad_config\": self.datarc['vad_config'],\n \"file_path\": [train_file_path],\n \"key_list\": [\"Voxceleb1\"],\n \"meta_data\": self.datarc['train_meta_data'],\n \"max_timestep\": self.datarc[\"max_timestep\"],\n }\n self.train_dataset = SpeakerVerifi_train(**train_config)\n\n dev_config = {\n \"vad_config\": self.datarc['vad_config'],\n \"file_path\": train_file_path, \n \"meta_data\": self.datarc['dev_meta_data']\n } \n self.dev_dataset = SpeakerVerifi_test(**dev_config)\n\n test_config = {\n \"vad_config\": self.datarc['vad_config'],\n \"file_path\": test_file_path, \n \"meta_data\": self.datarc['test_meta_data']\n }\n self.test_dataset = SpeakerVerifi_test(**test_config)\n\n # module\n self.connector = nn.Linear(self.upstream_dim, self.modelrc['input_dim'])\n\n # downstream model\n agg_dim = self.modelrc[\"module_config\"][self.modelrc['module']].get(\n \"agg_dim\",\n self.modelrc['input_dim']\n )\n \n ModelConfig = {\n \"input_dim\": self.modelrc['input_dim'],\n \"agg_dim\": agg_dim,\n \"agg_module_name\": self.modelrc['agg_module'],\n \"module_name\": self.modelrc['module'], \n \"hparams\": self.modelrc[\"module_config\"][self.modelrc['module']],\n \"utterance_module_name\": self.modelrc[\"utter_module\"]\n }\n # downstream model extractor include aggregation module\n self.model = Model(**ModelConfig)\n\n\n # SoftmaxLoss or AMSoftmaxLoss\n objective_config = {\n \"speaker_num\": self.train_dataset.speaker_num, \n \"hidden_dim\": self.modelrc['input_dim'], \n **self.modelrc['LossConfig'][self.modelrc['ObjectiveLoss']]\n }\n\n self.objective = eval(self.modelrc['ObjectiveLoss'])(**objective_config)\n # utils\n self.score_fn = nn.CosineSimilarity(dim=-1)\n self.eval_metric = EER\n self.register_buffer('best_score', torch.ones(1) * 100)\n\n # Interface\n def get_dataloader(self, mode):\n \"\"\"\n Args:\n mode: string\n 'train', 'dev' or 'test'\n\n Return:\n a torch.utils.data.DataLoader returning each batch in the format of:\n\n [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. 
directly loaded by torchaudio\n \"\"\"\n\n if mode == 'train':\n return self._get_train_dataloader(self.train_dataset) \n elif mode == 'dev':\n return self._get_eval_dataloader(self.dev_dataset)\n elif mode == 'test':\n return self._get_eval_dataloader(self.test_dataset)\n\n def _get_train_dataloader(self, dataset):\n sampler = DistributedSampler(dataset) if is_initialized() else None\n return DataLoader(\n dataset,\n batch_size=self.datarc['train_batch_size'], \n shuffle=(sampler is None),\n sampler=sampler,\n num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def _get_eval_dataloader(self, dataset):\n return DataLoader(\n dataset, batch_size=self.datarc['eval_batch_size'],\n shuffle=False, num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n # Interface\n def get_train_dataloader(self):\n return self._get_train_dataloader(self.train_dataset)\n\n # Interface\n def get_dev_dataloader(self):\n return self._get_eval_dataloader(self.dev_dataset)\n\n # Interface\n def get_test_dataloader(self):\n return self._get_eval_dataloader(self.test_dataset)\n\n # Interface\n def forward(self, mode, features, utter_idx, labels=None, records=None, **kwargs):\n \"\"\"\n Args:\n features:\n the features extracted by upstream\n put in the device assigned by command-line args\n\n labels:\n the speaker labels\n\n records:\n defaultdict(list), by appending scalars into records,\n these scalars will be averaged and logged on Tensorboard\n\n logger:\n Tensorboard SummaryWriter, given here for logging/debugging\n convenience, please use \"self.downstream/your_content_name\" as key\n name to log your customized contents\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n \"\"\"\n\n features_pad = pad_sequence(features, batch_first=True)\n \n if self.modelrc['module'] == \"XVector\":\n # TDNN layers in XVector will decrease the total sequence length by fixed 14\n attention_mask = [torch.ones((feature.shape[0] - 14)) for feature in features]\n else:\n attention_mask = [torch.ones((feature.shape[0])) for feature in features]\n\n attention_mask_pad = pad_sequence(attention_mask,batch_first=True)\n attention_mask_pad = (1.0 - attention_mask_pad) * -100000.0\n\n features_pad = self.connector(features_pad)\n\n if mode == 'train':\n agg_vec = self.model(features_pad, attention_mask_pad.cuda())\n labels = torch.LongTensor(labels).to(features_pad.device)\n loss = self.objective(agg_vec, labels)\n records['loss'].append(loss.item())\n return loss\n \n elif mode in ['dev', 'test']:\n agg_vec = self.model.inference(features_pad, attention_mask_pad.cuda())\n agg_vec = torch.nn.functional.normalize(agg_vec,dim=-1)\n utt_name = utter_idx\n \n for idx in range(len(agg_vec)):\n records[utt_name[idx]] = agg_vec[idx].cpu().detach()\n\n return torch.tensor(0)\n\n # interface\n def log_records(self, mode, records, logger, global_step, **kwargs):\n \"\"\"\n Args:\n records:\n defaultdict(list), contents already appended\n\n logger:\n Tensorboard SummaryWriter\n please use f'{prefix}your_content_name' as key name\n to log your customized contents\n\n prefix:\n used to indicate downstream and train/test on Tensorboard\n eg. 
'phone/train-'\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n \"\"\"\n save_names = []\n\n if mode == 'train':\n loss = torch.FloatTensor(records['loss']).mean().item()\n logger.add_scalar(f'sv-voxceleb1/{mode}-loss', loss, global_step=global_step)\n print(f'sv-voxceleb1/{mode}-loss: {loss}')\n\n elif mode in ['dev', 'test']:\n trials = self.test_dataset.pair_table\n labels = []\n scores = []\n for label, name1, name2 in trials:\n labels.append(label)\n score = self.score_fn(records[name1], records[name2]).numpy()\n scores.append(score)\n eer, *others = self.eval_metric(np.array(labels, dtype=int), np.array(scores))\n logger.add_scalar(f'sv-voxceleb1/{mode}-EER', eer, global_step=global_step)\n print(f'sv-voxceleb1/{mode}-EER: {eer}')\n\n if eer < self.best_score and mode == 'dev':\n self.best_score = torch.ones(1) * eer\n save_names.append(f'{mode}-best.ckpt')\n\n with open(Path(self.expdir) / f\"{mode}_predict.txt\", \"w\") as file:\n line = [f\"{name} {score}\\n\" for name, score in zip(records[\"pair_names\"], records[\"scores\"])]\n file.writelines(line)\n\n with open(Path(self.expdir) / f\"{mode}_truth.txt\", \"w\") as file:\n line = [f\"{name} {score}\\n\" for name, score in zip(records[\"pair_names\"], records[\"labels\"])]\n file.writelines(line)\n\n return save_names\n\n def separate_data(self, agg_vec):\n assert len(agg_vec) % 2 == 0\n total_num = len(agg_vec) // 2\n feature1 = agg_vec[:total_num]\n feature2 = agg_vec[total_num:]\n return feature1, feature2\n","repo_name":"s3prl/s3prl","sub_path":"s3prl/downstream/sv_voxceleb1/expert.py","file_name":"expert.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","stars":1943,"dataset":"github-code","pt":"16"} +{"seq_id":"8541019006","text":"import urllib\nfrom io import StringIO\nfrom dna_features_viewer import BiopythonTranslator\n\n# DEFINE FEATURES ASPECTS\n\n\ndef features_properties(f):\n \"\"\"Mutations get a red label, other features get a pastel color.\"\"\"\n label = None\n if f.type == \"Mutagenesis\":\n label = f.qualifiers[\"Note\"][0]\n color = {\n \"Mutagenesis\": \"firebrick\",\n \"Active site\": \"yellow\",\n \"Beta strand\": \"lightyellow\",\n \"Chain\": \"lightcyan\",\n \"Helix\": \"honeydew\",\n \"Initiator methionine\": \"white\",\n \"Metal binding\": \"lightsteelblue\",\n \"Turn\": \"moccasin\",\n }.get(f.type, \"white\")\n return dict(color=color, label=label)\n\n\n# GET THE RECORD FROM UNIPROT\n\nresponse = urllib.request.urlopen(\"https://www.uniprot.org/uniprot/P0A7B8.gff\")\nrecord_file = StringIO(response.read().decode())\n\n# TRANSLATE AND PLOT THE RECORD\n\ntranslator = BiopythonTranslator(features_properties=features_properties)\ngraphic_record = translator.translate_record(record_file)\nax, _ = graphic_record.plot(\n figure_width=15, max_label_length=100, elevate_outline_annotations=True,\n)\nax.set_title(\"Mutation effects in P0A7B8\", fontweight=\"bold\", fontsize=16)\nax.figure.savefig(\"gff_record_from_the_web.png\", bbox_inches=\"tight\")\n","repo_name":"Edinburgh-Genome-Foundry/DnaFeaturesViewer","sub_path":"examples/gff_record_from_the_web.py","file_name":"gff_record_from_the_web.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":508,"dataset":"github-code","pt":"16"} +{"seq_id":"43374291686","text":"\"\"\"\nGiven a sorted list of integers, square the elements and give the output in sorted order.\nFor example, given [-9, -2, 0, 2, 3], return [0, 4, 4, 9, 
81].\nsteps:\nfind the index of the smallest non-negative number.\nfrom there iterate left and right, comparing the squares on both sides and appending the smaller one\n\"\"\"\n\ndef square(x):\n    return x * x\n\ndef solution(my_list):\n    # find the index of the smallest non-negative number; binary search would also work\n    smallest_non_negative = float('inf')\n    idx = 0\n    for i, val in enumerate(my_list):\n        if 0 <= val < smallest_non_negative:\n            smallest_non_negative = val\n            idx = i\n    if smallest_non_negative == float('inf'):\n        idx = len(my_list)  # every value is negative\n\n    if idx == 0:\n        # no negatives: squares of a sorted non-negative list are already sorted\n        return [square(x) for x in my_list]\n\n    new_list = []\n    i = idx      # walks right through the non-negative values\n    j = idx - 1  # walks left through the negative values\n    psquare = float('inf') if i >= len(my_list) else square(my_list[i])\n    nsquare = square(my_list[j])\n    while len(new_list) < len(my_list):\n        if psquare <= nsquare:\n            new_list.append(psquare)\n            i += 1\n            psquare = float('inf') if i >= len(my_list) else square(my_list[i])\n        else:\n            new_list.append(nsquare)\n            j -= 1\n            nsquare = float('inf') if j < 0 else square(my_list[j])\n\n    return new_list\n\nprint(solution([-9, -2, 0, 2, 3]))\n","repo_name":"uchenna-j-edeh/dailly_problems","sub_path":"arrays_manipulations_algorithms/square_outputs_sorted_order.py","file_name":"square_outputs_sorted_order.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5889166086","text":"import abc, contextlib, csv, datetime, more_itertools, re\nfrom ..utilities.dataclasses import MyDataClass\nfrom ..utilities.miscfileio import field_size_limit_context, rm_missing_ok\nfrom ..utilities.version.git import GitCommit, thisrepo\nfrom .astropath_logging import MyLogger, printlogger, ThingWithLogger\n\nclass MRODebuggingMetaClass(abc.ABCMeta):\n  def __new__(cls, name, bases, dct, **kwargs):\n    try:\n      return super().__new__(cls, name, bases, dct, **kwargs)\n    except TypeError as e:\n      if \"Cannot create a consistent\" in str(e):\n        logger = printlogger(\"mro\")\n        logger.critical(\"========================\")\n        logger.critical(f\"MROs of bases of {name}:\")\n        for base in bases:\n          logger.critical(\"------------------------\")\n          for c in base.__mro__:\n            logger.critical(c.__name__)\n        logger.critical(\"************************\")\n        logger.critical(\"filtered for the bad ones:\")\n        for base in bases:\n          bad = [c for c in base.__mro__ if re.search(rf\"\\b{c.__name__}\\b\", str(e))]\n          if len(bad) < 2: continue\n          logger.critical(\"------------------------\")\n          logger.critical(base.__name__)\n          for c in bad:\n            logger.critical(c.__name__)\n        logger.critical(\"========================\")\n      raise\n\nclass ThingWithRoots(abc.ABC, metaclass=MRODebuggingMetaClass):\n  @property\n  def rootnames(self):\n    return set()\n  @property\n  def rootkwargs(self):\n    return {name: getattr(self, name) for name in self.rootnames}\n\nclass ThingWithWorkflowKwargs(abc.ABC, metaclass=MRODebuggingMetaClass):\n  @property\n  @abc.abstractmethod\n  def workflowkwargs(self):\n    return {}\n\nclass WorkflowDependency(ThingWithRoots, ThingWithLogger, ThingWithWorkflowKwargs):\n  @property\n  def workflowkwargs(self):\n    return self.rootkwargs\n\n  @classmethod\n  @abc.abstractmethod\n  def getoutputfiles(cls, **workflowkwargs):\n    \"\"\"\n    Output files that this step is supposed to produce\n    \"\"\"\n    return []\n\n  @classmethod\n  def getmissingoutputfiles(cls, **workflowkwargs):\n    \"\"\"\n    Output files that were supposed to be produced but are missing\n    
\"\"\"\n return [_ for _ in cls.getoutputfiles(**workflowkwargs) if not _.exists()]\n\n @property\n def outputfiles(self):\n \"\"\"\n Output files that this step is supposed to produce\n \"\"\"\n return self.getoutputfiles(**self.workflowkwargs)\n\n @property\n def missingoutputfiles(self):\n \"\"\"\n Output files that were supposed to be produced but are missing\n \"\"\"\n return self.getmissingoutputfiles(**self.workflowkwargs)\n\n @classmethod\n def getworkinprogressfiles(cls, **workflowkwargs):\n \"\"\"\n Files that are saved from one run to the next so that we can\n resume where we left off. These are removed by cleanup()\n \"\"\"\n return []\n\n @property\n def workinprogressfiles(self):\n \"\"\"\n Files that are saved from one run to the next so that we can\n resume where we left off. These are removed by cleanup()\n \"\"\"\n return list(self.getworkinprogressfiles(**self.workflowkwargs))\n\n def cleanup(self):\n printed = False\n canprint = False\n for filename in self.workinprogressfiles:\n canprint = True\n if not printed and filename.exists():\n if not self.uselogfiles:\n raise RuntimeError(\"Can't clean up if not using log files for tracking\")\n self.logger.info(\"Cleaning up files from previous runs\")\n printed = True\n rm_missing_ok(filename)\n if printed:\n self.logger.info(\"Finished cleaning up\")\n elif canprint:\n self.logger.info(\"Clean start\")\n\n @classmethod\n @abc.abstractmethod\n def getlogfile(cls, *, logroot, **workflowkwargs):\n pass\n @classmethod\n @abc.abstractmethod\n def usegloballogger(cls): pass\n\n @classmethod\n @abc.abstractmethod\n def logmodule(cls):\n \"name of the log files for this class (e.g. align)\"\n @classmethod\n @abc.abstractmethod\n def logstartregex(cls): pass\n @classmethod\n @abc.abstractmethod\n def logendregex(cls): pass\n\n @classmethod\n def getrunstatus(cls, *, SlideID, **workflowkwargs):\n workflowkwargs[\"SlideID\"] = SlideID\n return SampleRunStatus.fromlog(\n SlideID=SlideID,\n samplelog=cls.getlogfile(**workflowkwargs),\n module=cls.logmodule(),\n missingfiles=list(cls.getmissingoutputfiles(**workflowkwargs)),\n workinprogressfiles=list(cls.getworkinprogressfiles(**workflowkwargs)),\n startregex=cls.logstartregex(),\n endregex=cls.logendregex(),\n )\n\n def runstatus(self, **kwargs):\n \"\"\"\n returns a SampleRunStatus object that indicates whether\n the sample ran successfully or not, and information about\n the failure, if any.\n \"\"\"\n return self.getrunstatus(**self.workflowkwargs, **kwargs)\n\n @property\n def rootnames(self):\n return {\"logroot\", *super().rootnames}\n\n @property\n @abc.abstractmethod\n def logroot(self):\n pass\n\n @abc.abstractmethod\n def run(self):\n pass\n\n @abc.abstractmethod\n def joblock(self):\n pass\n\n @abc.abstractmethod\n def workflowdependencies(self, **kwargs):\n return []\n\nclass WorkflowDependencySlideID(WorkflowDependency):\n @property\n def workflowkwargs(self):\n return {\n **super().workflowkwargs,\n \"SlideID\": self.SlideID,\n }\n\n @property\n @abc.abstractmethod\n def SlideID(self): pass\n\n @classmethod\n def getlogfile(cls, *, SlideID, logroot, **otherworkflowkwargs):\n return logroot/SlideID/\"logfiles\"/f\"{SlideID}-{cls.logmodule()}.log\"\n\nclass ExternalDependency(WorkflowDependencySlideID):\n def __init__(self, SlideID, logroot):\n self.__SlideID = SlideID\n self.__logroot = logroot\n @property\n def SlideID(self): return self.__SlideID\n @property\n def logroot(self): return self.__logroot\n @classmethod\n def getoutputfiles(cls, SlideID, **workflowkwargs): 
return super().getoutputfiles(SlideID, **workflowkwargs)\n\ndef makeexternaldependency(name, startregex, endregex):\n \"\"\"\n for dependencies that don't run through this package\n \"\"\"\n class dependency(ExternalDependency):\n @classmethod\n def logmodule(cls): return name\n @classmethod\n def logstartregex(cls): return startregex\n @classmethod\n def logendregex(cls): return endregex\n dependency.__name__ = name\n return dependency\n\nShredXML = makeexternaldependency(\"ShredXML\", \"shredxml started\", \"shredxml finished\")\n\nclass SampleRunStatus(MyDataClass):\n \"\"\"\n Stores information about if a sample ran successfully.\n started: did it start running?\n ended: did it finish running?\n error: error traceback as a string, if any\n previousrun: SampleRunStatus for the previous run of this sample, if any\n missingfiles: files that are supposed to be in the output, but are missing\n gitcommit: the commit at which it was run\n localedits: were there local edits when it was run?\n lastattemptedcleanup: SampleRunStatus for the last time this sample tried\n to clean up (whether it succeeded or not)\n lastcleanstart: SampleRunStatus for the last time this sample started fresh\n (either the first run in the log or the last time cleanup()\n was run)\n \"\"\"\n module: str\n SlideID: str\n started: datetime.datetime\n ended: datetime.datetime\n error: str\n previousrun: \"SampleRunStatus\"\n missingfiles: list\n gitcommit: GitCommit\n localedits: bool\n lastattemptedcleanup: \"SampleRunStatus\"\n lastcleanstart: \"SampleRunStatus\"\n\n def __post_init__(self):\n if self.lastattemptedcleanup is None: self.lastattemptedcleanup = self\n if self.lastcleanstart is None: self.lastcleanstart = self\n def __bool__(self):\n \"\"\"\n True if the sample started and ended with no error and all output files are present\n \"\"\"\n return bool(self.started and self.ended and self.error is None and not self.missingfiles)\n @property\n def nruns(self):\n \"\"\"\n How many times has this sample been run?\n \"\"\"\n if self.previousrun is None:\n return 1 if self.started else 0\n return self.previousrun.nruns + 1\n\n @classmethod\n def fromlog(cls, **kwargs):\n try:\n return cls._fromlog(**kwargs)\n except Exception as e:\n return e\n\n @classmethod\n def _fromlog(cls, *, samplelog, SlideID, module, missingfiles, workinprogressfiles, startregex, endregex):\n \"\"\"\n Create a SampleRunStatus object by reading the log file.\n samplelog: from CohortFolder/SlideID/logfiles/SlideID-module.log\n (not CohortFolder/logfiles/module.log)\n module: the module being run\n \"\"\"\n result = None\n started = None\n ended = None\n previousrun = None\n error = None\n gitcommit = None\n localedits = False\n lastattemptedcleanup = None\n lastcleanstart = None\n with contextlib.ExitStack() as stack:\n stack.enter_context(field_size_limit_context(100000000))\n try:\n f = stack.enter_context(open(samplelog))\n except IOError:\n return cls(started=None, ended=None, missingfiles=missingfiles, module=module, gitcommit=None, lastattemptedcleanup=None, lastcleanstart=None, localedits=False, error=None, previousrun=None, SlideID=SlideID)\n else:\n reader = more_itertools.peekable(csv.DictReader(f, fieldnames=(\"Project\", \"Cohort\", \"SlideID\", \"message\", \"time\"), delimiter=\";\"))\n for row in reader:\n if row[\"SlideID\"] != SlideID:\n continue\n elif not row[\"message\"]:\n continue\n else:\n startmatch = re.match(startregex, row[\"message\"])\n endmatch = re.match(endregex, row[\"message\"])\n\n if startmatch:\n if 
started is not None:\n result = cls(started=started, ended=ended, error=error, previousrun=previousrun, missingfiles=missingfiles, module=module, gitcommit=gitcommit, localedits=localedits, lastattemptedcleanup=lastattemptedcleanup, lastcleanstart=lastcleanstart, SlideID=SlideID)\n\n started = datetime.datetime.strptime(row[\"time\"], MyLogger.dateformat)\n error = None\n ended = None\n previousrun = result\n result = None\n if previousrun is not None:\n lastcleanstart = previousrun.lastcleanstart\n lastattemptedcleanup = previousrun.lastattemptedcleanup\n if not any(_.exists() for _ in workinprogressfiles):\n lastcleanstart = lastattemptedcleanup = None\n\n try:\n gitcommit = startmatch.group(\"commit\")\n if gitcommit is None: gitcommit = \"v\"+startmatch.group(\"version\")\n if gitcommit is not None: gitcommit = thisrepo.getcommit(gitcommit)\n localedits = bool(startmatch.group(\"date\"))\n except ValueError:\n gitcommit = None\n\n elif row[\"message\"].startswith(\"ERROR:\"):\n error = reader.peek(default={\"message\": \"\"})[\"message\"]\n if error and error[0] == \"[\" and error[-1] == \"]\":\n error = \"\".join(eval(error))\n else:\n error = row[\"message\"]\n elif \"Cleaning up files\" in row[\"message\"]:\n lastattemptedcleanup = None\n elif \"Finished cleaning up\" in row[\"message\"]:\n lastcleanstart = None #gets assigned to self in __post_init__\n elif \"Clean start\" in row[\"message\"]:\n lastattemptedcleanup = lastcleanstart = None\n elif endmatch:\n ended = datetime.datetime.strptime(row[\"time\"], MyLogger.dateformat)\n result = cls(started=started, ended=ended, error=error, previousrun=previousrun, missingfiles=missingfiles, module=module, gitcommit=gitcommit, localedits=localedits, lastattemptedcleanup=lastattemptedcleanup, lastcleanstart=lastcleanstart, SlideID=SlideID)\n started = None\n if result is None:\n result = cls(started=started, ended=ended, error=error, previousrun=previousrun, missingfiles=missingfiles, module=module, gitcommit=gitcommit, localedits=localedits, lastattemptedcleanup=lastattemptedcleanup, lastcleanstart=lastcleanstart, SlideID=SlideID)\n return result\n\n def __str__(self):\n if self: return f\"ran successfully in {self.ended - self.started}\"\n if not self.started:\n return \"did not run\"\n elif self.error is not None:\n return \"gave an error:\\n\\n\"+self.error\n elif not self.ended:\n return \"started, but did not end\"\n elif self.missingfiles:\n return \"ran successfully but some output files are missing: \" + \", \".join(str(_) for _ in self.missingfiles)\n assert False, self\n\n def __lt__(self, other):\n if isinstance(other, GitCommit):\n return self.gitcommit < other\n elif isinstance(other, SampleRunStatus):\n if other.gitcommit is None: return False\n return self.ended < other.started and self <= other.gitcommit\n else:\n return NotImplemented\n def __le__(self, other):\n if isinstance(other, GitCommit):\n if self.gitcommit is None: return True\n return self.gitcommit <= other\n elif isinstance(other, SampleRunStatus):\n return self == other or self < other\n else:\n return NotImplemented\n\n def __gt__(self, other):\n if isinstance(other, GitCommit):\n return self.gitcommit > other\n elif isinstance(other, SampleRunStatus):\n return other < self\n else:\n return NotImplemented\n def __ge__(self, other):\n if isinstance(other, GitCommit):\n return self.gitcommit >= other\n elif isinstance(other, SampleRunStatus):\n return other <= self\n else:\n return NotImplemented\n\n @property\n def failedincleanup(self):\n return 
self.lastattemptedcleanup != self.lastcleanstart\n","repo_name":"AstroPathJHU/AstroPathPipeline","sub_path":"astropath/shared/workflowdependency.py","file_name":"workflowdependency.py","file_ext":"py","file_size_in_byte":13622,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"28712847495","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom gen_data import Properties, Change_Request, produce_data\nimport plotly.graph_objs as go\nfrom plotly import tools\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies import Output, Input, State\n# from werkzeug.contrib.profiler import ProfilerMiddleware\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napplication = app.server\n\napp.layout = html.Div(children=[\n html.H1(children=\"Interactive Cobb-Douglas Production Function\"),\n dcc.Graph(\n id='main-display'\n ),\n html.Div(children=[\n html.Div(id=\"current-graph-time\", children=\"Current Time\"),\n dcc.Slider(\n id='graph-time-slider',\n min=0,\n max=100,\n value=25,\n step=1\n ),\n html.Br(),\n html.H3(children=\"Basics\"),\n html.Div(id=\"alpha\", children=\"alpha\"),\n dcc.Slider(\n id='alpha-slider',\n min=0.01,\n max=.99,\n value=.33,\n step=0.01\n ),\n html.Div(id=\"base-efficiency\", children=\"base efficiency\"),\n dcc.Slider(\n id='e-slider',\n min=0,\n max=10000,\n value=1000,\n step=100\n ),\n html.Div(id=\"efficiency-growth\", children=\"efficiency growth\"),\n dcc.Slider(\n id='g-slider',\n min=0.00,\n max=.5,\n value=.02,\n step=0.001\n ),\n html.Div(id=\"labor-growth\", children=\"labor growth\"),\n dcc.Slider(\n id='n-slider',\n min=0.00,\n max=.5,\n value=.02,\n step=0.001\n ),\n html.Div(id=\"savings\", children=\"savings\"),\n dcc.Slider(\n id='s-slider',\n min=0.00,\n max=.8,\n value=.2,\n step=0.001\n ),\n html.Div(id=\"depreciation\", children=\"depreciation\"),\n dcc.Slider(\n id='d-slider',\n min=0.00,\n max=.5,\n value=.05,\n step=0.001\n ),\n html.Br(),\n html.H3(children=\"Experimental (may not update correctly)\"),\n html.Div(id=\"delta-g\", children=\"delta g\"),\n dcc.Slider(\n id='delta-g-slider',\n min=0.00,\n max=.1,\n value=0,\n step=0.001\n ),\nhtml.Div(id=\"delta-n\", children=\"delta n\"),\n dcc.Slider(\n id='delta-n-slider',\n min=0.00,\n max=.1,\n value=0,\n step=0.001\n ),\nhtml.Div(id=\"delta-s\", children=\"delta s\"),\n dcc.Slider(\n id='delta-s-slider',\n min=0.00,\n max=.1,\n value=0,\n step=0.001\n ),\nhtml.Div(id=\"delta-d\", children=\"delta d\"),\n dcc.Slider(\n id='delta-d-slider',\n min=0.00,\n max=.1,\n value=0,\n step=0.001\n ),\n html.Br(),\n html.H3(children=\"Other\"),\n html.Div(id=\"n-periods\", children=\"number of time periods\"),\n dcc.Slider(\n id='time-slider',\n min=0,\n max=100,\n value=25,\n step=1\n ),\n html.Br(),\n html.Div(id=\"change-request\", children=\"Dynamic Change Requests\"),\n\n dcc.Input(id='request-change', type='text', placeholder='
%s ' % (ref_link[kk][1:-1], ref_name[kk][:])\n\n    # print(items_table_main[ii][3])\n\n    # selected_header_inds = [0, 1, 6, 7, 8] # ['Sample', 'Name', 'Description', 'REF']\n    # skip_columns = list(range(len(headers_table_main)))\n    # skip_columns.pop(selected_header_inds)\n    skip_columns = []\n\n\n    print_to_readme(input_str[:begin_ind_table_main], with_newline=False)\n    print_to_readme(begin_str_table_main)\n    if table_html_format:\n        print_to_readme(build_table_html(headers_table_main, items_table_main))\n    else:\n        print_to_readme(build_table_md(headers_table_main, items_table_main))\n    print_to_readme(end_str_table_main, with_newline=False)\n\n    cursor = end_ind_table_main\n\n    # Prediction Benchmark Table: ETH\n    # ==========================================\n    # df_ETH = pd.read_excel(excel_file_table_benchmark_eth, header=[0])\n\n    # headers_table_ETH = df_ETH.axes[1].values.tolist()\n    # items_table_ETH = df_ETH.values.tolist()\n\n    # begin_str_table_ETH = ''\n    # end_str_table_ETH = ''\n    # begin_ind_table_ETH = input_str.rfind(begin_str_table_ETH)\n    # end_ind_table_ETH = input_str.rfind(end_str_table_ETH) + len(end_str_table_ETH)\n    #\n    # # Add texts before table - ETH\n    # print_to_readme(input_str[cursor:begin_ind_table_ETH], with_newline=False)\n    #\n    # print_to_readme(begin_str_table_ETH)\n    # print_to_readme(build_table(headers_table_ETH, items_table_ETH))\n    # print_to_readme(end_str_table_ETH, with_newline=False)\n    # cursor = end_ind_table_ETH\n    # ========================================\n\n    # Append rest of the text\n    print_to_readme(input_str[cursor:], with_newline=False)\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 1 and not sys.argv[1].startswith('--'):\n        opentraj_root = sys.argv[1]\n    else:\n        opentraj_root = os.path.abspath(os.path.join(os.getcwd(), '..'))\n\n    # double check: to see if this script can be found where it should be\n    if not os.path.exists(os.path.join(opentraj_root, 'doc/readme_builder.py')):\n        print('Error! 
could not find true opentraj path!')\n exit(-1)\n\n excel_file_table_datasets = os.path.join(opentraj_root, \"doc/data/opentraj-datasets.xlsx\")\n excel_file_table_benchmark_eth = os.path.join(opentraj_root, \"doc/data/opentraj-benchmark-eth.xlsx\")\n\n if '--download-tables' in sys.argv:\n # Todo: one option is to upload the doc into dropbox and download it each time\n os.system(f'wget -q -O {excel_file_table_datasets}'\n f' -N \"https://ethercalc.org/5xdmtogai5l8.xlsx\"')\n os.system(f'wget -q -O {excel_file_table_benchmark_eth}'\n f' -N \"https://ethercalc.org/bzn1f11s4w2b.xlsx\"')\n temp_output_file = os.path.join(opentraj_root, \"README__temp.md\")\n\n table_html_format = False\n if '--build' in sys.argv:\n output_str_ = ''\n print(opentraj_root)\n\n build_readme()\n print(output_str_)\n\n with open(temp_output_file, \"w\") as out_file:\n out_file.write(output_str_)\n\n if '--confirm' in sys.argv and os.path.exists(temp_output_file):\n with open(temp_output_file) as f_in:\n lines = f_in.readlines()\n with open(os.path.join(opentraj_root, \"README.md\"), \"w\") as f_out:\n f_out.writelines(lines)\n os.remove(temp_output_file)\n\n","repo_name":"crowdbotp/OpenTraj","sub_path":"docs/readme_builder.py","file_name":"readme_builder.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":405,"dataset":"github-code","pt":"16"} +{"seq_id":"72933075527","text":"import json\nfrom os import path\n\nfrom services.utils import post\n\nwith open('config/api.json') as f:\n data = json.load(f)\n FUNCTIONS = data['functions']\n\nPUNCT = {',', '.', '-', \"'s\", \"'m\", '?', \"n't\"}\n\nPOS_API_MAP = {\n 'Subject': 'nounColor',\n 'Verb': 'verbColor',\n 'PA': 'predicateAdjectiveColor',\n 'PN': 'predicateNominativeColor',\n 'DO': 'directObjectColor',\n 'IO': 'indirectObjectColor',\n 'Preposition': 'prepositionColor',\n 'Appositive': 'appositiveColor',\n 'Participle': 'participleColor',\n 'Infinitive': 'infinitiveColor',\n}\n\n\ndef create_worksheet(sheet_loc, title, lines, sources, settings):\n key_loc = '{} (Key){}'.format(*path.splitext(sheet_loc))\n body = create_request_body(title, lines, sources, settings)\n response = post(FUNCTIONS + 'worksheet', json=body).content\n sheet, key = tuple(response.split(b', key: '))\n write(sheet_loc, sheet)\n write(key_loc, key)\n\n\ndef create_request_body(title, lines, sources, settings):\n result = {\n 'title': title,\n 'lines': lines,\n 'sources': sources,\n 'settings': settings\n }\n with open('config/pos.json') as f:\n pos = json.load(f)\n\n for name, style in pos.items():\n if style['active']:\n result[POS_API_MAP[name]] = style['rgb']\n\n return result\n\n\ndef write(loc, content):\n with open(loc, 'wb') as f:\n f.write(content)\n\n\nif __name__ == '__main__':\n create_worksheet('C:\\\\Users\\\\sungo\\\\Documents\\\\test.docx',\n 'Worksheet',\n ['I like cats.'],\n [],\n {'Remove Commas': True})\n","repo_name":"GrammarGuru/Sentence","sub_path":"services/worksheet.py","file_name":"worksheet.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74404942728","text":"import pytorch_lightning as pl\nfrom torch.utils.data.dataloader import DataLoader\nfrom torchvision import transforms as T\n\nfrom dataloaders.GSVCitiesDataset import GSVCitiesDataset, GSVCitiesValDataset\n# from . import PittsburgDataset\n# from . 
import MapillaryDataset\n\nfrom prettytable import PrettyTable\n\nTYPE2LAMBDA = {\n    'resize': lambda **kw: T.Resize(size=kw['size'], interpolation=T.InterpolationMode.BILINEAR),\n    'randaug': lambda **kw: T.RandAugment(num_ops=kw['num_ops'], interpolation=T.InterpolationMode.BILINEAR),\n    'totensor': lambda **kw: T.ToTensor(),\n    'normalize': lambda **kw: T.Normalize(mean=kw['mean'], std=kw['std']),\n    'centercrop': lambda **kw: T.CenterCrop(size=kw['size']),\n    'pad': lambda **kw: T.Pad(padding=kw['size'], fill=kw.get('fill', 0))\n}\n\ndef build_transform_compose(transforms):\n    to_compose = []\n    for t in transforms:\n        t = TYPE2LAMBDA[t['type']](**t.get('kwargs', {}))\n        to_compose.append(t)\n    return T.Compose(to_compose)\n\n\nclass GSVCitiesDataModule(pl.LightningDataModule):\n    def __init__(self,\n                 batch_size=32,\n                 img_per_place=4,\n                 min_img_per_place=4,\n                 shuffle_all=False,\n                 image_size=(480, 640),\n                 num_workers=4,\n                 show_data_stats=True,\n                 batch_sampler=None,\n                 random_sample_from_each_place=True,\n                 train_anno=None,\n                 query_anno=[],\n                 ref_anno=[],\n                 base_path=None,\n                 train_transform=[],\n                 vals_transforms=[],\n                 train_transform_e=[]\n                 ):\n        super().__init__()\n        self.batch_size = batch_size\n        self.img_per_place = img_per_place\n        self.min_img_per_place = min_img_per_place\n        self.shuffle_all = shuffle_all\n        self.image_size = image_size\n        self.num_workers = num_workers\n        self.batch_sampler = batch_sampler\n        self.show_data_stats = show_data_stats\n        self.random_sample_from_each_place = random_sample_from_each_place\n        self.save_hyperparameters() # save hyperparameters with PyTorch Lightning\n\n        self.train_anno = train_anno\n        self.query_anno = query_anno\n        self.ref_anno = ref_anno\n        self.base_path = base_path\n        assert len(query_anno) == len(vals_transforms) == len(ref_anno), 'query/ref annotations and validation transforms must have matching lengths'\n        self.train_transform = build_transform_compose(train_transform)\n        self.train_transform_e = build_transform_compose(train_transform_e)\n        self.valid_transforms = [build_transform_compose(vals_transform) for vals_transform in vals_transforms]\n\n        self.train_loader_config = {\n            'batch_size': self.batch_size,\n            'num_workers': self.num_workers,\n            'drop_last': False,\n            'pin_memory': True,\n            'shuffle': self.shuffle_all}\n\n        self.valid_loader_config = {\n            'batch_size': self.batch_size,\n            'num_workers': self.num_workers//2,\n            'drop_last': False,\n            'pin_memory': True,\n            'shuffle': False}\n\n    def setup(self, stage):\n        if stage == 'fit':\n            self.reload()\n            # load validation sets (pitts_val, msls_val, ...etc)\n            self.val_set_names = self.query_anno\n            self.val_datasets = [GSVCitiesValDataset(\n                img_per_place=self.img_per_place,\n                min_img_per_place=self.min_img_per_place,\n                random_sample_from_each_place=self.random_sample_from_each_place,\n                transform=self.train_transform,\n                base_path=self.base_path,\n                query_anno=q,\n                ref_anno=r) for q, r in zip(self.query_anno, self.ref_anno)]\n            if self.show_data_stats:\n                self.print_stats()\n\n    def reload(self):\n        self.train_dataset = GSVCitiesDataset(\n            img_per_place=self.img_per_place,\n            min_img_per_place=self.min_img_per_place,\n            random_sample_from_each_place=self.random_sample_from_each_place,\n            transform=self.train_transform,\n            transform_e=self.train_transform_e,\n            base_path=self.base_path,\n            train_anno=self.train_anno)\n\n    def train_dataloader(self):\n        self.reload()\n        return DataLoader(dataset=self.train_dataset, **self.train_loader_config)\n\n    def val_dataloader(self):\n        val_dataloaders = []\n        for val_dataset in self.val_datasets:\n            
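# one DataLoader per validation set, so each set's metrics can be reported separately\n            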
val_dataloaders.append(DataLoader(\n dataset=val_dataset, **self.valid_loader_config))\n return val_dataloaders\n\n def print_stats(self):\n print() # print a new line\n table = PrettyTable()\n table.field_names = ['Data', 'Value']\n table.align['Data'] = \"l\"\n table.align['Value'] = \"l\"\n table.header = False\n table.add_row([\"# of places\", f'{self.train_dataset.__len__()}'])\n table.add_row([\"# of images\", f'{self.train_dataset.total_nb_images}'])\n print(table.get_string(title=\"Training Dataset\"))\n print()\n\n table = PrettyTable()\n table.field_names = ['Data', 'Value']\n table.align['Data'] = \"l\"\n table.align['Value'] = \"l\"\n table.header = False\n # table.add_row([\"# of places\", f'{self.train_dataset.__len__()}'])\n print(table.get_string(title=\"Validation Datasets\"))\n print()\n\n table = PrettyTable()\n table.field_names = ['Data', 'Value']\n table.align['Data'] = \"l\"\n table.align['Value'] = \"l\"\n table.header = False\n table.add_row(\n [\"Batch size (PxK)\", f\"{self.batch_size}x{self.img_per_place}\"])\n table.add_row(\n [\"# of iterations\", f\"{self.train_dataset.__len__()//self.batch_size}\"])\n table.add_row([\"Image size\", f\"{self.image_size}\"])\n print(table.get_string(title=\"Training config\"))\n","repo_name":"lannester666/event_based_vpr","sub_path":"dataloaders/GSVCitiesDataloader.py","file_name":"GSVCitiesDataloader.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44240762390","text":"import datetime as dt\nimport logging\nimport re\n\nfrom dateutil.parser import parse as parse_date\n\nfrom kp_scrapers.lib.date import get_first_day_of_next_month, to_isoformat\nfrom kp_scrapers.lib.parser import may_strip, str_to_float\nfrom kp_scrapers.lib.utils import ignore_key, map_keys\nfrom kp_scrapers.models.port_call import PortCall\nfrom kp_scrapers.models.utils import validate_item\n\n\nlogger = logging.getLogger(__name__)\n\nCARGO_BLACKLIST = [\n 'EX ',\n 'TICTS',\n 'TPA',\n 'BALLAST',\n 'CONTAINERS',\n 'GENERAL CARGO',\n r'FOR\\s*TPA\\s*PROJECT',\n r'WAITING\\s*TO\\s*SAIL',\n r'NAVY\\s*SHIP',\n r'FISHING\\s*VESSEL',\n r'MOTOR\\s*VEHICLES',\n]\n\nRELEVANT_VESSEL_TYPES = ['GC', 'T']\n\nNO_VALUE_SIGN = ['', '-', 'TBA']\n\n\n@validate_item(PortCall, normalize=True, strict=False)\ndef process_eta_item(raw_item):\n \"\"\"Transform raw item into an event.\n\n This is for EXPECTED ARRIVALS table.\n\n Args:\n raw_item (Dict[str]):\n\n Returns:\n Dict[str, str | Dict[str, str]]:\n\n \"\"\"\n item = map_keys(\n raw_item, eta_field_mapping(reported_date=raw_item['reported_date']), skip_missing=True\n )\n\n # ignore irrelevant vessel type (agent contains vessel type info)\n if item['agent_cargo_receiver'][1] not in RELEVANT_VESSEL_TYPES:\n logger.info(\n f'Vessel is of an irrelevant type: '\n f'{item[\"vessel_name\"]} ({item[\"agent_cargo_receiver\"][1]})'\n )\n return\n\n # ignore vessel with irrelevant cargo and empty cargo\n cargoes = list(normalize_cargoes_in_expected_arrival(item['agent_cargo_receiver'][2]))\n if not cargoes:\n logger.info(f'Vessel is carrying irrelevant cargo: {item[\"vessel_name\"]}')\n return\n\n return {\n 'port_name': item['port_name'],\n 'provider_name': item['provider_name'],\n 'reported_date': item['reported_date'],\n 'eta': item['eta'],\n 'vessel': {\n 'name': item['vessel_name'],\n 'gross_tonnage': item['gross_tonnage'],\n 'length': item['vessel_length'],\n },\n 'cargoes': cargoes,\n }\n\n\n@validate_item(PortCall, 
normalize=True, strict=False)\ndef process_at_berth_item(raw_item):\n \"\"\"Transform item into a port call event (arrival).\n\n Args:\n raw_item (Dict[str]):\n\n Returns:\n Dict[str, str | Dict[str, str]]:\n\n \"\"\"\n cargoes = list(normalize_cargoes_in_berth_plan(raw_item, ['cargo', 'import', 'export']))\n if not cargoes:\n return\n\n item = map_keys(raw_item, at_berth_field_mapping(), skip_missing=True)\n return {\n 'port_name': item['port_name'],\n 'provider_name': item['provider_name'],\n 'reported_date': item['reported_date'],\n 'berthed': guess_at_berth_berthed_date(item['reported_date']),\n 'vessel': {'name': item['vessel_name'], 'length': item['vessel_length']},\n 'cargoes': cargoes,\n }\n\n\n@validate_item(PortCall, normalize=True, strict=False)\ndef process_anchorage_item(raw_item):\n \"\"\"Transform item into a port call event (berthed).\n\n Args:\n raw_item (Dict[str]):\n\n Returns:\n Dict[str, str | Dict[str, str]]:\n\n \"\"\"\n cargoes = list(normalize_cargoes_in_berth_plan(raw_item, ['TYPE OF CARGO', 'IMPORT', 'EXPORT']))\n if not cargoes:\n return\n\n item = map_keys(raw_item, anchorage_field_mapping(), skip_missing=True)\n\n item['arrival'] = normalize_arrival_date(item.pop('arrival_date'), item.pop('arrival_time'))\n if not item['arrival']:\n return\n\n item['vessel'] = {'name': item.pop('vessel_name'), 'length': item.pop('vessel_length')}\n\n return item\n\n\ndef eta_field_mapping(**kwargs):\n return {\n '0': ('eta', lambda x: normalize_date(x, **kwargs)),\n '1': ignore_key('draught'),\n '2': ('vessel_length', lambda x: x if x else None),\n '3': ('gross_tonnage', None),\n '4': ('vessel_name', normalize_vessel_name),\n '5': ('agent_cargo_receiver', split_agent_cargo_receiver),\n 'port_name': ('port_name', None),\n 'provider_name': ('provider_name', None),\n 'reported_date': ('reported_date', lambda x: to_isoformat(x, dayfirst=True)),\n }\n\n\ndef at_berth_field_mapping():\n return {\n # vessel\n 'vessel_name': ('vessel_name', normalize_vessel_name),\n 'ship_draught': ignore_key('draught'),\n 'ship_length': ('vessel_length', lambda x: x if x else None),\n # port\n 'berth': ('berth', None),\n # meta field\n 'port_name': ('port_name', None),\n 'provider_name': ('provider_name', None),\n 'reported_date': ('reported_date', lambda x: to_isoformat(x, dayfirst=True)),\n }\n\n\ndef anchorage_field_mapping():\n return {\n # vessel\n 'SHIP NAME': ('vessel_name', normalize_vessel_name),\n 'DRAFT/LOA': ('vessel_length', normalize_vessel_length),\n # arrival date\n 'SIT.DATE': ('arrival_date', None),\n 'TIME0': ('arrival_time', None),\n # meta field\n 'port_name': ('port_name', None),\n 'provider_name': ('provider_name', None),\n 'reported_date': ('reported_date', lambda x: to_isoformat(x, dayfirst=True)),\n }\n\n\ndef normalize_arrival_date(date, time):\n \"\"\"Combine date and time, convert them to ISO 8601 format.\n\n Examples:\n >>> normalize_arrival_date('09.09.18', '0500')\n '2018-09-09T05:00:00'\n >>> normalize_arrival_date('09.09.18', '-')\n '2018-09-09T00:00:00'\n >>> normalize_arrival_date('-', '-')\n\n Args:\n date (str):\n time (str):\n\n Returns:\n str:\n\n \"\"\"\n if date in NO_VALUE_SIGN:\n return None\n\n if time in NO_VALUE_SIGN:\n return to_isoformat(date, dayfirst=True)\n\n return to_isoformat(' '.join([date, time]), dayfirst=True)\n\n\ndef normalize_date(date_str, reported_date):\n \"\"\"Normalize date to ISO 8601 format.\n\n The date_str doesn't contain month and year info, so we need reported date as reference. 
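For instance, relative to a reported date of '23RD JULY 2018', an eta day of 30 still falls in July 2018, while an eta day of 2 must mean August 2018 (exactly the two doctests below). 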
If the\n    day is no bigger than reported date, then it's the same month and year as reported date. If the\n    day is smaller than reported date, then it's next month.\n\n    Examples:\n        >>> normalize_date('30TH MON', '23RD JULY 2018')\n        '2018-07-30T00:00:00'\n        >>> normalize_date('2ND THU', '23RD JULY 2018')\n        '2018-08-02T00:00:00'\n\n    Args:\n        date_str (str): format as 24TH TUE\n        reported_date (str): format as 23RD JULY 2018\n\n    Returns:\n        str:\n\n    \"\"\"\n    reported_date = parse_date(reported_date)\n    day, month, year = reported_date.day, reported_date.month, reported_date.year\n    eta_day = int(re.search(r'\\d*', date_str).group())\n\n    if eta_day >= day:\n        return dt.datetime(year, month, eta_day).isoformat()\n    else:\n        next_date = get_first_day_of_next_month(reported_date)\n        return dt.datetime(next_date.year, next_date.month, eta_day).isoformat()\n\n\ndef normalize_cargoes_in_expected_arrival(raw_cargo):\n    \"\"\"Extract cargo information from raw item (from Expected Arrival table).\n\n    Args:\n        raw_cargo (Dict[str, str]):\n\n    Returns:\n        List[Dict[str, str]]:\n\n    \"\"\"\n    cargo_info = _handel_specific_cargo(raw_cargo)\n    for alias in CARGO_BLACKLIST:\n        if any(alias in product for product in cargo_info):\n            logger.info(f'Cargo is blacklisted, will not yield: {cargo_info}')\n            return None\n\n    for cargo in cargo_info:\n        yield _assemble_cargo(cargo, None, None)\n\n\ndef _assemble_cargo(product, movement, volume):\n    return {\n        'product': may_strip(product),\n        'movement': movement,\n        'volume': volume,\n        'volume_unit': 'tons',\n    }\n\n\ndef normalize_cargoes_in_berth_plan(raw_item, key_list):\n    \"\"\"Extract cargo information from raw item (from Berth Plan table).\n\n    Args:\n        raw_item (Dict[str, str):\n        key_list (List[str]):\n\n    Returns:\n        List[Dict[str, str]] | None:\n\n    \"\"\"\n\n    cargo_info = raw_item[key_list[0]].strip()\n    discharge = raw_item[key_list[1]]\n    load = raw_item[key_list[2]]\n\n    for block in CARGO_BLACKLIST:\n        if re.search(block, cargo_info):\n            return\n\n    # check for empty/placeholder values while cargo_info is still a string,\n    # before it is split into a list below (a list never matches NO_VALUE_SIGN)\n    if cargo_info in NO_VALUE_SIGN:\n        return\n\n    # handle cargo info like 'JET A1 / 1K'\n    cargo_info = _handel_specific_cargo(cargo_info)\n\n    if str_to_float(discharge):\n        disch = str_to_float(discharge) / len(cargo_info)\n        for cargo in cargo_info:\n            yield _assemble_cargo(cargo, 'discharge', str(disch))\n\n    if str_to_float(load):\n        load = str_to_float(load) / len(cargo_info)\n        for cargo in cargo_info:\n            yield _assemble_cargo(cargo, 'load', str(load))\n\n\ndef _handel_specific_cargo(info):\n    \"\"\"Handle cargo information.\n\n    1. cargo/receiver: JET A1 / 1K\n\n    Args:\n        info (str):\n\n    Returns:\n        List[str]\n\n    \"\"\"\n    if '/' in info:\n        return info.split('/')\n\n    return [info]\n\n\ndef normalize_vessel_name(vessel_name):\n    \"\"\"Omit 'MT' at the beginning and remove double-spaces.\n\n    Examples:\n        >>> normalize_vessel_name('MT GRAND ACE 11')\n        'GRAND ACE 11'\n        >>> normalize_vessel_name('MTAMT   ')\n        'MTAMT'\n\n    Args:\n        vessel_name (str):\n\n    Returns:\n        str:\n\n    \"\"\"\n    for prefix in ('MT ', 'MSC '):\n        if vessel_name.startswith(prefix):\n            vessel_name = vessel_name.replace(prefix, '')\n    return may_strip(vessel_name)\n\n\ndef split_agent_cargo_receiver(strs):\n    \"\"\"Split AGENT / CARGO / RECEIVER field.\n\n    Usually, the format is:\n        Agent - Cargo\n        Agent - Receiver\n\n    So, we can split the string by ' - ' and retrieve the two parts. Agent may contain '-', but the\n    number of blanks near the dash is 1, unlike the separator: 2. 
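(For a concrete case, see the 'MESSINA ( RO-RO ) - TPA' doctest below: the dash inside 'RO-RO' is not padded the way the ' - ' separator is, so only the separator splits the field.) 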
Thus we can make sure it's\n divided correctly.\n\n Examples:\n >>> split_agent_cargo_receiver('SEAFORTH ( GC ) - WHEAT IN BULK ( S.S.B )')\n ('SEAFORTH', 'GC', 'WHEAT IN BULK')\n >>> split_agent_cargo_receiver('MESSINA ( RO-RO ) - TPA ( TO DISCH. 91 UNITS )')\n ('MESSINA', 'RO-RO', 'TPA')\n >>> split_agent_cargo_receiver('SEAFORTH ( GC ) - JET A1 / IK')\n ('SEAFORTH', 'GC', 'JET A1 / IK')\n\n Args:\n strs (str):\n\n Returns:\n Tuple[str, str, str]: a tuple of (agent, vessel_type, cargo/receiver)\n\n \"\"\"\n str_match = re.match(r'^([\\w\\s]+)\\(\\s*(\\S+)\\s*\\)\\s*\\-\\s*([\\w\\s\\/\\-]+)', strs)\n return tuple(map(may_strip, list(str_match.groups()))) if str_match else (None, None, None)\n\n\ndef guess_at_berth_berthed_date(reported_date):\n \"\"\"Arrival date would be a previous day of reported date.\n\n Args:\n reported_date (str):\n\n Returns:\n str: date in format of ISO 8601\n\n \"\"\"\n return (parse_date(reported_date, dayfirst=False) - dt.timedelta(days=1)).isoformat()\n\n\ndef normalize_vessel_length(raw_length):\n \"\"\"Extract vessel length from DRAFT/LOA field.\n\n Examples:\n >>> normalize_vessel_length('9.8 (183.06)')\n '183.06'\n >>> normalize_vessel_length('8.2 (240)')\n '240'\n\n Args:\n raw_length (str):\n\n Returns:\n str | None:\n\n \"\"\"\n length_match = re.search(r'\\((.*)\\)', raw_length)\n return length_match.group(1).strip() if length_match else None\n","repo_name":"theHausdorffMetric/test","sub_path":"kp_scrapers/spiders/port_authorities/dar_es_salam/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20724693525","text":"import sys\nsys.setrecursionlimit(10 ** 6)\n\nif __name__ == \"__main__\":\n s = input()\n t = input()\n answer = 0\n\n def change(s, t):\n global answer\n if len(s) == len(t):\n if s == t:\n answer = 1\n return \n else:\n return\n \n if t[-1] == 'A':\n change(s, t[:-1])\n\n if t[0] == 'B':\n change(s, t[1:][::-1])\n \n change(s, t)\n if answer == 0:\n print(0)\n else:\n print(1)","repo_name":"Angela-OH/Algorithm","sub_path":"백준/12919.py","file_name":"12919.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33168447824","text":"import cv2\nimport os\nimport sys\nimport logging\nimport json\nimport numpy as np\n\nimport mindspore as ms\n\n\ndef setup_logger(name, save_dir, distributed_rank, file_name='log.txt'):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n # don't log results for the non-master process\n if distributed_rank > 0:\n return logger\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s: %(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if save_dir:\n fh = logging.FileHandler(os.path.join(save_dir, file_name), mode='w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef find_files(dir_path, img_paths, 
suffix=['jpg', 'png', 'bmp']):\n files = os.listdir(dir_path)\n for f in files:\n path = os.path.join(dir_path, f)\n if os.path.isdir(path):\n find_files(path, img_paths, suffix)\n elif f.split('.')[-1] in suffix:\n img_paths.append(path)\n\n\ndef write_json(indices, q_img_paths, g_img_paths, dst_dir,\n name='submit.json', top_k=200):\n results = {}\n indices = indices[:, :top_k]\n for i in range(indices.shape[0]):\n query_name = os.path.basename(q_img_paths[i])\n results[query_name] = []\n for j in range(indices.shape[1]):\n idx = indices[i, j]\n gallery_name = os.path.basename(g_img_paths[idx])\n results[query_name].append(gallery_name)\n with open(os.path.join(dst_dir, name), 'w', encoding='utf-8') as f:\n json.dump(results, f)\n\n\ndef vis_rank(dataset, indices, out_dir, topk=5, size=(128, 256)):\n out_dir = os.path.join(out_dir, 'vis_rank')\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n color = (0, 0, 255)\n indices = indices[:, :topk]\n img_paths = []\n pids = []\n camids = []\n for img_path, pid, camid in dataset.query + dataset.gallery:\n img_paths.append(img_path)\n pids.append(pid)\n camids.append(camid)\n num_query = len(dataset.query)\n for i in range(indices.shape[0]):\n query_img = cv2.imread(img_paths[i])\n query_img = cv2.resize(query_img, size)\n imgs = [query_img]\n for j in range(indices.shape[1]):\n idx = num_query + indices[i, j]\n img = cv2.imread(img_paths[idx])\n img = cv2.resize(img, size)\n if pids[i] != pids[idx]:\n img = cv2.rectangle(img, (0, 0), size, color, 4)\n imgs.append(img)\n canvas = np.concatenate(imgs, axis=1)\n cv2.imwrite(os.path.join(out_dir, os.path.basename(img_paths[i])), canvas)\n","repo_name":"Xiangyu-CAS/ReID.mindspore","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11906182195","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nimport numpy as np\n\ntry:\n from Cython.Distutils import build_ext\nexcept ImportError:\n use_cython = False\nelse:\n use_cython = True\n\ncmdclass = { }\n\nif use_cython:\n ext_modules = [\n Extension(\"PyPore.cparsers\", [ \"PyPore/cparsers.pyx\" ], include_dirs=[np.get_include()] ),\n Extension(\"PyPore.calignment\", [ \"PyPore/calignment.pyx\" ], include_dirs=[np.get_include()] )\n ]\n cmdclass.update({ 'build_ext': build_ext })\nelse:\n ext_modules = [\n Extension(\"PyPore.cparsers\", [ \"PyPore/cparsers.c\" ], include_dirs=[np.get_include()] ),\n Extension(\"PyPore.calignment\", [ \"PyPore/calignment.c\" ], include_dirs=[np.get_include()] )\n ]\n\nsetup(\n name='pythonic-porin',\n version='0.2.0',\n author='Jacob Schreiber',\n author_email='jmschreiber91@gmail.com',\n packages=['PyPore'],\n url='http://pypi.python.org/pypi/pythonic-porin/',\n license='LICENSE.txt',\n description='Nanopore Data Analysis package. Provides tools for reading data,\\\n performing event detection, segmentation, visualization, and analysis using\\\n hidden Markov models, and other tools. 
Designed for the UCSC Nanopore Group.',\n    cmdclass=cmdclass,\n    ext_modules=ext_modules,\n    install_requires=[\n        \"cython >= 0.20.1\",\n        \"numpy >= 1.8.0\",\n        \"matplotlib >= 1.3.1\"\n    ],\n)\n","repo_name":"jmschrei/PyPore","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"16"} +{"seq_id":"9071180440","text":"import chainer\nimport numpy\nimport math\nimport nmtrain\n\nclass RNN_NMT(object):\n  def __init__(self):\n    self.bptt = None\n    self.bptt_len = 0\n    self.train = self.train_mle\n\n  def configure_learning(self, learning_config, bptt_func = None):\n    self.bptt = bptt_func\n    self.config = learning_config\n    self.learning_type = learning_config.learning.method\n    if self.learning_type == \"mrt\":\n      nmtrain.log.info(\"Setting learning to minimum risk training\")\n      self.train = self.train_mrt\n      self.minrisk = nmtrain.minrisk.minrisk.MinimumRiskTraining(learning_config.learning.mrt)\n    else:\n      nmtrain.log.info(\"Setting learning to maximum likelihood training\")\n      self.train = self.train_mle\n\n  def train_mle(self, model, src_batch, trg_batch, eos_id, outputer=None):\n    batch_loss = 0\n    bptt_ctr = 0\n    model.encode(src_batch)\n\n    if outputer: outputer.begin_collection(src=src_batch, ref=trg_batch)\n    for i, trg_word in enumerate(trg_batch):\n      y_t = chainer.Variable(model.xp.array(trg_word, dtype=numpy.int32))\n      output = model.decode()\n      batch_loss += chainer.functions.softmax_cross_entropy(output.y, y_t, reduce='no')\n      model.update(y_t)\n\n      # Truncated BPTT\n      if hasattr(self, \"bptt\") and self.bptt is not None and self.config.bptt_len > 0:  # self.bptt_len is never updated after __init__, so the configured length is authoritative\n        bptt_ctr += 1\n        if bptt_ctr == self.config.bptt_len:\n          self.bptt(batch_loss)\n          bptt_ctr = 0\n\n      if outputer: outputer(output)\n    if outputer: outputer.end_collection()\n    return chainer.functions.sum(batch_loss)\n\n  def train_mrt(self, model, src_batch, trg_batch, eos_id, outputer=None):\n    return self.minrisk(model, src_batch, trg_batch, eos_id, outputer)\n\n  def predict(self, model, src_sent, eos_id, gen_limit=50,\n              store_probabilities=False,\n              beam=1, word_penalty=0):\n    # The beam used to represent state in beam search\n    class BeamState:\n      def __init__(self, id, model_state, log_prob, word, attention, word_prob, parent):\n        self.id = id\n        self.model_state = model_state\n        self.log_prob = log_prob\n        self.word = word\n        self.attention = attention\n        self.word_prob = word_prob\n        self.parent = parent\n\n    # The n-argmax function\n    def n_argmax(array, top):\n      top = min(top, len(array))\n      return numpy.argpartition(array, -top)[-top:]\n\n    # The beams\n    beams = [BeamState(0, None, 0, None, None, None, None)]\n    beam_prediction = []\n    worst_prob = -float(\"inf\")\n    cur_id = 1\n    # Start Prediction\n    model.encode(src_sent)\n    for i in range(gen_limit):\n      # Expand all the beams\n      new_beam = []\n      for state in beams:\n        if eos_id == state.word:\n          if len(beam_prediction) == 0:\n            worst_prob = state.log_prob\n          else:\n            worst_prob = min(state.log_prob, worst_prob)\n          beam_prediction.append(state)\n        else:\n          if state.word is not None:\n            model.set_state(state.model_state)\n            word_var = chainer.Variable(model.xp.array([state.word], dtype=numpy.int32))\n            model.update(word_var)\n\n          # Produce the output\n          output = model.decode()\n          current_model = model.state()\n          y_dist = chainer.cuda.to_cpu(chainer.functions.softmax(output.y).data[0])\n          attn_out = chainer.cuda.to_cpu(output.a.data[0]) if hasattr(output, \"a\") else None\n          word_prob = y_dist if store_probabilities else None\n          # 
Produce the next words\n if beam == 1:\n words = [numpy.argmax(y_dist)]\n else:\n words = n_argmax(y_dist, beam)\n for word in words:\n new_probability = math.log(y_dist[word]) + word_penalty + state.log_prob\n new_beam.append(BeamState(id=cur_id, model_state=current_model, log_prob=new_probability,\n word=word, attention=attn_out,\n word_prob=word_prob, parent=state))\n cur_id += 1\n # First sort the beam\n new_beam = sorted(new_beam, key = lambda state: state.log_prob, reverse=True)\n # When the best hypothesis probability is worse than the best probability stop or\n # If no new state is generated\n if len(new_beam) == 0 or new_beam[0].log_prob < worst_prob:\n break\n else:\n beams = new_beam[:beam]\n\n # Apparently, no hypothesis reached the end of sentence\n if len(beam_prediction) == 0:\n beam_prediction = [beams[0]]\n else:\n beam_prediction = sorted(beam_prediction,\n key=lambda state:state.log_prob,\n reverse=True)\n\n ## Collecting output\n cur_state = beam_prediction[0]\n # attention\n attention_available = cur_state.attention is not None\n attention = [] if attention_available else None\n # probability of each time step\n probabilities = [] if store_probabilities else None\n # Prediction\n prediction = []\n while cur_state.parent is not None:\n prediction.append(cur_state.word)\n if attention_available:\n attention.append(numpy.expand_dims(cur_state.attention, axis=1))\n if store_probabilities:\n probabilities.append(numpy.expand_dims(cur_state.word_prob, axis=1))\n cur_state = cur_state.parent\n ## Packing output\n output = lambda: None\n output.prediction = list(reversed(prediction))\n # Output: Attention\n if attention_available:\n output.attention = numpy.concatenate(list(reversed(attention)), axis=1)\n # Output: Word probabilities\n if store_probabilities:\n output.probabilities = numpy.concatenate(list(reversed(probabilities)), axis=1)\n return output\n\n","repo_name":"philip30/nmtrain","sub_path":"nmtrain/classifiers/rnn_nmt.py","file_name":"rnn_nmt.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"36502694753","text":"# pylint: disable=missing-module-docstring\n\nimport subprocess as sp\nfrom rich.console import Console\nfrom src.exception import MKVmergeError, MKVextractError, ProcessError\n\nconsole = Console()\n\n\n# pylint: disable=too-few-public-methods\nclass Process:\n \"\"\"Subprocess console display\"\"\"\n\n def __init__(self):\n self.process_exceptions = {\n \"mkvmerge\": MKVmergeError,\n \"mkvextract\": MKVextractError,\n \"other\": ProcessError,\n }\n\n self.colors = {\"ok\": \"green\", \"busy\": \"cyan\"}\n\n def run(self, process, command, process_color=\"#F79EDE\"):\n \"\"\"Run specified command\"\"\"\n\n console.print(\n f\"> The following [{process_color}]{process}[/{process_color}] command will be executed:\\r\"\n )\n console.print(f\"[{self.colors['ok']}]{' '.join(command)}[/{self.colors['ok']}]\")\n console.print(\n f\"\\r> [{process_color}]{process}[/{process_color}] [{self.colors['busy']}]running...[/\"\n f\"{self.colors['busy']}]\",\n end=\"\\r\",\n )\n\n response = sp.run(command, stdout=sp.PIPE, stderr=sp.PIPE, check=False)\n return_code = response.returncode\n if return_code == 0:\n console.print(\n f\"> [{process_color}]{process}[/{process_color}] [{self.colors['ok']}]completed[/\"\n f\"{self.colors['ok']}]!\\r\\n\"\n )\n return response\n\n if command[0] not in self.process_exceptions:\n exception = self.process_exceptions[\"other\"]\n 
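# The branch above covers binaries with no dedicated exception type; the else below picks\n        # MKVmergeError/MKVextractError for mkvmerge/mkvextract. A hypothetical call for\n        # illustration only (the command list is made up, not from the project's docs):\n        #   Process().run(\"mkvmerge\", [\"mkvmerge\", \"-o\", \"out.mkv\", \"in.mkv\"])\n        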
else:\n            exception = self.process_exceptions[command[0]]\n\n        message_exception = response.stdout.decode(\"utf-8\")\n        if bool(\n            response.stderr.decode(\"utf-8\")\n            and not response.stderr.decode(\"utf-8\").isspace()\n        ):\n            message_exception = response.stderr.decode(\"utf-8\")\n\n        raise exception(\n            f\"Process returned exit code `{return_code}`.\\r\\n\\r\\n{message_exception}\"\n        )\n","repo_name":"ToshY/mkvextract-subs","sub_path":"src/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24774902784","text":"import turtle\nimport math\nimport random\nimport neat\n\n#Player class representing a player in the game, which uses the turtle module to draw the player in the game map\nclass Player(turtle.Turtle):\n    def __init__(self):\n        turtle.Turtle.__init__(self)\n        self.shape(\"square\")\n        self.color(\"blue\")\n        self.penup()\n        self.speed(0)\n        self.current_pos = (1,1)\n\n    #Move the player to its desired position, which is one step in the loaded game map\n    def next_move(self, input, level):\n        self_x = self.current_pos[0]\n        self_y = self.current_pos[1]\n        end = False\n\n        if input == 2:\n            end = self.is_blocked((self_x+1, self_y), level)\n        elif input == 3:\n            end = self.is_blocked((self_x, self_y+1), level)\n        elif input == 1:\n            end = self.is_blocked((self_x, self_y-1), level)\n        elif input == 0:\n            end = self.is_blocked((self_x-1, self_y), level)\n\n        return end\n\n    #Check if the player is blocked by an obstacle\n    def is_blocked(self, next_pos, level):\n        if level[next_pos[1]][next_pos[0]] == 0:\n            level[self.current_pos[1]][self.current_pos[0]] = 0\n            level[next_pos[1]][next_pos[0]] = 2\n            self.current_pos = next_pos\n\n        elif level[next_pos[1]][next_pos[0]] == 3:\n            level[self.current_pos[1]][self.current_pos[0]] = 0\n            level[next_pos[1]][next_pos[0]] = 2\n            self.current_pos = next_pos\n\n        elif level[next_pos[1]][next_pos[0]] == 1:\n            return True\n\n        elif level[next_pos[1]][next_pos[0]] == 4:\n            return True\n\n        return False\n\n    #Calculate the distance between the player and an object (treasure)\n    def distance(self, other):\n        a = self.current_pos[0]-other[0]\n        b = self.current_pos[1]-other[1]\n        distance = math.sqrt((a**2)+(b**2))\n\n        return distance\n\n    #Return the distance in (x,y) form between the player and an object (treasure)\n    def dist(self, other):\n        width = self.current_pos[0]-other[0]\n        height = self.current_pos[1]-other[1]\n\n        return (width, height)\n","repo_name":"evansaboo/snake-neural-networks","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9447336585","text":"import re\n\nimport prometheus_client as pc\nimport prometheus_push_client as ppc\n\n\ndef make_metric_fixture(request, metric):\n    if metric._name not in ppc.PUSH_REGISTRY._names_to_collectors:\n        ppc.PUSH_REGISTRY.register(metric)\n\n    def unregister():\n        metric._metric_init()\n        if metric._name in ppc.PUSH_REGISTRY._names_to_collectors:\n            ppc.PUSH_REGISTRY.unregister(metric)\n    request.addfinalizer(unregister)\n\n    return metric\n\n\ndef collect_metrics(*metric_names, data=None):\n    data = data or pc.generate_latest(ppc.PUSH_REGISTRY).decode()\n\n    def only_interesting(line):\n        return (\n            line and\n            any(\n                line.startswith(m) and not line.startswith(f\"{m}_created\")\n                for m in metric_names\n            )\n        )\n\n    interesting_lines = filter(only_interesting, data.split(\"\\n\"))\n    return 
\"\\n\".join(interesting_lines)\n\n\ndef collect_formatter(formatter_cls, *metric_names):\n fmt = formatter_cls()\n\n collected = []\n for line in fmt.iter_samples():\n line = line.decode()\n\n if metric_names:\n for mname in metric_names:\n if line.startswith(mname):\n collected.append(line)\n break\n else:\n collected.append(line)\n\n collected = filter(lambda line: \"created\" not in line, collected)\n return \"\\n\".join(collected)\n","repo_name":"gistart/prometheus-push-client","sub_path":"test/testutils.py","file_name":"testutils.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"38876883467","text":"### 시간 초과 ###\n\n'''\nopt(start, end) := start번~end번 행렬 곱할때 최소 연산 횟수\nopt(start, end) = min(opt(start, mid) + opt(mid + 1, end) + 두 덩어리 사이의 연산횟수)\n\nopt배열을 채우는 순서\n구간이 좁은 순서부터 채움\n'''\nimport sys\ninput = sys.stdin.readline\n\n# input\nN = int(input())\nmatrix = [tuple(map(int, input().split())) for _ in range(N)]\n\n# process\nopt = [[float('inf') for _ in range(N)] for _ in range(N)]\n\nfor k in range(N):\n\topt[k][k] = 0\n\nfor length in range(2, N + 1):\n\tfor start in range(N):\n\t\tend = start + length - 1\n\t\tif end >= N: continue\n\n\t\tfor mid in range(start, end):\n\t\t\topt[start][end] = min(\n\t\t\t\topt[start][mid] + opt[mid + 1][end] + matrix[start][0] * matrix[mid][1] * matrix[end][1],\n\t\t\t\topt[start][end]\n\t\t\t)\n\n# output\nprint(opt[0][N - 1])","repo_name":"WaiNaat/BOJ-Python","sub_path":"11049_v3.py","file_name":"11049_v3.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1720575300","text":"#!/usr/bin/env python3\n# coding: utf-8\n# Author : penho\n# File : predict_ner.py\n# Date : 2021-11-01\n\nimport sys\n\nsys.path.append(\"\")\nfrom ner_model.models.transformers import BertConfig\nfrom ner_model.models.bert_for_ner import BertCrfForNer\nfrom ner_model.processors.utils_ner import CNerTokenizer, get_entities\nimport torch\nfrom ner_model.processors.ner_seq import ner_processors as processors\n\n\n\ndef predict_med_ner(sentence):\n MODEL_CLASSES = {\n 'bert': (BertConfig, BertCrfForNer, CNerTokenizer)}\n config_class, model_class, tokenizer_class = MODEL_CLASSES['bert']\n\n config = config_class.from_pretrained('med_kg/ner_model/outputs/1101medselfner-finetune/bert')\n tokenizer = tokenizer_class.from_pretrained('med_kg/ner_model/outputs/1101medselfner-finetune/bert')\n model = model_class.from_pretrained('med_kg/ner_model/outputs/1101medselfner-finetune/bert', config=config)\n # model.to(args.device)\n max_seq_length = 512\n # input_ids = tokenizer.encode(\"屁股痕痒是什么毛病?\", add_special_tokens=True, max_length=512) # Batch size 1\n input_ids = tokenizer.encode(sentence, add_special_tokens=True, max_length=512)\n # print(input_ids)\n padding_length = max_seq_length - len(input_ids)\n input_mask = [1] * len(input_ids)\n input_ids += [0] * padding_length\n input_mask += [0] * padding_length\n segment_ids = [0] * len(input_ids)\n\n input_ids = torch.tensor(input_ids).unsqueeze(0)\n input_mask = torch.tensor(input_mask).unsqueeze(0)\n input_lens = torch.tensor([1])\n # print(input_ids,input_mask,input_lens)\n\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"labels\": None, 'input_lens': input_lens}\n\n outputs = model(input_ids)\n # print(outputs)\n\n logits = outputs[0]\n tags = model.crf.decode(logits, inputs['attention_mask'])\n tags = 
tags.squeeze(0).cpu().numpy().tolist()\n    preds = tags[0][1:-1]  # [CLS]XXXX[SEP]\n\n    processor = processors['medselfner']()\n    label_list = processor.get_labels()\n    id2label = {i: label for i, label in enumerate(label_list)}\n    label_entities = get_entities(preds, id2label, 'bios')\n    # print(label_entities)\n    if len(label_entities) != 0:\n        b = label_entities[0][1]\n        e = label_entities[0][2]\n        output = sentence[b:e + 1] + ':' + label_entities[0][0]\n    else:\n        output = 'no mention recognized'\n    return output\n\n# None of these three entities appear in the training data, but the model generalizes well enough to recognize them. hzp\npredict_med_ner('大腿痕痒是什么毛病?')\npredict_med_ner('胆红素偏高怎么治疗?')\npredict_med_ner('999皮炎平可以治疗什么疾病?')\n","repo_name":"pen-ho/medical_knowledge_graph_app-master","sub_path":"med_kg/ner_model/predict_ner.py","file_name":"predict_ner.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"16"} +{"seq_id":"32458516663","text":"import sys\nimport os\nsys.path.append(os.environ[\"AIRFLOW_HOME\"])\nfrom modules.config import MONGO_HOST, MONGO_PORT, ELASTICSEARCH_HOST, ELASTICSEARCH_PORT\nfrom elasticsearch import Elasticsearch, helpers\nfrom pymongo import MongoClient\n\n# Fetch data from MongoDB\ndef mongoFetch(client: MongoClient, db: str, collection: str, query: dict=None):\n    mydb = client[db]\n    mycollection = mydb[collection]\n\n    data = list(mycollection.find(query))\n\n    return data\n\n# Load the MongoDB data into Elasticsearch\ndef esLoad():\n    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)\n    es = Elasticsearch(f\"http://{ELASTICSEARCH_HOST}:{ELASTICSEARCH_PORT}\")\n    db = \"JDA\"\n    collection = \"wanted\"\n    mongo_query = {\"validation\": True}\n\n    es.delete_by_query(index=\"wanted\", body={\"query\": {\"match_all\": {}}})\n    mongo_data = mongoFetch(mongo, db, collection, mongo_query)\n    docs = []\n    for data in mongo_data:\n        doc = {\n            \"_index\": \"wanted\",\n            \"_id\": str(data[\"_id\"]),\n            \"_source\": {\n                \"id\": data[\"id\"],\n                \"url\": data[\"url\"],\n                \"position\": data[\"position\"],\n                \"company\": data[\"company\"],\n                \"contents\": data[\"contents\"],\n                \"scraped_time\": data[\"scraped_time\"]\n            }\n        }\n        docs.append(doc)\n\n    helpers.bulk(es, docs)","repo_name":"mungiyo/JDA","sub_path":"airflow/modules/loaders/esLoader.py","file_name":"esLoader.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7624068037","text":"import requests\nimport bs4\nimport urllib.parse\nimport re\nimport webbrowser\n\nprint ('Available divisions')\nprint ('-------------------')\nprint ('A - Se_Asia\\nB - Europe\\nC - China\\nD - Americas\\n')\ndivDict={\n    'a':'se_asia',\n    'b':'europe',\n    'c':'china',\n    'd':'americas' \n}\nval=input(\"Enter division:\").lower()\ndivision=divDict[val]\nval2=input(\"Core? 
[y/n]:\").lower()\nrole=0\n\n#check if support role or not\nif (val2=='y'):\n role=1\nelse:\n role=2\n\n#gets the leaderboard data\nres=requests.get('http://www.dota2.com/webapi/ILeaderboard/GetDivisionLeaderboard/v0001?division=' +division+'&leaderboard='+str(role))\n\ndata=res.json()\n\nbest_player_se_asia=data['leaderboard'][0]['name']\n\n\n#encode best player data to a url format\nquery_string = urllib.parse.urlencode({\"search_query\" : best_player_se_asia +' full game'})\n\n\n#fetches the video and sorted by upload date\nwebsite=\"http://www.youtube.com/results?\" + query_string +'&sp=CAI%253D'\n\nwebbrowser.open_new_tab(website)","repo_name":"armanmasangkay/Dota2Scraper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42923114315","text":"from matplotlib import pyplot as plt\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.base import TransformerMixin\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nclass DenseTransformer(TransformerMixin):\n\n def fit(self, X, y=None, **fit_params):\n return self\n\n def transform(self, X, y=None, **fit_params):\n return X.todense()\n\n\ndef train(x_train, x_validation, y_train, y_validation):\n numeric_features = [\"PassengerId\", \"Pclass\", \"Age\", \"SibSp\", \"Parch\", \"Fare\"]\n numeric_transformer = Pipeline(\n steps=[(\"imputer\", SimpleImputer(strategy=\"median\")),\n (\"scaler\", StandardScaler())]\n )\n categorical_features = [\"Sex\", \"Ticket\", \"Embarked\"]\n categorical_transformer = OneHotEncoder(handle_unknown=\"ignore\")\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features)\n ]\n )\n\n models = [('LR', LogisticRegression()),\n ('LDA', Pipeline([(\"to_dense\", DenseTransformer()),\n (\"classifier\", LinearDiscriminantAnalysis())])),\n ('KNN', KNeighborsClassifier()),\n ('CART', DecisionTreeClassifier()),\n ((\"RFC\"), RandomForestClassifier()),\n ('NB', Pipeline([(\"to_dense\", DenseTransformer()),\n (\"classifier\", GaussianNB())])),\n ('SVM', SVC(gamma='auto'))]\n pipelines = []\n for model in models:\n pipeline = (model[0], Pipeline(\n steps=[(\"preprocessor\", preprocessor),\n (\"classifier\", model[1])]\n ))\n pipelines.append(pipeline)\n\n results = []\n names = []\n\n for name, pipeline in pipelines:\n k_fold = KFold(n_splits=10, random_state=1, shuffle=True)\n cv_results = cross_val_score(pipeline, x_train, y_train, cv=k_fold, scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n\n for name, cv_results in zip(names, results):\n print(f\"{name}: {round(cv_results.mean(), 3)} ± {round(cv_results.std(), 3)}\")\n\n plt.boxplot(results, labels=names)\n plt.title('Algorithm Comparison')\n plt.show()\n\n return x_train, y_train, x_validation, 
y_validation\n","repo_name":"joepcash/titanic_survival","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8479937268","text":"# -*- coding: utf-8 -*-\n\nimport os, sys\n\nsys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom spectrocrunch.process.id21_xas import processEnergySynchronized as process\n\nif __name__ == \"__main__\":\n path = os.path.dirname(os.path.abspath(__file__))\n\n specfile = \"/data/visitor/hg79/id21/spec/16050901.dat\"\n specnumbers = [[268, 269, 270], [272, 273, 274]]\n\n detectorcfg = os.path.join(path, \"testdata\", \"xrfxanes\", \"id21\", \"hg79.cfg\")\n\n destpath = os.path.join(path, \"testresults\", \"hg79\")\n destradix = \"test\"\n process(\n specfile,\n specnumbers,\n destpath,\n destradix,\n detectorcfg,\n showelement=\"Pb M\",\n sourcedir=None,\n dtcor=True,\n fastfitting=True,\n )\n","repo_name":"woutdenolf/spectrocrunch","sub_path":"scraps/xas_example.py","file_name":"xas_example.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"15618826138","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"Error messages for NC4DataStore\"\"\"\n\nimport os\nimport pytest\nfrom xsuite.backend import xstores\nimport netCDF4 as nc4\nimport xarray as xr\nimport pkg_resources\n\nfilename = pkg_resources.resource_filename('xsuite', 'data/sresa1b_ncar_ccsm3-example.nc')\n\ndef test_not_nc_dataset():\n with pytest.raises(TypeError) as err:\n xstores.NC4DataStore('Not a NC Dataset')\n assert 'ds is not a NETCDF4 dataset' in str(err)\n\n\ndef test_not_str_or_dataset():\n with pytest.raises(TypeError) as err:\n xstores.NC4DataStore(32)\n assert 'Object is neither a string nor a NETCDF4' in str(err)\n\n\ndef test_without_format():\n data = nc4.Dataset(filename)\n ds = xr.Dataset.load_store(xstores.NC4DataStore(data, format=None))\n assert isinstance(ds, xr.Dataset)\n\n\ndef test_just_filename():\n ds = xr.Dataset.load_store(xstores.NC4DataStore(filename, format=None))\n assert isinstance(ds, xr.Dataset)\n","repo_name":"ucyo/xsuite","sub_path":"tests/xstores/test_errors_nc4datastore.py","file_name":"test_errors_nc4datastore.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"70574085447","text":"import pytest\n\nfrom network.nettestlib import veth_pair\n\nfrom .netfunctestlib import NOCHK\nfrom .netfunctestlib import parametrize_switch\n\n\nNETWORK_NAME1 = 'test-network-1'\nNETWORK_NAME2 = 'test-network-2'\n\n\n@pytest.fixture\ndef veth_nics():\n with veth_pair() as nics:\n yield nics\n\n\n@parametrize_switch\ndef test_interfaces_stats(adapter, switch, veth_nics):\n NETSETUP1 = {\n NETWORK_NAME1: {\n 'bridged': False,\n 'nic': veth_nics[0],\n 'switch': switch,\n }\n }\n NETSETUP2 = {\n NETWORK_NAME2: {\n 'bridged': False,\n 'nic': veth_nics[1],\n 'switch': switch,\n }\n }\n\n with adapter.setupNetworks(NETSETUP1, {}, NOCHK):\n with adapter.setupNetworks(NETSETUP2, {}, NOCHK):\n stats = adapter.getNetworkStatistics()\n netstats = stats.get('network')\n assert netstats\n for nic in veth_nics:\n assert nic in netstats\n assert int(netstats[nic]['tx']) >= 0\n assert int(netstats[nic]['rx']) >= 
0\n","repo_name":"oVirt/vdsm","sub_path":"tests/network/functional/stats_test.py","file_name":"stats_test.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"} +{"seq_id":"43600531635","text":"\nimport geopandas as gpd\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport datetime\n\ndf = gpd.read_file(r'802_HMMSup_FZ.shp')# Read in file\ndf = pd.DataFrame(df) # Make it a dataframe\ndf['MT Time'] = pd.to_datetime(df['MT Time']) # Make MT Time a datetime index\ndf = df.set_index('MT Time')\n\ndf = df.between_time('8:00:00','19:00:00') # Select only \ndf = df.reset_index()\naug_df = df[df['MT Time'].dt.month == 8] # Seperate into seperate months (for some reason?)\n\n# print(aug_df)\n\nsep_df = df[df['MT Time'].dt.month == 9]\noct_df = df[df['MT Time'].dt.month == 10]\ncombination = []# Make combination - this should be fine, looked like Leos \nall_p = range(0,3**12)\nfor i in all_p:\n Tri = []\n quotient = int(i/3)\n remainder = i % 3\n Tri.append(remainder)\n while quotient != 0:\n remainder = quotient % 3\n Tri.append(remainder)\n quotient = int(quotient/3)\n combination.append(Tri)\n\n# Again should be fine, looks like Leo's - no comments so no idea why this here \ncomb = []\nfor i in combination:\n j = i[:]\n l = len(j)\n while l != 16:\n j.append(0)\n l = l + 1\n comb.append(j)\ncomb = comb\n\ndf = df.reset_index() # Reset the index\naug_day = 31 # Get the number of days in each month\nsep_day = 30\noct_day = 31\ndef get_cum(df,days,month, month_val):\n newdf = pd.DataFrame(index = pd.date_range(df.loc[df.index[0],'MT Time'].date(), df.loc[df.index[-1],'MT Time'].date(),freq='1D')) # Make an empty dataframe\n newdf['s'] = None # make a column, s\n\n run = 0 # Running counter\n num_days = range(1,days) # list of all days in the month\n for x in num_days: # Cycle through all the days in the month\n \n print('Running day {} of {} in {}'.format(x,days, month)) # Print message\n n_df = df[df['MT Time'].dt.date == datetime.date(year = 2021,month = month_val, day = x)] # Testing df\n print(n_df.shape[0])\n if ~n_df.empty: # if test me is not empty\n# print(n_df) # print it out as a test\n if n_df.shape[0] > 11: # If the number of records is greater than 11\n print('Im wokring')\n# \n MCDA = n_df['MCDA'].to_numpy() # Pull out the MCDA and classifications, turn them into numpy arrays\n low = n_df['Low_activi'].to_numpy()\n high = n_df['High_activ'].to_numpy()\n rest = n_df['Resting'].to_numpy()\n C_r = [] # Holder lists\n C_p = []\n for i in comb[0:3**12]: # cycle through comb\n \n n = 0 # set n,s, and p to these specific values\n s = 0\n p = 1\n for j in np.arange(0,12): # cycle through 1-12, should be the number of points in each day\n# print(str(j))\n if i[j] == 0: # for zero cases, \n s = s + MCDA[n] * 0.4 # get the mcda value\n p = p * (low[n]/(low[n] + high[n] + rest[n])) # set this formula\n elif i[j] == 1: # for one cases\n s = s + MCDA[n] * 0.2 # get the mcda value\n p = p * (rest[n]/(low[n] + high[n] + rest[n]))# set this formula\n elif i[j] == 2: # for 2 cases\n s = s + MCDA[n] * 0.4 # set the mcda value value\n p = p * (high[n]/(high[n] + rest[n] + low[n])) # set this formula\n n +=1 # n goes one higher \n C_r.append(s) # append s to C_r \n C_p.append(p) # append p to C_p\n \n# print(run/(3**12))\n run += 1\n c = pd.DataFrame(data = {'C_r':C_r, 'C_p':C_p})\n \n \n c['s'] = c[\"C_r\"] * c['C_p']\n else:\n c = pd.DataFrame()\n c.loc[0,'s'] = -1\n# print(c)\n 
newdf.loc[pd.to_datetime(month + ' ' + str(x) + ' 2021'),'s'] = c['s'].sum()\n            \n    return(newdf)\noct = get_cum(oct_df, oct_day, 'October',10)\n# print(oct)\n# # print(oct)\naug = get_cum(aug_df, aug_day, 'August',8)\n# print(aug)\nsep = get_cum(sep_df, sep_day, 'September',9)\nprint(sep)\n# df = df.set_index('MT Time')\nnewdf = pd.merge(oct,aug, left_index = True, right_index = True)\nnewdf = newdf.merge(sep, left_index = True, right_index = True)\n# # df = pd.concat([oct,aug,sep])\nprint(df)\n# df = gpd.GeoDataFrame(df, geometry = 'geometry')\n# df['MT Time'] = df['MT Time'].astype('str')\nnewdf.to_csv(r'~\\802CumExp.csv')","repo_name":"cgirlamo/Animal-Classification-P50","sub_path":"cum_exp.py","file_name":"cum_exp.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17657278362","text":"\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.template import loader\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import redirect, render\nfrom datetime import datetime\nimport json\nfrom .models import *\nfrom functools import reduce\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nDAYS = ['Пн','Вт','Ср','Чт','Пт','Сб','Вс']\n\n \ndef is_teacher(user):\n    return user.groups.filter(name='Teacher').exists()\n\n@login_required\n@user_passes_test(lambda u: u.groups.filter(name='admin').exists(),login_url='teachersDashbord')\ndef index(request):\n    if request.user.is_authenticated:\n        username = request.user.username\n    else:\n        #user = \"None\"\n        return HttpResponseRedirect(reverse('forbiden', args=()))\n    template = loader.get_template('crm/index.html')\n    context = {\"username\":username}\n    return HttpResponse(template.render(context, request))\n\ndef loginView(request):\n    template = loader.get_template('crm/login.html')\n    context = {}\n    return HttpResponse(template.render(context, request))\n\ndef forbiden(request):\n    template = loader.get_template('crm/forbidenPage.html')\n    context = {}\n    return HttpResponse(template.render(context, request))\n\ndef logoutView(request):\n    logout(request)\n    return HttpResponseRedirect(reverse('index', args=()))\n\ndef loginCheck(request):\n    username = request.POST['username']\n    password = request.POST['password']\n    user = authenticate(request, username=username, password=password)\n    if user is not None:\n        login(request,user)\n        return HttpResponseRedirect(reverse('index', args=()))\n    else:\n        return render(request,'crm/login.html',{\n            'error_message': \"Invalid username or password.\"\n        })\n\n#==========================================================================================================#\n@login_required\ndef CRM_clients(request):\n    if request.user.is_authenticated:\n        user = request.user.username\n    else:\n        #user = \"None\"\n        return HttpResponseRedirect(reverse('forbiden', args=()))\n    data = [{'id':s.pk,\n             'name':s.name,\n             'phone':Phone.objects.filter(client = s),\n             'money':s.money\n             } for s in Client.objects.all()]\n    #students = Client.objects.all()\n    #contacts = {s:Phone.objects.get(client = s) for s in students}\n    #print(data)\n    template = loader.get_template('crm/simpleClients.html')\n    context = {\n        'username' : user,\n        'data':data,\n    }\n    return HttpResponse(template.render(context, request))\n@login_required\ndef refreshClients(request):\n    clients = Client.objects.all()\n    for c in clients:\n        lessons = 
Lesson.objects.filter(clients = c)\n credit = sum([l.price for l in lessons])\n payments = Payment.objects.filter(client = c)\n debet= sum([p.value for p in payments])\n #print(c,lessons,payments)\n c.money = debet-credit\n c.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n #return HttpResponseRedirect(reverse('CRM_clients', args=()))\n@login_required\ndef add_client(request):\n try:\n name = request.POST['name']\n except (KeyError):\n return render(request,'crm/addClient.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n c = Client(name = name)\n c.save()\n return HttpResponseRedirect(reverse('CRM_clients', args=()))\n\n#=======================================================\n@login_required\ndef CRM_Lessons(request):\n if request.user.is_authenticated:\n user = request.user.username\n else:\n #user = \"None\"\n return HttpResponseRedirect(reverse('forbiden', args=()))\n month = datetime.today().month \n if 'month' in request.POST:\n try:\n month = int(request.POST['month'][-2::])\n except:\n pass\n data = Lesson.objects.filter(date__month = month)\n if 'teacher' in request.POST:\n teacher = int(request.POST['teacher'])\n if teacher >= 0:\n teacher = Teacher.objects.get(pk = teacher)\n \n data = data.filter(teacher =teacher)\n if 'group' in request.POST:\n group = int(request.POST['group'])\n if group >= 0:\n group = Group.objects.get(pk = group)\n\n data = Lesson.objects.filter(group = group)\n data = data.order_by('-date')\n groups = Group.objects.all().order_by('archive')\n teachers = Teacher.objects.all()\n template = loader.get_template('crm/simpleLessons.html')\n \n\n context = {\n 'groups':groups,\n 'teachers':teachers,\n 'lessons' : data,\n }\n return HttpResponse(template.render(context, request)) \n@login_required\ndef addLesson(request):\n students = Client.objects.all()\n teachers = Teacher.objects.all()\n template = loader.get_template('crm/lesson.html')\n context = {\n 'name':\"\",\n 'active_students' : students,\n 'active_teachers' : teachers\n }\n return HttpResponse(template.render(context, request))\n@login_required\ndef addLessonFromGroup(request,group_pk):\n group = Group.objects.get(pk = group_pk)\n students = group.clients.all()\n teachers = Teacher.objects.all()\n template = loader.get_template('crm/lesson.html')\n context = {\n 'group':group,\n 'default_teacher':group.teacher.pk,\n 'active_students' : students,\n 'active_teachers' : teachers\n }\n return HttpResponse(template.render(context, request)) \n\n@login_required\ndef lessonSuccess(request):\n try:\n name = request.POST['name']\n teacher = int(request.POST['teacher'])\n price = float(request.POST['price'])\n date = datetime.strptime(request.POST['date'],\"%Y-%m-%d\")\n students = [int(key.replace(\"student\",\"\")) for key in request.POST if key.startswith(\"student\")]\n group = int(request.POST['group'])\n except:\n return HttpResponseRedirect(reverse('addLesson', args=()))\n date = datetime.strptime(request.POST['date'],\"%Y-%m-%d\")\n active_teacher = Teacher.objects.get(pk = teacher)\n active_students = Client.objects.filter(pk__in = students)\n group = Group.objects.get(pk = group)\n l = Lesson(teacher = active_teacher,\n name = name,\n date = date,\n price = price,\n group = group)\n l.save()\n l.clients.set(active_students)\n l.save()\n return HttpResponseRedirect(reverse('CRM_Lessons', args=()))\n\n#========================================================\n@login_required\ndef CRM_payments(request):\n payments = Payment.objects.order_by('-date')\n clients = 
Client.objects.all()\n template = loader.get_template('crm/simplePayments.html')\n context = {\n 'payments' : payments,\n 'clients' : clients\n }\n return HttpResponse(template.render(context, request)) \n@login_required\ndef addPayment(request):\n try:\n tag = request.POST['tag']\n client = int(request.POST['client'])\n note = request.POST['note']\n date = datetime.strptime(request.POST['date'],\"%Y-%m-%d\")\n value = float(request.POST['value'])\n except (KeyError):\n return render(request,'crm/addClient.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n active_client = Client.objects.get(pk = client)\n p = Payment(client = active_client,\n date = date,\n value = value,\n note = note)\n p.save()\n if tag == \"client\":\n return HttpResponseRedirect(reverse('client_card', args=(client,)))\n else:\n return HttpResponseRedirect(reverse('CRM_payments', args=()))\n\n#========================================================\n\ndef convertSchedule(schedule:str)->list:\n schedule = json.loads(schedule.replace(\"\\'\", \"\\\"\"))\n r = []\n for day,time in schedule.items():\n r.append(DAYS[int(day)]+\" \"+time)\n return r\n\n@login_required\ndef CRM_groups(request):\n groups = Group.objects.filter(archive = False)\n active_groups = [{\n 'pk':g.pk,\n 'teacher':g.teacher,\n 'name':g.name,\n 'clients':g.clients.all,\n 'schedule':convertSchedule(g.schedule)\n } for g in groups]\n groups = Group.objects.filter(archive = True)\n archive_group = [{\n 'pk':g.pk,\n 'teacher':g.teacher,\n 'name':g.name,\n 'clients':g.clients.all,\n 'schedule':convertSchedule(g.schedule)\n } for g in groups]\n template = loader.get_template('crm/simpleGroups.html')\n context = {\n 'groups' : active_groups,\n 'archive' : archive_group,\n }\n return HttpResponse(template.render(context, request))\n@login_required\ndef addGroup(request):\n clients = Client.objects.all()\n teachers = Teacher.objects.all()\n template = loader.get_template('crm/addGroup.html')\n context = {\n 'clients' : clients,\n 'teachers':teachers\n }\n return HttpResponse(template.render(context, request))\n@login_required\ndef addGroupSuccess(request):\n try:\n name = request.POST['name']\n students = [int(c) for c in request.POST.getlist('student')]\n students = Client.objects.filter(pk__in = students)\n teacher = Teacher.objects.get(pk = request.POST['teacher'])\n schedule = {}\n for day in request.POST.getlist('day'):\n schedule[day] = request.POST[f\"day{day}time\"]\n except:\n return render(request,'crm/addGroup.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n else:\n g = Group(name = name,\n schedule = str(schedule),\n teacher = teacher)\n g.save()\n g.clients.set(students)\n g.save()\n return HttpResponseRedirect(reverse('CRM_groups', args=()))\n\n#========================================================\ndef checkTime(schedule,day)->str:\n schedule = json.loads(schedule.replace(\"\\'\", \"\\\"\"))\n if day in schedule:\n return schedule[day]\n else:\n return False\n\n@login_required\ndef CRM_dashboard(request):\n all_groups = Group.objects.filter(archive = False)\n today = str(datetime.today().weekday())\n groups = [g for g in all_groups if checkTime(g.schedule,today)]\n data = []\n for g in groups:\n data.extend([{'id':s.pk,\n 'name':s.name,\n 'phone':Phone.objects.filter(client = s),\n 'money':s.money,\n 'message':f\"Зравствуйте, напоминаем что завтра в {checkTime(g.schedule,today)} занятие по {g.name}. 
Будем вас ждать\"\n } for s in g.clients.all()])\n active_groups = [{\n 'pk':g.pk,\n 'teacher':g.teacher,\n 'name':g.name,\n 'clients':g.clients.all,\n 'schedule':convertSchedule(g.schedule)\n } for g in groups]\n today = datetime.today().weekday()\n today = today+1 if today <6 else 0\n groups = [g for g in all_groups if checkTime(g.schedule,str(today))]\n tomorrow_data = []\n for g in groups:\n tomorrow_data.extend([{'id':s.pk,\n 'name':s.name,\n 'phone':Phone.objects.filter(client = s),\n 'money':s.money,\n 'message':f\"Зравствуйте, напоминаем что завтра в {checkTime(g.schedule,today)} занятие по {g.name}. Будем вас ждать\"\n } for s in g.clients.all()])\n tomorrow_active_groups = [{\n 'pk':g.pk,\n 'teacher':g.teacher,\n 'name':g.name,\n 'clients':g.clients.all,\n 'schedule':convertSchedule(g.schedule)\n } for g in groups] \n template = loader.get_template('crm/simpleDashboard.html')\n context = {\n 'groups' : active_groups,\n 'clients' : data,\n 'tomorrow_groups' : tomorrow_active_groups,\n 'tomorrow_clients' : tomorrow_data, \n }\n return HttpResponse(template.render(context, request))\n\n#========================================================\n@login_required\ndef client_card(request, client_id,error_message = None):\n client = Client.objects.get(pk=client_id)\n payments = Payment.objects.filter(client=client)\n group = Group.objects.filter(clients=client)\n phone = Phone.objects.filter(client=client)\n data = Lesson.objects.filter(clients=client)\n template = loader.get_template('crm/clientCard.html')\n context = {\n 'client' : client,\n 'payments':payments,\n 'group': group,\n 'phone': phone,\n 'lessons': data,\n 'error_message':error_message\n\n }\n return HttpResponse(template.render(context, request))\nclass phoneError(Exception):\n def __init__(self, message):\n self.message = message\n def __str__(self):\n return f\"[ERROR]{self.message}\\n\"\n\ndef fixPhone(phone:str)->int:\n remove = \"() -\"\n for c in remove:\n phone = phone.replace(c,\"\")\n if len(phone) not in (10,11) :\n raise phoneError(\"Неверный номер!\")\n elif len(phone) == 10 :\n phone = \"8\" + phone\n return int(phone)\n elif len(phone) == 11 :\n phone = \"8\" + phone[1::]\n return int(phone)\n else:\n pass\n\n@login_required\ndef addPhone(request):\n try:\n client = int(request.POST['client'])\n note = request.POST['note']\n phone = fixPhone(request.POST['phone'])\n except (KeyError):\n return render(request,'crm/clientCard.html',{\n 'error_message': \"Что-то пошло не так, причем серьезно!\"\n })\n except phoneError as e:\n return redirect(\"client_card\",client_id = client,error_message = e.message)\n active_client = Client.objects.get(pk = client)\n p = Phone(client = active_client,\n phone = phone,\n note = note)\n p.save()\n return HttpResponseRedirect(reverse('client_card', args=(client,)))\n@login_required\ndef editName(request):\n try:\n client = int(request.POST['client'])\n name = request.POST['name']\n except (KeyError):\n return render(request,'crm/clientCard.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n clients = Client.objects.get(pk = client)\n clients.name = name\n clients.save()\n return HttpResponseRedirect(reverse('client_card', args=(client,)))\n\n# def deletePhone(request):\n# try:\n# print(request.POST)\n# client = int(request.POST['client'])\n# note = request.POST['note']\n# phone = int(request.POST['phone'])\n# except (KeyError):\n# return render(request,'crm/clientCard.html',{\n# 'error_message': \"You didn't select a choice.\"\n# })\n# active_client = 
Client.objects.get(pk = client)\n# p = Phone(client = active_client,\n# phone = phone,\n# note = note)\n# p.delete()\n# context = {\n# 'phone': p,\n# }\n# return HttpResponseRedirect(reverse('client_card', args=(client, context)))\n\n\n#========================================================\n@login_required\ndef editLesson(request, lesson_id):\n lesson = Lesson.objects.get(pk=lesson_id)\n clients = Client.objects.all()\n \n context = {\n 'lessons': lesson,\n 'clients': clients,\n }\n template = loader.get_template('crm/editLesson.html')\n\n return HttpResponse(template.render(context, request))\n\n@login_required \ndef editPriceLessons(request):\n try:\n lesson = int(request.POST['lesson'])\n price = int(request.POST['price'])\n except (KeyError):\n return render(request,'crm/editLesson.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n lessons = Lesson.objects.get(pk=lesson)\n lessons.price = price\n lessons.save()\n return HttpResponseRedirect(reverse('editLesson', args=(lesson,)))\n\n@login_required\ndef deleteLessonClient(request, client_id, lesson_id):\n client = Client.objects.get(pk=client_id)\n lesson = Lesson.objects.get(pk=lesson_id)\n lesson.clients.remove(client)\n lesson.save()\n return HttpResponseRedirect(reverse('editLesson', args=(lesson_id,)))\n\n@login_required \ndef addClientToLesson(request):\n try:\n client = request.POST['client']\n lesson = int(request.POST['lesson'])\n except (KeyError):\n return render(request,'crm/editLesson.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n\n new_client = Client.objects.get(pk = client)\n lesson = Lesson.objects.get(pk = lesson)\n lesson.clients.add(new_client)\n lesson.save()\n return HttpResponseRedirect(reverse('editLesson', args=(lesson.pk,)))\n \n \n\n#========================================================\n@login_required\ndef editGroup(request, group_id):\n group = Group.objects.get(pk=group_id)\n clients = Client.objects.all()\n active_group = {\n 'pk':group.pk,\n 'teacher':group.teacher,\n 'name':group.name,\n 'clients':group.clients.all,\n 'schedule':convertSchedule(group.schedule),\n 'archive':group.archive,\n }\n\n context = {\n 'group': active_group,\n 'clients' : clients,\n }\n template = loader.get_template('crm/editGroup.html')\n\n return HttpResponse(template.render(context, request))\n@login_required\ndef addClientToGroup(request):\n try:\n client = request.POST['client']\n group = int(request.POST['group'])\n except (KeyError):\n return render(request,'crm/editLesson.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n new_client = Client.objects.get(pk = client)\n group = Group.objects.get(pk = group)\n group.clients.add(new_client)\n group.save()\n return HttpResponseRedirect(reverse('editGroup', args=(group.pk,)))\n \n@login_required\ndef deleteGroupClient(request, client_id, group_id):\n client = Client.objects.get(pk=client_id) \n group = Group.objects.get(pk=group_id)\n group.clients.remove(client)\n group.save()\n return HttpResponseRedirect(reverse('editGroup', args=(group_id,)))\n@login_required\ndef editGroupData(request):\n try:\n group = int(request.POST['group'])\n schedule = {}\n for day in request.POST.getlist('day'):\n schedule[day] = request.POST[f\"day{day}time\"]\n except:\n return render(request,'crm/editGroup.html',{\n 'error_message': \"You didn't select a choice.\"\n })\n else:\n if not schedule:\n return HttpResponseRedirect(reverse('editGroup', args=(group,))) \n else:\n for i in schedule:\n if schedule[i] == \"\":\n return 
HttpResponseRedirect(reverse('editGroup', args=(group,))) \n g = Group.objects.get(pk=group)\n g.schedule = str(schedule)\n g.save() \n return HttpResponseRedirect(reverse('editGroup', args=(group,)))\n@login_required\ndef switch_archive_group(request,group_id):\n g = Group.objects.get(pk=group_id)\n g.archive = not g.archive\n g.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n#====================================================================\n@login_required\n@user_passes_test(lambda u: u.groups.filter(name='admin').exists(),login_url='teachersDashbord')\ndef mountlyReport(request):\n pay = 130\n if 'month' in request.POST:\n today = int(request.POST['month'][-2::])\n else:\n today = datetime.today().month\n active_lessons = Lesson.objects.filter(\n date__year=2022,\n date__month = today)\n totalLessonsPeopleCount = sum([l.clients.all().count() for l in active_lessons])\n totalLessonsMoneyCount = sum([l.clients.all().count()*l.price for l in active_lessons])\n active_payments = Payment.objects.filter( \n date__year=2022,\n date__month = today)\n totalPayAll = sum([p.value for p in active_payments])\n totalPayDiff = totalPayAll-totalLessonsMoneyCount\n active_clients = Client.objects.filter(money__lte = -1)\n teacher_data = []\n totalHighRoll = 0\n for t in Teacher.objects.all():\n t_lessons = active_lessons.filter(teacher = t).filter(date__month = today)\n hourCount = sum([l.clients.all().count() for l in t_lessons])\n totalPay = sum([l.clients.all().count()*l.price for l in t_lessons])\n payday = sum([l.clients.all().count()*pay for l in t_lessons])\n totalHighRoll+=totalPay - payday\n data = {'name':t.name,\n 'lessonCount':t_lessons.count,\n 'hourCount':hourCount,\n 'eff':hourCount/t_lessons.count() if t_lessons.count()>0 else 0,\n 'totalPay':totalPay,\n 'payday':payday,\n 'highroll':totalPay - payday,\n 'lessonHystory':t_lessons\n }\n teacher_data.append(data)\n\n \n template = loader.get_template('crm/report.html')\n context = {\n 'totalLessonsCount' : active_lessons.count,\n 'totalLessonsPeopleCount' : totalLessonsPeopleCount,\n 'totalLessonsMoneyCount' : totalLessonsMoneyCount,\n 'totalPayCount' : active_payments.count,\n 'totalPay' : totalPayAll,\n 'totalPayDiff' : totalPayDiff,\n 'clients' : active_clients,\n 'teacher_data':teacher_data,\n 'totalHighRoll':totalHighRoll\n }\n return HttpResponse(template.render(context, request))\n\n#==============================================================================\n@login_required\ndef teachersDashbord(request):\n if request.user.is_authenticated:\n username = request.user.username\n else:\n #user = \"None\"\n return HttpResponseRedirect(reverse('forbiden', args=()))\n \n # groups = Group.objects.all()\n\n # groups = [g for g in groups if checkTime(g.schedule)]\n # active_groups = [{\n # 'pk':g.pk,\n # 'teacher':g.teacher,\n # 'name':g.name,\n # 'clients':g.clients.all,\n # 'schedule':convertSchedule(g.schedule)\n # } for g in groups]\n template = loader.get_template('crm/teachersDashboard.html')\n context ={\n \"username\":username,\n # 'groups' : active_groups,\n }\n return HttpResponse(template.render(context, request))","repo_name":"Nikonoff-N/NewCRM","sub_path":"newCRM/CRM/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21843881381","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n 【简介】\n 部件中的信号槽通信示例\n\n\n\"\"\"\n\n\nfrom PyQt5.QtWidgets import QMainWindow,QHBoxLayout, 
QPushButton , QApplication, QWidget \nimport sys \n\nclass WinForm(QMainWindow): \n\t\n\tdef __init__(self, parent=None): \n\t\tsuper(WinForm, self).__init__(parent)\n\t\tself.setWindowTitle('控件中的信号槽通信')\n \n\t\tself.button1 = QPushButton('Button 1') \n\t\t# \n\t\tself.button1.clicked.connect(self.onButtonClick) \n \n\t\tlayout = QHBoxLayout() \n\t\tlayout.addWidget(self.button1) \n \n\t\tmain_frame = QWidget() \n\t\tmain_frame.setLayout(layout) \n\t\tself.setCentralWidget(main_frame) \n \n\tdef onButtonClick(self ): \n #sender 是发送信号的对象\n\t\tsender = self.sender() \n\t\tprint( sender.text() + ' 被按下了' ) \n\t\t\n \nif __name__ == \"__main__\": \n\tapp = QApplication(sys.argv) \n\tform = WinForm() \n\tform.show() \n\tsys.exit(app.exec_())\n","repo_name":"cxinping/PyQt5","sub_path":"Chapter07/qt07_winSignalSlot02.py","file_name":"qt07_winSignalSlot02.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":2140,"dataset":"github-code","pt":"16"} +{"seq_id":"8621307431","text":"from collections import defaultdict\nfrom copy import deepcopy\ndef fish_move(x, y, d):\n global Map, taste, sx, sy, steps\n for i in range(8):\n nd = (d - i)%8\n dx, dy = steps[nd]\n nx, ny = x + dx, y + dy\n if 0 <= nx < 4 and 0 <= ny < 4 and taste[nx][ny] == 0:\n if nx != sx or ny != sy:\n return nx, ny, nd\n return x, y, d\n\ndef fishs_move():\n global Map, taste, sx, sy, steps\n New = [[[0] * 8 for _ in range(4)] for _ in range(4)]\n for i in range(4):\n for j in range(4):\n for d in range(8):\n if Map[i][j][d]:\n x, y, nd = fish_move(i, j, d)\n New[x][y][nd] += Map[i][j][d]\n Map = New\ndef shark_moves():\n global step_shark, Map, sx, sy, taste\n \"\"\"\n 이부분을 1로두면 상어가 먹지않더라도 움직이는 경우 제어 못함\n 따라서 -1로 두어서 먹이0 제일우선순위 자리 구하는 경우 생각해야함\n \"\"\"\n Max_cnt = -1\n traked = set()\n for a in range(4):\n for b in range(4):\n for c in range(4):\n cnt = 0\n visit = set()\n x, y = sx, sy\n dx, dy = step_shark[a]\n nx, ny = x + dx, y + dy\n if 0 <= nx < 4 and 0 <= ny < 4:\n visit.add((nx, ny))\n x, y = nx, ny\n else:\n continue\n dx, dy = step_shark[b]\n nx, ny = x + dx, y + dy\n if 0 <= nx < 4 and 0 <= ny < 4:\n visit.add((nx, ny))\n x, y = nx, ny\n else:\n continue\n dx, dy = step_shark[c]\n nx, ny = x + dx, y + dy\n if 0 <= nx < 4 and 0 <= ny < 4:\n visit.add((nx, ny))\n x, y = nx, ny\n else:\n continue\n for x, y in visit:\n cnt += sum(Map[x][y])\n if cnt > Max_cnt:\n Max_cnt = cnt\n traked = [a, b, c]\n x, y = sx, sy\n for d in traked:\n dx, dy = step_shark[d]\n x, y = x + dx, y + dy\n if sum(Map[x][y]):\n taste[x][y] = 3\n Map[x][y] = [0]*8\n return x, y\n\n\nsteps = [(0,-1),(-1, -1),(-1, 0),(-1, 1),(0, 1),(1, 1),(1, 0),(1,-1)]\nstep_shark = [(-1, 0),(0, -1),(1, 0),(0, 1)]\nM, S = map(int, input().split())\nMap = [[[0]*8 for _ in range(4)] for _ in range(4)]\ntaste = [[0]*4 for _ in range(4)]\nfor fish_cnt in range(M):\n x, y, d = map(int, input().split())\n Map[x-1][y-1][d-1] += 1\nsx, sy = map(int, input().split())\nsx -= 1\nsy -= 1\ncnt = 0\nwhile cnt < S:\n cnt += 1\n initail = deepcopy(Map)\n # 물고기 이동\n fishs_move()\n # 상어 이동\n sx, sy = shark_moves()\n # 냄새 -1\n for i in range(4):\n for j in range(4):\n if taste[i][j] != 0:\n taste[i][j] -= 1\n # 물고기 복사\n for i in range(4):\n for j in range(4):\n for d in range(8):\n Map[i][j][d] += initail[i][j][d]\ncnt = 0\nfor i in range(4):\n for j in range(4):\n cnt += sum(Map[i][j])\nprint(cnt)","repo_name":"leejongcheal/baekjoon_course_coding","sub_path":"삼성기출/21하 마법사 상어와 복제 23290.py","file_name":"21하 마법사 상어와 복제 
23290.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11892866701","text":"# Study on eliminating unnecessary work\r\n# From pg 68 Gayle Laakman McDowell's \"Cracking the Coding Interview\"\r\n# created 11.10.2017 by CB Fay\r\n \r\n# O(n^3)\r\ndef printPairs(n):\r\n\t\"\"\"Print values of a,b,c,d where (a**3 + b**3 = c**3 + d**3).\"\"\"\r\n\tnum = 0\r\n\tfor a in range(1,n):\r\n\t\tfor b in range(1,n):\r\n\t\t\tfor c in range(1,n):\r\n\t\t\t\tA = a**3;\r\n\t\t\t\tB = b**3;\r\n\t\t\t\tC = c**3;\r\n\t\t\t\tif (A + B - C) > 0: # this is necessary to avoid complex numbers\r\n\t\t\t\t\td = int((A + B - C)**(1/3)) # there i only one valid d value for each (a,b,c)\r\n\t\t\t\telse: continue\r\n\t\t\t\tif a**3 + b**3 == c**3 + d**3 and d <= n:\r\n\t\t\t\t\tnum += 1\r\n\t\t\t\t\tprint(num, end=\"\\t\")\r\n\t\t\t\t\tprint(\"{},{},{},{}\".format(a,b,c,d))\r\n\t\t\t\t\r\n# O(n^2)\r\ndef printPairs2(n):\r\n\t\"\"\"Print values of a,b,c,d where (a**3 + b**3 = c**3 + d**3).\"\"\"\r\n\tmap = {} # a dictionary that holds all of the possible c**3 + d**3 combinations\r\n\tnum = 0 # a count of solutions found\r\n\tfor c in range(1,n):\r\n\t\tfor d in range(1,n):\r\n\t\t\tresult = c**3 + d**3; \r\n\t\t\tmap[result] = c, d # appends the pair (c, d) at key result\r\n\tfor a in range(1,n):\r\n\t\tfor b in range (1,n):\r\n\t\t\tresult = a**3 + b**3\r\n\t\t\tlist = [] # create an empty list\r\n\t\t\tlist.append(map[result]) # add all of the pair values at key result to the list\r\n\t\t\tfor pair in list: # print each element in the list. These are necessarily correct values of a and b\r\n\t\t\t\tnum += 1 \r\n\t\t\t\tprint(num, end=\"\\t\")\r\n\t\t\t\tprint(a, b, pair[0], pair[1], end=\" \")\r\n\t\t\t\tprint(\"\\t= {}\".format(a**3 + b**3))\r\n\r\n# O(n^2)\r\ndef printPairs3(n):\r\n\t\"\"\"Print values of a,b,c,d where (a**3 + b**3 = c**3 + d**3).\"\"\"\r\n\tmap = {} # a dictionary that holds all of the possible c**3 + d**3 combinations\r\n\tnum = 0 # the count of combinations of a,b,c,d values that make the statement true\r\n\tfor c in range(1,n): # start at 1, end with 999\r\n\t\tfor d in range(1,n):\r\n\t\t\tresult = c**3 + d**3\r\n\t\t\tmap.setdefault(result, []) # check for the key result, and if it doesn't exist, calling it will return an empty list.\r\n\t\t\tmap[result].append((c,d)) # appends a tuple (c,d) to the end of the single list held as a value to the key result\r\n\t\t\t\r\n\tfor keys in map: # for every possible outcome of c**3 + d**3...\r\n\t\tfor pair1 in map[keys]: # each value (list of tuples) in map gets assigned to pair1\r\n\t\t\tfor pair2 in map[keys]: # each value (list of tuples) in map gets assigned to pair2\r\n\t\t\t\tnum += 1 # increment the count\r\n\t\t\t\tprint(pair1, pair2)\r\n\t\t\t\t\r\n\t\t\t\t# print(num, end = \"\\t\")\r\n\t\t\t\t# print(pair1, pair2, end = \"\\t\")\r\n\t\t\t\t# print(\"a**3 + b**3 =\",end = \" \")\r\n\t\t\t\t# print(pair1[0]**3 + pair1[1]**3)\r\n\t\t\t\t\r\ndef main():\r\n\tprintPairs3(1000)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain() \r\n\t\t\t","repo_name":"craigfay/Python_Study","sub_path":"Python 0026 - printPairs/printPairs.py","file_name":"printPairs.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20725013635","text":"def factorial(n):\n dp = [1 for i in range(n + 1)]\n for i in range(2, n + 1):\n dp[i] = dp[i - 1] * i\n return dp\ndef 
solution(n, k):\n answer = []\n dp = factorial(n)\n visited = [0 for _ in range(n)]\n for i in range(n - 1, -1, -1):\n a = (k - 1) // dp[i] + 1\n count = 0\n for j in range(n):\n if visited[j] == 1:\n continue\n else:\n count += 1\n if count == a:\n visited[j] = 1\n answer.append(j + 1)\n break\n b = (k - 1) % dp[i] + 1\n k = b\n return answer","repo_name":"Angela-OH/Algorithm","sub_path":"프로그래머스/Lv.2/12936.py","file_name":"12936.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22473744884","text":"import unittest\nfrom typing import List\n\ndata = (\n ([1, 3, 5, 6], 5, 2),\n ([1, 3, 5, 6], 2, 1),\n ([1, 3, 5, 6], 7, 4),\n ([1, 3, 5, 6], 0, 0),\n ([1], 1, 0),\n ([1, 3], 0, 0),\n)\n\n\nclass Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n left_index = 0\n right_index = len(nums)\n while left_index < right_index:\n median_index = left_index + (right_index - left_index) // 2\n median_element = nums[median_index]\n if median_element >= target:\n right_index = median_index\n continue\n left_index = median_index + 1\n return left_index\n\n\nclass TestCase(unittest.TestCase):\n def test_solution(self):\n s = Solution()\n\n for input_data, target, result in data:\n self.assertEqual(result, s.searchInsert(input_data, target))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cybernextgen/leetcode","sub_path":"easy/35-search-insert-position.py","file_name":"35-search-insert-position.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5355803453","text":"\nfrom gcp_helpers.storage import Storage\nfrom gcp_helpers.bigquery import BigQuery\nfrom gcp_helpers.logger import Logger\nfrom config import raw_data_output_dir, project_id, dataset_id, bucket_name\nfrom pathlib import Path\nfrom utils.parallel import parallelize_threads\n\n# setup logger\nlogger = Logger(project_id).logger\n\n\ndef load_to_BQ(table_id):\n \"\"\"\n Upload data to GCS, then load into BQ\n\n Parameters\n ----------\n table_id : str\n Name of table to load data into\n \"\"\"\n\n # upload data to GCS\n s = Storage(project_id=project_id, bucket_name=bucket_name, logger=logger)\n local_src_path = Path(raw_data_output_dir) / f\"{table_id}.json\"\n gcs_dest_path = f'raw_data/{table_id}.json'\n s.upload_file(local_src_path, gcs_dest_path)\n\n # instantiate BigQuery API helper\n t = BigQuery(project_id=project_id,\n dataset_id=dataset_id,\n table_id=table_id,\n schema_json_path=f'./gen_features/schemas/{table_id}.json',\n logger=logger)\n\n # create dataset if it doesn't exist\n if not t.dataset_exists():\n t.create_dataset()\n\n # move data from GCS to BQ\n gcs_uri = f\"gs://{bucket_name}/{gcs_dest_path}\"\n t.load_from_gcs(gcs_uri, source_format='JSON')\n\n\ndef run():\n\n # Upload data to GCS, then load into BQ, in parallel\n params = [\n {'table_id': 'events'},\n {'table_id': 'customer_info'},\n ]\n parallelize_threads(load_to_BQ, params)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"sthobbs/fraud-model-dev","sub_path":"gen_features/upload_raw_data.py","file_name":"upload_raw_data.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"770152571","text":"import socket\nimport sys, select\nimport time\n\ndef stopwatch(seconds):\n start = time.time()\n time.clock() \n elapsed = 0\n 
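# Note on the stopwatch() helper above: time.clock() was removed in Python 3.8,
# so that call raises AttributeError on modern interpreters (its return value is
# discarded anyway). A minimal sketch of an equivalent busy-wait built on
# time.monotonic(), which is immune to system clock adjustments (a hypothetical
# rewrite, not part of the original source):
#
#     import time
#
#     def stopwatch(seconds):
#         start = time.monotonic()
#         while time.monotonic() - start < seconds:
#             print("Waiting", seconds, "...")
#             time.sleep(0.5)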
while elapsed < seconds:\n        elapsed = time.time() - start\n        print (\"Waiting\",seconds,\"...\")\n        time.sleep(0.5)\n\ndef getLine():\n\ti,o,e = select.select([sys.stdin],[],[],0.0001)\n\tfor s in i:\n\t\tif s == sys.stdin:\n\t\t\tinput = sys.stdin.readline()\n\t\t\treturn input\n\treturn False\n\naddress = \"\"\nmessage = \"\"\nserverHost = \"192.168.1.255\"\nserverPort = 8080\nplayerCount = 0\n \nserverAddr = (serverHost, serverPort)\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Make Socket Reusable\nserverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # Allow incoming broadcasts\nserverSocket.setblocking(False) # Set socket to non-blocking mode\nserverSocket.bind(('', serverPort)) #Accept Connections on port\nprint (\"Connected to\", serverPort)\n\nsetupMsg = \"hi\"\n\n\nserverSocket.sendto(setupMsg.encode('utf-8'), serverAddr)\n\ntry:\n\t(data, address) = serverSocket.recvfrom(8192)\nexcept:\n\tprint(\"No response from server, trying again...\")\n\tpass\n\t\nrecv = data.decode('utf-8').split()\ngamePort = recv[1]\nmyId = recv[0]\n\ngameAddr = (serverHost, gamePort)\ngameSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ngameSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Make Socket Reusable\ngameSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # Allow incoming broadcasts\ngameSocket.setblocking(False) # Set socket to non-blocking mode\ngameSocket.bind(('', gamePort)) #Accept Connections on port\nprint (\"Connecting to\", gamePort)\n\n\nprint(\"Connected to game port\", gamePort, \"with ID\", myId)\n\nwhile 1:\n\twhile 1:\n\t\tgameSocket.sendto((str(playerCount)).encode('utf-8'), gameAddr)\n\t\tgameSocket.setblocking(True) \n\t\tmessage, address = gameSocket.recvfrom(8192)\n\n\t\tif message.decode('utf-8') == \"0\":\n\t\t\tplayerCount += 1\n\t\t\tbreak\n\t\telif message:\n\t\t\tplayerCount = int(message.decode('utf-8'))\n\t\telse:\n\t\t\tstopwatch(2)\n\t\t\tprint(\"Waiting for players...\")\n\twhile 1:\n\t\tprint(\"Starting game...\")\n\t\tresults = {} # collect each player's number, keyed by player id\n\t\tprint(\"Input a number between 1 and 10: \")\n\n\t\twhile(True):\n\t\t\tinput = getLine()\n\t\t\tif not input:\n\t\t\t\tcontinue # nothing on stdin yet, getLine() is non-blocking\n\t\t\tif not (1 <= int(input) <= 10):\n\t\t\t\tprint(\"Please input a number between 1 and 10\")\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tresults[myId] = int(input)\n\t\tgameSocket.sendto((myId + \" \" + input.strip()).encode('utf-8'), gameAddr)\n\t\tprint(myId, \":\", input.strip())\n\n\t\ttry:\n\t\t\tgameSocket.settimeout(2)\n\t\t\tfor x in range(playerCount):\n\t\t\t\tmessage, address = gameSocket.recvfrom(8192)\n\n\t\t\t\tif message.decode('utf-8') == \"0\":\n\t\t\t\t\tplayerCount += 1\n\t\t\t\t\tgameSocket.sendto(str(playerCount).encode('utf-8'), gameAddr)\n\t\t\t\t\tx -= 1\n\t\t\t\telif message.decode('utf-8') == str(playerCount):\n\t\t\t\t\tx -= 1\n\t\t\t\telse:\n\t\t\t\t\trecv = message.decode('utf-8').split()\n\t\t\t\t\tprint(recv[0],\":\",recv[1])\n\t\t\t\t\tresults[recv[0]]=int(recv[1])\n\n\t\t\tprint(\"Starting counting from player 0\")\n\t\t\tresult = sum(results.values())\n\t\t\twinner = (result % (playerCount+1))\n\t\t\tprint(\"Player\", winner, \"wins!\")\n\t\t\tprint(\"Starting a new game...\")\n\n\t\texcept socket.timeout:\n\t\t\tprint(\"A player disconnected\")\n\t\t\tplayerCount -= 1\n\t\t\tif playerCount>=2:\n\t\t\t\tprint(\"Restarting game...\")\n\t\t\telse:\n\t\t\t\tprint(\"Not enough players for a match, finding new game...\")\n\t\t\t\tgameSocket.close()\n\t\t\t\ttry:\n\t\t\t\t\t(data, address) = 
serverSocket.recvfrom(8192)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"No response from server, trying again...\")\n\t\t\t\t\tpass\n\t\t\t\t\t\n\t\t\t\trecv = data.decode('utf-8').split()\n\t\t\t\tgamePort = recv[1]\n\t\t\t\tmyId = recv[0]\n\n\t\t\t\tgameAddr = (serverHost, gamePort)\n\t\t\t\tgameSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\t\tgameSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Make Socket Reusable\n\t\t\t\tgameSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # Allow incoming broadcasts\n\t\t\t\tgameSocket.setblocking(False) # Set socket to non-blocking mode\n\t\t\t\tgameSocket.bind(('', gamePort)) #Accept Connections on port\n\t\t\t\tprint(\"Connecting to\", gamePort)\n\t\t\t\tprint(\"Connected to game port\", gamePort, \"with ID\", myId)\n\t\t\t\tbreak","repo_name":"rbv3/ProjetoInfraCom","sub_path":"Q3/p2p.py","file_name":"p2p.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14445902564","text":"import pandas as pd\nimport re\n\n# Read the CSV file\ndf = pd.read_csv('data/schools.csv')\n\n# Change column titles to lowercase\ndf.columns = [col.lower() for col in df.columns]\n\n# Define a function to convert time to timestamp form\ndef time_to_timestamp(time_str):\n time_ranges = re.findall(r'(\\d{1,2}:\\d{2})\\s*-\\s*(\\d{1,2}:\\d{2})', time_str)\n time_timestamps = []\n for start, end in time_ranges:\n start_timestamp = pd.Timestamp(start).strftime('%H:%M')\n end_timestamp = pd.Timestamp(end).strftime('%H:%M')\n time_timestamps.append(f\"{start_timestamp}-{end_timestamp}\")\n return ','.join(list(set(time_timestamps)))\n\n# Apply the conversion to each time column\ntime_columns = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday']\nfor col in time_columns:\n df[col] = df[col].apply(lambda x: time_to_timestamp(x.strip('[]')) if isinstance(x, str) else x)\n\n# Save the modified CSV file\ndf.to_csv('data/modified_schools.csv', index=False)\n","repo_name":"willblair0708/scienceolympiad","sub_path":"matching/schools.py","file_name":"schools.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70976259530","text":"# import Python packages\nimport holidays\n\n# define a temporary function\ndef is_holiday(date_col):\n usa_holidays = holidays.US()\n is_holiday = (date_col in usa_holidays)\n return is_holiday\n\n# The required model() function must return a single DataFrame\ndef model(dbt, session):\n dbt.config(\n materialized = \"table\",\n packages = [\"holidays\"]\n )\n\n orders_df = dbt.ref(\"fct_orders\")\n\n df = orders_df.to_pandas()\n\n # apply our function\n # (columns need to be in uppercase on Snowpark)\n df[\"IS_HOLIDAY\"] = df[\"ORDER_DATE\"].apply(is_holiday)\n\n # return final dataset (Pandas DataFrame)\n return df\n ","repo_name":"dbt-labs/rapid-onboarding-exemplar","sub_path":"_samples/python/py03__import_pypi_package__holiday.py","file_name":"py03__import_pypi_package__holiday.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"16"} +{"seq_id":"73409624007","text":"from __future__ import annotations\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom s3prl import Container, field\nfrom s3prl.base.logdata import Logs\nfrom s3prl.corpus.voxceleb1sv import voxceleb1_for_sv\nfrom 
s3prl.dataset.base import DataLoader\nfrom s3prl.dataset.speaker_verification_pipe import SpeakerVerificationPipe\nfrom s3prl.nn.speaker_model import SuperbXvector\nfrom s3prl.sampler import FixedBatchSizeBatchSampler\nfrom s3prl.task.speaker_verification_task import SpeakerVerification\nfrom s3prl.util.configuration import default_cfg\nfrom s3prl.util.workspace import Workspace\n\nfrom .base import SuperbProblem\n\nlogger = logging.getLogger(__name__)\n\n\nEFFECTS = [\n [\"channels\", \"1\"],\n [\"rate\", \"16000\"],\n [\"gain\", \"-3.0\"],\n [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n]\n\n\nclass SuperbSV(SuperbProblem):\n \"\"\"\n Superb Speaker Verification problem\n \"\"\"\n\n @default_cfg(\n **SuperbProblem.setup.default_except(\n corpus=dict(\n CLS=voxceleb1_for_sv,\n dataset_root=\"???\",\n ),\n train_datapipe=dict(\n CLS=SpeakerVerificationPipe,\n random_crop_secs=8.0,\n sox_effects=EFFECTS,\n ),\n train_sampler=dict(\n CLS=FixedBatchSizeBatchSampler,\n batch_size=10,\n shuffle=True,\n ),\n valid_datapipe=dict(\n CLS=SpeakerVerificationPipe,\n sox_effects=EFFECTS,\n ),\n valid_sampler=dict(\n CLS=FixedBatchSizeBatchSampler,\n batch_size=1,\n ),\n test_datapipe=dict(\n CLS=SpeakerVerificationPipe,\n sox_effects=EFFECTS,\n ),\n test_sampler=dict(\n CLS=FixedBatchSizeBatchSampler,\n batch_size=1,\n ),\n downstream=dict(\n CLS=SuperbXvector,\n ),\n task=dict(\n CLS=SpeakerVerification,\n loss_type=\"amsoftmax\",\n loss_cfg=dict(\n margin=0.4,\n scale=30,\n ),\n ),\n )\n )\n @classmethod\n def setup(cls, **cfg):\n \"\"\"\n This setups the ASV problem, containing train/valid/test datasets & samplers and a task object\n \"\"\"\n super().setup(**cfg)\n\n @default_cfg(\n **SuperbProblem.train.default_except(\n optimizer=dict(\n CLS=\"torch.optim.AdamW\",\n lr=1.0e-4,\n ),\n trainer=dict(\n total_steps=200000,\n log_step=500,\n eval_step=field(1e10, \"ASV do not use validation set\"),\n save_step=20000,\n gradient_clipping=1.0e3,\n gradient_accumulate_steps=5,\n valid_metric=\"eer\",\n valid_higher_better=False,\n max_keep=10,\n ),\n )\n )\n @classmethod\n def train(cls, **cfg):\n \"\"\"\n Train the setup problem with the train/valid datasets & samplers and the task object\n \"\"\"\n super().train(**cfg)\n\n @default_cfg(\n **SuperbProblem.inference.default_except(\n inference_steps=field(\n [\n 20000,\n 40000,\n 60000,\n 80000,\n 100000,\n 120000,\n 140000,\n 160000,\n 180000,\n 200000,\n ],\n \"The steps used for inference\\n\",\n \"egs: [900, 1000] - use the checkpoint of 90 and 100 steps for inference\",\n )\n )\n )\n @classmethod\n def inference(cls, **cfg):\n cfg = Container(cfg)\n\n workspace = Workspace(cfg.workspace)\n dataset = workspace[f\"{cfg.split_name}_dataset\"]\n sampler = workspace[f\"{cfg.split_name}_sampler\"]\n dataloader = DataLoader(dataset, sampler, num_workers=cfg.n_jobs)\n\n with torch.no_grad():\n all_eers = []\n for step in cfg.inference_steps:\n step_dir = workspace / f\"step-{step}\"\n task = step_dir[\"task\"]\n task = task.to(cfg.device)\n task.eval()\n\n test_results = []\n for batch_idx, batch in enumerate(\n tqdm(dataloader, desc=\"Test\", total=len(dataloader))\n ):\n batch = batch.to(cfg.device)\n result = task.test_step(**batch)\n test_results.append(result.cacheable())\n\n logs: Logs = task.test_reduction(test_results).logs\n logger.info(f\"Step {step}\")\n\n metrics = {key: value for key, value in logs.scalars()}\n step_dir.put(metrics, \"test_metrics\", \"yaml\")\n for key, value in metrics.items():\n logger.info(f\"{key}: 
{value}\")\n all_eers.append(metrics[\"EER\"])\n\n workspace.put({\"minEER\": min(all_eers)}, \"test_metrics\", \"yaml\")\n\n @default_cfg(\n **SuperbProblem.run.default_except(\n stages=[\"setup\", \"train\", \"inference\"],\n start_stage=\"setup\",\n final_stage=\"inference\",\n setup=setup.default_cfg.deselect(\"workspace\", \"resume\", \"dryrun\"),\n train=train.default_cfg.deselect(\"workspace\", \"resume\", \"dryrun\"),\n inference=inference.default_cfg.deselect(\"workspace\", \"resume\", \"dryrun\"),\n )\n )\n @classmethod\n def run(cls, **cfg):\n super().run(**cfg)\n","repo_name":"s3prl/s3prl","sub_path":"s3prl/problem/superb/sv.py","file_name":"sv.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","stars":1943,"dataset":"github-code","pt":"16"} +{"seq_id":"34754740919","text":"import sys\nimport os\n\nfrom typing import Dict, Tuple, List, NamedTuple\n\nimport numpy as np\n\nimport pyqtgraph as pg\n\nimport PyQt5\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.uic import *\n\nfrom seiscore import BinaryFile\nfrom seiscore import Spectrogram\n\nfrom config import ConfigFile\nfrom dbase import SqliteDbase\n\n\ndef get_lib_path() -> list:\n return [os.path.join(os.path.dirname(PyQt5.__file__), 'Qt5', 'plugins')]\n\n\nclass FormData(NamedTuple):\n filename: str\n component: str\n resample_freq: int\n min_frequency: float\n max_frequency: float\n conclusion: str\n\n\nclass MainWindow:\n def __init__(self, database: SqliteDbase):\n self.__app = QApplication(sys.argv)\n self.__window = QMainWindow()\n self.__dbase = database\n\n ui_path = 'SeisDefectViewer.ui'\n self.__ui = loadUi(ui_path, self.__window)\n self.__ui.cbFilesList.currentTextChanged.connect(self.show_signal_data)\n self.__ui.cbComponentList.currentTextChanged.connect(self.show_signal_data)\n self.__ui.bSave.clicked.connect(self.save_checking_conclusion)\n self.__ui.sbFMin.valueChanged.connect(self.set_spectrogram_y_limits)\n self.__ui.sbFMax.valueChanged.connect(self.set_spectrogram_y_limits)\n\n self.__files_info = None\n self.__spectrogram_plot = None\n self.update_lists()\n\n self.show_signal_data()\n\n self.screen_center()\n self.__window.show()\n self.__app.exec()\n\n @property\n def window(self):\n return self.__window\n\n @property\n def ui(self):\n return self.__ui\n\n @property\n def dbase(self) -> SqliteDbase:\n return self.__dbase\n\n @property\n def form_data(self) -> FormData:\n filename = self.ui.cbFilesList.currentText()\n component = self.ui.cbComponentList.currentText()\n resample_freq = self.ui.sbResampleFreq.value()\n f_min = self.ui.sbFMin.value()\n f_max = self.ui.sbFMax.value()\n conclusion = self.ui.cbConclusion.currentText()\n return FormData(filename, component, resample_freq, f_min, f_max,\n conclusion)\n\n def get_files_list(self) -> Dict[str, Tuple[int, str, List[str]]]:\n records = self.dbase.get_seismic_files_for_checking()\n transform_data = dict()\n for rec in records:\n path = rec[1]\n filename = os.path.basename(path)\n transform_data[filename] = rec\n return transform_data\n\n def screen_center(self):\n frame_geom = self.window.frameGeometry()\n screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())\n center_point = QApplication.desktop().screenGeometry(screen).center()\n frame_geom.moveCenter(center_point)\n self.window.move(frame_geom.topLeft())\n\n def set_files_list(self):\n self.ui.cbFilesList.clear()\n self.ui.cbFilesList.addItems(list(self.__files_info.keys()))\n\n def 
get_current_file_info(self) -> Tuple[int, str, List[str]]:\n return self.__files_info[self.form_data.filename]\n\n def set_components_list(self):\n self.ui.cbComponentList.clear()\n\n file_info = self.get_current_file_info()\n components = file_info[-1]\n self.ui.cbComponentList.addItems(components)\n\n def set_spectrogram_y_limits(self):\n if not self.__spectrogram_plot:\n return\n f_min = self.form_data.min_frequency\n f_max = self.form_data.max_frequency\n self.__spectrogram_plot.setRange(yRange=(f_min, f_max))\n\n def get_current_component(self) -> str:\n return self.form_data.component\n\n def get_signal(self) -> Tuple[np.ndarray, int]:\n _, path, _ = self.get_current_file_info()\n component = self.get_current_component()\n resample_freq = self.form_data.resample_freq\n\n bin_data = BinaryFile(path, use_avg_values=True,\n resample_frequency=resample_freq)\n return bin_data.read_signal(component), bin_data.resample_frequency\n\n def plot_signal(self, signal: np.ndarray, frequency: int):\n widget = self.ui.gSignal\n widget.clear()\n time_scale = np.arange(0, signal.shape[0], 1) / frequency\n widget.plot(time_scale, signal, pen=(255, 0, 0))\n\n def plot_spectrogram(self, signal: np.ndarray, frequency: int):\n plot = self.ui.gSpectrogram\n plot.clear()\n\n spectrogram = Spectrogram(signal, frequency)\n sp_data = spectrogram.sp_data\n\n if sp_data.frequencies.shape[0] == 0:\n return\n\n time, frequencies = sp_data.times, sp_data.frequencies\n amplitudes = sp_data.amplitudes\n\n amplitudes = 20 * np.log10(abs(amplitudes))\n amplitudes = amplitudes.T\n\n img = pg.ImageItem()\n img.setImage(amplitudes, xvals=time, yvals=frequencies)\n\n dx = (time[-1] - time[0]) / time.shape[0]\n dy = (frequencies[-1] - frequencies[0]) / frequencies.shape[0]\n img.scale(dx, dy)\n\n hist = pg.HistogramLUTItem()\n min_val, max_val = spectrogram.scale_limits()\n hist.setLevels(min_val, max_val)\n hist.gradient.restoreState(\n {'mode': 'rgb',\n 'ticks': [\n (0.0, (153, 102, 255, 255)),\n (0.2, (0, 0, 255, 255)),\n (0.4, (0, 255, 0, 255)),\n (0.6, (255, 255, 0, 255)),\n (0.8, (255, 102, 0, 255)),\n (1.0, (255, 0, 0, 255))]\n })\n hist.setImageItem(img)\n plot.addItem(img)\n self.__spectrogram_plot = plot\n self.set_spectrogram_y_limits()\n\n def show_signal_data(self):\n if not self.get_current_component():\n return\n try:\n signal, frequency = self.get_signal()\n except KeyError:\n return\n self.plot_signal(signal, frequency)\n self.plot_spectrogram(signal, frequency)\n\n def save_checking_conclusion(self):\n filename = self.ui.cbFilesList.currentText()\n component = self.ui.cbComponentList.currentText()\n conclusion = self.ui.cbConclusion.currentText()\n file_id = self.__files_info[filename][0]\n self.dbase.update_seis_file_checking_status(file_id, component, conclusion)\n self.update_lists()\n\n def update_lists(self):\n self.__files_info = self.get_files_list()\n if not self.__files_info:\n self.ui.statusBar.showMessage('Все файлы уже отбракованы')\n return\n\n self.set_files_list()\n self.set_components_list()\n\n\ndef run():\n QtCore.QCoreApplication.setLibraryPaths(get_lib_path())\n MainWindow(db)\n\n\nif __name__ == '__main__':\n # storage_folder = os.getenv('DATA_PATH')\n conf_file = '/media/michael/Data/Projects/GraviSeismicComparation' \\\n '/ZapolarnoeDeposit/2021/config.json'\n config = ConfigFile(conf_file)\n db = SqliteDbase(config.export_root)\n 
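# Note on plot_spectrogram() above: 20 * np.log10(abs(amplitudes)) produces
# -inf (plus a RuntimeWarning) for any zero-amplitude bin, which can upset the
# histogram levels handed to pyqtgraph. A guarded dB conversion is one option
# (a sketch; the epsilon floor is an assumption, not taken from the original):
#
#     import numpy as np
#
#     def to_db(amplitudes: np.ndarray, eps: float = 1e-12) -> np.ndarray:
#         # Floor the magnitudes so log10 never sees zero.
#         return 20 * np.log10(np.maximum(np.abs(amplitudes), eps))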
run()\n","repo_name":"MikkoArtik/FileSorter","sub_path":"ui/seis_defect_viewer.py","file_name":"seis_defect_viewer.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34111841580","text":"#!/usr/bin/env python3\n\nimport os\n\nimport gi\ngi.require_version('Gtk', '3.0')\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\n\nfrom src.Config import ConfigManager\nfrom src.Places import ICONS_FOLDER\nfrom src.Places import CSS_FOLDER\n\nfrom .browser.BrowserCtrl import BrowserCtrl\nfrom .tagger.TaggerCtrl import TaggerCtrl\nfrom .mover.MoverCtrl import MoverCtrl\nfrom .editor.EditorCtrl import EditorCtrl\n\nclass ServiceManager:\n\n def __init__(self, app):\n self.app = app\n\n def getApplication(self):\n return self.app\n\nclass BaseApp(Gtk.Application):\n app_id = \"fdibaldassarre.tagmanager.application\"\n name = \"Application\"\n\n def __init__(self):\n super().__init__(application_id=self.app_id)\n Gdk.set_program_class(self.name)\n self.services = ServiceManager(self)\n self._loadCssProvider()\n self._loadIcon()\n self.connect(\"activate\", self.do_activate)\n\n def _loadIcon(self):\n self.icon_path = os.path.join(ConfigManager.getOverridesFolder(), \"icon.png\")\n if not os.path.exists(self.icon_path):\n self.icon_path = os.path.join(ICONS_FOLDER, \"icon.png\")\n\n def _loadCssProvider(self):\n display = Gdk.Display.get_default()\n screen = Gdk.Display.get_default_screen(display)\n provider = Gtk.CssProvider()\n Gtk.StyleContext.add_provider_for_screen(screen, provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n # TODO: load file\n #css_file = os.path.join(CSS_FOLDER, 'TagManager.css')\n #provider.load_from_path(css_file)\n\n\nclass TagManagerApp(BaseApp):\n app_id = \"fdibaldassarre.tagmanager.browser\"\n name = \"Tag Manager\"\n\n def __init__(self):\n super().__init__()\n self.browser = BrowserCtrl(self.services)\n\n def do_activate(self, data=None):\n self.browser.start()\n\n def openTagger(self, file):\n '''\n Open the tagger for a file.\n\n :param dao.entities.common.IFile file: File to tag\n :return: Tagger controller\n :rtype: ui.tagger.TaggerCtrl\n '''\n self.tagger = TaggerCtrl(self.services, file)\n self.tagger.start()\n return self.tagger\n\n\nclass TaggerApp(BaseApp):\n app_id = \"fdibaldassarre.tagmanager.tag\"\n name = \"Tag Manager - Tag\"\n\n def __init__(self, file):\n super().__init__()\n self.tagger = TaggerCtrl(self.services, file)\n\n def do_activate(self, data=None):\n self.tagger.start()\n\nclass MoverApp(BaseApp):\n app_id = \"fdibaldassarre.tagmanager.move\"\n name = \"Tag Manager - Move\"\n\n def __init__(self, path):\n '''\n Initialize.\n\n :param str path: Path of the file to move to the root folder\n '''\n super().__init__()\n self.mover = MoverCtrl(self.services, path)\n\n def do_activate(self, data=None):\n self.mover.start()\n\n def openTagger(self, file):\n '''\n Open the tagger for a file.\n\n :param dao.entities.common.IFile file: File to tag\n :return: Tagger controller\n :rtype: ui.tagger.TaggerCtrl\n '''\n self.tagger = TaggerCtrl(self.services, file)\n self.tagger.start()\n return self.tagger\n\n\nclass EditorApp(BaseApp):\n app_id = \"fdibaldassarre.tagmanager.editor\"\n name = \"Tag Editor\"\n\n def __init__(self):\n super().__init__()\n self.editor = EditorCtrl(self.services)\n\n def do_activate(self, data=None):\n 
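# Note on BaseApp._loadCssProvider() above: the CssProvider is registered on the
# screen, but the stylesheet load itself is still commented out (see the TODO).
# One way to finish it is to guard the load so a missing stylesheet does not
# crash startup (a sketch, assuming CSS_FOLDER holds TagManager.css as the
# commented-out lines suggest):
#
#     css_file = os.path.join(CSS_FOLDER, 'TagManager.css')
#     if os.path.exists(css_file):
#         provider.load_from_path(css_file)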
self.editor.start()\n","repo_name":"fdibaldassarre/tag-manager","sub_path":"src/ui/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2030613752","text":"from datetime import timedelta\n\ndef todelta(time):\n\tvalue = float(time[:-1])\n\tdtype = time[-1]\n\tif dtype == \"d\":\n\t\treturn timedelta(days=value)\n\tif dtype == \"h\":\n\t\treturn timedelta(hours=value)\n\tif dtype == \"m\":\n\t\treturn timedelta(minutes=value)\n\tif dtype == \"s\":\n\t\treturn timedelta(seconds=value)\n","repo_name":"BlackHawk94/Bot","sub_path":"utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13124084984","text":"from app import app\r\nfrom flask import Flask, jsonify, request, render_template, url_for, flash, redirect\r\nfrom werkzeug.utils import secure_filename\r\nimport io\r\nimport string\r\nimport time\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport sys\r\nimport pandas as pd\r\nfrom pandas_profiling import ProfileReport\r\nimport warnings\r\nfrom flask import send_file\r\n\r\n# regression files\r\nfrom reg_data_validation import data_check as reg_dc\r\nimport reg_driver as reg_driver\r\nimport regression_results as regres\r\n\r\n# classification files\r\nfrom cla_data_validation import data_check as cla_dc\r\nimport cla_driver as cla_driver\r\nfrom cla_predict import predict_function\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n# Global variables \r\nproblemType = \"\"\r\ndf = \"\"\r\ndep_col = \"\"\r\ndc_obj = \"\"\r\ndproutput = \"\"\r\ndpcoutput = \"\"\r\nbestmodel = \"\"\r\npredict_df = \"\"\r\n\r\n\r\n@app.route(\"/\")\r\ndef upload_form():\r\n return render_template(\"fileupload.html\")\r\n\r\n\r\n@app.route(\"/uploads\", methods=[\"GET\", \"POST\"])\r\ndef upload_file():\r\n global problemType, df, dc_obj\r\n if request.method == \"POST\" or request.method == \"GET\":\r\n # check if the post request has the file part\r\n if \"file\" not in request.files:\r\n flash(\"No file part\")\r\n return redirect(request.url)\r\n file = request.files[\"file\"]\r\n if file.filename == \"\":\r\n flash(\"No file selected for uploading\")\r\n return redirect(request.url)\r\n if file: # and allowed_file(file.filename):\r\n filename = secure_filename(file.filename)\r\n filepath = os.path.join(app.config[\"UPLOAD_FOLDER\"], filename)\r\n file.save(filepath)\r\n flash(\"File successfully uploaded\")\r\n flash(\"Conducting data validation check\")\r\n \r\n if problemType ==\"Regression\":\r\n dc_obj = reg_dc(filepath)\r\n else:\r\n dc_obj = cla_dc(filepath)\r\n \r\n fileCheck = dc_obj.identify_file()\r\n if fileCheck == \"None\":\r\n flash(\"\\nFile type is supported\")\r\n else:\r\n flash(\"Error: \", fileCheck)\r\n flash(\"\\nSupported file types: - [csv, tsv, xlsx, json]\")\r\n sys.exit()\r\n\r\n df = dc_obj.file_to_dataframe()\r\n dataValidation = dc_obj.validation_check(df)\r\n if dataValidation == \"None\":\r\n flash(\"\\nData format is supported\")\r\n else:\r\n sys.exit(dataValidation)\r\n html = df.head().to_html(classes=\"table table-hover table-dark\")\r\n\r\n # write html to file\r\n text_file = open(\r\n \"/Users/vishalkundar/Downloads/Website/app/templates/index.html\", \"w\"\r\n )\r\n text_file.write(html)\r\n text_file.close()\r\n return 
render_template(\"fileupload.html\")\r\n\r\n\r\n@app.route(\"/display_df\", methods=[\"GET\", \"POST\"])\r\ndef display_df():\r\n if request.method == \"POST\" or request.method == \"GET\":\r\n return render_template(\"table.html\")\r\n\r\n\r\n@app.route(\"/results\", methods=[\"GET\", \"POST\"])\r\ndef display_results():\r\n global df, problemType, dc_obj, dproutput, dpcoutput, bestmodel\r\n if request.method == \"POST\" or request.method == \"GET\":\r\n dep_col = str(request.form.get(\"depvar\"))\r\n problemType = dc_obj.identify_problem(df, dep_col.strip())\r\n if problemType != \"Regression\" and problemType != \"Classification\":\r\n sys.exit(problemType)\r\n\r\n if problemType == \"Regression\":\r\n dproutput, bestmodel = reg_driver.runNoCodeML(\r\n df, dep_col\r\n )\r\n else:\r\n dpcoutput, bestmodel = cla_driver.runNoCodeML(\r\n df, dep_col\r\n )\r\n\r\n if df.shape[0] > 2000:\r\n profile = ProfileReport(df, minimal=True)\r\n else:\r\n profile = ProfileReport(df)\r\n profile.to_file(\r\n \"/Users/vishalkundar/Downloads/Website/app/templates/user_report.html\"\r\n )\r\n return render_template(\"Final_page.html\")\r\n\r\n\r\n@app.route(\"/user_report\", methods=[\"GET\", \"POST\"])\r\ndef display_user_report():\r\n return render_template(\"user_report.html\")\r\n\r\n\r\n@app.route(\"/predict\", methods=[\"GET\", \"POST\"])\r\ndef display_userReport():\r\n global problemType, dproutput, dpcoutput, bestmodel, predict_df\r\n if request.method == \"POST\" or request.method == \"GET\":\r\n # check if the post request has the file part\r\n if \"file\" not in request.files:\r\n flash(\"No file part\")\r\n return redirect(request.url)\r\n file = request.files[\"file\"]\r\n if file.filename == \"\":\r\n flash(\"No file selected for uploading\")\r\n return redirect(request.url)\r\n if file: # and allowed_file(file.filename):\r\n filename = secure_filename(file.filename)\r\n filepath = os.path.join(app.config[\"UPLOAD_FOLDER\"], filename)\r\n file.save(filepath)\r\n flash(\"File successfully uploaded\")\r\n flash(\"Conducting data validation check\")\r\n \r\n if problemType == \"Regresion\":\r\n dc_obj = reg_dc(filepath)\r\n else:\r\n dc_obj = cla_dc(filepath)\r\n \r\n fileCheck = dc_obj.identify_file()\r\n if fileCheck == \"None\":\r\n flash(\"\\nFile type is supported\")\r\n else:\r\n flash(\"Error: \", fileCheck)\r\n flash(\"\\nSupported file types: - [csv, tsv, xlsx, json]\")\r\n sys.exit()\r\n\r\n df = dc_obj.file_to_dataframe()\r\n\r\n # Predict based on problem Type\r\n if problemType == \"Regression\":\r\n result_df = regres.predict_function(dproutput, bestmodel[0], df)\r\n \r\n else:\r\n i = 0 \r\n for key,val in bestmodel.items():\r\n if i>=1:\r\n break\r\n else:\r\n modeln = key\r\n modeld = val\r\n i+=1\r\n \r\n result_df = predict_function(dpcoutput, modeln,modeld, df)\r\n \r\n\r\n # Saving\r\n html = result_df.head().to_html(classes=\"table table-dark table-striped\")\r\n\r\n # write html to file\r\n text_file = open(\r\n \"/Users/vishalkundar/Downloads/Website/app/templates/predtable.html\",\r\n \"w\",\r\n )\r\n text_file.write(html)\r\n text_file.close()\r\n return render_template(\"predict.html\")\r\n\r\n\r\n@app.route(\"/download_results\", methods=[\"GET\", \"POST\"])\r\ndef download_results_ascsv():\r\n path = \"/Users/vishalkundar/Downloads/Website/predicted_data/results.csv\"\r\n return send_file(path, 
as_attachment=True)\r\n","repo_name":"vedantbarbhaya/NoCodeML","sub_path":"Website/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23953594582","text":"import sys\nimport getopt\nimport pandas as pd\nimport numpy as np\nimport random\nfrom sklearn.preprocessing import normalize\n\ndef readClusters():\n fn=open(\"data/scRNA/Filtered_clusters.txt\")\n clusters={}\n for line in fn:\n info=line.rstrip().split()\n clusters[info[0][-1]]=[int(x) for x in info[1:]]\n\n return clusters\ndef produce_mixture(out_dir, num_sample):\n\n expression_data=pd.read_excel(\"data/scRNA/formatted_expression.xlsx\")\n num_genes=expression_data.values.shape[0]\n gene_list=expression_data.loc[:,\"geneSymbol\"].values\n clusterMembers=readClusters()\n num_clusters=len(clusterMembers)\n cluster_sizes=np.zeros(num_clusters)\n for key, value in clusterMembers.items():\n cluster_sizes[int(key)]=len(value)\n #start with composistino of raw counts\n #indicates how many barcodes to select from each cluster\n compositions=np.zeros((num_sample,num_clusters),dtype=np.int)\n\n expressions=np.zeros((num_sample,num_genes))\n for i in range(num_sample):\n ingredients=[]\n for j in range(num_clusters):\n compositions[i][j]=random.randint(1,cluster_sizes[j])\n #for each sample, fetch the barcodes indices to be mixed \n sample_from_cluster=random.sample(clusterMembers[str(j)],compositions[i][j])\n ingredients.extend([expression_data.loc[:,x].values for x in sample_from_cluster]) #add this list of arrays\n #mix/average all the ingredients (expression from each barcodes selected)\n ingredients=np.array(ingredients)\n sample_expression=np.mean(ingredients,axis=0)\n expressions[i]=sample_expression\n\n #convert raw composition into ratios\n normed_composition = normalize(compositions, axis=1, norm='l1')\n\n\n #calculate ground truth\n cluster_means=np.zeros((num_clusters,num_genes))\n for j in range(num_clusters):\n all_members=clusterMembers[str(j)]\n all_member_expression=np.array([expression_data.loc[:,x].values for x in all_members])\n cluster_mean=np.mean(all_member_expression,axis=0)\n cluster_means[j]=cluster_mean\n \n #prepare output dataframs\n normed_composition=normed_composition.T #(#cluster, #sample)\n out_compos=pd.DataFrame(normed_composition, columns=[\"sample \"+str(i+1) for i in range(num_sample)])\n out_compos.insert(0,'cluster',[\"cluster\"+str(j) for j in range(num_clusters)])\n compo_fn=out_dir+\"sc_composition.xlsx\"\n\n expressions=expressions.T\n out_mixture=pd.DataFrame(expressions, columns=[\"sample \"+str(i+1) for i in range(num_sample)])\n out_mixture.insert(0,'geneSymbol',gene_list)\n mixture_fn=out_dir+\"sc_mixture.xlsx\"\n\n cluster_means=cluster_means.T\n out_clusterMeans=pd.DataFrame(cluster_means,columns=[\"cluster\"+str(j) for j in range(num_clusters)])\n out_clusterMeans.insert(0,\"geneSymbol\",gene_list)\n clusterMean_fn=out_dir+\"sc_ClusterMeans.xlsx\"\n\n\n #output files: composistion, ground truth (cluster means), mixture\n with pd.ExcelWriter(compo_fn) as writer:\n out_compos.to_excel(writer, index=False)\n with pd.ExcelWriter(mixture_fn) as writer:\n out_mixture.to_excel(writer, index=False)\n with pd.ExcelWriter(clusterMean_fn) as writer:\n out_clusterMeans.to_excel(writer, index=False)\n \n\ndef main(argv): \n \n out_dir = None\n num_sample=0\n\n try:\n opts, args = getopt.getopt(argv,\"hs:o:\")\n except getopt.GetoptError:\n print('mix_sc.py -o -s 
num_samples')\n        sys.exit(2)\n    for opt, arg in opts:\n\n        if opt == '-h':\n            print('mix_sc.py -o -s num_samples')\n            sys.exit()\n        elif opt==\"-o\":\n            out_dir = arg\n        elif opt==\"-s\":\n\n            num_sample=int(arg)\n\n    # print(\"Generate synthetic mixture from single cell RNA data...\")\n    produce_mixture(out_dir, num_sample)\n\n\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","repo_name":"nuoliu/CellDeconv","sub_path":"code/mix_sc.py","file_name":"mix_sc.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41774220601","text":"import random\n\nfrom flask import (\n    Blueprint,\n    redirect,\n    render_template,\n    request,\n    url_for,\n    send_from_directory\n)\nfrom flask_login import login_required\nfrom sqlalchemy import distinct\n\nfrom printing.utilities import *\n\ninv = Blueprint(\"inventory\", __name__, url_prefix=\"/inventory\")\n\n\n@inv.route(\"/\")\n@login_required\ndef inventory():\n    inventory = db.session.query(Project).filter(Project.customerfk == 2).filter(Project.active).all()\n    context = {\"user\": User, \"inventory\": inventory, \"action\": 1}\n    return render_template(\"app/inventory/inventory.html\", **context)\n\n\n@inv.route(\"/edit/<int:id>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef inventory_edit(id):\n    inventory = db.session.query(Project).filter(Project.id == id).first()\n\n    if request.method == \"POST\":\n        objectids = inventory.objectfk\n        for i in range(1, 8):\n            gcodename = f\"gcode{i}\"\n            qtyname = f\"qty{i}\"\n            if request.files[gcodename].filename != \"\":\n                # Save uploaded file\n                gcodefile = invgcode.save(request.files[gcodename])\n\n                # process file for time and materials\n                basedir = os.path.abspath(os.path.dirname(__file__))\n                filepath = os.path.join(basedir, \"uploads\", gcodefile)\n\n                time_in_h, weight_in_kg = calc_time_length(filepath, request.form.get(\"filament\"))\n\n                qty = request.form.get(qtyname)\n\n                # save file to db\n                newgcode = Printobject(\n                    file=gcodefile,\n                    h_printtime=time_in_h,\n                    kg_weight=weight_in_kg,\n                    qtyperprint = qty,\n                    projectid = inventory.id,\n                )\n                db.session.add(newgcode)\n                db.session.commit()\n                db.session.refresh(newgcode)\n                objectids.append(newgcode.id)\n\n        newval = request.form.to_dict()\n        inventory.project_name = newval[\"name\"]\n        inventory.customerfk = 2\n        inventory.printerfk = int(newval[\"printer\"])\n        inventory.filamentfk = int(newval[\"filament\"])\n        inventory.objectfk = objectids\n        inventory.shippingfk = 3\n        inventory.employeefk = 1\n        inventory.packaging = float(newval[\"packaging\"])\n        inventory.advertising = float(newval[\"advertising\"])\n        inventory.rent = float(newval[\"rent\"])\n        inventory.extrafees = float(newval[\"other\"])\n        inventory.active = 1\n        inventory.threshold = int(newval[\"threshold\"])\n        inventory.sale_price = float(newval[\"sale_price\"])\n        inventory.catagory = str(newval[\"catagory\"])\n        db.session.commit()\n\n        return redirect(url_for(\"inventory.inventory_details\", id=id))\n\n    printers = db.session.query(Printer).filter(Printer.active).all()\n    filaments = db.session.query(Filament).filter(Filament.active).all()\n    objfks = inventory.objectfk\n    objects = db.session.query(Printobject).filter(Printobject.id.in_(objfks)).all()\n\n    context = {\n        \"user\": User,\n        \"inventory\": inventory,\n        \"printers\": printers,\n        \"filaments\": filaments,\n        \"objects\": objects,\n    }\n    return render_template(\"app/inventory/inventory_edit.html\", **context)\n\n\n@inv.route(\"/adjust\", methods=[\"GET\", 
\"POST\"])\n@login_required\ndef inventory_adjust():\n inventory = db.session.query(Project.id,\n Project.project_name,\n Project.threshold,\n Project.current_quantity,\n Printobject.qtyperprint,\n Printobject.h_printtime,\n Printobject.file\n ) .join(Printobject).filter(Project.customerfk == 2).all()\n\n if request.method == \"POST\":\n projectid = request.form.get(\"item\")\n newadj = Adjustment_log(\n projectfk=projectid,\n adjustment=request.form.get(\"adjustment\"),\n description=request.form.get(\"desc\"),\n time_created=datetime.datetime.now(),\n )\n db.session.add(newadj)\n \n for inv in inventory:\n if inv.id == int(projectid):\n qty = int(inv.current_quantity)\n inv.current_quantity = int(qty) + int(request.form.get(\"adjustment\"))\n \n db.session.commit()\n\n return redirect(url_for(\"inventory.inventory\"))\n\n context = {\"user\": User, \"inventory\": inventory}\n return render_template(\"app/inventory/inventory_adjust.html\", **context)\n\n\n@inv.route(\"/new\", methods=[\"GET\", \"POST\"])\n@login_required\ndef inventory_new():\n if request.method == \"POST\":\n objectids = []\n \n newinv = Project(\n project_name=request.form.get(\"name\"),\n customerfk=2,\n printerfk=request.form.get(\"printer\"),\n filamentfk=request.form.get(\"filament\"),\n shippingfk=3,\n employeefk=1,\n packaging=request.form.get(\"packaging\"),\n advertising=request.form.get(\"advertising\"),\n rent=request.form.get(\"rent\"),\n extrafees=request.form.get(\"other\"),\n ordernum=int(str(\"22\" + str(random.randint(1000, 9999)))),\n active=1,\n threshold = request.form.get('threshold'),\n sale_price=request.form.get(\"sale_price\"),\n current_quantity=0,\n catagory=request.form.get(\"catagory\"),\n )\n db.session.add(newinv)\n db.session.commit()\n db.session.refresh(newinv)\n \n for i in range(1, 8):\n gcodename = f\"gcode{i}\"\n qtyname = f\"qty{i}\"\n if request.files[gcodename].filename != \"\":\n # Save uploaded file\n gcodefile = invgcode.save(request.files[gcodename])\n\n # process file for time and materials\n basedir = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(basedir, \"uploads\", gcodefile)\n\n time_in_h, weight_in_kg = calc_time_length(filepath, request.form.get(\"filament\"))\n\n # save file to db\n newgcode = Printobject(\n file=gcodefile, \n h_printtime=time_in_h, \n kg_weight=weight_in_kg,\n qtyperprint = request.form.get(qtyname),\n projectid = newinv.id\n )\n db.session.add(newgcode)\n db.session.commit()\n db.session.refresh(newgcode)\n objectids.append(newgcode.id)\n\n newinv.objectfk = objectids\n db.session.commit()\n\n \n\n return redirect(url_for(\"inventory.inventory_details\", id=newinv.id))\n\n catagory = db.session.query(distinct(Project.catagory), Project.catagory).all()\n printers = db.session.query(Printer).filter(Printer.active).all()\n filaments = db.session.query(Filament).filter(Filament.active).all()\n\n context = {\n \"user\": User,\n \"printers\": printers,\n \"filaments\": filaments,\n \"catagory\": catagory,\n }\n return render_template(\"app/inventory/inventory_new.html\", **context)\n\n\n@inv.route(\"/details/\")\n@login_required\ndef inventory_details(id):\n inventory = db.session.query(Project).filter(Project.id == id).first()\n objects = db.session.query(Printobject).filter(Printobject.projectid == id).all()\n\n files = []\n totalcost = 0\n for obj in objects:\n printobj = CalcCostInd(id, obj.id)\n \n filcost = printobj.filcost()\n timecost = printobj.timecost()\n miscost = printobj.misfees()\n \n thing = {}\n thing['weight_kg'] = 
obj.kg_weight\n thing['print_time'] = obj.h_printtime\n thing['filename'] = obj.file\n thing['qtyperprint'] = obj.qtyperprint\n thing['filcost'] = filcost\n thing['timecost'] = timecost\n thing['printer'] = inventory.printer_rel.name\n thing['filament_diameter'] = inventory.filament_rel.diameter\n thing['filament_type'] = inventory.filament_rel.type_rel.type\n files.append(thing)\n\n if obj == objects[-1]:\n totalcost = totalcost + filcost + timecost + miscost\n else:\n totalcost = totalcost + filcost + timecost\n \n context = {\n \"user\": User,\n \"action\": 1,\n \"inventory\": inventory,\n \"files\": files,\n \"total_cost\": totalcost,\n \"miscost\": miscost,\n }\n return render_template(\"app/inventory/inventory_details.html\", **context)\n\n\n@inv.route(\"/\", methods=[\"GET\", \"POST\"])\ndef download(filename):\n filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"uploads\")\n\n return send_from_directory(filepath, filename, as_attachment=True)\n\n\n@inv.route(\"/clean\")\ndef clean():\n import fnmatch\n\n filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"uploads\")\n dbcount = Printobject.query.count()\n filecount = len(fnmatch.filter(os.listdir(filepath), \"*.*\"))\n if dbcount != filecount:\n clean_inventory_uploads(filepath)\n return redirect(url_for(\"inventory.inventory\"))\n\n@inv.route(\"/update\")\ndef update():\n Update_Inventory_Qty()\n return redirect(url_for(\"inventory.inventory\"))","repo_name":"iamrbtm/program","sub_path":"printing/templates/app/inventory/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":9366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5109768173","text":"from collections import deque\r\nimport copy\r\nimport sys\r\nsys.stdin = open('input.txt', 'r')\r\n\r\nN, M = map(int, input().split())\r\nroom = [list(map(int, input().split())) for _ in range(N)]\r\ndr = [0,0,-1,1]\r\ndc = [-1,1,0,0]\r\nmax = float('-inf')\r\ndef spread_virus():\r\n global max\r\n cnt = 0\r\n\r\n que = deque()\r\n visited = [[0] * M for _ in range(N)]\r\n for i in range(len(virus)):\r\n r, c = virus[i]\r\n que.append([r,c])\r\n visited[r][c] = 1\r\n while que:\r\n r, c = que.popleft()\r\n for d in range(4):\r\n nr = r + dr[d]\r\n nc = c + dc[d]\r\n if 0 <= nr < N and 0 <= nc < M and room[nr][nc] == 0 and not visited[nr][nc]:\r\n visited[nr][nc] = 1\r\n que.append([nr,nc])\r\n\r\n for i in range(N):\r\n for j in range(M):\r\n if not visited[i][j] and room[i][j] == 0:\r\n cnt += 1\r\n\r\n if max < cnt:\r\n max = cnt\r\n\r\n return\r\n\r\ndef choose_wall(r,c,depth):\r\n if depth == 3:\r\n spread_virus()\r\n else:\r\n for i in range(r, N):\r\n for j in range(c, M):\r\n if room[i][j] == 0:\r\n room[i][j] = 9\r\n choose_wall(i,j,depth+1)\r\n room[i][j] = 0\r\n c = 0\r\n\r\nvirus = []\r\n\r\nfor i in range(N):\r\n for j in range(M):\r\n if room[i][j] == 2:\r\n virus.append([i,j])\r\n\r\n\r\nchoose_wall(0,0,0)\r\nprint(max)","repo_name":"seulshine/Alpygo","sub_path":"Algorithm/Python/연구소.py","file_name":"연구소.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73276018568","text":"# PROBLEM 1: NAIVE BAYESIAN CLASSIFICATION USING GAUSSIAN ASSUMPTION\n\nfrom tensorflow.keras.datasets import mnist\nfrom sklearn.naive_bayes import GaussianNB\nimport numpy\n\n# loading training and test data\n(X_train, train_y), (X_test, test_y) = mnist.load_data()\n\n# 
initializing gaussian naive bayesian classifier\ngnb = GaussianNB()\nprint('NAIVE BAYESIAN CLASSIFIER')\n\n# flattening 28 x 28 element 2d array into 1d array to match gnb param requirements\ntrain_X = []\ntest_X = []\n\nfor i in range(len(X_train)):\n train_X.append(X_train[i].flatten())\n\nfor i in range(len(X_test)):\n test_X.append(X_test[i].flatten())\n\n# running bayesian classifier on training data\ny_train_pred = gnb.fit(train_X, train_y).predict(train_X)\n\nprint(\"TRAINING DATA - Num. mislabeled points: %d / %d\" %\n ((train_y != y_train_pred).sum(), len(train_X)))\n\n# running bayesian classifier on test data\ny_test_pred = gnb.fit(train_X, train_y).predict(test_X)\n\nprint(\"TEST DATA - Num. mislabeled points: %d / %d\" %\n ((test_y != y_test_pred).sum(), len(test_X)))\n","repo_name":"yzhang00/Python-ML-Project-1","sub_path":"naive_bayesian.py","file_name":"naive_bayesian.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20239608610","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n # Projet individuel - Hugo\n path('communautes/', views.communautes, name='communautes'),\n path('communautes///', views.abonnement, name='abonnement'),\n path('communaute//', views.communaute, name='communaute'),\n path('post//', views.post, name='post'),\n path('nouveau_post/', views.nouveau_post, name='nouveau_post'),\n path('nouveau_post///', views.nouveau_post, name='nouveau_post_com'),\n path('modif_post//', views.modif_post, name='modif_post'),\n path('news_feed/', views.news_feed, name='news_feed'),\n path('nouvelle_communaute/', views.nouvelle_communaute, name='nouvelle_communaute'),\n path('', views.communautes, name='home'),\n\n #Extension 1 - Come\n path('communaute////', views.communaute, name='communaute_filtered'),\n path('post_like///', views.post_like, name='post_like'),\n path('post_read///', views.post_read, name='post_read'),\n\n #Extention 2 - Antoine\n\n ## Actions sur Communaute\n path('modif_communaute//', views.modif_communaute, name='modif_communaute'),\n path('delete_communaute//', views.delete_communaute, name='delete_communaute'),\n path('suspend_communaute///', views.suspend_communaute, name='suspend_communaute'),\n path('open_close_communaute//', views.open_close_communaute, name='open_close_communaute'),\n\n ## Actions sur Post\n path('visibility_post//', views.visibility_post, name='visibility_post'),\n path('nouveau_post///', views.nouveau_post, name='nouveau_special_post'),\n path('delete_post//', views.delete_post, name='delete_post'),\n path('sticky_modify_post//', views.sticky_modify_post, name='sticky_modify_post'),\n\n ## Actions sur Commentaire\n path('visibility_comment//', views.visibility_comment, name='visibility_comment'),\n\n #Extention 4 - Hugo\n path('calendrier/////////'\n , views.calendrier, name='calendrier'),\n path('advanced_search/', views.advanced_search, name='recherche_avancee'),\n\n]\n\n","repo_name":"hmiranda-queiros/Django_Project","sub_path":"communitymanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40100736722","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def insertionSortList(self, head: ListNode) -> ListNode:\n dummy = ListNode(0)\n pre = dummy\n 
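# a descriptive note on the algorithm below: 'pre' remembers the previous\n        # insertion point, and is reset to the dummy head only when the incoming\n        # value is smaller, so the sorted prefix is not rescanned on every insertion\n        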
node = head\n        while node:\n            cur = node\n            node = node.next\n            if cur.val < pre.val:\n                pre = dummy\n            while pre.next and cur.val > pre.next.val:\n                pre = pre.next\n            cur.next = pre.next\n            pre.next = cur\n        \n        return dummy.next\n","repo_name":"Abdelhamid-bouzid/problem-Sovling-","sub_path":"Medium/insertionSortList.py","file_name":"insertionSortList.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"29423359983","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm, NewPost, EventUserForm\n\nfrom .models import Profile, UserPost, EventUser\n\n\ndef user_login(request):\n    if request.method == 'POST':\n        form = LoginForm(request.POST)\n        if form.is_valid():\n            cd = form.cleaned_data\n            user = authenticate(request,\n                                username=cd['username'],\n                                password=cd['password'])\n            if user is not None:\n                if user.is_active:\n                    login(request, user)\n                    return HttpResponse('Authentication successful')\n                else:\n                    return HttpResponse('Account disabled')\n            else:\n                return HttpResponse('Invalid login or password')\n    else:\n        form = LoginForm()\n    return render(request, 'account/login.html', {'form': form})\n\ndef register(request):\n    if request.method == 'POST':\n        user_form = UserRegistrationForm(request.POST)\n        if user_form.is_valid():\n            # Create a new user object, but do not save it to the database yet.\n            new_user = user_form.save(commit=False)\n            # Set the user's password in hashed form.\n            new_user.set_password(user_form.cleaned_data['password'])\n            # Save the user to the database.\n            new_user.save()\n            # Create the user's profile.\n            Profile.objects.create(user=new_user).save()\n\n            return render(request, 'account/register_done.html', {'new_user': new_user})\n    else:\n        user_form = UserRegistrationForm()\n    return render(request, 'account/register.html', {'user_form': user_form})\n\n@login_required\ndef profile(request):\n    if request.method == 'POST':\n        my_post = NewPost(request.POST)\n        my_post = my_post.save(commit=False)\n        my_post.user = request.user\n        my_post.save()\n    M_posts = UserPost.objects.all().filter(user=request.user).order_by('-pk')\n    M_profile = Profile.objects.get(user=request.user)\n    return render(request, 'account/profile.html', {'section': 'profile', 'profile': M_profile, 'posts': M_posts, 'post_form': NewPost()})\n\n\n@login_required\ndef edit(request):\n    if request.method == 'POST':\n        user_form = UserEditForm(instance=request.user, data=request.POST)\n        profile_form = ProfileEditForm(instance=request.user.profile, data=request.POST, files=request.FILES)\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n    else:\n        user_form = UserEditForm(instance=request.user)\n        profile_form = ProfileEditForm(instance=request.user.profile)\n    return render(request, 'account/edit.html', {'user_form': user_form, 'profile_form': profile_form})\n\n\n@login_required\ndef post_delete(request, pk):\n    if request.method == 'POST':\n        post = get_object_or_404(UserPost, pk=pk, user=request.user)\n        post.delete()\n    return redirect('/account')\n\n@login_required\ndef new_user_event(request):\n    if request.method == 'POST':\n        my_event = EventUserForm(request.POST)\n        if my_event.is_valid():\n            my_event = 
my_event.save(commit=False)\n            my_event.user = request.user\n            my_event.save()","repo_name":"kda2019/new_rep","sub_path":"studworld/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12591938515","text":"import torch\n\nparameter = {\n    'min_count_word': 1,\n    'output_size': 15,\n    'epoch': 20,\n    'batch_size': 10,\n    'embedding_dim': 300,\n    'hidden_size': 128,\n    'num_layers': 2,  # number of stacked LSTM layers; the default is 1\n    'dropout': 0.5,\n    'cuda': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),\n    'lr': 0.001,\n    'num_unknow': 0,\n    'max_len': 20,\n    'data_path': 'data/classfication.csv',\n    'embedding_path': 'data/chinese_wiki_embeding8000.300d.txt',\n    'src_path': 'data/src.pkl',\n    'train_data_path': 'data/train_data.pkl',\n    'valid_data_path': 'data/valid_data.pkl'\n}\n","repo_name":"xuyouqian/Knowledge-Graph-Demo_00","sub_path":"意图识别_01/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"16"}
+{"seq_id":"24178357534","text":"from __future__ import absolute_import\n\nimport torch\nfrom torch import nn\n\nfrom reid.evaluator import accuracy\n\n\nclass PairLoss(nn.Module):\n    def __init__(self):\n        super(PairLoss, self).__init__()\n\n        # self.sigmod = nn.Sigmoid()\n        self.BCE = nn.BCELoss()\n        self.BCE.size_average = True\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n    def forward(self, score, tar_probe, tar_gallery):\n        cls_Size = score.size()  # torch.Size([4, 2])\n        N_probe = cls_Size[0]  # 4\n        N_gallery = cls_Size[0]\n\n        tar_gallery = tar_gallery.unsqueeze(1)  # 6,1 tensor([[ 94],[ 10],[ 15],[ 16],[ 75],[ 39]])\n        tar_probe = tar_probe.unsqueeze(0)  # 1,6 tensor([[ 94,  10,  15,  16,  75,  39]])\n        mask = tar_probe.expand(N_probe, N_gallery).eq(tar_gallery.expand(N_probe, N_gallery))\n        mask = mask.view(-1).cpu().numpy().tolist()\n        # [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]\n\n        score = score.contiguous()  # torch.Size([4, 4])\n        samplers = score.view(-1)  # torch.Size([16])\n\n        # samplers = self.sigmod(samplers)\n        # labels = Variable(torch.Tensor(mask).cuda())\n        labels = torch.Tensor(mask).to(self.device)\n\n        loss = self.BCE(samplers, labels)\n\n        samplers_data = samplers.data  # torch.Size([36])\n        samplers_neg = 1 - samplers_data\n        samplerdata = torch.cat((samplers_neg.unsqueeze(1), samplers_data.unsqueeze(1)), 1)  # torch.Size([36, 2])\n\n        labeldata = torch.LongTensor(mask).to(self.device)\n        prec, = accuracy(samplerdata, labeldata)\n\n        return loss, prec\n","repo_name":"flysnowtiger/GRL","sub_path":"reid/loss/pairloss.py","file_name":"pairloss.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"16"}
+{"seq_id":"11395811096","text":"def solve(data): \r\n    for n, d in enumerate(data):\r\n        cont = False\r\n        if n > 24:\r\n            for e in data[n-25:n]:\r\n                for f in data[n-25:n]:\r\n                    if e + f == d and e != f:\r\n                        cont = True\r\n                        break\r\n                if cont:\r\n                    break\r\n        if not cont: \r\n            #print(f'{d}\\n')\r\n            return d\r\n    return True \r\n\r\ndef solve2(failnum, data):\r\n    failn = failnum\r\n    contrange = []\r\n    contsum = 0\r\n    for n, d in enumerate(data):\r\n        contrange.clear()\r\n        failn -= d\r\n        contsum += d\r\n        pos = n \r\n        contrange.append(d)\r\n        while failn > 0:\r\n            pos += 1\r\n            failn -= 
data[pos]\r\n            contrange.append(data[pos])\r\n            if failn == 0:\r\n                return min(contrange), max(contrange)\r\n            elif failn < 0:\r\n                failn = failnum\r\n                break\r\n\r\ndef getdata(filename):\r\n    with open(filename) as f:\r\n        nums = [ int(n.strip()) for n in f.readlines()]\r\n    return nums #rows in file to list items\r\n    \r\n    \r\nif __name__ == '__main__':\r\n    data = (getdata('data_d9.txt'))\r\n    test = (getdata('test_d9.txt'))\r\n    fn = solve(data)\r\n    print(fn)\r\n    low, high = solve2(fn, data)\r\n    print(f'low {low} + high {high} = {low + high}')\r\n","repo_name":"r0ttan/AoC2020","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6808997949","text":"import warnings\nfrom flask import Flask, jsonify, request\nfrom traffic_lights.inference.predict import load_model, predict_from_bytes\n\nTEMP_MODAL_PATH = \"/tmp/tlr_model.pth\"\n\n\napp = Flask(__name__)\n\n\n@app.before_first_request\ndef before_first_request():\n    print(\"Loading the model...\")\n    warnings.filterwarnings(\"ignore\")\n    global model, device\n    model, device = load_model(TEMP_MODAL_PATH)\n    print(\"Model loaded.\")\n\n\n@app.route(\"/\")\ndef entry():\n    return (\n        \"Traffic Light recognition using Pytorch. POST an image to /predict to try it\"\n    )\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict_image():\n    if request.method == \"POST\":\n        file = request.files[\"file\"]\n        img_bytes = file.read()\n        if model is None or device is None:\n            raise Exception(\"Model or device not found\")\n        prediction = predict_from_bytes(img_bytes, model, device)\n        result = jsonify(prediction)\n        result.status_code = 200\n        return result\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"aidandunlop/traffic_light_recognition","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"}
+{"seq_id":"32469397528","text":"from django.db import models\nfrom django.contrib.auth.models import User\nimport json\n\n# Create your models here.\nclass TrafficImages(models.Model):\n    image = models.FileField()\n    file_name = models.CharField(max_length=250, null=True, blank=True)\n    user = models.ForeignKey(User, related_name='uploaded_images', on_delete=models.CASCADE, null=True)\n    created_at = models.DateTimeField(auto_now_add=True, null=True)\n\nclass ImageAnnotations(models.Model):\n    image = models.ForeignKey(TrafficImages, on_delete=models.CASCADE, related_name='annotations')\n    user = models.ForeignKey(User, related_name='user_annotations', on_delete=models.CASCADE)\n    annotations = models.TextField(null=True, blank=True) # will save as json\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(null=True, blank=True)\n\n    def __str__(self):\n        return \"%s %s\" %(self.image, self.user)\n\n    @property\n    def annotation_json(self):\n        try:\n            return json.loads(self.annotations)\n        except:\n            return {}\n\n    # not a property: a property getter cannot take an extra argument,\n    # so this setter-style helper must be a plain method\n    def annotation_string(self, annotation_obj):\n        try:\n            self.annotations = json.dumps(annotation_obj)\n        except Exception:\n            self.annotations = ''\n\n        return 
self.annotations\n\n","repo_name":"kishorkumarj/SmartCowAssignment","sub_path":"image-annotation-server/image_annotation/annotation_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28726104001","text":"import json\nimport os\nimport re\nfrom collections import defaultdict\nfrom typing import List, Union\n\nimport spacy\nfrom spacy.symbols import PRON, VERB\n\nfrom paper import RhetoricUnit, S2OrcPaper, SPPPaper\n\nnlp = spacy.load(\"en_core_web_md\")\n\n\nclass LexicalSet:\n def __init__(self):\n self.lexical_set = {\n \"we\": [\"our\", \"present study\", \"this paper\", \"this work\", \"this research\"],\n \"previous\": [\"previously\", \"recent\", \"recently\"],\n \"thus\": [\"therefore\"],\n \"aim\": [\"objective\", \"goal\", \"purpose\", \"objectives\", \"goals\", \"purposes\"],\n \"question\": [\"questions\", \"hypothesis\", \"hypotheses\"],\n \"investigate\": [\n \"explore\",\n \"study\",\n \"test\",\n \"examine\",\n \"evaluate\",\n \"assess\",\n \"determine\",\n \"characterize\",\n \"analyze\",\n \"report\",\n \"seek\",\n # \"present\",\n ],\n \"use\": [\"employ\"],\n \"method\": [\"algorithm\", \"assay\"],\n \"observe\": [\"we see\", \"find\", \"show\"],\n \"conclude\": [\"conclusion\", \"summarize\", \"summary\"],\n \"suggest\": [\n \"illustrate\",\n \"demonstrate\",\n \"imply\",\n \"indicate\",\n \"confirm\",\n \"reflect\",\n \"support\",\n \"prove\",\n \"reveal\",\n ],\n \"because\": [\"result from\", \"attribute to\"],\n \"likely\": [\"probable\", \"probably\", \"possible\", \"possibly\", \"may\", \"could\"],\n \"need\": [\"remain\"],\n \"future\": [\"further\"],\n \"consistent\": [\n \"match\",\n \"agree\",\n \"support\",\n \"in line\",\n \"in agreement\",\n \"similar\",\n \"same\",\n \"analogous\",\n ],\n \"inconsistent\": [\n \"conflicting\",\n \"conflict\",\n \"contrast\",\n \"contrary\",\n \"differ\",\n \"different\",\n \"difference\",\n \"unlike\",\n ],\n \"than\": [\"compare\"],\n \"however\": [\"other hand\", \"although\", \"though\", \"but\", \"while\", \"despite\"],\n \"extend\": [\"extension\", \"extends\"],\n \"contribution\": [\"contributions\", \"contribute\", \"contributes\"],\n }\n\n def get_roots(self):\n return self.lexical_set.keys()\n\n def get_aliases(self, root):\n return self.lexical_set.get(root, [])\n\n\nclass AZClassifier:\n def __init__(self, json_file_path: str, dataset: str):\n self.json_file_path = json_file_path\n self.dataset = dataset\n\n self.id = os.path.splitext(os.path.basename(json_file_path))[0]\n\n if dataset == \"s2orc\":\n self.paper = S2OrcPaper(json_file_path)\n elif dataset == \"spp\":\n self.paper = SPPPaper(json_file_path, \"detect\")\n elif dataset == \"spp-cermine\":\n self.paper = SPPPaper(json_file_path, \"cermine\")\n\n self.lexical_set = LexicalSet()\n\n def _sentence_contains_root_or_aliases(\n self, sentence: str, roots: Union[str, List[str]]\n ):\n if isinstance(roots, str):\n roots = [roots]\n sentence = sentence.lower().strip()\n found = []\n for root in roots:\n aliases = self.lexical_set.get_aliases(root)\n for token in [root, *aliases]:\n if re.search(r\"\\b{}\\b\".format(token), sentence):\n found.append(token)\n return found\n\n def _has_section(self, aliases: List[str]):\n for section in self.paper.sections:\n for a in aliases:\n if a in section.lower():\n return True\n return False\n\n def _is_in_introduction(self, sentence: str):\n section_found = 
self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n return \"introduction\" in section_found\n\n def _is_in_conclusion(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n aliases = [\"conclusion\", \"concluding\"]\n return any(a in section_found for a in aliases)\n\n def _is_in_discussion(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n return \"discussion\" in section_found\n\n def _is_in_related_work(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n aliases = [\"related work\", \"background\", \"relatedwork\"]\n return any(a in section_found for a in aliases)\n\n def _is_in_future_work(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n aliases = [\"future work\"]\n return any(a in section_found for a in aliases)\n\n def _is_in_result(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n aliases = [\"result\", \"evaluation\", \"finding\", \"study\", \"experiment\"]\n return any(a in section_found for a in aliases)\n\n def _is_in_method(self, sentence: str):\n section_found = self.paper.get_section_for_sentence(sentence)\n section_found = section_found.lower()\n aliases = [\"method\", \"system\"]\n return any(a in section_found for a in aliases)\n\n def _is_in_expected_section(self, sentence: str, label: str):\n if label == \"contribution\":\n return self._is_in_introduction(sentence)\n elif label == \"objective\":\n return self._is_in_introduction(sentence) or self._is_in_conclusion(\n sentence\n )\n elif label == \"novelty\":\n return self._is_in_related_work(sentence) or self._is_in_introduction(\n sentence\n )\n elif label == \"result\":\n return (\n self._is_in_introduction(sentence)\n or self._is_in_result(sentence)\n or self._is_in_discussion(sentence)\n or self._is_in_conclusion(sentence)\n )\n elif label == \"conclusion\":\n return self._is_in_conclusion(sentence) or self._is_in_discussion(sentence)\n elif label == \"future work\":\n return (\n self._is_in_introduction(sentence)\n or self._is_in_conclusion(sentence)\n or self._is_in_discussion(sentence)\n or self._is_in_future_work(sentence)\n )\n elif label == \"method\":\n return not (\n self._is_in_related_work(sentence)\n or self._is_in_result(sentence)\n or self._is_in_future_work(sentence)\n or self._is_in_discussion(sentence)\n )\n else:\n return False\n\n def detect_contribution(self):\n print(\"=== Contribution ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n found = self._sentence_contains_root_or_aliases(sentence, [\"contribution\"])\n\n if not found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(\n sentence, \"contribution\"\n )\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Contribution\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_objective(self):\n print(\"=== 
Objective ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n aim_noun_found = self._sentence_contains_root_or_aliases(\n sentence, [\"aim\", \"question\"]\n )\n aim_verb_found = self._sentence_contains_root_or_aliases(\n sentence, [\"investigate\"]\n )\n\n if not aim_noun_found and not aim_verb_found:\n continue\n\n # only consider when contribution roots and alises are verbs\n doc = nlp(sentence.lower())\n keep_aim_tokens = []\n for token in doc:\n if token.pos_ != VERB:\n continue\n if token.text in aim_verb_found:\n keep_aim_tokens.append(token)\n aim_found = aim_noun_found + keep_aim_tokens\n\n if not aim_found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(sentence, \"objective\")\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Objective\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_novelty(self):\n print(\"=== Novelty ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n found = self._sentence_contains_root_or_aliases(\n sentence, [\"inconsistent\", \"however\", \"extend\"]\n )\n\n if not found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(sentence, \"novelty\")\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Novelty\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_result(self):\n print(\"=== Result ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n found = self._sentence_contains_root_or_aliases(sentence, [\"observe\"])\n\n if not found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(sentence, \"result\")\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Result\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_conclusion(self):\n print(\"=== Conclusion ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n con_found = self._sentence_contains_root_or_aliases(sentence, [\"conclude\"])\n con_likely_found = self._sentence_contains_root_or_aliases(\n sentence, [\"suggest\", \"thus\", \"likely\", \"because\"]\n )\n\n if not con_found and not con_likely_found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(\n sentence, \"conclusion\"\n )\n if 
con_found:\n prob = 1\n elif con_likely_found:\n prob = 0.5\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Conclusion\",\n bboxes=bboxes,\n section=section,\n prob=prob,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_future_work(self):\n print(\"=== Future Work ===\")\n detected = []\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n found = self._sentence_contains_root_or_aliases(\n sentence, [\"will\", \"need\", \"future\"]\n )\n\n if not found:\n continue\n\n section = self.paper.get_section_for_sentence(sentence)\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n is_in_expected_section = self._is_in_expected_section(\n sentence, \"future work\"\n )\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Future Work\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def detect_method(self):\n print(\"=== Method ===\")\n detected = []\n seen_blocks = set()\n for sent_bbox_obj in self.paper.sentences:\n sentence = sent_bbox_obj.text\n bboxes = sent_bbox_obj.bboxes\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n if not is_author_statement:\n continue\n\n block_idx = sent_bbox_obj.block_idx\n if block_idx in seen_blocks:\n continue\n\n seen_blocks.add(sent_bbox_obj.block_idx)\n section = self.paper.get_section_for_sentence(sentence)\n is_in_expected_section = self._is_in_expected_section(\n sentence, \"method\"\n )\n rhetoric_unit = RhetoricUnit(\n text=sentence,\n label=\"Method\",\n bboxes=bboxes,\n section=section,\n prob=None,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n detected.append(rhetoric_unit)\n return detected\n\n def make_ssc_rhetoric_unit(self, sentence: str, label: str, prob: float = None):\n is_author_statement = (\n len(self._sentence_contains_root_or_aliases(sentence, \"we\")) > 0\n )\n ssc_rhetoric_unit_label_map = {\n \"Method\": \"method\",\n \"Objective\": \"objective\",\n \"Result\": \"result\",\n }\n is_in_expected_section = self._is_in_expected_section(\n sentence, ssc_rhetoric_unit_label_map.get(label, \"\")\n )\n return RhetoricUnit(\n text=sentence,\n label=label,\n bboxes=self.paper.sent_bbox_map[sentence],\n section=self.paper.sent_sect_map[sentence],\n prob=prob,\n is_author_statement=is_author_statement,\n is_in_expected_section=is_in_expected_section,\n )\n\n def _token_to_noun_chunk(self, doc):\n noun_chunks = [nc for nc in doc.noun_chunks]\n token_chunks = {}\n for tok in doc:\n for nc in noun_chunks:\n if tok.i >= nc.start and tok.i < nc.end:\n token_chunks[tok] = nc\n break\n return token_chunks\n\n def _get_noun_chunk_after(self, noun_chunks, i):\n for nc in noun_chunks:\n if i >= nc.start and i < nc.end:\n return nc\n\n def get_author_statements(self):\n author_statements = []\n for sentence in self.paper.sentences:\n if self._sentence_contains_root_or_aliases(sentence.text, \"we\"):\n author_statements.append(sentence)\n return author_statements\n\n def get_short_author_statements(self):\n short_statements = []\n author_statements = self.get_author_statements()\n for sent in author_statements:\n doc = nlp(sent.text.lower())\n clause_tokens = []\n pron_found = 
False\n verb_found = False\n noun_chunk_found = False\n clause_completed = False\n sconj_found = False\n for token in doc:\n if token.lemma_ == \"-PRON-\" or token.pos == PRON:\n pron_found = True\n clause_completed = False\n if pron_found and not clause_completed:\n if token.pos == VERB:\n clause_tokens.append(token.text)\n verb_found = True\n elif token.lemma_ == \"that\":\n clause_tokens.append(token.text)\n sconj_found = True\n elif verb_found:\n if not noun_chunk_found:\n clause_tokens.append(token.text)\n if self._get_noun_chunk_after(doc.noun_chunks, token.i):\n noun_chunk_found = True\n else:\n if not self._get_noun_chunk_after(doc.noun_chunks, token.i):\n if sconj_found:\n clause_tokens.append(token.text)\n noun_chunk_found = False\n sconj_found = False\n else:\n short_statement = \" \".join(clause_tokens)\n short_statements.append((short_statement, sent))\n break\n else:\n clause_tokens.append(token.text)\n else:\n clause_tokens.append(token.text)\n return short_statements\n\n\nif __name__ == \"__main__\":\n # Run on s2orc_acl_20200705v1 data\n ssc_output_file = \"data/ssc-input/s2orc_acl_2016\"\n DATA_DIR = \"data/s2orc_acl_20200705v1\"\n by_year = defaultdict(list)\n for file_e in os.scandir(DATA_DIR):\n if not os.path.isfile(file_e.path):\n continue\n with open(file_e.path, \"r\") as f:\n paper = json.load(f)\n metadata = paper[\"metadata\"]\n if \"year\" not in metadata:\n continue\n if \"arxiv_id\" not in metadata:\n continue\n year = metadata[\"year\"]\n arxiv_id = metadata[\"arxiv_id\"]\n if year and arxiv_id:\n by_year[year].append((file_e.path, arxiv_id))\n for paper in by_year[2016]:\n paper_path, arxiv_id = paper\n print(paper_path)\n detected = []\n azc = AZClassifier(paper_path, \"s2orc\")\n detected += azc.detect_contribution()\n detected += azc.detect_novelty()\n detected += azc.detect_objective()\n detected += azc.detect_conclusion()\n detected += azc.detect_future_work()\n","repo_name":"rayfok/scim-nlp","sub_path":"src/detect_rhetorical_classes.py","file_name":"detect_rhetorical_classes.py","file_ext":"py","file_size_in_byte":20490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"10314468752","text":"import json #Aqui cargo los archivos json, no hay mucho misterio,\r\ncargar=str(input('¿Desea cargar los archivos anteriores?\\n'))\r\nif cargar.lower()=='si':\r\n v=open(\"BACKUPEDIFICIO.json\",\"r\") #por medio del load confierto el archivo json en diccionario \r\n q=open(\"BACKUPUSURIOS.json\",\"r\")\r\n EDIFICIO=json.load(v)\r\n USUARIOS=json.load(q)\r\n\r\nelse:\r\n v=open(\"tiposParqueaderos.json\",\"r\")\r\n q=open(\"usuarios.json\",\"r\")\r\n EDIFICIO=json.load(v)\r\n USUARIOS=json.load(q)\r\n\r\n\r\n\r\ndef tranformiceusuarios (USUARIOS): #Aqui transformo las palabras automovil, a.electrico, motocicleta y discapacitados \r\n for x in range(len(USUARIOS['usuarios'])): #En los numeros correspondientes (En el diccionario usuarios) \r\n if USUARIOS['usuarios'][x][4]=='Automóvil':\r\n USUARIOS['usuarios'][x][4]=1\r\n elif USUARIOS['usuarios'][x][4]=='Automóvil Eléctrico':\r\n USUARIOS['usuarios'][x][4]=2\r\n elif USUARIOS['usuarios'][x][4]=='Motocicleta':\r\n USUARIOS['usuarios'][x][4]=3\r\n elif USUARIOS['usuarios'][x][4]=='Discapacitado':\r\n USUARIOS['usuarios'][x][4]=4\r\n return USUARIOS\r\n\r\ndef transformicepisos(EDIFICIO):\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])): #Aqui transformo las palabras automovil, a.electrico, motocicleta y discapacitados\r\n for c in 
range(10):                             #En los numeros correspondientes (En el diccionario del edificio) \r\n                if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4: #Recorro todo el diccionario con ciclos for \r\n                    for z in range(6):                             #Tuve que tomar los caracteres diferentes por las tildes \r\n                        if 'Automóvil'==EDIFICIO[a][b][c][4]:\r\n                            EDIFICIO[a][b][c][4]=1\r\n                        elif 'Automóvil Eléctrico'==EDIFICIO[a][b][c][4]:\r\n                            EDIFICIO[a][b][c][4]=2\r\n                        elif 'Motocicleta'==EDIFICIO[a][b][c][4]:\r\n                            EDIFICIO[a][b][c][4]=3\r\n                        elif 'Discapacitado'==EDIFICIO[a][b][c][4]:\r\n                            EDIFICIO[a][b][c][4]=4\r\n                        \r\n    return EDIFICIO\r\n\r\n\r\n\r\n\r\ndef validacionregistro(USUARIOS):\r\n    numeroidentificacion=eval(input('Ingrese su numero de identificacion: '))        #Aqui valido si el coche ya esta registrado o no, \r\n    n=0                                                                              #primero pido el numero de identificacion para no hacerlo tedioso \r\n    for x in range(len(USUARIOS['usuarios'])):\r\n        if USUARIOS['usuarios'][x][1]==numeroidentificacion:\r\n            n=n+1\r\n    if n==0:\r\n        nombre=str(input('Ingrese sus nombres y apellidos: ')) \r\n        tipodeusuario=str(input('Ingrese su tipo de usuario: '))\r\n        placa=str(input('Ingrese la placa de su vehiculo: '))\r\n        tipovehiculo=eval(input('Ingrese su tipo de vehiculo:\\n 1.Automóvil\\n 2.Automóvil Eléctrico\\n 3.Motocicleta\\n 4.Discapacitado\\n(Ingrese el numero del tipo de auto que tenga)\\n'))\r\n        plandepago=str(input('Ingrese su plan de pago:\\n 1.Mensualidad\\n 2.Diario\\n(INGRESE LA PALABRA COMPLETA POR FAVOR)\\n'))\r\n        lista=[nombre,numeroidentificacion,tipodeusuario,placa,tipovehiculo,plandepago]\r\n        USUARIOS['usuarios'].append(lista)\r\n        return USUARIOS\r\n    else:\r\n        print('Usted ya registro un vehiculo')\r\n        return validacionregistro(USUARIOS)\r\n\r\n\r\n\r\n\r\ndef ingreso(USUARIOS):\r\n    n=0\r\n    placaingreso=str(input('Ingrese la placa de su vehiculo: '))        #Primero, aunque no me lo hayan pedido en el trabajo, valido si el coche ya ha sido parqueado\r\n    for a in EDIFICIO.keys():                                           #si no es el caso luego verifico si la placa esta registrada \r\n        for b in range(len(EDIFICIO[a])):\r\n            for c in range(10):\r\n                if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4:\r\n                    for z in range(6):\r\n                        if placaingreso==EDIFICIO[a][b][c][3]:\r\n                            print('Ese automovil ya se encuentra aqui.')\r\n                            return ingreso(USUARIOS)\r\n    for x in range(len(USUARIOS['usuarios'])):\r\n        if placaingreso==USUARIOS['usuarios'][x][3]:\r\n            n=n+1\r\n    if n==0:\r\n        print('Su vehiculo no esta registrado...')\r\n        tipovehiculo=eval(input('Ingrese su tipo de vehiculo:\\n 1.Automóvil\\n 2.Automóvil Eléctrico\\n 3.Motocicleta\\n 4.Discapacitado\\n(Ingrese el numero del tipo de auto que tenga)\\n'))\r\n        lista=['???','???','Visitante',placaingreso,tipovehiculo,'Diario']\r\n        USUARIOS['usuarios'].append(lista)\r\n    return USUARIOS,placaingreso\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef ingreso2(EDIFICIO,placaingreso,USUARIOS):                              #Aqui utilizo 6 contadores para numerar el numero de puestos disponibles.\r\n    for y in range(len(USUARIOS['usuarios'])):                             #Sumo los contadores recorriendo el diccionario con varios for anidados \r\n        if placaingreso==USUARIOS['usuarios'][y][3]:\r\n            minilista=USUARIOS['usuarios'][y]\r\n    disponibles1=0\r\n    disponibles2=0\r\n    disponibles3=0\r\n    disponibles4=0\r\n    disponibles5=0\r\n    disponibles6=0\r\n    if minilista[4]==1:\r\n        for a in EDIFICIO.keys():\r\n            for g in range(len(EDIFICIO[a])):\r\n                for h in range(10):\r\n                    if EDIFICIO[a][g][h]==minilista[4]:\r\n                        if a=='Piso1':\r\n                            
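#se incrementa el contador de puestos libres del piso correspondiente\r\n                            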
disponibles1+=1\r\n elif a=='Piso2':\r\n disponibles2+=1\r\n elif a=='Piso3':\r\n disponibles3+=1\r\n elif a=='Piso4':\r\n disponibles4+=1\r\n elif a=='Piso5':\r\n disponibles5+=1\r\n elif a=='Piso6':\r\n disponibles6+=1\r\n if minilista[4]==2:\r\n for a in EDIFICIO.keys():\r\n for g in range(len(EDIFICIO[a])):\r\n for h in range(10):\r\n if EDIFICIO[a][g][h]==minilista[4] or EDIFICIO[a][g][h]==1:\r\n if a=='Piso1':\r\n disponibles1+=1\r\n elif a=='Piso2':\r\n disponibles2+=1\r\n elif a=='Piso3':\r\n disponibles3+=1\r\n elif a=='Piso4':\r\n disponibles4+=1\r\n elif a=='Piso5':\r\n disponibles5+=1\r\n elif a=='Piso6':\r\n disponibles6+=1\r\n if minilista[4]==3:\r\n for a in EDIFICIO.keys():\r\n for g in range(len(EDIFICIO[a])):\r\n for h in range(10):\r\n if EDIFICIO[a][g][h]==minilista[4]:\r\n if a=='Piso1':\r\n disponibles1+=1\r\n elif a=='Piso2':\r\n disponibles2+=1\r\n elif a=='Piso3':\r\n disponibles3+=1\r\n elif a=='Piso4':\r\n disponibles4+=1\r\n elif a=='Piso5':\r\n disponibles5+=1\r\n elif a=='Piso6':\r\n disponibles6+=1\r\n if minilista[4]==4:\r\n for a in EDIFICIO.keys():\r\n for g in range(len(EDIFICIO[a])):\r\n for h in range(10):\r\n if EDIFICIO[a][g][h]==minilista[4] or EDIFICIO[a][g][h]==1:\r\n if a=='Piso1':\r\n disponibles1+=1\r\n elif a=='Piso2':\r\n disponibles2+=1\r\n elif a=='Piso3':\r\n disponibles3+=1\r\n elif a=='Piso4':\r\n disponibles4+=1\r\n elif a=='Piso5':\r\n disponibles5+=1\r\n elif a=='Piso6':\r\n disponibles6+=1\r\n \r\n return EDIFICIO,disponibles1,disponibles2,disponibles3,disponibles4,disponibles5,disponibles6,minilista \r\n\r\n\r\n\r\ndef representacion(EDIFICIO,minilista):\r\n pisoelegido=eval(input('Ingrese el piso en el que desea dejar su auto: \\n(Solo ingrese el numero del piso que desea)\\n '))\r\n if pisoelegido==1:\r\n pisoelegido='Piso1'\r\n elif pisoelegido==2:\r\n pisoelegido='Piso2'\r\n elif pisoelegido==3:\r\n pisoelegido='Piso3'\r\n elif pisoelegido==4:\r\n pisoelegido='Piso4'\r\n elif pisoelegido==5: #Una vez el usuario elige su piso deseado creo un for que toma como rango el piso elegido, \r\n pisoelegido='Piso5' #Para la representacion utilizo varios for con una lista a la cual se van añadiendo 'X' o 'O' Dependiendo de si esta libre o no el puesto \r\n elif pisoelegido==6: #Logro todo esto con condicionales y recorriendo el diccionario \r\n pisoelegido='Piso6' \r\n else: \r\n print('Ese numero no esta en el menu') \r\n print('','','','',1,'','','',2,'','','',3,'','','',4,'','','',5,'','','',6,'','','',7,'','','',8,'','','',9,'','','',10)\r\n for x in range(len(EDIFICIO[pisoelegido])):\r\n lista=[]\r\n if minilista[4]==4 or minilista[4]==2:\r\n for y in range(10):\r\n if minilista[4]==EDIFICIO[pisoelegido][x][y] or 1==EDIFICIO[pisoelegido][x][y]: \r\n lista.append('O')\r\n else:\r\n lista.append('X')\r\n print(x+1,lista)\r\n else:\r\n for y in range(10):\r\n if minilista[4]==EDIFICIO[pisoelegido][x][y]: \r\n lista.append('O')\r\n else:\r\n lista.append('X')\r\n print(x+1,lista) \r\n return pisoelegido\r\n\r\ndef ocupa(EDIFICIO,minilista,pisoelegido): #Ahora le pido al usuario que eliga una fila y una columna \r\n fila=eval(input('Ingrese la fila del lugar que desea: ')) #Con ello puedo acceder facilmente al lugar donde el usuario desea ubicar su coche \r\n columna=eval(input('Ingrese la columna del lugar que desea: ')) #Pongo el -1 porque para acceder al diccionario se empieza desde 0. 
Pero para mayor comodidad para el usario le pido la fila y la columna desde 1 \r\n if minilista[4]==2 or minilista[4]==4: \r\n if minilista[4]==EDIFICIO[pisoelegido][fila-1][columna-1] or EDIFICIO[pisoelegido][fila-1][columna-1]==1: \r\n EDIFICIO[pisoelegido][fila-1][columna-1]=minilista #Aqui remplazo el puesto\r\n return EDIFICIO,fila,columna\r\n else:\r\n print('Ese sitio esta ocupado')\r\n return ocupa(EDIFICIO,minilista,pisoelegido)\r\n else:\r\n if minilista[4]==EDIFICIO[pisoelegido][fila-1][columna-1]:\r\n EDIFICIO[pisoelegido][fila-1][columna-1]=minilista\r\n return EDIFICIO,fila,columna\r\n else:\r\n print('Ese sitio esta ocupado')\r\n return ocupa(EDIFICIO,minilista,pisoelegido)\r\n\r\n \r\n\r\ndef validacionretiro(EDIFICIO):\r\n placaretiro=str(input('Ingrese la placa de su vehiculo: '))\r\n horas=eval(input('Ingrese su numero de horas que ha permanecido el vehiculo: '))\r\n n=0\r\n\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])):\r\n for c in range(10):\r\n if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4: #Me sucedio un error y es que no puedo\r\n for z in range(6): #Transformar una lista a int, no tengo idea porque \r\n if placaretiro==EDIFICIO[a][b][c][z]: #Mi solucion fue convertir la lista a str \r\n lista=EDIFICIO[a][b][c] \r\n n+=1 \r\n if lista[4]==1: \r\n EDIFICIO[a][b][c]='Automóvil' \r\n elif lista[4]==2: \r\n EDIFICIO[a][b][c]='Automóvil Eléctrico' \r\n elif lista[4]==3: \r\n EDIFICIO[a][b][c]='Motocicleta'\r\n elif lista[4]==4:\r\n EDIFICIO[a][b][c]='Discapacitado'\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])): #Y aca transformo ese srt al numero correspondiente al tipo de auto \r\n for c in range(10): #Todo esto con el fin de que cuando retiren el coche, el puesto que este ocupaba vuelva a ser del tipo que era (Automovil, discapacitados...) 
\r\n if EDIFICIO[a][b][c]=='Automóvil': \r\n EDIFICIO[a][b][c]=1 \r\n elif EDIFICIO[a][b][c]=='Automóvil Eléctrico': \r\n EDIFICIO[a][b][c]=1\r\n elif EDIFICIO[a][b][c]=='Motocicleta':\r\n EDIFICIO[a][b][c]=3\r\n elif EDIFICIO[a][b][c]=='Discapacitado':\r\n EDIFICIO[a][b][c]=1\r\n \r\n \r\n if n==0:\r\n print('Ese vehiculo no esta aqui')\r\n else:\r\n if lista[5].lower()=='mensualidad':\r\n print('No debe realizar un pago') #Aca miro que tipo de pago tiene y hago las operaciones correspondientes\r\n else:\r\n if lista[2].lower()=='estudiante':\r\n print('El valor a pagar es',horas*1000)\r\n elif lista[2].lower()=='profesor':\r\n print('El valor a pagar es',horas*2000)\r\n elif lista[2].lower()=='personal administrativo':\r\n print('El valor a pagar es',horas*1500)\r\n elif lista[2].lower()=='visitante':\r\n print('El valor a pagar es',horas*3000)\r\n\r\n\r\n\r\ndef representacion2(fila,columna,minilista,EDIFICIO,pisoelegido):\r\n for x in range(len(EDIFICIO[pisoelegido])):\r\n lista=[] #Es un copy paste de la reprensentacion 1 solo que esta es la reprensentacion con los puestos ocupados ya con la informacion de los coches\r\n if minilista[4]==4 or minilista[4]==2:\r\n for y in range(10):\r\n if EDIFICIO[pisoelegido][fila-1][columna-1]==EDIFICIO[pisoelegido][x][y]: #Este es el añadido\r\n lista.append(minilista)\r\n elif minilista[4]==EDIFICIO[pisoelegido][x][y] or 1==EDIFICIO[pisoelegido][x][y]: \r\n lista.append('O')\r\n else:\r\n lista.append('X')\r\n print(x+1,lista)\r\n else:\r\n for y in range(10):\r\n if EDIFICIO[pisoelegido][fila-1][columna-1]==EDIFICIO[pisoelegido][x][y]:\r\n lista.append(minilista)\r\n elif minilista[4]==EDIFICIO[pisoelegido][x][y]: \r\n lista.append('O')\r\n else:\r\n lista.append('X')\r\n print(x+1,lista) \r\n\r\n\r\n\r\ndef txt1(EDIFICIO):\r\n numest=0\r\n numprof=0\r\n numadm=0\r\n numvis=0\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])):\r\n for c in range(10):\r\n if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4:\r\n if EDIFICIO[a][b][c][2].lower() =='estudiante':\r\n numest+=1\r\n elif EDIFICIO[a][b][c][2].lower() =='profesor':\r\n numprof+=1\r\n elif EDIFICIO[a][b][c][2].lower() =='personal administrativo':\r\n numadm+=1\r\n elif EDIFICIO[a][b][c][2].lower() =='visitante':\r\n numvis+=1\r\n return numest,numprof,numadm,numvis\r\n\r\ndef txt2(EDIFICIO):\r\n numaut=0\r\n numele=0\r\n nummoto=0\r\n numdis=0\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])):\r\n for c in range(10):\r\n if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4:\r\n if EDIFICIO[a][b][c][4]==1:\r\n numaut+=1\r\n elif EDIFICIO[a][b][c][4]==2:\r\n numele+=1\r\n elif EDIFICIO[a][b][c][4]==3:\r\n nummoto+=1\r\n elif EDIFICIO[a][b][c][4]==4:\r\n numdis+=1\r\n return numaut,numele,nummoto,numdis\r\n\r\ndef txt3(EDIFICIO):\r\n contpor=0\r\n contpiso1=0\r\n contpiso2=0\r\n contpiso3=0\r\n contpiso4=0\r\n contpiso5=0\r\n contpiso6=0\r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])):\r\n for c in range(10):\r\n if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4:\r\n contpor+=1\r\n \r\n for a in EDIFICIO.keys():\r\n for b in range(len(EDIFICIO[a])):\r\n for c in range(10):\r\n if EDIFICIO[a][b][c]!=1 and EDIFICIO[a][b][c]!=2 and EDIFICIO[a][b][c]!=3 and EDIFICIO[a][b][c]!=4:\r\n if a=='Piso1':\r\n contpiso1+=1\r\n elif a=='Piso2':\r\n contpiso2+=1\r\n elif a=='Piso3':\r\n contpiso3+=1\r\n elif 
a=='Piso4':\r\n contpiso4+=1\r\n elif a=='Piso5':\r\n contpiso5+=1\r\n elif a=='Piso6':\r\n contpiso6+=1\r\n \r\n return contpor,contpiso1,contpiso2,contpiso3,contpiso4,contpiso5,contpiso6\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nAPAGADO=''\r\nwhile APAGADO!=-1: #El programa con un while para cuando se desee parar, lo unico que hice aqui es organizar las funciones para que el programa empeice a correr\r\n EDIFICIO=transformicepisos(EDIFICIO)\r\n USUARIOS=tranformiceusuarios (USUARIOS)\r\n accion=eval(input('Ingrese la accion que quiera realizar:\\n 1.Registrar auto\\n 2.Ingresar un auto\\n 3.Retirar un auto\\n '))\r\n if accion==1:\r\n USUARIOS=validacionregistro(USUARIOS)\r\n print(USUARIOS)\r\n\r\n\r\n\r\n \r\n elif accion==2:\r\n USUARIOS,placaingreso=ingreso(USUARIOS)\r\n EDIFICIO,disponibles1,disponibles2,disponibles3,disponibles4,disponibles5,disponibles6,minilista=ingreso2(EDIFICIO,placaingreso,USUARIOS)\r\n print('El numero de puestos disponibles en el piso 1 es:', disponibles1)\r\n print('El numero de puestos disponibles en el piso 2 es:', disponibles2)\r\n print('El numero de puestos disponibles en el piso 3 es:', disponibles3)\r\n print('El numero de puestos disponibles en el piso 4 es:', disponibles4)\r\n print('El numero de puestos disponibles en el piso 5 es:', disponibles5)\r\n print('El numero de puestos disponibles en el piso 6 es:', disponibles6)\r\n pisoelegido=representacion(EDIFICIO,minilista)\r\n print('\\n')\r\n EDIFICIO,fila,columna=ocupa(EDIFICIO,minilista,pisoelegido)\r\n print('\\n')\r\n representacion2(fila,columna,minilista,EDIFICIO,pisoelegido)\r\n\r\n\r\n\r\n elif accion==3:\r\n validacionretiro(EDIFICIO)\r\n\r\n\r\n \r\n else:\r\n print('Esa accion no existe')\r\n \r\n Cargaredificio=EDIFICIO\r\n Cargarusuarios=USUARIOS\r\n with open ('BACKUPEDIFICIO.json','w') as file:\r\n json.dump(Cargaredificio,file)\r\n with open ('BACKUPUSURIOS.json','w') as file:\r\n json.dump(Cargarusuarios,file)\r\n\r\n\r\n numest,numprof,numadm,numvis=txt1(EDIFICIO)\r\n REPORTE1=open(\"Cantidad de vehículos estacionados según el tipo de usuario.txt\",\"w\")\r\n REPORTE1.write('Numero de vehiculos estacionados de estudiantes:'+str(numest)+'\\n')\r\n REPORTE1.write('Numero de vehiculos estacionados de profesores:'+str(numprof)+'\\n')\r\n REPORTE1.write('Numero de vehiculos estacionados de administrativos:'+str(numadm)+'\\n')\r\n REPORTE1.write('Numero de vehiculos estacionados de visitantes:'+str(numvis)+'\\n')\r\n REPORTE1.close()\r\n\r\n numaut,numele,nummoto,numdis=txt2(EDIFICIO)\r\n REPORTE2=open(\"Cantidad de vehículos estacionados según el tipo de vehículo.txt\",\"w\")\r\n REPORTE2.write('Numero de automóviles:'+str(numaut)+'\\n')\r\n REPORTE2.write('Numero de automóvil eléctricos:'+str(numele)+'\\n')\r\n REPORTE2.write('Numero de motocicletas:'+str(nummoto)+'\\n')\r\n REPORTE2.write('Numero de discapacitados:'+str(numdis)+'\\n')\r\n REPORTE2.close()\r\n\r\n contpor,contpiso1,contpiso2,contpiso3,contpiso4,contpiso5,contpiso6=txt3(EDIFICIO)\r\n porto=(contpor*100)/550\r\n por1=(contpiso1*100)/100\r\n por2=(contpiso2*100)/100\r\n por3=(contpiso3*100)/100\r\n por4=(contpiso4*100)/100\r\n por5=(contpiso5*100)/100\r\n por6=(contpiso6*100)/50\r\n REPORTE3=open(\"Porcentajes.txt\",\"w\")\r\n REPORTE3.write('Porcentaje ocupación global:'+str(porto)+'\\n')\r\n REPORTE3.write('Porcentaje ocupación piso 1:'+str(por1)+'%\\n')\r\n REPORTE3.write('Porcentaje ocupación piso 2:'+str(por2)+'%\\n')\r\n REPORTE3.write('Porcentaje ocupación piso 3:'+str(por3)+'%\\n')\r\n 
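#los porcentajes usan la capacidad de cada piso: 100 puestos en los pisos 1 a 5 y 50 en el piso 6 (550 en total)\r\n    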
REPORTE3.write('Porcentaje ocupación piso 4:'+str(por4)+'%\\n')\r\n REPORTE3.write('Porcentaje ocupación piso 5:'+str(por5)+'%\\n')\r\n REPORTE3.write('Porcentaje ocupación piso 6:'+str(por6)+'%\\n')\r\n REPORTE3.close()\r\n\r\n\r\n\r\n \r\n APAGADO=int(input('Para apagar el programa ingrese -1, si desea continuar ingrese cualquier tecla diferente a -1. '))\r\n\r\n\r\n","repo_name":"Deidei-x/CodeDeployGitHubDemo.","sub_path":"Proyecto UWU.py","file_name":"Proyecto UWU.py","file_ext":"py","file_size_in_byte":25330,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"232526295","text":"\"\"\"\nScript to preprocess DEM data so that it can be used as input in an RNN.\nIn YADE data is stored to .npy files every interval of time steps.\n\nData consists of:\n\n* contact_params (samples, c).\n* input_params (samples, sequence_length, x).\n* output_params (samples, sequence_length, y).\n\nThis example considers DEM simulations of Triaxial compressions\nat different confinments (pressures), in drain and undrained conditions.\n\"\"\"\nimport os\n\nimport h5py\nimport numpy as np\n\nCONTACT_KEYS = [\n 'E', # young's modulus = log_10(E) with E in Pa\n 'v', # poisson's ratio\n 'kr', # rolling stiffness\n 'eta', # rolling friction\n 'mu' # sliding friction\n ]\n\nINPUT_KEYS_UNDRAINED = [\n 'e_x', # strains in 3 directions\n 'e_y',\n 'e_z' # the axial direction\n]\n\nINPUT_KEYS_DRAINED = [\n 'e_z' # the axial direction\n]\n\nOUTPUT_KEYS = [\n 'e', # void ratio\n 'p', # mean stress\n 'q', # deviatoric stress\n 'f0', # average contact normal force\n 'a_c', # fabric anisotropy\n 'a_n', # mechanical anisotropy\n 'a_t' # mechanical anisotropy due to tangential forces\n]\n\nUNUSED_KEYS_SEQUENCE = [\n 'dt', # size of timestep taken at this iteration\n 'numIter', # iteration number in the simulation at which equilibrium is\n # reached at the current loading\n 'K', # mysterious\n 'A' # also mysterious\n]\n\nUNUSED_KEYS_CONSTANT = [\n 'conf',# confining pressure (stored as group name) = log_10(confinement_pressure)\n 'mode',# drained/undrained (stored as group name)\n 'num' # number of particles\n]\n\ndef convert_all_to_hdf5(\n pressures: list,\n experiment_types: list,\n paths: tuple,\n sequence_length: int,\n stored_in_subfolders: bool\n ):\n \"\"\"\n Merge data of experiments of different pressures and types into a single\n hdf5 file.\n\n :param pressures: List of strings of pressures available.\n :param experiment_types: List of strings of experiment types available.\n :param paths: tuple containing two strings:\n - data_dir: Root directory containing all the data.\n - target_file: Path to hdf5 file to be created (should also have the name and the extension).\n :param sequence_length: Expected number of time steps in sequences. If the sequence is\n longer, only the first ``sequence_length`` element will be considered.\n :param stored_in_subfolders: True if yade .npy files are stored in pressure/experiment_type\n tree structure. False if all files are in a single folder `data_dir`.\n\n .. 
warning:: Will remove `target_file` if already exists.\n \"\"\"\n data_dir, target_file = paths\n if os.path.exists(target_file):\n os.remove(target_file)\n\n with h5py.File(target_file, 'a') as f:\n f.attrs['inputs_drained'] = INPUT_KEYS_DRAINED\n f.attrs['inputs_undrained'] = INPUT_KEYS_UNDRAINED\n f.attrs['outputs'] = OUTPUT_KEYS\n f.attrs['contact_params'] = CONTACT_KEYS\n f.attrs['unused_keys_sequence'] = UNUSED_KEYS_SEQUENCE\n f.attrs['unused_keys_constant'] = UNUSED_KEYS_CONSTANT\n\n for pressure in pressures:\n for experiment_type in experiment_types:\n grp = f.require_group(f'{pressure}/{experiment_type}')\n inputs_tensor, contact_tensor, outputs_tensor = \\\n convert_to_arrays(pressure, experiment_type,\n sequence_length, data_dir, stored_in_subfolders)\n grp['contact_params'] = contact_tensor\n grp['inputs'] = inputs_tensor\n grp['outputs'] = outputs_tensor\n\n print(f\"Added all data to {target_file}\")\n\n\ndef convert_to_arrays(\n pressure: str,\n experiment_type: str,\n sequence_length: int,\n data_dir: str,\n stored_in_subfolders: bool\n ):\n \"\"\"\n For a given pressure and experiment type, read all the files in the corresponding\n directory and concatenate those with the expected sequence length together\n into numpy arrays.\n\n :param pressure: String indicating the pressure used.\n :param experiment_type: String indicating the experiment type ('drained', 'undrained')\n :param sequence_length: Expected number of timesteps in sequence. If the sequence is\n longer, only the first ``sequence_length`` element will be considered.\n :param data_dir: The root directory of the data.\n :param stored_in_subfolders: True if yade .npy files are stored in pressure/experiment_type\n tree structure. False if all files are in a single folder `data_dir`.\n\n :return: Tuple of arrays of inputs, contacts, outputs\n\n .. warning:: sequences longer and shorter than `sequence_length` are ignored.\n \"\"\"\n if stored_in_subfolders: data_dir = data_dir + f'{pressure}/{experiment_type}/'\n if not os.listdir(data_dir):\n raise FileNotFoundError(f\"Directory {data_dir} is empty.\")\n\n file_names = [fn for fn in os.listdir(data_dir) if fn.endswith('.npy')]\n\n # rescale pressures by 10**6 to make it order 1.\n scalings = {key: 1. 
for key in OUTPUT_KEYS}\n scalings['p'] = scalings['q'] = 1.e6\n\n contact_list, inputs_list, outputs_list, other_lengths = ([] for _ in range(4))\n for f in file_names:\n try:\n sim_params, sim_features = np.load(data_dir + f, allow_pickle=True)\n except FileNotFoundError:\n print('IOError', data_dir + f)\n continue\n\n if not stored_in_subfolders:\n if (str(10**sim_params['conf']) != pressure) or (sim_params['mode'] != experiment_type):\n continue\n\n # test if sequence is of full length\n test_features = sim_features[OUTPUT_KEYS[0]]\n if len(test_features) >= sequence_length:\n inputs, outputs, contact_params = get_members(sim_params, sim_features, scalings, experiment_type, sequence_length)\n contact_list.append(contact_params)\n inputs_list.append(inputs)\n outputs_list.append(outputs)\n else:\n other_lengths.append(len(test_features))\n\n print(f\"At confining pressure {pressure}, for the {experiment_type} case, \"\n f\"there are {len(other_lengths)} samples with a different sequence lengths: {other_lengths}.\")\n\n # keras requires (batch, sequence_length, features) shape, so transpose axes\n inputs_list = np.transpose(np.array(inputs_list), (0, 2, 1))\n outputs_list = np.transpose(np.array(outputs_list), (0, 2, 1))\n contact_list = np.array(contact_list)\n\n print(f'Created array of {outputs_list.shape[0]} samples,')\n return inputs_list, contact_list, outputs_list\n\n\ndef get_members(\n sim_params: dict,\n sim_features: dict,\n scalings: dict,\n experiment_type: str,\n sequence_length: int):\n \"\"\"\n Get inputs, outputs and contact_params lists from sim_params (unpickled from simulation output files), given the constants declared.\n\n :param sim_params: dictionary loaded from simulation output file. Contains the fixed parameters of the simulation (i.e. contact params).\n :param sim_features: dictionary loaded from simulation output file. 
Contains the variable parameters during the simualtions (i.e inputs and outputs).\n :param scalings: floats to normalize some of the parameters.\n :param experiment_type: String indicating the experiment type ('drained', 'undrained')\n :param sequence_length: length of time series, if the simulations has longer time series than this parameter,\n the sequence will be cut and only the first sequence_lenght values will be taken into account.\n\n :return: 3 lists containing, inputs, outputs and contact_params.\n \"\"\"\n contact_params = [sim_params[key] for key in CONTACT_KEYS]\n #contact_list.append(contact_params)\n if experiment_type == 'drained':\n inputs = [sim_features[key][:sequence_length] for key in INPUT_KEYS_DRAINED]\n # Add sigma 2 and sigma 3 to inputs (optional) Not necessary since this info is in pressure or conf.\n #sigma_2 = (np.array(sim_features['p']) - (np.array(sim_features['q'])/3.0)) / scalings['p']\n #inputs.append(list(sigma_2[:sequence_length])) # sigma 2\n #inputs.append(list(sigma_2[:sequence_length])) # sigma 3\n elif experiment_type == 'undrained':\n inputs = [sim_features[key][:sequence_length] for key in INPUT_KEYS_UNDRAINED]\n else: raise ValueError(f\"experiment type must be drained or undrained but got {experiment_type}\")\n outputs = [np.array(sim_features[key][:sequence_length]) / scalings[key] for key in OUTPUT_KEYS]\n\n return inputs, outputs, contact_params\n\n\ndef get_pressures(data_dir: str):\n \"\"\"\n From a list of .npy YADE files get the confinement pressures, existing in the dataset.\n\n :param data_dir: Path to the data (YADE .npy files)\n\n :return: List of confinement pressures present in the dataset.\n \"\"\"\n if not os.listdir(data_dir):\n raise FileNotFoundError(f\"Directory {data_dir} is empty.\")\n\n pressures = []\n file_names = [fn for fn in os.listdir(data_dir) if fn.endswith('.npy')]\n for f in file_names:\n try:\n sim_params, _ = np.load(data_dir + f, allow_pickle=True)\n except FileNotFoundError:\n print('IOError', data_dir + f)\n continue\n pressure = str(10**sim_params['conf'])\n if pressure not in pressures: pressures.append(pressure)\n\n return pressures\n\n\ndef main():\n # path to directory containing the data\n data_dir='/Users/luisaorozco/Documents/Projects/GrainLearning/data/TriaxialCompression/'\n\n # path where the resultant hdf5 file will be created (including name of the file and extension)\n target_file='sequences.hdf5'\n\n # Option 1: YADE .npy files stored in subfolders pressure/experiment_type\n pressures = ['0.2e6', '0.5e6', '1.0e6']\n experiment_types = ['undrained','drained']\n stored_in_subfolders = True\n\n # Option 2: YADE .npy files are stored in a single folder.\n #pressures_2 = get_pressures('./')\n #experiment_types_2 = ['drained']\n #stored_in_subfolders_2 = False\n\n # Call main function\n convert_all_to_hdf5(\n pressures=pressures,\n experiment_types=experiment_types,\n paths=(data_dir, target_file),\n sequence_length=200,\n stored_in_subfolders=stored_in_subfolders\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chyalexcheng/grainLearning","sub_path":"scripts_tools/data_parsing/triaxial_YADE.py","file_name":"triaxial_YADE.py","file_ext":"py","file_size_in_byte":10376,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"14266030519","text":"# There is 100 lamps and for each lamp there is his own toggle.\r\n# each toggle turn on his own lamp and the multiplies of this lamp\r\n# for instance: toggle number 4 will turn on lamp number 
\r\nimport numpy as np\r\n\r\nn=100\r\nlamps = list(np.zeros(n,dtype='bool'))\r\nnumeration = list(range(1,n+1))\r\n\r\n\r\nfor toggle in numeration:\r\n    \r\n    #print(f'toggle number: {toggle}')\r\n    for index, lamp in zip(numeration,lamps):\r\n        if index % toggle == 0 and lamps[index-1]== False:\r\n            lamps[index-1]= True\r\n        elif index % toggle == 0 and lamps[index-1]== True:\r\n            lamps[index-1]=False\r\n        \r\n        #print(index,lamps[index-1])\r\n    #print()\r\n\r\nresults=[]\r\nfor index,lamp in zip(numeration,lamps):\r\n    if lamp:\r\n        results.append(index)\r\n        \r\nprint(f'For {n} toggles the result: {results}')\r\n\r\n\r\n\r\n    \r\n","repo_name":"BaruchY7/My_python","sub_path":"Lamps Toggles riddle.py","file_name":"Lamps Toggles riddle.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19289575748","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\n\"\"\"\r\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<------COMMENT------>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\nHere, first the app name is declared so that Django can find the right app, and then all URL patterns\r\nof the app are added to connect them with the corresponding views.\r\n\"\"\"\r\n\r\napp_name = 'polls'\r\nurlpatterns = [\r\n    path('', views.index, name='index'),\r\n    path('<int:pk>/', views.DetailView.as_view(), name='detail'),\r\n    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),\r\n    path('<int:question_id>/vote/', views.vote, name='vote'),\r\n]","repo_name":"mohaimin2018/Assignment_1_id_2310076221","sub_path":"Assignment_1_id_2310076221/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11920958370","text":"# -*- coding: utf-8 -*-\n# import sys\nfrom configparser import ConfigParser\nfrom PySide2 import QtWidgets\nfrom PySide2.QtWidgets import QFileDialog\nimport xlsxwriter\nimport csv\nimport mm_parse_gui\nimport progress_gui\nfrom vk_upload import *\nfrom mm_parser import *\nimport json\nimport configparser\nimport sys\nimport os\nfrom platform import system\nimport shutil\n\n\nPLATFORM_SEP = '\\\\' if system() == 'Windows' else '/'  # Set the path separator depending on the OS\nOLD_DATA = False\n\n\nclass Parse(QtWidgets.QDialog, progress_gui.UiProgress):\n    def __init__(self):\n        self.worker = None\n        super(Parse, self).__init__()\n        self.setupUi(self)\n        self.setWindowTitle('Парсинг.')\n        self.run()\n\n    def run(self):\n        self.worker = ParserMM(OLD_DATA)\n        self.progressBar.setRange(1, 100)\n        self.progressBar.setValue(0)\n        self.worker.progressBar.connect(self.update_progress)\n        self.worker.start()\n\n    # def closeEvent(self, *args):\n    #     # self.worker.join()\n    #     # self.worker.wait()\n    #     # self.worker.terminate()\n    #     print('sss')\n\n    def update_progress(self, err_code, err_msg, i, msg):\n\n        \"\"\"\n        Update the label and the progressBar.\n        :param err_code: 100 - progress bar value\n                        200 - successful completion\n                        404 - error\n                        500 - non-critical error, the program is not interrupted\n                        600 - additional output, e.g. \"Connection attempt No. 2\"\n                        700 - set the title of the progress window\n        :param err_msg: error message from the signal\n        :param i: counter for the progress bar\n        :param msg: message from the thread\n        :return:\n        \"\"\"\n        if err_code == 200:\n            QtWidgets.QMessageBox.information(None, self.tr('Парсинг.'),\n                                              self.tr(err_msg),\n
                                              QtWidgets.QMessageBox.StandardButton.Ok)\n            global OLD_DATA\n            OLD_DATA = False\n            self.hide()\n        elif err_code == 300:\n            QtWidgets.QMessageBox.information(None, self.tr('Парсинг.'),\n                                              self.tr(err_msg),\n                                              QtWidgets.QMessageBox.StandardButton.Ok)\n\n        elif err_code == 404:\n            self.hide()\n            QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                           self.tr(err_msg),\n                                           QtWidgets.QMessageBox.StandardButton.Ok)\n        elif err_code == 500:\n            QtWidgets.QMessageBox.information(None, self.tr('Парсинг.'),\n                                              self.tr(err_msg),\n                                              QtWidgets.QMessageBox.StandardButton.Ok)\n        elif err_code == 600:\n            self.worker.progressBar.connect(self.error_label.setText(err_msg))\n        elif err_code == 700:\n            self.worker.progressBar.connect(self.error_label.setText(err_msg))\n            self.setWindowTitle('Парсим данные с предыдущей сессии')\n        else:\n            self.worker.progressBar.connect(self.progressBar.setValue(i))\n            self.error_label.setText(err_msg)\n            self.label.setText(msg)\n\n\nclass VKUpload(QtWidgets.QDialog, progress_gui.UiProgress):\n    def __init__(self):\n        self.worker = None\n        super(VKUpload, self).__init__()\n        self.setupUi(self)\n        self.setWindowTitle('Выгрузка в ВК.')\n        self.run()\n\n    def run(self):\n        self.worker = VKUpl()\n        self.progressBar.setRange(1, 100)\n        self.progressBar.setValue(0)\n        self.worker.progressBar.connect(self.update_progress)\n        self.worker.start()\n\n    def update_progress(self, err_code, err_msg, i, msg):\n        \"\"\"\n        Update the label and the progressBar.\n\n        :param err_code: up to 100 - progress bar value\n                         200 - successful completion\n                         404 - error\n        :param err_msg: error message from the signal\n        :param i:\n        :param msg: message from the thread\n        :return:\n        \"\"\"\n        if err_code == 200:\n            QtWidgets.QMessageBox.information(None, self.tr('Выгрузка в ВК'),\n                                              self.tr(err_msg),\n                                              QtWidgets.QMessageBox.StandardButton.Ok)\n            self.hide()\n        elif err_code == 404:\n            self.hide()\n            QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                           self.tr(err_msg),\n                                           QtWidgets.QMessageBox.StandardButton.Ok)\n            self.hide()\n        else:\n            self.worker.progressBar.connect(self.progressBar.setValue(i))\n            self.label.setText(msg)\n\n\nclass MainWindow(QtWidgets.QMainWindow, mm_parse_gui.Ui_MainWindow):\n    def openParseDialog(self):\n        self.write_settings()\n        self.w1 = Parse()\n        self.w1.show()\n\n    def openVKUpload(self):\n        self.write_vk_settings()\n        self.w2 = VKUpload()\n        self.w2.show()\n\n    def change_directory(self):\n        directory = QFileDialog.getExistingDirectory(self, \"Выбрать папку\", \".\")\n        self.output_dir_entry.setText(directory)\n\n    @staticmethod\n    def _save_file(_section_url):\n        file = QFileDialog.getSaveFileName(filter='*.xlsx;; *.csv;; *.json')\n        save_file(file[0], file[1].replace('*.', ''), f'temp{PLATFORM_SEP}items{PLATFORM_SEP}{_section_url}.tmp')\n\n    def __init__(self):\n        self.w1, self.w2 = None, None\n        super().__init__()\n        self.setupUi(self)\n\n        self.fill_parser_settings_fields()  # Fill the input fields with values from configs/config\n        self.fill_vk_upload_fields()  # Fill the input fields with values from configs/vkconfig\n        self.parse_button.pressed.connect(self.openParseDialog)\n        self.vk_upload_button.pressed.connect(self.openVKUpload)\n        section = self.section_combo.currentText()\n        self.save_as.triggered.connect(lambda: self._save_file(section))\n\n        tmp_dir = os.listdir(f'temp{PLATFORM_SEP}soup')\n        if len(tmp_dir) != 0:\n            p = QtWidgets.QMessageBox.question(None, self.tr('Парсинг.'),\n                                               'С прошлого запуска остались не распарсенные данные.\n'\n                                               ' Распарсить их?',\n                                               QtWidgets.QMessageBox.StandardButton.Yes,\n                                               QtWidgets.QMessageBox.StandardButton.No)\n            if p == 
QtWidgets.QMessageBox.StandardButton.Yes:\n                global OLD_DATA\n                OLD_DATA = True\n                self.openParseDialog()\n            else:\n                OLD_DATA = False\n                shutil.rmtree(f'temp{PLATFORM_SEP}soup')\n                os.mkdir(f'temp{PLATFORM_SEP}soup')\n\n    def write_settings(self):\n\n        config = configparser.ConfigParser()\n        config['config'] = {\n            'PRODUCT_COUNT': '80',\n            'USERNAME': self.username_entry.text(),\n            'PASSWORD': self.password_entry.text(),\n            'DUMP_TO_FILE': 'csv',\n            'SECTION_URL': self.section_combo.currentText(),\n        }\n\n        with open(f'configs{PLATFORM_SEP}config', 'w') as configfile:\n            config.write(configfile)\n\n    def write_vk_settings(self):\n\n        config = configparser.ConfigParser()\n        config['vkconfig'] = {\n            'ADDITIONAL_VALUE': self.additional_value_entry.text(),\n            'VK_LOGIN': self.vk_login_entry.text(),\n            'VK_PASSWORD': self.vk_password_entry.text(),\n            'GROUP_ID': self.group_id_entry.text(),\n            'SECTION': self.section_combo_vk.currentText(),\n        }\n\n        with open(f'configs{PLATFORM_SEP}vkconfig', 'w') as configfile:\n            config.write(configfile)\n\n    def fill_parser_settings_fields(self):\n        try:\n            config: ConfigParser = configparser.ConfigParser()\n            config.read(f'configs{PLATFORM_SEP}config')\n\n            try:\n                with open(f'configs{PLATFORM_SEP}sections', 'r') as file:\n                    json.load(file)\n            except FileNotFoundError:\n                QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                               self.tr('Файл настроек разделов сайта не найден...'),\n                                               QtWidgets.QMessageBox.StandardButton.Ok)\n            finally:\n                with open(f'configs{PLATFORM_SEP}sections', 'r') as file:\n                    sections_from_file = json.load(file)\n                count = 0\n                for i in dict(sections_from_file[0]).keys():\n                    self.section_combo.addItem(\"\")\n                    self.section_combo.setItemText(count, i)\n                    count += 1\n\n                self.username_entry.setText(config.get('config', 'USERNAME'))\n                self.password_entry.setText(config.get('config', 'PASSWORD'))\n                self.section_combo.setCurrentText(config.get('config', 'SECTION_URL'))\n\n        except FileNotFoundError:\n            QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                           self.tr('Файл настроек парсера не найден...'),\n                                           QtWidgets.QMessageBox.StandardButton.Ok)\n\n    def fill_vk_upload_fields(self):\n        try:\n            config: ConfigParser = configparser.ConfigParser()\n            config.read(f'configs{PLATFORM_SEP}vkconfig')\n            try:\n                with open(f'configs{PLATFORM_SEP}sections', 'r') as file:\n                    json.load(file)\n            except FileNotFoundError:\n                QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                               self.tr('Файл настроек разделов сайта не найден...'),\n                                               QtWidgets.QMessageBox.StandardButton.Ok)\n            finally:\n                with open(f'configs{PLATFORM_SEP}sections', 'r') as file:\n                    sections_from_file = json.load(file)\n                count = 0\n                for i in dict(sections_from_file[0]).keys():\n                    self.section_combo_vk.addItem(\"\")\n                    self.section_combo_vk.setItemText(count, i)\n                    count += 1\n\n                self.additional_value_entry.setText(config.get('vkconfig', 'ADDITIONAL_VALUE'))\n                self.vk_login_entry.setText(config.get('vkconfig', 'VK_LOGIN'))\n                self.vk_password_entry.setText(config.get('vkconfig', 'VK_PASSWORD'))\n                self.group_id_entry.setText(config.get('vkconfig', 'GROUP_ID'))\n\n        except FileNotFoundError:\n            QtWidgets.QMessageBox.critical(None, self.tr('Ошибка!'),\n                                           self.tr('Файл настроек ВК не найден...'),\n                                           QtWidgets.QMessageBox.StandardButton.Ok)\n\n\n
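# Aside (not in the original file): with the default optionxform, ConfigParser lowercases\n# option names, so the configs/config file generated by write_settings() above ends up\n# looking roughly like:\n#   [config]\n#   product_count = 80\n#   username = ...\n#   password = ...\n#   dump_to_file = csv\n#   section_url = ...\n\n\n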
def main():\n    try:\n        os.mkdir('configs')\n    except FileExistsError:\n        pass\n    try:\n        with open(f'configs{PLATFORM_SEP}config', 'r') as file:\n            file.read()\n    except FileNotFoundError:\n        config = configparser.ConfigParser()\n        config['config'] = {\n            'PRODUCT_COUNT': '80',\n            'USERNAME': '',\n            'PASSWORD': '',\n            'DUMP_TO_FILE': 'json',\n            'SECTION_URL': 'Шапки',\n        }\n        with open(f'configs{PLATFORM_SEP}config', 'w') as configfile:\n            config.write(configfile)\n\n    try:\n        with open(f'configs{PLATFORM_SEP}sections', 'r') as file:\n            file.read()\n    except FileNotFoundError:\n        ParserMM.dump_to_json(None, [{\n            \"Верхняя одежда\": \"\",\n            \"Предзаказ\": \"\",\n            \"Карнавал\": \"\",\n            \"Обувь\": \"\",\n            \"Распродажа\": \"\",\n            \"Трикотаж\": \"\",\n            \"Чулочно-носочные изделия\": \"\",\n            \"Шапки\": \"\",\n            \"Школа\": \"\"\n        }],\n            'w')\n\n    try:\n        with open(f'configs{PLATFORM_SEP}vkconfig', 'r') as file:\n            file.read()\n    except FileNotFoundError:\n        config = configparser.ConfigParser()\n        config['vkconfig'] = {\n            'ADDITIONAL_VALUE': '',\n            'VK_LOGIN': '',\n            'VK_PASSWORD': '',\n            'GROUP_ID': '',\n        }\n\n        with open(f'configs{PLATFORM_SEP}vkconfig', 'w') as configfile:\n            config.write(configfile)\n\n    app = QtWidgets.QApplication(sys.argv)\n    window = MainWindow()\n    window.show()\n    sys.exit(app.exec_())\n\n\ndef save_file(_filename, _format, _data):\n    if _format == 'csv':\n        dump_to_csv(_filename + '.' + _format, _data)\n    elif _format == 'xlsx':\n        dump_to_xlsx(_filename + '.' + _format, _data)\n    elif _format == 'json':\n        shutil.copy2(_data, _filename + '.' + _format)\n\n\ndef dump_to_xlsx(filename, infile):\n    \"\"\"\n    Writes the data to an Excel file.\n\n    :param filename: name of the file to write.\n    :param infile: data to write.\n    :return: None if there is no data to write.\n    \"\"\"\n    if not len(infile):\n        return None\n\n    try:\n        with open(infile, \"r\"):\n            pass\n    except FileNotFoundError:\n        with open(infile, 'w'):\n            pass\n    finally:\n        with open(infile, \"r\") as read_file:\n            data = json.load(read_file)\n\n    with xlsxwriter.Workbook(filename) as workbook:\n        ws = workbook.add_worksheet()\n        cell_format_bottom = workbook.add_format({\n            'bold': True,\n            'border': 1,\n            'align': 'left',\n            'font_size': 10\n        })\n        cell_format = workbook.add_format({\n            'border': 1,\n            'align': 'left',\n            'font_size': 10\n        })\n\n        headers = ['Раздел', 'Артикул', 'Наименование', 'Цена', 'Размер', 'Цвет', 'Фото', 'Описание']\n\n        for col, h in enumerate(headers):\n            ws.write_string(0, col, h, cell_format=cell_format_bottom)\n\n        for row, item in enumerate(data, start=1):\n            try:\n                item['Раздел']\n            except KeyError:\n                item['Раздел'] = ''\n            try:\n                item['Артикул']\n            except KeyError:\n                item['Артикул'] = ''\n            try:\n                item['Название']\n            except KeyError:\n                item['Название'] = ''\n            try:\n                item['Цена']\n            except KeyError:\n                item['Цена'] = ''\n            try:\n                item['Размеры']\n            except KeyError:\n                item['Размеры'] = ''\n            try:\n                item['Цвет']\n            except KeyError:\n                item['Цвет'] = ''\n            try:\n                item['Картинка']\n            except KeyError:\n                item['Картинка'] = ''\n            try:\n                item['Описание']\n            except KeyError:\n                item['Описание'] = ''\n\n            ws.write_string(row, 0, item['Раздел'], cell_format=cell_format)\n            ws.write_string(row, 1, item['Артикул'], cell_format=cell_format)\n            ws.write_string(row, 2, item['Название'], cell_format=cell_format)\n            ws.write_string(row, 3, item['Цена'], cell_format=cell_format)\n            ws.write_string(row, 4, item['Размеры'], cell_format=cell_format)\n            ws.write_string(row, 5, item['Цвет'], cell_format=cell_format)\n            ws.write_string(row, 6, item['Картинка'], cell_format=cell_format)\n            ws.write_string(row, 7, item['Описание'], cell_format=cell_format)\n\n\ndef dump_to_csv(filename, infile, delimiter=';'):\n    \"\"\"\n    Writes the data to a .csv file.\n\n    :param filename: name of the file to write.\n    :param infile: data to write.\n    :param delimiter: column delimiter.\n    :return: None if there is no data to write.\n    \"\"\"\n    
if not len(infile):\n return None\n try:\n with open(infile, \"r\"):\n pass\n except FileNotFoundError:\n with open(infile, 'w'):\n pass\n finally:\n with open(infile, \"r\") as read_file:\n data = json.load(read_file)\n\n with open(filename, \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=delimiter)\n # headers = (['Раздел', 'Артикул', 'Наименование', 'Цена', 'Размер', 'Цвет', 'Фото', 'Описание'])\n # writer.writerow(headers)\n\n for line in data:\n try:\n line['Раздел']\n except KeyError:\n line['Раздел'] = ''\n try:\n line['Артикул']\n except KeyError:\n line['Артикул'] = ''\n try:\n line['Название']\n except KeyError:\n line['Название'] = ''\n try:\n line['Цена']\n except KeyError:\n line['Цена'] = ''\n try:\n line['Размеры']\n except KeyError:\n line['Размеры'] = ''\n try:\n line['Цвет']\n except KeyError:\n line['Цвет'] = ''\n try:\n line['Картинка']\n except KeyError:\n line['Картинка'] = ''\n try:\n line['Описание']\n except KeyError:\n line['Описание'] = ''\n\n writer.writerow((line['Раздел'], line['Артикул'], line['Название'], line['Цена'], line['Размеры'],\n line['Цвет'], line['Картинка'], line['Описание']))\n\n\ndef dirs():\n try:\n os.mkdir('temp')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'temp{PLATFORM_SEP}old_data{PLATFORM_SEP}')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'temp{PLATFORM_SEP}items')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'temp{PLATFORM_SEP}pics')\n except FileExistsError:\n pass\n\n try:\n os.mkdir(f'temp{PLATFORM_SEP}soup')\n except FileExistsError:\n pass\n\n\nif __name__ == '__main__':\n dirs()\n main()\n","repo_name":"AlexSG8/parser_malenkymir_ru","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19530,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"49728636676","text":"import tensorflow as tf\nimport flwr as fl\n\nimport pandas as pd\nimport numpy as np\nfrom data import load_data\nfrom model import Model\n\n\nimport argparse\n\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data()\nmodel = Model()\n\n\nclass StoreClient(fl.client.NumPyClient):\n def __init__(\n self,\n model=model,\n x_train=x_train,\n y_train=y_train,\n x_valid=x_valid,\n y_valid=y_valid,\n x_test=x_test,\n y_test=y_test,\n ) -> None:\n super().__init__()\n self.model = model.model\n self.x_train = x_train\n self.y_train = y_train\n self.x_valid = x_valid\n self.y_valid = y_valid\n self.x_test = x_test\n self.y_test = y_test\n\n def get_parameters(self, config):\n return self.model.get_weights()\n\n def fit(self, parameters, config):\n self.model.set_weights(parameters)\n early_stopping = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n patience=4,\n min_delta=100,\n restore_best_weights=True,\n )\n history = self.model.fit(\n self.x_train,\n self.y_train,\n batch_size=100,\n epochs=100,\n callbacks=[early_stopping],\n validation_data=(x_valid, y_valid))\n\n # Return updated model parameters and results\n parameters_prime = self.model.get_weights()\n num_examples_train = len(self.x_train)\n results = {\n \"loss\": history.history[\"loss\"][0],\n \"mean_squared_error\": history.history[\"mean_squared_error\"][0],\n \"val_loss\": history.history[\"val_loss\"][0],\n \"val_mean_squared_error\": history.history[\"val_mean_squared_error\"][0],\n }\n return parameters_prime, num_examples_train, results\n\n def evaluate(self, parameters, config):\n self.model.set_weights(parameters)\n loss, mean_squared_error, 
mean_absolute_error = self.model.evaluate(\n self.x_test, self.y_test\n )\n return (\n loss,\n len(self.x_test),\n {\n \"mean_squared_error\": float(mean_squared_error),\n \"mean_absolute_error\": mean_absolute_error,\n },\n )\n\n\n\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\"--client\", type=int, choices=range(1, 46), required=True)\n args = parser.parse_args()\n print(str(args.client))\n\n X_train, y_train, X_valid, y_valid, X_test, y_test = load_data(str(args.client))\n\n model = Model()\n\n fl.client.start_numpy_client(\n server_address=\"127.0.0.1:8080\",\n client=StoreClient(model, X_train, y_train, X_valid, y_valid, X_test, y_test),\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dljw/federated-learning","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35787232857","text":"# Fertilizer namespace\nfrom .common_namespace import *\nfrom .crop_namespace import *\nfrom .place_namespace import *\n\n# define the namespace and classes:\nfertilizerNS = Namespace(common_uri + \"fertilizer#\")\nfertilizerClass = fertilizerNS[\"Fertilizer\"]\n\n# define the properties:\nappliedAt = fertilizerNS['applied_at']\nareaApplied = fertilizerNS['fertilizer_area'] # literal\naverageQtyApplied = fertilizerNS['average_qty_applied'] # literal\nureaQty = fertilizerNS['urea_qty'] # literal\nammosulQty = fertilizerNS['ammosul_qty'] # literal\nammophosQty = fertilizerNS['ammophos_qty'] # literal\ncompleteQty = fertilizerNS['complete_qty'] # literal\nothersQty = fertilizerNS['others_qty'] # literal\nareaHarvested = fertilizerNS['area_harvested'] # literal\nyearApplied = fertilizerNS['year_applied'] # literal\ncropApplied = fertilizerNS['crop_applied']\n\nschema_triples = [\n # class declarations\n (fertilizerClass, rdfType, owlClass),\n # (fertilizerClass, rdfsSubClassOf, objectClass),\n\n (appliedAt, rdfType, owlObjectProperty),\n (appliedAt, rdfsDomain, fertilizerClass),\n (appliedAt, rdfsRange, placeClass),\n\n (areaApplied, rdfType, owlDatatypeProperty),\n (areaApplied, rdfsDomain, fertilizerClass),\n (areaApplied, rdfsRange, xsdDecimal),\n\n (averageQtyApplied, rdfType, owlDatatypeProperty),\n (averageQtyApplied, rdfsDomain, fertilizerClass),\n (averageQtyApplied, rdfsRange, xsdDecimal),\n\n (ureaQty, rdfType, owlDatatypeProperty),\n (ureaQty, rdfsDomain, fertilizerClass),\n (ureaQty, rdfsRange, xsdDecimal),\n\n (ammosulQty, rdfType, owlDatatypeProperty),\n (ammosulQty, rdfsDomain, fertilizerClass),\n (ammosulQty, rdfsRange, xsdDecimal),\n\n (ammophosQty, rdfType, owlDatatypeProperty),\n (ammophosQty, rdfsDomain, fertilizerClass),\n (ammophosQty, rdfsRange, xsdDecimal),\n\n (completeQty, rdfType, owlDatatypeProperty),\n (completeQty, rdfsDomain, fertilizerClass),\n (completeQty, rdfsRange, xsdDecimal),\n\n (othersQty, rdfType, owlDatatypeProperty),\n (othersQty, rdfsDomain, fertilizerClass),\n (othersQty, rdfsRange, xsdDecimal),\n\n (areaHarvested, rdfType, owlDatatypeProperty),\n (areaHarvested, rdfsDomain, fertilizerClass),\n (areaHarvested, rdfsRange, xsdDecimal),\n\n (yearApplied, rdfType, owlDatatypeProperty),\n (yearApplied, rdfsDomain, fertilizerClass),\n (yearApplied, rdfsRange, xsdYear),\n\n (cropApplied, rdfType, owlObjectProperty),\n (cropApplied, rdfsDomain, fertilizerClass),\n (cropApplied, rdfsRange, cropClass),\n\n]\n\nprefixes = [\n (fertilizerNS, 
\"fertilizer\"),\n]","repo_name":"edgarfelizmenio/semanticopenstat","sub_path":"namespaces/fertilizer_namespace.py","file_name":"fertilizer_namespace.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74622327049","text":"#!/usr/bin/env python3\r\n#coding:utf-8\r\n\r\n__author__ = 'xmxoxo'\r\n\r\n# TX中文词向量数据处理\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport argparse\r\n\r\n#import gensim\r\n#from gensim.models import KeyedVectors\r\n\r\n\r\n'''\r\n数据转换,把向量库转存为单独的文件分别保存\r\n词典文件 keywords.txt 和\r\n向量文件 vector.npy\r\n'''\r\ndef datprocess(filename, encoding='utf-8', outpath='./', dofilter=0, topn=0):\r\n try:\r\n i, f, total = 0, 0, 0\r\n chars, vectors = [], []\r\n print('开始转换向量,请稍等...')\r\n\r\n for line in open(filename, \"r\", encoding=encoding):\r\n i += 1\r\n # 跳过第1行\r\n if i==1:\r\n total = int(line.split(' ')[0])\r\n print('总记录数:%d' % total)\r\n continue;\r\n \r\n x = line.split(' ')\r\n # -----------------------------------------\r\n # 在这里对词语进行过滤\r\n if dofilter:\r\n if wordfilter(x[0]):\r\n f +=1 # 记录被过滤掉多少行\r\n #print('\\n第%d行,被过滤:%s' % (i, x[0]))\r\n continue;\r\n # -----------------------------------------\r\n \r\n chars.append(x[0])\r\n v = [float(i) for i in x[1:]]\r\n vectors.append(v)\r\n if i % 500:\r\n print('\\r[进度:%5.2f%% 已过滤:%5d] ��%-7d行: %-20s' % ((i*100/total), f, i, x[0][:18]) , end='' )\r\n #print(x[0], v[:10])\r\n if topn>0:\r\n if i >= topn:break;\r\n\r\n # 自动创建目录\r\n if not os.path.exists(outpath):\r\n os.mkdir(outpath)\r\n \r\n print()\r\n # 保存字典\r\n print('正在保存字典文件...')\r\n savetofile('\\n'.join(chars), os.path.join(outpath, 'keywords.txt'))\r\n # 保存向量\r\n print('正在保存向量文件...')\r\n np.save(os.path.join(outpath, 'vector.npy'), np.array(vectors))\r\n print('转换完成, 处理%d行,过滤%d行,输出%d行。' % (i, f, i-f) )\r\n \r\n except Exception as e:\r\n print(e)\r\n return None\r\n\r\n# 读入文件\r\ndef readtxtfile(fname, encoding='utf-8'):\r\n try:\r\n with open(fname, 'r', encoding=encoding) as f: \r\n data=f.read()\r\n return data\r\n except Exception as e:\r\n return ''\r\n \r\n\r\n# 保存文本信息到文件\r\ndef savetofile(txt, filename, encoding='utf-8'):\r\n pass\r\n try:\r\n with open(filename,'w', encoding=encoding) as f: \r\n f.write(str(txt))\r\n return 1\r\n except :\r\n return 0\r\n\r\n# 词典数据分析\r\ndef key_analyze ():\r\n # 加载数据\r\n filename = 'keywords.txt'\r\n #txt = readtxtfile(filename)\r\n dat = readtxtfile(filename).splitlines()\r\n # 计算长度\r\n #lstLength = [len(x) fo r x in dat]\r\n lstLength = map(len, dat)\r\n\r\n\r\n# 按字典进行批量替换\r\ndef replace_dict (txt, dictKey, isreg=0):\r\n import re\r\n for k,v in dictKey.items():\r\n if isreg:\r\n txt = re.sub(k, v, txt)\r\n else:\r\n txt = txt.replace(k,v)\r\n return txt\r\n\r\n# 正则表达式批量判断\r\n# TF=0 表示任意一个不匹配就返回 True\r\n# TF=1 表示任意一个匹配了就返回 True\r\ndef IsMatch (txt, lstKey, tf=1):\r\n import re\r\n r = None\r\n ret = False\r\n for k in lstKey:\r\n r = re.match(k,txt, re.U|re.I)\r\n # bool(r) = True 表示不匹配\r\n #if bool(r) != tf: \r\n #if (tf & (not bool(r))) or (not tf and bool(r)):\r\n if (tf==1 and r!=None) or (tf==0 and r==None):\r\n ret = True\r\n break\r\n return ret\r\n\r\n# 过滤\r\ndef wordfilter (txt):\r\n '''\r\n 以下过滤:\r\n\t* 纯数字\r\n\t* 带英文标点符号,且长度大于1的\r\n\t\t标点: [,;&:]\r\n\t* 重复3个以上\r\n\t\t例如:好好好, 天天天, 的的的\r\n\t* 纯英文\r\n * 英文字母+英文标点\r\n * 中文标点: [。,、“”?!:.()~?-……)(/]\r\n '''\r\n lstKey = [\r\n r'^\\d+$', # 纯数字\r\n r'^.+[\\-\\.,;&:\\'].*$', # 带英文标点符号,且长度大于1的\r\n r'^(.)\\1{2,}$', # 重复3个以上\r\n r'^([a-z])+$', # 纯英文字母\r\n 
# read a text file\r\ndef readtxtfile(fname, encoding='utf-8'):\r\n    try:\r\n        with open(fname, 'r', encoding=encoding) as f: \r\n            data=f.read()\r\n        return data\r\n    except Exception as e:\r\n        return ''\r\n    \r\n\r\n# save text to a file\r\ndef savetofile(txt, filename, encoding='utf-8'):\r\n    pass\r\n    try:\r\n        with open(filename,'w', encoding=encoding) as f: \r\n            f.write(str(txt))\r\n        return 1\r\n    except :\r\n        return 0\r\n\r\n# vocabulary analysis\r\ndef key_analyze ():\r\n    # load the data\r\n    filename = 'keywords.txt'\r\n    #txt = readtxtfile(filename)\r\n    dat = readtxtfile(filename).splitlines()\r\n    # compute the lengths\r\n    #lstLength = [len(x) for x in dat]\r\n    lstLength = map(len, dat)\r\n\r\n\r\n# batch replacement driven by a dict\r\ndef replace_dict (txt, dictKey, isreg=0):\r\n    import re\r\n    for k,v in dictKey.items():\r\n        if isreg:\r\n            txt = re.sub(k, v, txt)\r\n        else:\r\n            txt = txt.replace(k,v)\r\n    return txt\r\n\r\n# batch regex test\r\n# tf=0: return True as soon as one pattern does not match\r\n# tf=1: return True as soon as one pattern matches\r\ndef IsMatch (txt, lstKey, tf=1):\r\n    import re\r\n    r = None\r\n    ret = False\r\n    for k in lstKey:\r\n        r = re.match(k,txt, re.U|re.I)\r\n        # r is not None when the pattern matched\r\n        #if bool(r) != tf: \r\n        #if (tf & (not bool(r))) or (not tf and bool(r)):\r\n        if (tf==1 and r!=None) or (tf==0 and r==None):\r\n            ret = True\r\n            break\r\n    return ret\r\n\r\n# filtering\r\ndef wordfilter (txt):\r\n    '''\r\n    Filter out the following:\r\n\t* digits only\r\n\t* entries containing ASCII punctuation and longer than 1 character\r\n\t\tpunctuation: [,;&:]\r\n\t* the same character repeated 3 or more times\r\n\t\te.g.: 好好好, 天天天, 的的的\r\n\t* ASCII letters only\r\n    * ASCII letters + ASCII punctuation\r\n    * Chinese punctuation: [。,、“”?!:.()~?-……)(/]\r\n    '''\r\n    lstKey = [\r\n        r'^\\d+$',   # digits only\r\n        r'^.+[\\-\\.,;&:\\'].*$',  # contains ASCII punctuation, longer than 1 character\r\n        r'^(.)\\1{2,}$',  # same character repeated 3 or more times\r\n        r'^([a-z])+$',  # ASCII letters only\r\n        r'^([a-z])[a-z\\-\\.,;&:]+$',  # ASCII letters + ASCII punctuation\r\n        r'^.+[。、“”?!:()~\\?\\-……)(/].*$',   # contains Chinese punctuation\r\n    ]\r\n    ret = IsMatch(txt, lstKey, tf=1)\r\n\r\n    return ret\r\n\r\n\r\ndef test ():\r\n    '''\r\n    '''\r\n    t = '涉及到哪些'\r\n    t = '23434'\r\n    t = 'strong'\r\n    t = 'jadfjdk-,1'\r\n\r\n    k = [r'^\\d+$']\r\n    #print(IsMatch(t, k, tf=1))\r\n    #print(IsMatch(t, k, tf=0))\r\n\r\n    print(wordfilter(t))\r\n    \r\ndef main ():\r\n    pass\r\n    deffile='./dat/Tencent_AILab_ChineseEmbedding.txt'\r\n    parser = argparse.ArgumentParser(description='腾讯词向量提取工具')\r\n    parser.add_argument('-datfile', default=deffile, type=str, help='数据文件名')\r\n    parser.add_argument('-outpath', default='./', type=str, help='输出目录')\r\n    parser.add_argument('-dofilter', default=0, type=int, help='是否过滤非中文,默认=0不过滤')\r\n    parser.add_argument('-topn', default=0, type=int, help='截取前N个向量,默认=0不截取')\r\n    args = parser.parse_args()\r\n    datfile = args.datfile\r\n    outpath = args.outpath\r\n    dofilter = args.dofilter\r\n    topn = args.topn\r\n    print('运行参数:', args)\r\n\r\n    datprocess(filename=datfile, outpath=outpath, dofilter=dofilter, topn=topn)\r\n    \r\n\r\nif __name__ == '__main__':\r\n    pass\r\n    main()\r\n","repo_name":"xmxoxo/Tencent_ChineseEmbedding_Process","sub_path":"dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"} +{"seq_id":"3577326094","text":"import os\r\nimport torch\r\n\r\n\r\nclass Config:\r\n    def __init__(self):\r\n        self.data_path = \"./squad\"\r\n        self.train_path = os.path.join(self.data_path, \"train-v1.1.json\")\r\n        self.dev_path = os.path.join(self.data_path, \"dev-v1.1.json\")\r\n        self.model_path = \"./model\"\r\n        self.hidden_size = 768\r\n        self.max_seq_length = 512\r\n        self.batch_size = 16\r\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n","repo_name":"nicoyang-21/patrnt_mrc_raw","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74667892489","text":"# algorithm AC1 actually changes the size of nums\n# this AC2 algorithm does not change the size of the list\nclass Solution:\n    def removeElement(self, nums, val):\n        \"\"\"\n        :type nums: List[int]\n        :type val: int\n        :rtype: int\n        \"\"\"\n        length = len(nums)\n        nlength = 0\n        for index in range(length):\n            if nums[index] != val:\n                nums[nlength] = nums[index]\n                nlength += 1\n\n        return nlength
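\n\n# Example usage (illustrative sketch, not part of the original file):\n#   nums = [3, 2, 2, 3]\n#   k = Solution().removeElement(nums, 3)   # k == 2 and nums[:k] == [2, 2]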
\")\nshowBoard()\n\nwhile wrongGuesses > 0 and winGame == False: \n\n guess = input(\"Guess a letter: \").upper()\n alreadyGuessed = guess_list.__contains__(guess)\n print(alreadyGuessed)\n if alreadyGuessed:\n print(\"You've already guessed this letter\")\n else: \n if checkGuess(guess):\n print(instances)\n if len(instances) > 1:\n print(\"Yes there are \" + str(len(instances)) + \" \" + guess)\n else:\n print(\"Yes there is a \" + guess)\n instances = []\n showBoard()\n check = wordBoard.__contains__(\"_\")\n if check == False:\n winGame = True\n else:\n wrongGuesses -=1\n print(\"I'm sorry but there is no letter \" + guess + \" in the word\")\n print(f\"You have {wrongGuesses} left. \")\n showBoard()\nif wrongGuesses == 0:\n print(\"You lose!!! The secret word was \" + word_guess) \nelse:\n print(\"Congratulations!!! You Won!!\")","repo_name":"Jruff1215/ASF200","sub_path":"week3/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10946887758","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom django.db.models import Count, Avg\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views import View\n\nfrom .models import Resources, Category, ResourcesTag, Review, Rating\nfrom apps.user.models import User\nfrom .form import PostResourceForm\nfrom .utils import generate_cat_count_list\n\ndef home_page(request):\n cnt = Resources.objects.all().count()\n act_use = User.objects.filter(is_active=True).count()\n res_per_cat = Resources.objects.values(\"cat_id__cat\").annotate(cnt=Count(\"cat_id\"))\n\n context = {\n 'cnt': cnt,\n 'act_use': act_use,\n 'res_per_cat': res_per_cat,\n }\n \n return render(request,\"resources/home.html\", context)\n\n\nclass ResourceDetailView(LoginRequiredMixin,View):\n max_viewed_resources = 5\n template_name = 'resources/resource_detail.html'\n\n def get(self, request, id):\n viewed_resources = request.session.get(\"viewed_resources\", [])\n res = Resources.objects.get(pk=id)\n viewed_resource = [id, res.title]\n\n if viewed_resource in viewed_resources:\n viewed_resources.remove(viewed_resource)\n viewed_resources.insert(0, viewed_resource)\n \n viewed_resources = viewed_resources[:self.max_viewed_resources]\n request.session[\"viewed_resources\"] = viewed_resources\n\n resource_tags = ResourcesTag.objects.filter(resources_id=res)\n reviews = Review.objects.filter(resources_id=res).count()\n average_rating = Rating.objects.filter(resources_id=res).aggregate(avg_rating=Avg(\"rate\"))[\"avg_rating\"]\n\n context = {\n 'res': res,\n 'resource_tags': resource_tags,\n 'reviews': reviews,\n 'average_rating': average_rating,\n }\n\n return render(request, self.template_name, context)\n\nclass ResourcePostView(LoginRequiredMixin, View):\n template_name = \"resources/resource_post.html\"\n\n def get(self, request):\n form = PostResourceForm()\n return render(\n request,\n self.template_name,\n {\"form\": form},\n )\n\n def post(self, request):\n form = PostResourceForm(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n new_resource = Resources.objects.create(**data)\n new_resource.user_id = User.objects.get(pk=1)\n new_resource.save()\n return redirect(\"http://127.0.0.1:8000/\")\n\n return render(\n request,\n self.template_name,\n {\"form\": form},\n 
)\n\n\nclass HomePage(TemplateView):\n template_name = 'home_page.html'","repo_name":"TimoGronau/resourceshares","sub_path":"apps/resources/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15769148724","text":"import core.helper as h\nimport time\nimport base64\nimport json\ntry:\n # Win32\n from msvcrt import getch\nexcept ImportError:\n # UNIX\n def getch():\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n return sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old)\n\nclass command:\n def __init__(self):\n self.name = \"keyboard\"\n self.description = \"Control device keyboard.\"\n self.type = \"applescript\"\n self.id = 115\n\n def run(self,session,cmd_data):\n h.info_info(\"Press Ctrl-C to stop.\")\n h.info_info(\"Device keyboard:\")\n while 1:\n key = getch()\n if key == chr(3):\n return \"\"\n payload = \"\"\"tell application \"System Events\"\n keystroke \\\"\"\"\"+key+\"\"\"\\\"\n end tell\"\"\"\n session.send_command({\"cmd\":\"applescript\",\"args\":payload})\n return \"\"\n\n","repo_name":"fengjixuchui/mouse","sub_path":"core/commands/macos/keyboard_macos.py","file_name":"keyboard_macos.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"22368046812","text":"from re import S\nfrom unittest import result\n\n\nlist_1 =['name', 'age', '1','19']\ndef main(a):\n f =a[:len(a)//2]\n f =list(reversed(f))\n w =a[len(a)//2:]\n w =list(reversed(w))\n print(f+w) \n\n\ndef fibonacce(n):\n list =[]\n a = 0\n b = 1\n for i in range(n):\n c = a+b\n list.append(c)\n a = b\n b = c\n print(list)\n\n#fibonacce(15)\n\ndef addition ():\n nomber1 = int(input('chislo'))\n nomber2 = int(input('chislo'))\n summa = nomber1 + nomber2\n print(summa)\n# addition()\n\ndef calc ():\n nomber1 = int(input('chislo'))\n nomber2 = int(input('chislo'))\n summa1 = nomber1 - nomber2\n print(summa1)\n# calc()\n\ndef calcaddintion ():\n calc()\n addition()\n\n#calcaddintion()\n\n'''\ndef fail (a):\n # a =input()\n with open(a ,'w') as b:\n b.write('qwerty')\nfail('pythim.py')\n'''\nimport random\ndef gen_number():\n list =['1','4','5','7','9','0']\n a = ['0444']\n for i in range(6):\n d =random.choice(list)\n a.append(d)\n number = ''.join(a)\n print(number)\n\n\n# a = '348597'\n\n# def digitize(n):\n# list =[]\n# for i in reversed(n):\n# list.append(i)\n# print(list)\n\n# digitize(a)\n\n# def main(n):\n# result = n+10\n# return result\n\n\n# print(main(20))\n\n\n\n# def my_func(*args, **kwargs):\n\n# print(kwargs) \n\n\n# my_func(fdfdf=374634)\n\n\ndef index(name, year, job, current_year=2022):\n age=current_year-year\n return f'Hello {name}! YA znau chto tebe {age} let. 
ty doljen pomenyat svou profeesion {job}'\n\n#print(index('Bruno', 2000, 'doctor'))\n\ndef add(n,b):\n    return n+b \n#print(add(10,20))\n \ndef substract(n,b):\n    return n-b\n#print(substract(20,30))\n\ndef multiply(m,k):\n    return m*k\n#print(multiply(20,30))\n\ndef divide(l,d):\n    return l/d\n#print(divide(20,30))\n\ndef collichestvo(b):\n    a =0\n    for i in b:\n        if i == ' ':\n            pass\n        else:\n            a+=1\n    return a \n#print(collichestvo('werty qwert')) \n\ndef direc (**kwargs):\n    return kwargs\n\n#print(direc(name='dosmart')) \n\ndef menu(food, drink):\n    with open ('menu.txt', 'w')as s:\n        s.write(f'{food}, {drink}')\nmenu('lagman', 'cola')\n\ndef file (n):\n    with open (n, 'w')as d:\n        d.write(' ')\n    return d\n(file('fhfjhjhjh'))\n\n'''\ndef function():\n    print( 'я главная функция')\n\n    def function_1():\n        print('я вложенная функция')\n    \n    function_1()\nfunction()\n'''\n'''\ndef dictionary(**kwargs):\n    keys = kwargs.keys()\n    values = kwargs.values()\n    \n    keys = tuple(keys)\n    values = tuple(values)\n    print(keys)\n    print(values)\nprint(dictionary(name='dos', age=14))\n'''\ndef simpe(d):\n    a =2\n    while d%a!=0:\n        a+=1\n    return d==a\n(simpe(3))\n\ndef txt(a,b):\n    ls =[]\n    ls.append(a)\n    ls.append(b)\n    return ls\n#print(txt('qwerty',4456))\n'''\ndef chislo(n):\n    if n==1:\n        for i in range(n):\n            print('1')\nchislo(1)\nelif n==10:\n    for i in range(n):\n        print('10')\nchislo(10)\n'''\ndef work (n, zp=5000):\n    return f'{n}: {zp}'\n#print(work('dos', zp=9000))\n\nimport random \ndef chislo (n):\n    ls =[]\n    for i in range (n):\n        ls.append(random.randint(1 ,9))\n    return ls\n#print(chislo(10))\n\n\ndef guess_number():\n    tries = 1\n    random_number = random.randint(0,10)\n    number = int(input('Выберите число в диапазоне от 0 до 10: '))\n    if number == random_number:\n        print('Вы выиграли!')\n    else:\n        while random_number != number:\n            number = int(input('Попробуйте еще раз! '))\n            tries += 1\n            if tries == 5:\n                print('Вы воспользовались всеми пятью шансами! 
Вы проиграли')\n                break\n            if random_number > number:\n                print('бери выше')\n            elif random_number < number:\n                print('бери ниже')\n        if number == random_number:\n            print('Вы выиграли!')\n\nguess_number()","repo_name":"zhoomartov/first_rep","sub_path":"def.py","file_name":"def.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6212552312","text":"import gi\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk\n\n\nclass Handler:\n    def onDestroy(self, *args):\n        Gtk.main_quit()\n\n    def b_clicked(self, button):\n        print(\"Hello World!\")\n\n    def q(self, *args):\n        print(\"خروج\")\n        Gtk.main_quit()\n        exit()\n    \n    \nbuilder = Gtk.Builder()\nbuilder.add_from_file(\"a.glade\")\nbuilder.connect_signals(Handler())\n\nwindow = builder.get_object(\"w\")\nwindow.show_all()\n\nGtk.main()\n","repo_name":"SaeedGhazi/AF","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32147781127","text":"input() # input is the function used when you want to \"INPUT!!!!\" something.\n        # If you run the program in this state,\n        # you can see that it does not finish and keeps waiting for the user's input.\n        # Put the cursor in the terminal below and press Enter and the program will exit.\n\ninput(\"입력해주세요 : \") # If you run this code, the message\n                   # \"입력해주세요 : \" appears in the terminal window and it waits for input.\n                   # That is, the value inside input's parentheses is\n                   # 'the prompt printed for the user while waiting for input'.\n                   # As before, press Enter while it runs and the program exits.\n\ninput(\"입력 : \") # So input prints the value in its parentheses and waits for the user's input.\n             # Once the user finishes typing (presses Enter), it RETURNS!!!!!!! that value.\n             # If the user types abcd!!\n             # the expression input(\"입력 : \") itself turns into abcd!!!\n\ninput(\"입력 : \")\n'abcd' # If you think back to return values, which we learned about with type, this code by itself is meaningless.\n\nprint(input(\"입력 : \")) # Try typing this code and you will understand.\n\n\n# However, input is most often used to store the entered value, like below.\na = input(\"a를 입력해주세요 : \") # The same form as name = '한승협'!!!\n\n","repo_name":"hina0510/1_python","sub_path":"실습/kgit(006)-input.py.py","file_name":"kgit(006)-input.py.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8419920580","text":"from herbstluftwm.types import Point\nfrom conftest import RawImage\nfrom conftest import HlwmBridge\nimport itertools\nimport pytest\n\nfont_pool = [\n    '-*-fixed-medium-r-*-*-13-*-*-*-*-*-*-*',\n    'Dejavu Sans:pixelsize=14:bold'\n]\n\n\n@pytest.mark.parametrize(\"xvfb\", [{'xrender': v} for v in [True, False]], indirect=True)\n@pytest.mark.parametrize(\"hlwm_process\", [{'transparency': v} for v in [True, False]], indirect=True)\ndef test_window_border_plain(hlwm, x11):\n    color = (0x9f, 0xbc, 0x12)\n    bw = 5  # border width\n    handle, _ = x11.create_client()\n    hlwm.attr.theme.color = RawImage.rgb2string(color)\n    hlwm.attr.theme.border_width = bw\n    img = x11.decoration_screenshot(handle)\n    assert img.pixel(0, 0) == color\n    expected_count = 2 * bw * img.width  # horizontal border line\n    expected_count += 2 * bw * img.height  # vertical border\n    expected_count -= 4 * bw * bw  # we counted each twice\n    assert img.color_count(color) == expected_count\n\n\n@pytest.mark.parametrize(\"xvfb\", [{'xrender': v} for v in [True, False]], indirect=True)\n@pytest.mark.parametrize(\"hlwm_process\", [{'transparency': v} for v in [True, False]], indirect=True)\ndef test_window_border_inner(hlwm, x11):\n    color = (239, 2, 190)\n    bw = 5  # border width\n    inner_color = (48, 225, 26)\n    inner_bw = 2\n    hlwm.attr.theme.color = RawImage.rgb2string(color)\n    
hlwm.attr.theme.border_width = bw\n hlwm.attr.theme.inner_color = RawImage.rgb2string(inner_color)\n hlwm.attr.theme.inner_width = inner_bw\n handle, _ = x11.create_client()\n img = x11.decoration_screenshot(handle)\n # we check the inner border color in the upper left corner\n for x in range(0, bw):\n for y in range(0, bw):\n threshold = bw - inner_bw\n expected_color = inner_color if x >= threshold and y >= threshold else color\n assert img.pixel(x, y) == expected_color\n\n\n@pytest.mark.parametrize(\"xvfb\", [{'xrender': v} for v in [True, False]], indirect=True)\n@pytest.mark.parametrize(\"hlwm_process\", [{'transparency': v} for v in [True, False]], indirect=True)\ndef test_window_border_outer(hlwm, x11):\n color = (239, 2, 190)\n bw = 6 # border width\n outer_color = (48, 225, 26)\n outer_bw = 3\n hlwm.attr.theme.color = RawImage.rgb2string(color)\n hlwm.attr.theme.border_width = bw\n hlwm.attr.theme.outer_color = RawImage.rgb2string(outer_color)\n hlwm.attr.theme.outer_width = outer_bw\n handle, _ = x11.create_client()\n img = x11.decoration_screenshot(handle)\n # check the upper left corner\n for x in range(0, bw):\n for y in range(0, bw):\n threshold = outer_bw\n expected_color = outer_color if x < threshold or y < threshold else color\n assert img.pixel(x, y) == expected_color\n\n\ndef screenshot_with_title(x11, win_handle, title):\n \"\"\" set the win_handle's window title and then\n take a screenshot\n \"\"\"\n x11.set_window_title(win_handle, title)\n # double check that hlwm has updated the client's title:\n winid = x11.winid_str(win_handle)\n hlwm = HlwmBridge.INSTANCE\n assert hlwm.attr.clients[winid].title() == title\n # then, take the screenshot:\n return x11.decoration_screenshot(win_handle)\n\n\n@pytest.mark.parametrize(\"font\", font_pool)\ndef test_title_every_letter_is_drawn(hlwm, x11, font):\n \"\"\"the number of letters has some effect\"\"\"\n font_color = (255, 0, 0) # a color available everywhere\n hlwm.attr.theme.color = 'black'\n hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)\n hlwm.attr.theme.title_height = 14\n hlwm.attr.theme.padding_top = 4\n hlwm.attr.theme.title_font = font\n handle, _ = x11.create_client()\n\n # set the window title to some word\n count1 = screenshot_with_title(x11, handle, 'test').color_count(font_color)\n\n # duplicate the word in the title\n count2 = screenshot_with_title(x11, handle, 'test test').color_count(font_color)\n\n # then the number of pixels of the font_color should have doubled:\n assert count1 != 0\n assert count1 * 2 == count2\n\n\n@pytest.mark.parametrize(\"font\", font_pool)\ndef test_title_different_letters_are_drawn(hlwm, x11, font):\n \"\"\"changing letters changes the image\"\"\"\n font_color = (255, 0, 0) # a color available everywhere\n hlwm.attr.theme.color = 'black'\n hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)\n hlwm.attr.theme.title_height = 14\n hlwm.attr.theme.padding_top = 4\n hlwm.attr.theme.title_font = font\n handle, _ = x11.create_client()\n\n # put some characters in the title that take only few pixels\n count1 = screenshot_with_title(x11, handle, ',.b').color_count(font_color)\n\n # alter characters to others taking more pixels\n count2 = screenshot_with_title(x11, handle, ';:B').color_count(font_color)\n\n # then the number of pixels should have increased\n assert count1 < count2\n\n\n@pytest.mark.parametrize(\"font\", font_pool)\n@pytest.mark.parametrize(\"ellipsis\", [\n '',\n '...',\n '…',\n 10 * 'a_very_long_string_that_takes_all_the_available_space',\n])\ndef 
test_title_does_not_exceed_width(hlwm, x11, font, ellipsis):\n font_color = (255, 0, 0) # a color available everywhere\n bw = 30\n hlwm.attr.theme.color = 'black'\n hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)\n hlwm.attr.theme.title_height = 14\n hlwm.attr.theme.padding_top = 0\n hlwm.attr.theme.padding_left = 0\n hlwm.attr.theme.padding_right = 0\n hlwm.attr.theme.border_width = bw\n hlwm.attr.theme.title_font = font\n hlwm.attr.settings.ellipsis = ellipsis\n handle, winid = x11.create_client()\n\n # set a title that is too wide to be displayed in its entirety:\n w = hlwm.attr.clients[winid].decoration_geometry().width\n\n if font[0] != '-':\n three_bytes_per_glyph = 'ヘールブストルフト'\n assert len(three_bytes_per_glyph.encode('UTF-8')) == 3 * len(three_bytes_per_glyph)\n # for xft fonts, also test utf8 window titles\n utf8titles = [\n w * '♥',\n (w // 3) * 'äüöß',\n (w // len(three_bytes_per_glyph)) * three_bytes_per_glyph,\n ]\n else:\n # for plain X fonts, it does not seem to work in tox/pytest\n # (but strangely, it works in a manual Xephyr session)\n utf8titles = []\n\n for title in [w * '=', w * '|'] + utf8titles:\n img = screenshot_with_title(x11, handle, title)\n # verify that the title does not span too wide to the\n # left or to the right:\n # find leftmost non-black pixel:\n leftmost_font_x = None\n for x in range(0, w):\n for y in range(0, 14): # only verify top `title_height`-many pixels\n if img.pixel(x, y) != (0, 0, 0):\n leftmost_font_x = x\n break\n if leftmost_font_x is not None:\n break\n # find rightmost non-black pixel:\n rightmost_font_x = None\n for x in range(w - 1, 0, -1):\n for y in range(0, 14): # only verify top `title_height`-many pixels\n if img.pixel(x, y) != (0, 0, 0):\n rightmost_font_x = x\n break\n if rightmost_font_x is not None:\n break\n\n assert leftmost_font_x >= bw\n assert rightmost_font_x < bw + hlwm.attr.clients[winid].content_geometry().width\n\n\n@pytest.mark.parametrize(\"font\", font_pool)\ndef test_title_ellipsis_is_used(hlwm, x11, font):\n font_color = (255, 0, 0) # a color available everywhere\n bw = 30\n hlwm.attr.theme.color = 'black'\n hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)\n hlwm.attr.theme.title_height = 14\n hlwm.attr.theme.border_width = bw\n hlwm.attr.theme.title_font = font\n hlwm.attr.settings.ellipsis = 'abc'\n\n handle, winid = x11.create_client()\n assert screenshot_with_title(x11, handle, ' ').color_count(font_color) == 0\n # set a title that is too wide to be displayed in its entirety:\n w = hlwm.attr.clients[winid].decoration_geometry().width\n count1 = screenshot_with_title(x11, handle, w * ' ').color_count(font_color)\n assert count1 > 0\n hlwm.attr.settings.ellipsis = 'abcabc'\n count2 = screenshot_with_title(x11, handle, w * ' ').color_count(font_color)\n assert count2 > 0\n assert count2 == count1 * 2\n\n\n@pytest.mark.parametrize(\"frame_bg_transparent\", ['on', 'off'])\ndef test_frame_bg_transparent(hlwm, x11, frame_bg_transparent):\n hlwm.attr.settings.show_frame_decorations = 'all'\n hlwm.attr.settings.frame_gap = 24 # should not matter\n hlwm.attr.settings.frame_border_width = 0\n hlwm.attr.settings.frame_bg_active_color = '#ef0000'\n hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent\n tw = 8\n hlwm.attr.settings.frame_transparent_width = tw\n\n [frame_win] = x11.get_hlwm_frames()\n img = x11.screenshot(frame_win)\n w = img.width\n h = img.height\n\n for x, y in [(2, 2), (4, 2), (2, 8), (3, 4), (7, 7), (w - 1, h - 1), (w - tw, h - tw)]:\n assert img.pixel(x, y) == 
(0xef, 0, 0), \\\n f\"pixel at {x}, {y}\"\n\n # if there is a hole in the frame decoration, it seems that black is used\n # (either as a default value or because that's the color of the root window)\n color_expected = (0, 0, 0) if frame_bg_transparent == 'on' else (0xef, 0, 0)\n for x, y in [(tw, tw), (tw, tw + 2), (w - tw - 1, h - tw - 1), (50, h - tw - 1), (w // 2, h // 2)]:\n assert img.pixel(x, y) == color_expected, \\\n f\"pixel at {x}, {y}\"\n\n\n@pytest.mark.parametrize(\"frame_bg_transparent\", ['on', 'off'])\ndef test_frame_holes_for_tiled_client(hlwm, x11, frame_bg_transparent):\n hlwm.attr.settings.show_frame_decorations = 'all'\n hlwm.attr.settings.frame_bg_active_color = '#efcd32'\n hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent\n hlwm.attr.settings.frame_transparent_width = 8\n\n def expect_frame_bg_color(winid, expected_color):\n img = x11.screenshot(frame_win)\n w = img.width\n h = img.height\n for x, y in [(0, 0), (0, h - 1), (w - 1, 0), (w - 1, h - 1)]:\n assert img.pixel(x, y) == expected_color, \\\n f\"pixel at {x}, {y}\"\n\n [frame_win] = x11.get_hlwm_frames()\n expect_frame_bg_color(frame_win, (0xef, 0xcd, 0x32))\n\n x11.create_client()\n\n # one big tiled client should hide all of the frames bg color:\n expect_frame_bg_color(frame_win, (0, 0, 0))\n\n\n@pytest.mark.parametrize(\"frame_bg_transparent\", ['on', 'off'])\ndef test_frame_holes_for_pseudotiled_client(hlwm, x11, frame_bg_transparent):\n hlwm.attr.settings.show_frame_decorations = 'all'\n bgcol = (0xef, 0xcd, 0x32)\n hlwm.attr.settings.frame_bg_active_color = RawImage.rgb2string(bgcol)\n hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent\n hlwm.attr.settings.frame_transparent_width = 8\n\n [frame_win] = x11.get_hlwm_frames()\n geo = frame_win.get_geometry()\n w = geo.width\n h = geo.height\n\n # create a pseudotiled client that is very wide but not very high:\n winhandle, winid = x11.create_client(geometry=(0, 0, w + 10, h // 3 - 10))\n hlwm.attr.clients[winid].pseudotile = 'on'\n\n img = x11.screenshot(frame_win)\n assert (img.width, img.height) == (w, h)\n\n # the frame is visible on the top and bottom\n img.pixel(0, 0) == bgcol\n img.pixel(0, h - 1) == bgcol\n img.pixel(w // 2, 0) == bgcol\n img.pixel(w // 2, h - 1) == bgcol\n\n # but the frame is not visible on the left and right\n black = (0, 0, 0)\n img.pixel(0, h // 2) == black\n img.pixel(w - 1, h // 2) == black\n img.pixel(w // 2, h // 2) == black\n\n\n@pytest.mark.parametrize(\"method\", ['tab_*-attributes', 'other scheme'])\n@pytest.mark.parametrize(\"running_clients_num\", [3])\ndef test_decoration_tab_colors(hlwm, x11, method, running_clients, running_clients_num):\n active_color = (200, 23, 0) # something unique\n normal_color = (23, 200, 0) # something unique\n hlwm.attr.theme.active.color = RawImage.rgb2string(active_color)\n hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)\n if method == 'tab_*-attributes':\n hlwm.attr.theme.active.tab_color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.active.tab_title_color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.active.tab_outer_color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.active.tab_outer_width = 1\n if method == 'other scheme':\n hlwm.attr.theme.normal.color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)\n\n hlwm.attr.theme.title_height = 20\n hlwm.call(['set_layout', 'max'])\n # split twice to make tab area smaller and screenshots faster :-)\n 
hlwm.call(['split', 'bottom'])\n hlwm.call(['split', 'bottom'])\n\n winhandle = x11.window(hlwm.attr.clients.focus.winid())\n img = x11.decoration_screenshot(winhandle)\n color_count = img.color_count_dict()\n # we have three tabs, and one of them should have the active color:\n assert color_count[active_color] * (running_clients_num - 1) == color_count[normal_color]\n\n # if we disable tabs, then the 'normal_color' should disappear:\n hlwm.attr.settings.tabbed_max = False\n winhandle = x11.window(hlwm.attr.clients.focus.winid())\n img = x11.decoration_screenshot(winhandle)\n new_color_count = img.color_count_dict()\n assert normal_color not in new_color_count\n assert new_color_count[active_color] == running_clients_num * color_count[active_color]\n\n\n@pytest.mark.parametrize(\"running_clients_num\", [3])\ndef test_decoration_tab_urgent(hlwm, x11, running_clients, running_clients_num):\n active_color = (200, 23, 0) # something unique\n normal_color = (23, 200, 0) # something unique\n urgent_color = (17, 2, 189) # something unique\n hlwm.attr.theme.active.color = RawImage.rgb2string(active_color)\n hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)\n hlwm.attr.theme.normal.color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)\n hlwm.attr.theme.urgent.color = RawImage.rgb2string(urgent_color)\n hlwm.attr.theme.urgent.title_color = RawImage.rgb2string(urgent_color)\n\n hlwm.attr.theme.title_height = 20\n hlwm.call(['load', '(clients max:0 {})'.format(' '.join(running_clients))])\n # split twice to make tab area smaller and screenshots faster :-)\n hlwm.call(['split', 'bottom'])\n hlwm.call(['split', 'bottom'])\n\n winhandle = x11.window(running_clients[0])\n img = x11.decoration_screenshot(winhandle)\n color_count = img.color_count_dict()\n assert urgent_color not in color_count\n\n # make one of the unfocused tabs urgent\n x11.make_window_urgent(x11.window(running_clients[2]))\n img = x11.decoration_screenshot(winhandle)\n new_color_count = img.color_count_dict()\n assert urgent_color in new_color_count\n # there is one 'normal' and one 'urgent' tab, so the colors should\n # appear similarly often:\n assert new_color_count[urgent_color] == new_color_count[normal_color]\n assert new_color_count[normal_color] == color_count[normal_color] / 2\n\n\ndef test_decoration_tab_title_update(hlwm, x11):\n text_color = (212, 189, 140)\n hlwm.attr.theme.title_color = RawImage.rgb2string(text_color)\n hlwm.attr.theme.title_height = 20\n hlwm.call(['set_layout', 'max'])\n # split twice to make tab area smaller and screenshots faster :-)\n hlwm.call(['split', 'bottom'])\n hlwm.call(['split', 'bottom'])\n\n count = 5\n win_handles = [x11.create_client()[0] for _ in range(0, count)]\n\n # empty all window titles:\n for wh in win_handles:\n x11.set_window_title(wh, '')\n\n # focus handle 0:\n hlwm.call(['jumpto', x11.winid_str(win_handles[0])])\n\n # take a screenshot, it should not contain the text_color:\n assert x11.decoration_screenshot(win_handles[0]).color_count(text_color) == 0\n\n # change the title of an unfocused window:\n x11.set_window_title(win_handles[2], 'SOMETHING')\n # this change should now be visible in the tab bar, at least 5 pixels\n # should have this color now:\n assert x11.decoration_screenshot(win_handles[0]).color_count(text_color) > 5\n\n\n@pytest.mark.parametrize(\"running_clients_num\", [4])\ndef test_decoration_click_changes_tab(hlwm, mouse, running_clients, running_clients_num):\n 
hlwm.call(['load', '(clients max:0 {})'.format(' '.join(running_clients))])\n hlwm.attr.settings.tabbed_max = True\n hlwm.attr.theme.title_height = 10\n\n geo = hlwm.attr.clients.focus.decoration_geometry()\n tabbar_top_left = geo.topleft()\n tabbar_bottom_right = geo.topleft() + Point(geo.width, int(hlwm.attr.theme.title_height()))\n for idx in reversed(range(0, running_clients_num)):\n # pick a point between top left corner of the title bar\n # and the bottom right corner of the title bar:\n # the extra 0.5 makes that we click in the middle of the tab\n ratio = (idx + 0.5) / running_clients_num\n cursor = tabbar_top_left * (1 - ratio) + tabbar_bottom_right * ratio\n mouse.move_to(cursor.x, cursor.y)\n mouse.click('1')\n\n assert hlwm.attr.clients.focus.winid() == running_clients[idx]\n\n\ndef test_decoration_click_into_window_does_not_change_tab(hlwm, mouse):\n wins = hlwm.create_clients(2)\n hlwm.call(['load', '(clients max:1 {})'.format(' '.join(wins))])\n hlwm.attr.settings.tabbed_max = True\n hlwm.attr.theme.title_height = 10\n\n assert hlwm.attr.clients.focus.winid() == wins[1]\n\n # move into the window and click:\n mouse.move_into(wins[0], x=4, y=4)\n mouse.click('1')\n\n # this does not change the focus:\n assert hlwm.attr.clients.focus.winid() == wins[1]\n\n # to double check:\n # if the cursor was 8px further up, the click\n # however would change the tab\n mouse.move_relative(0, -8)\n mouse.click('1')\n\n assert hlwm.attr.clients.focus.winid() == wins[0]\n\n\ndef test_textalign_completion(hlwm):\n \"\"\"Test the TextAlign converter\"\"\"\n assert hlwm.complete(['attr', 'theme.title_align']) \\\n == sorted(['left', 'right', 'center'])\n for k in hlwm.complete(['attr', 'theme.title_align']):\n hlwm.attr.theme.title_align = k\n assert hlwm.attr.theme.title_align() == k\n\n\ndef test_title_position_remains(hlwm, x11):\n active_color = (212, 189, 140)\n normal_color = (221, 198, 104)\n hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)\n hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)\n hlwm.attr.settings.tabbed_max = True\n hlwm.attr.theme.title_height = 10\n hlwm.attr.theme.outer_width = 3\n hlwm.attr.tags.focus.tiling.focused_frame.algorithm = 'max'\n\n handle1, win1 = x11.create_client()\n x11.set_window_title(handle1, 'client 1')\n handle2, win2 = x11.create_client()\n x11.set_window_title(handle1, 'client 2')\n for align in ['left', 'center', 'right']:\n hlwm.attr.theme.title_align = align\n hlwm.call(['jumpto', win1])\n focus1 = x11.decoration_screenshot(handle1)\n hlwm.call(['jumpto', win2])\n focus2 = x11.decoration_screenshot(handle2)\n assert focus1.height == focus2.height\n assert focus1.width == focus2.width\n titlebar_height = 10\n for x, y in itertools.product(range(0, focus1.width), range(0, titlebar_height)):\n assert (focus1.pixel(x, y) == active_color) == (focus2.pixel(x, y) == normal_color), \\\n f'mismatch at pixel ({x}, {y})'\n assert (focus1.pixel(x, y) == normal_color) == (focus2.pixel(x, y) == active_color), \\\n f'mismatch at pixel ({x}, {y})'\n\n\n@pytest.mark.parametrize(\"client_count\", [1, 2])\ndef test_decoration_title_align(hlwm, x11, client_count):\n \"\"\"test the title_align attribute,\n by computing the 'average' position of the title\n \"\"\"\n text_color = (212, 189, 140)\n hlwm.attr.theme.title_color = RawImage.rgb2string(text_color)\n hlwm.attr.settings.tabbed_max = True\n hlwm.attr.theme.title_height = 10\n\n win_handle, winid = x11.create_client()\n 
hlwm.attr.tags.focus.tiling.focused_frame.algorithm = 'max'\n while hlwm.attr.tags.focus.client_count() < client_count:\n x11.create_client()\n\n assert hlwm.attr.clients.focus.winid() == winid\n x11.set_window_title(win_handle, '-')\n\n # compute the 'average' title position\n align_to_title_pos = {}\n for align in ['left', 'right', 'center']:\n hlwm.attr.theme.title_align = align\n img = x11.decoration_screenshot(win_handle)\n point_sum = Point(0, 0)\n point_count = 0\n for x, y in itertools.product(range(0, img.width), range(0, img.height)):\n if img.pixel(x, y) == text_color:\n point_sum.x += x\n point_sum.y += y\n point_count += 1\n # compute the average point:\n align_to_title_pos[align] = point_sum // point_count\n\n # all titles should be on the same height:\n assert align_to_title_pos['left'].y == align_to_title_pos['center'].y\n assert align_to_title_pos['center'].y == align_to_title_pos['right'].y\n\n # the x coordinate should be different by at least this:\n # the width of the decoration, divided by the number of tabs\n # and divided by roughly 3 :-)\n x_diff = hlwm.attr.clients.focus.decoration_geometry().width / client_count / 3\n assert align_to_title_pos['left'].x + x_diff < align_to_title_pos['center'].x\n assert align_to_title_pos['center'].x + x_diff < align_to_title_pos['right'].x\n","repo_name":"herbstluftwm/herbstluftwm","sub_path":"tests/test_decorations.py","file_name":"test_decorations.py","file_ext":"py","file_size_in_byte":21560,"program_lang":"python","lang":"en","doc_type":"code","stars":1053,"dataset":"github-code","pt":"16"} +{"seq_id":"13628675370","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass RedshiftToS3Transfer(BaseOperator):\n \"\"\"\n Executes an UNLOAD command to s3 as a CSV with headers\n\n :param schema: reference to a specific schema in redshift database\n :type schema: str\n :param table: reference to a specific table in redshift database\n :type table: str\n :param s3_bucket: reference to a specific S3 bucket\n :type s3_bucket: str\n :param s3_key: reference to a specific S3 key\n :type s3_key: str\n :param redshift_conn_id: reference to a specific redshift database\n :type redshift_conn_id: str\n :param aws_conn_id: reference to a specific S3 connection\n :type aws_conn_id: str\n :param verify: Whether or not to verify SSL certificates for S3 connection.\n By default SSL certificates are verified.\n You can provide the following values:\n\n - ``False``: do not validate SSL certificates. 
SSL will still be used\n             (unless use_ssl is False), but SSL certificates will not be\n             verified.\n         - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.\n             You can specify this argument if you want to use a different\n             CA cert bundle than the one used by botocore.\n    :type verify: bool or str\n    :param unload_options: reference to a list of UNLOAD options\n    :type unload_options: list\n    :param autocommit: If set to True it will automatically commit the UNLOAD statement.\n        Otherwise it will be committed right before the redshift connection gets closed.\n    :type autocommit: bool\n    :param include_header: If set to True the s3 file contains the header columns.\n    :type include_header: bool\n    \"\"\"\n\n    template_fields = ()\n    template_ext = ()\n    ui_color = '#ededed'\n\n    @apply_defaults\n    def __init__(  # pylint: disable=too-many-arguments\n            self,\n            schema,\n            table,\n            s3_bucket,\n            s3_key,\n            redshift_conn_id='redshift_default',\n            aws_conn_id='aws_default',\n            verify=None,\n            unload_options=tuple(),\n            autocommit=False,\n            include_header=False,\n            *args, **kwargs):\n        super(RedshiftToS3Transfer, self).__init__(*args, **kwargs)\n        self.schema = schema\n        self.table = table\n        self.s3_bucket = s3_bucket\n        self.s3_key = s3_key\n        self.redshift_conn_id = redshift_conn_id\n        self.aws_conn_id = aws_conn_id\n        self.verify = verify\n        self.unload_options = unload_options\n        self.autocommit = autocommit\n        self.include_header = include_header\n\n        if self.include_header and 'HEADER' not in [uo.upper().strip() for uo in self.unload_options]:\n            self.unload_options = list(self.unload_options) + ['HEADER', ]\n\n    def execute(self, context):\n        postgres_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)\n\n        credentials = s3_hook.get_credentials()\n        unload_options = '\\n\\t\\t\\t'.join(self.unload_options)\n        select_query = \"SELECT * FROM {schema}.{table}\".format(schema=self.schema, table=self.table)\n        unload_query = \"\"\"\n                    UNLOAD ('{select_query}')\n                    TO 's3://{s3_bucket}/{s3_key}/{table}_'\n                    with credentials\n                    'aws_access_key_id={access_key};aws_secret_access_key={secret_key}'\n                    {unload_options};\n                    \"\"\".format(select_query=select_query,\n                               table=self.table,\n                               s3_bucket=self.s3_bucket,\n                               s3_key=self.s3_key,\n                               access_key=credentials.access_key,\n                               secret_key=credentials.secret_key,\n                               unload_options=unload_options)\n\n        self.log.info('Executing UNLOAD command...')\n        postgres_hook.run(unload_query, self.autocommit)\n        self.log.info(\"UNLOAD command complete...\")\n","repo_name":"OrvilleX/xflow","sub_path":"airflow/operators/redshift_to_s3_operator.py","file_name":"redshift_to_s3_operator.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
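[editor's note] The RedshiftToS3Transfer record above assembles an UNLOAD statement from its constructor arguments and runs it through a Postgres hook. A minimal, hypothetical sketch of wiring it into a DAG; the dag id, schedule, schema/table, and S3 names below are illustrative assumptions, not part of the source:

    from datetime import datetime
    from airflow import DAG

    with DAG(dag_id='example_redshift_unload',  # assumed dag id
             start_date=datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        unload_users = RedshiftToS3Transfer(
            task_id='unload_users',   # assumed task id
            schema='public',          # assumed schema
            table='users',            # assumed table
            s3_bucket='my-bucket',    # assumed bucket
            s3_key='exports',         # assumed key prefix
            unload_options=['CSV'],   # passed through into the UNLOAD statement
            include_header=True,      # __init__ appends 'HEADER' to unload_options
        )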
+{"seq_id":"6223551727","text":"\nimport matplotlib.pyplot as plt\nfrom datatesting import extract_data\nfrom function_tree import evaluate, to_lisp, get_random_func\nfrom genetic_operators import cut_and_grow, shrink, hoist, crossover\nfrom generation_functions import generation\nfrom math import pi, log, sin, exp\nfrom sys import float_info\n\nvariables = {\n    'x':1\n}\n\n#NUM_DATA_POINTS = 150\n\n'''def mathFunc(x):\n    if x < NUM_DATA_POINTS/2:\n        return 10*sin(3*x) + 2*x\n    else:\n        return 10*sin(3*x)\n'''\n\ninput_array = extract_data('COVID19-eng.csv')\n#normalization_coeff = 10/(max(input_array))\n#for i in range(len(input_array)):\n#    input_array[i] = input_array[i]*normalization_coeff\n\n#print(input_array)\n\n#for k in range(NUM_DATA_POINTS):\n#    input_array.append(mathFunc(k))\n\n\n\n\n\n\n'''\nfor i in range(1000):\n    print(\"Generation Number: \" +str(i+2))\n    gen.evolve(10,0.4,0.3)\n'''\n\n\ngenNum = 1\n\nbase_cross_ratio = 0.01\nbase_rep_ratio = 0.0 \nMAX_GEN_NUM = 150000\nnum_of_gens = 1\n\nbest_of_runs = []\n\nexit_flag = False\n\n'''for _ in range(num_of_gens):\n    if exit_flag:\n        continue'''\ngen = generation(2500, variables, input_array)\ntry:\n    while(True):\n        # adaptive crossover ratio: ramps from 0 toward base_cross_ratio\n        cross_ratio = base_cross_ratio*(2/(1+exp(-genNum/MAX_GEN_NUM))) - base_cross_ratio\n        print(\"Generation Number: \" +str(genNum))\n        best_candidate = gen.get_best_candidate()\n        print(\"Best Candidate: \"+to_lisp(gen.get_best_candidate()))\n        print(\"Longest candidate length: \"+ str(gen.get_longest_candidate()))\n        score = gen.get_fitness(gen.get_best_candidate(), variables)\n        #abs( evaluate(gen.get_best_candidate(), variables) - TARGET_VAL)\n\n\n        print(\"Score: \"+str(score/sum(i*i for i in input_array)))\n        if score/sum(i*i for i in input_array) < 0.01:  # if you reach 1% error, break\n            break\n        print()\n        #gen.evolve(1,0.4,0)\n        gen.evolve(1,cross_ratio,0)\n        genNum = genNum + 1\n        if (genNum > MAX_GEN_NUM):\n            best_of_runs.append(best_candidate)\n            genNum = 0\n            break\nexcept TypeError:\n    pass\nexcept KeyboardInterrupt:\n    pass\n    #continue\n    '''except KeyboardInterrupt:\n        exit_flag = True\n        break'''\n\nbest_of_runs.append(best_candidate)\n\n\n\nprint(\"Randomly Generated Function: \" + to_lisp(best_candidate))\nprint(f\"Evaluate Results: {evaluate(best_candidate, variables)}\")\n\noutput_array = []\nlowest_score = float_info.max\nbest_cand_of_runs = best_of_runs[0]\nfor l in best_of_runs:\n    curr_score = gen.get_fitness(l, variables)\n    if curr_score < lowest_score:\n        lowest_score = curr_score\n        best_cand_of_runs = l\n\n\nfor i in range(len(input_array)):\n    variables['x'] = i\n    #output_array.append(evaluate(best_cand_of_runs, variables))\n    output_array.append(evaluate(best_candidate, variables))\n\nprint(best_of_runs)\n\nprint(\"Best Total Candidate: \"+to_lisp(best_cand_of_runs))\n\nplt.plot(input_array)\nplt.plot(output_array)\nplt.show()\n","repo_name":"markojovo/evo-algo","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33934301585","text":"with open(\"input12.txt\") as f:\n    content = f.readlines()\n\npart1 = 0\n\nloc = (0, 0)\ndir_idx = 0\ndirections = [(1,0), (0,-1), (-1,0), (0,1)]\n\nfor step in content:\n    ins = step[0]\n    num = int(step[1:])\n    if ins == \"F\":\n        new_loc = (loc[0] + num * directions[dir_idx][0], loc[1] + num * directions[dir_idx][1])\n        loc = new_loc\n    if ins == \"N\":\n        new_loc = (loc[0], loc[1] + num)\n        loc = new_loc\n    if ins == \"S\":\n        new_loc = (loc[0], loc[1] - num)\n        loc = new_loc\n    if ins == \"E\":\n        new_loc = (loc[0] + num, loc[1])\n        loc = new_loc\n    if ins == \"W\":\n        new_loc = (loc[0] - num, loc[1])\n        loc = new_loc\n    if ins == \"R\":\n        num /= 90\n        dir_idx += int(num)\n        dir_idx %= 4\n    if ins == \"L\":\n        num /= 90\n        dir_idx -= int(num)\n        dir_idx %= 4\n\npart1 = abs(loc[0]) + abs(loc[1])\nprint(\"part 1 = \" + str(part1))","repo_name":"dzolotusky/advent-of-code","sub_path":"2020/12/12.1.py","file_name":"12.1.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32517277020","text":"import sys\n\n\ndef 자리합(N):\n    a = list(str(N))\n    result = 0\n    for i in range(len(a)):\n        result += 
int(a[i])\n return result\n\n\nT = int(input())\nfor i in range(T):\n Sum = 0\n case = sys.stdin.readline().strip()\n a = case.split()[0]\n b = case.split()[1]\n for j in range(int(a), int(b) + 1):\n Sum += 자리합(j)\n print(Sum)","repo_name":"jane516/algorithm","sub_path":"자리합_5425.py","file_name":"자리합_5425.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12588075675","text":"import logging\nimport numpy as np\nfrom typing import Optional, Tuple, Dict\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom fvcore.nn import sigmoid_focal_loss_jit\nimport fvcore.nn.weight_init as weight_init\n\nfrom detectron2.config import configurable\nfrom detectron2.data.detection_utils import convert_image_to_rgb\nfrom detectron2.structures import ImageList\nfrom detectron2.utils.events import get_event_storage\nfrom detectron2.utils.logger import log_first_n\nfrom detectron2.layers import ShapeSpec, GradientScalarLayer\n\nfrom ..backbone import Backbone, build_backbone\nfrom ..postprocessing import detector_postprocess\nfrom ..proposal_generator import build_proposal_generator\nfrom ..roi_heads import build_roi_heads\nfrom .build import META_ARCH_REGISTRY\n\n__all__ = [\"GeneralizedRCNN\", \"ProposalNetwork\"]\n\n\n@META_ARCH_REGISTRY.register()\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Generalized R-CNN. Any models that contains the following three components:\n 1. Per-image feature extraction (aka backbone)\n 2. Region proposal generation\n 3. Per-region feature extraction and prediction\n \"\"\"\n\n @configurable\n def __init__(\n self,\n *,\n backbone: Backbone,\n proposal_generator: nn.Module,\n roi_heads: nn.Module,\n global_da_heads: nn.Module,\n local_da_heads: nn.Module,\n cam_heads: nn.Module,\n pixel_mean: Tuple[float],\n pixel_std: Tuple[float],\n input_format: Optional[str] = None,\n vis_period: int = 0,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n backbone: a backbone module, must follow detectron2's backbone interface\n proposal_generator: a module that generates proposals using backbone features\n roi_heads: a ROI head that performs per-region computation\n pixel_mean, pixel_std: list or tuple with #channels element,\n representing the per-channel mean and std to be used to normalize\n the input image\n input_format: describe the meaning of channels of input. Needed by visualization\n vis_period: the period to run visualization. 
Set to 0 to disable.\n \"\"\"\n super().__init__()\n self.backbone = backbone\n self.proposal_generator = proposal_generator\n self.roi_heads = roi_heads\n self.global_da_heads = global_da_heads\n self.local_da_heads = local_da_heads\n self.cam_heads = cam_heads\n\n self.input_format = input_format\n self.vis_period = vis_period\n if vis_period > 0:\n assert input_format is not None, \"input_format is required for visualization!\"\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1))\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1))\n assert (\n self.pixel_mean.shape == self.pixel_std.shape\n ), f\"{self.pixel_mean} and {self.pixel_std} have different shapes!\"\n\n @classmethod\n def from_config(cls, cfg):\n backbone = build_backbone(cfg)\n return {\n \"backbone\": backbone,\n \"proposal_generator\": build_proposal_generator(cfg, backbone.output_shape()),\n \"roi_heads\": build_roi_heads(cfg, backbone.output_shape()),\n \"global_da_heads\": GlobalDAHead(backbone.output_shape()),\n \"local_da_heads\": LocalDAHead(backbone.output_shape()),\n \"cam_heads\": CAMHead(backbone.output_shape(), cfg.MODEL.ROI_HEADS.NUM_CLASSES),\n \"input_format\": cfg.INPUT.FORMAT,\n \"vis_period\": cfg.VIS_PERIOD,\n \"pixel_mean\": cfg.MODEL.PIXEL_MEAN,\n \"pixel_std\": cfg.MODEL.PIXEL_STD,\n }\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n def visualize_training(self, batched_inputs, proposals):\n \"\"\"\n A function used to visualize images and proposals. It shows ground truth\n bounding boxes on the original image and up to 20 predicted object\n proposals on the original image. Users can implement different\n visualization functions for different models.\n\n Args:\n batched_inputs (list): a list that contains input to the model.\n proposals (list): a list that contains predicted proposals. 
Both\n batched_inputs and proposals should have the same length.\n \"\"\"\n from detectron2.utils.visualizer import Visualizer\n\n storage = get_event_storage()\n max_vis_prop = 20\n\n for input, prop in zip(batched_inputs, proposals):\n img = input[\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\n v_gt = Visualizer(img, None)\n v_gt = v_gt.overlay_instances(boxes=input[\"instances\"].gt_boxes)\n anno_img = v_gt.get_image()\n box_size = min(len(prop.proposal_boxes), max_vis_prop)\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(\n boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()\n )\n prop_img = v_pred.get_image()\n vis_img = np.concatenate((anno_img, prop_img), axis=1)\n vis_img = vis_img.transpose(2, 0, 1)\n vis_name = \"Left: GT bounding boxes; Right: Predicted proposals\"\n storage.put_image(vis_name, vis_img)\n break # only visualize one image in a batch\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances (optional): groundtruth :class:`Instances`\n * proposals (optional): :class:`Instances`, precomputed proposals.\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n list[dict]:\n Each dict is the output for one input image.\n The dict contains one key \"instances\" whose value is a :class:`Instances`.\n The :class:`Instances` object has the following keys:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\", \"pred_keypoints\"\n \"\"\"\n if not self.training:\n return self.inference(batched_inputs)\n\n images = self.preprocess_image(batched_inputs)\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n\n features = self.backbone(images.tensor)\n\n # batch_inputs = [target_inputs, source_inputs]\n # source inputs\n num_source_input = len(images) // 2\n source_images = ImageList(\n images.tensor[num_source_input:], images.image_sizes[num_source_input:]\n )\n source_gt_instances = gt_instances[num_source_input:]\n source_features = {}\n for feat_name in self.backbone._out_features:\n source_features[feat_name] = features[feat_name][num_source_input:]\n # target inputs\n target_images = ImageList(\n images.tensor[:num_source_input], images.image_sizes[:num_source_input]\n )\n target_gt_instances = gt_instances[:num_source_input]\n target_features = {}\n for feat_name in self.backbone._out_features:\n target_features[feat_name] = features[feat_name][:num_source_input]\n\n # (1) image-level class-agnostic alignment\n global_dc_losses = self.global_da_heads(features[self.roi_heads.in_features[0]]) # res5 or res4\n local_dc_losses = self.local_da_heads(features[list(features.keys())[0]]) # res2\n\n # (2) image-level class-wise alignment\n ic_losses = self.cam_heads(features[self.roi_heads.in_features[0]], gt_instances)\n\n # Instance-level and Image-level recognition (IIR) unit:\n # (3) instance-level foreground alignment\n if self.proposal_generator:\n source_proposals, source_proposal_losses = self.proposal_generator(\n source_images, source_features, source_gt_instances\n )\n\n target_proposals = 
self.proposal_generator.forward_weak_w_grad(\n target_images, target_features\n )\n\n proposals = target_proposals + source_proposals\n proposal_losses = source_proposal_losses\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n proposal_losses = {}\n\n # (4) instance-level class-wise alignment\n _, detector_losses = self.roi_heads(\n source_images, source_features, source_proposals, source_gt_instances\n )\n img_cls_losses = self.roi_heads.forward_weak(\n target_features, target_proposals, target_gt_instances\n )\n\n if self.vis_period > 0:\n storage = get_event_storage()\n if storage.iter % self.vis_period == 0:\n self.visualize_training(batched_inputs, proposals)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(img_cls_losses)\n losses.update(proposal_losses)\n losses.update(global_dc_losses)\n losses.update(local_dc_losses)\n losses.update(ic_losses)\n return losses\n\n def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):\n \"\"\"\n Run inference on the given inputs.\n\n Args:\n batched_inputs (list[dict]): same as in :meth:`forward`\n detected_instances (None or list[Instances]): if not None, it\n contains an `Instances` object per image. The `Instances`\n object contains \"pred_boxes\" and \"pred_classes\" which are\n known boxes in the image.\n The inference will then skip the detection of bounding boxes,\n and only predict other per-ROI outputs.\n do_postprocess (bool): whether to apply post-processing on the outputs.\n\n Returns:\n same as in :meth:`forward`.\n \"\"\"\n assert not self.training\n\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n\n if detected_instances is None:\n if self.proposal_generator:\n proposals, _ = self.proposal_generator(images, features, None)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n\n results, _ = self.roi_heads(images, features, proposals, None)\n else:\n detected_instances = [x.to(self.device) for x in detected_instances]\n results = self.roi_heads.forward_with_given_boxes(features, detected_instances)\n\n if do_postprocess:\n return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)\n else:\n return results\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images\n\n @staticmethod\n def _postprocess(instances, batched_inputs, image_sizes):\n \"\"\"\n Rescale the output instances to the target size.\n \"\"\"\n # note: private function; subject to changes\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n instances, batched_inputs, image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n\n@META_ARCH_REGISTRY.register()\nclass ProposalNetwork(nn.Module):\n \"\"\"\n A meta architecture that only predicts object proposals.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.backbone = build_backbone(cfg)\n self.proposal_generator = 
build_proposal_generator(cfg, self.backbone.output_shape())\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))\n self.register_buffer(\"pixel_std\", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n Same as in :class:`GeneralizedRCNN.forward`\n\n Returns:\n list[dict]:\n Each dict is the output for one input image.\n The dict contains one key \"proposals\" whose value is a\n :class:`Instances` with keys \"proposal_boxes\" and \"objectness_logits\".\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n features = self.backbone(images.tensor)\n\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n # In training, the proposals are not useful at all but we generate them anyway.\n # This makes RPN-only models about 5% slower.\n if self.training:\n return proposal_losses\n\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n proposals, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"proposals\": r})\n return processed_results\n\n\nclass GlobalDAHead(nn.Module):\n \"\"\"\n Global domain classifier for image-level class-agnostic alignment\n \"\"\"\n\n def __init__(self, backbone_out_shape: Dict[str, ShapeSpec]):\n super(GlobalDAHead, self).__init__()\n if 'res5' in backbone_out_shape.keys():\n in_channels = 2048\n elif 'res4' in backbone_out_shape.keys():\n in_channels = 1024\n elif 'plain5' in backbone_out_shape.keys():\n in_channels = 512\n else:\n raise KeyError(\"Unknown backbone output name: {}\".format(backbone_out_shape.keys()))\n\n self.da_conv1 = nn.Conv2d(in_channels, 512, kernel_size=3, stride=2, padding=1, bias=False)\n self.da_conv2 = nn.Conv2d(512, 128, kernel_size=3, stride=2, padding=1, bias=False)\n self.da_conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=False)\n self.da_bn1 = nn.BatchNorm2d(512)\n self.da_bn2 = nn.BatchNorm2d(128)\n self.da_bn3 = nn.BatchNorm2d(128)\n self.da_fc = nn.Linear(128, 1)\n\n self.gama = 5\n grl_weight = 1.0\n self.grl = GradientScalarLayer(-1.0 * grl_weight)\n\n def forward(self, x):\n x = self.grl(x)\n\n x = F.dropout(F.relu(self.da_bn1(self.da_conv1(x))), training=self.training)\n x = F.dropout(F.relu(self.da_bn2(self.da_conv2(x))), training=self.training)\n x = F.dropout(F.relu(self.da_bn3(self.da_conv3(x))), training=self.training)\n\n x = F.avg_pool2d(x, (x.size(2), x.size(3)))\n x = x.view(-1, 128)\n x = self.da_fc(x)\n\n da_targets = torch.zeros_like(x, requires_grad=False)\n num_source_input = x.shape[0] // 2\n da_targets[:num_source_input, ...] 
+= 1\n        losses = sigmoid_focal_loss_jit(x, da_targets, gamma=self.gama, reduction='mean')\n\n        return {'loss_global_da': losses}\n\n\nclass LocalDAHead(nn.Module):\n    \"\"\"\n    Local domain classifier for image-level class-agnostic feature alignment\n    \"\"\"\n\n    def __init__(self, backbone_out_shape: Dict[str, ShapeSpec]):\n        super(LocalDAHead, self).__init__()\n        if 'res2' in backbone_out_shape.keys():\n            in_channels = 256\n        elif 'plain2' in backbone_out_shape.keys():\n            in_channels = 128\n        else:\n            print(backbone_out_shape.keys())\n            raise KeyError(\"Unknown backbone output name: {}\".format(backbone_out_shape.keys()))\n\n        self.da_conv1 = nn.Conv2d(in_channels, 256, kernel_size=1, stride=1, padding=0, bias=False)\n        self.da_conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, bias=False)\n        self.da_conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1, padding=0, bias=False)\n        self._init_weights()\n\n        grl_weight = 1.0\n        self.grl = GradientScalarLayer(-1.0 * grl_weight)\n\n    def _init_weights(self):\n        def normal_init(m, mean, stddev, truncated=False):\n            \"\"\"\n            weight initializer: truncated normal and random normal.\n            \"\"\"\n            if truncated:\n                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)  # not a perfect approximation\n            else:\n                m.weight.data.normal_(mean, stddev)\n\n        normal_init(self.da_conv1, 0, 0.01)\n        normal_init(self.da_conv2, 0, 0.01)\n        normal_init(self.da_conv3, 0, 0.01)\n\n    def forward(self, x):\n        x = self.grl(x)\n\n        x = F.relu(self.da_conv1(x))\n        x = F.relu(self.da_conv2(x))\n        x = torch.sigmoid(self.da_conv3(x))\n\n        da_targets = torch.zeros_like(x, requires_grad=False)\n        num_source_input = x.shape[0] // 2\n        da_targets[:num_source_input, ...] += 1\n        losses = F.mse_loss(x, da_targets, reduction='mean')\n\n        return {'loss_local_da': losses}\n\n\nclass CAMHead(nn.Module):\n    \"\"\"\n    Image-level multi-label classifier for image-level class-wise alignment\n    \"\"\"\n\n    def __init__(self, backbone_out_shape: Dict[str, ShapeSpec], num_classes: int):\n        super(CAMHead, self).__init__()\n        if 'res5' in backbone_out_shape.keys():\n            in_channels = 2048\n        elif 'res4' in backbone_out_shape.keys():\n            in_channels = 1024\n        elif 'plain5' in backbone_out_shape.keys():\n            in_channels = 512\n        else:\n            raise KeyError(\"Unknown backbone output name: {}\".format(backbone_out_shape.keys()))\n        self.num_classes = num_classes\n\n        self.cam_conv = nn.Conv2d(in_channels, self.num_classes, kernel_size=1, bias=False)\n        weight_init.c2_msra_fill(self.cam_conv)\n\n    def forward(self, x, gt_instances):\n        x = self.cam_conv(x)\n\n        logits = F.avg_pool2d(x, (x.size(2), x.size(3)))\n        logits = logits.view(-1, self.num_classes)\n\n        if gt_instances is None:\n            return {'loss_cam': 0.0 * logits.sum()}\n\n        gt_classes_img_oh = self.get_image_level_gt(gt_instances)\n\n        losses = F.binary_cross_entropy_with_logits(\n            logits, gt_classes_img_oh, reduction='mean'\n        )\n        return {'loss_cam': losses * 0.1}\n\n    @torch.no_grad()\n    def get_image_level_gt(self, targets):\n        \"\"\"\n        Convert instance-level annotations to image-level\n        \"\"\"\n        gt_classes_img = [torch.unique(t.gt_classes, sorted=True) for t in targets]\n        gt_classes_img_int = [gt.to(torch.int64) for gt in gt_classes_img]\n        gt_classes_img_oh = torch.cat(\n            [\n                torch.zeros(\n                    (1, self.num_classes), dtype=torch.float, device=gt_classes_img[0].device\n                ).scatter_(1, torch.unsqueeze(gt, dim=0), 1)\n                for gt in gt_classes_img_int\n            ],\n            dim=0,\n        )\n        return gt_classes_img_oh","repo_name":"XuYunqiu/H2FA_R-CNN","sub_path":"detectron2/modeling/meta_arch/rcnn.py","file_name":"rcnn.py","file_ext":"py","file_size_in_byte":20599,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"16"}
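[editor's note] get_image_level_gt in the record above collapses per-instance class labels into one multi-hot row per image via scatter_. A standalone illustration of that pattern; num_classes and the label tensors are made-up example values:

    import torch

    num_classes = 5
    # per-image instance labels for two images (deduplicated, int64)
    gt_classes_img_int = [torch.tensor([0, 2]), torch.tensor([2, 4])]
    one_hot = torch.cat(
        [
            torch.zeros((1, num_classes)).scatter_(1, torch.unsqueeze(gt, dim=0), 1)
            for gt in gt_classes_img_int
        ],
        dim=0,
    )
    print(one_hot)
    # tensor([[1., 0., 1., 0., 0.],
    #         [0., 0., 1., 0., 1.]])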
+{"seq_id":"13745926904","text":"\"\"\"\nLogging utilities for the WeatherMap\n\"\"\"\n\nimport traceback\nimport inspect\nfrom datetime import datetime, timedelta\n\nTAB_TEXT = ' ' * 4\nMODULE_NAME = ''\n\n\ndef __get_callstack_indent_count(\n        stack_adjustment=3\n):\n    \"\"\"\n    Returns the number of indents that should be applied to the logging statement.\n\n    Keyword Arguments:\n        stack_adjustment {int} -- The number of frames down the ACTUAL function name is. (default: {3})\n\n    Returns:\n        int -- The number of indents.\n    \"\"\"\n\n    try:\n        cs_info = traceback.extract_stack()\n\n        indents = 0\n\n        for index in range(len(cs_info) - stack_adjustment, 0, -1):\n            if MODULE_NAME in cs_info[index].name:\n                break\n            else:\n                indents += 1\n\n        if indents < 0:\n            indents = 0\n\n        return indents\n    except:\n        return 0\n\n\ndef __get_indents(\n        count=0,\n        stack_adjustment=3\n):\n    \"\"\"\n    Returns whitespace for the number of given indents.\n\n    Keyword Arguments:\n        count {int} -- The number of indents to return whitespace for. (default: {0})\n        stack_adjustment {int} -- The number of frames down the ACTUAL function name is. (default: {3})\n\n    Returns:\n        string -- A whitespace string.\n    \"\"\"\n\n    if count < 0:\n        count = 0\n\n    function_name = 'UNKNOWN'\n    line_num = 'UNKNOWN'\n\n    try:\n        cs_info = traceback.extract_stack()\n        index = len(cs_info) - stack_adjustment\n        function_name = '{}()'.format(cs_info[index].name)\n\n        if MODULE_NAME in function_name:\n            function_name = cs_info[index].filename\n\n        line_num = cs_info[index].lineno\n    except:\n        pass\n\n    return '{}{}:{}: '.format(TAB_TEXT * count, function_name, line_num)\n\n\ndef safe_log(\n        logger,\n        message\n):\n    \"\"\"\n    Logs an INFO level message safely. Also prints it to the screen.\n\n    Arguments:\n        logger {logger} -- The logger to use.\n        message {string} -- The message to log.\n    \"\"\"\n\n    try:\n        indents = __get_indents(__get_callstack_indent_count())\n        if logger is not None:\n            logger.log_info_message(indents + message)\n        else:\n            print('{} INFO: {}{}'.format(datetime.now(), indents, message))\n    except:\n        print(indents + message)\n\n\ndef safe_log_warning(\n        logger,\n        message\n):\n    \"\"\"\n    Logs a WARN level message safely. 
Also prints it to the screen.\n\n Arguments:\n logger {logger} -- The logger to use.\n message {string} -- The message to log.\n \"\"\"\n\n try:\n indents = __get_indents(__get_callstack_indent_count())\n\n if logger is not None:\n logger.log_warning_message(indents + message)\n else:\n print('{} WARN: {}{}'.format(datetime.now(), indents, message))\n except:\n print(indents + message)\n","repo_name":"ptader/categorical-sectional","sub_path":"safe_logging.py","file_name":"safe_logging.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"2314659115","text":"import avango\nimport avango.script\nimport avango.gua\nfrom avango.script import field_has_changed\n\nfrom examples_common.GuaVE import GuaVE\n\n#avango.enable_logging(4, \"client.log\")\n\nVR16 = \"141.54.147.16\"\nPAN = \"141.54.147.52\"\nLOCALHOST = \"127.0.0.1\"\nDAEDALOS = \"141.54.147.34\"\n\nSPACEMONSTER = \"141.54.147.101\"\n\nKERBEROS = \"141.54.147.20\"\n\nCURRENTLY_USED_SERVER = KERBEROS\n\n\nCLIENT_MODE = \"MEASUREMENT_ANAGLYPH\"\n#CLIENT_MODE = \"VIDEO_POWERWALL\"\n#CLIENT_MODE = \"SCREENSHOT_DESKTOP\"\n##CLIENT_MODE = \"DEBUG_3_USERS_WEAK_PC\"\n\nDEBUG_MODE = \"NONE\"\n#DEBUG_MODE = \"OCCLUSION_SLAVE_DEBUG\"\n#DEBUG_MODE = \"CENTRAL_USER\"\n\nSTEREO_MODE = 0\nWINDOW_RESOLUTION = 0\nRENDERING_RESOLUTION = 0\nLEFT_VIEWPORT_START = 0\nRIGHT_VIEWPORT_START = 0\n\nif \"MEASUREMENT_ANAGLYPH\" == CLIENT_MODE:\n STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_CYAN\n\n if \"OCCLUSION_SLAVE_DEBUG\" == DEBUG_MODE:\n #WINDOW_RESOLUTION = avango.gua.Vec2ui(3840, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(3840, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n if True:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(3840, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(3840, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n else:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(1280, 720)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(1280, 720)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n\n else:\n if False:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(4096 - 400 - 425, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(400, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(400, 0)\n else:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(4096, 0)\n \n DISPLAY_VARIABLE_LEFT = \":0.1\"\n DISPLAY_VARIABLE_CENTER = \":0.1\"\n DISPLAY_VARIABLE_RIGHT = \":0.1\" # for the occlusion slave, one GPU is rendering everything\nelif \"VIDEO_POWERWALL\" == CLIENT_MODE:\n DISPLAY_VARIABLE_LEFT = \":0.1\"\n DISPLAY_VARIABLE_CENTER = \":0.1\"\n DISPLAY_VARIABLE_RIGHT = \":0.1\" \n STEREO_MODE = avango.gua.StereoMode.SIDE_BY_SIDE\n \n \n if True:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(2*4096, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(4096 - 400 - 425, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(400, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(4096 + 400, 0)\n else:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(2*4096, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n 
#RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(4096, 0) \n \n\n if False:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(2*4096, 2160)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(4096 - 400 - 425, 2160)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(400, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(4096 + 400, 0)\n else:\n WINDOW_RESOLUTION = avango.gua.Vec2ui(2*1024, 512)\n RENDERING_RESOLUTION = avango.gua.Vec2ui(1024, 512)\n #RENDERING_RESOLUTION = avango.gua.Vec2ui(4096, 2160)\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(1024, 0) \n\n\nelif \"SCREENSHOT_DESKTOP\" == CLIENT_MODE:\n STEREO_MODE = avango.gua.StereoMode.MONO\n WINDOW_RESOLUTION = avango.gua.Vec2ui(128, 72)\n RENDERING_RESOLUTION = WINDOW_RESOLUTION\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n DISPLAY_VARIABLE_LEFT = \":0.4\"\n DISPLAY_VARIABLE_CENTER = \":0.2\"\n DISPLAY_VARIABLE_RIGHT = \":0.1\"\nelif \"DEBUG_3_USERS_WEAK_PC\" == CLIENT_MODE:\n STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_CYAN\n #WINDOW_RESOLUTION = avango.gua.Vec2ui(1400, 1600)\n #WINDOW_RESOLUTION = avango.gua.Vec2ui(3840, 2160)\n WINDOW_RESOLUTION = avango.gua.Vec2ui(128, 72)\n RENDERING_RESOLUTION = WINDOW_RESOLUTION\n LEFT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n RIGHT_VIEWPORT_START = avango.gua.Vec2ui(0, 0)\n DISPLAY_VARIABLE_LEFT = \":0\"\n DISPLAY_VARIABLE_CENTER = \":0\"\n DISPLAY_VARIABLE_RIGHT = \":0\"\n\nclass Initializer(avango.script.Script):\n\n def __init__(self):\n self.super(Initializer).__init__()\n\n # scenegraph\n self.nettrans = avango.gua.nodes.NetTransform(Name=\"net\",\n # specify role, ip, and port\n Groupname=\"AVCLIENT|\"+CURRENTLY_USED_SERVER+\"|7432\") #server\n #Groupname=\"AVCLIENT|141.54.147.54|7432\")\n \n self.graph = avango.gua.nodes.SceneGraph(Name=\"scenegraph\")\n self.graph.Root.value.Children.value = [self.nettrans]\n\n print(\"Before setting is initialized\")\n\n # viewing setup\n #size = avango.gua.Vec2ui(1600, 1200)\n #size = avango.gua.Vec2ui(1920, 1080)\n #size = avango.gua.Vec2ui(100, 100)\n size = avango.gua.Vec2ui(128, 72) \n #size = avango.gua.Vec2ui(3840, 2160)\n self.window_center = avango.gua.nodes.GlfwWindow(Size=size,\n Display = DISPLAY_VARIABLE_CENTER, # \":0.1\",\n LeftResolution=size,\n RightResolution=size,\n Title=\"slave_weimar_v0_osaka_center\")\n\n self.window_center.EnableVsync.value = False\n avango.gua.register_window(\"slave_weimar_v0_osaka_center\", self.window_center)\n \n if \"CENTRAL_USER\" != DEBUG_MODE: \n self.window_left = avango.gua.nodes.GlfwWindow(Size=size,\n Display = DISPLAY_VARIABLE_LEFT, # \":0.1\",\n LeftResolution=size,\n RightResolution=size,\n Title=\"slave_weimar_v0_osaka_left\")\n\n self.window_left.EnableVsync.value = False\n avango.gua.register_window(\"slave_weimar_v0_osaka_left\", self.window_left)\n\n if \"CENTRAL_USER\" != DEBUG_MODE: \n self.window_right = avango.gua.nodes.GlfwWindow(Size=size,\n Display = DISPLAY_VARIABLE_RIGHT, # \":0.1\",\n LeftResolution=size,\n RightResolution=size,\n Title=\"slave_weimar_v0_osaka_right\")\n\n self.window_right.EnableVsync.value = False\n avango.gua.register_window(\"slave_weimar_v0_osaka_right\", self.window_right)\n\n logger = avango.gua.nodes.Logger(EnableWarning=False)\n\n #self.MemoryController = avango.gua.nodes.NamedSharedMemoryController()\n 
#self.MemoryController.add_read_only_memory_segment(\"DEPTH_FEEDBACK_SEGMENT\")\n\n        #self.MemoryController.register_remotely_constructed_object_on_segment(\"DEPTH_FEEDBACK_SEGMENT\", \"DEPTH_FEEDBACK_SEMAPHOR\")\n\n\n        #print(\"Before adding shared memory segments\")\n        #add shared depth buffer memory\n\n        self.viewer = avango.gua.nodes.Viewer()\n        self.viewer.SceneGraphs.value = [self.graph]\n        \n        if \"CENTRAL_USER\" != DEBUG_MODE: \n            self.viewer.Windows.value = [self.window_center, self.window_right, self.window_left]\n        else:\n            self.viewer.Windows.value = [self.window_center]\n\n        self.viewer.DesiredFPS.value = 1000.0\n\n        self.viewer.run()\n        #while True:\n        #    depth_feedback_state = self.MemoryController.get_value_from_named_object(\"DEPTH_FEEDBACK_SEMAPHOR\")\n        #    print(\"Read State\")\n        #    if 0 == depth_feedback_state:\n        #        self.MemoryController.set_value_for_named_object(\"DEPTH_FEEDBACK_SEMAPHOR\", 1)\n        #        self.viewer.frame()\n        #    elif 2 == depth_feedback_state:\n        #        pass\n        #        print(\"Depth buffer was successfully written.\")\n\n\n\n        # parameters\n        print(\"Before setting is initialized\")\n        self.is_initialized = False\n        print(\"always evaluate\")\n        self.always_evaluate(True)\n        print(\"after always evaluate\")\n\n\n\n    def evaluate(self):\n        print(\"beginning of evaluation\")\n        if not self.is_initialized:\n            if len(self.nettrans.Children.value) > 0:\n                self.on_arrival()\n                self.is_initialized = True\n\n        #self.always_evaluate(False)\n\n\n    def on_arrival(self):\n        pass\n        #print(\"I HAVE ARRIVED\")\n        #print(self.graph[\"/net/screen/cam\"].Name.value)\n\n        #occlusion_slave_pipeline_description = avango.gua.nodes.PipelineDescription(\n        #    Passes=[\n        #        avango.gua.nodes.TriMeshPassDescription(),\n        #        avango.gua.nodes.LightVisibilityPassDescription(),\n        #        avango.gua.nodes.SPointsPassDescription(),\n        #        avango.gua.nodes.OcclusionSlaveResolvePassDescription()\n        #    ])\n        #self.graph[\"/net/screen/cam\"].PipelineDescription.value = occlusion_slave_pipeline_description\n\n        #print(\"Reconfigured pipeline\")\n\n\n\ninit = Initializer()\n\n","repo_name":"vrsys/avango","sub_path":"examples/group_to_group_telepresence/occlusion_slave_osaka_3_users.py","file_name":"occlusion_slave_osaka_3_users.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"8353684809","text":"from rockpaperscissors.Game import Move, Round, Game, Outcome, find_move\n\n\ndef parse_move(code):\n    if code == 'A':\n        return Move.Rock\n    if code == 'B':\n        return Move.Paper\n    if code == 'C':\n        return Move.Scissors\n    return None\n\n\ndef parse_outcome(code):\n    if code == 'X':\n        return Outcome.Loss\n    if code == 'Y':\n        return Outcome.Draw\n    if code == 'Z':\n        return Outcome.Win\n    return None\n\n\ndef parse_line(line):\n    codes = line.split()\n\n    if not codes or len(codes) != 2:\n        return None\n\n    their_move = parse_move(codes[0])\n    outcome = parse_outcome(codes[1])\n\n    return Round(find_move(their_move, outcome), their_move)\n\n\ndef read_game_file(path):\n    rounds = []\n\n    with open(path) as file:\n        for line in file:\n            game_round = parse_line(line)\n            if game_round:\n                rounds.append(game_round)\n\n    return Game(rounds)\n","repo_name":"p-verburg/advent_of_code_2022","sub_path":"rockpaperscissors/GameFileReader.py","file_name":"GameFileReader.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
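[editor's note] A quick sketch of how the GameFileReader helpers above compose; the input line 'A Z' is a made-up example, and find_move comes from the record's own Game module (not shown here):

    from rockpaperscissors.GameFileReader import parse_line  # assumed import path, per the record's sub_path

    game_round = parse_line('A Z')  # opponent plays Rock, desired outcome is a Win
    # equivalent to: Round(find_move(Move.Rock, Outcome.Win), Move.Rock)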
+{"seq_id":"18413660731","text":"import sys\n\ndef find(a):\n    while a != node[a]:\n        a = node[a]\n        node[a] = node[node[a]]\n    return a\n\ndef union(a, b):\n    a = find(a)\n    b = find(b)\n    if a < b:\n        node[a] = b\n    else:\n        node[b] = a\n\ndef dfs(x, y):\n    while 1:\n        dx, dy = direct[matrix[x][y]]\n        nx, ny = x + dx, y + dy\n        matrix[x][y] = 'F'\n        if matrix[nx][ny] == 'F':\n            if find(nx * m + ny) != find(x * m + y):\n                return 0\n            else:\n                return 1\n        else:\n            union(nx * m + ny, x * m + y)\n            x, y = nx, ny\n\nn, m = map(int, sys.stdin.readline().split())\nmatrix = []\nnode = [i for i in range(n * m)]\ndirect = {'D' : (1, 0), 'U' : (-1, 0), 'L' : (0, -1), 'R' : (0, 1)}\nfor _ in range(n):\n    matrix.append(list(sys.stdin.readline().rstrip()))\nans = 0\nfor i in range(n):\n    for j in range(m):\n        if matrix[i][j] != 'F':\n            ans += dfs(i, j)\nprint(ans)\n","repo_name":"nain95/Algorithm","sub_path":"BOJ/16000/16724.py","file_name":"16724.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32892862891","text":"import os\nimport common_pygame\nimport random\npygame = common_pygame.pygame\nscreen = common_pygame.screen\n\n\nclass progressBar():\n\n    def __init__(self):\n        self.color = (102, 170, 255)\n        self.y1 = screen.get_height() / 2\n        self.y2 = self.y1 + 20\n        self.max_width = 800 - 40\n        self.font = pygame.font.Font(\"BITSUMIS.TTF\", 64)\n        self.loading = self.font.render(\"LOADING\", True, self.color)\n        self.textHeight = self.y1 - 80\n\n    def update(self, percent):\n        screen.fill((0, 0, 0))\n\n        screen.blit(self.loading, (300, self.textHeight))\n        txtpercent = self.font.render(str(percent) + \"%\", True, self.color)\n        screen.blit(txtpercent, (20, self.y1 + 30))\n        pygame.draw.rect(screen, self.color,\n                         (20, self.y1, self.max_width, 20), 2)\n        pygame.draw.rect(screen, self.color, (20, self.y1,\n                                              (percent * self.max_width) / 100, 20), 0)\n        pygame.display.flip()\n\n        (r, g, b) = self.color\n        r = min(r + 2, 255)\n        g = max(g - 2, 0)\n        b = max(b - 2, 0)\n        self.color = (r, g, b)\n        self.loading = self.font.render(\"LOADING\", True, self.color)\n","repo_name":"mthenault/MICshooter","sub_path":"sources/progressbar.py","file_name":"progressbar.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"10804664858","text":"\"\"\"\n1. Implement handling of multiple clients on the server, using the select function.\n   Clients communicate in a \"shared chat\": every message from a participant is sent to everyone connected to the server.\n2. Implement the send/receive functions on the client side.\n   To simplify development at this stage, let the client application either only receive\n   or only send messages to the shared chat. 
These functions should be implemented as separate scripts.\n\"\"\"\n# SERVER\nfrom socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR\nfrom common.globals import DEF_PORT, ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, SENDER,\\\n    RESPONSE, ERROR, MAX_CONNECTIONS, MESSAGE, MESSAGE_TEXT\nfrom common.utils import get_message, send_message, handle_parameters\nfrom time import time\nimport logging\nimport select\nimport log.server_log_config\n\nSERVER_LOGGER = logging.getLogger('server')\n\n\ndef handle_client_message(msg, msg_list, client_sock):\n    SERVER_LOGGER.debug(f'Processing message from client: {msg}')\n    if ACTION in msg and msg[ACTION] == PRESENCE and TIME in msg and USER in msg and msg[USER][ACCOUNT_NAME] == 'Guest':\n        SERVER_LOGGER.debug(f'Message handled SUCCESSFULLY, sending reply: \"RESPONSE: 200\"')\n        send_message(client_sock, {RESPONSE: 200})\n        return\n    elif ACTION in msg and msg[ACTION] == MESSAGE and TIME in msg and MESSAGE_TEXT in msg:\n        SERVER_LOGGER.debug(f'Message handled SUCCESSFULLY, forwarding {ACCOUNT_NAME}: \"{MESSAGE_TEXT}\"')\n        msg_list.append((msg[ACCOUNT_NAME], msg[MESSAGE_TEXT]))\n        return\n    else:\n        SERVER_LOGGER.error(f'Message handling FAILED, sending reply: \"RESPONSE: 400\"\\n'\n                            f'Contents of the malformed request:\\n{msg}')\n        send_message(client_sock, {\n            RESPONSE: 400,\n            ERROR: 'Bad request'})\n        return\n\n\ndef del_sock(sock, sock_list):\n    sock.close()\n    sock_list.remove(sock)\n\n\ndef main():\n    listen_address, listen_port, _ = handle_parameters(ip='ANY', port=DEF_PORT, mode='listen')\n    SERVER_LOGGER.info(f'Server started. Listening on IP:{listen_address if listen_address else \"any\"} '\n                       f'PORT:{listen_port}')\n\n    serv_sock = socket(AF_INET, SOCK_STREAM)\n    serv_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n    serv_sock.bind((listen_address, listen_port))\n    serv_sock.settimeout(0.5)\n    clients = []\n    msgs = []\n\n    serv_sock.listen(MAX_CONNECTIONS)\n    try:\n        while True:\n            try:\n                client_sock, client_address = serv_sock.accept()\n            except OSError:\n                pass\n            else:\n                SERVER_LOGGER.debug(f'Connected: {client_address}')\n                clients.append(client_sock)\n\n            recv_data_list = []\n            send_data_list = []\n            try:\n                if clients:\n                    recv_data_list, send_data_list, _ = select.select(clients, clients, [], 0)\n            except OSError:\n                pass\n\n            if recv_data_list:\n                for client_with_msg in recv_data_list:\n                    try:\n                        recvd_msg = get_message(client_with_msg)\n                        handle_client_message(recvd_msg, msgs, client_with_msg)\n                        if recvd_msg.get(\"action\") == 'msg':\n                            _user = recvd_msg.get(\"account_name\")\n                            _msg = recvd_msg.get(\"msg_text\")\n                        else:\n                            _user = recvd_msg.get(\"user\").get(\"account_name\")\n                            _msg = 'connected'\n                        SERVER_LOGGER.info(f'{client_with_msg.getpeername()} '\n                                           f'{_user}: {_msg}')\n                    except:\n                        SERVER_LOGGER.info(f'{client_with_msg.getpeername()}'\n                                           f' disconnected from the server.')\n                        del_sock(client_with_msg, clients)\n\n            if msgs and send_data_list:\n                msg = {\n                    ACTION: MESSAGE,\n                    SENDER: msgs[0][0],\n                    TIME: time(),\n                    MESSAGE_TEXT: msgs[0][1]\n                }\n                del msgs[0]\n                for waiting_client in send_data_list:\n                    try:\n                        send_message(waiting_client, msg)\n                    except:\n                        SERVER_LOGGER.info(f'{waiting_client.getpeername()} disconnected from the server.')\n                        del_sock(waiting_client, clients)\n    except KeyboardInterrupt:\n        SERVER_LOGGER.info(f'Shutting down, disconnecting {len(clients)} clients...')\n        for client in clients:\n            del_sock(client, clients)\n        serv_sock.close()\n        SERVER_LOGGER.info(f'Server stopped')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Wrexan/GB_Client-Server_apps_using_Python","sub_path":"lesson_07/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
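[editor's note] The lesson text above also calls for separate send-only / receive-only client scripts, which are not part of this record. A minimal hypothetical sketch of the send side, reusing the record's own common.* helpers; the host address and message text are made-up, and the payload keys match what handle_client_message expects:

    from socket import socket, AF_INET, SOCK_STREAM
    from time import time
    from common.globals import DEF_PORT, ACTION, MESSAGE, TIME, ACCOUNT_NAME, MESSAGE_TEXT
    from common.utils import send_message

    # send-only client: connect and push a single chat message
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(('127.0.0.1', DEF_PORT))
    send_message(sock, {ACTION: MESSAGE, TIME: time(),
                        ACCOUNT_NAME: 'Guest', MESSAGE_TEXT: 'hello chat'})
    sock.close()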
+{"seq_id":"71814272009","text":"from .MsgManager.manager import MiddleManager\nfrom .Utils.common_utils import *\nimport time\nimport cv2\nfrom Oviz.Utils.point_cloud_utils import read_pcd, read_bin\n\ngroup_template = ['template', \"sub_1\"]\n\nclass Oviz:\n    _oviz_node = MiddleManager()\n    '''\n    data:\n        group:\n            pointcloud: []\n            image: []\n        group2\n    '''\n\n    @staticmethod\n    def __del__():\n        Oviz._oviz_node.close()\n    _data = dict()\n    @staticmethod\n    def init_oviz_api(ip, port = 12345):\n        Oviz._oviz_node.init_oviz_api(ip, port)\n\n    @staticmethod\n    def imshow(msg = None, group = \"template\"):\n        group_data = Oviz._data.setdefault(group, {})\n        topic_data = group_data.setdefault(IMAGE, [])\n        if isinstance(msg, str):\n            msg_data = cv2.imread(msg)\n            topic_data.append(msg_data)\n        else:\n            topic_data.append(msg)\n\n    @staticmethod\n    def pcshow(msg = None, group = \"template\"):\n        group_data = Oviz._data.setdefault(group, {})\n        topic_data = group_data.setdefault(POINTCLOUD, [])\n        if isinstance(msg, str):\n            if msg.endswith(\".pcd\"):\n                pc = read_pcd(msg)\n            elif msg.endswith(\".bin\"):\n                pc = read_bin(msg)\n            topic_data.append(pc)\n        else:\n            topic_data.append(msg)\n\n    @staticmethod\n    def bbox3dshow(msg = None, group = \"template\"):\n        group_data = Oviz._data.setdefault(group, {})\n        topic_data = group_data.setdefault(BBOX3D, [])\n        topic_data.append(msg)\n\n    @staticmethod\n    def waitKey(cnt = -1):\n        Oviz._oviz_node.pub(Oviz._data)\n        if cnt < 0:\n            while not Oviz._oviz_node.is_decontrol():\n                time.sleep(0.1)\n            Oviz._oviz_node.reset_decontrol()\n        else:\n            time.sleep(cnt)\n        Oviz._data.clear()\n","repo_name":"xiaoqiang-cheng/Oviz","sub_path":"Oviz/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"1925144332","text":"from flask import request, jsonify, send_file\nfrom app.routes import images_bp\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom app import db,app\nfrom flask import render_template\nfrom app.models.user import User  # Import the User class from the model\nfrom app.models.image import Image  # Import the Image class from the model\nimport os \nimport datetime as dt  # Rename the module to 'dt' to avoid conflicts\nimport pytesseract  # Import the pytesseract library\nfrom PIL import Image as IMGPIL\n\n\n@images_bp.route('/upload', methods=['POST'])\ndef upload_image():\n    # Logic for uploading images\n    # ...\n    return jsonify({'access_token': \"access_token\"}), 200\n\n\n\n# Function to check for an allowed file extension\ndef allowed_file_extension(filename):\n    return '.' in filename and filename.rsplit('.', 1)[1].lower() in {'jpg', 'jpeg', 'png', 'gif'}\n\n# Function to generate a unique filename (adjust this to your needs)\ndef generate_unique_filename(filename):\n    import uuid\n    unique_filename = str(uuid.uuid4()) + '.' 
+ filename.rsplit('.', 1)[1].lower()\n    return unique_filename\n\n\n# Route for the profile picture update page\n@images_bp.route('/updateImage', methods=['GET'])\ndef viewUpdateImage():\n    return render_template('updateImage.html')\n\n# Route to update the profile picture\n@images_bp.route('/update_profile_picture', methods=['POST'])\n@jwt_required()  # Requires authentication via JWT\ndef update_profile_picture():\n    try:\n        current_user_id = get_jwt_identity()  # Gets the user ID from the JWT token\n\n        # Check whether the user exists in the database\n        user = User.query.get(current_user_id)\n        if not user:\n            return jsonify({'message': 'User not found'}), 404\n\n        foto = request.files.get('foto')  # Gets the newly uploaded image file\n\n        # Check whether a new image was provided\n        if foto:\n            # Check that the file extension is valid (e.g., only allow JPEG images)\n            if not allowed_file_extension(foto.filename):\n                return jsonify({'message': 'File type not allowed for the image'}), 400\n            \n\n            # Create the destination directory if it does not exist\n            profile_pictures_folder = os.path.join(app.config['PROFILE_PICTURES_FOLDER'], '')\n            os.makedirs(profile_pictures_folder, exist_ok=True)\n\n            # Generate a unique name for the new profile picture\n            unique_filename = generate_unique_filename(foto.filename)\n\n            # Save the new image in the profile pictures directory\n            foto.save(os.path.join(app.config['PROFILE_PICTURES_FOLDER'], unique_filename))\n\n            # Update the image filename in the database\n            user.foto = unique_filename\n            db.session.commit()\n\n        return jsonify({'message': 'Profile picture updated successfully',\n                        'nameImage': unique_filename}), 200\n    except Exception as e:\n        return jsonify({'message': 'Error updating the profile picture', 'error': str(e)}), 500\n    \n\n@images_bp.route('/get_profile_picture/<image_name>')\ndef get_profile_picture(image_name):\n\n    image_path = os.getcwd()+'/'+app.config['PROFILE_PICTURES_FOLDER'] +\"/\" +image_name\n    \n    print(image_path)\n    # Check whether the image file exists\n    if os.path.isfile(image_path):\n        # Return the image as a response\n        return send_file(image_path, mimetype='image/jpeg')  # Adjust the mimetype to the image type\n    else:\n        # Return an error response if the image does not exist\n        return 'Image not found', 404\n    \n\n\n@images_bp.route('/userImages', methods=['GET'])\ndef viewUserImages():\n    return render_template('user_images.html')\n\n\n# Route to get all images belonging to the current user\n@images_bp.route('/userImages', methods=['POST'])\n@jwt_required()  # Requires authentication via JWT\ndef get_user_images():\n    try:\n        current_user_id = get_jwt_identity()  # Gets the user ID from the JWT token\n\n        # Check whether the user exists in the database\n        user = User.query.get(current_user_id)\n        if not user:\n            return jsonify({'message': 'User not found'}), 404\n\n        # Get all images associated with the current user\n        user_images = Image.query.filter_by(user_id=current_user_id).all()\n\n        # Build a list of dictionaries with the image information\n        images_data = []\n        for image in user_images:\n            images_data.append({\n                'id': image.id,\n                'fecha_registro': image.fecha_registro,\n                'ubicacion': image.ubicacion,\n                'user_id': image.user_id\n                # More fields can be added if needed\n            })\n\n        return jsonify(images_data), 200\n\n    except Exception as e:\n        return jsonify({'message': 'Error retrieving the 
user images', 'error': str(e)}), 500\n    \n\n\n\n# Route to add a new photo for the current user\n@images_bp.route('/add_photo_user', methods=['POST'])\n@jwt_required()  # Requires authentication via JWT\ndef add_photo_user():\n    try:\n        current_user_id = get_jwt_identity()  # Gets the user ID from the JWT token\n\n        # Check whether the user exists in the database\n        user = User.query.get(current_user_id)\n        if not user:\n            return jsonify({'message': 'User not found'}), 404\n\n        foto = request.files.get('foto')  # Gets the newly uploaded image file\n\n        # Check whether a new image was provided\n        if foto:\n            # Check that the file extension is valid (e.g., only allow JPEG images)\n            if not allowed_file_extension(foto.filename):\n                return jsonify({'message': 'File type not allowed for the image'}), 400\n            \n            # Create the destination directory if it does not exist\n            profile_pictures_folder = os.path.join(app.config['PROFILE_PICTURES_FOLDER'], '')\n            os.makedirs(profile_pictures_folder, exist_ok=True)\n\n            # Generate a unique name for the new image\n            unique_filename = generate_unique_filename(foto.filename)\n\n            # Save the new image in the profile pictures directory\n            foto.save(os.path.join(profile_pictures_folder, unique_filename))\n\n            # Create an Image instance and add the data to the Images table\n            new_image = Image(\n                fecha_registro= dt.datetime.now(),  # You can change this to your needs\n                ubicacion=unique_filename,\n                user_id=current_user_id\n            )\n            \n            # Add the new image to the database session\n            db.session.add(new_image)\n            db.session.commit()\n\n            return jsonify({'message': 'Profile picture and Images table updated successfully',\n                            'nameImage': unique_filename}), 200\n\n        return jsonify({'message': 'No image was provided to update'}), 400\n\n    except Exception as e:\n        return jsonify({'message': 'Error updating the profile picture and the Images table', 'error': str(e)}), 500\n    \n\n\n\n@images_bp.route('/processImage', methods=['POST'])\n@jwt_required()  # Requires authentication via JWT\ndef process_image():\n    try:\n        current_user_id = get_jwt_identity()  # Gets the user ID from the JWT token\n\n        # Check whether the user exists in the database (add your own logic here)\n\n        data = request.get_json()  # Gets the JSON data from the request\n        image_location = data.get('filename')  # Gets the image location from the JSON data\n\n        # Check whether a valid image location was provided\n        if image_location:\n            # Build the full path to the image\n            image_path = os.path.join(os.getcwd(), app.config['PROFILE_PICTURES_FOLDER'], image_location.lstrip('/'))\n            print(\"------------\")\n            print(image_path)\n            print(\"------------\")\n\n            # Check whether the image file exists\n            if os.path.isfile(image_path):\n                # Use pytesseract to extract the text from the image\n                text = pytesseract.image_to_string(IMGPIL.open(image_path))\n\n                # Return the extracted text as a JSON response\n                return jsonify({'text': text}), 200\n            else:\n                return jsonify({'message': 'The image location is not valid'}), 400\n        else:\n            return jsonify({'message': 'No valid image location was provided'}), 400\n\n    except Exception as e:\n        return jsonify({'message': 'Error processing the image', 'error': str(e)}), 500","repo_name":"CristianPumaES6/L3V3L_UP_T3K","sub_path":"app/routes/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":9048,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
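[editor's note] A minimal sketch of calling the /processImage endpoint above from a client. The host, JWT value, and filename are made-up examples; the JSON body shape matches what process_image reads:

    import requests

    token = 'eyJ...'  # hypothetical JWT obtained from the app's login flow
    resp = requests.post(
        'http://localhost:5000/processImage',          # assumed host/port
        json={'filename': 'abc123.jpg'},               # a previously uploaded image name
        headers={'Authorization': f'Bearer {token}'},  # required by @jwt_required()
    )
    print(resp.json().get('text'))  # OCR text extracted server-side by pytesseract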
500","repo_name":"CristianPumaES6/L3V3L_UP_T3K","sub_path":"app/routes/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":9048,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12709978500","text":"import json\nimport pytest\nimport structlog\nimport time\nimport yaml\n\nfrom unittest import mock\nfrom .. import stats # The filename stats.py, not something in __init__.py\nfrom ..crud_views import log as crud_view_log\nfrom .test_post_put_patch import COLLECTION_URL, item_with_uuid\n\n\n@pytest.fixture\ndef mocked():\n return mock.patch.object(stats, 'log')\n\n\ndef test_stats_tween_logs_stats(testapp, mocked):\n ''' plus in this case we always log url '''\n with mocked as mocked_log :\n testapp.get(\"/\")\n assert mocked_log.bind.call_count == 2\n assert mocked_log.bind.call_args_list[0] == mock.call(url_path='/',\n url_qs='',\n host='localhost')\n mocked_log.bind.assert_called_with(db_count=mock.ANY, db_time=mock.ANY,\n rss_begin=mock.ANY, rss_change=mock.ANY,\n rss_end=mock.ANY, wsgi_begin=mock.ANY,\n url_path='/',\n url_qs='',\n host='localhost',\n wsgi_end=mock.ANY, wsgi_time=mock.ANY)\n return\n\n\ndef test_stats_tween_logs_telemetry_id(testapp, mocked):\n with mocked as mocked_log:\n res = testapp.get(\"/?telemetry_id=test_telem\")\n assert mocked_log.bind.call_count == 2\n assert mocked_log.bind.call_args_list[0] == mock.call(telemetry_id='test_telem',\n url_path='/',\n url_qs='telemetry_id=test_telem',\n host='localhost')\n mocked_log.bind.assert_called_with(db_count=mock.ANY, db_time=mock.ANY,\n rss_begin=mock.ANY, rss_change=mock.ANY,\n rss_end=mock.ANY, wsgi_begin=mock.ANY,\n wsgi_end=mock.ANY, wsgi_time=mock.ANY,\n url_path='/',\n url_qs='telemetry_id=test_telem',\n host='localhost',\n telemetry_id='test_telem')\n\n # we should also return telem in the header\n assert 'telemetry_id=test_telem' in res.headers['X-Stats']\n return\n\n\ndef test_telemetry_id_carries_through_logging(testapp, external_tx):\n mocked = mock.patch.object(crud_view_log, 'info')\n with mocked as mock_log:\n res = testapp.post_json(COLLECTION_URL + \"?telemetry_id=test&log_action=action_test\", item_with_uuid[0], status=201)\n mock_log.assert_called_with(event=\"add_to_indexing_queue\", uuid=mock.ANY,\n sid=mock.ANY, telemetry_id=mock.ANY)\n # also make sure we have a logger that has defaultsset from stats.py\n logger = crud_view_log.bind()\n assert logger._context.get('url_path') == COLLECTION_URL\n assert logger._context.get('url_qs') == \"telemetry_id=test&log_action=action_test\"\n assert logger._context.get('host') == 'localhost'\n assert logger._context.get('telemetry_id') == 'test'\n assert logger._context.get('log_action') == 'action_test'\n\n\ndef test_logging_basic(testapp, external_tx, capfd):\n '''\n in prod logging setup, an Elasticsearch server is provided. Logs will\n be piped to the appropriate logs (e.g. httpd/error_log) and also sent\n to Elasticsearch. 
That is tested here in snovault in test_indexing;\n here, we configure the logs without the es_server to ensure that\n the rest of it works\n '''\n # something that generates logs\n # add a telemetry id and some log contents using a query string\n res = testapp.post_json(COLLECTION_URL + \"?telemetry_id=test&log_action=action_test\", item_with_uuid[0], status=201)\n # multiple logs emitted in this process, must find the one we want\n check_logs = capfd.readouterr()[-1].split('\\n')\n log_msg = None\n for record in check_logs:\n if not record:\n continue\n try:\n proc_record = yaml.safe_load('{' + record.strip().split('{', 1)[1])\n except:\n continue\n if not isinstance(proc_record, dict):\n continue\n if proc_record.get('telemetry_id') == 'test':\n log_msg = proc_record\n assert '@timestamp' in log_msg\n assert 'logger' in log_msg\n assert 'level' in log_msg\n\n\ndef test_logging_see_debug_log(testapp, capfd):\n \"\"\"\n Tests that when we hit a route with the @debug_log decorator we see an appropriate log statement\n \"\"\"\n testapp.get('/') # all routes are marked\n check_logs = capfd.readouterr()[-1].split('\\n')\n for record in check_logs:\n if not record:\n continue\n if 'DEBUG_FUNC' in record:\n return\n raise AssertionError(\"Did not see 'DEBUG_FUNC' in a log message\")\n","repo_name":"dmichaels/harvard-snovault","sub_path":"snovault/tests/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40188847484","text":"from flask import Flask, render_template, request, jsonify\nfrom bs4 import BeautifulSoup\nimport requests\nfrom pymongo import MongoClient\nimport certifi\n\napp = Flask(__name__)\n\nclient = MongoClient('mongodb+srv://sparta:test@cluster0.i0rbuls.mongodb.net/?retryWrites=true&w=majority')\ndb = client.dbsparta\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route(\"/movie\", methods=[\"POST\"])\ndef movie_post():\n URL = \"https://movie.daum.net/ranking/reservation\"\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n data = requests.get(URL, headers=headers)\n soup = BeautifulSoup(data.text, 'html.parser')\n\n lis = soup.select(\"#mainContent > div > div.box_ranking > ol > li\")\n comment_content = soup.select(\"#mainContent > div > div.box_ranking > ol > li > div > div.thumb_item div.poster_info a\")\n\n ## 기존 데이터 삭제\n db.movie.delete_many({})\n\n for li, comm in zip(lis, comment_content):\n title = li.select_one('.link_txt').text\n rate = li.select_one('.txt_num').text\n poster = li.select_one('.poster_movie > img')['src']\n rank = li.select_one('.rank_num').text\n url = li.select_one('.link_txt')['href']\n summary_content = li.select_one('.link_story').text\n\n if title is not None:\n doc = {\n 'title': title,\n 'poster': poster,\n 'rank': rank,\n 'rate': rate,\n 'url': url,\n 'content': summary_content\n }\n\n db.movie.insert_one(doc)\n \n\n return jsonify({'msg': 'POST 연결 완료!'})\n\n@app.route(\"/book\", methods=[\"POST\"])\ndef book_get():\n seat_receive = request.form['seat_give']\n year_receive = request.form['year_give']\n month_receive = request.form['month_give']\n date_receive = request.form['date_give']\n name_receive = request.form['name_give']\n \n book_poster = list(db.movie.find_one({'title':name_receive}))\n\n doc = {\n 'seat': seat_receive,\n 'year': year_receive,\n 'month': 
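`test_logging_basic` above captures stderr with pytest's `capfd` fixture and re-parses each record by splitting at the first `{`. A compact sketch of the same capture-and-parse idea (run under pytest; `json` stands in for the `yaml.safe_load` used above):

```python
# Capture stderr with capfd, then recover a JSON payload per log line.
import sys
import json

def emit(record: dict):
    sys.stderr.write("INFO " + json.dumps(record) + "\n")

def test_find_record(capfd):
    emit({"telemetry_id": "test", "msg": "hello"})
    err = capfd.readouterr()[-1]
    found = None
    for line in err.split("\n"):
        if "{" not in line:
            continue
        payload = json.loads("{" + line.split("{", 1)[1])
        if payload.get("telemetry_id") == "test":
            found = payload
    assert found and found["msg"] == "hello"
```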
month_receive,\n 'date': date_receive,\n 'name': name_receive,\n 'poster':book_poster['poster']\n }\n db.booked.insert_one(doc)\n\n return jsonify({'msg': '예약 저장 완료!'})\n\n@app.route(\"/book\", methods=[\"GET\"])\ndef book_show():\n all_books = list(db.booked.find({},{'_id':False}))\n return jsonify({'result': all_books})\n\n\n@app.route(\"/movie\", methods=[\"GET\"])\ndef movie_get():\n all_movie = list(db.movie.find({},{'_id':False}))\n return jsonify({'result': all_movie})\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=8000, debug=True)\n","repo_name":"littleduck1219/movie_site","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10613166358","text":"from ..generators import *\nfrom .general import footer, header\nfrom .util import addBoilerplate\nfrom .general import namesNotationsDenotedBy\n\n\ndef localFun(i):\n nb = TD(str(i or 1))\n fig = TD(QuestionnedField(field=f\"Step{i}\", child={\n f\"Figure{i}\"}, classes=\"Definition\"))\n step = TD(QuestionnedField(field=f\"Step{i}\", classes=\"Definition\"))\n return {\"child\": TR([nb, step, fig]),\n \"questions\": {f\"Step{i}\"},\n \"filledFields\": [f\"Step{i}\", f\"Figure{i}\"]}\n\n\ndef globalFun(l):\n result = \"No figure at all\"\n for i in [\"Figure\", \"Figure2\", \"Figure3\", \"Figure4\", \"Figure5\", \"Result\"]:\n result = FilledOrEmpty(i,\n {i},\n result)\n description = TH(QuestionnedField(\n field=f\"Description\", classes=\"Notation\"))\n result = TH(QuestionnedField(field=f\"Description\",\n child=result, classes=\"Notation\"))\n return Table([TR([description, result])]+l)\n\n\ndef steps(i):\n return NumberedFields(fieldPrefix=\"Step\",\n greater=i,\n localFun=localFun,\n globalFun=globalFun)\n\n\nproperties = TableFields(\n [\"Interest\", \"Types\", \"Fiable\", \"Réglable\", \"Décoratif\", \"Défaisable\"])\n\n\ndef noeudContruction(i):\n return addBoilerplate([namesNotationsDenotedBy, properties , steps(i)])\n\n\nnoeud = noeudContruction(5)\n","repo_name":"Arthur-Milchior/anki-template-card-type","sub_path":"examples/noeud.py","file_name":"noeud.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"13870170028","text":"import os\n\nimport numpy\nfrom flask import Flask, render_template, request, session, redirect, url_for\nimport cv2\nfrom sift import sift_setup\nimport uuid\n\n# setup\napp = Flask(__name__)\n\napp.secret_key = 'jYxz/MBS&CXNHc.Gb6/WR^b[s/%fNLG'\n\n\n# render html \n@app.route('/')\ndef index():\n\n # If user hasn't been on the site before, assign them a UUID\n if 'UUID' not in session:\n session['UUID'] = uuid.uuid4().hex\n\n return render_template('index.html')\n\n\n# File upload\n@app.route('/upload', methods=['POST'])\ndef file_upload():\n\n # If post method\n if request.method == 'POST':\n\n # Get images and min kp from form\n img1 = img_upload_to_cv2(request.files['img1'].read())\n img2 = img_upload_to_cv2(request.files['img2'].read())\n kp = float(request.form.get('kp'))\n\n # Get result, time, and image from sift function\n result, time, image = sift_setup(img1, img2, kp)\n\n # Save image to static/images folder\n cv2.imwrite(os.path.join(os.getcwd(), 'flask', 'static', 'images', (session['UUID'] + '.jpg')), image)\n\n # Prepare data for jinja template\n data = [result, time, url_for('static', filename='images/' + session['UUID'] + '.jpg')]\n\n # 
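A note on the booking route just above: PyMongo's `find_one()` returns a single document (a dict) or `None`, so wrapping it in `list()` yields the dict's *keys*, and the later `book_poster['poster']` lookup would fail. A corrected sketch of the lookup:

```python
# find_one() returns one document (dict) or None; no list() wrapper needed.
def get_poster(movies_collection, title: str):
    doc = movies_collection.find_one({"title": title})
    if doc is None:
        return None
    return doc.get("poster")

# list() is for cursors returned by find():
# all_movies = list(db.movie.find({}, {"_id": False}))
```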
Render the template\n return render_template('results.html', data=data)\n\n else:\n return redirect(url_for('index'))\n\n\n# Helper function to convert images\ndef img_upload_to_cv2(image_str):\n return cv2.imdecode(numpy.frombuffer(image_str, numpy.uint8), cv2.IMREAD_UNCHANGED)\n\n\napp.run(debug=True)\n","repo_name":"JGehl99/COMP-4990-Project","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"65256811","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nimport sqlite3\nfrom datetime import datetime\nfrom twilio.twiml.messaging_response import MessagingResponse\n\napp = Flask(__name__)\ndatabase_name = \"database.db\" \n\n# helper, not a route: it takes parameters, so it cannot be dispatched directly\ndef send_message(ID,last_message):\n conn = sqlite3.connect(database_name)\n c = conn.cursor()\n c.execute(\"INSERT INTO messages (ID,message,time) VALUES (?, ?, ?) \",\n (ID,last_message,datetime.now()))\n conn.commit()\n conn.close()\n\n return str(ID)+ \" \"+str(last_message)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html') \n\ndef delete_all(ID):\n conn = sqlite3.connect(database_name)\n c = conn.cursor()\n c.execute(\"DELETE from messages where ID=?\",(ID,))\n conn.commit()\n conn.close()\n\n@app.route(\"/web\",methods=['GET','POST'])\ndef web_reply():\n new_message = send_message(str(request.remote_addr),request.values.get(\"Body\", None))\n return new_message\n\n@app.route(\"/sms\", methods = ['GET', 'POST'])\ndef sms_reply():\n new_message = send_message(str(request.remote_addr),request.values.get(\"Body\", None))\n resp = MessagingResponse()\n resp.message(new_message)\n return str(resp)\n\ndef select_all(ID):\n conn = sqlite3.connect(database_name)\n c = conn.cursor()\n c.execute(\"SELECT * from messages where ID=?\",(ID,))\n\n rows = c.fetchall()\n for row in rows:\n print(row)\n\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"QuintinN/HopHacks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28581005312","text":"# 1-task\r\nname = input(\"What should we call you? \")\r\nprint(\"Good afternoon,\", name, \"!\")\r\nage = int(input(\"How old are you?\"))\r\nprint(\"Nice to meet you,\", name, \", who is\", age, \"years old!\")\r\nanimal = input(\"What is your favorite animal?\")\r\nprint(f\"{animal} is a great choice!\")\r\n\r\n# 2-task\r\ntime = int(input(\"Enter your time in seconds\"))\r\nhours = time // 3600\r\nminutes = time // 60 - hours * 60\r\nseconds = time % 60\r\nprint(f\"{hours:2}:{minutes:02}:{seconds:02}\")\r\n\r\n# 3-task\r\nn = input(\"Enter any digit\")\r\nwhile n < '0':\r\n print(\"Enter a whole digit greater than 0. Please try again.\")\r\n break\r\n#n = input(\"Enter any whole number greater than 0: \")\r\nprint(f\"{n} + {n + n} + {n + n + n} = {int(n) + int(n + n) + int(n + n + n)}\")\r\n\r\n# 4-task\r\nnum_1 = int(input(\"Please enter a positive whole number \"))\r\ngreatest_dig = 0\r\nnum = num_1\r\n\r\nwhile num > 0:\r\n dig = num % 10\r\n if dig > greatest_dig:\r\n greatest_dig = dig\r\n if greatest_dig == 9:\r\n break\r\n num = num // 10\r\n\r\nprint(f'The greatest digit in the number {num_1} is {greatest_dig}')\r\n\r\n# 5-6-tasks\r\ncosts = float(input(\"Please enter your company's costs.\"))\r\nrevenue = float(input(\"Great! Now enter your company's revenue!\"))\r\n\r\nif costs < revenue:\r\n print(f\"Congratulations! You are operating at a profit of {revenue - costs}\")\r\n print(f\"Your return on revenue was: {(revenue - costs)/revenue*100:1f}\")\r\n personal = int(input(\"And how many people work at your company?\"))\r\n print(f\"If you distribute the profit to your employees, each of them will get: {(revenue - costs)/personal:1f}\")\r\nelif costs == revenue:\r\n print(\"You make no profit, but you have no loss either!\")\r\nelse:\r\n print(f\"Alas, you are operating at a loss of {(revenue - costs)*-1}\")\r\n\r\n# 6-task\r\nwhile True:\r\n days = 1\r\n a = float(input(\"Good afternoon! What was your result on your first run?\"))\r\n b = float(input(\"What result do you want to achieve?\"\r\n \" We will tell you on which day you can reach it, increasing the load by 10% every day ;)\"))\r\n if a <= 0 or b < 0:\r\n print(\"The result must be greater than zero. The starting value must be greater than or equal to zero\")\r\n else:\r\n while a < b:\r\n a += a * 0.1\r\n days += 1\r\n print(f\"You will achieve the result in {int(days)} days\")\r\n break","repo_name":"kanykeia/python","sub_path":"1 урок.py","file_name":"1 урок.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25091916501","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef show_3D_single(data, id, cls_id):\r\n # data[:, :3] = pc_normalize(data[:, :3])\r\n data = data[data[:, 3] == cls_id]\r\n print(data.shape)\r\n colormap = []\r\n lab = np.asarray([[184, 179, 168],\r\n [255, 0, 0],\r\n [255, 127, 0],\r\n [255, 255, 0],\r\n [0, 255, 0],\r\n [0, 0, 255],\r\n [38, 0, 51],\r\n [148, 0, 211]]) / 255.0\r\n colormap = [[] for _ in range(data.shape[0])]\r\n for i in range(data.shape[0]):\r\n colormap[i] = lab[int(data[i, 3])]\r\n # plt.figure(figsize=(10, 10))\r\n ax = plt.subplot(111, projection='3d')\r\n # set the viewing angle\r\n # ax.view_init(elev=30, azim=-60)\r\n # turn off the axes\r\n # plt.axis('off')\r\n # set the axis ranges\r\n #ax.set_zlim3d(0, 0.1)\r\n #ax.set_ylim3d(-0.25, 0.35)\r\n #ax.set_xlim3d(-0.75, 0.35)\r\n ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=colormap, s=20, marker='.') # , cmap='plasma')\r\n #plt.savefig('/home/zhenyu/code_test_2/point_save/' + '%d.png' % id, dpi=500, bbox_inches='tight', transparent=True)\r\n plt.close()\r\n #plt.show()","repo_name":"Gary3410/shape_estimation","sub_path":"plot_point.py","file_name":"plot_point.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35258228126","text":"import matplotlib.pyplot as plt\n\n#for bar graph\nimport numpy as np\n\nprint(\"\"\"Hello and Welcome to Calorie Targeter\n\nWe must collect some information in order to estimate your caloric
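The `img_upload_to_cv2` helper earlier in this stretch decodes an uploaded byte string into an OpenCV image; `numpy.fromstring` is deprecated for binary data, so `frombuffer` is the current spelling. A runnable round trip:

```python
# Bytes -> OpenCV image, using frombuffer instead of the deprecated fromstring.
import cv2
import numpy as np

def bytes_to_cv2(image_bytes: bytes):
    buf = np.frombuffer(image_bytes, dtype=np.uint8)  # 1-D byte buffer
    return cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)    # decode to an image array

if __name__ == "__main__":
    ok, encoded = cv2.imencode(".png", np.zeros((4, 4, 3), dtype=np.uint8))
    img = bytes_to_cv2(encoded.tobytes())
    print(img.shape)  # (4, 4, 3)
```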
intake. After which, \nwe will enter it into the revised Harris Benedict Equation and provide you the results.\"\"\")\n\n\nprint()\ngender = input(\"Let us being by first asking what is your gender? male or female \")\nweight_kilo = int(input(\"What is your weight in pounds? \"))\ncm_height = int(input(\"How tall are you in inches? \"))\nage = int(input(\"What is your age? \"))\n\n# convert weight and height to pounds and feet\nweight = weight_kilo / 2.205\nheight = cm_height * 2.54\n# print(weight)\n# print(height)\n\n# allow the user to identify their activity level\nexercise_level = int(input(\"\"\"\nWhat is your exercise level, 1-5 \n1 Sedentary: little or no regular exercise \n2 Mild : intensive exercise for at least 20 minuets, 1 to 3 times a week\n3 Moderate: intensive exercise for at least 30 to 60 minutes 3 to 4 times weekly\n4 Heavy: intensive exercise for 60 minuets or greater 5 to 7 days weekly\n5 Extreme: Exceedingly active and or very demanding activities. \n\"\"\"))\n\n\nif gender == 'male':\n total_calories_needed = 0\n # (REVISED) HARRIS BENEDICT EQUATIONS:\n bmr = 88.362 + (13.397 * weight) + (4.799 * height) - (5.677 * age)\n # print(bmr)\n\n # Activity or stress factors\n if exercise_level == 5:\n total_calories_needed = bmr * 1.9\n elif exercise_level == 4:\n total_calories_needed = bmr * 1.7\n elif exercise_level == 3:\n total_calories_needed = bmr * 1.5\n elif exercise_level == 2:\n total_calories_needed = bmr * 1.3\n elif exercise_level == 1:\n total_calories_needed = bmr * 1.2\n\nelif gender == 'female':\n # (REVISED) HARRIS BENEDICT EQUATIONS:\n bmr = 447.593 + (9.247 * weight) + (3.098 * height) - (4.330 * age)\n # print(bmr)\n\n # Activity or stress factors\n if exercise_level == 5:\n total_calories_needed = bmr * 1.9\n elif exercise_level == 4:\n total_calories_needed = bmr * 1.725\n elif exercise_level == 3:\n total_calories_needed = bmr * 1.55\n elif exercise_level == 2:\n total_calories_needed = bmr * 1.375\n elif exercise_level == 1:\n total_calories_needed = bmr * 1.2\n\nhigh_weight_loss = total_calories_needed * .71\nstandard_weight_loss = total_calories_needed * .85\nmild_weight_loss = total_calories_needed * .9\nmaintain = total_calories_needed * 1\nbulking = total_calories_needed + 250\n\n# print(f\"\"\"\n# Consume the following target amount of calories for your goal\n#\n# 1 Bulk, weight gain: {round(bulking,2)} calories\n# 2 Maintain weight: {round(total_calories_needed,2)} calories\n# 3 Mild weight loss: {round(mild_weight_loss,2)} calories\n# 4 Standard weight loss: {round(standard_weight_loss,2)} calories\n# 5 High weight loss: {round(extreme_weight_loss, 2)} calories\n# \"\"\")\n\nweight_goal = int(input(\"\"\"What is your weight loss/weight gain goal?\"\"\n1: High weight loss:\n2: Standard weight loss\n3: Mild weight loss\n4: Maintain weight\n5: Bulk\n\"\"\"))\nif weight_goal == 1:\n total_calories_needed = high_weight_loss\nelif weight_goal == 2:\n total_calories_needed = standard_weight_loss\nelif weight_goal == 3:\n total_calories_needed = mild_weight_loss\nelif weight_goal == 4:\n total_calories_needed = maintain\nelif weight_goal == 5:\n total_calories_needed = bulking\n\n\nconsumed_today = int(input(\"Lastly, how many calories have you had today? 
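The revised Harris-Benedict arithmetic above, factored into one function. Inputs are metric (kg, cm) and the 1–5 activity multipliers follow the script's female branch; the exact factors vary slightly between sources:

```python
# Revised Harris-Benedict BMR times an activity multiplier.
ACTIVITY = {1: 1.2, 2: 1.375, 3: 1.55, 4: 1.725, 5: 1.9}

def daily_calories(sex: str, weight_kg: float, height_cm: float,
                   age: int, level: int) -> float:
    if sex == "male":
        bmr = 88.362 + 13.397 * weight_kg + 4.799 * height_cm - 5.677 * age
    else:
        bmr = 447.593 + 9.247 * weight_kg + 3.098 * height_cm - 4.330 * age
    return bmr * ACTIVITY[level]

print(round(daily_calories("male", 80, 180, 30, 3)))  # 2873
```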
\"))\n# num = int(input(\"number?\"))\n\n\ncalories_needed_math = total_calories_needed - consumed_today\nprint(f'''\nIn order to meet your caloric intake goal, you need to consume \n{round(calories_needed_math, 2)} more calories today\n''')\nprint(\"Please refer to the following graphs for visualization.\")\n# ==============================================================================\n# Graph section\n\nfig, ax = plt.subplots(figsize=(10, 10))\n\n# Graph the data\n# X axis\nx = [1, 2, 3, 4, 5]\n# Y axis\ny = [high_weight_loss, standard_weight_loss, mild_weight_loss, maintain, bulking]\n\n\nplt.yticks([100, 200, 300, 400, 500, 600, 700, 800, 900,\n 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900,\n 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900,\n 3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700, 3800, 3900,\n 4000, 4100, 4200, 4300, 4400, 4500, 4600, 4700, 4800, 4900])\n\n\n\nplt.plot(x, y, color=\"dodgerblue\", label='Target Calories', linewidth=8)\nplt.plot([x], [y], marker='o', markersize=8, color=\"black\")\n\n\n\n\n\n# Plot dot of\nx2 = [weight_goal]\ny2 = [consumed_today]\nplt.plot([x2], [y2], marker='X', markersize=20, color=\"red\", label='Current Calories For Weight Goal')\n\n\n\n# Graph title\nplt.title('Calories Needed For Exercise Level')\nplt.legend()\n# Add grid lines\nplt.grid(True)\n# Labels y axis\nplt.ylabel('Calories')\n# Labels x axis\nplt.xlabel('Weight Goal')\n# X axis metrics/label\nlabels = ['High Weight Loss', 'Standard Weight loss', 'Mild Weight Loss', 'Maintain', 'Bulk']\nplt.xticks(x, labels)\n\n# Start and stop point for arrows made into variables\nxy_arrowy = total_calories_needed - 50\nxytext_arrowy = consumed_today + 50\n\n# Arrow section\nax.annotate(\"\",\n # Arrow head\n xy=(weight_goal, xy_arrowy), xycoords='data',\n # Arrow base\n xytext=(weight_goal, xytext_arrowy), textcoords='data',\n # Curved arrow\n size=40, va=\"center\", ha=\"center\",\n arrowprops=dict(arrowstyle=\"simple\",\n color=\"red\",\n connectionstyle=\"arc3,rad=-0.1\"),\n )\n\n# Auto Wrapping text\n\nt = (f\"Consume {round(calories_needed_math, 2)} more calories today to meet your daily Goal\")\nplt.text(weight_goal, xy_arrowy - 300, t, fontsize=14, family='serif', style='italic', ha='left', rotation=-15, wrap=True)\n\n# Makes the graph show\nplt.show()\nplt.axis([0, 5, 0, 4000])\n\n# Bar graph\n#=============================================================================================\n\nlabels = ['Hight Weight Loss', 'Standard Weight loss', 'Mild Weight Loss', 'Maintain', 'Bulk']\ncalories_consumed = [consumed_today, consumed_today, consumed_today, consumed_today, consumed_today]\ncalories_needed = [round(high_weight_loss, 1), round(standard_weight_loss, 1), round(mild_weight_loss, 1), round(maintain, 1), round(bulking, 1)]\n\nx = np.arange(len(labels)) # the label locations\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, calories_consumed, width, label='Consumed Calories')\nrects2 = ax.bar(x + width/2, calories_needed, width, label='Needed Calories')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Calories')\nax.set_title('Calories Needed For Exercise Level')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n 
xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n\nautolabel(rects1)\nautolabel(rects2)\n\nfig.tight_layout()\n\nplt.show()","repo_name":"PdxCodeGuild/class_orca","sub_path":"code/matthew/python/mini_capstone.py","file_name":"mini_capstone.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"18901648253","text":"import sys\nimport rospy\nfrom subscriber import *\nimport nubot_common.msg as nubotmsg\nimport numpy as np\nimport math\n\nclass PITracker(RobotSubscriber, GoalSubscriber) :\n def __init__(self, *args, **kwargs):\n agent_id = 1\n all_names = rospy.get_param_names()\n if '/agent_id' in all_names :\n rospy.logwarn('retrieve agent_id from param')\n agent_id = rospy.get_param('/agent_id')\n rospy.logwarn('agent : %s'%agent_id)\n # initialize super class\n GoalSubscriber.__init__(self)\n RobotSubscriber.__init__(self,'/nubot'+str(agent_id)+'/omnivision/OmniVisionInfo', agent_id)\n ### create nav path subscriber\n self.sub = {\n 'pos' : TrajectorySubscriber('robosoccer_trajectory_pos'),\n 'vel' : TrajectorySubscriber('robosoccer_trajectory_vel')\n }\n self.sub['pos'].register_callback(self.trajectory_callback)\n self.pub = rospy.Publisher('/nubot'+str(agent_id)+'/nubotcontrol/velcmd', nubotmsg.VelCmd, queue_size=3)\n self.error = {\n 'x' : .0, 'y' : .0, 'w' : .0,\n 'sum' : {\n # sum for compute integral part of control\n 'x' : .0, 'y' : .0, 'w' : .0,\n }\n }\n self.control = {\n 'x' : .0, 'y' : .0, 'w' : .0,\n 'com' : {\n # commanded velocity\n 'x' : .0, 'y' : .0, 'w' : .0,\n }\n }\n self.pid = {\n # some tuning parameter\n 'p' : np.diag([1., 1., 1.]), \n 'i' : np.diag([.2, .2, .2])\n }\n # characteristic polynomial for computing gain\n self.char_poly = {\n 'p' : np.diag([.7, .7, .7]), \n 'i' : np.diag([.05, .05, .05])\n }\n # command (or reference if you like)\n self.command = {\n 'pos' : {'x' : .0, 'y' : .0, 'w' : .0,},\n 'vel' : {'x' : .0, 'y' : .0, 'w' : .0,}\n }\n self.enable = False\n rospy.logwarn('READY')\n\n def compute_gain_from_char_poly(self) :\n # t0 = rospy.get_time()\n pos, vel = self.command['pos'], self.command['vel']\n poly = self.char_poly\n c, s = math.cos(pos['w']), math.cos(pos['w'])\n u, v = vel['x'], vel['y']\n B = np.matrix([[c, -s, 0],[s, c, 0],[0, 0, 1]])\n A = np.matrix([[0, 0, -u*s-v*c],[0, 0, u*c-v*s],[0, 0, 0]])\n self.pid = {\n # inverse of rotation matrix is it's transpose\n 'i' : -1 * B.transpose() * poly['i'],\n 'p' : B.transpose() * (A - poly['p'])\n # 'p' : np.linalg.inv(-1 * B) * poly['p'],\n # 'i' : np.linalg.inv(B) * (A - poly['i'])\n }\n # rospy.loginfo('computed gain in %s s'%(rospy.get_time() - t0))\n \n def compute_control(self) :\n t0 = rospy.get_time()\n c, pid, e = self.control, self.pid, self.error\n s = e['sum']\n p_term = pid['p'] * np.matrix([[e['x']], [e['x']], [e['w']]])\n i_term = pid['i'] * np.matrix([[s['x']], [s['y']], [s['w']]])\n c['x'] = c['com']['x'] + p_term[0] + i_term[0]\n c['y'] = c['com']['y'] + p_term[1] + i_term[1]\n c['w'] = c['com']['w'] + p_term[2] + i_term[2]\n rospy.loginfo('computed control in %s s'%(rospy.get_time() - t0))\n # for k in set(control.keys()).intersection(error.keys()) :\n # control[k] = control['com'][k] + pid['p'] * error[k] + pid['i'] * error['sum'][k]\n\n def compute_error(self, time) :\n sub = self.sub\n idx = 0\n l = len(sub['pos'].t)\n # if ref is empty we can't do anything\n if l < 1 : return False\n t0 = rospy.get_time()\n pos, vel = 
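The tracker's `compute_gain_from_char_poly` above builds a rotation matrix from `c, s = math.cos(...), math.cos(...)` — the second `cos` looks like a typo for `sin`, and `compute_control` likewise repeats `e['x']` where `e['y']` seems intended. A standalone numpy sketch of the gain computation with those readings:

```python
# PI tracking gains from the characteristic polynomial, per the record above.
import numpy as np

def pi_gains(theta, u, v, poly_p, poly_i):
    c, s = np.cos(theta), np.sin(theta)  # sin assumed where the record has cos
    B = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])  # body->world rotation
    A = np.array([[0, 0, -u * s - v * c],
                  [0, 0, u * c - v * s],
                  [0, 0, 0]])
    KP = B.T @ (A - poly_p)   # proportional gain
    KI = -B.T @ poly_i        # integral gain (B inverse == B transpose)
    return KP, KI

KP, KI = pi_gains(0.3, 1.0, 0.5, np.diag([.7, .7, .7]), np.diag([.05, .05, .05]))
print(KP.shape, KI.shape)  # (3, 3) (3, 3)
```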
sub['pos'], sub['vel']\n # if indexed time behid reference, take first element\n # if beyond last ref, take the last\n # iterate if in between\n rx = (pos.x[0], pos.x[0])\n ry = (pos.y[0], pos.y[0])\n rw = (pos.w[0], pos.w[0])\n rvx = (vel.x[0], vel.x[0])\n rvy = (vel.y[0], vel.y[0])\n rvw = (vel.w[0], vel.w[0])\n if time < sub['pos'].t[0] : idx = 0\n elif time > sub['pos'].t[-1] : \n idx = l-1 \n rx = (pos.x[idx], pos.x[idx])\n ry = (pos.y[idx], pos.y[idx])\n rw = (pos.w[idx], pos.w[idx])\n rvx = (vel.x[idx], vel.x[idx])\n rvy = (vel.y[idx], vel.y[idx])\n rvw = (vel.w[idx], vel.w[idx])\n else :\n for i in range(l) :\n if i == 0 : continue\n t = (sub['pos'].t[i-1], sub['pos'].t[i])\n if (time > t[0]) and (time < t[1]) :\n idx = i\n rx = (pos.x[i-1], pos.x[i])\n ry = (pos.y[i-1], pos.y[i])\n rw = (pos.w[i-1], pos.w[i])\n rvx = (vel.x[i-1], vel.x[i])\n rvy = (vel.y[i-1], vel.y[i])\n rvw = (vel.w[i-1], vel.w[i])\n break\n i = idx\n t = (sub['pos'].t[i-1], sub['pos'].t[i])\n if i==0 : t = (sub['pos'].t[0], sub['pos'].t[0])\n if (time > t[0]) and (time < t[1]) :\n dt = time - t[0]\n ref = self.command\n # save reference trajectory\n ref['vel'] = {\n 'x' : (rvx[0] + (rvx[1]-rvx[0]) * dt),\n 'y' : (rvy[0] + (rvy[1]-rvy[0]) * dt),\n 'w' : (rvw[0] + (rvw[1]-rvw[0]) * dt)\n }\n ref['pos'] = {\n 'x' : (rx[0] + (rx[1]-rx[0]) * dt),\n 'y' : (ry[0] + (ry[1]-ry[0]) * dt),\n 'w' : (rw[0] + (rw[1]-rw[0]) * dt)\n }\n self.control['com'] = self.command['vel']\n # compute error\n ## get computed error\n sum, ex = self.error['sum'], self.error['x']\n ey, ew = self.error['y'], self.error['w']\n ex = self.pos[0] - ref['pos']['x']\n ey = self.pos[1] - ref['pos']['y']\n ## TODO : fix angle shortest path!!!\n ew = self.pos[2] - ref['pos']['w']\n # sum the error for integral term\n sum['x'] = sum['x'] + ex\n sum['y'] = sum['y'] + ey\n sum['w'] = sum['w'] + ew\n rospy.loginfo('computed error in %s s'%(rospy.get_time()-t0))\n return True\n return False\n\n def publish(self) :\n e, c, k = self.error, self.control, self.pid\n s = e['sum']\n vel = nubotmsg.VelCmd()\n vel.Vx, vel.Vy, vel.w = c['x'], c['y'], c['w']\n p, i = k['p'], k['i']\n rospy.logwarn('command\\t:(%s,%s,%s)'%(c['com'][0],c['com'][1],c['com'][1]))\n rospy.logwarn('gain:\\nKP\\n:%s\\nKI:\\n%s)'%(p,i))\n rospy.logwarn('error\\t:(%s,%s,%s)'%(e['x'],e['y'],e['w']))\n rospy.logwarn('control\\t:(%s,%s,%s)'%(c['x'],c['y'],c['w']))\n self.pub.publish(vel)\n\n def trajectory_callback(self, t, x, y, w) :\n # reset error when new trajectory is received\n self.error = {\n 'x' : .0, 'y' : .0, 'w' : .0,\n 'sum' : {\n # sum for compute integral part of control\n 'x' : .0, 'y' : .0, 'w' : .0,\n }\n }\n self.enable = True\n\n def goal_callback(self, msg) :\n rospy.loginfo('pi_tracker goal callback :')\n super(PITracker, self).goal_callback(msg)\n # reset error when goal is changed\n self.error = {\n 'x' : .0, 'y' : .0, 'w' : .0,\n 'sum' : {\n # sum for compute integral part of control\n 'x' : .0, 'y' : .0, 'w' : .0,\n }\n }\n self.enable = True\n \n def callback(self, msg) :\n rospy.loginfo('pi_tracker update :')\n super(PITracker, self).callback(msg)\n # time = self.header.stamp.to_sec()\n time = rospy.get_time()\n if self.compute_error(time) :\n self.compute_gain_from_char_poly()\n self.compute_control()\n 
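Two observations on `compute_error` above: the locals `ex`, `ey`, `ew` are rebound but never written back to `self.error`, so the proportional term reads zeros; and the reference is interpolated as `r0 + (r1 - r0) * dt` without dividing by the segment length, which only holds for unit-spaced samples. The normalized piecewise-linear lookup, reduced to one scalar channel:

```python
# Piecewise-linear trajectory lookup; clamps outside [ts[0], ts[-1]].
def interp_reference(ts, xs, t):
    if t <= ts[0]:
        return xs[0]
    if t >= ts[-1]:
        return xs[-1]
    for i in range(1, len(ts)):
        if ts[i - 1] <= t <= ts[i]:
            frac = (t - ts[i - 1]) / (ts[i] - ts[i - 1])  # normalize by segment
            return xs[i - 1] + frac * (xs[i] - xs[i - 1])

print(interp_reference([0, 1, 2], [0.0, 10.0, 20.0], 1.5))  # 15.0
```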
self.publish()","repo_name":"alifahrri/robosoccer_motion_planning","sub_path":"src/robosoccer_trajectory_tracking/scripts/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"19941935811","text":"adjacencias = []\n\nwith open(\"v1.out\",\"r\") as f:\n for line in f:\n line = line.strip()\n line = line.replace(\",\",\"--\")\n adjacencias.append(line)\n\nprint(adjacencias)\n\nstr_adj = \" \"\nfor i in range(len(adjacencias)-1):\n str_adj += adjacencias[i] + \"; \"\n\nstr_adj+=adjacencias[-1]+\" \"\n\nwith open(\"grafo.dot\", \"w\") as f:\n f.write(\"graph G {\")\n f.write(str_adj)\n f.write(\"} \")","repo_name":"JaasielAbner/Iris-Plotly","sub_path":"Plot2D/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9072235009","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n temp_head_1 = headA\n temp_head_2 = headB\n\n h1 = temp_head_1\n h2 = temp_head_2\n\n while h1:\n h3 = h2\n while h3:\n if h1 == h3:\n print(\"Intersected at '{}'\".format(h1.val))\n return h1\n h3 = h3.next\n h1 = h1.next","repo_name":"shhotu010/dsa_with_python","sub_path":"13_Linked_List_Problems/09_intersection_point.py","file_name":"09_intersection_point.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"25602010542","text":"# 만들 수 없는 금액\n\n# 동네 편의점의 주인인 동빈이는 N개의 동전을 가지고 있습니다. 
\n# 이때 N개의 동전을 이용하여 만들 수 없는 양의 정수 금액 중 최솟값을 구하는 프로그램을 작성하세요.\n# 예를 들어, N = 5 이고, 각 동전이 각각 3, 5, 7원 짜리 동전이라고 가정합시다.\n# 이때 동빈이가 만들 수 없는 양의 정수 금액 중 최솟 값은 1원입니다.\n\n# 입력\n# 5\n# 3 2 1 1 9\n\n# 출력\n# 8\n\nn = int(input())\ncoin_list = list(map(int, input().split()))\n\ncoin_list.sort(reverse= True)\n\n# for target in range(1,1000001):\n# result = target\n# for coin in coin_list:\n# if target - coin >= 0:\n# target -=coin\n# if target != 0:\n# print(result)\n# break\ntarget = 1\nfor x in coin_list:\n if target < x:\n break\n target += x\nprint(target)","repo_name":"HRDI0/This_is_coding_test","sub_path":"#This is coding test-greedy_7.py","file_name":"#This is coding test-greedy_7.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43207083970","text":"'''\nCreated on May 3, 2018\n\n@author: janis\n'''\nimport enum\nfrom types import SimpleNamespace\nimport typing\n\nfrom colito.resolvers import make_enum_resolver\nfrom colito.logging import getModuleLogger\nimport warnings\n\nlog = getModuleLogger(__name__)\n\nclass SummaryError(Exception): pass\n\nclass SummaryOptions(SimpleNamespace):\n def __init__(self, compact=False, **kwargs):\n super().__init__(compact=compact, **kwargs)\n\nDEFAULT_SUMMARY_OPTIONS = SummaryOptions()\n\nclass OnMissing(enum.Enum):\n OMMIT = enum.auto()\n RAISE = enum.auto()\n USE_NONE = enum.auto()\n\nON_MISSING_RESOLVER = make_enum_resolver(OnMissing)\n\n\ndef summary_from_fields(instance, fields, missing=OnMissing.RAISE):\n '''Create a dict from given fields'''\n global ON_MISSING_RESOLVER\n missing = ON_MISSING_RESOLVER.resolve(missing)\n def getattrof(field):\n return getattr(instance,field)\n flds = fields\n if missing == OnMissing.RAISE:\n def fieldvalue(field):\n try:\n return getattrof(field)\n except AttributeError:\n raise SummaryError(f'No field {field} while summarising instance of type {type(instance).__name__}', instance)\n elif missing == OnMissing.OMMIT:\n fieldvalue = getattrof\n flds = tuple(f for f in fields if hasattr(instance, f))\n elif missing == OnMissing.USE_NONE:\n def fieldvalue(field):\n try:\n return getattrof(field)\n except AttributeError:\n return None\n vals = (fieldvalue(fld) for fld in flds)\n return dict(zip(flds, vals))\n\ndef summarise_exception(exc, summary_options:SummaryOptions):\n import traceback as tb\n dct = {'message':str(exc), 'type':type(exc).__name__}\n if hasattr(exc, '__traceback__') and exc.__traceback__ is not None:\n frames = tb.extract_tb(exc.__traceback__)\n dct_frames = []\n for frame in frames:\n dct_frames.append({'name':frame.name,'lineno':frame.lineno,'line':frame.line,'file':frame.file})\n dct['frames'] = dct_frames\n if frames:\n dct.update({f'last_{k}':v for k,v in dct['frames'][0].items()})\n if hasattr(exc,'__context__') and exc.__context__ is not None:\n dct['context'] = summarise_exception(exc.__context__, summary_options)\n return dct\n\n\nclass Summarisable:\n summary_sibling_priority:int = 0\n __slots__ = ()\n def __summary__(self, options:SummaryOptions = DEFAULT_SUMMARY_OPTIONS):\n '''The parameters to be included in the summary as a dict'''\n raise NotImplementedError()\n\n @property\n def __summary_name__(self) -> str:\n '''The name of this summary object'''\n return self.__class__.__name__\n\n\nclass SummarisableAsDict(Summarisable):\n __summary_conversions__ = {}\n __slots__ = ()\n def __summary__(self, options: SummaryOptions = DEFAULT_SUMMARY_OPTIONS):\n smr = self.__summary_dict__(options)\n sc 
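The coin record above sorts with `reverse=True` before the greedy scan, which breaks the invariant — with `3 2 1 1 9` it would print 1 instead of the expected 8. The greedy needs ascending order: keep `target` as the smallest amount not yet known to be makeable, and any coin ≤ `target` extends the range:

```python
# Smallest positive amount that cannot be formed from the coins.
def smallest_unmakeable(coins):
    target = 1
    for c in sorted(coins):      # ascending order is essential
        if c > target:
            break
        target += c              # amounts 1..target+c-1 are now makeable
    return target

print(smallest_unmakeable([3, 2, 1, 1, 9]))  # 8
```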
= _get_summary_conversions(self)\n convs = sc(self.__class__)\n for key,fn in convs.items():\n if key in smr:\n smr[key] = fn(smr[key])\n elif isinstance(key, type):\n for smr_key, smr_val in smr.items():\n if isinstance(smr_val, key):\n smr[smr_key] = fn(smr_val)\n return SummarisableDict(smr)\n\n def __summary_dict__(self, options:SummaryOptions) -> typing.Dict[str, typing.Any]:\n raise NotImplementedError()\n\n def __repr__(self):\n dct = self.__summary_dict__(SummaryOptions(compact=True))\n params_txt = ','.join(f'{key}={value!r}' for key,value in dct.items())\n return f'<{self.__class__.__name__}({params_txt})>'\n\nclass SummaryFields(list):\n '''Used as a base class for the __summary_fields__ member.'''\n __slots__ = ()\n def __call__(self, cls):\n return list(self)\n\ndef _get_summary_fields(what):\n try:\n sf = what.__summary_fields__\n if isinstance(sf, SummaryFields):\n return sf\n else:\n return SummaryFields(sf)\n except AttributeError:\n return SummaryFields()\n\nclass SummaryFieldsAppend(SummaryFields):\n '''When used as a __summary_fields__ member it prepends the base class fields.'''\n __slots__ = ()\n def __call__(self, cls):\n fields = super().__call__(cls)\n base = cls.__bases__[0]\n base_fields = _get_summary_fields(base)\n return base_fields(base) + fields\n\nclass SummaryConversions(dict):\n '''Used as a base class for the __summary_conversions__ member.'''\n __slots__ = ()\n def __call__(self, cls):\n return dict(self)\n\ndef _get_summary_conversions(what):\n try:\n sc = what.__summary_conversions__\n if isinstance(sc, SummaryConversions):\n return sc\n else:\n return SummaryConversions(sc)\n except AttributeError:\n return SummaryConversions()\n\nclass SummaryConversionsAppend(SummaryConversions):\n '''When used as a __summary_conversions__ member it prepends the base class fields.'''\n def __call__(self, cls):\n convs = super().__call__(cls)\n base = cls.__bases__[0]\n base_convs = _get_summary_conversions(base)\n return {**base_convs(base), **convs}\n \nclass SummarisableFromFields(SummarisableAsDict):\n \"\"\" Create a summarisable object with fields those in the __summary_fields__ entry.\n \n \n >>> class Test(SimpleNamespace, SummarisableFromFields):\n ... __summary_fields__ = ['a','b']\n >>> t = Test(a=4,b=5,c=6)\n >>> t.__summary__()\n {'a': 4, 'b': 5}\n >>> class Derived(Test):\n ... 
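`summary_from_fields` above pulls named attributes into a dict with a pluggable missing-field policy. The same idea as a dependency-free sketch (the string policy names mirror the `OnMissing` enum, including its "ommit" spelling):

```python
from types import SimpleNamespace

def summary_from_fields(obj, fields, missing="raise"):
    out = {}
    for f in fields:
        if hasattr(obj, f):
            out[f] = getattr(obj, f)
        elif missing == "use_none":
            out[f] = None
        elif missing == "raise":
            raise AttributeError(f"no field {f!r} on {type(obj).__name__}")
        # missing == "ommit": skip silently
    return out

o = SimpleNamespace(a=1, b=2)
print(summary_from_fields(o, ["a", "b", "c"], missing="ommit"))  # {'a': 1, 'b': 2}
print(summary_from_fields(o, ["a", "c"], missing="use_none"))    # {'a': 1, 'c': None}
```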
__summary_fields__ = ['c','e']\n >>> d = Derived(a=1,b=2,c=3,d=4,e=5,f=6)\n >>> d.__summary__()\n {'c': 3, 'e': 5}\n >>> Derived.__summary_fields__ = SummaryFieldsAppend(['c','e'])\n >>> d.__summary__()\n {'a': 1, 'b': 2, 'c': 3, 'e': 5}\n \"\"\"\n #: Specify the fields used for the summary.\n __summary_fields__ = {}\n __slots__ = ()\n #: Specify the action in case of a missing field.\n __summary_onmissing__ = OnMissing.RAISE\n def __summary_dict__(self, options:SummaryOptions) -> typing.Dict[str, typing.Any]:\n sf = _get_summary_fields(self)\n fields = sf(self.__class__)\n return summary_from_fields(self, fields, missing=self.__summary_onmissing__)\n\nclass SummarisableDict(dict, SummarisableAsDict):\n __slots__ = ()\n def __summary_dict__(self, options:SummaryOptions) -> typing.Dict[str, typing.Any]:\n return self\n\nclass SummarisableException(Exception, SummarisableAsDict):\n __slots__ = ()\n def __summary_dict__(self, summary_options:SummaryOptions):\n return summarise_exception(self, summary_options)\n \n\nclass SummarisableAsList(Summarisable):\n __slots__ = ()\n def __summary__(self, options:SummaryOptions = DEFAULT_SUMMARY_OPTIONS):\n smr = self.__summary_list__(options)\n return SummarisableList(smr)\n\n def __summary_list__(self, options:SummaryOptions) -> typing.List:\n raise NotImplementedError()\n\nclass SummarisableList(list, SummarisableAsList):\n __slots__ = ()\n def __summary_list__(self, options:SummaryOptions) -> typing.List:\n return self\n\n\nclass SummaryState:\n\n def __init__(self, instance, depth, value=None, name=None, parent=None, key=None, must_summarise=None, priority:int=None) -> None:\n self.instance = instance\n self.value = value if value is not None else instance\n self.name = name if name is not None else type(self.value).__name__\n self.depth = depth\n self.parent:SummaryState = parent\n self.key = key\n self.must_summarise = must_summarise if must_summarise is not None else isinstance(self.value, Summarisable) \n self.priority:int = priority if priority is not None else self.value.summary_sibling_priority if hasattr(self.value,'summary_sibling_priority') else 0\n @property\n def cls(self):\n return type(self.instance).__name__\n \n def __repr__(self):\n s = str(self.value)\n sval = f'\"{s:.17}\"...' 
if len(s)>20 else f'\"{s}\"'\n sname = f'{self.key}=' if self.key is not None else f'{self.name}=' if self.name is not None else \"\"\n return f''\n @property\n def path(self):\n path = []\n parent = self\n while True:\n name = parent.key if parent.key is not None else parent.name\n path.append(name)\n parent = parent.parent\n if parent is None:\n break\n if isinstance(parent.value, dict):\n path[-1] = f'.{path[-1]}'\n elif isinstance(parent.value, list):\n path[-1] = f'[{path[-1]}]'\n else:\n raise TypeError(f'Unknown parent value type {type(parent.value).__name__}')\n spath = ''.join(path[::-1]) \n return spath\n\nclass SummaryVisitor:\n def on_start(self, state:SummaryState, actions): pass\n def on_encounter(self, state:SummaryState, actions): pass\n def on_children(self, state:SummaryState, actions, new_actions): pass\n def on_summarised(self, state:SummaryState, actions): pass\n def on_assemble(self, state:SummaryState, actions):pass\n\n\nclass StopFiltering(Exception): pass\n\nclass OnError(enum.Enum):\n IGNORE = enum.auto()\n SUMMARISE = enum.auto()\n RAISE = enum.auto()\n \nON_ERROR_RESOLVER = make_enum_resolver(OnError)\n\nclass Summariser:\n '''Summariser class\n \n >>> s = Summariser()\n >>> o = {'a':'1','b':2}\n >>> s(o)\n {'a': '1', 'b': 2}\n >>> class Container(SimpleNamespace, SummarisableFromFields):\n ... __summary_fields__ = ['a','b']\n ... __summary_onmissing__ = 'ommit'\n >>> c = Container(a=1,b=Container(a=11,b=Container(a=111)))\n >>> s(c)\n {'a': 1, 'b': {'a': 11, 'b': {'a': 111}}}\n >>> c1 = Container(a=SummarisableList([Container(a='l0',b='l1'),Container(a='L0',b='L1')]))\n >>> s(c1)\n {'a': [{'a': 'l0', 'b': 'l1'}, {'a': 'L0', 'b': 'L1'}]}\n '''\n def __init__(self, visitors:typing.Sequence[SummaryVisitor]=[], options:SummaryOptions=DEFAULT_SUMMARY_OPTIONS, onerror=OnError.RAISE):\n self.visitors = []\n for v in visitors:\n self.add_visitor(v)\n self.options = options\n self.onerror = ON_ERROR_RESOLVER.resolve(onerror)\n\n def add_visitor(self, state_visitor:SummaryVisitor):\n self.visitors.append(state_visitor)\n return self\n \n class SummaryAction:\n def __init__(self, summariser, state):\n self.summariser = summariser\n self.state = state\n @property\n def priority(self): return self.state.priority\n @property\n def options(self): return self.summariser.options\n def __call__(self, actions):\n raise NotImplementedError()\n def __repr__(self):\n return f'<{self.__class__.__name__} ({self.state})>'\n \n class SummariseAction(SummaryAction):\n def __init__(self, summariser, state):\n super().__init__(summariser=summariser,state=state)\n @property\n def isroot(self): return self.parent is None\n \n def __call__(self, actions):\n state = self.state\n self.summariser.notify('encounter', state, actions)\n AssembleAction = self.summariser.AssembleAction\n value = state.value\n options = self.options\n if isinstance(value, Summarisable) and state.must_summarise:\n state.value = value.__summary__(options)\n state.name = value.__summary_name__\n state.must_summarise = False\n self.summariser.notify('summarised', state, actions)\n if isinstance(state.value, (SummarisableDict, SummarisableList)):\n new_actions_children = []\n if isinstance(state.value, SummarisableDict):\n state.value = dict(state.value)\n for key,entry in state.value.items():\n new_actions_children.append(self.process_key(state, actions, key, entry))\n elif isinstance(state.value, SummarisableList):\n state.value = list(state.value)\n for key,entry in enumerate(state.value):\n 
new_actions_children.append(self.process_key(state, actions, key, entry))\n action_assemble = AssembleAction(summariser=self.summariser, state=state)\n new_actions = [action_assemble] + new_actions_children\n new_actions_srt = sorted(new_actions, key = lambda a:a.priority)\n self.summariser.notify('children',state, actions, new_actions=new_actions_srt)\n actions += new_actions_srt\n else:\n if state.parent is not None:\n state.parent.value[state.key] = state.value\n return state.value\n def process_key(self, state, actions, key, entry):\n SummariseAction = self.summariser.SummariseAction\n sub_state = SummaryState(entry, depth=state.depth+1, parent=state, key = key)\n action = SummariseAction(self.summariser, sub_state)\n return action\n\n class AssembleAction(SummaryAction):\n priority = -1000 # overrides state priority. If the child state of the root has a lower priority than this, it overrides the final value of the summariser.\n def __call__(self,actions):\n state = self.state\n self.summariser.notify('assemble', state=state, actions=actions)\n if state.parent is not None:\n state.parent.value[state.key] = state.value\n return state.value\n \n \n def notify(self, event, state, actions, *args, **kwargs):\n try:\n for v in self.visitors[::-1]:\n fn = getattr(v,f'on_{event}')\n fn(state=state, actions=actions, **kwargs)\n except StopFiltering:\n pass\n except Exception as e:\n if self.onerror == OnError.SUMMARISE:\n log.exception(f'Error during event {event} state {state}: {e}')\n state.value = {'summary_error':str(e),'state':str(state),'error_type':type(e).__name__}\n else:\n raise\n def _initialise(self, instance, depth):\n state = SummaryState(instance, depth=depth)\n actions = [self.SummariseAction(self, state)]\n self.notify('start', state, actions)\n return actions\n def __call__(self, instance, depth=0):\n actions = self._initialise(instance, depth)\n while actions:\n action = actions.pop()\n try:\n res = action(actions)\n except Exception as e:\n if self.onerror == OnError.IGNORE:\n log.warn(f'Summary error during handling of action {action}.')\n else:\n raise SummaryError(f'While performing action {action}.') from e\n \n return res\n\nclass ConvertingVisitorBase(SummaryVisitor):\n def on_assemble(self, state:SummaryState, actions):\n if isinstance(state.value, dict):\n for key,value in state.value.items():\n self.process_entry(state, key, value)\n elif isinstance(state.value, list):\n for key, value in enumerate(state.value):\n self.process_entry(state, key, value)\n \n def process_entry(self, state, key, value):\n pass\n\nclass ConvertListedVisitorMixin(SummaryVisitor):\n \n def __init__(self, *args, converters:typing.Dict[type, typing.Callable], **kwargs):\n self._converters = converters\n self._classes = tuple(converters.keys())\n super().__init__(*args, **kwargs)\n \n def process_entry(self, state, key, value):\n super().process_entry(state, key, value)\n if isinstance(value, self._classes):\n for cls, conv in self._converters.items():\n if isinstance(value, cls):\n res = conv(value)\n state.value[key] = res\n break\n\n\nclass ConvertDisallowedlowedVisitorMixin(SummaryVisitor):\n def __init__(self, *args, allowed_types: typing.Sequence[type], **kwargs):\n self._allowed_types = tuple(allowed_types)\n super().__init__(*args, **kwargs)\n \n def process_entry(self, state, key, value):\n super().process_entry(state, key, value)\n if not isinstance(value, self._allowed_types):\n self.convert_disallowed(state, key, value)\n\n def convert_disallowed(self, state, key, value):\n 
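The `Summariser.__call__` loop above walks nested structures with an explicit action stack rather than recursion; here is that control flow in miniature, without the visitor machinery:

```python
# Stack-driven traversal of nested dicts/lists, as in Summariser.__call__.
def summarise(root):
    result = {"value": None}
    stack = [(root, result, "value")]  # (node, parent container, key)
    while stack:
        node, parent, key = stack.pop()
        if isinstance(node, dict):
            parent[key] = out = {}
            for k, v in node.items():
                stack.append((v, out, k))
        elif isinstance(node, list):
            parent[key] = out = [None] * len(node)
            for i, v in enumerate(node):
                stack.append((v, out, i))
        else:
            parent[key] = node
    return result["value"]

print(summarise({"a": [1, {"b": 2}], "c": 3}))  # {'a': [1, {'b': 2}], 'c': 3}
```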
state.value[key] = str(value)\n \n\nclass JSONDisallowedTypeWarning(Warning): pass\nclass JSONConvertingVisitor(ConvertDisallowedlowedVisitorMixin, ConvertingVisitorBase):\n __allowed_types__ = [dict, str, list, int, float, tuple, type(None)]\n def __init__(self, warn=True):\n super().__init__(allowed_types=self.__allowed_types__)\n self._warn = True\n def convert_disallowed(self, state, key, value):\n super().convert_disallowed(state, key, value)\n if self._warn:\n warnings.warn(JSONDisallowedTypeWarning(f'Disallowed type {type(value).__name__} at {key} of {state} at {state.path}'))\n\nclass NamingVisitor(SummaryVisitor):\n \"\"\" A visitor that provides names to the objects in the summary.\n \n >>> class Container(SimpleNamespace, SummarisableFromFields):\n ... __summary_fields__ = ['a','b']\n ... __summary_onmissing__ = 'ommit'\n >>> o = Container(a=1,b=SummarisableList([4,5]))\n >>> Summariser(visitors = [NamingVisitor(False,False)])(o)\n {'name': 'Container', 'records': {'a': 1, 'b': {'class': 'SummarisableList', 'entries': [4, 5]}}}\n >>> Summariser(visitors = [NamingVisitor(True,False)])(o)\n {'class': 'Container', 'a': 1, 'b': {'class': 'SummarisableList', 'entries': [4, 5]}}\n >>> Summariser(visitors = [NamingVisitor(False,True)])(o)\n {'name': 'Container', 'records': {'a': 1, 'b': [4, 5]}}\n >>> Summariser(visitors = [NamingVisitor(True,True)])(o)\n {'class': 'Container', 'a': 1, 'b': [4, 5]}\n \"\"\"\n\n def __init__(self,flatten_dicts:bool=True, flatten_lists:bool=True):\n self.flatten_dicts = flatten_dicts\n self.flatten_lists = flatten_lists\n \n def on_assemble(self, state, actions):\n if isinstance(state.value, dict):\n if self.flatten_dicts:\n state.value = {'class':state.name, **state.value}\n else:\n state.value = {'name':state.name, 'records': state.value}\n elif isinstance(state.value, list):\n if self.flatten_lists:\n pass\n else:\n state.value = {'class':state.name, 'entries': state.value}\nclass NamedSummariser(Summariser):\n def __init__(self, *args, flatten_dicts:bool=True, flatten_lists:bool=True,**kwargs):\n super().__init__(*args, **kwargs)\n self.add_visitor(NamingVisitor(flatten_dicts=flatten_dicts, flatten_lists=flatten_lists))\n\n\nclass Fields(dict):\n def __init__(self, name, pairs):\n self.pairs = list(pairs)\n self.name = name\n super().__init__(pairs)\n \n def copy(self):\n return Fields(str(self.name), list(self.items()))\n \n def __iadd__(self, fields:'Fields'):\n if not isinstance(fields, Fields):\n raise TypeError(f'Cannot combine class {self.__class__.__name__} with {fields.__class__.__name__}.')\n self.pairs.append((fields.name,fields))\n return self\n \n def __add__(self, fields:'Fields'):\n fields_new = self.copy()\n fields_new += fields\n return fields_new\n \n def __repr__(self):\n return f'<{self.__class__.__name__} with {len(self)} fields>'\n \n def dump(self, sep='=', newline='\\n', align_keys=False, pad=0, use_repr=True, prefix=''):\n if align_keys:\n key_size = max(map(len, self.keys()))\n key_mod = f':{align_keys if isinstance(align_keys,str) else \"\"}{key_size}'\n else:\n key_mod = ''\n fmt = f'{prefix}{{key!s{key_mod}}}{\" \"*pad}{sep}{\" \"*pad}{{value{\"!r\" if use_repr else \"!s\"}}}'\n text = newline.join(fmt.format(key=key, value=value) for key,value in self.items())\n return text\n \n def flatten(self, sep='.'):\n \"\"\"Flatten the fields of this SummaryFields into a new one.\n \n The names in the flat one are the concatenation of all ancestral key names joined with the provided separator.\"\"\"\n def flatten_pairs(pairs):\n 
flat_pairs = []\n for key, value in pairs:\n if isinstance(value, Fields):\n fields = typing.cast(Fields, value)\n sub_pairs = flatten_pairs(fields.items())\n flat_pairs += list((f'{key}{sep}{sub_key}',sub_value) for sub_key,sub_value in sub_pairs)\n else:\n flat_pairs.append((key,value))\n return flat_pairs\n pairs = flatten_pairs(self.items())\n fields = Fields(str(self.name), pairs)\n return fields\n\nclass FieldVisitor(SummaryVisitor):\n def on_assemble(self, state, actions):\n if isinstance(state.value, dict):\n state.value = Fields(state.name, state.value.items())\nclass FieldSummariser(Summariser):\n \"\"\"\n >>> class Container(SimpleNamespace, SummarisableFromFields):\n ... __summary_fields__ = ['a','bee']\n ... __summary_onmissing__ = 'ommit'\n >>> o = Container(a=1,bee=SummarisableList([4,5]))\n >>> print(FieldSummariser()(o).dump(align_keys=True,pad=1,prefix='+'))\n +a = 1\n +bee = [4, 5]\n >>> o = Container(a=Container(a=4,bee=[]),bee=SummarisableList([4,5]))\n >>> print(FieldSummariser()(o).flatten().dump(align_keys=True))\n a.a =4\n a.bee=[]\n bee =[4, 5]\n\n {'name': 'Container', 'records': {'a': 1, 'bee': {'class': 'SummarisableList', 'entries': [4, 5]}}}\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.add_visitor(FieldVisitor())\n\n\n# class CompressionScheme(enum.Enum):\n# BASE64 = enum.auto()\n# B64GZIP = enum.auto()\n# GZIP = enum.auto()\n# \n# class CompressionToString(enum.Enum):\n# AUTO = enum.auto()\n# JSON = enum.auto()\n# STR = enum.auto()\n# REPR = enum.auto()\n# RAW = enum.auto()\n# \n# '''Tagging class as summary compressible. Also usable as a mixin.'''\n# class SummaryCompressible(Summarisable):\n# summary_compressible_encoding = 'utf8'\n# summary_compressible_enable:bool = True\n# summary_compressible_tostring:CompressionToString = None\n# summary_compressible_scheme:CompressionScheme = CompressionScheme.B64GZIP\n# def __init__(self, value, enable:bool=None, tostring:CompressionToString=None, scheme:CompressionScheme=None):\n# if enable is not None:\n# self.summary_compressible_enable = enable\n# if tostring is not None:\n# self.summary_compressible_tostring = tostring\n# if scheme is not None:\n# self.summary_compressible_scheme = scheme\n# self.summary_value = value\n# def __repr__(self):\n# return f'{self.__class__.__name__}({self.summary_value!r})'\n# def __str__(self):\n# return str(self.value)\n# def __summary__(self, options:SummaryOptions):\n# return self.summary_value\n# \n# '''Tagging class as summary grouppable. 
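`Fields.flatten` above joins ancestor keys with a separator; the same transformation for plain nested dicts:

```python
# Flatten nested dicts, joining ancestor keys with `sep`.
def flatten(d, sep="."):
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flatten(value, sep).items():
                flat[f"{key}{sep}{sub_key}"] = sub_value
        else:
            flat[key] = value
    return flat

print(flatten({"a": {"a": 4, "bee": []}, "bee": [4, 5]}))
# {'a.a': 4, 'a.bee': [], 'bee': [4, 5]}
```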
Also usable as a mixin.'''\n# class SummaryGroupable(Summarisable):\n# summary_group_enable:bool = True\n# summary_group_name:str = None\n# def __init__(self, value, name=None, enable:bool = True):\n# if enable is not None:\n# self.summary_group_enable:bool = enable\n# if name is not None:\n# self.summary_group_name:str = name\n# if self.summary_group_name is None:\n# self.summary_group_name = self.__class__.__name__\n# if not hasattr(self.__class__,'summary_value'):\n# self.summary_value = value\n# def __summary__(self, options:SummaryOptions):\n# return self.summary_value\n# \n# class SummaryGroupCompressible(SummaryGroupable):\n# summary_group_compressible = SummaryCompressible # class to use to compress group\n\n# \n# try:\n# import skopt\n# class SearchSpaceSummaryVisitor(ClassSummaryVisitor):\n# __summary_class__ = skopt.space.Dimension\n# \n# @ifapplicable\n# def on_encounter(self, state:SummaryState):\n# fields = ('args','name','digest')\n# state.value = {field:state.value[field] for field in fields}\n# except ImportError: pass\n# \n# COMPRESSION_SCHEME_RESOLVER = make_enum_resolver(CompressionScheme)\n# COMPRESSION_STRINGIFY_RESOLVER = make_enum_resolver(CompressionToString)\n# \n# class CompressedEntry(dict):\n# def __init__(self, value, scheme:CompressionScheme='b64gzip', tostring:CompressionToString='auto', encoding = 'utf8'):\n# txt, tostring = self._stringify(value, COMPRESSION_STRINGIFY_RESOLVER.resolve(tostring))\n# txt_bin = txt.encode(encoding)\n# data, scheme = self._compress(txt_bin, COMPRESSION_SCHEME_RESOLVER.resolve(scheme))\n# txt_data = data.decode('latin')\n# super().__init__(value=txt_data, scheme=scheme, tostring=tostring, encoding=encoding)\n# \n# @classmethod\n# def _stringify(cls, what, tostring):\n# if tostring == CompressionToString.AUTO:\n# if isinstance(what, (str, bytes)):\n# return cls._stringify(what, CompressionToString.RAW)\n# else:\n# try:\n# return cls._stringify(what, CompressionToString.JSON)\n# except TypeError:\n# return cls._stringify(what, CompressionToString.STR)\n# else:\n# if tostring == CompressionToString.JSON:\n# s = cls._to_json(what)\n# elif tostring == CompressionToString.STR or tostring == CompressionToString.RAW:\n# s = str(what)\n# elif tostring == CompressionToString.REPR:\n# s = repr(what)\n# else:\n# raise RuntimeError(f'Implementation error. Contact developers.')\n# return s, tostring.name\n# @classmethod\n# def _compress(cls, data, scheme:CompressionScheme):\n# import base64, gzip\n# if scheme == CompressionScheme.GZIP or scheme == CompressionScheme.B64GZIP:\n# data = gzip.compress(data)\n# if scheme == CompressionScheme.BASE64 or scheme == CompressionScheme.B64GZIP:\n# data = base64.b64encode(data)\n# return data, scheme.name\n# \n# @classmethod\n# def _to_json(cls, what):\n# je = json.JSONEncoder(indent=None, separators=',:')\n# return je.encode(what)\n# \n# class CompressingSummaryVisitor(ClassSummaryVisitor):\n# __summary_class__ = SummaryCompressible\n# encoding = 'utf8'\n# def __init__(self, plain=False, scheme=CompressionScheme.B64GZIP, tostring=CompressionToString.AUTO, allow_override:bool=True):\n# '''@param reorder_expansion: if the expansion of a data instance is requested, reorder it to be the last among its siblings. 
Can be True, False, once'''\n# self.plain = plain\n# self.tostring = COMPRESSION_STRINGIFY_RESOLVER.resolve(tostring)\n# self.scheme = COMPRESSION_SCHEME_RESOLVER.resolve(scheme)\n# self.allow_override: bool = allow_override\n# \n# @ifapplicable\n# def on_encounter(self, state:SummaryState, actions):\n# self._compress(state)\n# \n# @ifapplicable\n# def on_summarised(self, state:SummaryState, actions):\n# self._compress(state)\n# \n# def _compress(self, state):\n# compressible:SummaryCompressible = state.value\n# if compressible.summary_compressible_enable:\n# value = compressible.summary_value\n# tostring = compressible.summary_compressible_tostring if self.allow_override and hasattr(compressible,'summary_compressible_tostring') and compressible.summary_compressible_tostring is not None else self.tostring\n# scheme = compressible.summary_compressible_scheme if self.allow_override and hasattr(compressible,'summary_compressible_scheme') and compressible.summary_compressible_scheme is not None else self.scheme\n# data = CompressedEntry(value=value, scheme=scheme, tostring=tostring)\n# state.value = data['value'] if self.plain else data\n# \n# def __repr__(self):\n# return f'<{self.__class__.__name__} {\"PLAIN\" if self.plain else \"OBJECT\"} {self.tostring.name} {self.scheme.name}>'\n# \n# class GrouppedEntry(SummarisableDict):\n# def __init__(self, group, key):\n# super().__init__(group=group, key=key)\n# \n# class GrouppingSummaryVisitor(ClassSummaryVisitor):\n# __summary_class__ = SummaryGroupable\n# encoding = 'utf8'\n# summary_compressible = SummaryCompressible\n# \n# class Data(Summarisable):\n# summary_sibling_priority = -10\n# class SummaryGroupData(SummarisableDict): pass\n# def __init__(self, visitor):\n# self.visitor = visitor\n# self.groups = {}\n# self.compressors = {}\n# self.finalised = False\n# def append(self, state:SummaryState):\n# groupable = state.value\n# group_name = groupable.summary_group_name\n# if self.finalised:\n# raise RuntimeError(f'Summary filter is finalised, so no more groups can be added.')\n# if group_name not in self.groups:\n# self.groups[group_name] = []\n# if hasattr(groupable,'summary_group_compressible') and groupable.summary_group_compressible is not None:\n# self.compressors[group_name] = groupable.summary_group_compressible\n# group = self.groups[group_name] = self.groups.get(group_name, [])\n# key = len(group)\n# group.append(state.value.summary_value)\n# state.value = GrouppedEntry(group=group_name, key=key)\n# \n# @property\n# def compress(self): return self.visitor.compress\n# def _compressible_class(self, data, group):\n# cls_cmp = self.compressors.get(group,self.visitor.summary_compressible)\n# return cls_cmp(data)\n# \n# def __summary__(self, summary_options:SummaryOptions):\n# self.finalised = True\n# if self.compress:\n# cls_lst,cls_dct,cls_cmp = SummarisableList, self.SummaryGroupData, self._compressible_class\n# else:\n# cls_lst,cls_dct,cls_cmp = list, dict, lambda data, group:data\n# data = cls_lst(cls_dct(group=group,len=len(data),data=cls_cmp(data, group)) for group,data in self.groups.items())\n# return data\n# \n# def __str__(self):\n# sgrp = '['+','.join(f'{name}:{len(group)}' for name,group in self.groups.items())+']'\n# return f'{\"[FIN]\" if self.finalised else \"[OK]\"} groups:{sgrp}>'\n# def __repr__(self):\n# return f'<{self.__class__.__name__} {self!s}>'\n# \n# def __init__(self, compress=False, allow_override:bool = True):\n# self.data = None\n# self.reset()\n# self.compress = compress\n# self.allow_override: bool = 
allow_override\n# def reset(self):\n# self.data = self.Data(self)\n# \n# @ifapplicable\n# def on_encounter(self, state:SummaryState, actions):\n# if state.value.summary_group_enable:\n# self.data.append(state)\n# \n# def __repr__(self):\n# return f'<{self.__class__.__name__} {self.data!s}>'\n# \n# class JSONDefaultEncoder(json.JSONEncoder):\n# def default(self, obj):\n# if isinstance(obj, complex):\n# return str(obj)\n# \n'''\nimport gzip\nimport base64\nimport numpy as np \n\nclass ValidityCodec:\n \n @staticmethod\n def encode(validity):\n return base64.b64encode(gzip.compress(np.array(validity,bool))).decode('latin')\n \n @staticmethod\n def decode(txt):\n buf = gzip.decompress(base64.decodestring(txt))\n return np.frombuffer(buf, bool)\n\n'''\n'''\nclass SelectorSummariser(ClassSummaryFilter):\n summarise_class = 'Selector'\n \n def apply(self, state:SummaryState):\n selector = state.instance\n records = state.records\n data = selector.language.data\n is_cache_enabled = selector.cache.enabled\n selector.cache.enabled = False\n def evaluate(evaluators, part_check):\n eval_sum = OrderedDict()\n if state.parts & part_check:\n for eval_cls in evaluators.classes:\n eval_tag = evaluators.get_class_tag(eval_cls)\n try:\n eval_inst = eval_cls(data)\n eval_sum[eval_tag] = eval_inst.evaluate(selector)\n except Exception as e:\n log.error(f'While evaluating {eval_tag}: {e}')\n eval_sum[eval_tag] = nan\n return eval_sum\n records['measures'] = evaluate(MEASURES_DEFAULT_CONSTRUCTIBLE, SummaryParts.SELECTOR_MEASURES)\n records['optimistic-estimators'] = evaluate(OPTIMISTIC_ESTIMATORS_DEFAULT_CONSTRUCTIBLE, SummaryParts.SELECTOR_OPTIMISTIC_ESTIMATES)\n records['cached'] = {k:v for k,v in selector.cache.items()\n if isinstance(v,float)}\n selector.cache.enabled = is_cache_enabled\n if state.parts & SummaryParts.SELECTOR_VALIDITIES:\n records['validity'] = ValidityCodec.encode(selector.validity)\n \n def summarise(self, selectors) -> SummaryList:\n return SummaryList(map(self.summarise_selector, selectors))\n'''\n\n'''\nclass ScoringfunctionSummariser(ConditionalSummaryFilter):\n def isapplicable(self, state:SummaryState)->bool:\n if not isinstance(state.instance, ProductBundle):\n return False\n return issubclass(state.instance.factory_object, ScoringFunctions)\n \n def apply(self, state:SummaryState):\n fields = ('args','name','digest')\n state.records = OrderedDict(((field,state.records[field]) for field in fields))\n'''\n \nif __name__ == '__main__':\n import sys\n del sys.path[0]\n import doctest\n doctest.testmod()\n \n ","repo_name":"kalofoli/sergio","sub_path":"colito/summaries.py","file_name":"summaries.py","file_ext":"py","file_size_in_byte":34450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42360648759","text":"\"\"\"Zadanie 2 lista_2\"\"\"\nimport sys\nTAB = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n\n\ndef encode(output_name, input_name):\n \"\"\"Funkcja koduje plik kodowaniem Base64\"\"\"\n file_read = open(input_name)\n file_write = open(output_name, \"wb\")\n bits = \"\"\n\n for line in file_read:\n for char in line:\n bits += '{0:08b}'.format(ord(char))\n\n chunks = [bits[index: index + 6] for index in range(0, len(bits), 6)]\n\n for chunk in chunks:\n file_write.write(TAB[int(chunk, 2)].encode())\n file_read.close()\n file_write.close()\n print(\"Zakodowano plik: \", input_name)\n\n\ndef decode(input_name, output_name):\n \"\"\"Funkcja dekoduje plik zakodowany Base64\"\"\"\n file_read = 
open(input_name, \"rb\")\n file_write = open(output_name, \"w\")\n bits = \"\"\n\n letters = list(file_read.read())\n\n for letter in letters:\n bits += '{0:06b}'.format(TAB.index(chr(letter)))\n chunks = [bits[index: index + 8] for index in range(0, len(bits), 8)]\n\n for chunk in chunks:\n file_write.write(str(chr(int(chunk, 2))))\n\n file_read.close()\n file_write.close()\n print(\"Odkodowano plik:\", input_name)\n\n\nif sys.argv[1] == \"--encode\":\n encode(sys.argv[2], sys.argv[3])\nelif sys.argv[1] == \"--decode\":\n decode(sys.argv[2], sys.argv[3])\n","repo_name":"Marcin-Szadkowski/Python","sub_path":"Lista2/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30169555311","text":"class Calibration:\n def __init__(self, mtx, dist):\n self.__mtx = mtx\n self.__dist = dist\n\n @property\n def mtx(self):\n return self.__mtx\n\n @property\n def dist(self):\n return self.__dist\n\n\nclass Pipeline:\n def __init__(self, calibration):\n self.calibration = calibration\n\n # Helper Utilities\n def hls(self, image, threshold=[170, 255]):\n \"\"\"Extract S channel from image\"\"\"\n hls_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n s_channel = hls_image[:,:, 2]\n\n binary = np.zeros_like(s_channel)\n binary[(s_channel >= threshold[0]) & (s_channel <= threshold[1])] = 1\n\n return binary\n\n def undistort(self, image):\n \"\"\"Undistort the image given the distortion matrix and destination points\"\"\"\n return cv2.undistort(image,\n self.calibration.mtx,\n self.calibration.dist, None,\n self.calibration.mtx)\n\n def lab(self, image, threshold=[150,255]):\n lab_color_space = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)\n b_channel = lab_color_space[:, :, 2]\n\n binary = np.zeros_like(b_channel)\n binary[((b_channel >= threshold[0]) & (b_channel <= threshold[1]))] = 1\n\n return binary\n\n def color_threshold(self, image):\n \"\"\"Extract R and S channels from `image`.\"\"\"\n b_channel_threshold = self.lab(image)\n s_channel_threshold = self.hls(image)\n\n color_composite = np.zeros_like(s_channel_threshold)\n color_composite[((s_channel_threshold == 1) | (b_channel_threshold == 1))] = 1\n\n return color_composite\n\n def sobelize(self, image, kernel_size=9):\n \"\"\"Helper function to calculate the Sobel in the X and Y directions\"\"\"\n sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize = kernel_size)\n sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize = kernel_size)\n\n return sobel_x, sobel_y\n\n def sobel_threshold(self, image, orientation='x', min_threshold=30, max_threshold=255):\n \"\"\"Absolute Sobel threshold. 
Adapted from the lectures.\"\"\"\n        if orientation == 'x':\n            sobel, _ = self.sobelize(image)\n        else:\n            _, sobel = self.sobelize(image)\n\n        absolute_sobel = np.absolute(sobel)\n        scaled_sobel = np.uint8(np.multiply(255, absolute_sobel) / np.max(absolute_sobel))\n\n        binary = np.zeros_like(scaled_sobel)\n        binary[(scaled_sobel >= min_threshold) & (scaled_sobel <= max_threshold)] = 1\n\n        return binary\n\n    def magnitude_threshold(self, image, sobel_kernel_size=9, threshold=[40, 200]):\n        \"\"\"Adapted from the lectures\"\"\"\n\n        sobel_x, sobel_y = self.sobelize(image, kernel_size=sobel_kernel_size)\n\n        gradient_magnitude = np.sqrt(np.square(sobel_x) + np.square(sobel_y))\n        scaling_factor = np.divide(np.max(gradient_magnitude), 255)\n        scaled_gradient = np.divide(gradient_magnitude, scaling_factor).astype(np.uint8)\n\n        binary = np.zeros_like(scaled_gradient)\n        binary[(scaled_gradient >= threshold[0]) & (scaled_gradient <= threshold[1])] = 1\n\n        return binary\n\n    def directional_threshold(self, image, sobel_kernel_size=9, threshold=[0.6, 1.1]):\n        \"\"\"Adapted from the lectures\"\"\"\n\n        sobel_x, sobel_y = self.sobelize(image, kernel_size = sobel_kernel_size)\n\n        directional_gradient = np.arctan2(np.absolute(sobel_y), np.absolute(sobel_x))\n\n        binary = np.zeros_like(directional_gradient)\n        binary[(directional_gradient >= threshold[0]) & (directional_gradient <= threshold[1])] = 1\n\n        return binary\n\n    def gradient_thresholds(self, image):\n        # Sobel Gradients\n        x_gradient = self.sobel_threshold(image, orientation='x', min_threshold=30, max_threshold=200)\n        y_gradient = self.sobel_threshold(image, orientation='y', min_threshold=30, max_threshold=200)\n\n        # Mag Gradient\n        magnitude_threshold = self.magnitude_threshold(image, threshold=[50, 255])\n        # Directional Gradient\n        directional_gradient = self.directional_threshold(image, threshold=[0.8, 1.2])\n\n        gradient_composite = np.zeros_like(directional_gradient)\n        # gradient_composite[((x_gradient == 1) & (magnitude_threshold == 1))] = 1\n        gradient_composite[(((x_gradient == 1) | (magnitude_threshold == 1)) & ((directional_gradient == 1) | (y_gradient == 1)))] = 1\n\n        return gradient_composite\n\n    def composite_threshold(self, gradient_threshold, color_threshold):\n        \"\"\"Combine Gradient and Color thresholds\"\"\"\n        binary = np.zeros_like(gradient_threshold)\n        binary[(gradient_threshold == 1) | (color_threshold == 1)] = 1\n\n        return binary\n\n    def warp(self, image):\n        image_size = (image.shape[1], image.shape[0])\n\n        source = np.float32(\n            [[500, 480],\n             [810, 482],\n             [1250, 720],\n             [40, 720]])\n\n        destination = np.float32(\n            [[0, 0],\n             [1200, 0],\n             [1200, 720],\n             [0, 720]])\n\n        matrix = cv2.getPerspectiveTransform(source, destination)\n        inverse_matrix = cv2.getPerspectiveTransform(destination, source)\n        warped_image = cv2.warpPerspective(image, matrix, image_size, flags=cv2.INTER_LINEAR)\n\n        return warped_image, matrix, inverse_matrix\n\n    def find_lane(self, binary_warped):\n        \"\"\"Adapted from Udacity's lectures\"\"\"\n        histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:,:], axis=0)\n        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n        midpoint = int(histogram.shape[0] / 2)\n        leftx_base = np.argmax(histogram[:midpoint])\n        rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n        nwindows = 9\n        window_height = int(binary_warped.shape[0]/nwindows)\n        nonzero = binary_warped.nonzero()\n        nonzeroy = np.array(nonzero[0])\n        nonzerox = np.array(nonzero[1])\n        leftx_current = leftx_base\n        rightx_current = rightx_base\n        margin = 
100\n        minpix = 50\n        left_lane_inds = []\n        right_lane_inds = []\n        for window in range(nwindows):\n            # Identify window boundaries in x and y (and right and left)\n            win_y_low = binary_warped.shape[0] - (window+1)*window_height\n            win_y_high = binary_warped.shape[0] - window*window_height\n            win_xleft_low = leftx_current - margin\n            win_xleft_high = leftx_current + margin\n            win_xright_low = rightx_current - margin\n            win_xright_high = rightx_current + margin\n\n            # Draw the windows on the visualization image\n            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)\n            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\n\n            # Identify the nonzero pixels in x and y within the window\n            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\n            # Append these indices to the lists\n            left_lane_inds.append(good_left_inds)\n            right_lane_inds.append(good_right_inds)\n\n            if len(good_left_inds) > minpix:\n                leftx_current = int(np.mean(nonzerox[good_left_inds]))\n            if len(good_right_inds) > minpix:\n                rightx_current = int(np.mean(nonzerox[good_right_inds]))\n\n        # Concatenate the arrays of indices\n        left_lane_inds = np.concatenate(left_lane_inds)\n        right_lane_inds = np.concatenate(right_lane_inds)\n\n        # Extract left and right line pixel positions\n        leftx = nonzerox[left_lane_inds]\n        lefty = nonzeroy[left_lane_inds]\n        rightx = nonzerox[right_lane_inds]\n        righty = nonzeroy[right_lane_inds]\n\n        # Fit a second order polynomial to each\n        left_fit = np.polyfit(lefty, leftx, 2)\n        right_fit = np.polyfit(righty, rightx, 2)\n\n        # Generate x and y values for plotting\n        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])\n        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n        return ploty, lefty, righty, leftx, rightx, left_fitx, right_fitx\n\n    def curvature_offset(self, ploty, lefty, righty, leftx, rightx):\n        image_width, image_height = 1280, 720\n        y_eval = np.max(ploty)\n\n        lane_pixel_width = np.multiply(image_width, 0.57)\n        meters_per_pixel_y = np.divide(23, image_height)\n        meters_per_pixel_x = np.divide(3.7, lane_pixel_width)\n\n        left_fit_cr = np.polyfit(lefty * meters_per_pixel_y, leftx * meters_per_pixel_x, deg=2)\n        right_fit_cr = np.polyfit(righty * meters_per_pixel_y, rightx * meters_per_pixel_x, deg=2)\n\n        # Adapted from Udacity lectures\n        radius_curvature_left = ((1 + (2 * left_fit_cr[0] * y_eval * meters_per_pixel_y + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n        right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval*meters_per_pixel_y + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n        curvature_radius = np.mean([radius_curvature_left, right_curverad])\n\n        image_center = np.multiply(lane_pixel_width, meters_per_pixel_x) / 2\n        car_center = np.mean([left_fit_cr, right_fit_cr])\n        vehicle_offset = np.abs(image_center - car_center)\n\n        return curvature_radius, vehicle_offset\n\n    def draw_lane(self, warped_image, undistorted_image, inverse_matrix, ploty,\n                  left_fitx, right_fitx, curvature_radius, vehicle_offset):\n        \"\"\"Draw the lane onto the image and apply the text.\"\"\"\n        warp_zero = np.zeros_like(warped_image).astype(np.uint8)\n\n        color_warp = np.dstack((warp_zero, warp_zero, 
warp_zero))\n image_size = (color_warp.shape[1], color_warp.shape[0])\n\n left_points = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n right_points = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n left_and_right_points = np.hstack((left_points, right_points))\n\n cv2.fillPoly(color_warp, np.int_([left_and_right_points]), (0,255, 0))\n newwarp = cv2.warpPerspective(color_warp, inverse_matrix, image_size)\n result = cv2.addWeighted(undistorted_image, 1, newwarp, 0.3, 0)\n\n cv2.putText(result, 'Distance from center: {:.2f} m'.format(vehicle_offset), (100,80), fontFace = 16,\\\n fontScale = 2, color=(255,255,255), thickness = 4)\n\n cv2.putText(result, 'Radius of Curvature {} m'.format(int(curvature_radius)), (120,140),\n fontFace = 16, fontScale = 2, color=(255,255,255), thickness = 4)\n\n return result\n\n def run(self, image):\n # Undisort the image\n undistorted_image = self.undistort(image)\n\n # Color thresholds\n color_threshold = self.color_threshold(undistorted_image)\n\n # Gradient threshold\n gradient_threshold = self.gradient_thresholds(color_threshold)\n\n # Combine Gradient and Color thresholding\n combined_thresholds = self.composite_threshold(gradient_threshold, color_threshold)\n\n # Warp the image\n warped_image, _, inverse_matrix = self.warp(combined_thresholds)\n\n # Draw the lane and derive the curvature offeset\n ploty, lefty, righty, leftx, rightx, left_fitx, right_fitx = self.find_lane(warped_image)\n curvature_radius, car_offset = self.curvature_offset(ploty, lefty, righty, leftx, rightx)\n output = self.draw_lane(warped_image, undistorted_image,\n inverse_matrix, ploty, left_fitx, right_fitx, curvature_radius, car_offset)\n\n return output\n","repo_name":"erikshestopal/CarND-Advanced-Lane-Lines","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":11890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40579943892","text":"import numpy as np\n\nimport discretization\nimport util\n\n\n# pmf being an array probability masses. This function returns the corresponding \n# index that was chosen.\n# rng being a random.Random object\ndef sample_discrete_distribution(pmf_table, rng):\n rand_val = rng.random()\n accum = 0\n for i in np.ndindex(pmf_table.shape):\n accum += pmf_table[i]\n if accum > rand_val:\n break\n return i\n\n\n# Represents an n-dimensional functions whose output values are suitable for \n# input values of another node. 
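(Presumably so a child node can consume the sampled grid directly, without interpolation.) 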
This means they must be evenly spaced, within a \n# certain range.\nclass DiscretePMF(discretization.DiscretizedFunction):\n\n # pmf_table, being a table of probability masses, is assumed to be normalized\n # index_to_value_list being a list of size n, where n is the number of \n # inputs to the function, where each element is a sorted list of values.\n def __init__(self, pmf_table, index_to_input_list):\n discretization.DiscretizedFunction.__init__(self, pmf_table, index_to_input_list)\n\n @classmethod\n def from_DiscretizedFunction(cls, discretized_fn):\n tab = discretized_fn.output_table\n tab = tab / np.sum(tab)\n return cls(tab, discretized_fn.index_to_input)\n\n @classmethod\n def from_pdf(cls, fn, samples):\n return cls.from_DiscretizedFunction(\n discretization.DiscretizedFunction.from_nd_fn(\n fn,\n (),\n samples\n ).integrate()\n )\n\n @classmethod\n def from_scipy_pmf(cls, scipy_pmf, samples):\n tab = np.zeros(util.get_shape_from_ragged_array(samples))\n for i in np.ndindex(tab.shape):\n inputs = util.get_values_from_ragged_array(samples, i)\n tab[i] = scipy_pmf(inputs)\n tab = tab / np.sum(tab)\n return cls(tab, samples)\n\n def sample(self, rng):\n result = [None] * len(self.index_to_input)\n indices = sample_discrete_distribution(self.output_table, rng)\n for i in range(len(indices)):\n result[i] = self.index_to_input[i][indices[i]]\n return {'probability': self.output_table[tuple(indices)], 'index': indices, 'output': result}\n\n\n# This class handles organization of a CPD table, various parents and their \n# possible values. It also works for PMF samples of single variables.\nclass DiscreteCPD(discretization.DiscretizedFunction):\n\n # cpd_table being an n + 1 dimensional table, where n is the number of \n # parents.\n # dimension_to_parent_name being an n-entry list of strings, relating each \n # dimension of cpd_table (index) to the name of a random variable, one of this \n # variable's parents.\n # indices_to_values being a n + 1-entry ragged list, where the first index is the \n # dimension into the CPD table of the corresponding variable. The second \n # dimension relates \n # indices of that variable to its actual values. 
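(That is, one sorted list of sample values per table axis.) 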
The n + 1th entry represents \n # the output values for this discrete CPD.\n def __init__(self, cpd_table, index_to_input_list):\n discretization.DiscretizedFunction.__init__(self, cpd_table, index_to_input_list)\n\n @classmethod\n def from_DiscretizedFunction(cls, discretized_fn):\n tbl = discretized_fn.output_table\n tbl = tbl / np.reshape(np.sum(tbl, axis=tbl.ndim - 1), tuple(list(tbl.shape[:-1]) + [1]))\n return cls(tbl, discretized_fn.index_to_input)\n\n @classmethod\n def from_pdf(cls, fn, samples):\n return cls.from_DiscretizedFunction(\n discretization.DiscretizedFunction.from_nd_fn(\n fn,\n (),\n samples\n ).integrate()\n )\n\n # pdf_factory takes values for variables upon which this random variable is conditioned and produces a pdf.\n # samples is an n-entry list, where the first n-1 entries are random variables upon which this random variable is\n # conditioned.\n @classmethod\n def from_pdf_factory(cls, pdf_factory, samples):\n # Have to deal with integration eating a sample point.\n output_table_shape = list(util.get_shape_from_ragged_array(samples))\n # Not exactly sure why I need the int cast, but, otherwise, it converts the last entry to a floating point.\n output_table_shape = tuple(output_table_shape[:-1] + [int(output_table_shape[-1] - 1)])\n output_table = np.zeros(output_table_shape)\n\n input_shape = output_table_shape[:-1]\n\n for i in np.ndindex(input_shape):\n conditioned_assignment = util.get_values_from_ragged_array(samples, i)\n cpd = pdf_factory(conditioned_assignment)\n discretized_cpd = discretization.DiscretizedFunction.from_nd_fn(cpd, (), [samples[-1]]).integrate()\n discretized_cpd_table = discretized_cpd.output_table\n discretized_cpd_table /= np.sum(discretized_cpd_table)\n output_table[i] = discretized_cpd_table\n return cls(output_table, samples[:-1] + [samples[-1][:-1]])\n\n # conditioned_values being a list of real numbers, the values upon which this CPD is to be clamped.\n def sample(self, conditioned_values, rng):\n clamped_cpd = self.get_clamped_cpd(conditioned_values)\n sample_idx = sample_discrete_distribution(clamped_cpd, rng)\n return {'probability': clamped_cpd[sample_idx], 'index': sample_idx, 'output': self.index_to_input[self.output_table.ndim - 1][sample_idx]}\n\n # conditioned_values being a list of real numbers\n def get_clamped_cpd(self, conditioned_values):\n cpd_indices = [None] * (self.output_table.ndim - 1)\n for i in range(len(conditioned_values)):\n cpd_indices[i] = self.input_to_index[i][conditioned_values[i]]\n\n return self.output_table[tuple(cpd_indices)]\n","repo_name":"Simon-Swenson-8351/ua-research-fracture","sub_path":"bayes-nets/python/lib/trunk/discrete_prob.py","file_name":"discrete_prob.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31563300709","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport scipy.fftpack as fftpk\nfrom pydub import AudioSegment\nimport os\n\ndef toWAV(mp3):\n wav = mp3.split(\".\")[0] + \".wav\"\n sound = AudioSegment.from_mp3(mp3)\n sound.export(wav, format=\"wav\")\n return wav\n\nclass Song():\n def __init__(self, songName, start_sec = 0, end_sec = 0):\n clear = False\n if \".mp3\" in songName:\n songName = toWAV(songName)\n clear = True\n \n #print(\"Reading audio file...\")\n audiofile = wavfile.read(songName)\n self.sampfreq, self.data = audiofile[0], 
audiofile[1]/32767 #16 bits\n self.peakAlphaIndex = 0\n self.length = len(self.data) - self.peakAlphaIndex\n self.length_seconds = self.length / 44100\n\n self.channels = len(self.data[0])\n if self.channels == 2: # checks for stereo\n self.data = np.add(self.data[:, [0]], self.data[:, [1]]) / self.channels\n self.data = np.reshape(self.data, -1)\n #Normalize to 1 is max\n self.data = self.data * (1 / np.max(self.data))\n\n if end_sec != 0:\n self.data = self.data[int(start_sec * self.sampfreq):int(end_sec * self.sampfreq)]\n \n if clear:\n os.remove(songName)\n \n def GetRMS(self): # decibels\n rms = 20*np.log10((np.mean(np.absolute(self.data))))\n return int(rms*100)/100\n\n def FindAlphaPeak(self, start = 0, ratio = 0.8):\n # MAKE SURE THE DATA IS NORMALIZED SO RATIO = THRESHOLD\n for i in range(len(self.data)):\n if abs(self.data[i]) > ratio:\n self.peakAlpha = self.data[i]\n self.peakAlphaIndex = i\n self.length = len(self.data) - self.peakAlphaIndex\n self.length_seconds = self.length / 44100\n return int((self.peakAlphaIndex/self.sampfreq)*1000)/1000\n \n def GetNoteOnset(self, unit = 2048, chunk_size = 2048, threshold_ratio = 0.8, HPF = 20, LPF = 500):\n sus, on, self.notes = -1, -1, []\n note_on = 0\n song = self.data[self.peakAlphaIndex:]\n threshold = Get_Threshold(song, chunk_size, threshold_ratio, HPF, LPF, self.sampfreq)\n #print(\"Ratio: \" + str(threshold_ratio) + \" = Threshold: \" + str(threshold))\n \n for i in range(self.length//unit):\n start = unit*(i) + self.peakAlphaIndex\n end = unit*(i) + chunk_size + self.peakAlphaIndex\n \n on = ReadChunk(self.data[start:end], threshold, LPF, HPF, self.sampfreq)\n if on == 1 and sus == -1: #Note change\n note_on = start\n sus = 1\n elif on == -1 and sus == 1:\n note_release = start\n self.notes.append([note_on, note_release])\n sus = -1\n \n def GetPeaks(self, x):\n self.pks, self.pksValue = [],[]\n for i in self.notes: #notes as an array of start,end pairs\n try:\n start = i[0] - x\n end = i[0] + x\n noteSamples = self.data[start:end]\n point = np.max(np.absolute(noteSamples))\n except:\n start = i[0]\n end = i[1]\n noteSamples = self.data[start:end]\n point = np.max(np.absolute(noteSamples))\n \n transientPoint = np.max(np.where(np.absolute(noteSamples) == point)) + start\n self.pks.append(transientPoint)\n self.pksValue.append(point)\n \n def GetTruePeaks(self, x):\n self.truepeaks = []\n for i in self.notes:\n range_ = x\n unit = 64\n rms = []\n index = []\n for j in range(range_//unit): \n start = i[0] - (range_) + unit*j \n end = start + unit\n chunk = self.data[start:end]\n peakChunk = FindPeaksSignal(chunk)\n rms.append(sum(peakChunk)/len(peakChunk))\n index.append(start)\n \n highChunk = index[rms.index(max(rms))]\n self.truepeaks.append(highChunk)\n \n def GetBPM(self, minBPM = 80, maxBPM = 210, kind = \"mode\"):\n x = [i[0] for i in self.notes]\n d = [x[i+1]-x[i] for i in range(len(x)) if i < len(x)-1]\n if kind == \"mean\":\n beat_s = mean(d)/self.sampfreq\n elif kind == \"mode\":\n beat_s = mode(d)/self.sampfreq\n elif kind == \"median\":\n beat_s = median(d)/self.sampfreq\n else:\n print(\"Error\")\n if beat_s == 0:\n bpm = 0\n else:\n bpm = 60/beat_s\n while bpm < minBPM or bpm > maxBPM:\n if bpm < minBPM:\n bpm = bpm*2\n elif bpm > maxBPM:\n bpm = bpm/2\n return bpm\n \n def GetBPM_PKS(self, minBPM = 80, maxBPM = 210, kind = \"mode\"):\n d = [self.pks[i+1]-self.pks[i] for i in range(len(self.pks)) if i < len(self.pks)-1]\n if kind == \"mean\":\n beat_s = mean(d)/self.sampfreq\n elif kind == \"mode\":\n beat_s = 
mode(d)/self.sampfreq\n elif kind == \"median\":\n beat_s = median(d)/self.sampfreq\n else:\n print(\"Error.\")\n if beat_s == 0:\n bpm = 0\n else:\n bpm = 60/beat_s\n while bpm < minBPM or bpm > maxBPM:\n if bpm < minBPM:\n bpm = bpm*2\n elif bpm > maxBPM:\n bpm = bpm/2\n return bpm \n \n def CalculateThreshold_RMS(self):\n self.rms = GetRMS(self.data)\n floor = -48\n if self.rms > -12:\n tr = 0.9\n elif self.rms > -14 and self.rms <= -12:\n tr = 0.8\n elif self.rms > -16 and self.rms <= -14:\n tr = 0.7\n elif self.rms > -20 and self.rms <= -16:\n tr = 0.65\n elif self.rms > -24 and self.rms <= -20:\n tr = 0.6\n elif self.rms > -30 and self.rms <= -24:\n tr = 0.5\n elif self.rms > -40 and self.rms <= -30:\n tr = 0.4\n elif self.rms > -60 and self.rms <= -40:\n tr = 0.2\n else:\n tr = 0.8\n \n #tr = 1 - (self.rms/floor)\n #print(\"Suggested ratio is: \" + str(tr))\n return int(tr * 10000)/10000\n\n def PlotPeaks(self):\n x = self.pks\n y = [abs(i) for i in self.pksValue]\n y = [1 for i in self.pksValue]\n plt.figure(figsize = (20,5))\n plt.xlabel(\"Sample Position\")\n plt.ylabel(\"Amplitude\")\n plt.grid(True)\n plt.scatter(x,y)\n plt.savefig(\"song peaks.png\")\n plt.show()\n \n def SaveOnsets(self):\n self.notes\n data = \"\"\n for i in self.notes:\n data = data + str(i[0]/self.sampfreq) + \",\" + str(i[1]/self.sampfreq) + \"\\n\"\n with open(\"onsets.csv\", \"w+\") as file:\n file.write(data)\n \n def PlotNoteOnset(self):\n x = [i[0] for i in self.notes]\n y = [1 for i in self.notes]\n plt.figure(figsize=(20,10))\n plt.plot(x,y)\n plt.show()\n\n\n\ndef GetFrequencyPeaks(x, y):\n peaks_x = []\n peaks_y = []\n for i in range(len(y)):\n if i > 0 and i < len(y)-1:\n if y[i] > y[i - 1] and y[i] > y[i + 1]:\n peaks_x.append(x[i])\n peaks_y.append(y[i])\n return peaks_x, peaks_y\n\n \ndef PlotNote(note, sampfreq, LPF, HPF, name): \n x, y = CalculateFFT_dB(note,sampfreq, LPF, HPF) \n x_peaks, y_peaks = GetFrequencyPeaks(x,y)\n plt.figure(figsize=(20,10))\n #plt.xticks(x_peaks)\n plt.xlabel(\"Frequency (Hz)\")\n plt.ylabel(\"Amplitude (dB)\")\n plt.plot(x,y)\n plt.savefig(name)\n plt.show()\n\ndef PlotNote2(note, sampfreq, LPF, HPF, name, xticks): \n x, y = CalculateFFT_dB(note,sampfreq, LPF, HPF) \n plt.figure(figsize=(20,10))\n plt.xticks(xticks)\n plt.xlabel(\"Frequency (Hz)\")\n plt.ylabel(\"Amplitude (dB)\")\n plt.plot(x,y)\n plt.savefig(name)\n plt.show()\n\n\ndef PlotPeaks2(x, y, xticks, ylim, name):\n plt.figure(figsize=(20,10))\n plt.grid(True)\n plt.plot(x,y)\n plt.scatter(x,y)\n plt.ylim(ylim)\n plt.xticks(xticks)\n plt.savefig(name)\n plt.show()\n\ndef ReadChunk(chunk, threshold, LPF, HPF, sampfreq):\n FFT = abs(scipy.fft.fft(chunk))\n freqs = fftpk.fftfreq(len(FFT), (1.0/sampfreq))\n \n freqs = freqs[range(len(FFT)//2)]\n \n freqsHPF = freqs[freqs > HPF]\n freqsLPF = freqs[freqs < LPF]\n \n indexHPF = int(max(np.where(freqs == freqsHPF[0])))\n indexLPF = int(max(np.where(freqs == freqsLPF[len(freqsLPF)-1])))\n \n FFT = FFT[range(len(FFT)//2)]\n FFTF = FFT[indexHPF:indexLPF]\n \n if np.sum(np.absolute(FFTF)) > threshold:\n frequency = 1\n else:\n frequency = -1\n return frequency\n \ndef FindPeaksSignal(x):\n peaks = [0]\n for i in range(1,len(x)-1):\n if abs(x[i]) >= abs(x[i - 1]) and abs(x[i]) >= abs(x[i+1]):\n peaks.append(x[i])\n return peaks\n\ndef CalculateFFT_dB(chunk, sampfreq, HPF, LPF):\n \n window = chunk.size\n \n chunk = chunk * window\n FFT = abs(scipy.fft.fft(chunk))\n freqs = fftpk.fftfreq(len(FFT), (1.0/sampfreq))\n \n freqs = freqs[range(len(FFT)//2)]\n \n freqsHPF 
= freqs[freqs > HPF]\n freqsLPF = freqs[freqs < LPF]\n \n indexHPF = int(max(np.where(freqs == freqsHPF[0])))\n indexLPF = int(max(np.where(freqs == freqsLPF[len(freqsLPF)-1])))\n \n FFT = FFT[range(len(FFT)//2)]\n \n FFTF = FFT[indexHPF:indexLPF]\n freqsF = freqs[indexHPF:indexLPF]\n \n return freqsF, FFTF \n\n\ndef Get_Threshold(data, chunk_size, ratio, HPF, LPF, sampfreq):\n #Find the highest power in the frequency range in the whole data\n #Use it as threshold multiplied by the ratio received\n total = []\n for i in range(int((len(data)/chunk_size))-1):\n start = chunk_size*(i)\n end = chunk_size*(i) + chunk_size\n chunk = data[start:end]\n \n FFT = abs(scipy.fft.fft(chunk))\n freqs = fftpk.fftfreq(len(FFT), (1.0/sampfreq))\n\n freqs = freqs[range(len(FFT)//2)]\n\n freqsHPF = freqs[freqs > HPF]\n freqsLPF = freqs[freqs < LPF]\n \n indexHPF = int(max(np.where(freqs == freqsHPF[0])))\n indexLPF = int(max(np.where(freqs == freqsLPF[len(freqsLPF)-1])))\n \n FFT = FFT[range(len(FFT)//2)]\n \n FFTF = FFT[indexHPF:indexLPF]\n \n total.append(np.sum(np.absolute(FFTF)))\n \n threshold = (max(total))\n #threshold = (max(total)) - (min(total))\n return threshold * ratio\n \n \ndef SavePeaks(peaks, sampfreq, channels, alphaPeak, name):\n data = \"\"\n for i in peaks:\n data = data + str(((alphaPeak + i) * channels)/sampfreq) + \"\\n\"\n with open(name, \"w+\") as file:\n file.write(data)\n \ndef GetRMS(part):\n rms = 20*np.log10((np.mean(np.absolute(part)) + 0.0001))\n #print(\"RMS is: \" + str(rms) + \" dB\")\n return rms\n\ndef GetRMS_100(part):\n rms = (np.mean(np.absolute(part))) * 100\n #print(\"RMS is: \" + str(rms) + \" dB\")\n return rms\n\ndef CalculateThreshold_RMS(data):\n rms = GetRMS(data)\n floor = -96\n tr = 1 - (rms/floor)\n #print(\"Suggested ratio is: \" + str(tr))\n return int(tr * 10000)/10000\n \ndef GetTopFrequencies(a,b,start,num = 5):\n x, y = list(a), list(b)\n freq,amp = [],[]\n for i in range(num):\n m = max(max(y))\n f = x[y.index(m)]\n freq.append(f)\n amp.append(m)\n \n index = y.index(m)\n y.pop(index)\n x.pop(index)\n \n return [freq, amp, start]\n\ndef mode(List): #most frequent\n return max(set(List), key = List.count) \ndef mean(List): #average value\n return sum(List)/len(List)\ndef median(List): #middle of the list\n return sorted(List)[int(len(List)/2)]\n\ndef GetBPMS(song, tr, freqTop):\n bpms1, onsets1 = BPM_Bass(song, tr, freqTop)\n bpms2, onsets2 = BPM_Bass2(song, tr, freqTop)\n return [bpms1, bpms2], onsets1\n\ndef BPM_Bass(song, tr, freqTop):\n song.GetNoteOnset(unit = 2048, chunk_size = 2048, threshold_ratio = tr, HPF = 0, LPF = freqTop)\n song.GetPeaks(x = 1024)\n magicRatio = (128/129.19921875)\n bpm1 = int(song.GetBPM()*magicRatio*100)/100\n bpm2 = int(song.GetBPM_PKS()*100)/100\n return [bpm1, bpm2], [song.pks]\n \ndef BPM_Bass2(song, tr, freqTop):\n song.GetNoteOnset(unit = 1024, chunk_size = 1024, threshold_ratio = tr, HPF = 0, LPF = freqTop)\n song.GetPeaks(x = 512)\n magicRatio = (128/129.19921875)\n bpm1 = int(song.GetBPM()*magicRatio*100)/100\n bpm2 = int(song.GetBPM_PKS()*100)/100\n return [bpm1, bpm2], [song.pks]\n \n","repo_name":"AlbertoV5/BPM-Detection","sub_path":"onset.py","file_name":"onset.py","file_ext":"py","file_size_in_byte":12971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10559804921","text":"import heapq\nfrom aocd import data\n\ndirs = [\n (1,0),\n (0,-1),\n (-1,0),\n (0,1)\n]\n\ndef main(input): \n grid = [[int(c) for c in line] for line in input.split()]\n 
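# Dijkstra's shortest-path search: the heap is ordered by accumulated risk, so\n    # the first time the bottom-right cell is popped its total is optimal; the\n    # best[][] table caches the cheapest known risk per cell to prune worse paths.\n    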
rows, cols = len(grid), len(grid[0])\n best = [[-1 for i in range(cols)] for j in range(rows)]\n heap = []\n # Tuples are risk, x, y\n heapq.heappush(heap, (0, 0, 0,))\n while True:\n r, x, y = heapq.heappop(heap)\n if x == cols-1 and y == rows-1:\n print(r)\n return\n for x1, y1 in dirs:\n x2 = x+x1\n y2 = y+y1\n if bounds_check(x2, y2, rows, cols):\n r2 = r + grid[y2][x2]\n if best[y2][x2] == -1 or r2 < best[y2][x2]:\n best[y2][x2] = r2\n heapq.heappush(heap, (r2,x2,y2))\n\n\ndef bounds_check(x, y, r, c):\n return 0 <= x < c and 0 <= y < r\n\n\nif __name__ == \"__main__\":\n main(data)\n","repo_name":"aetimmes/AoC-2021","sub_path":"solutions/15a.py","file_name":"15a.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14035661518","text":"from operator import or_ as merge_dict\nfrom typing import Dict, List, TypedDict\n\nfrom app.schema import ArgsSchema\n\n\nclass ShoppingItem(TypedDict):\n name: str\n price: int\n quantity: int\n\n\ndef divide_account(\n shopping_list: List[ShoppingItem],\n emails: List[str]\n) -> Dict[str, int]:\n \"\"\"Given a shopping list and an email list, that function divides\n the total price between each email.\n\n Args:\n shopping_list (List[Dict[ShoppingItem]]): List of products as\n [\n {\n \"name\": str,\n \"price\": int,\n \"quantity\": int\n },\n ...\n ]\n emails (List[str]): List of unique and valide emails\n\n Returns:\n Dict[str, int]: Price per email as\n {\n email: price,\n ...\n }\n \"\"\"\n\n schema_result = ArgsSchema().load({\n \"shopping_list\": shopping_list,\n \"emails\": emails\n })\n\n shopping_list = schema_result[\"shopping_list\"]\n emails = schema_result[\"emails\"]\n\n total_price = sum(item[\"price\"] * item[\"quantity\"] for item in shopping_list)\n\n price_per_person, remainder = divmod(total_price, len(emails))\n\n return merge_dict(\n dict.fromkeys(emails[:remainder], price_per_person + 1),\n dict.fromkeys(emails[remainder:], price_per_person)\n )\n","repo_name":"menezesfelipee/stone_challenge","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33700201648","text":"#coding: utf-8\n\"\"\"Подсказывает на какой странице книги я остановился\"\"\"\n\nimport os\nimport string\nimport sys\n\ndef Clear(String):\n \"\"\"\n Очищает строку от символов \" [],' \" (Заменяя их на \"\")\n которые остались после того как она была превращена в строку из списка\n \"\"\"\n\n String = String.replace(\"[\",\"\")\n String = String.replace(\"]\",\"\")\n String = String.replace(\",\",\"\",String.count(\",\"))\n String = String.replace(\"'\",\"\",String.count(\"'\"))\n String = String.replace('\"',\"\",String.count('\"'))\n\n return String\n\n\ndef main():\n\n \"\"\"\n Работает на винде и линухе\n больше нигде не проверялась\n \"\"\"\n\n Books = {} #словарь в котором будут хранится пары: книга - страница\n Path = os.getcwd() + \"/times\" #Рабочая директория + название файла\n sysName = sys.platform\n\n if sysName == \"win32\":\n File = open(Path, encoding = \"utf8\")\n else:\n File = open(Path)\n\n Times = File.readline() #количество запусков программы\n print(\"You have run this program %s times \" % Times.rsplit()[0])\n\n #считывание данных\n\n for BookName in File:\n BookName = BookName.split()\n\n *CurrentBook, CurrentPage = BookName #текущая книгa, текущая страница\n CurrentBook, CurrentPage = 
str(CurrentBook), str(CurrentPage)\n CurrentBook = Clear(CurrentBook)\n Books[CurrentBook] = CurrentPage\n\n print(\"You have stopped on %s page of ' %s' \" % ( CurrentPage, CurrentBook ) )\n NewPage = input(\"Will you enter new page for this book? \")\n\n #обработка полученных данных и составление новых\n if NewPage.isdigit() and int(NewPage) > int(CurrentPage): #все норм\n print(\"Got it\")\n print(\"you have read %s page(s) of ' %s', since you run this programm last time\"\n % (str( int(NewPage) - int(CurrentPage) ), CurrentBook ) )\n Books[CurrentBook] = NewPage\n elif NewPage.isdigit(): #что-то не так\n print(\"Amazing!! Crazy!!!\")\n\n File.close()\n\n\n #запись новых данных в файл\n File = open(Path,'w')\n File.write(str(int(Times) + 1)+\"\\n\")\n for Book in Books:\n print(Book, Books[Book], file = File)\n File.close()\n\n print(\"Good Bye\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GitCaptain/home-repo","sub_path":"Python/Reading/ReadProgress.py","file_name":"ReadProgress.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35863004684","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom wholecell.utils import units\n\ndef exp2(x, a, b, c, d):\n\treturn a * np.exp(b * x) + c * np.exp(d * x)\n\nclass GrowthData(object):\n\n\tdef __init__(self, kb):\n\t\tself.tau_d = np.array(kb.cellDryMassComposition[\"doublingTime\"].asNumber(units.min))\n\n\t\tavgToBeginningConvFactor = kb.avg_cell_to_initial_cell_conversion_factor\n\t\tself._dryMass = np.array([148., 258., 433., 641., 865.]) / avgToBeginningConvFactor # TOKB\n\t\tself._proteinMass = self._dryMass * kb.cellDryMassComposition[\"proteinMassFraction\"]\n\t\tself._rnaMass = self._dryMass * kb.cellDryMassComposition[\"rnaMassFraction\"]\n\t\tself._dnaMass = self._dryMass * kb.cellDryMassComposition[\"dnaMassFraction\"]\n\n\t\t# We are assuming these are constant over all growth rates\n\t\t# (probably not be true...)\n\t\tself.RRNA23S_MASS_SUB_FRACTION = 0.525 # This is the fraction of RNA that is 23S rRNA\n\t\tself.RRNA16S_MASS_SUB_FRACTION = 0.271 # This is the fraction of RNA that is 16S rRNA\n\t\tself.RRNA5S_MASS_SUB_FRACTION = 0.017 # This is the fraction of RNA that is 5S rRNA\n\t\tself.TRNA_MASS_SUB_FRACTION = 0.146 # This is the fraction of RNA that is tRNA\n\t\tself.MRNA_MASS_SUB_FRACTION = 0.041 # This is the fraction of RNA that is mRNA\n\n\t\t# The type stub (just in PyCharm?) is wrong about curve_fit's arg p0.\n\t\t# noinspection PyTypeChecker\n\t\tp0 = (0, 0, 0, 0) # type: float\n\t\tself.dryMassParams, _ = curve_fit(exp2, self.tau_d, self._dryMass, p0=p0)\n\t\tself.proteinMassParams, _ = curve_fit(exp2, self.tau_d, self._proteinMass, p0=p0)\n\t\tself.rnaMassParams, _ = curve_fit(exp2, self.tau_d, self._rnaMass, p0=p0)\n\t\tself.dnaMassParams, _ = curve_fit(exp2, self.tau_d, self._dnaMass, p0=p0)\n\n\t\tself.chromMass = self._chromMass(kb)\n\n\t\tself.C_PERIOD = 40. # TOKB. [minutes]\n\t\tself.D_PERIOD = 20. # TOKB. 
[minutes]\n\t\tself.CD_PERIOD = self.C_PERIOD + self.D_PERIOD\n\n\tdef _chromMass(self, kb):\n\t\tdntCounts = np.array([\n\t\t\tkb.genome_A_count + kb.genome_T_count,\n\t\t\tkb.genome_C_count + kb.genome_G_count,\n\t\t\tkb.genome_G_count + kb.genome_C_count,\n\t\t\tkb.genome_T_count + kb.genome_A_count\n\t\t])\n\n\t\tdntMasses = (kb.get_masses(kb.molecule_groups.polymerizedDNT_IDs) / kb.n_avogadro).asUnit(units.g)\n\n\t\tchromMass = units.dot(dntCounts, dntMasses)\n\t\treturn chromMass\n\n\tdef _clipTau_d(self, tau_d):\n\t\t# Clip values to be in the range that we have data for\n\t\tif hasattr(tau_d, \"dtype\"):\n\t\t\ttau_d[tau_d > self.tau_d.max()] = self.tau_d.max()\n\t\t\ttau_d[tau_d < self.tau_d.min()] = self.tau_d.min()\n\t\telse:\n\t\t\tif tau_d > self.tau_d.max():\n\t\t\t\ttau_d = self.tau_d.max()\n\t\t\telif tau_d < self.tau_d.min():\n\t\t\t\ttau_d = self.tau_d.min()\n\t\treturn tau_d\n\n\n\tdef dnaMass(self, tau_d):\n\t\tif tau_d < self.D_PERIOD:\n\t\t\traise Exception(\n\t\t\t\t\"Can't have doubling time shorter than cytokinesis time!\")\n\n\t\t# TODO: If you really care, this should be a loop.\n\t\t# It is optimized to run quickly over the range of T_d\n\t\t# and C and D periods that we have.\n\t\treturn self.chromMass * (1 +\n\t\t\t1 * (np.maximum(0., self.CD_PERIOD - tau_d) / self.C_PERIOD) +\n\t\t\t2 * (np.maximum(0., self.CD_PERIOD - 2 * tau_d) / self.C_PERIOD) +\n\t\t\t4 * (np.maximum(0., self.CD_PERIOD - 4 * tau_d) / self.C_PERIOD)\n\t\t\t)\n\n\n\tdef massFractions(self, tau_d):\n\t\t\"\"\"\n\t\tGiven an input doubling time in minutes, output mass fractions in fg\n\t\t\"\"\"\n\n\t\tD = {}\n\t\tD[\"dnaMass\"] = self.dnaMass(tau_d)\n\n\t\ttau_d = self._clipTau_d(tau_d)\n\n\t\tD[\"proteinMass\"] = units.fg * exp2(tau_d, *self.proteinMassParams)\n\t\tD[\"rnaMass\"] = units.fg * exp2(tau_d, *self.rnaMassParams)\n\t\tD[\"rRna23SMass\"] = D[\"rnaMass\"] * self.RRNA23S_MASS_SUB_FRACTION\n\t\tD[\"rRna16SMass\"] = D[\"rnaMass\"] * self.RRNA16S_MASS_SUB_FRACTION\n\t\tD[\"rRna5SMass\"] = D[\"rnaMass\"] * self.RRNA5S_MASS_SUB_FRACTION\n\t\tD[\"tRnaMass\"] = D[\"rnaMass\"] * self.TRNA_MASS_SUB_FRACTION\n\t\tD[\"mRnaMass\"] = D[\"rnaMass\"] * self.MRNA_MASS_SUB_FRACTION\n\n\t\treturn D","repo_name":"CovertLab/WholeCellEcoliRelease","sub_path":"reconstruction/ecoli/compendium/growth_data.py","file_name":"growth_data.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"16"} +{"seq_id":"4945476421","text":"import tkinter\nfrom tkinter import *\nimport random\n\n# class Random_area :\n# def rand_choice(self, canvas, canvas_img) :\n# r_img = random.choice(self.img_list)\n# canvas.itemconfig(canvas_img, image=r_img)\n \n# def exe(self) :\n# window = Tk()\n# window.title(\"랜덤 추첨\")\n# window.config(padx=10, pady=10, bg=\"lightblue\")\n\n# canvas = Canvas(window, height=680, width=680, bg=\"ivory\")\n# canvas.pack()\n\n# img_main = PhotoImage(file=\"area/gift.png\")\n# canvas_img = canvas.create_image(340, 340, image=img_main)\n# canvas.create_text(340, 30, text=\"랜덤박스\", fill=\"brown\", \\\n# font=(\"나눔바른펜\", 30, \"bold\"))\n\n\n# self.img_list = []\n# for i in range(8) :\n# img = PhotoImage(file=f\"images/img{i}.png\")\n# self.img_list.append(img)\n\n# button = Button(window, text=\"랜덤 뽑기\", bg=\"white\", fg=\"hotpink\", \\\n# font=(\"나눔바른펜\", 20, \"bold\"), \\\n# command=self.rand_choice(canvas, canvas_img))\n\n# button.place(x=550, y=0)\n\n\n# window.mainloop()\n\n# rand = Random_area()\n# 
rand.exe()\n\ndef rand_choice() :\n r_img = random.choice(img_list)\n canvas.itemconfig(canvas_img, image=r_img)\n\nwindow = Tk()\nwindow.title(\"랜덤 추첨\")\nwindow.config(padx=10, pady=10, bg=\"ivory\")\n\ncanvas = Canvas(window, height=600, width=600, bg=\"ivory\")\ncanvas.pack()\n\nimg_main = tkinter.PhotoImage(file=\"images/gift.png\", master=window)\ncanvas_img = canvas.create_image(300, 300, image=img_main)\n# canvas.create_text(340, 30, text=\"랜덤박스\", fill=\"brown\", \\\n# font=(\"나눔바른펜\", 30, \"bold\"))\n\n\nimg_list = []\nfor i in range(6) :\n img = tkinter.PhotoImage(file=f\"images/img{i}.png\", master=window)\n img_list.append(img)\n\nbutton = Button(window, text=\"여행지 뽑기\", bg=\"white\", fg=\"orange\", \\\n font=(\"나눔바른펜\", 17, \"bold\"), \\\n command=rand_choice)\n\nbutton.place(x=470, y=0)\n\n\nwindow.mainloop()","repo_name":"kimgyuhee/crawling","sub_path":"Korea_trip_crawling_project/code/random_box.py","file_name":"random_box.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41881465609","text":"\"\"\"\nhttps://leetcode.com/submissions/detail/421671894/?from=/explore/challenge/card/november-leetcoding-challenge/566/week-3-november-15th-november-21st/3535/\n\n[BEST]\nclass Solution(object):\n def merge(self, intervals):\n answer = []\n intervals = sorted(intervals, key=lambda x: x[0])\n \n rangeMin = intervals[0][0]\n rangeMax = intervals[0][1]\n for ele in intervals:\n\n if ele[0] > rangeMax:\n answer.append([rangeMin, rangeMax])\n rangeMin = ele[0]\n rangeMax = ele[1]\n\n if ele[0] < rangeMin:\n rangeMin = ele[0]\n if ele[1] > rangeMax:\n rangeMax = ele[1]\n\n answer.append([rangeMin, rangeMax])\n return answer\n\"\"\"\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n\n if len(intervals) <= 1:\n return intervals\n\n intervals.sort()\n result = []\n current = intervals[0]\n result.append(current)\n\n for interval in intervals:\n current_start = current[0]\n current_end = current[1]\n interval_start = interval[0]\n interval_end = interval[1]\n\n if current_end >= interval_start:\n current[1] = max(current_end, interval_end)\n else:\n current = interval\n result.append(current)\n\n return result\n\nif __name__ == '__main__':\n s = Solution()\n print(s.merge([[1,3],[2,6],[8,10],[15,18]]))\n print(s.merge([[1,4],[4,5]]))\n\n","repo_name":"jaecheolkim99/CodingPlayground","sub_path":"LeetCode/Merge Intervals.py","file_name":"Merge Intervals.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21014505968","text":"r\"\"\"\r\n ____ _\r\n | _ \\ ___ __| |_ __ _ _ _ __ ___\r\n | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\r\n | __/ (_) | (_| | | | |_| | | | | | |\r\n |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\r\n\r\n Copyright 2021 Podrum Team.\r\n\r\n This file is licensed under the GPL v2.0 license.\r\n The license file is located in the root directory\r\n of the source code. 
If not you may not use this file.\r\n\"\"\"\r\n\r\nfrom podrum.protocol.mcbe.mcbe_protocol_info import mcbe_protocol_info\r\nfrom podrum.protocol.mcbe.packet.mcbe_packet import mcbe_packet\r\n\r\n\r\nclass start_game_packet(mcbe_packet):\r\n def __init__(self, data: bytes = b\"\", pos: int = 0) -> None:\r\n super().__init__(data, pos)\r\n\r\n self.multiplayer_game = None\r\n self.lan_broadcasting = None\r\n self.xbox_live_broadcast_mode = None\r\n self.platform_broadcast_mode = None\r\n self.enable_commands = None\r\n self.require_texture_pack = None\r\n self.game_rules = None\r\n self.experiments = None\r\n self.has_used_experiments = None\r\n self.bonus_chest = None\r\n self.start_map = None\r\n self.permission_level = None\r\n self.chunk_tick_range = None\r\n self.locked_behavior_pack = None\r\n self.locked_texture_pack = None\r\n self.from_locked_template = None\r\n self.only_msa_gamer_tags = None\r\n self.from_world_template = None\r\n self.world_template_option_locked = None\r\n self.only_old_villagers = None\r\n self.game_version = None\r\n self.limited_world_width = None\r\n self.limited_world_height = None\r\n self.new_nether = None\r\n self.edu_shared_uri_resource_bottom_name = None\r\n self.edu_shared_uri_resource_uri_link = None\r\n self.experimental_gameplay = None\r\n self.level_id = None\r\n self.world_name = None\r\n self.premium_world_template_id = None\r\n self.trial = None\r\n self.movement_type = None\r\n self.movement_rewind_size = None\r\n self.server_authoritative_block_breaking = None\r\n self.current_tick = None\r\n self.enchantment_seed = None\r\n self.item_states = None\r\n self.multiplayer_correlation_id = None\r\n self.server_authoritative_inventories = None\r\n self.server_engine = None\r\n\r\n self.entity_id = None\r\n self.entity_runtime_id = None\r\n self.player_gamemode = None\r\n self.spawn = None\r\n self.rotation = None\r\n self.seed = None\r\n self.spawn_biome_type = None\r\n\r\n self.packet_id: int = mcbe_protocol_info.start_game_packet\r\n\r\n def decode_payload(self):\r\n pass\r\n \r\n def encode_payload(self):\r\n self.write_signed_var_long(self.entity_id)\r\n self.write_var_long(self.entity_runtime_id)\r\n self.write_signed_var_int(self.player_gamemode)\r\n self.write_vector_3_float(self.spawn)\r\n self.write_vector_2_float(self.rotation)\r\n self.write_signed_var_int(self.seed)\r\n self.write_short_le(self.spawn_biome_type)\r\n self.write_string(self.custom_biome_name)\r\n self.write_signed_var_int(self.dimension)\r\n self.write_signed_var_int(self.generator)\r\n self.write_signed_var_int(self.world_gamemode)\r\n self.write_signed_var_int(self.difficulty)\r\n self.write_block_coordinates(self.world_spawn)\r\n self.write_byte(self.disable_achievements)\r\n self.write_signed_var_int(self.time)\r\n self.write_signed_var_int(self.edu_offer)\r\n self.write_byte(self.edu_features)\r\n self.write_string(self.edu_product_id)\r\n self.write_float_le(self.rain_level)\r\n self.write_float_le(self.lightning_level)\r\n self.write_bool(self.confirmed_platform_locked)\r\n self.write_bool(self.multiplayer_game)\r\n self.write_bool(self.lan_broadcasting)\r\n self.write_signed_var_int(self.xbox_live_broadcast_mode)\r\n self.write_signed_var_int(self.platform_broadcast_mode)\r\n self.write_bool(self.enable_commands)\r\n self.write_bool(self.require_texture_pack)\r\n self.write_game_rules(self.game_rules)\r\n self.write_experiments(self.experiments)\r\n self.write_bool(self.has_used_experiments)\r\n self.write_bool(self.bonus_chest)\r\n 
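# the remaining world-settings and template flags follow in declared field order\r\n        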
self.write_bool(self.start_map)\r\n        self.write_signed_var_int(self.permission_level)\r\n        self.write_int_le(self.chunk_tick_range)\r\n        self.write_bool(self.locked_behavior_pack)\r\n        self.write_bool(self.locked_texture_pack)\r\n        self.write_bool(self.from_locked_template)\r\n        self.write_bool(self.only_msa_gamer_tags)\r\n        self.write_bool(self.from_world_template)\r\n        self.write_bool(self.world_template_option_locked)\r\n        self.write_bool(self.only_old_villagers)\r\n        self.write_string(self.game_version)\r\n        self.write_int_le(self.limited_world_width)\r\n        self.write_int_le(self.limited_world_height)\r\n        self.write_bool(self.new_nether)\r\n        self.write_string(self.edu_shared_uri_resource_bottom_name)\r\n        self.write_string(self.edu_shared_uri_resource_uri_link)\r\n        self.write_bool(self.experimental_gameplay)\r\n        self.write_string(self.level_id)\r\n        self.write_string(self.world_name)\r\n        self.write_string(self.premium_world_template_id)\r\n        self.write_bool(self.trial)\r\n        self.write_var_int(self.movement_type)\r\n        self.write_signed_var_int(self.movement_rewind_size)\r\n        self.write_bool(self.server_authoritative_block_breaking)\r\n        self.write_long_le(self.current_tick)\r\n        self.write_signed_var_int(self.enchantment_seed)\r\n        self.write_var_int(0) # block states length\r\n        self.write_item_states(self.item_states)\r\n        self.write_string(self.multiplayer_correlation_id)\r\n        self.write_bool(self.server_authoritative_inventories)\r\n        self.write_string(self.server_engine)\r\n        self.write_long_le(0)\r\n","repo_name":"Podrum/PodrumLegacy","sub_path":"podrum/protocol/mcbe/packet/start_game_packet.py","file_name":"start_game_packet.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"16"} {"seq_id":"26650163561","text":"import numpy as np\nimport pandas as pd\n\n# Generate an example scRNA-seq dataset with 10 genes and 10 cells\ngenes = ['gene' + str(i) for i in range(1, 11)]\ncells = ['cell' + str(i) for i in range(1, 11)]\ndata = np.random.randint(low=0, high=10, size=(10, 10))\ndf = pd.DataFrame(data, index=genes, columns=cells)\n\n# Randomly pick row and column indices (8 rows, 3 columns)\nrows = np.random.choice(df.index, size=8)\ncols = np.random.choice(df.columns, size=3)\n\n# Set the values at these positions to 0\ndf.loc[rows, cols] = 0\nprint(df)\n# # Count the non-zero expression values per cell\ncounts = (df != 0).sum(axis=0)\nprint(counts)\n\n# Set a threshold n and keep cells with more than n non-zero expression values\nn = 5\nfiltered_cells = counts[counts > n].index\nfiltered_df = df[filtered_cells]\n#\nprint(filtered_df)\n","repo_name":"Yang-noob/scADL","sub_path":"code_test/cell_genes_filt_test.py","file_name":"cell_genes_filt_test.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} {"seq_id":"11060459746","text":"from unittest import TestCase\nfrom unittest.mock import MagicMock\n\nfrom server import polygons\n\nclass PolygonsTestCase(TestCase):\n\n    def test_is_close_enough(self):\n        known_input = [\n            MagicMock(longitude=55.749982, latitude=37.612350),\n            MagicMock(longitude=55.749992, latitude=37.612779),\n            MagicMock(longitude=55.749582, latitude=37.611974),\n            MagicMock(longitude=55.748264, latitude=37.452187),\n            MagicMock(longitude=55.748827, latitude=37.451842),\n            MagicMock(longitude=55.746306, latitude=37.711178)\n        ]\n        self.assertTrue(polygons.is_close_enough(known_input[0], known_input[1]))\n        self.assertTrue(polygons.is_close_enough(known_input[1], known_input[2]))\n        self.assertTrue(polygons.is_close_enough(known_input[0], known_input[2]))\n        
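# points 3 and 4 differ by well under a thousandth of a degree on each axis,\n        # so they should also count as close\n        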
self.assertTrue(polygons.is_close_enough(known_input[3], known_input[4]))\n self.assertFalse(polygons.is_close_enough(known_input[2], known_input[3]))\n self.assertFalse(polygons.is_close_enough(known_input[4], known_input[5]))\n self.assertFalse(polygons.is_close_enough(known_input[0], known_input[5]))\n \n\n def test_group_fire_spots_by_distance(self):\n known_input = [\n MagicMock(longitude=55.749982, latitude=37.612350),\n MagicMock(longitude=55.749992, latitude=37.612779),\n MagicMock(longitude=55.749582, latitude=37.611974),\n MagicMock(longitude=55.748264, latitude=37.452187),\n MagicMock(longitude=55.748827, latitude=37.451842),\n MagicMock(longitude=55.746306, latitude=37.711178)\n ]\n known_output_len = 3\n known_output_set0 = set(known_input[:3])\n known_output_set1 = set(known_input[3:5])\n known_output_set2 = set(known_input[5:])\n output = polygons.group_fire_spots_by_distance(known_input)\n print(known_input)\n print(output)\n self.assertEqual(len(output), known_output_len)\n self.assertEqual(set(output[0]), known_output_set0)\n self.assertEqual(set(output[1]), known_output_set1)\n self.assertEqual(set(output[2]), known_output_set2)\n\n","repo_name":"IIKovalenko/mayskiy_shashlyk","sub_path":"tests/polygon_helpers_test.py","file_name":"polygon_helpers_test.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70942169929","text":"# parameters.py\nfrom Tools import *\nimport commonVar as common\n\nimport networkx as nx\nimport matplotlib as mplt\n\n\ndef loadParameters(self):\n\n print(\"NetworkX version %s running\" % nx.__version__)\n print(\"Matplotlib version %s running\\n\" % mplt.__version__)\n\n nxv = nx.__version__\n vOK = checkVersion(nxv, 'NetworkX', 1, 9, 1)\n\n if not vOK:\n print(\"NetworkX 1.9.1 or greater required\")\n os.sys.exit(1)\n\n mpltv = mplt.__version__\n vOK = checkVersion(mpltv, 'Matplotlib', 1, 5, 1)\n\n if not vOK:\n print(\"Matplotlib 1.5.1 or greater required\")\n os.sys.exit(1)\n\n mySeed = eval(input(\"random number seed (1 to get it from the clock) \"))\n if mySeed == 1:\n random.seed()\n else:\n random.seed(mySeed)\n\n self.nAgents = 0\n print(\"No 'bland' agents\")\n\n #self.worldXSize= input(\"X size of the world? \")\n self.worldXSize = 1\n print(\"X size of the world not relevant\")\n\n #self.worldYSize= input(\"Y size of the world? \")\n self.worldYSize = 50\n print(\"y size of the world not relevant\")\n\n # recipes\n common.maxLenght = 10\n common.maxSector = 6\n print(\n \"recipes: max lenght\",\n common.maxLenght,\n \"and max sector number\",\n common.maxSector)\n\n self.nCycles = eval(input(\"How many cycles? (0 = exit) \"))\n\n v = input(\"verbose? 
(y/[n]) \")\n if v == \"y\" or v == \"Y\":\n common.verbose = True # predefined False\n","repo_name":"terna/SLAPP3","sub_path":"6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/production/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"30454861903","text":"osoba = (\"sofija\",25,\"python\") # tuple se ne moze menjati podatci se samo mogu transportovati!!!\r\nprint(osoba[0])\r\n\r\nime , godine, smer = osoba # raspakovani tuple\r\n\r\nprint(ime)\r\n\r\nime = osoba[0]\r\ngodine = osoba[1]\r\nsmer = osoba[2]\r\nprint(ime)\r\n\r\n","repo_name":"rudo23/moj_repozitori2","sub_path":"python files/python-ppf-pr8-sekvence_tupl.py","file_name":"python-ppf-pr8-sekvence_tupl.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30711401828","text":"# vim: set fileencoding=utf8:\nfrom setuptools import setup, find_packages\n\nversion = '0.0.3'\n\ndef read(filename):\n import os.path\n return open(os.path.join(os.path.dirname(__file__), filename)).read()\nsetup(\n name=\"django-userel\",\n version=version,\n description = \"Extend ForeignKey field for User. It support ``auto_now`` and ``auto_now_add``\",\n long_description=read('README.rst'),\n classifiers = [\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n keywords = \"django ForeignKey auto_now auto_now_add user\",\n author = \"Alisue\",\n author_email = \"lambdalisue@hashnote.net\",\n url=r\"https://github.com/lambdalisue/django-userel\",\n download_url = r\"https://github.com/lambdalisue/django-userel/tarball/master\",\n license = 'MIT',\n packages = find_packages(),\n include_package_data = True,\n install_requires=[\n 'distribute',\n 'setuptools-git',\n ],\n test_suite='packageutils.runtests.runtests',\n tests_require=[\n 'django>=1.3',\n 'PyYAML',\n ],\n)\n","repo_name":"lambdalisue/django-userel","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"9198309841","text":"import sys\nimport os\nfrom PIL import Image\n\ndef get_dice_score(clustered_img_path, original_img_path):\n dice_score = 0\n number_of_images = 0\n for clustered, original in zip(os.listdir(clustered_img_path), os.listdir(original_img_path)):\n number_of_images += 1\n f_clustered = os.path.join(clustered_img_path, clustered)\n f_original = os.path.join(original_img_path, original)\n try:\n clustered_img = Image.open(f_clustered)\n original_img = Image.open(f_original)\n clustered_img = clustered_img.convert('RGB')\n original_img = original_img.convert('RGB')\n clustered_img_pixels = clustered_img.load()\n original_img_pixels = original_img.load()\n except IOError as err:\n print('cannot open file' + err)\n sys.exit()\n\n tumor = (0, 255, 0) # green\n stroma = (0, 0, 255) # blue\n\n all_counter_tumor = 0\n all_counter_stroma = 0\n match_stroma = 0\n match_tumor = 0\n\n for i in range(original_img.width):\n for j in range(original_img.height):\n if original_img_pixels[i, j] == stroma:\n all_counter_stroma += 1\n if clustered_img_pixels[i, j] == stroma:\n all_counter_stroma += 1\n if original_img_pixels[i, j] == tumor:\n all_counter_tumor += 1\n if 
clustered_img_pixels[i, j] == tumor:\n                    all_counter_tumor += 1\n                if original_img_pixels[i, j] == stroma and clustered_img_pixels[i, j] == stroma:\n                    match_stroma += 1\n                elif original_img_pixels[i, j] == tumor and clustered_img_pixels[i, j] == tumor:\n                    match_tumor += 1\n\n        dice_score_stroma = 2 * match_stroma / all_counter_stroma if all_counter_stroma > 0 else 0\n        dice_score_tumor = 2 * match_tumor / all_counter_tumor if all_counter_tumor > 0 else 0\n        number_of_images -= 1 if all_counter_stroma == 0 or all_counter_tumor == 0 else 0\n\n        dice_score += (dice_score_stroma + dice_score_tumor) / 2\n\n    final_dice_score = dice_score / number_of_images\n\n    print('final_dice_score', final_dice_score)\n\nif __name__ == '__main__':\n    clustered_images_path = \"clustered_images\"\n    original_images_path = \"ground-truth_images\"\n    get_dice_score(clustered_images_path, original_images_path)\n","repo_name":"bc4s/SSBU","sub_path":"python/dice-score.py","file_name":"dice-score.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34281994847","text":"from PIL import Image, ImageDraw\nimport json\n\ndef draw_bounding_box(image_path, bounding_box_list):\n    img = Image.open(image_path)\n    draw = ImageDraw.Draw(img)\n\n    for box in bounding_box_list:\n        # each box in bounding_box_list should be a tuple or list in the format (left, top, right, bottom)\n        draw.rectangle(box, outline=\"red\", width=2)\n\n    img = img.convert('RGB')\n\n    # show the image (this will open the image in the default image viewer)\n    # img.show()\n\n    # if you want to save a new image with the bounding boxes drawn, you can use the following code:\n    img_name = image_path[:-4]\n    img.save('result.jpg')\n\n# using the functions\ndef get_bounding_box_list(image_name, json_file):\n    with open(json_file, 'r') as f:\n        ocr_result = json.load(f)\n\n    img_result = ocr_result[image_name]\n    bounding_box_list = []\n    box_list = list(img_result.keys())\n    for box in box_list:\n        tmp = list(map(int, box.split(',')))\n        new_box = [tmp[0], tmp[1], tmp[0]+tmp[2], tmp[1]+tmp[3]]\n        bounding_box_list.append(new_box)\n    return bounding_box_list\n\ndef get_text(image_name, json_file):\n    with open(json_file, 'r') as f:\n        ocr_result = json.load(f)\n\n    img_result = ocr_result[image_name]\n    text_list = list(img_result.values())\n    total_text = '\\n'.join(text_list)\n    return total_text\n\n\n# bounding_box_list = get_bounding_box_list('coupontrak.com_[2022_12_20]_0.png', 'ocr_result.json')\n# # print(bounding_box_list)\n# image_path = 'imgs/coupontrak.com_[2022_12_20]_0.png'\n# # draw_bounding_box(image_path, bounding_box_list)\n# text = get_text('coupontrak.com_[2022_12_20]_0.png', 'ocr_result.json')\n# print(text)\n","repo_name":"CuiChi1109/CRP_Detection","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18846869088","text":"import numpy as np\nfrom scipy.special import erfc, erfcinv\nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad\n\nfrom pylab import *\n\nc = 0.3 # m/ns\ntwopi = 2.*np.pi\n\nclass Dist:\n    def __init__(self, config_file, beam, RMfile):\n        self.bm = beam\n        self.RMfile = RMfile\n        self.ReadConfig(config_file)\n        self.FindNsrc()\n\n    def ReadConfig(self, config_file):\n        for line in open(config_file).readlines():\n            if line.startswith('#'):\n                continue\n            if line.startswith('aL'):\n                self.aL = float(line.split(':')[1])\n            if line.startswith('bL'):\n                self.bL = float(line.split(':')[1])\n            if line.startswith('aH'):\n                self.aH = float(line.split(':')[1])\n            if 
line.startswith('bH'):\n self.bH = float(line.split(':')[1])\n if line.startswith('Smin'):\n self.Smin = float(line.split(':')[1])\n if line.startswith('Smax'):\n self.Smax = float(line.split(':')[1])\n if line.startswith('So'):\n self.So = float(line.split(':')[1])\n if line.startswith('Pmean'):\n self.PImu = float(line.split(':')[1])\n if line.startswith('Psig'):\n self.PIsig = float(line.split(':')[1])\n if line.startswith('Pmax'):\n self.PImax = float(line.split(':')[1])\n if line.startswith('Gmean'):\n self.Gmean = float(line.split(':')[1])\n if line.startswith('Gsig'):\n self.Gsig = float(line.split(':')[1])\n\n\n def FindNsrc(self):\n _s = np.linspace(self.Smin,self.Smax,500)\n def hi(x): return self.bm * self.aH * x**-self.bH\n def lo(x): return self.bm * self.aL * x**-self.bL\n dNdS = np.where(_s >= self.So, hi(_s), lo(_s))\n NgtS = np.cumsum(dNdS)*(_s[1]-_s[0])\n self.NgtS = (NgtS[-1] - NgtS)\n self.Nsrc = int(self.NgtS[0] - self.NgtS[-1])\n self.NgtS /= self.NgtS[0]\n self.Fs = interp1d(self.NgtS,_s)\n\n\n def DrawSrcFlux(self):\n return self.Fs(np.random.uniform(self.NgtS.min(),self.NgtS.max(),self.Nsrc))\n\n def DrawLogNormal(self):\n if self.PImu == 0:\n return np.zeros(self.Nsrc)\n else:\n #Use the formula given in Eqn 5 of Hales 2014.\n def pdf(x):\n p = -0.5*(np.log10(x/self.PImu)/self.PIsig)**2\n p = np.exp(p)\n p /= x*self.PIsig*np.log(10.)*np.sqrt(2.*np.pi)\n return p\n def cdf(x):\n return quad(pdf, 0, x)[0]\n Xmax = cdf(self.PImax)\n Xes = np.linspace(1e-6,1,300)\n CDF = np.array([cdf(xi) for xi in Xes])\n return np.interp(np.random.uniform(0, Xmax, self.Nsrc), CDF, Xes)\n #Ex = np.log(self.PImu)\n #Sx = np.sqrt(np.log(1.+(self.PIsig/self.PImu)**2))\n #X = np.random.uniform(0,1,self.Nsrc)\n #X *= 0.5*erfc(-1.*(np.log(self.PImax)-Ex)/(Sx*np.sqrt(2.)))\n #X = Ex - np.sqrt(2.)*Sx*erfcinv(2.*X)\n #return np.exp(X)\n\n def DrawSphere(self):\n cosTh = np.random.uniform(0, 1, self.Nsrc)\n phi = np.random.uniform(0, twopi, self.Nsrc)\n Th = np.arccos(cosTh)\n l = np.sin(Th)*np.cos(phi)\n m = np.sin(Th)*np.sin(phi)\n return l, m\n\n def DrawFromCDF(self):\n _load = np.load(self.RMfile)\n _rms = _load['arr_0']\n _Frm = _load['arr_1']\n return np.interp(np.random.uniform(0, 1, self.Nsrc), _Frm, _rms)\n\n def SimSkyParams(self):\n prms = {}\n prms['P'] = self.DrawLogNormal()\n prms['G'] = self.Gsig * np.random.standard_normal(self.Nsrc) + self.Gmean\n prms['F'] = self.DrawSrcFlux()\n prms['L'], prms['M'] = self.DrawSphere()\n prms['RM'] = self.DrawFromCDF()\n prms['X'] = np.random.uniform(0.,1.,self.Nsrc)\n return prms\n","repo_name":"damoupenn/PolSim","sub_path":"src/Distributions.py","file_name":"Distributions.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37656309420","text":"\"\"\"\nReferences:\nhttps://tkinter.com/\nhttps://anzeljg.github.io/rin2/book2/2405/docs/tkinter/index.html\nhttps://docs.python.org/3/library/tk.html\nhttps://matplotlib.org/stable/gallery/pie_and_polar_charts/pie_features.html\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkcalendar import *\nfrom datetime import datetime\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n# classes\nfrom Database import Database\nfrom Popup import Popup\nfrom User import User\n\nclass Dashboard(tk.Frame):\n \"\"\" creates Dashboard, main page of the application \"\"\"\n def __init__(self, root):\n 
self.db = Database() # Database() instance\n        self.library = self.db.library # Library() instance\n        self.user = None # current user\n        self.user_lst = self.db.get_users() # list of users from database\n        self.bg = '#fff7e8' # frame background cream color\n        # create parent frame and configurations\n        super().__init__(root)\n        self.config(bg=self.bg)\n        self.grid(sticky='NWSE')\n        self.grid_columnconfigure((0,1,2,3,4,5), weight=1)\n        self.grid_rowconfigure((0,1,2,3,4,5), weight=1)\n        # Add Some Style\n        self.style = ttk.Style()\n        self.style.theme_use('aqua')\n        # draw widgets\n        self.create_widgets()\n\n    def create_widgets(self):\n        \"\"\" creates all widgets on Dashboard\"\"\"\n        # User ID dropdown selection\n        self.user_label = tk.Label(self,\n                                   text=\"Select User ID: \",\n                                   background=self.bg)\n        self.user_label.config(bg='#fff7e8')\n        self.user_label.grid(column=1, row=0, sticky='E', pady=10, rowspan=2)\n\n        self.user_entry_var = tk.StringVar()\n        self.user_entry = ttk.Combobox(self,\n                                       textvariable=self.user_entry_var,\n                                       background=self.bg,\n                                       state='readonly')\n        self.user_entry['values']=self.user_lst +['New User']\n        self.user_entry.grid(column=2, row=0, sticky='W', pady=10, rowspan=2)\n        # calendar\n        self.cal = Calendar(self,\n                            selectmode='day',\n                            showweeknumbers=False,\n                            firstweekday='sunday',\n                            selectforeground='#FF5733',\n                            headersbackground='#FF5733',\n                            foreground='#FF5733')\n        self.cal.grid(column=4, row=2, sticky='NWSE', padx=10, pady=10)\n        # 'View & Edit Routine' Button. Creates Popup when clicked.\n        self.calendar_button = tk.Button(self,\n                                         text=\"View & Edit Routine\",\n                                         command=lambda: self.getDate())\n        self.calendar_button.grid(column=4, row=3)\n        # displays message to select User ID\n        # if 'View & Edit Routine' clicked but no user ID\n        self.calendar_label = tk.Label(self, text=\"\", background=self.bg)\n        self.calendar_label.grid(column=4, row=3, sticky=\"S\", pady=10)\n        # 'Instructions' button to view instructions\n        self.instructions_button = tk.Button(self,\n                                             text=\"Instructions\",\n                                             command=lambda: self.instructions())\n        self.instructions_button.grid(column=1, row=5, sticky='W')\n        # draw pie chart\n        self.draw_pie()\n\n    def draw_pie(self):\n        \"\"\" Draws pie chart for Monthly Muscle Distribution\"\"\"\n        # create a figure\n        self.figure = Figure(figsize=(6,3), dpi=90)\n        # create FigureCanvasTkAgg object\n        self.figure_canvas = FigureCanvasTkAgg(self.figure, self)\n        self.figure_canvas.get_tk_widget().grid(column=1, row=2,\n                                                pady=10, padx=10,\n                                                columnspan=2, rowspan=2,\n                                                sticky='NWSE')\n        # makes sure pie chart is drawn when user is selected\n        if self.user != None:\n            # update user tasks and pie chart\n            self.user.update(self.cal.get_displayed_month())\n            data = self.user.pie\n            muscles = list(data.keys())\n            usage = list(data.values())\n            if set(usage) == set([0]):\n                usage = []\n            # create pie plot\n            axes = self.figure.add_subplot(anchor=\"W\")\n            axes.pie(usage, normalize=True)\n            axes.set_title('Monthly Muscle Distribution')\n            axes.legend(muscles,\n                        loc='center right',\n                        bbox_to_anchor=(1.5, 0.5),\n                        borderaxespad=0.)\n\n    ############################################################################\n    ########################### EVENTS #########################################\n    ############################################################################\n    def mouse_pressed(self, event):\n        \"\"\" Refreshes user information based on Dashboard actions \"\"\"\n        # Queries user information when user selected\n        if str(event.widget) == \".!dashboard.!combobox\":\n            self.updateUserLst(self.user)\n        # updates user pie chart 
when calendar selected\n        elif self.user != None and str(event.widget) == \".!dashboard.!calendar\":\n            self.user.update(self.cal.get_displayed_month())\n        # refreshes pie chart on window\n        self.draw_pie()\n\n    ############################################################################\n    ########################### SHARED METHODS #################################\n    ############################################################################\n    def updateUserLst(self, id):\n        \"\"\" Takes User ID selection and initializes User instances \"\"\"\n        # Make 'Select User ID' message disappear\n        self.calendar_label.config(text = \"\")\n        id = self.user_entry.get() # combobox selection\n        # queries existing user or new user\n        self.user = User(self.db, self.user_entry.get())\n        # Combobox selection becomes new user id\n        if id == 'New User':\n            self.user_lst = self.db.get_users()\n            self.user_entry['values'] = self.user_lst +['New User']\n            self.user_entry_var.set(self.user.id)\n\n    def getDate(self):\n        \"\"\" Popup window if user is selected else displays msg \"\"\"\n        date = self.cal.get_date()\n        date = datetime.strptime(date, '%m/%d/%y').strftime('%Y-%m-%d')\n        if self.user != None:\n            Popup(self.library, date, self.user, self)\n        else:\n            self.calendar_label.config(text = \"Select a User ID!\")\n\n    def instructions(self):\n        \"\"\" creates Instructions popup window \"\"\"\n        # extract textfile containing instructions string\n        with open('instructions.txt', 'r') as f:\n            instructions = f.read()\n        instructions_frame = tk.Toplevel(self)\n        instructions_frame.title(\"App Instructions\")\n        instructions_label = tk.Label(instructions_frame, text=instructions)\n        instructions_label.pack(ipadx=10, ipady=10)\n","repo_name":"sophieyeh256/w200_project1_gymroutine","sub_path":"Dashboard.py","file_name":"Dashboard.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"15219678559","text":"import collections\n\nclass TrieNode:\n    def __init__(self):\n        self.alpha = ''\n        self.children = collections.defaultdict(TrieNode)\n    \nclass Trie:\n    def __init__(self):\n        self.root = TrieNode()\n    \n    \n    def insert(self, word, alpha):\n        node = self.root\n        for char in word:\n            node = node.children[char]\n        node.alpha = alpha\n    \n    def search(self, word):\n        node = self.root\n        for char in word:\n            if char not in node.children:\n                return ''\n            node = node.children[char]\n        return node.alpha\n    \nT = int(input())\ntrie = Trie()\nfor _ in range(T):\n    alpha, num = list(input().rstrip().split())\n    trie.insert(num, alpha)\n    \nbinary_sequence = input().rstrip()\n\ni = 0\nresult = ''\nwhile binary_sequence:\n    temp = trie.search(binary_sequence[:i])\n    if not temp:\n        i += 1\n    else:\n        result += temp\n        binary_sequence = binary_sequence[i:]\n        i = 0\nprint(result)","repo_name":"uniqueimaginate/Coding","sub_path":"Problem_Solving/BOJ/6800.py","file_name":"6800.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71526785607","text":"#import all the necessary modules\nfrom mcpi.minecraft import Minecraft\nfrom mcpi import block\nimport time\n\n#make your connection with the Minecraft world\nmc=Minecraft.create()\n\n#look up the player's position\npos=mc.player.getTilePos()\n\n#Check that you are not standing too close to the edge of the world\nif pos.z<-40:\n    mc.postToChat('teleporting to a safe place')\n    mc.player.setPos(pos.x,pos.y,-40)\n    
pos=mc.player.getTilePos()\n\n\n#Mark where the teleport is\nzpos=pos.z-40\n\n#Make a valley by removing blocks\n#mc.setBlocks(pos.x-1,pos.y+3,pos.z,pos.x+1,pos.y-7,pos.z,block.AIR.id)\nmc.setBlocks(pos.x-1,pos.y+3,pos.z,pos.x+1,pos.y-7,pos.z-88,block.AIR.id)\n\n\n#Build the invisible bedrock floor\nmc.setBlocks(pos.x,pos.y-1,pos.z,pos.x,pos.y-7,pos.z,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x-1,pos.y-1,pos.z,pos.x,pos.y-7,pos.z,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x+1,pos.y-1,pos.z,pos.x,pos.y-7,pos.z,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x,pos.y-1,pos.z-88,pos.x-1,pos.y-7,pos.z-88,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x-1,pos.y-1,pos.z-88,pos.x,pos.y-7,pos.z-88,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x+1,pos.y-1,pos.z-88,pos.x,pos.y-7,pos.z-88,block.BEDROCK_INVISIBLE.id)\nmc.setBlocks(pos.x,pos.y,pos.z,pos.x,pos.y-7,pos.z-92,block.BEDROCK_INVISIBLE.id)\n\n#Place the bombs\nmc.setBlocks(pos.x,pos.y,pos.z,pos.x,pos.y,pos.z-88,block.TNT.id,1)\n\n#Build the podium at the end\nmc.setBlocks(pos.x-2,pos.y,pos.z-93,pos.x+2,pos.y,pos.z-97,block.GLOWING_OBSIDIAN.id)\nmc.setBlocks(pos.x-1,pos.y+1,pos.z-94,pos.x+1,pos.y+1,pos.z-96,block.NETHER_REACTOR_CORE.id,1)\nmc.setBlock(pos.x,pos.y+2,pos.z-95,block.REDSTONE_ORE.id)\n\n#how many teleports you have left\nteleport=1\n\n#Make the teleport display\nmc.setBlock(pos.x+1,pos.y+1,pos.z-44,block.NETHER_REACTOR_CORE.id,2)\nmc.setBlock(pos.x-1,pos.y+1,pos.z-44,block.NETHER_REACTOR_CORE.id,2)\n\n#Teleport the player when he/she is at a certain spot\nwhile teleport ==1:\n    pos=mc.player.getTilePos()\n    if pos.z==zpos:\n        mc.player.setPos(pos.x,pos.y,pos.z-24)\n        teleport=0\n","repo_name":"educaris/minecraftPython","sub_path":"TNTrun.py","file_name":"TNTrun.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"4495183687","text":"import numpy as np\r\nimport torch\r\nimport torchvision\r\nimport argparse\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import datasets, models, transforms\r\nfrom torch import nn\r\nfrom torch import optim\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sb\r\n\r\nparser = argparse.ArgumentParser(description='trains a machine learning model to classify images')\r\n\r\nparser.add_argument('--model', type=str,\r\n                    help='determines the pretrained network to use. ARGUMENT REQUIRED: options include vgg and resnet')\r\nparser.add_argument('--epochs', type=int, help='determines number of epochs the NN will be trained for, default=1')\r\nparser.add_argument('--learning_rate', type=float, help='determines learning rate of NN. default = 0.005')\r\nparser.add_argument('--device', type=str,\r\n                    help='determines which device the NN will be trained on. 
options are gpu and cpu, default=cpu')\r\n\r\nargs = parser.parse_args()\r\n\r\ndata_dir = 'flowers'\r\ntrain_dir = data_dir + '/train'\r\nvalid_dir = data_dir + '/valid'\r\ntest_dir = data_dir + '/test'\r\n\r\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\r\n                                       transforms.RandomResizedCrop(224),\r\n                                       transforms.RandomHorizontalFlip(),\r\n                                       transforms.ToTensor(),\r\n                                       transforms.Normalize([0.485, 0.456, 0.406],\r\n                                                            [0.229, 0.224, 0.225])])\r\n\r\ndata_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(),\r\n                                      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n\r\ntrain_data = torchvision.datasets.ImageFolder(root=train_dir, transform=train_transforms)\r\nvalid_data = torchvision.datasets.ImageFolder(root=valid_dir, transform=data_transforms)\r\ntest_data = torchvision.datasets.ImageFolder(root=test_dir, transform=data_transforms)\r\n\r\ntrain_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)\r\nvalid_dataloader = DataLoader(valid_data, batch_size=64, shuffle=True)\r\ntest_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)\r\n\r\n# print(args.model)\r\nif args.model == 'vgg':\r\n    model = torchvision.models.vgg16(pretrained=True)\r\n    for param in model.parameters():\r\n        param.requires_grad = False\r\n\r\n    model.classifier = nn.Sequential(nn.Linear(25088, 1000),\r\n                                     nn.ReLU(),\r\n                                     nn.Dropout(0.1),\r\n                                     nn.Linear(1000, 500),\r\n                                     nn.ReLU(),\r\n                                     nn.Dropout(0.1),\r\n                                     nn.Linear(500, 102),\r\n                                     nn.LogSoftmax(dim=1))\r\n\r\nelif args.model == 'resnet':\r\n    model = torchvision.models.resnet18(pretrained=True)\r\n    for param in model.parameters():\r\n        param.requires_grad = False\r\n\r\n    # resnet18 exposes its final layer as fc (512 inputs), not classifier\r\n    model.fc = nn.Sequential(nn.Linear(512, 500),\r\n                             nn.ReLU(),\r\n                             nn.Dropout(0.1),\r\n                             nn.Linear(500, 102),\r\n                             nn.LogSoftmax(dim=1))\r\n    model.classifier = model.fc  # alias so the shared optimizer setup below works for both models\r\nelse:\r\n    raise Exception('Please use a supported model. Supported models are vgg and resnet')\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() and args.device == 'gpu' else \"cpu\")\r\noptimizer = optim.Adam(model.classifier.parameters(),\r\n                       lr=args.learning_rate if args.learning_rate is not None else 0.005)\r\ncriterion = nn.NLLLoss()\r\nmodel.to(device)\r\n\r\nepochs = args.epochs if args.epochs is not None else 1\r\nsteps = 0\r\nrunning_loss = 0\r\nprint_every = 5\r\nfor epoch in range(epochs):\r\n    for inputs, labels in train_dataloader:\r\n        steps += 1\r\n        inputs, labels = inputs.to(device), labels.to(device)\r\n        optimizer.zero_grad()\r\n\r\n        logps = model.forward(inputs)\r\n        loss = criterion(logps, labels)\r\n        loss.backward()\r\n        optimizer.step()\r\n\r\n        running_loss += loss.item()\r\n\r\n        if steps % print_every == 0:\r\n            valid_loss = 0\r\n            accuracy = 0\r\n            model.eval()\r\n            with torch.no_grad():\r\n                for inputs, labels in valid_dataloader:\r\n                    inputs, labels = inputs.to(device), labels.to(device)\r\n                    logps = model.forward(inputs)\r\n                    batch_loss = criterion(logps, labels)\r\n\r\n                    valid_loss += batch_loss.item()\r\n\r\n                    # Calculate accuracy\r\n                    ps = torch.exp(logps)\r\n                    top_p, top_class = ps.topk(1, dim=1)\r\n                    equals = top_class == labels.view(*top_class.shape)\r\n                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\r\n\r\n            print(f\"Epoch {epoch + 1}/{epochs}.. \"\r\n                  f\"Train loss: {running_loss / print_every:.3f}.. \"\r\n                  f\"Validation loss: {valid_loss / len(valid_dataloader):.3f}.. \"\r\n                  f\"Validation accuracy: {accuracy / len(valid_dataloader):.3f}\")\r\n            running_loss = 0\r\n            model.train()\r\n\r\nmodel.class_to_idx = train_data.class_to_idx\r\ncheck_point = {'model': model.state_dict(),\r\n               'index_vals': model.class_to_idx,\r\n               'opti_state': optimizer.state_dict(),\r\n               'model_to_use': args.model}\r\ntorch.save(check_point, 'checkpoint.pth')\r\n","repo_name":"terrablader11/udacity-projects","sub_path":"ai-prog-python-proj2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34633179726","text":"from Model.BERT_BILSTM_CRF import BERTBILSTMCRF\nfrom Model.BILSTM_Attetion_CRF import BILSTMAttentionCRF\nfrom Model.BILSTM_CRF import BILSTMCRF\nfrom Model.IDCNN_CRF import IDCNNCRF\nfrom Model.IDCNN5_CRF import IDCNNCRF2\n\nfrom sklearn.metrics import f1_score, recall_score\nimport numpy as np\nimport pandas as pd\n\nfrom Public.utils import *\nfrom keras.callbacks import EarlyStopping\nfrom DataProcess.process_data import DataProcess\n\nmax_len = 100\n\n\ndef train_sample(train_model='BERTBILSTMCRF',\n                 # ['BERTBILSTMCRF', 'BILSTMAttentionCRF', 'BILSTMCRF',\n                 #  'IDCNNCRF', 'IDCNNCRF2']\n                 epochs=15,\n                 log = None,\n                 ):\n\n    # BERT needs different data parameters; get the training and test data\n    if train_model == 'BERTBILSTMCRF':\n        dp = DataProcess(data_type='msra', max_len=max_len, model='bert')\n    else:\n        dp = DataProcess(data_type='msra', max_len=max_len)\n    train_data, train_label, test_data, test_label = dp.get_data(one_hot=True)\n\n    log.info(\"----------------------------DATA INFO START--------------------------\")\n    log.info(f\"currently using dataset MSRA\")\n    # log.info(f\"train_data:{train_data.shape}\")\n    log.info(f\"train_label:{train_label.shape}\")\n    # log.info(f\"test_data:{test_data.shape}\")\n    log.info(f\"test_label:{test_label.shape}\")\n    log.info(\"----------------------------DATA INFO END--------------------------\")\n\n    if train_model == 'BERTBILSTMCRF':\n        model_class = BERTBILSTMCRF(dp.vocab_size, dp.tag_size, max_len=max_len)\n    elif train_model == 'BILSTMAttentionCRF':\n        model_class = BILSTMAttentionCRF(dp.vocab_size, dp.tag_size)\n    elif train_model == 'BILSTMCRF':\n        model_class = BILSTMCRF(dp.vocab_size, dp.tag_size)\n    elif train_model == 'IDCNNCRF':\n        model_class = IDCNNCRF(dp.vocab_size, dp.tag_size, max_len=max_len)\n    else:\n        model_class = IDCNNCRF2(dp.vocab_size, dp.tag_size, max_len=max_len)\n\n    model = model_class.creat_model()\n\n    callback = TrainHistory(log=log, model_name=train_model)  # custom callback that records training data\n    early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max')  # stop early\n    model.fit(train_data, train_label, batch_size=32, epochs=epochs,\n              validation_data=[test_data, test_label],\n              callbacks=[callback, early_stopping])\n\n    # compute the f1 and recall values\n\n    pre = model.predict(test_data)\n    pre = np.array(pre)\n    test_label = np.array(test_label)\n    pre = np.argmax(pre, axis=2)\n    test_label = np.argmax(test_label, axis=2)\n    pre = pre.reshape(pre.shape[0] * pre.shape[1], )\n    test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )\n\n    f1score = f1_score(pre, test_label, average='macro')\n    recall = recall_score(pre, test_label, average='macro')\n\n    log.info(\"================================================\")\n    log.info(f\"--------------:f1: {f1score} --------------\")\n    log.info(f\"--------------:recall: {recall} --------------\")\n    log.info(\"================================================\")\n\n    # add f1 and recall to the last recorded entry\n    info_list = 
callback.info\n    if info_list and len(info_list)>0:\n        last_info = info_list[-1]\n        last_info['f1'] = f1score\n        last_info['recall'] = recall\n\n    return info_list\n\n\nif __name__ == '__main__':\n\n    # models to be tested\n    train_modes = ['IDCNNCRF', 'IDCNNCRF2', 'BILSTMAttentionCRF', 'BILSTMCRF', 'BERTBILSTMCRF']\n\n    # define the file paths (for recording the data)\n    log_path = os.path.join(path_log_dir, 'train_log.log')\n    df_path = os.path.join(path_log_dir, 'df.csv')\n    log = create_log(log_path)\n\n    # while training, write the recorded data into the df file\n    columns = ['model_name','epoch', 'loss', 'acc', 'val_loss', 'val_acc', 'f1', 'recall']\n    df = pd.DataFrame(columns=columns)\n    for model in train_modes:\n        info_list = train_sample(train_model=model, epochs=15, log=log)\n        for info in info_list:\n            df = df.append([info])\n        df.to_csv(df_path)\n\n","repo_name":"CLOVEXCWZ/NER_DEMO","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"16"}
+{"seq_id":"26538348519","text":"# Authored by Zack Glynn 2010\n\n\n#def obj_ex(): # uncomment this line if this script is to be used as a function\nif True: # comment out or delete this line if this is used as a function instead of a standalone script\n\timport maya.cmds as mc\n\n\tif len(mc.ls(sl = True)) != 0:\t# Checks to see if anything is selected. Otherwise, the script cancels out.\n\n\t\tstartFr = mc.playbackOptions(query = True, min = True)\n\t\tendFr = mc.playbackOptions(query = True, max = True)\n\t\tprint('Start frame is: ' + str(startFr))\n\t\tprint('End frame is: ' + str(endFr))\n\t\twindow1 = mc.fileDialog2(caption = \"Choose the directory to save your .obj sequence\", fileMode = 3, dialogStyle=2)\n\n\t\tif str(window1) != 'None':\n\t\t\t#origName = mc.file(q=True, sceneName=True)\n\t\t\tpathname = window1[0]\n\t\t\tobjName = 'defaultName' # Default name used if no name is specified\n\t\t\tu = '_'\n\n\t\t\twindow2 = mc.promptDialog(title = 'Sequence Name', message = 'Type the name you would like to give your .obj sequence.', button = ['OK','Cancel'], defaultButton = 'OK', cancelButton = 'Cancel')\n\t\t\tif window2 == 'OK': # Names the sequence if user hits \"OK\" button. Otherwise the default name is used\n\t\t\t\tobjName = mc.promptDialog(query = True, text = True)\n\t\t\t\tobjName = str(objName)\n\n\t\t\twhile startFr <= endFr:\n\t\t\t\tmc.currentTime (startFr)\n\t\t\t\tfileName = objName + u + '%04d' % (startFr)\n\t\t\t\tmc.file(rename = pathname + '/' + fileName)\n\t\t\t\tmc.file(es = True, typ = 'OBJexport', pr = True)\n\t\t\t\tprint('Successfully exported: ' + fileName)\n\t\t\t\tstartFr = startFr + 1\n\n\t\t\tmc.confirmDialog(title = 'Success', message = '.obj sequence successfully exported and saved.')\n\n\t\telse:\n\t\t\tmc.confirmDialog(title = 'Canceled', message = 'Operation canceled')\n\n\telse:\n\t\tmc.confirmDialog(title = 'Nothing selected', message = 'No objects were selected to export! Please try again.')\n","repo_name":"eestrada/MayaGeoSequence","sub_path":"objExport.py","file_name":"objExport.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6552708285","text":"from dotenv import dotenv_values\n\nsecret = dotenv_values(\".env\")\n\n\"\"\"\n    This file contains all the global variables used in the project\n\"\"\"\n\n\"\"\"\n    Website table. 
Contains the list of websites to scrap\n\"\"\"\nTABLE_WEBSITES_NAME = \"websites\"\nTABLE_WEBSITES_COLUMNS = \"website_id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE,url TEXT NOT NULL UNIQUE\"\nTABLE_WEBSITES_INIT = [[\"france24\",\"https://www.france24.com/fr/info-en-continu/\"],\n [\"francetvinfo\",\"https://www.francetvinfo.fr/\"]]\n\n\"\"\"\n News table. Contains the list of news scrapped\n\"\"\"\nTABLE_NEWS_NAME = \"news\"\nTABLE_NEWS_COLUMNS = \" news_id SERIAL PRIMARY KEY, website_id INT REFERENCES websites (website_id), url TEXT NOT NULL UNIQUE, ia_tweet TEXT, date BIGINT, tweet_sent BOOLEAN DEFAULT FALSE\"\n\n\"\"\"\n Configuration table. Contains the configuration of the bot\n\"\"\"\nTABLE_CONFIGURATION_NAME = \"configuration\"\nTABLE_CONFIGURATION_COLUMNS = \"configuration_id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, value TEXT NOT NULL\"\nTABLE_CONFIGURATION_INIT = [[\"tweet_every_x_minutes\",secret[\"TIME_BETWEEN_TWEETS_IN_MINUTES\"]],\n [\"last_tweet_date\",\"0\"]]","repo_name":"BenjaminDemolin/Agregactus_v2","sub_path":"Common/aa_global_variable.py","file_name":"aa_global_variable.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11618290230","text":"# -*- coding:utf-8 -*-\r\n\r\n#类别代号:应用安全-0 源码安全-1 数据安全-2 漏洞-3 恶意行为-4 权限-5 服务器安全-6\r\n#危险等级:高危-3 中危-2 低危-1 普通-0\r\nAppItemDict = {\r\n '0_0' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'动态调试检查',\r\n 'desc' : u'应用的android:debuggable属性为true时,应用则可以使用jdb调试,进而可以使用其他工具进行调试',\r\n 'level' : u'中危',\r\n 'harm' : u'debuggable属性开启的应用可以使用多种工具调试,其执行流程和核心算法及数据极易暴露,造成算法和数据等知识产权受侵害',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用的调试配置开启,导致应用的调试限制极低,可以使用多种工具动态分析应用的核心流程和敏感数据',\r\n 'adviseT' : u'1.将应用中的debuggable属性显式配置为false',\r\n },\r\n\r\n '0_1' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'Activity安全',\r\n 'desc' : u'公开暴露的Activity组件可以被任意应用启动',\r\n 'level' : u'低危',\r\n 'harm' : u'Activity组件过于暴露将导致隐式启动Intent攻击或本地拒绝服务攻击等危害',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于Activity组件公开暴露,恶意应用可以在外部通过Intent隐式启动目标Activity或者构造能使Activity功能失效的Intent进行拒绝服务攻击等',\r\n 'adviseT' : u'App内使用的Activity的配置尽量将exported属性为false,使用的私有Activity不应配置intent-filter,如果配置了intent-filter需设置exported属性为false,如果exported属性为true,则建议设置自定义权限进行限制',\r\n },\r\n\r\n '0_2' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'Broadcast Receiver安全',\r\n 'desc' : u'公开暴露的Broadcast Receiver组件可以接收所有app发出的广播',\r\n 'level' : u'低危',\r\n 'harm' : u'公开暴露的Broadcast Receiver组件容易受到拒绝服务攻击,利用广播传输敏感信息容易造成信息泄露危害',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于广播接收器的exported属性设置不当且配置了intent-filter项目,导致恶意应用可以构造隐式Intent攻击广播接收器;利用广播传输敏感信息时,容易导致恶意应用截获载体Intent导致敏感数据丢失',\r\n 'adviseT' :\r\n u'1.私有广播接收器设置exported属性为false,并且不配置intent-filter(私有广播接收器依然能接收到同UID的广播)\\n2.对接收来的广播进行验证\\n3.发送广播时需注意接收app是否会泄露信息\\n4.发送的广播包含敏感信息时需指定广播接收器,使用显示意图或setPackage()接口函数\\n5.Sticky broadcast粘性广播中不应该包含敏感信息',\r\n },\r\n\r\n '0_3' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'Service安全',\r\n 'desc' : u'公开暴露的service组件可以被其他应用访问',\r\n 'level' : u'低危',\r\n 'harm' : u'公开暴露的service组件存在遭受拒绝服务、消息伪造等攻击的风险',\r\n 'score' : 1,\r\n 'result' : 
u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于service组件公开且对访问数据的过滤处理不当的话,恶意应用可以构造隐式Intent对service组件进行攻击,导致组件出现拒绝服务或执行意外操作',\r\n        'adviseT' : u'1.只有应用自身使用的service应设置为私有\\n2.service接收到的数据需谨慎处理\\n3.有明确的服务需调用时要使用显式Intent',\r\n    },\r\n\r\n    '0_4' : {\r\n        'type' : 0, # 0-自动 1-人工\r\n        'name' : u'Content Provider安全',\r\n        'desc' : u'公开暴露的Content Provider组件可以被其他APP访问',\r\n        'level' : u'低危',\r\n        'harm' : u'公开暴露的Content Provider组件容易受到恶意应用访问,若此组件有问题,则可能产生任意数据访问、SQL注入等风险',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于Content Provider组件的exported属性设置不当,权限设置不妥,导致恶意应用可以任意访问数据;对于使用SQLite存储的数据,恶意应用可以构造注入SQL语句,对查询方式有问题的应用进行SQL注入攻击',\r\n        'adviseT' : u'1.在设计APP时,对于用户隐私数据或其他重要数据等不必要提供给外部应用使用的,要在AndroidManifest文件中将其exported属性显式的设为false\\n2.正确定义私有权限\\n3.防止本地SQL注入',\r\n    },\r\n\r\n    '0_5' : {\r\n        'type' : 0, # 0-自动 1-人工\r\n        'name' : u'Intent安全',\r\n        'desc' : u'评估样本是否存在Intent漏洞使被其他程序恶意调用',\r\n        'level' : u'低危',\r\n        'harm' : u'1.恶意调用\\n2.恶意接受数据\\n3.仿冒应用,例如(恶意钓鱼,启动登录界面)\\n4.恶意发送广播、启动应用服务。\\n5.调用组件,接受组件返回的数据\\n6.拦截有序广播',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'在Android上的Intent-based攻击很普遍,这种攻击轻则导致应用程序崩溃,重则可能演变提权漏洞。当然,通过静态特征匹配,Intent-Based的恶意样本还是很容易被识别出来的。然而最近出现了一种基于Android Browser的攻击手段——Intent Scheme URLs攻击。这种攻击方式利用了浏览器保护措施的不足,通过浏览器作为桥梁间接实现Intend-Based攻击。相比于普通Intend-Based攻击,这种方式极具隐蔽性,而且由于恶意代码隐藏WebPage中,传统的特征匹配完全不起作用。除此之外,这种攻击还能直接访问跟浏览器自身的组件(无论是公开还是私有)和私有文件,比如cookie文件,进而导致用户机密信息的泄露',\r\n        'adviseT' : u'1.尽量显式调用Intent,对发送目标明确限定\\n2.对Intent的内容进行访问过滤,校验传递的参数',\r\n    },\r\n\r\n    '0_6' : {\r\n        'type' : 0, # 0-自动 1-人工\r\n        'name' : u'日志信息检查',\r\n        'desc' : u'评估样本是否存在日志泄露的漏洞',\r\n        'level' : u'低危',\r\n        'harm' : u'通过logcat可以查看应用中的日志信息,如果日志使用不当可能造成用户数据的泄露',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'android.permission.READ_LOGS:app读取日志权限,android 4.1之前版本通过申请READ_LOGS权限就可以读取其他应用的log了。但是谷歌发现这样存在安全风险,于是android 4.1以及之后版本,即使申请了READ_LOGS权限也无法读取其他应用的日志信息了。4.1版本中 Logcat的签名变为“signature|system|development”了,这意味着只有系统签名的app或者root权限的app才能使用该权限。普通用户可以通过ADB查看所有日志',\r\n        'adviseT' : u'关闭调试日志调用,或者确保日志的输出使用了正确的级别,涉及敏感数据的日志信息在发布版本中被关闭',\r\n    },\r\n\r\n    '0_7' : {\r\n        'type' : 0, # 0-自动 1-人工\r\n        'name' : u'权限安全',\r\n        'desc' : u'评估样本是否存在app权限控制高于实际使用所需权限控制级别',\r\n        'level' : u'低危',\r\n        'harm' : u'1.其他程序越权访问此应用\\n2.被其他程序运行时篡改',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'没有设置合理的权限控制,normal:低风险权限,在安装的时候,系统会自动授予权限给 application。dangerous:高风险权限,系统不会自动授予权限给 app,在用到的时候,会给用户提示。signature:签名权限,在其他 app 引用声明的权限的时候,需要保证两个 app 的签名一致。这样系统就会自动授予权限给第三方 app,而不提示给用户。signatureOrSystem:这个权限是引用该权限的 app 需要有和系统同样的签名才能授予的权限,一般不推荐使用',\r\n        'adviseT' : u'不要申请不必要的权限',\r\n    },\r\n\r\n    '0_8' : {\r\n        'type' : 1, # 0-自动 1-人工\r\n        'name' : u'日志信息渗透',\r\n        'desc' : u'调试日志可能输出重要的日志文件,其中包含的信息可能导致客户端用户信息泄露,暴露客户端代码逻辑等,为发起攻击提供便利',\r\n        'level' : u'高危',\r\n        'harm' : u'调试日志可能暴露用户的重要日志信息,专业人员可能通过对日志信息进行分析窃取用户信息,破解客户端代码逻辑或者为其他调试跟踪等破解手段提供辅助信息',\r\n        'score' : 3,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        
'resultT' : u'存在风险',\r\n 'analysisT' : u'该APP存在调试日志的调用,可能发生重要日志信息泄露的风险',\r\n 'adviseT' : u'关闭调试日志调用,或者确保日志的输出使用了正确的级别,涉及敏感数据的日志信息在发布版本中被关闭',\r\n },\r\n\r\n '0_9' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'Activity劫持',\r\n 'desc' : u'输入密码的Activity可能被劫持',\r\n 'level' : u'高危',\r\n 'harm' : u'Activity被劫持后,用户可能将用户名和密码输入到虚假的Activity中',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于关键的Activity没有针对界面劫持进行检测,因此恶意软件可以获取当前界面的Activity名和所在的包名来判断是否劫持该Activity。当其被劫持后,用户可能将用户名密码输入到虚假的Activity中',\r\n 'adviseT' : u'在Activity的onStop方法中检测当前界面是否被劫持。',\r\n },\r\n\r\n '0_10' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'模拟器检测',\r\n 'desc' : u'评估样本是否可以在模拟器环境下运行',\r\n 'level' : u'高危',\r\n 'harm' : u'1.应用可以在模拟器下运行\\n2.可以被定制的模拟器下调试带来被逆向的风险',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用运行过程中没有做相关的模拟器识别和检测功能,导致应用可以在模拟器上运行和调试',\r\n 'adviseT' : u'为应用添加反模拟器的相关功能',\r\n },\r\n\r\n '0_11' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'进程保护',\r\n 'desc' : u'将指定代码注入到目标进程中并执行',\r\n 'level' : u'高危',\r\n 'harm' : u'1.监控用户输入及输出的信息\\n2.截获关键参数信息',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'应用进程在运行中可以被动态注入外部模块,可以对程序逻辑造成破坏',\r\n 'adviseT' : u'对应用采取加固措施,添加防注入功能',\r\n },\r\n\r\n '0_12' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'动态调试渗透',\r\n 'desc' : u'未采取反调试措施的应用,其执行流程可以被IDA等调试工具动态调试',\r\n 'level' : u'高危',\r\n 'harm' : u'未采取反调试措施的应用,其执行流程可以被多种调试工具进行分析,其dex文件和so文件中的核心算法或敏感信息极易暴露,导致应用作者权益受损害',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用未采取必要反调试措施,导致IDA等工具对其动态分析的成本极低,dex文件和so文件中的核心算法或敏感信息容易被窃取',\r\n 'adviseT' : u'1.将应用中的debuggable属性显式配置为false\\n2.对应用采取反调试保护措施,防止其他工具的调试',\r\n },\r\n\r\n '0_13' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'支付控制',\r\n 'desc' : u'评估样本的支付流程是否完善',\r\n 'level' : u'高危',\r\n 'harm' : u'造成支付或内购功能缺失,��响服务提供方利益',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'第三方支付不完善造成商户账号信息泄露,支付成功条件判断漏洞或缓存内购买记录不合理存放使得购买凭证被绕过,给服务提供方造成损失',\r\n 'adviseT' : u'对支付流程进行完整的安全加固',\r\n },\r\n\r\n '0_14' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'登录控制',\r\n 'desc' : u'应用自动存储用户信息实现自动登录的同时存在用户信息泄密',\r\n 'level' : u'高危',\r\n 'harm' : u'用户的账户密码等信息可能泄漏',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'为了实现自动登录和保存密码等功能,用户名和登录密码等信息会被保存在文本内,文本内容不加密或简单加密的情况下可以被提取使用户信息泄漏造成账户安全隐患',\r\n 'adviseT' : u'用户名密码等信息加密保存',\r\n },\r\n\r\n '0_15' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'支付密码设置',\r\n 'desc' : u'检查应用中登录密码、支付密码是否采用不同安全级别的管理',\r\n 'level' : u'高危',\r\n 'harm' : u'用户名和密码和支付密码等敏感数据会泄露,导致用户权益受损害',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于存在一些需求,导致可能将密码写入代码中',\r\n 'adviseT' : u'使用鼎源APP加固方案的代码混淆功能,防止源码被获取',\r\n },\r\n\r\n '0_16' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'双因子认证',\r\n 'desc' : 
u'双因子认证可以保障用户的某些重要操作的安全性',\r\n 'level' : u'高危',\r\n 'harm' : u'在用户进行如大额支付,更改密码等重要操作时没有除密码以外的验证手段使非法操作容易完成',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在没有双因子认证的情况下恶意攻击者在获得用户名密码等信息后可以进行任意操作如更改密码等将账户据为己有或盗取账户内资金等',\r\n 'adviseT' : u'添加双因子认证',\r\n },\r\n\r\n '0_17' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'超时重新授权',\r\n 'desc' : u'评估应用在会话超时的情况下是否要求重新授权',\r\n 'level' : u'高危',\r\n 'harm' : u'影响应用在用户不操作的情况下的安全性',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'当连接超时时没有重新授权的话,会导致通过过期会话访问用户账户的风险',\r\n 'adviseT' : u'添加超时重新授权机制',\r\n },\r\n\r\n '0_18' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'密码强度',\r\n 'desc' : u'密码复杂程度,密码越简单越容易被破解',\r\n 'level' : u'高危',\r\n 'harm' : u'过于简单的密码容易被简单破解引起账户丢失等情况',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'密码强度的大小直接影响到账户被暴力破解的难易程度',\r\n 'adviseT' : u'增加密码强度的强制限定',\r\n },\r\n\r\n '0_19' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'测试数据包含',\r\n 'desc' : u'检查应用安装文件中是否包含测试数据(包含冗余文件和冗余说明,其中冗余文件包括:含有通信地址等敏感信息的配置文件、含有Java代码的备份文件、测试文件、与应用无关的冗余资源文件等;冗余说明包括:脚本代码中的功能说明、发布者信息、注释掉的代码块等,以及其他文本中出现的冗余说明)',\r\n 'level' : u'中危',\r\n 'harm' : u'造成应用安装包冗余,通过测试数据暴露应用功能逻辑的信息',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'应用打包前未将冗余部分删除,应用打包后体积增大,相关信息暴露',\r\n 'adviseT' : u'删除安装包冗余内容',\r\n },\r\n '0_20' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'WebView安全',\r\n 'desc' : u'评估样本是否存在WebView漏洞被恶意利用',\r\n 'level' : u'中危',\r\n 'harm' : u'1.通过javasrriptInterface接口远程挂马\\n2.远程获取shell',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在android的sdk中封装了webView控件。这个控件主要用开控制的网页浏览。在程序中装载webView控件,可以设置属性(颜色,字体等)。类似PC下directUI的功能。在webView 下有一个非常特殊的接口函数addJavascriptInterface。能实现本地java和js的交互。利用addJavascriptInterface这个接口函数可实现穿透webkit控制android 本机',\r\n 'adviseT' : u'限制使用javascriptInterface接口',\r\n },\r\n '0_21' : {\r\n 'type' : 1, # 0-自动 1-人工\r\n 'name' : u'第三方SDK安全',\r\n 'desc' : u'评估样本app在切换界面后是否会留存敏感信息,产生泄露风险',\r\n 'level' : u'中危',\r\n 'harm' : u'敏感信息未清空,可能导致敏感信息被盗取',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在界面切换操作时,是否存在对敏感信息的处理',\r\n 'adviseT' : u'1.建议对APP编码中的界面切换进行安全加固\\n2.议将allowBackup属性值显示设置为false:出于安全考虑,建议关闭应用备份功能; 在AndroidMenifest.xml文件中,将相应组件的“android:allowBackup”属性设置为“false”',\r\n },\r\n '0_22' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'动态注册广播',\r\n 'desc' : u'使用registerReceiver动态注册的广播在组件的生命周期里是默认导出的。导出的广播可以导致拒绝服务、数据泄漏或是越权调用。',\r\n 'level' : u'低危',\r\n 'harm' : u'1.本地APP中广播外泄\\n2.外部广播传递到本地APP中',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'Android 可以在配置文件中声明一个receiver或者动态注册一个receiver来接收广播信息,攻击者假冒APP构造广播发送给被攻击的receiver,是被攻击的APP执行某些敏感行为或者返回敏感信息等,如果receiver接收到有害的数据或者命令时可能泄露数据或者做一些不当的操作,会造成用户的信息泄漏甚至是财产损失',\r\n 'adviseT' : 
u'1.通过给将要传递的广播绑定权限\\n2.对接收来的广播进行验证,返回结果时需注意接收app是否会泄露信息',\r\n    },\r\n    '0_23' : {\r\n        'type' : 0, # 0-自动 1-人工\r\n        'name' : u'反调试保护自动化',\r\n        'desc' : u'为了防止 APK 被动态调试,可以检测是否有调试器连接。Android 系统在 android.os.Debug 类中提供了 isDebuggerConnected()方法,用于检测是否有调试器连接。可以在 Application 类中调用 isDebuggerConnected()方法,判断是否有调试器连接,如果有,直接退出程序。。',\r\n        'level' : u'低危',\r\n        'harm' : u'应用未设置启用反调试保护机制,恶意攻击者可以反编译源程序后,在程序中任意设置断点并进行动态调试,窥探客户端的数据流和工作流。大大降低了黑客分析程序逻辑的难度,导致敏感数据加密逻辑被分析。',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于业务考虑或开发策略,选择了信任所有证书就会引起问题',\r\n        'adviseT' : u'建议增加反调试保护机制',\r\n    },\r\n\t'0_24' : {\r\n        'type' : 0,\r\n        'name' : u'本地拒绝服务',\r\n        'desc' : u'攻击者通过intent发送空数据、异常或畸形数据给受害者应用,导致其崩溃',\r\n        'level' : u' 中危',\r\n        'harm' : u'本地拒绝服务漏洞不仅可以导致安全防护等应用的防护功能被绕过或失效(如杀毒应用、安全卫士、防盗锁屏等),而且也可被竞争方应用利用来攻击,使得自己的应用崩溃,造成不同程度的经济利益损失。',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'Android应用本地拒绝服务漏洞源于程序没有对Intent.getXXXExtra()获取的异常或者畸形数据处理时没有进行异常捕获,从而导致攻击者可通过向受害者应用发送此类空数据、异常或者畸形数据来达到使该应用崩溃的目的',\r\n        'adviseT' : u'1.在使用Intent获取Extra数据时增加异常处理,防止抛出异常引发崩溃\\n2.在使用getAction时对返回值进行null检查后再使用\\n3.在使用Intent获取Array、List类的的数据时,检查长度后再做操作',\r\n    },\r\n}\r\n\r\nSourceItemDict = {\r\n    '1_0' : {\r\n        'type' : 0,\r\n        'name' : u'程序签名校验检查',\r\n        'desc' : u'应用存在重新打包和重新签名的安全风险,为了提高安全性有必要对签名信息进行校验',\r\n        'level' : u'高危',\r\n        'harm' : u'1.应用可以被篡改内容后重新签名发布\\n2.应用可以被植入恶意代码后重新签名发布',\r\n        'score' : 3,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'应用没有添加原始签名的校验的话,将导致应用被二次打包和重新签名的攻击',\r\n        'adviseT' : u'1. 
Native层本地签名校验\\n2.服务器校验',\r\n },\r\n\r\n '1_1' : {\r\n 'type' : 0,\r\n 'name' : u'完整性校验检查',\r\n 'desc' : u'应用很容易被恶意篡改后进行二次打包重新发布,存在极强的完整性破坏风险',\r\n 'level' : u'高危',\r\n 'harm' : u'应用容易被反编译篡改和二次打包发布',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用没有采取必要的防反编译保护手段和二次打包校验措施,导致应用容易被反编译篡改和二次打包发布',\r\n 'adviseT' : u'1.对应用添加保护标志,使反编译操作失效\\n2.对应用运行时添加校验,使运行时失效',\r\n },\r\n\r\n '1_2' : {\r\n 'type' : 0,\r\n 'name' : u'代码混淆检查',\r\n 'desc' : u'没有经过混淆加密的DEX文件很容易通过反编译工具还原为smali文件或java文件,对于恶意逆向分析提供便利',\r\n 'level' : u'高危',\r\n 'harm' : u'DEX文件中的逻辑容易暴露,核心算法和敏感数据容易被窃取',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于DEX文件没有经过混淆加密保护,应用容易通过反编译工具还原为java代码,根据其函数名和类名等信息可以预测应用行为分析应用功能',\r\n 'adviseT' : u'1.开发中使用proguard对应用源码进行混淆加密编译\\n2.在发布后对DEX文件进行更高强度的混淆加密',\r\n },\r\n\r\n '1_3' : {\r\n 'type' : 0,\r\n 'name' : u'反编译防范检测',\r\n 'desc' : u'没有经过反编译保护可能会使恶意用户获取程序源码',\r\n 'level' : u'高危',\r\n 'harm' : u'恶意用户获取程序源码可能会知晓程序的内部逻辑等重要信息',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'没有经过反编译保护可能会使恶意用户获取程序源码',\r\n 'adviseT' : u'对程序进行加壳保护,在Manifest文件中加入不存在的资源引用。',\r\n },\r\n\r\n '1_4' : {\r\n 'type' : 0,\r\n 'name' : u'资源文件保护检查',\r\n 'desc' : u'没有经过资源文件保护可能会使恶意用户获取资源文件',\r\n 'level' : u'中危',\r\n 'harm' : u'资源可能被恶意用户获取',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'没有经过资源文件保护可能会使恶意用户获取资源文件',\r\n 'adviseT' : u'根据资料对资源文件进行保护',\r\n },\r\n\r\n '1_5' : {\r\n 'type' : 0,\r\n 'name' : u'硬编码-密码敏感词编码检查',\r\n 'desc' : u'源码中用户名密码等敏感信息用具有固定含有的名称表示',\r\n 'level' : u'中危',\r\n 'harm' : u'用户名密码等敏感信息如若用具有特定含义的名称命名容易被攻击者进行跟踪测试,进而影响应用安全',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'用户名,密码等敏感词被作为关键词在源码中搜索后作为对象进行跟踪,动态调试时根据关键字的运行记录可以定位关键方法的位置',\r\n 'adviseT' : u'1.利用无意义的变量名替换敏感词,做好对比记录\\n2.对敏感词加密',\r\n },\r\n\r\n '1_6' : {\r\n 'type' : 0,\r\n 'name' : u'硬编码-密钥敏感词编码检查',\r\n 'desc' : u'源码中密钥方法等敏感词被跟踪调试的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'源码接口,方法参数等需要加密用到密钥方法被跟踪调试,影响应用关键方法的安全',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'Cipher类中的方法如getProvider() getAlgorithm()等作为关键词搜索到的参数等,可作为跟踪调试的对象,又如keyGeneratord方法可对生成的key进行加密',\r\n 'adviseT' : u'去除敏感词表示,更换变量名',\r\n },\r\n\r\n '1_7' : {\r\n 'type' : 0,\r\n 'name' : u'主配置文件保护检查',\r\n 'desc' : u'Apk可能被反编译和修改',\r\n 'level' : u'中危',\r\n 'harm' : u'没有进行主配置文件保护的程序可能被轻易的反编译',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于配置文件中存在着大量的信息,例如服务名、Activity名等等,如果被反编译了,可能会泄露APK内部的一些信息。此外,将AndroidManifest文件反编译后加上debuggable标识,再对程序进行重打包,就有可能对程序进行非法的调试,进而进行破解',\r\n 'adviseT' : u'在AndroidManifest文件中添加不存在的资源标识',\r\n },\r\n\r\n '1_8' : {\r\n 'type' : 0,\r\n 'name' : u'安全编码规范-调用ROOT权限检查',\r\n 'desc' : u'恶意应用加入Root用户组后存在感染系统和其他应用的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'获得Root权限后以便于进行搜索查询注入等操作,影响手机安全',\r\n 
'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'需要调用Root权限的应用除了部分安全应用外大部分均为恶意应用或伪装成普通应用的恶意应用,该类应用如若获得Root权限将会对系统和系统内的应用,造成账户丢失,数据遗失等问题',\r\n 'adviseT' : u'限制不必要的Root权限',\r\n },\r\n\r\n '1_9' : {\r\n 'type' : 0,\r\n 'name' : u'硬编码-敏感字符串检查',\r\n 'desc' : u'当用户在使用应用的过程中,输入库中敏感字符串以后的处理机制',\r\n 'level' : u'低危',\r\n 'harm' : u'敏感字符串不做处理,将影响使用应用的用户使用环境',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'对于敏感字符串,国家规定和谐的词语以及客户定制的敏感词不进行处理可能会违反国家政策,破坏应用的使用环境',\r\n 'adviseT' : u'在应用中添加方法,根据词库屏蔽敏感字符串',\r\n },\r\n\r\n '1_10' : {\r\n 'type' : 0,\r\n 'name' : u'安全编码规范-加密算法检查',\r\n 'desc' : u'检查应用中是否使用了加密算法对关键信息进行保护',\r\n 'level' : u'低危',\r\n 'harm' : u'1.暴露关键信息\\n2.暴露重要逻辑',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在代码的关键点如登录注册等需要与后台进行交互的位置上使用加密算法利于保护用户信息以及关键代码实现,',\r\n 'adviseT' : u'1.不要使用自定义的加密算法\\n2.采用强度足够的加密算法进行加密操作\\n3.对加密算法逻辑进行保护',\r\n },\r\n\r\n '1_11' : {\r\n 'type' : 0,\r\n 'name' : u'安全编码规范-全反射调用检查',\r\n 'desc' : u'评估样本是否存在不安全的全反射调用',\r\n 'level' : u'低危',\r\n 'harm' : u'存在反射调用风险的app可能会导致恶意程序读取设备SD卡上的信息,包括联系人信息或短信,造成用户个人隐私的泄露',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于使用不安全的全反射调用,导致恶意代码执行',\r\n 'adviseT' : u'建议对APP应用实施全反射调用检查',\r\n },\r\n\r\n '1_12' : {\r\n 'type' : 0,\r\n 'name' : u'安全编码规范-明文SQL语句检查',\r\n 'desc' : u'评估样本app编码中是否出现不安全的SQL语句',\r\n 'level' : u'低危',\r\n 'harm' : u'不安全及不规范的SQL语句会增加SQL注入的风险',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在涉及数据的操作中,存在不安全的SQL语句,需要遍历检查',\r\n 'adviseT' : u'1.少用空值作为查询条件\\n2.对数字型的SQL查询采用明确的数字型强制类型转换\\n3.对字符型的参数进行转移后,能防止攻击者输入单引号、双引号、反斜线等“闭合类字符”进行闭合注入',\r\n },\r\n\r\n '1_13' : {\r\n 'type' : 1,\r\n 'name' : u'程序签名校验渗透',\r\n 'desc' : u'APP应用存在重新打包和重新签名的安全风险,为了提高安全性有必要对签名信息进行校验',\r\n 'level' : u'高危',\r\n 'harm' : u'1.应用可以被篡改内容后重新签名发布\\n2.应用可以被植入恶意代码后重新签名发布\\n3.应用使用的签名校验算法强度不够容易被绕过或屏蔽',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'应用没有添加原始签名的校验的话,将导致应用被二次打包和重新签名的攻击',\r\n 'adviseT' : u'1. 
Native层本地签名校验\\n2.服务器校验',\r\n },\r\n\r\n '1_14' : {\r\n 'type' : 1,\r\n 'name' : u'完整性校验渗透',\r\n 'desc' : u'应用很容易被恶意篡改后进行二次打包重新发布,存在极强的完整性破坏风险',\r\n 'level' : u'高危',\r\n 'harm' : u'应用容易被反编译篡改和二次打包发布',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用的防反编译措施强度不够或二次打包校验容易被绕过,导致应用的完整性无法保证',\r\n 'adviseT' : u'1.对应用添加保护标志和校验代码,使反编译和二次打包后的运行失效\\n2.对应用进行加固,防止应用的保护代码被绕过',\r\n },\r\n\r\n '1_15' : {\r\n 'type' : 1,\r\n 'name' : u'代码混淆渗透',\r\n 'desc' : u'没有经过混淆加密的so文件很容易通过反编译工具还原为汇编代码或伪代码,对于恶意逆向分析提供便利',\r\n 'level' : u'高危',\r\n 'harm' : u'so文件中的逻辑容易暴露,核心算法和敏感数据容易被窃取',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于so文件没有经过混淆处理或加密保护,恶意逆向分析可以对so文件进行还原',\r\n 'adviseT' : u'1.开发中使用LLVM-Obfuscator对应用c/c++源码进行混淆加密编译\\n2.在发布后对so文件进行加固保护',\r\n },\r\n\r\n '1_16' : {\r\n 'type' : 1,\r\n 'name' : u'DEX保护强度渗透',\r\n 'desc' : u'评估样本是否对Dex文件进行了保护处理',\r\n 'level' : u'高危',\r\n 'harm' : u'1.程序逻辑被篡改\\n2.收费平台被破坏\\n3.加入恶意推送',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'未对dex文件做过处理的,很容易的可以找到程序的原始逻辑,程序的代码可以被二次篡改',\r\n 'adviseT' : u'1.对smali文件进行流程混淆',\r\n },\r\n\r\n '1_17' : {\r\n 'type' : 1,\r\n 'name' : u'SO保护强度渗透',\r\n 'desc' : u'评估样本是否存在SO保护措施',\r\n 'level' : u'高危',\r\n 'harm' : u'1.未保护的SO可以看到源码\\n2.程序的逻辑可以被篡改',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'Android程序的大部分关键代码都写在native层,未对so文件进行保护,这种情况下可以被静态查看或者保护强度不够可以被轻松的动态dump。这将对开发者造成严重的损害',\r\n 'adviseT' : u'1.对so文件进行混淆保护',\r\n },\r\n\r\n '1_18' : {\r\n 'type' : 1,\r\n 'name' : u'资源文件保护渗透',\r\n 'desc' : u'检测资源文件是否经过加固保护,分析其加固保护的强度',\r\n 'level' : u'高危',\r\n 'harm' : u'没有进行加固保护或加固保护强度太弱的资源文件会被轻易破解,导致程序敏感信息泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'没有经过资源文件保护可能会使恶意用户获取资源文件',\r\n 'adviseT' : u'使用具有资源文件防反编译功能的加密加固工具对APP中的资源文件进行加密',\r\n },\r\n\r\n '1_19' : {\r\n 'type' : 0,\r\n 'name' : u'内存代码安全',\r\n 'desc' : u'评估样本是否存在Android本地数据安全问题',\r\n 'level' : u'高危',\r\n 'harm' : u'用户信息、密码等敏感重要的信息明文存储在Shared Preferences文件中,导致攻击者可通过root手机来查看敏感信息',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'使用MODE_WORLD_READABLE模式创建Shared Preferences文件或使用MODE_WORLD_WRI TEABLE模式创建Shared Preferences文件并含有“android:sharedUserId”属性值和测试签名。导致攻击者可通过root手机来查看敏感信息',\r\n 'adviseT' : u'1.避免使用MODE_WORLD_WRITEABLE和MODE_WORLD_READABLE模式创建进程间通信的',\r\n },\r\n\r\n '1_20' : {\r\n 'type' : 1,\r\n 'name' : u'业务敏感逻辑渗透',\r\n 'desc' : u'敏感逻辑出现问题有可能使用户账户信息泄露等',\r\n 'level' : u'高危',\r\n 'harm' : u'用户信息泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在很多情况下,业务逻辑是需要保密的,例如登录的逻辑,汇款的逻辑等。所以应当对业务逻辑进行加密,同时对通讯流程进行加密,这样才能够保证第三方无法猜测业务逻辑',\r\n 'adviseT' : u'对应用进行加固,对通讯信道进行加密',\r\n },\r\n\r\n '1_21' : {\r\n 'type' : 1,\r\n 'name' : u'反编译防范渗透',\r\n 'desc' : u'检测应用是否采取了有效的防反编译保护措施,分析其保护强度',\r\n 'level' : u'高危',\r\n 
'harm' : u'应用没有采取有效的防反编译保护措施或其保护强度不够的话会导致应用被恶意分析人员进行反编译等操作,造成应用敏感信息和核心逻辑的泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'应用可被反编译,可以获取还原的源码',\r\n 'adviseT' : u'对程序进行加壳保护,在Manifest文件中加入不存在的资源引用',\r\n },\r\n\r\n '1_22' :{\r\n 'type' : 0,\r\n 'name' : u'未移除WebView组件系统隐藏接口',\r\n 'desc' : u'android webview组件包含3个隐藏的系统接口:searchBoxJavaBridge_, accessibilityTraversal以及accessibility,恶意程序可以利用它们实现远程代码执行',\r\n 'level' : u' 中危',\r\n 'harm' : u'攻击者可以利用这个实例执行远程代码',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'当BrowserFrame初始化的时候,会创建一个 android.webkit.SearchBoxImpl实例,并将此实例使用searchBoxJavaBridge_这个名字添加为JavaScript Object,而攻击者可以利用这个实例执行远程代码',\r\n 'adviseT' : u'请通过显示调用removeJavascriptInterface移除这三个系统隐藏接口',\r\n },\r\n\r\n '1_23' : {\r\n 'type' : 0, # 0-自动 1-人工\r\n 'name' : u'随机数不安全使用',\r\n 'desc' : u'调用SecureRandom类中的setSeed方法。',\r\n 'level' : u'中危',\r\n 'harm' : u'生成的随机数具有确定性,存在被破解的可能性。',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n 'harmTag':[u'用户敏感信息泄露',u'数据暴露'],\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在调用SecureRandom类的构造函数SecureRandom(byte[] seed)和setSeed()方法设置随机种子,这两种方式都会导致生成的随机数可以预测,导致后续依赖于该随机数的加密不安全',\r\n 'adviseT' : u'1.不使用setSeed方法\\n2.使用/dev/urandom或者/dev/random来初始化伪随机数生成器',\r\n },\r\n}\r\n\r\nDataItemDict = {\r\n '2_0' : {\r\n 'type' : 0,\r\n 'name' : u'应用数据任意备份',\r\n 'desc' : u'AndroidManifest.xml中的android:allowBackup属性设置不当会导致adb命令backup和restore的使能,可以备份和恢复应用程序数据',\r\n 'level' : u'高危',\r\n 'harm' : u'应用数据可以通过adb backup命令备份出来,其敏感数据会通过备份操作泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于AndroidManifest.xml中的android:allowBackup属性设置不当,应用中的敏感数据可以被备份到本地,导致数据被窃取和泄露',\r\n 'adviseT' : u'将AndroidManifest.xml中的android:allowBackup属性显式设置为false',\r\n },\r\n\r\n '2_1' : {\r\n 'type' : 0,\r\n 'name' : u'存储数据检查',\r\n 'desc' : u'本地存储的数据容易被恶意用户的分析和破解',\r\n 'level' : u'中危',\r\n 'harm' : u'本地存储的数据在没有经过加密措施处理时,很容易被相关工具打开,造成敏感信息泄露和数据篡改侵害',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用默认本地存储的数据是没有经过加密处理的,本地数据容易遭受泄露和篡改侵害',\r\n 'adviseT' : u'1.尽量不要把敏感数据存储在本地,把用户名密码存储在keystore内\\n2.ContentProvider要使用带参数的数据操作,防止SQL注入',\r\n },\r\n\r\n '2_6' : {\r\n 'type' : 1,\r\n 'name' : u'存储数据渗透',\r\n 'desc' : u'本地存储的敏感数据需要具备一定安全级别的加密措施进行保护,否则容易被恶意用户分析和破解',\r\n 'level' : u'高危',\r\n 'harm' : u'应用本地存储的数据没有经过良好的加密措施保护的话,即使存储的数据经过加密处理也容易被分析破解,应用可以被获取到数据原文或被篡改数据',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用对数据采取的加密算法强度不够或者加密流程容易被分析控制,则数据即使经过加密处理也存在极大风险',\r\n 'adviseT' : u'1.尽量不要把敏感数据存储在本地,把用户名密码存储在keystore内\\n2.ContentProvider要使用带参数的数据操作,防止SQL注入\\n3.对所有输入的数据要先检查再使用\\n4.用户敏感信息的存储和操作流程要做加密等保护措施',\r\n },\r\n\r\n '2_3' : {\r\n 'type' : 0,\r\n 'name' : u'远程数据通讯协议',\r\n 'desc' : u'在远程数据通讯过程中,使用\\nhttp协议传输数据时,所有数据均为明文传输',\r\n 'level' : u'中危',\r\n 'harm' : u'1.传输数据容易被截获进行抓包分析\\n2.数据包中的敏感信息容易被分析篡改',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 
'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于没有使用加密协议,数据包内容没有得到有效保护,导致传输中的敏感信息容易被分析篡改',\r\n 'adviseT' : u'1.尽量使用https等加密协议进行数据传输\\n2.数据包中的敏感信息不能用明文传输',\r\n },\r\n\r\n '2_4' : {\r\n 'type' : 0,\r\n 'name' : u'Content Provider 目录遍历脆弱点',\r\n 'desc' : u'评估样本是否存在Content \\nProvider目录遍历脆弱点漏洞使被其他程序恶意调用',\r\n 'level' : u'中危',\r\n 'harm' : u'攻击者利用该应用暴露的Content Provider的openFile()接口进行文件目录遍历以达到访问任意可读文件的目的',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'Android Content Provider存在文件目录遍历安全漏洞,该漏洞源于对外暴露Content Provider组件的应用,没有对Content Provider组件的访问进行权限控制和对访问的目标文件的Content Query Uri进行有效判断,攻击者可以利用该应用暴露的Content Provider的openFile()接口进行文件目录遍历以达到访问任意可读文件的目的',\r\n 'adviseT' : u'1.将不必要导出的Content Provider设置为不导出\\n2.去除没有必要的OpenFile()接口',\r\n },\r\n\r\n '2_5' : {\r\n 'type' : 0,\r\n 'name' : u'Content Provider URI脆弱点',\r\n 'desc' : u'评估样本是否存在Content Provider URI脆弱点漏洞使被其他程序恶意调用',\r\n 'level' : u'中危',\r\n 'harm' : u'攻击者利用该应用暴露的Content Provider的openFile()接口进行文件目录遍历以达到访问任意可读文件的目的',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'content URI 是一个标志provider中的数据的URI.Content URI中包含了整个provider的以符号表示的名字(它的authority) 和指向一个表的名字(一个路径)。当你调用一个客户端的方法来操作一个provider中的一个表,指向表的content URI是参数之一',\r\n 'adviseT' : u'1. minSdkVersion不低于9\\n2. 不向外部app提供的数据的私有content provider设置exported=“false”避免组件暴露(编译api小于17时更应注意此点)',\r\n },\r\n\r\n '2_2' : {\r\n 'type' : 1,\r\n 'name' : u'密码专用键盘保护',\r\n 'desc' : u'监控键盘输入信息',\r\n 'level' : u'高危',\r\n 'harm' : u'用户的账户信息被窃取',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'键盘被监控后用户在屏幕上的点击操作将以文本的形式储存并发送到攻击者手中,其中的用户名密码等信息如若丢失则会影响账户安全',\r\n 'adviseT' : u'使用安全键盘相关的SDK产品或者自定义键盘',\r\n },\r\n\r\n '2_7' : {\r\n 'type' : 1,\r\n 'name' : u'敏感数据截获',\r\n 'desc' : u'在应用传输数据的过程中截获特定数据',\r\n 'level' : u'高危',\r\n 'harm' : u'1.用户名密码等信息被窃取盗用\\n2.网络数据被截取,可用于分析用户\\n3.伪造网络请求,影响账户安全',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'用户数据被截取,用户信息如个人信息使用习惯等信息被窃取',\r\n 'adviseT' : u'传输数据加密处理',\r\n },\r\n\r\n '2_8' : {\r\n 'type' : 1,\r\n 'name' : u'敏感数据残留',\r\n 'desc' : u'应用储存的用户信息残留在旧界面和容器中会导致泄露',\r\n 'level' : u'高危',\r\n 'harm' : u'残留数据中包含的敏感信息泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'网络缓存数据、GUI对象缓存、用户名密码缓存数据等未能及时清除会导致用户数据被窃取盗用',\r\n 'adviseT' : u'1.设置浏览器不储存缓存或用clearCache()方法来删除任何存储在本地的文件\\n2.在用户离开(切换)应用界面或者注销登录时清除gui界面的数据\\n3.在每一个activity(界面)启动的时候检测用户是否处于登录状态,如果没有则跳转到登录界面',\r\n },\r\n\r\n '2_9' : {\r\n 'type' : 1,\r\n 'name' : u'数据访问控制',\r\n 'desc' : u'数据需要分级处理,设置访问限制',\r\n 'level' : u'高危',\r\n 'harm' : u'应用数据中的敏感信息会泄露',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'一方面数据存放的位置决定了数据是否是公开,因此重要数据需要存放在包内的文件夹;另一方面数据需要进行加密',\r\n 'adviseT' : u'重要数据需要进行加密处理,并设置较高访问权限限制',\r\n },\r\n\r\n '2_10' : {\r\n 'type' : 1,\r\n 'name' : u'敏感数据加密',\r\n 'desc' : u'用户信息、程序逻辑可能泄露',\r\n 'level' : u'高危',\r\n 'harm' : 
u'敏感数据中可能包括用户隐私等,造成泄露可能会被第三方利用',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'用户信息是应用程序应当保护的,不仅仅包括用户的用户名和密码,同样也包括用户的社交信息、照片、视频等。这些文件如果存放在不合适的目录中,就可能使得用户的信息能够被其他程序获取。因此需要对这些信息进行加密。同时,应用内部的一些重要资源,也应当进行加密,防止应用被破解',\r\n 'adviseT' : u'对所有敏感数据文件进行加密',\r\n },\r\n\r\n '2_11' : {\r\n 'type' : 0,\r\n 'name' : u'内存数据安全',\r\n 'desc' : u'评估样本是否存在内存数据安全问题',\r\n 'level' : u'高危',\r\n 'harm' : u'用户信息、密码等敏感重要的信息明文存储,导致攻击者可通过root手机来查看敏感信息。',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'sqliteEncrypt、SDCard、SharedPreferences中的隐私数据(帐号、密码等)通过明文保存,或者使用弱加密算法进行加密,很容易被黑客获取并利用,导致隐私数据等敏感信息泄露;创建文件时,设置为全局可读或可写,很容易被黑客利用,获取文件中存储的隐私信息或者篡改文件内容,从而导致用户敏感信息泄露、财产受损等风险',\r\n 'adviseT' : u'1.用MODE_PRIVATE模式创建SharedPreference文件、应用内部存储文件、创建sqlite文件、应用内部存储文件;\\n2.高强度加密算法加密用户隐私数据,强烈建议SDCard不存储用户数据,如必须存储,请使用高强度的加密算法;\\n 3.建议AES/DES加密时使用CBC或者CFB模式',\r\n },\r\n\r\n '2_12' : {\r\n 'type' : 1,\r\n 'name' : u'远程数据通信加密',\r\n 'desc' : u'在远程数据通讯过程中,使用未加密协议传输数据时,所有数据均为明文传输',\r\n 'level' : u'高危',\r\n 'harm' : u'传输数据容易被截获进行抓包分析,数据包中的敏感信息容易被分析篡改',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于没有使用加密协议,数据包内容没有得到有效保护,导致传输中的敏感信息容易被分析篡改',\r\n 'adviseT' : u'1.尽量使用https等加密协议进行数据传输\\n2.数据包中的敏感信息不能用明文传输',\r\n },\r\n\r\n '2_13' : {\r\n 'type' : 1,\r\n 'name' : u'数据传输完整性',\r\n 'desc' : u'在数据传输过程中,数据可以被截获和篡改,篡改后的数据包会导致信息失效或失真,给用户利益造成损失',\r\n 'level' : u'高危',\r\n 'harm' : u'使传输的数据内容或结构破坏,导致信息失效无法被接收方使用;使传输的数据内容改变,导致信息失真,无法传递准确的信息到接收方',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于数据传输时信道或其他因素导致数据会发生错误或被恶意篡改,如果没有校验机制或防篡改机制,数据完整性很容易产生风险',\r\n 'adviseT' : u'1.使用加密通信协议进行传输,防止信息被恶意篡改\\n2.使用纠错校验机制对信息进行校验和纠错,防止信息发生失效',\r\n },\r\n\r\n '2_14' : {\r\n 'type' : 1,\r\n 'name' : u'本地数据通讯安全',\r\n 'desc' : u'本地数据在系统组件之间传递时需要对数据的有效性和准确性进行过滤和检验',\r\n 'level' : u'高危',\r\n 'harm' : u'如果本地数据通讯过程中,数据源发送错误数据或传送中被恶意篡改都会导致应用出现异常甚至被恶意利用',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'数据源传递错误数据可以导致应用异常或产生拒绝服务,数据传输中被恶意篡改可以使应用被错误信息控制',\r\n 'adviseT' : u'Activity组件在相互交互时要严格过滤Intent内容,防止无效或错误内容处理,ContentProvider组件在交互时要对Cursor对象严格过滤;Broadcast组件要对广播信息进行筛选;AIDL Service组件要过滤接口收到的信息',\r\n },\r\n\r\n '2_15' : {\r\n 'type' : 1,\r\n 'name' : u'转账安全性检测',\r\n 'desc' : u'评估样本app是否存在不安全的转账行为',\r\n 'level' : u'高危',\r\n 'harm' : u'转账安全性问题会导致用户个人信息及账号信息的泄露,造成隐私和财产损失',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'需要对转账的整个过程进行风险评估,判断是否存在安全性风险',\r\n 'adviseT' : u'建议对APP的转账安全性进行加固',\r\n },\r\n\r\n '2_16' : {\r\n 'type' : 1,\r\n 'name' : u'安装包中敏感信息加密',\r\n 'desc' : u'评估样本app的安装包敏感信息是否存在被盗取的风险',\r\n 'level' : u'高危',\r\n 'harm' : u'若安装包敏感信息未加密,可能导致程序被反编译和二次编译,APP被伪造和篡改。',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'需要检查安装包中存在敏感信息的文件是否被加密',\r\n 'adviseT' : u'建议对APP的安装包文件进行安全加固',\r\n },\r\n\r\n 
'2_17' : {\r\n 'type' : 1,\r\n 'name' : u'界面切换后敏感信息需清空',\r\n 'desc' : u'评估样本app在切换界面后是否会留存敏感信息,产生泄露风险',\r\n 'level' : u'高危',\r\n 'harm' : u'敏感信息未清空,可能导致敏感信息被盗取',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在界面切换操作时,是否存在对敏感信息的处理',\r\n 'adviseT' : u'建议对APP编码中的界面切换进行安全加固',\r\n },\r\n\r\n '2_18' : {\r\n 'type' : 1,\r\n 'name' : u'数据输入敏感数据显示',\r\n 'desc' : u'评估样本app是否存在数据输入敏感数据显示的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'身份证或密码未做掩盖,存在被窃取的风险',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'APP需要输入的敏感信息存在被用户周围窥探者窃取的风险',\r\n 'adviseT' : u'对APP的敏感数据输入环节做安全性加固',\r\n },\r\n\r\n '2_19' : {\r\n 'type' : 1,\r\n 'name' : u'本地数据通讯权限检查',\r\n 'desc' : u'评估样本是否存在超越权限的通讯或信息泄露行为',\r\n 'level' : u'中危',\r\n 'harm' : u'存在权限风险的app存在本地Dos攻击、强制恢复出厂设置、信息泄露的风险',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于使用不安全的通讯方式,未进行最小权限设置,对app通讯路径检查不完全,造成拒绝服务攻击及合谋攻击的风险',\r\n 'adviseT' : u'建议对APP应用实施权限审核及通讯加固',\r\n },\r\n\r\n '2_20' : {\r\n 'type' : 1,\r\n 'name' : u'数据输出敏感数据显示',\r\n 'desc' : u'评估样本app是否存在数据输出敏感数据显示的风险',\r\n 'level' : u'低危',\r\n 'harm' : u'身份证或密码未做掩盖,存在被窃取的风险',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'APP需要输出的敏感信息存在被用户周围窥探者窃取的风险',\r\n 'adviseT' : u'对APP的敏感数据输出环节做安全性加固',\r\n },\r\n\r\n '2_21' : {\r\n 'type' : 0,\r\n 'name' : u'卸载清除',\r\n 'desc' : u'评估样本是否存在app文件未彻底删除的文件及缓存',\r\n 'level' : u'低危',\r\n 'harm' : u'APP卸载不彻底,未彻底删除的文件及缓存泄露用户信息',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'APP在卸载后,APP安装目录存在未卸载完成的文件或缓存文件未进行更新,用户隐私数据存在泄漏的危险',\r\n 'adviseT' : u'建议对APP应用实施彻底清除的审核',\r\n },\r\n\r\n '2_22' : {\r\n 'type' : 1,\r\n 'name' : u'证书验证',\r\n 'desc' : u'应用中在使用ssl、https等通信协议时,使用不当会造成校验失效等问题',\r\n 'level' : u'低危',\r\n 'harm' : u'证书验证失效会造成所有证书都成为可信证书,对连接失去保护机制',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于环境问题或连接的限制,部分应用在开发中会选择信任所有证书,造成证书验证失效',\r\n 'adviseT' : u'尽量使用有效的证书验证连接',\r\n },\r\n\r\n '2_23' : {\r\n 'type' : 1,\r\n 'name' : u'会话安全',\r\n 'desc' : u'应用会话中会使用的cookie、url参数等信息可以被恶意分析者利用',\r\n 'level' : u'中危',\r\n 'harm' : u'1.恶意分析人员可以通过cookie、url参数等信息分析程序功能\\n2.会话信息可以被修改参数内容导致应用行为异常',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于会话信息可以被分析修改,导致会话中的信息可以被分析或者篡改,用于非法用途',\r\n 'adviseT' : u'1.对会话内容进行加密,防止恶意分析\\n2.对会话内容进行校验,防止恶意篡改',\r\n },\r\n\r\n '2_24' : {\r\n 'type' : 1,\r\n 'name' : u'重放攻击',\r\n 'desc' : u'应用中关键信息没有进行序列和计数校验,可以引起重放攻击',\r\n 'level' : u'中危',\r\n 'harm' : u'对关键信息的多次请求会导致短时间内执行多个操作,会导致关键信息被恶意篡改',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于对请求序列和计数校验不完善或不准确将导致重放攻击的发生',\r\n 'adviseT' : u'在开发过程中对请求的序列和频率进行限制,防止频繁重放攻击',\r\n }\r\n}\r\n\r\nVulItemDict = 
{\r\n '3_0' : {\r\n 'type' : 0,\r\n 'name' : u'电话拨打权限绕过漏洞检查',\r\n 'desc' : u'com.android.phone.PhoneGlobals$NotificationBroadcastReceiver是导出的且没有做任何的权限限制,任意应用都可以发送intent来调用它',\r\n 'level' : u'高危',\r\n 'harm' : u'恶意应用可以通过发送构造的intent来调用NotificationBroadcastReceiver实现对指定电话号码的拨打',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于android开发人员的疏忽,BroadcastReceiver导出应该在其标签中加上属性android:exported=”false”,但开发人员漏掉了”android:”,写成了exported=”false”,这样的属性是不起作用的,而包含有intent filters的BroadcastReceiver默认是导出的,从而导致此漏洞的发生',\r\n 'adviseT' : u'1.代码审计应用中是否被恶意植入了相关调用的代码\\n2.尽量使用已修复该漏洞的系统版本',\r\n },\r\n\r\n '3_1' : {\r\n 'type' : 0,\r\n 'name' : u'反射漏洞注入风险',\r\n 'desc' : u'ContentProvider组件访问SQL数据时SQL语句可以被进行反射型注入',\r\n 'level' : u'中危',\r\n 'harm' : u'SQL注入会引起SQL数据的泄露,导致敏感信息被访问',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于SQL语句的处理不规范,导致SQL反射注入风险的存在',\r\n 'adviseT' : u'使用SQL参数化查询',\r\n },\r\n\r\n '3_2' : {\r\n 'type' : 0,\r\n 'name' : u'选择漏洞注入风险',\r\n 'desc' : u'ContentProvider组件访问SQL数据时SQL语句可以被进行选择型注入',\r\n 'level' : u'中危',\r\n 'harm' : u'SQL注入会引起SQL数据的泄露,导致敏感信息被访问',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于SQL语句的处理不规范,导致SQL选择注入风险的存在',\r\n 'adviseT' : u'使用SQL参数化查询',\r\n },\r\n\r\n '3_3' : {\r\n 'type' : 0,\r\n 'name' : u'内网测试信息残留',\r\n 'desc' : u'应用文件中存在残留的调试信息或调试网址,可能被其他恶意用户利用和分析',\r\n 'level' : u'低危',\r\n 'harm' : u'调试网址暴露可以被用于分析和渗透应用的测试服务器',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于应用文件中存在残留的调试信息或调试网址,恶意用户可以通过分析调试信息和渗透测试服务器对应用逻辑进行分析或获取服务器端关键信息',\r\n 'adviseT' : u'1.将不用的测试网址信息及时清除\\n2. 关闭不再使用的测试服务',\r\n },\r\n\r\n '3_4' : {\r\n 'type' : 1,\r\n 'name' : u'下载任意APK',\r\n 'desc' : u'更新用的APK可能被劫持',\r\n 'level' : u'中危',\r\n 'harm' : u'地址被劫持后,可能被替换成恶意APK。',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于没有检验升级网址的签名,有可能被中间人攻击',\r\n 'adviseT' : u'1.不使用友盟SDK\\n2. 
下载功能Service组件不要导出,不要允许隐式调用',\r\n },\r\n\r\n '3_5' : {\r\n 'type' : 1,\r\n 'name' : u'全局可读写内部文件',\r\n 'desc' : u'评估样本是否存在全局可读写内部文件的风险',\r\n 'level' : u'低危',\r\n 'harm' : u'攻击者利用该越权漏洞,对手机系统全局文件进行恶意篡改',\r\n 'score' : 1,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'不正确的权限设置,导致系统的文件系统暴露在攻击者范围下,可以随意篡改非root权限的文件',\r\n 'adviseT' : u'修改文件的访问权限',\r\n },\r\n\r\n # '3_6' : {\r\n # 'type' : 0,\r\n # 'name' : u'WebView安全',\r\n # 'desc' : u'评估样本是否存在WebView漏洞被恶意利用',\r\n # 'level' : u' 中危',\r\n # 'harm' : u'1.通过javasrriptInterface接口远程挂马\\n2.远程获取shell',\r\n # 'score' : 2,\r\n # 'result' : u'不存在风险',\r\n # 'analysis' : u'没有导致风险的因素',\r\n # 'detail' : u'无',\r\n # 'advise' : u'无',\r\n #\r\n # #存在风险下的注释\r\n # 'resultT' : u'存在风险',\r\n # 'analysisT' : u'在android的sdk中封装了webView控件。这个控件主要用开控制的网页浏览。在程序中装载webView控件,可以设置属性(颜色,字体等)。类似PC下directUI的功能。在webView 下有一个非常特殊的接口函数addJavascriptInterface。能实现本地java和js的交互。利用addJavascriptInterface这个接口函数可实现穿透webkit控制android 本机',\r\n # 'adviseT' : u'暂停使用javascriptInterface接口',\r\n # },\r\n\r\n '3_7' : {\r\n 'type' : 0,\r\n 'name' : u'HTTPS关闭主机名验证',\r\n 'desc' : u'构造HttpClient时,设置HostnameVerifier时参数使用ALLOW_ALL_HOSTNAME_VERIFIER或空的HostnameVerifier。',\r\n 'level' : u'中危',\r\n 'harm' : u'关闭主机名校验可以导致黑客使用中间人攻击获取加密内容。',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于业务考虑或开发策略,选择了信任所有证书就会引起问题',\r\n 'adviseT' : u'1.尽量采用校验服务器证书的方式实现业务\\n2.对传输的数据进行检验',\r\n },\r\n\r\n '3_8' : {\r\n 'type' : 1,\r\n 'name' : u'Webview绕过证书校验',\r\n 'desc' : u'WebView组件加载网页发生证书认证错误时,会调用WebViewClient类的onReceivedSslError方法,如果该方法实现调用了handler.proceed()来忽略该证书错误,则会受到中间人攻击的威胁,可能导致隐私泄露',\r\n 'level' : u'中危',\r\n 'harm' : u'绕过证书验证会受到中间人攻击的威胁,从而导致隐私泄露',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'由于业务需求或开发策略,选择了绕过证书错误的实现方式,则会引起漏洞,导致中间人攻击威胁。',\r\n 'adviseT' : u'1.不调用android.webkit.SslErrorHandler的proceed方法\\n2.当发生证书认证错误时,采用默认的处理方法SslErrorHandler.cancel(),停止加载问题页面',\r\n },\r\n\r\n '3_9' : {\r\n 'type' : 1,\r\n 'name' : u'浏览器的intent scheme url攻击检测',\r\n 'desc' : u'检测浏览器是否存在\\nintent scheme url漏洞',\r\n 'level' : u'中危',\r\n 'harm' : u'可能导致应用程序崩溃,也可演变提权漏洞',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'相比于普通Intent-Based攻击,这种方式极具隐蔽性。这种攻击还能直接访问跟浏览器自身的组件(无论是公开还是私有)和私有文件,比如cookie文件,进而导致用户机密信息的泄露',\r\n 'adviseT' : u'建议浏览器及调用浏览器的app进行安全加固',\r\n },\r\n\r\n '3_10' : {\r\n 'type' : 1,\r\n 'name' : u'手势密码绕过',\r\n 'desc' : u'安全防护较弱的手势密码有被破解的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'App的手势密码未做安全防护,可以被破解或绕过,盗取用户信息',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'手势密码的密码文件存放在默认目录下,恶意用户可以下载并破解,密码输入未经过安全设置,存在手势密码失效的风险',\r\n 'adviseT' : u'对App手势密码进行安全加固,密码文件进行加密处理',\r\n },\r\n\r\n '3_11' : {\r\n 'type' : 1,\r\n 'name' : u'被调用安装任意apk',\r\n 'desc' : u'APP程序中存在被任意更新或安装病毒程序的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'APP程序被强制更新,造成信息损失或拒绝服务攻击',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : 
u'APP本身具备更新或者调用系统功能更新app的功能,可以被利用,被其他程序调用并更新,导致拒绝服务攻击',\r\n 'adviseT' : u'对APP程序的更新功能进行安全分析,对存在的任意调用风险进行消除',\r\n },\r\n\r\n '3_12' : {\r\n 'type' : 1,\r\n 'name' : u'被调用卸载任意apk',\r\n 'desc' : u'APP程序中存在被任意调用卸载的风险',\r\n 'level' : u'中危',\r\n 'harm' : u'APP程序被强制卸载,造成信息损失或拒绝服务攻击',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'APP本身具备自我卸载或者调用系统卸载app的功能,可以被利用,被其他程序调用并卸载',\r\n 'adviseT' : u'对APP程序的卸载功能进行安全分析,对存在的任意调用风险进行消除',\r\n },\r\n '3_13' : {\r\n 'type' : 0,\r\n 'name' : u'zip文件目录遍历漏洞',\r\n 'desc' : u'方法的返回值中包含有”../”跳转符,需要特别注意不要将文件写到了目标文件夹之外。如果不对”../”跳转符做过滤,就有可能遍历目录,在解压zip文件时以本app的权限覆盖任意文件。',\r\n 'level' : u'中危',\r\n 'harm' : u'如果没有对 zipEntry.getName进行检查,盲目解压创建文件,将会穿越目录建立文件',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'在生成文件时使用了zipEntry.getName()函数,但未过滤getName()函数的返回值是否含有的上级目录字符串(../),攻击者可构造zip文件通过在getName()中包含../从而改变文件的存放位置',\r\n 'adviseT' : u'在使用zipEntry.getName()生成文件时,过滤上级目录字符串(../)',\r\n },\r\n\t'3_14' : {\r\n 'type' : 0,\r\n 'name' : u'WebView组件忽略SSL证书验证错误',\r\n 'desc' : u'WebView调用onReceivedSslError方法时,直接执行handler.proceed()来忽略该证书错误',\r\n 'level' : u'中危',\r\n 'harm' : u'忽略SSL证书错误可能引起中间人攻击',\r\n 'score' : 2,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'Android WebView组件加载网页发生证书认证错误时,会调用WebViewClient类的onReceivedSslError方法,如果该方法实现调用了handler.proceed()来忽略该证书错误,则会受到中间人攻击的威胁,可能导致隐私泄露',\r\n 'adviseT' : u'1.不要重写onReceivedSslError方法, 或者对于SSL证书错误问题按照业务场景判断,避免造成数据明文传输情况',\r\n }\r\n}\r\n\r\nHarmItemDict = {\r\n '4_0' : {\r\n 'type' : 0,\r\n 'name' : u'广告检测',\r\n 'desc' : u'评估样本是否存在广告',\r\n 'level' : u'高危',\r\n 'harm' : u'一般广告对客户体验有影响,恶意广告会推送恶意信息',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'通过推送给客户产生垃圾信息',\r\n 'adviseT' : u'检查androidmanifest.xml是否含有广告sdk特征',\r\n },\r\n '4_1' : {\r\n 'type' : 1,\r\n 'name' : u'病毒检测',\r\n 'desc' : u'存在病毒的app会导致手机系统受到病毒的破坏,发生隐私泄露,违规的文件读写操作,监控网络数据等操作',\r\n 'level' : u'高危',\r\n 'harm' : u'App未作病毒安全防护,存在感染病毒的风险',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'病毒会导致系统的防护系统失效,获取系统最高权限,执行特定的操作和指令,给用户造成财产损失和隐私泄露',\r\n 'adviseT' : u'使用动态分析和静态分析的方式对病毒进行检测,并对存在的漏洞进行加固',\r\n }\r\n}\r\n\r\nPermissionItemDict = {\r\n '5_NONE' : {\r\n 'type' : 0,\r\n 'name' : u'无',\r\n 'desc' : u'无',\r\n 'level' : u'未知',\r\n 'harm' : u'不详',\r\n 'score' : 0,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'无',\r\n 'detail' : u'无',\r\n 'advise' : u'如果没有使用该权限请关闭',\r\n },\r\n\r\n #权限名称用包含包名的全名称\r\n '5_android.permission.WRITE_SMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_SMS',\r\n 'desc' : u'写短信',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得写短信的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_SMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RECEIVE_SMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RECEIVE_SMS',\r\n 'desc' : u'监控接收短信',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得监控接收短信的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : 
u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RECEIVE_SMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_CONTACTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_CONTACTS',\r\n 'desc' : u'读取联系人信息',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得读取联系人信息的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_CONTACTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_SMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_SMS',\r\n 'desc' : u'读取短信',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得读取短信的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_SMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SEND_SMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SEND_SMS',\r\n 'desc' : u'发送短信',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得发送短信的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SEND_SMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_com.android.browser.permission.WRITE_HISTORY_BOOKMARKS' : {\r\n 'type' : 0,\r\n 'name' : u'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS',\r\n 'desc' : u'写浏览器书签',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得写浏览器书签的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_com.android.browser.permission.READ_HISTORY_BOOKMARKS' : {\r\n 'type' : 0,\r\n 'name' : u'com.android.browser.permission.READ_HISTORY_BOOKMARKS',\r\n 'desc' : u'读取浏览器书签',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取浏览器书签的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'com.android.browser.permission.READ_HISTORY_BOOKMARKS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_com.android.launcher.permission.INSTALL_SHORTCUT' : {\r\n 'type' : 0,\r\n 'name' : u'com.android.launcher.permission.INSTALL_SHORTCUT',\r\n 'desc' : u'创建快捷方式',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得创建快捷方式的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'com.android.launcher.permission.INSTALL_SHORTCUT',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BRICK' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BRICK',\r\n 'desc' : u'请求能够禁用设备',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得请求能够禁用设备的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BRICK',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.INTERNET' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.INTERNET',\r\n 'desc' : u'连接网络',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得连接网络的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.INTERNET',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_PHONE_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_PHONE_STATE',\r\n 'desc' : u'读取电话状态',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得读取电话状态的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : 
u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_PHONE_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_EXTERNAL_STORAGE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_EXTERNAL_STORAGE',\r\n 'desc' : u'写外部存储器(如:SD卡)',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得写外部存储器的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_EXTERNAL_STORAGE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_CHECKIN_PROPERTIES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_CHECKIN_PROPERTIES',\r\n 'desc' : u'允许读写访问checkin数据库中的 \"properties\"表,该值可以被修改上传',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问登记属性的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_CHECKIN_PROPERTIES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_LOCATION_EXTRA_COMMANDS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_LOCATION_EXTRA_COMMANDS',\r\n 'desc' : u'应用程序访问位置的额外命令',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问位置的额外命令的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_LOCATION_EXTRA_COMMANDS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_MOCK_LOCATION' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_MOCK_LOCATION',\r\n 'desc' : u'程序创建模拟位置',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得程序创建模拟位置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_MOCK_LOCATION',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_NETWORK_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_NETWORK_STATE',\r\n 'desc' : u'访问GSM网络有关信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问GSM网络有关信息的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_NETWORK_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_SURFACE_FLINGER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_SURFACE_FLINGER',\r\n 'desc' : u'使用SurfaceFlinger底层特性',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得使用SurfaceFlinger底层特性的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_SURFACE_FLINGER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BLUETOOTH' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BLUETOOTH',\r\n 'desc' : u'连接到已配对的蓝牙设备',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得连接到已配对的蓝牙设备的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BLUETOOTH',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BLUETOOTH_ADMIN' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BLUETOOTH_ADMIN',\r\n 'desc' : u'程序发现和配对蓝牙设备',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得程序发现和配对蓝牙设备的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BLUETOOTH_ADMIN',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BROADCAST_STICKY' : {\r\n 'type' : 0,\r\n 'name' : 
u'android.permission.BROADCAST_STICKY',\r\n 'desc' : u'广播常用intents',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得广播常用intents的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BROADCAST_STICKY',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CALL_PRIVILEGED' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CALL_PRIVILEGED',\r\n 'desc' : u'拨打任何号码,包含紧急号码无需通过拨号用户界面需要用户确认',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得拨打任何号码,包含紧急号码无需通过拨号用户界面需要用户确认的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CALL_PRIVILEGED',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CHANGE_COMPONENT_ENABLED_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CHANGE_COMPONENT_ENABLED_STATE',\r\n 'desc' : u'改变其他组件的使能状态',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得改变其他组件的使能状态的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CHANGE_COMPONENT_ENABLED_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CHANGE_CONFIGURATION' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CHANGE_CONFIGURATION',\r\n 'desc' : u'修改当前设置',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得修改当前设置的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CHANGE_CONFIGURATION',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CHANGE_WIFI_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CHANGE_WIFI_STATE',\r\n 'desc' : u'改变Wi-Fi连接状态',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得改变Wi-Fi连接状态的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CHANGE_WIFI_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CLEAR_APP_CACHE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CLEAR_APP_CACHE',\r\n 'desc' : u'清除应用缓存',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得清除应用缓存的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CLEAR_APP_CACHE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CLEAR_APP_USER_DATA' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CLEAR_APP_USER_DATA',\r\n 'desc' : u'清除用户设置',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得清除用户设置的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CLEAR_APP_USER_DATA',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CONTROL_LOCATION_UPDATES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CONTROL_LOCATION_UPDATES',\r\n 'desc' : u'允许无线模块启用禁止位置更新',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得允许无线模块启用禁止位置更新的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CONTROL_LOCATION_UPDATES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DELETE_CACHE_FILES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DELETE_CACHE_FILES',\r\n 'desc' : u'删除缓存文件',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得删除缓存文件的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DELETE_CACHE_FILES',\r\n 
'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DELETE_PACKAGES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DELETE_PACKAGES',\r\n 'desc' : u'删除包',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得删除包的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DELETE_PACKAGES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DEVICE_POWER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DEVICE_POWER',\r\n 'desc' : u'访问底层电源管理',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问底层电源管理的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DEVICE_POWER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DISABLE_KEYGUARD' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DISABLE_KEYGUARD',\r\n 'desc' : u'禁用键盘锁',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得禁用键盘锁的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DISABLE_KEYGUARD',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DUMP' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DUMP',\r\n 'desc' : u'从系统服务返回状态抓取信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得从系统服务返回状态抓取信息的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DUMP',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.EXPAND_STATUS_BAR' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.EXPAND_STATUS_BAR',\r\n 'desc' : u'应用在状态栏扩展或收缩',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得应用在状态栏扩展或收缩的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.EXPAND_STATUS_BAR',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.FACTORY_TEST' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.FACTORY_TEST',\r\n 'desc' : u'作为一个工厂测试程序',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得作为一个工厂测试程序的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.FACTORY_TEST',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.FLASHLIGHT' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.FLASHLIGHT',\r\n 'desc' : u'访问闪光灯',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问闪光灯的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.FLASHLIGHT',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.FORCE_BACK' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.FORCE_BACK',\r\n 'desc' : u'强制后退操作在顶层',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得强制后退操作在顶层的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.FORCE_BACK',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.GET_ACCOUNTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.GET_ACCOUNTS',\r\n 'desc' : u'访问帐户列表',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得访问帐户列表的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.GET_ACCOUNTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.HARDWARE_TEST' : {\r\n 'type' : 0,\r\n 'name' : 
u'android.permission.HARDWARE_TEST',\r\n 'desc' : u'允许访问硬件',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得允许访问硬件的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.HARDWARE_TEST',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.INJECT_EVENTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.INJECT_EVENTS',\r\n 'desc' : u'截获用户事件,如按键、触摸、轨迹球',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得截获用户事件的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.INJECT_EVENTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.INSTALL_PACKAGES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.INSTALL_PACKAGES',\r\n 'desc' : u'安装packages',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得安装packages的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.INSTALL_PACKAGES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MANAGE_APP_TOKENS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MANAGE_APP_TOKENS',\r\n 'desc' : u'在窗口管理器中管理程序',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得在窗口管理器中管理程序的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MANAGE_APP_TOKENS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.PERSISTENT_ACTIVITY' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.PERSISTENT_ACTIVITY',\r\n 'desc' : u'设置其他的activities显示',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得设置其他的activities显示的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.PERSISTENT_ACTIVITY',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.PROCESS_OUTGOING_CALLS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.PROCESS_OUTGOING_CALLS',\r\n 'desc' : u'监视、修改有关拨出电话的信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得监视、修改有关拨出电话的信息的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.PROCESS_OUTGOING_CALLS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_CALENDAR' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_CALENDAR',\r\n 'desc' : u'读取用户日历数据',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取用户日历数据的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_CALENDAR',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_FRAME_BUFFER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_FRAME_BUFFER',\r\n 'desc' : u'抓屏或访问帧缓冲',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得抓屏或访问帧缓冲的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_FRAME_BUFFER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_INPUT_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_INPUT_STATE',\r\n 'desc' : u'当前按键状态',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得当前按键状态的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_INPUT_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_LOGS' : {\r\n 'type' : 
0,\r\n 'name' : u'android.permission.READ_LOGS',\r\n 'desc' : u'读取底层系统日志文件',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取底层系统日志文件的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_LOGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_OWNER_DATA' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_OWNER_DATA',\r\n 'desc' : u'读取所有者数据',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取所有者数据的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_OWNER_DATA',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_SYNC_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_SYNC_SETTINGS',\r\n 'desc' : u'读取同步设置',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取同步设置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_SYNC_SETTINGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_SYNC_STATS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_SYNC_STATS',\r\n 'desc' : u'读取同步状态',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取同步状态的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_SYNC_STATS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.REBOOT' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.REBOOT',\r\n 'desc' : u'重新启动设备',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得重新启动设备的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.REBOOT',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RECEIVE_BOOT_COMPLETED' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RECEIVE_BOOT_COMPLETED',\r\n 'desc' : u'接收到ACTION_BOOT_COMPLETED',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得接收到ACTION_BOOT_COMPLETED的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RECEIVE_BOOT_COMPLETED',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RECEIVE_MMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RECEIVE_MMS',\r\n 'desc' : u'接收MMS彩信',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得接收MMS彩信的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RECEIVE_MMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RECEIVE_WAP_PUSH' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RECEIVE_WAP_PUSH',\r\n 'desc' : u'接收WAP PUSH信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得接收WAP PUSH信息的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RECEIVE_WAP_PUSH',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RECORD_AUDIO' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RECORD_AUDIO',\r\n 'desc' : u'录制音频',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得录制音频的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RECORD_AUDIO',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.REORDER_TASKS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.REORDER_TASKS',\r\n 
'desc' : u'排列任务',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得排列任务的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.REORDER_TASKS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_APN_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_APN_SETTINGS',\r\n 'desc' : u'写入APN设置',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得写入APN设置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_APN_SETTINGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_CALENDAR' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_CALENDAR',\r\n 'desc' : u'写入用户日历数据',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得写入用户日历数据的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_CALENDAR',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_CONTACTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_CONTACTS',\r\n 'desc' : u'写入联系人数据',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得写入联系人数据的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_CONTACTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_GSERVICES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_GSERVICES',\r\n 'desc' : u'修改Google地图服务',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得修改Google地图服务的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_GSERVICES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_OWNER_DATA' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_OWNER_DATA',\r\n 'desc' : u'写入所有者数据',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得写入所有者数据的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_OWNER_DATA',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_SETTINGS',\r\n 'desc' : u'读取或写入系统设置',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得读取或写入系统设置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_SETTINGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_SYNC_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_SYNC_SETTINGS',\r\n 'desc' : u'写入同步设置',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得写入同步设置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WRITE_SYNC_SETTINGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_com.android.voicemail.permission.ADD_VOICEMAIL' : {\r\n 'type' : 0,\r\n 'name' : u'com.android.voicemail.permission.ADD_VOICEMAIL',\r\n 'desc' : u'增加语音邮件',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得增加语音邮件的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'com.android.voicemail.permission.ADD_VOICEMAIL',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MOUNT_UNMOUNT_FILESYSTEMS' : {\r\n 'type' : 0,\r\n 'name' : 
u'android.permission.MOUNT_UNMOUNT_FILESYSTEMS',\r\n 'desc' : u'挂载、反挂载外部文件系统',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得挂载、反挂载外部文件系统的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MOUNT_UNMOUNT_FILESYSTEMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_FINE_LOCATION' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_FINE_LOCATION',\r\n 'desc' : u'通过GPS芯片接收卫星的定位信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获取精确位置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_FINE_LOCATION',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CALL_PHONE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CALL_PHONE',\r\n 'desc' : u'拨打电话',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得拨打电话的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CALL_PHONE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.RESTART_PACKAGES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.RESTART_PACKAGES',\r\n 'desc' : u'重启其他程序',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得重启其他程序的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.RESTART_PACKAGES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_WALLPAPER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_WALLPAPER',\r\n 'desc' : u'设置桌面壁纸',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获得设置桌面壁纸的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_WALLPAPER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_COARSE_LOCATION' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_COARSE_LOCATION',\r\n 'desc' : u'通过WiFi或移动基站的方式获取用户粗略的经纬度信息',\r\n 'level' : u'中危',\r\n 'harm' : u'应用将获取粗略位置的权限,可能被用于恶意行为',\r\n 'score' : 2,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_COARSE_LOCATION',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCESS_WIFI_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCESS_WIFI_STATE',\r\n 'desc' : u'访问Wi-Fi网络状态信息',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得访问Wi-Fi网络状态信息的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCESS_WIFI_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ADD_SYSTEM_SERVICE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ADD_SYSTEM_SERVICE',\r\n 'desc' : u'发布系统级服务',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得发布系统级服务的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ADD_SYSTEM_SERVICE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BATTERY_STATS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BATTERY_STATS',\r\n 'desc' : u'更新手机电池统计信息',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得更新手机电池统计信息的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BATTERY_STATS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BROADCAST_PACKAGE_REMOVED' : {\r\n 
'type' : 0,\r\n 'name' : u'android.permission.BROADCAST_PACKAGE_REMOVED',\r\n 'desc' : u'在删除应用程序包后广播提示消息',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得在删除应用程序包后广播提示消息的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BROADCAST_PACKAGE_REMOVED',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CAMERA' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CAMERA',\r\n 'desc' : u'请求访问使用照相设备',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得请求访问使用照相设备的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CAMERA',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.GET_PACKAGE_SIZE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.GET_PACKAGE_SIZE',\r\n 'desc' : u'获取任何package占用空间容量',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获取任何package占用空间容量的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.GET_PACKAGE_SIZE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.GET_TASKS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.GET_TASKS',\r\n 'desc' : u'获取有关当前或最近运行的任务信息',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获取有关当前或最近运行的任务信息的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.GET_TASKS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.INTERNAL_SYSTEM_WINDOW' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.INTERNAL_SYSTEM_WINDOW',\r\n 'desc' : u'允许打开使用系统用户界面的窗口',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得允许打开使用系统用户界面的窗口的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.INTERNAL_SYSTEM_WINDOW',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MODIFY_AUDIO_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MODIFY_AUDIO_SETTINGS',\r\n 'desc' : u'修改全局音频设置',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得修改全局音频设置的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MODIFY_AUDIO_SETTINGS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_ACTIVITY_WATCHER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_ACTIVITY_WATCHER',\r\n 'desc' : u'控制已经在全局系统中启动的activities',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得控制已经在全局系统中启动的activities的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_ACTIVITY_WATCHER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_ANIMATION_SCALE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_ANIMATION_SCALE',\r\n 'desc' : u'修改全局信息比例',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得修改全局信息比例的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_ANIMATION_SCALE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_DEBUG_APP' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_DEBUG_APP',\r\n 'desc' : u'配置程序处于调试状态',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得配置程序处于调试状态的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_DEBUG_APP',\r\n 'advise' : 
u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_ORIENTATION' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_ORIENTATION',\r\n 'desc' : u'底层访问设置屏幕方向和实际旋转',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得底层访问设置屏幕方向和实际旋转的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_ORIENTATION',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_PREFERRED_APPLICATIONS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_PREFERRED_APPLICATIONS',\r\n 'desc' : u'修改列表参数',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得修改列表参数的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_PREFERRED_APPLICATIONS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_PROCESS_FOREGROUND' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_PROCESS_FOREGROUND',\r\n 'desc' : u'强制当前程序运行到前台',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得强制当前程序运行到前台的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_PROCESS_FOREGROUND',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_PROCESS_LIMIT' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_PROCESS_LIMIT',\r\n 'desc' : u'允许设置最大的运行进程数量',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得允许设置最大的运行进程数量的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_PROCESS_LIMIT',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_TIME_ZONE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_TIME_ZONE',\r\n 'desc' : u'设置时间区域',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得设置时间区域的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_TIME_ZONE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.STATUS_BAR' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.STATUS_BAR',\r\n 'desc' : u'打开、关闭或禁用状态栏及图标',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得打开、关闭或禁用状态栏及图标的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.STATUS_BAR',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SUBSCRIBED_FEEDS_READ' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SUBSCRIBED_FEEDS_READ',\r\n 'desc' : u'访问订阅RSS Feed内容',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得访问订阅RSS Feed内容的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SUBSCRIBED_FEEDS_READ',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SUBSCRIBED_FEEDS_WRITE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SUBSCRIBED_FEEDS_WRITE',\r\n 'desc' : u'写入或修改订阅内容的数据库',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得写入或修改订阅内容的数据库的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SUBSCRIBED_FEEDS_WRITE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.VIBRATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.VIBRATE',\r\n 'desc' : u'访问振动设备',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得访问振动设备的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 
'detail' : u'android.permission.VIBRATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WAKE_LOCK' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WAKE_LOCK',\r\n 'desc' : u'使用WakeLocks使得进程在休眠时从屏幕消失',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得使用WakeLocks使得进程在休眠时从屏幕消失的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.WAKE_LOCK',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.READ_EXTERNAL_STORAGE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.READ_EXTERNAL_STORAGE',\r\n 'desc' : u'读取外部存储',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得读取外部存储的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.READ_EXTERNAL_STORAGE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.DIAGNOSTIC' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.DIAGNOSTIC',\r\n 'desc' : u'允许程序RW诊断资源',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得允许程序RW诊断资源的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.DIAGNOSTIC',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CHANGE_NETWORK_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CHANGE_NETWORK_STATE',\r\n 'desc' : u'允许程序改变网络连接状态',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许程序改变网络连接状态的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CHANGE_NETWORK_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MASTER_CLEAR' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MASTER_CLEAR',\r\n 'desc' : u'清除一切数据',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得清除一切数据的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MASTER_CLEAR',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MODIFY_PHONE_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MODIFY_PHONE_STATE',\r\n 'desc' : u'允许修改话机状态,如电源,人机接口等',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许修改话机状态,如电源,人机接口等的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MODIFY_PHONE_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_ALWAYS_FINISH' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_ALWAYS_FINISH',\r\n 'desc' : u'允许程序控制是否活动间接完成在处于后台时',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得允许程序控制是否活动间接完成在处于后台时的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_ALWAYS_FINISH',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SET_WALLPAPER_HINTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SET_WALLPAPER_HINTS',\r\n 'desc' : u'允许程序设置壁纸hints',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得允许程序设置壁纸hints的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SET_WALLPAPER_HINTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SIGNAL_PERSISTENT_PROCESSES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SIGNAL_PERSISTENT_PROCESSES',\r\n 'desc' : u'允许程序请求发送信号到所有显示的进程中',\r\n 'level' : u'高危',\r\n 'harm' : 
u'应用将获得允许程序请求发送信号到所有显示的进程中的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SIGNAL_PERSISTENT_PROCESSES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.SYSTEM_ALERT_WINDOW' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.SYSTEM_ALERT_WINDOW',\r\n 'desc' : u'允许一个程序打开窗口使用 TYPE_SYSTEM_ALERT,显示在其他所有程序的顶层',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得显示在其他所有程序的顶层的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.SYSTEM_ALERT_WINDOW',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.ACCOUNT_MANAGER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.ACCOUNT_MANAGER',\r\n 'desc' : u'账户管理,获取账户验证信息',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得账户验证信息的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.ACCOUNT_MANAGER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.AUTHENTICATE_ACCOUNTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.AUTHENTICATE_ACCOUNTS',\r\n 'desc' : u'验证账户,允许一个程序通过账户验证方式访问账户管理ACCOUNT_MANAGER相关信息',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得验证账户的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.AUTHENTICATE_ACCOUNTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BIND_APPWIDGET' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BIND_APPWIDGET',\r\n 'desc' : u'绑定小插件,允许一个程序告诉appWidget服务需要访问小插件的数据库',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得绑定小插件的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BIND_APPWIDGET',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BIND_DEVICE_ADMIN' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BIND_DEVICE_ADMIN',\r\n 'desc' : u'绑定设备管理,请求系统管理员接收者receiver',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得绑定设备管理的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BIND_DEVICE_ADMIN',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BIND_INPUT_METHOD' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BIND_INPUT_METHOD',\r\n 'desc' : u'绑定输入法,请求InputMethodService服务',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得绑定输入法的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BIND_INPUT_METHOD',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BIND_REMOTEVIEWS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BIND_REMOTEVIEWS',\r\n 'desc' : u'绑定RemoteView,必须通过RemoteViewsService服务来请求',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得绑定RemoteView的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BIND_REMOTEVIEWS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BIND_WALLPAPER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BIND_WALLPAPER',\r\n 'desc' : u'绑定壁纸,必须通过WallpaperService服务来请求',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得绑定壁纸的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BIND_WALLPAPER',\r\n 'advise' : 
u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BROADCAST_SMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BROADCAST_SMS',\r\n 'desc' : u'收到短信时广播',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得收到短信时广播的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BROADCAST_SMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.BROADCAST_WAP_PUSH' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.BROADCAST_WAP_PUSH',\r\n 'desc' : u'WAP PUSH广播',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得WAP PUSH广播的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.BROADCAST_WAP_PUSH',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CHANGE_WIFI_MULTICAST_STATE' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CHANGE_WIFI_MULTICAST_STATE',\r\n 'desc' : u'改变WiFi多播状态',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得改变WiFi多播状态的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CHANGE_WIFI_MULTICAST_STATE',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CWJ_GROUP' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CWJ_GROUP',\r\n 'desc' : u'底层访问权限,允许CWJ账户组访问底层信息',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得底层访问权限的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CWJ_GROUP',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.CELL_PHONE_MASTER_EX' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.CELL_PHONE_MASTER_EX',\r\n 'desc' : u'手机优化大师扩展权限',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得手机优化大师扩展权限的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.CELL_PHONE_MASTER_EX',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.GLOBAL_SEARCH' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.GLOBAL_SEARCH',\r\n 'desc' : u'允许全局搜索',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许全局搜索的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.GLOBAL_SEARCH',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.INSTALL_LOCATION_PROVIDER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.INSTALL_LOCATION_PROVIDER',\r\n 'desc' : u'安装定位提供',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得安装定位提供的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.INSTALL_LOCATION_PROVIDER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.KILL_BACKGROUND_PROCESSES' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.KILL_BACKGROUND_PROCESSES',\r\n 'desc' : u'结束后台进程,允许程序调用killBackgroundProcesses(String).方法结束后台进程',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得结束后台进程的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.KILL_BACKGROUND_PROCESSES',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MANAGE_ACCOUNTS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MANAGE_ACCOUNTS',\r\n 'desc' : u'允许程序管理AccountManager中的账户列表',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许程序管理AccountManager中的账户列表的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 
'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MANAGE_ACCOUNTS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MTWEAK_USER' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MTWEAK_USER',\r\n 'desc' : u'允许mTweak用户访问高级系统权限',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许mTweak用户访问高级系统权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MTWEAK_USER',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MTWEAK_FORUM' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MTWEAK_FORUM',\r\n 'desc' : u'允许使用mTweak社区权限',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许使用mTweak社区权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MTWEAK_FORUM',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.MOUNT_FORMAT_FILESYSTEMS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.MOUNT_FORMAT_FILESYSTEMS',\r\n 'desc' : u'格式化可移动文件系统,比如格式化清空SD卡',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得格式化可移动文件系统的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.MOUNT_FORMAT_FILESYSTEMS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.NFC' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.NFC',\r\n 'desc' : u'允许程序执行NFC近距离通讯操作,用于移动支持',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得允许程序执行NFC近距离通讯操作的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.NFC',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.com.android.alarm.permission.SET_ALARM' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.com.android.alarm.permission.SET_ALARM',\r\n 'desc' : u'设置闹铃提醒',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得设置闹铃提醒的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'com.android.alarm.permission.SET_ALARM',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.UPDATE_DEVICE_STATS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.UPDATE_DEVICE_STATS',\r\n 'desc' : u'更新设备状态',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得更新设备状态的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.UPDATE_DEVICE_STATS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.USE_CREDENTIALS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.USE_CREDENTIALS',\r\n 'desc' : u'使用证书',\r\n 'level' : u'高危',\r\n 'harm' : u'应用将获得使用证书的权限,可能被用于恶意行为',\r\n 'score' : 3,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.USE_CREDENTIALS',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.USE_SIP' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.USE_SIP',\r\n 'desc' : u'使用SIP视频',\r\n 'level' : u'低危',\r\n 'harm' : u'应用将获得使用SIP视频的权限,可能被用于恶意行为',\r\n 'score' : 1,\r\n 'result' : u'存在风险',\r\n 'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n 'detail' : u'android.permission.USE_SIP',\r\n 'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n },\r\n '5_android.permission.WRITE_SECURE_SETTINGS' : {\r\n 'type' : 0,\r\n 'name' : u'android.permission.WRITE_SECURE_SETTINGS',\r\n 'desc' : u'允许程序读写系统安全敏感的设置项',\r\n 'level' : u'高危',\r\n 'harm' : 
u'应用将获得允许程序读写系统安全敏感的设置项的权限,可能被用于恶意行为',\r\n            'score' : 3,\r\n            'result' : u'存在风险',\r\n            'analysis' : u'在权限配置中可能设置开启了该权限项',\r\n            'detail' : u'android.permission.WRITE_SECURE_SETTINGS',\r\n            'advise' : u'如果应用中没有使用该权限建议将其设为关闭状态',\r\n        },\r\n}\r\nServerItemDict = {\r\n    '6_0' : {\r\n        'type' : 1,\r\n        'name' : u'SQL注入',\r\n        'desc' : u'攻击者注入SQL命令以欺骗服务器,从而可以执行恶意操作',\r\n        'level' : u'中危',\r\n        'harm' : u'1.通过SQL注入,执行非授权的SQL代码\\n2.攻击者进行提权操作,控制整个服务器',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'1.如果应用程序使用一些用户输入的数据来构造动态的SQL语句去访问数据库,将可能遭受到SQL注入攻击\\n2.如果在代码中使用了存储过程,并且这些存储过程缺乏对用户输入的合理限制,则也很容易发生SQL注入',\r\n        'adviseT' : u'1.进行完善的代码检查,避免SQL注入漏洞,比如,使用绑定变量\\n2.及时进行数据库版本升级,防止数据库版本过低而导致提权漏洞\\n3.对日志进行全面的审计,及时预防攻击行为',\r\n    },\r\n\r\n    '6_1' : {\r\n        'type' : 1,\r\n        'name' : u'Xss跨站脚本',\r\n        'desc' : u'恶意攻击者往Web页面里插入恶意html代码,当用户浏览该页之时,嵌入其中Web里面的html代码会被执行,从而达到恶意用户的特殊目的',\r\n        'level' : u'中危',\r\n        'harm' : u'取决于用户输入了什么样的脚本,技术熟练的攻击者可以利用XSS攻下服务器',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于大多数程序员不太注意对用户输入的字符串进行细致的检验,或经过一定的检验但忽略了相关的细节,容易留下xss漏洞。而一旦有xss漏洞经过利用变会造成无可估量的损失',\r\n        'adviseT' : u'1.将重要的cookie标记为http only\\n2.只允许用户输入我们期望的数据\\n3.对数据进行Html Encode 处理过滤或移除特殊的Html标签\\n4过滤JavaScript 事件的标签',\r\n    },\r\n\r\n    '6_2' : {\r\n        'type' : 1,\r\n        'name' : u'缓冲区溢出',\r\n        'desc' : u'通过往程序的缓冲区写超出其长度的内容,造成缓冲区的溢出,从而破坏程序的堆栈,使程序转而执行其它指令,以达到攻击的目的',\r\n        'level' : u'中危',\r\n        'harm' : u'1.缓冲区溢出的危害性具有破坏性与隐蔽性的特点。\\n2.破坏性体现在易使服务程序停止运行\\n3.隐蔽性体现在攻击的随机性及不可预测性',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'1.导致程序运行失败等后果\\n2.执行非授权指令,甚至获得目标主机的控制权',\r\n        'adviseT' : u'1.代码编写:避免使用危险函数,对于缓冲区的操作要进行严格的边界检查,这可借助一些工具如编译器来实现\\n2.开发语言:可使用类型安全的编程语言\\n3.运行状态:可进行动态保护,主要是数组边界检查和保证返回指针的完整性',\r\n    },\r\n\r\n    '6_3' : {\r\n        'type' : 1,\r\n        'name' : u'文件上传漏洞',\r\n        'desc' : u'攻击者利用程序缺陷绕过系统对文件的验证与处理策略将恶意程序上传到服务器并获得执行服务器端命令的能力',\r\n        'level' : u'中危',\r\n        'harm' : u'攻击者可以利用文件上传漏洞所得到的权限最低也是WEBSHELL,甚至可以获得管理员权限',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于大多数程序员不太注意对上传文件进行细致的过滤,或经过一定的检验过滤但忽略了相关的细节,容易留下文件上传漏洞,而一旦有文件上传漏洞经过利用变会造成无可估量的损失',\r\n        'adviseT' : u'1.检查是否判断了上传文件类型及后缀\\n2.文件上传目录禁止脚本解析。\\n3.运行状态:可进行动态保护,主要是数组边界检查和保证返回指针的完整性。\\n4.建议使用随机字符串重命名文件名,而不是使用用户上传上来的name参数的值',\r\n    },\r\n\r\n    '6_4' : {\r\n        'type' : 1,\r\n        'name' : u'数据库泄露',\r\n        'desc' : u'数据库存在泄露的风险,有被非法下载利用的威胁',\r\n        'level' : u'中危',\r\n        'harm' : u'数据库泄露,数据库被非法下载',\r\n        'score' : 2,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于数据库用户存在弱口令漏洞,或者数据库用户的权限过大等问题,导致数据库存在泄露的风险',\r\n        'adviseT' : u'1.对数据库加密\\n2.对数据库连接文件加密。\\n3.使用基于数据加密的应用绑定技术,防止非法访问。\\n4.增加用户的身份鉴别强度\\n5.数据库管理员需要按最小原则构建安全的权限体系',\r\n    },\r\n\r\n    '6_5' : {\r\n        'type' : 1,\r\n        'name' : u'弱口令漏洞',\r\n        'desc' : u'口令的组成是简单数字和字母的简单组合',\r\n        'level' : u'低危',\r\n        'harm' : u'弱口令极其容易被猜测到或被破解工具爆破',\r\n        'score' : 1,\r\n        'result' : u'不存在风险',\r\n        'analysis' : u'没有导致风险的因素',\r\n        'detail' : u'无',\r\n        'advise' : u'无',\r\n\r\n        #存在风险下的注释\r\n        'resultT' : u'存在风险',\r\n        'analysisT' : u'由于弱口令只是数字和字母的简单组合,或者是一些可以根据社会工程猜测出的字符串,所以极其容易被破解',\r\n        'adviseT' 
: u'1.对于密码设置:强制对所有的密码强度必须达到一定的级别\\n2.对于服务器安全机制:限制错误登录次数单位时间内验证的过多则封杀ip',\r\n },\r\n\r\n '6_6' : {\r\n 'type' : 1,\r\n 'name' : u'跨权漏洞',\r\n 'desc' : u'攻击者通过普通用户提升权限而获得管理员权限,最终控制服务器',\r\n 'level' : u'高危',\r\n 'harm' : u'提权漏洞一旦被触发,攻击者就可以非法操纵系统一切可以操纵的资源',\r\n 'score' : 3,\r\n 'result' : u'不存在风险',\r\n 'analysis' : u'没有导致风险的因素',\r\n 'detail' : u'无',\r\n 'advise' : u'无',\r\n\r\n #存在风险下的注释\r\n 'resultT' : u'存在风险',\r\n 'analysisT' : u'攻击者利用权限提升漏洞,通过普通用户提升权限而获得管理员权限,最终控制服务器',\r\n 'adviseT' : u'1.用户端安全设置\\n2.服务器安全权限设置',\r\n },\r\n}","repo_name":"ichoukou/git_repository","sub_path":"ass_to_all_two/itemConfigs.py","file_name":"itemConfigs.py","file_ext":"py","file_size_in_byte":170503,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28254796987","text":"from pafo.triangle import Triangle, PlanarTriangle\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport pathlib\nthisdir = pathlib.Path(__file__).resolve().parent\nfrom functools import partial\nimport numpy as np \nfrom typing import Tuple\n\ntry:\n from tests.visualize_solution import plot\nexcept:\n from visualize_solution import plot\n\ndef perp(a: np.ndarray) -> np.ndarray:\n b = np.empty_like(a)\n b[0] = -a[1]\n b[1] = a[0]\n return b\n\ndef seg_intersect(a1: np.ndarray, a2: np.ndarray, b1: np.ndarray, b2: np.ndarray) -> np.ndarray:\n da = a2-a1\n db = b2-b1\n dp = a1-b1\n dap = perp(da)\n denom = np.dot( dap, db)\n num = np.dot( dap, dp )\n return (num / denom.astype(float))*db + b1\n\ndef main():\n fig: plt.Figure\n ax: plt.Axes\n\n speed = 0.05\n\n R = PlanarTriangle([\n [0, 0], [1, 0], [0.6, 0.2]\n ])\n P = Triangle([30, 60, 90], deg=True)\n Q = R.min_max_traversal_triangle(P)\n pad = 0.02\n focal = seg_intersect(R.points[0], Q.points[0], R.points[1], Q.points[1])\n\n x_min = np.min([focal[0], *R.points[:,0], *Q.points[:,0]])-pad\n x_max = np.max([focal[0], *R.points[:,0], *Q.points[:,0]])+pad\n y_min = np.min([focal[1], *R.points[:,1], *Q.points[:,1]])-pad\n y_max = np.max([focal[1], *R.points[:,1], *Q.points[:,1]])+pad\n\n savepath = thisdir.joinpath(\"focal\")\n for i in range(10):\n with plot(savepath, x_min, x_max, y_min, y_max) as (fig, ax):\n ax.scatter(*focal, c=\"blue\")\n \n _R = PlanarTriangle(R.points + (Q.points - R.points) * i / 9)\n Q.plot(fig, ax, \"green\")\n\n ax.add_collection(LineCollection(\n [[Q.points[2], focal]],\n linestyles=\"--\", colors=\"black\"\n ))\n ax.add_collection(LineCollection(\n [[R.points[0], focal], [R.points[1], focal]],\n linestyles=\"--\", colors=\"black\"\n ))\n\n ax.scatter(*_R.points.T, c=\"black\")\n ax.scatter(*Q.points.T, c=\"green\")\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"jaredraycoleman/pafo","sub_path":"tests/visualize_focal.py","file_name":"visualize_focal.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43374227126","text":"\"\"\"\nAuthor: Uchenna Edeh\nGiven an array of numbers, find the maximum sum of any contiguous subarray of the array.\n\nFor example, given the array [34, -50, 42, 14, -5, 86], the maximum sum would be 137, since we would take elements 42, 14, -5, and 86.\n\nGiven the array [-5, -1, -8, -9], the maximum sum would be 0, since we would not take any elements.\n\nDo this in O(N) time.\nAlgorithms:\nuse three variables\n1. running_max_sum\n2. max_sum\n3. 
max_sum_from_last_smallest_num\n\"\"\"\nimport sys\n\ndef solution1(A):\n max_sum_contiguos = A[0]\n current_sum = 0\n previous = 0\n\n for i, val in enumerate(A): \n current_sum = val + current_sum \n\n if current_sum > max_sum_contiguos: \n max_sum_contiguos = current_sum\n else:\n if val > current_sum:\n current_sum = val\n if current_sum > max_sum_contiguos:\n max_sum_contiguos = current_sum\n\n previous = val\n\n return max_sum_contiguos \n \n\ndef main(args):\n if len(args) != 2:\n raise AssertionError(\"Usage:\\n\\tpython3 {0} '{1}'\\n\\tExpected Result: {2}\\n\\tPlease Try Again!\\n\\t\".format(__file__, \"34, -50, 42, 14, -5, 86\", '137' ))\n my_list = [int(x) for x in args[1].split(',')]\n print(solution1(my_list))\n\nif __name__ == \"__main__\":\n try:\n main(sys.argv)\n except AssertionError as e:\n print(e)\n sys.exit(1)\n\n","repo_name":"uchenna-j-edeh/dailly_problems","sub_path":"arrays_manipulations_algorithms/max_sum_contiguous_subarray.py","file_name":"max_sum_contiguous_subarray.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75337617287","text":"import time\nimport cv2\nimport sys\nimport numpy\nimport trackingBox\nimport socketio\nimport json\nimport rel\n\ntracker_types = ['KCF','MOSSE', 'CSRT']\ntracker_type = tracker_types[2]\n\ncamera_motion = trackingBox.CameraMove(\"http://10.4.145.139:5000/controller\")\n\n#face_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#face_detect = cv2.CascadeClassifier('haarcascade_fullbody.xml')\n\n\nfacebox1 = (0,0,0,0)\nfacebox2 = (0,0,0,0)\ninitial_find1 = False\ninitial_find2 = False\ntrackingFailure1 = 1\ntrackingFailure2 = 1\ncenterX1 = 300\ncenterX2 = 300\ncenterAngleX = 0\ncenterAngleY = 3\ncurrentAngle = 0\nincrementAngle = 1\n\n\nif tracker_type == 'KCF':\n TrackerFunction = cv2.TrackerKCF_create\nelif tracker_type == 'MOSSE':\n TrackerFunction = cv2.TrackerMOSSE_create\nelif tracker_type == \"CSRT\":\n TrackerFunction = cv2.TrackerCSRT_create\n\ntracker1 = TrackerFunction()\ntracker2 = TrackerFunction()\n\n\n# Starts the video and lets the camera focus for a second\nvideo = cv2.VideoCapture(1) # for using CAM\n#video = cv2.VideoCapture(\"rtsp://10.4.145.139:8554/cam\")\ntime.sleep(1.0)\n \n# Exit if video not opened.\nif not video.isOpened():\n print(\"Could not open video\")\n sys.exit()\n\n# Read first frame.\nok, frame = video.read()\nif not ok:\n print ('Cannot read video file')\n sys.exit()\n\n\nwhile True:\n #Read in the frame data \n ok, frame = video.read()\n #frame = cv2.flip(frame,1)\n\n # If the frame data cannot be read then exit the tracking loop\n if not ok:\n break\n \n # Getting the time before running the tracking algorithm\n timer = cv2.getTickCount()\n\n frameWidth = video.get(cv2.CAP_PROP_FRAME_WIDTH)\n \n\n key_pressed = cv2.waitKey(1) & 0xFF\n \n # Gets the initial bounding boxes for the faces detected in the frame\n if (key_pressed == ord('s')) or ((initial_find1 == 0) or (initial_find2 == 0)): # runs if the face tracking algorithm\n #Converting the frame to gray tone so that the face detection can work\n #gray_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n \n #Detecting the faces in the frame\n #faces = face_detect.detectMultiScale(gray_frame,1.1,3)\n faces = cv2.selectROIs(\"Select Fencers\",frame,False)\n #print(faces)\n cv2.destroyWindow(\"Select Fencers\")\n\n #Checking the number of faces in the frame\n num_faces = numpy.shape(faces)\n # Tracking success\n if 
num_faces[0] < 1:\n initial_find1 = False\n initial_find2 = False\n camera_motion.move(centerAngleX,centerAngleY)\n currentAngle = centerAngleX\n elif num_faces[0] < 2:\n facebox1 = (faces[0][0],faces[0][1],faces[0][2],faces[0][3])\n tracker1 = TrackerFunction()\n ok1 = tracker1.init(frame, facebox1)\n initial_find1 = True\n # If there is more than two faces in frame draw bounding boxes for the first two\n else:\n facebox1 = (int(faces[0][0]),int(faces[0][1]),int(faces[0][2]),int(faces[0][3]))\n facebox2 = (int(faces[1][0]),int(faces[1][1]),int(faces[1][2]),int(faces[1][3]))\n tracker1 = TrackerFunction()\n ok1 = tracker1.init(frame, facebox1)\n initial_find1 = True\n tracker2 = TrackerFunction()\n ok2 = tracker2.init(frame, facebox2)\n initial_find2 = True\n elif key_pressed == ord('q'): # if press q \n break\n\n # Tracking the first object\n if facebox1 and initial_find1:\n # Tracking success\n trackingFailure1 = 0\n ok1, facebox1 = tracker1.update(frame)\n p1 = (int(facebox1[0]), int(facebox1[1]))\n p2 = (int(facebox1[0] + facebox1[2]), int(facebox1[1] + facebox1[3]))\n centerX1, centerY1 = trackingBox.center(facebox1)\n cv2.rectangle(frame, p1, p2, (0,255,255), 2, 1)\n cv2.line(frame, (centerX1-5, centerY1), (centerX1+5, centerY1), (0,255,255), 1)\n cv2.line(frame, (centerX1, centerY1-5), (centerX1, centerY1+5), (0,255,255), 1)\n else :\n # Tracking failure\n trackingFailure1 = 1\n cv2.putText(frame, \"Tracking failure detected on 1\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\n # Tracking the second object\n if facebox2 and initial_find2:\n # Tracking success\n trackingFailure2 = 0\n ok2, facebox2 = tracker2.update(frame)\n p1 = (int(facebox2[0]), int(facebox2[1]))\n p2 = (int(facebox2[0] + facebox2[2]), int(facebox2[1] + facebox2[3]))\n centerX2, centerY2 = trackingBox.center(facebox2)\n cv2.rectangle(frame, p1, p2, (255,0,255), 2, 1)\n cv2.line(frame, (centerX2-5, centerY2), (centerX2+5, centerY2), (255,0,255), 1)\n cv2.line(frame, (centerX2, centerY2-5), (centerX2, centerY2+5), (255,0,255), 1)\n else :\n # Tracking failure\n trackingFailure2 = 1\n cv2.putText(frame, \"Tracking failure detected on 2\", (100,110), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\n \n if (trackingFailure1 == 0) and (trackingFailure2 == 0):\n moveLeft, moveRight = trackingBox.movementOfMovingCamera(frameWidth,centerX1,centerX2)\n currentAngle = currentAngle - moveLeft + moveRight\n if(currentAngle < 20) and (currentAngle > -20):\n camera_motion.move(currentAngle,centerAngleY)\n elif(currentAngle > 20):\n currentAngle = 20\n elif(currentAngle < -20):\n currentAngle = -20\n print(frameWidth)\n print(centerX1)\n print(centerX2)\n print(moveRight)\n print(moveLeft)\n \n\n\n\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n\n #Displaying the tracking algorithm\n cv2.putText(frame, tracker_type + \" Tracker\", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)\n \n # Display FPS on frame\n cv2.putText(frame, \"FPS : \" + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)\n # Display result\n cv2.imshow(\"Tracking\", frame)\n\nvideo.release()\ncv2.destroyAllWindows()","repo_name":"UAHFencingClub/VideoReplaySystem","sub_path":"MotionCamera/multiple_tracking.py","file_name":"multiple_tracking.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42661350222","text":"from django.shortcuts import render\nfrom django.http 
import HttpResponseRedirect\nfrom django.core.mail import send_mail\n\nfrom .forms import ContactForm\nfrom farmacias.models import Pharmacy\n\n# Create your views here.\n\n\ndef contact_form(request):\n\n pharmacy = Pharmacy.objects.get(name='schestakow')\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n\n if form.is_valid():\n name = form.cleaned_data['name']\n sender = form.cleaned_data['sender']\n message = form.cleaned_data['message']\n\n recipients = ['fschestakow@gmail.com']\n content = '%s\\nCorreo: %s\\nDice: %s' % (name, sender, message)\n subject = 'Correo a través de la pagina web'\n\n send_mail(subject,\n content,\n sender,\n recipients)\n\n return HttpResponseRedirect('./')\n\n else:\n form = ContactForm()\n\n return render(request, 'contacto/contact.html', {'form': form,\n 'pharmacy': pharmacy})\n","repo_name":"augustodn/fschestakow","sub_path":"contacto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42086688215","text":"from Objects.Classes.Connect4Game import Connect4GameClass\nfrom Objects.Classes.QLearningAgent import QLearningAgentClass\nimport matplotlib.pyplot as plt\n\nclass Connect4TrainerClass:\n def __init__(self, opponent=None, num_episodes=500, q_table_path= \"Q_tables\", starting_policy=\"alternate\", espilon_decay=None, min_epsilon=None, q_table_name = \"q_table.pkl\"):\n self.num_episodes = num_episodes\n self.q_table_path = q_table_path\n # Initialize the Q-learning agent\n self.agent = QLearningAgentClass(\n action_space=Connect4GameClass().columns,\n learning_rate=0.1,\n discount_factor=0.9,\n epsilon=1.0,\n epsilon_decay=0.99999 if not espilon_decay else espilon_decay,\n min_epsilon=0.05 if not min_epsilon else min_epsilon,\n q_table_name = q_table_name\n )\n self.opponent = opponent() if opponent else self.agent\n # Starting policy\n self.starting_policy = starting_policy # Added parameter for starting policy\n self.agent_starts = starting_policy != 'never_start' # Determine if agent starts in the first game\n\n\n def train(self):\n # Initialize the Connect4 environment\n connect4_env = Connect4GameClass(rows=4, columns=5, in_a_row=4)\n\n Q_size = []\n for episode in range(self.num_episodes):\n current_state = connect4_env.reset()\n done = False\n\n # New logic for handling who starts the game\n if self.starting_policy == 'alternate':\n self.agent_starts = not self.agent_starts\n elif self.starting_policy == 'never_start':\n self.agent_starts = False # Ensure agent never starts\n \n # If opponent starts, make the first move for the opponent\n if not self.agent_starts and self.opponent:\n current_state, _ = self.opponent.play(connect4_env, current_state, False)\n\n while not done:\n possible_actions = connect4_env.possible_actions()\n action = self.agent.choose_action(current_state, possible_actions)\n\n # Apply the action to the environment\n next_state, reward, done = connect4_env.step(action)\n\n # If there is an opponent, let the opponent play here\n if self.opponent:\n next_state, done = self.opponent.play(connect4_env, next_state, done)\n\n # Agent learns from the action\n self.agent.update_q_table(current_state, action, reward, next_state, done, possible_actions)\n current_state = next_state\n\n # Progress tracking\n if episode % 1000 == 0:\n table = self.agent.q_table\n size = len(table)\n Q_size.append(size)\n print(f\"Episode {episode} complete. 
Q-table size: {size}, epsilon: {self.agent.epsilon}\")\n\n # Save the Q-table\n self.agent.save_q_table(self.q_table_path, q_table_name=self.agent.q_table_name)\n return Q_size\n # Plotting the Q-table size\n \n\n\n\n\n \n","repo_name":"BjornThor123/FinalProjectREI505M","sub_path":"Agent_Trainer.py","file_name":"Agent_Trainer.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72211510407","text":"import discord\nfrom discord.ext import commands\n\nfrom req import core\n\n\nclass Bot(core.Core):\n def __init__(self):\n super().__init__(\"Nico\", [Events, Commands])\n\n\nclass Events(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n\nclass Commands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(name=\"ticket\", aliases=[\"t\"], invoke_without_command=True)\n async def ticket(self, ctx, *, info=None):\n perms = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),\n ctx.guild.get_role(self.bot.config.roles['moderator']): discord.PermissionOverwrite(read_messages=True),\n ctx.guild.get_role(self.bot.config.roles['admin']): discord.PermissionOverwrite(read_messages=True),\n ctx.author: discord.PermissionOverwrite(read_messages=True)\n }\n ch = await ctx.guild.create_text_channel(name=\"ticket\", category=self.bot.get_channel(748286304944390264), overwrites=perms)\n\n embed=discord.Embed(title=f\"{ctx.author.display_name} opened a ticket\",\n description=info,\n color=0xfefefe)\n m = await ch.send(\"@everyone\", embed=embed)\n await m.pin()\n\n @ticket.command(name=\"add\")\n async def ticket_add(self, ctx, user:discord.Member):\n await ctx.channel.edit(overwrites={**ctx.channel.overwrites, user: discord.PermissionOverwrite(read_messages=True)})\n m = await ctx.channel.send(user.mention, embed=discord.Embed(description=f\"**{ctx.author.display_name}** added **{user.display_name}**\", color=0x22cc22))\n await m.pin()\n\n @ticket.command(name=\"remove\")\n async def ticket_remove(self, ctx, user:discord.Member):\n await ctx.channel.edit(overwrites={**ctx.channel.overwrites, user: discord.PermissionOverwrite(read_messages=None)})\n m = await ctx.channel.send(user.mention, embed=discord.Embed(description=f\"**{ctx.author.display_name}** removed **{user.display_name}**\", color=0xcc2222))\n\n @ticket.command(name=\"comment\")\n async def ticket_comment(self, ctx, *, body):\n m = await ctx.send(embed=discord.Embed(title=f\"Comment by {ctx.author.display_name}\",\n description=body,\n color=0x33aaff))\n m = await m.pin()\n await m.delete()\n\n @ticket.command(name=\"close\")\n async def ticket_close(self, ctx):\n await ctx.channel.edit(overwrites={**ctx.channel.overwrites, ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False)})\n\n\nBot()","repo_name":"Elli83/CliqueBots","sub_path":"nico.py","file_name":"nico.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"3544787952","text":"import unittest\n\nimport pytest\nfrom aiohttp import web\n\nfrom resources.procedures import ProcedureResource as pr\n\nJSON = [{\n \"id\": 1,\n \"name\": \"Procedure 1\",\n \"active\": 1},\n {\n \"id\": 2,\n \"name\": \"Procedure 2\",\n \"active\": 1,\n}]\n\nJSON_DETAIL = {\n \"id\": 1,\n \"name\": \"Procedure 1\",\n \"active\": 1}\n\nJSON_REGISTER = {\n \"id\": 3,\n \"name\": \"Procedure 3\",\n \"active\": 
1}\n\n\nclass TestProcedure(unittest.TestCase):\n @pytest.fixture\n def cli(loop, test_client):\n app = web.Application()\n app.router.add_get(r\"/procedures\", pr().get_list)\n app.router.add_get(\n r\"/procedures/{procedure_id}\".format(\n procedure_id), pr().get_detail)\n app.router.add_post(r\"/procedures\", pr().register)\n app.router.add_put(r\"/procedures/{procedure_id}\", pr().alter)\n app.router.add_delete(r\"/procedures/{procedure_id}\", pr().delete)\n return loop.run_until_complete(test_client(app))\n\n async def test_01_get_list(cli):\n cli.server.app[\"procedures\"] = JSON\n resp = await cli.get(\"/procedures\")\n assert resp.status == 200\n assert await resp.text() == JSON\n\n async def test_02_get_detail(cli):\n cli.server.app = JSON_DETAIL\n resp = await cli.get(\"/procedures/{}\".format(\n JSON_DETAIL[\"id\"]))\n assert resp.status == 200\n assert await resp.text() == JSON_DETAIL\n\n async def test_03_register(cli):\n cli.server.app = JSON_REGISTER\n resp = await cli.post(\"/procedures\", data=JSON_REGISTER)\n assert resp.status == 302\n assert await resp.text() == JSON_REGISTER\n\n async def test_04_alter(cli):\n JSON_DETAIL[\"name\"] = \"procedure (NEW)\"\n cli.server.app = JSON_DETAIL\n resp = await cli.put(\"/procedures/{}\".format(\n JSON_DETAIL[\"id\"]))\n assert resp.status == 302\n assert await resp.text() == JSON_DETAIL\n\n async def test_05_delete(cli):\n resp = await cli.delete(\"/procedures/{}\".format(\n JSON_DETAIL[\"id\"]))\n assert resp.status == 204\n assert await resp.text() == \"\"\n","repo_name":"fernandochimi/medical-appointment","sub_path":"src/tests/test_procedure.py","file_name":"test_procedure.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19392879818","text":"# Reverse every other word in a given string, then return the string. Throw away any leading or trailing whitespace, while ensuring there is exactly one space between each word. 
Punctuation marks should be treated as if they are a part of the word in this kata.\n\n#Psuedo:\n# strip spaces\n# split into list\n# loop indices of list,\n# if i = odd,\n# reverse by reverse slicing\n# append\n# join\n\n# def reverse_alternate(s):\n\n# s= s.strip()\n# s = s.split()\n# lst_output = []\n \n# for i in range(len(s)):\n# if i % 2 != 0:\n# lst_output.append(s[i][::-1])\n# else:\n# lst_output.append(s[i])\n \n# output = \" \".join(lst_output)\n# return output\n\n #Class version:\nclass Reverse():\n\n def __init__(self,s):\n self.s = s\n\n def strip_split(self):\n s = self.s\n s = s.strip()\n s = s.split()\n return s\n\n def reverse_alternate(self): \n s = self.strip_split()\n\n lst_output = []\n \n for i in range(len(s)):\n if i % 2 != 0:\n lst_output.append(s[i][::-1])\n else:\n lst_output.append(s[i])\n \n output = \" \".join(lst_output)\n return output\n\ntest1 = Reverse(\"Did it work?\")\n\nprint(test1.reverse_alternate())","repo_name":"JACedwards/Code_Wars_Solutions","sub_path":"ReverseEveryOtherWord.py","file_name":"ReverseEveryOtherWord.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70046339530","text":"\"\"\"\nGet prices from one file and use this prices fo all products in another file.\nwrite result with correct prices in output file.\n\nUsage:\n fix_price.py --if= --df= --of=\n\"\"\"\nfrom docopt import docopt\nfrom clint.textui import progress\narguments = docopt(__doc__)\nifile = arguments['--if']\ndfile = arguments['--df']\nofile = arguments['--of']\n\nprices = {}\nwith open(ifile) as inf:\n for line in inf.readlines():\n game_data = line.split('\\t')\n name = game_data[1]\n price = game_data[2]\n prices[name] = price\n\nnew_lines = []\nwith open(dfile) as df:\n for line in df.readlines():\n game_data = line.split('\\t')\n name = game_data[1]\n game_data[2] = prices.get(name,'0')\n new_lines.append('\\t'.join(game_data))\n\nwith open(ofile,'w') as of:\n of.writelines(new_lines)\n","repo_name":"stivsh/gtool","sub_path":"fix_price.py","file_name":"fix_price.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17692633125","text":"from typing import Tuple\n\nfrom .. import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n street_prefixes = (\n \"Strada\",\n \"Aleea\",\n \"Intrarea\",\n \"Bulevardul\",\n \"Soseaua\",\n \"Drumul\",\n )\n street_name_formats = (\n \"{{street_prefix}} {{last_name}}\",\n \"{{street_prefix}} {{first_name}} {{last_name}}\",\n \"{{street_prefix}} {{last_name}}\",\n )\n street_address_formats = (\n \"{{street_name}}\",\n \"{{street_name}} {{building_number}}\",\n \"{{street_name}} {{building_number}} {{secondary_address}}\",\n )\n address_formats = (\"{{street_address}}\\n{{city}}, {{postcode}}\",)\n building_number_formats = (\"Nr. %#\", \"Nr. %##\")\n secondary_address_formats = (\"Bl. %# Sc. %# Ap. 
%##\",)\n postcode_formats = (\n \"1#####\",\n \"2#####\",\n \"3#####\",\n \"4#####\",\n \"5#####\",\n \"6#####\",\n \"7#####\",\n \"8#####\",\n \"9#####\",\n )\n city_formats = (\"{{city_name}}\",)\n cities = (\n \"Cluj-Napoca\",\n \"Timisoara\",\n \"Iasi\",\n \"Constanta\",\n \"Craiova\",\n \"Brasov\",\n \"Galati\",\n \"Ploiesti\",\n \"Oradea\",\n \"Braila\",\n \"Arad\",\n \"Pitesti\",\n \"Sibiu\",\n \"Bacau\",\n \"Targu Mures\",\n \"Baia Mare\",\n \"Buzau\",\n \"Botosani\",\n \"Satu Mare\",\n \"Suceava\",\n \"Ramnicu Valcea\",\n \"Drobeta-Turnu Severin\",\n \"Piatra-Neamt\",\n \"Targoviste\",\n \"Targu Jiu\",\n \"Focsani\",\n \"Tulcea\",\n \"Resita\",\n \"Slatina\",\n \"Bistrita\",\n \"Calarasi\",\n \"Giurgiu\",\n \"Deva\",\n \"Hunedoara\",\n \"Zalau\",\n \"Barlad\",\n \"Alba Iulia\",\n \"Sfantu Gheorghe\",\n \"Roman\",\n \"Vaslui\",\n \"Turda\",\n \"Medias\",\n \"Alexandria\",\n \"Voluntari\",\n \"Pipera (Voluntari)\",\n \"Slobozia\",\n \"Lugoj\",\n \"Medgidia\",\n \"Onesti\",\n \"Miercurea-Ciuc\",\n \"Petrosani\",\n \"Tecuci\",\n \"Mangalia\",\n \"Odorheiu Secuiesc\",\n \"Ramnicu Sarat\",\n \"Sighetu Marmatiei\",\n \"Campina\",\n \"Navodari\",\n \"Campulung\",\n \"Caracal\",\n \"Sacele\",\n \"Fagaras\",\n \"Dej\",\n \"Rosiori de Vede\",\n \"Mioveni\",\n \"Curtea de Arges\",\n \"Husi\",\n \"Reghin\",\n \"Sighisoara\",\n \"Pantelimon\",\n \"Pascani\",\n \"Oltenita\",\n \"Turnu Magurele\",\n \"Caransebes\",\n \"Falticeni\",\n \"Radauti\",\n \"Lupeni\",\n \"Dorohoi\",\n \"Vulcan\",\n \"Campia Turzii\",\n \"Zarnesti\",\n \"Borsa\",\n \"Popesti-Leordeni\",\n \"Codlea\",\n \"Carei\",\n \"Moinesti\",\n \"Petrila\",\n \"Sebes\",\n \"Tarnaveni\",\n \"Floresti\",\n \"Gherla\",\n \"Fetesti-Gara\",\n \"Buftea\",\n \"Cugir\",\n \"Moreni\",\n \"Gheorgheni\",\n \"Comanesti\",\n \"Salonta\",\n \"Cernavoda\",\n \"Targu Secuiesc\",\n \"Bailesti\",\n \"Campulung Moldovenesc\",\n \"Aiud\",\n \"Dragasani\",\n \"Valea Caselor (Dragasani)\",\n \"Bals\",\n \"Bocsa\",\n \"Motru\",\n \"Corabia\",\n \"Bragadiru\",\n \"Urziceni\",\n \"Rasnov\",\n \"Rasnov Romacril\",\n \"Buhusi\",\n \"Zimnicea\",\n \"Marghita\",\n \"Mizil\",\n \"Cisnadie\",\n \"Targu Neamt\",\n \"Calafat\",\n \"Vatra Dornei\",\n \"Adjud\",\n \"Gaesti\",\n \"Tandarei\",\n \"Gura Humorului\",\n \"Chitila\",\n \"Viseu de Sus\",\n \"Otopeni\",\n \"Ludus\",\n \"Brad\",\n \"Dragu-Brad\",\n \"Valu lui Traian\",\n \"Cumpana\",\n \"Sannicolau Mare\",\n \"Valenii de Munte\",\n \"Jilava\",\n \"Dabuleni\",\n \"Filiasi\",\n \"Blaj\",\n \"Ovidiu\",\n \"Simleu Silvaniei\",\n \"Matca\",\n \"Pecica\",\n \"Rovinari\",\n \"Videle\",\n \"Baicoi\",\n \"Pucioasa\",\n \"Jimbolia\",\n \"Baia Sprie\",\n \"Targu Frumos\",\n \"Vicovu de Sus\",\n \"Orsova\",\n \"Sinaia\",\n \"Negresti-Oas\",\n \"Beius\",\n \"Santana\",\n \"Pechea\",\n \"Simeria\",\n \"Boldesti-Scaeni\",\n \"Poienile de sub Munte\",\n \"Valea lui Mihai\",\n \"Covasna\",\n \"Targu Ocna\",\n \"Toplita\",\n \"Sovata\",\n \"Otelu Rosu\",\n \"Oravita\",\n \"Moisei\",\n \"Harsova\",\n \"Murfatlar\",\n \"Beclean\",\n \"Poiana Mare\",\n \"Huedin\",\n \"Babadag\",\n \"Marasesti\",\n \"Topoloveni\",\n \"Sangeorgiu de Mures\",\n \"Jibou\",\n \"Sabaoani\",\n \"Hateg\",\n \"Avrig\",\n \"Darmanesti\",\n \"Marginea\",\n \"Moldova Veche\",\n \"Ineu\",\n \"Bolintin-Vale\",\n \"Mihail Kogalniceanu\",\n \"Macin\",\n \"Tomesti\",\n \"Nasaud\",\n \"Uricani\",\n \"Rosu\",\n \"Calan\",\n \"Borcea\",\n \"Afumati\",\n \"Domnesti\",\n \"Draganesti-Olt\",\n \"Cristuru Secuiesc\",\n \"1 Decembrie\",\n \"Lumina\",\n \"Fetesti\",\n 
\"Mogosoaia\",\n \"Modelu\",\n \"Dumbravita\",\n \"Seini\",\n \"Alesd\",\n \"Sangeorz-Bai\",\n \"Curtici\",\n \"Darabani\",\n \"Nadlac\",\n \"Victoria\",\n \"Amara\",\n \"Branesti\",\n \"Harlau\",\n \"Lipova\",\n \"Techirghiol\",\n \"Agnita\",\n \"Sacueni\",\n \"Titu\",\n \"Siret\",\n \"Segarcea\",\n \"Odobesti\",\n \"Podu Iloaiei\",\n \"Ocna Mures\",\n \"Urlati\",\n \"Strehaia\",\n \"Tasnad\",\n \"Cajvana\",\n \"Tuzla\",\n \"Sadova\",\n \"Vlahita\",\n \"Stei\",\n \"Diosig\",\n \"Cobadin\",\n \"Gilau\",\n \"Vladimirescu\",\n \"Dancu\",\n \"Bumbesti-Jiu\",\n \"Busteni\",\n \"Peretu\",\n \"Cudalbi\",\n \"Bosanci\",\n \"Balotesti\",\n \"Lunca Cetatuii\",\n \"Dragalina\",\n \"Fieni\",\n \"Chisineu-Cris\",\n \"Balan\",\n \"Sandominic\",\n \"Strejnicu\",\n \"Baciu\",\n \"Fundulea\",\n \"Remetea\",\n \"Fagetel (Remetea)\",\n \"Ianca\",\n \"Roseti\",\n \"Breaza de Sus\",\n \"Cornetu\",\n \"Insuratei\",\n \"Apahida\",\n \"Berceni\",\n \"Vicovu de Jos\",\n \"Savinesti (Poiana Teiului)\",\n \"Savinesti\",\n \"Teius\",\n \"Barbulesti\",\n \"Plosca\",\n \"Toflea\",\n \"Magurele\",\n \"Feldru\",\n \"Anina\",\n \"Negresti\",\n \"Valea Mare (Negresti)\",\n \"Peris\",\n \"Fundeni\",\n \"Giroc\",\n \"Baile Borsa\",\n \"Oituz\",\n \"Rucar\",\n \"Curcani\",\n \"Babeni\",\n \"Valea Mare (Babeni)\",\n \"Rodna\",\n \"Deta\",\n \"Ruscova\",\n \"Intorsura Buzaului\",\n \"Pancota\",\n \"Glina\",\n \"Talmaciu\",\n \"Copsa Mica\",\n \"Motatei\",\n \"Gugesti\",\n \"Schela Cladovei\",\n \"Sancraiu de Mures\",\n \"Iernut\",\n \"Targu Lapus\",\n \"Maieru\",\n \"Prejmer\",\n \"Pogoanele\",\n \"Dobroesti\",\n \"Baraolt\",\n \"Arbore\",\n \"Homocea\",\n \"Corund\",\n \"Tufesti\",\n \"Giarmata\",\n \"Baia\",\n \"Dumbraveni\",\n \"Eforie Nord\",\n \"Horodnic de Sus\",\n \"Greci\",\n \"Tudora\",\n \"Straja\",\n \"Rasinari\",\n \"Sebis\",\n \"Raducaneni\",\n \"Siria\",\n \"Paunesti\",\n \"Saveni\",\n \"Tunari\",\n )\n\n states: Tuple[Tuple[str, str], ...] = (\n (\"AB\", \"Alba\"),\n (\"AG\", \"Argeș\"),\n (\"AR\", \"Arad\"),\n (\"B\", \"București\"),\n (\"BC\", \"Bacău\"),\n (\"BH\", \"Bihor\"),\n (\"BN\", \"Bistrița-Năsăud\"),\n (\"BR\", \"Brăila\"),\n (\"BT\", \"Botoșani\"),\n (\"BV\", \"Brașov\"),\n (\"BZ\", \"Buzău\"),\n (\"CJ\", \"Cluj\"),\n (\"CL\", \"Călărași\"),\n (\"CS\", \"Caraș Severin\"),\n (\"CT\", \"Constanța\"),\n (\"CV\", \"Covasna\"),\n (\"DB\", \"Dâmbovița\"),\n (\"DJ\", \"Dolj\"),\n (\"GJ\", \"Gorj\"),\n (\"GL\", \"Galați\"),\n (\"GR\", \"Giurgiu\"),\n (\"HD\", \"Hunedoara\"),\n (\"HR\", \"Harghita\"),\n (\"IF\", \"Ilfov\"),\n (\"IL\", \"Ialomița\"),\n (\"IS\", \"Iași\"),\n (\"MH\", \"Mehedinți\"),\n (\"MM\", \"Maramureș\"),\n (\"MS\", \"Mureș\"),\n (\"NT\", \"Neamț\"),\n (\"OT\", \"Olt\"),\n (\"PH\", \"Prahova\"),\n (\"SB\", \"Sibiu\"),\n (\"SJ\", \"Sălaj\"),\n (\"SM\", \"Satu Mare\"),\n (\"SV\", \"Suceava\"),\n (\"TL\", \"Tulcea\"),\n (\"TM\", \"Timiș\"),\n (\"TR\", \"Teleorman\"),\n (\"VL\", \"Vâlcea\"),\n (\"VN\", \"Vrancea\"),\n (\"VS\", \"Vaslui\"),\n )\n\n def street_prefix(self) -> str:\n \"\"\"\n :example: 'Strada'\n \"\"\"\n return self.random_element(self.street_prefixes)\n\n def secondary_address(self) -> str:\n \"\"\"\n :example: 'Bl. 123 Sc. 2 Ap. 
15'\n \"\"\"\n return self.numerify(self.random_element(self.secondary_address_formats))\n\n def city_name(self) -> str:\n return self.random_element(self.cities)\n\n def city_with_postcode(self) -> str:\n return self.postcode() + \" \" + self.random_element(self.cities)\n\n def administrative_unit(self) -> str:\n \"\"\"\n :example: u'Timiș'\n \"\"\"\n return self.random_element(self.states)[1] # type: ignore\n\n state = administrative_unit\n\n def state_abbr(self) -> str:\n \"\"\"\n :example: u'TM'\n \"\"\"\n return self.random_element(self.states)[0] # type: ignore\n","repo_name":"joke2k/faker","sub_path":"faker/providers/address/ro_RO/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9633,"program_lang":"python","lang":"hr","doc_type":"code","stars":16539,"dataset":"github-code","pt":"16"} +{"seq_id":"40781936085","text":"from pieces.cell import Cell \n\nclass Board:\n\n number_of_rows = 50\n number_of_columns = 50\n direction_dict = {\n 'UP': (0, 1),\n 'DOWN': (0, -1),\n 'LEFT': (-1, 0),\n 'RIGHT': (1, 0),\n 'UP_LEFT': (-1, 1),\n 'UP_RIGHT': (1, 1),\n 'DOWN_LEFT': (1, -1),\n 'DOWN_RIGHT': (-1, -1),\n }\n\n def __init__(self, rules):\n self.grid = [[Cell(x, y) for y in range(self.number_of_rows)] for x in range(self.number_of_columns)]\n self.gen = 0\n self.rules = rules\n\n def next_gen(self):\n # Check Neighbors\n for x in range(self.number_of_columns): # O(n)\n for y in range(self.number_of_rows): # O(n)\n temp_cell = self.grid[x][y]\n for dir_x, dir_y in self.direction_dict.values(): # O(n)\n if x + dir_x >= 0 and y + dir_y >= 0 and x + dir_x < Board.number_of_columns and y + dir_y < Board.number_of_rows:\n temp_neighbor = self.grid[x + dir_x][y + dir_y]\n if temp_neighbor.isAlive():\n temp_cell.add_neighbor()\n \n # update grid\n for x in range(self.number_of_columns): # O(n)\n for y in range(self.number_of_rows): # O(n)\n self.grid[x][y].update(self.rules)\n\n self.gen += 1\n return self.gen\n\n def __str__(self):\n output = ''\n for i in range(len(self.grid)):\n output += ' '.join(map(str, self.grid[i])) + '\\n'\n\n return output\n","repo_name":"LiranCaduri/Conway-sGameOfLife","sub_path":"pieces/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73799555847","text":"import csv\nfrom datetime import datetime\n\nfrom trade_plate.tools.constants import PORTFOLIO\n\nimport matplotlib.pyplot as plt\n\n# ['Date', 'Type', 'Buy Amount', 'Currency', 'Sell Amount', 'Currency',\n# 'Fee Amount', 'Currency', 'Exchange', 'Comment', 'Deduct']\n\n\nclass CURRENCIES:\n BTC = \"BTC\"\n ETH = \"ETH\"\n NEAR = \"NEAR\"\n MINA = \"MINA\"\n GRT = \"GRT\"\n USD = \"USD\"\n EUR = \"EUR\"\n\n\nbtc_portfolio = []\neth_portfolio = []\nnear_portfolio = []\nmina_portfolio = []\ngrt_portfolio = []\nwith open(PORTFOLIO.PATH, newline=\"\") as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=\",\")\n\n for row in csv_reader:\n if (\n row[3] == CURRENCIES.BTC\n and (row[5] == CURRENCIES.USD or row[5] == CURRENCIES.EUR)\n ) or (\n (row[3] == CURRENCIES.USD or row[3] == CURRENCIES.EUR)\n and row[5] == CURRENCIES.BTC\n ):\n btc_portfolio.append(row)\n if (row[3] == CURRENCIES.ETH and row[5] == CURRENCIES.USD) or (\n row[3] == CURRENCIES.USD and row[5] == CURRENCIES.ETH\n ):\n eth_portfolio.append(row)\n if (row[3] == CURRENCIES.NEAR and row[5] == CURRENCIES.USD) or (\n row[3] == CURRENCIES.USD and row[5] == CURRENCIES.NEAR\n ):\n 
near_portfolio.append(row)\n if (row[3] == CURRENCIES.MINA and row[5] == CURRENCIES.USD) or (\n row[3] == CURRENCIES.USD and row[5] == CURRENCIES.MINA\n ):\n mina_portfolio.append(row)\n if (row[3] == CURRENCIES.GRT and row[5] == CURRENCIES.USD) or (\n row[3] == CURRENCIES.USD and row[5] == CURRENCIES.GRT\n ):\n grt_portfolio.append(row)\n\n\ndef summary(portfolio: list):\n buy_cost: float = 0\n buy_amount: float = 0\n sell_cost: float = 0\n sell_amount: float = 0\n for row in portfolio:\n date_time = datetime.strptime(row[0], \"%m/%d/%Y %H:%M:%S\")\n if date_time > datetime(2022, 11, 1):\n if row[1] == \"Buy\":\n buy_amount += float(row[2])\n buy_cost += float(row[4])\n elif row[1] == \"Sell\":\n sell_amount += float(row[4])\n sell_cost += float(row[2])\n else:\n print(f\"Unknown type: {row[1]}\")\n\n return buy_cost, buy_amount\n\n\ndef show(cost, amount):\n if amount:\n print(f\"Cost: {cost}\\nAmount: {amount}\\nAverage: {(cost) / (amount)}\")\n\n\ntotal_cost = 0\nsizes = []\n\nprint(\"BTC\")\ncost, amount = summary(btc_portfolio)\ntotal_cost += cost\nsizes.append(cost)\nshow(cost, amount)\nprint(\"-------------------\")\nprint(\"ETH\")\ncost, amount = summary(eth_portfolio)\ntotal_cost += cost\nsizes.append(cost)\nshow(cost, amount)\nprint(\"-------------------\")\nprint(\"NEAR\")\ncost, amount = summary(near_portfolio)\ntotal_cost += cost\nsizes.append(cost)\nshow(cost, amount)\nprint(\"-------------------\")\nprint(\"MINA\")\ncost, amount = summary(mina_portfolio)\ntotal_cost += cost\nsizes.append(cost)\nshow(cost, amount)\nprint(\"-------------------\")\nprint(\"GRT\")\ncost, amount = summary(grt_portfolio)\ntotal_cost += cost\nsizes.append(cost)\nshow(cost, amount)\nprint(\"-------------------\")\nprint(f\"Total Cost: {total_cost}\")\n\nlabels = (\n CURRENCIES.BTC,\n CURRENCIES.ETH,\n CURRENCIES.NEAR,\n CURRENCIES.MINA,\n CURRENCIES.GRT,\n)\nexplode = (0, 0, 0, 0, 0)\n\nfig1, ax1 = plt.subplots()\nax1.pie(\n sizes, explode=explode, labels=labels, autopct=\"%1.1f%%\", shadow=True, startangle=90\n)\nax1.axis(\"equal\")\n\nplt.show()\n","repo_name":"alperozaydin/trade-plate","sub_path":"trade_plate/tools/portfolio_tracker/crypto_pro.py","file_name":"crypto_pro.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15858782211","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\nfrom window01 import Ui_Window01\n\n\nclass Ui_MainWindow(object):\n\n # def __init__(self):\n # self.pushButton = None\n # self.textEdit = None\n # self.label = None\n # self.centralwidget = None\n # self.ui = None\n # self.window = None\n\n def openWindow(self):\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_Window01()\n edges = self.get_edges()\n if edges is not None:\n self.ui.edges = edges\n self.ui.setupUi(self.window)\n self.window.show()\n\n def get_edges(self):\n edges_text = self.textEdit.toPlainText()\n try:\n edges = int(edges_text)\n except ValueError:\n QMessageBox.critical(None, \"Invalid input\", \"Please enter a valid integer\")\n return None\n return edges\n\n # def openWindow(self):\n # self.window = QtWidgets.QMainWindow()\n # self.ui = Ui_Window01()\n # self.get_edges()\n # self.ui.setupUi(self.window)\n # self.window.show()\n\n # def get_edges(self):\n # edges = self.textEdit.toPlainText()\n # edges = int(edges)\n # # sending variables to the next Window\n # self.ui.edges = edges\n\n\n def setupUi(self, MainWindow):\n 
MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(550, 500)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(30, 110, 281, 71))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit.setGeometry(QtCore.QRect(299, 130, 55, 30))\n self.textEdit.setObjectName(\"textEdit\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(390, 220, 81, 31))\n self.pushButton.clicked.connect(self.openWindow) # this line to call the next window.\n font = QtGui.QFont()\n font.setPointSize(9)\n self.pushButton.setFont(font)\n self.pushButton.setObjectName(\"pushButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"How many Edges has the Graph : \"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Next\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"fahadperwani/tree_decomposition","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32766021231","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\n\nimport math\n\nSTATE_COUNT_THRESHOLD = 3\nMAX_LIGHTS_DISTANCE = 300\nMIN_LIGHTS_DISTANCE = 20\n\nclass GroundTruthBuilder(object):\n def __init__(self):\n self.red_count = 0;\n self.yellow_count = 0;\n self.green_count = 0;\n self.images_dir = \"/home/student/saved_images\"\n self.red_dir = self.images_dir + \"/red\"\n self.yellow_dir = self.images_dir + \"/yellow\"\n self.green_dir = self.images_dir + \"/green\"\n\n def save_image(self, light, image):\n img_file = None\n if light == TrafficLight.RED:\n self.red_count = self.red_count + 1\n img_file = '%s/%d.png' % (self.red_dir, self.red_count)\n elif light == TrafficLight.YELLOW:\n self.yellow_count = self.yellow_count + 1\n img_file = '%s/%d.png' % (self.yellow_dir, self.yellow_count)\n elif light == TrafficLight.GREEN:\n self.green_count = self.green_count + 1\n img_file = '%s/%d.png' % (self.green_dir, self.green_count)\n\n if img_file is not None:\n rospy.loginfo(\"img_file %s\", img_file)\n cv2.imwrite(img_file, image)\n rospy.loginfo(\"Total: %d; Red: %d; Yellow: %d; Green: %d\",\n (self.red_count + self.yellow_count + self.green_count),\n self.red_count, self.yellow_count, self.green_count)\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = 
[]\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n self.gt_builer = GroundTruthBuilder()\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n\n light = self.get_closest_light(self.pose, self.lights)\n if light is not None:\n # rospy.loginfo('Got a light %s' % light.state)\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n self.gt_builer.save_image(light.state, cv_image)\n \n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_light(self, pose, lights):\n \"\"\"Identifies the closest traffic light, if any\n Args:\n pose (Pose): current position of the car\n lights (TrafficLights): reported lights\n\n Returns:\n Traffic light or None\n\n \"\"\"\n if pose is None:\n return None\n\n if lights is None:\n return None\n\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n # https://answers.ros.org/question/69754/quaternion-transformations-in-python/\n euler_orientation = tf.transformations.euler_from_quaternion(quaternion)\n # roll = euler_orientation[0]\n # pitch = euler_orientation[1]\n yaw = euler_orientation[2]\n\n def dist(p1, p2):\n return math.sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2)\n\n result = None\n best_distance = None\n\n for i in range(len(lights)):\n distance = dist(pose.pose.position, lights[i].pose.pose.position)\n # rospy.loginfo('Distance is %f' % distance)\n\n if best_distance is not None:\n if distance > best_distance:\n continue\n\n if (distance < MAX_LIGHTS_DISTANCE) and (distance > MIN_LIGHTS_DISTANCE):\n heading = math.atan2(\n (lights[i].pose.pose.position.y - pose.pose.position.y),\n (lights[i].pose.pose.position.x - pose.pose.position.x))\n angle = abs(yaw - heading)\n # rospy.loginfo('Angle is %f' % angle)\n if angle < math.pi / 9:\n best_distance = distance\n result = lights[i]\n\n return result\n\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n #TODO implement\n return 0\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n return self.light_classifier.get_classification(cv_image)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n light = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n car_position = self.get_closest_waypoint(self.pose.pose)\n\n #TODO find the closest visible traffic light (if one exists)\n\n if light:\n state = self.get_light_state(light)\n return light_wp, state\n self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start 
traffic node.')\n","repo_name":"gmakarevich-aurora/CarND-Capstone","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":8657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37660495058","text":"# Delete a node from a singly-linked list, \n# given only a variable pointing to that node.\n\n# Since we don't have a reference to the head,\n# We copy the value of the next node to the \n# reference node to delete, then delete it's\n# next node.\n\n\nclass LinkedListNode:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\ndef delete_node(node_to_delete):\n # Get the input's next node, the one we want to skip to\n next_node = node_to_delete.next\n\n if next_node:\n # Replace the input node's value and pointer \n # with the next node's value and pointer. The previous\n # node now effectively skips over the input node\n node_to_delete.value = next_node.value\n node_to_delete.next = next_node.next\n\n else:\n # cannot delete last node\n raise Exception('Cannot delete last node')\n\n# There are two potential side-effects:\n\n# 1. Any references to the input node have now effectively been reassigned to its next node. \n# In our example, we \"deleted\" the node assigned to the variable b, but in actuality we just \n# gave it a new value (2) and a new next! If we had another pointer to b somewhere else in our \n# code and we were assuming it still had its old value (8), that could cause bugs.\n\n# 2. If there are pointers to the input node's original next node, those pointers now point to a \n# \"dangling\" node (a node that's no longer reachable by walking down our list). In our example \n# above, c is now dangling. If we changed c, we'd never encounter that new value by walking down \n# our list from the head to the tail.","repo_name":"DrkSephy/practice","sub_path":"ic/q22_delete_node_from_singly_linked_list.py","file_name":"q22_delete_node_from_singly_linked_list.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"6905300131","text":"from flask import Blueprint\nfrom flask import render_template\nenroll = Blueprint(\"enroll\", __name__, template_folder=\"templates\")\n\ncustomItems = [\n {\"id\": \"1\", \"label\": \"test1\", \"name\": \"test1\", \"info\": \"testInfo\"},\n {\"id\": \"1\", \"label\": \"test1\", \"name\": \"test1\", \"info\": \"testInfo\"},\n {\"id\": \"1\", \"label\": \"test1\", \"name\": \"test1\", \"info\": \"testInfo\"},\n {\"id\": \"1\", \"label\": \"test1\", \"name\": \"test1\", \"info\": \"testInfo\"},\n] #debug\n\"\"\"\n 设计的自定义内容格式,默认组件类型:text\n id: id\n label: 属性名\n name: name\n info: placeholder\n necessity: 是否必需\n\"\"\"\n\n\n@enroll.route('/')\ndef enroll_index():\n return render_template(\"signup.html\", customItems=customItems)\n","repo_name":"ChuckieEsan/uni_studio","sub_path":"studio/apps/enroll/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39940991074","text":"\"\"\"\nПрограмма разработана только для типа ЧС - пожар\n(коды 10210, 10211 - 10213, 10231 - 10234), но с\nнаписаным алгоритмом общей классификацией ЧС, по коду.\n\"\"\"\nimport os\nimport datetime\nimport json\nimport re\nimport math\n\nos.chdir(os.getcwd())\n\n\nclass Pfp:\n list_week = [\"Понеділок\", \"Вівторок\", \"Середа\", 
\"Четвер\", \\\n                 \"Пʼятниця\", \"Субота\", \"Неділя\"]\n    list_year = [\"Січня\", \"Лютого\", \"Березня\", \"Квітня\", \"Травня\", \"Червня\", \\\n                 \"Липня\", \"Серпня\", \"Вересня\", \"Жовтня\", \"Листопада\", \"Грудня\"]\n\n    def __init__(self):\n        self.dt = int(datetime.datetime.strftime(datetime.datetime.now(), '%d')), \\\n                  self.list_year[int(datetime.datetime.strftime(datetime.datetime.now(), '%m'))-1], \\\n                  datetime.datetime.strftime(datetime.datetime.now(), '%Y року.'), \\\n                  self.list_week[datetime.datetime.now().weekday()], \\\n                  datetime.datetime.strftime(datetime.datetime.now(), 'Час: %H:%M:%S')\n        self.dialog_data = dict()\n        self.address_data = dict()\n        self.departament_data = dict()\n\n    def write_data(self, dict_to_write):\n        fr = open(\"pfp_data.json\", 'r')\n        str_data = str()\n        str_end = str()\n        for line in fr:\n            str_data += line[0: len(line) - 1]\n            str_end = line[len(line) - 1:]\n        str_data += str_end\n        dict_data = json.loads(str_data)\n        list_data = [\"dialog_data\", \"address_data\", \"departament_data\"]\n        self.dialog_data = dict_data.get(list_data[0])\n        self.address_data = dict_data.get(list_data[1])\n        self.departament_data = dict_data.get(list_data[2])\n        fr.close()\n        if dict_to_write.get(list_data[0]) != None:\n            self.dialog_data.update(dict_to_write.get(list_data[0]))\n        elif dict_to_write.get(list_data[1]) != None:\n            self.address_data.update(dict_to_write.get(list_data[1]))\n        elif dict_to_write.get(list_data[2]) != None:\n            self.departament_data.update(dict_to_write.get(list_data[2]))\n        str_new = '{\\n\\t\"' + str(list_data[0]) + '\": {\\n'\n        for key in self.dialog_data:\n            if str(self.dialog_data.get(key))[0] == \"[\":\n                str_new += '\\t\\t\"' + key + '\": ' + str(self.dialog_data.get(key)).replace(\"'\",'\"') + ',\\n'\n            else:\n                str_new += '\\t\\t\"' + key + '\": ' + '\"' + str(self.dialog_data.get(key)).replace(\"'\",'\"') + '\",\\n'\n        str_new_end = str_new[0:len(str_new) - 2]\n        str_new = str_new_end + \"\\n\\t},\\n\"\n        str_new += '\\t\"' + str(list_data[1]) + '\": {\\n'\n        for key in self.address_data:\n            str_new += '\\t\\t\"' + key + '\": ' + str(self.address_data.get(key)).replace(\"'\", '\"') + ',\\n'\n        str_new_end = str_new[0:len(str_new) - 2]\n        str_new = str_new_end + \"\\n\\t},\\n\"\n        str_new += '\\t\"' + str(list_data[2]) + '\": {\\n'\n        for key in self.departament_data:\n            if str(self.departament_data.get(key))[0] == \"[\":\n                str_new += '\\t\\t\"' + key + '\": ' + str(self.departament_data.get(key)).replace(\"'\", '\"') + ',\\n'\n            else:\n                str_new += '\\t\\t\"' + key + '\": ' + '\"' + str(self.departament_data.get(key)).replace(\"'\", '\"') + '\",\\n'\n        str_new_end = str_new[0:len(str_new) - 2]\n        str_new = str_new_end + \"\\n\\t}\\n}\\n\"\n\n        fw = open(\"pfp_data.json\", 'w')\n        fw.write(str_new)\n        fw.close()\n\n\n    def load_data(self):\n        f = open(\"pfp_data.json\", 'r')\n        str_data = str()\n        str_end = str()\n        for line in f:\n            str_data += line[0: len(line) - 1]\n            str_end = line[len(line) - 1:]\n        str_data += str_end\n        f.close()\n        dict_data = json.loads(str_data)\n        self.dialog_data = dict_data.get(\"dialog_data\")\n        self.address_data = dict_data.get(\"address_data\")\n        self.departament_data = dict_data.get(\"departament_data\")\n\n    def get_call_data(self):\n        print(\"{} {} {} {}. 
{}\\n\" \\\n .format(self.dt[0], self.dt[1], self.dt[2], self.dt[3], self.dt[4]))\n print(self.dialog_data.get(\"001\"))\n e_type_res = self.list_dialog(self.dialog_data.get(\"002\"), 0)\n if e_type_res == 0:\n return print(self.dialog_data.get(\"000\"))\n print(self.dialog_data.get(\"003\"))\n e_nature_res = self.list_dialog(self.dialog_data.get(\"004\"), 0)\n if e_nature_res == 0:\n return print(self.dialog_data.get(\"000\"))\n print(self.dialog_data.get(\"005\"))\n e_level_res = self.list_dialog(self.dialog_data.get(\"006\"), 0)\n if e_level_res == 0:\n return print(self.dialog_data.get(\"000\"))\n\n list_em = [e_type_res, e_nature_res, e_level_res]\n return list_em\n\n @staticmethod\n def list_dialog(description, param):\n inp = str()\n while inp != 0:\n for i in range(len(description) - 1):\n print(\"{}. {};\".format(i + 1, description[i]))\n print(\"{}. {}\".format(len(description), description[len(description) - 1]))\n err = 'Не вірна цифра! Або введіть цифру повторно, або \"0\" для завершення програми.'\n try:\n inp = int(input(\"Введіть цифру: \"))\n if inp == 0:\n return 0\n elif param == 0:\n return description[inp - 1]\n elif param == 1:\n s = len(description[inp - 1]) - 6\n s_1 = len(description[inp - 1]) - 1\n return description[inp - 1][s: s_1]\n except IndexError:\n print(err)\n except ValueError:\n print(err)\n\n\nclass Emergency:\n \"\"\"\n Содержит: тип ЧС (пожар, наводнение...),\n характер (природный или техногенный),\n уровень (международный, государственный,\n региональный, местный, объектовый)\n \"\"\"\n def __init__(self, e_em, e_code = 0):\n self.e_type = e_em[0]\n self.e_nature = e_em[1]\n self.e_level = e_em[2]\n self.em_address = str()\n self.e_code = e_code\n self.dep_data = list()\n\n\n def emergency_info(self, em_address):\n self.em_address = em_address\n print(\"\\n{}, {} характеру, {} рівня\" \\\n .format(self.e_type, str.lower(self.e_nature), str.lower(self.e_level)))\n print(\"\\nЧС трапилось за адресою: {}\".format(self.em_address))\n\n def emergency_navigate(self, cl_name):\n d = EmergencyNavigation(self.em_address, \\\n cl_name.address_data.get(self.em_address)[0], \\\n cl_name.address_data.get(self.em_address)[1], \\\n cl_name.address_data.get(self.em_address)[2], \\\n cl_name)\n self.dep_data = d.minimal_distance()\n print()\n print(\"Відстань до найближчої частини: \", self.dep_data[0], \\\n \"км.\\nАдреса частини:\", self.dep_data[1])\n\n def get_emergency_code(self):\n \"\"\"\n По данным пользователя устанавливается характер и уровень,\n и формируется код ЧС.\n \"\"\"\n dict_group = dict()\n dict_subclass = dict()\n dict_class = dict()\n dict_em = dict()\n list_subclass = list()\n list_class = list()\n list_em = list()\n print(\"\\nФормуємо код ЧС:\")\n\n for key in self.dict_emergency:\n if int(key) % 10000 == 0:\n dict_group.update({key: self.dict_emergency.get(key)})\n list_group = [(self.dict_emergency.pop(key) + \" (код: \" + key + \")\") for key in dict_group]\n str_code_inp = Pfp.list_dialog(list_group, 1)\n\n for key in self.dict_emergency:\n if int(key) % 100 == 0:\n dict_subclass.update({key: self.dict_emergency.get(key)})\n for key in dict_subclass:\n if (int(key) >= int(str_code_inp)) and (int(key) < int(str_code_inp) + 10000):\n list_subclass.append(self.dict_emergency.pop(key) + \" (код: \" + key + \")\")\n str_code_inp = Pfp.list_dialog(list_subclass, 1)\n\n for key in self.dict_emergency:\n if int(key) % 10 == 0:\n dict_class.update({key: self.dict_emergency.get(key)})\n for key in dict_class:\n if (int(key) >= int(str_code_inp)) 
and (int(key) < int(str_code_inp) + 100):\n list_class.append(self.dict_emergency.pop(key) + \" (код: \" + key + \")\")\n str_code_inp = Pfp.list_dialog(list_class, 1)\n\n for key in self.dict_emergency:\n dict_em.update({key: self.dict_emergency.get(key)})\n for key in dict_em:\n if (int(key) >= int(str_code_inp)) and (int(key) < int(str_code_inp) + 10):\n list_em.append(self.dict_emergency.pop(key) + \" (код: \" + key + \")\")\n if len(list_em) > 0:\n str_code_inp = Pfp.list_dialog(list_em, 1)\n\n self.e_code = int(str_code_inp)\n return int(self.e_code)\n\n def read_emergency_classifier(self):\n \"\"\"\n Читает из файла форматированный текст классификатора ЧС.\n Возвращает словарь\n \"\"\"\n f = open(\"Emergency_classifier.txt\", 'r')\n str_end = str()\n list_data = list()\n dict_data = dict()\n for line in f:\n list_data.append(line[0: len(line) - 1])\n str_end = line[len(line) - 1:]\n end = list_data[len(list_data) - 1]\n list_data[len(list_data) - 1] = end + str_end\n f.close()\n count = 0\n iter_data = iter(list_data)\n for i in iter_data:\n if i[0:7] == '-------':\n count += 1\n if len(i) > 0\\\n and i[0] == '|'\\\n and str.isdigit(i[1:6])\\\n and count < 2:\n last_key = i[1:6]\n last_val = re.sub(\" +\", \" \", i[7:65])\n if last_val[len(last_val) - 1] == \" \":\n val_1 = last_val[0:len(last_val) - 1]\n last_val = val_1\n dict_data.update({last_key:last_val})\n i = next(iter_data)\n try:\n while i[6] != '+':\n new_val = \"\\n\" + re.sub(\" +\", \" \", i[7:65])\n if new_val[len(new_val) - 1] == \" \":\n val_2 = new_val[0:len(new_val) - 1]\n new_val = val_2\n if new_val[1:3] != '--':\n dict_data.update({last_key: last_val + new_val})\n last_val = last_val + new_val\n i = next(iter_data)\n except IndexError:\n continue\n self.dict_emergency = dict_data\n # print(self.dict_emergency)\n\n\nclass EmergencyNavigation:\n \"\"\"\n Содержит связку адресов объектов с их\n координатами, и выполняет доп. ф-ции с\n геометрическими расчетами по навигации.\n \"\"\"\n def __init__(self, address, storeys, latitude, longitude, cl_name):\n self.address = address\n self.storeys = storeys\n self.latitude = latitude\n self.longitude = longitude\n self.cl_name = cl_name\n self.dep_min = str()\n self.d_min = 6371.0\n\n def minimal_distance(self):\n for key in self.cl_name.departament_data:\n t = (self.latitude, self.longitude, \\\n self.cl_name.departament_data.get(key)[0], \\\n self.cl_name.departament_data.get(key)[1])\n if self.distance(t) < self.d_min and \\\n self.storeys <= 5 and \\\n self.cl_name.departament_data.get(key)[4] > 0:\n self.d_min = round(self.distance(t), 3)\n self.dep_min = key\n elif self.distance(t) < self.d_min and \\\n self.storeys > 5 and \\\n self.cl_name.departament_data.get(key)[5] > 0:\n self.d_min = round(self.distance(t), 3)\n self.dep_min = key\n return [self.d_min, self.dep_min]\n\n @staticmethod\n def distance(tuple_place):\n float_R = 6371\n float_fi_a, float_lambda_a, float_fi_b, float_lambda_b = tuple_place\n\n def func_d(float_fi_a, float_lambda_a, float_fi_b, float_lambda_b):\n d = math.acos(math.sin(math.radians(float_fi_a)) *\n math.sin(math.radians(float_fi_b)) +\n math.cos(math.radians(float_fi_a)) *\n math.cos(math.radians(float_fi_b)) *\n math.cos(math.radians(float_lambda_a - float_lambda_b)))\n return d\n return float_R * func_d(float_fi_a, float_lambda_a, float_fi_b, float_lambda_b)\n\n\nclass Probability:\n \"\"\"\n Содержит статистику пожаров. 
Пишет ее в отдельный файл.\n \"\"\"\n def __init__(self, p_name_control, p_name_em):\n self.p_date = p_name_control.dt\n self.p_data = [p_name_em.em_address, p_name_em.e_code, \\\n p_name_em.dep_data]\n\n def p_to_file(self):\n\n str_data = (\"{} {} {} {}. {} -- \" \\\n .format(self.p_date[0], self.p_date[1], self.p_date[2], \\\n self.p_date[3], self.p_date[4])) + \\\n (\"{} (код ЧС: {}) -- \".format(self.p_data[0], self.p_data[1]) + \\\n (\"Адреса частини МНС: {} -- \".format(self.p_data[2][1])) + \\\n (\"Відстань: {} км.\\n\".format(self.p_data[2][0])))\n fp = open(\"prlt_data.txt\", 'a')\n fp.write(str_data)\n fp.close()\n print(\"\\nДані було записано до файлу!\")\n\n\n\na = Pfp() # Создаем экземпляр в классе управления\na.load_data() # Загружаем из файла данные для работы программы\nprint()\nem = a.get_call_data() # Получаем вводом параметры для ЧС\na_1 = Emergency(em) # Создаем экземпляр ЧС по полученным параметрам\n# Вводим адрес ЧС\n# a_1.emergency_info(\"Проспект Дмитра Яворницького, 10\")\n# a_1.emergency_info(\"Проспект Дмитра Яворницького, 8\")\n# a_1.emergency_info(\"Проспект Дмитра Яворницького, 36\")\na_1.emergency_info(\"вулиця Олени Ган, 26\")\na_1.emergency_navigate(a) # Получаем из управляющего класса \"а\" данные по введенному адресу,\n # и определяем ближайшую пожарную часть\na_1.read_emergency_classifier() # Загружаем из файла данные для классификации ЧС\na_1.get_emergency_code() # Формируем вводом код ЧС\na_2 = Probability(a, a_1)\na_2.p_to_file()\n\n\n\n","repo_name":"AlexS1981/Project_Fire_Probability","sub_path":"Project_Fire_Probability.py","file_name":"Project_Fire_Probability.py","file_ext":"py","file_size_in_byte":15527,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5120038591","text":"def summa(a, b):\n # \"\"\"\n # >>> summa(3, 4)\n # 7\n # >>> summa(0, 4)\n # 4\n # >>> summa(-4, 4)\n # 0\n # >>> summa('A', 'b')\n # 'Ab'\n # >>> summa(4, None)\n # Not supported type\n # >>> summa(1, 0)\n # 1\n # \"\"\"\n try:\n return a + b\n except TypeError:\n print('Not supported type')\n except Exception:\n print('Something is wrong')\n return None\n\n\ndef mines(a, b):\n # \"\"\"\n # >>> mines(5, 4)\n # 1\n # >>> mines(0, 4)\n # -4\n # \"\"\"\n return a - b\n\n\n# if __name__ == \"__main__\":\n# import doctest\n# doctest.testmod()\n\n# if __name__ == \"__main__\":\n# import doctest\n# doctest.testfile('1.txt')\n","repo_name":"ArefaEvgeniy/OPT","sub_path":"Lesson_15/task_05.py","file_name":"task_05.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6513154433","text":"import telebot\nimport random\nimport requests\n\nbot=telebot.TeleBot('1756508816:AAHk_prvWoVNkNW7-0GHDDw0CIyvyRk2dXo')\n@bot.message_handler(commands=['chance'])\ndef handle_start(message):\n print(message.text)\n msg='шанс того, что'\n msg+=message.text.replace('/chance','')\n msg+=' равен '+str(random.randint(0,100))\n bot.send_message(message.chat.id,msg)\n\n@bot.message_handler(commands=['lyrics'])\ndef handle_start(message):\n\n url = \"https://genius.p.rapidapi.com\"\n\n querystring = {\"q\":message.text.replace('/lyrics','')}\n\n headers = {\n 'x-rapidapi-key': \"b749dd581bmshaad3442a3b1153bp15a97ajsn12dd06f55432\",\n 'x-rapidapi-host': \"genius.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url+\"/search\", headers=headers, params=querystring)\n print(response.json()['response']['hits'][0]['result']['id'])\n txt = 
requests.get(\"https://genius.com/songs/\"+str(response.json()['response']['hits'][0]['result']['id']), headers=headers)\n    response = requests.get(\"https://www.youtube.com/results?search_query=\"+message.text.replace('/lyrics','').replace(' ','+'))\n    video = response.text.split('\"watchEndpoint\":{\"videoId\":\"')[1].split('\"')[0]\n\n    bot.send_message(message.chat.id,txt.text.split(\"\")[1].split(\"\")[0].replace(\"\",'')+'\\nhttps://www.youtube.com/watch?v='+video)\nbot.polling()","repo_name":"yungpipp/aboba","sub_path":"tg.py","file_name":"tg.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20555583897","text":"import os\n\n\nAPP_NAME = 'tigerhost-deploy'\n\nPROJECT_REMOTE = 'git@github.com:naphatkrit/TigerHost.git'\n\nDOMAIN_NAME = 'tigerhostapp.com'\n\nDEBUG = bool(os.environ.get('DEBUG', False))\n\nDEISCTL_INSTALL_URL = 'http://deis.io/deisctl/install.sh'\n\nDEIS_INSTALL_URL = 'http://deis.io/deis-cli/install.sh'\n\nADDONS_COMPOSE_PROJECT_NAME = 'addons'\n\nMAIN_COMPOSE_PROJECT_NAME = 'tigerhost'\n","repo_name":"naphatkrit/TigerHost","sub_path":"deploy/deploy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"1550849562","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\nfrom random import randint\n\nfrom enum import Enum\n\nimport pygame\nimport time\nimport serial\nimport math\nimport sys\nimport codecs\n\nimport threading\nimport os\n\nfrom MK1 import *\nfrom mavlink_faking import *\nfrom database import *\n\n# define number\nlistrange = 63\nY_min = -110\nY_max = 110\nY1_min = 110\nY1_max = -110\nippppp = 0\ntimergetdata=200\n\nclass mode(Enum):\n    auto = 1\n    manual = 2\n    debug = 3\n    stop = 0\n\njoystick_no = []\njoystick_choice = 0\nchoiceJoystickstatus = False\nJoystickconect = False\nNameCOM = []\nbutton=[0,0,0,0,0]\naxis=[0,0,0]\ndatasendalpha=[]\ndataread=[]\ntransmit=[]\ncomconnect = False\ncontrol = mode.stop\nlastcontrol = mode.stop\nglobaldebug=[0,0]\n\nclass GUI_Init(Ui_MK1,messageProcess,processData):\n    def __init__(self, ID, name):\n        super(GUI_Init,self).__init__()\n        self.setupUi()\n        self.setupUi1()\n\n    def setupUi1(self):\n        # event device combo box\n        self.device.currentIndexChanged.connect(self.choiceJoystick)\n        self.device.popupAboutToBeShown.connect(self.getjoystick)\n\n        # event connect button\n        self.connect.clicked.connect(self.connectbtn)\n\n        #event send button\n        self.send.clicked.connect(self.senddata)\n\n        # event auto/manual/stop button\n        self.auto_2.clicked.connect(self.auto_mode)\n        self.stop.clicked.connect(self.stop_mode)\n        self.manual.clicked.connect(self.manual_mode)\n        self.debug.clicked.connect(self.debug_mode)\n\n        # event pusshpid button\n        self.pushpid.clicked.connect(self.sendpid)\n        self.pushpid_1.clicked.connect(self.sendpid)\n        self.pushpid_2.clicked.connect(self.sendpid)\n\n        # event change value debug\n        self.rightspeed.valueChanged.connect(self.changevalue)\n        self.leftspeed.valueChanged.connect(self.changevalue)\n\n        # event Enter for textbox \"testsend\"\n        self.testsend.installEventFilter(self)\n\n        # addplot\n        # xaxis\n        self.xaxis.setBackground('w')\n        self.xaxis.showGrid(x=True, y=True)\n        self.xaxis.setYRange(Y_min, Y_max, padding=0)\n        self.xaxis.setMouseEnabled(x=False, y=False)\n        # yaxis\n        self.yaxis.setBackground('w')\n        self.yaxis.showGrid(x=True, y=True)\n        self.yaxis.setYRange(Y1_min, Y1_max, padding=0)\n        self.yaxis.setMouseEnabled(x=False, y=False)\n        # xaxis_1\n        self.xaxis_1.setBackground('w')\n        self.xaxis_1.showGrid(x=True, y=True)\n        self.xaxis_1.setYRange(Y_min, Y_max, padding=0)\n        self.xaxis_1.setMouseEnabled(x=False, y=False)\n        # yaxis_1\n        self.yaxis_1.setBackground('w')\n        self.yaxis_1.showGrid(x=True, y=True)\n        self.yaxis_1.setYRange(Y_min, Y_max, 
padding=0)\n self.yaxis_1.setMouseEnabled(x=False, y=False)\n # general\n self.general.setBackground('w')\n self.general.showGrid(x=True, y=True)\n self.general.setYRange(Y_min, Y_max, padding=0)\n self.general.setMouseEnabled(x=False, y=False)\n # general\n self.general_2.setBackground('w')\n self.general_2.showGrid(x=True, y=True)\n self.general_2.setYRange(Y_min, Y_max, padding=0)\n self.general_2.setMouseEnabled(x=False, y=False)\n\n #updateGUItimer\n self.x = list(range(listrange))\n self.y1 = list(range(listrange))\n self.y2 = list(range(listrange))\n self.y3 = list(range(listrange))\n self.y4 = list(range(listrange))\n self.timer=QtCore.QTimer()\n self.timer.setInterval(100)\n self.timer.timeout.connect(self.refreshUI)\n self.timer.start()\n pen = pg.mkPen(color=(0, 0, 0))\n self.data_line1=self.xaxis.plot(self.x, self.y1, pen=pen)\n self.data_line2=self.yaxis.plot(self.x, self.y2, pen=pen)\n #timergetdata\n self.timergetdata=QtCore.QTimer()\n self.timergetdata.setInterval(timergetdata)\n self.timergetdata.timeout.connect(self.getdata)\n self.timergetdata.start()\n\n # startup\n self.stop.setEnabled(False)\n self.connect.setStyleSheet('QPushButton {color: green;}')\n self.connect.setText('CONNECT')\n # self.main_process()\n\n # event handle\n def eventFilter(self,obj,event):\n if event.type()==QtCore.QEvent.KeyPress and obj is self.testsend:\n if event.key()==QtCore.Qt.Key_Return and self.testsend.hasFocus():\n self.senddata()\n return super().eventFilter(obj,event)\n\n def refreshUI(self):\n global datasendalpha\n if(control==mode.manual or control==mode.debug):\n self.x = self.x[1:]\n self.x.append(self.x[-1] + 1)\n self.y1= self.y1[1:]\n self.y2= self.y2[1:]\n if(control==mode.manual):\n self.y1.append(axis[0])\n self.y2.append(axis[1])\n elif(control==mode.debug):\n self.y1.append(globaldebug[0])\n self.y2.append(globaldebug[1])\n self.data_line1.setData(self.x, self.y1)\n self.data_line2.setData(self.x, self.y2)\n elif(control==mode.stop):\n datasendalpha=\"STOP\"\n else:\n datasendalpha=\"AUTO\"\n if(comconnect==True and (control==mode.stop or control==mode.auto)):\n # self.transmit.write(datasendalpha.encode())\n # print(datasendalpha)\n pass\n\n def connectbtn(self):\n global comconnect,transmit\n NameCOM = self.COM.currentText()\n try:\n if(comconnect == False):\n #for windows\n self.transmit = serial.Serial(NameCOM, 115200, timeout=2.5)\n #for ubuntu\n # self.transmit = serial.Serial(\"/dev/pts/5\",115200,timeout=2.5)\n self.COM.setEnabled(False)\n self.connect.setText('DISCONNECT')\n self.connect.setStyleSheet('QPushButton {color: red;}')\n self.re_se_data.append('Serial port ' + NameCOM + ' opened')\n comconnect = True\n else:\n self.COM.setEnabled(True)\n self.transmit.close()\n self.connect.setText('CONNECT')\n self.connect.setStyleSheet('QPushButton {color: green;}')\n self.re_se_data.append('Serial port ' + NameCOM + ' closed')\n comconnect = False\n except IOError:\n data=\"\"\n data+=('Serial port ' + NameCOM);\n if(comconnect == False):\n data+=\" opening \"\n else:\n data+=\" closing \"\n data+=\"error <\\span>\"\n self.re_se_data.append(data)\n\n def manual_mode(self):\n global control \n self.controlmode_label.setText(\"Manual\")\n control = mode.manual\n self.timer.setInterval(16)\n self.controlmode()\n\n def auto_mode(self):\n global control\n self.controlmode_label.setText(\"Auto\")\n control = mode.auto\n self.timer.setInterval(100)\n self.controlmode()\n\n def stop_mode(self):\n global control\n self.controlmode_label.setText(\"Stop\")\n control = 
mode.stop\n self.timer.setInterval(100)\n self.controlmode()\n\n def debug_mode(self):\n global control\n self.controlmode_label.setText(\"Debug\")\n control = mode.debug\n self.timer.setInterval(16)\n self.controlmode()\n\n def choiceJoystick(self):\n global joystick_choice,choiceJoystickstatus,Joystickconect\n print(self.device.currentIndex())\n choiceJoystickstatus=True\n Joystickconect=True\n\n def getjoystick(self):\n global joystick_no\n num_joy = pygame.joystick.get_count()\n if (num_joy > 0):\n self.device.clear()\n for x in range(num_joy):\n joystick_no = pygame.joystick.Joystick(x)\n joystick_no.init()\n self.device.addItem(joystick_no.get_name())\n self.ID_device.setText(str(joystick_no.get_id()))\n\n def controlmode(self):\n global control\n if (control == mode.stop):\n self.stop.setEnabled(False)\n self.auto_2.setEnabled(True)\n self.manual.setEnabled(True)\n self.debug.setEnabled(True)\n elif (control == mode.manual):\n self.stop.setEnabled(True)\n self.auto_2.setEnabled(True)\n self.manual.setEnabled(False)\n self.debug.setEnabled(True)\n elif(control == mode.debug):\n self.stop.setEnabled(True)\n self.auto_2.setEnabled(True)\n self.manual.setEnabled(True)\n self.debug.setEnabled(False)\n else:\n self.stop.setEnabled(True)\n self.auto_2.setEnabled(False)\n self.manual.setEnabled(True)\n self.debug.setEnabled(True)\n\n def changevalue(self):\n global globaldebug\n globaldebug[0]=int(self.leftspeed.value())\n globaldebug[1]=int(self.rightspeed.value())\n\n def sendpid(self):\n pass\n\n def senddata(self):\n displaydata=\"\"\n maindata=self.testsend.text()\n displaydata+=maindata\n displaydata+=(\"\")\n self.re_se_data.append(displaydata)\n self.testsend.clear()\n if comconnect==True:\n maindata+=\"\\n\"\n self.transmit.write(maindata.encode())\n else:\n self.re_se_data.append(\" Comms isn't connect \")\n\n def getdata(self):\n global dataread,transmit\n bytetoread=[]\n if comconnect == True:\n bytetoread=self.transmit.inWaiting()\n if bytetoread > 0:\n # maindata=str(self.transmit.read(bytetoread),'utf-8')\n rawdata=self.transmit.read(bytetoread)\n str_data=str(rawdata)\n str_data=str_data.replace(\"\\'\",\"\")\n str_data=str_data.replace(\"b\",\"\")\n self.messageDecoder(rawdata,len(rawdata))\n displaydata=\"\"\n displaydata+=str_data\n displaydata+=(\"\")\n self.re_se_data.append(displaydata)\n # print(database_t.velocity.velocity1)\n # print('Hello mother facker')\n\nclass backgroundProcess():\n def __init__(self, ID, name):\n super(backgroundProcess).__init__()\n self.getdatafromJoystick()\n\n def getdatafromJoystick(self):\n global control,axis,button,datasendalpha,choiceJoystickstatus,joystick_no,joystick_choice,Joystickconect\n cache = []\n datasend=[]\n while(1):\n cachedic=0\n if(choiceJoystickstatus==True):\n choiceJoystickstatus=False\n joystick_no=pygame.joystick.Joystick(joystick_choice)\n joystick_no.init()\n pygame.event.pump()\n if((control==mode.manual and Joystickconect==True) or control==mode.debug):\n if(control==mode.manual):\n x=round(joystick_no.get_axis(0)*100,0)\n y=-round(joystick_no.get_axis(1)*100,0)\n if(x!=0):\n alpha=math.atan(y/x)\n alpha*=(180/3.14)\n else:\n if(y>=0):\n alpha=90\n elif(y<0):\n alpha=-90\n\n if(abs(alpha)>60):\n axis[0]=y\n axis[1]=y\n elif(abs(alpha)<=30):\n axis[1]=x\n axis[0]=-x\n else:\n if(x>0):\n axis[1]=y\n axis[0]=y*abs(alpha/70)\n else:\n axis[1]=y*abs(alpha/70)\n axis[0]=y\n for i in range(5):\n button[i]=joystick_no.get_button(i)\n else:\n axis[0]=globaldebug[0]\n axis[1]=globaldebug[1]\n\n for i in range(2):\n 
if(axis[i]<0 or axis[i]==-0):\n cacheaxis=abs(axis[i])\n if (i==0):\n cachedic+=1\n else:\n cachedic+=2\n else:\n cacheaxis=axis[i]\n\n if(cacheaxis<10):\n cache+=\"00\"\n elif (cacheaxis<100):\n cache+=\"0\"\n\n if(i==0):\n cache+=str(int(cacheaxis))\n elif(i==1):\n cache+=str(int(cacheaxis))\n cache+=str(int(cachedic))\n\n # cache+=\".\"\n cache+=str(len(cache))\n cache+=\"]\"\n datasend=''.join(cache)\n print(\"{}\".format(datasend))\n datasendalpha=datasend\n #transmit.write(datasend.encode())\n cache=[]\n cache+=\"[\"\n datasend=[]\n time.sleep(0.016)\n\ndef UIbuild():\n app = QtWidgets.QApplication(sys.argv)\n GUI = GUI_Init(1,\"GUI_builder\")\n GUI.show()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n pygame.display.init()\n pygame.joystick.init()\n UIbuild()","repo_name":"nhatmicls/ManualControl","sub_path":"GUI/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":13580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14594622091","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\n\n# 图像预处理层,先用numpy载入初始化的30个SRM核的数据\nSRM_npy = np.load('./file/kernel.npy')\n\n\n# 图像预处理层的类\nclass SRM_conv2d(nn.Module):\n def __init__(self, stride=1, padding=2):\n super(SRM_conv2d, self).__init__()\n # 输入1层(图像)\n self.in_channels = 3\n # 输出30层,因为是有30个卷积核,分别进行计算得到30个\n self.out_channels = 30\n # 设置卷积核大小\n self.kernel_size = (5, 5)\n # 设置步长\n if isinstance(stride, int):\n self.stride = (stride, stride)\n else:\n self.stride = stride\n # 设置padding\n if isinstance(padding, int):\n self.padding = (padding, padding)\n else:\n self.padding = padding\n # 卷积膨胀\n self.dilation = (1,1)\n # 转置\n self.transpose = False\n # padding\n self.output_padding = (0,)\n # 分组,默认设置成1组\n self.groups = 1\n # 设置预处理层卷积核权值为30个5*5的Tensor,此时只是设置,并没有初始化\n self.weight = Parameter(torch.Tensor(30, 3, 5, 5),requires_grad=True)\n # 设置预处理层卷积核偏置为30个Tensor,此时只是设置,并没有初始化\n self.bias = Parameter(torch.Tensor(30),requires_grad=True)\n # 重置上面值的大小\n self.reset_parameters()\n\n def reset_parameters(self):\n # 将上面加载的SRM核,载入到self.weight中\n self.weight.data.numpy()[:] = SRM_npy\n # 默认设置偏置为0\n self.bias.data.zero_()\n\n def forward(self, input):\n # 前向计算\n return F.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n\n# 卷积模块\nclass ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, with_bn=False):\n super(ConvBlock, self).__init__()\n # 卷积运算\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1)\n # relu激活函数\n self.relu = nn.ReLU()\n # 传递启用BN层的参数\n self.with_bn = with_bn\n # 如果启用BN层参数开启\n if with_bn:\n # BN计算\n self.norm = nn.BatchNorm2d(out_channels)\n else:\n # 若未开启,则不进行BN层计算,直接传递运算结果\n self.norm = lambda x: x\n self.reset_parameters()\n\n def forward(self, x):\n # 前向���算\n return self.norm(self.relu(self.conv(x)))\n\n def reset_parameters(self):\n # 卷积模块权值初始化\n nn.init.xavier_uniform_(self.conv.weight)\n # 偏置初始化\n self.conv.bias.data.fill_(0.2)\n\n\n# YeNet总体\nclass YeNet(nn.Module):\n # 构造网络\n def __init__(self):\n super(YeNet, self).__init__()\n # 图像预处理层\n self.preprocessing = SRM_conv2d()\n # 卷积模块\n self.block2 = ConvBlock(30, 30, 3)\n self.block3 = ConvBlock(30, 30, 3)\n self.block4 = ConvBlock(30, 30, 3)\n # pooling\n self.pool1 = nn.AvgPool2d(2, 2)\n self.block5 = ConvBlock(30, 32, 3,)\n self.pool2 = nn.AvgPool2d(2, 2)\n self.block6 = ConvBlock(32, 32, 
3,)\n self.pool3 = nn.AvgPool2d(2, 2)\n self.block7 = ConvBlock(32, 32, 3,)\n self.pool4 = nn.AvgPool2d(2, 2)\n self.block8 = ConvBlock(32, 16, 3)\n self.block9 = ConvBlock(16, 16, 3)\n # 线性激活层\n self.ip1 = nn.Linear(4 * 4 * 16, 2)\n # 根据条件重置参数\n self.reset_parameters()\n\n # 前向计算\n def forward(self, x):\n # 转换成float\n x = x.float()\n # 预处理\n x = self.preprocessing(x)\n x = F.relu(x)\n # 卷积运算\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n # pooling\n x = self.pool1(x)\n # print(np.shape(x))\n x = self.block5(x)\n x = self.pool2(x)\n # print(np.shape(x))\n x = self.block6(x)\n # x = self.pool3(x)\n x = self.block7(x)\n x = self.pool4(x)\n # print(np.shape(x))\n x = self.block8(x)\n x = self.block9(x)\n # print(np.shape(x))\n # 维度转换\n x = x.view(x.size(0), -1)\n # 全连接层\n x = self.ip1(x)\n return x\n\n def reset_parameters(self):\n for mod in self.modules():\n # 卷积层重置参数,这个根据卷积层,图像预处理层\n if isinstance(mod, SRM_conv2d) or isinstance(mod, ConvBlock):\n mod.reset_parameters()\n # 线性激活层\n elif isinstance(mod, nn.Linear):\n # 权值初始化\n nn.init.normal_(mod.weight, 0., 0.01)\n # 偏置初始化\n mod.bias.data.zero_()\n\ndef init():\n return YeNet()","repo_name":"qzf136/CNN-adversary","sub_path":"YeNet.py","file_name":"YeNet.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"74716897608","text":"import argparse\nimport os\nimport sys\nimport configparser\nimport glob\nimport requests\n\nclass Vibranium:\n\n def __init__(self):\n\n # Set WACCANDA endpoint\n self.api = 'http://localhost:3000/api/'\n\n # Check if $WACC_HOME is set\n try:\n os.environ['WACC_HOME']\n except KeyError:\n print(\"ERROR: $WACC_HOME environment variable not set! Is the WACC compiler installed correctly?\")\n exit(1)\n\n # Then commence business as usual\n parser = argparse.ArgumentParser(description='The Vibranium package manager for the WACC language.',\n usage='''vibranium []\n \nValid commands:\ninit set up a Vibranium project\ninstall install a package\nremove remove a package\ncompile compile a project''')\n parser.add_argument('command', metavar='command', type=str, help='the command you wish to run.')\n \n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unknown command!')\n parser.print_help()\n exit(1)\n\n # Dispatch command\n getattr(self, args.command)()\n\n def init(self):\n \"\"\" Initialise folder structure and config file\"\"\"\n try:\n os.mkdir('.installed_packages')\n except FileExistsError:\n print(\"WARNING: It seems that the package directory has already been initialised!\")\n \n # Create installed packages directory\n if not os.path.exists('.installed_packages/package.directory'):\n with open('.installed_packages/package.directory', 'w+') as f:\n f.write('')\n f.close()\n\n # Create main file\n if not os.path.exists('main.wacc'):\n with open('main.wacc', 'w+') as f:\n f.write('begin\\nskip\\nend')\n f.close()\n\n # Configure config\n cp = configparser.ConfigParser()\n cp['SETTINGS'] = {'entrypoint': 'main.wacc',\n 'output_dir': 'out'}\n cp['DEPENDENCIES'] = {}\n with open('vibranium.config', 'w+') as f:\n cp.write(f)\n f.close()\n\n print(\"Initialisation successful!\")\n\n def install(self):\n \"\"\" Installs a package \"\"\"\n parser = argparse.ArgumentParser(description='Installs packages')\n parser.add_argument('package', metavar='package', help='package name. 
leave empty to install all dependencies.', nargs='*')\n parser.add_argument('--save', '-s', default=False, action='store_true', \n help='should the package be saved to the requirements?')\n args = parser.parse_args(sys.argv[2:])\n packages = args.package\n\n # Install requirements\n if len(packages) == 0:\n cp = configparser.ConfigParser()\n cp.read('vibranium.config')\n deps = list(cp['DEPENDENCIES'].items())\n packages = ['{}=={}'.format(p,v) for (p,v) in deps]\n\n # Otherwise, business as usual\n print('Installing ' + ', '.join(packages))\n # Open package directory\n if not os.path.exists('.installed_packages/package.directory'):\n print(\"ERROR: Cannot find package directory! Has the project been initalised properly?\")\n exit(1)\n cp = configparser.ConfigParser()\n cp.read('.installed_packages/package.directory')\n \n # Check if we already have the package\n for package in packages:\n version = 'latest'\n if '==' in package:\n version = package.split('==')[1]\n package = package.split('==')[0]\n if package in cp.keys():\n p = cp[package]\n if (p['version'] == version):\n print('Package already present!')\n exit(0)\n \n # Download the package!\n url = self.api + 'install/{}/{}'.format(package, version)\n r = requests.post(url)\n\n # Check if found\n if r.content == b'missing':\n print('ERROR: package \"{}\" has gone missing on our server!'.format(package))\n exit(1)\n \n if r.content == b'not found':\n print('ERROR: package \"{}=={}\" couldn\\'t be found!'.format(package, version))\n exit(1)\n\n # Write to packages\n dpath = '.installed_packages/{}.wacc'.format(package)\n with open(dpath, 'wb+') as f:\n f.write(r.content)\n f.close()\n\n # Update package directory\n cp[package] = {\n 'version': 'latest',\n 'path': dpath\n }\n with open('.installed_packages/package.directory', 'w+') as f:\n cp.write(f)\n f.close()\n if args.save: \n cp = configparser.ConfigParser()\n cp.read('vibranium.config')\n cp['DEPENDENCIES'][package] = version\n with open('vibranium.config', 'w') as f:\n cp.write(f)\n f.close()\n \n print(\"INSTALL SUCCESS!\")\n\n def remove(self):\n \"\"\" Removes a package \"\"\"\n parser = argparse.ArgumentParser(description='Removes packages')\n parser.add_argument('package', metavar='package', help='Package name', nargs='+')\n parser.add_argument('--save', '-s', default=False, action='store_true', \n help='Should the package be removed from the requirements?')\n args = parser.parse_args(sys.argv[2:])\n\n print('removing')\n if args.save:\n print('saving')\n\n def compile(self):\n \"\"\" Compiles a program \"\"\"\n cwd = os.getcwd()\n # Check if is valid project\n if not os.path.exists(os.path.join(cwd, 'vibranium.config')):\n print(\"ERROR: this is not a valid vibranium project\")\n exit(1)\n\n # Read config\n cp = configparser.ConfigParser()\n cp.read('vibranium.config')\n\n # Get subdirs to include\n includePaths = [x[0] for x in os.walk(cwd)]\n includePathsArg = ' '.join(includePaths)\n \n # Get install folder for self\n install_dir = os.path.dirname(os.path.realpath(__file__))\n ass = 'sh ' + os.path.join(install_dir, 'assemble.sh')\n comp = 'sh ' + os.path.join(install_dir, 'compile.sh')\n link = 'sh ' + os.path.join(install_dir, 'link.sh')\n \n # Create build directory\n output_dir = 'build'\n try:\n output_dir = cp['SETTINGS']['output_dir']\n except KeyError:\n print(\"ERROR: config doesn't define 'output_dir'!\")\n exit(1)\n\n if os.path.exists(output_dir):\n os.system('rm -r ' + output_dir)\n os.mkdir(output_dir)\n\n # Get files to compiled\n filesToCompile = []\n 
for r, _, f in os.walk(os.getcwd()):\n for file in f:\n if '.wacc' in file:\n filesToCompile.append(os.path.join(r, file))\n\n # Get a list of any existing assembler files\n sFiles = set(glob.glob('*.s'))\n\n # Compile all of them\n for f in filesToCompile:\n print('Compiling {}...'.format(f))\n returnCode = os.system(comp + ' ' + f + ' ' + includePathsArg)\n if returnCode != 0:\n print('COMPILE FAILED!')\n exit(1)\n\n # Find all new ASM files \n sFiles = set(glob.glob('*s')) - sFiles \n\n # Assemble the files\n for f in sFiles:\n print('Assembling {}...'.format(f))\n returnCode = os.system(ass + ' ' + f + ' ' + os.path.join(output_dir, f[:-1] + 'o'))\n if returnCode != 0:\n print('COMPILE FAILED!')\n exit(1)\n\n # Kill the baby ASM files\n os.system('rm ' + ' '.join(sFiles))\n\n # Link the files\n print('Linking objects...')\n oFiles = glob.glob(os.path.join(output_dir, '*.o'))\n returnCode = os.system(link + ' ' + os.path.join(output_dir, 'main') + ' ' + ' '.join(oFiles))\n if returnCode != 0:\n print('COMPILE FAILED!')\n exit(1)\n \n # Success!\n print(\"COMPILE SUCCEEDED!\")\n\nif __name__ == '__main__':\n Vibranium()","repo_name":"bogdansurdu/vibranium-waccanda","sub_path":"vibranium/vibranium.py","file_name":"vibranium.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"7797153396","text":"import copy\nimport os, glob\nimport itertools\nimport operator\nimport csv\n\nimport credo\nfrom credo import modelrun as mrun\nfrom credo import modelresult as mres\n\n# The below is for Python 2.5 compatibility\ntry:\n import itertools\n product = itertools.product\nexcept AttributeError:\n import credo.utils\n product = credo.utils.productCalc\n\nclass ModelResultNotExistError(Exception):\n \"\"\"Exception for specifying that a Model Result that CREDO was asked\n to read in, doesn't exist.\"\"\"\n pass\n\n\nclass ModelVariant:\n \"\"\" A class that can be added to a :class:`.ModelSuite` to help \n auto-generate a suite of ModelRuns to perform, where a particular\n parameter is being varied over a certain range.\n\n This is an abstract base class, you should select an actual ModelVariant.\n\n .. attribute:: paramRange\n \n A list, containing the values that the parameter should be varied\n over. E.g. [0,1,2], or [5.6, 7.8, 9.9]. Needs to be of the correct\n type for the particular parameter. The Python \n `range() `_\n function can be useful in generating such a list.\"\"\"\n\n def __init__(self, paramRange):\n self.paramRange = paramRange\n \n def applyToModel(self, modelRun, ii):\n \"\"\"Function to apply the ii-th value of paramRange to a model.\"\"\"\n raise NotImplementedError(\"Abstract base method, please over-ride\"\\\n \" in your implementation.\")\n\n def valLen(self):\n \"\"\"Returns the length of the list of parameter values to vary\n specified in :attr:`.paramRange`.\"\"\"\n return len(self.paramRange)\n \n def valStr(self, ii):\n \"\"\"Return a string version of the ii-th parameter value.\"\"\"\n paramVal = self.paramRange[ii]\n if type(paramVal) == float:\n valStr = \"%g\" % paramVal\n else:\n valStr = str(paramVal)\n return valStr\n\n\nclass StgXMLVariant(ModelVariant):\n \"\"\"\n A :class:`.ModelVariant` designed to modify StGermain XML model input\n parameters.\n \n .. attribute:: paramPath\n \n The value to use when over-riding the parameter in a StGermain\n dictionary, using the StGermain command line format.\n \n E.g. 
Setting \"gravity\" would override the gravity parameter in\n the dictionary, whereas setting to \n \"components.initialConditionsShape.startX\" would override the\n startX parameter, within the initialConditionsShape component.\n \"\"\" \n def __init__(self, paramPath, paramRange):\n ModelVariant.__init__(self, paramRange)\n self.paramPath = paramPath\n \n def applyToModel(self, modelRun, ii):\n \"\"\"Apply the ii-th value in the attr:`.paramRange` to a particular\n :class:`~credo.modelrun.ModelRun`.\"\"\"\n modelRun.paramOverrides[self.paramPath] = self.paramRange[ii]\n \n def cmdLineStr(self, ii):\n \"\"\"Return the command-line string to apply this value.\"\"\"\n return credo.io.stgcmdline.paramStr(self.paramPath, self.paramRange[ii])\n\n\nclass JobParamVariant(ModelVariant):\n \"\"\"A :class:`.ModelVariant` designed to modify job parameters.\n\n .. attribute: jobParam:\n \n string name of parameter you wish to vary (eg \"nproc\").\n \"\"\" \n def __init__(self, jobParam, paramRange):\n ModelVariant.__init__(self, paramRange)\n self.jobParam = jobParam\n \n def applyToModel(self, modelRun, ii):\n \"\"\"Apply the ii-th value in the attr:`.paramRange` to a particular\n :class:`~credo.modelrun.ModelRun`.\"\"\"\n modelRun.jobParams[self.jobParam] = self.paramRange[ii]\n \n def cmdLineStr(self, ii):\n \"\"\"Return the command-line string to apply this value.\"\"\"\n return credo.io.stgcmdline.paramStr(self.jobParam, self.paramRange[ii])\n\n\ndef getParamValuesIter(modelVariants, iterGen):\n \"\"\"Given a list of model variants and an iterator generator function\n (eg itertools.izip or itertools.product) to use, generates\n a specific iterator that can be used on the modelVariants to obtain\n the actual param values.\"\"\"\n paramIter = iterGen(*map(operator.attrgetter('paramRange'),\n modelVariants.itervalues()))\n return paramIter\n\ndef getParamValues(modelVariants, iterGen):\n \"\"\"Shortcut to create a list of param values using\n :func:`.getParamValuesIter`\"\"\"\n paramIter = getParamValuesIter(modelVariants, iterGen)\n return list(paramIter)\n\ndef getVariantIndicesIter(modelVariants, iterGen):\n \"\"\"Given a list of model variants and iterator generator function to use,\n generates an iterator of indices into the modelVariants list.\"\"\"\n variantLens = [mv.valLen() for mv in modelVariants.itervalues()]\n indexIterator = iterGen(*map(range, variantLens))\n return indexIterator\n\ndef getVariantNameDicts(modelVariants, indicesIt):\n \"\"\"Generates a list of dictionaries of parameters to be modified for each\n model run, given a list of :class:`.StgXMLVariant` and an iterator into\n them (e.g. generated by :func:`.getVariantIndicesIter`.\n \"\"\"\n paramDicts = []\n modelVarList = modelVariants.values()\n for indexSet in indicesIt:\n newDict = {}\n for mvI, mvEntry in enumerate(modelVariants.iteritems()):\n mvName, modelVar = mvEntry\n paramVal = modelVar.paramRange[indexSet[mvI]]\n newDict[mvName] = paramVal\n paramDicts.append(newDict)\n return paramDicts \n\ndef getVariantParamPathDicts(modelVariants, indicesIt):\n \"\"\"Generates a list of dictionaries of parameters to be modified for each\n model run, given a list of :class:`.StgXMLVariant` and an iterator into\n them (e.g. 
generated by :func:`.getVariantIndicesIter`.\n \"\"\"\n for mv in modelVariants.itervalues():\n assert isinstance(mv, StgXMLVariant)\n paramDicts = []\n for indexSet in indicesIt:\n newDict = {}\n for mvI, modelVar in enumerate(modelVariants.itervalues()):\n paramVal = modelVar.paramRange[indexSet[mvI]]\n newDict[modelVar.paramPath] = paramVal\n paramDicts.append(newDict)\n return paramDicts\n\ndef getVariantCmdLineOverrides(modelVariants, indicesIt):\n \"\"\"Generates a list of strings to use at cmd line for each\n model run, given a list of :class:`.StgXMLVariant` and an iterator into\n them (e.g. generated by :func:`.getVariantIndicesIter`.\n \"\"\"\n overrideCmdLines = []\n for indexSet in indicesIt:\n overStrs = [] \n for mvI, modelVar in enumerate(modelVariants.itervalues()):\n overStrs.append(modelVar.cmdLineStr(indexSet[mvI]))\n overrideCmdLines.append(\" \".join(overStrs))\n return overrideCmdLines\n\ndef getSubdirTextParamVals(modelVariants, paramIndices):\n \"\"\"Creates a subdirectory text based on the names and values of each\n variant.\"\"\"\n subDirName = \"\"\n varTexts = []\n for mvI, mvEntry in enumerate(modelVariants.iteritems()):\n mvName, modelVar = mvEntry\n valStr = modelVar.valStr(paramIndices[mvI])\n varTexts.append(\"%s_%s\" % (mvName, valStr))\n subDirName = \"-\".join(varTexts)\n return subDirName\n\ndef getTextParamValsSubdirs(modelVariants, indicesIt):\n \"\"\"Given a list of :class:`ModelVariants` and an index iterator,\n returns a list of all subDirs to use.\"\"\"\n subDirs = []\n for indexSet in indicesIt:\n subDirs.append(getSubdirTextParamVals(modelVariants, indexSet))\n return subDirs\n\ndef getVarRunIs(varName, modelVariants, runDicts):\n \"\"\"Given a variant name, modelVariants dict and iterGen function,\n returns a mapping of values of the named modelVariant to run indices\"\"\"\n varRunIs = {}\n for varValue in modelVariants[varName].paramRange:\n varRunIs[varValue] = []\n for runI, runDict in enumerate(runDicts):\n if runDict[varName] == varValue:\n varRunIs[varValue].append(runI)\n return varRunIs\n\ndef getResultsByVarRunIs(varRunIsMap, results):\n \"\"\"Given a varRunIsMap generated by :func:`getVarRunIs` and an array of\n results, gives a mapping directly from variant values to corresponding result.\"\"\"\n resultsMap = {}\n for varValue, varRunIs in varRunIsMap.iteritems():\n resultsMap[varValue] = [results[runI] for runI in varRunIs]\n return resultsMap\n\ndef getOtherParamValsByVarRunIs(varRunIsMap, varDicts, otherParam):\n \"\"\"Given a varRunIsMaps generated by :func:`varRunIsMap`, a varDict and\n the name of another variant param in the dict, returns a mapping from\n the variant values to the values of the other param at the same indices.\"\"\"\n otherValsMap = {}\n for varValue, varRunIs in varRunIsMap.iteritems():\n otherValsMap[varValue] = [varDicts[runI][otherParam] for runI \\\n in varRunIs]\n return otherValsMap\n\n################\n\ndef getSubdir_TextParamVals(modelRun, modelVariants, paramIndices, runIndex):\n \"\"\"Generate an output sub-directory name for a run with\n a printed version of :attr:`ModelSuite.modelVariants` names, \n and vales for this run.\n (Good in the sense of being fairly self-describing, but can\n be long if you have many model variants).\"\"\"\n subPath = getSubdirTextParamVals(modelVariants, paramIndices)\n return subPath\n\ndef getSubdir_RunIndex(modelRun, modelVariants, paramIndices, runIndex):\n \"\"\"Simply prints the index of the run as a subdirectory.\"\"\"\n return \"%.5d\" % runIndex\n\ndef 
getSubdir_RunIndexAndText(modelRun, modelVariants, paramIndices, runIndex):\n \"\"\"Subdir is based on both the run index, and the textual variant names.\"\"\"\n subPath = getSubdirTextParamVals(modelVariants, paramIndices)\n return \"%.5d-%s\" % (runIndex, subPath)\n\nclass ModelSuite:\n '''A class for running a suite of Models (e.g. a group for profiling,\n or a System Test that requires multiple runs).\n \n The two main ways of using this class are:\n\n * Creating a :class:`.ModelSuite`, and then adding \n :class:`~credo.modelrun.ModelRun` s to the suite using\n the :meth:`.addRun` method.\n * Creating a :class:`.ModelSuite`, and providing a\n :class:`~credo.modelrun.ModelRun` as a template, then adding \n :class:`.StgXMLVariant` s to define what sort of parameter\n sweep should be performed. In this case, :meth:`.generateRuns()`\n needs to be called after all variants have been added.\n\n .. attribute:: outputPathBase\n\n The base path to use for saving model results under.\n\n .. attribute:: runs\n\n A list of :class:`~credo.modelrun.ModelRun` s to be run as part of the\n suite. See :meth:`.generateRuns` and :meth:`.addRun`.\n\n .. attribute:: runDescrips\n\n Short (eg 1 line) textual description for each ModelRun stored in the\n :attr:`.runs`.\n\n .. attribute:: runCustomOptSets\n\n Custom sets of options (to be used at the command line) associated \n with each run in :attr:`.runs` (strings).\n \n .. attribute:: resultsList\n\n Initially `None`, after the suite has been run (using :meth:`.runAll`),\n saves a reference to all :class:`~credo.modelresult.ModelResult` s\n generated.\n\n .. attribute:: subOutputPathGenFunc\n \n This function will can be used to customise the model sub-path based\n on each modelRun. Override it if you wish to use other than the default.\n\n .. attribute:: templateMRun\n\n (Optional) setting this to an :class:`~credo.modelresult.ModelRun`\n means this run can be used as a \"template\" to add variants to, \n and create a parameter sweep over this run.\n\n .. seealso: :meth:`.addVariant`, :meth:`generateRuns`, and\n :class:`.StgXMLVariant`.\n \n .. attribute:: iterGen\n\n (Related to auto-generation): A generator function to create an\n iterator to use when auto-generating a suite based on modelVariants.\n See Python module :mod:`itertools` module for more.\n\n .. attribute:: modelVariants\n\n Set of :class:`.StgXMLVariant` s to apply to the template run in\n order to auto-generate a suite to vary certain parameters. 
See\n :attr:`.templateMRun` for more.\n\n '''\n\n def __init__(self, outputPathBase, templateMRun=None):\n self.outputPathBase = outputPathBase\n self.runs = []\n self.runDescrips = []\n self.runCustomOptSets = []\n self.resultsList = []\n self.subOutputPathGenFunc = getSubdir_RunIndex\n # Parameters related to dynamic generation\n self.templateMRun = templateMRun\n self.iterGen = None\n self.modelVariants = {}\n\n def addRun(self, modelRun, runDescrip=None, runCustomOpts=None,\n forceOutputPathBaseSubdir=True):\n \"\"\"Add a model run to the list of those to be run.\n\n :param modelRun: A :class:`~credo.modelrun.ModelRun` to be added.\n :keyword runDescrip: An (optional) string describing the run.\n :keyword runCustomOpts: (optional) string of any custom options\n that should be passed through to StGermain, only for this run.\n :keyword forceOutputPathBaseSubdir: if True (default), will\n update the model run's output dir to enforce it's a subdir of\n :attr:`.outputPathBase`\n :returns: the index of the newly added run in the modelRun list.\"\"\"\n if not isinstance(modelRun, mrun.ModelRun):\n raise TypeError(\"Error, given run not an instance of a\"\\\n \" ModelRun\" % runI)\n if forceOutputPathBaseSubdir:\n commonPrefix = os.path.commonprefix([self.outputPathBase,\n modelRun.outputPath])\n if commonPrefix != self.outputPathBase:\n newPath = os.path.join(self.outputPathBase, modelRun.name)\n modelRun.outputPath = newPath\n modelRun.logPath = newPath\n self.runs.append(modelRun)\n self.runDescrips.append(runDescrip)\n self.runCustomOptSets.append(runCustomOpts)\n # Return the index of the newly added run.\n return len(self.runs) - 1\n\n def getRunByName(self, runName):\n \"\"\"Get a modelRun instance from the suite with a particular name.\"\"\"\n for modelRun in self.runs:\n if modelRun.name == runName:\n return modelRun\n\n def getRunIndex(self, runName):\n \"\"\"Get the index within the suite of a run with the given name.\"\"\"\n for runI, modelRun in enumerate(self.runs):\n if modelRun.name == runName:\n return runI\n\n def preRunCleanup(self):\n \"\"\"Convenience function to call all sub-methods for tasks to do\n before running to clean up directories.\"\"\"\n self.cleanAllOutputPaths()\n self.cleanAllLogFiles()\n\n def cleanAllOutputPaths(self):\n '''Remove all files in each model's output path. Useful to get rid of\n results still there from previous jobs. Doesn't delete sub-directories,\n in case they are other model runs' results that should be ignored.'''\n startDir = os.getcwd()\n for modelRun in self.runs:\n os.chdir(modelRun.basePath)\n for filePath in glob.glob(os.path.join(modelRun.outputPath,\"*\")):\n if os.path.isfile(filePath):\n os.unlink(filePath)\n os.chdir(startDir)\n\n def cleanAllLogFiles(self):\n \"\"\"Remove all stdout and stderr files from each ModelRun's designated\n output and log paths.\"\"\"\n startDir = os.getcwd()\n for modelRun in self.runs:\n os.chdir(modelRun.basePath)\n logFiles = [modelRun.getStdOutFilename(),\n modelRun.getStdErrFilename()]\n for fname in logFiles:\n if os.path.isfile(fname):\n os.unlink(fname)\n os.chdir(startDir)\n\n def addVariant(self, name, modelVariant):\n \"\"\"Add a :class:`.StgXMLVariant` to the list to be applied to a\n template run. See :attr:`.modelVariants`.\"\"\"\n self.modelVariants[name] = modelVariant\n\n def generateRuns(self, iterGen=product): \n \"\"\"When using a template modelRun, will generate runs for the suite\n based on it. 
The generated runs are saved to \n the :attr:`.runs` attribute ready to be run using :meth:`.runAll`.\n \n This requires that there are one or more :class:`.StgXMLVariant`\n recorded on the class already.\n\n :param iterGen: this determines what iterator strategy should be\n used to generate the runs. Defaults to a product, but a simple\n \"zip\" style can be achieved using the itertools.izip iterator\n generating function.\n See the Python :mod:`itertools` module for more.\n \"\"\"\n\n assert self.templateMRun\n\n # Save the strategy passed in.\n self.iterGen = iterGen\n # Empty the \"runs\", in case it has values in there already\n self.runs = []\n\n # Strategy used below is instead of iterating directly over the \n # parameters we are applying to each run, create indices into the\n # modelVariants lists to work out which to apply for each run.\n indexIterator = getVariantIndicesIter(self.modelVariants, self.iterGen)\n\n for runI, paramIndices in enumerate(indexIterator):\n # First create a copy of the template model run\n newMRun = copy.deepcopy(self.templateMRun)\n # Now, apply each variant to it as appropriate\n for varI, modelVar in enumerate(self.modelVariants.itervalues()):\n modelVar.applyToModel(newMRun, paramIndices[varI])\n\n subPath = self.subOutputPathGenFunc(newMRun, self.modelVariants,\n paramIndices, runI)\n newMRun.name += \"-%s\" % (subPath)\n newMRun.outputPath = os.path.join(self.outputPathBase, subPath)\n newMRun.logPath = os.path.join(self.outputPathBase, subPath)\n self.runs.append(newMRun)\n self.runDescrips.append(subPath)\n self.runCustomOptSets.append(None)\n\n def writeAllModelRunXMLs(self):\n \"\"\"Save an XML record of each ModelRun currently in :attr:`.runs`.\"\"\"\n for runI, modelRun in enumerate(self.runs):\n modelRun.writeInfoXML()\n\n def writeAllModelResultXMLs(self):\n \"\"\"Save an XML record of each ModelResult currently in\n :attr:`.resultsList`.\"\"\"\n for runI, mResult in enumerate(self.resultsList):\n mResult.writeRecordXML()\n \n def getCustomOpts(self, runI, extraCmdLineOpts):\n \"\"\"Get the custom opts (as a string) to apply for modelRun runI.\"\"\"\n customOpts = None\n if self.runCustomOptSets[runI]:\n customOpts = self.runCustomOptSets[runI]\n if extraCmdLineOpts:\n if customOpts == None: customOpts = \"\"\n customOpts += extraCmdLineOpts\n return customOpts \n\n def readResultsFromPath(self, basePath, overrideOutputPath=None,\n checkAllPresent=True):\n \"\"\"Read the results generated for a given ModelSuite located off the \n given basePath where the suite was run, and return the list of results.\n\n This will ignore results in the directory not related to this suite.\n\n :arg overrideOutputPath: if specified, this path overrides the default\n outputPath of the suite itself to search for the results.\n (I.e. useful if you are reading from a previous suite with different\n output path.)\n :arg checkAllPresent: if True this will check that all runs expected\n for the suite were found in the list of results.\n\n .. note:\n Currently this just relies on model result names for the suite\n matching up correctly. 
In future, should really scan the ModelResult\n XMLs and check they match correctly.\n \"\"\" \n if overrideOutputPath is not None:\n outputPathBase = overrideOutputPath\n else:\n outputPathBase = self.outputPathBase\n # First read all results\n # TODO: passing in the 'name' below is hacky:- really should be \n # reading this in from model result XMLs\n if self.templateMRun:\n baseName = self.templateMRun.name\n else:\n baseName = None\n readResults = getModelResultsArray(baseName,\n os.path.join(basePath, outputPathBase))\n # Now check through, and build a new list only contained in this index\n sResults = []\n for result in readResults:\n runIndex = self.getRunIndex(result.modelName)\n if runIndex == None: continue\n else:\n sResults.append((runIndex, result))\n mResults = [None] * len(sResults)\n # Now put them in the right order\n for runIndex, result in sResults:\n mResults[runIndex] = result\n # Finally, check each run in the suite is present in the returned list\n if checkAllPresent:\n resultNames = [res.modelName for res in mResults]\n for runI, mRun in enumerate(self.runs):\n if mRun.name not in resultNames:\n raise ModelResultNotExistError(\"Error, given basePath\"\\\n \" for reading model\"\\\n \" results from, %s, with output path %s, is missing\"\\\n \" result for suite's run '%s' (index %d).\"\\\n \"\\n(names read are %s).\" %\\\n (basePath, outputPathBase, mRun.name, runI,\n resultNames))\n return mResults \n \n# TODO: here perhaps would be where we have tools to generate stats/plots\n# of various properties of the suite, e.g. memory usage? Or should\n# this be a capability of some sort of uber-results list? Or profiling\n# tools?\n\ndef writeInputsOutputsToCSV(mSuite, observablesDict, fname):\n \"\"\"Write a CSV file, containing all the ModelVariants defined for a \n ModelSuite, and also all the observables in the observablesDict.\n\n :param observablesDict: a dictionary of 'observables', each entry in\n the form 'obsName':[obsVals for each run], e.g. \"vrms\":[0.6, 0.8, 0.9].\n :param fname: file name of the CSV file to create, inside the model\n suite's base output path.\n \n .. note:: Could be a function on the ModelSuite?\n \"\"\" \n target = open(os.path.join(mSuite.runs[0].basePath, mSuite.outputPathBase, fname), \"w\" )\n wtr = csv.writer(target)\n # Need to do sorting to make sure keys here match those below.\n sortedVarNames = mSuite.modelVariants.keys()\n sortedVarNames.sort()\n wtr.writerow(sortedVarNames + observablesDict.keys())\n indexIt = getVariantIndicesIter(mSuite.modelVariants, mSuite.iterGen)\n varDicts = getVariantNameDicts(mSuite.modelVariants, indexIt)\n for varDict, observs in zip(varDicts, zip(*observablesDict.itervalues())):\n sortedValues = [varDict[varName] for varName in sortedVarNames]\n wtr.writerow(sortedValues + list(observs))\n target.close()\n\ndef getModelResultsArray(baseName, baseDir):\n \"\"\"Post-processing: given a base model name and base output directory,\n search this directory for model results, and read into a list of\n :class:`~credo.modelresult.ModelResult` . \n\n .. 
note:: Needs more checking added, and ability to recover metadata\n about the ModelRuns.\n \"\"\"\n modelResults = []\n for fName in os.listdir(baseDir):\n fullPath = os.path.join(baseDir, fName)\n if os.path.isdir(os.path.join(baseDir, fName)):\n dirName = fName\n if baseName == None:\n modelName = dirName\n else: \n modelName = \"%s-%s\" % (baseName, dirName)\n mResult = mres.readModelResultFromPath(fullPath)\n #ModelResult(modelName, fullPath)\n # TODO: When func ready, search for an XML file containing\n # job meta info, and attach here\n # mResult.jobMetaInfo = ...\n modelResults.append(mResult)\n return modelResults\n","repo_name":"PatSunter/credo","sub_path":"credo/modelsuite.py","file_name":"modelsuite.py","file_ext":"py","file_size_in_byte":23933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"69999820809","text":"from elasticsearch_config import es\nfrom elasticsearch.helpers import bulk\nimport time\nimport pandas as pd\n\nnew_index = \"og_county_prices\"\nprices_wti = \"prices_wti\"\nog_index = \"og_county\"\n\n# ---------- CREAR INDEX OG FIELD PRICES ----------\n\nog_mapp = es.indices.get_mapping(index=og_index)[og_index]\n\nog_mapp[\"mappings\"][\"properties\"][\"value\"] = {\"type\": \"float\"}\n\nes.indices.create(index=new_index, body=og_mapp, ignore=400)\n\n# ---------- CRUZAR PRECIOS POR PERIODO ----------\n\nstart_time = time.time()\n\nquery = {\"query\": {\"match_all\": {}}}\n\nprices_wti_hits = []\nog_hits = []\n\n\ndef get_batch_data(index, data):\n res = es.search(index=index, body=query, size=10000, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n while len(hits) > 0:\n res = es.scroll(scroll_id=scroll_id, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n\nget_batch_data(prices_wti, prices_wti_hits)\nget_batch_data(og_index, og_hits)\n\n\n# Convertir a DataFrames\ndf_prices = pd.DataFrame(prices_wti_hits)\ndf_og_index = pd.DataFrame(og_hits)\n\n\ndf_final = pd.merge(\n df_og_index, df_prices, left_on=\"CYCLE_YEAR_MONTH\", right_on=\"period\", how=\"left\"\n)\n\ndocs = df_final.to_dict(orient=\"records\")\n\n\ndef ingestion_bulk(index_name, batch_size=5000):\n data = [{\"_index\": index_name, \"_source\": doc} for doc in docs]\n len_data = len(data)\n\n for i in range(0, len_data, batch_size):\n success, failed = bulk(es, data[i : i + batch_size])\n\n if failed:\n print(f\"Error al indexar {failed} documentos.\")\n else:\n print(f\"Se indexaron {success} documentos correctamente.\")\n\n\ningestion_bulk(new_index)\n\nend_time = time.time()\nall_time = end_time - start_time\n\nprint(f\"Tiempo de procesamiento: {float(all_time) / 60} minutos.\")\n","repo_name":"CristianERP/elastic","sub_path":"og_field_prices.py","file_name":"og_field_prices.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8621347781","text":"from collections import deque, defaultdict\n\nsteps = [(0, 0), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\nMap = [list(input()) for _ in range(8)]\nsx, sy, ex, ey = 7, 0, 0, 7\nblank_index = []\nfor i in range(8):\n for j in range(8):\n if Map[i][j] == \"#\":\n blank_index.append((i, j))\ntime = 0\nblank = defaultdict(set)\nwhile blank_index:\n next = []\n for x, y in blank_index:\n blank[(x, 
y)].add(time)\n        if x < 7:\n            x += 1\n        next.append((x, y))\n    blank_index = next\n    time += 1\nflag = 0\nq = deque()\nvisit = defaultdict(set)  # a time in visit[(x, y)] means that cell was visited at that time\n# x, y , cnt\nq.append((7, 0, 0))\nvisit[(7, 0)].add(0)\nwhile q:\n    x, y, cnt = q.popleft()\n    # case: a wall has moved onto this cell\n    if cnt in blank[(x, y)]:\n        continue\n    # if we are still alive after 7 or more moves, all walls have shifted to the bottom rows and can no longer trap us, so any cell is reachable\n    if cnt >= 7:\n        flag = 1\n        break\n    for dx, dy in steps:\n        nx, ny = x + dx, y + dy\n        if 0 <= nx < 8 and 0 <= ny < 8:\n            # cannot move there: it is a wall right now\n            if cnt in blank[(nx, ny)]:\n                continue\n            # enqueue only (x, y, cnt) states that were not already queued or visited\n            if cnt + 1 not in visit[(nx, ny)]:\n                visit[(nx, ny)].add(cnt + 1)\n                q.append((nx, ny, cnt + 1))\nprint(flag)","repo_name":"leejongcheal/baekjoon_course_coding","sub_path":"연습/그래프와 BFS/BFS 알고리즘/16954번 - 움직이는 미로 탈출.py","file_name":"16954번 - 움직이는 미로 탈출.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1721167013","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 16 14:28:08 2023\n\n@author: david\n\"\"\"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar  7 11:12:47 2023\n\n@author: david\n\"\"\"\n\n# Imports\nimport numpy as np\n\nfrom numpy.random import MT19937\nfrom numpy.random import RandomState, SeedSequence\n\nimport Perlin_Noise as pn\n\n \nclass Simulate_Observations():\n    \n    def __init__(self, shape, cycleLength, amountFI=.2, amountLand=.1, fluctuation=.005, noise=.03, errorRate=0, arealError=.9, clustered=2, maxSpeed=5, steepness=23, seed=None, no_starting_error=False):\n        '''\n        Generates velocities which are randomly distributed but clustered and change their appearance over time.\n        For fast ice simulation, so there are some larger areas with 'no' movement.\n\n        Parameters\n        ----------\n        shape : tuple\n            Size of grid to simulate (y,x).\n        cycleLength : float\n            Time period over which the ice freezes and starts breaking again.\n        amountFI : float in [0,1], optional\n            ~percentage of fast ice when fully 'grown'. The default is 0.2\n        amountLand : float in [0,1], optional\n            ~percentage of land. The default is 0.1\n        fluctuation : float, optional\n            Determines the fluctuation of fast ice and error rates in each observation, VERY sensitive -> std of normal. The default is 0.005\n        noise : float, optional\n            Determines the noise cast onto the observation, VERY sensitive -> std of normal. The default is 0.03\n        errorRate : float in [0,1], optional\n            expected percentage of missing data points and also the relative size of areal errors compared to pointwise. The default is 0.\n        arealError: float in [0,1], optional\n            expected chance for an areal error, only occurs if errorRate > 0. The default is .9\n        clustered : float, optional\n            How clustered the fast ice is, the lower the fewer clusters there are -> has to do with operating of perlin noise. The default is 2.\n        maxSpeed : float > 0, optional\n            the max speed a pixel can have. The default is 5.\n        steepness : float > 0, optional\n            the steepness of the threshold function for the fast ice development. The default is 23. (recommended between 10-25)\n        seed : int, optional\n            seed for the simulation. The default is None => a random seed between 0-99999999 is taken.\n        no_starting_error : bool, optional\n            determines if the first observation has errors or not. 
Default is False (errors present).\n\n        '''\n        self.y, self.x = shape\n        self.cycle = cycleLength\n        self.amountFI = amountFI\n        self.amountLand = amountLand\n        self.fluctuation = fluctuation\n        self.noise = noise\n        self.errorRate = errorRate\n        self.arealError = arealError\n        self.clustered = clustered\n        self.maxSpeed = maxSpeed\n        self.steepness = steepness\n        self.seed = seed\n        self.start_err = no_starting_error\n        \n        self.NaN = np.NaN\n        self.landVal = -1\n        \n        # Create perlin object\n        self.Perlin = pn.PerlinNoise2D()\n        \n        # check seed, else randomize it\n        if self.seed is None: self.seed = np.random.randint(0, 100000000)\n        self.rs = RandomState(MT19937(SeedSequence(self.seed)))\n        \n        # Initialize everything else needed\n        self.initialize()\n        \n        \n    def initialize(self):\n        # Create base noise for 'sea depth'\n        yLin, xLin = np.linspace(0, self.clustered, self.y), np.linspace(0, self.clustered, self.x) # The smaller 'clustered' the more the values are in one area\n        xGrid, yGrid = np.meshgrid(xLin, yLin)\n        self.perlinFix = self.Perlin.perlin(xGrid, yGrid, self.seed)\n        \n        # Compute sorted list\n        sortNoise = np.sort(self.perlinFix.flatten())\n        \n        ## Thresholding parameters\n        # Land\n        self.landTresh = sortNoise[int(self.y * self.x * (1- self.amountLand))] # Everything above is land\n        # Parameters for thresholding fast ice\n        minThresh = sortNoise[int(self.y * self.x * (1- self.amountLand - self.amountFI/10))] # Around 10% of all fast ice is shown at minimum\n        maxThresh = sortNoise[int(self.y * self.x * (1- self.amountLand - self.amountFI))] # Around the amountFI of all pixels are fast ice\n        \n        # Land values\n        self.land = np.where(self.perlinFix >= self.landTresh, 1, 0)\n        # Generate coast pixel\n        self.coast = self.coastPixel(self.land)\n        \n        # create thresholding function\n        mean_begin = .2\n        mean_end = .75\n        # pseudo truncated normal, since it does not exist in numpy but I need the seed dependence\n        begin = np.clip(self.rs.normal(loc=mean_begin, scale=.1), 0, 1)\n        end = np.clip(self.rs.normal(loc=mean_end, scale=.1), 0, 1)\n        if end > begin: \n            a = begin\n            begin = end\n            end = a\n        self.create_threshold(begin, end, self.steepness, minThresh, maxThresh)\n        \n        \n        # Parameters for error creation\n        self.keepOld = .8\n        self.aErr = .9 * self.arealError\n        self.pErr = 1 - self.aErr\n        # Initialize step\n        self.step = 0\n        # save last perlin noise\n        self.lastPerlin = self.perlinFix\n        # create first error noise\n        xGrid, yGrid = np.meshgrid(np.linspace(0, 2, self.x), np.linspace(0, 2, self.y))\n        self.lastError = self.Perlin.perlin(xGrid, yGrid, self.rs.randint(0, 100000000))\n\n    \n    ################\n    ## Simulation ##\n    ################ \n    def simulate(self):\n        # Increase step by 1\n        self.step += 1\n        # Threshold Fast Ice, everywhere else new Perlin 'Movement'\n        yLin, xLin = np.linspace(0, 2, self.y), np.linspace(0, 2, self.x) # Fixed Clustered=2, so it's more stable movement\n        xGrid, yGrid = np.meshgrid(xLin, yLin)\n        # seed for perlin noise\n        seed = self.rs.randint(0, 100000000)\n        # Compute new fast and drift ice\n        factor = 1 - np.clip(np.abs(self.rs.normal(scale=.4)), 0, 1)\n        newPerlin = (factor * self.prepPerlin(self.Perlin.perlin(xGrid, yGrid, seed)) + (1-factor) * self.lastPerlin)\n        \n        maskFI = np.logical_and(self.perlinFix >= self.threshold(self.step), self.perlinFix < self.landTresh)\n        speeds = np.where(maskFI, 0, newPerlin)\n\n        # create noise and add to speeds:\n        noise = self.rs.normal(scale=self.noise, size=(self.y, self.x))\n        total = np.abs(speeds + noise)\n        # set Coast Errors to special noise:\n        coastVals = 
self.rs.random(size=(self.y, self.x)) * self.maxSpeed / 5\n        total = np.where(self.coast, coastVals, total)\n        \n        # Add Errors\n        errors = self.createErrors()\n        # Remove corner point errors for convenience (interpolating problems and such)\n        errors[ 0, 0] = 0\n        errors[-1,-1] = 0\n        errors[0, -1] = 0\n        errors[-1, 0] = 0\n        # --------------- #\n        preFinal = np.where(errors, self.NaN, total)\n        final = np.where(self.land, self.landVal, preFinal)\n        \n        # Save last perlin noise\n        self.lastPerlin = newPerlin\n        \n        return final, maskFI\n    \n    ###################\n    ## Create Errors ##\n    ###################    \n    def createErrors(self):\n        if self.start_err and (self.step == 1 or self.step == self.cycle): return np.zeros((self.y, self.x))\n        \n        # Pointwise Errors\n        errorPoints = self.rs.binomial(1, self.errorRate * self.pErr, size=(self.y, self.x)) # Pointwise errors per step / later half (or 2/3) the errorRate for the area errors\n        # Area Errors\n        yLin, xLin = np.linspace(0, 2, self.y), np.linspace(0, 2, self.x) # Fixed Clustered=2, so it's more coherent\n        xGrid, yGrid = np.meshgrid(xLin, yLin)\n        seed = self.rs.randint(0, 100000000)\n        factor = np.clip(self.rs.normal(loc=self.keepOld, scale=.5), 0, 1)\n        perlin = factor * self.lastError + (1-factor) * self.Perlin.perlin(xGrid, yGrid, seed)\n        thresh = self.generateThresh(perlin) # threshold of last 0\n        area = np.where(perlin < thresh, 1, 0)\n        # setting old error\n        self.lastError = perlin\n        return np.logical_or(errorPoints, area)\n    \n    def generateThresh(self, perlin):\n        # generates the threshold for every step\n        cover = self.rs.random() > 1 - self.arealError\n        fluct = np.abs(self.rs.normal(loc=0, scale=self.fluctuation)) if self.errorRate > 0 else 0\n        return np.sort(perlin.flatten())[int(self.errorRate * self.y * self.x * self.aErr)* cover] + fluct\n    \n    ######################\n    ## Prepare Perlin   ##\n    ######################    \n    def prepPerlin(self, perlin):\n        return (perlin - np.min(perlin)) * self.maxSpeed\n    \n    ################\n    ## Threshold  ##\n    ################    \n    def create_threshold(self, begin, end, alpha, min_thresh, max_thresh):\n        def function(t):\n            x = t / self.cycle\n            return min_thresh + (( np.tanh(alpha * (x-begin)) - np.tanh(alpha * (x-end)) ) / 2) * (min_thresh - max_thresh) + self.rs.normal(scale=self.fluctuation)\n        self.threshold = function\n\n    ###############\n    ## CoastLine ##\n    ###############    \n    def coastPixel(self, pixel):\n        '''\n        Computes the coastline of a 2D pixel set; the pixels in question should be masked!\n\n        Parameters\n        ----------\n        pixel : np.array(y,x)\n            2D array of pixels; every 'land' pixel is set to 1.\n\n        Returns\n        -------\n        coast : np.array(y,x)\n            returns a mask of the coast line.\n\n        '''\n        def neighbours(row, col):\n\n            rows, cols = pixel.shape\n            out = []\n\n            for i in range(row - 1, row + 2):\n                vals = []\n                for j in range(col - 1, col + 2):\n\n                    if 0 <= i < rows and 0 <= j < cols:\n                        vals.append(pixel[i,j])\n                    else:\n                        vals.append(0)\n\n                out.append(vals)\n            return np.array(out).any()\n    \n        y, x = pixel.shape\n        # create empty canvas\n        empty = np.zeros((y,x))\n        \n        for yy in np.arange(y):\n            for xx in np.arange(x):\n                if neighbours(yy, xx): empty[yy, xx] = 1\n        return empty\n\n\n\n\n\n","repo_name":"P3ngwings/Bachelor_2023","sub_path":"simulation/Observation_Simulation.py","file_name":"Observation_Simulation.py","file_ext":"py","file_size_in_byte":10569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72960773767","text":"from django.shortcuts import render, redirect\nfrom 
django.views.generic import FormView, CreateView\n\nfrom . import models as md\n\n# NOTE: evaluated once at import time, so newly added referral codes are only\n# picked up after a restart.\nREFERAL_LIST = md.UserModel.referal_list()\n\n\nclass LinkHandler(FormView):\n    def get(self, request, *args, **kwargs):\n        return render(\n            request=request,\n            template_name='index.html',\n        )\n\n    def post(self, request, *args, **kwargs):\n        input_refer = str(request.POST.get('text', ''))\n        if input_refer in REFERAL_LIST:\n            url = f'https://t.me/alphabots_testbot?start={input_refer}'\n            return redirect(\n                to=url,\n            )\n        else:\n            return render(\n                request=request,\n                template_name='referal_error.html',\n                context={'refers': REFERAL_LIST},\n            )\n\n# class MessageHandler(CreateView):\n#     def post(self, request, *args, **kwargs):\n","repo_name":"antonyuhnovets/AlphaBots_test","sub_path":"bot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19594595031","text":"import jax\nimport jax.numpy as jnp\nfrom jax import random, grad, vmap, jit\nfrom matplotlib import pyplot as plt\n\nimport natgrad.mlp as mlp\n\nfrom natgrad.gram import gram_factory, nat_grad_factory\n\nfrom natgrad.domains import Square\nfrom natgrad.domains import SquareBoundary\nfrom natgrad.integrators import DeterministicIntegrator\nfrom natgrad.derivatives import laplace, model_laplace\n\nfrom typing import Any\nfrom jaxtyping import Array, Float, jaxtyped\nfrom typeguard import typechecked as typechecker\n\n# -Delta u(x,y) = 2*sin(x)*sin(y) on the domain [-pi,pi], with exact solution u(x,y) = sin(x)*sin(y)\n\njax.config.update(\"jax_enable_x64\", True)\n\n# integration \nsquare = Square(3.14159265)\n#square = Square(1.)\nsquare_boundary = SquareBoundary(3.14159265)\nintegrator = DeterministicIntegrator(square, 20)\neval_integrator = DeterministicIntegrator(square, 200)\nbdry_integrator = DeterministicIntegrator(square_boundary, 50)\nx = square.deterministic_integration_points(20)\n\n# model\nactivation = lambda x : jnp.tanh(x)\n#activation = lambda x : jnp.sin(x)**2\n#activation = lambda x : jnp.maximum(0., x)**3\nlayer_sizes = [2, 32, 1]\nparams = mlp.init_params(layer_sizes, random.PRNGKey(0))\n# maps: [(*, *), ..., (*, *)], (2,) ---> ()\nmodel = mlp.mlp(activation) \n\n\ndistance_func = square.distance_function\nv_distance_func = vmap(distance_func, (0))\n\n# maps: [(*, *), ..., (*, *)], (2,) ---> ()\n@typechecker\ndef truncated_model(params: Any, x: Float[Array, \"2\"]) -> Float[Array, \"\"]:\n    return model(params, x) * distance_func(x)\n\n# maps: [(*, *), ..., (*, *)], (n,2) ---> (n,)\nv_model = vmap(model, (None, 0))\n\n# maps: [(*, *), ..., (*, *)], (n,2) ---> (n,)\nv_truncated_model = vmap(truncated_model, (None, 0))\n\n# right-hand side\n# maps: (n, 2) ---> (n,)\ndef f_(xy):\n    x = xy[:,0]\n    y = xy[:,1]\n    return 2. * jnp.sin(y) * jnp.sin(x)\n\n# right-hand side\n# maps: (2,) ---> ()\ndef f(xy):\n    x = xy[0]\n    y = xy[1]\n    return 2. 
* jnp.sin(y) * jnp.sin(x)\n\n# solution\n# maps: (n, 2) ---> (n,)\ndef u_star(xy):\n x = xy[:,0]\n y = xy[:,1]\n return jnp.sin(y) * jnp.sin(x)\n\ndef u_star_(xy):\n x = xy[0]\n y = xy[1]\n return jnp.sin(y) * jnp.sin(x)\n\nlaplace_test = laplace(u_star_)\nv_laplace_test = vmap(laplace_test, (0))\nplt.scatter(x[:,0], x[:,1], c = -v_laplace_test(x) - f_(x), s = 20)\n#plt.show()\n\n# maps [(*,*), ..., (*,*)] ---> ()\n@typechecker\ndef loss(params: Any) -> Float[Array, \"\"]:\n # maps (2,) ---> ()\n laplace_model = laplace(lambda x: truncated_model(params, x))\n\n # maps: (2,) ---> ()\n integrand = lambda x: (laplace_model(x) + f(x))**2\n\n # maps: (n, 2) ---> (n,)\n v_integrand_ = vmap(integrand, (0))\n @jaxtyped\n @typechecker\n def v_integrand(x: Float[Array, \"n 2\"]) -> Float[Array, \"n\"]:\n return v_integrand_(x)\n \n return jnp.reshape(integrator(v_integrand), ())\n\n# L2Norm\n# func must map: (n, 2) ---> (n,)\ndef L2Norm(func):\n return eval_integrator(\n lambda x: (func(x))**2\n )**0.5\n\n# maps: params ---> (Pdim, Pdim)\ngram_laplace = gram_factory(\n model = truncated_model,\n trafo = model_laplace,\n integrator = integrator,\n)\n\n# maps: params, tangent_params ---> tangent_params\nnat_grad = nat_grad_factory(gram_laplace)\n\n\n\n@jit\ndef grid_line_search(params, grads):\n grid = jnp.linspace(0, 400, 401)\n steps = 0.9**grid\n\n #grid = jnp.linspace(0, 30, 31)\n #steps = 0.5**grid\n\n def loss_at_step(step):\n updated_params = [(w - step * dw, b - step * db)\n for (w, b), (dw, db) in zip(params, grads)]\n return loss(updated_params)\n \n v_loss_at_step = vmap(loss_at_step)\n\n losses = v_loss_at_step(steps)\n step_size = steps[jnp.argmin(losses)]\n return [(w - step_size * dw, b - step_size * db)\n for (w, b), (dw, db) in zip(params, grads)], step_size\n\n# gradient update\n@jit\ndef update(params, step_size):\n grads = grad(loss)(params)\n return [(w - step_size * dw, b - step_size * db)\n for (w, b), (dw, db) in zip(params, grads)]\n\nerror_func = lambda x: v_truncated_model(params, x) - u_star(x)\n\n# training loop\nstep = .0001\nfor iteration in range(100):\n params = update(params, step)\n if iteration > 4000:\n step = 0.01\n error = L2Norm(lambda x: v_truncated_model(params, x) - u_star(x))\n print(f'Iteration: {iteration} with loss: {loss(params)} with error: {error}')\n\n# natural gradient descent with line search\nfor iteration in range(100):\n grads = grad(loss)(params)\n nat_grads = nat_grad(params, grads)\n \n #params = line_search(loss, params, nat_grads)\n params, step_size = grid_line_search(params, nat_grads)\n\n error = L2Norm(error_func)\n print(f'Iteration: {iteration} with loss: {loss(params)} with error: {error} with step size: {step_size}')\n\n\n\n\nplt.scatter(x[:,0], x[:,1], c = v_truncated_model(params, x), s = 20)\nplt.savefig('out/2dpreds.png')\nplt.clf()\nplt.scatter(x[:,0], x[:,1], c = u_star(x), s = 20)\nplt.savefig('out/2dgroundtruth.png')\nplt.clf()\nplt.scatter(x[:,0], x[:,1], c = v_truncated_model(params, x) - u_star(x), s = 20)\nplt.savefig('out/2derror.png')\n","repo_name":"emastr/NaturalGradientTraining","sub_path":"examples/non-maintained/2dpinns.py","file_name":"2dpinns.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4881728614","text":"#!/usr/bin/env python3\n# coding: utf-8\n# ##############################################################################\n# Description: This script crypt or decrypt string\n#\n# Required: - Run as 
Standard User.\n# - Python 3.x\n#\n#\n# Author: LEJOSNE Florian\n#\n# Date: 2021.04.08\n# ##############################################################################\n\n# ==============================================================================\n# IMPORTS\n# ==============================================================================\n\n\n# ==============================================================================\n# GLOBAL VARIABLE\n# ==============================================================================\n\nfirst_mess = 'Welcome in Crypto World'\nsecond_mess = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n\n\n# ==============================================================================\n# SUB FUNCTIONS\n# ==============================================================================\n\ndef test():\n \"\"\"\n This function try to code and decode with first and second message\n\n :return: None\n \"\"\"\n # First test\n start_string()\n print(\"| Original Crypto Message: '{}'\".format(\n first_mess\n ))\n end_string()\n # Encrypted first test\n start_string()\n print(\"| Encrypted Crypto Message: '{}'\".format(\n crypt(first_mess)\n ))\n end_string()\n # Decryptes first test\n start_string()\n print(\"| Decrypted Crypto Message: '{}'\".format(\n decrypt(crypt(first_mess))\n ))\n end_string()\n # Second test\n start_string()\n print(\"| Original Crypto Message: '{}'\".format(\n second_mess\n ))\n end_string()\n # Encrypted Second test\n\n start_string()\n print(\"| Encrypted Crypto Message: '{}'\".format(\n crypt(second_mess)\n ))\n end_string()\n # Decrypted Second test\n\n start_string()\n print(\"| Decrypted Crypto Message: '{}'\".format(\n decrypt(crypt(second_mess))\n ))\n end_string()\n\n\ndef end_string():\n \"\"\"\"\n This function print the end symbol\n :return: None\n \"\"\"\n print(\"\\\\===\")\n print()\n\n\ndef start_string():\n \"\"\"\"\n This function print the start symbol\n :return: None\n \"\"\"\n print(\"/===\")\n\n\ndef decrypt(decrypt_mess):\n \"\"\"\"\n This function decrypt the message\n\n\n :param: decrypt_mess\n :return: decrypted_message\n \"\"\"\n decrypted_message = \"\"\n if decrypt_mess:\n for char in decrypt_mess:\n # If Alphabet\n if 65 <= ord(char) <= 90 or 97 <= ord(char) <= 122:\n # If Upper\n if 65 <= ord(char) <= 90:\n if ord(char) + 13 > 90:\n char = chr(ord(char) + 13 - 90 + 65 - 1)\n else:\n char = chr(ord(char) + 13)\n else:\n if ord(char) + 13 > 122:\n char = chr(ord(char) + 13 - 122 + 97 - 1)\n else:\n char = chr(ord(char) + 13)\n decrypted_message += char\n return decrypted_message\n\n\ndef crypt(encrypt_mess):\n \"\"\"\"\n This function encrypt the message\n\n\n :param: encrypt_mess\n :return: encrypted_message\n \"\"\"\n encrypted_message = \"\"\n if encrypt_mess:\n for char in encrypt_mess:\n # If Alphabet\n if 65 <= ord(char) <= 90 or 97 <= ord(char) <= 122:\n # If Upper\n if 65 <= ord(char) <= 90:\n if ord(char) + 13 > 90:\n char = chr(ord(char) + 13 - 90 + 65 - 1)\n else:\n char = chr(ord(char) + 13)\n else:\n if ord(char) + 13 > 122:\n char = chr(ord(char) + 13 - 122 + 97 - 1)\n else:\n char = chr(ord(char) + 13)\n encrypted_message += char\n return encrypted_message\n\n\n# ==============================================================================\n# PROCESS\n# ==============================================================================\nif __name__ == '__main__':\n 
test()\n","repo_name":"x33lyS/Code-Training","sub_path":"Python/final-py/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"70678656007","text":"import os\nimport sys \nimport random\nimport argparse\nimport numpy as np \nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom torch.utils.data import TensorDataset, DataLoader\n\nfrom tensorboardX import SummaryWriter\n\nwriter = SummaryWriter('all_runs/acgan_runs_test')\nr_writer = SummaryWriter('all_runs/errD_real_runs_test')\nw_writer = SummaryWriter('all_runs/errD_wrong_runs_test')\nf_writer = SummaryWriter('all_runs/errD_fake_runs_test')\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-m','--mode',required=True, help='train | test')\nparser.add_argument('-dr','--dataroot', required=True, help='path to dataset')\nparser.add_argument('-w','--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('-b','--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('-z','--nz', type=int, default=128, help='size of the latent z vector')\nparser.add_argument('-c','--nconds',type=int, default=24,help='size of the latent c vector')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--ngf', type=int, default=64, help='number of filters of generator')\nparser.add_argument('--ndf', type=int, default=64, help='number of filters of discriminator')\nparser.add_argument('--niter', type=int, default=45, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta for optimizer ')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--outf', default='./pth', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--flip',action='store_true', help='flip training images')\n\nparser.add_argument('--testing_file',type=str,default='./test_tags.txt',help='path of testing file')\nparser.add_argument('--demo_file', type=str, default='./demo_tags.txt',help='path of demo file')\n\nopt = parser.parse_args()\n\n############# Helper Utils #############\n\ndef resize(jpgfile):\n    # Image.size is a (width, height) tuple, and resize() returns a new\n    # image instead of resizing in place, so the result must be reassigned.\n    if jpgfile.size != (64, 64):\n        jpgfile = jpgfile.resize((64, 64), Image.ANTIALIAS)\n    return jpgfile\n\ndef load_imgs(dirname):\n    img_list = [ os.path.join(dirname, filename) for filename in sorted(os.listdir(dirname), key=lambda img:int(img[0:-4])) ]\n    imgs = [ np.array(resize(Image.open(filename)), dtype=np.float32) for filename in img_list ]\n    for i in imgs:\n        if i.size != 12288:\n            print(i.size)\n    imgs = np.stack(imgs)\n    imgs = imgs / 255.\n    return imgs\n\ndef load_tags(filename):\n    tag_file = open(filename, 'r')\n    tags = []\n    for line in tag_file:\n        words = line.strip().split(',')[1].split()\n        tags.append([' '.join(words[:2]), ' '.join(words[2:4]), ' '.join(words[4:])])\n    tags = np.array(tags)\n    return tags\n\ndef 
demo_tags(demofilename):\n tag_file = open(demofilename, 'r')\n tags = []\n for line in tag_file:\n words = line.split()\n target = [' '.join(words[:2]), ' '.join(words[2:4]), ' '.join(words[4:])]\n \n tags = [ target for _ in range(64) ]\n \n return tags\n\ndef gen_fake_conds(real_conds, num_style, num_hair, num_eyes):\n batch_size = real_conds.size(0)\n style_idx = np.random.randint(num_style, size=(batch_size,))\n hair_idx = np.random.randint(num_hair, size=(batch_size,)) + num_style\n eyes_idx = np.random.randint(num_eyes, size=(batch_size,)) + num_style + num_hair\n\n fake_conds = np.zeros((batch_size, num_style + num_hair + num_eyes), dtype=np.float32)\n\n for i, (s_i, h_i, e_i) in enumerate(zip(style_idx, hair_idx, eyes_idx)):\n if real_conds[i][h_i] == 1. and real_conds[i][e_i] == 1. and real_conds[i][s_i] == 1: \n h_i = (h_i + 1) % num_hair\n fake_conds[(i,i, i), (s_i, h_i, e_i)] = 1.\n \n fake_conds = torch.FloatTensor(fake_conds)\n\n return fake_conds\n\ndef weights_init(module):\n classname = module.__class__.__name__\n if classname.find('Conv') != -1:\n module.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1: \n module.weight.data.normal_(1.0, 0.02)\n module.bias.data.fill_(0)\n\n############# Preparation #############\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\ncudnn.benchmark = True\n\nngpu = int(opt.ngpu)\nnz = int(opt.nz)\nngf = int(opt.ngf)\nndf = int(opt.ndf)\nnconds = int(opt.nconds)\n\n############# Data Folder #############\n\nimg_dir = opt.dataroot + 'imgs'\ntag_file = opt.dataroot + 'tags.csv'\nval_tag_file = 'val_tags.txt'\n\n############# Generator Model #############\n\nclass _netG(nn.Module):\n def __init__(self):\n super(_netG, self).__init__()\n self.embedding = nn.Linear(nz+nconds,ngf*8*4*4,bias=False)\n self.main = nn.Sequential(\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d( ngf, 3, 4, 2, 1, bias=False),\n nn.Tanh() \n )\n def forward(self, noises, conds):\n inputs = torch.cat((noises, conds), dim=1)\n inputs = self.embedding(inputs)\n inputs = inputs.view(-1, ngf * 8, 4, 4)\n outputs = self.main(inputs)\n return outputs\n\n############# Discriminator Model #############\n\nclass _netD(nn.Module):\n def __init__(self):\n super(_netD, self).__init__()\n self.main = nn.Sequential(\n nn.Conv2d(3, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8)\n )\n self.out = nn.Sequential(\n nn.Conv2d(ndf * 8 + nconds, ndf, 1, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf, 1, 4, 1),\n nn.Sigmoid()\n )\n def forward(self, imgs, conds):\n hiddens = self.main(imgs)\n conds = conds.view(*conds.size(), 1, 1)\n conds = conds.repeat(1, 1, 4, 4)\n hiddens = torch.cat((hiddens, conds), dim=1)\n outputs = 
self.out(hiddens)\n outputs = outputs.squeeze()\n return outputs\n\nnetG = _netG()\nnetG.apply(weights_init)\nprint(netG)\n\nnetD = _netD()\nnetD.apply(weights_init)\nprint(netD)\n\n\n\n\nif opt.mode == 'train':\n\n imgs = load_imgs(img_dir)\n tags = load_tags(tag_file)\n\n imgs = np.concatenate((imgs,np.flip(imgs,axis=0)),axis=0)\n tags = np.tile(tags,(2,1))\n\n print('Finished loading training images...')\n print('Number of training images: ',len(tags))\n\n imgs = torch.FloatTensor(imgs).permute(0,3,1,2)\n\n style_dict = {}\n hair_dict = {}\n eyes_dict = {}\n for style_feat in np.unique(tags[:, 0]):\n style_dict[style_feat] = len(style_dict)\n for hair_feat in np.unique(tags[:, 1]): \n hair_dict[hair_feat] = len(hair_dict)\n for eyes_feat in np.unique(tags[:, 2]): \n eyes_dict[eyes_feat] = len(eyes_dict)\n\n num_style = len(style_dict)\n num_hair = len(hair_dict)\n num_eyes = len(eyes_dict)\n\n print(style_dict)\n print(hair_dict)\n print(eyes_dict)\n\n conditions = torch.zeros((imgs.size(0), num_style + num_hair + num_eyes))\n for i, tag in enumerate(tags):\n conditions[(i, i, i), (style_dict[tag[0]], hair_dict[tag[1]] + num_style, eyes_dict[tag[2]] + num_style + num_hair)] = 1.\n\n dataset = TensorDataset(imgs,conditions)\n dataloader = DataLoader(dataset,\n batch_size=opt.batchSize,\n shuffle=True,\n num_workers=int(opt.workers))\n\n val_tags = load_tags(val_tag_file)\n val_conditions = torch.zeros((len(val_tags), num_style + num_hair + num_eyes ))\n for i, tag in enumerate(val_tags): \n val_conditions[(i, i, i), (style_dict[tag[0]], hair_dict[tag[1]] + num_style, eyes_dict[tag[2]] + num_style + num_hair)] = 1.\n val_noise = torch.Tensor(len(val_tags), nz).uniform_(-1.0, 1.0)\n\n\n if opt.cuda:\n val_noise = Variable(val_noise).cuda()\n val_conditions = Variable(val_conditions).cuda()\n netG = netG.cuda()\n netD = netD.cuda()\n\n optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\n criterion = nn.BCELoss()\n \n for epoch in range(opt.niter):\n\n netD.train()\n netG.train()\n\n for local_step, (real_x, real_conds) in enumerate(dataloader):\n\n global_step = epoch*len(dataloader) + local_step\n\n\n ############################\n # (1) Update Discriminator #\n ############################\n\n optimizerD.zero_grad()\n real_y = torch.ones((real_x.size(0),))\n fake_y = torch.zeros((real_x.size(0),))\n tr_noise = torch.Tensor(real_x.size(0), nz).uniform_(-1.0, 1.0)\n fake_conds = gen_fake_conds(real_conds, num_style ,num_hair, num_eyes)\n if opt.cuda:\n real_x = Variable(real_x).cuda()\n real_y = Variable(real_y).cuda()\n fake_y = Variable(fake_y).cuda()\n tr_noise = Variable(tr_noise).cuda()\n real_conds = Variable(real_conds).cuda()\n fake_conds = Variable(fake_conds).cuda()\n else:\n real_x = Variable(real_x)\n real_y = Variable(real_y)\n fake_y = Variable(fake_y)\n tr_noise = Variable(tr_noise)\n real_conds = Variable(real_conds)\n fake_conds = Variable(fake_conds)\n \n # Train with (real image, real tags)\n output = netD(real_x, real_conds)\n errD_real = criterion(output, real_y)\n # Train with (real_image, fake tags)\n output = netD(real_x, fake_conds)\n errD_wrong = criterion(output, fake_y)\n # Train with (fake_image, real tags)\n gen_x = netG(tr_noise, real_conds)\n output = netD(gen_x.detach(), real_conds)\n errD_fake = criterion(output, fake_y)\n \n errD = errD_real + errD_wrong + errD_fake\n errD.backward()\n optimizerD.step()\n\n r_writer.add_scalar('errD', errD_real, 
global_step)\n w_writer.add_scalar('errD', errD_wrong, global_step)\n f_writer.add_scalar('errD', errD_fake, global_step)\n\n #########################\n # (2) Update Generator #\n #########################\n optimizerG.zero_grad()\n fake_y.fill_(1.)\n output = netD(gen_x, real_conds)\n errG = criterion(output, fake_y)\n errG.backward()\n optimizerG.step()\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f'\n % (epoch, opt.niter, local_step, len(dataloader),\n errD.data, errG.data))\n\n writer.add_scalar('Loss/D', errD, global_step)\n writer.add_scalar('Loss/G', errG, global_step)\n\n val_x = netG(val_noise, val_conditions)\n\n writer.add_image('conditioned_fake_samples', vutils.make_grid(val_x.data, nrow=8, normalize=True), epoch)\n\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))\n\n\nif opt.mode == 'test':\n \n test_tag_file = opt.testing_file\n demo_tag_file = opt.demo_file\n\n style_dict = {'long hair': 0,\n 'short hair': 1}\n hair_dict = {'aqua hair': 0, \n 'black hair': 1, \n 'blonde hair': 2, \n 'blue hair': 3, \n 'brown hair': 4, \n 'gray hair': 5, \n 'green hair': 6, \n 'orange hair': 7, \n 'pink hair': 8, \n 'purple hair': 9, \n 'red hair': 10, \n 'white hair': 11}\n eyes_dict = {'aqua eyes': 0, \n 'black eyes': 1, \n 'blue eyes': 2, \n 'brown eyes': 3, \n 'green eyes': 4, \n 'orange eyes': 5, \n 'pink eyes': 6, \n 'purple eyes': 7, \n 'red eyes': 8, \n 'yellow eyes': 9}\n\n num_style = len(style_dict)\n num_eyes = len(eyes_dict)\n num_hair = len(hair_dict)\n\n val_tags = load_tags(test_tag_file)\n # val_tags = demo_tags(demo_tag_file)\n\n val_conditions = torch.zeros((len(val_tags), num_style + num_hair + num_eyes ))\n for i, tag in enumerate(val_tags): \n val_conditions[(i, i, i), (style_dict[tag[0]], hair_dict[tag[1]] + num_style, eyes_dict[tag[2]] + num_style + num_hair)] = 1.\n val_noise = torch.Tensor(len(val_tags), nz).uniform_(-1.0, 1.0)\n\n if opt.cuda:\n val_noise = Variable(val_noise).cuda()\n val_conditions = Variable(val_conditions).cuda()\n netG = netG.cuda()\n\n model_path = 'acgan_netG.pth'\n netG.load_state_dict(torch.load(model_path))\n\n val_x = netG(val_noise, val_conditions)\n vutils.save_image(val_x.data,\n 'cgan.png',\n nrow=8,\n normalize=True,\n padding=10,\n pad_value=1)\n\n else:\n val_noise = Variable(val_noise)\n val_conditions = Variable(val_conditions)\n netG = netG\n\n model_path = 'weights/acgan_netG_42.pth'\n netG.load_state_dict(torch.load(model_path, map_location='cpu'))\n\n val_x = netG(val_noise, val_conditions)\n vutils.save_image(val_x.data,\n 'cgan1.png',\n nrow=8,\n normalize=True,\n padding=10,\n pad_value=1)","repo_name":"yangchris11/CSE599G","sub_path":"dl-final-2019a/acgan.py","file_name":"acgan.py","file_ext":"py","file_size_in_byte":14945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17593858921","text":"import logging\nfrom modules.base_module import Module\n# from modules.location import refresh_avatar\nfrom inventory import Inventory\nimport const\n\nclass_name = \"Avatar\"\n\n\nclass Avatar(Module):\n prefix = \"a\"\n\n def __init__(self, server):\n self.server = server\n self.commands = {\"apprnc\": self.appearance, \"clths\": self.clothes}\n self.clothes_list = server.parser.parse_clothes()\n self.sets = server.parser.parse_cloth_sets()\n\n async def appearance(self, msg, client):\n subcommand = msg[1].split(\".\")[2]\n if subcommand == \"rnn\":\n name = msg[2][\"unm\"].strip()\n if not name:\n return\n if len(name) > 
const.MAX_NAME_LEN:\n return\n await self.server.redis.lset(f\"uid:{client.uid}:appearance\",\n 0, name)\n user_data = await self.server.get_user_data(client.uid)\n await client.send([\"a.apprnc.rnn\",\n {\"res\": {\"slvr\": user_data[\"slvr\"],\n \"enrg\": user_data[\"enrg\"],\n \"emd\": user_data[\"emd\"],\n \"gld\": user_data[\"gld\"]},\n \"unm\": name}])\n elif subcommand == \"save\":\n apprnc = msg[2][\"apprnc\"]\n current_apprnc = await self.server.get_appearance(client.uid)\n if not current_apprnc:\n await self.update_appearance(apprnc, client)\n self.server.inv[client.uid] = Inventory(self.server,\n client.uid)\n await self.server.inv[client.uid]._get_inventory()\n inv = self.server.inv[client.uid]\n await self.server.redis.set(f\"uid:{client.uid}:wearing\",\n \"casual\")\n if apprnc[\"g\"] == 1:\n weared = [\"boyShoes8\", \"boyPants10\", \"boyShirt14\"]\n available = [\"boyUnderdress1\"]\n else:\n weared = [\"girlShoes14\", \"girlPants9\", \"girlShirt12\"]\n available = [\"girlUnderdress1\", \"girlUnderdress2\"]\n for item in weared+available:\n await inv.add_item(item, \"cls\")\n user_data = await self.server.get_user_data(client.uid)\n if user_data[\"premium\"]:\n for item in const.PREMIUM_BUBBLES:\n await inv.add_item(item, \"gm\")\n if user_data[\"role\"] >= 2:\n await inv.add_item(\"moderatorChatBubbleDecor\", \"gm\")\n for item in weared:\n await inv.change_wearing(item, True)\n for item in const.room_items:\n await self.server.modules[\"frn\"].add_item(item,\n \"livingroom\",\n client.uid)\n for i in range(1, 6):\n await self.server.modules[\"frn\"].add_item(item,\n f\"room{i}\",\n client.uid)\n else:\n if apprnc[\"g\"] != current_apprnc[\"g\"]:\n logging.info(\"gender doesn't match!\")\n return\n await self.update_appearance(apprnc, client)\n apprnc = await self.server.get_appearance(client.uid)\n await client.send([\"a.apprnc.save\", {\"apprnc\": apprnc}])\n\n async def clothes(self, msg, client):\n subcommand = msg[1].split(\".\")[2]\n try:\n if subcommand == \"wear\":\n await self.wear_cloth(msg, client)\n elif subcommand == \"buy\":\n clothes = [{\"tpid\": msg[2][\"tpid\"], \"clid\": \"\"}]\n await self.buy_clothes(msg[1], clothes, msg[2][\"ctp\"], client)\n elif subcommand in [\"bcc\", \"bac\"]:\n await self.buy_clothes(msg[1], msg[2][\"clths\"], msg[2][\"ctp\"],\n client)\n elif subcommand == \"bst\":\n await self.buy_clothes_suit(msg[2][\"tpid\"], msg[2][\"ctp\"],\n client)\n else:\n logging.warning(f\"Command {msg[1]} not found\")\n except KeyError:\n client.writer.close()\n\n async def change_ctp(self, uid, new_ctp):\n ctp = await self.server.redis.get(f\"uid:{uid}:wearing\")\n if ctp == new_ctp:\n return\n await self.server.redis.set(f\"uid:{uid}:wearing\", new_ctp)\n await self.server.inv[uid]._get_inventory()\n\n async def wear_cloth(self, msg, client):\n ctp = msg[2][\"ctp\"]\n if ctp not in [\"casual\", \"club\", \"official\", \"swimwear\", \"underdress\"]:\n return\n user_data = await self.server.get_user_data(client.uid)\n if ctp != \"casual\" and not user_data[\"premium\"]:\n await client.send([\"cp.ms.rsm\", {\"txt\": \"Сохренение и покупка \"\n \"одежды доступна только в \"\n \"слот 'повседневная'. 
\"\n \"Чтобы сохранять и \"\n \"покупать одежду в других \"\n \"слотах, оформите \"\n \"Премиум\"}])\n return\n await self.change_ctp(client.uid, ctp)\n wearing = await self.server.redis.smembers(f\"uid:{client.uid}:{ctp}\")\n for cloth in wearing:\n await self.server.inv[client.uid].change_wearing(cloth, False)\n clths = msg[2][\"clths\"]\n for cloth in clths:\n if cloth[\"clid\"]:\n tmp = f\"{cloth['tpid']}_{cloth['clid']}\"\n else:\n tmp = cloth[\"tpid\"]\n await self.server.inv[client.uid].change_wearing(tmp, True)\n inv = self.server.inv[client.uid].get()\n clths = await self.server.get_clothes(client.uid, type_=2)\n ccltn = await self.server.get_clothes(client.uid, type_=3)\n await client.send([\"a.clths.wear\", {\"inv\": inv, \"clths\": clths,\n \"ccltn\": ccltn, \"cn\": \"\",\n \"ctp\": ctp}])\n\n async def buy_clothes(self, command, clothes, ctp, client):\n items = await self.server.redis.smembers(f\"uid:{client.uid}:items\")\n if (await self.server.get_appearance(client.uid))[\"g\"] == 1:\n gender = \"boy\"\n else:\n gender = \"girl\"\n gold = 0\n silver = 0\n rating = 0\n to_buy = []\n user_data = await self.server.get_user_data(client.uid)\n for item in clothes:\n cloth = item[\"tpid\"]\n clid = item[\"clid\"]\n if clid:\n name = f\"{cloth}_{clid}\"\n else:\n name = cloth\n if name in items or cloth in items:\n continue\n for category in self.clothes_list[gender]:\n for item in self.clothes_list[gender][category]:\n if item == cloth:\n tmp = self.clothes_list[gender][category][item]\n if not tmp[\"canBuy\"]:\n continue\n if tmp[\"vipOnly\"]:\n if not user_data[\"premium\"]:\n return\n gold += tmp[\"gold\"]\n silver += tmp[\"silver\"]\n rating += tmp[\"rating\"]\n if clid:\n to_buy.append(name)\n else:\n to_buy.append(cloth)\n break\n if ctp != \"casual\" and not user_data[\"premium\"]:\n await client.send([\"cp.ms.rsm\", {\"txt\": \"Сохренение и покупка \"\n \"одежды доступна только в \"\n \"слот 'повседневная'. 
\"\n \"Чтобы сохранять и \"\n \"покупать одежду в других \"\n \"слотах, оформите \"\n \"Премиум\"}])\n return\n if not to_buy or user_data[\"gld\"] < gold or user_data[\"slvr\"] < silver:\n return\n pipe = self.server.redis.pipeline()\n pipe.set(f\"uid:{client.uid}:gld\", user_data[\"gld\"] - gold)\n pipe.set(f\"uid:{client.uid}:slvr\", user_data[\"slvr\"] - silver)\n pipe.set(f\"uid:{client.uid}:crt\", user_data[\"crt\"] + rating)\n await pipe.execute()\n await self.change_ctp(client.uid, ctp)\n for cloth in to_buy:\n await self.server.inv[client.uid].add_item(cloth, \"cls\")\n await self.server.inv[client.uid].change_wearing(cloth, True)\n user_data = await self.server.get_user_data(client.uid)\n inv = self.server.inv[client.uid].get()\n clths = await self.server.get_clothes(client.uid, type_=2)\n ccltn = await self.server.get_clothes(client.uid, type_=1)\n ccltn = ccltn[\"ccltns\"][ctp]\n await client.send([command, {\"inv\": inv,\n \"res\": {\"gld\": user_data[\"gld\"],\n \"slvr\": user_data[\"slvr\"],\n \"emd\": user_data[\"emd\"],\n \"enrg\": user_data[\"enrg\"]},\n \"clths\": clths, \"ccltn\": ccltn,\n \"crt\": user_data[\"crt\"]}])\n\n async def buy_clothes_suit(self, tpid, ctp, client):\n if (await self.server.get_appearance(client.uid))[\"g\"] == 1:\n gender = \"boy\"\n else:\n gender = \"girl\"\n if tpid not in self.sets[gender]:\n logging.info(f\"Set {tpid} not found\")\n return\n gold = 0\n silver = 0\n rating = 0\n items = await self.server.redis.smembers(f\"uid:{client.uid}:items\")\n to_buy = []\n user_data = await self.server.get_user_data(client.uid)\n for cloth in self.sets[gender][tpid]:\n if \":\" in cloth:\n cloth = cloth.replace(\":\", \"_\")\n if cloth in items:\n continue\n category = self.get_category(cloth, gender)\n if not category:\n continue\n attrs = self.clothes_list[gender][category][cloth]\n if not attrs[\"canBuy\"]:\n continue\n if attrs[\"vipOnly\"]:\n if not user_data[\"premium\"]:\n continue\n gold += attrs[\"gold\"]\n silver += attrs[\"silver\"]\n rating += attrs[\"rating\"]\n to_buy.append(cloth)\n if ctp != \"casual\" and not (user_data[\"role\"] or user_data[\"premium\"]):\n await client.send([\"cp.ms.rsm\", {\"txt\": \"Сохренение и покупка \"\n \"одежды доступна только в \"\n \"слот 'повседневная'. 
\"\n \"Чтобы сохранять и \"\n \"покупать одежду в других \"\n \"слотах, оформите \"\n \"Премиум\"}])\n return\n if user_data[\"gld\"] < gold or user_data[\"slvr\"] < silver:\n return\n await self.server.redis.set(f\"uid:{client.uid}:gld\",\n user_data[\"gld\"] - gold)\n await self.server.redis.set(f\"uid:{client.uid}:slvr\",\n user_data[\"slvr\"] - silver)\n await self.server.redis.set(f\"uid:{client.uid}:crt\",\n user_data[\"crt\"] + rating)\n if ctp not in [\"casual\", \"club\", \"official\", \"swimwear\", \"underdress\"]:\n return\n await self.change_ctp(client.uid, ctp)\n for cloth in to_buy:\n await self.server.inv[client.uid].add_item(cloth, \"cls\")\n await self.server.inv[client.uid].change_wearing(cloth, True)\n inv = self.server.inv[client.uid].get()\n clths = await self.server.get_clothes(client.uid, type_=2)\n ccltn = await self.server.get_clothes(client.uid, type_=1)\n ccltn = ccltn[\"ccltns\"][ctp]\n user_data = await self.server.get_user_data(client.uid)\n await client.send([\"a.clths.buy\", {\"inv\": inv,\n \"res\": {\"slvr\": user_data[\"slvr\"],\n \"enrg\": user_data[\"enrg\"],\n \"emd\": user_data[\"emd\"],\n \"gld\": user_data[\"gld\"]},\n \"clths\": clths, \"ccltn\": ccltn,\n \"crt\": user_data[\"crt\"]}])\n\n async def update_appearance(self, apprnc, client):\n old = await self.server.get_appearance(client.uid)\n if old:\n nick = old[\"n\"]\n else:\n nick = apprnc[\"n\"]\n redis = self.server.redis\n await redis.delete(f\"uid:{client.uid}:appearance\")\n await redis.rpush(f\"uid:{client.uid}:appearance\", nick,\n apprnc[\"nct\"], apprnc[\"g\"], apprnc[\"sc\"],\n apprnc[\"ht\"], apprnc[\"hc\"], apprnc[\"brt\"],\n apprnc[\"brc\"], apprnc[\"et\"], apprnc[\"ec\"],\n apprnc[\"fft\"], apprnc[\"fat\"], apprnc[\"fac\"],\n apprnc[\"ss\"], apprnc[\"ssc\"], apprnc[\"mt\"],\n apprnc[\"mc\"], apprnc[\"sh\"], apprnc[\"shc\"],\n apprnc[\"rg\"], apprnc[\"rc\"], apprnc[\"pt\"],\n apprnc[\"pc\"], apprnc[\"bt\"], apprnc[\"bc\"])\n\n async def update_crt(self, uid):\n redis = self.server.redis\n clothes = []\n for tmp in await redis.smembers(f\"uid:{uid}:items\"):\n if await redis.lindex(f\"uid:{uid}:items:{tmp}\", 0) == \"cls\":\n if \"_\" in clothes:\n clothes.append(tmp.split(\"_\")[0])\n else:\n clothes.append(tmp)\n appearance = await self.server.get_appearance(uid)\n if not appearance:\n return 0\n gender = \"boy\" if appearance[\"g\"] == 1 else \"girl\"\n crt = 0\n for cloth in clothes:\n for _category in self.clothes_list[gender]:\n for item in self.clothes_list[gender][_category]:\n if item == cloth:\n item = self.clothes_list[gender][_category][cloth]\n crt += item[\"rating\"]\n break\n await self.server.redis.set(f\"uid:{uid}:crt\", crt)\n return crt\n\n def get_category(self, cloth, gender):\n if \"_\" in cloth:\n cloth = cloth.split(\"_\")[0]\n for category in self.clothes_list[gender]:\n for item in self.clothes_list[gender][category]:\n if item == cloth:\n return category\n return None\n","repo_name":"AvaCity/avacity-async","sub_path":"modules/avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":15829,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"70438782408","text":"\"\"\"\nLCP 30. 
Magic Tower Game\nXiaokou is currently on the first floor of the magic tower, which has N rooms numbered 0 ~ N-1. The effect of each room's healing item/monster on health is recorded in the array nums: a positive number is a healing item that increases health by that value; a negative number is a monster that decreases health by that value; 0 means the room has no effect on health.\n\nXiaokou starts with 1 health point, with no upper bound. Assume he originally plans to visit all rooms in ascending order of room number, healing/fighting as he goes. To keep his health positive at all times, he may adjust the visiting order: each adjustment moves a single monster room (a room with a negative value) to the end of the visiting order. Return the minimum number of adjustments needed so that all rooms can be visited. If not all rooms can be visited even after reordering, return -1.\n\nExample 1:\n\nInput: nums = [100,100,100,-250,-60,-140,-50,-50,100,150]\n\nOutput: 1\n\nExplanation: Initial health is 1. At least nums[3] must be moved to the end of the visiting order to satisfy the requirement.\n\nExample 2:\n\nInput: nums = [-200,-300,400,0]\n\nOutput: -1\n\nExplanation: Not all rooms can be visited even after reordering.\n\nConstraints:\n\n1 <= nums.length <= 10^5\n-10^5 <= nums[i] <= 10^5\n\nSource: LeetCode (LeetCode China)\nLink: https://leetcode-cn.com/problems/p0NxJO\nCopyright belongs to LeetCode; contact them for commercial reprint authorization and cite the source for non-commercial reprints.\n\"\"\"\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n    def magicTower(self, nums: List[int]) -> int:\n        if sum(nums) < 0:\n            return -1\n        time = 0\n        pre = 1\n        heap = []\n        for num in nums:\n            pre += num\n            if num < 0:\n                heapq.heappush(heap, num)\n            # if health would drop to 0 or below, pop the most damaging monster seen so far and defer it to the end\n            if pre <= 0:\n                pre -= heapq.heappop(heap)\n                time += 1\n\n        return time\n\n\nif __name__ == '__main__':\n    nums = [100, 100, 100, -250, -60, -140, -50, -50, 100, 150]\n    print(Solution().magicTower(nums))\n","repo_name":"yiming1012/MyLeetCode","sub_path":"LeetCode/贪心算法/堆/LCP 30. 魔塔游戏.py","file_name":"LCP 30. 魔塔游戏.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"10388545977","text":"import sys\r\nimport os\r\nfrom __main__ import MathPy\r\nfrom setuptools import setup, find_packages\r\n\r\nos.environ['ANACONDA3_PATH'] = \"C:\\\\ProgramData\\\\Anaconda3\\\\envs\\\\Calculator\\\\Library\\\\bin\\\\\"\r\n\r\nbase = None\r\n\r\n# GUI applications require a different base on Windows (the default is for a console application).\r\nif sys.platform == \"win32\":\r\n    base = \"Win32GUI\"\r\n\r\nelif sys.platform == \"win64\":\r\n    base = \"Win64GUI\"\r\n\r\nsetup(\r\n    packages=find_packages(),\r\n    scripts=\"__main__.py\",\r\n    icon=\"Alecive-Flatwoken-Apps-Libreoffice-Math-B.ico\",\r\n    name=MathPy.__name__,\r\n    version=MathPy.__version__,\r\n    base=base,\r\n\r\n    # Project uses reStructuredText, so ensure that the docutils get\r\n    # installed or upgraded on the target machine\r\n    include_package_data=True,\r\n    install_requires=['tkinter', 'matplotlib', 'sympy', 'numpy', 'itertools', 'random'],\r\n\r\n    # metadata to display on PyPI\r\n    author=MathPy.__author__,\r\n    author_email=\"najmi.achraf@gmail.com\",\r\n    description=\"GUI Calculator\",\r\n    license=\"MIT License\",\r\n    keywords=['gui', 'executable'],\r\n    url=\"\", # project home page, if any\r\n    project_urls={\r\n        \"Bug Tracker\": \"https://github.com/AchrafNajmi/MathPy/issues\",\r\n        \"Documentation\": \"https://github.com/AchrafNajmi/MathPy/blob/master/Release%20Notes.txt\",\r\n        \"Source Code\": \"https://github.com/AchrafNajmi/MathPy\",\r\n    },\r\n    python_requires='>=3.7',\r\n    classifiers=[\r\n        \"License :: MIT :: AchrafNajmi/MathPy\",\r\n        \"License :: OSI Approved :: Python Software Foundation License\",\r\n        'Programming Language :: Python :: 3.7',\r\n        \"Operating System :: Microsoft :: Windows\"\r\n    ]\r\n\r\n    # could also include long_description, download_url, etc.\r\n)\r\n","repo_name":"mchliyah/MathPy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"70923558090","text":"import os, natsort, math, sys\nfrom pathlib import Path\nfrom moviepy.editor import *\nimport mimetypes\n\n\n\n\n\ndef merger(input_path, 
out_file_name):\n file_type = mimetypes.MimeTypes().guess_type(input_path)[0]\n # if \"video\" not in file_type:\n # print(\"Pass the valid video file as INPUT..\")\n # sys.exit(1)\n video_files = os.listdir(input_path)\n for file in video_files:\n file_type = mimetypes.MimeTypes().guess_type(file)[0]\n if \"video\" not in file_type:\n print(\"Pass the valid video file as INPUT..\")\n sys.exit(1)\n current_dir = Path(os.getcwd()) / input_path\n files_sorted = natsort.natsorted(video_files)\n print(files_sorted)\n\n list_video = [VideoFileClip(str(current_dir / file)) for file in files_sorted]\n\n out_path = Path(os.getcwd()) / \"merge_out\"\n if not os.path.isdir(out_path):\n os.mkdir(out_path)\n out_file = out_path / out_file_name\n\n # To find fps\n video = VideoFileClip(str(current_dir / files_sorted[0]))\n fps = int(video.fps)\n\n result = concatenate_videoclips(list_video)\n result.write_videofile(str(out_file) ,fps=fps)\n\n\ndef gen_file_name(index, file_path):\n if len(file_path.split(\"\\\\\")) > 1:\n file_name = str(index) + \"_\" + str(file_path.split(\"\\\\\")[-1])\n elif len(file_path.split(\"/\")) > 1:\n file_name = str(index) + \"_\" + str(file_path.split(\"/\")[-1])\n else:\n file_name = str(index) + \"_\" + str(file_path)\n return file_name\n\n\ndef splitter(video_file, subclip_duration):\n file_type = mimetypes.MimeTypes().guess_type(video_file)[0]\n if \"video\" not in file_type:\n print(\"Pass the valid video file as INPUT..\")\n sys.exit(1)\n video = VideoFileClip(video_file)\n fps = video.fps\n duration = int(video.duration)\n print(\"\\nThe original duration of video : \" + str(duration) + \" seconds with \" + str(fps) + \" fps\")\n\n number_of_subclips = math.ceil(duration / int(subclip_duration))\n print(\"\\nTOTAL number of subclips : \" + str(number_of_subclips))\n\n out_folder_path = Path(os.getcwd()) / \"split_out\"\n if not os.path.isdir(out_folder_path):\n os.mkdir(out_folder_path)\n\n for file_number in range(number_of_subclips):\n if not file_number:\n start = (file_number * subclip_duration)\n else:\n start = (file_number * subclip_duration)\n end = (file_number + 1) * subclip_duration\n if end > duration:\n end = duration\n file_name = gen_file_name(file_number, video_file)\n gen_file = out_folder_path / file_name\n sub_video = video.subclip(start, end)\n result = CompositeVideoClip([sub_video,])\n result.write_videofile(str(gen_file) ,fps=int(fps))\n\n\ndef gen_video_with_audio_name(video_path, audio_path):\n video_name = os.path.splitext(os.path.basename(video_path))[0]\n audio_name = os.path.splitext(os.path.basename(audio_path))[0]\n video_ext = os.path.splitext(video_path)[1]\n file_name = str(video_name) + \"_\" + str(audio_name) + str(video_ext)\n return file_name\n\n\ndef add_audio_to_video(video_file, audio_file):\n files ={\n \"audio\": audio_file,\n \"video\": video_file\n }\n for name, file in files.items():\n file_type = mimetypes.MimeTypes().guess_type(file)[0]\n if name not in file_type:\n print(f\"Pass the valid {name} file as INPUT..\")\n sys.exit(1)\n print(\"\\nAdding audio : \" + str(audio_file) + \" to video : \" + str(video_file))\n video = VideoFileClip(video_file)\n add_audio = AudioFileClip(audio_file)\n duration = int(video.duration)\n video = video.subclip(0,duration)\n video.audio = add_audio\n out_path = Path(os.getcwd()) / \"video_out\"\n if not os.path.isdir(out_path):\n os.mkdir(out_path)\n file_name = gen_video_with_audio_name(video_file, audio_file)\n out_file = out_path / file_name\n 
video.write_videofile(str(out_file))","repo_name":"AskarKani/video_editor","sub_path":"library/video_editor.py","file_name":"video_editor.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40116370998","text":"import RPi.GPIO as GPIO\nimport time\n\nfrom .AbstractHandler import AbstractHandler\n\nclass RPiHandler(AbstractHandler):\n\n def __init__(self):\n\n self.__LED1PORT = 4\n self.__LED2PORT = 17\n self.__EXTCTRLPORT = 22\n\n self.__DISTANCE_TRIGGER = 18\n self.__DISTANCE_ECHO = 24\n\n self.__initGPIO()\n\n def cleanup(self):\n # Restore port values\n self.setLED1(0)\n self.setLED2(0)\n self.setEXTCTRL(0)\n GPIO.output(self.__DISTANCE_TRIGGER, 0)\n\n # cleanup port\n GPIO.cleanup()\n\n def setLED1(self, numState):\n self.__setOUTPUTPort(self.__LED1PORT, numState)\n\n def setLED2(self, numState):\n self.__setOUTPUTPort(self.__LED2PORT, numState)\n\n def setEXTCTRL(self, numState):\n self.__setOUTPUTPort(self.__EXTCTRLPORT, numState)\n\n def getDistanceData(self):\n dist = self.__computeDistance()\n return dist\n\n def getTemperatureData(self):\n # This sensor has not been connected\n\n return None\n\n \n\n # Private methods\n def __initGPIO(self):\n GPIO.setmode(GPIO.BCM) \n\n ## LEDs\n GPIO.setup(self.__LED1PORT, GPIO.OUT)\n GPIO.setup(self.__LED2PORT, GPIO.OUT)\n GPIO.setup(self.__EXTCTRLPORT, GPIO.OUT)\n\n #set GPIO direction (IN / OUT)\n GPIO.setup(self.__DISTANCE_TRIGGER, GPIO.OUT)\n GPIO.setup(self.__DISTANCE_ECHO, GPIO.IN)\n\n\n def __setOUTPUTPort(self, portIndex, numState):\n GPIO.output(portIndex, numState)\n\n def __computeDistance(self):\n # set Trigger to HIGH\n GPIO.output(self.__DISTANCE_TRIGGER, True)\n \n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.__DISTANCE_TRIGGER, False)\n \n StartTime = time.time()\n StopTime = time.time()\n \n # save StartTime\n while GPIO.input(self.__DISTANCE_ECHO) == 0:\n StartTime = time.time()\n \n # save time of arrival\n while GPIO.input(self.__DISTANCE_ECHO) == 1:\n StopTime = time.time()\n \n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n \n return distance\n\n \n","repo_name":"nongnoochr/fullstack-rpi-app","sub_path":"rpi/handler/RPiHandler.py","file_name":"RPiHandler.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12549474176","text":"from bs4 import BeautifulSoup\nimport requests\nimport sys,getopt\nimport datetime\n\nJUMIA_SNEAKERS_MEN_URL = 'https://www.jumia.dz/baskets-de-ville/?page={0}'\n\nall_sneaker = []\n\ndef is_last_page(page):\n next_page_btn = page.select_one(\n '#jm > main > div.row.-pbm > div.-pvs.col12 > section > div.pg-w.-pvxl > a:nth-child(6)')\n return next_page_btn.get('href')\n\n\ndef scrap_page(page):\n print('scraping page ',page,' .....')\n url = JUMIA_SNEAKERS_MEN_URL.format(page)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n articles = soup.find_all('article', class_='prd')\n for article in articles:\n price = article.select_one('div.prc')\n discount = article.select_one('div._dsct')\n link = article.select_one('a.core')\n name = article.select_one('div.info > h3')\n #print( article,'::: ')\n if name.text and price.text and link.text:\n sneaker = {\n 'name':name.text if 
\n\n\npage = 1\nwhile True:\n    is_next = scrap_page(page)\n    page = page + 1\n    if not is_next:\n        break\n\nall_sneaker.sort(key=lambda x: x['discount'], reverse=True)\nprint('found', len(all_sneaker), 'articles')\n\nfile_name = 'sneakers_' + str(datetime.datetime.now()) + '.txt'\n\n\n# get script options\nn = 10\ngender = 'both'\nbrand = 'all'\ntry:\n    opts, args = getopt.getopt(sys.argv[1:], \"n:f:b:\")\nexcept getopt.GetoptError:\n    # without exiting here the loop below would hit an undefined `opts`\n    print('usage: sneakers.py -n <count> -f <m|w|both> -b <brand>')\n    sys.exit(2)\nfor opt, arg in opts:\n    if opt == '-n':\n        n = int(arg)\n    if opt == '-f':\n        gender = arg\n    if opt == '-b':\n        brand = arg\n# filter by gender\nif gender != 'both':\n    if gender == 'm':\n        all_sneaker = list(filter(lambda item: 'Femme' not in item['name'].split(' '), all_sneaker))\n\n    elif gender == 'w':\n        all_sneaker = list(filter(lambda item: 'Homme' not in item['name'].split(' '), all_sneaker))\n# filter out Pepe Jeans sneakers\nall_sneaker = list(filter(lambda item: 'Pepe Jeans' not in item['name'], all_sneaker))\n# filter by brand\nif brand != 'all':\n    all_sneaker = list(filter(lambda item: brand.lower() in item['name'].lower(), all_sneaker))\n\n# open the report once instead of re-opening (and never closing) it on every\n# iteration; the slice also guards against n exceeding the scraped count\nwith open(file_name, 'a') as f:\n    for item in all_sneaker[:n]:\n        f.write(item['name'] + ', ' + item['price'] + ', ' + str(item['discount']) + '%, ' + item['link'] + '\\n')\n","repo_name":"sidisaidkarim/jumia-sneakers-scraping","sub_path":"sneakers.py","file_name":"sneakers.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35020017867","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n    crack.py\r\n    ~~~~~~~~\r\n\r\n    This module takes captcha images as input and partitions them into\r\n    n new images, 1 image per character found within the captcha.\r\n\r\n    Original Code (http://tinyurl.com/puq6alb) by bboyte01@gmail.com\r\n    https://web.archive.org/web/20121012023114/http://www.wausita.com/captcha/\r\n    http://www.wausita.com/captcha/\r\n\r\n    :copyright: (c) 2012 by Mek\r\n    :license: see LICENSE for more details.\r\n\"\"\"\r\n\r\nimport os\r\nimport string\r\nfrom operator import itemgetter\r\nfrom math import sqrt\r\nfrom PIL import Image, ImageChops\r\nfrom io import BytesIO\r\n\r\ntry:\r\n    from urllib.request import urlopen\r\nexcept ImportError:\r\n    from urllib import urlopen\r\n\r\nSYMBOLS = list(string.ascii_lowercase + string.digits)\r\nICONS_PATH = os.path.abspath(\r\n    os.path.join(os.path.dirname(__file__), 'iconset'))\r\nIMAGESET = []\r\nWHITE = 255\r\n\r\n\r\ndef imageset():\r\n    \"\"\"Loads icons of various characters\"\"\"\r\n    imageset = []\r\n    for symbol in SYMBOLS:\r\n        for imfile in os.listdir(os.path.join(ICONS_PATH, symbol)):\r\n            path = os.path.join(ICONS_PATH, symbol, imfile)\r\n            imageset.append({symbol: Image.open(path)})\r\n    return imageset\r\n\r\n\r\ndef trim(im, color=WHITE):\r\n    \"\"\"Trims image to remove excess color (default: whitespace)\"\"\"\r\n    # use the `color` argument for the background (the original hardcoded\r\n    # WHITE, leaving the parameter dead)\r\n    bg = Image.new(im.mode, im.size, color)\r\n    diff = ImageChops.difference(im, bg)\r\n    diff = ImageChops.add(diff, diff, 2.0, -100)\r\n    return im.crop(diff.getbbox())\r\n\r\n\r\ndef channel(im, *colors, **kwargs):\r\n    \"\"\"Composes a new image with the same dimensions as `im` but\r\n    draws only pixels of the specified color channels on a `bg`\r\n    colored background.
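\r\n\r\n    Illustrative usage (an added sketch; `im` is any palette-mode PIL\r\n    image and the palette indices 5 and 9 are made up for the example):\r\n\r\n        sample = channel(im, 5, 9, bg=255)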
\r\n    \"\"\"\r\n    bg = kwargs.get('bg', WHITE)\r\n    sample = Image.new('P', im.size, bg)\r\n    width, height = im.size\r\n    for col in range(width):\r\n        for row in range(height):\r\n            pixel = im.getpixel((col, row))\r\n            if pixel in colors:\r\n                sample.putpixel((col, row), pixel)\r\n    return sample\r\n\r\n\r\ndef monochrome(im, threshold=255):\r\n    \"\"\"Converts all colors in a gif image which are less than threshold\r\n    to black\"\"\"\r\n    # compare against the `threshold` parameter (the original hardcoded 255,\r\n    # ignoring the argument entirely)\r\n    return im.point(lambda x: 0 if x < threshold else 255, '1')\r\n\r\n\r\ndef regions(im, threshold=1):\r\n    \"\"\"Iterates over the columns of an image from left-to-right and\r\n    composes an ordered list of (start, end) column ranges referring\r\n    to discrete, contiguous columns which contain at least `threshold`\r\n    non-white pixels.\r\n    \"\"\"\r\n    regions = []\r\n    start = None\r\n    width, height = im.size\r\n    for col in range(width):\r\n        # if the column contains at least `threshold` non-white pixels;\r\n        # != rather than `is not`: identity comparison on ints only works\r\n        # by accident of CPython's small-int caching\r\n        if sum([im.getpixel((col, row)) != WHITE\r\n                for row in range(height)]) >= threshold:\r\n            start = col if start is None else start\r\n        elif start is not None:\r\n            regions.append((start, col))\r\n            start = None  # reset start\r\n    return regions\r\n\r\n\r\ndef similarity(im1, im2, equalize=False):\r\n    \"\"\"Takes in two images, vectorizes them into concordance\r\n    dictionaries and spits out a number from 0 to 1 indicating how\r\n    related they are. 0 means no relation and 1 indicates they are the\r\n    same.\r\n\r\n    params:\r\n        equalize - scale the larger image down to the dimensions of the\r\n                   smaller one so both vectors are comparable\r\n    \"\"\"\r\n    def scale(im1, im2):\r\n        \"\"\"Scales the image with the greater height to match the one\r\n        with the smaller height\r\n        \"\"\"\r\n        if im1.size[1] > im2.size[1]:\r\n            return im1.resize(im2.size, Image.ANTIALIAS), im2\r\n        elif im1.size[1] < im2.size[1]:\r\n            return im1, im2.resize(im1.size, Image.ANTIALIAS)\r\n        return im1, im2\r\n\r\n    def vectorize(im):\r\n        \"\"\"im.getdata returns the contents of an image as a flattened\r\n        sequence object containing pixel values.\r\n        \"\"\"\r\n        d1 = {}\r\n        for count, i in enumerate(im.getdata()):\r\n            d1[count] = i\r\n        return d1\r\n\r\n    def magnitude(concordance):\r\n        return sqrt(sum(count ** 2 for word, count in concordance.items()))\r\n\r\n    c1, c2 = [vectorize(im) for im in\r\n              (scale(im1, im2) if equalize else (im1, im2))]\r\n    topvalue = 0\r\n    for word, count in c1.items():\r\n        if word in c2:\r\n            topvalue += count * c2[word]\r\n    return topvalue / (magnitude(c1) * magnitude(c2))\r\n\r\n\r\nclass Captcha(object):\r\n\r\n    def __init__(self, imgpath):\r\n        self.imgpath = imgpath\r\n\r\n    @property\r\n    def im(self):\r\n        \"\"\"Fetches captcha's image from disk or url\"\"\"\r\n        try:\r\n            im = Image.open(self.imgpath)\r\n        except Exception:\r\n            # not a local file; fall back to fetching it as a URL\r\n            im = Image.open(BytesIO(urlopen(self.imgpath).read()))\r\n        return self.gif(im)\r\n\r\n    @property\r\n    def histogram(self):\r\n        with self.im as im:\r\n            return im.histogram()\r\n\r\n    def decode(self, channels=3, limit=3, threshold=0, tolerance=3,\r\n               _min=0, _max=245):\r\n        \"\"\"Attempts to decode a captcha by:\r\n\r\n        - Finding the `n` most prominent colors in the image\r\n        - Sampling the captcha into `n` images, each discretely composed\r\n          of a different prominent color\r\n        - Segmenting each sample into regions of contiguous columns\r\n          containing any pixelation (which hopefully equate to\r\n          individual alphanumeric characters), and finally\r\n        - Guessing which character appears in each segment\r\n\r\n        XXX Prettier output and organizing of results required\r\n        \"\"\"\r\n        colors = [color for color, _ in self\r\n                  .prominant_colors(n=channels, _min=_min, _max=_max)]
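\r\n        # Added aside (illustrative, not in the original): guess_character\r\n        # below ranks icons by the cosine similarity computed in similarity();\r\n        # e.g. for toy vectors a = {0: 1, 1: 2} and b = {0: 2, 1: 4} the score\r\n        # is (1*2 + 2*4) / (sqrt(1 + 4) * sqrt(4 + 16)) = 10 / 10 = 1.0,\r\n        # i.e. identical up to scale.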
\r\n        sample = monochrome(self.channel(*colors))\r\n        return [self.guess_character(segment, limit=limit, threshold=threshold)\r\n                for segment in self.segments(sample, tolerance=tolerance)]\r\n\r\n    def prominant_colors(self, n=5, _min=0, _max=256):\r\n        \"\"\"Calculates the n most prominent colors of an image as an\r\n        ordered list of (color, frequency) tuples.\r\n\r\n        params:\r\n            n - limit the number of colors to `n`\r\n            _min - exclude any colors below this number (filter\r\n                   out dark colors, like black/0)\r\n            _max - exclude any colors above this number (filter out\r\n                   light colors, like white/256)\r\n\r\n        XXX consider sorted(im.getcolors(n), reverse=True)\r\n        \"\"\"\r\n        hist = self.histogram[_min:_max]\r\n        return sorted([(c, f) for c, f in enumerate(hist)],\r\n                      key=itemgetter(1), reverse=True)[:n]\r\n\r\n    def channel(self, *colors, **kwargs):\r\n        \"\"\"Composes an image with the same dimensions as `im` but\r\n        draws only pixels of the specified colors on a `bg` colored\r\n        background.\r\n        \"\"\"\r\n        with self.im as im:\r\n            return channel(im, *colors, **kwargs)\r\n\r\n    @staticmethod\r\n    def gif(im):\r\n        \"\"\"Converts the captcha to a paletted image (makes things easier\r\n        since it has at most 256 colors)\r\n        \"\"\"\r\n        # == rather than `is`: string identity is an implementation detail\r\n        return im if im.mode == 'P' else im.convert('P')\r\n\r\n    @classmethod\r\n    def segments(cls, im, tolerance=3, crop=True):\r\n        \"\"\"Discovers the contiguous pixelated column regions of `im` and\r\n        crops each one into its own segment image\r\n        \"\"\"\r\n        return [cls.segment(im, region, crop=crop) for\r\n                region in regions(im, threshold=tolerance)]\r\n\r\n    @classmethod\r\n    def segment(cls, im, region, crop=True):\r\n        \"\"\"Returns a cropped image segment (hopefully of an\r\n        alphanumeric character) within the range of the region\r\n        \"\"\"\r\n        start, end = 0, 1\r\n        segment = im.crop((region[start], 0, region[end], im.size[1]))\r\n        return trim(segment) if crop else segment\r\n\r\n    @staticmethod\r\n    def guess_character(im, threshold=0, limit=None):\r\n        \"\"\"Guess alphanumeric character in image using Basic Vector\r\n        Space Search algorithm.\r\n\r\n        http://la2600.org/talks/files/20040102/Vector_Space_Search_Engine_Theory.pdf\r\n        \"\"\"\r\n        global IMAGESET  # lazy-ish style iconset loading\r\n        if not IMAGESET:\r\n            IMAGESET = imageset()\r\n\r\n        guesses = []\r\n        for icon in IMAGESET:\r\n            for symbol, im2 in icon.items():\r\n                guess = similarity(im, im2, equalize=True)\r\n                if guess >= threshold:\r\n                    guesses.append((guess, symbol))\r\n        return sorted(guesses, reverse=True)[:limit]\r\n\r\n\r\ndef decode(captcha, channels=1, limit=3, threshold=0, tolerance=3,\r\n           _min=0, _max=256):\r\n    \"\"\"Backward compatible method for decoding a captcha\"\"\"\r\n    return Captcha(captcha).decode(\r\n        channels=channels,\r\n        limit=limit,\r\n        threshold=threshold,\r\n        tolerance=tolerance,\r\n        _min=_min, _max=_max)\r\n","repo_name":"mekarpeles/captcha-decoder","sub_path":"decaptcha/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"16"} +{"seq_id":"71309215367","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom SGMGU.models import *\nfrom SGMGU.forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom .utiles import *\nfrom django.http import HttpResponse, Http404, JsonResponse\nfrom django.db.models import Q\nfrom django.db import models\nimport time\n\nfrom django.template.loader import render_to_string
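\n\n\n# Added aside (illustrative, not part of the original file): the year map\n# built by devolver_listado_annos below could equally be produced with a\n# comprehension, which avoids the ten hand-written keys:\n#\n#     {'anno%d' % i: anno + i for i in range(1, 11)}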
\n\n\ndef devolver_listado_annos(anno):\n    annos = {'anno1': anno + 1, 'anno2': anno + 2, 'anno3': anno + 3, 'anno4': anno + 4, 'anno5': anno + 5,\n             'anno6': anno + 6, 'anno7': anno + 7, 'anno8': anno + 8, 'anno9': anno + 9, 'anno10': anno + 10}\n    return annos\n\n\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef buscar_demanda(request):\n    # default the search text so the context below cannot raise a KeyError\n    # on a plain GET (the original read request.POST unconditionally)\n    texto = ''\n    if request.method == \"POST\":\n        texto = request.POST['texto_demanda'].lower()\n\n        demandas = DemandaGraduados.objects.filter(entidad__e_nombre__contains=texto)\n    else:\n        demandas = []\n    demandas = paginar(request, demandas)\n    context = {'demandas': demandas, 'paginas': crear_lista_pages(demandas), 'busqueda': True,\n               'texto': texto}\n    return render(request, \"Demandas/gestion_demanda.html\", context)\n\n\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef gestion_demanda(request):\n    anno = int(time.strftime(\"%Y\"))\n    annos = devolver_listado_annos(anno)\n\n    perfil = Perfil_usuario.objects.get(usuario=request.user)\n\n    if perfil.categoria.nombre == \"organismo\":\n        demandas = DemandaGraduados.objects.filter(organismo=perfil.organismo)\n    else:\n        demandas = DemandaGraduados.objects.all()\n\n    demandas = paginar(request, demandas)\n    context = {'demandas': demandas, 'paginas': crear_lista_pages(demandas), 'anno': anno, 'annos': annos}\n    return render(request, \"Demandas/gestion_demanda.html\", context)\n\n\n# ---------------------------------------------------------------------\n\n\n# register a new demand\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef registrar_demanda(request):\n    perfil = Perfil_usuario.objects.get(usuario=request.user)\n\n    if perfil.categoria.nombre == \"organismo\":\n        lista_organismos_por_usuario = Organismo.objects.filter(id=perfil.organismo_id)\n        lista_entidades_por_usuario = Entidad.objects.filter(id_organismo_s=perfil.organismo_id)\n    else:\n        lista_organismos_por_usuario = Organismo.objects.all()\n        lista_entidades_por_usuario = Entidad.objects.all()\n\n    if request.method == 'POST':\n        form = DemandaForm(request.POST)\n        if form.is_valid():\n            form.save()\n            messages.add_message(request, messages.SUCCESS, \"La demanda se ha registrado con éxito.\")\n            return redirect('/demandas')\n    else:\n        form = DemandaForm()\n    context = {'form': form, 'lista_organismos_por_usuario': lista_organismos_por_usuario,\n               'lista_entidades_por_usuario': lista_entidades_por_usuario, 'nombre_accion': 'Registrar'}\n    return render(request, \"demandas/form_demanda.html\", context)\n\n\n# to modify a demand:\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef modificar_demanda(request, identificador):\n\n    perfil = Perfil_usuario.objects.get(usuario=request.user)\n\n    if perfil.categoria.nombre == \"organismo\":\n        lista_organismos_por_usuario = Organismo.objects.filter(id=perfil.organismo_id)\n        lista_entidades_por_usuario = Entidad.objects.filter(id_organismo_s=perfil.organismo_id)\n    else:\n        lista_organismos_por_usuario = Organismo.objects.all()\n        lista_entidades_por_usuario = Entidad.objects.all()\n\n    demanda = DemandaGraduados.objects.get(codigo_demanda=identificador)\n    if request.method == 'POST':\n        form = DemandaForm(request.POST, instance=demanda)
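\n        # Added note (not in the original): binding the POST data together\n        # with instance=demanda makes the form.save() below update the\n        # existing DemandaGraduados row instead of inserting a new one --\n        # standard Django ModelForm behaviour.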
\n        if form.is_valid():\n            form.save()\n            messages.add_message(request, messages.SUCCESS, \"La demanda se ha modificado con éxito.\")\n            return redirect('/demandas')\n    else:\n        form = DemandaForm(instance=demanda)\n    # build the context\n    context = {'form': form, 'nombre_accion': 'Modificar', 'lista_organismos_por_usuario': lista_organismos_por_usuario,\n               'lista_entidades_por_usuario': lista_entidades_por_usuario}\n    # and render the data\n    return render(request, \"demandas/form_demanda.html\", context)\n\n\n# ----------- AJAX-based create -----------------------------------------------------\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef demanda_create(request):\n    data = dict()\n\n    if request.method == 'POST':\n        form = DemandaForm(request.POST)\n        if form.is_valid():\n            form.save()\n            data['form_is_valid'] = True\n        else:\n            data['form_is_valid'] = False\n    else:\n        form = DemandaForm()\n\n    context = {'form': form}\n    data['html_form'] = render_to_string('Demandas/form_demanda_modal.html', context, request=request)\n\n    return JsonResponse(data)\n\n\n# -----------------------------------------------------------------------------------\n# delete a demand\n@login_required\n@permission_required(['administrador', 'especialista', 'organismo'])\ndef eliminar_demanda(request, identificador):\n    demanda = DemandaGraduados.objects.get(codigo_demanda=identificador)\n    # demanda.estado = False  # (soft-delete alternative kept from the original)\n    demanda.delete()\n    messages.add_message(request, messages.SUCCESS, \"La demanda ha sido eliminada con éxito.\")\n    return redirect('/demandas')\n","repo_name":"marioriguera/egresadosmario","sub_path":"SGMGU/views/views_demanda.py","file_name":"views_demanda.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12151816855","text":"import requests\nimport time\nimport json\nimport os\nimport configparser\n\nclass VK_get_data:\n    \"\"\"\n    Helper class for talking to the VK API\n    \"\"\"\n    # read the VK token from the settings file\n    config = configparser.ConfigParser()  # create the parser object\n    config.read(\"/Users/denistimakov/PycharmProjects/TestingYaUpload/settings.ini\")  # read the config\n    vk_token = config['DEFAULT']['vk_token']\n\n    def search_user(self):\n        \"\"\"\n        Looks a VK user up either by page (screen) name or by identifier.\n        Returns the user's identifier.\n        \"\"\"\n        choice = input(\n            'Для поиска по названию страницы введите 1, для поиска по идентификатору введите 2: ')\n\n        if choice == '1':\n\n            URL = 'https://api.vk.com/method/utils.resolveScreenName'\n            screen_name = input('Введите название страницы: ')\n\n            params = {\n                'screen_name': screen_name,\n                'access_token': self.vk_token,\n                'extended': 1,\n                'v': '5.131'\n            }\n\n            res = requests.get(URL, params=params)\n            res_dict = res.json()\n            user_id = res_dict['response']['object_id']\n\n        elif choice == '2':\n            user_id = input('Введите идентификатор пользователя: ')\n\n        else:\n            # fail loudly on an unknown menu option: the original printed a\n            # message and then hit an UnboundLocalError on user_id below\n            raise ValueError('Введена не корректная команда')\n\n        return user_id
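\n\n    # Added aside (a sketch based only on the fields accessed above): a\n    # successful utils.resolveScreenName reply is expected to look like\n    # {\"response\": {\"type\": \"user\", \"object_id\": 12345}}, which is why\n    # search_user returns res_dict['response']['object_id'].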
\n\n    def vk_get_data(self):\n        \"\"\"\n        Calls the VK API and returns the user's profile-photo data\n        \"\"\"\n        URL = 'https://api.vk.com/method/photos.get'\n        user_id = self.search_user()\n        params = {\n            'owner_id': user_id,\n            'album_id': 'profile',\n            'access_token': self.vk_token,\n            'extended': 1,\n            'v': '5.131'\n        }\n\n        # fetch the data from VK\n        vk_user_data = requests.get(URL, params=params)\n        vk_data_dict = vk_user_data.json()\n\n        return vk_data_dict\n\nclass Photo_backup:\n\n    def __init__(self, photos_amount=5):\n        # number of photos to export; photos_amount defaults to 5\n        self.photos_amount = photos_amount\n\n    def get_photos(self):\n        \"\"\"\n        Returns a dict {<likes count>: [<photo url>, <size type>]}\n        \"\"\"\n\n        # empty dict to be filled with likes and photo links from VK\n        likes_photos = {}\n\n        # fetch the data from VK\n        get_vk_data = VK_get_data()\n        get_data = get_vk_data.vk_get_data()\n        res_list = get_data['response']['items']\n        self.photos_amount = int(input(\"Введите количество фотографий для сохранения: \"))\n\n        # fill the dict with the fields we need from res_list\n        for elem in res_list:\n            like = elem['likes']['count'] + elem['likes']['user_likes']\n            if len(likes_photos) < self.photos_amount:\n                if like in likes_photos.keys():\n                    # duplicate likes count: disambiguate the key with the photo date\n                    like_time = str(like) + '_' + time.strftime(\"%m%d%Y\", time.localtime(int(elem['date'])))\n                    likes_photos[like_time] = [elem['sizes'][-1]['url'], elem['sizes'][-1]['type']]\n                else:\n                    likes_photos[like] = [elem['sizes'][-1]['url'], elem['sizes'][-1]['type']]\n        return likes_photos\n\nclass YaUploader:\n    \"\"\"\n    Helper class for talking to the Yandex.Disk API\n    \"\"\"\n\n    def get_headers(self):\n\n        # read the Yandex.Disk token from the settings file\n        config = configparser.ConfigParser()  # create the parser object\n        config.read(\"/Users/denistimakov/PycharmProjects/TestingYaUpload/settings.ini\")  # read the config\n        self.ya_token = config['DEFAULT']['ya_token']\n\n        return {\n            \"Content-Type\": \"application/json\",\n            \"Authorization\": \"OAuth {}\".format(self.ya_token)\n        }\n\n    def check_folder(self):\n        \"\"\"\n        Checks whether the Photo_backup folder already exists on Yandex.Disk.\n        Returns the directory information as a dict.\n        \"\"\"\n        check_url = \"https://cloud-api.yandex.net/v1/disk/resources?path=disk%3A%2FPhoto_backup\"\n        headers = self.get_headers()\n        response = requests.get(check_url, headers=headers)\n        folder_info = response.json()\n        return folder_info\n\n    def create_folder(self):\n        \"\"\"\n        Creates the backup directory on Yandex.Disk.\n        Returns information about the created or already existing directory.\n        \"\"\"\n        current_folder = self.check_folder()\n        if 'DiskNotFoundError' in current_folder.values():\n            create_url = \"https://cloud-api.yandex.net/v1/disk/resources\"\n            headers = self.get_headers()\n            params = {\"path\": \"Photo_backup\"}\n            response = requests.put(create_url, headers=headers, params=params)\n            return response.json()\n        else:\n            return current_folder\n\n    def _get_upload_link(self, disk_file_path):\n        \"\"\"\n        Returns a one-time upload link for Yandex.Disk\n        \"\"\"\n        self.create_folder()  # make sure the backup folder exists\n        # the original URL had an unrelated VK image address pasted into the\n        # query string; the endpoint takes its arguments via `params` below\n        upload_url = \"https://cloud-api.yandex.net/v1/disk/resources/upload\"\n        headers = self.get_headers()\n        params = {\"path\": disk_file_path, \"overwrite\": \"true\"}\n        response = requests.get(upload_url, headers=headers, params=params)\n        return response.json()\n\nclass Photo_upload:\n\n    def upload(self):\n        \"\"\"Uploads the collected photos to Yandex.Disk and returns a summary list\"\"\"
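\n\n        # Added aside (not in the original): uploading to Yandex.Disk is a\n        # two-step protocol, both steps visible below -- first GET\n        # .../resources/upload to obtain a one-time href, then PUT the raw\n        # bytes to that href; a 201 status means the file was stored.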
\n\n        # create a Photo_backup instance and get the dict from get_photos\n        photos = Photo_backup()\n        photos_dict = photos.get_photos()\n\n        # collect the upload results here for the final json dump\n        result_list = []\n\n        for key, value in photos_dict.items():\n\n            # download the photo locally via the VK link (value)\n            img_data = requests.get(value[0]).content\n            with open(str(key) + '.jpg', 'wb') as handler:\n                handler.write(img_data)\n\n            # get an upload link for Yandex.Disk, reusing the file name there\n            get_link = YaUploader()\n            href = get_link._get_upload_link(disk_file_path=\"Photo_backup/\" + str(key) + \".jpg\").get(\"href\", \"\")\n            # upload the photo to Yandex.Disk\n            response = requests.put(href, img_data)\n\n            # log the outcome of the upload\n            response.raise_for_status()\n            if response.status_code == 201:\n                print(f'{key}.jpg Success')\n                result_list.append({\"file_name\": str(key) + \".jpg\",\n                                    \"size\": value[-1]})\n\n                # remove only the jpg we just wrote (the original wiped every\n                # .jpg in the working directory, including unrelated files)\n                os.remove(str(key) + '.jpg')\n            else:\n                print(f'{key}.jpg Error')\n        return result_list\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    uploader = Photo_upload()\n    result = uploader.upload()\n\n    def save_result():\n        # write the upload results to a json file\n        with open('data.json', 'w') as f:\n            json.dump(result, f)\n\n    # the original defined save_result but never invoked it\n    save_result()","repo_name":"denistimakovgit/BackupPhotos","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8750,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38915412432","text":"import entities.project as project\nfrom utilities.Validator import Validator\nfrom datetime import *\n\n\ndef get_title():\n    title = input(\"> Please Enter The project title: \")\n    while project.exists(title) or not Validator.validate_title(title):\n        print(\"> This project title already exists or you entered an invalid title name !!\")\n        print(\"> please use only alphabetic chars\")\n        title = input(\"> Please Enter a new project title: \")\n    return title.strip()\n\n\ndef get_details():\n    details = input(\"> Please Enter The project details: \")\n    while not details:\n        print(\"> details can't be empty !!\")\n        details = input(\"> Please Enter The project details: \")\n    return details\n\n\ndef get_total_target():\n    total_target = input(\"> Please Enter the total project target: \")\n    while not total_target.isdigit():\n        total_target = input(\"> Please Enter a correct project target: \")\n    return total_target\n\n\ndef get_start_time():\n    start_time = input(\"Please Enter the start date in the form of dd-mm-yyyy: \")\n    while not Validator.validate_datetime(start_time):\n        print(\"The start time must be in the form of dd-mm-yyyy !! and a valid time after (now)\")\n        start_time = input(\"Please Enter the start date in the form of dd-mm-yyyy: \")\n    return start_time
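\n\n\n# Added aside (hypothetical sketch -- Validator.validate_datetime lives in\n# utilities/Validator.py, which is not shown here; judging from the prompt\n# above it checks both the dd-mm-yyyy format and that the date is in the\n# future):\n#\n#     def validate_datetime(value):\n#         try:\n#             return datetime.strptime(value, \"%d-%m-%Y\") > datetime.now()\n#         except ValueError:\n#             return False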
\n\n\ndef get_end_time(start_time):\n    _format = \"%d-%m-%Y\"\n    end_time = input(\"Please Enter the end date in the form of dd-mm-yyyy: \")\n    # validate the end date itself (the original mistakenly re-validated\n    # start_time here, so a malformed end date slipped through)\n    while not Validator.validate_datetime(end_time):\n        print(\"The end time must be in the form of dd-mm-yyyy !!\")\n        end_time = input(\"Please Enter the end date in the form of dd-mm-yyyy: \")\n    _start_time = datetime.strptime(start_time, _format)\n    _end_time = datetime.strptime(end_time, _format)\n    if _start_time > _end_time:\n        print(\"End Time must come after start time\")\n        # re-prompt and propagate the corrected value (the original discarded\n        # the recursive result and returned the bad date anyway)\n        return get_end_time(start_time)\n    return end_time\n\n\ndef get_project_data():\n    from entities.user import main_user as user\n    title = get_title()\n    details = get_details()\n    total_target = get_total_target()\n    start_time = get_start_time()\n    end_time = get_end_time(start_time)\n\n    return {\n        \"title\": title,\n        \"details\": details,\n        \"total_target\": total_target,\n        \"start_time\": start_time,\n        \"end_time\": end_time,\n        \"user_email\": user['email']\n    }\n","repo_name":"AymanxMohamed/python-crowd-funding","sub_path":"utilities/create_project.py","file_name":"create_project.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28153510509","text":"# import LogBot.py\n\n\n# hello_psg.py\n\nimport PySimpleGUI as sg\n\nlayout = [[sg.Text(\"Simplifies the log diagnosis process!\")],\n          [sg.Text(\"whatever you type here will be displayed when you click SUBMIT:\")], [sg.Input(key='-INPUT-')],\n          [sg.Text(\"End Date:\")], [sg.Input()],\n          [sg.Text(\"Number of Days:\")], [sg.Input()],\n          [sg.Text(\"Number of Hours:\")], [sg.Input()],\n          [sg.Text(\"Categories:\")], [sg.Input()],\n          [sg.Text(\"Pattern:\")], [sg.Input()],\n          [sg.Button(\"SUBMIT\")], [sg.Button(\"QUIT\")],\n          [sg.Text(size=(40, 2), key='-OUTPUT-')]\n          ]\n\n# Create the window\nwindow = sg.Window(\"LogBot\", layout)\n\n# Create an event loop\nwhile True:\n    event, values = window.read()\n    # end the program if the user closes the window or presses QUIT;\n    # break first so we never try to update an already-closed window\n    if event == \"QUIT\" or event == sg.WIN_CLOSED:\n        break\n    if event == \"SUBMIT\":\n        window['-OUTPUT-'].update('Look at me: ' + values['-INPUT-'] + '\\nWOW this text is fucking yellow!', text_color='yellow')\n\nwindow.close()","repo_name":"sborbel/LogBot","sub_path":"LogBot_gui.py","file_name":"LogBot_gui.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70574194887","text":"from __future__ import absolute_import\nfrom __future__ import division\n\nimport functools\nimport logging\nimport os.path\nimport threading\nimport time\nimport uuid\n\nfrom contextlib import contextmanager\nfrom itertools import product\n\nimport libvirt\nfrom six.moves import zip\n\nfrom vdsm import constants\n\nfrom vdsm.common import cpuarch\nfrom vdsm.common import define\nfrom vdsm.common import exception\nfrom vdsm.common import libvirtconnection\nfrom vdsm.common import response\nfrom vdsm.common import xmlutils\n\nimport vdsm.common.time\n\nfrom vdsm.virt import cpumanagement\nfrom vdsm.virt import periodic\nfrom vdsm.virt import utils\nfrom vdsm.virt import virdomain\nfrom vdsm.virt import vm\nfrom vdsm.virt import vmdevices\nfrom vdsm.virt import vmexitreason\nfrom vdsm.virt import vmstats\nfrom vdsm.virt import vmstatus\nfrom vdsm.virt import xmlconstants\nfrom 
vdsm.virt.vmdevices import hwclass\nfrom vdsm.virt.vmdevices.storage import DISK_TYPE\nfrom vdsm.virt.vmtune import (\n io_tune_merge,\n io_tune_dom_to_values,\n io_tune_to_dom,\n)\n\nfrom monkeypatch import MonkeyPatch, MonkeyPatchScope\nfrom testValidation import brokentest\nfrom testlib import VdsmTestCase as TestCaseBase\nfrom testlib import XMLTestCase\nfrom testlib import make_config\nfrom testlib import namedTemporaryDir\nfrom testlib import permutations, expandPermutations\nfrom testlib import recorded\n\nfrom fakelib import FakeLogger\n\nfrom . import vmfakelib as fake\nimport pytest\n\n\n_VM_PARAMS = {\n 'displayPort': -1,\n 'displaySecurePort': -1,\n 'display': 'qxl',\n 'displayIp': '127.0.0.1',\n 'vmType': 'kvm',\n 'memSize': 1024\n}\n\n\n_TICKET_PARAMS = {\n 'userName': 'admin',\n 'userId': 'fdfc627c-d875-11e0-90f0-83df133b58cc'\n}\n\n\n@expandPermutations\nclass TestVm(XMLTestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestVm, self).__init__(*args, **kwargs)\n self.channelListener = None\n self.conf = {'vmName': 'testVm',\n 'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',\n 'smp': '8', 'maxVCpus': '160',\n 'memSize': '1024', 'memGuaranteedSize': '512'}\n\n def testIoTuneException(self):\n SERIAL = '54-a672-23e5b495a9ea'\n devConf = {'index': '0', 'propagateErrors': 'on', 'iface': 'virtio',\n 'name': 'vda', 'format': 'cow', 'device': 'disk',\n 'path': '/tmp/disk1.img', 'type': 'disk',\n 'readonly': 'False', 'shared': 'True', 'serial': SERIAL}\n tuneConfs = [\n {'read_iops_sec': 1000, 'total_iops_sec': 2000},\n {'read_bytes_sec': -5},\n {'aaa': 100},\n {'read_iops_sec': 'aaa'}]\n\n expectedExceptMsgs = [\n 'A non-zero total value and non-zero read/write value for'\n ' iops_sec can not be set at the same time',\n 'parameter read_bytes_sec value should be equal or greater'\n ' than zero',\n 'parameter aaa name is invalid',\n 'an integer is required for ioTune parameter read_iops_sec']\n\n for (tuneConf, exceptionMsg) in \\\n zip(tuneConfs, expectedExceptMsgs):\n drive = vmdevices.storage.Drive(self.log, diskType=DISK_TYPE.FILE,\n **devConf)\n\n with pytest.raises(Exception) as cm:\n drive.iotune = tuneConf\n\n assert cm.value.args[0] == exceptionMsg\n\n def testVmPolicyOnStartup(self):\n LIMIT = '50'\n with fake.VM(_VM_PARAMS) as testvm:\n dom = fake.Domain()\n dom.setMetadata(libvirt.VIR_DOMAIN_METADATA_ELEMENT,\n '%s' % (\n LIMIT\n ),\n xmlconstants.METADATA_VM_TUNE_PREFIX,\n xmlconstants.METADATA_VM_TUNE_URI,\n 0)\n testvm._dom = dom\n # it is bad practice to test private functions -and we know it.\n # But enduring the full VM startup is too cumbersome, and we\n # need to test this code.\n testvm._updateVcpuLimit()\n stats = testvm.getStats()\n assert stats['vcpuUserLimit'] == LIMIT\n\n def testGetVmPolicySucceded(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n self.assertXMLEqual(xmlutils.tostring(testvm._getVmPolicy()),\n '')\n\n def testGetVmPolicyEmptyOnNoMetadata(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain(\n virtError=libvirt.VIR_ERR_NO_DOMAIN_METADATA)\n self.assertXMLEqual(xmlutils.tostring(testvm._getVmPolicy()),\n '')\n\n def testGetVmPolicyFailOnNoDomain(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain(virtError=libvirt.VIR_ERR_NO_DOMAIN)\n assert testvm._getVmPolicy() is None\n\n def testUpdateVmPolicy(self):\n with fake.VM() as machine:\n dom = fake.Domain()\n machine._dom = dom\n\n policy = {\n \"vcpuLimit\": 50,\n \"ioTune\": [\n {\n \"name\": \"test-device-by-name\",\n \"maximum\": {\n 
\"total_bytes_sec\": 200, \"total_iops_sec\": 201,\n \"read_bytes_sec\": 202, \"read_iops_sec\": 203,\n \"write_bytes_sec\": 204, \"write_iops_sec\": 205\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 100, \"total_iops_sec\": 101,\n \"read_bytes_sec\": 102, \"read_iops_sec\": 103,\n \"write_bytes_sec\": 104, \"write_iops_sec\": 105\n }\n },\n {\n \"path\": \"test-device-by-path\",\n \"maximum\": {\n \"total_bytes_sec\": 400, \"total_iops_sec\": 401,\n \"read_bytes_sec\": 402, \"read_iops_sec\": 403,\n \"write_bytes_sec\": 404, \"write_iops_sec\": 405\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 300, \"total_iops_sec\": 301,\n \"read_bytes_sec\": 302, \"read_iops_sec\": -1,\n \"write_bytes_sec\": 304, \"write_iops_sec\": 305\n }\n }\n ]\n }\n\n machine.updateVmPolicy(policy)\n\n expected_xml = (u\"\"\"\n \n 50\n \n \n \n 200\n 201\n 202\n 203\n 204\n 205\n \n \n 100\n 101\n 102\n 103\n 104\n 105\n \n \n \n \n 400\n 401\n 402\n 403\n 404\n 405\n \n \n 300\n 301\n 302\n 304\n 305\n \n \n \n \n \"\"\")\n\n self.assertXMLEqual(expected_xml, dom._metadata)\n\n def testCpuTune(self):\n LIMIT = 50\n with fake.VM(_VM_PARAMS) as machine:\n machine._dom = fake.Domain()\n policy = {\"vcpuLimit\": LIMIT}\n\n machine.updateVmPolicy(policy)\n\n stats = machine.getStats()\n assert stats['vcpuUserLimit'] == LIMIT\n\n def testIoTuneParser(self):\n with fake.VM() as machine:\n dom = fake.Domain()\n machine._dom = dom\n\n ioTuneValues = {\n \"name\": \"test-device-by-name\",\n \"path\": \"test-path\",\n \"maximum\": {\n \"total_bytes_sec\": 200, \"total_iops_sec\": 201,\n \"read_bytes_sec\": 202, \"read_iops_sec\": 203,\n \"write_bytes_sec\": 204, \"write_iops_sec\": 205\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 100, \"total_iops_sec\": 101,\n \"read_bytes_sec\": 102, \"read_iops_sec\": 103,\n \"write_bytes_sec\": 104, \"write_iops_sec\": 105\n }\n }\n\n dom = io_tune_to_dom(ioTuneValues)\n parsed = io_tune_dom_to_values(dom)\n\n assert ioTuneValues == parsed\n\n def testIoTuneMerge(self):\n with fake.VM() as machine:\n dom = fake.Domain()\n machine._dom = dom\n\n ioTuneValues1 = {\n \"path\": \"test-path\",\n \"maximum\": {\n \"total_bytes_sec\": 0, \"total_iops_sec\": 0,\n \"read_bytes_sec\": 0,\n \"write_bytes_sec\": 999, \"write_iops_sec\": 0\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 999, \"total_iops_sec\": 0,\n \"read_bytes_sec\": 0, \"read_iops_sec\": 0,\n \"write_bytes_sec\": 0, \"write_iops_sec\": 0\n }\n }\n\n ioTuneValues2 = {\n \"name\": \"test-device-by-name\",\n \"maximum\": {\n \"total_bytes_sec\": 200, \"total_iops_sec\": 201,\n \"read_bytes_sec\": 202, \"read_iops_sec\": 203,\n \"write_iops_sec\": 205\n },\n \"guaranteed\": {\n \"total_bytes_sec\": -1, \"total_iops_sec\": 101,\n \"read_bytes_sec\": 102, \"read_iops_sec\": 103,\n \"write_bytes_sec\": 104, \"write_iops_sec\": 105\n }\n }\n\n ioTuneExpectedValues = {\n \"name\": \"test-device-by-name\",\n \"path\": \"test-path\",\n \"maximum\": {\n \"total_bytes_sec\": 200, \"total_iops_sec\": 201,\n \"read_bytes_sec\": 202, \"read_iops_sec\": 203,\n \"write_bytes_sec\": 999, \"write_iops_sec\": 205\n },\n \"guaranteed\": {\n \"total_bytes_sec\": -1, \"total_iops_sec\": 101,\n \"read_bytes_sec\": 102, \"read_iops_sec\": 103,\n \"write_bytes_sec\": 104, \"write_iops_sec\": 105\n }\n }\n\n ioTuneMerged = io_tune_merge(ioTuneValues1, ioTuneValues2)\n\n assert ioTuneMerged == ioTuneExpectedValues\n\n def testUpdateExistingVmPolicy(self):\n with fake.VM() as machine:\n dom = fake.Domain()\n dom._metadata = \"\"\"\n \n 999\n \n \n \n 
9999\n \n \n \n \n 9999\n \n \n \n \n \"\"\"\n\n machine._dom = dom\n\n policy = {\n \"vcpuLimit\": 50,\n \"ioTune\": [\n {\n \"name\": \"test-device-by-name\",\n \"maximum\": {\n \"total_bytes_sec\": 200, \"total_iops_sec\": 201,\n \"read_bytes_sec\": 202, \"read_iops_sec\": 203,\n \"write_bytes_sec\": 204, \"write_iops_sec\": 205\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 100, \"total_iops_sec\": 101,\n \"read_bytes_sec\": 102, \"read_iops_sec\": 103,\n \"write_bytes_sec\": 104, \"write_iops_sec\": 105\n }\n },\n {\n \"path\": \"test-device-by-path\",\n \"maximum\": {\n \"total_bytes_sec\": 400, \"total_iops_sec\": 401,\n \"read_bytes_sec\": 402, \"read_iops_sec\": 403,\n \"write_bytes_sec\": 404, \"write_iops_sec\": 405\n },\n \"guaranteed\": {\n \"total_bytes_sec\": 300, \"total_iops_sec\": 301,\n \"read_bytes_sec\": 302, \"read_iops_sec\": 303,\n \"write_bytes_sec\": 304, \"write_iops_sec\": 305\n }\n }\n ]\n }\n\n machine.updateVmPolicy(policy)\n\n expected_xml = (u\"\"\"\n \n 50\n \n \n \n 200\n 201\n 202\n 203\n 204\n 205\n \n \n 100\n 101\n 102\n 103\n 104\n 105\n \n \n \n \n 9999\n \n \n \n \n 400\n 401\n 402\n 403\n 404\n 405\n \n \n 300\n 301\n 302\n 303\n 304\n 305\n \n \n \n \n \"\"\")\n\n self.assertXMLEqual(expected_xml, dom._metadata)\n\n def testGetIoTunePolicy(self):\n with fake.VM() as machine:\n dom = fake.Domain()\n dom._metadata = \"\"\"\n \n 999\n \n \n \n 9999\n \n \n \n \n 9999\n \n \n \n \n \"\"\"\n machine._dom = dom\n machine._updateIoTuneInfo()\n\n tunables = machine.io_tune_policy()\n expected = [\n {'name': u'test-device-by-name',\n 'maximum': {\n u'total_bytes_sec': 9999\n }},\n {'name': u'other-device',\n 'guaranteed': {\n u'total_bytes_sec': 9999\n }}\n ]\n assert tunables == expected\n\n @permutations([[''], [None]])\n def testNoIoTunePolicy(self, metadata):\n with fake.VM() as machine:\n dom = fake.Domain()\n dom._metadata = metadata\n machine._dom = dom\n\n tunables = machine.io_tune_policy()\n assert tunables == []\n\n @brokentest(\"the test expects overwrite, the code incrementally updates\")\n @permutations([\n # old_iotune\n [{}],\n [{\"ioTune\": {}}],\n [{\"ioTune\": {\"total_bytes_sec\": 9999}}],\n [{\"ioTune\": {\"total_iops_sec\": 9999}}],\n [{\"ioTune\": {\"total_bytes_sec\": 9999, \"total_iops_sec\": 9999}}],\n ])\n def testSetIoTune(self, old_iotune):\n\n drives = [\n vmdevices.storage.Drive(\n log=self.log,\n index=0,\n device=\"hdd\",\n path=\"/dev/dummy\",\n type=hwclass.DISK,\n iface=\"ide\",\n specParams=old_iotune,\n diskType=DISK_TYPE.BLOCK\n )\n ]\n\n # Make the drive look like a VDSM volume\n required = ('domainID', 'imageID', 'poolID', 'volumeID')\n for p in required:\n setattr(drives[0], p, \"1\")\n\n new_iotune = {\n \"write_bytes_sec\": 1,\n \"total_bytes_sec\": 0,\n \"read_bytes_sec\": 2\n }\n\n tunables = [\n {\n \"name\": drives[0].name,\n \"ioTune\": new_iotune,\n }\n ]\n\n expected_io_tune = {\n drives[0].name: new_iotune,\n }\n\n expected_xml = \"\"\"\n \n \n \n %s\n \"\"\" % (\"\\n\".join([\"<%s>%s\" % (k, v, k)\n for k, v in sorted(new_iotune.items())]))\n\n with fake.VM() as machine:\n dom = fake.Domain()\n machine._dom = dom\n for drive in drives:\n machine._devices[drive.type].append(drive)\n\n machine.setIoTune(tunables)\n\n assert expected_io_tune == dom._io_tune\n\n # Test that caches were properly updated\n assert drives[0].iotune == expected_io_tune[drives[0].name]\n self.assertXMLEqual(drives[0]._deviceXML, expected_xml)\n\n def testGetPolicyDisconnected(self):\n with fake.VM() as machine:\n machine._dom = 
virdomain.Disconnected(machine.id)\n policy = machine._getVmPolicy()\n assert policy is None\n\n def testSdIds(self):\n \"\"\"\n Tests that VM storage domains in use list is in sync with the vm\n devices in use\n \"\"\"\n domainID = uuid.uuid4()\n drives = [\n vmdevices.storage.Drive(\n self.log,\n index=0,\n device=\"disk\",\n path=\"/dev/dummy\",\n type=hwclass.DISK,\n iface=\"ide\",\n domainID=domainID,\n imageID=uuid.uuid4(),\n poolID=uuid.uuid4(),\n volumeID=uuid.uuid4(),\n diskType=DISK_TYPE.BLOCK,\n ),\n vmdevices.storage.Drive(\n self.log,\n index=0,\n device=\"hdd2\",\n path=\"/dev/dummy2\",\n type=hwclass.DISK,\n iface=\"ide\",\n diskType=DISK_TYPE.BLOCK,\n )\n ]\n\n with fake.VM() as machine:\n for drive in drives:\n machine._devices[drive.type].append(drive)\n\n assert machine.sdIds == set([domainID])\n\n def testVmGuestSocketFile(self):\n # No channel\n with fake.VM(self.conf) as testvm:\n assert testvm._guestSocketFile is None\n # New name\n channel = '''\n\n \n \n\n '''\n with fake.VM(self.conf, xmldevices=channel) as testvm:\n assert testvm._guestSocketFile == '/path/to/channel'\n # Old name\n channel = '''\n\n \n \n\n '''\n with fake.VM(self.conf, xmldevices=channel) as testvm:\n assert testvm._guestSocketFile == '/path/to/channel'\n\n def test_spice_restore_set_passwd(self):\n devices = '''\n\n \n\n'''\n with fake.VM(xmldevices=devices, create_device_objects=True) as testvm:\n out_dom_xml = testvm._correctGraphicsConfiguration(\n _load_xml('vm_restore_spice_before.xml'))\n\n self.assertXMLEqual(out_dom_xml,\n _load_xml('vm_restore_spice_after.xml'))\n\n @MonkeyPatch(os, 'unlink', lambda _: None)\n def test_release_vm_succeeds(self):\n with fake.VM(self.conf) as testvm:\n testvm.guestAgent = fake.GuestAgent()\n\n dom = fake.Domain()\n\n status = {\n 'graceful': 0,\n 'forceful': 0,\n }\n\n def graceful(*args):\n status['graceful'] += 1\n return response.success()\n\n def forceful(*args):\n status['forceful'] += 1\n return response.success()\n\n dom.destroyFlags = graceful\n dom.destroy = forceful\n testvm._dom = dom\n\n testvm.releaseVm()\n assert status == {\n 'graceful': 1,\n 'forceful': 0,\n }\n\n @MonkeyPatch(os, 'unlink', lambda _: None)\n @permutations([[1], [2], [3], [9]])\n def test_releasevm_fails(self, attempts):\n with fake.VM(self.conf) as testvm:\n testvm.guestAgent = fake.GuestAgent()\n\n dom = fake.Domain()\n\n status = {\n 'graceful': 0,\n 'forceful': 0,\n }\n\n def graceful(*args):\n status['graceful'] += 1\n raise fake.Error(libvirt.VIR_ERR_SYSTEM_ERROR)\n\n def forceful(*args):\n status['forceful'] += 1\n return response.success()\n\n dom.destroyFlags = graceful\n dom.destroy = forceful\n testvm._dom = dom\n\n testvm.releaseVm(gracefulAttempts=attempts)\n assert status == {\n 'graceful': attempts,\n 'forceful': 1,\n }\n\n def test_acpi_enabled(self):\n with fake.VM(arch=cpuarch.X86_64, features='') as testvm:\n assert testvm.acpi_enabled()\n\n def test_acpi_disabled(self):\n with fake.VM(arch=cpuarch.X86_64) as testvm:\n assert not testvm.acpi_enabled()\n\n def test_hotplug_lease(self):\n params = {\n 'type': hwclass.LEASE,\n 'sd_id': 'sd_id',\n 'lease_id': 'lease_id',\n }\n expected_conf = {\n 'device': hwclass.LEASE,\n 'path': '/path',\n 'offset': 1048576,\n }\n expected_conf.update(params)\n\n # we add a serial console device to the minimal XML,\n # because this is the simplest way to trigger the\n # flow that broke in rhbz#1590063\n devices = [{\n u'device': u'console',\n u'specParams': {\n u'consoleType': u'serial',\n u'enableSocket': u'true'\n },\n 
u'type': u'console',\n u'deviceId': u'd0fac53d-68cf-4cbb-8c9d-5f18625f04e7',\n u'alias': u'serial0'\n }]\n\n with fake.VM(\n params={},\n devices=devices,\n create_device_objects=True,\n arch=cpuarch.X86_64\n ) as testvm:\n testvm._dom = FakeLeaseDomain()\n testvm.cif = FakeLeaseClientIF(expected_conf)\n res = testvm.hotplugLease(params)\n\n assert res.pop('vmList') is not None\n assert res == response.success()\n # Up until here we verified the hotplugLease proper.\n\n def testMemSize(self):\n with fake.VM() as testvm:\n assert testvm.mem_size_mb() == 4096\n\n def testNvdimms(self):\n # Add NVDIMM to the devices, update memory and make sure\n # mem_size_mb() returns proper size\n vm_xml = '''\n \n nTESTING\n TESTING\n 12582912\n \n hvm\n \n \n \n \n /dev/pmem0\n 131072\n \n \n 8388608\n 0\n \n \n
\n \n \n \n '''\n with fake.VM(params={'xml': vm_xml}) as testvm:\n # Verify the memory is 4 GB and not 12 GB (4 + 8)\n assert testvm.mem_size_mb() == 4096\n\n\nclass ExpectedError(Exception):\n pass\n\n\nclass UnexpectedError(Exception):\n pass\n\n\n@expandPermutations\nclass TestVmDeviceHandling(TestCaseBase):\n conf = {\n 'devices': [],\n 'maxVCpus': '160',\n 'memGuaranteedSize': '512',\n 'memSize': '1024',\n 'smp': '8',\n 'vmId': '9ffe28b6-6134-4b1e-beef-1185f49c436f',\n 'vmName': 'testVm',\n }\n xml_conf = '''\n \n testVm\n 1234\n 1048576\n 1048576\n 160\n \n \n \n \n 9876\n
\n \n \n
\n \n \n \n \n \n \n \n \n
\n \n \n \n \n \n \n '''\n\n def test_device_setup_success(self):\n devices = [fake.Device('device_{}'.format(i)) for i in range(3)]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n self.assertNotRaises(testvm._setup_devices)\n assert devices[0].state == fake.SETUP\n assert devices[1].state == fake.SETUP\n assert devices[2].state == fake.SETUP\n\n def test_device_setup_fail_first(self):\n devices = ([fake.Device('device_0', fail_setup=ExpectedError)] +\n [fake.Device('device_{}'.format(i)) for i in range(1, 3)])\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n with pytest.raises(ExpectedError):\n testvm._setup_devices()\n assert devices[0].state == fake.SETUP\n assert devices[1].state == fake.CREATED\n assert devices[2].state == fake.CREATED\n\n def test_device_setup_fail_second(self):\n devices = [fake.Device('device_0'),\n fake.Device('device_1', fail_setup=ExpectedError),\n fake.Device('device_2')]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n with pytest.raises(ExpectedError):\n testvm._setup_devices()\n assert devices[0].state == fake.TEARDOWN\n assert devices[1].state == fake.SETUP\n assert devices[2].state == fake.CREATED\n\n def test_device_setup_fail_third(self):\n devices = [fake.Device('device_0'), fake.Device('device_1'),\n fake.Device('device_2', fail_setup=ExpectedError)]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n with pytest.raises(ExpectedError):\n testvm._setup_devices()\n assert devices[0].state == fake.TEARDOWN\n assert devices[1].state == fake.TEARDOWN\n assert devices[2].state == fake.SETUP\n\n def test_device_setup_correct_exception(self):\n devices = [fake.Device('device_0', fail_teardown=UnexpectedError),\n fake.Device('device_1',\n fail_setup=ExpectedError,\n fail_teardown=UnexpectedError),\n fake.Device('device_2', fail_setup=UnexpectedError)]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n with pytest.raises(ExpectedError):\n testvm._setup_devices()\n assert devices[0].state == fake.TEARDOWN\n assert devices[1].state == fake.SETUP\n assert devices[2].state == fake.CREATED\n\n def test_device_teardown_success(self):\n devices = [fake.Device('device_{}'.format(i)) for i in range(3)]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n self.assertNotRaises(testvm._setup_devices)\n self.assertNotRaises(testvm._teardown_devices)\n assert devices[0].state == fake.TEARDOWN\n assert devices[1].state == fake.TEARDOWN\n assert devices[2].state == fake.TEARDOWN\n\n def test_device_teardown_fail_all(self):\n devices = [fake.Device('device_{}'.format(i),\n fail_teardown=UnexpectedError)\n for i in range(3)]\n\n with fake.VM(self.conf, create_device_objects=True) as testvm:\n testvm._devices['general'] = devices\n self.assertNotRaises(testvm._setup_devices)\n self.assertNotRaises(testvm._teardown_devices)\n assert devices[0].state == fake.TEARDOWN\n assert devices[1].state == fake.TEARDOWN\n assert devices[2].state == fake.TEARDOWN\n\n @permutations([\n [[], '0'],\n [[0], '1'],\n [[1, 2], '0'],\n [[0, 2], '1'],\n [[0, 1], '2'],\n ])\n def test_getNextIndex(self, used, expected):\n with fake.VM(self.conf) as testvm:\n # TODO: get rid of mangling\n assert testvm._Vm__getNextIndex(used) == expected\n\n @permutations([\n ['', 
''],\n ['123', '123'],\n ['ide', 'ide'],\n ['sata', 'sd'],\n ['scsi', 'sd'],\n ])\n def test_indiceForIface(self, iface, expected):\n with fake.VM(self.conf) as testvm:\n assert testvm._indiceForIface(iface) == expected\n\n @permutations([\n # We have to make sure that 'sd' key exists otherwise even defaultdict\n # will KeyError on access.\n [{'sd': []}, {'iface': 'sata'}, {'sd': [0]}],\n [{'sd': [0]}, {'iface': 'sata'}, {'sd': [0, 1]}],\n [{'sd': [1]}, {'iface': 'sata'}, {'sd': [1, 0]}],\n [{'sd': [0, 2]}, {'iface': 'sata'}, {'sd': [0, 2, 1]}],\n [{'sd': [], 'other': [0]}, {'iface': 'sata'},\n {'other': [0], 'sd': [0]}],\n [{'sd': [0]}, {'iface': 'scsi'}, {'sd': [0, 1]}],\n ])\n def test_updateDriveIndex(self, used, drv, expected):\n with fake.VM(self.conf) as testvm:\n testvm._usedIndices = used\n testvm.updateDriveIndex(drv)\n assert testvm._usedIndices == expected\n\n @permutations([\n [[{'iface': 'scsi', 'index': '1'}, {'iface': 'sata'}],\n [{'iface': 'scsi', 'index': '1'}, {'iface': 'sata', 'index': '0'}]],\n [[{'iface': 'scsi'}, {'iface': 'ide'}],\n [{'iface': 'scsi', 'index': '0'}, {'iface': 'ide', 'index': '0'}]],\n [[{'iface': 'scsi'}, {'iface': 'sata'}, {'iface': 'ide'}],\n [{'iface': 'scsi', 'index': '0'}, {'iface': 'sata', 'index': '1'},\n {'iface': 'ide', 'index': '0'}]],\n ])\n def test_normalizeDrivesIndices(self, drives, expected):\n with fake.VM(self.conf) as testvm:\n assert testvm.normalizeDrivesIndices(drives) == expected\n\n def test_xml_device_processing(self):\n with fake.VM({'xml': self.xml_conf}) as vm:\n devices = vm._make_devices()\n assert sum([len(v) for v in devices.values()]) == 2\n\n\nVM_EXITS = tuple(product((define.NORMAL, define.ERROR),\n list(vmexitreason.exitReasons.keys())))\n\n\n@expandPermutations\nclass TestVmExit(TestCaseBase):\n @permutations(VM_EXITS)\n def testExitReason(self, exitCode, exitReason):\n \"\"\"\n test of:\n exitReason round trip;\n error message is constructed correctly automatically\n \"\"\"\n with fake.VM() as testvm:\n testvm.setDownStatus(exitCode, exitReason)\n stats = testvm.getStats()\n assert stats['exitReason'] == exitReason\n assert stats['exitMessage'] == \\\n vmexitreason.exitReasons.get(exitReason)\n\n @permutations(VM_EXITS)\n def testExitReasonExplicitMessage(self, exitCode, exitReason):\n \"\"\"\n test of:\n exitReason round trip;\n error message can be overridden explicitely\n \"\"\"\n with fake.VM() as testvm:\n msg = \"test custom error message\"\n testvm.setDownStatus(exitCode, exitReason, msg)\n stats = testvm.getStats()\n assert stats['exitReason'] == exitReason\n assert stats['exitMessage'] == msg\n\n\n_VM_PARAMS = {'displayPort': -1, 'displaySecurePort': -1,\n 'display': 'qxl', 'displayIp': '127.0.0.1',\n 'vmType': 'kvm', 'memSize': 1024}\n\n\nclass TestVmStats(TestCaseBase):\n\n def testGetNicStats(self):\n GBPS = 10 ** 9 // 8\n MAC = '52:54:00:59:F5:3F'\n pretime = vdsm.common.time.monotonic_time()\n with fake.VM(_VM_PARAMS) as testvm:\n res = vmstats._nic_traffic(\n testvm, fake.Nic(\n name='vnettest', model='virtio', mac_addr=MAC\n ),\n start_sample={'net.0.rx.bytes': 2 ** 64 - 15 * GBPS,\n 'net.0.rx.pkts': 1,\n 'net.0.rx.errs': 2,\n 'net.0.rx.drop': 3,\n 'net.0.tx.bytes': 0,\n 'net.0.tx.pkts': 4,\n 'net.0.tx.errs': 5,\n 'net.0.tx.drop': 6},\n start_index=0,\n end_sample={'net.0.rx.bytes': 0,\n 'net.0.rx.pkts': 7,\n 'net.0.rx.errs': 8,\n 'net.0.rx.drop': 9,\n 'net.0.tx.bytes': 5 * GBPS,\n 'net.0.tx.pkts': 10,\n 'net.0.tx.errs': 11,\n 'net.0.tx.drop': 12},\n end_index=0)\n posttime = 
vdsm.common.time.monotonic_time()\n assert 'sampleTime' in res\n assert pretime <= res['sampleTime'] <= posttime, \\\n 'sampleTime not in [%s..%s]' % (pretime, posttime)\n del res['sampleTime']\n assert res == {\n 'rxErrors': '8', 'rxDropped': '9',\n 'txErrors': '11', 'txDropped': '12',\n 'macAddr': MAC, 'name': 'vnettest',\n 'speed': '1000', 'state': 'unknown',\n 'rx': '0', 'tx': '625000000',\n }\n\n def testMultipleGraphicDeviceStats(self):\n device_types = ['spice', 'vnc']\n devices = '\\n'.join(['''\n\n \n'''.format(type_=t) for t in device_types])\n with fake.VM(xmldevices=devices, create_device_objects=True) as testvm:\n res = testvm.getStats()\n assert res['displayInfo']\n for dev_stats, type_ in zip(res['displayInfo'], device_types):\n assert dev_stats['type'] in type_\n assert 'port' in dev_stats\n\n def testDiskMappingHashInStatsHash(self):\n with fake.VM(_VM_PARAMS) as testvm:\n res = testvm.getStats()\n testvm.guestAgent.diskMappingHash += 1\n assert res['hash'] != testvm.getStats()['hash']\n\n @MonkeyPatch(vm, 'config',\n make_config([('vars', 'vm_command_timeout', '10')]))\n def testMonitorTimeoutResponsive(self):\n with fake.VM(_VM_PARAMS) as testvm:\n assert not testvm.isMigrating()\n stats = {'monitorResponse': '0'}\n testvm._setUnresponsiveIfTimeout(stats, 1) # any value < timeout\n assert stats['monitorResponse'] == '0'\n\n @MonkeyPatch(vm, 'config',\n make_config([('vars', 'vm_command_timeout', '1')]))\n def testMonitorTimeoutUnresponsive(self):\n with fake.VM(_VM_PARAMS) as testvm:\n assert testvm._monitorResponse == 0\n assert not testvm.isMigrating()\n stats = {'monitorResponse': '0'}\n testvm._setUnresponsiveIfTimeout(stats, 10) # any value > timeout\n assert stats['monitorResponse'] == '-1'\n\n @MonkeyPatch(vm, 'config',\n make_config([('vars', 'vm_command_timeout', '10')]))\n def testMonitorTimeoutOnAlreadyUnresponsive(self):\n with fake.VM(_VM_PARAMS) as testvm:\n self._monitorResponse = -1\n assert not testvm.isMigrating()\n stats = {'monitorResponse': '-1'}\n testvm._setUnresponsiveIfTimeout(stats, 1) # any value < timeout\n assert stats['monitorResponse'] == '-1'\n\n\n@expandPermutations\nclass TestLibVirtCallbacks(TestCaseBase):\n FAKE_ERROR = 'EFAKERROR'\n\n def test_onIOErrorPause(self):\n with fake.VM(_VM_PARAMS, runCpu=True) as testvm:\n assert testvm._guestCpuRunning\n testvm.onIOError('fakedev', self.FAKE_ERROR,\n libvirt.VIR_DOMAIN_EVENT_IO_ERROR_PAUSE)\n assert not testvm._guestCpuRunning\n assert testvm._pause_code == self.FAKE_ERROR\n\n def test_onIOErrorReport(self):\n with fake.VM(_VM_PARAMS, runCpu=True) as testvm:\n assert testvm._guestCpuRunning\n testvm.onIOError('fakedev', self.FAKE_ERROR,\n libvirt.VIR_DOMAIN_EVENT_IO_ERROR_REPORT)\n assert testvm._guestCpuRunning\n assert testvm._pause_code != self.FAKE_ERROR\n\n def test_onIOErrorNotSupported(self):\n \"\"\"action not explicitely handled, must be skipped\"\"\"\n with fake.VM(_VM_PARAMS, runCpu=True) as testvm:\n assert testvm._guestCpuRunning\n testvm.onIOError('fakedev', self.FAKE_ERROR,\n libvirt.VIR_DOMAIN_EVENT_IO_ERROR_NONE)\n assert testvm._guestCpuRunning\n assert testvm._pause_code is None # no error recorded\n\n @permutations([\n ['net1', set()],\n ['missing', set(('net1',))],\n ])\n def test_onDeviceRemoved(self, alias, kept_aliases):\n devices = '''\n\n \n \n \n \n \n \n
\n\n'''\n with fake.VM(_VM_PARAMS, xmldevices=devices,\n create_device_objects=True) as testvm:\n testvm._updateDomainDescriptor = lambda *args: None\n testvm.onDeviceRemoved(alias)\n assert set([d.alias for group in testvm._devices.values()\n for d in group]) == kept_aliases\n\n\nclass TestVmStatusTransitions(TestCaseBase):\n @pytest.mark.slow\n def testSavingState(self):\n with fake.VM(runCpu=True, status=vmstatus.UP) as testvm:\n testvm._dom = fake.Domain(domState=libvirt.VIR_DOMAIN_RUNNING)\n\n def _asyncEvent():\n testvm.onLibvirtLifecycleEvent(\n libvirt.VIR_DOMAIN_EVENT_SUSPENDED,\n -1, -1)\n\n t = threading.Thread(target=_asyncEvent)\n t.daemon = True\n\n def _fireAsyncEvent(*args):\n t.start()\n time.sleep(0.5)\n # pause the main thread to let the event one run\n\n with MonkeyPatchScope([(testvm, '_underlyingPause',\n _fireAsyncEvent)]):\n assert testvm.status()['status'] == vmstatus.UP\n testvm.pause(vmstatus.SAVING_STATE)\n assert testvm.status()['status'] == vmstatus.SAVING_STATE\n t.join()\n assert testvm.status()['status'] == vmstatus.SAVING_STATE\n # state must not change even after we are sure the event was\n # handled\n\n\nclass TestVmBalloon(TestCaseBase):\n def assertAPIFailed(self, res, specificErr=None):\n if specificErr is None:\n assert res['status']['code'] != 0\n else:\n assert res['status']['code'] == \\\n define.errCode[specificErr]['status']['code']\n\n def testSucceed(self):\n devices = ''\n with fake.VM(\n params={'memSize': 128 * 1024},\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n testvm._dom = fake.Domain()\n target = 256 * 1024\n testvm.setBalloonTarget(target)\n assert testvm._dom.__calls__ == [('setMemory', (target,), {})]\n\n def testVmWithoutDom(self):\n devices = ''\n with fake.VM(\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n with pytest.raises(exception.BalloonError):\n testvm.setBalloonTarget(128)\n\n def testTargetValueNotInteger(self):\n devices = ''\n with fake.VM(\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n with pytest.raises(exception.BalloonError):\n testvm.setBalloonTarget('foobar')\n\n def testLibvirtFailure(self):\n devices = ''\n with fake.VM(\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n testvm._dom = fake.Domain(virtError=libvirt.VIR_ERR_INTERNAL_ERROR)\n # we don't care about the error code as long as is != NO_DOMAIN\n with pytest.raises(exception.BalloonError):\n testvm.setBalloonTarget(256)\n\n def testGetBalloonInfo(self):\n with fake.VM() as testvm:\n assert testvm.get_balloon_info() == {}\n\n def testSkipBalloonModelNone(self):\n devices = ''\n with fake.VM(\n params={'memSize': 128 * 1024},\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n testvm._dom = fake.Domain()\n target = 256 * 1024\n testvm.setBalloonTarget(target)\n assert not hasattr(testvm._dom, '__calls__')\n\n def testBalloningDisabled(self):\n devices = ''\n with fake.VM(\n params={'memSize': 128 * 1024},\n xmldevices=devices,\n create_device_objects=True\n ) as testvm:\n testvm._dom = fake.Domain()\n target = 256 * 1024\n assert testvm._ballooning_enabled # Verify the default\n testvm._ballooning_enabled = False\n testvm.setBalloonTarget(target)\n assert not hasattr(testvm._dom, '__calls__')\n assert testvm._balloon_target is None\n\n\nclass ChangeBlockDevTests(TestCaseBase):\n def test_update_drive_parameters_failure(self):\n with fake.VM() as testvm:\n testvm.log = FakeLogger()\n\n # to make the update fail, the simplest way is to have\n # no devices 
whatsoever\n assert testvm._devices == vmdevices.common.empty_dev_map()\n assert testvm.conf['devices'] == []\n\n # the method will swallow all the errors\n testvm.updateDriveParameters({'name': 'vda'})\n\n # nothing should be added...\n assert testvm._devices == vmdevices.common.empty_dev_map()\n assert testvm.conf['devices'] == []\n\n # ... and the reason for no update should be logged\n assert testvm.log.messages\n\n\nclass FakeVm(vm.Vm):\n \"\"\"\n Fake Vm required for testing code that does not care about vm state,\n invoking libvirt apis via Vm._dom, and logging via Vm.log.\n \"\"\"\n\n log = logging.getLogger()\n\n def __init__(self, dom):\n self._dom = dom\n self._qga_lock = threading.Lock()\n\n\nclass FreezingTests(TestCaseBase):\n\n def setUp(self):\n self.dom = fake.Domain()\n self.vm = FakeVm(self.dom)\n\n def test_freeze(self):\n res = self.vm.freeze()\n assert res == response.success()\n assert self.dom.__calls__ == [(\"fsFreeze\", (), {})]\n\n def test_thaw(self):\n res = self.vm.thaw()\n assert res == response.success()\n assert self.dom.__calls__ == [(\"fsThaw\", (), {})]\n\n\nclass FreezingGuestAgentUnresponsiveTests(TestCaseBase):\n\n expected = response.error(\"nonresp\", message=\"fake error\")\n\n def setUp(self):\n self.dom = fake.Domain(\n virtError=libvirt.VIR_ERR_AGENT_UNRESPONSIVE,\n errorMessage=\"fake error\")\n self.vm = FakeVm(self.dom)\n\n def test_freeze(self):\n res = self.vm.freeze()\n assert res == self.expected\n\n def test_thaw(self):\n res = self.vm.thaw()\n assert res == self.expected\n\n\nclass FreezingUnsupportedTests(TestCaseBase):\n\n expected = response.error(\"unsupportedOperationErr\", message=\"fake error\")\n\n def setUp(self):\n self.dom = fake.Domain(\n virtError=libvirt.VIR_ERR_NO_SUPPORT,\n errorMessage=\"fake error\")\n self.vm = FakeVm(self.dom)\n\n def test_freeze(self):\n res = self.vm.freeze()\n assert res == self.expected\n\n def test_thaw(self):\n res = self.vm.thaw()\n assert res == self.expected\n\n\nclass FreezingUnexpectedErrorTests(TestCaseBase):\n\n def setUp(self):\n self.dom = fake.Domain(\n virtError=libvirt.VIR_ERR_INTERNAL_ERROR,\n errorMessage=\"fake error\")\n self.vm = FakeVm(self.dom)\n\n def test_freeze(self):\n res = self.vm.freeze()\n assert res == response.error(\"freezeErr\", message=\"fake error\")\n\n def test_thaw(self):\n res = self.vm.thaw()\n assert res == response.error(\"thawErr\", message=\"fake error\")\n\n\ndef err_no_domain():\n error = libvirt.libvirtError(\"No such domain\")\n error.err = [libvirt.VIR_ERR_NO_DOMAIN]\n return error\n\n\nclass FakePersistentDomain(object):\n\n def __init__(self, undefined, uuid, name, state):\n self.id = uuid\n self.name = name\n self._state = state\n self.undefined = undefined\n\n def state(self, flags):\n return self._state, 0\n\n def undefineFlags(self, flags=0):\n if self.id in self.undefined:\n raise err_no_domain()\n self.undefined.append(self.id)\n\n\nclass FakePersistentConnection(object):\n\n def __init__(self, domains):\n self.domains = domains\n\n def _no_domain_error(self):\n raise err_no_domain()\n\n def lookupByUUIDString(self, uuid):\n for d in self.domains:\n if d.id == uuid:\n return d\n else:\n raise self._no_domain_error()\n\n def lookupByName(self, name):\n for d in self.domains:\n if d.name == name:\n return d\n else:\n raise self._no_domain_error()\n\n\nclass FakePersistentVm(object):\n\n def __init__(self):\n self.id = '123'\n self.name = 'foo'\n self.log = logging.getLogger()\n\n\n@expandPermutations\nclass 
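# Taken together, the four Freezing* suites above fully specify how freeze()/thaw()
# translate libvirt failures into vdsm responses. A compact restatement of that
# mapping (a sketch of the behaviour under test, not the production code):
def classify_freeze_error(err_code, message, default_err):
    # default_err is 'freezeErr' for freeze() and 'thawErr' for thaw()
    if err_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
        return response.error('nonresp', message=message)
    if err_code == libvirt.VIR_ERR_NO_SUPPORT:
        return response.error('unsupportedOperationErr', message=message)
    return response.error(default_err, message=message)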
TestVmPersistency(TestCaseBase):\n\n @permutations([\n ((('123', 'bar', libvirt.VIR_DOMAIN_SHUTOFF),\n ('456', 'foo', libvirt.VIR_DOMAIN_SHUTOFF),),\n ['123', '456'],),\n ((('123', 'bar', libvirt.VIR_DOMAIN_CRASHED),\n ('456', 'foo', libvirt.VIR_DOMAIN_SHUTOFF),),\n ['123', '456'],),\n ((('123', 'foo', libvirt.VIR_DOMAIN_SHUTOFF),\n ('456', 'bar', libvirt.VIR_DOMAIN_SHUTOFF),),\n ['123'],),\n ((('123', 'foo', libvirt.VIR_DOMAIN_SHUTOFF),\n ('456', 'bar', libvirt.VIR_DOMAIN_RUNNING),),\n ['123'],),\n ((('123', 'bar', libvirt.VIR_DOMAIN_SHUTOFF),\n ('456', 'foo', libvirt.VIR_DOMAIN_RUNNING),),\n None,),\n ((('123', 'bar', libvirt.VIR_DOMAIN_RUNNING),\n ('456', 'foo', libvirt.VIR_DOMAIN_SHUTOFF),),\n None,),\n ((('456', 'bar', libvirt.VIR_DOMAIN_RUNNING),\n ('789', 'baz', libvirt.VIR_DOMAIN_SHUTOFF),),\n [],),\n ])\n def test_domain_cleanup(self, domain_specs, result):\n undefined = []\n domains = [FakePersistentDomain(undefined, *s) for s in domain_specs]\n connection = FakePersistentConnection(domains)\n if result is None:\n with pytest.raises(exception.VMExists):\n vm._undefine_stale_domain(FakePersistentVm(), connection)\n else:\n vm._undefine_stale_domain(FakePersistentVm(), connection)\n assert undefined == result\n\n\nclass BlockIoTuneTests(TestCaseBase):\n\n def setUp(self):\n self.iotune_low = {\n 'total_bytes_sec': 0,\n 'read_bytes_sec': 1000,\n 'write_bytes_sec': 1000,\n 'total_iops_sec': 0,\n 'write_iops_sec': 0,\n 'read_iops_sec': 0\n }\n self.iotune_high = {\n 'total_bytes_sec': 0,\n 'read_bytes_sec': 2000,\n 'write_bytes_sec': 2000,\n 'total_iops_sec': 0,\n 'write_iops_sec': 0,\n 'read_iops_sec': 0\n }\n self.iotune_wrong = {\n 'total_bytes_sec': 'XXX',\n 'read_bytes_sec': 1000,\n 'write_bytes_sec': 1000,\n 'total_iops_sec': 0,\n 'write_iops_sec': 0,\n 'read_iops_sec': 0\n }\n\n self.drive = FakeBlockIoTuneDrive('vda', path='/fake/path/vda')\n\n self.dom = FakeBlockIoTuneDomain()\n self.dom.iotunes = {self.drive.name: self.iotune_low.copy()}\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_get_fills_cache(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n\n res = testvm.io_tune_values()\n assert res\n assert self.dom.__calls__ == \\\n [('blockIoTune',\n (self.drive.name, libvirt.VIR_DOMAIN_AFFECT_LIVE),\n {})]\n\n res = testvm.io_tune_values()\n assert res\n assert self.dom.__calls__ == \\\n [('blockIoTune',\n (self.drive.name, libvirt.VIR_DOMAIN_AFFECT_LIVE),\n {})]\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_set_updates_cache(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n\n tunables = [\n {\"name\": self.drive.name, \"ioTune\": self.iotune_high}\n ]\n\n res = testvm.io_tune_values()\n self.assert_iotune_in_response(res, self.iotune_low)\n\n testvm.setIoTune(tunables)\n\n res = testvm.io_tune_values()\n self.assert_iotune_in_response(res, self.iotune_high)\n\n assert len(self.dom.__calls__) == 2\n self.assert_nth_call_to_dom_is(0, 'blockIoTune')\n self.assert_nth_call_to_dom_is(1, 'setBlockIoTune')\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_set_fills_cache(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n\n tunables = [\n {\"name\": self.drive.name, \"ioTune\": self.iotune_high}\n ]\n\n testvm.setIoTune(tunables)\n\n res = testvm.io_tune_values()\n self.assert_iotune_in_response(res, self.iotune_high)\n\n assert 
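# The permutation table in test_domain_cleanup above encodes the stale-domain
# policy: any persistent libvirt domain colliding with the new VM by UUID or by
# name is undefined when not running, while a running collision raises VMExists
# (a running '456' named 'foo' blocks VM 'foo', but an unrelated running domain is
# ignored). A sketch of that decision rule, reconstructed from the expected results
# only -- the real logic lives in vm._undefine_stale_domain:
def undefine_stale(conn, new_vm):
    stale = []
    for lookup, key in ((conn.lookupByUUIDString, new_vm.id),
                        (conn.lookupByName, new_vm.name)):
        try:
            stale.append(lookup(key))
        except libvirt.libvirtError:
            pass  # no collision on this key
    undefined = set()
    for dom in stale:
        if dom.id in undefined:
            continue  # same domain matched both by id and by name
        if dom.state(0)[0] == libvirt.VIR_DOMAIN_RUNNING:
            raise exception.VMExists()
        dom.undefineFlags(0)
        undefined.add(dom.id)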
len(self.dom.__calls__) == 1\n self.assert_nth_call_to_dom_is(0, 'setBlockIoTune')\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_cold_cache_set_preempts_get(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n\n def _interleaved_update():\n # this will run in the middle of io_tune_values()\n tunables = [\n {\"name\": self.drive.name, \"ioTune\": self.iotune_high}\n ]\n testvm.setIoTune(tunables)\n\n self.dom.callback = _interleaved_update\n self.assert_iotune_in_response(\n testvm.io_tune_values(),\n self.iotune_low\n )\n\n assert self.dom.iotunes == \\\n {self.drive.name: self.iotune_high}\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_set_iotune_invalid(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n\n tunables = [\n {\"name\": self.drive.name, \"ioTune\": self.iotune_wrong}\n ]\n\n with pytest.raises(exception.UpdateIOTuneError):\n testvm.setIoTune(tunables)\n\n @MonkeyPatch(utils, 'config',\n make_config([('sanlock', 'io_timeout', '1')]))\n def test_exit_with_error_on_resume(self):\n with fake.VM() as testvm:\n pretime = vdsm.common.time.monotonic_time() - 30.0\n testvm._pause_time = pretime\n testvm._resume_behavior = vm.ResumeBehavior.KILL\n\n testvm._dom = fake.Domain()\n\n with pytest.raises(vm.DestroyedOnResumeError):\n testvm.maybe_resume()\n\n testvm.onLibvirtLifecycleEvent(\n libvirt.VIR_DOMAIN_EVENT_STOPPED,\n libvirt.VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN,\n None)\n\n stats = testvm.getStats()\n assert stats['status'] == vmstatus.DOWN\n assert stats['exitCode'] == define.ERROR\n assert stats['exitReason'] == \\\n vmexitreason.DESTROYED_ON_PAUSE_TIMEOUT\n\n _PAUSED_VMS = {'auto_resume':\n {'pause_time_offset': 81.0,\n 'resume_behavior': vm.ResumeBehavior.AUTO_RESUME,\n 'pause': True, 'pause_code': 'EIO',\n 'expected_status': vmstatus.PAUSED},\n 'leave_paused':\n {'pause_time_offset': 81.0,\n 'resume_behavior': vm.ResumeBehavior.LEAVE_PAUSED,\n 'pause': True, 'pause_code': 'EIO',\n 'expected_status': vmstatus.PAUSED},\n 'kill':\n {'pause_time_offset': 81.0,\n 'resume_behavior': vm.ResumeBehavior.KILL,\n 'pause': True, 'pause_code': 'EIO',\n 'expected_status': vmstatus.DOWN},\n 'paused':\n {'pause_time_offset': 81.0,\n 'pause': True,\n 'expected_status': vmstatus.PAUSED},\n 'paused_now':\n {'pause_time_offset': 0.0,\n 'resume_behavior': vm.ResumeBehavior.KILL,\n 'pause': True, 'pause_code': 'EIO',\n 'expected_status': vmstatus.PAUSED},\n 'paused_later':\n {'pause_time_offset': 70.0,\n 'resume_behavior': vm.ResumeBehavior.KILL,\n 'pause': True, 'pause_code': 'EIO',\n 'expected_status': vmstatus.PAUSED},\n 'running':\n {'pause_time_offset': 81.0,\n 'pause': False,\n 'expected_status': vmstatus.WAIT_FOR_LAUNCH},\n }\n\n def test_kill_long_paused(self):\n cif = fake.ClientIF()\n test = functools.partial(self._kill_long_paused, cif)\n vm_params = []\n for vmid, params in self._PAUSED_VMS.items():\n params = {name: value for name, value in params.items()\n if name in ('pause_time_offset', 'resume_behavior',)}\n params['cif'] = cif\n params['vmid'] = vmid\n vm_params.append(params)\n fake.run_with_vms(test, vm_params)\n\n def _kill_long_paused(self, cif, vms):\n spec = self._PAUSED_VMS\n for vm_ in cif.getVMs().values():\n vm_._dom = fake.Domain()\n for vm_ in cif.getVMs().values():\n if not spec[vm_.id]['pause']:\n continue\n vm_.set_last_status(vmstatus.PAUSED)\n pause_code = spec[vm_.id].get('pause_code')\n if pause_code is not 
None:\n vm_._pause_code = pause_code\n periodic._kill_long_paused_vms(cif)\n for vm_ in cif.getVMs().values():\n expected_status = spec[vm_.id]['expected_status']\n assert vm_.lastStatus == expected_status, \\\n (\"Wrong status of `%s': actual=%s, expected=%s\" %\n (vm_.id, vm_.lastStatus, expected_status,))\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_io_tune_policy_values(self):\n with fake.VM() as testvm:\n testvm._dom = self.dom\n testvm._devices[hwclass.DISK] = (self.drive,)\n assert testvm.io_tune_policy_values() == {\n 'current_values': [{\n 'ioTune': self.iotune_low,\n 'name': self.drive.name,\n 'path': self.drive.path,\n }],\n 'policy': []\n }\n\n @MonkeyPatch(vm, 'isVdsmImage', lambda *args: True)\n def test_io_tune_policy_values_handle_exceptions(self):\n with fake.VM() as testvm:\n testvm._dom = virdomain.Disconnected(testvm.id)\n testvm._devices[hwclass.DISK] = (self.drive,)\n assert testvm.io_tune_policy_values() == {}\n\n def assert_nth_call_to_dom_is(self, nth, call):\n assert self.dom.__calls__[nth][0] == call\n\n def assert_iotune_in_response(self, res, iotune):\n assert res[0]['ioTune'] == iotune\n\n\nclass FakeBlockIoTuneDrive(object):\n\n def __init__(self, name, path=None):\n self.name = name\n self.path = path or os.path.join('fake', 'path', name)\n self.iotune = {}\n self._deviceXML = ''\n\n def getXML(self):\n return xmlutils.fromstring('')\n\n\nclass FakeBlockIoTuneDomain(object):\n\n def __init__(self):\n self.iotunes = {}\n self.callback = None\n\n @recorded\n def blockIoTune(self, name, flags=0):\n ret = self.iotunes.get(name, {}).copy()\n if self.callback is not None:\n self.callback()\n return ret\n\n @recorded\n def setBlockIoTune(self, name, iotune, flags=0):\n self.iotunes[name] = iotune.copy()\n\n\n@expandPermutations\nclass SyncGuestTimeTests(TestCaseBase):\n\n def _make_vm(self, virt_error=None):\n dom = fake.Domain(virtError=virt_error)\n return FakeVm(dom)\n\n @MonkeyPatch(time, 'time', lambda: 1234567890.125)\n def test_success(self):\n vm = self._make_vm()\n vm.syncGuestTime()\n assert vm._dom.__calls__ == [\n ('setTime', (), {'time': {'seconds': 1234567890,\n 'nseconds': 125000000}})\n ]\n\n @permutations([[libvirt.VIR_ERR_AGENT_UNRESPONSIVE],\n [libvirt.VIR_ERR_NO_SUPPORT],\n [libvirt.VIR_ERR_INTERNAL_ERROR]])\n def test_swallow_expected_errors(self, virt_error):\n vm = self._make_vm(virt_error=virt_error)\n with self.assertNotRaises():\n vm.syncGuestTime()\n\n\n@expandPermutations\nclass MetadataTests(TestCaseBase):\n\n _TEST_XML = u'''\n\n TESTING\n \n \n 4.2\n \n bar\n buzz\n \n \n \n \n'''\n\n _TEST_XML_CLUSTER_VERSION = u'''\n\n TESTING\n \n \n 4.2\n 4.2\n \n bar\n buzz\n \n \n \n \n'''\n\n _TEST_XML_LAUNCH_PAUSED = u'''\n\n TESTING\n \n \n true\n \n \n \n'''\n\n _TEST_XML_IMPLIED_PIN = u'''\n\n TESTING\n \n \n \n \n'''\n\n _TEST_XML_DEDICATED = u'''\n\n TESTING\n \n \n dedicated\n \n \n \n'''\n\n @contextmanager\n def test_vm(self, test_xml=None):\n with namedTemporaryDir() as tmp_dir:\n with MonkeyPatchScope([\n (constants, 'P_VDSM_RUN', tmp_dir),\n (libvirtconnection, 'get', fake.Connection),\n ]):\n params = {\n 'vmId': 'TESTING',\n 'vmName': 'nTESTING',\n 'xml': self._TEST_XML if test_xml is None else test_xml,\n }\n cif = fake.ClientIF()\n yield vm.Vm(cif, params)\n\n def test_conf_devices_empty(self):\n with self.test_vm() as testvm:\n assert testvm.conf['devices'] == []\n\n def test_custom_properties(self):\n with self.test_vm() as testvm:\n assert testvm._custom == \\\n {\n 'vmId': 'TESTING',\n 'custom':\n {\n 
'foo': 'bar',\n 'fizz': 'buzz',\n },\n }\n\n @permutations([\n (3, 6, True,),\n (4, 1, True,),\n (4, 2, True,),\n (4, 3, False,),\n (5, 1, False,),\n ])\n def test_min_cluster_version(self, major, minor, result):\n with self.test_vm(test_xml=self._TEST_XML_CLUSTER_VERSION) as testvm:\n assert testvm.min_cluster_version(major, minor) == result\n\n @permutations([\n (3, 6, False,),\n (4, 1, False,),\n (4, 2, False,),\n (4, 3, False,),\n (5, 1, False,),\n ])\n def test_void_cluster_version(self, major, minor, result):\n with self.test_vm(test_xml=self._TEST_XML) as testvm:\n assert testvm.min_cluster_version(major, minor) == result\n\n def test_launch_paused_default_false(self):\n with self.test_vm(test_xml=self._TEST_XML) as testvm:\n assert not testvm._launch_paused\n\n def test_launch_paused(self):\n with self.test_vm(test_xml=self._TEST_XML_LAUNCH_PAUSED) as testvm:\n assert testvm._launch_paused\n\n @permutations([\n (_TEST_XML, cpumanagement.CPU_POLICY_NONE),\n (_TEST_XML_IMPLIED_PIN, cpumanagement.CPU_POLICY_MANUAL),\n (_TEST_XML_DEDICATED, cpumanagement.CPU_POLICY_DEDICATED),\n ])\n def test_cpu_policy(self, xml, expected):\n with self.test_vm(test_xml=xml) as testvm:\n assert testvm.cpu_policy() == expected\n\n def test_manually_pinned_cpus(self):\n with self.test_vm(test_xml=self._TEST_XML_IMPLIED_PIN) as testvm:\n assert testvm.manually_pinned_cpus() == frozenset([0])\n\n\nclass TestQgaContext(TestCaseBase):\n\n def test_default_timeout(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n assert testvm._dom._agent_timeout == \\\n libvirt.VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK\n with testvm.qga_context():\n assert testvm._dom._agent_timeout == \\\n libvirt.VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK\n assert testvm._dom._agent_timeout == \\\n libvirt.VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK\n\n def test_reset_default_timeout(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n with testvm.qga_context(10):\n assert testvm._dom._agent_timeout == 10\n assert testvm._dom._agent_timeout == \\\n libvirt.VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK\n\n def test_libvirtError_not_handled(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n with pytest.raises(libvirt.libvirtError):\n with testvm.qga_context():\n # This exception should be propagated outside the context\n raise libvirt.libvirtError(\"Some error\")\n\n def test_unlock_clean(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n with testvm.qga_context():\n assert testvm._qga_lock.locked()\n # Make sure the lock was released properly\n assert not testvm._qga_lock.locked()\n\n def test_unlock_dirty(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n with pytest.raises(libvirt.libvirtError):\n with testvm.qga_context():\n assert testvm._qga_lock.locked()\n # Simulate error condition\n raise libvirt.libvirtError(\"Some error\")\n # Make sure the lock was released properly\n assert not testvm._qga_lock.locked()\n\n def test_handle_lock_timeout(self):\n with fake.VM() as testvm:\n testvm._dom = fake.Domain()\n # Lock it before entering the context\n testvm._qga_lock.acquire()\n with pytest.raises(exception.NonResponsiveGuestAgent):\n with testvm.qga_context(1):\n # We should not get here because the attempt to lock should\n # time out and qga_context should raise an exception.\n pass\n\n\nclass FakeLeaseDomain(object):\n\n def attachDevice(self, device_xml):\n pass\n\n def XMLDesc(self, flags=0):\n return ''\n\n def all_channels(self):\n return []\n\n\nclass 
FakeLeaseIRS(object):\n def __init__(self, conf):\n self._conf = conf\n\n def lease_info(self, lease):\n return response.success(result=self._conf)\n\n\nclass FakeLeaseClientIF(object):\n def __init__(self, conf):\n self.irs = FakeLeaseIRS(conf)\n\n\ndef _load_xml(name):\n test_path = os.path.realpath(__file__)\n data_path = os.path.join(\n os.path.split(test_path)[0], '..', 'devices', 'data')\n\n with open(os.path.join(data_path, name), 'r') as f:\n return f.read()\n","repo_name":"oVirt/vdsm","sub_path":"tests/virt/vm_test.py","file_name":"vm_test.py","file_ext":"py","file_size_in_byte":71249,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"} +{"seq_id":"42852528615","text":"import pandas as pd\nfrom datetime import timedelta\n\n\ndef __min_to_seconds(\n x: str, # mm:ss string\n) -> float:\n mins, secs = map(int, x.split(':'))\n td = timedelta(minutes=mins, seconds=secs)\n return td.total_seconds()\n\n\ndef __player_name_fix(\n x: str, # name str\n):\n return x.split(x[0]+'.')[0]\n\n\ndef preprocess_full(\n df: pd.DataFrame,\n) -> pd.DataFrame:\n df = df.drop(columns=['Unnamed: 0.1','Unnamed: 0']) # drop player numbers\n df = df.set_index('DATE')\n df['MIN'] = df['MIN'].apply(__min_to_seconds)\n df['PLAYER'] = df['PLAYER'].apply(__player_name_fix)\n df[['HOME','AWAY']] = df['MATCHUP'].str.split(' vs ', 1, expand=True) # split matchup into home/away cols\n df = df.drop(['MATCHUP'], axis=1)\n\n return df\n","repo_name":"yavord/nbaModel","sub_path":"src/lib/dataloader/preprocess/nba_preprocess.py","file_name":"nba_preprocess.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9158782978","text":"# main file, runs each module\r\nimport addition_module, subtraction_module, multiplication_module, division_module\r\n\r\nprint(\"\"\"\r\nwhich of the following modules would you like to do?\r\n┍---------┒ ┍-----------┒ ┍--------------┒ ┍--------┒\r\n|addition | |subtraction| |multiplication| |division|\r\n┕---------┛ ┕-----------┛ ┕--------------┛ ┕--------┛\r\n\"\"\")\r\nask = input()\r\nif ask == 'addition':\r\n addition_module.addit()\r\nelif ask == 'subtraction':\r\n subtraction_module.subtraction()\r\nelif ask == 'multiplication':\r\n multiplication_module.multiplication()\r\nelif ask == 'division':\r\n division_module.division()\r\n","repo_name":"whyammehere/adaptive_math_game","sub_path":"adaptive_math.py","file_name":"adaptive_math.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"16159957114","text":"from src.wator.WAgent import WAgent\n\n\"\"\"\nImplémentation d'un agent ayant le comportement d'un requin\n\"\"\"\nclass Shark(WAgent):\n def __init__(self, posX, posY, data):\n # position initiale de la particule\n super(Shark, self).__init__(posX, posY)\n\n # Gestation\n self.gestationDay = data[0]\n self.deadTime =data[1]\n self.hungry = 0\n self.color = \"pink\"\n\n\n def decide(self, env):\n \"\"\"\n Méthode qui permet à un agent de décider de son comportement\n\n :param env: Environement de l'agent\n \"\"\"\n self.gestation += 1\n self.age += 1\n self.change = False\n\n if (self.age >= 2):\n self.change = True\n self.color = \"red\"\n\n self.hungry +=1 # sinon il a faim\n if(self.hungry>=self.deadTime): # et il meurt\n env.kill(self.posX, self.posY)\n return\n\n positions = env.moore(self.posX, self.posY)\n poslibres = []\n # newPos 
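# A quick sanity check for the mm:ss parser in the nba_preprocess record above
# (illustrative values only -- the double-underscore name is module-private there,
# so the helper is restated inline):
from datetime import timedelta

def min_to_seconds(x):
    mins, secs = map(int, x.split(':'))
    return timedelta(minutes=mins, seconds=secs).total_seconds()

assert min_to_seconds('12:34') == 754.0  # 12 * 60 + 34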
= env.near(self.posX, self.posY)\n\n for pos in positions :\n if (pos[1] != None and pos[1].getType() == 1): # si le requin peut monger\n self.hungry = 0\n env.kill(pos[0][0], pos[0][1])\n self.updatePosition(env, pos[0], Shark, [self.gestationDay, self.deadTime])\n return\n if (pos[1] == None):\n poslibres.append(pos)\n\n if poslibres :\n self.updatePosition(env, poslibres[0][0], Shark, [self.gestationDay, self.deadTime])\n\n\n def getType(self):\n \"\"\"\n Retourne le type de l'agent\n\n :return: Retourne le type de l'agent\n \"\"\"\n return 2\n","repo_name":"lrdoz/Simulation_centre_individus","sub_path":"src/wator/Shark.py","file_name":"Shark.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2705612769","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 6 15:20:32 2018\n\n@author: 燃烧杯\n\"\"\"\nimport os\nimport re\nfrom .smali import Smali\n\nclass Ware:\n \n __smali_pat = re.compile(r\"\\.smali$\")\n \n def __init__(self, path, isMalware):\n self.name = os.path.split(path)[-1]\n smaliPath = os.path.join(path, \"smali\")\n self.smalis = []\n self.isMalware = isMalware\n for root, dirs, files in os.walk(smaliPath):\n for file in files:\n if Ware.__smali_pat.findall(file):\n self.smalis.append(Smali(\n os.path.join(root, file)\n ))\n \n def extractFeature(self, datafile):\n feature = ''\n for smali in self.smalis:\n feature += smali.getFeature()\n datafile.append(self.name, feature, self.isMalware)\n \n \n \n ","repo_name":"DQinYuan/AndroidMalwareWithN-gram","sub_path":"infrastructure/ware.py","file_name":"ware.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"16"} +{"seq_id":"71742428489","text":"from collections import deque\n\nn, l, r = map(int, input().split())\ndata = []\nfor _ in range(n):\n data.append(list(map(int, input().split())))\n\n\ndef bfs(x, y):\n queue = deque()\n queue.append((x, y))\n united = [] # 연합\n united.append((x, y))\n visited[x][y] = 1\n total = data[x][y] # 연합의 인구수\n count = 1 # 연합을 이루고 있는 칸의 개수\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n mx = x + dx[i]\n my = y + dy[i]\n\n if mx < 0 or my < 0 or mx >= n or my >= n:\n continue\n\n if l <= abs(data[mx][my]-data[x][y]) <= r and visited[mx][my] == 0:\n visited[mx][my] = 1\n united.append((mx, my))\n queue.append((mx, my))\n total += data[mx][my]\n count += 1\n\n for i, j in united:\n data[i][j] = total // count\n\n\nanswer = 0\nwhile True:\n visited = [[0]*n for _ in range(n)]\n idx = 0\n for i in range(n):\n for j in range(n):\n if visited[i][j] == 0:\n bfs(i, j)\n idx += 1\n if idx == n*n:\n break\n\n answer += 1\nprint(answer)\n","repo_name":"rbgksqkr/TIL","sub_path":"이코테/3. 
탐색/Q21_인구이동.py","file_name":"Q21_인구이동.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20508756560","text":"# draw an art piece\nimport turtle\n\ndef draw_sq():\n\t# get a turtule\n\tchuck = turtle.Turtle()\n\t# get it some personality\n\tchuck.shape(\"turtle\")\n\tchuck.color(\"brown\")\n\n\tfor i in range(0, 4):\n\t\tchuck.forward(100)\n\t\tchuck.right(90)\n\t\tchuck.speed(1)\n\ndef draw_circle():\n\tpink = turtle.Turtle()\n\tpink.shape(\"arrow\")\n\tpink.color(\"red\")\n\n\tpink.circle(20)\n\ndef draw_tri():\n\tringo = turtle.Turtle()\n\tringo.shape(\"turtle\")\n\tringo.color(\"black\")\n\tringo.speed(5)\n\n\tfor i in range(0, 3):\n\t\tringo.forward(70)\n\t\tringo.right(120)\n\t\tringo.forward(70)\n\ndef draw_art():\n\tcanv = turtle.Screen()\n\tcanv.bgcolor(\"white\")\n\tdraw_sq()\n\tdraw_circle()\n\tdraw_tri()\n\t# close canvas on mouse click\n\tcanv.exitonclick()\n\ndraw_art()\n\n\n\n\n\n\t","repo_name":"amrsekilly/proFound-Python","sub_path":"2a/art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10025732315","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@author: @樊厚翔\n@contact: houxiang_fan@163.com \n@file: j_one_hot_encode.py\n@time: 2019/6/25 16:08 \n\"\"\"\n\n\"\"\"单词级的one-hot编码\"\"\"\nimport numpy as np\n\nsamples = ['The cat sat on the mat.','The dog ate my homework.']\ntoken_index = {}\n\nfor sample in samples:\n for word in sample.split():\n if word not in token_index:\n token_index[word] = len(token_index) + 1\n\nmax_length = 10\nresults = np.zeros(shape = (len(samples),\n max_length,\n max(token_index.values()) + 1))\n# print(results)\nfor i,sample in enumerate(samples):\n # print(i,sample)\n for j,word in list(enumerate(sample.split()))[:max_length]:\n # print(j,word)\n index = token_index.get(word)\n # print(index,word)\n results[i,j,index] = 1.\n# print(results)\n\n","repo_name":"masamibf/deep_leraning_with_python--note","sub_path":"j_one_hot_encode.py","file_name":"j_one_hot_encode.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"33468646444","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport sys\nimport pdb\nimport argparse\nimport utils as ut\nimport numpy as np\n\nsys.path.append('../tool')\nimport toolkits\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--net', default='resnet50', type=str)\nparser.add_argument('--loss', default='softmax', choices=['softmax'], type=str)\nparser.add_argument('--aggregation', default='avg', choices=['avg'], type=str)\nparser.add_argument('--batch_size', default=4, type=int)\nparser.add_argument('--resume', default='', type=str)\nparser.add_argument('--gpu', default='0', type=str)\nparser.add_argument('--mode', default='eval', choices=['train', 'eval'], type=str)\nparser.add_argument('--benchmark', default='ijbb', choices=['ijbb', 'ijbc'], type=str)\n\nparser.add_argument('--feature_dim', default=512, choices=[512], type=int)\nparser.add_argument('--data_path', default='path_to_the_ijbb', type=str)\n\nglobal args\nargs = parser.parse_args()\n\n\ndef chunks(l, n):\n # For item i in a range that is a length of l,\n for i in range(0, len(l), n):\n # Create an index range for l of n items:\n yield l[i:i+n]\n\n\ndef 
get_data_path():\n print('==> get data path, template id and media id.')\n\n def get_datalist(s):\n file = open(s, 'r')\n ijbb_meta = file.readlines()\n faceid, tid, mid = [], [], []\n for j in ijbb_meta:\n jsplit = j.split()\n faceid += [jsplit[0]]\n tid += [int(jsplit[1])]\n mid += [int(jsplit[-1])]\n\n faceid, template_id, media_id = map(np.array, [faceid, tid, mid])\n return faceid, template_id, media_id\n\n # ==> put the image paths into a long list, and break down into sublists as indicated by batch_size\n tid_mid_path = '../meta/{}_face_tid_mid.txt'.format(args.benchmark)\n faces, templates, medias = get_datalist(tid_mid_path)\n facepaths = np.array([os.path.join(args.data_path, f) for f in faces])\n return facepaths, templates, medias\n\n\ndef get_verification_label():\n # =============================================================\n # load meta information for template-to-template verification.\n # tid --> template id, label --> 1/0\n # format:\n # tid_1 tid_2 label\n # =============================================================\n print('==> get verification template pair and label.')\n tid_pair_path = '../meta/{}_template_pair_label.txt'.format(args.benchmark)\n file = open(tid_pair_path, 'r')\n meta = file.readlines()\n Y, p1, p2 = [], [], []\n for m in meta:\n msplit = m.split()\n Y += [int(msplit[-1])]\n p1 += [int(msplit[0])]\n p2 += [int(msplit[1])]\n Y, p1, p2 = map(np.array, [Y, p1, p2])\n return Y, p1, p2\n\n\ndef initialize_model():\n # Set basic environments.\n # Initialize GPUs\n toolkits.initialize_GPU(args)\n\n # ==> loading the pre-trained model.\n import model\n model_eval = None\n if args.aggregation == 'avg':\n if args.loss == 'softmax':\n model_eval = model.Vggface2_ResNet50(mode=args.mode)\n else:\n raise IOError('==> unknown loss type.')\n else:\n raise IOError('==> unknown aggregation mode.')\n\n print('test: {}_{}_{} on {} benchmark.'.format(args.net, args.aggregation, args.loss, args.benchmark))\n\n if args.resume:\n if os.path.isfile(args.resume):\n model_eval.load_weights(args.resume, by_name=True)\n print('==> successfully loaded the model {}'.format(args.resume))\n else:\n raise IOError('==> can not find the model to load {}'.format(args.resume))\n return model_eval\n\n\ndef image_encoding(model, facepaths):\n print('==> compute image-level feature encoding.')\n num_faces = len(facepaths)\n face_feats = np.empty((num_faces, args.feature_dim))\n imgpaths = facepaths.tolist()\n imgchunks = list(chunks(imgpaths, args.batch_size))\n\n for c, imgs in enumerate(imgchunks):\n im_array = np.array([ut.load_data(path=i, shape=(224, 224, 3), mode='eval') for i in imgs])\n f = model.predict(im_array, batch_size=args.batch_size)\n start = c * args.batch_size\n end = min((c + 1) * args.batch_size, num_faces)\n face_feats[start:end] = f\n if c % 500 == 0:\n print('-> finish encoding {}/{} images.'.format(c*args.batch_size, num_faces))\n return face_feats\n\n\ndef template_encoding(templates, medias, img_norm_feats):\n # ==========================================================\n # 1. face image --> l2 normalization.\n # 2. compute media encoding.\n # 3. compute template encoding.\n # 4. 
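# (For reference, chunks() defined near the top of this file is a plain batching
# generator; with illustrative values, list(chunks([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]]. The trailing short chunk is why image_encoding() clamps
# its write range with end = min((c + 1) * args.batch_size, num_faces).)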
save template features.\n # ==========================================================\n print('==> compute template-level feature encoding.')\n\n uq_temp = np.unique(templates)\n num_temp = len(uq_temp)\n tmp_feats = np.empty((num_temp, args.feature_dim))\n\n for c, uqt in enumerate(uq_temp):\n (ind_t,) = np.where(templates == uqt)\n face_norm_feats = img_norm_feats[ind_t]\n faces_media = medias[ind_t]\n uqm, counts = np.unique(faces_media, return_counts=True)\n media_norm_feats = []\n\n for u,ct in zip(uqm, counts):\n (ind_m,) = np.where(faces_media == u)\n if ct < 2:\n media_norm_feats += [face_norm_feats[ind_m]]\n else:\n media_norm_feats += [np.sum(face_norm_feats[ind_m], 0, keepdims=True)]\n\n media_norm_feats = np.array(media_norm_feats)\n media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))\n template_norm_feats = np.sum(media_norm_feats, 0)\n tmp_feats[c] = template_norm_feats\n if c % 500 == 0:\n print('-> finish encoding {}/{} templates.'.format(c, num_temp))\n return tmp_feats\n\n\ndef verification(unique_templates, tmp_feats, p1, p2):\n print('==> compute template verification results.')\n # ==========================================================\n # Loading the Template-specific Features.\n # ==========================================================\n uq_temp = unique_templates\n score = np.zeros((len(p1),))\n # ==========================================================\n # Compute set-to-set Similarity Score.\n # ==========================================================\n total_pairs = np.array(range(len(p1)))\n batchsize = 256\n sublists = [total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)]\n num_sublists = len(sublists)\n\n for c, s in enumerate(sublists):\n t1 = p1[s]\n t2 = p2[s]\n ind1 = np.squeeze(np.array([np.where(uq_temp == j) for j in t1]))\n ind2 = np.squeeze(np.array([np.where(uq_temp == j) for j in t2]))\n\n inp1 = tmp_feats[ind1]\n inp2 = tmp_feats[ind2]\n\n v1 = inp1 / np.sqrt(np.sum(inp1 ** 2, -1, keepdims=True))\n v2 = inp2 / np.sqrt(np.sum(inp2 ** 2, -1, keepdims=True))\n\n similarity_score = np.sum(v1 * v2, -1)\n score[s] = similarity_score\n if c % 500 == 0: print('-> finish {}/{} pair verification.'.format(c, num_sublists))\n return score\n\n\ndef compute_ROC(labels, scores, roc_path):\n print('==> compute ROC.')\n import sklearn.metrics as skm\n from scipy import interpolate\n fpr, tpr, thresholds = skm.roc_curve(labels, scores)\n fpr_levels = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n f_interp = interpolate.interp1d(fpr, tpr)\n tpr_at_fpr = [f_interp(x) for x in fpr_levels]\n roc_txt = roc_path[:-3] + 'txt'\n file = open('../result/{}'.format(roc_txt), 'w')\n for (far, tar) in zip(fpr_levels, tpr_at_fpr):\n print('TAR @ FAR = {} : {}'.format(far, tar))\n file.write('TAR @ FAR = {}: {}\\n'.format(far, tar))\n file.close()\n\n\nif __name__ == '__main__':\n facepaths, templates, medias = get_data_path()\n groundtruth, template_1, template_2 = get_verification_label()\n unique_templates = np.unique(templates)\n\n model_eval = initialize_model()\n face_feats = image_encoding(model_eval, facepaths)\n template_feats = template_encoding(templates, medias, face_feats)\n score = verification(unique_templates, template_feats, template_1, template_2)\n\n score_path = args.resume.split(os.sep)[-2] + '_dim{}_scores.npy'.format(args.feature_dim)\n np.save('../result/{}'.format(score_path), score)\n compute_ROC(groundtruth, score, 
score_path)\n","repo_name":"WeidiXie/Keras-VGGFace2-ResNet50","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"16"} +{"seq_id":"33673783151","text":"num = int(input('Enter a positive number. Enter 0 to end: '))\n\noutput = 0\nfor i in range(1, num+1):\n if num % i == 0:\n print(i)\n output = output + i\n\nprint('The sum of divisors is:'+ str(output))\n\n","repo_name":"grijdas/learning-python","sub_path":"Assignment2.py","file_name":"Assignment2.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74409760009","text":"import math\nimport cv2\nimport os\nimport numpy as np\nfrom scipy.ndimage.filters import maximum_filter, convolve1d\nfrom scipy.interpolate import interp2d\nimport matplotlib.pyplot as plt\n\ndef gaussian(n=5, sigma=1):\n\n x = range(-int(n/2), int(n/2)+1)\n # raspakivanje liste\n x_osa = [*x]\n # prolazak kroz svaki element liste\n f = [1/(sigma*math.sqrt(2*math.pi))*math.exp(-float(el)**2/(2*sigma**2)) for el in x]\n\n return f, x_osa\n\n\ndef im_norm(im):\n im_n = (im-im.min())/(im.max()-im.min())\n return im_n\n\n\ndef im_pyr_decomp(im, N):\n\n size_vec = im.shape\n red_faktor = 2**N\n add_size = np.flip([int((np.ceil(el / red_faktor) * red_faktor - el)) for el in im.shape])\n [v, s] = im.shape\n opim = np.zeros((add_size[1] + v, add_size[0] + s))\n opim[0: v, 0: s] = im\n opim[v + 1: v + add_size[1], :] = opim[v:v-add_size[1]+1:-1, :]\n opim[:, s + 1: s + add_size[0]] = opim[:, s:s - add_size[0] + 1:-1]\n im = opim\n\n\n\n # Funkcija pravi Gausovu i Laplasovu piramidu od N nivoa od slike im\n GPyr = []\n LPyr = []\n for i in range(N):\n GPyr.append(im)\n g = cv2.pyrDown(im, borderType=cv2.BORDER_REPLICATE)\n g_up = cv2.pyrUp(g, cv2.BORDER_REPLICATE)\n\n\n l = im - g_up\n LPyr.append(l)\n im = g\n Res = im\n return LPyr, GPyr, Res, size_vec\n\n\ndef im_pyr_recon(LPyr, Res, size_vec):\n # Funkcija rekonstruise sliku na osnovu Laplasove piramide i reziduala\n\n # dubina razlaganja\n N = len(LPyr)\n for i in range(N, 0, -1):\n\n Res = cv2.pyrUp(Res, cv2.BORDER_REFLECT)+LPyr[i-1]\n\n Res = Res[0:size_vec[0], 0:size_vec[1]]\n im_rec = Res\n return im_rec\n\ndef data_fname_split(fn, param='fname'):\n f1 = os.path.split(fn)\n f2 = os.path.splitext(f1[1])\n if param=='fname':\n res = f2[0]\n elif param=='ext':\n res = f2[1]\n elif param=='path':\n res = f1[0]\n\n return res\n\ndef read_raw(im_fname,crop_flag=0, preview_flag=0):\n\n hdr = []\n l = os.stat(im_fname).st_size # size in bytes\n ext = data_fname_split(im_fname,'ext')\n L_hdr = 0\n if ext in ['.fxd','.FXD','.raw','.RAW','']:\n if l==13824000: #Pixium 3543 EZ, indeks 7\n im_size = [2880, 2400]\n #Mode informacije\n DetInfo = {'Index':7, 'Name': 'Trixell3543EZ', 'Type': 'DR', 'LinMax':65535, 'PixelSize':0.148}\n elif l==16588800: #Pixium 4143 ili 4343, indeks 3\n im_size = [2880, 2880]\n # Mode informacije\n DetInfo = {'Index': 3, 'Name': 'Trixell4143/4343EZ', 'Type': 'DR', 'LinMax': 65535, 'PixelSize': 0.148}\n elif l==19481282: #Pixium 4600, indeks 0\n im_size = [3121, 3121]\n # Mode informacije\n DetInfo = {'Index': 0, 'Name': 'TrixellPixium4600', 'Type': 'DR', 'LinMax': 16384, 'PixelSize': 0.143,\n 'Xmin':60, 'Ymin':60, 'Nx':3001, 'Ny':3001} #Aktivni deo slike\n elif l==14400000: #Trixell Pixium portable 3543, indeks 4\n im_size = [2400, 3000]\n # Mode informacije\n DetInfo = 
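# im_pyr_decomp/im_pyr_recon above implement a Laplacian pyramid: each level keeps
# the detail l = im - pyrUp(pyrDown(im)), so synthesis (pyrUp the residual, then add
# the detail back, level by level) inverts the analysis exactly when both sides use
# the same pyrUp. A minimal self-contained round trip illustrating that identity
# (note the module itself appears to pass cv2.BORDER_* flags positionally to
# cv2.pyrUp, i.e. in the dst slot; plain defaults are used here):
import cv2
import numpy as np

im = np.random.rand(64, 64).astype(np.float32)
g = cv2.pyrDown(im)
lap = im - cv2.pyrUp(g)          # detail level
rec = cv2.pyrUp(g) + lap         # synthesis
assert np.abs(rec - im).max() < 1e-6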
{'Index': 4, 'Name': 'TrixellPixium3543', 'Type': 'DR', 'LinMax': 65535, 'PixelSize': 0.144}\n elif l==5990400: #TRix 2430EZ, indeks 5\n im_size = [1920, 1560]\n # Mode informacije\n DetInfo = {'Index': 5, 'Name': 'Trixell2430EZ', 'Type': 'DR', 'LinMax': 65535, 'PixelSize': 0.148}\n elif l==18481152: #Toshiba 4343\n im_size = [3072, 3008]\n # Mode informacije\n DetInfo = {'Index': 1, 'Name': 'Toshiba4343', 'Type': 'DR', 'LinMax': 13000, 'PixelSize': 0.143}\n elif l==18458880: #Toshiba 4343 RPW\n im_size = [3036,3040]\n # Mode informacije\n DetInfo = {'Index': 29, 'Name': 'Toshiba4343RPW', 'Type': 'DR', 'PixelSize': 0.140}\n elif l==14993280: #Toshiba 3543_W\n im_size = [2466, 3040]\n # Mode informacije\n DetInfo = {'Index': 30, 'Name': 'Toshiba3543W', 'Type': 'DR', 'PixelSize': 0.140}\n elif l==18874368: #Varian PaxScan4343, Rayence, DRTech EVS, Pixxgen\n im_size = [3072, 3072]\n if im_fname.find('Samsung'): #Vrlo niskobudzetno testiranje za Samsung\n # Mode informacije\n DetInfo = {'Index': 2, 'Name': 'Rayence1717', 'Type': 'DR', 'LinMax':16536, 'PixelSize': 0.143}\n elif im_fname.find('DRTech'):\n # Mode informacije\n DetInfo = {'Index': 13, 'Name': 'DRTech4343EVS', 'Type': 'DR', 'LinMax': 16536, 'PixelSize': 0.140}\n elif im_fname.find('Toshiba'):\n # Mode informacije\n DetInfo = {'Index': 1, 'Name': 'Toshiba4343', 'Type': 'DR', 'LinMax': 13000, 'PixelSize': 0.143}\n elif im_fname.find('Pixxgen'): #Pixxgen 1717\n # Mode informacije\n DetInfo = {'Index': 40, 'Name': 'Pixxgen1717', 'Type': 'DR', 'LinMax': 32422, 'PixelSize': 0.140}\n else: #Inace Varex\n # Mode informacije\n DetInfo = {'Index': 26, 'Name': 'Varex4343', 'Type': 'DR', 'LinMax': 60000, 'PixelSize': 0.139}\n elif l==13041663: #Careray 1500P\n im_size = [2304, 2816]\n # Mode informacije\n DetInfo = {'Index': 9, 'Name': 'Careray1500P', 'Type': 'DR', 'LinMax': 47000, 'PixelSize': 0.154}\n elif l==12976128: #Careray 1500CW\n im_size = [2304, 2816]\n # Mode informacije\n DetInfo = {'Index': 8, 'Name': 'Careray1500CW', 'Type': 'DR', 'LinMax': 47000, 'PixelSize': 0.154}\n elif l==15859712: #Careray 1800R\n im_size = [2816,2816]\n # Mode informacije\n DetInfo = {'Index': 10, 'Name': 'Careray1800R', 'Type': 'DR', 'LinMax': 47000, 'PixelSize': 0.154}\n elif l==15212544: #Varex 4336Wv4\n im_size = [3072, 2476]\n # Mode informacije\n DetInfo = {'Index': 27, 'Name': 'Varex4336Wv4', 'Type': 'DR', 'LinMax': 6e4, 'PixelSize': 0.139}\n elif l==30723840: #PerkinElmer XRPAD 4336\n im_size = [4320, 3556]\n # Mode informacije\n DetInfo = {'Index': 11, 'Name': 'PerkinElmerXRPAD4336', 'Type': 'DR', 'LinMax': 16384, 'PixelSize': 0.100}\n elif l==18013216: #Rayence 14x17\n im_size = [2756, 3268]\n # Mode informacije\n DetInfo = {'Index': 17, 'Name': 'Rayence1417', 'Type': 'DR', 'LinMax': 16384, 'PixelSize': 0.143}\n elif l==11790368: #Konica AeroDR 1717 (20)\n im_size = [2428, 2428]\n # Mode informacije\n DetInfo = {'Index': 20, 'Name': 'KonicaAeroDR1717', 'Type': 'DR', 'LinMax': 4095, 'PixelSize': 0.175}\n elif l==9690840: #Konica AeroDR 1417, indeks 21\n im_size = [2430, 1994]\n # Mode informacije\n DetInfo = {'Index': 21, 'Name': 'KonicaAeroDR1417', 'Type': 'DR', 'LinMax': 4095, 'PixelSize': 0.175}\n elif l==4762368: #Konica AeroDR 1012, indeks 22\n im_size = [1696, 1404]\n # Mode informacije\n DetInfo = {'Index': 22, 'Name': 'KonicaAeroDR1012', 'Type': 'DR', 'LinMax': 4095, 'PixelSize': 0.175}\n elif l==12902400: #iRay 1417 (62)\n im_size = [2800, 2304]\n # Mode informacije\n DetInfo = {'Index': 62, 'Name': 'iRay1417', 'Type': 'DR', 'LinMax': 
13000, 'PixelSize': 0.150}\n elif l==13105152: #iRay 1417 (63)\n im_size = [2844, 2304]\n # Mode informacije\n DetInfo = {'Index': 63, 'Name': 'iRay1417', 'Type': 'DR', 'LinMax': 13000, 'PixelSize': 0.150}\n elif l==31195136: #iRay Mars1417X (67)\n im_size = [4352, 3584]\n # Mode informacije\n DetInfo = {'Index': 67, 'Name': 'iRay1417X', 'Type': 'DR', 'LinMax': 65535, 'PixelSize': 0.100}\n elif l==15728640: #Pixxgen 1417 (81)\n im_size = [3072, 2560]\n # Mode informacije\n DetInfo = {'Index': 41, 'Name': 'Pixxgen1417', 'Type': 'DR', 'LinMax': 65535, 'PixelSize': 0.140}\n\n # R&F slike\n elif l==1843200: #RF Trixell, 3x3 binning\n im_size = [960, 960]\n # Mode informacije\n DetInfo = {'Index': 104}\n elif l==1843456: #RF Trixell, 3x3 binning, 960x960, sa zaglavljem 256 bita\n im_size = [960, 960]\n L_hdr = 256 # zaglavlje formata\n # Mode informacije\n DetInfo = {'Index': 104, 'Name': 'Trixel4343RF', 'Type': 'RF', 'LinMax': 45000, 'PixelSize': 3 * 0.148,\n 'Xmin':0, 'Ymin':0, 'Nx':960, 'Ny':960} # aktivni deo slike\n elif l==2097408: #Varex 4343DXV, 3x3 binning, 1024x1024, zaglavlje 256\n im_size = [1024, 1024]\n L_hdr = 256 # zaglavlje formata\n # Mode informacije\n DetInfo = {'Index': 205, 'Name': 'Varex4343DXV', 'LinMax': 50000, 'PixelSize': 0.417,\n 'Xmin':0, 'Ymin':0, 'Nx':1024, 'Ny':1024} # aktivni deo slike\n else:\n print('Nepoznat detektor, duzina '+str(l))\n return\n # ucitaj sliku odgovarajuce velicine\n f = open(im_fname, 'rb')\n im = np.fromfile(f, np.dtype('uint16'))\n im = im[:im_size[0]*im_size[1]]\n hdr = im[:L_hdr].astype(np.uint8)\n im = im.reshape(im_size)\n\n elif ext == '.dcm':\n print()\n #dicom ucitavanje\n else:\n print('Unrecognised panel format!')\n return\n\n f.close()\n\n if not list(im.shape)==im_size:\n print('Error reading image. 
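# read_raw() above keys the detector geometry off the raw file size: each frame is
# plain uint16, so size_in_bytes == 2 * rows * cols for a headerless frame
# (e.g. 13824000 == 2 * 2880 * 2400 for the Trixell 3543 EZ), plus any format
# header. A minimal reader in the same spirit (hypothetical 4x3 geometry, purely
# for illustration):
import numpy as np

def read_uint16_frame(fname, rows=4, cols=3, header_words=0):
    raw = np.fromfile(fname, dtype=np.uint16)
    hdr = raw[:header_words]
    im = raw[header_words:header_words + rows * cols].reshape(rows, cols)
    return im, hdr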
Detektor: '+ str(DetInfo['Index']) +' Treba da je velicina '+ str(im_size)\n +' a u stvari je ' + ''.join(str(el) for el in im.shape))\n im = []\n prev_im = []\n return\n\n # Neophodne korekcije slika\n if DetInfo['Index']==11:\n im[im>16384] = 16384 # korigujemo zasicenje na 16 bita umesto na 14\n elif DetInfo['Index']==104: #Trixell RF 3x3 mod, fali prva kolona\n im[:,0] = im[:,1] #kopiramo drugu\n\n # Isecanje neaktivnog dela slike\n if crop_flag and im.shape[0]>1000: #I da nije prikaz (1/4 rezolucije)\n if DetInfo['Index']==0: #Trixell Pixium 4600\n im = im[DetInfo['Ymin']:DetInfo['Ymin']+DetInfo['Ny'], DetInfo['Xmin']: DetInfo['Xmin'] + DetInfo['Nx']];\n elif DetInfo['Index']==1: #Toshiba 4343\n im = im[32:3040,:]\n elif DetInfo['Index']==2: #Varian PaxScan4343\n im = im[10:3062, 10:3062] #varian 4343\n elif DetInfo['Index']==3: #Trixell 4143 ili 4343\n # Odluci da li je 4143 ili 4343\n if sum(sum(im[99: 150, 2809: 2860])) == 0:\n im = im[4:2876, 32:2804] #Trix 4143\n else:\n im = im[4:2876, 32:2868] #Trix 4343\n elif DetInfo['Index']==7: #Trixell 3543 EZ\n im = im[24:2864,34:2366]\n elif DetInfo['Index']==9: #Careray\n im = im[6:2298,6:2810]\n elif DetInfo['Index']==10: #Careray 1800R\n im = im[6:2810,6:2810]\n elif DetInfo['Index']==13: #DRTech EVS\n im = im[20:3052,20:3052]\n elif DetInfo['Index']==21: #Konica 1417\n im = im[:2428,:1992]\n elif DetInfo['Index']==22: #Konica 1012\n im = im[:1692,:1404]\n elif DetInfo['Index']==26: #Varian 4343v3/RC\n im = im[20:3052,20:3052]\n elif DetInfo['Index']==27: #Varian 4336Wv4\n im = im[20:3052,20:2456]\n elif DetInfo['Index']==41: #Pixxgen 1417\n im = im[:3052,:2500]\n elif DetInfo['Index']==67: #iRay Mars1417X\n im = im[26:4326,42:3542]\n\n # izlazna lista\n out = [im, DetInfo, hdr]\n\n # simuliranje preview slika\n\n if preview_flag:\n prev_im = []\n if DetInfo['Index']==0: #Pixium 4600, redukujemo rezoluciju na 1/4\n if im.shape[0]!=3001:\n prox_im = im[60:3060,60:3060]\n else:\n prox_im = im[:3000, :3000]\n prev_im = np.uint16(prox_im[0:3000:4, 0:3000:4])\n elif DetInfo['Index'] in [4,7,20,21,27]: #Trixell 4143, Varian, Samsung i Imix imaju sliku iste velicine\n prev_im = im[2::4, 2::4]\n\n out.append(prev_im)\n\n return out\n\n\ndef log_LUT(ip_opseg, op_opseg=256, tol = 0.04):\n\n lin_op_opseg = int(np.ceil(tol*op_opseg))\n lin_ip_opseg = int(np.ceil(tol*ip_opseg))\n log_min = np.log(lin_ip_opseg+1)\n log_max = np.log(ip_opseg)\n\n # lut = np.zeros([1, ip_opseg])\n lut = np.zeros(ip_opseg)\n\n step = (lin_op_opseg-1)/(lin_ip_opseg-1)\n lut[0:lin_ip_opseg] = np.arange(0, lin_op_opseg-1+step, step)\n\n k = (op_opseg-lin_op_opseg)/(log_max-log_min)\n log_deo = lin_op_opseg+k*(np.log(np.arange(lin_ip_opseg, ip_opseg)) - log_min)\n lut[lin_ip_opseg:ip_opseg+1] = log_deo\n\n lut[-1] = op_opseg\n\n return lut\n\n\ndef sigmLUT(ip_range, op_range, k):\n\n k = k*2/op_range\n lut = 2*op_range*(1/(1+np.exp(-k*(np.arange(-ip_range, ip_range+1))))-0.5)\n\n return lut\n\n\ndef sigmLUT_lin(ip_range, op_range, k, t=0.05):\n\n k = k*2/op_range\n lut = 2*op_range*(1/(1+np.exp(-k*(np.arange(-ip_range, ip_range+1))))-0.5)\n\n sum_tol = int(lut.size*t)\n lin_deo = np.arange(-sum_tol, sum_tol+1)\n lut[lin_deo+ip_range] = lin_deo\n\n return lut\n\n\ndef sigmLUT_z(ip_range, op_range, k, t=0.05):\n\n k = k*2/op_range\n lut = 2*op_range*(1/(1+np.exp(-k*(np.arange(-ip_range, ip_range+1))))-0.5)\n\n if t<1:\n sum_tol = int(lut.size*t)\n lin_deo = np.arange(-sum_tol, sum_tol+1)\n lut[lin_deo+ip_range] = 0\n else:\n x = np.arange(-ip_range, ip_range+1)\n lut[abs(x) 
1 - 1e-9)[0][0]\n else:\n top_bin = np.where(ch>= top_lim)[0][0]\n\n #vec prvi akumulator prelazi granicu\n if top_bin.size == 0:\n bin_lims[1] = samples.size-1\n else:\n bin_lims[1] = top_bin\n\n limits[1] = samples[bin_lims[1]]\n\n # Donja granica\n if bot_lim == 0:\n bot_lim = np.finfo(float).eps\n\n below = np.where(samples[ch 1:\n h = h/sum(h)\n\n # Cumulative histogram\n ch = np.cumsum(h)\n\n limits=[0,0]\n bin_lims=[0,0]\n # Top limit, check if 0 in which case return max sample\n if top_lim == 1:\n top_lim = 1-1 / a.size+np.finfo(float).eps\n top_bin = np.where(ch >= top_lim)[0][0]\n if top_bin.size==0 or top_bin == 0:\n top_bin = len(samples)-1\n limits[1] = samples[top_bin]\n bin_lims[1] = top_bin\n\n # Bottom limit\n if bot_lim == 0:\n bot_lim = 1 / a.size\n samples = samples[:-1]\n below = np.where(samples[ch < bot_lim])[0][-1]\n if below.size==0:\n bin_lims[0]=1\n else:\n bin_lims[0] = below\n limits[0] = samples[bin_lims[0]]\n\n return limits, h, bin_lims\n\n\n\ndef autocrop(im, disp_flag=0, oz_pov=0.2):\n\n # oz_pov - Sigurno ozracena povrsina\n\n dir_oz = False\n\n # Ulazna slika (ucitaj ako je dato ime)\n if type(im)==str:\n ime = im\n im = read_raw(im, 1)\n else:\n ime=''\n\n # Redukuj sliku za faktor 4 ako je 3001x3001, uvek radimo na prikazu\n if any(np.array(im.shape) > 2380):\n work_im = np.float32(im[2:im.shape[0]:4, 2:im.shape[1]:4])\n else:\n work_im = np.float32(im)\n\n uk_fak = 4 # Ukupni faktor redukcije rezolucije\n (poc_v, poc_s) = work_im.shape #Granice radne slike da posle ogranicimo roi\n\n # Redukuj sa velicine preview slike na zeljenu rezoluciju\n\n k = np.max(np.where((np.array(im.shape).min()/(2**np.arange(1,6)))>50))\n LP, GP, Res, size_vec = im_pyr_decomp(work_im, k)\n work_im = Res\n uk_fak = uk_fak*(2**k)\n\n # Parametri\n tol = 2 #Tolerancija po rubovima slike u cm\n lin_T = 0.9 #Prag usaglasenosti linije\n cr_tol = 0.12 #Tolerancija najmanje udaljenosti lamela od centra slike\n\n # Izvedeni parametri\n v, s = work_im.shape\n k = round(s*tol/43)\n if disp_flag:\n fig = plt.figure()\n fig.suptitle = ime\n plt.imshow(work_im*255/min(1300, 1.3 * work_im.max()))\n plt.axis('off')\n plt.show()\n\n # Nalazenje sigurno ozracenog regiona\n # Nadji raspon intenziteta sigurno ozracenog regiona\n [lims, h, bin_lims] = stat_hist_lims(work_im, [1 - oz_pov, 0.01], np.arange(0, 11000+1, 4))\n if disp_flag:\n print('Limits are: ' + [str(l) for l in lims])\n\n valid_int = work_im > lims[0] - 1\n\n # Nadji vrhove intenziteta\n vrhovi = maximum_filter(work_im, size=8)\n # vrhovi = (im == vrhovi_m)\n\n # Kombinuj u region i prikazi sigurno osvetljeni region\n osv = vrhovi * valid_int\n x = np.where(np.sum(osv, axis=0))\n y = np.where(np.sum(osv, axis=1))\n mx_x = np.max(x)\n mn_x = np.min(x)\n mx_y = np.max(y)\n mn_y = np.min(y)\n\n if disp_flag:\n # disp_draw_box\n print()\n\n # Testiraj ako je cela slika nekolimisana\n if mn_x<=k and mn_y<=k and mx_x>=s-k+1 and mx_y>=v-k+1:\n # za sada dict, napraviti klasu\n roi = {'x':1, 'y':1, 'nx':4 * poc_s, 'ny':4 * poc_v, 'dx':1, 'dy':0}\n cr_im = im\n return\n\n vg = np.zeros((work_im.shape[0], work_im.shape[1]))\n hg = vg\n vg[0: v,:] = convolve1d(work_im,[-1, 0, 1], axis=0)\n hg[:, 0: s] = convolve1d(work_im,[-1, 0, 1],axis=0)\n sg = np.sqrt(vg**2 + hg**2)\n\n r_h = sum(np.sign(hg)* np.abs(hg))/(sum(np.abs(sg)) + np.finfo(float).eps)\n r_v = sum(np.sign(vg)*np.abs(vg))/(sum(np.abs(sg))+np.finfo(float).eps)\n\n r_h[mn_x-1: mx_x] = 0\n r_v[mn_y-1: mx_y] = 0\n r_h[:k] = 0\n r_h[s - k + 1: s] = 0\n r_v[: k - 1] = 0\n r_v[v - k + 
1: v] = 0\n\n g_L = np.where(r_h < -lin_T)\n g_D = np.where(r_h > lin_T)\n g_G = np.where(r_v < -lin_T)\n g_I = np.where(r_v > lin_T)\n\n if (np.array(g_L) < (0.5 - cr_tol) * s).size != 0:\n mn_x = np.max(g_L[np.array(g_L) < (0.5 - cr_tol) * s])\n else:\n mn_x = 1\n if (np.array(g_D) > (0.5 + cr_tol) * s).size != 0:\n mx_x = np.min(g_D[g_D > (0.5 + cr_tol) * s])\n else:\n mx_x = s\n if (np.array(g_G) < (0.5 - cr_tol) * v).size != 0:\n mn_y = np.max(g_G[g_G < (0.5 - cr_tol) * v])\n else:\n mn_y = 1\n if (np.array(g_I) > (0.5 + cr_tol) * v).size != 0:\n mx_y = np.min(g_I[g_I > (0.5 + cr_tol) * v])\n else:\n mx_y = v\n\n\n mn_x = uk_fak * mn_x + 1\n mx_x = uk_fak * mx_x\n mn_y = uk_fak * mn_y + 1\n mx_y = uk_fak * mx_y\n\n if mx_x > 4 * poc_s: mx_x = 4 * poc_s\n if mx_y > 4 * poc_v: mx_y = 4 * poc_v\n\n roi = [mn_x, mn_y, mx_x - mn_x + 1, mx_y - mn_y + 1, 1, 0]\n x = mn_x\n y = mn_y\n nx=mx_x - mn_x + 1\n ny=mx_y - mn_y + 1\n dx=1\n dy=0\n\n if (dx*dy) == 0 and (dx + dy) == 1:\n if dx*10 + dy == 10: okret = 0\n if dx * 10 + dy == 1: okret = 1\n if dx * 10 + dy == -10: okret = 2\n if dx * 10 + dy == -1: okret = 3\n\n xt = [x, x+(nx-1)*dx,x+(nx-1)*dx-(ny-1)*dy,x-(ny-1)*dy]\n yt = [y, y+(nx-1)*dy,y+(nx-1)*dy+(ny-1)*dx,y+(ny-1)*dx]\n cr_im = np.rot90(im[np.array(yt).min()-1:np.array(yt).max(), np.array(xt).min()-1:np.array(xt).max()], okret)\n else:\n [x_fromx, x_fromy] = np.meshgrid(np.arange(0,nx)*dx, np.arange(0,ny)*dy)\n [y_fromx, y_fromy] = np.meshgrid(np.arange(0,nx)*dy, -1*np.arange(0,ny)*dx)\n x_ind = x + x_fromx - x_fromy\n y_ind = y + y_fromx - y_fromy\n\n cr_im = interp2d(x_ind, y_ind, im, kind='linear', fill_value=0)\n\n return cr_im\n\n\ndef disp_im(im, lmin=0, lmax=255, title=''):\n\n fig = plt.figure()\n ax = fig.add_axes([0,0,1,1])\n ax.imshow(im, cmap='gray', vmin=lmin, vmax=lmax)\n plt.axis('off')\n plt.title(title)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nebojsa-bozanic/BMI_OSuM","sub_path":"Vezba10: Sjedinjavanje multi-modalnih slika/osum.py","file_name":"osum.py","file_ext":"py","file_size_in_byte":20713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36361862493","text":"def binary_search(array, target):\n \"\"\" An implementation of iterative binary search.\n\n Takes in a list of sorted numbers 'array', and a target number 'target'.\n Returns the index of the targe number if it is in 'array', and False otherwise.\n \"\"\"\n # Invariant: if the target is in array, it is between low (inclusive) and high (exclusive)\n # aka in array[low, high-1]\n\n low = 0 # first index of array\n high = len(array) # end of array (exclusive)\n\n while (low < high -1): # when is array[low, high-1] empty (aka when have we looked at all elements or if the list is empty)?\n # when low = high-1 (because it'd be array[low, low], which is empty)\n mid = (low + high)//2 # mid point of the looked at array\n print(\"mid\", mid)\n if target < array[mid]: # the target is in the lower half\n print(\"lower\")\n high = mid # move high down so we look at the lower half\n # remember in this case, high is excluded from the active range.\n # since we know that mid isnt the target, we can directly move high to mid\n if target >= array[mid]: # the target is in the upper half OR\n # the target is equal to the mid point\n print(\"higher\")\n low = mid # move low up so we look at the upper half\n # directly move low to mid since we want to include that element\n # OR if target is at the midpoint, purposefully fail the while loop\n # 
this way, when the loop ends low = high - 1, and there is only element in the active range, which is array[low]\n if array[low] == target: # after the while loop terminates, there is only one element left to look at\n return low\n return False # the target number isn't in the array\n\nprint(binary_search([0,1,2,3,4,5,6,7,8], 5))\n","repo_name":"kimypham/algo-data","sub_path":"searching/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29596690037","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport altair as alt\nfrom vega_datasets import data\nimport pandas as pd\n\nticks_settings = {'fontsize':15}\nlabel_settings = {'fontsize':25}\n\ndef plot_segment_volume(df):\n sub_df = df[df['year'] > 2011]\n grouped_df = sub_df.groupby(['model', 'year'], as_index=False).count()\n\n fig = plt.figure(figsize=(18, 10))\n plt.title('Resale volume by model year for different models', **label_settings)\n sns.barplot(y=grouped_df['price'], x=grouped_df['model'], hue=grouped_df['year'])\n plt.xticks(**ticks_settings)\n plt.xlabel(\"Models and their model years\", **label_settings)\n plt.ylabel(\"Resale volume\", **label_settings)\n\n return fig\n\ndef plot_segment_volume_altair(df):\n sub_df = df[df['year'] > 2011]\n grouped_df = sub_df.groupby(['model', 'year'], as_index=False).count()\n\n chart = alt.Chart(grouped_df).mark_bar().encode(\n x=alt.X('year:N', title='Year'),\n y=alt.Y('price:Q', title='Volume'),\n color=alt.Color('year:N', title='Year')\n ).facet(\n column='model:N'\n )\n return chart\n\ndef plot_price_with_age(df):\n grouped_df = df.groupby(['model', 'age']).mean()[['price', 'odometer']].reset_index()\n grouped_df = grouped_df[grouped_df['age'] <= 15]\n # Create Altair chart\n chart = alt.Chart(grouped_df).mark_line().encode(\n x=alt.X('age:Q', title='Age'),\n y=alt.Y('price:Q', title='Price'),\n color=alt.Color('model:N', title='Model')\n ).properties(\n title='Price by Age and Model',\n width=550,\n height=400,\n )\n \n return chart\n\ndef plot_mileage_with_age(df):\n grouped_df = df.groupby(['model', 'age']).mean()[['price', 'odometer']].reset_index()\n grouped_df = grouped_df[grouped_df['age'] <= 15]\n # Create Altair chart\n chart = alt.Chart(grouped_df).mark_line().encode(\n x=alt.X('age:Q', title='Age'),\n y=alt.Y('odometer:Q', title='Mileage'),\n color=alt.Color('model:N', title='Model')\n ).properties(\n title='Mileage by Age and Model',\n width=550,\n height=400,\n )\n \n return chart\n\ndef plot_choropleth(df, var, model):\n df = df.copy()\n df = df[df['model'] == model]\n grouped_df = df.groupby(['state']).agg({'price': 'mean', 'year': 'count'}).reset_index()\n state_names = grouped_df.state.unique()\n state_ids = [x+1 for x in range(len(state_names))]\n state_mappings = pd.DataFrame({'state': state_names, 'id': state_ids})\n\n grouped_df = grouped_df.merge(state_mappings, on='state', how='left')\n\n states = alt.topo_feature(data.us_10m.url, 'states')\n\n background = alt.Chart(states).mark_geoshape(\n fill='lightgray',\n stroke='white'\n ).project('albersUsa').properties(\n width=600,\n height=400\n )\n\n ch_map = alt.Chart(states).mark_geoshape().encode(\n color='price:Q',\n tooltip=['id:O', 'price:Q']\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(grouped_df, 'id', ['price'])\n ).project(\n type='albersUsa'\n ).properties(\n width=600,\n height=400,\n title=f'Price of {model} for each states'\n 
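# A few checks for the binary_search record above (traced by hand against the
# loop: the invariant keeps the target, if present, inside array[low:high] until
# exactly one candidate remains):
#     binary_search([0, 1, 2, 3, 4, 5, 6, 7, 8], 5) == 5
#     binary_search([0, 1, 2, 3, 4, 5, 6, 7, 8], 9) is False
#     binary_search([2, 4, 6], 4) == 1
# One caveat: despite the comment about the empty list, an empty input skips the
# loop and then evaluates array[low] with low == 0, raising IndexError.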
+{"seq_id":"29596690037","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport altair as alt\nfrom vega_datasets import data\nimport pandas as pd\n\nticks_settings = {'fontsize':15}\nlabel_settings = {'fontsize':25}\n\ndef plot_segment_volume(df):\n    sub_df = df[df['year'] > 2011]\n    grouped_df = sub_df.groupby(['model', 'year'], as_index=False).count()\n\n    fig = plt.figure(figsize=(18, 10))\n    plt.title('Resale volume by model year for different models', **label_settings)\n    sns.barplot(y=grouped_df['price'], x=grouped_df['model'], hue=grouped_df['year'])\n    plt.xticks(**ticks_settings)\n    plt.xlabel(\"Models and their model years\", **label_settings)\n    plt.ylabel(\"Resale volume\", **label_settings)\n\n    return fig\n\ndef plot_segment_volume_altair(df):\n    sub_df = df[df['year'] > 2011]\n    grouped_df = sub_df.groupby(['model', 'year'], as_index=False).count()\n\n    chart = alt.Chart(grouped_df).mark_bar().encode(\n        x=alt.X('year:N', title='Year'),\n        y=alt.Y('price:Q', title='Volume'),\n        color=alt.Color('year:N', title='Year')\n    ).facet(\n        column='model:N'\n    )\n    return chart\n\ndef plot_price_with_age(df):\n    grouped_df = df.groupby(['model', 'age']).mean()[['price', 'odometer']].reset_index()\n    grouped_df = grouped_df[grouped_df['age'] <= 15]\n    # Create Altair chart\n    chart = alt.Chart(grouped_df).mark_line().encode(\n        x=alt.X('age:Q', title='Age'),\n        y=alt.Y('price:Q', title='Price'),\n        color=alt.Color('model:N', title='Model')\n    ).properties(\n        title='Price by Age and Model',\n        width=550,\n        height=400,\n    )\n    \n    return chart\n\ndef plot_mileage_with_age(df):\n    grouped_df = df.groupby(['model', 'age']).mean()[['price', 'odometer']].reset_index()\n    grouped_df = grouped_df[grouped_df['age'] <= 15]\n    # Create Altair chart\n    chart = alt.Chart(grouped_df).mark_line().encode(\n        x=alt.X('age:Q', title='Age'),\n        y=alt.Y('odometer:Q', title='Mileage'),\n        color=alt.Color('model:N', title='Model')\n    ).properties(\n        title='Mileage by Age and Model',\n        width=550,\n        height=400,\n    )\n    \n    return chart\n\ndef plot_choropleth(df, var, model):\n    # NOTE: 'var' is currently unused; the map always encodes mean price\n    df = df.copy()\n    df = df[df['model'] == model]\n    grouped_df = df.groupby(['state']).agg({'price': 'mean', 'year': 'count'}).reset_index()\n    state_names = grouped_df.state.unique()\n    state_ids = [x+1 for x in range(len(state_names))]\n    state_mappings = pd.DataFrame({'state': state_names, 'id': state_ids})\n\n    grouped_df = grouped_df.merge(state_mappings, on='state', how='left')\n\n    states = alt.topo_feature(data.us_10m.url, 'states')\n\n    background = alt.Chart(states).mark_geoshape(\n        fill='lightgray',\n        stroke='white'\n    ).project('albersUsa').properties(\n        width=600,\n        height=400\n    )\n\n    ch_map = alt.Chart(states).mark_geoshape().encode(\n        color='price:Q',\n        tooltip=['id:O', 'price:Q']\n    ).transform_lookup(\n        lookup='id',\n        from_=alt.LookupData(grouped_df, 'id', ['price'])\n    ).project(\n        type='albersUsa'\n    ).properties(\n        width=600,\n        height=400,\n        title=f'Price of {model} for each state'\n    )\n    return background+ch_map\n\ndef plot_scatter_with_age(df, model, var):\n    model_df = df[df['model'] == model]\n    # take an explicit copy so the in-place dropna below does not act on a slice\n    model_df = model_df[(model_df['price'] < 35000) & (model_df['age'] < 30) & (model_df['odometer'] < 300000)].copy()\n    model_df.dropna(subset=['condition'], inplace=True)\n\n    scatter = alt.Chart(model_df).mark_point().encode(\n        x='age',\n        y=var,\n        color='condition'\n    ).properties(\n        title=f'{var.capitalize()} vs Age marked by the condition',\n        height=600, width=750,\n    )\n    \n    return scatter","repo_name":"nevinbaiju/ITCS-5122-final_project","sub_path":"pages/_plot_utils.py","file_name":"_plot_utils.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70110268168","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ngoodness = []\ntimes = []\nN = []\n\nreadN = False\nfor i in range(4) :\n    with open(\"data/goodnessMC\" + str(i)) as f:\n        dataContent = f.readlines()\n    # you may also want to remove whitespace characters like `\\n` at the end of each line\n\n    data = []\n    for string in dataContent :\n        split = string.split(\",\")\n        if not readN :\n            N.append(int(split[0]))\n        data.append(float(split[1]))\n\n    readN = True\n\n    with open(\"data/timesMC\" + str(i)) as f:\n        timeContent = f.readlines()\n\n    \n    time = []\n    for string in timeContent :\n        split = string.split(\",\")\n\n        time.append(float(split[1]))\n\n    goodness.append(np.asarray(data))\n    times.append(np.asarray(time))\n\nN = np.asarray(N)\n\nf, (ax1, ax2) = plt.subplots(1,2, sharex=True)\n\nax1.set_title(\"Goodness vs N\")\nfor i in range(4) :\n    j = i + 1\n    if i > 1 :\n        j += 1\n    ax1.semilogx(N, goodness[i], label=\"MonteCarlo \" + str(j))\nax1.set_xlabel(r\"$N$\")\nax1.set_ylabel(r\"$Goodness (euclidean)$\")\nax1.legend()\n# ax1.set_xlim(5, 9)\n# ax1.set_ylim(0, 1e-3)\n\nax2.set_title(\"Time consumption vs N\")\nfor i in range(4) :\n    j = i + 1\n    if i > 1 :\n        j += 1\n    ax2.semilogx(N, times[i], label=\"MonteCarlo \" + str(j))\nax2.set_xlabel(r\"$N$\")\nax2.set_ylabel(r\"Computation time (s)\")\nax2.legend()\n# ax2.set_xlim(5, 9)\n# ax2.set_ylim(0, 35)\n\nf.set_size_inches(20, 10)\n\nplt.savefig(\"data/montecarlo_plots.eps\")\nplt.show()\nplt.close()","repo_name":"antolu/KTH-InformationRetrieval-DD2476","sub_path":"montecarlo_alignment.py","file_name":"montecarlo_alignment.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15810367499","text":"# Uses python3\nimport sys\n\n\ndef optimal_weight(capacity, gold_items):\n    # write your code here\n    result = []\n    for i in range(len(gold_items)+1):\n        result.append((capacity+1) * [0])\n\n    for gold_idx in range(1, len(gold_items)+1):\n        gold_w = gold_items[gold_idx-1]\n        for weight in range(1, capacity+1):\n            result[gold_idx][weight] = result[gold_idx-1][weight]\n            if gold_w <= weight:\n                result[gold_idx][weight] = max(result[gold_idx-1][weight-gold_w] + gold_w, result[gold_idx][weight])\n\n    return result[len(gold_items)][capacity]\n\n\nif __name__ == '__main__':\n    data_in = sys.stdin.read()  # avoid shadowing the built-in 'input'\n    W, n, *w = list(map(int, data_in.split()))\n    print(optimal_weight(W, w))\n","repo_name":"artsiom-kotau/coursera-alg-spec","sub_path":"alg-toolbox/week6_dynamic_programming2/1_maximum_amount_of_gold/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
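The knapsack record above fills an (n+1) x (capacity+1) table in which result[i][w] is the heaviest subset of the first i gold bars that fits in capacity w. Because row i only reads row i-1, the table collapses to a single row when capacities are scanned downward, so each bar is still counted at most once. The sketch below is an illustrative reformulation under that observation, not code from the repository:

def max_gold(capacity, bars):
    # Rolling-row 0/1 knapsack: best[w] = heaviest load achievable within w.
    best = [0] * (capacity + 1)
    for bar in bars:
        # Walk w downward so best[w - bar] still refers to the previous row,
        # i.e. a state that has not used this bar yet.
        for w in range(capacity, bar - 1, -1):
            best[w] = max(best[w], best[w - bar] + bar)
    return best[capacity]

assert max_gold(10, [1, 4, 8]) == 9   # 1 + 8 fits; 4 + 8 would exceed the capacity
assert max_gold(9, [3, 3]) == 6       # each bar is used at most once, so 6 rather than 9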
+{"seq_id":"34763367838","text":"import torch\nimport torch.nn.functional as F\nimport cv2 as cv\nimport numpy as np\nimport os\nfrom glob import glob\nfrom icecream import ic\nfrom scipy.spatial.transform import Rotation as Rot\nfrom scipy.spatial.transform import Slerp\n\ndef decomposeP(P):\n M = P[0:3,0:3]\n Q = np.eye(3)[::-1]\n P_b = Q @ M @ M.T @ Q\n K_h = Q @ np.linalg.cholesky(P_b) @ Q\n K = K_h / K_h[2,2]\n A = np.linalg.inv(K) @ M\n l = (1/np.linalg.det(A)) ** (1/3)\n R = l * A\n t = l * np.linalg.inv(K) @ P[0:3,3]\n w2c = np.concatenate([R, t[:, None]], axis=1)\n w2c = np.concatenate([w2c, np.array([[0, 0, 0, 1]])], axis=0) # [4, 4]\n K_out = np.eye(4)\n K_out[0:3,0:3] = K\n return K_out, w2c\n\nclass Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = [] # camera to world\n self.inv_scale_all = [] # from bbox to unit sphere\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n intrinsic, w2c_pose = decomposeP(world_mat)\n c2w_pose = np.linalg.inv(w2c_pose)\n self.intrinsics_all.append(torch.from_numpy(intrinsic).float())\n self.pose_all.append(torch.from_numpy(c2w_pose).float())\n self.inv_scale_all.append(torch.from_numpy(np.linalg.inv(scale_mat)).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.inv_scale_all = torch.stack(self.inv_scale_all).to(self.device) # [n_images, 4, 4]\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min 
= np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n\n print('Load data: End')\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n Args:\n pose: [N, 4, 4] camera pose matrix from camera to world (c2w)\n intrinsic: [N, 4, 4] camera intrinsic matrix\n inv_scale_mat: [N, 4, 4] scale matrix from bbox to unit sphere\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = (self.inv_scale_all[img_idx] @ self.pose_all[img_idx])[None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n Args:\n pose: [N, 4, 4] camera pose matrix from camera to world (c2w)\n intrinsic: [N, 4, 4] camera intrinsic matrix\n inv_scale_mat: [N, 4, 4] scale matrix from bbox to unit sphere\n \"\"\"\n pixels_x = torch.randint(low=0, high=self.W, size=[batch_size]).cpu()\n pixels_y = torch.randint(low=0, high=self.H, size=[batch_size]).cpu()\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n p = torch.stack([pixels_x.cuda(), pixels_y.cuda(), torch.ones_like(pixels_y).cuda()], dim=-1).float() # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = (self.inv_scale_all[img_idx] @ self.pose_all[img_idx])[None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, 
rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = (self.inv_scale_all[0] @ trans[None, None, :3]).expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)\n\n","repo_name":"Tianhang-Cheng/NeuS_friendly","sub_path":"models/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"32472690178","text":"import os\nfrom tkinter import Tk\nfrom tkinter import Toplevel\nfrom tkinter import Label\nfrom tkinter import Button\nfrom tkinter import Entry\nfrom tkinter import messagebox\nfrom datetime import datetime\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill\n\n# color for Excel\ngreenFill = PatternFill(start_color='92D050', end_color='92D050', fill_type='solid')\nredFill = PatternFill(start_color='FFFF0000', end_color='FFFF0000', fill_type='solid')\n\n# months for creating time-based excel\nmonths = ['Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli',\n 'August', 'September', 'Oktober', 'November', 'Dezember']\n\n# safe darts and score for each player\nplayer1 = []\nplayer2 = []\nplayer3 = []\nplayer4 = []\n\n# dictionaries for each player\nplayer1_kpis = [{\"Score\": 0, \"Darts\": 0, \"180\": 0, \"140\": 0, \"100\": 0, \"80\": 0, \"60\": 0, \"Legs\": 0}]\nplayer2_kpis = [{\"Score\": 0, \"Darts\": 0, \"180\": 0, \"140\": 0, \"100\": 0, \"80\": 0, \"60\": 0, \"Legs\": 0}]\nplayer3_kpis = [{\"Score\": 0, \"Darts\": 0, \"180\": 0, \"140\": 0, \"100\": 0, \"80\": 0, \"60\": 0, \"Legs\": 0}]\nplayer4_kpis = [{\"Score\": 0, \"Darts\": 0, \"180\": 0, \"140\": 0, \"100\": 0, \"80\": 0, \"60\": 0, \"Legs\": 0}]\n\nplayer1_scores = [{\"T20\": 0, \"T19\": 0, \"T18\": 0, \"S20\": 0, \"S19\": 0, \"S18\": 0,\n \"Bull\": 0, \"Single_Bull\": 0, \"Triple\": 0, \"Double\": 0, \"No_Score\": 0}]\nplayer2_scores = [{\"T20\": 0, \"T19\": 0, \"T18\": 0, \"S20\": 0, \"S19\": 0, \"S18\": 0,\n \"Bull\": 0, \"Single_Bull\": 0, \"Triple\": 0, \"Double\": 0, \"No_Score\": 0}]\nplayer3_scores = [{\"T20\": 0, \"T19\": 0, \"T18\": 0, \"S20\": 0, \"S19\": 0, \"S18\": 0,\n \"Bull\": 0, \"Single_Bull\": 0, \"Triple\": 0, \"Double\": 0, \"No_Score\": 0}]\nplayer4_scores = [{\"T20\": 0, \"T19\": 0, \"T18\": 0, \"S20\": 0, \"S19\": 0, \"S18\": 0,\n \"Bull\": 0, \"Single_Bull\": 0, \"Triple\": 0, \"Double\": 0, \"No_Score\": 0}]\n\n\ndef create_directory_if_not_exists():\n \"\"\"\n This function creates a directory for the Excel file\n :return: return the path to the Excel file\n \"\"\"\n if not os.path.isdir(\"Spielstände\"):\n os.mkdir(\"Spielstände\")\n\n if not os.path.isdir(\"Spielstände/Scoring\"):\n os.mkdir(\"Spielstände/Scoring\")\n\n # current year\n 
current_year = datetime.now().strftime('%Y')\n if not os.path.isdir(\"Spielstände/Scoring/\" + current_year):\n os.mkdir(\"Spielstände/Scoring/\" + current_year)\n\n # current month\n current_month = datetime.now().strftime('%m')\n month_name = months[int(current_month) - 1]\n if not os.path.isdir(\"Spielstände/Scoring/\" + current_year + \"/\" + month_name):\n os.mkdir(\"Spielstände/Scoring/\" + current_year + \"/\" + month_name)\n\n # current day\n current_day = int(datetime.now().strftime('%d'))\n date = str(current_day) + \".\" + str(current_month)\n if not os.path.isdir(\"Spielstände/Scoring/\" + current_year + \"/\" + month_name + \"/\" + date):\n os.mkdir(\"Spielstände/Scoring/\" + current_year + \"/\" + month_name + \"/\" + date)\n\n # create new score - file\n time = datetime.now().strftime('%H-%M-%S')\n\n path = \"Spielstände/Scoring/\" + current_year + \"/\" + \\\n month_name + \"/\" + date + \"/\" + time + \".xlsx\"\n\n return path\n\n\ndef set_standards_in_excel(sheet):\n \"\"\"\n This function fills the cells in the Excel sheet\n :param sheet: the Excel sheet to write in\n :return:\n \"\"\"\n sheet['A3'].fill = greenFill\n sheet['B3'].fill = greenFill\n sheet['C3'].fill = greenFill\n sheet['D3'].fill = greenFill\n sheet['E3'].fill = greenFill\n sheet['F3'].fill = greenFill\n sheet['G3'].fill = greenFill\n sheet['H3'].fill = greenFill\n sheet['I3'].fill = greenFill\n sheet['J3'].fill = greenFill\n sheet['K3'].fill = redFill\n sheet['L3'].fill = redFill\n sheet['M3'].fill = greenFill\n sheet['N3'].fill = greenFill\n sheet['O3'].fill = redFill\n sheet['P3'].fill = redFill\n sheet['Q3'].fill = greenFill\n sheet['R3'].fill = redFill\n sheet['S3'].fill = greenFill\n sheet['T3'].fill = redFill\n sheet['U3'].fill = greenFill\n\n sheet.column_dimensions['B'].width = 16\n sheet.column_dimensions['C'].width = 8\n sheet.column_dimensions['I'].width = 18\n sheet.column_dimensions['J'].width = 18\n sheet.column_dimensions['K'].width = 14\n sheet.column_dimensions['L'].width = 14\n sheet.column_dimensions['M'].width = 14\n sheet.column_dimensions['N'].width = 14\n sheet.column_dimensions['O'].width = 14\n sheet.column_dimensions['P'].width = 14\n sheet.column_dimensions['Q'].width = 21\n sheet.column_dimensions['R'].width = 15\n sheet.column_dimensions['S'].width = 17\n sheet.column_dimensions['T'].width = 17\n sheet.column_dimensions['U'].width = 12\n\n\ndef fill_values_in_cells(sheet):\n \"\"\"\n This function sets standard values in the Excel file\n :param sheet: the Excel sheet to write in\n :return:\n \"\"\"\n sheet.cell(row=3, column=1).value = \"Legs won\"\n sheet.cell(row=3, column=2).value = \"Spieler\"\n sheet.cell(row=3, column=3).value = \"Average\"\n sheet.cell(row=3, column=4).value = \"180\"\n sheet.cell(row=3, column=5).value = \"140+\"\n sheet.cell(row=3, column=6).value = \"100+\"\n sheet.cell(row=3, column=7).value = \"80+\"\n sheet.cell(row=3, column=8).value = \"60+\"\n\n sheet.cell(row=3, column=9).value = \"Thrown Points\"\n sheet.cell(row=3, column=10).value = \"Thrown Darts\"\n sheet.cell(row=3, column=11).value = \"Thrown T20\"\n sheet.cell(row=3, column=12).value = \"Thrown S20\"\n sheet.cell(row=3, column=13).value = \"Thrown T19\"\n sheet.cell(row=3, column=14).value = \"Thrown S19\"\n sheet.cell(row=3, column=15).value = \"Thrown T18\"\n sheet.cell(row=3, column=16).value = \"Thrown S18\"\n sheet.cell(row=3, column=17).value = \"Thrown Single-Bulls\"\n sheet.cell(row=3, column=18).value = \"Thrown Bulls\"\n sheet.cell(row=3, column=19).value = \"Thrown 
Triple\"\n sheet.cell(row=3, column=20).value = \"Thrown Double\"\n sheet.cell(row=3, column=21).value = \"No hit\"\n\n # set player names in Excel\n sheet.cell(row=4, column=2).value = label_player_1_name['text']\n sheet.cell(row=5, column=2).value = label_player_2_name['text']\n sheet.cell(row=6, column=2).value = label_player_3_name['text']\n sheet.cell(row=7, column=2).value = label_player_4_name['text']\n\n\ndef add_players(sheet):\n \"\"\"\n This function adds player 3 and 4 to the Excel file if they exist\n :param sheet: the Excel sheet to write in\n :return:\n \"\"\"\n # player 3\n if int(label_number_players['text']) >= 3:\n sheet.cell(row=6, column=1).value = player3_kpis[0]['Legs']\n sheet.cell(row=6, column=3).value = round((player3_kpis[0]['Score'] /\n player3_kpis[0]['Darts']) * 3, 2)\n sheet.cell(row=6, column=4).value = player3_kpis[0]['180']\n sheet.cell(row=6, column=5).value = player3_kpis[0]['140']\n sheet.cell(row=6, column=6).value = player3_kpis[0]['100']\n sheet.cell(row=6, column=7).value = player3_kpis[0]['80']\n sheet.cell(row=6, column=8).value = player3_kpis[0]['60']\n\n sheet.cell(row=6, column=9).value = player3_kpis[0]['Score']\n sheet.cell(row=6, column=10).value = player3_kpis[0]['Darts']\n\n sheet.cell(row=6, column=11).value = player3_scores[0]['T20']\n sheet.cell(row=6, column=12).value = player3_scores[0]['S20']\n sheet.cell(row=6, column=13).value = player3_scores[0]['T19']\n sheet.cell(row=6, column=14).value = player3_scores[0]['S19']\n sheet.cell(row=6, column=15).value = player3_scores[0]['T18']\n sheet.cell(row=6, column=16).value = player3_scores[0]['S18']\n sheet.cell(row=6, column=17).value = player3_scores[0]['Single_Bull']\n sheet.cell(row=6, column=18).value = player3_scores[0]['Bull']\n sheet.cell(row=6, column=19).value = player3_scores[0]['Triple']\n sheet.cell(row=6, column=20).value = player3_scores[0]['Double']\n sheet.cell(row=6, column=21).value = player3_scores[0]['No_Score']\n\n # player 4\n if int(label_number_players['text']) == 4:\n sheet.cell(row=7, column=1).value = player4_kpis[0]['Legs']\n sheet.cell(row=7, column=3).value = round((player4_kpis[0]['Score'] /\n player4_kpis[0]['Darts']) * 3, 2)\n sheet.cell(row=7, column=4).value = player4_kpis[0]['180']\n sheet.cell(row=7, column=5).value = player4_kpis[0]['140']\n sheet.cell(row=7, column=6).value = player4_kpis[0]['100']\n sheet.cell(row=7, column=7).value = player4_kpis[0]['80']\n sheet.cell(row=7, column=8).value = player4_kpis[0]['60']\n\n sheet.cell(row=7, column=9).value = player4_kpis[0]['Score']\n sheet.cell(row=7, column=10).value = player4_kpis[0]['Darts']\n\n sheet.cell(row=7, column=11).value = player4_scores[0]['T20']\n sheet.cell(row=7, column=12).value = player4_scores[0]['S20']\n sheet.cell(row=7, column=13).value = player4_scores[0]['T19']\n sheet.cell(row=7, column=14).value = player4_scores[0]['S19']\n sheet.cell(row=7, column=15).value = player4_scores[0]['T18']\n sheet.cell(row=7, column=16).value = player4_scores[0]['S18']\n sheet.cell(row=7, column=17).value = player4_scores[0]['Single_Bull']\n sheet.cell(row=7, column=18).value = player4_scores[0]['Bull']\n sheet.cell(row=7, column=19).value = player4_scores[0]['Triple']\n sheet.cell(row=7, column=20).value = player4_scores[0]['Double']\n sheet.cell(row=7, column=21).value = player4_scores[0]['No_Score']\n\n\ndef create_excel():\n \"\"\"\n This function creates an Excel file\n :return:\n \"\"\"\n path = create_directory_if_not_exists()\n\n excel_file = Workbook()\n sheet = 
excel_file.create_sheet('Scoring')\n\n set_standards_in_excel(sheet)\n fill_values_in_cells(sheet)\n\n # player 1\n sheet.cell(row=4, column=1).value = player1_kpis[0]['Legs']\n sheet.cell(row=4, column=3).value = round((player1_kpis[0]['Score'] /\n player1_kpis[0]['Darts']) * 3, 2)\n sheet.cell(row=4, column=4).value = player1_kpis[0]['180']\n sheet.cell(row=4, column=5).value = player1_kpis[0]['140']\n sheet.cell(row=4, column=6).value = player1_kpis[0]['100']\n sheet.cell(row=4, column=7).value = player1_kpis[0]['80']\n sheet.cell(row=4, column=8).value = player1_kpis[0]['60']\n\n sheet.cell(row=4, column=9).value = player1_kpis[0]['Score']\n sheet.cell(row=4, column=10).value = player1_kpis[0]['Darts']\n\n sheet.cell(row=4, column=11).value = player1_scores[0]['T20']\n sheet.cell(row=4, column=12).value = player1_scores[0]['S20']\n sheet.cell(row=4, column=13).value = player1_scores[0]['T19']\n sheet.cell(row=4, column=14).value = player1_scores[0]['S19']\n sheet.cell(row=4, column=15).value = player1_scores[0]['T18']\n sheet.cell(row=4, column=16).value = player1_scores[0]['S18']\n sheet.cell(row=4, column=17).value = player1_scores[0]['Single_Bull']\n sheet.cell(row=4, column=18).value = player1_scores[0]['Bull']\n sheet.cell(row=4, column=19).value = player1_scores[0]['Triple']\n sheet.cell(row=4, column=20).value = player1_scores[0]['Double']\n sheet.cell(row=4, column=21).value = player1_scores[0]['No_Score']\n\n # player 2\n sheet.cell(row=5, column=1).value = player2_kpis[0]['Legs']\n sheet.cell(row=5, column=3).value = round((player2_kpis[0]['Score'] /\n player2_kpis[0]['Darts']) * 3, 2)\n sheet.cell(row=5, column=4).value = player2_kpis[0]['180']\n sheet.cell(row=5, column=5).value = player2_kpis[0]['140']\n sheet.cell(row=5, column=6).value = player2_kpis[0]['100']\n sheet.cell(row=5, column=7).value = player2_kpis[0]['80']\n sheet.cell(row=5, column=8).value = player2_kpis[0]['60']\n\n sheet.cell(row=5, column=9).value = player2_kpis[0]['Score']\n sheet.cell(row=5, column=10).value = player2_kpis[0]['Darts']\n\n sheet.cell(row=5, column=11).value = player2_scores[0]['T20']\n sheet.cell(row=5, column=12).value = player2_scores[0]['S20']\n sheet.cell(row=5, column=13).value = player2_scores[0]['T19']\n sheet.cell(row=5, column=14).value = player2_scores[0]['S19']\n sheet.cell(row=5, column=15).value = player2_scores[0]['T18']\n sheet.cell(row=5, column=16).value = player2_scores[0]['S18']\n sheet.cell(row=5, column=17).value = player2_scores[0]['Single_Bull']\n sheet.cell(row=5, column=18).value = player2_scores[0]['Bull']\n sheet.cell(row=5, column=19).value = player2_scores[0]['Triple']\n sheet.cell(row=5, column=20).value = player2_scores[0]['Double']\n sheet.cell(row=5, column=21).value = player2_scores[0]['No_Score']\n\n add_players(sheet)\n\n # save excel - file\n excel_file.save(path)\n messagebox.showinfo(\"Info\", \"Excel - file was successfully created.\")\n\n\ndef reset():\n \"\"\"\n This function restores the original state\n :return:\n \"\"\"\n clear_players()\n\n # disable excel button\n button_create_excel.pack()\n button_create_excel.pack_forget()\n\n # disable all throw buttons\n button_triple_20.pack()\n button_double_20.pack()\n button_single_20.pack()\n button_triple_19.pack()\n button_double_19.pack()\n button_single_19.pack()\n button_triple_18.pack()\n button_double_18.pack()\n button_single_18.pack()\n button_triple_17.pack()\n button_double_17.pack()\n button_single_17.pack()\n\n button_triple_20.pack_forget()\n button_double_20.pack_forget()\n 
button_single_20.pack_forget()\n button_triple_19.pack_forget()\n button_double_19.pack_forget()\n button_single_19.pack_forget()\n button_triple_18.pack_forget()\n button_double_18.pack_forget()\n button_single_18.pack_forget()\n button_triple_17.pack_forget()\n button_double_17.pack_forget()\n button_single_17.pack_forget()\n\n button_triple_16.pack()\n button_double_16.pack()\n button_single_16.pack()\n button_triple_15.pack()\n button_double_15.pack()\n button_single_15.pack()\n button_triple_14.pack()\n button_double_14.pack()\n button_single_14.pack()\n\n button_triple_16.pack_forget()\n button_double_16.pack_forget()\n button_single_16.pack_forget()\n button_triple_15.pack_forget()\n button_double_15.pack_forget()\n button_single_15.pack_forget()\n button_triple_14.pack_forget()\n button_double_14.pack_forget()\n button_single_14.pack_forget()\n\n reset2()\n\n\ndef reset2():\n \"\"\"\n This function restores the original state\n :return:\n \"\"\"\n button_triple_13.pack()\n button_double_13.pack()\n button_single_13.pack()\n button_triple_13.pack_forget()\n button_double_13.pack_forget()\n button_single_13.pack_forget()\n\n button_triple_12.pack()\n button_double_12.pack()\n button_single_12.pack()\n button_triple_11.pack()\n button_double_11.pack()\n button_single_11.pack()\n button_triple_10.pack()\n button_double_10.pack()\n button_single_10.pack()\n button_triple_9.pack()\n button_double_9.pack()\n button_single_9.pack()\n\n button_triple_12.pack_forget()\n button_double_12.pack_forget()\n button_single_12.pack_forget()\n button_triple_11.pack_forget()\n button_double_11.pack_forget()\n button_single_11.pack_forget()\n button_triple_10.pack_forget()\n button_double_10.pack_forget()\n button_single_10.pack_forget()\n button_triple_9.pack_forget()\n button_double_9.pack_forget()\n button_single_9.pack_forget()\n\n button_triple_8.pack()\n button_double_8.pack()\n button_single_8.pack()\n button_triple_7.pack()\n button_double_7.pack()\n button_single_7.pack()\n button_triple_6.pack()\n button_double_6.pack()\n button_single_6.pack()\n\n button_triple_8.pack_forget()\n button_double_8.pack_forget()\n button_single_8.pack_forget()\n button_triple_7.pack_forget()\n button_double_7.pack_forget()\n button_single_7.pack_forget()\n button_triple_6.pack_forget()\n button_double_6.pack_forget()\n button_single_6.pack_forget()\n\n reset3()\n\n\ndef reset3():\n \"\"\"\n This function restores the original state\n :return:\n \"\"\"\n button_triple_5.pack()\n button_double_5.pack()\n button_single_5.pack()\n button_triple_4.pack()\n button_double_4.pack()\n button_single_4.pack()\n button_triple_3.pack()\n button_double_3.pack()\n button_single_3.pack()\n button_triple_2.pack()\n button_double_2.pack()\n button_single_2.pack()\n button_triple_1.pack()\n button_double_1.pack()\n button_single_1.pack()\n\n button_triple_5.pack_forget()\n button_double_5.pack_forget()\n button_single_5.pack_forget()\n button_triple_4.pack_forget()\n button_double_4.pack_forget()\n button_single_4.pack_forget()\n button_triple_3.pack_forget()\n button_double_3.pack_forget()\n button_single_3.pack_forget()\n button_triple_2.pack_forget()\n button_double_2.pack_forget()\n button_single_2.pack_forget()\n button_triple_1.pack_forget()\n button_double_1.pack_forget()\n button_single_1.pack_forget()\n\n button_bull.pack()\n button_single_bull.pack()\n button_0.pack()\n\n button_bull.pack_forget()\n button_single_bull.pack_forget()\n button_0.pack_forget()\n\n # disable all player names and scores\n 
label_player_1_name.pack()\n label_player_2_name.pack()\n label_player_3_name.pack()\n label_player_4_name.pack()\n\n label_player_1_name.pack_forget()\n label_player_2_name.pack_forget()\n label_player_3_name.pack_forget()\n label_player_4_name.pack_forget()\n\n reset4()\n\n\ndef reset4():\n \"\"\"\n This function restores the original state\n :return:\n \"\"\"\n\n label_1_score.pack()\n label_2_score.pack()\n label_3_score.pack()\n label_4_score.pack()\n\n label_1_score.pack_forget()\n label_2_score.pack_forget()\n label_3_score.pack_forget()\n label_4_score.pack_forget()\n\n zwischen_label.pack()\n zwischen_label.pack_forget()\n\n label_first_dart.pack()\n label_second_dart.pack()\n label_third_dart.pack()\n\n label_first_dart.pack_forget()\n label_second_dart.pack_forget()\n label_third_dart.pack_forget()\n\n next_button.pack()\n next_button.pack_forget()\n\n button_dart_score.pack()\n button_dart_score.pack_forget()\n\n label_dart_score.pack()\n label_dart_score.pack_forget()\n\n label_switch_starting_points.place(x=587.5, y=120, height=30, width=100)\n button_dec_starting_points.place(x=557.5, y=120, height=30, width=30)\n button_inc_starting_points.place(x=687.5, y=120, height=30, width=30)\n\n label_number_players.place(x=587.5, y=200, height=30, width=100)\n button_minus_number_players.place(x=557.5, y=200, height=30, width=30)\n button_plus_number_players.place(x=687.5, y=200, height=30, width=30)\n\n input_name1.place(x=563.5, y=280, height=30, width=150)\n input_name2.place(x=563.5, y=330, height=30, width=150)\n\n label_welcome.place(x=337.5, y=20, height=50, width=600)\n button_continue.place(x=750, y=120, height=30, width=100)\n\n label_switch_starting_points['text'] = \"501\"\n label_number_players['text'] = \"2\"\n\n\ndef save_score():\n \"\"\"\n This function saves the score for player 1 and player 2\n :return:\n \"\"\"\n # update player 1\n for item in player1:\n score = item['Score']\n darts = item['Darts']\n\n player1_kpis[0]['Score'] += score\n player1_kpis[0]['Darts'] += darts\n\n if score == 180:\n player1_kpis[0]['180'] += 1\n\n elif score >= 140:\n player1_kpis[0]['140'] += 1\n\n elif score >= 100:\n player1_kpis[0]['100'] += 1\n\n elif score >= 80:\n player1_kpis[0]['80'] += 1\n\n elif score >= 60:\n player1_kpis[0]['60'] += 1\n\n # update player 2\n for item in player2:\n score = item['Score']\n darts = item['Darts']\n\n player2_kpis[0]['Score'] += score\n player2_kpis[0]['Darts'] += darts\n\n if score == 180:\n player2_kpis[0]['180'] += 1\n\n elif score >= 140:\n player2_kpis[0]['140'] += 1\n\n elif score >= 100:\n player2_kpis[0]['100'] += 1\n\n elif score >= 80:\n player2_kpis[0]['80'] += 1\n\n elif score >= 60:\n player2_kpis[0]['60'] += 1\n\n save_score2()\n\n\ndef save_score2():\n \"\"\"\n This function is called bei save-score and saves the scores for player 3 and 4\n :return\n \"\"\"\n # update player 3\n for item in player3:\n score = item['Score']\n darts = item['Darts']\n\n player3_kpis[0]['Score'] += score\n player3_kpis[0]['Darts'] += darts\n\n if score == 180:\n player3_kpis[0]['180'] += 1\n\n elif score >= 140:\n player3_kpis[0]['140'] += 1\n\n elif score >= 100:\n player3_kpis[0]['100'] += 1\n\n elif score >= 80:\n player3_kpis[0]['80'] += 1\n\n elif score >= 60:\n player3_kpis[0]['60'] += 1\n\n # update player 4\n for item in player4:\n score = item['Score']\n darts = item['Darts']\n\n player4_kpis[0]['Score'] += score\n player4_kpis[0]['Darts'] += darts\n\n if score == 180:\n player4_kpis[0]['180'] += 1\n\n elif score >= 140:\n 
player4_kpis[0]['140'] += 1\n\n elif score >= 100:\n player4_kpis[0]['100'] += 1\n\n elif score >= 80:\n player4_kpis[0]['80'] += 1\n\n elif score >= 60:\n player4_kpis[0]['60'] += 1\n\n\ndef add_player1(result, dart):\n \"\"\"\n This function adds a throw to the list\n :param result: thrown points\n :param dart: number of darts\n :return:\n \"\"\"\n player1.append({\"Score\": result, \"Darts\": dart})\n\n\ndef add_player2(result, dart):\n \"\"\"\n This function adds a throw to the list\n :param result: thrown points\n :param dart: number of darts\n :return:\n \"\"\"\n player2.append({\"Score\": result, \"Darts\": dart})\n\n\ndef add_player3(result, dart):\n \"\"\"\n This function adds a throw to the list\n :param result: thrown points\n :param dart: number of darts\n :return:\n \"\"\"\n player3.append({\"Score\": result, \"Darts\": dart})\n\n\ndef add_player4(result, dart):\n \"\"\"\n This function adds a throw to the list\n :param result: thrown points\n :param dart: number of darts\n :return:\n \"\"\"\n player4.append({\"Score\": result, \"Darts\": dart})\n\n\ndef t20():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T20\"\n label_dart_score['text'] = \"60\"\n\n\ndef d20():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D20\"\n label_dart_score['text'] = \"40\"\n\n\ndef s20():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S20\"\n label_dart_score['text'] = \"20\"\n\n\ndef t19():\n \"\"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T19\"\n label_dart_score['text'] = \"57\"\n\n\ndef d19():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D19\"\n label_dart_score['text'] = \"38\"\n\n\ndef s19():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S19\"\n label_dart_score['text'] = \"19\"\n\n\ndef t18():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T18\"\n label_dart_score['text'] = \"54\"\n\n\ndef d18():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D18\"\n\n label_dart_score['text'] = \"36\"\n\n\ndef s18():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S18\"\n\n label_dart_score['text'] = \"18\"\n\n\ndef t17():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T17\"\n label_dart_score['text'] = \"51\"\n\n\ndef d17():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D17\"\n label_dart_score['text'] = \"34\"\n\n\ndef s17():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S17\"\n label_dart_score['text'] = \"17\"\n\n\ndef t16():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T16\"\n label_dart_score['text'] = \"48\"\n\n\ndef d16():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D16\"\n label_dart_score['text'] = \"32\"\n\n\ndef s16():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S16\"\n label_dart_score['text'] = \"16\"\n\n\ndef t15():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T15\"\n label_dart_score['text'] = 
\"45\"\n\n\ndef d15():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D15\"\n label_dart_score['text'] = \"30\"\n\n\ndef s15():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S15\"\n label_dart_score['text'] = \"15\"\n\n\ndef t14():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T14\"\n label_dart_score['text'] = \"42\"\n\n\ndef d14():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D14\"\n label_dart_score['text'] = \"28\"\n\n\ndef s14():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S14\"\n label_dart_score['text'] = \"14\"\n\n\ndef t13():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T13\"\n label_dart_score['text'] = \"39\"\n\n\ndef d13():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D13\"\n label_dart_score['text'] = \"26\"\n\n\ndef s13():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S13\"\n label_dart_score['text'] = \"13\"\n\n\ndef t12():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T12\"\n label_dart_score['text'] = \"36\"\n\n\ndef d12():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D12\"\n label_dart_score['text'] = \"24\"\n\n\ndef s12():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S12\"\n label_dart_score['text'] = \"12\"\n\n\ndef t11():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T11\"\n label_dart_score['text'] = \"33\"\n\n\ndef d11():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D11\"\n label_dart_score['text'] = \"22\"\n\n\ndef s11():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S11\"\n label_dart_score['text'] = \"11\"\n\n\ndef t10():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T10\"\n label_dart_score['text'] = \"30\"\n\n\ndef d10():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D10\"\n label_dart_score['text'] = \"20\"\n\n\ndef s10():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S10\"\n label_dart_score['text'] = \"10\"\n\n\ndef t_9():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T9\"\n label_dart_score['text'] = \"27\"\n\n\ndef d_9():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D9\"\n label_dart_score['text'] = \"18\"\n\n\ndef s_9():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S9\"\n label_dart_score['text'] = \"9\"\n\n\ndef t_8():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T8\"\n label_dart_score['text'] = \"24\"\n\n\ndef d_8():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D8\"\n label_dart_score['text'] = \"16\"\n\n\ndef s_8():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S8\"\n 
label_dart_score['text'] = \"8\"\n\n\ndef t_7():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T7\"\n label_dart_score['text'] = \"21\"\n\n\ndef d_7():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D7\"\n label_dart_score['text'] = \"14\"\n\n\ndef s_7():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S7\"\n label_dart_score['text'] = \"7\"\n\n\ndef t_6():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T6\"\n label_dart_score['text'] = \"18\"\n\n\ndef d_6():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D6\"\n label_dart_score['text'] = \"12\"\n\n\ndef s_6():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S6\"\n label_dart_score['text'] = \"6\"\n\n\ndef t_5():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T5\"\n label_dart_score['text'] = \"15\"\n\n\ndef d_5():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D5\"\n label_dart_score['text'] = \"10\"\n\n\ndef s_5():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S5\"\n label_dart_score['text'] = \"5\"\n\n\ndef t_4():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T4\"\n label_dart_score['text'] = \"12\"\n\n\ndef d_4():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D4\"\n label_dart_score['text'] = \"8\"\n\n\ndef s_4():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S4\"\n label_dart_score['text'] = \"4\"\n\n\ndef t_3():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T3\"\n label_dart_score['text'] = \"9\"\n\n\ndef d_3():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D3\"\n label_dart_score['text'] = \"6\"\n\n\ndef s_3():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S3\"\n label_dart_score['text'] = \"3\"\n\n\ndef t_2():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T2\"\n label_dart_score['text'] = \"6\"\n\n\ndef d_2():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D2\"\n label_dart_score['text'] = \"4\"\n\n\ndef s_2():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S2\"\n label_dart_score['text'] = \"2\"\n\n\ndef t_1():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"T1\"\n label_dart_score['text'] = \"3\"\n\n\ndef d_1():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"D1\"\n label_dart_score['text'] = \"2\"\n\n\ndef s_1():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"S1\"\n label_dart_score['text'] = \"1\"\n\n\ndef bull():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"Bull\"\n label_dart_score['text'] = \"50\"\n\n\ndef single_bull():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"Single_Bull\"\n 
label_dart_score['text'] = \"25\"\n\n\ndef null():\n \"\"\"\n This function adds the thrown score\n :return:\n \"\"\"\n label_invisible['text'] = \"\"\n label_dart_score['text'] = \"0\"\n\n\ndef button_exit():\n \"\"\"\n This function creates an exit - button for the gui\n :return:\n \"\"\"\n if not any(isinstance(window, Toplevel) for window in gui.winfo_children()):\n exit_window = Toplevel(gui)\n exit_window.geometry('250x150')\n exit_window.resizable(width=False, height=False)\n exit_window.title(\"Stop?\")\n\n label_exit = Label(exit_window, text=\"End game?\", font=('Arial', 11))\n button_yes = Button(exit_window, text=\"Yes\", command=exit_window.quit,\n font=('Arial', 10, 'bold'), bg=\"white\",\n fg=\"green\")\n button_no = Button(exit_window, text=\"No\", command=exit_window.destroy,\n font=('Arial', 10, 'bold'),\n bg=\"white\", fg=\"red\")\n\n label_exit.place(x=80, y=0, width=100, height=50)\n button_yes.place(x=50, y=60, width=50, height=50)\n button_no.place(x=150, y=60, width=50, height=50)\n\n else:\n messagebox.showinfo(\"Info\", \"You already clicked on \\\"Stop\\\"!\")\n\n\ndef increment_starting_points():\n \"\"\"\n Ths function sets all labels to 501\n :return:\n \"\"\"\n label_switch_starting_points['text'] = \"501\"\n\n\ndef decrement_starting_points():\n \"\"\"\n This function sets all labels to 301\n :return:\n \"\"\"\n label_switch_starting_points['text'] = \"301\"\n\n\ndef next_button():\n \"\"\"\n This function switches to the next player\n :return:\n \"\"\"\n zwischen_label['text'] = \"0\"\n label_first_dart['bg'] = \"yellow\"\n label_second_dart['bg'] = \"white\"\n label_third_dart['bg'] = \"white\"\n\n number = int(label_number_players['text'])\n\n # check label 1\n if label_1_score['bg'] == \"yellow\":\n label_1_score['bg'] = \"white\"\n\n if number == 2:\n label_2_score['bg'] = \"yellow\"\n\n elif number == 3:\n if int(label_2_score['text']) == 0:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_2_score['text']) == 0:\n if int(label_3_score['text']) == 0:\n label_4_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n else:\n next_button2(number)\n\n\ndef next_button2(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 2\n if label_2_score['bg'] == \"yellow\":\n label_2_score['bg'] = \"white\"\n\n if number == 2:\n label_1_score['bg'] = \"yellow\"\n\n elif number == 3:\n if int(label_3_score['text']) == 0:\n label_1_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_3_score['text']) == 0:\n if int(label_4_score['text']) == 0:\n label_1_score['bg'] = \"yellow\"\n else:\n label_4_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n\n else:\n next_button3(number)\n\n\ndef next_button3(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 3\n if label_3_score['bg'] == \"yellow\":\n label_3_score['bg'] = \"white\"\n\n if number == 3:\n if int(label_1_score['text']) == 0:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_4_score['text']) == 0:\n if int(label_1_score['text']) == 0:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n else:\n label_4_score['bg'] = \"yellow\"\n else:\n next_button4(number)\n\n\ndef 
next_button4(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 4\n if label_4_score['bg'] == \"yellow\":\n label_4_score['bg'] = \"white\"\n\n if number == 4:\n if int(label_1_score['text']) == 0:\n if int(label_2_score['text']) == 0:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n\n\ndef next_label():\n \"\"\"\n This function switches to the next player\n :return:\n \"\"\"\n number = int(label_number_players['text'])\n\n # check label 1\n if label_1_score['bg'] == \"yellow\":\n label_1_score['bg'] = \"white\"\n\n if number == 2:\n label_2_score['bg'] = \"yellow\"\n\n elif number == 3:\n if int(label_2_score['text']) == 0:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_2_score['text']) == 0:\n if int(label_3_score['text']) == 0:\n label_4_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n else:\n next_label2(number)\n\n\ndef next_label2(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 2\n if label_2_score['bg'] == \"yellow\":\n label_2_score['bg'] = \"white\"\n\n if number == 2:\n label_1_score['bg'] = \"yellow\"\n\n elif number == 3:\n if int(label_3_score['text']) == 0:\n label_1_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_3_score['text']) == 0:\n if int(label_4_score['text']) == 0:\n label_1_score['bg'] = \"yellow\"\n else:\n label_4_score['bg'] = \"yellow\"\n else:\n label_3_score['bg'] = \"yellow\"\n\n else:\n next_label3(number)\n\n\ndef next_label3(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 3\n if label_3_score['bg'] == \"yellow\":\n label_3_score['bg'] = \"white\"\n\n if number == 3:\n if int(label_1_score['text']) == 0:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n\n elif number == 4:\n if int(label_4_score['text']) == 0:\n if int(label_1_score['text']) == 0:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n else:\n label_4_score['bg'] = \"yellow\"\n else:\n next_label4(number)\n\n\ndef next_label4(number):\n \"\"\"\n This function switches to the next player\n :param number: number of players\n :return:\n \"\"\"\n # check label 4\n if label_4_score['bg'] == \"yellow\":\n label_4_score['bg'] = \"white\"\n\n if number == 4:\n if int(label_1_score['text']) == 0:\n if int(label_2_score['text']) == 0:\n label_3_score['bg'] = \"yellow\"\n else:\n label_2_score['bg'] = \"yellow\"\n else:\n label_1_score['bg'] = \"yellow\"\n\n\ndef get_amount_of_darts():\n \"\"\"\n This function calculate the number of thrown darts\n :return: amount of darts\n \"\"\"\n count_down_button.pack()\n count_down_button.pack_forget()\n count_down_button['text'] = \"Count down\"\n\n button_dart_score.pack()\n button_dart_score.place(x=90, y=300, height=30, width=80)\n\n zwischen_label['text'] = \"0\"\n\n darts = 3\n\n if label_first_dart['bg'] == \"yellow\":\n darts = 1\n elif label_second_dart['bg'] == \"yellow\":\n darts = 2\n elif label_third_dart['bg'] == \"yellow\":\n darts = 3\n\n label_first_dart['bg'] = \"yellow\"\n label_second_dart['bg'] = \"white\"\n label_third_dart['bg'] = \"white\"\n\n return darts\n\n\ndef count_down():\n 
\"\"\"\n This function counts the score down\n :return:\n \"\"\"\n flag = True\n number = int(label_number_players['text'])\n result = int(zwischen_label['text'])\n darts = get_amount_of_darts()\n\n two = int(label_2_score['text'])\n\n try:\n three = int(label_3_score['text'])\n four = int(label_4_score['text'])\n\n except ValueError:\n three = 501\n four = 501\n\n # count down player 1\n if label_1_score['bg'] == \"yellow\":\n current = int(label_1_score['text'])\n if result > current:\n messagebox.showinfo(\"Warning\", \"No score.\")\n result2 = 0\n add_player1(result2, darts)\n\n elif current > result:\n current = current - result\n label_1_score['text'] = current\n add_player1(result, darts)\n\n elif result == current:\n current = current - result\n label_1_score['text'] = current\n add_player1(result, darts)\n\n # first check: 2 players\n if number == 2:\n player1_kpis[0]['Legs'] += 1\n messagebox.showinfo(\"Info\", label_player_1_name['text'] + \" is the winner.\")\n flag = False\n\n end_game()\n\n # second check: 3 players\n elif number == 3:\n # check if one player has 0 points left\n if two == 0 or three == 0:\n messagebox.showinfo(\"Info\", label_player_1_name['text'] +\n \" is the second winner.\")\n flag = False\n end_game()\n else:\n player1_kpis[0]['Legs'] += 1\n messagebox.showinfo(\"Info\", label_player_1_name['text'] +\n \" is the first winner.\")\n\n # third check: 4 players\n elif number == 4:\n flag = check_4players_label1(two, three, four)\n\n if flag:\n next_label()\n else:\n count_down_player2(result, darts)\n\n\ndef check_4players_label1(two, three, four):\n \"\"\"\n This function check 4 players for label 1\n :param two: score of player 2\n :param three: score of player 3\n :param four: score of player 4\n :return: false if game is finished\n \"\"\"\n players_with_zero_points = 0\n\n if two == 0:\n players_with_zero_points += 1\n\n if three == 0:\n players_with_zero_points += 1\n\n if four == 0:\n players_with_zero_points += 1\n\n if players_with_zero_points == 0:\n player1_kpis[0]['Legs'] += 1\n messagebox.showinfo(\"Info\", label_player_1_name['text'] +\n \" is the first winner.\")\n\n elif players_with_zero_points == 1:\n messagebox.showinfo(\"Info\", label_player_1_name['text'] +\n \" is second the winner.\")\n\n elif players_with_zero_points == 2:\n messagebox.showinfo(\"Info\", label_player_1_name['text'] +\n \" is the third winner.\")\n end_game()\n return False\n\n return True\n\ndef count_down_player2(result, darts):\n \"\"\"\n This function counts the score of player 2 down\n :param result: thrown points\n :param darts: number of darts\n :return:\n \"\"\"\n flag = True\n one = int(label_1_score['text'])\n\n try:\n three = int(label_3_score['text'])\n four = int(label_4_score['text'])\n\n except ValueError:\n three = 501\n four = 501\n\n number = int(label_number_players['text'])\n # count down label 2\n if label_2_score['bg'] == \"yellow\":\n current = int(label_2_score['text'])\n if result > current:\n messagebox.showinfo(\"Warning\", \"No score.\")\n result2 = 0\n add_player2(result2, darts)\n\n elif current > result:\n current = current - result\n label_2_score['text'] = current\n add_player2(result, darts)\n\n elif result == current:\n current = current - result\n label_2_score['text'] = current\n add_player2(result, darts)\n\n # first check: 2 players\n if number == 2:\n player2_kpis[0]['Legs'] += 1\n messagebox.showinfo(\"Info\", label_player_2_name['text'] + \" is the winner.\")\n flag = False\n end_game()\n\n # second check: 3 players\n elif 
number == 3:\n                # check if one player has 0 points left\n                if one == 0 or three == 0:\n                    messagebox.showinfo(\"Info\", label_player_2_name['text'] +\n                                        \" is the second winner.\")\n                    flag = False\n                    end_game()\n                else:\n                    player2_kpis[0]['Legs'] += 1\n                    messagebox.showinfo(\"Info\", label_player_2_name['text'] +\n                                        \" is the first winner.\")\n\n            # third check: 4 players\n            elif number == 4:\n                flag = check_4players_label2(one, three, four)\n\n        if flag:\n            next_label()\n    else:\n        count_down_player3(result, darts)\n\n\ndef check_4players_label2(one, three, four):\n    \"\"\"\n    This function checks 4 players for label 2\n    :param one: score of player 1\n    :param three: score of player 3\n    :param four: score of player 4\n    :return: false if game is finished\n    \"\"\"\n    players_with_zero_points = 0\n\n    if one == 0:\n        players_with_zero_points += 1\n\n    if three == 0:\n        players_with_zero_points += 1\n\n    if four == 0:\n        players_with_zero_points += 1\n\n    if players_with_zero_points == 0:\n        player2_kpis[0]['Legs'] += 1\n        messagebox.showinfo(\"Info\", label_player_2_name['text'] +\n                            \" is the first winner.\")\n\n    elif players_with_zero_points == 1:\n        messagebox.showinfo(\"Info\", label_player_2_name['text'] +\n                            \" is the second winner.\")\n\n    elif players_with_zero_points == 2:\n        messagebox.showinfo(\"Info\", label_player_2_name['text'] +\n                            \" is the third winner.\")\n        end_game()\n        return False\n\n    return True\n\n\ndef count_down_player3(result, darts):\n    \"\"\"\n    This function counts the score of player 3 down\n    :param result: thrown points\n    :param darts: number of darts\n    :return:\n    \"\"\"\n    flag = True\n    one = int(label_1_score['text'])\n    two = int(label_2_score['text'])\n\n    try:\n        four = int(label_4_score['text'])\n\n    except ValueError:\n        four = 501\n\n    number = int(label_number_players['text'])\n\n    # label 3\n    if label_3_score['bg'] == \"yellow\":\n        current = int(label_3_score['text'])\n        if result > current:\n            messagebox.showinfo(\"Warning\", \"No score.\")\n            result2 = 0\n            add_player3(result2, darts)\n\n        elif current > result:\n            current = current - result\n            label_3_score['text'] = current\n            add_player3(result, darts)\n\n        elif result == current:\n            current = current - result\n            label_3_score['text'] = current\n            add_player3(result, darts)\n\n            # first check: 3 players\n            if number == 3:\n                # check if one player has 0 points left\n                if one == 0 or two == 0:\n                    messagebox.showinfo(\"Info\", label_player_3_name['text'] +\n                                        \" is the second winner.\")\n                    flag = False\n                    end_game()\n                else:\n                    player3_kpis[0]['Legs'] += 1\n                    messagebox.showinfo(\"Info\", label_player_3_name['text'] +\n                                        \" is the first winner.\")\n\n            # second check: 4 players\n            elif number == 4:\n                flag = check_4players_label3(one, two, four)\n\n        if flag:\n            next_label()\n    else:\n        count_down_player4(result, darts)\n\n\ndef check_4players_label3(one, two, four):\n    \"\"\"\n    This function checks 4 players for label 3\n    :param one: score of player 1\n    :param two: score of player 2\n    :param four: score of player 4\n    :return: false if game is finished\n    \"\"\"\n    players_with_zero_points = 0\n\n    if one == 0:\n        players_with_zero_points += 1\n\n    if two == 0:\n        players_with_zero_points += 1\n\n    if four == 0:\n        players_with_zero_points += 1\n\n    if players_with_zero_points == 0:\n        player3_kpis[0]['Legs'] += 1\n        messagebox.showinfo(\"Info\", label_player_3_name['text'] +\n                            \" is the first winner.\")\n\n    elif players_with_zero_points == 1:\n        messagebox.showinfo(\"Info\", label_player_3_name['text'] +\n                            \" is the second winner.\")\n\n    elif players_with_zero_points == 2:\n        
messagebox.showinfo(\"Info\", label_player_3_name['text'] +\n \" is the third winner.\")\n end_game()\n return False\n\n return True\n\ndef count_down_player4(result, darts):\n \"\"\"\n This function counts the score of player 3 down\n :param result: thrown points\n :param darts: number of darts\n :return:\n \"\"\"\n flag = True\n one = int(label_1_score['text'])\n two = int(label_2_score['text'])\n\n try:\n three = int(label_3_score['text'])\n\n except ValueError:\n three = 501\n\n number = int(label_number_players['text'])\n\n if label_4_score['bg'] == \"yellow\":\n current = int(label_4_score['text'])\n if result > current:\n messagebox.showinfo(\"Warning\", \"No score.\")\n result2 = 0\n add_player4(result2, darts)\n\n elif current > result:\n current = current - result\n label_4_score['text'] = current\n add_player4(result, darts)\n\n elif result == current:\n current = current - result\n label_4_score['text'] = current\n add_player4(result, darts)\n\n # first check: 4 players\n if number == 4:\n flag = check_4players_label4(one, two, three)\n\n if flag:\n next_label()\n\n\ndef check_4players_label4(one, two, three):\n \"\"\"\n This function check 4 players for label 4\n :param one: score of player 1\n :param two: score of player 2\n :param three: score of player 3\n :return: false if game is finished\n \"\"\"\n players_with_zero_points = 0\n\n if one == 0:\n players_with_zero_points += 1\n\n if two == 0:\n players_with_zero_points += 1\n\n if three == 0:\n players_with_zero_points += 1\n\n if players_with_zero_points == 0:\n player4_kpis[0]['Legs'] += 1\n messagebox.showinfo(\"Info\", label_player_4_name['text'] +\n \"is the first winner.\")\n\n elif players_with_zero_points == 1:\n messagebox.showinfo(\"Info\", label_player_4_name['text'] +\n \" is second the winner.\")\n\n elif players_with_zero_points == 2:\n messagebox.showinfo(\"Info\", label_player_4_name['text'] +\n \" is the third winner.\")\n end_game()\n return False\n\n return True\n\n\ndef add_scores():\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 1\n :return:\n \"\"\"\n # get the score from invisible label and reset it to 0\n score = label_invisible['text']\n label_invisible['text'] = \"0\"\n\n # player 1\n if label_1_score['bg'] == \"yellow\":\n if \"T\" in score:\n player1_scores[0][\"Triple\"] += 1\n if score == \"T20\":\n player1_scores[0][\"T20\"] += 1\n elif score == \"T19\":\n player1_scores[0][\"T19\"] += 1\n elif score == \"T18\":\n player1_scores[0][\"T18\"] += 1\n\n elif \"D\" in score:\n player1_scores[0][\"Double\"] += 1\n\n elif \"Bull\" in score:\n if score == \"Single_Bull\":\n player1_scores[0][\"Single_Bull\"] += 1\n else:\n player1_scores[0][\"Bull\"] += 1\n elif \"S\" in score:\n add_scores_player1_help(score)\n else:\n player1_scores[0][\"No_Score\"] += 1\n\n else:\n add_scores_player2(score)\n\n\ndef add_scores_player1_help(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 1\n :param score: current score\n :return:\n \"\"\"\n if score == \"S20\":\n player1_scores[0][\"S20\"] += 1\n elif score == \"S19\":\n player1_scores[0][\"S19\"] += 1\n elif score == \"S18\":\n player1_scores[0][\"S18\"] += 1\n\n\ndef add_scores_player2(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 2\n :param score: current score\n :return\n \"\"\"\n # player 2\n if label_2_score['bg'] == \"yellow\":\n if \"T\" in score:\n player2_scores[0][\"Triple\"] += 1\n if score == \"T20\":\n player2_scores[0][\"T20\"] 
+= 1\n elif score == \"T19\":\n player2_scores[0][\"T19\"] += 1\n elif score == \"T18\":\n player2_scores[0][\"T18\"] += 1\n\n elif \"D\" in score:\n player2_scores[0][\"Double\"] += 1\n\n elif \"Bull\" in score:\n if score == \"Single_Bull\":\n player2_scores[0][\"Single_Bull\"] += 1\n else:\n player2_scores[0][\"Bull\"] += 1\n\n elif \"S\" in score:\n add_scores_player2_help(score)\n\n else:\n player2_scores[0][\"No_Score\"] += 1\n\n else:\n add_scores_player3(score)\n\n\ndef add_scores_player2_help(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 2\n :param score: current score\n :return:\n \"\"\"\n if score == \"S20\":\n player2_scores[0][\"S20\"] += 1\n elif score == \"S19\":\n player2_scores[0][\"S19\"] += 1\n elif score == \"S18\":\n player2_scores[0][\"S18\"] += 1\n\n\ndef add_scores_player3(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 3\n :param score: current score\n :return\n \"\"\"\n # player 3\n if label_3_score['bg'] == \"yellow\":\n if \"T\" in score:\n player3_scores[0][\"Triple\"] += 1\n if score == \"T20\":\n player3_scores[0][\"T20\"] += 1\n elif score == \"T19\":\n player3_scores[0][\"T19\"] += 1\n elif score == \"T18\":\n player3_scores[0][\"T18\"] += 1\n\n elif \"D\" in score:\n player3_scores[0][\"Double\"] += 1\n\n elif \"Bull\" in score:\n if score == \"Single_Bull\":\n player3_scores[0][\"Single_Bull\"] += 1\n else:\n player3_scores[0][\"Bull\"] += 1\n\n elif \"S\" in score:\n add_scores_player3_help(score)\n\n else:\n player3_scores[0][\"No_Score\"] += 1\n\n else:\n add_scores_player4(score)\n\n\ndef add_scores_player3_help(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 3\n :param score: current score\n :return:\n \"\"\"\n if score == \"S20\":\n player3_scores[0][\"S20\"] += 1\n elif score == \"S19\":\n player3_scores[0][\"S19\"] += 1\n elif score == \"S18\":\n player3_scores[0][\"S18\"] += 1\n\n\ndef add_scores_player4(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 4\n :param score: current score\n :return:\n \"\"\"\n # player 4\n if label_4_score['bg'] == \"yellow\":\n if \"T\" in score:\n player4_scores[0][\"Triple\"] += 1\n if score == \"T20\":\n player4_scores[0][\"T20\"] += 1\n elif score == \"T19\":\n player4_scores[0][\"T19\"] += 1\n elif score == \"T18\":\n player4_scores[0][\"T18\"] += 1\n\n elif \"D\" in score:\n player4_scores[0][\"Double\"] += 1\n\n elif \"Bull\" in score:\n if score == \"Single_Bull\":\n player4_scores[0][\"Single_Bull\"] += 1\n else:\n player4_scores[0][\"Bull\"] += 1\n\n elif \"S\" in score:\n add_scores_player4_help(score)\n\n else:\n player4_scores[0][\"No_Score\"] += 1\n\n else:\n messagebox.showinfo(\"Error\", \"Restart\")\n\n\ndef add_scores_player4_help(score):\n \"\"\"\n This function adds the thrown darts to the kpis/player_scores of player 4\n :param score: current score\n :return:\n \"\"\"\n if score == \"S20\":\n player4_scores[0][\"S20\"] += 1\n elif score == \"S19\":\n player4_scores[0][\"S19\"] += 1\n elif score == \"S18\":\n player4_scores[0][\"S18\"] += 1\n\n\ndef add():\n \"\"\"\n This function adds the thrown score to the total score\n :return:\n \"\"\"\n # save scores to kpis\n add_scores()\n\n count = int(label_dart_score['text'])\n current = int(zwischen_label['text'])\n\n result = current + count\n zwischen_label['text'] = result\n label_dart_score['text'] = \"\"\n\n flag1 = False\n\n if (label_1_score['bg'] == \"yellow\" and result == 
int(label_1_score['text'])) \\\n or (label_2_score['bg'] == \"yellow\" and result == int(label_2_score['text'])):\n flag1 = True\n\n if flag1 or (label_3_score['bg'] == \"yellow\" and result == int(label_3_score['text'])) \\\n or (label_4_score['bg'] == \"yellow\" and result == int(label_4_score['text'])):\n count_down_button.pack()\n count_down_button['text'] = \"Count down\"\n count_down_button.place(x=440, y=300, height=30, width=90)\n button_dart_score.pack()\n button_dart_score.pack_forget()\n return\n\n flag2 = False\n\n if (label_1_score['bg'] == \"yellow\" and result > int(label_1_score['text'])) \\\n or (label_2_score['bg'] == \"yellow\" and result > int(label_2_score['text'])):\n flag2 = True\n\n if flag2 or (label_3_score['bg'] == \"yellow\" and result > int(label_3_score['text'])) \\\n or (label_4_score['bg'] == \"yellow\" and result > int(label_4_score['text'])):\n count_down_button.pack()\n count_down_button['text'] = \"No score. Next Player\"\n count_down_button.place(x=440, y=300, height=30, width=150)\n button_dart_score.pack()\n button_dart_score.pack_forget()\n return\n\n if label_first_dart['bg'] == \"yellow\":\n label_first_dart['bg'] = \"white\"\n label_second_dart['bg'] = \"yellow\"\n return\n\n if label_second_dart['bg'] == \"yellow\":\n label_second_dart['bg'] = \"white\"\n label_third_dart['bg'] = \"yellow\"\n return\n\n if label_third_dart['bg'] == \"yellow\":\n count_down_button.pack()\n count_down_button.place(x=440, y=300, height=30, width=90)\n button_dart_score.pack()\n button_dart_score.pack_forget()\n\n\ndef end_game():\n \"\"\"\n This function stops the game\n \"\"\"\n messagebox.showinfo(\"Info\", \"Game is over.\")\n\n label_1_score['bg'] = \"yellow\"\n label_2_score['bg'] = \"white\"\n label_3_score['bg'] = \"white\"\n label_4_score['bg'] = \"white\"\n\n if label_switch_starting_points['text'] == \"501\":\n label_1_score['text'] = \"501\"\n label_2_score['text'] = \"501\"\n\n if int(label_number_players['text']) == 3:\n label_3_score['text'] = \"501\"\n\n if int(label_number_players['text']) == 4:\n label_4_score['text'] = \"501\"\n else:\n label_1_score['text'] = \"301\"\n label_2_score['text'] = \"301\"\n if int(label_number_players['text']) == 3:\n label_3_score['text'] = \"301\"\n\n if int(label_number_players['text']) == 4:\n label_4_score['text'] = \"301\"\n\n button_create_excel.place(x=1125, y=210, height=80, width=150)\n save_score()\n clear_players()\n\n\ndef clear_players():\n \"\"\"\n This function removes all items from the players\n :return:\n \"\"\"\n\n while len(player1) > 0:\n for item in player1:\n player1.remove(item)\n\n while len(player2) > 0:\n for item in player2:\n player2.remove(item)\n\n while len(player3) > 0:\n for item in player3:\n player3.remove(item)\n\n while len(player4) > 0:\n for item in player4:\n player4.remove(item)\n\n\ndef plus_player_numbers():\n \"\"\"\n This function increments the number of players, max. 4 players\n :return:\n \"\"\"\n number = int(label_number_players['text'])\n\n if number == 2:\n label_number_players['text'] = 3\n input_name3.place(x=563.5, y=380, height=30, width=150)\n\n elif number == 3:\n label_number_players['text'] = 4\n input_name4.place(x=563.5, y=430, height=30, width=150)\n\n\ndef minus_player_numbers():\n \"\"\"\n This function decrements the number of players, min. 
2 players\n :return:\n \"\"\"\n number = int(label_number_players['text'])\n\n if number == 4:\n label_number_players['text'] = 3\n input_name4.pack()\n input_name4.pack_forget()\n\n elif number == 3:\n label_number_players['text'] = 2\n input_name3.pack()\n input_name3.pack_forget()\n\n\ndef check_names():\n \"\"\"\n This function checks if all names were entered, and then the game will start\n :return:\n \"\"\"\n flag = True\n number = int(label_number_players['text'])\n name1 = input_name1.get()\n name2 = input_name2.get()\n name3 = input_name3.get()\n name4 = input_name4.get()\n\n if number == 2:\n if name1 == \"\" or name2 == \"\":\n messagebox.showinfo(\"Error\", \"Not all names were entered.\")\n flag = False\n\n elif number == 3:\n if name1 == \"\" or name2 == \"\" or name3 == \"\":\n messagebox.showinfo(\"Error\", \"Not all names were entered.\")\n flag = False\n else:\n label_player_3_name['text'] = name3\n\n elif number == 4:\n if name1 == \"\" or name2 == \"\" or name3 == \"\" or name4 == \"\":\n messagebox.showinfo(\"Error\", \"Not all names were entered.\")\n flag = False\n else:\n label_player_3_name['text'] = name3\n label_player_4_name['text'] = name4\n\n if flag:\n check_names2(name1, name2)\n\n\ndef check_names2(name1, name2):\n \"\"\"\n This function checks if all names were entered, and then the game will start\n :return:\n \"\"\"\n label_player_1_name['text'] = name1\n label_player_2_name['text'] = name2\n\n # disable input fields and labels and buttons for selecting player numbers\n input_name1.pack()\n input_name2.pack()\n input_name3.pack()\n input_name4.pack()\n\n input_name1.pack_forget()\n input_name2.pack_forget()\n input_name3.pack_forget()\n input_name4.pack_forget()\n\n button_continue.pack()\n button_continue.pack_forget()\n\n # disable plus and minus\n button_plus_number_players.pack()\n button_minus_number_players.pack()\n\n button_plus_number_players.pack_forget()\n button_minus_number_players.pack_forget()\n\n label_number_players.pack()\n label_welcome.pack()\n\n label_number_players.pack_forget()\n label_welcome.pack_forget()\n\n button_inc_starting_points.pack()\n button_dec_starting_points.pack()\n label_switch_starting_points.pack()\n\n button_inc_starting_points.pack_forget()\n button_dec_starting_points.pack_forget()\n label_switch_starting_points.pack_forget()\n\n start_game()\n\n\ndef start_game():\n \"\"\"\n This function starts the game after the names were entered\n \"\"\"\n number_players = int(label_number_players['text'])\n clear_players()\n\n reset_button.place(x=1175, y=80, height=30, width=100)\n\n label_1_score['bg'] = \"yellow\"\n label_2_score['bg'] = \"white\"\n label_3_score['bg'] = \"white\"\n label_4_score['bg'] = \"white\"\n\n if label_switch_starting_points['text'] == \"501\":\n label_1_score['text'] = \"501\"\n label_2_score['text'] = \"501\"\n label_3_score['text'] = \"501\"\n label_4_score['text'] = \"501\"\n\n else:\n label_1_score['text'] = \"301\"\n label_2_score['text'] = \"301\"\n label_3_score['text'] = \"301\"\n label_4_score['text'] = \"301\"\n\n label_1_score.place(x=10, y=60, height=30, width=150)\n label_2_score.place(x=200, y=60, height=30, width=150)\n\n label_player_1_name.place(x=10, y=10, height=30, width=150)\n label_player_2_name.place(x=200, y=10, height=30, width=150)\n\n if number_players >= 3:\n label_3_score.place(x=390, y=60, height=30, width=150)\n label_player_3_name.place(x=390, y=10, height=30, width=150)\n\n if number_players == 4:\n label_4_score.place(x=580, y=60, height=30, width=150)\n 
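        # --- Editor's sketch (not part of the original file): the four nearly
        # identical add_scores_playerN functions above could be collapsed into a
        # single table-driven helper. `kpis` stands for the per-player dict such
        # as player1_scores[0]; the score strings ("T20", "D5", "Single_Bull",
        # "Bull", ...) are the ones already used throughout this module.
        #
        # def tally_score(kpis, score):
        #     if "T" in score:
        #         kpis["Triple"] += 1
        #         if score in ("T20", "T19", "T18"):
        #             kpis[score] += 1
        #     elif "D" in score:
        #         kpis["Double"] += 1
        #     elif "Bull" in score:
        #         kpis["Single_Bull" if score == "Single_Bull" else "Bull"] += 1
        #     elif "S" in score:
        #         if score in ("S20", "S19", "S18"):
        #             kpis[score] += 1  # other singles are not tracked individually
        #     else:
        #         kpis["No_Score"] += 1
        #
        # add_scores() would then call tally_score(player1_scores[0], score), etc.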
label_player_4_name.place(x=580, y=10, height=30, width=150)\n\n next_button.place(x=810, y=60, height=30, width=130)\n\n label_dart_score.pack()\n button_dart_score.pack()\n\n label_dart_score.place(x=0, y=300, height=30, width=90)\n button_dart_score.place(x=90, y=300, height=30, width=80)\n\n label_first_dart.pack()\n label_second_dart.pack()\n label_third_dart.pack()\n zwischen_label.pack()\n\n label_first_dart.place(x=210, y=300, height=30, width=30)\n label_second_dart.place(x=250, y=300, height=30, width=30)\n label_third_dart.place(x=290, y=300, height=30, width=30)\n zwischen_label.place(x=340, y=300, height=30, width=100)\n\n enable_throw_buttons()\n\n\ndef enable_throw_buttons():\n \"\"\"\n This function enables button (0 to T20)\n :return:\n \"\"\"\n button_triple_20.place(x=0, y=400, height=60, width=60)\n button_double_20.place(x=0, y=470, height=60, width=60)\n button_single_20.place(x=0, y=540, height=60, width=60)\n button_triple_19.place(x=60, y=400, height=60, width=60)\n button_double_19.place(x=60, y=470, height=60, width=60)\n button_single_19.place(x=60, y=540, height=60, width=60)\n button_triple_18.place(x=120, y=400, height=60, width=60)\n button_double_18.place(x=120, y=470, height=60, width=60)\n button_single_18.place(x=120, y=540, height=60, width=60)\n button_triple_17.place(x=180, y=400, height=60, width=60)\n button_double_17.place(x=180, y=470, height=60, width=60)\n button_single_17.place(x=180, y=540, height=60, width=60)\n\n button_triple_16.place(x=240, y=400, height=60, width=60)\n button_double_16.place(x=240, y=470, height=60, width=60)\n button_single_16.place(x=240, y=540, height=60, width=60)\n button_triple_15.place(x=300, y=400, height=60, width=60)\n button_double_15.place(x=300, y=470, height=60, width=60)\n button_single_15.place(x=300, y=540, height=60, width=60)\n button_triple_14.place(x=360, y=400, height=60, width=60)\n button_double_14.place(x=360, y=470, height=60, width=60)\n button_single_14.place(x=360, y=540, height=60, width=60)\n button_triple_13.place(x=420, y=400, height=60, width=60)\n button_double_13.place(x=420, y=470, height=60, width=60)\n button_single_13.place(x=420, y=540, height=60, width=60)\n\n button_triple_12.place(x=480, y=400, height=60, width=60)\n button_double_12.place(x=480, y=470, height=60, width=60)\n button_single_12.place(x=480, y=540, height=60, width=60)\n button_triple_11.place(x=540, y=400, height=60, width=60)\n button_double_11.place(x=540, y=470, height=60, width=60)\n button_single_11.place(x=540, y=540, height=60, width=60)\n button_triple_10.place(x=600, y=400, height=60, width=60)\n button_double_10.place(x=600, y=470, height=60, width=60)\n button_single_10.place(x=600, y=540, height=60, width=60)\n button_triple_9.place(x=660, y=400, height=60, width=60)\n button_double_9.place(x=660, y=470, height=60, width=60)\n button_single_9.place(x=660, y=540, height=60, width=60)\n\n button_triple_8.place(x=720, y=400, height=60, width=60)\n button_double_8.place(x=720, y=470, height=60, width=60)\n button_single_8.place(x=720, y=540, height=60, width=60)\n button_triple_7.place(x=780, y=400, height=60, width=60)\n button_double_7.place(x=780, y=470, height=60, width=60)\n button_single_7.place(x=780, y=540, height=60, width=60)\n button_triple_6.place(x=840, y=400, height=60, width=60)\n button_double_6.place(x=840, y=470, height=60, width=60)\n button_single_6.place(x=840, y=540, height=60, width=60)\n button_triple_5.place(x=900, y=400, height=60, width=60)\n button_double_5.place(x=900, y=470, 
height=60, width=60)\n button_single_5.place(x=900, y=540, height=60, width=60)\n\n enable_throw_buttons2()\n\n\ndef enable_throw_buttons2():\n \"\"\"\n This function enables button (0 to T20)\n :return:\n \"\"\"\n\n button_triple_4.place(x=960, y=400, height=60, width=60)\n button_double_4.place(x=960, y=470, height=60, width=60)\n button_single_4.place(x=960, y=540, height=60, width=60)\n button_triple_3.place(x=1020, y=400, height=60, width=60)\n button_double_3.place(x=1020, y=470, height=60, width=60)\n button_single_3.place(x=1020, y=540, height=60, width=60)\n button_triple_2.place(x=1080, y=400, height=60, width=60)\n button_double_2.place(x=1080, y=470, height=60, width=60)\n button_single_2.place(x=1080, y=540, height=60, width=60)\n button_triple_1.place(x=1140, y=400, height=60, width=60)\n button_double_1.place(x=1140, y=470, height=60, width=60)\n button_single_1.place(x=1140, y=540, height=60, width=60)\n\n button_single_bull.place(x=1210, y=400, height=60, width=60)\n button_bull.place(x=1210, y=470, height=60, width=60)\n button_0.place(x=1210, y=540, height=60, width=60)\n\n\nif __name__ == \"__main__\":\n # configure the window to generate\n gui = Tk()\n gui.geometry('1275x645')\n gui.resizable(width=False, height=False)\n gui.title(\"Darts counter - User Interface\")\n gui.configure(background='grey')\n\n # define the exit - button\n exit_button = Button(gui, text=\"End game\", command=button_exit, fg=\"black\", bg=\"lightgreen\",\n font=('Arial', 10, 'bold'))\n exit_button.place(x=1175, y=0, height=80, width=100)\n\n # label for introducing and welcome\n label_welcome = Label(gui, text=\"Welcome to the darts - counter!\\n\"\n \" Please select the number of players \"\n \"and the starting points.\", bg=\"grey\",\n font=('Arial', 14))\n label_welcome.place(x=337.5, y=20, height=50, width=600)\n\n # label and buttons for selecting the number of players\n label_number_players = Label(gui, text=\"2\", fg=\"black\", font=('Arial', 13, 'bold'))\n\n button_plus_number_players = Button(gui, text=\"+\", fg=\"black\", bg=\"lightgreen\",\n font=('Arial', 13, 'bold'), command=plus_player_numbers)\n button_minus_number_players = Button(gui, text=\"-\", fg=\"black\", bg=\"red\",\n font=('Arial', 13, 'bold'), command=minus_player_numbers)\n\n label_number_players.place(x=587.5, y=200, height=30, width=100)\n button_minus_number_players.place(x=557.5, y=200, height=30, width=30)\n button_plus_number_players.place(x=687.5, y=200, height=30, width=30)\n\n # text input for 4 player names\n input_name1 = Entry(gui, bd=1, font=('Arial', 13))\n input_name2 = Entry(gui, bd=1, font=('Arial', 13))\n input_name3 = Entry(gui, bd=1, font=('Arial', 13))\n input_name4 = Entry(gui, bd=1, font=('Arial', 13))\n\n # default: 2 input fields are enabled\n input_name1.place(x=563.5, y=280, height=30, width=150)\n input_name2.place(x=563.5, y=330, height=30, width=150)\n\n # button for checking names and starting the game\n button_continue = Button(gui, text=\"Continue\", bg=\"lightgreen\", fg=\"black\",\n font=('Arial', 13, 'bold'), command=check_names)\n button_continue.place(x=750, y=120, height=30, width=100)\n\n # label and button for selecting the starting points (301 or 501)\n label_switch_starting_points = Label(gui, text=\"501\", fg=\"black\", font=('Arial', 13, 'bold'))\n button_inc_starting_points = Button(gui, text=\"+\", fg=\"black\",\n bg=\"lightgreen\", font=('Arial', 10),\n command=increment_starting_points)\n button_dec_starting_points = Button(gui, text=\"-\", fg=\"black\", bg=\"red\", 
font=('Arial', 10),\n command=decrement_starting_points)\n\n label_switch_starting_points.place(x=587.5, y=120, height=30, width=100)\n button_dec_starting_points.place(x=557.5, y=120, height=30, width=30)\n button_inc_starting_points.place(x=687.5, y=120, height=30, width=30)\n\n # labels for 4 players\n label_player_1_name = Label(gui, text=\"\", fg=\"black\", font=('Arial', 13, 'bold'))\n label_player_2_name = Label(gui, text=\"\", fg=\"black\", font=('Arial', 13, 'bold'))\n label_player_3_name = Label(gui, text=\"\", fg=\"black\", font=('Arial', 13, 'bold'))\n label_player_4_name = Label(gui, text=\"\", fg=\"black\", font=('Arial', 13, 'bold'))\n\n # labels for game score for 4 players\n label_1_score = Label(gui, text=\"501\", fg=\"black\", bg=\"white\", font=('Arial', 13, 'bold'))\n label_2_score = Label(gui, text=\"501\", fg=\"black\", bg=\"white\", font=('Arial', 13, 'bold'))\n label_3_score = Label(gui, text=\"501\", fg=\"black\", bg=\"white\", font=('Arial', 13, 'bold'))\n label_4_score = Label(gui, text=\"501\", fg=\"black\", bg=\"white\", font=('Arial', 13, 'bold'))\n\n # start - button und stop - button\n button_start_game = Button(gui, text=\"Start\", bd=4, fg=\"black\", bg=\"yellow\", font=('Arial', 11),\n command=start_game)\n # button_start_game.place(x=850, y=90, height=30, width=100)\n\n button_stop_game = Button(gui, text=\"Stop\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 11),\n command=start_game)\n\n # next - button\n next_button = Button(gui, text=\"Next\", bd=4, fg=\"black\", bg=\"yellow\", font=('Arial', 11),\n command=next_button)\n\n # textarea for scoring points which shall be decremented\n label_dart_score = Label(gui, text=\"\", bd=4, font=('Arial', 13))\n button_dart_score = Button(gui, text=\"Add\", bd=4, fg=\"black\",\n bg=\"lightgreen\", font=('Arial', 10),\n command=add)\n\n # label, which will be invisible, for calculating scores at the end\n label_invisible = Label(gui, text=\"\", bd=4, font=('Arial', 13))\n\n # create Label for 1,2 and 3 Darts and count down button\n label_first_dart = Label(gui, text=\"1\", bd=4, bg=\"yellow\", font=('Arial', 13))\n label_second_dart = Label(gui, text=\"2\", bd=4, bg=\"white\", font=('Arial', 13))\n label_third_dart = Label(gui, text=\"3\", bd=4, bg=\"white\", font=('Arial', 13))\n\n zwischen_label = Label(gui, text=\"0\", bd=4, bg=\"yellow\", font=('Arial', 13))\n count_down_button = Button(gui, text=\"Count down\", bd=4, fg=\"black\",\n bg=\"lightgreen\", font=('Arial', 10),\n command=count_down)\n\n # calculate kpis button\n button_create_excel = Button(gui, text=\"Calculate Score\", bd=4, fg=\"black\",\n bg=\"lightblue\", font=('Arial', 11),\n command=create_excel)\n # button_create_excel.place(x=1125, y=250, height=80, width=150)\n\n # create all triple, double and single buttons\n button_triple_20 = Button(gui, text=\"T20\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t20)\n button_single_20 = Button(gui, text=\"S20\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s20)\n button_double_20 = Button(gui, text=\"D20\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d20)\n\n button_triple_19 = Button(gui, text=\"T19\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t19)\n button_single_19 = Button(gui, text=\"S19\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s19)\n button_double_19 = Button(gui, text=\"D19\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d19)\n\n button_triple_18 = Button(gui, text=\"T18\", 
bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t18)\n button_single_18 = Button(gui, text=\"S18\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s18)\n button_double_18 = Button(gui, text=\"D18\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d18)\n\n button_triple_17 = Button(gui, text=\"T17\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t17)\n button_single_17 = Button(gui, text=\"S17\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s17)\n button_double_17 = Button(gui, text=\"D17\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d17)\n\n button_triple_16 = Button(gui, text=\"T16\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t16)\n button_single_16 = Button(gui, text=\"S16\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s16)\n button_double_16 = Button(gui, text=\"D16\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d16)\n button_triple_15 = Button(gui, text=\"T15\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t15)\n button_single_15 = Button(gui, text=\"S15\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s15)\n button_double_15 = Button(gui, text=\"D15\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d15)\n button_triple_14 = Button(gui, text=\"T14\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t14)\n button_single_14 = Button(gui, text=\"S14\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s14)\n button_double_14 = Button(gui, text=\"D14\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d14)\n\n button_triple_13 = Button(gui, text=\"T13\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t13)\n button_single_13 = Button(gui, text=\"S13\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s13)\n button_double_13 = Button(gui, text=\"D13\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d13)\n button_triple_12 = Button(gui, text=\"T12\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t12)\n button_single_12 = Button(gui, text=\"S12\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s12)\n button_double_12 = Button(gui, text=\"D12\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d12)\n button_triple_11 = Button(gui, text=\"T11\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t11)\n button_single_11 = Button(gui, text=\"S11\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s11)\n button_double_11 = Button(gui, text=\"D11\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d11)\n\n button_triple_10 = Button(gui, text=\"T10\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t10)\n button_single_10 = Button(gui, text=\"S10\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s10)\n button_double_10 = Button(gui, text=\"D10\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d10)\n button_triple_9 = Button(gui, text=\"T9\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t_9)\n button_single_9 = Button(gui, text=\"S9\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s_9)\n button_double_9 = Button(gui, text=\"D9\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d_9)\n button_triple_8 = Button(gui, text=\"T8\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t_8)\n button_single_8 = Button(gui, 
text=\"S8\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s_8)\n button_double_8 = Button(gui, text=\"D8\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d_8)\n\n button_triple_7 = Button(gui, text=\"T7\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t_7)\n button_single_7 = Button(gui, text=\"S7\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s_7)\n button_double_7 = Button(gui, text=\"D7\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d_7)\n button_triple_6 = Button(gui, text=\"T6\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t_6)\n button_single_6 = Button(gui, text=\"S6\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s_6)\n button_double_6 = Button(gui, text=\"D6\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d_6)\n button_triple_5 = Button(gui, text=\"T5\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t_5)\n button_single_5 = Button(gui, text=\"S5\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s_5)\n button_double_5 = Button(gui, text=\"D5\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d_5)\n\n button_triple_4 = Button(gui, text=\"T4\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t_4)\n button_single_4 = Button(gui, text=\"S4\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s_4)\n button_double_4 = Button(gui, text=\"D4\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d_4)\n button_triple_3 = Button(gui, text=\"T3\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t_3)\n button_single_3 = Button(gui, text=\"S3\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s_3)\n button_double_3 = Button(gui, text=\"D3\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d_3)\n button_triple_2 = Button(gui, text=\"T2\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=t_2)\n button_single_2 = Button(gui, text=\"S2\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=s_2)\n button_double_2 = Button(gui, text=\"D2\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=d_2)\n\n button_triple_1 = Button(gui, text=\"T1\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=t_1)\n button_single_1 = Button(gui, text=\"S1\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=s_1)\n button_double_1 = Button(gui, text=\"D1\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=d_1)\n button_single_bull = Button(gui, text=\"25\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=single_bull)\n button_bull = Button(gui, text=\"BULL\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 14),\n command=bull)\n button_0 = Button(gui, text=\"0\", bd=4, fg=\"black\", bg=\"green\", font=('Arial', 14),\n command=null)\n\n # reset - button\n reset_button = Button(gui, text=\"Go back\", bd=4, fg=\"black\", bg=\"red\", font=('Arial', 11),\n command=reset)\n\n gui.mainloop()\n","repo_name":"Manu100001/Python-Darts-GUI","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":84237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74859760328","text":"from odoo import api, fields, models\r\nfrom datetime import datetime\r\n\r\nclass CashCancel(models.TransientModel):\r\n _name = \"cash_managment.cancel_cash\"\r\n _description = \"Reject Cash Request\"\r\n 
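    # Editor's note (illustrative, not in the original module): a TransientModel
    # wizard like this one is normally opened from the selected
    # cash_managment.request records, so Odoo passes their ids through the
    # context and cash_cancel() below reads them back. A hypothetical call:
    #
    #   wiz = env['cash_managment.cancel_cash'].with_context(
    #       active_ids=requests.ids).create({'cancel_comment': 'Duplicate'})
    #   wiz.cash_cancel()  # sets state='cancel' and copies it onto each request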
_rec_name = 'state'\r\n\r\n \r\n state = fields.Selection([('new','New'),('validate','Validated'),('cancel','Canceled'),('reject','Reject'),('approve','Approved'),('closed','Closed'),('implement','Implement')],string=\"Status\", required=True, default=\"new\")\r\n cancel_comment = fields.Text(string=\"Comment\")\r\n cancel_date = fields.Datetime(string='Cancel Date', default=lambda self: fields.datetime.now())\r\n canceled_by = fields.Many2one('res.users','Canceled By',default=lambda self: self.env.user)\r\n \r\n @api.multi\r\n def cash_cancel(self):\r\n self.write({'state': 'cancel'})\r\n cash = self.env['cash_managment.request'].browse(self._context.get('active_ids'))\r\n for req in cash:\r\n req.state = self.state\r\n req.cancel_comment = self.cancel_comment\r\n req.cancel_date = self.cancel_date\r\n req.canceled_by = self.canceled_by\r\n\r\n template_id = self.env.ref('cash_managment.email_template_cancel_request').id\r\n template = self.env['mail.template'].browse(template_id)\r\n template.send_mail(req.id,force_send=True)\r\n \r\n","repo_name":"kluz116/cash_managment","sub_path":"wizard/cash_cancel_request.py","file_name":"cash_cancel_request.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42876749287","text":"from django.contrib import admin\nfrom django.urls import path\nfrom home import views\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.ind,name='ind'),\n path('cup',views.cups,name='cup'),\n path('cons',views.cons,name='cons'),\n path('contact/',views.contact,name='contact'),\n path('contact2/',views.contact2,name='contact'),\n path('family',views.family,name='family'),\n path('about',views.about,name='about'),\n # path('',views.,name=''),\n\n]\n","repo_name":"Parthiv586/ice-cream","sub_path":"Hello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"27183305516","text":"from typing import List\nfrom pyspark import Row\n\nfrom .base import *\n\n\nclass MeanShiftModel(ClusteringModel):\n def __init__(self, jvm_model):\n super().__init__(jvm_model)\n\n @property\n def means(self) -> List[Row]:\n return self._jvm_model.means()\n\n\nclass MeanShift(ClusteringAlgo):\n \"\"\"\n Mean Shift is a clustering algorithm commonly used in machine learning and computer vision. It works by\n iteratively shifting the centroids of the clusters towards the mean of the points in their local neighborhood\n until convergence.\n\n The algorithm starts by initializing each data point as a cluster centroid. Then, for each centroid, it computes\n the mean of the points in its neighborhood within a specified bandwidth. The centroid is then shifted towards\n this mean, and the process repeats until convergence.\n\n The resulting clusters are defined by their mode and can have arbitrary shapes. Mean Shift is often used for\n datasets with non-uniform density or complex shapes, and can handle noise and outliers effectively. 
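    A minimal NumPy sketch of that loop (editor's illustration only: the real
    implementation is the JVM algorithm wrapped by this class, and all names
    below are hypothetical):

        import numpy as np

        def mean_shift_step(points, centroids, bandwidth):
            # move each centroid to the mean of the points within `bandwidth`
            moved = []
            for c in centroids:
                near = points[np.linalg.norm(points - c, axis=1) <= bandwidth]
                moved.append(near.mean(axis=0) if len(near) else c)
            return np.array(moved)

        points = np.random.rand(200, 2)
        centroids = points.copy()          # every point starts as a candidate
        for _ in range(100):               # max_iterations
            new = mean_shift_step(points, centroids, bandwidth=0.2)
            if np.abs(new - centroids).max() < 1e-7:   # convergence threshold
                break
            centroids = new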
It requires\n one main parameter: the bandwidth, which determines the size of the neighborhood used to compute the mean.\n \"\"\"\n\n def __init__(\n self, *,\n radius: float,\n max_clusters: int,\n max_iterations: int,\n initial: int,\n convergence: float = 1e-7,\n seed: Optional[int] = None\n ):\n \"\"\"\n :param radius: mean-object's neighbourhood radius\n :param max_clusters: upper bound for number of clusters\n :param max_iterations: maximum number of steps after which the stop condition is met\n :param initial: number of initial candidates. Should fit into driver memory\n :param convergence: the minimum distance considered significant.\n If means shifted less than param, then stop condition is met\n :param seed: random seed for picking initial candidates\n \"\"\"\n super().__init__(\n radius=radius,\n max_clusters=max_clusters,\n max_iterations=max_iterations,\n initial=initial,\n convergence=convergence,\n seed=ClusteringAlgo.make_seed(seed)\n )\n\n def fit(self, sparkling_df: SparklingDF) -> MeanShiftModel:\n jvm_model = self._jvm_algo.fit(sparkling_df.jdf, sparkling_df.dist)\n return MeanShiftModel(jvm_model)\n\n def fit_predict_with_model(self, sparkling_df: SparklingDF) -> Tuple[MeanShiftModel, SparklingDF]:\n model = self.fit(sparkling_df)\n return model, model.predict(sparkling_df)\n\n def _jvm_builder(self, jvm, **kwargs):\n return jvm.ru.ifmo.rain.algorithms.meanshift.MeanShift(\n kwargs['radius'],\n kwargs['max_clusters'],\n kwargs['max_iterations'],\n kwargs['initial'],\n kwargs['convergence'],\n kwargs['seed']\n )\n\n\nclass MeanShiftDefaults:\n @staticmethod\n def initial(sparkling_df: SparklingDF) -> int:\n return 2 * Defaults.max_clusters(sparkling_df)\n\n\nclass MeanShiftConf(AlgoConf):\n def __init__(\n self, *,\n radius=(1e-5, 0.61),\n max_clusters=Defaults.max_clusters,\n max_iterations=Defaults.max_iterations,\n initial=MeanShiftDefaults.initial,\n convergence=1e-7\n ):\n super().__init__(\n MeanShift,\n radius=radius,\n max_clusters=max_clusters,\n initial=initial,\n max_iterations=max_iterations,\n convergence=convergence\n )\n","repo_name":"mishiuss/sparkling","sub_path":"sparkling/algorithms/meanshift.py","file_name":"meanshift.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70250538888","text":"import sys\nimport argparse\nimport random\nimport copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport os\nimport glob\nimport pdb\n\nimport dataloader\nimport modules\nimport scipy.stats as stats\nimport math\nimport time\n\ntorch.manual_seed(0)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nfrom PIL import Image\n\nout = open('prev_outputs.txt', 'a')\n\ndef print(*args):\n s = ' '.join([str(x) for x in args])\n if s[-1]!='\\n':\n s += '\\n'\n sys.stdout.write(s)\n sys.stdout.flush()\n out.write(s)\n out.flush()\n\nprint('-'*50, '\\n\\n')\nprint(time.strftime(\"%d %b %Y %H:%M:%S\", time.localtime()))\n\n\ndef track_training_loss(model, train_x, train_y, bmm_model, epoch):\n model.eval()\n\n predictions = torch.Tensor()\n\n with torch.no_grad():\n all_losses = torch.Tensor()\n\n for x, y in zip(train_x, train_y):\n data, target = Variable(x), Variable(y)\n clean_output, noisy_output = model(data)\n prediction = F.log_softmax(clean_output, dim=1)\n idx_loss = 
F.nll_loss(prediction, target, reduction='none').detach_()\n all_losses = torch.cat((all_losses, idx_loss.cpu()))\n predictions = torch.cat((predictions, prediction.cpu()))\n \n model.train()\n\n loss_tr = all_losses.data.numpy()\n batch_losses = all_losses.clone()\n \n # outliers detection\n max_perc = np.percentile(loss_tr, 98)\n min_perc = np.percentile(loss_tr, 2)\n loss_tr = loss_tr[(loss_tr<=max_perc) & (loss_tr>=min_perc)]\n bmm_model_maxLoss = torch.FloatTensor([max_perc])\n bmm_model_minLoss = torch.FloatTensor([min_perc]) + 10e-6\n loss_tr = (loss_tr - bmm_model_minLoss.data.cpu().numpy()) / (bmm_model_maxLoss.data.cpu().numpy() - bmm_model_minLoss.data.cpu().numpy() + 1e-6)\n loss_tr[loss_tr>=1] = 1-10e-4\n loss_tr[loss_tr <= 0] = 10e-4\n \n #FIT BMM on loss_tr\n bmm_model = BetaMixture1D(max_iters=30)\n bmm_model.fit(loss_tr)\n bmm_model.create_lookup(1)\n \n if epoch//10==0:\n epoch = \"0\"+str(epoch)\n else:\n epoch = str(epoch)\n \n global results_dir\n \n folder = os.path.join(results_dir)\n if not os.path.isdir(folder):\n os.mkdir(folder)\n folder = os.path.join(results_dir,'pngs')\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n fname = os.path.join(folder,'bmm_%s.png'%epoch)\n \n bmm_model.plot('Epoch %s'%epoch, fname)\n \n #Get probabilities from BMM\n batch_losses = (batch_losses - bmm_model_minLoss) / (bmm_model_maxLoss - bmm_model_minLoss + 1e-6)\n batch_losses[batch_losses >= 1] = 1-10e-4\n batch_losses[batch_losses <= 0] = 10e-4\n B = bmm_model.look_lookup(batch_losses, bmm_model_maxLoss, bmm_model_minLoss)\n\n _, predictions = torch.max(predictions, axis=1)\n\n return bmm_model, torch.FloatTensor(B).cuda(), predictions.cuda()\n\n\nclass BetaMixture1D(object):\n def __init__(self, max_iters=10,\n alphas_init=[1, 2],\n betas_init=[2, 1],\n weights_init=[0.5, 0.5]):\n self.alphas = np.array(alphas_init, dtype=np.float64)\n self.betas = np.array(betas_init, dtype=np.float64)\n self.weight = np.array(weights_init, dtype=np.float64)\n self.max_iters = max_iters\n self.lookup = np.zeros(100, dtype=np.float64)\n self.lookup_resolution = 100\n self.lookup_loss = np.zeros(100, dtype=np.float64)\n self.eps_nan = 1e-12\n\n def likelihood(self, x, y):\n return stats.beta.pdf(x, self.alphas[y], self.betas[y])\n\n def weighted_likelihood(self, x, y):\n return self.weight[y] * self.likelihood(x, y)\n\n def probability(self, x):\n return sum(self.weighted_likelihood(x, y) for y in range(2))\n\n def posterior(self, x, y):\n return self.weighted_likelihood(x, y) / (self.probability(x) + self.eps_nan)\n\n def responsibilities(self, x):\n r = np.array([self.weighted_likelihood(x, i) for i in range(2)])\n # there are ~200 samples below that value\n r[r <= self.eps_nan] = self.eps_nan\n r /= r.sum(axis=0)\n return r\n\n def score_samples(self, x):\n return -np.log(self.probability(x))\n\n def fit(self, x):\n\n def fit_beta_weighted(x, w):\n \n def weighted_mean(x, w):\n return np.sum(w * x) / np.sum(w)\n\n x_bar = weighted_mean(x, w)\n s2 = weighted_mean((x - x_bar)**2, w)\n alpha = x_bar * ((x_bar * (1 - x_bar)) / s2 - 1)\n beta = alpha * (1 - x_bar) /x_bar\n return alpha, beta\n\n\n x = np.copy(x)\n\n # EM on beta distributions unsable with x == 0 or 1\n eps = 1e-4\n x[x >= 1 - eps] = 1 - eps\n x[x <= eps] = eps\n\n for i in range(self.max_iters):\n\n # E-step\n r = self.responsibilities(x)\n\n # M-step\n self.alphas[0], self.betas[0] = fit_beta_weighted(x, r[0])\n self.alphas[1], self.betas[1] = fit_beta_weighted(x, r[1])\n self.weight = r.sum(axis=1)\n self.weight /= 
self.weight.sum()\n print(\"Fitted BMM Means\")\n print(self.alphas[0]/(self.alphas[0]+self.betas[0]), self.alphas[1]/(self.alphas[1]+self.betas[1]))\n print(-self.alphas[0]/(self.alphas[0]+self.betas[0]) + self.alphas[1]/(self.alphas[1]+self.betas[1]))\n return self\n\n def predict(self, x):\n return self.posterior(x, 1) > 0.5\n\n def create_lookup(self, y):\n x_l = np.linspace(0+self.eps_nan, 1-self.eps_nan, self.lookup_resolution)\n lookup_t = self.posterior(x_l, y)\n lookup_t[np.argmax(lookup_t):] = lookup_t.max()\n self.lookup = lookup_t\n self.lookup_loss = x_l # I do not use this one at the end\n\n def look_lookup(self, x, loss_max, loss_min):\n x_i = x.clone().cpu().numpy()\n x_i = np.array((self.lookup_resolution * x_i).astype(int))\n x_i[x_i < 0] = 0\n x_i[x_i == self.lookup_resolution] = self.lookup_resolution - 1\n return self.lookup[x_i]\n\n def plot(self, title=\"BMM\", save_path=None):\n x = np.linspace(0, 1, 100)\n plt.figure()\n plt.plot(x, self.weighted_likelihood(x, 0), label='negative')\n plt.plot(x, self.weighted_likelihood(x, 1), label='positive')\n # plt.plot(x, self.probability(x), lw=2, label='mixture')\n plt.title(title)\n plt.legend()\n if save_path:\n plt.savefig(save_path)\n\n def __str__(self):\n return 'BetaMixture1D(w={}, a={}, b={})'.format(self.weight, self.alphas, self.betas)\n\n\nclass Model(nn.Module):\n def __init__(self, embedding, hidden_size=800, depth=6, dropout=0.3, cnn=False, nclasses=2):\n super(Model, self).__init__()\n self.cnn = cnn\n self.drop = nn.Dropout(dropout)\n self.emb_layer = modules.EmbeddingLayer(\n embs = dataloader.load_embedding(embedding)\n )\n self.word2id = self.emb_layer.word2id\n\n if cnn:\n self.encoder = modules.CNN_Text(\n self.emb_layer.n_d,\n widths = [3,4,5],\n filters=hidden_size\n )\n self.d_out = 3*hidden_size\n else:\n self.encoder = nn.LSTM(\n self.emb_layer.n_d,\n hidden_size//2,\n depth,\n dropout = dropout,\n # batch_first=True,\n bidirectional=True\n )\n self.d_out = hidden_size\n self.out = nn.Linear(self.d_out, nclasses)\n\n def forward(self, input):\n if self.cnn:\n input = input.t()\n #print(input.size())\n emb = self.emb_layer(input)\n emb = self.drop(emb)\n #print(emb.size())\n #exit()\n \n if not self.cnn:\n self.encoder.flatten_parameters() \n\n if self.cnn:\n output = self.encoder(emb)\n else:\n output, hidden = self.encoder(emb)\n output = torch.max(output, dim=0)[0].squeeze()\n #print(output.size())\n #exit()\n output = self.drop(output)\n output = self.out(output)\n\n return output, output\n\n def text_pred(self, text, batch_size=32):\n batches_x = dataloader.create_batches_x(\n text,\n batch_size,\n self.word2id\n )\n outs = []\n with torch.no_grad():\n for x in batches_x:\n x = Variable(x)\n if self.cnn:\n x = x.t()\n emb = self.emb_layer(x)\n\n if self.cnn:\n output = self.encoder(emb)\n else:\n output, hidden = self.encoder(emb)\n output = torch.max(output, dim=0)[0]\n\n outs.append(F.softmax(self.out(output), dim=-1))\n\n return torch.cat(outs, dim=0)\n \n def get_n_params(self):\n pp=0\n for p in list(self.parameters()):\n nn=1\n for s in list(p.size()):\n nn = nn*s\n pp += nn\n return pp\n\n\nclass Feedforward(torch.nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Feedforward, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc3 = torch.nn.Linear(self.hidden_size, self.output_size)\n \n def 
forward(self, x):\n hidden = self.fc1(x)\n relu = self.relu(hidden)\n output = self.fc3(relu)\n return output\n\n\nclass Model_NM(Model):\n\n def __init__(self, embedding, hidden_size=600, depth=2, dropout=0.3, cnn=False, nclasses=2):\n super(Model_NM, self).__init__(embedding, hidden_size, depth, dropout, cnn, nclasses)\n\n #NM_inp_size = nclasses + self.d_out\n NM_inp_size = nclasses\n NM_hidden_size = int(NM_inp_size*4)\n\n self.NM = Feedforward(NM_inp_size+self.d_out, NM_hidden_size, nclasses)\n #self.NM = Feedforward(NM_inp_size, NM_hidden_size, nclasses)\n\n # print(NM_inp_size, NM_hidden_size)\n\n\n def forward(self, input):\n if self.cnn:\n input = input.t()\n emb = self.emb_layer(input)\n emb = self.drop(emb)\n\n if not self.cnn:\n self.encoder.flatten_parameters() \n\n if self.cnn:\n output = self.encoder(emb)\n else:\n output, hidden = self.encoder(emb)\n output = torch.max(output, dim=0)[0].squeeze()\n\n output = self.drop(output)\n clean_output = self.out(output)\n #pdb.set_trace()\n\n noisy_output = self.NM(torch.cat((output, clean_output), dim=1))\n #noisy_output = self.NM(clean_output)\n \n return clean_output, noisy_output\n\n\ndef eval_model(niter, model, input_x, input_y, noisy=False):\n model.eval()\n correct = 0.0\n cnt = 0.\n\n with torch.no_grad():\n for x, y in zip(input_x, input_y):\n x, y = Variable(x), Variable(y)\n clean_output, noisy_output = model(x)\n output = noisy_output if noisy else clean_output\n pred = output.data.max(1)[1]\n correct += pred.eq(y.data).cpu().sum()\n cnt += y.numel()\n model.train()\n\n return correct.item()/cnt\n\ndef eval_model_noisy(model, input_x, input_y, input_noise):\n model.eval()\n correct = 0.0\n cnt = 0. \n with torch.no_grad():\n for x, y, z in zip(input_x, input_y, input_noise):\n x, y, z = Variable(x), Variable(y), z.cpu().numpy()\n clean_output, _ = model(x)\n pred = clean_output.cpu().data.max(1)[1]\n #print(pred, y,z)\n correct += np.sum(pred.eq(y.cpu().data).numpy()*z)\n cnt += np.sum(z)\n model.train()\n #print(correct, cnt, correct/cnt)\n\n return correct/cnt\n\n\n\ndef train_model(epoch, model, optimizer, train_x, train_y, dev_x, dev_y,\n best_test, save_path, bmm_model, nclasses, train_noise=None, train_orig_labels=None, prob=None, preds=None, warmup=None, round_prob=True, beta=10):\n\n if bmm_model is not None:\n if epoch == warmup:\n print('Fitting BMM')\n bmm_model, prob, preds = track_training_loss(model, train_x, train_y, bmm_model, epoch)\n prob2 = torch.round(prob)\n #pdb.set_trace()\n count_var=0\n correct=0\n correct_noisy=0\n print(np.sum(prob.cpu().numpy()),prob.size())\n for z in train_noise:\n p = prob2[count_var:count_var+train_x[0].size()[1]]\n count_var+= train_x[0].size()[1]\n correct += np.sum(p.data.eq(z.data).cpu().numpy())\n correct_noisy += np.sum(p.data.eq(z.data).cpu().numpy()*z.cpu().numpy())\n print(\"BMM correctly fits %f %% of all training points.\"%(correct/count_var))\n print(\"BMM correctly fits %f %% of noisy training points.\"%(correct_noisy/count_var))\n \n# bootstrap_acc = eval_model_noisy(model, train_x, train_orig_labels, train_noise)\n# print(\"Noisy Points accuracy on original label: %f\"%bootstrap_acc)\n# bootstrap_acc = eval_model_noisy(model, train_x, train_y, train_noise)\n# print(\"Noisy Points accuracy on noisy label: %f\"%bootstrap_acc)\n \n \n model.train()\n niter = epoch*len(train_x)\n criterion = nn.CrossEntropyLoss()\n criterion2 = nn.CrossEntropyLoss(reduce=False)\n kl_criterion = nn.KLDivLoss(reduction=\"none\")\n softmax_criterion= nn.Softmax()\n 
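    # Editor's sketch (not part of the original script) of how the fitted BMM
    # is consumed here: track_training_loss() normalises the per-sample losses
    # and fits bmm_model, so prob[i] approximates P(sample i is noisy), and
    # contrastive_loss below down-weights the clean cross-entropy by (1 - prob).
    # With synthetic losses (all numbers made up):
    #
    #   bmm = BetaMixture1D(max_iters=10)
    #   losses = np.concatenate([np.random.beta(2, 8, 900),   # clean: low loss
    #                            np.random.beta(8, 2, 100)])  # noisy: high loss
    #   bmm.fit(losses)
    #   p_noisy = bmm.posterior(losses, 1)   # plays the role of `prob`
    #   weighted_ce = (1 - p_noisy) * per_sample_ce  # noisy samples ~ ignored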
log_softmax_criterion= nn.LogSoftmax()\n #pdb.set_trace()\n\n\n def contrastive_loss(clean_output, noisy_output, prob, y, epoch, preds, true_noise):\n\n # contrastive_loss = torch.sum((1-2*prob)*hellinger_loss)\n\n #kl_loss = torch.sum(kl_criterion(log_softmax_criterion(noisy_output), softmax_criterion(clean_output)),axis=1)\n #contrastive_loss = torch.sum((1-2*prob)*kl_loss) # - prob*torch.clamp(kl_loss, min=0, max=1))\n #contrastive_loss = torch.sum((1-prob)*kl_loss - prob*torch.clamp(kl_loss, min=0, max=1))\n \n #clean_softmax = softmax_criterion(clean_output)\n #noisy_softmax = softmax_criterion(noisy_output)\n #hellinger_loss = torch.sum(torch.sqrt(((torch.sqrt(clean_softmax) - torch.sqrt(noisy_softmax)) ** 2) / 2),\n # axis=1)\n \n #contrastive_loss = torch.sum((1 - true_noise) * criterion2(clean_output, y))\n #kl_loss = torch.sum(kl_criterion(log_softmax_criterion(noisy_output), softmax_criterion(clean_output)),axis=1)\n #contrastive_loss += torch.sum((1-prob)*kl_loss - prob*torch.clamp(kl_loss, min=0, max=5))\n #pdb.set_trace()\n #contrastive_loss += torch.sum((true_noise) * criterion2(clean_output, preds))\n \n contrastive_loss = torch.sum((1-prob)*criterion2(clean_output, y))\n #_, preds = torch.max(clean_output, axis=1)\n #contrastive_loss += torch.sum((prob)*criterion2(clean_output, preds))\n return contrastive_loss\n\n '''\n # uniform prior\n uniform_output=(1/(nclasses-1))*torch.ones(clean_output.size()).cuda()\n a= torch.linspace(0,batch_size-1,steps=batch_size).long()\n uniform_output[a,y]=0\n kl_loss = torch.sum(kl_criterion(log_softmax_criterion(clean_output),uniform_output),axis=1)\n contrastive_loss=torch.sum((1-prob)*criterion2(clean_output, y) + prob*kl_loss)\n '''\n\n element_criterion = nn.CrossEntropyLoss(reduce=False)\n element_loss = []\n cnt=count_var=0\n \n #if not epoch+1== warmup:\n # beta = 1/(epoch-warmup+1)\n #lamda = 0.2\n\n total_contrast_loss=total_cross_entropy_loss=total_loss=0\n prob2 = torch.round(prob) if epoch>=warmup and round_prob else prob\n #pdb.set_trace()\n for x, y, z in zip(train_x, train_y, train_noise):\n niter += 1\n cnt += 1\n model.zero_grad()\n x, y , z= Variable(x), Variable(y), Variable(z)\n clean_output, noisy_output = model(x)\n\n if bmm_model is not None:\n if epoch < warmup:\n cross_entropy_loss = criterion(clean_output, y)\n loss = cross_entropy_loss\n else:\n p = prob2[count_var:count_var+x.size()[1]]\n preds_batch = preds[count_var:count_var+x.size()[1]]\n #p = z\n count_var+=x.size()[1]\n p = Variable(p)\n\n cross_entropy_loss = criterion(noisy_output, y)\n #cross_entropy_loss += lamda* sum([p.pow(2).sum() for p in model.NM.parameters()]) #regularization loss\n contrast_loss = contrastive_loss(clean_output, noisy_output, p, y, epoch, preds_batch, z)\n loss = cross_entropy_loss + beta*contrast_loss\n else:\n cross_entropy_loss = criterion(clean_output, y)\n loss = cross_entropy_loss\n\n\n element_loss.extend(element_criterion(clean_output,y).cpu().detach().numpy().tolist())\n loss.backward()\n optimizer.step()\n \n if bmm_model is not None and epoch >= warmup:\n total_contrast_loss += contrast_loss.item()\n total_cross_entropy_loss += cross_entropy_loss.item()\n total_loss += loss.item()\n \n\n test_acc = eval_model(niter, model, dev_x, dev_y, noisy=True)\n\n print(\"Epoch={} train_loss={:.6f} contrast_loss={:.6f} CE_loss={:.6f} dev_acc={:.6f}\\n\".format(\n epoch,\n total_loss, total_contrast_loss, total_cross_entropy_loss,\n test_acc\n ))\n\n if save_path:\n torch.save(model.state_dict(), 
os.path.join(save_path,\"best_model.bin\"))\n return test_acc, element_loss, prob, preds, bmm_model\n\n\ndef save_data(data, labels, path, type='train'):\n with open(os.path.join(path, type+'.txt'), 'w') as ofile:\n for text, label in zip(data, labels):\n ofile.write('{} {}\\n'.format(label, ' '.join(text)))\n\n\ndef create_gif(frames, fname='hist.gif'):\n frames = [Image.open(i) for i in frames]\n folder = os.path.join(results_dir)\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n fname = os.path.join(folder,fname)\n frames[0].save(fname, format='GIF',\n append_images=frames[1:],\n save_all=True,\n duration=400, loop=0)\n \n \ndef plot_histogram(losses, labels, epoch, bmm_model):\n clean = [x for x,y in zip(losses, labels) if y==0]\n noisy = [x for x,y in zip(losses, labels) if y==1]\n \n# plt.figure()\n# hist, bins = np.histogram(clean, bins=50)\n# width = 0.7 * (bins[1] - bins[0])\n# center = (bins[:-1] + bins[1:]) / 2\n# plt.bar(center, hist, align='center', width=width)\n\n# hist, bins = np.histogram(noisy, bins=50)\n# width = 0.7 * (bins[1] - bins[0])\n# center = (bins[:-1] + bins[1:]) / 2\n# plt.bar(center, hist, align='center', width=width, color='red')\n# plt.title('Epoch %d'%(int(epoch)+1))\n\n fig, (ax1, ax2, ax3) = plt.subplots(1,3,figsize=(14,6))\n \n hist, bins = np.histogram(clean, bins=50)\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n ax1.bar(center, hist, align='center', width=width)\n\n hist, bins = np.histogram(noisy, bins=50)\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n ax1.bar(center, hist, align='center', width=width, color='red')\n ax1.set_title('Epoch %d'%(int(epoch)+1))\n \n hist, bins = np.histogram(losses, bins=50)\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n ax2.bar(center, hist, align='center', width=width, color='green')\n ax2.set_title('Epoch %d'%(int(epoch)+1))\n \n if bmm_model is not None:\n x = np.linspace(0, 1, 100)\n ax3.plot(x, bmm_model.weighted_likelihood(x, 0), label='negative')\n ax3.plot(x, bmm_model.weighted_likelihood(x, 1), label='positive')\n# ax3.plot(x, bmm_model.probability(x), lw=2, label='mixture')\n ax3.legend()\n ax3.set_title('Epoch %d'%(int(epoch)+1))\n\n\n if epoch//10 == 0:\n epoch = \"0\"+str(int(epoch)+1)\n else:\n epoch = str(int(epoch)+1)\n\n# plt.title('Epoch %s'%epoch)\n\n folder = os.path.join(results_dir)\n if not os.path.isdir(folder):\n os.mkdir(folder)\n folder = os.path.join(results_dir, 'pngs')\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n fname = os.path.join(folder, 'hist_%s.png'%epoch)\n\n #plt.show()\n plt.savefig(fname)\n plt.clf()\n\n return fname\n\n\ndef main(args):\n\n folder = os.path.join(results_dir)\n if not os.path.isdir(folder):\n os.mkdir(folder)\n folder = os.path.join(results_dir,'pngs')\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n for file in glob.glob(os.path.join(folder, '*.png')):\n os.remove(file)\n\n train_x, train_y, train_noise, train_orig_labels = dataloader.read_corpus(os.path.join(args.dataset,\"train.tsv\"), shuffle=True, get_noise=True)\n print(len(train_x), len(train_y), len(train_noise), len(train_orig_labels))\n # exit()\n dev_x, dev_y, dev_noise, dev_orig_labels = dataloader.read_corpus(os.path.join(args.dataset,\"dev.tsv\"), shuffle=True, get_noise=True)\n test_x, test_y = dataloader.read_corpus(os.path.join(args.dataset,\"test.tsv\"), shuffle=True)\n\n #print(len(train_x), len(dev_x), len(test_x))\n # 4949 503 500\n # 112000 8000 7600\n# pdb.set_trace()\n nclasses = 
max(train_y) + 1\n print(\"NUM CLASSES: \"+ str(nclasses))\n \n if args.baseline:\n model = Model(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses=nclasses).cuda()\n bmm_model = None\n else:\n model = Model_NM(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses=nclasses).cuda()\n bmm_model = BetaMixture1D(max_iters=10)\n \n params = filter(lambda x: x.requires_grad, list(model.parameters()))\n \n print(model)\n print('Number of parameters:',model.get_n_params())\n\n optimizer = optim.Adam(params, lr = args.lr)\n\n train_x, train_y, train_noise_batches, train_orig_labels_batches = dataloader.create_batches_xyz(\n train_x, train_y, train_noise, train_orig_labels,\n args.batch_size,\n model.word2id,\n )\n print(f\"Noise = {1.0*len(list(filter(lambda x: x == 1, train_noise)))/len(train_noise)}\")\n #pdb.set_trace()\n #print(train_y[0], train_noise_batches[0], train_orig_labels_batches[0])\n #exit()\n \n dev_x, dev_y = dataloader.create_batches(\n dev_x, dev_y,\n args.batch_size,\n model.word2id,\n )\n \n test_x, test_y = dataloader.create_batches(\n test_x, test_y,\n args.batch_size,\n model.word2id,\n )\n\n curr_best_dev=0\n early_stopping=0\n \n frames = []\n bmm_frames = []\n folder = os.path.join(results_dir)\n if not os.path.isdir(folder):\n os.mkdir(folder)\n folder = os.path.join(results_dir,'pngs')\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n prob = None\n preds = None\n for epoch in range(args.max_epoch):\n curr_dev, element_loss, prob, preds, bmm_model = train_model(epoch, model, optimizer,\n train_x, train_y,\n dev_x, dev_y,\n curr_best_dev, args.save_path,\n bmm_model, nclasses, train_noise_batches, train_orig_labels_batches, prob=prob, preds=preds, warmup=args.warmup,\n round_prob=args.round_prob, beta=args.beta)\n\n if curr_best_dev <= curr_dev:\n #print('New best model found', curr_best_dev, curr_dev, curr_best_dev<=curr_dev)\n curr_best_dev=curr_dev\n early_stopping=0\n best_model=copy.deepcopy(model)\n else:\n early_stopping+=1\n \n if early_stopping == 100:\n break\n \n if args.lr_decay>0:\n optimizer.param_groups[0]['lr'] *= args.lr_decay\n\n frames.append(plot_histogram(element_loss, train_noise, epoch, bmm_model))\n create_gif(frames)\n \n test_acc = eval_model(args.max_epoch, best_model, test_x, test_y, noisy=False)\n print(\"Best Model Test Acc.: {:.6f}\\n\".format(\n test_acc\n ))\n\n test_acc = eval_model(args.max_epoch, model, test_x, test_y, noisy=False)\n print(\"Latest Model Test Acc.: {:.6f}\\n\\n\".format(\n test_acc\n ))\n\n if bmm_model is not None:\n if epoch//10==0:\n e = \"0\"+str(epoch)\n else:\n e = str(epoch)\n bmm_frames.append(os.path.join(folder,'bmm_%s.png'%e))\n bmm_frames = sorted(glob.glob(os.path.join(folder, 'bmm*.png')))\n try:\n create_gif(bmm_frames, fname='bmm.gif')\n except:\n print('Couldnt create GIF of BMM')\n\n if args.save_path:\n torch.save(model.state_dict(), os.path.join(args.save_path, \"last_model.bin\"))\n\n for file in glob.glob(os.path.join(folder, '*.png')):\n os.remove(file)\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')\n argparser.add_argument(\"--cnn\", action='store_true', help=\"whether to use cnn\")\n argparser.add_argument(\"--lstm\", action='store_true', help=\"whether to use lstm\")\n argparser.add_argument(\"--dataset\", type=str, default=\"yelp\", help=\"which dataset\")\n argparser.add_argument(\"--embedding\", type=str, default=\"embeddings/glove.txt\", help=\"word vectors\")\n 
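    # Example invocation (editor's addition; every flag is defined in this
    # argparse block, the dataset path and output directory are illustrative):
    #
    #   python train_classifier.py --dataset data/ag_news --cnn --noise 0.2 \
    #       --warmup 6 --batch_size 32 --max_epoch 50 \
    #       --save_path checkpoints --result result_random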
argparser.add_argument(\"--batch_size\", \"--batch\", type=int, default=32)\n argparser.add_argument(\"--max_epoch\", type=int, default=50)\n argparser.add_argument(\"--d\", type=int, default=300)\n argparser.add_argument(\"--warmup\", type=int, default=100)\n argparser.add_argument(\"--dropout\", type=float, default=0.3)\n argparser.add_argument(\"--depth\", type=int, default=2)\n argparser.add_argument(\"--lr\", type=float, default=0.001)\n argparser.add_argument(\"--lr_decay\", type=float, default=0)\n argparser.add_argument(\"--cv\", type=int, default=0)\n argparser.add_argument(\"--save_path\", type=str, default=None)\n argparser.add_argument(\"--save_data_split\", action='store_true', help=\"whether to save train/test split\")\n argparser.add_argument(\"--gpu_id\", type=int, default=0)\n argparser.add_argument(\"--baseline\", action='store_true', default=False)\n argparser.add_argument(\"--round_prob\", type=int, default=1)\n argparser.add_argument('--noise', type=float)\n argparser.add_argument('--beta', type=float, default=10)\n argparser.add_argument(\"--result\", type=str, default=\"result_random\", help=\"which result directory to put in\")\n\n args = argparser.parse_args()\n\n if args.dataset == \"data/ag_news\":\n args.beta = args.noise*20 if args.noise != 0.0 else 2.0\n #args.warmup = 6\n\n # args.save_path = os.path.join(args.save_path, args.dataset)\n #args.beta = 2 if args.noise==0.0 else args.noise*20 # remove this line\n old_out = sys.stdout\n\n class St_ampe_dOut:\n \"\"\"Stamped stdout.\"\"\"\n def __init__(self, f):\n self.f = f\n self.nl = True\n\n def write(self, x):\n \"\"\"Write function overloaded.\"\"\"\n if x == '\\n':\n old_out.write(x)\n self.nl = True\n elif self.nl:\n old_out.write('%s'%(x))\n self.nl = False\n else:\n old_out.write(x)\n try:\n self.f.write(str(x))\n self.f.flush()\n except:\n pass\n old_out.flush()\n\n def flush(self):\n try:\n self.f.flush()\n except:\n pass\n old_out.flush()\n \n global results_dir\n b = 'baseline' if args.baseline else 'ours'\n if not os.path.exists(args.result):\n os.mkdir(args.result)\n results_dir = os.path.join(args.result, args.dataset.split('/')[-1])\n if not os.path.exists(results_dir):\n os.mkdir(results_dir)\n results_dir = os.path.join(results_dir, f\"results_{args.noise}_{args.beta}_{args.round_prob}_{args.warmup}\")\n if not os.path.exists(results_dir):\n os.mkdir(results_dir)\n sys.stdout = St_ampe_dOut(open(os.path.join(results_dir,'output.txt'), 'w'))\n \n\n\n\n \n print(args)\n torch.cuda.set_device(args.gpu_id)\n main(args)\n","repo_name":"thumbe3/label-noise-nlp","sub_path":"train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":28527,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"19173642126","text":"from math import sqrt\n\n##################################################################################################################################################################\n\ndef PCC_calculator(abund_thresh,PCC_abund_thresh,pro_mz_tol,leading_bio_scan,leading_syn_scan):\n\n ''' \n PCC: Pair data\n For each peak in the syn spectrum, check the peaks in the bio spectrum one by one to see if any have an m/z within +/-pro_mz_tol. If so, pair the two peaks.\n If more than one match is found, use the one with the closest m/z.\n If a match cannot be found, designate an abundance of zero for this m/z in the bio spectrum.\n PCC: Filter noise from peak lists (based on fixed, user-defined threshold. 
Could use alternative approaches like avg + 2SD)\n '''\n\n pro_mz_tol = 2*pro_mz_tol #This value is doubled to account for the possibility that one run could be -pro_mz_tol from predicted and the other could be +pro_mz_tol from predicted \n \n warning = \"none\"\n syn_list_filtered = []\n biomin, synmin = 0, 0\n if leading_bio_scan==(\"Missing spectrum\") or leading_syn_scan==(\"Missing spectrum\"):\n PCC_r=\"Missing spectrum\"\n if leading_bio_scan==(\"Missing spectrum\") and leading_syn_scan==(\"Missing spectrum\"):\n warning = \"both runs\"\n elif leading_bio_scan==(\"Missing spectrum\"):\n warning = \"bio run\"\n else:\n warning = \"syn run\"\n \n else:\n bio_pre = float(leading_bio_scan[3][0][8:])\n z = int(leading_bio_scan[4][0][7])\n syn_pre = float(leading_syn_scan[3][0][8:])\n increment = 1/z\n bio_pre_list = [bio_pre, bio_pre + increment, bio_pre + 2*increment]\n syn_pre_list = [syn_pre, syn_pre + increment, syn_pre + 2*increment]\n bio_window, syn_window = [], []\n for i in range(0, len(bio_pre_list)):\n bio_window.append([bio_pre_list[i] - bio_pre_list[i]/1000000*pro_mz_tol, bio_pre_list[i] + bio_pre_list[i]/1000000*pro_mz_tol])\n for i in range(0, len(syn_pre_list)):\n syn_window.append([syn_pre_list[i] - syn_pre_list[i]/1000000*pro_mz_tol, syn_pre_list[i] + syn_pre_list[i]/1000000*pro_mz_tol])\n \n bio_list=[]\n for i in range(5,len(leading_bio_scan)-1):\n bio_list.append(leading_bio_scan[i])\n syn_list=[]\n for i in range(5,len(leading_syn_scan)-1):\n syn_list.append(leading_syn_scan[i])\n for i in range(0,len(syn_list)):\n mz=float(syn_list[i][0])\n for j in range(0,len(bio_list)):\n if ((mz-mz/1000000*pro_mz_tol) <= float(bio_list[j][0]) <= (mz+mz/1000000*pro_mz_tol)) and (len(syn_list[i])==2):\n syn_list[i].append(bio_list[j][1])\n syn_list[i].append(bio_list[j][0])\n elif ((mz-mz/1000000*pro_mz_tol) <= float(bio_list[j][0]) <= (mz+mz/1000000*pro_mz_tol)) and (len(syn_list[i])>2) and (abs(mz-float(bio_list[j][0])) < abs(mz-float(syn_list[i][3]))):\n #If the previous match and the current match are equally good, algorithm keeps the first match (lower m/z).\n #This makes sense because software is scanning from low to high so you would prioritize what is likely to be the C12 peak.\n syn_list[i][2]=bio_list[j][1]\n syn_list[i][3]=bio_list[j][0]\n elif len(syn_list[i])==2 and j==(len(bio_list)-1):\n syn_list[i].append(0)\n syn_list[i].append(0)\n #Find any instances where a bio value was used more than once.\n for k in range(0,len(bio_list)):\n times_used=0\n uses=[]\n mz=float(bio_list[k][0])\n for l in range(0,len(syn_list)):\n if float(syn_list[l][3])==mz:\n uses.append(syn_list[l][0])\n times_used=times_used+1\n if times_used>1:\n uses_diff=[]\n for m in range(0,len(uses)):\n uses_diff.append(abs(float(uses[m])-mz))\n uses_diff_sort=[]\n uses_diff_sort=uses_diff_sort+uses_diff\n uses_diff_sort.sort()\n best=uses_diff_sort[0]\n best_mz=uses[uses_diff.index(best)]\n #If more than one match had the same diff, index reports the one with the lowest m/z, and that is the one that will be used.\n for n in range(0,len(syn_list)):\n if (float(syn_list[n][3])==mz) and (float(syn_list[n][0])!=float(best_mz)):\n syn_list[n][2]=0\n syn_list[n][3]=0\n #Now that you deleted this match, you need to see if there was a rank2 match in the bio spectrum. 
Make sure you don't add back the rank1 match.\n syn_mz=float(syn_list[n][0])\n for o in range(0,len(bio_list)):\n if ((syn_mz-syn_mz/1000000*pro_mz_tol) <= float(bio_list[o][0]) <= (syn_mz+syn_mz/1000000*pro_mz_tol)) and (float(bio_list[o][0]) != mz) and (abs(syn_mz-float(bio_list[o][0])) < abs(syn_mz-float(syn_list[n][3]))):\n #The syn_list row here will always be 4 columns long so don't need to check its length. \n syn_list[n][2]=bio_list[o][1]\n syn_list[n][3]=bio_list[o][0]\n #During this process, you might have made a second instance of an bio m/z you already checked, so you need to report an error if this happens...\n #I realized that whenever i have it put in the rank2 match to replace a rank 1 that had already been used, if I just have the criteria that the rank 2 can't already exist as a hit then that would take care of it.\n #I don't know though if that approach would ensure that this \"rank2\" match ended up assigned to the appropriate syn m/z, so maybe i have to do a nested check here where if it does already exist elsewhere it decides which spot is better \n for p in range(0,len(bio_list)):\n times_used=0\n uses=[]\n mz=float(bio_list[p][0])\n for q in range(0,len(syn_list)):\n if float(syn_list[q][3])==mz:\n uses.append(syn_list[q][0])\n times_used=times_used+1\n if times_used>1:\n print(\"\")\n print(\"The following m/z in the bio spectrum was used more than once:\")\n print(mz)\n elif times_used==0:\n syn_list.append([0,0,bio_list[p][1],bio_list[p][0]])\n #Check if you have any instances where when matching bio peaks to syn peaks the m/z got out of order.\n for r in range(0,len(syn_list)-1):\n if (float(syn_list[r][3]) > float(syn_list[r+1][3])) and (float(syn_list[r+1][3]) != 0) and (float(syn_list[r+1][0]) != 0):\n print(\"\")\n print(\"The following peaks from the bio file were switched during matching:\")\n print(syn_list[r][3])\n print(syn_list[r+1][3])\n\n #PCC: Filter\n \n #convert pcc abund thresh into a number relative to max and then add to conditions below\n \n syn_list_nopre = []\n for i in range(len(syn_list)): \n precursor = \"no\"\n for j in range(0, len(bio_window)):\n if bio_window[j][0] <= float(syn_list[i][3]) <= bio_window[j][1]:\n precursor = \"yes\"\n for j in range(0, len(syn_window)):\n if syn_window[j][0] <= float(syn_list[i][0]) <= syn_window[j][1]:\n precursor = \"yes\"\n if precursor == \"no\":\n syn_list_nopre.append(syn_list[i])\n\n syn1max = max(syn_list_nopre, key = lambda x: float(x[1]))\n syn2max = max(syn_list_nopre, key = lambda x: float(x[2]))\n synmin = float(syn1max[1])*PCC_abund_thresh/100\n biomin = float(syn2max[2])*PCC_abund_thresh/100\n\n for i in range(len(syn_list_nopre)): \n if (float(syn_list_nopre[i][1])>abund_thresh and float(syn_list_nopre[i][1])>synmin) or (float(syn_list_nopre[i][2])>abund_thresh and float(syn_list_nopre[i][2])>biomin):\n syn_list_filtered.append(syn_list_nopre[i])\n\n #PCC: Calculate sample PCC\n\n filtered_bio_total, filtered_syn_total = 0, 0\n for i in range(0, len(syn_list_filtered)):\n bio_abund=float(syn_list_filtered[i][2])\n syn_abund=float(syn_list_filtered[i][1])\n filtered_bio_total=filtered_bio_total+bio_abund\n filtered_syn_total=filtered_syn_total+syn_abund\n filtered_bio_avg=filtered_bio_total/len(syn_list_filtered)\n filtered_syn_avg=filtered_syn_total/len(syn_list_filtered)\n r_numerator=0\n for i in range(0, len(syn_list_filtered)):\n r_numerator=r_numerator+(float(syn_list_filtered[i][2])-filtered_bio_avg)*(float(syn_list_filtered[i][1])-filtered_syn_avg)\n sum_bio_sq=0\n sum_syn_sq=0\n 
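# accumulate the squared deviations that form the Pearson correlation denominator\n    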
for i in range(0, len(syn_list_filtered)):\n sum_bio_sq=sum_bio_sq+(float(syn_list_filtered[i][2])-filtered_bio_avg)**2\n sum_syn_sq=sum_syn_sq+(float(syn_list_filtered[i][1])-filtered_syn_avg)**2\n PCC_r=r_numerator/(sqrt(sum_bio_sq)*sqrt(sum_syn_sq))\n\n return(syn_list_filtered,PCC_r,warning,biomin,synmin)\n \n##################################################################################################################################################################\n","repo_name":"Delong-Lab/PSM_validator","sub_path":"PCC_calculator.py","file_name":"PCC_calculator.py","file_ext":"py","file_size_in_byte":9573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5054504717","text":"import luserver.components.script as script\r\nfrom luserver.world import server\r\n\r\nclass ScriptComponent(script.ScriptComponent):\r\n\tdef on_enter(self, player):\r\n\t\tfor obj in server.world_data.objects.values():\r\n\t\t\tif obj.lot == 4945 and self.script_vars[\"teleport_respawn_point_name\"] in obj.groups: # respawn point lot\r\n\t\t\t\tplayer.render.play_animation(\"teledeath\", play_immediate=True, priority=4)\r\n\t\t\t\tself.object.call_later(0.5, self.teleport, player, obj)\r\n\t\t\t\tbreak\r\n\r\n\tdef teleport(self, player, obj):\r\n\t\tplayer.char.camera.play_cinematic(path_name=self.script_vars[\"cinematic\"], start_time_advance=0)\r\n\t\tplayer.char.teleport(ignore_y=False, pos=obj.physics.position, set_rotation=True, x=obj.physics.rotation.x, y=obj.physics.rotation.y, z=obj.physics.rotation.z, w=obj.physics.rotation.w)\r\n\t\tplayer.render.play_animation(\"paradox-teleport-in\", play_immediate=True, priority=4)\r\n","repo_name":"lcdr/luserver","sub_path":"luserver/scripts/nexus_tower/paradox_teleporter.py","file_name":"paradox_teleporter.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"8429854946","text":"import os, re\nimport numpy as np\n\ninput_dir = '/gpfs/alpine/world-shared/stf011/atsaris/datagnn/datagnn_ras_August2020/KRAS_g12D_r0_chain2_25'\ncnt = 0\nfreq = 1\n\nfilenames = os.listdir(input_dir)\nnum_list_on = []\nnum_list_off = []\n\nfor fil in filenames:\n\n if re.search(\"_ras_on.npz\", fil) is not None:\n searchObj = re.finditer(\"_ras_on.npz\", fil, re.M | re.I)\n for match in searchObj:\n start = match.span()[0]\n end = match.span()[1]\n num_list_on.append(int(fil[:start]))\n\n if re.search(\"_ras_off.npz\", fil) is not None:\n searchObj = re.finditer(\"_ras_off.npz\", fil, re.M | re.I)\n for match in searchObj:\n start = match.span()[0]\n end = match.span()[1]\n num_list_off.append(int(fil[:start]))\n\n\n\nnum_list_on = np.array(num_list_on)\nnum_list_off = np.array(num_list_off)\nnum_list_on = np.sort(num_list_on)\nnum_list_off = np.sort(num_list_off)\n\nfor i in range(0, num_list_on.shape[0]):\n if (cnt%freq) == 0:\n print('%s/%d_ras_on.npz'%(input_dir, num_list_on[i]))\n print('%s/%d_ras_off.npz'%(input_dir, num_list_off[i]))\n cnt+=1\n","repo_name":"tsaris/gnn-protmd","sub_path":"scripts/gen_list.py","file_name":"gen_list.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17830713519","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\n\n# Append parent dir to sys path.\nos.sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom cube.io_utils.model_store import 
ModelMetadata, ModelStore\nfrom datetime import datetime\n\nif __name__ == \"__main__\": \n print(\"Usage: python3 export_model.py path-to-my-model --tokenizer(optional) --compound-word-expander(optional) --lemmatizer(optional) --tagger(optional) --parser(optional)\")\n print(\"Example: 'python3 export_model.py path-to-my-model --tokenizer --tagger' will create a zip file named 'language_code-model_version.zip' (taken from the metadata.json) containing a tokenizer and a tagger.\")\n \n # parameter checking\n _tokenizer = False\n _compound_word_expander = False \n _lemmatizer = False \n _parser = False \n _tagger = False \n model_folder_path = \"\"\n \n for param in sys.argv:\n if not param.startswith(\"--\"):\n model_folder_path = param\n else:\n if \"--tokenizer\" in param:\n _tokenizer = True\n if \"--compound-word-expander\" in param:\n _compound_word_expander = True\n if \"--lemmatizer\" in param:\n _lemmatizer = True\n if \"--tagger\" in param:\n _tagger = True\n if \"--parser\" in param:\n _parser = True \n \n print(\"\\n\\tModel folder: \"+model_folder_path)\n print(\"\\tUse tokenizer: {}\".format(_tokenizer))\n print(\"\\tUse compound word expander: {}\".format(_compound_word_expander))\n print(\"\\tUse lemmatizer: {}\".format(_lemmatizer))\n print(\"\\tUse tagger: {}\".format(_tagger))\n print(\"\\tUse parser: {}\\n\".format(_parser))\n \n # check if path exists\n if not os.path.exists(model_folder_path):\n raise Exception (\"Model folder not found!\")\n \n # check if metadata exists\n if not os.path.exists(os.path.join(model_folder_path,\"metadata.json\")):\n raise Exception (\"metadata.json not found in model folder!\")\n \n # check if metadata is valid\n metadata = ModelMetadata()\n metadata.read(os.path.join(model_folder_path, \"metadata.json\"))\n \n output_folder_path = os.path.dirname(model_folder_path) \n model_store_object = ModelStore(disk_path=output_folder_path)\n \n model_store_object.package_model(model_folder_path, output_folder_path, metadata, should_contain_tokenizer = _tokenizer, should_contain_compound_word_expander = _compound_word_expander, should_contain_lemmatizer = _lemmatizer, should_contain_tagger = _tagger, should_contain_parser = _parser)","repo_name":"adobe/NLP-Cube","sub_path":"scripts/export_model.py","file_name":"export_model.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":537,"dataset":"github-code","pt":"16"} +{"seq_id":"72067510727","text":"import argparse\nimport os\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torch.cuda.amp import GradScaler, autocast\nfrom tqdm.auto import tqdm\n\nfrom .model import NCF\nfrom .utils.data import NCFData, get_dataset\nfrom .utils.helper import load_config\nfrom .utils.metrics import get_metrics\n\ncfg = load_config(\"./config/config.yaml\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--out\", default=True, help=\"Save the model\")\nparser.add_argument(\"--gpu\", type=str, default=\"0\", help=\"GPU card ID\")\nargs = parser.parse_args()\n\n# Set GPU\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\ncudnn.benchmark = True\n\n\ndef construct_data():\n \"\"\"Helper function to load data\"\"\"\n train_data, test_data, num_users, num_items, train_mat = get_dataset()\n\n # construct dataset\n train_set = NCFData(\n data=train_data,\n num_items=num_items,\n train_mat=train_mat,\n num_negative_samples=cfg[\"params\"][\"num_negative_samples\"],\n 
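# training negatives are redrawn each epoch via dataset.negative_sampling()\n        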
is_training=True,\n    )\n    test_set = NCFData(\n        data=test_data,\n        num_items=num_items,\n        train_mat=train_mat,\n        num_negative_samples=0,\n        is_training=False,\n    )\n    train_loader = data.DataLoader(\n        dataset=train_set,\n        batch_size=cfg[\"params\"][\"batch_size\"],\n        shuffle=True,\n        num_workers=4,\n    )\n    test_loader = data.DataLoader(\n        dataset=test_set,\n        batch_size=cfg[\"params\"][\"num_test_negative_samples\"] + 1,\n        shuffle=False,\n        num_workers=0,\n    )\n    return num_users, num_items, train_loader, test_loader\n\n\ndef load_pretrain_models():\n    if cfg[\"model_type\"] != \"NeuMF-pre\":\n        return None, None\n    pretrained_gmf = torch.load(cfg[\"model_path\"][\"gmf\"])\n    pretrained_mlp = torch.load(cfg[\"model_path\"][\"mlp\"])\n    return pretrained_gmf, pretrained_mlp\n\n\n# sourcery skip: remove-unused-enumerate\nif __name__ == \"__main__\":\n    num_users, num_items, train_loader, test_loader = construct_data()\n    # load_pretrain_models() returns the (gmf, mlp) pair, or (None, None) outside NeuMF-pre\n    pretrained_gmf, pretrained_mlp = load_pretrain_models()\n\n    model = NCF(\n        num_users=num_users,\n        num_items=num_items,\n        num_factors=cfg[\"params\"][\"num_factors\"],\n        num_layers=cfg[\"params\"][\"num_layers\"],\n        dropout=cfg[\"params\"][\"dropout\"],\n        model_type=cfg[\"model_type\"],\n        pretrained_gmf=pretrained_gmf,\n        pretrained_mlp=pretrained_mlp,\n    )\n    model.cuda()\n    loss_f = nn.BCEWithLogitsLoss()\n\n    if cfg[\"model_type\"] == \"NeuMF-pre\":\n        optimizer = optim.SGD(\n            model.parameters(), lr=cfg[\"params\"][\"learning_rate\"]\n        )\n    else:\n        optimizer = optim.Adam(\n            model.parameters(), lr=cfg[\"params\"][\"learning_rate\"]\n        )\n\n    # metrics\n    best_hr, best_ndcg, best_epoch = 0, 0, 0\n\n    # Gradient scaler\n    scaler = GradScaler()\n\n    # training process\n    for epoch in range(cfg[\"params\"][\"epochs\"]):\n        model.train()\n        train_loader.dataset.negative_sampling()\n\n        for idx, (user, item, label) in enumerate(tqdm(train_loader)):\n            user = user.cuda()\n            item = item.cuda()\n            label = label.float().cuda()\n\n            model.zero_grad()\n            with autocast():\n                pred = model(user, item)\n                loss = loss_f(pred, label)\n            scaler.scale(loss).backward()\n            scaler.step(optimizer)\n            scaler.update()\n\n        model.eval()\n        hr, ndcg = get_metrics(model, test_loader, cfg[\"params\"][\"top_k\"])\n\n        print(f\"[Epoch {epoch}] :: Hit Ratio: {hr:.3f}\\tNDCG: {ndcg:.3f}\")\n\n        if hr > best_hr:\n            best_hr, best_ndcg, best_epoch = hr, ndcg, epoch\n            if args.out:\n                if not os.path.exists(cfg[\"model_path\"][cfg[\"model_type\"]]):\n                    os.mkdir(cfg[\"model_path\"][cfg[\"model_type\"]])\n                torch.save(\n                    model,\n                    f'{cfg[\"model_path\"][cfg[\"model_type\"]]}{cfg[\"model_type\"]}.pth',\n                )\n\n    print(\n        f\"Done. 
Best epoch {best_epoch}. \"\n        f\"Hit Ratio: {best_hr:.3f}, NDCG: {best_ndcg:.3f}.\"\n    )\n","repo_name":"otzslayer/torch-neural-collaborative-filtering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70106131528","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom pandas import to_datetime\nfrom ddf_utils.factory import CDIACLoader\n\n\nsource_dir = '../source/'\nlast_update = to_datetime('2018-02-01')\n\n\nif __name__ == '__main__':\n    cdiac = CDIACLoader()\n    updated = cdiac.has_newer_source(last_update)\n    if not updated:\n        print('no newer source!')\n        sys.exit(0)\n    cdiac.bulk_download(source_dir)\n","repo_name":"open-numbers/ddf--cdiac--co2","sub_path":"etl/scripts/_update_source.py","file_name":"_update_source.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29943501160","text":"from validateHTMLFiles import get_tagname, readHTML, validateHTML\r\n\r\nclass Tree:\r\n    def __init__(self, data = None):\r\n        self.data = data\r\n        self.parent = None\r\n        self.child = []\r\n\r\n    def __str__(self, level = 0):\r\n        ret = \"\\t\"*level+repr(self.data)+\"\\n\"\r\n        for i in self.child:\r\n            ret += i.__str__(level+1)\r\n        return ret\r\n\r\n    def __repr__(self):\r\n        return ''\r\n\r\nroot = None\r\n\r\ndef buildTree(htmlStr):\r\n    if not validateHTML(htmlStr):\r\n        return None\r\n    else:\r\n        tags_l = htmlStr.split(' ')[:-1]\r\n        html_T = Tree(get_tagname(tags_l[0]))\r\n        current = html_T\r\n        for i in tags_l[1:]:\r\n            if i[1] != '/':\r\n                new = Tree(get_tagname(i))\r\n                new.parent = current\r\n                current.child.append(new)\r\n                current = current.child[len(current.child) - 1]\r\n            elif i[1] == '/':\r\n                current = current.parent\r\n            if i[-2] == '/':\r\n                new = Tree(get_tagname(i))\r\n                current = current.parent\r\n    global root\r\n    root = html_T\r\n\r\ndef printTree():\r\n    global root\r\n    print(str(root))\r\n    \r\ndef containsTag(tag):\r\n    s = str(root)\r\n    return tag in s\r\n\r\n\r\ndef printChildren(tag):\r\n    if containsTag(tag):\r\n        global root\r\n        if root.data == tag:\r\n            printTree()\r\n        else:\r\n            helper(root, tag)\r\n\r\n# function is a recursion helper for printChildren\r\ndef helper(x, tag):\r\n    for c in x.child:\r\n        if c.data == tag:\r\n            global root\r\n            temp = root\r\n            root = c\r\n            printTree()\r\n            root = temp\r\n        else:\r\n            helper(c, tag)\r\n\r\ndef removeTag(tag):\r\n    if containsTag(tag):\r\n        global root\r\n        if root.data != tag:\r\n            pos = find(root, tag)\r\n            parent = pos.parent\r\n            parent.child.extend(pos.child)\r\n            parent.child.remove(pos)\r\n            return True\r\n    return False\r\n\r\n# to help the removeTag function to find the tag\r\ndef find(x, tag):\r\n    for c in x.child:\r\n        if c.data == tag:\r\n            return c\r\n        else:\r\n            return find(c, tag)\r\n\r\nif __name__ == '__main__': #From Xiangyu Ren\r\n    #Test 1\r\n    htmlStr = readHTML('validHTML.html')\r\n    assert len(htmlStr) > 0\r\n    assert htmlStr.startswith(' 0\r\n    assert htmlStr.endswith('') or htmlStr.endswith(' 0\r\n    assert htmlStr.startswith('/delete', views.delete, name=\"delete\"),\n    path('post_message', views.post_mess, name=\"post_mess\"),\n    path('add_comment/', views.post_comment, name=\"post_comment\"),\n    path('like/', views.add_like, name=\"add_like\"),\n    path('delete/', views.delete_comment, name=\"delete_comment\"),\n    path('message_board/', views.message_board, name=\"message_board\"),\n    path('message_page/', 
views.message_page, name=\"message_page\"),\n path('user_profile/', views.user_profile, name=\"user_profile\"),\n path('update_profile_page/', views.update_profile_page, name=\"update_profile_page\"),\n path('edit_profile/', views.edit_profile, name=\"edit_profile\"),\n path('contact/', views.contact, name=\"contact\"),\n \n \n \n]","repo_name":"enbaba/Car_Forum","sub_path":"Dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8105569968","text":"from __future__ import print_function\nimport json\nimport requests\nimport luigi\nimport datetime\nimport re\nimport os\nfrom slackclient import SlackClient\n\ndef slackMessageToTsvLine(message):\n if (not message):\n return (\"type\", \"ts\", \"user\", \"is_starred\", \"text\")\n return (\n message.get(\"type\"),\n message.get(\"ts\"),\n message.get(\"user\"),\n message.get(\"is_starred\"),\n (message.get(\"text\") or \"\")\n .encode('ascii', 'ignore')\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\"),\n )\n \n\nclass DownloadSlackChannelHistoryChunk(luigi.Task):\n\n channel_name = luigi.Parameter(\"\")\n last_message_id = luigi.Parameter(\"\")\n date = luigi.DateParameter(default=datetime.date.today())\n\n def run(self):\n # create one at https://api.slack.com/web#authentication, and set the environment variable\n sc = SlackClient(os.environ[\"SLACK_CLIENT_TOKEN\"])\n channel_id = 0\n\n # Get Channel Information\n for channel in sc.api_call(\"channels.list\")[\"channels\"]:\n if (channel[\"name\"] == self.channel_name):\n channel_id = channel[\"id\"]\n\n # Get Channel History\n if (self.last_message_id):\n channel_history_chunk = sc.api_call(\"channels.history\", channel=channel_id, count=1000, latest=self.last_message_id)\n else:\n channel_history_chunk = sc.api_call(\"channels.history\", channel=channel_id, count=1000)\n \n if (not channel_history_chunk[\"ok\"]):\n raise Exception('Channel not found, or permissions error', 'channel_name=' + self.channel_name)\n\n channel_history_chunk_last_message = channel_history_chunk[\"messages\"]\n outputdata = channel_history_chunk\n with self.output().open('w') as outfile:\n json.dump(outputdata, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n def output(self):\n return luigi.LocalTarget(self.date.strftime('data/Slack/ChannelHistory/' + self.channel_name + '/Chunk_' + str(self.last_message_id) + '_%Y-%m-%d.json'))\n\n\nclass DownloadSlackChannelHistory(luigi.Task):\n\n channel_name = luigi.Parameter()\n date = luigi.DateParameter(default=datetime.date.today())\n\n def run(self):\n # create one at https://api.slack.com/web#authentication, and set the environment variable\n sc = SlackClient(os.environ[\"SLACK_CLIENT_TOKEN\"])\n channel_id = 0\n\n # Get Channel Information\n for channel in sc.api_call(\"channels.list\")[\"channels\"]:\n if (channel[\"name\"] == self.channel_name):\n channel_id = channel[\"id\"]\n\n if (0 == channel_id):\n raise Exception('Unable to find such channel by name', 'channel_name=' + self.channel_name)\n\n with self.output().open('w') as outfile:\n print(*slackMessageToTsvLine(False), file=outfile, sep='\\t')\n\n # Get the first chunk\n last_message_id = 0\n taskOutput = yield DownloadSlackChannelHistoryChunk(channel_name=self.channel_name)\n with taskOutput.open('r') as infile:\n last_chunk = json.load(infile)\n last_message_id = last_chunk[\"messages\"][-1:][0][\"ts\"] # TODO check 4 failures\n for message in 
last_chunk[\"messages\"]:\n print(*slackMessageToTsvLine(message), file=outfile, sep='\\t')\n\n # Get more chunks\n while (last_chunk[\"has_more\"]):\n taskOutput = yield DownloadSlackChannelHistoryChunk(channel_name=self.channel_name, last_message_id=last_message_id)\n with taskOutput.open('r') as infile:\n last_chunk = json.load(infile)\n last_message_id = last_chunk[\"messages\"][-1:][0][\"ts\"] # TODO check 4 failures\n for message in last_chunk[\"messages\"]:\n print(*slackMessageToTsvLine(message), file=outfile, sep='\\t')\n\n\n def output(self):\n return luigi.LocalTarget(self.date.strftime('data/Slack/ChannelHistory/' + self.channel_name + '_%Y-%m-%d.tsv'))\n\n","repo_name":"gerardobort/luigi-slack","sub_path":"slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"818551053","text":"import reversion\n\nfrom django.core.exceptions import MultipleObjectsReturned, ValidationError\nfrom django.db import models\nfrom django.apps import apps\n\nfrom .tracked_model import TrackedModel\nfrom .derived_sample import DerivedSample\n\nfrom ._utils import add_error as _add_error\n\n@reversion.register()\nclass SampleLineage(TrackedModel):\n child = models.ForeignKey(\"Sample\", on_delete=models.PROTECT, related_name=\"child_sample\", help_text=\"Child sample.\")\n parent = models.ForeignKey(\"Sample\", on_delete=models.PROTECT, related_name=\"parent_sample\", help_text=\"Parent sample.\")\n process_measurement = models.ForeignKey(\"ProcessMeasurement\", on_delete=models.PROTECT, related_name=\"lineage\",\n help_text=\"process used for sample creation.\")\n\n def clean(self):\n super().clean()\n errors = {}\n\n def add_error(field: str, error: str):\n _add_error(errors, field, ValidationError(error))\n\n protocol_name = self.process_measurement.process.protocol.name\n if protocol_name == \"Extraction\":\n # There is only a single expected derived sample for the child and parent of an extraction\n try:\n child_derived = self.child.derived_samples.get()\n parent_derived = self.parent.derived_samples.get()\n except MultipleObjectsReturned:\n add_error(\"derived_sample\", \"Extraction child and/or parent has more than one derived sample.\")\n\n if not errors:\n if not child_derived.sample_kind.is_extracted:\n add_error(\"sample_kind\", \"Extracted sample need to be a type of Nucleic Acid.\")\n if parent_derived.sample_kind.is_extracted:\n add_error(\"extracted_from\",\n \"Extraction process cannot be run on samples of extracted kinds like DNA and RNA.\")\n if not child_derived.tissue_source:\n add_error(\"tissue_source\", \"Extracted sample need to have a tissue source.\")\n elif child_derived.tissue_source != parent_derived.sample_kind:\n add_error(\"tissue_source\", \"Extracted sample tissue_source must match parent sample_kind.\")\n elif any([protocol_name == \"Transfer\", protocol_name == \"Illumina Infinium Preparation\", protocol_name == \"DNBSEQ Preparation\"]):\n if list(self.child.derived_samples.values_list(\"id\", flat=True).order_by(\"id\")) != list(self.parent.derived_samples.values_list(\"id\", flat=True).order_by(\"id\")):\n add_error(\"derived_sample\", f\"Transferred sample {self.child.name} need to have the same derived samples as its parent.\")\n elif protocol_name == \"Sample Pooling\":\n # Check that parent.derived_samples are a subset of the child.derived_samples.\n if not set(self.parent.derived_samples.values_list(\"id\", 
flat=True)).issubset(set(self.child.derived_samples.values_list(\"id\", flat=True))):\n                add_error(\"derived_sample\", f\"Pooled sample {self.child.name} does not include all the derived samples of the parent.\")\n\n        if self.child == self.parent:\n            add_error(\"child\", \"A sample cannot have itself as child.\")\n\n        if errors:\n            raise ValidationError(errors)\n\n    def save(self, *args, **kwargs):\n        self.full_clean()\n        super().save(*args, **kwargs) # Save the object","repo_name":"c3g/freezeman","sub_path":"backend/fms_core/models/sample_lineage.py","file_name":"sample_lineage.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"31303099793","text":"from workers.data_scraper.scraper_dormitory.scraping_default_usage import Scraper as ABCScraper\nfrom workers.data_scraper.scraper_dormitory.scraper_tools.tools import *\nfrom workers.data_scraper.scraper_dormitory.parser_tools.tools import *\nfrom urllib.parse import urlencode\n\n# Channel name: Jungnang (중랑)\n\n# Target board: performance guide and reservations\n# Stop condition: when the last page is reached\n\n# HTTP Request\n'''\n    @post list\n\n    method : GET\n    url : https://www.jungnang.go.kr/portal/app/integrateApp/integrateList.do?division=event&programId=integrateApp&menuNo=200383&pageIndex={page_count}\n    header :\n        None\n\n'''\n'''\n    @post info\n    method : GET\n    url : https://www.jungnang.go.kr/portal/app/event/select.do?id={post_id}&programId=event&division=event&menuNo=200383\n    header :\n        None\n\n'''\nsleepSec = 1\nisUpdate = True\n\n\nclass Scraper(ABCScraper):\n    def __init__(self, session):\n        super().__init__(session)\n        self.channel_name = '중랑'\n        self.post_board_name = '공연안내 및 예약'\n        self.channel_main_url = 'https://www.jungnang.go.kr'\n\n    def scraping_process(self, channel_code, channel_url, dev, full_channel_code):\n        super().scraping_process(channel_code, channel_url, dev, full_channel_code)\n        self.session = set_headers(self.session)\n        self.page_count = 1\n        while True:\n            print(f'PAGE {self.page_count}')\n\n            self.channel_url = self.channel_url_frame.format(self.page_count)\n\n            self.post_list_scraping(post_list_parsing_process, 'get')\n            if self.scraping_target:\n                self.target_contents_scraping()\n                self.collect_data()\n                self.mongo.reflect_scraped_data(self.collected_data_list)\n                self.page_count += 1\n            else:\n                break\n        self.session.cookies.clear()\n\n    def target_contents_scraping(self):\n        super().target_contents_scraping(post_content_parsing_process, sleepSec)\n\n\ndef post_list_parsing_process(**params):\n    target_key_info = {\n        'multiple_type': ['post_url', 'post_subject', 'start_date', 'end_date', 'is_going_on']\n    }\n\n    var, soup, key_list, text = html_type_default_setting(params, target_key_info)\n\n    # 2022-1-26 HYUN\n    # html table header index\n    table_column_list = ['번호', '구분', '제목', '신청기간', '처리현황']\n\n    # table area containing the post list\n    post_list_table_bs = soup.find('div', class_='table_wrap')\n\n    if not post_list_table_bs:\n        raise TypeError('CANNOT FIND LIST TABLE')\n\n    post_list_table_bs = post_list_table_bs.find('table', class_='inc_head')\n\n    # table header area\n    post_list_table_header_area_bs = post_list_table_bs.find('thead')\n    # list of table header cells\n    post_list_table_header_list_bs = post_list_table_header_area_bs.find_all('th')\n\n    # verify the column names still match the expected layout\n    for column_idx, tmp_header_column in enumerate(post_list_table_header_list_bs):\n        if table_column_list[column_idx] != tmp_header_column.text.strip():\n            print(f'IDX {column_idx} ERROR - {table_column_list[column_idx]} is {tmp_header_column.text.strip()}')\n            raise ValueError('List Column Index 
Change')\n\n post_row_list = post_list_table_bs.find('tbody').find_all('tr')\n\n for tmp_post_row in post_row_list:\n # 첫번째, 게시물 '번호'가 th이므로 td 인덱스에서 1개 제외\n for idx, tmp_td in enumerate(tmp_post_row.find_all('td')):\n\n\n if idx == 0:\n var['post_subject'].append(clean_text(tmp_td.text))\n elif idx == 1:\n var['post_url'].append(make_absolute_url(\n in_url=tmp_td.find('a').get('href'),\n channel_main_url=var['response'].url))\n elif idx == 2:\n tmp_date_period_str = clean_text(tmp_td.text)\n date_str_list = tmp_date_period_str.split('~')\n date_str_list = [f.strip() for f in date_str_list]\n if len(date_str_list) == 2:\n var['start_date'].append(convert_datetime_string_to_isoformat_datetime(date_str_list[0]))\n var['end_date'].append(convert_datetime_string_to_isoformat_datetime(date_str_list[1]))\n else:\n var['start_date'].append(tmp_date_period_str)\n elif idx == 3:\n if tmp_td.find('img', {'alt':'마감'}):\n var['is_going_on'].append(False)\n else:\n var['is_going_on'].append(True)\n\n result = merge_var_to_dict(key_list, var)\n if var['dev']:\n print(result)\n return result\n\n\ndef post_content_parsing_process(**params):\n target_key_info = {\n # start_date2 : 공연시작 시간\n 'single_type': ['post_text', 'post_title', 'start_date2'],\n 'multiple_type': ['post_image_url', 'extra_info']\n }\n var, soup, key_list, _ = html_type_default_setting(params, target_key_info)\n var['extra_info'] = [{\n 'info_title':'공연 상세'\n }]\n content_info_area = soup.find('div', class_='txt_box')\n var['post_title'] = content_info_area.find('h2').text.strip()\n\n content_info_area = content_info_area.find('div', class_='half_txt')\n content_info_area = content_info_area.find('ul')\n\n extra_info_column_list = ['관람료', '공연장소']\n\n for tmp_row_area in content_info_area.find_all('li'):\n column_title = tmp_row_area.em.text.strip()\n tmp_row_area.em.decompose()\n column_value = clean_text(tmp_row_area.text).replace(': ', '').strip()\n\n if column_title == '공연시간':\n var['start_date2'] = convert_datetime_string_to_isoformat_datetime(column_value)\n elif column_title in extra_info_column_list:\n tmp_extra_info_index = extra_info_column_list.index(column_title)\n var['extra_info'][0]['info_' + str(tmp_extra_info_index + 1)] = [column_title,\n column_value]\n\n poster_area = soup.find('div', class_='half_box')\n poster_area = poster_area.find('div', class_='half_img')\n\n var['post_image_url'] = search_img_list_in_contents(poster_area, var['response'].url)\n\n context_area = soup.select_one('div.gray_bg_box.img_box.line')\n var['post_text'] = clean_text(context_area.text.strip())\n\n if var['post_image_url'] is None:\n var['post_image_url'] = []\n\n var['post_image_url'].extend(search_img_list_in_contents(context_area, var['response'].url))\n\n result = convert_merged_list_to_dict(key_list, var)\n if var['dev']:\n print(result)\n return result","repo_name":"choiseulong/chancewave_scraping","sub_path":"scrapingProject/workers/data_scraper/scraper_dormitory/rooms/seoul/jungnang/scraper_2.py","file_name":"scraper_2.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26256396170","text":"#!/usr/bin/env python\n# coding=utf8\nimport os\nimport json\nimport time, datetime\nfrom model.setting import withBase, basecfg\nfrom flask import Blueprint, request, Response, render_template, g\nfrom rest import api\nfrom model.base import Unit\nfrom . 
import exepath, allowed\n\nINIT = \"\"\"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n\n@api.route('/unit', methods=['POST'])\n@api.route('/unit/', methods=['POST'])\n@withBase(basecfg.W, resutype='DICT', autocommit=True)\ndef unit(uid=None):\n user = request.user\n condition = request.form.get('condition', '{}')\n condition = json.loads(condition)\n data = request.form.get('data', '{}')\n data = json.loads(data)\n pyfile = request.files.get('file')\n projection = request.form.get('projection', '{}')\n projection = json.loads(projection)\n\n limit = request.form.get('limit', 'one')\n\n if uid is not None:\n condition['_id'] = uid\n POST = False\n if pyfile:\n POST = True\n result = {'stat':0, 'desc':'请上传正确格式的python文件', 'unit':Unit.queryOne(user, condition, projection=projection)}\n if pyfile and allowed(pyfile.filename):\n filename = pyfile.filename\n filepath = exepath(filename)\n pyfile.save(filepath)\n filepath = os.path.join(os.path.dirname(filepath), '__init__.py')\n if not os.path.exists(filepath):\n fi = open(filepath, 'w')\n fi.write(INIT)\n fi.close()\n result['stat'] = 1\n result['desc'] = '上传成功'\n result = json.dumps(result, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')\n if data:\n POST = True\n if '_id' in condition:\n data['$set'] = data.get('$set', {})\n data['$set']['updator'] = user['_id']\n Unit.update(user, condition, data)\n uid = condition['_id']\n else:\n data['updator'] = user['_id']\n data['creator'] = user['_id']\n data = Unit(**data)\n uid = Unit.insert(user, data)\n result = json.dumps({'stat':1, 'desc':'Unit is set successfully.', 'unit':{'_id':uid}}, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')\n if not POST:\n if limit == 'one':\n result = Unit.queryOne(user, condition, projection=projection)\n else:\n result = list(Unit.queryAll(user, condition, projection=projection))\n result = json.dumps({'stat':1, 'desc':'', 'unit':result}, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')\n return result\n ","repo_name":"listen-lavender/pholcus","sub_path":"gds/blueprint/api/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"41808773551","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 19 14:16:59 2021\n\n@author: Musya\n\"\"\"\n\nimport time\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nimport matplotlib.image as mpimg\nfrom imgaug import augmenters as iaa\nimport cv2\nimport random\nfrom dynamic import DynamicArray\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D #Import various layers for the Neural Net\nfrom tensorflow.keras.optimizers import Adam #Import optimizers for the Neural Net\n#from keras.layers import concatenate, Dropout\n\nos.chdir('/home/pi/Autonomous-Car/DATA')\nos.chdir('/home/pi/Autonomous-Car/DATA/CSV')\npath = os.getcwd()\n\nname = os.path.join(path, 'drivelog.csv')\ndef getName(filePath):\n return filePath.split(\"\\\\\")[-1]\ndef importData():\n print(\"Importing Data...\")\n columns = ['Image_name','PWML','PWMR']\n data = pd.read_csv(name,names = columns)\n data['Image_name'] = data['Image_name'].apply(getName)\n #print(\"Total Rows:\", data.shape[0])\n #print(\"Total Columns:\", data.shape[1])\n return data\ndef balanceData(data):\n\n nBins = 121\n spBin = 200\n\n hist,bins = 
np.histogram(data['PWML'],nBins)\n    center = (bins[:-1]+bins[1:])*.5  # bin centers: midpoints of adjacent histogram edges\n    plt.bar(center,hist,width = .05)\n    plt.plot((0,50),(spBin,spBin))\n    plt.title('Initial Distribution')\n    #plt.show()\n\n\n    #hist2,bins2 = np.histogram(data['PWMR'],nBins)\n    #center2 = (bins2[-1:]+bins2[1:])*.5\n    #plt.bar(center2,hist2,width = .5)\n    #plt.plot((0,50),(spBin,spBin))\n    #plt.title('Initial Distribution')\n    #plt.show()\n\n    q = input(\"Do you want to balance data? y>Yes/n>No: \")\n    q = q.lower()\n    if q in ('y', 'yes'):\n        print(\"Balancing Data...\")\n        removedIndex = []\n        for j in range(nBins):\n            binData = []\n            for i in range(len(data['PWML'])):\n                if data['PWML'][i]>=bins[j] and data['PWML'][i] <= bins[j+1]:\n                    binData.append(i)\n            binData = shuffle(binData)\n            binData = binData[spBin:]\n            removedIndex.extend(binData)\n        print(\"Available Images:\", len(data))\n        data.drop(data.index[removedIndex], inplace = True)\n        print(\"Removed Images:\", len(removedIndex))\n        print(\"Remaining Images:\", len(data))\n\n        hist,bins = np.histogram(data['PWML'],nBins)\n        plt.bar(center,hist,width = .5)\n        plt.plot((0,50),(spBin,spBin))\n        plt.title('Final Distribution')\n        plt.show()\n\n        print(\"Balancing finished\")\n    elif q in ('n', 'no'):\n        print(\"Pass\")\n        pass\n    return data\n\ndef loadData(path,data):\n    print(\"Loading Data...\")\n    imgPath = []\n    steering = []\n    throttle = []\n    for i in range(len(data)):\n        indexData = data.iloc[i]\n        imgPath.append(os.path.join(path,'DATA\IMG',indexData[0]))\n        steering.append(float(indexData[1]))\n        throttle.append(float(indexData[2]))\n        print(i, \"of\", len(data))\n    imgPath = np.asarray(imgPath)\n    steering = np.asarray(steering)\n    throttle = np.asarray(throttle)\n    print(\"Returning data\")\n    return imgPath, steering, throttle\n\ndef augmentImage(imgPath,PWML,PWMR):\n    img = mpimg.imread(imgPath)\n    if np.random.rand() < 0.5:\n        zoom = iaa.Affine(scale=(1,1.2))\n        img = zoom.augment_image(img)\n    if np.random.rand() < 0.5:\n        img = cv2.flip(img,1)\n        PWML, PWMR = PWMR, PWML  # a horizontal flip swaps the left/right PWM labels\n    return img,PWML,PWMR\ndef preProcess(img):\n    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n    img = cv2.GaussianBlur(img, (3,3),0)\n    img = cv2.resize(img, (200,66))/255.0\n    return img\n\n\ndef batchGen(imgPath,outputs, batchSize):\n    steeringList = outputs[0]\n    throttleList = outputs[1]\n    while True:\n        imgBatch = []\n        steeringBatch = []\n        throttleBatch = []\n        for i in range(batchSize):\n            index = random.randint(0, len(imgPath)-1)\n            #if trainFlag:\n                #img = augmentImage(imgPath[index],\n                             #steering = steering[index],\n                             #throttle = throttle[index])\n            #else:\n            img = mpimg.imread(imgPath[index])\n            steering = steeringList[index]\n            throttle = throttleList[index]\n\n            img = preProcess(img)\n            imgBatch.append(img)\n            steeringBatch.append(steering)\n            throttleBatch.append(throttle)\n        yield(np.asarray(imgBatch),\n              [np.asarray(steeringBatch),\n               np.asarray(throttleBatch)]\n              )\n\ndef createModel():\n    print(\"Creating Model\")\n    img_Input = keras.Input(shape=(66, 200,3), name='Image')\n\n    img = Conv2D(24, (5,5), (2, 2), activation='elu')(img_Input)\n    img = Conv2D(36, (5,5), (2, 2), activation='elu')(img)\n    img = Conv2D(48, (5,5), (2, 2), activation='elu')(img)\n    img = Conv2D(64, (3,3), activation='elu')(img)\n    img = Conv2D(64, (3,3), activation='elu')(img)\n\n    img = Flatten()(img)\n\n    img = Dense(100, activation='elu',name=\"layer1\")(img)\n    img = Dense(50, activation='elu',name=\"layer2\")(img)\n    img = Dense(10, activation='elu',name=\"layer3\")(img)\n\n    steering = Dense(1, name = 'Steering')(img)\n    throttle = Dense(1, name = 'Throttle')(img)\n\n    model = 
keras.Model(inputs = img_Input, outputs = [steering,throttle])\n    print(\"Creating Model Done\")\n    #keras.utils.plot_model(model,\"AI-CAR.jpeg\")\n\n    model.compile(loss='mean_squared_error',\n                  optimizer=Adam(learning_rate=0.0001),\n                  metrics=['Accuracy'])\n\n    return model","repo_name":"JosephMusya/Tensorflow-AI-self-driving-","sub_path":"utilis.py","file_name":"utilis.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74781464649","text":"from setuptools import setup, find_packages\nrequired_modules = [ \n\t\"simplejson\"\n\t]\n\nsetup(\n\tname=\"jsonify\",\n\tversion=\"0.0.1\",\n\tdescription=\"\",\n\tauthor=\"Christopher H. Casebeer\",\n\tauthor_email=\"christopher@chc.name\",\n\turl=\"\",\n\tentry_points = '''\n\t\t[console_scripts]\n\t\tjsonify = jsonify:main\n\t''',\n\tpackages=find_packages(exclude='tests'),\n\tinstall_requires=required_modules\n\t)\n\n","repo_name":"casebeer/jsonify","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"22929016716","text":"import sys\n\n\ndef main():\n    chunk_cnt = 0\n    variance_of_all = 0\n    mean_of_all = 0\n\n    for line in sys.stdin:\n        chunk, mean, variance = line.split()\n        chunk = int(chunk)\n        mean = float(mean)\n        variance = float(variance)\n        # combine variances before updating the running mean;\n        # the cross term needs the mean difference of the two groups\n        variance_of_all = (chunk * variance + chunk_cnt * variance_of_all) / (chunk + chunk_cnt) + \\\n                          chunk * chunk_cnt * ((mean_of_all - mean) / (chunk + chunk_cnt)) ** 2\n        mean_of_all = (mean_of_all * chunk_cnt + chunk * mean) / (chunk + chunk_cnt)\n        chunk_cnt += chunk\n\n    print(variance_of_all)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"genusB/made_mldb","sub_path":"hw1/reducer_var.py","file_name":"reducer_var.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74084186889","text":"input()\nst = \"\"\nwhile True:\n    try: \n        s = input()\n    except EOFError:\n        break\n    \n    if(s == \"\"):\n        break\n    \n    st += s + \" \"  # keep a separator so numbers on adjacent lines don't merge\n    \nst = \" \".join(st.split()).split(\" \")\n\nnumbList = [int(x) for x in st]\nmi = min(numbList)\nprint(f\"\"\"Menor valor: {mi}\nPosicao: {numbList.index(mi)}\"\"\")","repo_name":"LedoVeras/CodeProblems","sub_path":"Beecrowd/python/1180 Menor e Posição.py","file_name":"1180 Menor e Posição.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70342165450","text":"\nimport os\nimport sys\n\nproject_slug = \"{{ cookiecutter.project_slug }}\"\n\nMESSAGE_COLOR = \"\\x1b[32m\"\nRESET_ALL = \"\\x1b[0m\"\n\nprint(f\"{MESSAGE_COLOR}Initializing a new project\")\nprint(f\"Creating project at {os.getcwd()}{RESET_ALL}\")","repo_name":"daniel-epm/cookiecutter-template-draft","sub_path":"hooks/pre_gen_project.py","file_name":"pre_gen_project.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3881863","text":"# https://www.acmicpc.net/problem/1254\nfrom collections import deque\n\nS = deque(list(input()))\n\n# palindrome check\ndef check(S):\n    for i in range(len(S)//2):\n        if S[i] != S[len(S)-1-i]:# 0/3 1/2\n            return False\n    return True\n\nreverseS = deque([])\nif check(S) is False:\n    for i in range(len(S)):\n        reverseS.appendleft(S[i])\n        # deques don't support '+', so concatenate as lists\n        if check(list(S) + list(reverseS)) is True:\n            
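# the first index i whose reversed prefix completes a palindrome gives the minimal answer\n            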
print(len(S)+len(reverseS))\n break\nelse:\n print(len(S))","repo_name":"spdlqjdk1012/naver","sub_path":"21.팰린드롬만들기.py","file_name":"21.팰린드롬만들기.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17129341839","text":"import os\nimport matplotlib.pyplot as plt\nfrom util.pause import pause\n\n\ndef dbToDataStore(dirIn, dirOut, extOrig, extNew, log):\n\n # display\n if log:\n print(\"Transforming DB...\")\n\n # transform db\n for name in os.listdir(dirIn):\n if name.endswith(extOrig):\n # display\n #if log:\n #print(\"\\tProcessing: \" + name)\n # read name\n pre, ext = os.path.splitext(name)\n # get label\n C = pre.split('_')\n # create dir with label\n dirOutLabel = os.path.join(dirOut, C[1])\n # newname\n newName = pre + '.' + extNew\n newPath = os.path.join(dirOutLabel, newName)\n # if already present skip\n if os.path.exists(newPath):\n continue\n # create directory if not present\n if not os.path.exists(dirOutLabel):\n os.makedirs(dirOutLabel)\n # read\n img = plt.imread(os.path.join(dirIn, name))\n\n # display\n \"\"\"\n print(newName)\n plt.imshow(img)\n plt.show()\n pause()\n \"\"\"\n\n # write\n plt.imsave(newPath, img)\n\n #pause()\n\n print()\n\n","repo_name":"AngeloUNIMI/HistoTNet","sub_path":"(2) PyTorch_HistoTNet/util/dbToDataStore.py","file_name":"dbToDataStore.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"7622037183","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n# Settings\nfrom django.conf import settings\n\n# Serializers\nfrom ..serializers import ReviewSerializer\n\n# Utils\nfrom utils.s3_instance import s3\nfrom utils.create_error import create_error\nfrom ..utils import generate_filename\n\n# Review Create and Retrieve List view\nclass ReviewCreateRetrieve(APIView):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.BAD_REQUEST = create_error('Bad Request', 'The review data is invalid')\n\n # Upload image to Amazon S3\n # Args:\n # image: Image file\n # Return\n # Amazon S3 URL\n def upload_image_to_s3(self, image):\n # Extract the file extension from the image name\n file_format = image.name.split('.')[1]\n\n # Generate a unique name for the image\n file_name = generate_filename(file_format)\n\n # Generate a unique key for the image in S3\n image_key = f\"reviews/{file_name}\"\n\n # Upload the image to Amazon S3\n s3.upload_fileobj(image, settings.AWS_STORAGE_BUCKET_NAME, image_key)\n\n # Construct the S3 URL of the uploaded image\n s3_url = f\"https://{settings.AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/{image_key}\"\n\n return s3_url\n\n # Create a new review in the database\n # Args:\n # response: Response object\n # Return:\n # Response object\n def post(self, request, **kwargs):\n # Get the uploaded image file from the request\n image_file = request.FILES.get('image')\n\n # If image file does not exist, raise a bad request error\n if image_file is None:\n error = create_error('Missing file', 'The image file is required')\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n # Extract the product Id number from the keyword arguments\n product_id = kwargs.get('product_id')\n\n # Retrieve user from the body of the request\n user_id = request.user.pk\n\n # Retrieve feedback from the body of the request\n feedback = 
request.data.get('feedback')\n\n # If the feedback field does not exist, raise a missing field error\n if feedback is None:\n error = create_error('Missing field', 'The feedback field is required')\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n # Retrieve rating from the body of the request\n rating = request.data.get('rating')\n\n # If the rating field does not exist, raise a missing field error\n if rating is None:\n error = create_error('Missing field', 'The rating field is required')\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n # Upload image to S3 bucket\n s3_url = self.upload_image_to_s3(image_file)\n\n # Initialize review data\n review_data = {\n 'user': user_id,\n 'product': product_id,\n 'feedback': feedback,\n 'rating': rating,\n 'media_url': s3_url\n }\n\n # Validate review data against its serializer\n serializer = ReviewSerializer(data=review_data)\n\n # If data is invalid, raise a bad request error\n if serializer.is_valid() == False:\n return Response(self.BAD_REQUEST, status=status.HTTP_400_BAD_REQUEST)\n\n # Save new review in the database\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\n# View\nreview_create_view = ReviewCreateRetrieve.as_view()","repo_name":"EgorUshakovOfficial/ecommerce-project","sub_path":"server/project/apps/reviews/views/ReviewCreateView.py","file_name":"ReviewCreateView.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"962231830","text":"import requests\nimport time\nimport threading\nfrom time import sleep\nfrom random import randint\nimport os\nfrom urllib.parse import urlencode\nfrom pyquery import PyQuery as Pq\nfrom static import tools\nfrom get_weibo_comment import get_weibo_comment\n\n# 手机端微博的网址\nhost = 'm.weibo.cn'\n# 个人主页\nbase_url = 'https://%s/api/container/getIndex?' 
% host\n# 博文\nweibo_baseurl = 'https://m.weibo.cn/detail/'\n# 存储地址\nedges_user_pub_weibo = r'E:\\weibo\\edges_user_pub_weibo_%s.txt' # 发表关系\nedges_user_trans_weibo = r'E:\\weibo\\edges_user_trans_weibo_%s.txt' # 转发关系\nweibo_content = r'E:\\weibo\\weibo_content_%s.txt'\nweiboNodes_path = r'E:\\weibo\\weiboNodes.txt'\nweibo_comments_path = r'E:\\weibo\\weibo_comments_%s.txt'\n\n\ndef trans_format(time_string, from_format=\"%a %b %d %H:%M:%S +0800 %Y\", to_format='%Y.%m.%d %H:%M:%S'):\n \"\"\"\n @note 时间格式转化\n :param time_string:\n :param from_format:\n :param to_format:\n :return:\n \"\"\"\n time_struct = time.strptime(time_string, from_format)\n times = time.strftime(to_format, time_struct)\n return times\n\n\ndef get_single_page(user_id, page=1):\n \"\"\"\n 爬取第page页的用户博文内容\n 这里为了简便分析,一般只爬取一个用户2-6条博文\n :param page: 页码\n :param user_id: 需要爬取的用户id\n :return:\n \"\"\"\n # 必要的参数\n params = {\n 'type': 'uid',\n 'value': user_id,\n 'containerid': int('107603' + user_id), # containerid就是微博用户id前面加上107603\n 'page': page\n }\n # 爬取请求头\n headers = {\n 'Host': host,\n 'Referer': 'https://m.weibo.cn/u/%s' % user_id,\n 'User-Agent': tools.get_random_ua()['User-Agent']\n }\n # 爬取网址\n url = base_url + urlencode(params)\n try:\n # 请求网页\n response = requests.get(url, headers=headers)\n # 请求成功\n if response.status_code == 200:\n return response.json()\n except requests.ConnectionError as e60:\n print('e60抓取错误:', e60.args)\n\n\ndef analysis(response_json, user_id, thread_id, weibo_id_list, f_weibo_comment):\n \"\"\"\n 解析爬取用户博文页面返回的json数据\n :param f_weibo_comment: 写入评论句柄\n :param response_json: 返回的json数据\n :param user_id: 爬取的用户id\n :param thread_id: 线程id\n :param weibo_id_list: 存储博文id的列表\n :return:\n \"\"\"\n # 博文所处的位置\n items = response_json.get('data').get('cards')\n # 当博文大于6条时,随机爬取3到6条\n if len(items) > 6:\n num = randint(3, 6)\n # 博文数小于3条时,全部爬取\n else:\n num = len(items)\n current_num = 0\n f_user_pub_weibo = open(edges_user_pub_weibo % str(thread_id), 'a+', encoding='utf-8')\n f_user_trans_weibo = open(edges_user_trans_weibo % str(thread_id), 'a+', encoding='utf-8')\n f_weibo_content = open(weibo_content % str(thread_id), 'a+', encoding='utf-8')\n for item in items:\n try:\n init_wb = item.get('mblog')\n # 存在微博\n if init_wb:\n weibo_id = init_wb.get('id')\n cr_time = trans_format(init_wb.get('created_at'))\n # 获取本项目所需属性\n data = {\n 'weibo_id': weibo_id,\n '发表时间': cr_time,\n 'text': Pq(init_wb.get(\"text\")).text(), # 仅提取内容中的文本\n '点赞': init_wb.get('attitudes_count'), # 点赞数\n '评论': init_wb.get('comments_count'), # 评论数\n # 'reposts': init_wb.get('reposts_count') # 转发数\n }\n # 代表是转发的微博\n if init_wb.get('retweeted_status') is not None:\n trans_weibo = init_wb.get('retweeted_status')\n trans_weibo_id = trans_weibo.get('id')\n trans_weibo_created_at = trans_format(trans_weibo.get('created_at'))\n trans_weibo_content = get_specific_weibo(trans_weibo_id)\n trans_weibo_content = Pq(trans_weibo_content).text()\n data['text'] = trans_weibo_content\n data['发表时间'] = trans_weibo_created_at\n # 博文ID暂存列表\n weibo_id_list.append(trans_weibo_id)\n # 写入文件\n f_user_trans_weibo.write(str(user_id) + ' ' + str(trans_weibo_id) + '\\n')\n f_weibo_content.write(str(data) + '\\n')\n get_weibo_comment(trans_weibo_id, f_weibo_comment)\n else:\n # 博文ID暂存列表\n weibo_id_list.append(weibo_id)\n # 写入文件\n f_user_pub_weibo.write(str(user_id) + ' ' + str(weibo_id) + '\\n')\n f_weibo_content.write(str(data) + '\\n')\n get_weibo_comment(weibo_id, f_weibo_comment)\n current_num += 1\n if current_num >= num:\n break\n except Exception as e116:\n # 
print('e116:' + str(e116.args))\n pass\n f_user_trans_weibo.close()\n f_user_pub_weibo.close()\n f_weibo_content.close()\n\n\ndef get_specific_weibo(weibo_id):\n \"\"\"\n 爬取特定博文内容\n :param weibo_id:\n :return:\n \"\"\"\n # 爬取请求头\n headers = {\n 'Host': host,\n 'Referer': 'https://m.weibo.cn/detail/%s' % weibo_id,\n 'User-Agent': tools.get_random_ua()['User-Agent']\n }\n # 爬取网址\n url = weibo_baseurl + weibo_id\n try:\n # 请求网页\n response = requests.get(url, headers=headers)\n # 请求成功\n if response.status_code == 200:\n text = str(response.text).split('\"text\": \"')[1].split('\"textLength\"')[0].replace(' \",', '')\n return text\n except requests.ConnectionError as e:\n print('抓取错误', e.args)\n\n\nclass MyThread (threading.Thread):\n \"\"\"\n 自定义线程,爬取内容分根据用户ID分为4个线程一起爬,加快速率\n \"\"\"\n def __init__(self, thread_id, user_id_list):\n \"\"\"\n 初始化线程函数\n :param thread_id: 线程id\n :param user_id_list: 用户id列表\n \"\"\"\n threading.Thread.__init__(self)\n self.thread_id = thread_id\n self.user_id_list = user_id_list\n self.weibo_id_list = []\n # 删除已有文件\n if os.path.exists(weibo_content % thread_id):\n os.remove(weibo_content % thread_id)\n if os.path.exists(edges_user_pub_weibo % thread_id):\n os.remove(edges_user_pub_weibo % thread_id)\n if os.path.exists(edges_user_trans_weibo % thread_id):\n os.remove(edges_user_trans_weibo % thread_id)\n if os.path.exists(weibo_comments_path % thread_id):\n os.remove(weibo_comments_path % thread_id)\n self.f_weibo_comment = open(weibo_comments_path % thread_id, 'w+', encoding='utf-8')\n\n def run(self):\n \"\"\"\n 线程函数\n :return:\n \"\"\"\n print('线程%d爬取开始' % self.thread_id)\n for user_id in self.user_id_list:\n try:\n response_json = get_single_page(user_id)\n analysis(response_json, user_id, self.thread_id, self.weibo_id_list, self.f_weibo_comment)\n sleep(randint(1, 3))\n except Exception as e1:\n print(e1.args)\n # 线程计数器,爬取完加1\n global thread_countLock\n thread_countLock = thread_countLock + 1\n print('线程%d爬取结束' % self.thread_id)\n self.f_weibo_comment.close()\n\n def get_weibo_id_list(self):\n return self.weibo_id_list\n\n\n# 线程计数器\nthread_countLock = 0\nuser_id_list = []\n\n\ndef work_fun():\n try:\n f_users_id = open(r'E:\\weibo\\userNodes.txt', 'r', encoding='utf-8')\n lines = f_users_id.readlines()\n for line in lines:\n user_id = line.strip('\\n')\n user_id_list.append(user_id)\n f_users_id.close()\n # 将用户结点列表一分为三\n each_coverage_size = int(len(user_id_list)/4)\n # 所有博文ID\n all_weibo_id_list = []\n # 开始爬取时间\n # time_start = time.time()\n # 创建线程爬取\n thread1 = MyThread(1, user_id_list[0: each_coverage_size])\n thread2 = MyThread(2, user_id_list[each_coverage_size: 2*each_coverage_size])\n thread3 = MyThread(3, user_id_list[2*each_coverage_size: 3*each_coverage_size])\n thread4 = MyThread(4, user_id_list[3 * each_coverage_size:])\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n # 爬取完之前不执行后续工作\n reptile_threads = [thread1, thread2, thread3, thread4]\n for thread in reptile_threads:\n thread.join()\n all_weibo_id_list.extend(thread.get_weibo_id_list())\n # 将博文id写入文件\n all_weibo_id_list = list(set(all_weibo_id_list))\n f_weibo_id = open(weiboNodes_path, 'w+', encoding='utf-8')\n for weibo_id in all_weibo_id_list:\n f_weibo_id.write(weibo_id + '\\n')\n except Exception as e:\n print('error:', e.args)\n finally:\n f_weibo_id.close()\n # time_end = time.time()\n # print('\\n totally cost', time_end - time_start) # 
显示程序运行时间\n","repo_name":"csxwant/WeiboReptile","sub_path":"reptile/get_weibo_content.py","file_name":"get_weibo_content.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8650421401","text":"#!/usr/bin/env python\n\n\"\"\"\nconference.py -- Udacity conference server-side Python App Engine API;\n uses Google Cloud Endpoints\n\n$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $\n\nOriginally created by wesc on 2014 apr 21\nModified for Udacity class by Greg Palen, October 2015\n\n\"\"\"\n\n__author__ = 'wesc+api@google.com (Wesley Chun)'\n\n\"\"\"\n\n This file represents the core functionality of the Conference API.\n Internal \"private\" methods are all prefixed with an underscore ( _ )\n to clarify their use.\n\n For each exposed method/endpoint, an annotation describes the request\n container expected, the response container to be returned, the path,\n the Endpoint name, and the HTTP method.\n\n settings.py contains relevant key informations such as client and api keys.\n\n\"\"\"\n\nfrom datetime import datetime\n\nfrom collections import Counter\n\nimport endpoints\nfrom protorpc import messages\nfrom protorpc import message_types\nfrom protorpc import remote\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import ndb\n\nfrom constants import *\nfrom models import *\n\nfrom settings import WEB_CLIENT_ID\nfrom settings import ANDROID_CLIENT_ID\nfrom settings import IOS_CLIENT_ID\nfrom settings import ANDROID_AUDIENCE\n\nfrom utils import getUserId\n\nEMAIL_SCOPE = endpoints.EMAIL_SCOPE\nAPI_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID\nMEMCACHE_ANNOUNCEMENTS_KEY = \"RECENT_ANNOUNCEMENTS\"\nMEMCACHE_SPEAKER_KEY = \"FEATURED_SPEAKER\"\nANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '\n 'are nearly sold out: %s')\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\"\"\" Overall API definition required by Cloud Endpoints. \"\"\"\n@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],\n allowed_client_ids=[WEB_CLIENT_ID,\n API_EXPLORER_CLIENT_ID,\n ANDROID_CLIENT_ID,\n IOS_CLIENT_ID],\n scopes=[EMAIL_SCOPE])\n\nclass ConferenceApi(remote.Service):\n \"\"\" Conference API to facilitate creating and managing Conferences\n and the Sessions and Speakers associated with them. \"\"\"\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n @endpoints.method(SESSIONS_POST_REQUEST, SessionForm,\n path='createSession/{conferenceKey}',\n name='createSession',\n http_method='POST')\n def createSession (self, request):\n \"\"\" Create new Session for a specific Conference. Provide the\n 'websafe' ConferenceKey in the parameter. Returns the newly created\n Session object\n \"\"\"\n return self._createSessionObject(request)\n\n def _createSessionObject (self, request):\n \"\"\" This method requires a logged-in user. First, get the user\n and throw an error if there is no authorized user. 
\"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n # Next, validate that a name for the session was passed in\n if not request.sessionName:\n raise endpoints.BadRequestException(\n \"Session name is required\")\n\n conf = ndb.Key(urlsafe=request.conferenceKey)\n\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % request.conferenceKey)\n\n if conf.parent() != ndb.Key(Profile, getUserId(user)):\n raise endpoints.ForbiddenException(\n 'You must be the conference organizer to be able to create'\n 'sessions for this conference.'\n )\n\n # copy SessionForm/ProtoRPC Message into dict\n data = {field.name: getattr(request, field.name)\n for field in request.all_fields()}\n del data['conferenceKey']\n\n # convert date from strings to Date objects\n if data['date']:\n data['date'] = datetime.strptime(data['date'][:10],\n \"%Y-%m-%d\").date()\n\n if data['startTime']:\n data['startTime'] = datetime.strptime(\n data['startTime'], \"%H:%M\").time()\n\n # generate Conf Key\n wsck = request.conferenceKey\n conf_key = ndb.Key(urlsafe=wsck)\n\n # get the conference entity\n conf = conf_key.get()\n\n # if not found, raise an error and abort\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % wsck)\n\n # create a unique session ID\n s_id = Session.allocate_ids(size=1, parent=conf_key)[0]\n\n # create a key from the ID and save it to the dictionary\n s_key = ndb.Key(Session, s_id, parent=conf_key)\n data['key'] = s_key\n\n # create Session & save to Datastore\n sess = Session(**data)\n sess.put()\n\n \"\"\" add a task to the background queue that will determine if the\n Speaker for this Session should be the Featured Speaker.\n NOTE: if there is no Speaker defined for this Session, do not\n call the task. The endpoint for this is at\n /tasks/set_featured_speaker \"\"\"\n if data['speakerKey']:\n taskqueue.add(params=\n {'c_key': wsck}, url='/tasks/set_featured_speaker')\n return self._copySessionToForm(s_key.get())\n\n @endpoints.method(SESSIONS_GET_REQUEST, SessionForms,\n path='getConferenceSessions/{conferenceKey}',\n http_method='GET', name='getConferenceSessions')\n def getConferenceSessions (self, request):\n \"\"\" Returns all Sessions associated with a particular Conference.\n Provide the websafe ConferenceKey for the Conference to retrieve\n sessions for as the parameter to the request.\n \"\"\"\n wsck = request.conferenceKey\n conf_key = ndb.Key(urlsafe=wsck)\n confSessions = Session.query(ancestor=conf_key)\n return SessionForms(\n items=[self._copySessionToForm(sess)\n for sess in confSessions]\n )\n\n @endpoints.method(SESSION_BY_TYPE_POST_REQUEST, SessionForms,\n path='session/{conferenceKey}/{typeOfSession}',\n http_method='POST', name='getConferenceSessionsByType')\n def getConferenceSessionsByType (self, request):\n \"\"\" Returns a list of Sessions for a given Conference.\n Provide the conferenceKey parameter to specify which Conference\n you want Sessions for, and specify the type of Session desired\n ('Class', 'Workshop,' etc.) using the typeOfSession parameter.\"\"\"\n\n \"\"\" first, build the key to the conference based on the websafe key\n that was in the request \"\"\"\n c_key = ndb.Key(urlsafe=request.conferenceKey)\n\n \"\"\" retrieve the conference. 
If not found, raise an\n exception and quit \"\"\"\n if not c_key.get():\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % request.conferenceKey)\n\n \"\"\" now find all child sessions for this conference with\n ancestor query that will find all sessions with the selected\n conference as a parent \"\"\"\n sessions = Session.query(ancestor=c_key) \\\n .filter(Session.typeOfSession == request.typeOfSession)\n\n # return result(s)\n return SessionForms(\n items=[self._copySessionToForm(sess) for sess in sessions]\n )\n\n \"\"\" Utility method to copy a given Session object to a SessionForm response\n container. This method is called multiple times for queries that return\n multiple sessions. The calling method is responsible for aggregating\n the individual Sessions this method returns into a SessonForms (plural)\n response object \"\"\"\n def _copySessionToForm (self, sess):\n sf = SessionForm()\n for field in sf.all_fields():\n if hasattr(sess, field.name):\n # convert Date to date string; just copy others\n if field.name.endswith('date'):\n setattr(sf, field.name, str(getattr(sess, field.name)))\n elif field.name == 'startTime':\n setattr(sf, field.name, str(getattr(sess, field.name)))\n else:\n setattr(sf, field.name, getattr(sess, field.name))\n if field.name == 'sessionKey':\n setattr(sf, field.name, sess.key.urlsafe())\n sf.check_initialized()\n return sf\n\n\n\n @endpoints.method(SESSION_BY_SPEAKER_POST_REQUEST, SessionForms,\n path='session_by_speaker/{speaker}', http_method='POST',\n name='getSessionsBySpeaker')\n def getSessionsBySpeaker (self, request):\n \"\"\" Returns all Sessions that a particular Speaker is speaking at.\n Provide the websafe key for the Speaker in the request parameter.\n \"\"\"\n sessions = Session.query(Session.speakerKey == request.speakerKey)\n\n return SessionForms(\n items=[self._copySessionToForm(sess) for sess in sessions]\n )\n\n @endpoints.method(SessionQueryForms, SessionForms,\n path='querySessions', http_method='POST',\n name='querySessions')\n def querySessions (self, request):\n \"\"\" Returns all Sessions that match the filters specified in the\n SessionQueryForms POST body. See source code for details on\n how to construct and use the filters. \"\"\"\n sessions = self._sessionQueryFactory(request)\n\n return SessionForms(\n items=[self._copySessionToForm(sess) for sess in sessions]\n )\n\n @endpoints.method(WISHLIST_REQUEST, BooleanMessage,\n path='wishlist', http_method='POST',\n name='addSessionToWishlist')\n def addSessionToWishlist (self, request):\n \"\"\" Adds a particular Session of a Conference to the current user's\n 'wishlist' of Sessions (which is part of their Profile).\n In the request body, provide the websafe Session Key for\n the Session to attach to the Wishlist. A Session can only\n be added once (no duplicates allowed). 
\"\"\"\n return self._addSessionToWishlist(request)\n\n def _addSessionToWishlist (self, request):\n # adds a session to a the current user's wish list\n result = None\n\n # get the user's profile\n prof = self._getProfileFromUser()\n\n # get the key for the session that will be added to the wishlist\n wssk = request.sessionKey\n\n # retrieve the session from Datastore\n sess = ndb.Key(urlsafe=wssk).get()\n\n \"\"\" if we get no results on the session query, throw an exception.\n Otherwise, check to see if the session is already in the wish-\n list and if it is, throw an error preventing a duplicate entry \"\"\"\n if not sess:\n raise endpoints.NotFoundException(\n 'No Session found with key: %s' % wssk)\n if wssk in prof.sessionKeysWishList:\n raise ConflictException(\n \"You have already added for this session\")\n\n \"\"\" If we get here, all is good, so add the session to the user's\n wish list, which is part of their profile \"\"\"\n prof.sessionKeysWishList.append(wssk)\n result = True\n\n # Save the profile back to Datastore\n prof.put()\n\n return BooleanMessage(data=result)\n\n @endpoints.method(WISHLIST_REQUEST, BooleanMessage,\n path='wishlist',\n http_method='DELETE', name='removeSessionFromWishlist')\n def removeSessionFromWishlist (self, request):\n \"\"\" Removes a specific Session from the current user's wishlist of\n Sessions. Specify which session to remove by providing the websafe\n Session Key in the request body. \"\"\"\n return self._removeSessionFromWishlist(request)\n\n def _removeSessionFromWishlist (self, request):\n \"\"\" Takes a session in the request body and if present in the user's\n wish list, removes it. \"\"\"\n result = None\n\n # get the user's profile which contains the wishlist\n prof = self._getProfileFromUser()\n\n \"\"\" Now use the websafe key in the request to find and load the\n session the user wants to remove \"\"\"\n wssk = request.sessionKey\n sess = ndb.Key(urlsafe=wssk).get()\n\n \"\"\" If the session was not found, or if the session was not in the\n user's wishlist already, return an error \"\"\"\n if not sess:\n raise endpoints.NotFoundException(\n 'No Session found with key: %s' % wssk)\n if wssk in prof.sessionKeysWishList:\n prof.sessionKeysWishlist.remove(wssk)\n result = True\n else:\n result = False\n\n \"\"\" If we get to this point, all is good. Now safe the updated\n profile \"\"\"\n prof.put()\n return BooleanMessage(data=result)\n\n @endpoints.method(message_types.VoidMessage, SessionForms,\n http_method='POST', name='getSessionsInWishlist')\n def getSessionsInWishlist (self, request):\n \"\"\" Returns a the current user's wishlist of sessions. 
\"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n prof = self._getProfileFromUser()\n\n # Now get the session keys in their wishlist\n sessions = prof.sessionKeysWishList\n\n # return the collection of sessions\n return SessionForms(\n items=[self._copySessionToForm(ndb.Key(urlsafe=session).get())\n for session in sessions]\n )\n\n def _sessionQueryFactory (self, request):\n # Return formatted session query from the submitted filters\n q = Session.query()\n inequality_filter, filters = self._formatFilters(request.filters)\n\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Session.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Session.name)\n\n for filtr in filters:\n if filtr[\"field\"] in [\"duration\", \"startTime\"]:\n filtr[\"value\"] = int(filtr[\"value\"])\n formatted_query = ndb.query.FilterNode(filtr[\"field\"],\n filtr[\"operator\"],\n filtr[\"value\"])\n q = q.filter(formatted_query)\n return q\n\n \"\"\"\n The following method satisfies:\n Requirement 4.2: Come up with 2 additional queries\n\n This method searches for sessions based on a combination of both\n the session time and the session type. Both the time and the type\n are contained in the body of the POST request.\n\n The endpoint will return all sessions that match the type of the\n request AND occur BEFORE the time specified in the request.\n \"\"\"\n @endpoints.method(QUERY_POST_REQUEST, SessionForms,\n path='sessionsByTypeLessThanTime',\n http_method='POST',\n name='sessionsByTypeLessThanTime')\n def sessionsByTypeLessThanTime (self, request):\n \"\"\" Returns all Sessions (spanning all Conferences) that are of a\n specified type and that occur strictly before a specified time\n (strictly meaning NOT \"at or before,\" but just \"before\").\n In the POST request body, the typeOfSession field should be\n set to the type of session to search for (e.g. 'workshop,'\n 'lecture,' etc.) and the startTime field should contain a\n properly formatted Time string ('HH:MM' in 24 hour format).\n \"\"\"\n \"\"\" startTime is stored as a string and is in 24-hour HH:MM format\n so a less-than search will yield the correct results.\n For this to work, a query with a subsequent filter is needed.\n First, all sessions (regardless of which conference they are part\n of) will be queried to find the subset of sessions that match the\n type defined by the request. From there, that query will be\n filtered to contain only those sessions that start strictly\n before the time specified in the request (meaning NOT \"at or\n before,\" but purely before. 
So if the request startTime is 19:00:00\n then a session starting at exactly that time will NOT be returned.\n\n First build a query for all sessions that match the session type in\n the request and sort it by typeOfSession \"\"\"\n matchingSessions = Session.query(\n Session.typeOfSession == request.typeOfSession).filter(\n Session.startTime < datetime.strptime(\n request.startTime, \"%H:%M\").time()\n )\n\n \"\"\" Now copy the matching sessions into the SessionForms and return\n them \"\"\"\n return SessionForms(\n items=[self._copySessionToForm(sess)\n for sess in matchingSessions]\n )\n\n \"\"\"\n The following method satisfies:\n Requirement 4.2: Come up with 2 additional queries\n Requirement 4.4: Student proposes one or more solutions to the\n problematic query\n\n This method searches for sessions based on a combination of both\n the session time and the session type. Both the time and the type\n are contained in the body of the POST request.\n\n The endpoint will return all sessions that DO NOT match the session\n type specified in the request and that occur BEFORE the start time\n specified in the request.\n \"\"\"\n @endpoints.method(QUERY_POST_REQUEST, SessionForms,\n path='queryProblem',\n http_method='POST',\n name='queryProblem')\n def queryProblem (self, request):\n \"\"\" Returns all Sessions (across all Conferences) that do NOT match\n the specified typeOfSession and that DO occur strictly before\n (not \"at or before\") the specified startTime.\\n\n\n typeOfSession is a string that identifies the specific type\n of session to EXCLUDE from the search (e.g. 'workshop,' or\n 'lecture').\\n\n\n startTime is a string in proper Time format (HH:MM) specified\n using 24 hour time. \"\"\"\n\n \"\"\" First, start by getting all of the sessions that do NOT match\n the type specified in the query. Then, iterate over the results\n and build a new list that contains only those sessions that start\n before the specified time.\n\n First build a query for all sessions that do not match the session\n type in the request \"\"\"\n sessionsByType = Session.query(\n Session.typeOfSession != request.typeOfSession)\n\n \"\"\" Using the sessions that were retrieved by the query above, set up\n a new list to contain only those sessions that start before the\n specified startTime and then iterate over the query results,\n appending only those sessions that start before the time specified\n in the request \"\"\"\n matchingSessions = []\n for sess in sessionsByType:\n \"\"\" make sure that there is a valid startTime and that the\n starTime is less than the specified time \"\"\"\n if sess.startTime and (sess.startTime < datetime.strptime(\n request.startTime, \"%H:%M\").time()):\n matchingSessions.append(sess)\n\n \"\"\" Now copy the matching sessions into the SessionForms and return\n them \"\"\"\n return SessionForms(\n items=[self._copySessionToForm(sess)\n for sess in matchingSessions]\n )\n\n# - - - Speaker objects - - - - - - - - - - - - - - - - -\n\n @endpoints.method(SpeakerForm, SpeakerForm, path='speaker',\n http_method='POST', name='addSpeaker')\n def addSpeaker (self, request):\n # Create a new speaker\n return self._createSpeakerObject(request)\n\n def _createSpeakerObject (self, request):\n \"\"\" Creates a new Speaker in the system and returns the newly created\n Speaker object as evidence of success. \"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n \"\"\" If we got here, the user is authorized. 
Now confirm that they\n have a Display Name set in their profile \"\"\"\n if not request.displayName:\n raise endpoints.BadRequestException(\n \"Speaker 'displayName' field required\")\n\n # Set up a dictionary containing the speaker object fields\n data = {field.name: getattr(request, field.name)\n for field in request.all_fields()}\n\n # don't use the websafekey - it's not part of the speaker\n del data['websafeKey']\n\n # generate a unique ID as a key for this speaker\n sp_id = Speaker.allocate_ids(size=1)[0]\n sp_key = ndb.Key(Speaker, sp_id)\n\n # now save the new key to the dictionary\n data['key'] = sp_key\n del data['profileKey']\n\n \"\"\" create the Speaker object, passing in the dictionary to the\n constructor. The returned object will be the new Speaker object\n with the relevant fields filled in. \"\"\"\n sp = Speaker(**data)\n\n # Save the speaker to Datastore\n sp.put()\n return self._copySpeakerToForm(sp)\n\n def _copySpeakerToForm (self, speaker):\n # Copy relevant fields from Speaker to SpeakerForm\n sf = SpeakerForm()\n for field in sf.all_fields():\n if hasattr(speaker, field.name):\n setattr(sf, field.name, getattr(speaker, field.name))\n elif field.name == \"websafeKey\":\n setattr(sf, field.name, speaker.key.urlsafe())\n sf.check_initialized()\n return sf\n\n @endpoints.method(GET_FEATURED_SPEAKER_REQUEST, FeaturedSpeakerData,\n path='getFeaturedSpeaker/{conf_key}',\n http_method='GET', name='getFeaturedSpeaker')\n def getFeaturedSpeaker (self, request):\n \"\"\" Returns information about the Featured Speaker for a particular\n Conference. In the request, specify the Conference for which\n the Featured Speaker information is desired.\\n\n\n If a Featured Speaker is set for the specified Conference, this\n will return a FeaturedSpeakerData object that contains a field\n for the Speaker key (uniquely identifies the Speaker in the system)\n and a list of Session Names that the Speaker is speaking at.\\n\n\n THe \"Featured Speaker\" is defined as the Speaker at a Conference\n who is speaking at the most Sessions. If there are multiple\n Speakers that are \"tied\" for the most Sessions, an arbitrary\n Speaker is chosen from the Speakers in the tie.\\n\n\n See _setFeaturedSpeaker() in the source code for more details.\"\"\"\n featuredSpeakerMessage = memcache.get(\n MEMCACHE_SPEAKER_KEY + request.conf_key)\n return FeaturedSpeakerData(\n speakerKey=featuredSpeakerMessage['key'],\n items=[self._copySpeakerSessionToForm(sess)\n for sess in featuredSpeakerMessage['sessionName']])\n\n\n def _copySpeakerSessionToForm (self, sess):\n sf = FeaturedSpeakerSession()\n # Copy relevant fields from Speaker to SpeakerForm\n sf.sessionName = sess\n sf.check_initialized()\n return sf\n\n @endpoints.method(message_types.VoidMessage, SpeakerForms,\n path='speakers',\n http_method='GET', name='getAllSpeakers')\n def getAllSpeakers (self, request):\n \"\"\" Returns a list of all the Speakers that are in the system. 
\"\"\"\n speakers = Speaker.query()\n return SpeakerForms(items=[self._copySpeakerToForm(speaker)\n for speaker in speakers])\n\n# - - - Conference objects - - - - - - - - - - - - - - - - -\n def _copyConferenceToForm (self, conf, displayName):\n # Copy relevant fields from Conference to ConferenceForm.\n cf = ConferenceForm()\n for field in cf.all_fields():\n if hasattr(conf, field.name):\n \"\"\" convert Date to date string; just copy others \"\"\"\n if field.name.endswith('Date'):\n setattr(cf, field.name, str(getattr(conf, field.name)))\n else:\n setattr(cf, field.name, getattr(conf, field.name))\n elif field.name == \"websafeKey\":\n setattr(cf, field.name, conf.key.urlsafe())\n if displayName:\n setattr(cf, 'organizerDisplayName', displayName)\n cf.check_initialized()\n return cf\n\n def _createConferenceObject (self, request):\n \"\"\" Create or update Conference object,\n returning ConferenceForm/request.\"\"\"\n\n \"\"\" Check that there is a currently logged-in user (authorized) and\n if not, raise an exception. \"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n\n \"\"\" The 'name' field of the Conference object is a required field.\n Here, we check to make sure that the data passed in does include\n a filled-in name field. Raise an exception if it's blank. \"\"\"\n if not request.name:\n raise endpoints.BadRequestException(\n \"Conference 'name' field required\")\n\n # copy ConferenceForm/ProtoRPC Message into dict\n data = {field.name: getattr(request, field.name)\n for field in request.all_fields()}\n del data['websafeKey']\n del data['organizerDisplayName']\n\n \"\"\" add default values for those missing\n (both data model & outbound Message) \"\"\"\n for df in DEFAULTS:\n if data[df] in (None, []):\n data[df] = DEFAULTS[df]\n setattr(request, df, DEFAULTS[df])\n\n \"\"\" convert dates from strings to Date objects;\n set month based on start_date \"\"\"\n if data['startDate']:\n data['startDate'] = datetime.strptime(data['startDate'][:10],\n \"%Y-%m-%d\").date()\n data['month'] = data['startDate'].month\n else:\n data['month'] = 0\n if data['endDate']:\n data['endDate'] = datetime.strptime(data['endDate'][:10],\n \"%Y-%m-%d\").date()\n\n # set seatsAvailable to be same as maxAttendees on creation\n if data[\"maxAttendees\"] > 0:\n data[\"seatsAvailable\"] = data[\"maxAttendees\"]\n\n # get Profile Key based on user ID\n p_key = ndb.Key(Profile, user_id)\n\n \"\"\" generate a unique conference ID with the profile\n as the ancestor \"\"\"\n c_id = Conference.allocate_ids(size=1, parent=p_key)[0]\n\n # generate the conference key with the ancestor profile\n c_key = ndb.Key(Conference, c_id, parent=p_key)\n\n # store the key and the organizer ID in the dictionary\n data['key'] = c_key\n data['organizerUserId'] = request.organizerUserId = user_id\n\n # Save the Conference to Datastore\n Conference(**data).put()\n\n \"\"\" Now send email to organizer confirming\n creation of Conference & return (modified) ConferenceForm \"\"\"\n taskqueue.add(params={'email': user.email(),\n 'conferenceInfo': repr(request)},\n url='/tasks/send_confirmation_email'\n )\n return request\n\n @ndb.transactional()\n def _updateConferenceObject (self, request):\n # This method updates an existing conference\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n\n # update existing conference\n conf = 
ndb.Key(urlsafe=request.websafeConferenceKey).get()\n\n # check that conference exists\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' %\n request.websafeConferenceKey)\n\n # check that user is owner\n if user_id != conf.organizerUserId:\n raise endpoints.ForbiddenException(\n 'Only the owner can update the conference.')\n\n \"\"\" Not getting all the fields, so don't create a new object; just\n copy relevant fields from ConferenceForm to Conference object \"\"\"\n for field in request.all_fields():\n data = getattr(request, field.name)\n # only copy fields where we get data\n if data not in (None, []):\n # special handling for dates (convert string to Date)\n if field.name in ('startDate', 'endDate'):\n data = datetime.strptime(data, \"%Y-%m-%d\").date()\n if field.name == 'startDate':\n conf.month = data.month\n # write to Conference object\n setattr(conf, field.name, data)\n\n # save Conference to Datastore\n conf.put()\n prof = ndb.Key(Profile, user_id).get()\n return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))\n\n @endpoints.method(ConferenceForm, ConferenceForm,\n path='conference', http_method='POST',\n name='createConference')\n def createConference (self, request):\n \"\"\" Create a new Conference in the system. \"\"\"\n return self._createConferenceObject(request)\n\n @endpoints.method(CONF_POST_REQUEST, ConferenceForm,\n path='conference/{websafeConferenceKey}',\n http_method='PUT', name='updateConference')\n def updateConference (self, request):\n \"\"\" Updates an existing Conference (as identified by the\n websafeConferenceKey parameter) with the data provided in the\n request body. Returns the udpated Conference object. \"\"\"\n return self._updateConferenceObject(request)\n\n @endpoints.method(CONF_GET_REQUEST, ConferenceForm,\n path='conference/{websafeConferenceKey}',\n http_method='GET', name='getConference')\n def getConference (self, request):\n \"\"\" Returns the Conference object identified by the\n websafeConferenceKey parameter or an exception if the specified\n Conference key does not exist. \"\"\"\n conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' %\n request.websafeConferenceKey)\n prof = conf.key.parent().get()\n\n # return ConferenceForm\n return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))\n\n @endpoints.method(message_types.VoidMessage, ConferenceForms,\n path='getConferencesCreated',\n http_method='POST', name='getConferencesCreated')\n def getConferencesCreated (self, request):\n \"\"\" Return a list of all Conferences that the current user has\n created/organized. 
\"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n\n # create ancestor query for all key matches for this user#\n confs = Conference.query(ancestor=ndb.Key(Profile, user_id))\n prof = ndb.Key(Profile, user_id).get()\n\n # return set of ConferenceForm objects per Conference\n return ConferenceForms(\n items=[self._copyConferenceToForm(\n conf, getattr(prof, 'displayName')) for conf in confs]\n )\n\n def _getQuery (self, request):\n # Return formatted query from the submitted filters\n q = Conference.query()\n inequality_filter, filters = self._formatFilters(request.filters)\n\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Conference.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Conference.name)\n\n for filtr in filters:\n if filtr[\"field\"] in [\"month\", \"maxAttendees\"]:\n filtr[\"value\"] = int(filtr[\"value\"])\n formatted_query = ndb.query.FilterNode(\n filtr[\"field\"], filtr[\"operator\"], filtr[\"value\"])\n q = q.filter(formatted_query)\n return q\n\n def _formatFilters (self, filters):\n # Parse, check validity and format user supplied filters\n formatted_filters = []\n inequality_field = None\n\n \"\"\" loop through the filters that were provided and make sure that\n each in the dictionary is actually a valid filter identifier.\n See Constants.py for the list of defined filters \"\"\"\n for f in filters:\n filtr = {field.name: getattr(f, field.name)\n for field in f.all_fields()}\n\n try:\n filtr[\"field\"] = FIELDS[filtr[\"field\"]]\n filtr[\"operator\"] = OPERATORS[filtr[\"operator\"]]\n except KeyError:\n raise endpoints.BadRequestException(\n \"Filter contains invalid field or operator.\")\n\n # Every operation except \"=\" is an inequality\n if filtr[\"operator\"] != \"=\":\n \"\"\" check if inequality op has been used in previous filters\n disallow filter if inequality was performed\n on different field before\n track the field on which the inequality\n operation is performed \"\"\"\n if inequality_field and inequality_field != filtr[\"field\"]:\n raise endpoints.BadRequestException(\n \"Inequality filter is allowed on only one field.\")\n else:\n inequality_field = filtr[\"field\"]\n\n formatted_filters.append(filtr)\n return (inequality_field, formatted_filters)\n\n @endpoints.method(ConferenceQueryForms, ConferenceForms,\n path='queryConferences', http_method='POST',\n name='queryConferences')\n def queryConferences (self, request):\n \"\"\" Returns a list of Conferences that satisfy the query specifications\n provided by the request body. See the source code for specifics\n on how to specify the query terms. 
\"\"\"\n conferences = self._getQuery(request)\n\n \"\"\" need to fetch organiser displayName from profiles.\n Get all keys and use get_multi for speed \"\"\"\n organisers = [(ndb.Key(Profile, conf.organizerUserId))\n for conf in conferences]\n profiles = ndb.get_multi(organisers)\n\n # put display names in a dict for easier fetching\n names = {}\n for profile in profiles:\n names[profile.key.id()] = profile.displayName\n\n # copy conference objects to form that can return multiple confs\n return ConferenceForms(\n items=[self._copyConferenceToForm(\n conf, names[conf.organizerUserId])\n for conf in conferences]\n )\n\n# - - - Profile objects - - - - - - - - - - - - - - - - - - -\n\n def _copyProfileToForm (self, prof):\n \"\"\" Copy relevant fields from Profile to ProfileForm.\n Create a new, blank profile form to populate \"\"\"\n pf = ProfileForm()\n for field in pf.all_fields():\n \"\"\" loop through the fields of the form and populate them with\n data from the request \"\"\"\n if hasattr(prof, field.name):\n # convert t-shirt string to Enum; just copy others\n if field.name == 'teeShirtSize':\n setattr(pf, field.name, getattr(\n TeeShirtSize, getattr(prof, field.name)))\n else:\n setattr(pf, field.name, getattr(prof, field.name))\n pf.check_initialized()\n return pf\n\n def _getProfileFromUser (self):\n \"\"\" Return user Profile from datastore,\n creating new one if non-existent.\n Verify a logged-in user (auth) and return an error if not \"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n # get the user's Profile based on their user ID\n user_id = getUserId(user)\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n\n # create new Profile if one was not retrieved by the above query\n if not profile:\n profile = Profile(\n key=p_key,\n displayName=user.nickname(),\n mainEmail=user.email(),\n teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n\n # return the profile fully populated\n return profile\n\n def _doProfile (self, save_request=None):\n \"\"\" Get user Profile and return to user, possibly updating it first.\n Get user Profile \"\"\"\n prof = self._getProfileFromUser()\n\n \"\"\" if this request is to save an udpated profile, then store the\n user-modifiable data into the existing fields and then save\n the profile back to Datastore \"\"\"\n if save_request:\n for field in ('displayName', 'teeShirtSize'):\n if hasattr(save_request, field):\n val = getattr(save_request, field)\n if val:\n setattr(prof, field, str(val))\n prof.put()\n\n # return ProfileForm\n return self._copyProfileToForm(prof)\n\n @endpoints.method(message_types.VoidMessage, ProfileForm,\n path='profile', http_method='GET', name='getProfile')\n def getProfile (self, request):\n \"\"\" Returns the Profile of the current user. \"\"\"\n return self._doProfile()\n\n @endpoints.method(ProfileMiniForm, ProfileForm,\n path='profile', http_method='POST', name='saveProfile')\n def saveProfile (self, request):\n \"\"\" Updates the Profile of the current user with the data provided\n in the request body. 
\"\"\"\n return self._doProfile(request)\n\n# - - - Announcements - - - - - - - - - - - - - - - - - - - -\n\n @staticmethod\n def _cacheAnnouncement ():\n \"\"\" Create Announcement & assign to memcache; used by\n memcache cron job & putAnnouncement().\n \"\"\"\n confs = Conference.query(ndb.AND(\n Conference.seatsAvailable <= 5,\n Conference.seatsAvailable > 0)\n ).fetch(projection=[Conference.name])\n\n if confs:\n \"\"\" If there are almost sold out conferences,\n format announcement and set it in memcache \"\"\"\n announcement = ANNOUNCEMENT_TPL % (\n ', '.join(conf.name for conf in confs))\n memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)\n else:\n \"\"\" If there are no sold out conferences,\n delete the memcache announcements entry \"\"\"\n announcement = \"\"\n memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)\n\n return announcement\n\n @staticmethod\n def _setFeaturedSpeaker(self, request):\n \"\"\"\n For the purposes of this project, there will be only one Featured\n Speaker for each conference. The Featured Speaker will be the\n speaker who has the MOST speaking sessions at this conference.\n\n This was a conscious design decision to define Featured Speaker\n more \"robustly\" than the project requirements. I chose to do this\n in order to learn how to create and return more sophisticated\n Message objects (in this case, one that contained a string and\n a list that had to be built up).\n \"\"\"\n\n # Create a key based on the c_key parameter passed in.\n c_key = ndb.Key(urlsafe=request.get('c_key'))\n\n # Limit the projection to minimize data transfer - only need 2 fields.\n qo = ndb.QueryOptions(projection=['speakerKey', 'sessionName'])\n\n # Construct the query - all Sessions for the specified Conference\n q = Session.query(ancestor=c_key)\n\n # Perform the query\n results = q.fetch(options=qo)\n\n # Set up a few variables to hold the processed results\n\n \"\"\" speakerSummary will contain a list of Speaker keys. If a Speaker\n is speaking at more than one Session, their key will appear\n in the list once for every Session they are speaking at. This\n fact will be used to count their \"appearances\" later. \"\"\"\n speakerSummary = []\n\n \"\"\" sessions will contain a list of tuples that each contain the key\n for the Speakera and the name of the Session they are speaking\n at. This list will be used later to extract the session names\n for whichever Speaker is determined to be the Featured Speaker.\"\"\"\n sessions = []\n featuredMessage = {}\n\n # the 'key' element will hold the websafe key for the Speaker object\n featuredMessage['key'] = \"\"\n\n \"\"\" The 'sessionName' list will contain a list of the Session names\n that this Speaker is speaking at \"\"\"\n featuredMessage['sessionName'] = []\n\n \"\"\" To determine the Featured Speaker, an intermediate data object is\n needed as there is no query-based way to do a summarized count\n by Speaker. With traditional SQL, one could use a group-by\n constraint combined with a count() method in the query to retrieve\n these same results very easily. Datastore's \"GQL\" language does\n not support this type of query, so it has to be derived\n programatically. The overall approach is to retrieve all Sessions\n for a particular Conference then iterate over the results and\n build up an interim data structure that has a single \"row\" for\n each Speaker that contains their websafe key and a count of how\n many Sessions they are speaking at. 
From there, the Speaker\n with the highest count can be easily extracted and the Memcache\n entry defined accordingly. \"\"\"\n\n # Iterate over results and build a set of lists for later processing\n for row in results:\n # Update the two lists if there is a Speaker associated\n if row.speakerKey is not None:\n speakerSummary.append(row.speakerKey)\n sessions.append(row)\n\n \"\"\" The Counter class takes the speakerSummary list and creates a tuple\n for each Speaker. The tuple consists of the Speaker's key as the\n first element and the count of how many times that key appeared\n in the speakerSummary list as the 2nd data element. \"\"\"\n summary = Counter(speakerSummary)\n\n \"\"\" this method is a utility method that returns the count which will\n be the 2nd element of the tuple. The Counter class is responsible\n for creating these tuples. \"\"\"\n def get_count(tuple):\n return tuple[1]\n\n \"\"\" This creates the summary list object by taking the summary object\n that Counter created above and sorting it in reverse order (so\n the Speaker with the MOST entries will be 1st in the list). A\n key/value pair is created where the key is the count and the\n value is the Speaker key associated with that count. \"\"\"\n sortedSummary = sorted(summary.items(), key=get_count, reverse=True)\n\n \"\"\" Make sure there are ANY Speakers for the Conference. If so, grab\n the first one and make them the Featured Speaker. \"\"\"\n if sortedSummary:\n featuredSpeakerKey = sortedSummary[0][0]\n else:\n featuredSpeakerKey = None\n\n \"\"\" If a Featured Speaker was found, build up a Memcache entry that\n contains the Speaker's key and a list of Session names for the\n Sessions they are speaking at. \"\"\"\n if featuredSpeakerKey:\n featuredMessage['key'] = featuredSpeakerKey\n for session in sessions:\n if session.speakerKey == featuredSpeakerKey:\n featuredMessage['sessionName'].append(session.sessionName)\n\n \"\"\" The Memcache key consists of the string constant and the\n websafe key for the Conference this relates to. The value\n of the entry is the featuredMessage object that is built up\n in the code block directly above. \"\"\"\n memcache.set(MEMCACHE_SPEAKER_KEY + c_key.urlsafe(),\n featuredMessage)\n\n # If there was no featured Speaker, clear out any Memcache entry.\n else:\n # No featured speakers in the system, so clear the MemCache\n memcache.delete(MEMCACHE_SPEAKER_KEY + c_key.urlsafe())\n return\n\n @endpoints.method(\n message_types.VoidMessage, StringMessage,\n path='conference/announcement/get', http_method='GET',\n name='getAnnouncement')\n def getAnnouncement (self, request):\n \"\"\" Return any current Announcement from Memcache. If there is\n no Announcement present, return an empty string. \"\"\"\n return StringMessage(data=memcache.get(\n MEMCACHE_ANNOUNCEMENTS_KEY) or \"\")\n\n # - - - Registration - - - - - - - - - - - - - - - - - - - -\n\n @ndb.transactional(xg=True)\n def _conferenceRegistration (self, request, reg=True):\n \"\"\" Register or unregister user for selected Conference. Will throw\n an exception if the specified Conference does not exist. Will\n also throw an exception if the user is trying to register for a\n Conferene they have already registered for or if the Conference\n has no remaining seats available. Will also throw an exception if\n trying to unregister from a Conference that the user is not\n presently registered for. 
\"\"\"\n retval = None\n\n # get the Profile from the logged-in user\n prof = self._getProfileFromUser()\n\n \"\"\" check if conf exists with provided websafeConfKey and throw\n an exception if the conference is not in Datastore \"\"\"\n wsck = request.websafeConferenceKey\n conf = ndb.Key(urlsafe=wsck).get()\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % wsck)\n\n # start the registration if that's what this request is for\n if reg:\n # check if user already registered otherwise add\n if wsck in prof.conferenceKeysToAttend:\n raise ConflictException(\n \"You have already registered for this conference\")\n\n # check if seats avail and raise exception if none left\n if conf.seatsAvailable <= 0:\n raise ConflictException(\n \"There are no seats available.\")\n\n # register user, take away one seat\n prof.conferenceKeysToAttend.append(wsck)\n conf.seatsAvailable -= 1\n retval = True\n\n else: # not a register request, so must be unregister.\n # First confirm user already registered\n if wsck in prof.conferenceKeysToAttend:\n\n # unregister user, add back one seat\n prof.conferenceKeysToAttend.remove(wsck)\n conf.seatsAvailable += 1\n retval = True\n else:\n retval = False\n\n # write things back to the datastore & return\n prof.put()\n conf.put()\n return BooleanMessage(data=retval)\n\n @endpoints.method(message_types.VoidMessage, ConferenceForms,\n path='conferences/attending',\n http_method='GET', name='getConferencesToAttend')\n def getConferencesToAttend (self, request):\n \"\"\" Return list of Conferences the current user is registered for. \"\"\"\n\n \"\"\" First, get user's profile and then use that to build a query for\n all descendant conferences (which represent the conferences the\n user has registered for) \"\"\"\n prof = self._getProfileFromUser()\n conf_keys = [ndb.Key(urlsafe=wsck)\n for wsck in prof.conferenceKeysToAttend]\n conferences = ndb.get_multi(conf_keys)\n\n # get organizers of the above conferences\n organisers = [ndb.Key(Profile, conf.organizerUserId)\n for conf in conferences]\n profiles = ndb.get_multi(organisers)\n\n # put display names in a dict for easier fetching\n names = {}\n for profile in profiles:\n names[profile.key.id()] = profile.displayName\n\n # return set of ConferenceForm objects per Conference\n return ConferenceForms(items=[self._copyConferenceToForm(\n conf, names[conf.organizerUserId])\n for conf in conferences])\n\n @endpoints.method(CONF_GET_REQUEST, BooleanMessage,\n path='conference/{websafeConferenceKey}',\n http_method='POST', name='registerForConference')\n def registerForConference (self, request):\n \"\"\" Register the current user for the Conference specified in the\n websafeConferenceKey parameter assuming there are still seats\n available for that Conference and the user isn't already registered\n for that Conference (both will throw exceptions). \"\"\"\n return self._conferenceRegistration(request)\n\n @endpoints.method(CONF_GET_REQUEST, BooleanMessage,\n path='conference/{websafeConferenceKey}',\n http_method='DELETE', name='unregisterFromConference')\n def unregisterFromConference (self, request):\n \"\"\" Unregisters the current user from the Conference specified in the\n websafeConferenceKey parameter assuming they are presently\n registered for that Conference (throws exception if the user is\n not presently registered for that Conference). 
\"\"\"\n return self._conferenceRegistration(request, reg=False)\n\n @endpoints.method(message_types.VoidMessage, ConferenceForms,\n path='filterPlayground',\n http_method='GET', name='filterPlayground')\n def filterPlayground (self, request):\n \"\"\" Filter Playground - a section used for testing various filters\n to validate the result set obtained \"\"\"\n q = Conference.query()\n # field = \"city\"\n # operator = \"=\"\n # value = \"London\"\n # f = ndb.query.FilterNode(field, operator, value)\n # q = q.filter(f)\n q = q.filter(Conference.city == \"London\")\n q = q.filter(Conference.topics == \"Medical Innovations\")\n q = q.filter(Conference.month == 6)\n\n return ConferenceForms(\n items=[self._copyConferenceToForm(conf, \"\") for conf in q]\n )\n\n# register API\napi = endpoints.api_server([ConferenceApi])\n","repo_name":"codingvirtual/fullstack-p4-conference","sub_path":"conference.py","file_name":"conference.py","file_ext":"py","file_size_in_byte":52093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3882896742","text":"from main import db\nfrom main import bcrypt\nfrom flask import Blueprint\n\nfrom models.analyser import Analyser\nfrom models.analysts import Analyst\nfrom models.tests import Test\nfrom models.requests import Request\nfrom models.requests_tests import Request_test\n\n\ndb_commands = Blueprint(\"db\", __name__)\n\n\n@db_commands.cli.command(\"create\")\ndef create_db():\n db.create_all()\n print(\"Tables created\")\n\n\n@db_commands.cli.command(\"drop\")\ndef drop_db():\n db.drop_all()\n print(\"Tables dropped\")\n\n\n@db_commands.cli.command(\"seed\")\ndef seed_db():\n analyser1 = Analyser(\n name = \"ICP1\",\n brand = \"Thermo\",\n model = \"2000\",\n year = \"2012\",\n )\n db.session.add(analyser1)\n\n analyser2 = Analyser(\n name = \"ICP2\",\n brand = \"Thermo\",\n model = \"3000\",\n year = \"2013\"\n )\n db.session.add(analyser2)\n \n admin_user = Analyst(\n email = \"admin@email.com\",\n password = bcrypt.generate_password_hash(\"password123\").decode(\"utf-8\"),\n admin = True\n )\n db.session.add(admin_user)\n\n analyst1 = Analyst(\n email = \"analyst1@email.com\",\n password = bcrypt.generate_password_hash(\"123456\").decode(\"utf-8\"),\n )\n db.session.add(analyst1)\n\n test1 = Test(\n name = \"zinc\"\n )\n db.session.add(test1)\n\n test2 = Test(\n name = \"copper\"\n )\n db.session.add(test2)\n\n test3 = Test(\n name = \"lead\"\n )\n db.session.add(test3)\n\n test4 = Test(\n name = \"mercury\"\n )\n db.session.add(test4)\n\n db.session.commit()\n\n request1 = Request(\n date = \"12/10/2020\",\n status = \"processing\",\n analyst_email = analyst1.email,\n analyser_name = analyser1.name\n\n )\n db.session.add(request1)\n\n request2 = Request(\n date = \"12/11/2020\",\n status = \"finalized\",\n analyst_email = admin_user.email,\n analyser_name = analyser2.name\n )\n db.session.add(request2)\n\n\n request_test1 = Request_test(\n request_id = request1.id,\n test_name = test1.name\n )\n db.session.add(request_test1)\n\n request_test2 = Request_test(\n request_id = request1.id,\n test_name = test2.name\n )\n db.session.add(request_test2)\n\n request_test3 = Request_test(\n request_id = request1.id,\n test_name = test3.name\n )\n db.session.add(request_test3)\n\n request_test4 = Request_test(\n request_id = request2.id,\n test_name = test4.name\n )\n db.session.add(request_test4)\n\n\n db.session.commit()\n print(\"Table 
seeded\")\n","repo_name":"AndresBo/Laboratory-API","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70208038089","text":"string = input('Введите строку: ')\nletter_set = set()\nalone_list = list()\nletter_list = list()\n\nfor i in list(string):\n if list(string).count(i) > 1:\n letter_set.add(i)\n letter_list = sorted(list(letter_set))\n else:\n alone_list.append(i)\nfrst_part = ''.join(letter_list)\nletter_list.extend(alone_list)\nscnd_part = ''.join(reversed(letter_list))\nresult = frst_part + scnd_part\n\nif result == ''.join(reversed(result)):\n print('Можно сделать палидромом')\nelse:\n print('Нельзя сделать полидромом')","repo_name":"hummius/Python_basic","sub_path":"Module19/10_palindrome_again/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70815765448","text":"\"\"\"empty message\n\nRevision ID: 3ae005e36d6c\nRevises: aa0524bf1470\nCreate Date: 2023-06-22 20:07:08.962928\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3ae005e36d6c'\ndown_revision = 'aa0524bf1470'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Transactions', sa.Column('price', sa.DECIMAL(precision=32, scale=17), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Transactions', 'price')\n # ### end Alembic commands ###\n","repo_name":"Stanislav-3/propheter-ds","sub_path":"models/migrations/versions/3ae005e36d6c_.py","file_name":"3ae005e36d6c_.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42948481014","text":"import threading\nimport RPi.GPIO as GPIO\nimport time\nimport json\nfrom take_picture import take_picture\nfrom urllib import request\nimport requests\n\n\ntry:\n # setup motion sensore\n GPIO.setmode(GPIO.BCM)\n\n # get some params from the API\n try:\n with request.urlopen(\"http://www.scrutoscope.live/api/settings\") as url:\n data = json.loads(url.read().decode())\n except:\n print('error')\n\n Trig = 23\n Echo = 24\n previous = 0\n callApi = 0\n amountPicture = data[0]['amountCapture']\n\n GPIO.setup(Trig, GPIO.OUT)\n GPIO.setup(Echo, GPIO.IN)\n GPIO.output(Trig, False)\n\n # main code loop\n while True:\n # if there is no call to the API we call the API (every 20 loop it calls the API)\n if callApi == 0:\n try:\n with request.urlopen(\"http://www.scrutoscope.live/api/settings/camera/1\") as url:\n data = json.loads(url.read().decode())\n except:\n print('error')\n exit()\n\n minimumDistance = json.loads((data[0]['params']))['distance']\n width = json.loads((data[0]['params']))['width']\n height = json.loads((data[0]['params']))['height']\n pictureType = data[0]['type']['type']\n callApi = 20\n else:\n callApi = callApi - 1\n\n GPIO.output(Trig, True)\n time.sleep(0.00001)\n GPIO.output(Trig, False)\n\n while GPIO.input(Echo) == 0: # send ultrasound\n startImpulse = time.time()\n\n while GPIO.input(Echo) == 1: # return of the echo\n endImpulse = time.time()\n\n distance = int(round((endImpulse - startImpulse) * 340 * 100 / 2, 1)) # 
calculate distance (cm)\n\n # if the new distance is less than the previous or if the new distance is less than 2 meters it takes a picture\n if distance <= minimumDistance and previous <= minimumDistance:\n # take amount of picture defined by the API\n threads = [threading.Thread(target=take_picture(width, height, pictureType)) for _ in range(amountPicture)]\n [thread.start() for thread in threads]\n [thread.join() for thread in threads]\n\n # send the fact that we take some pictures for statistics\n url = 'http://www.scrutoscope.live/api/Statistics/post'\n data = {\"amount\": amountPicture, \"type\": pictureType}\n req = requests.post(url, json=data)\n\n previous = distance\nfinally:\n GPIO.cleanup()\n","repo_name":"iot-itakademy/raspberry-hard","sub_path":"security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3644086518","text":"'''\n(c) 2011 Thomas Holder, MPI for Developmental Biology\n\nLicense: BSD-2-Clause\n'''\n\nfrom pymol import cmd\n\n\ndef save_settings(filename='~/.pymolrc-settings.py', quiet=1, *, _self=cmd):\n '''\nDESCRIPTION\n\n Dumps all settings with non-default values to ~/.pymolrc-settings.py\n\n Feature Request: Save settings for later use - ID: 1009951\n https://sourceforge.net/tracker/?func=detail&aid=1009951&group_id=4546&atid=354546\n '''\n from pymol.setting import get_name_list\n quiet = int(quiet)\n if not filename.endswith('.py'):\n print('Warning: filename should end with \".py\"')\n # temporatily load default settings and remember them\n _self.reinitialize('store_defaults')\n _self.reinitialize('original_settings')\n original = [(name, _self.get(name)) for name in get_name_list()]\n _self.reinitialize('settings')\n # dump to file\n filename = cmd.exp_path(filename)\n f = open(filename, 'w')\n f.write('# AUTOGENERATED FILE\\n')\n f.write('from pymol import cmd, invocation\\n')\n f.write('if invocation.options.show_splash: ') # no newline\n f.write(' print(\"Loading settings from \" + ' + repr(filename) + ')\\n')\n count = 0\n for name, o_value in original:\n value = _self.get(name)\n if value != o_value:\n f.write('cmd.set(\"%s\", %s)\\n' % (name, repr(value)))\n if not quiet:\n print('set %s, %s # default: %s' % (name, value, o_value))\n count += 1\n f.close()\n if not quiet:\n print('Dumped %d settings to %s' % (count, filename))\n\n\ndef paper_settings(fancy=0, quiet=0, *, _self=cmd):\n '''\nDESCRIPTION\n\n Set rendering quality high and some stuff good for printing:\n\n * Side chain helper (cartoon_side_chain_helper = 1)\n * No shadows (ray_shadows = 0)\n\nARGUMENTS\n\n fancy = 0 or 1: set cartoon_fancy_helices and cartoon_highlight_color\n {default: 0}\n\nNOTES\n\n You may also try \"set ray_trace_mode, 1\"\n '''\n fancy, quiet = int(fancy), int(quiet)\n if fancy == 1:\n _self.set('cartoon_fancy_helices', 1, quiet=quiet)\n _self.set('cartoon_highlight_color', 'grey50', quiet=quiet)\n _self.set('cartoon_side_chain_helper', 1, quiet=quiet)\n _self.set('ray_shadows', 0, quiet=quiet)\n _self.set('opaque_background', 0, quiet=quiet)\n _self.bg_color('white')\n\n\nclass set_temporary(object):\n '''\nDESCRIPTION\n\n API only. Supports the following pattern:\n\n >>> with set_temporary(pdb_retain_ids=1):\n ... 
cmd.save('out.pdb')\n '''\n\n def __init__(self, *args, _self=cmd, **kwargs):\n self._self = _self\n self.sele = kwargs.pop('selection', '')\n self.args = args + tuple(kwargs.items())\n\n def __enter__(self):\n self.saved = []\n for k, v in self.args:\n v_saved = self._self.get(k, self.sele)\n if v != v_saved:\n self.saved.append((k, v_saved))\n self._self.set(k, v, self.sele)\n return self\n\n def __exit__(self, type, value, traceback):\n for k, v in self.saved:\n self._self.set(k, v, self.sele)\n\n\ncmd.extend('save_settings', save_settings)\ncmd.extend('paper_settings', paper_settings)\n\n# vi:expandtab:smarttab\n","repo_name":"speleo3/pymol-psico","sub_path":"psico/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"16"} +{"seq_id":"11065524823","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, api\nfrom odoo.exceptions import UserError\n\n\nclass TaskStage(models.Model):\n _inherit = 'project.task'\n\n def write(self, values):\n if values.get('stage_id'):\n v_c_user = self.env['res.users'].search([('project_user_type', 'in', ['vendor', 'customer'])])\n if self.env.uid in [val.id for val in v_c_user]:\n a = 0\n else:\n if self.env.uid == self.manager_id.id:\n a = 0\n else:\n if self.env.uid == self.user_id.id:\n a = 0\n else:\n if self.user_has_groups('project.group_project_manager'):\n a = 0\n else:\n if self.user_has_groups('project.group_project_manager'):\n a = 0\n else:\n raise UserError('You have not permission to change task stage!')\n return super(TaskStage, self).write(values)\n","repo_name":"sagarpise/datn","sub_path":"project_permission/models/project_task.py","file_name":"project_task.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21148946879","text":"\"\"\"\nReceives an index of the recipes and a list of (preprocessed) text files.\nWrites the recipes that match each of the data sets to a separate file.\n\"\"\"\nimport sys\nimport random\nrandom.seed(7)\n\n\nf = open(sys.argv[1])\nL = []\nfor line in f:\n fields = line.strip().split()\n L.append((fields[1], int(fields[0])))\nf.close()\n\nL = random.sample(L,len(L))\ntot_recipes = 0\ntrain_files = []\ntest_files = []\ndev_files = []\n\nfor filename,recipes in L:\n tot_recipes += recipes\n if tot_recipes < 60000:\n train_files.append((filename,recipes))\n elif tot_recipes < 67500:\n test_files.append((filename,recipes))\n else:\n dev_files.append((filename,recipes))\n \n\nprint(' '.join([x[0] for x in train_files])+'\\n')\nprint(sum([x[1] for x in train_files]))\nprint(' '.join([x[0] for x in test_files])+'\\n')\nprint(sum([x[1] for x in test_files]))\nprint(' '.join([x[0] for x in dev_files])+'\\n')\nprint(sum([x[1] for x in dev_files]))\n\n\n\"\"\"\ndef non_empty(L):\n \"returns True if L contains a non-empty string\"\n for x in L:\n if x != '':\n return True\n return False\n\nif len(sys.argv) != 3:\n print('Usage: grep_orig_text.py ' + \\\n '')\n sys.exit(-1)\n\nf = open(sys.argv[1])\nindex = {}\nfor line in f:\n fields = line.strip().split()\n index[(fields[0].split('.')[0],int(fields[1]))] = fields[2]\nf.close()\n\nfilenames = sys.argv[2].split(':')\nfor filename in filenames:\n recipe_index = 0\n cur_recipe = []\n f = open(filename)\n f_outs = {}\n f_outs['train'] = open(filename+'.train','w')\n f_outs['test'] = open(filename+'.test','w')\n f_outs['dev'] = open(filename+'.dev','w')\n 
for line in f:\n line = line.strip()\n if 'END_RECIPE' in line:\n if non_empty(cur_recipe):\n f_outs[index[(filename.split('.')[0],recipe_index)]].write('\\n'.join(cur_recipe)+'\\n')\n cur_recipe = []\n recipe_index += 1\n else:\n cur_recipe.append(line)\n f.close()\n for f_out in f_outs.values():\n f_out.close()\n\n\n \n\n \n\"\"\"\n","repo_name":"zoharai/Lexical-Event-Ordering-with-an-Edge-Factored-Model---improved","sub_path":"grep_orig_text.py","file_name":"grep_orig_text.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4986627850","text":"from typing import Optional\nimport zlib\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom production.solver_interface import ProblemType, Solver, SolverResult, Fail, Pass\nfrom production.cpp_emulator.emulator import Matrix\nfrom production import db\n\n_conn = None\ndef get_shared_conn():\n global _conn\n if _conn is None:\n _conn = db.get_conn()\n return _conn\n\nclass Combiner(Solver):\n def __init__(self, args):\n [self.epoch] = args\n\n def scent(self) -> str:\n return 'Combiner ' + self.epoch\n\n def supports(self, problem_type: ProblemType) -> bool:\n return problem_type == ProblemType.Reassemble\n\n def solve(\n self, name: str,\n src_model: Optional[bytes],\n tgt_model: Optional[bytes]) -> SolverResult:\n conn = get_shared_conn()\n cur = conn.cursor()\n\n assert name.startswith('FR')\n\n traces = {}\n energies = {}\n for part in ['ZD', 'ZA']:\n cur.execute('''\n SELECT\n traces.data, energy\n FROM problems\n JOIN traces\n ON traces.problem_id = problems.id\n WHERE problems.name = %s AND traces.status = 'DONE'\n ORDER BY traces.energy\n LIMIT 1\n ''', [name.replace('FR', part)])\n rows = cur.fetchall()\n if not rows:\n return SolverResult(Pass())\n [[trace, energy]] = rows\n traces[part] = zlib.decompress(trace)\n energies[part] = energy\n\n R = Matrix.parse(src_model).R\n assert traces['ZD'][-1] == 255\n return SolverResult(\n traces['ZD'][:-1] + traces['ZA'],\n dict(expected_energy=energies['ZD'] - 3 * R**3 - 20 + energies['ZA']))\n","repo_name":"Vlad-Shcherbina/icfpc2018-tbd","sub_path":"production/combiner.py","file_name":"combiner.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"19087158812","text":"import machine\n\nfrom .base import BaseApp\n\n\nclass App(BaseApp):\n _dac = None\n\n async def init(self):\n port = int(self._config.get('port'))\n\n self._dac = machine.DAC(\n machine.Pin(port, mode=machine.Pin.OUT)\n )\n\n async def process(self, payload: dict, subtopics: list):\n value = payload.get('value')\n if value is not None:\n value = int(max(0., min(1., float(value))) * 190)\n self._dac.write(value)\n print(\"DAC: Value {} was written.\".format(value))\n","repo_name":"thejoeejoee/fis-esp-firmware","sub_path":"fis/apps/dac.py","file_name":"dac.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"16655487581","text":"from wordle import Wordle\nfrom wordle_solver import WordleSolver\n\nfrom nltk.corpus import words\nfrom random import randint\n\nif __name__ == \"__main__\":\n\n all_words = words.words()\n all_words = list(filter(lambda x:len(x) == 5, all_words))\n\n TOTAL_PLAYS = 1000\n num_wins = 0\n for i in range(TOTAL_PLAYS):\n\n rand_idx = randint(0, len(all_words) - 1)\n word = all_words[rand_idx]\n\n 
game = WordleSolver(word)\n\n while not game.wordle.game_over:\n guess = game.random_prediction()\n \n if game.wordle.win:\n num_wins += 1\n\n print(i)\n \n accuracy = (num_wins / TOTAL_PLAYS) * 100\n print(accuracy)\n \n\n\n","repo_name":"chenalan02/WordleSolver","sub_path":"get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27111974885","text":"from rest_framework import serializers, viewsets, status, response\nfrom rest_framework.decorators import list_route, parser_classes\nfrom rest_framework.parsers import MultiPartParser, FormParser\n\nfrom visualiser import models, data\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n labels = serializers.SerializerMethodField()\n text = serializers.SerializerMethodField()\n\n class Meta:\n model = models.Image\n fields = ('id', 'url', 'last_modified', 'labels', 'text')\n\n def get_labels(self, obj):\n labels = obj.get_labels()\n labels = [{'description': label.description, 'score': label.score,\n 'mid': label.mid, 'topicality': label.topicality} for\n label in labels]\n return sorted(labels, key=lambda x: x.get('score', 0), reverse=True)\n\n def get_text(self, obj):\n texts = obj.get_text()\n texts = [ind_text for text in texts for ind_text in\n text.text.split('\\n') if ind_text]\n return texts\n\n\nclass ImageView(viewsets.ViewSet):\n\n def queryset(self):\n return models.Image.objects.all()\n\n def get_serializer_class(self):\n if self.action in ['list', 'retrieve']:\n return ImageSerializer\n\n @list_route(methods=['post'])\n @parser_classes((FormParser, MultiPartParser,))\n def upload(self, request):\n upload = request.FILES.get('file')\n image_url = request.data.get('image_url')\n if upload:\n image_data = data.create_update_image(upload)\n elif image_url:\n image_data = data.create_update_image(image_url=image_url)\n else:\n image_data = []\n return response.Response(image_data, status=status.HTTP_200_OK)\n\n def retrieve(self, request, pk=None):\n instance = models.Image.objects.get(pk=pk)\n serializer = ImageSerializer(instance)\n return response.Response(serializer.data)\n\n def list(self, request):\n queryset = models.Image.objects.order_by('-last_modified')\n serializer = ImageSerializer(queryset, many=True)\n return response.Response(serializer.data)\n","repo_name":"tasnim07/image_visualiser","sub_path":"visualiser/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73667811207","text":"#!/usr/bin/env python\n\"\"\"\nCreate a conda-pip 'copip' development overlay.\n\nUsage:\n mkcopip env_name\n\"\"\"\n\n# Stdlib imports\n\nimport os\nimport sys\nimport typing as T\n\nfrom pathlib import Path\nfrom subprocess import check_output as sh\n\n# Config global constants\nCONDA_BASE = Path(sh(['conda', 'info', '--base']).decode().strip())/'envs'\nCOPIP_DIR = Path(__file__).parent\nCOPIP_ON = Path('copipon.sh')\nCOPIP_OFF = Path('copipoff.sh')\n\n\n# Function definitions\ndef main(args: T.Optional[list]=None) -> int:\n if args is None:\n args = sys.argv[1:]\n\n try:\n ename = args[0]\n except IndexError:\n print(__doc__, file=sys.stderr)\n return 64\n\n # Create directories for holding installed files and env. 
config\n copip_dir = CONDA_BASE/ename/'copip'\n acti_dir = CONDA_BASE/ename/'etc/conda/activate.d'\n deac_dir = CONDA_BASE/ename/'etc/conda/deactivate.d'\n\n if not (CONDA_BASE/ename).is_dir():\n print(f"Environment {ename} doesn't exist, exiting.", file=sys.stderr)\n return 64\n\n for d in [copip_dir, acti_dir, deac_dir]:\n d.mkdir(parents=True, exist_ok=True)\n\n # Symlink env. config scripts inside conda activ/deact directories\n for script, cdir in [(COPIP_ON, acti_dir), (COPIP_OFF, deac_dir)]:\n dest = cdir/script\n if not dest.is_file():\n os.link(COPIP_DIR/script, dest)\n\n print(f"Environment dev overlay `{ename}` ready at `{copip_dir}`")\n\n return 0\n\n\n# Unit tests\ndef test_no_args():\n assert main([]) == 64\n\n\ndef test_noenv():\n assert main(['__BADENV_NAME_zyxw__']) == 64\n\n\ndef test_normal():\n import functools\n import subprocess\n\n sh = functools.partial(subprocess.run, shell=True, check=True)\n\n ename = '__tmp_copip_env__'\n copip = CONDA_BASE/ename\n sh(f"conda create -n {ename} --yes")\n try:\n assert main([ename]) == 0\n assert copip.is_dir()\n\n for script, cdir in [(COPIP_ON, 'activate.d'),\n (COPIP_OFF, 'deactivate.d')]:\n src = CONDA_BASE/ename/'etc/conda'/cdir/script\n assert src.is_file()\n assert src.samefile(COPIP_DIR/script)\n finally:\n sh(f"conda remove -n {ename} --all --yes")\n\n\n# Main entry point\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"fperez/copip","sub_path":"copip.py","file_name":"copip.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"16"} +{"seq_id":"21860107558","text":"from collections import deque\r\n\r\nmaze = deque([\r\n input() for _ in range(8)\r\n])\r\n\r\ndef in_range(x, y):\r\n return 0 <= x < 8 and 0 <= y < 8\r\n\r\ndef can_go(x, y):\r\n if not in_range(x, y):\r\n return False\r\n \r\n if maze[x][y] == '#':\r\n return False\r\n \r\n return True\r\n\r\ndef bfs(x: int, y: int):\r\n queue = deque([(x, y)])\r\n \r\n wall_down = 0\r\n dxs, dys = [-1, -1, -1, 0, 1, 1, 1, 0, 0], [-1, 0, 1, 1, 1, 0, -1, -1, 0]\r\n while queue:\r\n length_queue = len(queue)\r\n \r\n for _ in range(length_queue):\r\n cur_x, cur_y = queue.popleft()\r\n \r\n # If all the walls have moved down, or x reached row 0 before that, nothing blocks the way\r\n if wall_down == 8 or cur_x == 0:\r\n return True\r\n \r\n # If the current cell is a wall after the walls moved down, search the next coordinate\r\n if maze[cur_x][cur_y] == '#':\r\n continue\r\n \r\n for dx, dy in zip(dxs, dys):\r\n nx, ny = cur_x + dx, cur_y + dy\r\n \r\n if can_go(nx, ny):\r\n queue.append((nx, ny))\r\n \r\n # The walls move down one row\r\n maze.pop()\r\n maze.appendleft('........')\r\n wall_down += 1\r\n \r\n return False\r\n\r\nresult = bfs(7, 0)\r\n\r\nif result:\r\n print(1)\r\n\r\nelse:\r\n print(0)\r\n ","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/16954. 
움직이는 미로 탈출/움직이는 미로 탈출.py","file_name":"움직이는 미로 탈출.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11017381897","text":"#A program that receives numbers and sums them using while, until the user wants to stop by entering 999\n\ncont = 0\nsoma = 0\nnum = 0\nnum = int(input(\"Digite um numero: (999 para sair): \"))\nwhile num != 999:\n cont += 1\n soma += num\n num = int(input(\"Digite um numero: (999 para sair): \"))\nprint(f\"A soma dos {cont} numeros digitados e de {soma}\")\n","repo_name":"BarbaraSaporito/CursoEmVideoPython.","sub_path":"soma_numeros_while.py","file_name":"soma_numeros_while.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11002552021","text":"ins = [int(j) for j in input().split()]\r\n#taking inputs\r\nn = ins[0]\r\nk = ins[1]\r\nb = [int(j) for j in input().split()]\r\nanswer = 0\r\npage = 0\r\n#checking for each value of chapter\r\nfor chapter in b:\r\n page += 1\r\n for p in range(1, chapter+1):\r\n if p == page:\r\n answer += 1\r\n if p%k == 0 and p != chapter:\r\n page += 1\r\n#printing answer\r\nprint (answer)","repo_name":"darpanpal7/weeklycode","sub_path":"WEEKLYCODE#4/lisa_workbook/Python/lisa_workbook .py","file_name":"lisa_workbook .py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29588542119","text":"\nimport matplotlib.pyplot as pp\nimport numpy as np\n\nMODEL_G = 9.81\nMODEL_DT = 0.001\nMODEL_DM = 0.1 #Mass change per unit of time\nMODEL_U = 30 #Gas velocity relative to the rocket\n\nclass Body:\n def __init__(self, x, y, vx, vy):\n\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n self.trajectory_x = []\n self.trajectory_y = []\n\n def advance(self):\n\n self.trajectory_x.append(self.x)\n self.trajectory_y.append(self.y)\n\n self.x += self.vx * MODEL_DT\n self.y += self.vy * MODEL_DT\n self.vy -= MODEL_G * MODEL_DT\n\nclass Rocket(Body):\n def __init__(self, x, y, m): #Takes the rocket mass (including fuel)\n super().__init__(x, y, 10, 10)\n self.m = m\n \n def advance(self):\n super().advance()\n if self.m >= 30: #Mass of the rocket body without fuel\n self.m -= MODEL_DM\n v = (self.vx ** 2 + self.vy ** 2) ** 0.5\n dv = MODEL_U * MODEL_DM / self.m \n self.vx += dv * self.vx / v \n self.vy += dv * self.vy / v\n\nb = Body(0, 0, 10, 10)\nr = Rocket(0, 0, 50)\n\nbodies = [b, r]\n\nfor t in np.r_[0:2:MODEL_DT]:\n for b in bodies:\n b.advance()\n\nfor b in bodies:\n pp.plot(b.trajectory_x, b.trajectory_y) \npp.show()","repo_name":"Zpryakhin/bek_mek","sub_path":"rocket.py","file_name":"rocket.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1116707770","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time : 2021/2/26 6:54 PM\n@Author : mc\n@File : solution.py\n@Software: PyCharm\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode):\n \"\"\"\n Recursive implementation\n :param root:\n :return:\n \"\"\"\n res = []\n\n def preorder(root, res):\n if not root:\n return\n res.append(root.val)\n if root.left:\n preorder(root.left, res)\n if root.right:\n preorder(root.right, 
res)\n\n preorder(root, res)\n return res\n\n def preorderTraversalIterative(self, root: TreeNode):\n \"\"\"\n Iterative (non-recursive) implementation\n :param root:\n :return:\n \"\"\"\n res = []\n stack = []\n while stack or root:\n while root:\n res.append(root.val)\n stack.append(root)\n root = root.left\n root = stack.pop()\n root = root.right\n return res\n","repo_name":"BillyChao/leetcode","sub_path":"144-二叉树的前序遍历/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35935416887","text":"# Valid commands\nADD = \"ADD-COURSE-OFFERING\"\nREGISTER = \"REGISTER\"\nALLOT = \"ALLOT\"\nCANCEL = \"CANCEL\"\n\n# data types of arguments different commands support\nCOMMANDS_METADATA = {\n ADD: (str, str, str, int, int),\n REGISTER: (str, str),\n ALLOT: (str,),\n CANCEL: (str,)\n}\n\n# commands and expected parameters\nSUPPORTED_COMMANDS = (\n \"ADD-COURSE-OFFERING \"\n \" \\n\"\n \"REGISTER \\n\"\n \"ALLOT-COURSE \\n\"\n \"CANCEL \\n\"\n)\n\n# arguments supported while starting the app from command line\nSUPPORTED_APP_ARGS = (\"module name\", \"file path\")\n","repo_name":"SectumPsempra/command-line-app","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70800893128","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport datetime\nfrom datetime import timedelta\nimport utils\n\n\ndef calculate_watwin(main_table, subject_id):\n subject_events = main_table.loc[main_table[\"SubjectID\"] == subject_id]\n\n subject_events = subject_events.sort_values(by=['Order'])\n compiles = subject_events[subject_events[\"EventType\"] == \"Compile\"]\n compile_errors = subject_events[subject_events[\"EventType\"] == \"Compile.Error\"]\n\n if len(compiles) <= 1:\n return None\n\n # time estimation: time_diff of (e_i,e_i+1) is written on e_i\n # calculate time estimation, mean and std for each subject:\n time_arr = {}\n mean_dict = {}\n std_dict = {}\n\n for subj in range(len(compiles) - 1):\n # Only look at consecutive compiles within a single assignment/problem/session\n # Before starting the algorithm:\n # Watson (2013) requires deletion fixes and commented fixes; we assume the dataset has done this\n changed_segments = False\n for segment_id in [\"SessionID\", \"ProblemID\", \"AssignmentID\"]:\n if segment_id not in compiles:\n continue\n if compiles[segment_id].iloc[subj] != compiles[segment_id].iloc[subj + 1]:\n changed_segments = True\n break\n if changed_segments:\n continue\n\n sum_time = 0\n count_time = 0\n\n if len(compiles) > 1:\n time_arr[subj] = {}\n for i in range(len(compiles) - 1):\n # remove the identical pairs of events by their CodeStateID\n if compiles[\"CodeStateID\"].iloc[i + 1] != compiles[\"CodeStateID\"].iloc[i]:\n # Get all compile errors associated with compile events e1 and e2\n e1_errors = compile_errors[compile_errors[\"ParentEventID\"] == compiles[\"EventID\"].iloc[i]]\n e2_errors = compile_errors[compile_errors[\"ParentEventID\"] == compiles[\"EventID\"].iloc[i + 1]]\n # If e1 compile resulted in error\n if len(e1_errors) > 0:\n datetimeformat = '%Y-%m-%dT%H:%M:%S'\n date1 = datetime.datetime.strptime(compiles[\"ServerTimestamp\"].iloc[i + 1], datetimeformat)\n date2 = datetime.datetime.strptime(compiles[\"ServerTimestamp\"].iloc[i], datetimeformat)\n time_diff = ((((date1.month - date2.month) * 30 + (date1.day - date2.day)) * 24 + 
(\n date1.hour - date2.hour)) * 60 + (date1.minute - date2.minute)) * 60 + (\n date1.second - date2.second)\n sum_time += time_diff\n count_time = count_time + 1\n time_arr[subj][compiles[\"CodeStateID\"].iloc[i]] = time_diff\n\n if count_time != 0:\n mean_time = sum_time / count_time\n mean_dict[subj] = mean_time\n std_time = np.std(np.asarray(list(time_arr[subj].values())))\n std_dict[subj] = std_time\n\n # add TimeEst, TimeMean, and TimeStd to compiles dataframe\n compiles[\"TimeEst\"] = [time_arr[compiles[\"SubjectID\"][i]][compiles[\"CodeStateID\"][i]] if compiles[\"SubjectID\"][i] in time_arr.keys() and compiles[\"CodeStateID\"][i] in time_arr[compiles[\"SubjectID\"][i]].keys() else -1 for i in range(len(compiles))]\n compiles[\"TimeMean\"] = [mean_dict[i] if i in mean_dict.keys() else 0 for i in compiles[\"SubjectID\"]]\n compiles[\"TimeStd\"] = [std_dict[i] if i in std_dict.keys() else 0 for i in compiles[\"SubjectID\"]]\n\n # begin calculating WatWin scores:\n score = 0\n pair_count = 0\n\n for i in range(len(compiles) - 1):\n # Only look at consecutive compiles within a single assignment/problem/session\n changed_segments = False\n for segment_id in [\"SessionID\", \"ProblemID\", \"AssignmentID\"]:\n if segment_id not in compiles:\n continue\n if compiles[segment_id].iloc[i] != compiles[segment_id].iloc[i + 1]:\n changed_segments = True\n break\n if changed_segments:\n continue\n\n pair_count += 1\n\n # remove identical pairs\n if compiles[\"CodeStateID\"].iloc[i] != compiles[\"CodeStateID\"].iloc[i + 1]:\n # Get all compile errors associated with compile events e1 and e2\n e1_errors = compile_errors[compile_errors[\"ParentEventID\"] == compiles[\"EventID\"].iloc[i]]\n e2_errors = compile_errors[compile_errors[\"ParentEventID\"] == compiles[\"EventID\"].iloc[i + 1]]\n # if former event has error\n if len(e1_errors) > 0:\n # if later event has error\n if len(e2_errors) > 0:\n # Get the set of errors shared by both compiles\n shared_errors = set(e1_errors[\"CompileMessageType\"]).intersection(set(e2_errors[\"CompileMessageType\"]))\n # if same full message\n if e1_errors[\"ProgramErrorOutput\"] == e2_errors[\"ProgramErrorOutput\"]:\n score += 4\n # if same error type\n if len(shared_errors) > 0:\n score += 4\n\n # TODO: Watson (2013) requires the error line number of the compiled code;\n # err_df is undefined here, so the same-line check stays disabled until that data is available\n # if err_df[\"SourceLocation\"].iloc[i].split(':')[1] == err_df[\"SourceLocation\"].iloc[i + 1].split(':')[1]:\n # score += 2\n\n # if time < M - 1SD\n if compiles[\"TimeEst\"].iloc[i] < (\n compiles[\"TimeMean\"].iloc[i] - compiles[\"TimeStd\"].iloc[i]):\n score += 1\n else:\n # if time > M - 1SD\n if compiles[\"TimeEst\"].iloc[i] > (\n compiles[\"TimeMean\"].iloc[i] - compiles[\"TimeStd\"].iloc[i]):\n score += 25\n else:\n score += 15\n # if later event does not have error\n else:\n # if time < M - 1SD\n if compiles[\"TimeEst\"].iloc[i] < (\n compiles[\"TimeMean\"].iloc[i] - compiles[\"TimeStd\"].iloc[i]):\n score += 1\n else:\n # if time > M - 1SD\n if compiles[\"TimeEst\"].iloc[i] > (\n compiles[\"TimeMean\"].iloc[i] - compiles[\"TimeStd\"].iloc[i]):\n score += 25\n else:\n score += 15\n\n if pair_count == 0:\n return None\n\n watwin = (score / 35.) 
/ (len(compiles) - 1.)\n return watwin\n\n\nif __name__ == \"__main__\":\n read_path = \"./data\"\n write_path = \"./out/WatWin.csv\"\n\n if len(sys.argv) > 1:\n read_path = sys.argv[1]\n if len(sys.argv) > 2:\n write_path = sys.argv[2]\n\n main_table_df = pd.read_csv(os.path.join(read_path, \"MainTable.csv\"))\n watwin_map = utils.calculate_metric_map(main_table_df, calculate_watwin)\n print(watwin_map)\n utils.write_metric_map(\"WatWinScore\", watwin_map, write_path)\n","repo_name":"thomaswp/ProgSnap2Analysis","sub_path":"dev/watwin.py","file_name":"watwin.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"14263690030","text":"#This is the code for the machine learning model that is built from tensorflow and the higher-level API tflearn because I am not very good at machine learning yet. Also, pardon my grammar. \n\n#For this to work properly and to see the model being trained, you need to use pip to install tensorflow, tflearn, and nltk\nimport json\nimport random\nimport tensorflow\nimport tflearn\nimport numpy\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nimport os \n\n#I am using the Lancaster method of stemming words, which is the process of finding the root of the word \nstemmer = LancasterStemmer()\nnltk.download('punkt')\n\n#Function to train the model which will be called in the Main python file \ndef TrainModel():\n\n #Using a json file because of the easy formatting of tags, patterns and responses \n with open('intents.json') as file:\n data = json.load(file)\n\n #Initializing, allWords will be a list of all the individual unique words \n #tags will be a list of all the tags in the json file\n #allPatterns will be a list containing lists of all the patterns tokenized \n #tagsForEachPattern will hold the corresponding tag for each of the patterns in allPatterns \n allWords = []\n tags = []\n allPatterns = []\n tagsForEachPattern = []\n\n #This for loop will populate the 4 previous lists\n for intent in data['intents']:\n \n #Loops through each pattern in the Json file individually \n for pattern in intent['patterns']:\n #Each pattern is tokenized, or in other words, divides a string into a list of word substrings \n wordList = nltk.word_tokenize(pattern)\n #Using the extend method, wordList is iterated over to add each element into allWords \n allWords.extend(wordList)\n #The entire tokenized pattern list is added to allPatterns as a single element \n allPatterns.append(wordList)\n #Gets the tag for the pattern and adds it \n tagsForEachPattern.append(intent['tag'])\n\n #this creates a list of tags where each tag is unique \n if intent['tag'] not in tags:\n tags.append(intent['tag'])\n\n #The allWords list is turned lowercase and stemmed removing the question marks. 
Then it's sorted into a list of unique words \n allWords = [stemmer.stem(w.lower()) for w in allWords if w != '?']\n allWords = sorted(list(set(allWords)))\n\n #These two lists are going to be used to train our model \n #The training list is all the data the model is going to use for its predictions\n #The output list will get the tag that the model should predict for each pattern \n training = []\n output = []\n\n #A list of 0s that spans the length of the tags list which will be used during our Bag of Words process \n outEmpty = [0 for i in range(len(tags))]\n\n #This for loop is running the Bag of Words process because a model needs to be trained using numbers and not strings\n for index, pattern in enumerate(allPatterns):\n #This bag list will become the length of allWords and will be made up of 0s and 1s\n #If a word in the pattern is in allWords then a 1 is appended, else a 0 is appended \n bag = []\n\n #This creates a list of stemmed words from the pattern to compare to the stemmed words in allWords \n wordList = [stemmer.stem(w.lower()) for w in pattern]\n for word in allWords:\n if word in wordList:\n bag.append(1)\n else:\n bag.append(0)\n \n #outputRow is the length of tags with all 0s except for one 1. This 1 marks which tag relates to the pattern of the bag list\n outputRow = outEmpty[:]\n outputRow[tags.index(tagsForEachPattern[index])] = 1\n\n #all the data is added to the training and output lists \n training.append(bag)\n output.append(outputRow)\n \n #Setting up tensorflow and the neural networks\n tensorflow.compat.v1.reset_default_graph()\n\n #Brings the data into the system for processing \n net = tflearn.input_data(shape=[None, len(training[0])])\n #Creates 2 more layers each with 10 \"neurons\"\n net = tflearn.fully_connected(net, 10)\n net = tflearn.fully_connected(net, 10)\n #This creates the output layer after data is processed by the previous layers \n net = tflearn.fully_connected(net, len(output[0]), activation='softmax')\n #This uses regressions to predict the tag \"or the numerical value of the tag\" by using previous data \n net = tflearn.regression(net)\n\n # This now trains the model using DNN \"Deep Neural Network\" and saves the model to a file\n \n #However, I already trained the model, so it won't train it again. But if you want to see it train the model, just change the if statement file name. 
\n model = tflearn.DNN(net)\n if os.path.exists('model.tflearn' + '.meta'):\n model.load('model.tflearn')\n else:\n model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)\n model.save('model.tflearn')\n\n return model, allWords, tags, data ","repo_name":"elyskrie21/Eliza","sub_path":"TrainModel.py","file_name":"TrainModel.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6784040124","text":"from docx import *\n\"\"\"\nhttps://github.com/python-openxml/python-docx/pull/716/commits/c09da22afaebaed2f0a3139de6ba46c8824f179e\n\ndocx/__init__.py\n31 + PartFactory.part_type_for[CT.WML_DOCUMENT_MACRO_ENABLED_MAIN] = DocumentPart\ndocx/api.py\n - if document_part.content_type != CT.WML_DOCUMENT_MAIN:\n26 + if (document_part.content_type != CT.WML_DOCUMENT_MAIN) and (document_part.content_type != CT.WML_DOCUMENT_MACRO_ENABLED_MAIN):\n\ndocx/opc/constants.py\n283 + WML_DOCUMENT_MACRO_ENABLED_MAIN = (\n284 + 'application/vnd.ms-word.document.macroEnabled.main+xml'\n285 + )\n\"\"\"\n\ndocument = Document('./work_docs/doc_with_macros.docm')\n\nfor t in document.tables:\n for row in t.rows:\n for cell in row.cells:\n for p in cell.paragraphs:\n for run in p.runs:\n if run.font.highlight_color is None:\n run.font.hidden = True\n\nfor p in document.paragraphs:\n for run in p.runs:\n\n if run.font.highlight_color is None:\n run.font.hidden = True\n\ndocument.save('./work_docs/done/doc_with_macros.docm')","repo_name":"mursigkeit22/Document_processing","sub_path":"_7_change_pydocx_for_docm.py","file_name":"_7_change_pydocx_for_docm.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12856922443","text":"from adapt.intent import IntentBuilder\nfrom adapt.engine import IntentDeterminationEngine\n\nengine = IntentDeterminationEngine()\n\nweather_keyword = [\n \"weather\"\n]\n\nfor wk in weather_keyword:\n engine.register_entity(wk, \"WeatherKeyword\")\n\nweather_types = [\n \"snow\",\n \"rain\",\n \"wind\",\n \"sleet\",\n \"sun\"\n]\n\nfor wt in weather_types:\n engine.register_entity(wt, \"WeatherType\")\n\nlocations = [\n \"Seattle\",\n \"San Francisco\",\n \"Tokyo\",\n \"Vancouver\"\n]\n\nfor loc in locations:\n engine.register_entity(loc, \"Location\")\n\nweather_intent = IntentBuilder(\"WeatherIntent\")\\\n .require(\"WeatherKeyword\")\\\n .optionally(\"WeatherType\")\\\n .require(\"Location\")\\\n .build()\n\nengine.register_intent_parser(weather_intent)\n\n\ndef get_intent(message):\n for intent in engine.determine_intent(message):\n if intent.get('confidence') > 0:\n return intent\n","repo_name":"Sudo-Kid/django_adatp_demo","sub_path":"home/adapt.py","file_name":"adapt.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12279212477","text":"from tf_donkey import Model\n\ndef VideoDrive(ckpt, video_path, num_bins=15, in_shape=(120,160,3),\n car_ip=\"192.168.43.56\", port=5555, pwm_min_max=(-0.5, 0.5), steering_range_deg=40):\n\n classes = [x for x in range(num_bins)]\n car_brain = Model(\"test_name\", in_shape=in_shape, classes=classes)\n car_brain.VideoDrive(ckpt, video_path, car_ip)\n\ndef main():\n import argparse as argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, required=True)\n parser.add_argument('--ckpt-path', type=str, 
required=True)\n parser.add_argument('--video-path', type=str, required=True)\n parser.add_argument('--num-bins', type=int, required=True)\n parser.add_argument('--vid-w', type=int, required=False, default=160)\n parser.add_argument('--vid-h', type=int, required=False, default=120)\n parser.add_argument('--vid-chan', type=int, required=False, default=3)\n parser.add_argument('--car-ip', type=str, required=True)\n parser.add_argument('--port', type=float, required=True)\n parser.add_argument('--steering-range-deg', type=float, required=False, default=40)\n parser.add_argument('--pwm-min', type=float, required=False, default=-0.5)\n parser.add_argument('--pwm-max', type=float, required=False, default=0.5)\n\n args = parser.parse_args()\n mode = args.mode\n ckpt = args.ckpt_path\n #ckpt = \"ep_19-step_161-loss_0.944.ckpt\"\n video_path = args.video_path\n #video_path = \"/home/jp/Documents/FYP/ml/data/videoplayback.mp4\"\n num_bins = args.num_bins\n in_shape = [args.vid_h, args.vid_w, args.vid_chan]\n car_ip = args.car_ip\n port = args.port\n pwm_min_max = (args.pwm_min, args.pwm_max)\n steering_range_deg = args.steering_range_deg\n\n print(\"mode: {}\".format(args.mode))\n if mode == \"video-drive\":\n# print(f\"ckpt: {ckpt}\")\n# print(f\"video_path: {video_path}\")\n# print(f\"num_bins: {num_bins}\")\n# print(f\"in_shape: {in_shape}\")\n# print(f\"car_ip: {car_ip}\")\n# print(f\"port: {port}\")\n# print(f\"pwm_min_max: {pwm_min_max}\")\n# print(f\"steering_range_deg: {steering_range_deg}\")\n VideoDrive(ckpt, video_path, num_bins, in_shape, car_ip, port, pwm_min_max, steering_range_deg)\n else:\n print(f\"mode: '{mode}' is not valid :-(\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tall-josh/fyp_diy_robo_car","sub_path":"end2end/video_infer.py","file_name":"video_infer.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"71832248327","text":"#question-1 (Area And Circumference)\nclass circle:\n def __init__(self, r):\n self.radius = r\n def getArea(self):\n return(3.14*self.radius*self.radius)\n def getCircumference(self):\n return(2*3.14*self.radius)\nr = int(input(\"enter the radius of circle\"))\nc = circle(r)\nprint(\"the area of circle is \",c.getArea())\nprint(\"the circumference of circle is \",c.getCircumference())\n\n\n\n#question-2\nclass student:\n def __init__(self):\n self.name = (input(\"enter your name \"))\n self.rollno = int(input(\"enter rollno \"))\n def setAge(self):\n self.age = int(input(\"enter your age \"))\n def setMarks(self):\n self.marks = int(input(\"enter the marks \"))\n def display(self):\n print(\"name:\",self.name,\"\\n\",\"rollno:\",self.rollno,\"\\n\",\"age:\",self.age,\"\\n\",\"marks:\",self.marks)\ns = student()\ns.setAge()\ns.setMarks()\ns.display()\n\n\n\n\n#question-3\nclass temperature:\n def convertFahrenheit(self):\n self.c = int(input(\"enter temperature in celsius \"))\n return((9/5)*self.c+32)\n def convertCelsius(self):\n self.f = int(input(\"enter temperature in Fahrenheit \"))\n return(((self.f-32)*5)/9)\nt = temperature()\nprint(t.convertFahrenheit())\nprint(t.convertCelsius())\n\n\n\n\n#question-4\nclass Movie:\n def __init__(self):\n self.artistname = input(\"enter artist name :\")\n self.year_of_release = input(\"enter year :\")\n self.rating = int(input(\"enter ratings out of 10 :\"))\n def add(self):\n self.movie_name = input(\"enter the movie name :\")\n self.collection = int(input(\"enter total collection :\"))\n def 
display(self):\n print(self.movie_name)\n print(self.artistname)\n print(self.year_of_release)\n print(self.rating)\n print(self.collection)\nm = Movie()\nm.add()\nm.display()\n\n\n\n\n#question-5\nclass animal:\n def animal_attribute(self):\n return(\"print tiger.\")\nclass Tiger(animal):\n pass\nt = Tiger()\nprint(t.animal_attribute())\n\n\n\"\"\"\n#question-6\noutput will be:\nA B\nA B\n\"\"\"\n\n\n\n#question-7\nclass shape:\n def __init__(self, l, b):\n self.length = l\n self.breadth = b\n def area(self):\n return(self.length*self.breadth)\nclass rectangle(shape):\n pass\nclass square(shape):\n pass\nl = int(input(\"enter the length \"))\nb = int(input(\"enter the breadth \"))\nr = rectangle(l, b)\ns = square(l, b)\nprint(\"area of rectangle \", r.area())\nprint(\"area of square \", s.area())","repo_name":"yashasvi128/Assignments","sub_path":"classes_&_obj2/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73648359047","text":"# The Python practice problem is as follows:\n\n# Brief: the input is some year, month and day\n# Question: determine which day of that year the input date is\n\n# Python solution analysis:\n# Take March 5 as an example. First add up the days of the previous two months, then add 5 days to get the day number within the year. There is one special case, the leap year: when it occurs and the input month is greater than 2, one extra day has to be added. If this is not clear, see the Python source code below.\n\nimport calendar\n\nyear = 2015\nmonth = 6\nday = 7\ndays = 0\n\nfor i in range(1, month):\n monthDay = 0\n \n if i == 2:\n if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):\n monthDay = 29\n days += monthDay\n else:\n monthDay = 28\n days += monthDay\n else:\n monthDay = calendar.monthrange(year, i)[1]\n days += monthDay\n\n print('{} 月: {}'.format(i, monthDay))\n\nprint('{} 月: {}'.format(month, day))\ndays += day\n\nprint('Days: {}'.format(days))","repo_name":"FlyMaple/iplaypy-100","sub_path":"python_project/004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37095065125","text":"from xml.dom import minidom #Unfortunately required as ElementTree won't pretty format xml\nimport xml.etree.ElementTree as ET #for parsing XML\nimport base64\nimport struct\nimport numpy as np\nimport re\nimport collections\nimport decimal as DC\n\nclass AnasysElement(object):\n# class AnasysElement(collections.abc.Mapping):\n \"\"\"Blank object for storing xml data\"\"\"\n def __init__(self, parent_obj=None, etree=None):\n self._parent_obj = parent_obj\n self._attributes = [] #list of dicts of tags:attributes, where applicable\n if not hasattr(self, '_iterable_write'):\n self._iterable_write = {} #just in case\n if not hasattr(self, '_special_write'):\n self._special_write = {} #just in case\n if not hasattr(self, '_special_read'):\n self._special_read = {} #just in case\n if not hasattr(self, '_skip_on_write'):\n self._skip_on_write = [] #just in case\n if etree is not None:\n self._etree_to_anasys(etree) #really just parses the hell outta this tree\n\n def __dir__(self):\n \"\"\"Returns a list of user-accessible attributes\"\"\"\n vars_and_funcs = [x for x in object.__dir__(self) if x[0]!='_']\n return vars_and_funcs\n\n def __getitem__(self, key):\n \"\"\"Class attributes can be called by subscription, e.g. Foo['bar']\"\"\"\n items = dir(self)\n if key in items:\n return getattr(self, key)\n else:\n raise KeyError\n\n def __iter__(self):\n \"\"\"Makes object iterable. 
Returns all user-accessible, non-method, attributes\"\"\"\n for obj in dir(self):\n if not callable(self[obj]):\n yield self[obj]\n\n def _get_iterator(self, obj):\n \"\"\"For use with _anasys_to_etree. Returns a dict to iterate over, or None\"\"\"\n #If obj is a dict, return its items\n if type(obj) == dict:\n return obj#.items()\n #If obj is derived from AnasysElement, return its user-accessible attributes that aren't in _skip_on_write\n elif isinstance(obj, AnasysElement):\n return {k: obj[k] for k in obj.__dict__.keys() if k[0] != '_' and k not in obj._skip_on_write}\n #If it's something else, return None. _anasys_to_etree will test for this condition\n else:\n return None\n\n def _object_to_text(self, obj):\n \"\"\"Takes an object, returns it to text to append to an etree object\"\"\"\n if isinstance(obj, np.ndarray):\n return self._encode_bs64(obj)\n else:\n return str(obj)\n\n def _anasys_to_etree(self, obj, name=\"APlaceholder\"):\n \"\"\"Return object and all sub objects as an etree object for writing\"\"\"\n # Create new element for appending tags to\n obj_items = self._get_iterator(obj)\n #Test object list for None, indicating it's time to return some text\n if obj_items is None:\n txt = self._object_to_text(obj)\n rtn = ET.Element(name)\n rtn.text = txt\n return rtn\n #Odd case where there's no text and nothing to return\n if obj_items == {}:\n return ET.Element(name)\n #If it's made it this far, it's time to loop through obj_items\n elem = ET.Element(name)\n # pdb.set_trace()\n for k, v in obj_items.items():\n #If element was once an xml attribute, make it so again\n try: #Too lazy to deal with the fact dicts won't have this attribute\n if k in obj._attributes:\n elem.set(k, v)\n continue\n except: #If axz's had unique tag names I wouldn't have to do this\n pass\n #Iterable conversions\n if k in obj._iterable_write.keys():\n obj._iterable_to_etree(elem, k, v)\n #Special return values\n elif k in obj._special_write.keys():\n if callable(obj._special_write[k]):\n obj._special_write[k](elem, k, v)\n else:\n obj._special_write[k]\n else:\n rr = self._anasys_to_etree(v, k)\n #Create subelement k, with a value determined by recursion\n elem.append(rr)\n return elem\n\n def _attr_to_children(self, et_elem):\n \"\"\"Convert element attributes of given etree object to child elements\"\"\"\n for attr in et_elem.items():\n ET.SubElement(et_elem, attr[0])\n et_elem.find(attr[0]).text = attr[1]\n\n def _etree_to_anasys(self, element, parent_obj=None):\n \"\"\"Iterates through element tree object and adds attributes to HeightMap Object\"\"\"\n #If element has attributes, make them children before continuing\n self._attr_to_children(element)\n # If element is a key in _special_read, set special return value\n if element.tag in self._special_read.keys():\n return self._special_read[element.tag](element)\n #If element is a key in _base_64_tags, return decoded data\n if '64' in element.tag:\n return self._decode_bs64(element.text)\n #If element has no children, return either its text or {}\n if list(element) == []:\n if element.text:\n #Default return value for an element with text\n return element.text\n else:\n #Default return value for an empty tree leaf/XML tag\n return \"\"\n #If element has children, return an object with its children\n else:\n if parent_obj == None:\n #Top level case, we want to add to self, rather than blank object\n element_obj = self\n else:\n #Default case, create blank object to add attributes to\n element_obj = AnasysElement()#parent_obj=self)\n #store the etree 
tag name for later use\n element_obj._name = element.tag\n #Update _attributes of given element\n element_obj._attributes.extend(element.keys())\n #Loop over each child and add attributes\n for child in element:\n #Get recursion return value - either text, {} or AnasysElement() instance\n rr = element_obj._etree_to_anasys(child, element)\n #Set element_obj.child_tag = rr\n setattr(element_obj, child.tag, rr)\n #Return the object containing all children and attributes\n return element_obj\n\n def _check_key(self, key, _dict, copy=1):\n \"\"\"Check if key is in dict. If it is, increment key until key is unique, and return\"\"\"\n if key not in _dict:\n return key\n num_list = re.findall('\\s\\((\\d+)\\)', key)\n if num_list != [] and key[-1] == ')':\n copy = int(num_list[-1])\n index = key.find(' ({})'.format(copy))\n if index != -1:\n key = key[:index] + ' ({})'.format(copy+1)\n return self._check_key(key, _dict, copy+1)\n else:\n key += ' ({})'.format(copy)\n return self._check_key(key, _dict, copy)\n\n def _decode_bs64(self, data):\n \"\"\"Returns base64 data decoded in a numpy array\"\"\"\n decoded_bytes = base64.b64decode(data.encode())\n fmt = 'f'*int((len(decoded_bytes)/4))\n structured_data = struct.unpack(fmt, decoded_bytes)\n decoded_array = np.array(structured_data)\n return decoded_array\n\n def _encode_bs64(self, np_array):\n \"\"\"Returns numpy array encoded as base64 string\"\"\"\n tup = tuple(np_array.flatten())\n fmt = 'f'*np_array.size\n structured_data = struct.pack(fmt, *tup)\n encoded_string = base64.b64encode(structured_data).decode()\n return encoded_string\n\n def _serial_tags_to_nparray(self, parent_tag):\n \"\"\"Return floats listed consecutively (e.g., background tables) as numpy array\"\"\"\n np_array = []\n for child_tag in list(parent_tag):\n np_array.append(DC.Decimal(child_tag.text))\n parent_tag.remove(child_tag)\n np_array = np.array(np_array)\n return np_array\n\n def _nparray_to_serial_tags(self, elem, nom, np_array):\n \"\"\"Takes a numpy array and returns an etree object of consecutive float tags\"\"\"\n root = ET.Element(nom)\n flat = np_array.flatten()\n for x in flat:\n el = ET.SubElement(root, 'Double')\n el.text=str(x)\n elem.append(root)\n\n def write(self, filename):\n \"\"\"Writes the current object to file\"\"\"\n xml = self._anasys_to_etree(self, 'Document')\n #ElementTree annoyingly only remembers namespaces that are used so next line is necessary\n xml.set(\"xmlns\", \"www.anasysinstruments.com\")\n #Can't see any reason to add unused namespaces other than default, as Analysis Studio won't complain,\n #but minidom will if one is duplicated (can't easily get around this lame default behavior in etree)\n with open(filename, 'wb') as f:\n xmlstr = minidom.parseString(ET.tostring(xml)).toprettyxml(indent=\" \", encoding='UTF-16')\n f.write(xmlstr)\n\n def _etree_to_dict(self, etree, key_tag):\n \"\"\"\n Converts an ET element to a dict containing its children as AnasysElements.\n e.g.,\n \n \n A\n \n \n B\n \n ...\n \n becomes:\n parent = {'A': obj1, 'B': obj2, ...}\n Arguments:\n self = calling object (will be an instance of or derived from AnasysElement)\n etree = element tree object to be converted\n key_tag = object element to be used as key (e.g., Label, Name, ID, etc.)\n \"\"\"\n return_dict = {}\n for child in etree:\n new_obj = AnasysElement(etree=child)\n key = new_obj[key_tag]\n key = self._check_key(key, return_dict)\n return_dict[key] = new_obj\n return return_dict\n\n def _etree_to_list(self, etree):\n \"\"\"\n Converts an ET element 
to a list containing its children as AnasysElements.\n e.g.,\n \n \n \n ...\n \n becomes:\n parent = [obj1, obj2, ...]\n Arguments:\n self = calling object (will be an instance of or derived from AnasysElement)\n etree = element tree object to be converted\n \"\"\"\n return_list = []\n for child in etree:\n new_obj = AnasysElement(etree=child)\n return_list.append(new_obj)\n return return_list\n\n def _iterable_to_etree(self, parent_elem, iterable_elem_name, iterable_obj):\n \"\"\"\n Converts a named dict or list of Anasys Elements to an Element Tree\n object representation of the object\n\n e.g.,\n parent.var = {'ID1': obj1, 'ID2': obj2, ...} or parent.var = [obj1, obj2, ...]\n becomes:\n \n \n ...\n ...\n ...\n \n \n Arguments:\n self = calling object (will be an instance of or derived from AnasysElement)\n parent_elem = the parent etree object to append to\n iterable_elem_name = the name of the dict or list variable (will become etree element name)\n iterable_obj = the dict or list itself\n \"\"\"\n parent_etree = ET.SubElement(parent_elem, iterable_elem_name)\n if type(iterable_obj) == dict:\n for child in iterable_obj.values():\n new_elem = child._anasys_to_etree(child, name=child._name)\n parent_etree.append(new_elem)\n else:\n for child in iterable_obj:\n new_elem = child._anasys_to_etree(child, name=child._name)\n parent_etree.append(new_elem)\n","repo_name":"AnasysInstruments/anasys-python-tools","sub_path":"anasyspythontools/anasysfile.py","file_name":"anasysfile.py","file_ext":"py","file_size_in_byte":12068,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"6412411248","text":"import numpy as np\nimport skfuzzy as fuzz\nfrom skfuzzy import control as ctrl\nimport matplotlib.pyplot as plt\n\n\nclass BoatDriver:\n def __init__(self, mom=False):\n des_dir = ctrl.Antecedent(np.arange(-180, 181, 1), 'desired direction')\n turn = ctrl.Antecedent(np.arange(-50, 51, 1), 'turn')\n if mom:\n # mean of maximum, i.e. always values from the middle of the largest resulting output,\n # fewer values but probably more stable\n defuzzify_method = 'mom'\n else: # more values\n defuzzify_method = 'centroid'\n rudd_chng = ctrl.Consequent(np.arange(-20, 21, 1), 'rudder change', defuzzify_method=defuzzify_method)\n\n des_dir['strong left'] = fuzz.trapmf(des_dir.universe, [-180, -180, -100, -25])\n des_dir['left'] = fuzz.trimf(des_dir.universe, [-100, -25, 0])\n des_dir['middle'] = fuzz.trapmf(des_dir.universe, [-25, -3, 3, 25])\n des_dir['right'] = fuzz.trimf(des_dir.universe, [0, 25, 100])\n des_dir['strong right'] = fuzz.trapmf(des_dir.universe, [25, 100, 180, 180])\n\n turn['left'] = fuzz.trapmf(turn.universe, [-50, -50, -20, 0])\n turn['neutral'] = fuzz.trimf(turn.universe, [-20, 0, 20])\n turn['right'] = fuzz.trapmf(turn.universe, [0, 20, 50, 50])\n\n rudd_chng['strong left'] = fuzz.trapmf(rudd_chng.universe, [-20, -20, -10, -3])\n rudd_chng['left'] = fuzz.trimf(rudd_chng.universe, [-10, -3, 0])\n rudd_chng['keep'] = fuzz.trimf(rudd_chng.universe, [-3, 0, 3])\n rudd_chng['right'] = fuzz.trimf(rudd_chng.universe, [0, 3, 10])\n rudd_chng['strong right'] = fuzz.trapmf(rudd_chng.universe, [3, 10, 20, 20])\n\n rules = [ctrl.Rule(des_dir['strong left'] & turn['left'], rudd_chng['left']),\n ctrl.Rule(des_dir['left'] & turn['left'], rudd_chng['keep']),\n ctrl.Rule(des_dir['middle'] & turn['left'], rudd_chng['right']),\n ctrl.Rule(des_dir['right'] & turn['left'], rudd_chng['strong right']),\n ctrl.Rule(des_dir['strong right'] & 
turn['left'], rudd_chng['strong right']),\n\n ctrl.Rule(des_dir['strong left'] & turn['neutral'], rudd_chng['strong left']),\n ctrl.Rule(des_dir['left'] & turn['neutral'], rudd_chng['left']),\n ctrl.Rule(des_dir['middle'] & turn['neutral'], rudd_chng['keep']),\n ctrl.Rule(des_dir['right'] & turn['neutral'], rudd_chng['right']),\n ctrl.Rule(des_dir['strong right'] & turn['neutral'], rudd_chng['strong right']),\n\n ctrl.Rule(des_dir['strong left'] & turn['right'], rudd_chng['strong left']),\n ctrl.Rule(des_dir['left'] & turn['right'], rudd_chng['strong left']),\n ctrl.Rule(des_dir['middle'] & turn['right'], rudd_chng['left']),\n ctrl.Rule(des_dir['right'] & turn['right'], rudd_chng['keep']),\n ctrl.Rule(des_dir['strong right'] & turn['right'], rudd_chng['right']),\n ]\n\n self.des_dir = des_dir\n self.turn = turn\n self.rudd_chng = rudd_chng\n self.steering_ctrl = ctrl.ControlSystem(rules)\n\n def rudder_change(self, desired_direction, turn, visualization=False):\n steering = ctrl.ControlSystemSimulation(self.steering_ctrl)\n steering.input['desired direction'] = desired_direction\n steering.input['turn'] = turn\n steering.compute()\n out = steering.output['rudder change']\n if visualization:\n self.des_dir.view(sim=steering)\n self.turn.view(sim=steering)\n self.rudd_chng.view(sim=steering)\n plt.show()\n return out\n\n\nif __name__ == \"__main__\":\n driver = BoatDriver()\n desired_direction = 5\n turn = -5\n out = driver.rudder_change(desired_direction, turn, True)\n print(out)\n","repo_name":"AleksanderZawisza/Fuzzy-Boat","sub_path":"app/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37249309819","text":"import numpy as np\n\ndef dereg_value_error_handler(data_value):\n if len(data_value) < 2:\n return np.nan\n\n try:\n value_parts = data_value[0].split('$')[1]\n desired_value = int(''.join(value_parts.split(',')))\n except (IndexError, ValueError):\n desired_value = np.nan\n\n return desired_value\n\ndef dereg_value_retrieval(listing_url):\n row_info = listing_url.find_all(class_='row_info')[2].text.strip().split()\n dereg_value_from_scrape_date = dereg_value_error_handler(row_info)\n return dereg_value_from_scrape_date\n\n\n","repo_name":"shepherd333/INF1002-Web-Crawler-Project","sub_path":"Scrapers/dereg_scraper.py","file_name":"dereg_scraper.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39782469182","text":"def insertion(list):\n for i in range(len(list)):\n\n currentvalue = list[i]\n j = i - 1\n while j >= 0 and currentvalue < list[j]:\n list[j + 1] = list[j]\n j -= 1\n list[j + 1] = currentvalue\n\nlist = ['Timor-Leste', 'Cambodia', 'Philippines', 'Brunei', 'Vietnam', 'Myanmar', 'Malaysia', 'Indonesia', 'Singapore', 'Philippines', 'Timor-Leste', 'Brunei', 'Timor-Leste', 'Laos', 'Thailand', 'Thailand']\n\ninsertion(list)\nprint(list)","repo_name":"ReubMaster420/Reubs-code","sub_path":"Python Challenges/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35722056938","text":"import pgzrun\nfrom pgzero.keyboard import keyboard\n\n\nWIDTH = 500\nHEIGHT = 500\n\nhetao = Actor('小核桃')\nhetao.x, hetao.y = 100, 200\n\ndef draw():\n screen.clear()\n hetao.draw()\n\ndef update():\n if keyboard.space:\n hetao.y 
+= 5\n if hetao.y >= 500:\n hetao.x, hetao.y = 100, 200\n\npgzrun.go()","repo_name":"Myco-Chen/Mine","sub_path":"python学习/L4-6/L4-6练习题之4题.py","file_name":"L4-6练习题之4题.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15785342522","text":"import numpy as np\nimport numpy.typing as npt\nimport warnings\nfrom stoneforge.petrophysics.helpers import correct_petrophysic_estimation_range\n\n\n# Make anomalous water saturation values larger than 1 be one\ndef correct_range(sw: np.ndarray):\n sw[sw > 1] = 1\n return sw\n\ndef archie(rw: float, rt: npt.ArrayLike, phi: npt.ArrayLike, a: float,\n m: float, n: float) -> np.ndarray:\n \"\"\"Estimate the Water Saturation from Archie's [1]_ equation.\n\n Parameters\n ----------\n rw : int, float\n Water resistivity. \n rt : array_like\n Formation resistivity. \n phi : array_like\n Porosity. \n a : int, float\n Tortuosity factor.\n m : int, float\n Cementation exponent.\n n : int, float\n Saturation exponent.\n\n Returns\n -------\n sw : array_like\n Water saturation from Archie equation.\n\n References\n ----------\n .. [1] Archie GE (1942) The electrical resistivity log as an aid in determining some\n reservoir characteristics. Transactions of the AIME, 146(01), 54-62.\n\n \"\"\"\n if any(((a*rw) / (phi**m * rt))**(1/n) > 1):\n warnings.warn(UserWarning(\"saturation of water must be a value between 0 and 1\"))\n sw = ((a*rw) / (phi**m * rt))**(1/n)\n sw = correct_petrophysic_estimation_range(sw)\n return sw\n\n else:\n sw = ((a*rw) / (phi**m * rt))**(1/n)\n sw = correct_petrophysic_estimation_range(sw)\n return sw\n\n\ndef simandoux(rw: float, rt: npt.ArrayLike, phi: npt.ArrayLike, a: float,\n m: float, n: float, vsh: npt.ArrayLike,\n rsh: float) -> np.ndarray:\n \"\"\"Estimate water saturation from Simandoux [1]_ equation.\n\n Parameters\n ----------\n rw : int, float\n Water resistivity.\n rt : array_like\n True resistivity. \n phi : array_like\n Porosity.\n a : int, float\n Tortuosity factor.\n m : int, float\n Cementation exponent.\n n : int, float\n Saturation exponent.\n vsh : array_like\n Clay volume log.\n rsh : int, float\n Clay resistivity.\n\n Returns\n -------\n sw : array_like\n Water saturation from Simandoux equation.\n\n References\n ----------\n .. [1] Simandoux P (1963) Measures die techniques an milieu application a measure des\n saturation en eau, etude du comportement de massifs agrileux. Review du’Institute Francais\n du Patrole 18(Supplemen-tary Issue):193\n\n \"\"\"\n C = (1 - vsh) * a * rw / phi**m\n D = C * vsh / (2*rsh)\n E = C / rt\n sw = ((D**2 + E)**0.5 - D)**(2/n)\n\n sw = correct_petrophysic_estimation_range(sw)\n\n\n return sw\n\n\ndef indonesia(rw: float, rt: npt.ArrayLike, phi: npt.ArrayLike, a: float,\n m: float, n: float, vsh: npt.ArrayLike,\n rsh: float) -> np.ndarray:\n \"\"\"Estimate water saturation from Poupon-Leveaux (Indonesia) [1]_ equation.\n\n Parameters\n ----------\n rw : int, float\n Water resistivity. \n rt : array_like\n True resistivity. \n phi : array_like\n Porosity. \n vsh : array_like\n Clay volume log.\n a : int, float\n Tortuosity factor.\n m : int, float\n Cementation exponent.\n n : int, float\n Saturation exponent.\n rsh : float\n Clay resistivity.\n\n Returns\n -------\n indonesia : array_like\n Water saturation from Poupon-Leveaux equation.\n\n References\n ----------\n .. [1] Poupon, A. and Leveaux, J. 
(1971) Evaluation of Water Saturation in Shaly Formations.\n The Log Analyst, 12, 1-2.\n\n \"\"\"\n sw = ((1/rt)**0.5 / ((vsh**(1 - 0.5*vsh) / (rsh)**0.5) + (phi**m / a*rw)**0.5))**(2/n)\n sw = correct_petrophysic_estimation_range(sw)\n\n\n return sw\n\n\ndef fertl(rw: float, rt: npt.ArrayLike, phi: npt.ArrayLike, a: float,\n m: float, vsh: npt.ArrayLike, alpha: float) -> np.ndarray:\n \"\"\"Estimate water saturation from Fertl [1]_ equation.\n\n Parameters\n ----------\n rw : int, float\n Water resistivity.\n rt : array_like\n True resistivity. \n phi : array_like\n Porosity (must be effective). \n vsh : array_like\n Clay volume log. \n a : int, float\n Tortuosity factor.\n m : int, float\n Cementation exponent.\n alpha : int, float\n Alpha parameter from Fertl equation.\n\n Returns\n -------\n fertl : array_like\n Water saturation from Fertl equation.\n\n References\n ----------\n .. [1] Fertl, W. H. (1975, June). Shaly sand analysis in development wells.\n In SPWLA 16th Annual Logging Symposium. OnePetro.\n\n \"\"\"\n sw = phi**(-m/2) * ((a*rw/rt + (alpha*vsh/2)**2)**0.5 - (alpha*vsh/2))\n sw = correct_petrophysic_estimation_range(sw)\n\n\n return sw\n\n\n_sw_methods = {\n \"archie\": archie,\n \"simandoux\": simandoux,\n \"indonesia\": indonesia,\n \"fertl\": fertl\n}\n\n\ndef water_saturation(rw: float, rt: npt.ArrayLike, phi: npt.ArrayLike,\n a: float, m: float, method: str = \"archie\",\n **kwargs) -> np.ndarray:\n \"\"\"Compute water saturation from resistivity log.\n\n This is a façade for the methods:\n - archie\n - simandoux\n - indonesia\n - fertl\n\n Parameters\n ----------\n rw : int, float\n Water resistivity.\n rt : array_like\n True resistivity.\n phi : array_like\n Porosity (must be effective).\n a : int, float\n Tortuosity factor.\n m : int, float\n Cementation exponent.\n n : int, float\n Saturation exponent. Required if `method` is \"archie\", \"simandoux\" or\n \"indonesia\".\n vsh : array_like\n Clay volume log. Required if `method` is \"simandoux\", \"indonesia\" or\n \"fertl\".\n rsh : float\n Clay resistivity. Required if `method` is \"simandoux\" or \"indonesia\".\n alpha : array_like\n Alpha parameter from Fertl equation. Required if `method` is \"fertl\".\n method : str, optional\n Name of the method to be used. Should be one of\n - 'archie'\n - 'simandoux'\n - 'indonesia'\n - 'fertl'\n If not given, default method is 'archie'\n\n Returns\n -------\n water_saturation : array_like\n Water saturation for the aimed interval using the defined method.\n\n \"\"\"\n options = {}\n \n required = []\n if method == \"archie\":\n required = [\"n\"]\n elif method == \"simandoux\":\n required = [\"n\", \"vsh\", \"rsh\"]\n elif method == \"indonesia\":\n required = [\"n\", \"vsh\", \"rsh\"]\n elif method == \"fertl\":\n required = [\"vsh\", \"alpha\"]\n \n for arg in required:\n if arg not in kwargs:\n msg = f\"Missing required argument for method '{method}': '{arg}'\"\n raise TypeError(msg)\n options[arg] = kwargs[arg]\n \n fun = _sw_methods[method]\n\n\n sw = fun(rw, rt, phi, a, m, **options)\n \n return sw\n","repo_name":"giecaruff/stoneforge","sub_path":"stoneforge/petrophysics/water_saturation.py","file_name":"water_saturation.py","file_ext":"py","file_size_in_byte":6891,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"71939015047","text":"import logging\nimport os\nimport pickle\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom .. 
import model as m\nfrom ..sqlalchemy_ import sa\nfrom ...mapper.model_object import Identifier\n\nlogger = logging.getLogger(\n __name__\n)\n\n\ndef _parent_identifier(\n directory: str\n) -> Optional[str]:\n \"\"\"\n Read the parent identifier for a fit in a directory.\n\n Defaults to None if no .parent_identifier file is found.\n \"\"\"\n try:\n with open(f\"{directory}/.parent_identifier\") as f:\n return f.read()\n except FileNotFoundError:\n return None\n\n\nclass Scraper:\n def __init__(\n self,\n directory: Union[Path, str],\n session: sa.orm.Session\n ):\n \"\"\"\n Facilitates scraping of data output into a directory\n into the database.\n\n Parameters\n ----------\n directory\n A directory in which data has been stored\n session\n A database session\n \"\"\"\n self.directory = directory\n self.session = session\n\n def scrape(self):\n \"\"\"\n Recursively scrape fits from the directory and\n add them to the session\n \"\"\"\n for fit in self._fits():\n self.session.add(\n fit\n )\n for grid_search in self._grid_searches():\n self.session.add(\n grid_search\n )\n\n def _fits(self):\n \"\"\"\n Scrape data output into a directory tree so it can be added to the\n aggregator database.\n\n Returns\n -------\n Generator yielding Fit database objects\n \"\"\"\n logger.info(\n f\"Scraping directory {self.directory}\"\n )\n from autofit.aggregator.aggregator import Aggregator as ClassicAggregator\n aggregator = ClassicAggregator(\n self.directory\n )\n logger.info(\n f\"{len(aggregator)} searches found\"\n )\n for item in aggregator:\n is_complete = os.path.exists(\n f\"{item.directory}/.completed\"\n )\n\n parent_identifier = _parent_identifier(\n directory=item.directory\n )\n\n model = item.model\n samples = item.samples\n\n try:\n instance = samples.max_log_likelihood_instance\n except (AttributeError, NotImplementedError):\n instance = None\n\n identifier = _make_identifier(item)\n\n logger.info(\n f\"Creating fit for: \"\n f\"{item.search.paths.path_prefix} \"\n f\"{item.search.unique_tag} \"\n f\"{item.search.name} \"\n f\"{identifier} \")\n\n try:\n fit = self._retrieve_model_fit(\n item\n )\n logger.warning(\n f\"Fit already existed with identifier {identifier}\"\n )\n except sa.orm.exc.NoResultFound:\n try:\n log_likelihood = samples.max_log_likelihood_sample.log_likelihood\n except AttributeError:\n log_likelihood = None\n fit = m.Fit(\n id=identifier,\n name=item.search.name,\n unique_tag=item.search.unique_tag,\n model=model,\n instance=instance,\n is_complete=is_complete,\n info=item.info,\n max_log_likelihood=log_likelihood,\n parent_id=parent_identifier\n )\n\n pickle_path = Path(item.pickle_path)\n _add_pickles(\n fit,\n pickle_path\n )\n\n yield fit\n\n def _grid_searches(\n self\n ):\n \"\"\"\n Retrieve grid searches recursively from an output directory by\n searching for the .is_grid_search file.\n\n Should be called after adding Fits as it relies on querying fits\n\n Yields\n ------\n Fit objects representing grid searches with child fits associated\n \"\"\"\n from autofit.aggregator.aggregator import Aggregator as ClassicAggregator\n for root, _, filenames in os.walk(self.directory):\n if \".is_grid_search\" in filenames:\n path = Path(root)\n\n is_complete = (path / \".completed\").exists()\n\n with open(\n path / \".is_grid_search\"\n ) as f:\n unique_tag = f.read()\n\n grid_search = m.Fit(\n id=path.name,\n unique_tag=unique_tag,\n is_grid_search=True,\n parent_id=_parent_identifier(\n root\n ),\n is_complete=is_complete\n )\n\n pickle_path = path / \"pickles\"\n 
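# load any pickled objects saved alongside the grid search and\n                # attach them to the grid-search Fit so they end up in the database\n                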
_add_pickles(\n grid_search,\n pickle_path\n )\n\n aggregator = ClassicAggregator(\n root\n )\n for item in aggregator:\n fit = self._retrieve_model_fit(\n item\n )\n grid_search.children.append(\n fit\n )\n yield grid_search\n\n def _retrieve_model_fit(\n self,\n item\n ) -> m.Fit:\n \"\"\"\n Retrieve a Fit, if one exists, corresponding to a given SearchOutput\n\n Parameters\n ----------\n item\n A SearchOutput from the classic Aggregator\n\n Returns\n -------\n A fit with the corresponding identifier\n\n Raises\n ------\n NoResultFound\n If no fit is found with the identifier\n \"\"\"\n return self.session.query(\n m.Fit\n ).filter(\n m.Fit.id == _make_identifier(\n item\n )\n ).one()\n\n\ndef _make_identifier(\n item\n) -> str:\n \"\"\"\n Create a unique identifier for a SearchOutput.\n\n This accounts for the Search, Model and unique_tag\n\n Parameters\n ----------\n item\n An output from the classic aggregator\n\n Returns\n -------\n A unique identifier that is sensitive to changes that affect\n the search\n \"\"\"\n search = item.search\n model = item.model\n return str(Identifier([\n search,\n model,\n search.unique_tag\n ]))\n\n\ndef _add_pickles(\n fit: m.Fit,\n pickle_path: Path\n):\n \"\"\"\n Load pickles from the path and add them to the database.\n\n Parameters\n ----------\n fit\n A fit to which the pickles belong\n pickle_path\n The path in which the pickles are stored\n \"\"\"\n try:\n filenames = os.listdir(\n pickle_path\n )\n except FileNotFoundError as e:\n logger.exception(e)\n filenames = []\n\n for filename in filenames:\n\n try:\n with open(\n pickle_path / filename,\n \"r+b\"\n ) as f:\n fit[\n filename.split(\".\")[0]\n ] = pickle.load(f)\n except (pickle.UnpicklingError, ModuleNotFoundError) as e:\n\n if filename == \"dynesty.pickle\":\n continue\n\n raise pickle.UnpicklingError(f\"Failed to unpickle: {pickle_path} {filename}\") from e\n","repo_name":"Jammy2211/autolens_abell_1201","sub_path":"PyAuto/PyAutoFit/autofit/database/aggregator/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38478392150","text":"import json\n\nfrom JoinCensusWithTweets import GeoShape, CensusLoader, Integrator\n# Parse json file\nfrom utils import MongoDbUtils, GraphDbUtils\n\n\nclass BaseParser:\n\n def __init__(self, json_filename, verbose=0):\n self.filename = json_filename\n self.verbose = verbose\n self.all_gdf, self.main_crs = GeoShape.load_shape_files()\n\n def extract_tweets(self):\n tweets = []\n data_package = []\n\n # integrate self.main_crs in tweets\n with open(self.filename, 'rb') as input_file:\n count = 0\n for line in input_file:\n obj = json.loads(line)\n tweet = GraphDbUtils.make_data(obj)\n tweet_package = MongoDbUtils.make_data(obj)\n\n if len(tweet) != 0:\n tweets.append(tweet)\n if len(tweet_package) != 0:\n data_package.append(tweet_package)\n if self.verbose == 1:\n # print(count, '. \\t ', tweet)\n print(count, '. 
\\t ', tweet_package)\n count = count + 1\n\n return tweets, data_package\n\n def extract_census_data(self):\n all_census_df = CensusLoader.load_census_concat()\n pop_census_gdf = Integrator.join_census_shpfile(self.all_gdf, all_census_df)\n return pop_census_gdf\n","repo_name":"JulinaM/DrugAbusePrevention","sub_path":"BaseParser.py","file_name":"BaseParser.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16172086327","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.forms import inlineformset_factory\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom store.models import *\nfrom .forms import ProductForm, OrderForm, ImageForm\nfrom .filters import OrderFilter, OrderFilterC, ItemFilter, ProductFilter, SearchRegistered, SearchGuest\nfrom store.decorators import unauthenticated_user\n\n# Create your views here.\n\n@unauthenticated_user\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('staff:index'))\n else:\n messages.info(request, 'Username or password is incorrect.')\n return render(request, \"staff/login.html\")\n\ndef logoutUser(request):\n logout(request)\n return HttpResponseRedirect(reverse('staff:login'))\n\n@staff_member_required(login_url='staff:login')\ndef index(request):\n\n orders = Order.objects.filter(complete=True).order_by('-date_ordered')\n total_orders = orders.count()\n delivered = orders.filter(status='Delivered').count()\n pending = orders.filter(status='Pending').count()\n\n filterOrder = OrderFilter(request.GET, queryset=orders)\n orders = filterOrder.qs\n \n paginatedOrder = Paginator(filterOrder.qs, 3)\n order_page_number = request.GET.get('page')\n order_page_obj = paginatedOrder.get_page(order_page_number)\n\n context = {\n 'orders': orders,\n 'total_orders': total_orders,\n 'delivered': delivered,\n 'pending': pending,\n 'filterOrder': filterOrder,\n 'order_page_obj': order_page_obj\n }\n return render(request, 'staff/index.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef deleteOrder(request, pk):\n order = Order.objects.get(id=pk)\n\n if request.method == \"POST\":\n order.delete()\n return HttpResponseRedirect(reverse('staff:index'))\n\n context = {\n 'order': order\n }\n return render(request, 'staff/delete_order.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef updateOrder(request, pk):\n order = Order.objects.get(id=pk)\n form = OrderForm(instance=order)\n\n if request.method == \"POST\":\n form = OrderForm(request.POST, instance=order)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('staff:index'))\n\n context = {\n 'form': form,\n 'order': order\n }\n return render(request, 'staff/update_order.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef customers(request):\n registered = User.objects.exclude(is_staff=True).order_by('-id')\n guest = Customer.objects.exclude(is_user=True).order_by('-id')\n staff = User.objects.filter(is_staff=True)\n 
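# apply the registered/guest search filters to each queryset, then paginate the filtered results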
\n filterRegistered = SearchRegistered(request.GET, queryset=registered)\n registered = filterRegistered.qs\n\n filterGuest = SearchGuest(request.GET, queryset=guest)\n guest = filterGuest.qs\n\n paginatedRegistered = Paginator(filterRegistered.qs, 5)\n registered_page_number = request.GET.get('page')\n registered_page_obj = paginatedRegistered.get_page(registered_page_number)\n\n paginatedGuest = Paginator(filterGuest.qs, 5)\n guest_page_number = request.GET.get('page')\n guest_page_obj = paginatedGuest.get_page(guest_page_number)\n\n context = {\n 'registered': registered,\n 'guest': guest,\n 'staff': staff,\n 'filterRegistered': filterRegistered,\n 'filterGuest': filterGuest,\n 'registered_page_obj': registered_page_obj,\n 'guest_page_obj': guest_page_obj\n }\n return render(request, 'staff/customer.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef registered_profile(request, pk):\n user = User.objects.get(id=pk)\n customer = Customer.objects.get(user=user)\n\n try:\n orders = customer.order_set.filter(complete=True).order_by('-id')\n count = orders.count()\n address = ShippingAddress.objects.filter(customer=customer).last()\n \n filterOrderC = OrderFilterC(request.GET, queryset=orders)\n orders = filterOrderC.qs \n\n #orderitems = OrderItem.objects.all() \n\n #filterItem = ItemFilter(request.GET, queryset=orderitems)\n #orderitems = filterItem.qs\n except:\n orders = {}\n count = 0\n address = {}\n filterOrderC = {}\n filterItem = {}\n\n context = {\n 'user': user,\n 'customer': customer,\n 'count': count,\n 'orders': orders,\n 'address': address,\n 'filterOrderC': filterOrderC\n #'filterItem': filterItem\n }\n return render(request, 'staff/registered_profile.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef guest_profile(request, pk):\n guest = Customer.objects.get(id=pk)\n\n try:\n orders = guest.order_set.filter(complete=True).order_by('-id')\n count = orders.count()\n address = ShippingAddress.objects.filter(customer=guest).last()\n\n filterOrderC = OrderFilterC(request.GET, queryset=orders)\n orders = filterOrderC.qs\n\n #orderitems = OrderItem.objects.all() \n\n #filterItem = ItemFilter(request.GET, queryset=orderitems)\n #orderitems = filterItem.qs\n except:\n orders = {}\n count = 0\n address = {}\n filterItem = {}\n filterOrderC = {}\n\n context = {\n 'guest': guest,\n 'count': count,\n 'orders': orders,\n 'address': address,\n 'filterOrderC': filterOrderC\n #'filterItem': filterItem\n }\n\n return render(request, 'staff/guest_profile.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef createProduct(request):\n form = ProductForm()\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('staff:products'))\n\n context = {\n 'form': form\n }\n return render(request, 'staff/crud.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef updateProduct(request, pk):\n product = Product.objects.get(id=pk)\n form = ProductForm(instance=product)\n\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('staff:products'))\n\n context = {\n 'form': form\n }\n return render(request, 'staff/crud.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef deleteProduct(request, pk):\n product = Product.objects.get(id=pk)\n\n if request.method == \"POST\":\n product.delete()\n return 
HttpResponseRedirect(reverse('staff:products'))\n\n context = {\n 'product': product\n }\n return render(request, 'staff/delete_product.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef products(request):\n products = Product.objects.all().order_by('-date_created')\n\n filterProduct = ProductFilter(request.GET, queryset=products)\n products = filterProduct.qs\n\n paginatedProduct = Paginator(filterProduct.qs, 5)\n product_page_number = request.GET.get('page')\n product_page_obj = paginatedProduct.get_page(product_page_number)\n\n context = {\n 'products': products,\n 'filterProduct': filterProduct,\n 'product_page_obj': product_page_obj\n }\n return render(request, 'staff/products.html', context)\n\n@staff_member_required(login_url='staff:login')\ndef product_details(request, pk):\n product = Product.objects.get(id=pk)\n \n context = {\n 'product': product\n }\n return render(request, \"staff/product_details.html\", context)\n\n@staff_member_required(login_url='staff:login')\ndef addImages(request, pk):\n ImageFormSet = inlineformset_factory(Product, Image, fields=('image',), extra=5)\n product = Product.objects.get(id=pk)\n formset = ImageFormSet(instance=product)\n\n if request.method == \"POST\":\n formset = ImageFormSet(request.POST, request.FILES, instance=product)\n if formset.is_valid():\n formset.save()\n return HttpResponseRedirect(reverse('staff:products'))\n\n context = {\n 'formset': formset\n }\n return render(request, 'staff/add_images.html', context)\n","repo_name":"shaakirag/E-Commerce_Website","sub_path":"staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39411822251","text":"__author__ = 'Daoyuan'\nfrom BaseSolution import *\nclass RemoveElement(BaseSolution):\n def __init__(self):\n BaseSolution.__init__(self)\n def solution(self, nums, val):\n if not nums: return 0\n tmp = []\n for v in nums:\n if v != val:\n tmp.append(v)\n nums[:] = tmp[:]\n return len(tmp)","repo_name":"caunion/leetcode","sub_path":"solutions/RemoveElement.py","file_name":"RemoveElement.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"846220204","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n def maxSlidingWindow(nums: List[int], k: int) -> List[int]:\n \"\"\"\n Intuition: Monotonic stack\n Time complexity: O(N)\n Space complexity: O(N)\n \"\"\"\n output = []\n q = deque()\n l = r = 0\n\n while r < len(nums):\n while q and nums[q[-1]] < nums[r]:\n q.pop()\n q.append(r)\n\n if l > q[0]:\n q.popleft()\n\n if (r + 1) >= k:\n output.append(nums[q[0]])\n l += 1\n\n r += 1\n\n return output\n\n\nif __name__ == \"__main__\":\n print(Solution.maxSlidingWindow(nums=[1, 3, -1, -3, 5, 3, 6, 7], k=3))\n","repo_name":"jovanvuleta/leetcoding","sub_path":"leetcode/medium/sliding_window/sliding_window_maximum_239.py","file_name":"sliding_window_maximum_239.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14143129710","text":"'''\nThis script will divide the whole GTFS database into several individual GTFS real-time \ntrip-update database and add index.\n'''\n\nimport pymongo\nfrom datetime import timedelta, date\nimport time\n\nclient = pymongo.MongoClient('mongodb://localhost:27017/')\ndb_feed = 
client.trip_update\ndb_tripupdate = db_feed.full_trip_update\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\nstart_date = date(2018, 9, 3)\nend_date = date(2019, 1, 31)\n\nfor single_date in daterange(start_date, end_date):\n today_date = single_date.strftime(\"%Y%m%d\") # date\n print(str(today_date))\n db_today_feeds=(db_tripupdate.find({\"start_date\": str(today_date)},no_cursor_timeout=True))\n print(\"---------------\",today_date,\": Query\",\"---------------\")\n for each_feed in db_today_feeds:\n db_feed[today_date].insert_one(each_feed)\n print(\"---------------\",today_date,\": Insert\",\"---------------\")\n db_feed[today_date].create_index([(\"trip_id\",pymongo.ASCENDING)])\n print(\"---------------\",today_date,\": Index\",\"---------------\")","repo_name":"luyuliu/transfer","sub_path":"scr/mongodb/divide_collection.py","file_name":"divide_collection.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15745853231","text":"import copy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Node:\r\n def __init__(self, state=None, parent=None):\r\n self.state = state\r\n self.parent = parent\r\n self.children = []\r\n\r\n def addChildren(self, children):\r\n self.children.extend(children)\r\n \r\ndef expandAndReturnChildren(node):\r\n children = []\r\n board = node.state \r\n j = sum(line.count(1) for line in board) #number of queens already placed\r\n for i in range(0,len(board[0])):\r\n child = copy.deepcopy(board)\r\n child[j][i] = 1\r\n children.append(Node(child, board))\r\n return children\r\n\r\ndef isPossible(board):\r\n board = board.state\r\n row = sum(line.count(1) for line in board) - 1 #number of queens already placed\r\n for z in range(len(board)):\r\n if board[row][z] == 1:\r\n col = z \r\n \r\n # Check this col on left side\r\n for i in range(row):\r\n if board[i][col] == 1:\r\n return False\r\n \r\n # check right diagonal above\r\n for i, j in zip(range(row-1, -1, -1), range(col+1, (len(board)), 1) ):\r\n if board[i][j] == 1:\r\n return False\r\n \r\n # check left diagonal above\r\n for i, j in zip(range(row-1, -1, -1),range(col-1, -1, -1)):\r\n if board[i][j] == 1:\r\n return False\r\n\r\n return True\r\n \r\ndef dfs(board):\r\n frontier = []\r\n explored = []\r\n found_goal = False\r\n goalie = Node()\r\n \r\n # add initial state to frontier\r\n frontier.append(Node(board, None))\r\n \r\n while not found_goal:\r\n # goal test\r\n if (sum(line.count(1) for line in frontier[0].state) == len(board)):\r\n found_goal = True\r\n goalie = frontier[0]\r\n break\r\n \r\n # expand the first in the frontier\r\n children = expandAndReturnChildren(frontier[0])\r\n # add children list to the expanded node\r\n frontier[0].addChildren(children)\r\n # add to the explored list\r\n explored.append(frontier[0])\r\n # remove the expanded frontier\r\n del frontier[0]\r\n # add children to the frontier\r\n index = 0\r\n for child in children:\r\n if isPossible(child):\r\n frontier.insert(index,child)\r\n index = index + 1\r\n \r\n solution = goalie.state\r\n\r\n return solution\r\n\r\ndef drawChessboard(solution):\r\n # print the solution as characters in the console\r\n print(\"1 represents a queen | 0 represents an empty space\\n\")\r\n print('\\n'.join([''.join(['{:2}'.format(item) for item in row]) for row in solution]))\r\n \r\n # create the chessboard and display it with 
matplotlib\r\n plt.figure(figsize=(len(solution),len(solution)))\r\n plt.title(\"Solution for %i queens on a %i x %i chessboard\" %(N, N, N))\r\n chessboard = np.zeros((len(solution),len(solution)))\r\n chessboard[1::2,0::2] = 1\r\n chessboard[0::2,1::2] = 1\r\n plt.imshow(chessboard, cmap='binary')\r\n \r\n # hide figure axes\r\n ax = plt.gca()\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n \r\n # plot the queens from the solution onto the chessboard\r\n for i in range(len(solution)):\r\n for j in range(len(solution)):\r\n if solution[j][i] == 1:\r\n plt.text(i, j, '\\u265b', fontsize=30, ha='center', va='center', color='black' if (i - j) % 2 == 0 else 'white')\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n print(\"\\n=== Solving N-Queen Problem using the DFS Search Algorithm ===\")\r\n print(\"\\n\" + \"-\"*90 + \"\\nSolution:\\n\")\r\n N = int(input(\"Enter number of queens (minimum 4): \"))\r\n while N < 4:\r\n print(\"Number of queens must be at least 4.\")\r\n N = int(input(\"Enter number of queens: \"))\r\n \r\n board = [[0 for i in range(N)] for j in range(N)]\r\n solution = dfs(board)\r\n drawChessboard(solution)","repo_name":"mintchococookies/8-queens","sub_path":"8_queens.py","file_name":"8_queens.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8675586348","text":"# import library\r\nimport json\r\nimport arcpy\r\nimport requests\r\nimport logging\r\n\r\n# Function for changing list resp into dictionary with the bus route's direction and sequence stop number as key\r\ndef parseRouteETA(resp):\r\n result = {}\r\n for etaData in resp:\r\n result.setdefault((etaData['dir'], etaData['seq']), []).append(etaData)\r\n return result\r\n\r\ndef updateETA():\r\n\r\n try:\r\n\r\n # online url link for first extraction\r\n kmbETA_url = \"https://data.etabus.gov.hk/v1/transport/kmb/route-eta/\"\r\n # Used as the current indicator; stores route No. and service type\r\n currQ = None\r\n # used as a pointer into the currently fetched online data\r\n idx = 0\r\n # store the length of the currently fetched online data\r\n len_of_data = 0\r\n\r\n logging.info('Start Data Update...')\r\n with arcpy.da.UpdateCursor(\"GDB/KMB.gdb/ETA\",\r\n ('route', 'service_type', 'seq', 'eta_seq',\r\n 'eta', 'rmk_tc', 'rmk_sc', 'rmk_en', 'timestamp')) as uCursor:\r\n for row in uCursor:\r\n if (row[0], row[1]) != currQ:\r\n # Change currQ to match with RS and load new data\r\n currQ = (row[0], row[1])\r\n query_url = kmbETA_url + r\"{}/{}\".format(currQ[0], currQ[1])\r\n etaResp = requests.get(url=query_url)\r\n resp_data = json.loads(etaResp.text)['data']\r\n routeETA_data = parseRouteETA(resp_data)\r\n # reset pointer and length of the currently fetched online data\r\n idx = 0\r\n len_of_data = len(resp_data)\r\n # update data when the index is less than the total length of the currently fetched online data \r\n # and the route stop sequence is the same as that of the row pointed to by idx \r\n if ((idx < len_of_data ) and (row[2] == list(resp_data[idx].values())[4])):\r\n # update current row\r\n row[3:] = list(resp_data[idx].values())[8:]\r\n uCursor.updateRow(row)\r\n # point to next row of online data\r\n idx += 1\r\n else:\r\n # reset row to None\r\n row[3:] = [None] * 6\r\n uCursor.updateRow(row)\r\n\r\n\r\n except Exception as inst:\r\n print(inst)\r\n\r\n logging.info('Main Finished')\r\n print(\"Program Finished. 
Please check the log file for more information.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n updateETA()\r\n","repo_name":"yaucp/KMB","sub_path":"updateETA.py","file_name":"updateETA.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42871307417","text":"from ppb import BaseScene, RectangleSprite, Text, Vector\nfrom ppb.buttons import Primary\nfrom ppb.events import ButtonReleased, StartScene, StopScene\n\nfrom smugglersrun import font\nfrom smugglersrun.sandbox import Sandbox\nfrom smugglersrun.systems import BackgroundMusic, QueueBackgroundMusic\nfrom smugglersrun.utils import sprite_contains_point\n\n\nclass Display(RectangleSprite):\n height = 0.5\n text: str = \"Default Text\"\n font = font.button\n\n @property\n def image(self):\n return Text(self.text, font=self.font, color=font.color)\n\n\nclass LargeDisplay(Display):\n height = 2\n font = font.title\n\n\nbgm = BackgroundMusic(\"smugglersrun/resources/bgm.wav\", play_forever=True)\n\n\nclass Credits(BaseScene):\n\n def __init__(self):\n super().__init__()\n self.back_button = Display(text=\"Back\", position=(-10, -5.5))\n self.add(self.back_button, tags=[\"back_button\"])\n\n self.add(Display(text=\"Game Design and Programming By:\", position=Vector(-6, 5)))\n self.add(Display(text=\"Piper Thunstrom\", position=Vector(-6, 4)))\n\n self.add(Display(text=\"Font Anita Semi-Square By:\", position=Vector(-6.75, 2.5)))\n self.add(Display(text=\"Gustavo Paz -- Used under CC-SA 4.0\", position=Vector(-2.9, 1.5))) # Last 3.5 3.25\n\n self.add(Display(text=\"Images by:\", position=(-9, 0))) # Last: 7\n self.add(Display(text=\"Kenney studios\", position=(-6, -1)))\n\n self.add(Display(text=\"Music Ludum Dare 28 - Track 3 by:\", position=(-5.47, -2.5))) # 5.75 5\n self.add(Display(text=\"Abstraction -- www.abstractionmusic.com\", position=(-2.62, -3.5))) # 2.75 2.5\n\n def on_button_released(self, event: ButtonReleased, signal):\n if event.button is not Primary:\n return\n if sprite_contains_point(self.back_button, event.position):\n signal(StopScene())\n\n\nclass Menu(BaseScene):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.add(LargeDisplay(text=\"Smuggler's Run\", position=Vector(0, 5)))\n\n self.credits_button = Display(text=\"Credits\", position=Vector(0, -5))\n self.add(self.credits_button, tags=[\"button\"])\n\n self.play_game_button = Display(text=\"Play\", position=Vector(0, -4))\n self.add(self.play_game_button, tags=[\"button\"])\n\n def on_scene_started(self, event, signal):\n signal(QueueBackgroundMusic(bgm))\n\n def on_button_released(self, event: ButtonReleased, signal):\n if event.button is not Primary:\n return\n for button in self.get(tag=\"button\"):\n if sprite_contains_point(button, event.position):\n if button is self.credits_button:\n signal(StartScene(Credits))\n elif button is self.play_game_button:\n signal(StartScene(Sandbox))\n","repo_name":"pathunstrom/game-jam-gmtk-2020","sub_path":"smugglersrun/src/smugglersrun/menu/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71100791047","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 4 18:49:51 2020\r\n\r\n@author: douzi\r\n\"\"\"\r\n\r\nimport re\r\n\r\ndef word(text):\r\n return re.findall('[a-z]', text.lower())\r\n\r\ndef train(features):\r\n model = {}\r\n for f in 
features:\r\n model[f] = model.get(f, 0) + 1\r\n \r\n return model\r\n\r\ndef main():\r\n statistic = train(word(open('./file/file_2005_2.txt').read()))\r\n statistic = list(statistic.items())\r\n \r\n statistic.sort(key=lambda x:(x[1], x[0]), reverse=True)\r\n print(statistic)\r\n \r\n \r\nif __name__=='__main__':\r\n main()","repo_name":"douzujun/Python-Foundation-Suda","sub_path":"苏大上机代码/python_project/03_历年真题期末期中/RealExercise3/py02_2005.py","file_name":"py02_2005.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"} +{"seq_id":"33501736229","text":"class Solution: \n def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: \n seats=[0]*(n+1) \n\n for i,j,k in bookings: \n seats[i-1] += k \n seats[j] -= k \n seats.pop() \n\n for i in range(1,n): \n seats[i] += seats[i-1] \n\n return seats","repo_name":"dagiTensay/competitve-programming","sub_path":"1109-corporate-flight-bookings/1109-corporate-flight-bookings.py","file_name":"1109-corporate-flight-bookings.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12857568043","text":"import cv2\n\ndef video_reader(cap, factor=0.6):\n # is_first = True\n\n ret, frame = cap.read()\n\n if ret:\n h, w = frame.shape[:2]\n h *= factor\n w *= factor\n\n frame = cv2.resize(frame, (int(w), int(h)))\n # print(frame.shape)\n \n # if is_first:\n # cv2.imwrite('ParkingSaved.jpg', frame)\n # is_first = False\n\n return ret, frame","repo_name":"sudo-Erno/Parking-Counter","sub_path":"video_reader.py","file_name":"video_reader.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31771460884","text":"# Author: Michał Hemperek\n# Complexity: O(n)\n# At the very beginning we pick from the array the elements larger\n# than its length, because they would be picked anyway.\n# Then we write down the count of elements of each value into an\n# auxiliary array where the element's value corresponds to an equal\n# index. At the end we pick the largest elements that will\n# not melt before we reach them
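\n#\n# Quick check of the idea: for S = [1, 3, 2], every element taken melts the\n# remaining ones by one more unit, so taking 3, then 2, then 1 collects\n# 3 + (2-1) + 0 = 4, exactly what snow(S) returns.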
\n\nfrom zad2testy import runtests\n\n\ndef snow(S):\n total = 0\n j = 0\n aux = [0 for i in range(len(S)+1)]\n\n for i in range(len(S)):\n if S[i] > len(S):\n total += S[i] - j\n j += 1\n else:\n aux[S[i]] += 1\n\n for i in range(len(S), -1, -1):\n while aux[i] > 0 and i-j > 0:\n total += i-j\n j += 1\n aux[i] -= 1\n if i < j:\n break\n\n return total\n\n\n# change all_tests to True to run all the tests\nruntests(snow, all_tests=True)\n","repo_name":"Rellikeht/zadanka","sub_path":"asd/zad2/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72819521927","text":"\"\"\"Module for handling lines in PowerFactory.\"\"\"\nfrom sinfactory.component import Component\n\n\nclass Line(Component):\n \"\"\"Class for interfacing with powerfactory lines.\"\"\"\n\n def __init__(self, pf_object):\n \"\"\"PowerFactoryLine constructor\n\n Args:\n pf_object: The line object\n \"\"\"\n super().__init__(pf_object)\n\n self.f_bus_cub = self.pf_object.bus1\n self.t_bus_cub = self.pf_object.bus2\n self.f_bus = pf_object.bus1.GetFullName().split(\"\\\\\")[-2].split(\".\")[0]\n self.t_bus = pf_object.bus2.GetFullName().split(\"\\\\\")[-2].split(\".\")[0]\n self.switches = [self.pf_object.bus1.cpCB,\n self.pf_object.bus2.cpCB]\n\n @property\n def loading(self):\n \"\"\"The loading of the line in percent of rating.\"\"\"\n return self.get_attribute(\"c:loading\")\n\n @property\n def p(self):\n \"\"\"The active power flow on the line.\"\"\"\n return self.get_attribute(\"m:P:bus1\")\n\n","repo_name":"Hofsmo/SinFactory","sub_path":"sinfactory/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"2421637537","text":"# Check if RDS Instances are tagged as per the tagging policy.\r\n# Report if any of the required tags (Block ID, Billing code, etc.) are not present or don't have values.\r\n# (fully tagged is COMPLIANT, otherwise NON_COMPLIANT)\r\n#\r\n# Trigger Type: Configuration AWS::RDS::DBInstance and Periodic 24 hrs.\r\n# Scope of Changes: AWS::RDS::DBInstance.\r\n\r\nimport json\r\nimport boto3\r\n\r\nAPPLICABLE_RESOURCES = ['AWS::RDS::DBInstance']\r\nREQUIRED_TAG_KEYS = ['Block ID', 'Billing code','OAR/OPR ID','Business Application CI','Environment','Confidentiality','Integrity','Availability']\r\n\r\ndef check_tags(tags):\r\n\r\n for key in REQUIRED_TAG_KEYS:\r\n if key not in tags or not tags[key]:\r\n return False\r\n return True\r\n\r\ndef evaluate_compliance(configuration_item):\r\n if configuration_item['resourceType'] not in APPLICABLE_RESOURCES:\r\n return {\r\n 'compliance_type': 'NOT_APPLICABLE',\r\n 'annotation': 'The rule doesn\\'t apply to resources of type ' +\r\n configuration_item['resourceType'] + '.'\r\n }\r\n\r\n if configuration_item['configurationItemStatus'] == 'ResourceDeleted':\r\n return {\r\n 'compliance_type': 'NOT_APPLICABLE',\r\n 'annotation': 'The configurationItem was deleted and therefore cannot be validated.'\r\n }\r\n\r\n current_tags = configuration_item['tags']\r\n compliant = check_tags(current_tags)\r\n\r\n if compliant:\r\n return {\r\n 'compliance_type': 'COMPLIANT',\r\n 'annotation': 'All required tags are set'\r\n }\r\n else:\r\n return {\r\n 'compliance_type': 'NON_COMPLIANT',\r\n 'annotation': 'One or more required tags are missing or empty'\r\n 
}\r\n\r\n\r\ndef lambda_handler(event, _context):\r\n invoking_event = json.loads(event['invokingEvent'])\r\n configuration_item = invoking_event['configurationItem']\r\n\r\n result_token = 'No token found.'\r\n if 'resultToken' in event:\r\n result_token = event['resultToken']\r\n\r\n evaluation = evaluate_compliance(configuration_item)\r\n\r\n config = boto3.client('config')\r\n config.put_evaluations(\r\n Evaluations=[\r\n {\r\n 'ComplianceResourceType':\r\n configuration_item['resourceType'],\r\n 'ComplianceResourceId':\r\n configuration_item['resourceId'],\r\n 'ComplianceType':\r\n evaluation['compliance_type'],\r\n 'Annotation':\r\n evaluation['annotation'],\r\n 'OrderingTimestamp':\r\n configuration_item['configurationItemCaptureTime']\r\n },\r\n ],\r\n ResultToken=result_token\r\n )\r\n","repo_name":"ebindavis17/Shell","sub_path":"RDS_022.py","file_name":"RDS_022.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1387216866","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom ydata_profiling import ProfileReport\r\ndata = pd.read_csv('D:\\\\python\\\\MLProjects\\\\MLForCloudDeployment_Heroku\\\\SourceFiles\\\\winequality-red.csv')\r\nprint(data.describe())\r\nprint('Checking Null values in the data set')\r\nprint(data.isnull().sum())\r\n#report = ProfileReport(data,title='Profile Report for SVM')\r\n#report.to_file('ProfileReportForSVM')\r\nsns.scatterplot(data=data,x='density',y='pH')\r\nplt.show()\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nnew_data = scaler.fit_transform(data.drop(labels=['quality'],axis=1))\r\nprint(data.columns)\r\nscaled_data = pd.DataFrame(new_data,columns=['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',\r\n 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',\r\n 'pH', 'sulphates', 'alcohol'])\r\nprint('New data columns now.')\r\nprint(scaled_data.columns)\r\nprint(scaled_data.head(20))\r\nfrom sklearn.preprocessing import RobustScaler\r\nr_scalar = RobustScaler()\r\nr_scaler_data = r_scalar.fit_transform(data.drop(labels=['quality'],axis=1))\r\nrscaled_data_df = pd.DataFrame(r_scaler_data,columns=['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',\r\n 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',\r\n 'pH', 'sulphates', 'alcohol'])\r\n\r\nx = rscaled_data_df\r\ny = data['quality']\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=101)\r\nprint(y)\r\n\r\nfrom sklearn.svm import SVC\r\nSVC_Model = SVC()\r\nSVC_Model.fit(x_train,y_train)\r\ny_predicted = SVC_Model.predict(x_test)\r\nfrom sklearn.metrics import accuracy_score\r\nprint('SVC Accuracy score is:',accuracy_score(y_test,y_predicted))\r\nprint('As checked, the accuracy of our model is quite low. 
We have to improve the accuracy using')\r\nprint('grid search cv approach to optimize the parameters to get the best accuracy')\r\n\r\ngridscv_Model = SVC(C=3,kernel='rbf')\r\ngridscv_Model.fit(x_train,y_train)\r\ny_predict = gridscv_Model.predict(x_test)\r\nprint('SVC with hyper parameter Accuracy score is:',accuracy_score(y_test,y_predict))\r\n\r\nprint('Trying with Logistic Regression')\r\nfrom sklearn.linear_model import LogisticRegression\r\nLR = LogisticRegression(multi_class='ovr')\r\nLR.fit(x_train,y_train)\r\nlr_predict = LR.predict(x_test)\r\nprint('Logistic Regression Accuracy score is:',accuracy_score(y_test,lr_predict))\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\n#grid_param ={'C':[2,5,0,1,7,10,15,23,60,100],'gamma':[1,3,6,0.1,0.00123],'kernel':('rbf','sigmoid','linear')}\r\ngrid_param = {'C': [1], 'gamma': [1], 'kernel': ['rbf']} #this is the best param we got so using this one\r\ngrid_cv =GridSearchCV(SVC(),param_grid=grid_param,verbose=1,n_jobs=2)\r\ngrid_cv.fit(x_train,y_train)\r\ngrid_predict = grid_cv.predict(x_test)\r\nprint('Best param :',grid_cv.best_params_)\r\nprint('GSR Accuracy score is:',accuracy_score(y_test,grid_predict))\r\n\r\nprint('Support vector regression')\r\nprint(''' ''')\r\n\r\nprint(''' The fit() method helps in fitting the data into a model, \r\n transform() method helps in transforming the data into a form that is more suitable for the model. \r\n Fit_transform() method, on the other hand, combines the functionalities of both fit() and transform() methods in one step''')\r\n\r\n#https://www.geeksforgeeks.org/data-analysis-with-python/?ref=shm very nice one\r\n\r\n#EDA\r\n#1.Analysis\r\nprint(''' 2.Preprocessing\r\n 1. Missing values\r\n 2. Outlier\r\n 3. scaling \r\n 4. encoding \r\n 5. Feature selection\r\n 6. Transformation\r\n 7. Feature merging \r\n 8. Imbalanced data''' )\r\n\r\n\r\n","repo_name":"vissenthil/Python","sub_path":"SVM_Implementation.py","file_name":"SVM_Implementation.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14666560672","text":"\"\"\" \nThis module parses SmarTest 7 pins-configuration files. \n\nThis parser does not implement a lexer and parser stage because the \nSmarTest 7 file contains a fairly strict line-by-line syntax. This \nmodule mainly uses a suite of regex patterns. \n\nNote to self: because the configuration file has variable fields for a\ngiven command and is line-by-line, we can probably get away with simply \nchecking the front of the line. \n\n\"\"\"\nimport os, sys, re, argparse\nfrom collections import OrderedDict\nimport st7putils \n# ----------------------------------------------------------------------------:\ndef dut_interface(): \n x = \"\"\"\nThe V9300 test head can be configured with different combinations of different\ntypes of cards to meet specific test requirements. 
\n\n\n ===============================================================================================\n| ___________________ ___________________ ___________________ ___________________ |\n| | Group 7 | | Group 8 | | Group 2 | | Group 6 | |\n| |-------------------| |-------------------| |-------------------| |-------------------| \n| | DPS.7 | | UTIL.8 | | DPS.8 | | UTIL.4 | | DPS.2 | | UTIL.3 | | DPS.6 | | UTIL.7 | |\n| | (272) | | (172) | | (268) | | (168) | | (267) | | (167) | | (271) | | (171) | \n| | -424- | | -420- | | -432- | | -428- | | -316- | | -312- | | -416- | | -412- | \n \n\n\n\n\n\n\n| | 224 | | 220 | | 224 | | 220 | | 224 | | 220 | | 224 | | 220 | |\n| | 223 | | 219 | | 224 | | 220 | | 224 | | 220 | | 224 | | 220 | |\n| | 222 | | 218 | | 222 | | 218 | | 222 | | 218 | | 222 | | 218 | |\n| | 221 | | 217 | | 222 | | 218 | | 222 | | 218 | | 222 | | 218 | |\n| ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ |\n| |\n| _______ _______ _______ _______ _______ _______ _______ _______ |\n| | DPS 7 | | Util.8| | DPS 8 | | Util.4| | DPS 7 | | Util.8| || DPS 7 | | Util.8|| |\n| | 224 | | 220 | | 224 | | 220 | | 224 | | 220 | || 224 | | 220 || |\n| | 223 | | 219 | | 224 | | 220 | | 224 | | 220 | || 224 | | 220 || |\n| | 222 | | 218 | | 222 | | 218 | | 222 | | 218 | || 222 | | 218 || |\n| | 221 | | 217 | | 222 | | 218 | | 222 | | 218 | || 222 | | 218 || |\n| ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ ^^^^^^^ | ^^^^^^^ ^^^^^^^ | | \n| | Group 7 | | Group 8 | | Group 2 | | Group 6 | |\n| ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ |\n ===============================================================================================\n \"\"\"\n print(x)\n return \n\n# ----------------------------------------------------------------------------:\n# \n\n\n\n\n# DFDM: Missing from TDC.\n# The only entry is on TDC topic 150673, \"Enabling upgrade licenses using the \n# DC Scale Extensions editor\" \n# \n# 'Data that are defined by the DC Scale Extensions Editor are stored in \n# the pin configuration file using the FW command DFDM'\n\n\nRE_HP93000_CONFIG = re.compile(\"^hp93000,config,\\d\\.\\d$\")\nRE_DDCH = re.compile(\"^DDCH\\s+(?P[\\d]+),\\s*(?P[\\d]+)$\") \nRE_PSTE = re.compile(\"^PSTE\\s(?P[\\d]+)$\")\nRE_NOOP = re.compile(\"^NOOP (?P\\\"[a-zA-Z\\d\\_.\\s]{0,128}\\\"|\\d*|(?!\\s*)*),(?P\\\"[a-zA-Z\\d\\_.]{0,128}\\\"|\\d*|(?!\\s*)*),(?P\\\"[a-zA-Z\\d\\_.]{0,128}\\\"|\\d*|(?!\\s*)*),(?P\\\"[a-zA-Z\\d\\_.]{0,128}\\\"|\\d*|(?!\\s*)*)$\")\n# TDC: 98664\nRE_DDIC = re.compile(\"^DDIC \")\n# TODO:\nRE_PSSL = re.compile(\"^PSSL\\s+(?P[OFF0-9\\.]+)\\s*,\\s*(?P[OFF0-9\\.]+)\\s*,\\s*(?P[OFF0-9\\.]+)\\s*,\\s*(?P[OFF0-9\\.]+)\\s*,\\s*\\((?P.*)\\)\")\n# TODO: Power Supply Safety Limits TDC: 143177\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nRE_PALS_NO_CHN3 = re.compile(\"PALS\\s*(?P\\d+)\\s*,\\s*(?P[\\d,\\(\\)]+)\\s*,\\s*(?P[\\d,]*)\\s*,\\s*\\((?P[\\w\\/]+)\\)\")\n# TODO: Channel 3 is typically for FVI16 HW\n\nRE_UPAS = re.compile(\"UPAS\\s+(?P\\d+)\\s*,\\s*\\\"(?P[01Xx]+)\\\"\\s*,\\s*\\((?P[\\w\\/]+)\\)\")\n\n\nRE_DFPN_TYP = re.compile(\"^DFPN\\s+(?P\\d+)\\s*,\\s*\\\"(?P[\\w\\s]*)\\\"\\s*,\\s*\\((?P[\\w\\/\\[\\]]+)\\)$\")\nRE_DFPN_GNG = re.compile(\"^DFPN\\s+\\((?P[\\d\\,\\s]+)\\),\\\"(?P[\\w\\s]*)\\\",\\((?P[\\w\\/]+)\\)$\")\n\nRE_DFPS_SNG_CHN = re.compile(\"^DFPS\\s*(?P\\d+),(?P[\\w]+),\\((?P[\\w\\/]+)\\)$\")\nRE_DFPS_GNG_RNG = re.compile(\"^DFPS\\s*\\((?P\\d+)\\-(?P\\d+)\\),(?P[\\w]+),\\((?P[\\w\\/]+)\\)$\")\nRE_DFPS_GNG_LST = 
re.compile(\"^DFPS\\s*\\((?P[\\d,]+)\\),(?P[\\w]+),\\((?P[\\w\\/]+)\\)$\") \n\nRE_DFGP = re.compile(\"^DFGP (?P[\\w]+),\\s*\\((?P[\\w,\\/\\s\\[\\]]+)\\),\\((?P[\\w\\d\\_\\/]+)\\)$\")\nRE_DFGE = re.compile(\"^DFGE (?P[\\w]+),\\\"(?P[\\s\\w\\d\\+\\-\\*]+)\\\",\\((?P[\\w\\d\\_]+)\\)$\")\n\nRE_CONF_CTX = re.compile(\"^CONF \\\"(?P\\w+)\\\"\\s*,\\s*(?P[\\w]+),(?P[\\w\\d]+),\\((?P[\\w,\\/\\[\\]]+)\\)$\")\nRE_CONF_REG = re.compile(\"^CONF (?P[\\w]+),(?P[\\w\\d]+),\\((?P[\\w,\\/\\[\\]]+)\\)$\")\n\nRE_DFPT = re.compile(\"^DFPT\\s*\\((?P[\\w\\,\\s\\@\\[\\]]+)\\),\\s*\\((?P[\\w]+)\\)$\")\n\nRE_DFUP = re.compile(\"^DFUP \\((?P[\\d\\s,]+)\\)\\s*,\\s*\\\"(?P[01xX]+)\\\"\\s*,\\s*\\((?P[\\w\\/]+)\\)$\")\nRE_UPTI = re.compile(\"^UPTI (?P